# stackcollapse.py - format perf samples with one line per distinct call stack
# SPDX-License-Identifier: GPL-2.0
#
# This script's output has two space-separated fields.  The first is a semicolon
# separated stack including the program name (from the "comm" field) and the
# function names from the call stack.  The second is a count:
#
#  swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2
#
# The file is sorted according to the first field.
#
# Input may be created and processed using:
#
#  perf record -a -g -F 99 sleep 60
#  perf script report stackcollapse > out.stacks-folded
#
# (perf script record stackcollapse works too).
#
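# The folded output is the input format consumed by flame graph generators;
# for example (assuming flamegraph.pl from Brendan Gregg's FlameGraph
# repository is available -- it is not shipped with perf):
#
#  ./flamegraph.pl out.stacks-folded > out.svg
#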
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.

from __future__ import print_function

import os
import sys
from collections import defaultdict
from optparse import OptionParser, make_option

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from EventClass import *

# command line parsing

option_list = [
    # formatting options for the bottom entry of the stack
    make_option("--include-tid", dest="include_tid",
                 action="store_true", default=False,
                 help="include thread id in stack"),
    make_option("--include-pid", dest="include_pid",
                 action="store_true", default=False,
                 help="include process id in stack"),
    make_option("--no-comm", dest="include_comm",
                 action="store_false", default=True,
                 help="do not separate stacks according to comm"),
    make_option("--tidy-java", dest="tidy_java",
                 action="store_true", default=False,
                 help="beautify Java signatures"),
    make_option("--kernel", dest="annotate_kernel",
                 action="store_true", default=False,
                 help="annotate kernel functions with _[k]")
]

parser = OptionParser(option_list=option_list)
(opts, args) = parser.parse_args()

if len(args) != 0:
    parser.error("unexpected command line argument")
if opts.include_tid and not opts.include_comm:
    parser.error("requesting tid but not comm is invalid")
if opts.include_pid and not opts.include_comm:
    parser.error("requesting pid but not comm is invalid")
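
# With --include-pid and --include-tid the bottom (comm) entry of each stack
# carries the ids as comm-PID/TID, e.g. (values illustrative):
#
#  swapper-0/0;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2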

# event handlers

lines = defaultdict(lambda: 0)

def process_event(param_dict):
    def tidy_function_name(sym, dso):
        if sym is None:
            sym = '[unknown]'

        sym = sym.replace(';', ':')
        if opts.tidy_java:
            # the original stackcollapse-perf.pl script gives the
            # example of converting this:
            #    Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V
            # to this:
            #    org/mozilla/javascript/MemberBox:.init
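            # i.e. drop the '<' and '>' around <init>, strip the leading 'L'
            # of a JNI-style class signature, and cut everything from the
            # '(' (the argument signature) onwards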
            sym = sym.replace('<', '')
            sym = sym.replace('>', '')
            if sym[0] == 'L' and '/' in sym:
                sym = sym[1:]
            try:
                sym = sym[:sym.index('(')]
            except ValueError:
                pass

        if opts.annotate_kernel and dso == '[kernel.kallsyms]':
            return sym + '_[k]'
        else:
            return sym

    stack = list()
    if 'callchain' in param_dict:
        for entry in param_dict['callchain']:
            entry.setdefault('sym', dict())
            entry['sym'].setdefault('name', None)
            entry.setdefault('dso', None)
            stack.append(tidy_function_name(entry['sym']['name'],
                                            entry['dso']))
    else:
        param_dict.setdefault('symbol', None)
        param_dict.setdefault('dso', None)
        stack.append(tidy_function_name(param_dict['symbol'],
                                        param_dict['dso']))

    if opts.include_comm:
        comm = param_dict["comm"].replace(' ', '_')
        sep = "-"
        if opts.include_pid:
            comm = comm + sep + str(param_dict['sample']['pid'])
            sep = "/"
        if opts.include_tid:
            comm = comm + sep + str(param_dict['sample']['tid'])
        stack.append(comm)

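    # the callchain entries were appended leaf-first and the comm entry last,
    # so reversing yields the conventional comm;root;...;leaf ordering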
    stack_string = ';'.join(reversed(stack))
    lines[stack_string] = lines[stack_string] + 1

def trace_end():
    for stack in sorted(lines):
        print("%s %d" % (stack, lines[stack]))
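
# Illustration only (not executed by perf): process_event() receives the
# param_dict built by perf's scripting engine, so a hypothetical call like
#
#   process_event({'comm': 'demo', 'symbol': 'main', 'dso': 'demo',
#                  'sample': {'pid': 1, 'tid': 1}})
#   trace_end()
#
# would fold into the single line "demo;main 1".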
style='width: 0.7%;'/> -rw-r--r--Documentation/PCI/endpoint/pci-test-howto.rst174
-rw-r--r--Documentation/accounting/delay-accounting.rst42
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt37
-rw-r--r--Documentation/admin-guide/media/ipu3.rst6
-rw-r--r--Documentation/admin-guide/mm/damon/start.rst67
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst392
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst4
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst82
-rw-r--r--Documentation/admin-guide/workload-tracing.rst2
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst3
-rw-r--r--Documentation/arch/riscv/hwprobe.rst10
-rw-r--r--Documentation/block/ublk.rst2
-rw-r--r--Documentation/core-api/min_heap.rst2
-rw-r--r--Documentation/core-api/xarray.rst24
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-system.txt25
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/arm,coresight-static-replicator.yaml19
-rw-r--r--Documentation/devicetree/bindings/arm/arm,embedded-trace-extension.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.yaml7
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/blaize.yaml40
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml12
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml65
-rw-r--r--Documentation/devicetree/bindings/arm/qcom-soc.yaml9
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml64
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml94
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/stm32.yaml7
-rw-r--r--Documentation/devicetree/bindings/cache/qcom,llcc.yaml20
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.yaml7
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml2
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,prng.yaml5
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom-qce.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml32
-rw-r--r--Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml76
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml52
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml12
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml118
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml48
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml34
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml36
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml34
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml34
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/renesas,cmm.yaml12
-rw-r--r--Documentation/devicetree/bindings/dma/adi,axi-dmac.txt61
-rw-r--r--Documentation/devicetree/bindings/dma/adi,axi-dmac.yaml129
-rw-r--r--Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml79
-rw-r--r--Documentation/devicetree/bindings/dma/atmel-xdma.txt54
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml34
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml60
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,gpi.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml10
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml5
-rw-r--r--Documentation/devicetree/bindings/dts-coding-style.rst16
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml11
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml11
-rw-r--r--Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml20
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml75
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml13
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml15
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml11
-rw-r--r--Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml37
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/bosch,bme680.yaml62
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/rohm,bd79703.yaml62
-rw-r--r--Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml42
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bosch,bmi270.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml50
-rw-r--r--Documentation/devicetree/bindings/iio/light/ti,opt4060.yaml51
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/bmp085.yaml29
-rw-r--r--Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml3
-rw-r--r--Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml3
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sm8750-rpmh.yaml136
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml10
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml30
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml5
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt (renamed from Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu)0
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml23
-rw-r--r--Documentation/devicetree/bindings/iommu/qcom,iommu.yaml1
-rw-r--r--Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/google,gs101-mbox.yaml69
-rw-r--r--Documentation/devicetree/bindings/mailbox/microchip,sbi-ipc.yaml123
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,meson-ir-tx.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/amphion,vpu.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/fsl,imx6ull-pxp.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml19
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml41
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8916-venus.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml425
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml40
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml21
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml29
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-vpu.yaml31
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml53
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml125
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.yaml21
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml7
-rw-r--r--Documentation/devicetree/bindings/mfd/mediatek,mt6397.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt53
-rw-r--r--Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml75
-rw-r--r--Documentation/devicetree/bindings/mtd/davinci-nand.txt94
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml4
-rw-r--r--Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml5
-rw-r--r--Documentation/devicetree/bindings/mtd/nuvoton,ma35d1-nand.yaml95
-rw-r--r--Documentation/devicetree/bindings/mtd/ti,davinci-nand.yaml124
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ethqos.yaml8
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml4
-rw-r--r--Documentation/devicetree/bindings/nvmem/rmem.yaml1
-rw-r--r--Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml39
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml25
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt52
-rw-r--r--Documentation/devicetree/bindings/pci/mbvl,gpex40-pcie.yaml173
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/mobiveil-pcie.txt72
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml21
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml13
-rw-r--r--Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml72
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7988-pinctrl.yaml575
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq5424-tlmm.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8917-pinctrl.yaml160
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml54
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml54
-rw-r--r--Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml54
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,pinctrl-zynq.yaml61
-rw-r--r--Documentation/devicetree/bindings/power/domain-idle-state.yaml5
-rw-r--r--Documentation/devicetree/bindings/power/raspberrypi,bcm2835-power.yaml42
-rw-r--r--Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml3
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24190.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/gpio-charger.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/st,stc3117.yaml74
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml44
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml20
-rw-r--r--Documentation/devicetree/bindings/riscv/extensions.yaml10
-rw-r--r--Documentation/devicetree/bindings/riscv/spacemit.yaml28
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mxc.yaml10
-rw-r--r--Documentation/devicetree/bindings/serial/8250.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml3
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/altera/altr,sys-mgr.yaml51
-rw-r--r--Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-hhi-sysctrl.yaml14
-rw-r--r--Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt47
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas.yaml16
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml8
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/adi,ssm2518.yaml20
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml31
-rw-r--r--Documentation/devicetree/bindings/sound/awinic,aw88395.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es71x4.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es7241.yaml19
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,micfil.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,mqs.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,xcvr.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml16
-rw-r--r--Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml26
-rw-r--r--Documentation/devicetree/bindings/sound/realtek,rt5682.yaml156
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml19
-rw-r--r--Documentation/devicetree/bindings/sound/rt5682.txt98
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm1681.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm6240.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2562.yaml30
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2770.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2781.yaml38
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas27xx.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas57xx.yaml36
-rw-r--r--Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml4
-rw-r--r--Documentation/devicetree/bindings/sram/qcom,imem.yaml1
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml3
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/ufs/qcom,ufs.yaml2
-rw-r--r--Documentation/devicetree/bindings/ufs/renesas,ufs.yaml16
-rw-r--r--Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml28
-rw-r--r--Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml44
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,bdc.yaml14
-rw-r--r--Documentation/devicetree/bindings/usb/cypress,hx3.yaml24
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml20
-rw-r--r--Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml32
-rw-r--r--Documentation/devicetree/bindings/usb/ite,it5205.yaml18
-rw-r--r--Documentation/devicetree/bindings/usb/maxim,max33359.yaml9
-rw-r--r--Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml28
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,rzv2m-usb3drd.yaml36
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml24
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usbhs.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml415
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml391
-rw-r--r--Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml38
-rw-r--r--Documentation/devicetree/bindings/usb/ti,tusb73x0-pci.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/ti,usb8020b.yaml20
-rw-r--r--Documentation/devicetree/bindings/usb/ti,usb8041.yaml16
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml6
-rw-r--r--Documentation/devicetree/bindings/w1/maxim,ds2482.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml1
-rw-r--r--Documentation/devicetree/of_unittest.rst12
-rw-r--r--Documentation/driver-api/crypto/iaa/iaa-crypto.rst9
-rw-r--r--Documentation/driver-api/driver-model/devres.rst1
-rw-r--r--Documentation/driver-api/extcon.rst255
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/media/tx-rx.rst9
-rw-r--r--Documentation/driver-api/pps.rst40
-rw-r--r--Documentation/driver-api/scsi.rst5
-rw-r--r--Documentation/filesystems/bcachefs/SubmittingPatches.rst98
-rw-r--r--Documentation/filesystems/bcachefs/index.rst1
-rw-r--r--Documentation/filesystems/debugfs.rst12
-rw-r--r--Documentation/filesystems/fuse-io-uring.rst99
-rw-r--r--Documentation/filesystems/index.rst1
-rw-r--r--Documentation/filesystems/locking.rst7
-rw-r--r--Documentation/filesystems/nfs/localio.rst104
-rw-r--r--Documentation/filesystems/porting.rst16
-rw-r--r--Documentation/filesystems/proc.rst88
-rw-r--r--Documentation/filesystems/squashfs.rst14
-rw-r--r--Documentation/filesystems/vfs.rst24
-rw-r--r--Documentation/gpu/zynqmp.rst2
-rw-r--r--Documentation/iio/ad4695.rst2
-rw-r--r--Documentation/iio/adis16480.rst3
-rw-r--r--Documentation/iio/index.rst1
-rw-r--r--Documentation/iio/opt4060.rst61
-rw-r--r--Documentation/kbuild/gendwarfksyms.rst308
-rw-r--r--Documentation/kbuild/index.rst1
-rw-r--r--Documentation/kbuild/modules.rst20
-rw-r--r--Documentation/mm/damon/design.rst167
-rw-r--r--Documentation/mm/damon/monitoring_intervals_tuning_example.rst247
-rw-r--r--Documentation/mm/process_addrs.rst4
-rw-r--r--Documentation/mm/split_page_table_lock.rst4
-rw-r--r--Documentation/networking/can.rst4
-rw-r--r--Documentation/networking/mptcp-sysctl.rst2
-rw-r--r--Documentation/networking/napi.rst2
-rw-r--r--Documentation/power/video.rst2
-rw-r--r--Documentation/process/changes.rst6
-rw-r--r--Documentation/scheduler/sched-ext.rst6
-rw-r--r--Documentation/scsi/scsi_eh.rst46
-rw-r--r--Documentation/scsi/scsi_mid_low_api.rst206
-rw-r--r--Documentation/sound/designs/midi-2.0.rst18
-rw-r--r--Documentation/sunrpc/xdr/nfs4_1.x186
-rw-r--r--Documentation/trace/events.rst24
-rw-r--r--Documentation/translations/sp_SP/index.rst2
-rw-r--r--Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst248
-rw-r--r--Documentation/translations/zh_CN/devicetree/of_unittest.rst2
-rw-r--r--Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst248
-rw-r--r--Documentation/usb/usbip_protocol.rst12
-rw-r--r--Documentation/userspace-api/check_exec.rst144
-rw-r--r--Documentation/userspace-api/index.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst2
-rw-r--r--Documentation/userspace-api/ntsync.rst385
-rw-r--r--Documentation/userspace-api/sysfs-platform_profile.rst38
-rw-r--r--Documentation/virt/hyperv/hibernation.rst336
-rw-r--r--Documentation/virt/hyperv/index.rst1
-rw-r--r--Documentation/virt/kvm/api.rst12
-rw-r--r--Documentation/virt/kvm/devices/vcpu.rst14
-rw-r--r--Documentation/wmi/driver-development-guide.rst4
-rw-r--r--MAINTAINERS312
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/include/asm/elf.h6
-rw-r--r--arch/alpha/include/asm/pgtable.h2
-rw-r--r--arch/alpha/include/asm/processor.h8
-rw-r--r--arch/alpha/kernel/core_cia.c5
-rw-r--r--arch/alpha/kernel/core_marvel.c10
-rw-r--r--arch/alpha/kernel/osf_sys.c11
-rw-r--r--arch/alpha/kernel/pci.c13
-rw-r--r--arch/alpha/kernel/pci_iommu.c10
-rw-r--r--arch/alpha/lib/fpreg.c1
-rw-r--r--arch/alpha/mm/init.c2
-rw-r--r--arch/arc/Kconfig7
-rw-r--r--arch/arc/Makefile3
-rw-r--r--arch/arc/boot/dts/Makefile9
-rw-r--r--arch/arc/configs/axs101_defconfig2
-rw-r--r--arch/arc/configs/axs103_defconfig2
-rw-r--r--arch/arc/configs/axs103_smp_defconfig2
-rw-r--r--arch/arc/configs/haps_hs_defconfig2
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/hsdk_defconfig2
-rw-r--r--arch/arc/configs/nsim_700_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/tb10x_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/pgalloc.h9
-rw-r--r--arch/arc/kernel/unaligned.c5
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig.assembler6
-rw-r--r--arch/arm/boot/dts/allwinner/suniv-f1c100s-licheepi-nano.dts8
-rw-r--r--arch/arm/boot/dts/allwinner/suniv-f1c100s.dtsi24
-rw-r--r--arch/arm/boot/dts/amlogic/meson.dtsi4
-rw-r--r--arch/arm/boot/dts/aspeed/Makefile2
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjefferson.dts622
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts18
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts191
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts45
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts998
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts925
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts46
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts27
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts111
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts17
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-sbp1.dts6086
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts31
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-quanta-s6q.dts8
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-vegman-rx20.dts6
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-vegman.dtsi2
-rw-r--r--arch/arm/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts20
-rw-r--r--arch/arm/boot/dts/broadcom/bcm53340-ubnt-unifi-switch8.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm6846-genexis-xg6846b.dts244
-rw-r--r--arch/arm/boot/dts/broadcom/bcm6846.dtsi120
-rw-r--r--arch/arm/boot/dts/broadcom/bcm953012hr.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm953012k.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958522er.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958525er.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958525xmc.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958622hr.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958623hr.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958625hr.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm958625k.dts1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm988312hr.dts1
-rw-r--r--arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi6
-rw-r--r--arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_mcvevk.dts2
-rw-r--r--arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts6
-rw-r--r--arch/arm/boot/dts/marvell/mmp2-olpc-xo-1-75.dts4
-rw-r--r--arch/arm/boot/dts/mediatek/mt7623.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/Makefile3
-rw-r--r--arch/arm/boot/dts/microchip/at91-sam9x75_curiosity.dts54
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts1
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts1
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts89
-rw-r--r--arch/arm/boot/dts/microchip/sam9x60.dtsi12
-rw-r--r--arch/arm/boot/dts/microchip/sam9x7.dtsi38
-rw-r--r--arch/arm/boot/dts/microchip/sama7d65-pinfunc.h947
-rw-r--r--arch/arm/boot/dts/microchip/sama7d65.dtsi144
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts6
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts2
-rw-r--r--arch/arm/boot/dts/nvidia/tegra124-nyan.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-zii-scu2-mezz.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi5
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sl.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6sx.dtsi6
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi61
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d-mba7.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx55.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx65.dtsi6
-rw-r--r--arch/arm/boot/dts/renesas/r7s72100.dtsi10
-rw-r--r--arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi31
-rw-r--r--arch/arm/boot/dts/st/Makefile1
-rw-r--r--arch/arm/boot/dts/st/stih410-b2260.dts4
-rw-r--r--arch/arm/boot/dts/st/stih410.dtsi34
-rw-r--r--arch/arm/boot/dts/st/stm32mp131.dtsi40
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dk.dts12
-rw-r--r--arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi16
-rw-r--r--arch/arm/boot/dts/st/stm32mp151.dtsi43
-rw-r--r--arch/arm/boot/dts/st/stm32mp153c-lxa-tac-gen3.dts267
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-ev1.dts9
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen1.dts84
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen2.dts84
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi100
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi12
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi10
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi10
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi7
-rw-r--r--arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi18
-rw-r--r--arch/arm/boot/dts/ti/omap/am437x-l4.dtsi18
-rw-r--r--arch/arm/boot/dts/ti/omap/dra7-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi16
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/milbeaut_m10v_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/sama7_defconfig1
-rw-r--r--arch/arm/configs/shmobile_defconfig1
-rw-r--r--arch/arm/configs/stm32_defconfig12
-rw-r--r--arch/arm/crypto/Kconfig25
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/crc32-ce-glue.c247
-rw-r--r--arch/arm/crypto/crct10dif-ce-glue.c124
-rw-r--r--arch/arm/include/asm/cache.h6
-rw-r--r--arch/arm/include/asm/cachetype.h13
-rw-r--r--arch/arm/include/asm/ecard.h2
-rw-r--r--arch/arm/include/asm/tlb.h10
-rw-r--r--arch/arm/include/asm/vfp.h10
-rw-r--r--arch/arm/include/asm/vfpmacros.h11
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/cacheinfo.c173
-rw-r--r--arch/arm/kernel/isa.c2
-rw-r--r--arch/arm/kernel/setup.c10
-rw-r--r--arch/arm/lib/Makefile6
-rw-r--r--arch/arm/lib/crc-t10dif-core.S (renamed from arch/arm/crypto/crct10dif-ce-core.S)0
-rw-r--r--arch/arm/lib/crc-t10dif-glue.c80
-rw-r--r--arch/arm/lib/crc32-core.S (renamed from arch/arm/crypto/crc32-ce-core.S)5
-rw-r--r--arch/arm/lib/crc32-glue.c123
-rw-r--r--arch/arm/mach-at91/Kconfig11
-rw-r--r--arch/arm/mach-at91/pm.c31
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain.c2
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c8
-rw-r--r--arch/arm/mach-rpc/ecard.c2
-rw-r--r--arch/arm/mm/mmu.c17
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm/mm/pgd.c16
-rw-r--r--arch/arm/vfp/vfpinstr.h22
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/Kconfig.platforms5
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi33
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts1
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_swvp.dts1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts3
-rw-r--r--arch/arm64/boot/dts/blaize/Makefile2
-rw-r--r--arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts83
-rw-r--r--arch/arm64/boot/dts/blaize/blaize-blzp1600-som.dtsi23
-rw-r--r--arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi205
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712-d-rpi-5-b.dts37
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts42
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712.dtsi193
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts12
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-zyxel-ex3510b.dts196
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi18
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts1
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/exynos/Makefile4
-rw-r--r--arch/arm64/boot/dts/exynos/exynos850-e850-96.dts15
-rw-r--r--arch/arm64/boot/dts/exynos/exynos8895.dtsi82
-rw-r--r--arch/arm64/boot/dts/exynos/exynos9810-pinctrl.dtsi503
-rw-r--r--arch/arm64/boot/dts/exynos/exynos9810-starlte.dts119
-rw-r--r--arch/arm64/boot/dts/exynos/exynos9810.dtsi273
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990-r8s.dts115
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990-x1s-common.dtsi98
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990-x1s.dts28
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990-x1slte.dts28
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990.dtsi50
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov920.dtsi83
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-oriole.dts104
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile13
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phg.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-display.dtsi28
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-adpismarc.dts37
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios-lvds.dtso113
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios.dts98
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-proton2s.dts161
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-aristainetos3a-som-v1.dtsi1107
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-imx-lvds-hdmi-common.dtsi29
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtso44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi43
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi.dtso28
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtso44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi43
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi.dtso28
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts92
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts14
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts52
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi93
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi3
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi24
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040-db.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040-mochabin.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-db.dts5
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-crb-B.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts4
-rw-r--r--arch/arm64/boot/dts/marvell/cn9131-db.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/cn9132-db.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile19
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6359.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sata.dtso34
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-emmc.dtso33
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-sd.dtso31
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts398
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7988a.dtsi365
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi29
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-evb.dts25
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts15
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi15
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi9
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku0.dts18
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku1.dts35
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku16.dts29
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou.dtsi321
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku0.dts31
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku1.dts31
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi472
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku0.dts32
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku1.dts59
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku2.dts59
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku3.dts32
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku4.dts48
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku5.dts72
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku6.dts72
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku7.dts48
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri.dtsi316
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi1156
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188.dtsi9
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts65
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts78
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-demo.dts10
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365-evk.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts48
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516.dtsi22
-rw-r--r--arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile12
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5332.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts169
-rw-r--r--arch/arm64/boot/dts/qcom/ipq5424.dtsi519
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi24
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts113
-rw-r--r--arch/arm64/boot/dts/qcom/ipq9574.dtsi449
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts58
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts333
-rw-r--r--arch/arm64/boot/dts/qcom/msm8917.dtsi1954
-rw-r--r--arch/arm64/boot/dts/qcom/msm8939.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts21
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi9
-rw-r--r--arch/arm64/boot/dts/qcom/pm660l.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8937.dtsi150
-rw-r--r--arch/arm64/boot/dts/qcom/pmd8028.dtsi62
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8950.dtsi17
-rw-r--r--arch/arm64/boot/dts/qcom/pmih0108.dtsi68
-rw-r--r--arch/arm64/boot/dts/qcom/pmk8350.dtsi72
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts101
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-idp.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/qcs615-ride.dts343
-rw-r--r--arch/arm64/boot/dts/qcom/qcs615.dtsi3670
-rw-r--r--arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts41
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8300-ride.dts370
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8300.dtsi3548
-rw-r--r--arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000-idp.dts19
-rw-r--r--arch/arm64/boot/dts/qcom/qdu1000.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/qrb4210-rb2.dts61
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/qru1000-idp.dts19
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi82
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p.dtsi402
-rw-r--r--arch/arm64/boot/dts/qcom/sar2130p-qar2130p.dts558
-rw-r--r--arch/arm64/boot/dts/qcom/sar2130p.dtsi3123
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x-primus.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts1318
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts1325
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi52
-rw-r--r--arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts97
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts37
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670.dtsi204
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso42
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts1
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi24
-rw-r--r--arch/arm64/boot/dts/qcom/sdx75.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sm4250.dtsi39
-rw-r--r--arch/arm64/boot/dts/qcom/sm4450.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115.dtsi95
-rw-r--r--arch/arm64/boot/dts/qcom/sm6125.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-hdk.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts9
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-mtp.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-hdk.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi120
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-xiaomi-pipa.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi34
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350-hdk.dts7
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi498
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi946
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-hdk.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-mtp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi296
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-hdk.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-mtp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650-qrd.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi504
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750-mtp.dts794
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750-pmics.dtsi188
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750-qrd.dts792
-rw-r--r--arch/arm64/boot/dts/qcom/sm8750.dtsi2907
-rw-r--r--arch/arm64/boot/dts/qcom/x1e001de-devkit.dts1371
-rw-r--r--arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts320
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts60
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-crd.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts305
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts1693
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts52
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi527
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100-qcp.dts298
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi2326
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile12
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi242
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi10
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi40
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts62
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g3-white-hawk-single.dts16
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g3.dtsi12
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts298
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi73
-rw-r--r--arch/arm64/boot/dts/renesas/r9a08g045.dtsi237
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047.dtsi387
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047e37.dtsi18
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts31
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047e57.dtsi13
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts36
-rw-r--r--arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi24
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi28
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi56
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi83
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf.dtsi18
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso (renamed from arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso)6
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-single.dtsi73
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts25
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-a1.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2-manta.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi904
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-pi2.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts19
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts61
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts731
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576.dtsi169
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3582-radxa-e52c.dts743
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi81
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts82
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi443
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts702
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts802
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi151
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts60
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts894
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi805
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi18
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts2
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts2
-rw-r--r--arch/arm64/boot/dts/sprd/sc2731.dtsi6
-rw-r--r--arch/arm64/boot/dts/sprd/sc9863a.dtsi14
-rw-r--r--arch/arm64/boot/dts/sprd/sp9860g-1h10.dts9
-rw-r--r--arch/arm64/boot/dts/st/stm32mp251.dtsi234
-rw-r--r--arch/arm64/boot/dts/st/stm32mp257f-ev1.dts97
-rw-r--r--arch/arm64/boot/dts/ti/Makefile19
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-sk.dts7
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-main.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi36
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi24
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi22
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts (renamed from arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso)14
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts (renamed from arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso)13
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts158
-rw-r--r--arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso53
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk-pcie0-ep.dtso53
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts41
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi7
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso53
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts102
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi22
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-pinctrl.h19
-rw-r--r--arch/arm64/configs/defconfig33
-rw-r--r--arch/arm64/crypto/Kconfig10
-rw-r--r--arch/arm64/crypto/Makefile3
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c132
-rw-r--r--arch/arm64/hyperv/hv_core.c3
-rw-r--r--arch/arm64/hyperv/mshyperv.c4
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/hyperv-tlfs.h71
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/kvm_asm.h14
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h65
-rw-r--r--arch/arm64/include/asm/kvm_host.h136
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h18
-rw-r--r--arch/arm64/include/asm/kvm_nested.h3
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h38
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h51
-rw-r--r--arch/arm64/include/asm/memory.h5
-rw-r--r--arch/arm64/include/asm/mshyperv.h7
-rw-r--r--arch/arm64/include/asm/pgalloc.h18
-rw-r--r--arch/arm64/include/asm/stacktrace/nvhe.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h16
-rw-r--r--arch/arm64/include/asm/tlb.h21
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h3
-rw-r--r--arch/arm64/kernel/cpu_errata.c8
-rw-r--r--arch/arm64/kernel/fpsimd.c4
-rw-r--r--arch/arm64/kernel/image-vars.h3
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kvm/arch_timer.c188
-rw-r--r--arch/arm64/kvm/arm.c106
-rw-r--r--arch/arm64/kvm/debug.c416
-rw-r--r--arch/arm64/kvm/emulate-nested.c81
-rw-r--r--arch/arm64/kvm/fpsimd.c12
-rw-r--r--arch/arm64/kvm/guest.c31
-rw-r--r--arch/arm64/kvm/handle_exit.c5
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/debug-sr.h42
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h39
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h43
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/fixed_config.h223
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/gfp.h6
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h39
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/memory.h50
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/pkvm.h23
-rw-r--r--arch/arm64/kvm/hyp/nvhe/debug-sr.c72
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c233
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c884
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mm.c12
-rw-r--r--arch/arm64/kvm/hyp/nvhe/page_alloc.c14
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c410
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/stacktrace.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c52
-rw-r--r--arch/arm64/kvm/hyp/nvhe/sys_regs.c404
-rw-r--r--arch/arm64/kvm/hyp/nvhe/timer-sr.c16
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c13
-rw-r--r--arch/arm64/kvm/hyp/vhe/debug-sr.c5
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c107
-rw-r--r--arch/arm64/kvm/hyp/vhe/sysreg-sr.c4
-rw-r--r--arch/arm64/kvm/mmu.c108
-rw-r--r--arch/arm64/kvm/nested.c47
-rw-r--r--arch/arm64/kvm/pkvm.c201
-rw-r--r--arch/arm64/kvm/reset.c6
-rw-r--r--arch/arm64/kvm/stacktrace.c9
-rw-r--r--arch/arm64/kvm/sys_regs.c421
-rw-r--r--arch/arm64/kvm/trace_handle_exit.h75
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c11
-rw-r--r--arch/arm64/lib/Makefile6
-rw-r--r--arch/arm64/lib/crc-t10dif-core.S (renamed from arch/arm64/crypto/crct10dif-ce-core.S)0
-rw-r--r--arch/arm64/lib/crc-t10dif-glue.c81
-rw-r--r--arch/arm64/lib/crc32-glue.c25
-rw-r--r--arch/arm64/mm/pgd.c4
-rw-r--r--arch/arm64/net/bpf_jit_comp.c48
-rw-r--r--arch/arm64/tools/cpucaps 1
-rw-r--r--arch/arm64/tools/sysreg 32
-rw-r--r--arch/csky/include/asm/pgalloc.h4
-rw-r--r--arch/hexagon/include/asm/cmpxchg.h2
-rw-r--r--arch/hexagon/include/asm/pgalloc.h4
-rw-r--r--arch/hexagon/include/asm/setup.h20
-rw-r--r--arch/hexagon/include/uapi/asm/setup.h14
-rw-r--r--arch/hexagon/kernel/time.c3
-rw-r--r--arch/hexagon/kernel/traps.c6
-rw-r--r--arch/loongarch/Kbuild1
-rw-r--r--arch/loongarch/Kconfig4
-rw-r--r--arch/loongarch/boot/dts/Makefile2
-rw-r--r--arch/loongarch/configs/loongson3_defconfig7
-rw-r--r--arch/loongarch/crypto/Kconfig9
-rw-r--r--arch/loongarch/crypto/Makefile2
-rw-r--r--arch/loongarch/crypto/crc32-loongarch.c300
-rw-r--r--arch/loongarch/include/asm/cpu-info.h1
-rw-r--r--arch/loongarch/include/asm/hw_breakpoint.h4
-rw-r--r--arch/loongarch/include/asm/kvm_host.h1
-rw-r--r--arch/loongarch/include/asm/kvm_para.h3
-rw-r--r--arch/loongarch/include/asm/kvm_vcpu.h1
-rw-r--r--arch/loongarch/include/asm/loongarch.h76
-rw-r--r--arch/loongarch/include/asm/pgalloc.h2
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h7
-rw-r--r--arch/loongarch/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/loongarch/include/uapi/asm/ptrace.h10
-rw-r--r--arch/loongarch/kernel/Makefile2
-rw-r--r--arch/loongarch/kernel/cacheinfo.c6
-rw-r--r--arch/loongarch/kernel/cpu-probe.c1
-rw-r--r--arch/loongarch/kernel/hw_breakpoint.c16
-rw-r--r--arch/loongarch/kernel/kdebugfs.c168
-rw-r--r--arch/loongarch/kernel/ptrace.c6
-rw-r--r--arch/loongarch/kernel/setup.c2
-rw-r--r--arch/loongarch/kernel/switch.S2
-rw-r--r--arch/loongarch/kernel/time.c2
-rw-r--r--arch/loongarch/kernel/traps.c13
-rw-r--r--arch/loongarch/kernel/unaligned.c8
-rw-r--r--arch/loongarch/kvm/exit.c30
-rw-r--r--arch/loongarch/kvm/main.c18
-rw-r--r--arch/loongarch/kvm/vcpu.c7
-rw-r--r--arch/loongarch/lib/Makefile2
-rw-r--r--arch/loongarch/lib/crc32-loongarch.c135
-rw-r--r--arch/loongarch/mm/init.c13
-rw-r--r--arch/loongarch/mm/pgtable.c7
-rw-r--r--arch/loongarch/power/platform.c2
-rw-r--r--arch/loongarch/power/suspend_asm.S10
-rw-r--r--arch/m68k/coldfire/m5441x.c20
-rw-r--r--arch/m68k/configs/amiga_defconfig3
-rw-r--r--arch/m68k/configs/apollo_defconfig3
-rw-r--r--arch/m68k/configs/atari_defconfig3
-rw-r--r--arch/m68k/configs/bvme6000_defconfig3
-rw-r--r--arch/m68k/configs/hp300_defconfig3
-rw-r--r--arch/m68k/configs/mac_defconfig3
-rw-r--r--arch/m68k/configs/multi_defconfig3
-rw-r--r--arch/m68k/configs/mvme147_defconfig3
-rw-r--r--arch/m68k/configs/mvme16x_defconfig3
-rw-r--r--arch/m68k/configs/q40_defconfig3
-rw-r--r--arch/m68k/configs/sun3_defconfig3
-rw-r--r--arch/m68k/configs/sun3x_defconfig3
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h7
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h6
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h4
-rw-r--r--arch/m68k/mm/init.c5
-rw-r--r--arch/m68k/mm/mcfmmu.c10
-rw-r--r--arch/m68k/mm/motorola.c26
-rw-r--r--arch/m68k/mm/sun3mmu.c10
-rw-r--r--arch/m68k/sun3/sun3dvma.c6
-rw-r--r--arch/microblaze/include/asm/pgalloc.h7
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/Makefile3
-rw-r--r--arch/mips/boot/dts/Makefile2
-rw-r--r--arch/mips/boot/dts/brcm/Makefile2
-rw-r--r--arch/mips/boot/dts/cavium-octeon/Makefile2
-rw-r--r--arch/mips/boot/dts/ingenic/Makefile2
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile2
-rw-r--r--arch/mips/boot/dts/loongson/Makefile2
-rw-r--r--arch/mips/boot/dts/mobileye/eyeq5.dtsi22
-rw-r--r--arch/mips/boot/dts/mscc/Makefile3
-rw-r--r--arch/mips/boot/dts/mti/Makefile2
-rw-r--r--arch/mips/boot/dts/pic32/Makefile2
-rw-r--r--arch/mips/boot/dts/ralink/Makefile2
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/decstation_64_defconfig2
-rw-r--r--arch/mips/configs/decstation_defconfig2
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig2
-rw-r--r--arch/mips/configs/eyeq5_defconfig1
-rw-r--r--arch/mips/configs/eyeq6_defconfig1
-rw-r--r--arch/mips/configs/generic/32r6.config2
-rw-r--r--arch/mips/configs/generic/64r6.config1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/ip30_defconfig1
-rw-r--r--arch/mips/crypto/Kconfig9
-rw-r--r--arch/mips/crypto/Makefile2
-rw-r--r--arch/mips/crypto/crc32-mips.c354
-rw-r--r--arch/mips/include/asm/mach-loongson64/boot_param.h8
-rw-r--r--arch/mips/include/asm/mipsregs.h4
-rw-r--r--arch/mips/include/asm/pgalloc.h8
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c2
-rw-r--r--arch/mips/kernel/ftrace.c2
-rw-r--r--arch/mips/kernel/head.S1
-rw-r--r--arch/mips/kernel/mips-mt.c7
-rw-r--r--arch/mips/kernel/setup.c5
-rw-r--r--arch/mips/kernel/spram.c4
-rw-r--r--arch/mips/kernel/traps.c47
-rw-r--r--arch/mips/kernel/vdso.c10
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/crc32-mips.c192
-rw-r--r--arch/mips/loongson64/boardinfo.c2
-rw-r--r--arch/mips/loongson64/env.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c2
-rw-r--r--arch/mips/mm/pgtable.c8
-rw-r--r--arch/mips/pci/pci-legacy.c8
-rw-r--r--arch/nios2/include/asm/pgalloc.h2
-rw-r--r--arch/nios2/mm/pgtable.c3
-rw-r--r--arch/openrisc/Kbuild1
-rw-r--r--arch/openrisc/Kconfig5
-rw-r--r--arch/openrisc/boot/dts/Makefile2
-rw-r--r--arch/openrisc/configs/or1klitex_defconfig2
-rw-r--r--arch/openrisc/configs/or1ksim_defconfig2
-rw-r--r--arch/openrisc/configs/simple_smp_defconfig2
-rw-r--r--arch/openrisc/include/asm/pgalloc.h8
-rw-r--r--arch/openrisc/include/asm/ptrace.h73
-rw-r--r--arch/openrisc/kernel/entry.S4
-rw-r--r--arch/openrisc/kernel/ptrace.c96
-rw-r--r--arch/openrisc/kernel/signal.c2
-rw-r--r--arch/openrisc/mm/ioremap.c5
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/io.h4
-rw-r--r--arch/parisc/include/asm/pgalloc.h39
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/vdso32/Makefile2
-rw-r--r--arch/parisc/kernel/vdso64/Makefile2
-rw-r--r--arch/parisc/lib/io.c47
-rw-r--r--arch/parisc/mm/init.c20
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/configs/powernv_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig3
-rw-r--r--arch/powerpc/crypto/Kconfig33
-rw-r--r--arch/powerpc/crypto/Makefile5
-rw-r--r--arch/powerpc/crypto/aes-gcm-p10-glue.c9
-rw-r--r--arch/powerpc/crypto/crc-vpmsum_test.c133
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c173
-rw-r--r--arch/powerpc/include/asm/tlb.h1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c10
-rw-r--r--arch/powerpc/kernel/idle.c2
-rw-r--r--arch/powerpc/kernel/module_64.c24
-rw-r--r--arch/powerpc/kernel/pci_32.c5
-rw-r--r--arch/powerpc/kernel/setup-common.c5
-rw-r--r--arch/powerpc/kernel/setup_32.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/lib/Makefile6
-rw-r--r--arch/powerpc/lib/crc-t10dif-glue.c (renamed from arch/powerpc/crypto/crct10dif-vpmsum_glue.c)69
-rw-r--r--arch/powerpc/lib/crc32-glue.c92
-rw-r--r--arch/powerpc/lib/crc32-vpmsum_core.S (renamed from arch/powerpc/crypto/crc32-vpmsum_core.S)0
-rw-r--r--arch/powerpc/lib/crc32c-vpmsum_asm.S (renamed from arch/powerpc/crypto/crc32c-vpmsum_asm.S)0
-rw-r--r--arch/powerpc/lib/crct10dif-vpmsum_asm.S (renamed from arch/powerpc/crypto/crct10dif-vpmsum_asm.S)0
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c5
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c2
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c8
-rw-r--r--arch/powerpc/mm/kasan/init_book3e_64.c8
-rw-r--r--arch/powerpc/mm/kasan/init_book3s_64.c2
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c16
-rw-r--r--arch/powerpc/mm/pgtable-frag.c4
-rw-r--r--arch/powerpc/mm/pgtable_32.c7
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c5
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c31
-rw-r--r--arch/powerpc/platforms/powernv/opal.c7
-rw-r--r--arch/powerpc/platforms/ps3/setup.c5
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c6
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c3
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c5
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/Kconfig.errata11
-rw-r--r--arch/riscv/Kconfig.socs5
-rw-r--r--arch/riscv/Kconfig.vendor26
-rw-r--r--arch/riscv/Makefile.postlink8
-rw-r--r--arch/riscv/boot/dts/Makefile1
-rw-r--r--arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi3
-rw-r--r--arch/riscv/boot/dts/spacemit/Makefile2
-rw-r--r--arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts26
-rw-r--r--arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi20
-rw-r--r--arch/riscv/boot/dts/spacemit/k1.dtsi452
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts18
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts18
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi16
-rw-r--r--arch/riscv/configs/defconfig4
-rw-r--r--arch/riscv/errata/thead/errata.c28
-rw-r--r--arch/riscv/include/asm/bitops.h20
-rw-r--r--arch/riscv/include/asm/bugs.h22
-rw-r--r--arch/riscv/include/asm/cpufeature.h2
-rw-r--r--arch/riscv/include/asm/csr.h15
-rw-r--r--arch/riscv/include/asm/errata_list.h3
-rw-r--r--arch/riscv/include/asm/futex.h2
-rw-r--r--arch/riscv/include/asm/hwprobe.h5
-rw-r--r--arch/riscv/include/asm/kvm_host.h5
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_sbi.h1
-rw-r--r--arch/riscv/include/asm/pgalloc.h72
-rw-r--r--arch/riscv/include/asm/switch_to.h2
-rw-r--r--arch/riscv/include/asm/tlb.h18
-rw-r--r--arch/riscv/include/asm/vector.h222
-rw-r--r--arch/riscv/include/asm/vendor_extensions/thead.h47
-rw-r--r--arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h19
-rw-r--r--arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h37
-rw-r--r--arch/riscv/include/asm/vendorid_list.h1
-rw-r--r--arch/riscv/include/uapi/asm/hwprobe.h3
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h7
-rw-r--r--arch/riscv/include/uapi/asm/vendor/thead.h3
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/bugs.c60
-rw-r--r--arch/riscv/kernel/cpufeature.c59
-rw-r--r--arch/riscv/kernel/kernel_mode_vector.c8
-rw-r--r--arch/riscv/kernel/process.c6
-rw-r--r--arch/riscv/kernel/setup.c4
-rw-r--r--arch/riscv/kernel/signal.c6
-rw-r--r--arch/riscv/kernel/smp.c1
-rw-r--r--arch/riscv/kernel/sys_hwprobe.c5
-rw-r--r--arch/riscv/kernel/vector.c28
-rw-r--r--arch/riscv/kernel/vendor_extensions.c10
-rw-r--r--arch/riscv/kernel/vendor_extensions/Makefile2
-rw-r--r--arch/riscv/kernel/vendor_extensions/thead.c29
-rw-r--r--arch/riscv/kernel/vendor_extensions/thead_hwprobe.c19
-rw-r--r--arch/riscv/kvm/Makefile1
-rw-r--r--arch/riscv/kvm/vcpu.c7
-rw-r--r--arch/riscv/kvm/vcpu_exit.c37
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c6
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c4
-rw-r--r--arch/riscv/kvm/vcpu_sbi_system.c73
-rw-r--r--arch/riscv/lib/Makefile3
-rw-r--r--arch/riscv/lib/crc32-riscv.c (renamed from arch/riscv/lib/crc32.c)25
-rw-r--r--arch/riscv/mm/fault.c52
-rw-r--r--arch/riscv/mm/init.c12
-rw-r--r--arch/riscv/mm/kasan_init.c14
-rw-r--r--arch/s390/Kconfig11
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/Makefile.postlink6
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/boot/als.c10
-rw-r--r--arch/s390/boot/boot.h26
-rw-r--r--arch/s390/boot/decompressor.c12
-rw-r--r--arch/s390/boot/ipl_parm.c20
-rw-r--r--arch/s390/boot/ipl_report.c3
-rw-r--r--arch/s390/boot/kaslr.c4
-rw-r--r--arch/s390/boot/pgm_check_info.c53
-rw-r--r--arch/s390/boot/physmem_info.c103
-rw-r--r--arch/s390/boot/printk.c224
-rw-r--r--arch/s390/boot/startup.c47
-rw-r--r--arch/s390/boot/vmem.c135
-rw-r--r--arch/s390/configs/debug_defconfig4
-rw-r--r--arch/s390/configs/defconfig3
-rw-r--r--arch/s390/crypto/Kconfig12
-rw-r--r--arch/s390/crypto/Makefile2
-rw-r--r--arch/s390/crypto/crc32-vx.c306
-rw-r--r--arch/s390/include/asm/asm-extable.h14
-rw-r--r--arch/s390/include/asm/asm.h2
-rw-r--r--arch/s390/include/asm/bitops.h2
-rw-r--r--arch/s390/include/asm/boot_data.h51
-rw-r--r--arch/s390/include/asm/fpu-insn.h183
-rw-r--r--arch/s390/include/asm/ftrace.h1
-rw-r--r--arch/s390/include/asm/futex.h107
-rw-r--r--arch/s390/include/asm/gmap.h20
-rw-r--r--arch/s390/include/asm/kvm_host.h6
-rw-r--r--arch/s390/include/asm/page.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h40
-rw-r--r--arch/s390/include/asm/pgtable.h21
-rw-r--r--arch/s390/include/asm/physmem_info.h4
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/tlb.h10
-rw-r--r--arch/s390/include/asm/uaccess.h548
-rw-r--r--arch/s390/include/asm/uv.h6
-rw-r--r--arch/s390/kernel/crash_dump.c43
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/early.c3
-rw-r--r--arch/s390/kernel/ftrace.c5
-rw-r--r--arch/s390/kernel/hiperdispatch.c2
-rw-r--r--arch/s390/kernel/lgr.c2
-rw-r--r--arch/s390/kernel/numa.c8
-rw-r--r--arch/s390/kernel/setup.c57
-rw-r--r--arch/s390/kernel/smp.c11
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/topology.c14
-rw-r--r--arch/s390/kernel/uv.c292
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/gaccess.c44
-rw-r--r--arch/s390/kvm/gmap-vsie.c142
-rw-r--r--arch/s390/kvm/gmap.c212
-rw-r--r--arch/s390/kvm/gmap.h39
-rw-r--r--arch/s390/kvm/intercept.c7
-rw-r--r--arch/s390/kvm/interrupt.c19
-rw-r--r--arch/s390/kvm/kvm-s390.c237
-rw-r--r--arch/s390/kvm/kvm-s390.h19
-rw-r--r--arch/s390/kvm/pv.c21
-rw-r--r--arch/s390/kvm/vsie.c106
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/crc32-glue.c92
-rw-r--r--arch/s390/lib/crc32-vx.h (renamed from arch/s390/crypto/crc32-vx.h)0
-rw-r--r--arch/s390/lib/crc32be-vx.c (renamed from arch/s390/crypto/crc32be-vx.c)0
-rw-r--r--arch/s390/lib/crc32le-vx.c (renamed from arch/s390/crypto/crc32le-vx.c)0
-rw-r--r--arch/s390/lib/uaccess.c90
-rw-r--r--arch/s390/mm/cmm.c4
-rw-r--r--arch/s390/mm/extable.c30
-rw-r--r--arch/s390/mm/gmap.c681
-rw-r--r--arch/s390/mm/pgalloc.c30
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/pci/pci_bus.c1
-rw-r--r--arch/s390/purgatory/Makefile2
-rw-r--r--arch/s390/tools/gen_opcode_table.c27
-rw-r--r--arch/sh/Kbuild1
-rw-r--r--arch/sh/Kconfig7
-rw-r--r--arch/sh/boards/Kconfig4
-rw-r--r--arch/sh/boot/dts/Makefile2
-rw-r--r--arch/sh/include/asm/pgalloc.h2
-rw-r--r--arch/sh/kernel/irq.c4
-rw-r--r--arch/sh/kernel/setup.c4
-rw-r--r--arch/sh/mm/init.c10
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/crypto/Kconfig10
-rw-r--r--arch/sparc/crypto/Makefile4
-rw-r--r--arch/sparc/crypto/crc32c_glue.c184
-rw-r--r--arch/sparc/include/asm/tlb_64.h1
-rw-r--r--arch/sparc/kernel/irq_32.c12
-rw-r--r--arch/sparc/kernel/irq_64.c11
-rw-r--r--arch/sparc/kernel/pci.c2
-rw-r--r--arch/sparc/kernel/pci_common.c2
-rw-r--r--arch/sparc/kernel/prom_32.c4
-rw-r--r--arch/sparc/kernel/vio.c6
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/crc32_glue.c93
-rw-r--r--arch/sparc/lib/crc32c_asm.S (renamed from arch/sparc/crypto/crc32c_asm.S)2
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c16
-rw-r--r--arch/um/drivers/net_kern.c5
-rw-r--r--arch/um/drivers/rtc_kern.c7
-rw-r--r--arch/um/drivers/vector_kern.c5
-rw-r--r--arch/um/include/asm/fixmap.h56
-rw-r--r--arch/um/include/asm/pgalloc.h6
-rw-r--r--arch/um/include/asm/pgtable.h7
-rw-r--r--arch/um/kernel/load_file.c4
-rw-r--r--arch/um/kernel/mem.c22
-rw-r--r--arch/um/kernel/process.c8
-rw-r--r--arch/um/kernel/um_arch.c12
-rw-r--r--arch/um/os-Linux/main.c8
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/Makefile.postlink6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/coco/sev/Makefile7
-rw-r--r--arch/x86/coco/sev/core.c19
-rw-r--r--arch/x86/coco/sev/shared.c16
-rw-r--r--arch/x86/coco/tdx/Makefile2
-rw-r--r--arch/x86/coco/tdx/debug.c69
-rw-r--r--arch/x86/coco/tdx/tdx.c44
-rw-r--r--arch/x86/crypto/Kconfig32
-rw-r--r--arch/x86/crypto/Makefile10
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c1
-rw-r--r--arch/x86/crypto/aes-gcm-avx10-x86_64.S119
-rw-r--r--arch/x86/crypto/aes-xts-avx-x86_64.S329
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c10
-rw-r--r--arch/x86/crypto/blowfish_glue.c1
-rw-r--r--arch/x86/crypto/camellia_glue.c1
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c202
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c250
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c143
-rw-r--r--arch/x86/crypto/des3_ede_glue.c1
-rw-r--r--arch/x86/crypto/twofish_glue.c1
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c2
-rw-r--r--arch/x86/hyperv/hv_apic.c1
-rw-r--r--arch/x86/hyperv/hv_init.c23
-rw-r--r--arch/x86/hyperv/hv_proc.c3
-rw-r--r--arch/x86/hyperv/hv_vtl.c2
-rw-r--r--arch/x86/hyperv/ivm.c1
-rw-r--r--arch/x86/hyperv/mmu.c1
-rw-r--r--arch/x86/hyperv/nested.c2
-rw-r--r--arch/x86/include/asm/efi.h3
-rw-r--r--arch/x86/include/asm/hpet.h1
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h811
-rw-r--r--arch/x86/include/asm/init.h2
-rw-r--r--arch/x86/include/asm/intel_punit_ipc.h7
-rw-r--r--arch/x86/include/asm/kexec.h54
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h110
-rw-r--r--arch/x86/include/asm/mmu.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h3
-rw-r--r--arch/x86/include/asm/pgalloc.h18
-rw-r--r--arch/x86/include/asm/sections.h1
-rw-r--r--arch/x86/include/asm/setup.h2
-rw-r--r--arch/x86/include/asm/sev-common.h1
-rw-r--r--arch/x86/include/asm/shared/tdx.h38
-rw-r--r--arch/x86/include/asm/svm.h2
-rw-r--r--arch/x86/include/asm/tdx.h3
-rw-r--r--arch/x86/include/asm/tlb.h15
-rw-r--r--arch/x86/include/asm/tlbflush.h1
-rw-r--r--arch/x86/include/uapi/asm/amd_hsmp.h64
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/kernel/acpi/boot.c55
-rw-r--r--arch/x86/kernel/alternative.c10
-rw-r--r--arch/x86/kernel/apic/io_apic.c9
-rw-r--r--arch/x86/kernel/callthunks.c6
-rw-r--r--arch/x86/kernel/cpu/bus_lock.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c2
-rw-r--r--arch/x86/kernel/e820.c5
-rw-r--r--arch/x86/kernel/head64.c40
-rw-r--r--arch/x86/kernel/head_64.S12
-rw-r--r--arch/x86/kernel/hpet.c6
-rw-r--r--arch/x86/kernel/ksysfs.c18
-rw-r--r--arch/x86/kernel/machine_kexec_32.c7
-rw-r--r--arch/x86/kernel/machine_kexec_64.c95
-rw-r--r--arch/x86/kernel/paravirt.c12
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S193
-rw-r--r--arch/x86/kernel/setup.c5
-rw-r--r--arch/x86/kernel/vm86_32.c5
-rw-r--r--arch/x86/kernel/vmlinux.lds.S45
-rw-r--r--arch/x86/kvm/cpuid.c972
-rw-r--r--arch/x86/kvm/cpuid.h128
-rw-r--r--arch/x86/kvm/governed_features.h22
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/kvm_emulate.h2
-rw-r--r--arch/x86/kvm/lapic.c31
-rw-r--r--arch/x86/kvm/lapic.h1
-rw-r--r--arch/x86/kvm/mmu.h33
-rw-r--r--arch/x86/kvm/mmu/mmu.c109
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h80
-rw-r--r--arch/x86/kvm/mmu/spte.h5
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c10
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h21
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c325
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h51
-rw-r--r--arch/x86/kvm/pmu.c1
-rw-r--r--arch/x86/kvm/reverse_cpuid.h23
-rw-r--r--arch/x86/kvm/smm.c10
-rw-r--r--arch/x86/kvm/svm/nested.c22
-rw-r--r--arch/x86/kvm/svm/pmu.c8
-rw-r--r--arch/x86/kvm/svm/sev.c43
-rw-r--r--arch/x86/kvm/svm/svm.c78
-rw-r--r--arch/x86/kvm/svm/svm.h21
-rw-r--r--arch/x86/kvm/trace.h17
-rw-r--r--arch/x86/kvm/vmx/hyperv.h2
-rw-r--r--arch/x86/kvm/vmx/hyperv_evmcs.h2
-rw-r--r--arch/x86/kvm/vmx/main.c4
-rw-r--r--arch/x86/kvm/vmx/nested.c102
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c4
-rw-r--r--arch/x86/kvm/vmx/sgx.c14
-rw-r--r--arch/x86/kvm/vmx/vmx.c176
-rw-r--r--arch/x86/kvm/vmx/vmx.h6
-rw-r--r--arch/x86/kvm/vmx/vmx_onhyperv.h2
-rw-r--r--arch/x86/kvm/vmx/x86_ops.h6
-rw-r--r--arch/x86/kvm/x86.c264
-rw-r--r--arch/x86/kvm/x86.h34
-rw-r--r--arch/x86/lib/Makefile7
-rw-r--r--arch/x86/lib/crc-t10dif-glue.c51
-rw-r--r--arch/x86/lib/crc32-glue.c124
-rw-r--r--arch/x86/lib/crc32-pclmul.S (renamed from arch/x86/crypto/crc32-pclmul_asm.S)19
-rw-r--r--arch/x86/lib/crc32c-3way.S (renamed from arch/x86/crypto/crc32c-pcl-intel-asm_64.S)63
-rw-r--r--arch/x86/lib/crct10dif-pcl-asm_64.S (renamed from arch/x86/crypto/crct10dif-pcl-asm_64.S)0
-rw-r--r--arch/x86/mm/fault.c1
-rw-r--r--arch/x86/mm/pat/set_memory.c2
-rw-r--r--arch/x86/mm/pgtable.c57
-rw-r--r--arch/x86/mm/tlb.c59
-rw-r--r--arch/x86/pci/fixup.c30
-rw-r--r--arch/x86/platform/efi/efi.c10
-rw-r--r--arch/x86/platform/efi/quirks.c5
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c6
-rw-r--r--arch/x86/tools/relocs.c8
-rw-r--r--arch/x86/um/asm/archparam.h20
-rw-r--r--arch/x86/um/shared/sysdep/ptrace.h2
-rw-r--r--arch/x86/virt/vmx/tdx/tdx.c95
-rw-r--r--arch/x86/virt/vmx/tdx/tdx.h40
-rw-r--r--arch/x86/virt/vmx/tdx/tdx_global_metadata.c48
-rw-r--r--arch/x86/virt/vmx/tdx/tdx_global_metadata.h25
-rw-r--r--arch/x86/xen/mmu_pv.c4
-rw-r--r--arch/x86/xen/p2m.c8
-rw-r--r--arch/x86/xen/xen-head.S11
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/processor.h4
-rw-r--r--arch/xtensa/kernel/setup.c2
-rw-r--r--arch/xtensa/mm/kasan_init.c6
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c3
-rw-r--r--block/blk-cgroup.c11
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-ia-ranges.c4
-rw-r--r--block/blk-iocost.c14
-rw-r--r--block/blk-iolatency.c6
-rw-r--r--block/blk-mq-cpumap.c3
-rw-r--r--block/blk-mq-sysfs.c40
-rw-r--r--block/blk-mq.c21
-rw-r--r--block/blk-pm.c2
-rw-r--r--block/blk-rq-qos.c12
-rw-r--r--block/blk-settings.c5
-rw-r--r--block/blk-sysfs.c13
-rw-r--r--block/blk-throttle.c5
-rw-r--r--block/blk-zoned.c5
-rw-r--r--block/elevator.c16
-rw-r--r--block/fops.c5
-rw-r--r--crypto/Kconfig19
-rw-r--r--crypto/Makefile5
-rw-r--r--crypto/aegis128-core.c2
-rw-r--r--crypto/ahash.c158
-rw-r--r--crypto/algapi.c31
-rw-r--r--crypto/anubis.c14
-rw-r--r--crypto/aria_generic.c37
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c10
-rw-r--r--crypto/crc32_generic.c8
-rw-r--r--crypto/crc32c_generic.c12
-rw-r--r--crypto/crct10dif_common.c82
-rw-r--r--crypto/crct10dif_generic.c82
-rw-r--r--crypto/fips.c6
-rw-r--r--crypto/keywrap.c320
-rw-r--r--crypto/khazad.c17
-rw-r--r--crypto/proc.c9
-rw-r--r--crypto/seed.c48
-rw-r--r--crypto/sig.c4
-rw-r--r--crypto/skcipher.c369
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/tea.c83
-rw-r--r--crypto/testmgr.c26
-rw-r--r--crypto/testmgr.h192
-rw-r--r--crypto/vmac.c696
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c5
-rw-r--r--drivers/accel/habanalabs/common/context.c3
-rw-r--r--drivers/accel/habanalabs/common/device.c2
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_drv.c3
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c11
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c8
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c84
-rw-r--r--drivers/acpi/acpi_extlog.c14
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/numa/hmat.c24
-rw-r--r--drivers/acpi/numa/srat.c95
-rw-r--r--drivers/acpi/platform_profile.c647
-rw-r--r--drivers/acpi/prmt.c4
-rw-r--r--drivers/acpi/property.c11
-rw-r--r--drivers/acpi/resource.c6
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/x86/utils.c13
-rw-r--r--drivers/android/binder.c13
-rw-r--r--drivers/android/binder_alloc.c366
-rw-r--r--drivers/android/binder_alloc.h45
-rw-r--r--drivers/android/binder_alloc_selftest.c18
-rw-r--r--drivers/android/binder_internal.h11
-rw-r--r--drivers/android/binder_trace.h2
-rw-r--r--drivers/android/binderfs.c2
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci.h15
-rw-r--r--drivers/ata/ahci_brcm.c3
-rw-r--r--drivers/ata/ahci_ceva.c6
-rw-r--r--drivers/ata/ahci_st.c6
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libahci_platform.c40
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-sata.c8
-rw-r--r--drivers/ata/libata-scsi.c19
-rw-r--r--drivers/ata/libata-sff.c18
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_macio.c8
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_rdc.c2
-rw-r--r--drivers/ata/sata_gemini.c32
-rw-r--r--drivers/ata/sata_gemini.h1
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/ata/sata_nv.c24
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_vsc.c2
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c8
-rw-r--r--drivers/base/bus.c9
-rw-r--r--drivers/base/class.c42
-rw-r--r--drivers/base/core.c83
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/devcoredump.c22
-rw-r--r--drivers/base/devres.c23
-rw-r--r--drivers/base/driver.c9
-rw-r--r--drivers/base/firmware_loader/fallback_table.c2
-rw-r--r--drivers/base/firmware_loader/sysfs.c14
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/power/main.c18
-rw-r--r--drivers/base/property.c38
-rw-r--r--drivers/base/swnode.c1
-rw-r--r--drivers/base/test/Kconfig1
-rw-r--r--drivers/base/test/platform-device-test.c41
-rw-r--r--drivers/block/aoe/aoedev.c5
-rw-r--r--drivers/block/ataflop.c5
-rw-r--r--drivers/block/loop.c23
-rw-r--r--drivers/block/nbd.c7
-rw-r--r--drivers/block/rbd.c5
-rw-r--r--drivers/block/sunvdc.c11
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/virtio_blk.c33
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/zram/zram_drv.c300
-rw-r--r--drivers/block/zram/zram_drv.h5
-rw-r--r--drivers/bluetooth/btnxpuart.c3
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c8
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c36
-rw-r--r--drivers/bus/mhi/host/boot.c1
-rw-r--r--drivers/bus/mhi/host/pci_generic.c57
-rw-r--r--drivers/bus/moxtet.c2
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/cdx/cdx.c3
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/misc.c39
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/tpm/eventlog/acpi.c15
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/clk/clk-en7523.c1
-rw-r--r--drivers/clk/ti/clk.c5
-rw-r--r--drivers/clocksource/hyperv_timer.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c5
-rw-r--r--drivers/cpufreq/amd-pstate.c20
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c5
-rw-r--r--drivers/cpufreq/cpufreq.c21
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c11
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c1
-rw-r--r--drivers/cpuidle/governors/teo.c197
-rw-r--r--drivers/crypto/Kconfig17
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/bcm/spu.c7
-rw-r--r--drivers/crypto/caam/blob_gen.c3
-rw-r--r--drivers/crypto/ccp/dbc.c53
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c13
-rw-r--r--drivers/crypto/hisilicon/qm.c291
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h3
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c161
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.h11
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c13
-rw-r--r--drivers/crypto/hisilicon/zip/Makefile2
-rw-r--r--drivers/crypto/hisilicon/zip/dae_main.c262
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h8
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c52
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c2
-rw-r--r--drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c36
-rw-r--r--drivers/crypto/n2_asm.S96
-rw-r--r--drivers/crypto/n2_core.c2168
-rw-r--r--drivers/crypto/n2_core.h232
-rw-r--r--drivers/crypto/omap-aes.c34
-rw-r--r--drivers/crypto/omap-aes.h6
-rw-r--r--drivers/crypto/omap-des.c40
-rw-r--r--drivers/crypto/qce/aead.c2
-rw-r--r--drivers/crypto/qce/core.c129
-rw-r--r--drivers/crypto/qce/core.h9
-rw-r--r--drivers/crypto/qce/dma.c22
-rw-r--r--drivers/crypto/qce/dma.h3
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/qce/skcipher.c2
-rw-r--r--drivers/crypto/tegra/tegra-se-aes.c7
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c7
-rw-r--r--drivers/cxl/core/hdm.c2
-rw-r--r--drivers/cxl/core/pci.c10
-rw-r--r--drivers/cxl/core/pmem.c15
-rw-r--r--drivers/cxl/core/region.c23
-rw-r--r--drivers/cxl/core/regs.c56
-rw-r--r--drivers/cxl/core/trace.h259
-rw-r--r--drivers/cxl/cxl.h7
-rw-r--r--drivers/cxl/pci.c6
-rw-r--r--drivers/cxl/port.c2
-rw-r--r--drivers/dma/Kconfig6
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amd/Kconfig28
-rw-r--r--drivers/dma/amd/Makefile2
-rw-r--r--drivers/dma/amd/ae4dma/Makefile10
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-dev.c157
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma-pci.c158
-rw-r--r--drivers/dma/amd/ae4dma/ae4dma.h100
-rw-r--r--drivers/dma/amd/ptdma/Makefile (renamed from drivers/dma/ptdma/Makefile)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-debugfs.c (renamed from drivers/dma/ptdma/ptdma-debugfs.c)79
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dev.c (renamed from drivers/dma/ptdma/ptdma-dev.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma-dmaengine.c (renamed from drivers/dma/ptdma/ptdma-dmaengine.c)226
-rw-r--r--drivers/dma/amd/ptdma/ptdma-pci.c (renamed from drivers/dma/ptdma/ptdma-pci.c)0
-rw-r--r--drivers/dma/amd/ptdma/ptdma.h (renamed from drivers/dma/ptdma/ptdma.h)4
-rw-r--r--drivers/dma/amd/qdma/qdma.c22
-rw-r--r--drivers/dma/bcm2835-dma.c22
-rw-r--r--drivers/dma/fsl-edma-common.c36
-rw-r--r--drivers/dma/fsl-edma-common.h3
-rw-r--r--drivers/dma/fsl-edma-main.c115
-rw-r--r--drivers/dma/idxd/cdev.c5
-rw-r--r--drivers/dma/idxd/idxd.h15
-rw-r--r--drivers/dma/idxd/init.c481
-rw-r--r--drivers/dma/idxd/irq.c85
-rw-r--r--drivers/dma/idxd/registers.h1
-rw-r--r--drivers/dma/idxd/sysfs.c10
-rw-r--r--drivers/dma/mv_xor.c5
-rw-r--r--drivers/dma/ptdma/Kconfig13
-rw-r--r--drivers/dma/qcom/bam_dma.c24
-rw-r--r--drivers/dma/qcom/gpi.c31
-rw-r--r--drivers/dma/sh/rcar-dmac.c4
-rw-r--r--drivers/dma/sun4i-dma.c208
-rw-r--r--drivers/dma/tegra210-adma.c86
-rw-r--r--drivers/dma/ti/edma.c7
-rw-r--r--drivers/dma/ti/k3-udma.c16
-rw-r--r--drivers/dma/xilinx/xdma.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c20
-rw-r--r--drivers/extcon/extcon-fsa9480.c2
-rw-r--r--drivers/extcon/extcon-ptn5150.c2
-rw-r--r--drivers/extcon/extcon-rtk-type-c.c2
-rw-r--r--drivers/firewire/core-device.c4
-rw-r--r--drivers/firewire/device-attribute-test.c2
-rw-r--r--drivers/firewire/ohci.c44
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/bus.c4
-rw-r--r--drivers/firmware/arm_scmi/common.h4
-rw-r--r--drivers/firmware/arm_scmi/driver.c74
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c12
-rw-r--r--drivers/firmware/arm_scmi/transports/mailbox.c1
-rw-r--r--drivers/firmware/arm_scmi/transports/smc.c1
-rw-r--r--drivers/firmware/arm_scmi/transports/virtio.c1
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c5
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c5
-rw-r--r--drivers/firmware/cirrus/Kconfig18
-rw-r--r--drivers/firmware/cirrus/Makefile2
-rw-r--r--drivers/firmware/cirrus/test/Makefile23
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_bin.c199
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c752
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c367
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_utils.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c473
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin.c2556
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c600
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c688
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c3282
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c1851
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c2669
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c2211
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c1347
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_tests.c14
-rw-r--r--drivers/firmware/efi/dev-path-parser.c4
-rw-r--r--drivers/firmware/efi/efi.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c9
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c49
-rw-r--r--drivers/firmware/efi/libstub/efistub.h20
-rw-r--r--drivers/firmware/efi/libstub/gop.c323
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c4
-rw-r--r--drivers/firmware/efi/libstub/mem.c20
-rw-r--r--drivers/firmware/efi/libstub/pci.c34
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c4
-rw-r--r--drivers/firmware/efi/libstub/relocate.c10
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c164
-rw-r--r--drivers/firmware/efi/sysfb_efi.c2
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/firmware/qcom/qcom_scm-smc.c6
-rw-r--r--drivers/firmware/qcom/qcom_scm.c271
-rw-r--r--drivers/firmware/qcom/qcom_scm.h4
-rw-r--r--drivers/firmware/stratix10-svc.c9
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c117
-rw-r--r--drivers/fpga/dfl-afu-error.c59
-rw-r--r--drivers/fpga/dfl-afu-main.c278
-rw-r--r--drivers/fpga/dfl-afu-region.c51
-rw-r--r--drivers/fpga/dfl-afu.h26
-rw-r--r--drivers/fpga/dfl-fme-br.c24
-rw-r--r--drivers/fpga/dfl-fme-error.c98
-rw-r--r--drivers/fpga/dfl-fme-main.c95
-rw-r--r--drivers/fpga/dfl-fme-pr.c86
-rw-r--r--drivers/fpga/dfl.c445
-rw-r--r--drivers/fpga/dfl.h140
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-mxc.c3
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-sim.c20
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h2249
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c286
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c4
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c14
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c15
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c36
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/tiny/bochs.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c11
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c40
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c4
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c21
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c3
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c4
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c24
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c38
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h24
-rw-r--r--drivers/hv/channel_mgmt.c61
-rw-r--r--drivers/hv/connection.c4
-rw-r--r--drivers/hv/hv_balloon.c22
-rw-r--r--drivers/hv/hv_common.c19
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h16
-rw-r--r--drivers/hv/vmbus_drv.c31
-rw-r--r--drivers/hwmon/hwmon.c2
-rw-r--r--drivers/hwmon/spd5118.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c113
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c81
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c55
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-self-hosted-trace.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c15
-rw-r--r--drivers/hwtracing/intel_th/core.c3
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c125
-rw-r--r--drivers/i2c/i2c-core-base.c122
-rw-r--r--drivers/i3c/master.c14
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/dw-i3c-master.c15
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c3
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c17
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c148
-rw-r--r--drivers/iio/accel/adxl345.h81
-rw-r--r--drivers/iio/accel/adxl345_core.c417
-rw-r--r--drivers/iio/accel/adxl345_i2c.c2
-rw-r--r--drivers/iio/accel/adxl345_spi.c7
-rw-r--r--drivers/iio/accel/bma220_spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c14
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c2
-rw-r--r--drivers/iio/accel/fxls8962af.h2
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c4
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c4
-rw-r--r--drivers/iio/accel/kionix-kx022a.c169
-rw-r--r--drivers/iio/accel/kionix-kx022a.h14
-rw-r--r--drivers/iio/adc/ad4000.c313
-rw-r--r--drivers/iio/adc/ad4695.c2
-rw-r--r--drivers/iio/adc/ad7124.c217
-rw-r--r--drivers/iio/adc/ad7173.c119
-rw-r--r--drivers/iio/adc/ad7192.c4
-rw-r--r--drivers/iio/adc/ad7606.c48
-rw-r--r--drivers/iio/adc/ad7606.h2
-rw-r--r--drivers/iio/adc/ad7625.c8
-rw-r--r--drivers/iio/adc/ad7791.c1
-rw-r--r--drivers/iio/adc/ad7793.c3
-rw-r--r--drivers/iio/adc/ad7944.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c194
-rw-r--r--drivers/iio/adc/dln2-adc.c21
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/max1118.c2
-rw-r--r--drivers/iio/adc/max11410.c2
-rw-r--r--drivers/iio/adc/max1363.c30
-rw-r--r--drivers/iio/adc/mcp3911.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c47
-rw-r--r--drivers/iio/adc/pac1921.c95
-rw-r--r--drivers/iio/adc/rockchip_saradc.c2
-rw-r--r--drivers/iio/adc/rtq6056.c2
-rw-r--r--drivers/iio/adc/rzg2l_adc.c429
-rw-r--r--drivers/iio/adc/ti-adc081c.c2
-rw-r--r--drivers/iio/adc/ti-adc084s021.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads1119.c2
-rw-r--r--drivers/iio/adc/ti-ads131e08.c2
-rw-r--r--drivers/iio/adc/ti-lmp92064.c2
-rw-r--r--drivers/iio/adc/ti-tsc2046.c2
-rw-r--r--drivers/iio/adc/vf610_adc.c100
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c19
-rw-r--r--drivers/iio/chemical/bme680.h2
-rw-r--r--drivers/iio/chemical/bme680_core.c124
-rw-r--r--drivers/iio/chemical/bme680_i2c.c1
-rw-r--r--drivers/iio/chemical/bme680_spi.c1
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/ens160_core.c2
-rw-r--r--drivers/iio/chemical/scd30_core.c2
-rw-r--r--drivers/iio/chemical/scd4x.c2
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c4
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c12
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad3552r-common.c5
-rw-r--r--drivers/iio/dac/ad3552r-hs.c6
-rw-r--r--drivers/iio/dac/ad3552r.h8
-rw-r--r--drivers/iio/dac/ad5624r.h4
-rw-r--r--drivers/iio/dac/ad5686-spi.c6
-rw-r--r--drivers/iio/dac/ad5686.c62
-rw-r--r--drivers/iio/dac/ad5686.h6
-rw-r--r--drivers/iio/dac/ad5696-i2c.c6
-rw-r--r--drivers/iio/dac/ad7293.c68
-rw-r--r--drivers/iio/dac/ad8801.c81
-rw-r--r--drivers/iio/dac/ltc2632.c69
-rw-r--r--drivers/iio/dac/ltc2688.c44
-rw-r--r--drivers/iio/dac/max5821.c36
-rw-r--r--drivers/iio/dac/mcp4725.c2
-rw-r--r--drivers/iio/dac/rohm-bd79703.c162
-rw-r--r--drivers/iio/gyro/adxrs290.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c2
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/hts221.h2
-rw-r--r--drivers/iio/imu/adis16480.c75
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c2
-rw-r--r--drivers/iio/imu/bno055/bno055.c10
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c25
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig18
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c6
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-gts-helper.c77
-rw-r--r--drivers/iio/inkern.c11
-rw-r--r--drivers/iio/light/Kconfig32
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/as73211.c26
-rw-r--r--drivers/iio/light/bh1745.c2
-rw-r--r--drivers/iio/light/cm3232.c18
-rw-r--r--drivers/iio/light/hid-sensor-prox.c1
-rw-r--r--drivers/iio/light/isl29125.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/light/opt4060.c1343
-rw-r--r--drivers/iio/light/rohm-bu27008.c1635
-rw-r--r--drivers/iio/light/rohm-bu27034.c75
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/st_uvis25.h2
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/light/veml3235.c274
-rw-r--r--drivers/iio/light/veml6030.c76
-rw-r--r--drivers/iio/magnetometer/af8133j.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843.h2
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/multiplexer/iio-mux.c84
-rw-r--r--drivers/iio/pressure/bmp280-core.c39
-rw-r--r--drivers/iio/pressure/bmp280.h8
-rw-r--r--drivers/iio/pressure/hsc030pa.h2
-rw-r--r--drivers/iio/pressure/ms5611_core.c2
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c80
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/aw96103.c2
-rw-r--r--drivers/iio/proximity/hx9023s.c95
-rw-r--r--drivers/iio/proximity/mb1232.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx_common.h2
-rw-r--r--drivers/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/iio/temperature/tmp006.c2
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c69
-rw-r--r--drivers/infiniband/core/cache.c35
-rw-r--r--drivers/infiniband/core/device.c116
-rw-r--r--drivers/infiniband/core/ud_header.c83
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c42
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c47
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c339
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c117
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c8
-rw-r--r--drivers/infiniband/hw/efa/efa.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c28
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c71
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c65
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h135
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c62
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c301
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c568
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h166
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h14
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c31
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c14
-rw-r--r--drivers/infiniband/hw/hns/Kconfig20
-rw-r--r--drivers/infiniband/hw/hns/Makefile9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c13
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h4
-rw-r--r--drivers/infiniband/hw/irdma/protos.h4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c71
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c286
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c17
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c70
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c73
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c66
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/ff-core.c91
-rw-r--r--drivers/input/ff-memless.c18
-rw-r--r--drivers/input/input-mt.c34
-rw-r--r--drivers/input/input-poller.c4
-rw-r--r--drivers/input/input.c339
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/xpad.c9
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c3
-rw-r--r--drivers/input/keyboard/lm8323.c3
-rw-r--r--drivers/input/misc/ideapad_slidebar.c4
-rw-r--r--drivers/input/misc/max77693-haptic.c3
-rw-r--r--drivers/input/misc/mma8450.c16
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c8
-rw-r--r--drivers/input/misc/regulator-haptic.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/mouse/synaptics.c56
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/i8042.c17
-rw-r--r--drivers/input/touchscreen/egalax_ts.c3
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/sm8750.c1705
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/amd/amd_iommu.h9
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h41
-rw-r--r--drivers/iommu/amd/init.c253
-rw-r--r--drivers/iommu/amd/iommu.c532
-rw-r--r--drivers/iommu/amd/pasid.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c8
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c15
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c298
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h31
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c8
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c121
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c43
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cache.c11
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/iommu.c50
-rw-r--r--drivers/iommu/intel/irq_remapping.c8
-rw-r--r--drivers/iommu/intel/pasid.c22
-rw-r--r--drivers/iommu/intel/pasid.h6
-rw-r--r--drivers/iommu/io-pgtable-arm.c227
-rw-r--r--drivers/iommu/iommu.c37
-rw-r--r--drivers/iommu/iommufd/fault.c44
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c10
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h29
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c2
-rw-r--r--drivers/iommu/iommufd/main.c32
-rw-r--r--drivers/iommu/iommufd/selftest.c45
-rw-r--r--drivers/iommu/msm_iommu.c51
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c3
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/riscv/iommu-pci.c8
-rw-r--r--drivers/iommu/riscv/iommu-platform.c108
-rw-r--r--drivers/iommu/riscv/iommu.c14
-rw-r--r--drivers/iommu/riscv/iommu.h1
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-apple-aic.c3
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c3
-rw-r--r--drivers/irqchip/irq-partition-percpu.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c2
-rw-r--r--drivers/irqchip/irq-thead-c900-aclint-sswi.c2
-rw-r--r--drivers/leds/leds-turris-omnia.c2
-rw-r--r--drivers/macintosh/smu.c6
-rw-r--r--drivers/mailbox/Kconfig24
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/exynos-mailbox.c157
-rw-r--r--drivers/mailbox/mailbox-mchp-ipc-sbi.c504
-rw-r--r--drivers/mailbox/mailbox-mpfs.c2
-rw-r--r--drivers/mailbox/mailbox-th1520.c6
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c1
-rw-r--r--drivers/mailbox/qcom-ipcc.c16
-rw-r--r--drivers/mailbox/tegra-hsp.c6
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c2
-rw-r--r--drivers/md/dm-crypt.c42
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-linear.c5
-rw-r--r--drivers/md/dm-ps-io-affinity.c2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c5
-rw-r--r--fs/xfs/xfs_inode.h16
-rw-r--r--fs/xfs/xfs_inode_item.c30
-rw-r--r--fs/xfs/xfs_inode_item_recover.c48
-rw-r--r--fs/xfs/xfs_ioctl.c21
-rw-r--r--fs/xfs/xfs_iomap.c6
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_mount.c14
-rw-r--r--fs/xfs/xfs_mount.h25
-rw-r--r--fs/xfs/xfs_notify_failure.c230
-rw-r--r--fs/xfs/xfs_notify_failure.h11
-rw-r--r--fs/xfs/xfs_qm.c10
-rw-r--r--fs/xfs/xfs_qm_bhv.c26
-rw-r--r--fs/xfs/xfs_quota.h5
-rw-r--r--fs/xfs/xfs_refcount_item.c240
-rw-r--r--fs/xfs/xfs_reflink.c321
-rw-r--r--fs/xfs/xfs_reflink.h4
-rw-r--r--fs/xfs/xfs_rmap_item.c216
-rw-r--r--fs/xfs/xfs_rtalloc.c121
-rw-r--r--fs/xfs/xfs_rtalloc.h20
-rw-r--r--fs/xfs/xfs_stats.c5
-rw-r--r--fs/xfs/xfs_stats.h3
-rw-r--r--fs/xfs/xfs_super.c144
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_sysctl.c2
-rw-r--r--fs/xfs/xfs_trace.h270
-rw-r--r--fs/xfs/xfs_trans.c6
-rw-r--r--fs/xfs/xfs_trans.h1
-rw-r--r--fs/xfs/xfs_trans_ail.c9
-rw-r--r--fs/xfs/xfs_trans_buf.c8
-rw-r--r--fs/xfs/xfs_trans_dquot.c8
-rw-r--r--include/acpi/acpi_numa.h5
-rw-r--r--include/acpi/acpixf.h1
-rw-r--r--include/asm-generic/early_ioremap.h2
-rw-r--r--include/asm-generic/hyperv-tlfs.h874
-rw-r--r--include/asm-generic/mshyperv.h7
-rw-r--r--include/asm-generic/pgalloc.h83
-rw-r--r--include/asm-generic/syscall.h2
-rw-r--r--include/asm-generic/tlb.h24
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/clocksource/arm_arch_timer.h6
-rw-r--r--include/clocksource/hyperv_timer.h2
-rw-r--r--include/crypto/gf128mul.h6
-rw-r--r--include/crypto/internal/hash.h23
-rw-r--r--include/crypto/internal/skcipher.h14
-rw-r--r--include/cxl/event.h28
-rw-r--r--include/drm/drm_print.h1
-rw-r--r--include/dt-bindings/arm/qcom,ids.h1
-rw-r--r--include/dt-bindings/iio/adc/adi,ad4695.h (renamed from include/dt-bindings/iio/adi,ad4695.h)0
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8750-rpmh.h143
-rw-r--r--include/dt-bindings/media/video-interfaces.h7
-rw-r--r--include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h41
-rw-r--r--include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h31
-rw-r--r--include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h36
-rw-r--r--include/dt-bindings/sound/qcom,wcd9335.h1
-rw-r--r--include/hyperv/hvgdk.h308
-rw-r--r--include/hyperv/hvgdk_ext.h46
-rw-r--r--include/hyperv/hvgdk_mini.h1348
-rw-r--r--include/hyperv/hvhdk.h733
-rw-r--r--include/hyperv/hvhdk_mini.h311
-rw-r--r--include/keys/system_keyring.h2
-rw-r--r--include/kunit/platform_device.h1
-rw-r--r--include/kunit/test.h4
-rw-r--r--include/kvm/arm_arch_timer.h23
-rw-r--r--include/linux/adreno-smmu-priv.h7
-rw-r--r--include/linux/aer.h12
-rw-r--r--include/linux/alloc_tag.h11
-rw-r--r--include/linux/amd-iommu.h4
-rw-r--r--include/linux/amd-pmf-io.h15
-rw-r--r--include/linux/binfmts.h7
-rw-r--r--include/linux/bitmap.h2
-rw-r--r--include/linux/bitops.h31
-rw-r--r--include/linux/bits.h5
-rw-r--r--include/linux/blk-mq.h18
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/bpf.h17
-rw-r--r--include/linux/bpf_verifier.h26
-rw-r--r--include/linux/btf.h5
-rw-r--r--include/linux/bug.h10
-rw-r--r--include/linux/cacheinfo.h2
-rw-r--r--include/linux/call_once.h45
-rw-r--r--include/linux/ceph/ceph_fs.h14
-rw-r--r--include/linux/compiler.h54
-rw-r--r--include/linux/coresight.h17
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cpumask.h9
-rw-r--r--include/linux/crash_dump.h41
-rw-r--r--include/linux/crc-t10dif.h28
-rw-r--r--include/linux/crc32.h50
-rw-r--r--include/linux/crc32c.h7
-rw-r--r--include/linux/damon.h112
-rw-r--r--include/linux/dcache.h24
-rw-r--r--include/linux/debugfs.h44
-rw-r--r--include/linux/delayacct.h14
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/device.h66
-rw-r--r--include/linux/device/bus.h8
-rw-r--r--include/linux/device/class.h10
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/export.h15
-rw-r--r--include/linux/fanotify.h18
-rw-r--r--include/linux/firmware/cirrus/cs_dsp_test_utils.h160
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h8
-rw-r--r--include/linux/fs.h97
-rw-r--r--include/linux/fscrypt.h7
-rw-r--r--include/linux/fsl/mc.h30
-rw-r--r--include/linux/fsnotify.h78
-rw-r--r--include/linux/fsnotify_backend.h53
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/fwnode.h3
-rw-r--r--include/linux/gfp.h30
-rw-r--r--include/linux/hdmi.h1
-rw-r--r--include/linux/hisi_acc_qm.h8
-rw-r--r--include/linux/hrtimer_defs.h1
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h19
-rw-r--r--include/linux/hyperv.h11
-rw-r--r--include/linux/i2c.h10
-rw-r--r--include/linux/i3c/device.h2
-rw-r--r--include/linux/i8042.h28
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h10
-rw-r--r--include/linux/iio/buffer.h2
-rw-r--r--include/linux/iio/consumer.h4
-rw-r--r--include/linux/iio/iio-gts-helper.h6
-rw-r--r--include/linux/iio/iio-opaque.h2
-rw-r--r--include/linux/iio/iio.h2
-rw-r--r--include/linux/iio/imu/adis.h1
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h6
-rw-r--r--include/linux/io-pgtable.h11
-rw-r--r--include/linux/io_uring/cmd.h2
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/iommu.h5
-rw-r--r--include/linux/jbd2.h33
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/jump_label.h3
-rw-r--r--include/linux/kallsyms.h2
-rw-r--r--include/linux/kasan.h4
-rw-r--r--include/linux/kcore.h13
-rw-r--r--include/linux/kdb.h3
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/kobject_ns.h2
-rw-r--r--include/linux/ksm.h1
-rw-r--r--include/linux/kvm_host.h38
-rw-r--r--include/linux/libata.h19
-rw-r--r--include/linux/list_lru.h44
-rw-r--r--include/linux/lockref.h7
-rw-r--r--include/linux/lz4.h6
-rw-r--r--include/linux/mailbox/exynos-message.h19
-rw-r--r--include/linux/mailbox/mchp-ipc.h33
-rw-r--r--include/linux/memblock.h10
-rw-r--r--include/linux/memcontrol.h43
-rw-r--r--include/linux/memfd.h23
-rw-r--r--include/linux/memory/ti-aemif.h32
-rw-r--r--include/linux/memory_hotplug.h5
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/min_heap.h72
-rw-r--r--include/linux/minmax.h205
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mm.h130
-rw-r--r--include/linux/mm_inline.h99
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmdebug.h14
-rw-r--r--include/linux/mmzone.h99
-rw-r--r--include/linux/module.h19
-rw-r--r--include/linux/mtd/nand-qpic-common.h478
-rw-r--r--include/linux/mtd/spinand.h58
-rw-r--r--include/linux/netdevice.h6
-rw-r--r--include/linux/nfs4.h9
-rw-r--r--include/linux/nfs_common.h3
-rw-r--r--include/linux/nfs_fs.h22
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h8
-rw-r--r--include/linux/nfslocalio.h48
-rw-r--r--include/linux/numa_memblks.h3
-rw-r--r--include/linux/nvmem-provider.h4
-rw-r--r--include/linux/of.h30
-rw-r--r--include/linux/of_address.h1
-rw-r--r--include/linux/of_platform.h2
-rw-r--r--include/linux/omap-gpmc.h4
-rw-r--r--include/linux/page-flags.h23
-rw-r--r--include/linux/page-isolation.h2
-rw-r--r--include/linux/pagemap.h24
-rw-r--r--include/linux/pci-ecam.h4
-rw-r--r--include/linux/pci-epf.h4
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h11
-rw-r--r--include/linux/pgtable.h9
-rw-r--r--include/linux/platform_data/cros_ec_commands.h28
-rw-r--r--include/linux/platform_data/keyscan-davinci.h29
-rw-r--r--include/linux/platform_profile.h33
-rw-r--r--include/linux/pm.h8
-rw-r--r--include/linux/pm_domain.h1
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power_supply.h63
-rw-r--r--include/linux/pps_gen_kernel.h78
-rw-r--r--include/linux/pps_kernel.h3
-rw-r--r--include/linux/property.h15
-rw-r--r--include/linux/pwm.h17
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/sched/hotplug.h4
-rw-r--r--include/linux/scmi_imx_protocol.h9
-rw-r--r--include/linux/seqlock.h1
-rw-r--r--include/linux/serial_8250.h4
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h13
-rw-r--r--include/linux/soundwire/sdw.h154
-rw-r--r--include/linux/stat.h4
-rw-r--r--include/linux/string.h15
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/gss_asn1.h81
-rw-r--r--include/linux/sunrpc/gss_krb5.h1
-rw-r--r--include/linux/sunrpc/svc.h13
-rw-r--r--include/linux/sunrpc/svc_xprt.h22
-rw-r--r--include/linux/sunrpc/xdrgen/nfs4_1.h153
-rw-r--r--include/linux/sunrpc/xprtmultipath.h1
-rw-r--r--include/linux/swap.h35
-rw-r--r--include/linux/swap_cgroup.h14
-rw-r--r--include/linux/swap_slots.h3
-rw-r--r--include/linux/sysfs.h6
-rw-r--r--include/linux/task_work.h3
-rw-r--r--include/linux/time64.h5
-rw-r--r--include/linux/trace_events.h14
-rw-r--r--include/linux/tracepoint.h20
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/usb/pd.h22
-rw-r--r--include/linux/usb/phy.h5
-rw-r--r--include/linux/usb/storage.h8
-rw-r--r--include/linux/usb/tcpm.h3
-rw-r--r--include/linux/usb/typec.h2
-rw-r--r--include/linux/usb/typec_tbt.h1
-rw-r--r--include/linux/verification.h2
-rw-r--r--include/linux/virtio.h8
-rw-r--r--include/linux/vmw_vmci_defs.h14
-rw-r--r--include/media/cec.h1
-rw-r--r--include/media/v4l2-mediabus.h21
-rw-r--r--include/net/page_pool/types.h1
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/xfrm.h16
-rw-r--r--include/rdma/ib_cache.h16
-rw-r--r--include/rdma/ib_marshall.h3
-rw-r--r--include/rdma/ib_pack.h3
-rw-r--r--include/rdma/ib_verbs.h24
-rw-r--r--include/rv/da_monitor.h4
-rw-r--r--include/scsi/libfc.h2
-rw-r--r--include/scsi/libsas.h9
-rw-r--r--include/scsi/scsi_bsg_iscsi.h2
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_host.h26
-rw-r--r--include/scsi/scsi_transport_iscsi.h8
-rw-r--r--include/soc/amlogic/reset-meson-aux.h23
-rw-r--r--include/soc/qcom/tcs.h26
-rw-r--r--include/sound/hdaudio_ext.h45
-rw-r--r--include/sound/pcm.h7
-rw-r--r--include/sound/rawmidi.h11
-rw-r--r--include/sound/sdca.h7
-rw-r--r--include/sound/sdca_function.h3
-rw-r--r--include/sound/simple_card_utils.h15
-rw-r--r--include/sound/soc-dai.h3
-rw-r--r--include/sound/soc.h12
-rw-r--r--include/sound/soc_sdw_utils.h2
-rw-r--r--include/sound/ump.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h3
-rw-r--r--include/trace/events/capability.h57
-rw-r--r--include/trace/events/f2fs.h39
-rw-r--r--include/trace/events/mmap_lock.h32
-rw-r--r--include/trace/events/mmflags.h3
-rw-r--r--include/trace/events/rxrpc.h1
-rw-r--r--include/trace/events/task.h44
-rw-r--r--include/uapi/asm-generic/fcntl.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h9
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/bpf.h10
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/fanotify.h18
-rw-r--r--include/uapi/linux/fcntl.h4
-rw-r--r--include/uapi/linux/fuse.h76
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/iommufd.h4
-rw-r--r--include/uapi/linux/kvm.h8
-rw-r--r--include/uapi/linux/nfs4.h7
-rw-r--r--include/uapi/linux/ntsync.h42
-rw-r--r--include/uapi/linux/pci_regs.h16
-rw-r--r--include/uapi/linux/pcitest.h1
-rw-r--r--include/uapi/linux/pps_gen.h37
-rw-r--r--include/uapi/linux/securebits.h24
-rw-r--r--include/uapi/linux/taskstats.h17
-rw-r--r--include/uapi/linux/usb/functionfs.h8
-rw-r--r--include/uapi/linux/vduse.h2
-rw-r--r--include/uapi/linux/virtio_pci.h14
-rw-r--r--include/uapi/mtd/ubi-user.h33
-rw-r--r--include/uapi/sound/asequencer.h12
-rw-r--r--include/uapi/sound/asound.h8
-rw-r--r--include/uapi/sound/compress_params.h23
-rw-r--r--include/uapi/sound/fcp.h120
-rw-r--r--include/uapi/sound/sof/tokens.h2
-rw-r--r--include/uapi/sound/tlv.h2
-rw-r--r--include/ufs/ufs.h9
-rw-r--r--include/ufs/ufshcd.h39
-rw-r--r--include/ufs/ufshci.h5
-rw-r--r--init/Kconfig4
-rw-r--r--init/do_mounts_initrd.c4
-rw-r--r--init/main.c18
-rw-r--r--io_uring/Makefile2
-rw-r--r--io_uring/alloc_cache.c44
-rw-r--r--io_uring/alloc_cache.h69
-rw-r--r--io_uring/filetable.c2
-rw-r--r--io_uring/futex.c6
-rw-r--r--io_uring/io_uring.c14
-rw-r--r--io_uring/io_uring.h21
-rw-r--r--io_uring/memmap.c4
-rw-r--r--io_uring/msg_ring.c4
-rw-r--r--io_uring/net.c134
-rw-r--r--io_uring/net.h20
-rw-r--r--io_uring/poll.c6
-rw-r--r--io_uring/register.c8
-rw-r--r--io_uring/rsrc.c88
-rw-r--r--io_uring/rsrc.h5
-rw-r--r--io_uring/rw.c41
-rw-r--r--io_uring/rw.h27
-rw-r--r--io_uring/timeout.c2
-rw-r--r--io_uring/uring_cmd.c19
-rw-r--r--io_uring/waitid.c2
-rw-r--r--ipc/ipc_sysctl.c2
-rw-r--r--ipc/mq_sysctl.c2
-rw-r--r--ipc/util.c11
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/bpf/arena.c18
-rw-r--r--kernel/bpf/arraymap.c6
-rw-r--r--kernel/bpf/bpf_cgrp_storage.c15
-rw-r--r--kernel/bpf/bpf_inode_storage.c9
-rw-r--r--kernel/bpf/bpf_local_storage.c38
-rw-r--r--kernel/bpf/bpf_struct_ops.c21
-rw-r--r--kernel/bpf/bpf_task_storage.c15
-rw-r--r--kernel/bpf/btf.c31
-rw-r--r--kernel/bpf/cpumask.c2
-rw-r--r--kernel/bpf/hashtab.c79
-rw-r--r--kernel/bpf/helpers.c43
-rw-r--r--kernel/bpf/log.c21
-rw-r--r--kernel/bpf/lpm_trie.c20
-rw-r--r--kernel/bpf/range_tree.c2
-rw-r--r--kernel/bpf/syscall.c14
-rw-r--r--kernel/bpf/sysfs_btf.c12
-rw-r--r--kernel/bpf/verifier.c1172
-rw-r--r--kernel/capability.c8
-rw-r--r--kernel/cpu.c14
-rw-r--r--kernel/debug/kdb/kdb_support.c24
-rw-r--r--kernel/delayacct.c57
-rw-r--r--kernel/events/uprobes.c4
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c38
-rw-r--r--kernel/futex/core.c5
-rw-r--r--kernel/futex/futex.h11
-rw-r--r--kernel/futex/pi.c2
-rw-r--r--kernel/futex/waitwake.c7
-rw-r--r--kernel/gcov/clang.c6
-rwxr-xr-xkernel/gen_kheaders.sh42
-rw-r--r--kernel/hung_task.c4
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/kexec_core.c25
-rw-r--r--kernel/kheaders.c19
-rw-r--r--kernel/kprobes.c594
-rw-r--r--kernel/ksysfs.c21
-rw-r--r--kernel/kthread.c6
-rw-r--r--kernel/latencytop.c8
-rw-r--r--kernel/locking/lockdep.c2
-rw-r--r--kernel/module/Kconfig56
-rw-r--r--kernel/module/internal.h28
-rw-r--r--kernel/module/main.c168
-rw-r--r--kernel/module/strict_rwx.c13
-rw-r--r--kernel/module/sysfs.c122
-rw-r--r--kernel/module/version.c47
-rw-r--r--kernel/padata.c45
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/params.c22
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/pid_sysctl.h2
-rw-r--r--kernel/power/hibernate.c7
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/printk/sysctl.c2
-rw-r--r--kernel/rcu/tiny.c2
-rw-r--r--kernel/rcu/tree.c2
-rw-r--r--kernel/reboot.c2
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched/autogroup.c2
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/cpufreq_schedutil.c6
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/ext.c335
-rw-r--r--kernel/sched/fair.c21
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h51
-rw-r--r--kernel/sched/stats.h9
-rw-r--r--kernel/sched/syscalls.c7
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/seccomp.c14
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/smp.c4
-rw-r--r--kernel/stackleak.c5
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/sysctl-test.c6
-rw-r--r--kernel/sysctl.c4
-rw-r--r--kernel/task_work.c14
-rw-r--r--kernel/time/clocksource.c9
-rw-r--r--kernel/time/hrtimer.c125
-rw-r--r--kernel/time/timer.c2
-rw-r--r--kernel/time/timer_migration.c10
-rw-r--r--kernel/trace/bpf_trace.c58
-rw-r--r--kernel/trace/fgraph.c1
-rw-r--r--kernel/trace/ftrace.c19
-rw-r--r--kernel/trace/ring_buffer.c9
-rw-r--r--kernel/trace/rv/Kconfig27
-rw-r--r--kernel/trace/rv/Makefile3
-rw-r--r--kernel/trace/rv/monitors/wip/Kconfig12
-rw-r--r--kernel/trace/rv/monitors/wip/wip.c2
-rw-r--r--kernel/trace/rv/monitors/wip/wip_trace.h15
-rw-r--r--kernel/trace/rv/monitors/wwnr/Kconfig11
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.c2
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr_trace.h16
-rw-r--r--kernel/trace/rv/rv.c2
-rw-r--r--kernel/trace/rv/rv_trace.h (renamed from include/trace/events/rv.h)26
-rw-r--r--kernel/trace/trace.c297
-rw-r--r--kernel/trace/trace.h16
-rw-r--r--kernel/trace/trace_dynevent.c23
-rw-r--r--kernel/trace/trace_entries.h8
-rw-r--r--kernel/trace/trace_eprobe.c36
-rw-r--r--kernel/trace/trace_events.c465
-rw-r--r--kernel/trace/trace_events_filter.c23
-rw-r--r--kernel/trace/trace_events_hist.c119
-rw-r--r--kernel/trace/trace_events_synth.c17
-rw-r--r--kernel/trace/trace_events_trigger.c67
-rw-r--r--kernel/trace/trace_events_user.c2
-rw-r--r--kernel/trace/trace_functions_graph.c35
-rw-r--r--kernel/trace/trace_irqsoff.c5
-rw-r--r--kernel/trace/trace_kprobe.c155
-rw-r--r--kernel/trace/trace_osnoise.c57
-rw-r--r--kernel/trace/trace_probe.c51
-rw-r--r--kernel/trace/trace_sched_wakeup.c6
-rw-r--r--kernel/trace/trace_stack.c6
-rw-r--r--kernel/trace/trace_stat.c26
-rw-r--r--kernel/trace/trace_uprobe.c15
-rw-r--r--kernel/ucount.c8
-rw-r--r--kernel/umh.c2
-rw-r--r--kernel/utsname_sysctl.c4
-rw-r--r--kernel/watchdog.c4
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig89
-rw-r--r--lib/Kconfig.debug56
-rw-r--r--lib/Makefile4
-rw-r--r--lib/alloc_tag.c6
-rw-r--r--lib/atomic64.c78
-rw-r--r--lib/cpumask.c5
-rw-r--r--lib/crc-t10dif.c156
-rw-r--r--lib/crc16_kunit.c155
-rw-r--r--lib/crc32.c237
-rw-r--r--lib/crc32defs.h59
-rw-r--r--lib/crc32test.c852
-rw-r--r--lib/crc_kunit.c435
-rw-r--r--lib/crypto/aesgcm.c2
-rw-r--r--lib/crypto/gf128mul.c75
-rw-r--r--lib/fault-inject.c28
-rw-r--r--lib/gen_crc32table.c113
-rw-r--r--lib/inflate.c2
-rw-r--r--lib/kobject.c24
-rw-r--r--lib/kunit/Kconfig12
-rw-r--r--lib/kunit/debugfs.c2
-rw-r--r--lib/kunit/executor.c21
-rw-r--r--lib/kunit/test.c6
-rw-r--r--lib/kunit_iov_iter.c5
-rw-r--r--lib/libcrc32c.c74
-rw-r--r--lib/list_debug.c22
-rw-r--r--lib/list_sort.c7
-rw-r--r--lib/lz4/lz4_compress.c1
-rw-r--r--lib/lz4/lz4_decompress.c1
-rw-r--r--lib/lz4/lz4defs.h4
-rw-r--r--lib/lz4/lz4hc_compress.c1
-rw-r--r--lib/maple_tree.c73
-rw-r--r--lib/math/Makefile1
-rw-r--r--lib/math/tests/Makefile1
-rw-r--r--lib/math/tests/int_sqrt_kunit.c66
-rw-r--r--lib/rhashtable.c14
-rw-r--r--lib/sort.c7
-rw-r--r--lib/stackinit_kunit.c108
-rw-r--r--lib/test_bpf.c64
-rw-r--r--lib/test_maple_tree.c56
-rw-r--r--lib/test_min_heap.c30
-rw-r--r--lib/test_sysctl.c6
-rw-r--r--lib/test_vmalloc.c2
-rw-r--r--lib/test_xarray.c35
-rw-r--r--lib/xarray.c78
-rw-r--r--mm/Kconfig72
-rw-r--r--mm/Makefile1
-rw-r--r--mm/cma.h2
-rw-r--r--mm/compaction.c36
-rw-r--r--mm/damon/Kconfig30
-rw-r--r--mm/damon/Makefile1
-rw-r--r--mm/damon/core.c254
-rw-r--r--mm/damon/dbgfs.c1148
-rw-r--r--mm/damon/paddr.c80
-rw-r--r--mm/damon/reclaim.c2
-rw-r--r--mm/damon/sysfs-common.h16
-rw-r--r--mm/damon/sysfs-schemes.c271
-rw-r--r--mm/damon/sysfs.c188
-rw-r--r--mm/damon/tests/.kunitconfig7
-rw-r--r--mm/damon/tests/core-kunit.h14
-rw-r--r--mm/damon/tests/dbgfs-kunit.h173
-rw-r--r--mm/damon/tests/vaddr-kunit.h2
-rw-r--r--mm/damon/vaddr.c2
-rw-r--r--mm/debug.c71
-rw-r--r--mm/early_ioremap.c8
-rw-r--r--mm/filemap.c207
-rw-r--r--mm/gup.c107
-rw-r--r--mm/huge_memory.c19
-rw-r--r--mm/hugetlb.c421
-rw-r--r--mm/hugetlb_cgroup.c18
-rw-r--r--mm/hugetlb_vmemmap.c2
-rw-r--r--mm/internal.h62
-rw-r--r--mm/kasan/generic.c18
-rw-r--r--mm/kasan/hw_tags.c5
-rw-r--r--mm/kasan/kasan.h18
-rw-r--r--mm/kasan/kasan_test_c.c4
-rw-r--r--mm/kasan/sw_tags.c3
-rw-r--r--mm/kfence/core.c2
-rw-r--r--mm/kfence/kfence_test.c3
-rw-r--r--mm/kfence/report.c3
-rw-r--r--mm/khugepaged.c45
-rw-r--r--mm/kmemleak.c6
-rw-r--r--mm/kmsan/shadow.c8
-rw-r--r--mm/ksm.c19
-rw-r--r--mm/madvise.c7
-rw-r--r--mm/memblock.c20
-rw-r--r--mm/memcontrol-v1.c16
-rw-r--r--mm/memcontrol.c114
-rw-r--r--mm/memfd.c139
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c286
-rw-r--r--mm/memory_hotplug.c48
-rw-r--r--mm/mempolicy.c83
-rw-r--r--mm/migrate.c165
-rw-r--r--mm/mm_init.c8
-rw-r--r--mm/mmap.c575
-rw-r--r--mm/mmap_lock.c50
-rw-r--r--mm/mmu_gather.c25
-rw-r--r--mm/mseal.c6
-rw-r--r--mm/nommu.c7
-rw-r--r--mm/numa.c8
-rw-r--r--mm/numa_emulation.c45
-rw-r--r--mm/numa_memblks.c2
-rw-r--r--mm/oom_kill.c10
-rw-r--r--mm/page-writeback.c55
-rw-r--r--mm/page_alloc.c181
-rw-r--r--mm/page_frag_cache.c6
-rw-r--r--mm/page_idle.c10
-rw-r--r--mm/page_io.c1
-rw-r--r--mm/page_isolation.c12
-rw-r--r--mm/percpu.c70
-rw-r--r--mm/pt_reclaim.c71
-rw-r--r--mm/readahead.c63
-rw-r--r--mm/rodata_test.c7
-rw-r--r--mm/secretmem.c3
-rw-r--r--mm/shmem.c331
-rw-r--r--mm/shrinker_debug.c16
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c25
-rw-r--r--mm/sparse-vmemmap.c5
-rw-r--r--mm/sparse.c5
-rw-r--r--mm/swap.c74
-rw-r--r--mm/swap_cgroup.c233
-rw-r--r--mm/swap_slots.c78
-rw-r--r--mm/swap_state.c1
-rw-r--r--mm/swapfile.c1261
-rw-r--r--mm/truncate.c53
-rw-r--r--mm/userfaultfd.c55
-rw-r--r--mm/util.c17
-rw-r--r--mm/vma.c582
-rw-r--r--mm/vma.h48
-rw-r--r--mm/vma_internal.h1
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c520
-rw-r--r--mm/workingset.c65
-rw-r--r--mm/zpdesc.h182
-rw-r--r--mm/zsmalloc.c440
-rw-r--r--mm/zswap.c2
-rw-r--r--net/bluetooth/l2cap_sock.c4
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bpf/test_run.c1
-rw-r--r--net/core/bpf_sk_storage.c11
-rw-r--r--net/core/dev.c53
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/page_pool.c9
-rw-r--r--net/core/page_pool_priv.h2
-rw-r--r--net/core/page_pool_user.c15
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/ethtool/ioctl.c4
-rw-r--r--net/ethtool/rss.c3
-rw-r--r--net/hsr/hsr_debugfs.c9
-rw-r--r--net/hsr/hsr_forward.c7
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/ipmr_base.c3
-rw-r--r--net/ipv4/tcp_output.c9
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ioam6_iptunnel.c14
-rw-r--r--net/ipv6/rpl_iptunnel.c15
-rw-r--r--net/ipv6/seg6_iptunnel.c15
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/mac80211/debugfs_netdev.c11
-rw-r--r--net/mptcp/ctrl.c4
-rw-r--r--net/mptcp/options.c13
-rw-r--r--net/mptcp/pm_netlink.c3
-rw-r--r--net/mptcp/protocol.c4
-rw-r--r--net/mptcp/protocol.h30
-rw-r--r--net/ncsi/ncsi-manage.c13
-rw-r--r--net/ncsi/ncsi-rsp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c21
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/nfc/nci/hci.c2
-rw-r--r--net/rose/af_rose.c24
-rw-r--r--net/rose/rose_timer.c15
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/call_object.c6
-rw-r--r--net/rxrpc/conn_event.c21
-rw-r--r--net/rxrpc/conn_object.c1
-rw-r--r--net/rxrpc/input.c12
-rw-r--r--net/rxrpc/peer_event.c16
-rw-r--r--net/rxrpc/peer_object.c12
-rw-r--r--net/rxrpc/sendmsg.c2
-rw-r--r--net/sched/sch_ets.c2
-rw-r--r--net/sched/sch_fifo.c3
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c231
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c55
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_internal.h7
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c1
-rw-r--r--net/sunrpc/cache.c53
-rw-r--r--net/sunrpc/clnt.c29
-rw-r--r--net/sunrpc/debugfs.c15
-rw-r--r--net/sunrpc/rpc_pipe.c14
-rw-r--r--net/sunrpc/svc.c4
-rw-r--r--net/sunrpc/svc_xprt.c41
-rw-r--r--net/sunrpc/svcsock.c12
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/sunrpc/xprtmultipath.c17
-rw-r--r--net/vmw_vsock/af_vsock.c13
-rw-r--r--net/vmw_vsock/hyperv_transport.c6
-rw-r--r--net/wireless/core.c5
-rw-r--r--net/wireless/wext-core.c4
-rw-r--r--net/xfrm/xfrm_interface_core.c2
-rw-r--r--net/xfrm/xfrm_output.c7
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_replay.c10
-rw-r--r--net/xfrm/xfrm_state.c93
-rw-r--r--rust/Makefile37
-rw-r--r--rust/bindings/bindings_helper.h4
-rw-r--r--rust/helpers/device.c10
-rw-r--r--rust/helpers/helpers.c5
-rw-r--r--rust/helpers/io.c101
-rw-r--r--rust/helpers/pci.c18
-rw-r--r--rust/helpers/platform.c13
-rw-r--r--rust/helpers/rcu.c13
-rw-r--r--rust/kernel/device.rs7
-rw-r--r--rust/kernel/device_id.rs165
-rw-r--r--rust/kernel/devres.rs201
-rw-r--r--rust/kernel/driver.rs188
-rw-r--r--rust/kernel/init.rs2
-rw-r--r--rust/kernel/io.rs260
-rw-r--r--rust/kernel/lib.rs20
-rw-r--r--rust/kernel/miscdevice.rs100
-rw-r--r--rust/kernel/of.rs60
-rw-r--r--rust/kernel/pci.rs434
-rw-r--r--rust/kernel/platform.rs200
-rw-r--r--rust/kernel/revocable.rs219
-rw-r--r--rust/kernel/sync.rs1
-rw-r--r--rust/kernel/sync/rcu.rs47
-rw-r--r--rust/kernel/types.rs11
-rw-r--r--rust/macros/module.rs4
-rw-r--r--samples/Kconfig11
-rw-r--r--samples/Makefile3
-rw-r--r--samples/bpf/Makefile2
-rw-r--r--samples/bpf/xdp2skb_meta_kern.c1
-rw-r--r--samples/check-exec/.gitignore2
-rw-r--r--samples/check-exec/Makefile15
-rw-r--r--samples/check-exec/inc.c212
-rwxr-xr-xsamples/check-exec/run-script-ask.inc9
-rwxr-xr-xsamples/check-exec/script-ask.inc5
-rwxr-xr-xsamples/check-exec/script-exec.inc4
-rw-r--r--samples/check-exec/script-noexec.inc4
-rw-r--r--samples/check-exec/set-exec.c85
-rw-r--r--samples/damon/Kconfig30
-rw-r--r--samples/damon/Makefile4
-rw-r--r--samples/damon/prcl.c136
-rw-r--r--samples/damon/wsse.c116
-rw-r--r--samples/landlock/sandboxer.c7
-rw-r--r--samples/livepatch/livepatch-callbacks-busymod.c3
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c3
-rw-r--r--samples/livepatch/livepatch-shadow-mod.c15
-rw-r--r--samples/rust/Kconfig31
-rw-r--r--samples/rust/Makefile3
-rw-r--r--samples/rust/rust_driver_pci.rs110
-rw-r--r--samples/rust/rust_driver_platform.rs49
-rw-r--r--samples/rust/rust_misc_device.rs238
-rw-r--r--scripts/Makefile3
-rw-r--r--scripts/Makefile.build37
-rw-r--r--scripts/Makefile.defconf13
-rw-r--r--scripts/Makefile.extrawarn18
-rw-r--r--scripts/Makefile.lib13
-rw-r--r--scripts/Makefile.modinst2
-rw-r--r--scripts/Makefile.modpost2
-rwxr-xr-xscripts/checkpatch.pl26
-rw-r--r--scripts/coccinelle/misc/secs_to_jiffies.cocci22
-rw-r--r--scripts/gdb/linux/cpus.py2
-rw-r--r--scripts/gendwarfksyms/.gitignore2
-rw-r--r--scripts/gendwarfksyms/Makefile12
-rw-r--r--scripts/gendwarfksyms/cache.c51
-rw-r--r--scripts/gendwarfksyms/die.c166
-rw-r--r--scripts/gendwarfksyms/dwarf.c1159
-rw-r--r--scripts/gendwarfksyms/examples/kabi.h157
-rw-r--r--scripts/gendwarfksyms/examples/kabi_ex.c30
-rw-r--r--scripts/gendwarfksyms/examples/kabi_ex.h263
-rw-r--r--scripts/gendwarfksyms/examples/symbolptr.c33
-rw-r--r--scripts/gendwarfksyms/gendwarfksyms.c187
-rw-r--r--scripts/gendwarfksyms/gendwarfksyms.h296
-rw-r--r--scripts/gendwarfksyms/kabi.c336
-rw-r--r--scripts/gendwarfksyms/symbols.c341
-rw-r--r--scripts/gendwarfksyms/types.c481
-rw-r--r--scripts/generate_rust_target.rs18
-rw-r--r--scripts/genksyms/Makefile18
-rw-r--r--scripts/genksyms/genksyms.c107
-rw-r--r--scripts/genksyms/genksyms.h9
-rw-r--r--scripts/genksyms/lex.l17
-rw-r--r--scripts/genksyms/parse.y166
-rw-r--r--scripts/kconfig/Makefile4
-rw-r--r--scripts/kconfig/confdata.c6
-rw-r--r--scripts/kconfig/qconf.cc8
-rw-r--r--scripts/kconfig/symbol.c1
-rwxr-xr-xscripts/link-vmlinux.sh6
-rw-r--r--scripts/mod/devicetable-offsets.c1
-rw-r--r--scripts/mod/file2alias.c9
-rw-r--r--scripts/mod/modpost.c106
-rw-r--r--scripts/mod/modpost.h6
-rw-r--r--scripts/module.lds.S1
-rw-r--r--scripts/package/PKGBUILD1
-rwxr-xr-xscripts/package/builddeb24
-rwxr-xr-xscripts/package/install-extmod-build33
-rwxr-xr-xscripts/package/mkdebian2
-rwxr-xr-xscripts/spdxcheck.py6
-rw-r--r--scripts/spelling.txt37
-rwxr-xr-xscripts/tags.sh4
-rw-r--r--security/Kconfig.hardening1
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/bpf/hooks.c1
-rw-r--r--security/commoncap.c90
-rw-r--r--security/integrity/ima/ima_appraise.c27
-rw-r--r--security/integrity/ima/ima_main.c29
-rw-r--r--security/keys/sysctl.c2
-rw-r--r--security/keys/trusted-keys/trusted_dcp.c22
-rw-r--r--security/landlock/access.h77
-rw-r--r--security/landlock/fs.c114
-rw-r--r--security/landlock/fs.h1
-rw-r--r--security/landlock/ruleset.c26
-rw-r--r--security/landlock/ruleset.h52
-rw-r--r--security/landlock/syscalls.c39
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/hooks.c3
-rw-r--r--security/tomoyo/common.c32
-rw-r--r--security/tomoyo/domain.c11
-rw-r--r--security/yama/yama_lsm.c6
-rw-r--r--sound/core/memory.c41
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/rawmidi.c3
-rw-r--r--sound/core/seq/Kconfig4
-rw-r--r--sound/core/seq/oss/seq_oss_device.h5
-rw-r--r--sound/core/seq/oss/seq_oss_init.c8
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c84
-rw-r--r--sound/core/seq/seq_clientmgr.c19
-rw-r--r--sound/core/seq/seq_system.c34
-rw-r--r--sound/core/seq/seq_system.h35
-rw-r--r--sound/core/seq/seq_ump_client.c42
-rw-r--r--sound/core/ump.c112
-rw-r--r--sound/firewire/fireface/ff-protocol-former.c4
-rw-r--r--sound/hda/hdac_component.c7
-rw-r--r--sound/hda/hdac_stream.c63
-rw-r--r--sound/isa/sb/sb16_csp.c5
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/ac97/ac97_proc.c8
-rw-r--r--sound/pci/ad1889.c8
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/ctxfi/ctdaio.c48
-rw-r--r--sound/pci/emu10k1/emuproc.c15
-rw-r--r--sound/pci/ens1370.c6
-rw-r--r--sound/pci/hda/Kconfig14
-rw-r--r--sound/pci/hda/Makefile2
-rw-r--r--sound/pci/hda/hda_auto_parser.c8
-rw-r--r--sound/pci/hda/hda_auto_parser.h1
-rw-r--r--sound/pci/hda/hda_hwdep.c2
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/ideapad_hotkey_led_helper.c36
-rw-r--r--sound/pci/hda/patch_conexant.c13
-rw-r--r--sound/pci/hda/patch_realtek.c60
-rw-r--r--sound/pci/hda/tas2781-spi.h158
-rw-r--r--sound/pci/hda/tas2781_hda_spi.c1265
-rw-r--r--sound/pci/hda/tas2781_spi_fwlib.c2006
-rw-r--r--sound/pci/lola/lola_clock.c2
-rw-r--r--sound/pci/nm256/nm256.c8
-rw-r--r--sound/pci/rme32.c13
-rw-r--r--sound/pci/rme96.c13
-rw-r--r--sound/pci/rme9652/hdsp.c10
-rw-r--r--sound/pci/rme9652/hdspm.c15
-rw-r--r--sound/pci/rme9652/rme9652.c5
-rw-r--r--sound/pci/sonicvibes.c8
-rw-r--r--sound/pci/trident/trident_main.c4
-rw-r--r--sound/soc/amd/Kconfig2
-rw-r--r--sound/soc/amd/acp/acp-i2s.c1
-rw-r--r--sound/soc/amd/ps/pci-ps.c16
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c28
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/ad193x-i2c.c3
-rw-r--r--sound/soc/codecs/adau1761-i2c.c5
-rw-r--r--sound/soc/codecs/adau1781-i2c.c5
-rw-r--r--sound/soc/codecs/adau1977-i2c.c5
-rw-r--r--sound/soc/codecs/alc5623.c10
-rw-r--r--sound/soc/codecs/alc5632.c6
-rw-r--r--sound/soc/codecs/aw88081.c333
-rw-r--r--sound/soc/codecs/aw88081.h43
-rw-r--r--sound/soc/codecs/cs35l56.c8
-rw-r--r--sound/soc/codecs/cs42l43.c2
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c6
-rw-r--r--sound/soc/codecs/cs42l84.c2
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/es8316.c2
-rw-r--r--sound/soc/codecs/es8323.c2
-rw-r--r--sound/soc/codecs/es8326.c4
-rw-r--r--sound/soc/codecs/madera.c7
-rw-r--r--sound/soc/codecs/max98088.c4
-rw-r--r--sound/soc/codecs/max98090.c18
-rw-r--r--sound/soc/codecs/max98095.c4
-rw-r--r--sound/soc/codecs/nau8824.c8
-rw-r--r--sound/soc/codecs/ntp8835.c2
-rw-r--r--sound/soc/codecs/ntp8918.c2
-rw-r--r--sound/soc/codecs/pcm186x-i2c.c3
-rw-r--r--sound/soc/codecs/pcm6240.c3
-rw-r--r--sound/soc/codecs/peb2466.c3
-rw-r--r--sound/soc/codecs/rt5514.c3
-rw-r--r--sound/soc/codecs/rt5682-i2c.c6
-rw-r--r--sound/soc/codecs/rt5682.c12
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt715-sdw.c41
-rw-r--r--sound/soc/codecs/rt715.h3
-rw-r--r--sound/soc/codecs/sma1307.c4
-rw-r--r--sound/soc/codecs/ssm2602-i2c.c5
-rw-r--r--sound/soc/codecs/tas2562.c4
-rw-r--r--sound/soc/codecs/tas2781-i2c.c71
-rw-r--r--sound/soc/codecs/tas5720.c10
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c4
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c6
-rw-r--r--sound/soc/codecs/tlv320aic3x-i2c.c3
-rw-r--r--sound/soc/codecs/tpa6130a2.c4
-rw-r--r--sound/soc/codecs/uda1342.c2
-rw-r--r--sound/soc/codecs/wcd9335.c2
-rw-r--r--sound/soc/codecs/wm8904.c13
-rw-r--r--sound/soc/codecs/wm8985.c4
-rw-r--r--sound/soc/fsl/Kconfig3
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c2
-rw-r--r--sound/soc/fsl/fsl_asrc.c179
-rw-r--r--sound/soc/fsl/fsl_asrc.h2
-rw-r--r--sound/soc/fsl/fsl_asrc_common.h70
-rw-r--r--sound/soc/fsl/fsl_asrc_m2m.c729
-rw-r--r--sound/soc/fsl/fsl_easrc.c261
-rw-r--r--sound/soc/fsl/fsl_easrc.h4
-rw-r--r--sound/soc/fsl/fsl_micfil.c131
-rw-r--r--sound/soc/fsl/fsl_micfil.h2
-rw-r--r--sound/soc/fsl/fsl_mqs.c28
-rw-r--r--sound/soc/fsl/fsl_sai.c7
-rw-r--r--sound/soc/fsl/fsl_sai.h3
-rw-r--r--sound/soc/fsl/fsl_utils.c45
-rw-r--r--sound/soc/fsl/fsl_utils.h5
-rw-r--r--sound/soc/fsl/fsl_xcvr.c404
-rw-r--r--sound/soc/fsl/fsl_xcvr.h13
-rw-r--r--sound/soc/fsl/imx-audmux.c2
-rw-r--r--sound/soc/fsl/imx-card.c2
-rw-r--r--sound/soc/fsl/imx-rpmsg.c2
-rw-r--r--sound/soc/generic/audio-graph-card.c48
-rw-r--r--sound/soc/generic/audio-graph-card2.c318
-rw-r--r--sound/soc/generic/simple-card-utils.c89
-rw-r--r--sound/soc/generic/simple-card.c58
-rw-r--r--sound/soc/intel/avs/apl.c3
-rw-r--r--sound/soc/intel/avs/cnl.c1
-rw-r--r--sound/soc/intel/avs/core.c24
-rw-r--r--sound/soc/intel/avs/debugfs.c1
-rw-r--r--sound/soc/intel/avs/ipc.c25
-rw-r--r--sound/soc/intel/avs/loader.c36
-rw-r--r--sound/soc/intel/avs/messages.c22
-rw-r--r--sound/soc/intel/avs/messages.h3
-rw-r--r--sound/soc/intel/avs/pcm.c5
-rw-r--r--sound/soc/intel/avs/registers.h47
-rw-r--r--sound/soc/intel/avs/skl.c1
-rw-r--r--sound/soc/intel/avs/topology.c4
-rw-r--r--sound/soc/intel/avs/trace.h38
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c17
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c2
-rw-r--r--sound/soc/intel/boards/sof_sdw.c56
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c45
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c70
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c289
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ptl-match.c148
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c194
-rw-r--r--sound/soc/intel/keembay/kmb_platform.c2
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.c4
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c19
-rw-r--r--sound/soc/mediatek/mt8365/Makefile2
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-mt6357.c11
-rw-r--r--sound/soc/qcom/common.c6
-rw-r--r--sound/soc/qcom/lpass-platform.c6
-rw-r--r--sound/soc/qcom/sc7180.c2
-rw-r--r--sound/soc/qcom/sdm845.c5
-rw-r--r--sound/soc/renesas/Kconfig2
-rw-r--r--sound/soc/renesas/rz-ssi.c228
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c59
-rw-r--r--sound/soc/sdca/Makefile2
-rw-r--r--sound/soc/sdca/sdca_device.c2
-rw-r--r--sound/soc/sdca/sdca_functions.c132
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs_amp.c46
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c13
-rw-r--r--sound/soc/soc-card.c4
-rw-r--r--sound/soc/soc-core.c58
-rw-r--r--sound/soc/soc-dai.c27
-rw-r--r--sound/soc/soc-dapm.c14
-rw-r--r--sound/soc/soc-pcm.c32
-rw-r--r--sound/soc/soc-topology.c10
-rw-r--r--sound/soc/sof/imx/imx8.c24
-rw-r--r--sound/soc/sof/imx/imx8m.c52
-rw-r--r--sound/soc/sof/imx/imx8ulp.c3
-rw-r--r--sound/soc/sof/intel/atom.c16
-rw-r--r--sound/soc/sof/intel/bdw.c19
-rw-r--r--sound/soc/sof/intel/byt.c3
-rw-r--r--sound/soc/sof/intel/hda-dai.c12
-rw-r--r--sound/soc/sof/intel/hda-pcm.c15
-rw-r--r--sound/soc/sof/intel/hda.c5
-rw-r--r--sound/soc/sof/ipc4-topology.c2
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c2
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c6
-rw-r--r--sound/soc/sof/sof-audio.h1
-rw-r--r--sound/soc/sof/sof-client-ipc-flood-test.c39
-rw-r--r--sound/soc/sof/sof-priv.h8
-rw-r--r--sound/soc/sof/topology.c4
-rw-r--r--sound/soc/sunxi/sun4i-codec.c409
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c24
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c38
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/fcp.c1134
-rw-r--r--sound/usb/fcp.h7
-rw-r--r--sound/usb/line6/toneport.c2
-rw-r--r--sound/usb/mixer_quirks.c7
-rw-r--r--sound/usb/mixer_scarlett2.c8
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--tools/accounting/getdelays.c67
-rw-r--r--tools/accounting/procacct.c5
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h410
-rw-r--r--tools/bootconfig/main.c4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst9
-rw-r--r--tools/bpf/bpftool/Makefile7
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool7
-rw-r--r--tools/bpf/bpftool/btf.c51
-rw-r--r--tools/bpf/bpftool/cfg.c1
-rw-r--r--tools/bpf/bpftool/feature.c23
-rw-r--r--tools/bpf/resolve_btfids/main.c12
-rw-r--r--tools/build/Build.include2
-rw-r--r--tools/build/Makefile.build20
-rw-r--r--tools/build/Makefile.feature46
-rw-r--r--tools/build/feature/Makefile10
-rw-r--r--tools/build/feature/test-all.c15
-rw-r--r--tools/build/feature/test-libaudit.c11
-rw-r--r--tools/build/feature/test-libelf-zstd.c9
-rw-r--r--tools/include/linux/filter.h10
-rw-r--r--tools/include/linux/kasan-tags.h15
-rw-r--r--tools/include/nolibc/sys.h18
-rw-r--r--tools/include/uapi/linux/bpf.h10
-rw-r--r--tools/include/uapi/linux/if_xdp.h4
-rw-r--r--tools/lib/api/fs/fs.c6
-rw-r--r--tools/lib/bpf/bpf.c3
-rw-r--r--tools/lib/bpf/bpf.h5
-rw-r--r--tools/lib/bpf/btf.c3
-rw-r--r--tools/lib/bpf/btf_relocate.c2
-rw-r--r--tools/lib/bpf/libbpf.c53
-rw-r--r--tools/lib/bpf/libbpf.h9
-rw-r--r--tools/lib/bpf/libbpf.map4
-rw-r--r--tools/lib/bpf/linker.c248
-rw-r--r--tools/lib/bpf/usdt.c2
-rw-r--r--tools/lib/perf/Documentation/libperf.txt1
-rw-r--r--tools/lib/perf/cpumap.c131
-rw-r--r--tools/lib/perf/evlist.c2
-rw-r--r--tools/lib/perf/include/internal/cpumap.h4
-rw-r--r--tools/lib/perf/include/perf/cpumap.h6
-rw-r--r--tools/lib/perf/libperf.map1
-rw-r--r--tools/net/ynl/lib/ynl.c2
-rw-r--r--tools/pci/Build1
-rw-r--r--tools/pci/Makefile58
-rw-r--r--tools/pci/pcitest.c250
-rw-r--r--tools/pci/pcitest.sh72
-rw-r--r--tools/perf/Documentation/perf-check.txt2
-rw-r--r--tools/perf/Documentation/perf-config.txt2
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt19
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt596
-rw-r--r--tools/perf/Documentation/perf-lock.txt4
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-test.txt20
-rw-r--r--tools/perf/Documentation/perf-trace.txt5
-rw-r--r--tools/perf/MANIFEST3
-rw-r--r--tools/perf/Makefile.config132
-rw-r--r--tools/perf/Makefile.perf56
-rw-r--r--tools/perf/arch/alpha/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/alpha/entry/syscalls/Makefile.syscalls5
-rw-r--r--tools/perf/arch/alpha/entry/syscalls/syscall.tbl504
-rw-r--r--tools/perf/arch/alpha/include/syscall_table.h2
-rw-r--r--tools/perf/arch/arc/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/arc/entry/syscalls/Makefile.syscalls3
-rw-r--r--tools/perf/arch/arc/include/syscall_table.h2
-rw-r--r--tools/perf/arch/arm/entry/syscalls/Kbuild4
-rw-r--r--tools/perf/arch/arm/entry/syscalls/Makefile.syscalls2
-rw-r--r--tools/perf/arch/arm/entry/syscalls/syscall.tbl483
-rw-r--r--tools/perf/arch/arm/include/syscall_table.h2
-rw-r--r--tools/perf/arch/arm64/Makefile22
-rw-r--r--tools/perf/arch/arm64/entry/syscalls/Kbuild3
-rw-r--r--tools/perf/arch/arm64/entry/syscalls/Makefile.syscalls6
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl46
-rw-r--r--tools/perf/arch/arm64/entry/syscalls/syscall_32.tbl476
l---------tools/perf/arch/arm64/entry/syscalls/syscall_64.tbl1
-rw-r--r--tools/perf/arch/arm64/include/syscall_table.h8
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c90
-rw-r--r--tools/perf/arch/csky/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/csky/entry/syscalls/Makefile.syscalls3
-rw-r--r--tools/perf/arch/csky/include/syscall_table.h2
-rw-r--r--tools/perf/arch/loongarch/Makefile22
-rw-r--r--tools/perf/arch/loongarch/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/loongarch/entry/syscalls/Makefile.syscalls3
-rwxr-xr-xtools/perf/arch/loongarch/entry/syscalls/mksyscalltbl45
-rw-r--r--tools/perf/arch/loongarch/include/syscall_table.h2
-rw-r--r--tools/perf/arch/mips/Makefile18
-rw-r--r--tools/perf/arch/mips/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/mips/entry/syscalls/Makefile.syscalls5
-rw-r--r--tools/perf/arch/mips/entry/syscalls/mksyscalltbl32
-rw-r--r--tools/perf/arch/mips/include/syscall_table.h2
-rw-r--r--tools/perf/arch/parisc/entry/syscalls/Kbuild3
-rw-r--r--tools/perf/arch/parisc/entry/syscalls/Makefile.syscalls6
-rw-r--r--tools/perf/arch/parisc/entry/syscalls/syscall.tbl463
-rw-r--r--tools/perf/arch/parisc/include/syscall_table.h8
-rw-r--r--tools/perf/arch/powerpc/Makefile25
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/Kbuild3
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/Makefile.syscalls6
-rwxr-xr-xtools/perf/arch/powerpc/entry/syscalls/mksyscalltbl39
-rw-r--r--tools/perf/arch/powerpc/include/syscall_table.h8
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c3
-rw-r--r--tools/perf/arch/riscv/Makefile22
-rw-r--r--tools/perf/arch/riscv/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/riscv/entry/syscalls/Makefile.syscalls4
-rwxr-xr-xtools/perf/arch/riscv/entry/syscalls/mksyscalltbl47
-rw-r--r--tools/perf/arch/riscv/include/syscall_table.h8
-rw-r--r--tools/perf/arch/s390/Makefile21
-rw-r--r--tools/perf/arch/s390/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/s390/entry/syscalls/Makefile.syscalls5
-rwxr-xr-xtools/perf/arch/s390/entry/syscalls/mksyscalltbl32
-rw-r--r--tools/perf/arch/s390/include/syscall_table.h2
-rw-r--r--tools/perf/arch/sh/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/sh/entry/syscalls/Makefile.syscalls4
-rw-r--r--tools/perf/arch/sh/entry/syscalls/syscall.tbl472
-rw-r--r--tools/perf/arch/sh/include/syscall_table.h2
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/Kbuild3
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/Makefile.syscalls5
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/syscall.tbl514
-rw-r--r--tools/perf/arch/sparc/include/syscall_table.h8
-rw-r--r--tools/perf/arch/x86/Build1
-rw-r--r--tools/perf/arch/x86/Makefile25
-rw-r--r--tools/perf/arch/x86/entry/syscalls/Kbuild3
-rw-r--r--tools/perf/arch/x86/entry/syscalls/Makefile.syscalls6
-rwxr-xr-xtools/perf/arch/x86/entry/syscalls/syscalltbl.sh42
-rw-r--r--tools/perf/arch/x86/include/syscall_table.h8
-rw-r--r--tools/perf/arch/x86/util/Build2
-rw-r--r--tools/perf/arch/x86/util/iostat.c4
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/Kbuild2
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/Makefile.syscalls4
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/syscall.tbl439
-rw-r--r--tools/perf/arch/xtensa/include/syscall_table.h2
-rw-r--r--tools/perf/bench/epoll-wait.c7
-rw-r--r--tools/perf/bench/inject-buildid.c13
-rw-r--r--tools/perf/builtin-annotate.c1
-rw-r--r--tools/perf/builtin-check.c2
-rw-r--r--tools/perf/builtin-config.c38
-rw-r--r--tools/perf/builtin-diff.c5
-rw-r--r--tools/perf/builtin-ftrace.c149
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c8
-rw-r--r--tools/perf/builtin-kmem.c12
-rw-r--r--tools/perf/builtin-kvm.c61
-rw-r--r--tools/perf/builtin-kwork.c7
-rw-r--r--tools/perf/builtin-lock.c281
-rw-r--r--tools/perf/builtin-mem.c1
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-report.c6
-rw-r--r--tools/perf/builtin-sched.c1
-rw-r--r--tools/perf/builtin-script.c404
-rw-r--r--tools/perf/builtin-stat.c27
-rw-r--r--tools/perf/builtin-top.c6
-rw-r--r--tools/perf/builtin-trace.c137
-rw-r--r--tools/perf/builtin.h6
-rwxr-xr-xtools/perf/check-headers.sh9
-rw-r--r--tools/perf/perf.c6
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json93
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/common-and-microarch.json715
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json122
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/energy.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json42
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json209
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/gcycle.json97
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/general.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/hwpf.json52
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json113
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json52
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json160
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json159
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/memory.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json208
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pmu.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/retired.json30
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json171
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json94
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json254
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json362
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/trace.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/arm64/recommended.json5
-rwxr-xr-xtools/perf/pmu-events/jevents.py16
-rw-r--r--tools/perf/scripts/Makefile.syscalls61
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Context.c20
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py177
-rwxr-xr-xtools/perf/scripts/syscalltbl.sh86
-rw-r--r--tools/perf/tests/Build6
-rw-r--r--tools/perf/tests/builtin-test.c225
-rw-r--r--tools/perf/tests/code-reading.c92
-rw-r--r--tools/perf/tests/cpumap.c62
-rw-r--r--tools/perf/tests/event_groups.c31
-rw-r--r--tools/perf/tests/make7
-rw-r--r--tools/perf/tests/parse-events.c25
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_blacklisted.sh4
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh8
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_basic.sh4
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_invalid_options.sh9
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_line_semantics.sh9
-rwxr-xr-xtools/perf/tests/shell/base_report/setup.sh2
-rwxr-xr-xtools/perf/tests/shell/base_report/test_basic.sh2
-rw-r--r--tools/perf/tests/shell/common/init.sh7
-rw-r--r--tools/perf/tests/shell/coresight/Makefile2
-rwxr-xr-xtools/perf/tests/shell/ftrace.sh5
-rw-r--r--tools/perf/tests/shell/lib/perf_json_output_lint.py14
-rwxr-xr-xtools/perf/tests/shell/perftool-testsuite_probe.sh2
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh36
-rwxr-xr-xtools/perf/tests/shell/stat+std_output.sh2
-rwxr-xr-xtools/perf/tests/shell/stat.sh6
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe.sh30
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh4
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh28
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh2
-rwxr-xr-xtools/perf/tests/shell/trace_btf_enum.sh8
-rwxr-xr-xtools/perf/tests/shell/trace_btf_general.sh94
-rw-r--r--tools/perf/tests/sigtrap.c20
-rw-r--r--tools/perf/tests/stat.c16
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/tests-scripts.c2
-rw-r--r--tools/perf/tests/tests.h10
-rw-r--r--tools/perf/tests/workloads/landlock.c2
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh3
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/ui/browsers/scripts.c177
-rw-r--r--tools/perf/ui/gtk/annotate.c16
-rw-r--r--tools/perf/ui/hist.c2
-rw-r--r--tools/perf/util/Build7
-rw-r--r--tools/perf/util/annotate.c108
-rw-r--r--tools/perf/util/annotate.h36
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h9
-rw-r--r--tools/perf/util/arm-spe.c86
-rw-r--r--tools/perf/util/auxtrace.c67
-rw-r--r--tools/perf/util/auxtrace.h6
-rw-r--r--tools/perf/util/bpf-event.c10
-rw-r--r--tools/perf/util/bpf_ftrace.c15
-rw-r--r--tools/perf/util/bpf_kwork.c2
-rw-r--r--tools/perf/util/bpf_kwork_top.c2
-rw-r--r--tools/perf/util/bpf_lock_contention.c142
-rw-r--r--tools/perf/util/bpf_off_cpu.c5
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c11
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c46
-rw-r--r--tools/perf/util/bpf_skel/kwork_top.bpf.c4
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c95
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h15
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h8
-rw-r--r--tools/perf/util/btf.c27
-rw-r--r--tools/perf/util/btf.h10
-rw-r--r--tools/perf/util/cgroup.c2
-rw-r--r--tools/perf/util/config.c27
-rw-r--r--tools/perf/util/config.h1
-rw-r--r--tools/perf/util/cpumap.c4
-rw-r--r--tools/perf/util/data-convert-bt.c10
-rw-r--r--tools/perf/util/data-convert-json.c8
-rw-r--r--tools/perf/util/disasm.c88
-rw-r--r--tools/perf/util/dlfilter.c3
-rw-r--r--tools/perf/util/env.c30
-rw-r--r--tools/perf/util/env.h6
-rw-r--r--tools/perf/util/evsel.c310
-rw-r--r--tools/perf/util/evsel.h13
-rw-r--r--tools/perf/util/evsel_config.h1
-rw-r--r--tools/perf/util/evsel_fprintf.c4
-rw-r--r--tools/perf/util/expr.c5
-rw-r--r--tools/perf/util/ftrace.h9
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh4
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/hist.c114
-rw-r--r--tools/perf/util/hist.h14
-rw-r--r--tools/perf/util/intel-pt-decoder/Build18
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c3
-rw-r--r--tools/perf/util/jitdump.c15
-rw-r--r--tools/perf/util/kvm-stat.c70
-rw-r--r--tools/perf/util/kvm-stat.h3
-rw-r--r--tools/perf/util/kwork.h7
-rw-r--r--tools/perf/util/llvm-c-helpers.cpp1
-rw-r--r--tools/perf/util/lock-contention.c143
-rw-r--r--tools/perf/util/lock-contention.h20
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/maps.c7
-rw-r--r--tools/perf/util/mem-events.c5
-rw-r--r--tools/perf/util/namespaces.c7
-rw-r--r--tools/perf/util/namespaces.h3
-rw-r--r--tools/perf/util/parse-events.c26
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/path.c8
-rw-r--r--tools/perf/util/path.h2
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c7
-rw-r--r--tools/perf/util/pmu.c31
-rw-r--r--tools/perf/util/probe-event.c50
-rw-r--r--tools/perf/util/probe-event.h1
-rw-r--r--tools/perf/util/probe-finder.c15
-rw-r--r--tools/perf/util/probe-finder.h5
-rw-r--r--tools/perf/util/python.c341
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c66
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/sort.c33
-rw-r--r--tools/perf/util/stat-display.c242
-rw-r--r--tools/perf/util/stat-shadow.c5
-rw-r--r--tools/perf/util/stat.h3
-rw-r--r--tools/perf/util/stream.c7
-rw-r--r--tools/perf/util/stream.h10
-rw-r--r--tools/perf/util/string.c15
-rw-r--r--tools/perf/util/svghelper.c1
-rw-r--r--tools/perf/util/symbol-elf.c6
-rw-r--r--tools/perf/util/symbol.c9
-rw-r--r--tools/perf/util/synthetic-events.c14
-rw-r--r--tools/perf/util/syscalltbl.c90
-rw-r--r--tools/perf/util/syscalltbl.h1
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/perf/util/trace-event-scripting.c187
-rw-r--r--tools/perf/util/trace-event.h7
-rw-r--r--tools/perf/util/values.c106
-rw-r--r--tools/perf/util/values.h9
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.832
-rw-r--r--tools/power/x86/turbostat/turbostat.c652
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h178
-rw-r--r--tools/sched_ext/include/scx/common.h6
-rw-r--r--tools/sched_ext/include/scx/compat.bpf.h5
-rw-r--r--tools/sched_ext/include/scx/compat.h1
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.bpf.h105
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.h41
-rw-r--r--tools/sched_ext/include/scx/enums.bpf.h12
-rw-r--r--tools/sched_ext/include/scx/enums.h27
-rw-r--r--tools/sched_ext/include/scx/user_exit_info.h9
-rw-r--r--tools/sched_ext/scx_central.bpf.c13
-rw-r--r--tools/sched_ext/scx_central.c1
-rw-r--r--tools/sched_ext/scx_flatcg.bpf.c25
-rw-r--r--tools/sched_ext/scx_flatcg.c1
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c2
-rw-r--r--tools/sched_ext/scx_qmap.c2
-rw-r--r--tools/sched_ext/scx_simple.bpf.c9
-rw-r--r--tools/scripts/syscall.tbl409
-rw-r--r--tools/testing/cxl/test/cxl.c2
-rw-r--r--tools/testing/cxl/test/mem.c23
-rw-r--r--tools/testing/cxl/test/mock.c6
-rw-r--r--tools/testing/ktest/examples/include/defaults.conf2
-rwxr-xr-xtools/testing/ktest/ktest.pl9
-rw-r--r--tools/testing/kunit/configs/all_tests.config3
-rwxr-xr-xtools/testing/kunit/kunit.py11
-rw-r--r--tools/testing/kunit/kunit_kernel.py3
-rw-r--r--tools/testing/kunit/qemu_configs/arm64.py2
-rw-r--r--tools/testing/radix-tree/multiorder.c4
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/acct/acct_syscall.c2
-rw-r--r--tools/testing/selftests/arm64/fp/kernel-test.c3
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile113
-rw-r--r--tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/Makefile20
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fd_array.c441
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fill_link_info.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c329
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c792
-rw-r--r--tools/testing/selftests/bpf/prog_tests/free_timer.c165
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c27
-rw-r--r--tools/testing/selftests/bpf/prog_tests/missed.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bonding.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c166
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cb_refs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c15
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c6
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_exit.c4
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_tailcall.c4
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma.c2
-rw-r--r--tools/testing/selftests/bpf/progs/free_timer.c71
-rw-r--r--tools/testing/selftests/bpf/progs/irq.c444
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c14
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod.c2
-rw-r--r--tools/testing/selftests/bpf/progs/jit_probe_mem.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_destructive.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_race.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c2
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c2
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe.c2
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c8
-rw-r--r--tools/testing/selftests/bpf/progs/nested_acquire.c2
-rw-r--r--tools/testing/selftests/bpf/progs/preempt_lock.c28
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue.c4
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sock_addr_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_detach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c2
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.h2
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_fill_link_info.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c26
-rw-r--r--tools/testing/selftests/bpf/progs/uninit_stack.c5
-rw-r--r--tools/testing/selftests/bpf/progs/unsupported_ops.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_array_access.c188
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_basic_stack.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds.c134
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const_or.c4
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c12
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_in_map.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_1.c97
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_2.c28
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_mtu.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_stack.c4
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c28
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_var_off.c8
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c2
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_synctypes.py28
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c780
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh178
-rw-r--r--tools/testing/selftests/bpf/test_kmods/.gitignore (renamed from tools/testing/selftests/bpf/bpf_testmod/.gitignore)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/Makefile21
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c (renamed from tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c (renamed from tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c (renamed from tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h (renamed from tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (renamed from tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.h (renamed from tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h (renamed from tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h)0
-rw-r--r--tools/testing/selftests/bpf/test_progs.c15
-rw-r--r--tools/testing/selftests/bpf/test_progs.h15
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh1
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh79
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c2
-rw-r--r--tools/testing/selftests/bpf/veristat.c159
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c2
-rw-r--r--tools/testing/selftests/damon/.gitignore3
-rw-r--r--tools/testing/selftests/damon/Makefile11
-rw-r--r--tools/testing/selftests/damon/config1
-rwxr-xr-xtools/testing/selftests/damon/debugfs_attrs.sh17
-rwxr-xr-xtools/testing/selftests/damon/debugfs_duplicate_context_creation.sh27
-rwxr-xr-xtools/testing/selftests/damon/debugfs_empty_targets.sh21
-rwxr-xr-xtools/testing/selftests/damon/debugfs_huge_count_read_write.sh22
-rwxr-xr-xtools/testing/selftests/damon/debugfs_rm_non_contexts.sh19
-rwxr-xr-xtools/testing/selftests/damon/debugfs_schemes.sh19
-rwxr-xr-xtools/testing/selftests/damon/debugfs_target_ids.sh19
-rw-r--r--tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c68
-rwxr-xr-xtools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh22
-rw-r--r--tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c80
-rwxr-xr-xtools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh14
-rw-r--r--tools/testing/selftests/damon/huge_count_read_write.c46
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py9
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh16
-rw-r--r--tools/testing/selftests/drivers/ntsync/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/ntsync/Makefile7
-rw-r--r--tools/testing/selftests/drivers/ntsync/config1
-rw-r--r--tools/testing/selftests/drivers/ntsync/ntsync.c1343
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh168
-rw-r--r--tools/testing/selftests/exec/.gitignore4
-rw-r--r--tools/testing/selftests/exec/Makefile19
-rwxr-xr-xtools/testing/selftests/exec/check-exec-tests.sh205
-rw-r--r--tools/testing/selftests/exec/check-exec.c463
-rw-r--r--tools/testing/selftests/exec/config2
-rw-r--r--tools/testing/selftests/exec/false.c5
-rw-r--r--tools/testing/selftests/filesystems/statmount/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c22
-rw-r--r--tools/testing/selftests/ftrace/Makefile2
-rw-r--r--tools/testing/selftests/ftrace/poll.c74
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_uprobe.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-mod.tc191
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-poll.tc74
-rwxr-xr-xtools/testing/selftests/gpio/gpio-sim.sh31
-rw-r--r--tools/testing/selftests/ipc/msgque.c2
-rw-r--r--tools/testing/selftests/kselftest.h28
-rw-r--r--tools/testing/selftests/kselftest/ksft.py3
-rw-r--r--tools/testing/selftests/kselftest/ktap_helpers.sh6
-rw-r--r--tools/testing/selftests/kselftest_harness.h24
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile345
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm330
-rw-r--r--tools/testing/selftests/kvm/arm64/aarch32_id_regs.c (renamed from tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c)12
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer.c (renamed from tools/testing/selftests/kvm/aarch64/arch_timer.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c (renamed from tools/testing/selftests/kvm/aarch64/arch_timer_edge_cases.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/debug-exceptions.c (renamed from tools/testing/selftests/kvm/aarch64/debug-exceptions.c)4
-rw-r--r--tools/testing/selftests/kvm/arm64/get-reg-list.c (renamed from tools/testing/selftests/kvm/aarch64/get-reg-list.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/hypercalls.c (renamed from tools/testing/selftests/kvm/aarch64/hypercalls.c)6
-rw-r--r--tools/testing/selftests/kvm/arm64/mmio_abort.c (renamed from tools/testing/selftests/kvm/aarch64/mmio_abort.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/no-vgic-v3.c (renamed from tools/testing/selftests/kvm/aarch64/no-vgic-v3.c)2
-rw-r--r--tools/testing/selftests/kvm/arm64/page_fault_test.c (renamed from tools/testing/selftests/kvm/aarch64/page_fault_test.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/psci_test.c (renamed from tools/testing/selftests/kvm/aarch64/psci_test.c)8
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c (renamed from tools/testing/selftests/kvm/aarch64/set_id_regs.c)24
-rw-r--r--tools/testing/selftests/kvm/arm64/smccc_filter.c (renamed from tools/testing/selftests/kvm/aarch64/smccc_filter.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/vcpu_width_config.c (renamed from tools/testing/selftests/kvm/aarch64/vcpu_width_config.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_init.c (renamed from tools/testing/selftests/kvm/aarch64/vgic_init.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_irq.c (renamed from tools/testing/selftests/kvm/aarch64/vgic_irq.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c (renamed from tools/testing/selftests/kvm/aarch64/vgic_lpi_stress.c)0
-rw-r--r--tools/testing/selftests/kvm/arm64/vpmu_counter_access.c (renamed from tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c)19
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c2
-rw-r--r--tools/testing/selftests/kvm/include/arm64/arch_timer.h (renamed from tools/testing/selftests/kvm/include/aarch64/arch_timer.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/delay.h (renamed from tools/testing/selftests/kvm/include/aarch64/delay.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic.h (renamed from tools/testing/selftests/kvm/include/aarch64/gic.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3.h (renamed from tools/testing/selftests/kvm/include/aarch64/gic_v3.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3_its.h (renamed from tools/testing/selftests/kvm/include/aarch64/gic_v3_its.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h (renamed from tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/processor.h (renamed from tools/testing/selftests/kvm/include/aarch64/processor.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/spinlock.h (renamed from tools/testing/selftests/kvm/include/aarch64/spinlock.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/ucall.h (renamed from tools/testing/selftests/kvm/include/aarch64/ucall.h)0
-rw-r--r--tools/testing/selftests/kvm/include/arm64/vgic.h (renamed from tools/testing/selftests/kvm/include/aarch64/vgic.h)0
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h10
-rw-r--r--tools/testing/selftests/kvm/include/s390/debug_print.h (renamed from tools/testing/selftests/kvm/include/s390x/debug_print.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/diag318_test_handler.h (renamed from tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/facility.h (renamed from tools/testing/selftests/kvm/include/s390x/facility.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/kvm_util_arch.h (renamed from tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/processor.h (renamed from tools/testing/selftests/kvm/include/s390x/processor.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/sie.h (renamed from tools/testing/selftests/kvm/include/s390x/sie.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/ucall.h (renamed from tools/testing/selftests/kvm/include/s390x/ucall.h)0
-rw-r--r--tools/testing/selftests/kvm/include/x86/apic.h (renamed from tools/testing/selftests/kvm/include/x86_64/apic.h)2
-rw-r--r--tools/testing/selftests/kvm/include/x86/evmcs.h (renamed from tools/testing/selftests/kvm/include/x86_64/evmcs.h)3
-rw-r--r--tools/testing/selftests/kvm/include/x86/hyperv.h (renamed from tools/testing/selftests/kvm/include/x86_64/hyperv.h)3
-rw-r--r--tools/testing/selftests/kvm/include/x86/kvm_util_arch.h (renamed from tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h)0
-rw-r--r--tools/testing/selftests/kvm/include/x86/mce.h (renamed from tools/testing/selftests/kvm/include/x86_64/mce.h)2
-rw-r--r--tools/testing/selftests/kvm/include/x86/pmu.h (renamed from tools/testing/selftests/kvm/include/x86_64/pmu.h)0
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h (renamed from tools/testing/selftests/kvm/include/x86_64/processor.h)27
-rw-r--r--tools/testing/selftests/kvm/include/x86/sev.h (renamed from tools/testing/selftests/kvm/include/x86_64/sev.h)0
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm.h (renamed from tools/testing/selftests/kvm/include/x86_64/svm.h)6
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm_util.h (renamed from tools/testing/selftests/kvm/include/x86_64/svm_util.h)3
-rw-r--r--tools/testing/selftests/kvm/include/x86/ucall.h (renamed from tools/testing/selftests/kvm/include/x86_64/ucall.h)0
-rw-r--r--tools/testing/selftests/kvm/include/x86/vmx.h (renamed from tools/testing/selftests/kvm/include/x86_64/vmx.h)2
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic.c (renamed from tools/testing/selftests/kvm/lib/aarch64/gic.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_private.h (renamed from tools/testing/selftests/kvm/lib/aarch64/gic_private.h)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3.c (renamed from tools/testing/selftests/kvm/lib/aarch64/gic_v3.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c (renamed from tools/testing/selftests/kvm/lib/aarch64/gic_v3_its.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/handlers.S (renamed from tools/testing/selftests/kvm/lib/aarch64/handlers.S)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/processor.c (renamed from tools/testing/selftests/kvm/lib/aarch64/processor.c)8
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/spinlock.c (renamed from tools/testing/selftests/kvm/lib/aarch64/spinlock.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/ucall.c (renamed from tools/testing/selftests/kvm/lib/aarch64/ucall.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/vgic.c (renamed from tools/testing/selftests/kvm/lib/aarch64/vgic.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c3
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c66
-rw-r--r--tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c (renamed from tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/s390/facility.c (renamed from tools/testing/selftests/kvm/lib/s390x/facility.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/s390/processor.c (renamed from tools/testing/selftests/kvm/lib/s390x/processor.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/s390/ucall.c (renamed from tools/testing/selftests/kvm/lib/s390x/ucall.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/apic.c (renamed from tools/testing/selftests/kvm/lib/x86_64/apic.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/handlers.S (renamed from tools/testing/selftests/kvm/lib/x86_64/handlers.S)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/hyperv.c (renamed from tools/testing/selftests/kvm/lib/x86_64/hyperv.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/memstress.c (renamed from tools/testing/selftests/kvm/lib/x86_64/memstress.c)2
-rw-r--r--tools/testing/selftests/kvm/lib/x86/pmu.c (renamed from tools/testing/selftests/kvm/lib/x86_64/pmu.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c (renamed from tools/testing/selftests/kvm/lib/x86_64/processor.c)2
-rw-r--r--tools/testing/selftests/kvm/lib/x86/sev.c (renamed from tools/testing/selftests/kvm/lib/x86_64/sev.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/svm.c (renamed from tools/testing/selftests/kvm/lib/x86_64/svm.c)1
-rw-r--r--tools/testing/selftests/kvm/lib/x86/ucall.c (renamed from tools/testing/selftests/kvm/lib/x86_64/ucall.c)0
-rw-r--r--tools/testing/selftests/kvm/lib/x86/vmx.c (renamed from tools/testing/selftests/kvm/lib/x86_64/vmx.c)2
-rw-r--r--tools/testing/selftests/kvm/mmu_stress_test.c (renamed from tools/testing/selftests/kvm/max_guest_memory_test.c)162
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c2
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c2
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c18
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c2
-rw-r--r--tools/testing/selftests/kvm/s390/cmma_test.c (renamed from tools/testing/selftests/kvm/s390x/cmma_test.c)4
-rw-r--r--tools/testing/selftests/kvm/s390/config (renamed from tools/testing/selftests/kvm/s390x/config)0
-rw-r--r--tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c (renamed from tools/testing/selftests/kvm/s390x/cpumodel_subfuncs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/debug_test.c (renamed from tools/testing/selftests/kvm/s390x/debug_test.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/memop.c (renamed from tools/testing/selftests/kvm/s390x/memop.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/resets.c (renamed from tools/testing/selftests/kvm/s390x/resets.c)2
-rw-r--r--tools/testing/selftests/kvm/s390/shared_zeropage_test.c (renamed from tools/testing/selftests/kvm/s390x/shared_zeropage_test.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/sync_regs_test.c (renamed from tools/testing/selftests/kvm/s390x/sync_regs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/tprot.c (renamed from tools/testing/selftests/kvm/s390x/tprot.c)0
-rw-r--r--tools/testing/selftests/kvm/s390/ucontrol_test.c (renamed from tools/testing/selftests/kvm/s390x/ucontrol_test.c)32
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c59
-rw-r--r--tools/testing/selftests/kvm/steal_time.c3
-rw-r--r--tools/testing/selftests/kvm/x86/amx_test.c (renamed from tools/testing/selftests/kvm/x86_64/amx_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/apic_bus_clock_test.c (renamed from tools/testing/selftests/kvm/x86_64/apic_bus_clock_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/cpuid_test.c (renamed from tools/testing/selftests/kvm/x86_64/cpuid_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/cr4_cpuid_sync_test.c (renamed from tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/debug_regs.c (renamed from tools/testing/selftests/kvm/x86_64/debug_regs.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c (renamed from tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/exit_on_emulation_failure_test.c (renamed from tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/feature_msrs_test.c (renamed from tools/testing/selftests/kvm/x86_64/feature_msrs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/fix_hypercall_test.c (renamed from tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/flds_emulation.h (renamed from tools/testing/selftests/kvm/x86_64/flds_emulation.h)0
-rw-r--r--tools/testing/selftests/kvm/x86/hwcr_msr_test.c (renamed from tools/testing/selftests/kvm/x86_64/hwcr_msr_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_clock.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_clock.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_cpuid.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_evmcs.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_features.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_features.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_ipi.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_ipi.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_svm_test.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c (renamed from tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_clock_test.c (renamed from tools/testing/selftests/kvm/x86_64/kvm_clock_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/kvm_pv_test.c (renamed from tools/testing/selftests/kvm/x86_64/kvm_pv_test.c)38
-rw-r--r--tools/testing/selftests/kvm/x86/max_vcpuid_cap_test.c (renamed from tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/monitor_mwait_test.c (renamed from tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/nested_exceptions_test.c (renamed from tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/nx_huge_pages_test.c (renamed from tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c)0
-rwxr-xr-xtools/testing/selftests/kvm/x86/nx_huge_pages_test.sh (renamed from tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh)0
-rw-r--r--tools/testing/selftests/kvm/x86/platform_info_test.c (renamed from tools/testing/selftests/kvm/x86_64/platform_info_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_counters_test.c (renamed from tools/testing/selftests/kvm/x86_64/pmu_counters_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_event_filter_test.c (renamed from tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/private_mem_conversions_test.c (renamed from tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c (renamed from tools/testing/selftests/kvm/x86_64/private_mem_kvm_exits_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/recalc_apic_map_test.c (renamed from tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/set_boot_cpu_id.c (renamed from tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/set_sregs_test.c (renamed from tools/testing/selftests/kvm/x86_64/set_sregs_test.c)63
-rw-r--r--tools/testing/selftests/kvm/x86/sev_init2_tests.c (renamed from tools/testing/selftests/kvm/x86_64/sev_init2_tests.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/sev_migrate_tests.c (renamed from tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c (renamed from tools/testing/selftests/kvm/x86_64/sev_smoke_test.c)2
-rw-r--r--tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c (renamed from tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/smm_test.c (renamed from tools/testing/selftests/kvm/x86_64/smm_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/state_test.c (renamed from tools/testing/selftests/kvm/x86_64/state_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/svm_int_ctl_test.c (renamed from tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c (renamed from tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c (renamed from tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/svm_vmcall_test.c (renamed from tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/sync_regs_test.c (renamed from tools/testing/selftests/kvm/x86_64/sync_regs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/triple_fault_event_test.c (renamed from tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/tsc_msrs_test.c (renamed from tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/tsc_scaling_sync.c (renamed from tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/ucna_injection_test.c (renamed from tools/testing/selftests/kvm/x86_64/ucna_injection_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/userspace_io_test.c (renamed from tools/testing/selftests/kvm/x86_64/userspace_io_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c (renamed from tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_apic_access_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_msrs_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_nested_tsc_scaling_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_set_nested_state_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_ipi_test.c (renamed from tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_state_test.c (renamed from tools/testing/selftests/kvm/x86_64/xapic_state_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c (renamed from tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xen_shinfo_test.c (renamed from tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xen_vmcall_test.c (renamed from tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c)0
-rw-r--r--tools/testing/selftests/kvm/x86/xss_msr_test.c (renamed from tools/testing/selftests/kvm/x86_64/xss_msr_test.c)0
-rw-r--r--tools/testing/selftests/landlock/Makefile6
-rw-r--r--tools/testing/selftests/landlock/common.h38
-rw-r--r--tools/testing/selftests/landlock/fs_test.c184
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c2
-rw-r--r--tools/testing/selftests/landlock/sandbox-and-launch.c82
-rw-r--r--tools/testing/selftests/landlock/wait-pipe.c42
-rw-r--r--tools/testing/selftests/landlock/wrappers.h47
-rw-r--r--tools/testing/selftests/livepatch/functions.sh3
-rw-r--r--tools/testing/selftests/media_tests/regression_test.txt8
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c2
-rw-r--r--tools/testing/selftests/mm/.gitignore5
-rw-r--r--tools/testing/selftests/mm/Makefile16
-rw-r--r--tools/testing/selftests/mm/config1
-rw-r--r--tools/testing/selftests/mm/cow.c10
-rw-r--r--tools/testing/selftests/mm/guard-pages.c83
-rw-r--r--tools/testing/selftests/mm/ksm_tests.c2
-rw-r--r--tools/testing/selftests/mm/migration.c99
-rw-r--r--tools/testing/selftests/mm/mkdirty.c1
-rw-r--r--tools/testing/selftests/mm/mremap_test.c17
-rw-r--r--tools/testing/selftests/mm/mseal_test.c8
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c118
-rw-r--r--tools/testing/selftests/mm/pkey-arm64.h6
-rw-r--r--tools/testing/selftests/mm/pkey-helpers.h61
-rw-r--r--tools/testing/selftests/mm/pkey-powerpc.h4
-rw-r--r--tools/testing/selftests/mm/pkey-x86.h6
-rw-r--r--tools/testing/selftests/mm/pkey_sighandler_tests.c32
-rw-r--r--tools/testing/selftests/mm/pkey_util.c40
-rw-r--r--tools/testing/selftests/mm/protection_keys.c212
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh6
-rw-r--r--tools/testing/selftests/mm/seal_elf.c137
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c46
-rw-r--r--tools/testing/selftests/mm/thp_settings.c4
-rw-r--r--tools/testing/selftests/mm/thp_settings.h4
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c4
-rw-r--r--tools/testing/selftests/mm/uffd-wp-mremap.c380
-rw-r--r--tools/testing/selftests/mm/virtual_address_range.c41
-rw-r--r--tools/testing/selftests/mm/vm_util.c68
-rw-r--r--tools/testing/selftests/mm/vm_util.h1
-rw-r--r--tools/testing/selftests/mm/write_to_hugetlbfs.c2
-rwxr-xr-xtools/testing/selftests/net/bpf_offload.py14
-rw-r--r--tools/testing/selftests/net/lib/Makefile2
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c2
-rw-r--r--tools/testing/selftests/net/openvswitch/Makefile2
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh4
-rw-r--r--tools/testing/selftests/net/udpgso.c26
-rw-r--r--tools/testing/selftests/nolibc/Makefile11
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c44
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh9
-rw-r--r--tools/testing/selftests/pci_endpoint/.gitignore2
-rw-r--r--tools/testing/selftests/pci_endpoint/Makefile7
-rw-r--r--tools/testing/selftests/pci_endpoint/config4
-rw-r--r--tools/testing/selftests/pci_endpoint/pci_endpoint_test.c221
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c2
-rw-r--r--tools/testing/selftests/resctrl/Makefile1
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c4
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c2
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c4
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h6
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c9
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c137
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c8
-rw-r--r--tools/testing/selftests/riscv/vector/.gitignore3
-rw-r--r--tools/testing/selftests/riscv/vector/Makefile17
-rw-r--r--tools/testing/selftests/riscv/vector/v_exec_initval_nolibc.c94
-rw-r--r--tools/testing/selftests/riscv/vector/v_helpers.c68
-rw-r--r--tools/testing/selftests/riscv/vector/v_helpers.h8
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval.c22
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval_nolibc.c72
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c20
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_prctl.c305
-rw-r--r--tools/testing/selftests/rseq/param_test.c24
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k-bits.h412
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k-thread-pointer.h13
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k.h181
-rw-r--r--tools/testing/selftests/rseq/rseq-thread-pointer.h2
-rw-r--r--tools/testing/selftests/rseq/rseq.c32
-rw-r--r--tools/testing/selftests/rseq/rseq.h11
-rwxr-xr-xtools/testing/selftests/run_kselftest.sh2
-rw-r--r--tools/testing/selftests/sched_ext/runner.c15
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c199
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json34
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json23
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c6
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c41
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c110
-rw-r--r--tools/testing/selftests/x86/lam.c2
-rw-r--r--tools/testing/selftests/zram/.gitignore2
-rw-r--r--tools/testing/vma/vma.c64
-rw-r--r--tools/testing/vma/vma_internal.h220
-rw-r--r--tools/testing/vsock/util.c88
-rw-r--r--tools/testing/vsock/util.h2
-rw-r--r--tools/testing/vsock/vsock_test.c122
-rw-r--r--tools/tracing/rtla/Makefile4
-rw-r--r--tools/tracing/rtla/src/osnoise.c38
-rw-r--r--tools/tracing/rtla/src/osnoise.h2
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c5
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c5
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c37
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c40
-rw-r--r--tools/tracing/rtla/src/trace.c65
-rw-r--r--tools/tracing/rtla/src/trace.h4
-rw-r--r--tools/tracing/rtla/tests/engine.sh48
-rw-r--r--tools/tracing/rtla/tests/hwnoise.t21
-rw-r--r--tools/tracing/rtla/tests/osnoise.t19
-rw-r--r--tools/tracing/rtla/tests/timerlat.t27
-rw-r--r--tools/verification/dot2/automata.py36
-rw-r--r--tools/verification/dot2/dot2c.py4
-rw-r--r--tools/verification/dot2/dot2k17
-rw-r--r--tools/verification/dot2/dot2k.py226
-rw-r--r--tools/verification/dot2/dot2k_templates/Kconfig6
-rw-r--r--tools/verification/dot2/dot2k_templates/main.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_global.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_cpu.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_task.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/trace.h13
-rw-r--r--virt/kvm/guest_memfd.c36
-rw-r--r--virt/kvm/kvm_main.c140
5354 files changed, 235919 insertions, 81039 deletions
diff --git a/.mailmap b/.mailmap
index 42e42cabb36d..ae0adc499f4a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -142,13 +142,17 @@ Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
Brendan Higgins <brendan.higgins@linux.dev> <brendanhiggins@google.com>
Brian Avery <b.avery@hp.com>
+Brian Cain <bcain@kernel.org> <brian.cain@oss.qualcomm.com>
+Brian Cain <bcain@kernel.org> <bcain@quicinc.com>
Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
Bryan Tan <bryan-bt.tan@broadcom.com> <bryantan@vmware.com>
Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
-Carlos Bilbao <carlos.bilbao.osdev@gmail.com> <carlos.bilbao@amd.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao.osdev@gmail.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <bilbao@vt.edu>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
@@ -165,6 +169,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
+Christopher Obbard <christopher.obbard@linaro.org> <chris.obbard@collabora.com>
Christoph Hellwig <hch@lst.de>
Chuck Lever <chuck.lever@oracle.com> <cel@kernel.org>
Chuck Lever <chuck.lever@oracle.com> <cel@netapp.com>
@@ -261,6 +266,7 @@ Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Guru Das Srinagesh <quic_gurus@quicinc.com> <gurus@codeaurora.org>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
+Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
@@ -415,6 +421,7 @@ Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
+Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
<linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
@@ -760,6 +767,7 @@ Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
+Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
Yusuke Goda <goda.yusuke@renesas.com>
Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
diff --git a/CREDITS b/CREDITS
index cda68f04d5f1..53d11a46fd69 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2515,11 +2515,9 @@ D: SLS distribution
D: Initial implementation of VC's, pty's and select()
N: Pavel Machek
-E: pavel@ucw.cz
+E: pavel@kernel.org
P: 4096R/92DFCE96 4FA7 9EEF FCD4 C44F C585 B8C7 C060 2241 92DF CE96
-D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd,
-D: sun4/330 port, capabilities for elf, speedup for rm on ext2, USB,
-D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
+D: NBD, Sun4/330 port, USB, work on suspend-to-ram/disk,
D: Altera SoCFPGA and Nokia N900 support.
S: Czech Republic
@@ -4339,7 +4337,7 @@ D: Freescale Highspeed USB device driver
D: Freescale QE SoC support and Ethernet driver
S: B-1206 Jingmao Guojigongyu
S: 16 Baliqiao Nanjie, Beijing 101100
-S: People's Repulic of China
+S: People's Republic of China
N: Vlad Yasevich
E: vyasevich@gmail.com
diff --git a/Documentation/ABI/stable/sysfs-class-bluetooth b/Documentation/ABI/stable/sysfs-class-bluetooth
new file mode 100644
index 000000000000..36be02471174
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-bluetooth
@@ -0,0 +1,9 @@
+What: /sys/class/bluetooth/hci<index>/reset
+Date: 14-Jan-2025
+KernelVersion: 6.13
+Contact: linux-bluetooth@vger.kernel.org
+Description: This write-only attribute allows users to trigger the vendor reset
+ method on the Bluetooth device when arbitrary data is written.
+ The reset may or may not be done through the device transport
+ (e.g., UART/USB), and can also be done through an out-of-band
+ approach such as GPIO.
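[Editor's note: not part of the patch — a minimal usage sketch of the attribute described above, assuming an hci0 instance and sufficient privileges; per the description any written data triggers the vendor reset.]

    # Hypothetical index hci0; any payload written requests the vendor reset.
    with open("/sys/class/bluetooth/hci0/reset", "w") as f:
        f.write("1")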
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-dummy-source b/Documentation/ABI/testing/sysfs-bus-coresight-devices-dummy-source
new file mode 100644
index 000000000000..0830661ef656
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-dummy-source
@@ -0,0 +1,15 @@
+What: /sys/bus/coresight/devices/dummy_source<N>/enable_source
+Date: Dec 2024
+KernelVersion: 6.14
+Contact: Mao Jinlong <quic_jinlmao@quicinc.com>
+Description: (RW) Enable/disable tracing of dummy source. A sink should be activated
+ before enabling the source. The path of coresight components linking
+ the source to the sink is configured and managed automatically by the
+ coresight framework.
+
+What: /sys/bus/coresight/devices/dummy_source<N>/traceid
+Date: Dec 2024
+KernelVersion: 6.14
+Contact: Mao Jinlong <quic_jinlmao@quicinc.com>
+Description: (R) Show the trace ID that will appear in the trace stream
+ coming from this trace entity.
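[Editor's note: not part of the patch — a rough sketch of driving the dummy source attributes above, assuming an instance named dummy_source0 and that a sink has already been activated as the description requires.]

    base = "/sys/bus/coresight/devices/dummy_source0"    # hypothetical instance
    with open(base + "/enable_source", "w") as f:
        f.write("1")                                      # start tracing (sink enabled beforehand)
    with open(base + "/traceid") as f:
        print("trace ID:", f.read().strip())              # ID that appears in the trace stream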
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices b/Documentation/ABI/testing/sysfs-bus-event_source-devices
new file mode 100644
index 000000000000..79b268319df1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices
@@ -0,0 +1,24 @@
+What: /sys/bus/event_source/devices/<pmu>
+Date: 2014/02/24
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Performance Monitoring Unit (<pmu>)
+
+ Each <pmu> directory, for a PMU device, is a name
+ optionally followed by an underscore and then either a
+ decimal or hexadecimal number. For example, cpu is a
+ PMU name without a suffix as is intel_bts,
+ uncore_imc_0 is a PMU name with a 0 numeric suffix,
+ ddr_pmu_87e1b0000000 is a PMU name with a hex
+ suffix. The hex suffix must be more than two
+ characters long to avoid ambiguity with PMUs like the
+ S390 cpum_cf.
+
+ Tools can treat PMUs with the same name that differ by
+ suffix as instances of the same PMU for the sake of,
+ for example, opening an event. For example, the PMUs
+ uncore_imc_free_running_0 and
+ uncore_imc_free_running_1 have an event data_read;
+ opening the data_read event on a PMU specified as
+ uncore_imc_free_running should be treated as opening
+ the data_read event on PMU uncore_imc_free_running_0
+ and PMU uncore_imc_free_running_1.
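[Editor's note: not part of the patch — a small sketch illustrating the suffix convention described above: PMU instances are grouped by stripping a trailing decimal suffix, or a hexadecimal suffix longer than two characters.]

    import os
    import re

    # Strip "_<decimal>" or "_<hex, more than two chars>" to get the instance-less PMU name.
    def pmu_base_name(name):
        m = re.match(r"^(.*)_([0-9]+|[0-9a-f]{3,})$", name)
        return m.group(1) if m else name

    # e.g. uncore_imc_free_running_0 and uncore_imc_free_running_1 both map to
    # uncore_imc_free_running, while cpum_cf keeps its short hex-looking ending.
    for pmu in os.listdir("/sys/bus/event_source/devices"):
        print(pmu, "->", pmu_base_name(pmu))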
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
index e7efeab2ee83..0fe1b9487202 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -37,11 +37,13 @@ Description: Per-pmu performance monitoring events specific to the running syste
performance monitoring event supported by the <pmu>. The name
of the file is the name of the event.
- As performance monitoring event names are case
- insensitive in the perf tool, the perf tool only looks
- for lower or upper case event names in sysfs to avoid
+ As performance monitoring event names are case insensitive
+ in the perf tool, the perf tool only looks for all lower
+ case or all upper case event names in sysfs to avoid
scanning the directory. It is therefore required the
- name of the event here is either lower or upper case.
+ name of the event here is either completely lower or upper
+ case, with no mixed-case characters. Numbers, '.', '_', and
+ '-' are also allowed.
File contents:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index f83bd6829285..25d366d452a5 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -168,18 +168,6 @@ Description:
is required is a consistent labeling. Units after application
of scale and offset are millivolts.
-What: /sys/bus/iio/devices/iio:deviceX/in_currentY_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_currentY_supply_raw
-KernelVersion: 3.17
-Contact: linux-iio@vger.kernel.org
-Description:
- Raw (unscaled no bias removal etc.) current measurement from
- channel Y. In special cases where the channel does not
- correspond to externally available input one of the named
- versions may be used. The number must always be specified and
- unique to allow association with event codes. Units after
- application of scale and offset are milliamps.
-
What: /sys/bus/iio/devices/iio:deviceX/in_powerY_raw
KernelVersion: 4.5
Contact: linux-iio@vger.kernel.org
@@ -227,7 +215,7 @@ Description:
same scaling as _raw.
What: /sys/bus/iio/devices/iio:deviceX/in_temp_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_tempX_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_tempY_raw
What: /sys/bus/iio/devices/iio:deviceX/in_temp_x_raw
What: /sys/bus/iio/devices/iio:deviceX/in_temp_y_raw
What: /sys/bus/iio/devices/iio:deviceX/in_temp_ambient_raw
@@ -416,11 +404,11 @@ Contact: linux-iio@vger.kernel.org
Description:
Scaled humidity measurement in milli percent.
-What: /sys/bus/iio/devices/iio:deviceX/in_X_mean_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_Y_mean_raw
KernelVersion: 3.5
Contact: linux-iio@vger.kernel.org
Description:
- Averaged raw measurement from channel X. The number of values
+ Averaged raw measurement from channel Y. The number of values
used for averaging is device specific. The converting rules for
normal raw values also applies to the averaged raw values.
@@ -448,7 +436,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_offset
What: /sys/bus/iio/devices/iio:deviceX/in_magn_offset
What: /sys/bus/iio/devices/iio:deviceX/in_rot_offset
What: /sys/bus/iio/devices/iio:deviceX/in_angl_offset
-What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceX_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceY_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -508,6 +496,9 @@ What: /sys/bus/iio/devices/iio:deviceX/in_angl_scale
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_x_scale
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_y_scale
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_z_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_red_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_green_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_blue_scale
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_co2_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
@@ -660,10 +651,10 @@ What: /sys/.../iio:deviceX/in_magn_scale_available
What: /sys/.../iio:deviceX/in_illuminance_scale_available
What: /sys/.../iio:deviceX/in_intensity_scale_available
What: /sys/.../iio:deviceX/in_proximity_scale_available
-What: /sys/.../iio:deviceX/in_voltageX_scale_available
+What: /sys/.../iio:deviceX/in_voltageY_scale_available
What: /sys/.../iio:deviceX/in_voltage-voltage_scale_available
-What: /sys/.../iio:deviceX/out_voltageX_scale_available
-What: /sys/.../iio:deviceX/out_altvoltageX_scale_available
+What: /sys/.../iio:deviceX/out_voltageY_scale_available
+What: /sys/.../iio:deviceX/out_altvoltageY_scale_available
What: /sys/.../iio:deviceX/in_capacitance_scale_available
What: /sys/.../iio:deviceX/in_pressure_scale_available
What: /sys/.../iio:deviceX/in_pressureY_scale_available
@@ -681,6 +672,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_intensity_red_hardwaregain
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_green_hardwaregain
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_blue_hardwaregain
What: /sys/bus/iio/devices/iio:deviceX/in_intensity_clear_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_hardwaregain
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -1562,7 +1554,7 @@ Description:
This attribute is used to read the amount of quadrature error
present in the device at a given time.
-What: /sys/.../iio:deviceX/in_accelX_power_mode
+What: /sys/.../iio:deviceX/in_accelY_power_mode
KernelVersion: 3.11
Contact: linux-iio@vger.kernel.org
Description:
@@ -1633,6 +1625,10 @@ What: /sys/.../iio:deviceX/in_intensityY_uv_raw
What: /sys/.../iio:deviceX/in_intensityY_uva_raw
What: /sys/.../iio:deviceX/in_intensityY_uvb_raw
What: /sys/.../iio:deviceX/in_intensityY_duv_raw
+What: /sys/.../iio:deviceX/in_intensity_red_raw
+What: /sys/.../iio:deviceX/in_intensity_green_raw
+What: /sys/.../iio:deviceX/in_intensity_blue_raw
+What: /sys/.../iio:deviceX/in_intensity_clear_raw
KernelVersion: 3.4
Contact: linux-iio@vger.kernel.org
Description:
@@ -1691,16 +1687,19 @@ Description:
Raw value of rotation from true/magnetic north measured with
or without compensation from tilt sensors.
-What: /sys/bus/iio/devices/iio:deviceX/in_currentX_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_currentX_i_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_currentX_q_raw
-KernelVersion: 3.18
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_supply_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_i_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_q_raw
+KernelVersion: 3.17
Contact: linux-iio@vger.kernel.org
Description:
- Raw current measurement from channel X. Units are in milliamps
+ Raw current measurement from channel Y. Units are in milliamps
after application of scale and offset. If no offset or scale is
present, output should be considered as processed with the
- unit in milliamps.
+ unit in milliamps. In special cases where the channel does not
+ correspond to externally available input one of the named
+ versions may be used.
Channels with 'i' and 'q' modifiers always exist in pairs and both
channels refer to the same signal. The 'i' channel contains the in-phase
@@ -1864,9 +1863,9 @@ Description:
hardware fifo watermark level.
What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibemissivity
-What: /sys/bus/iio/devices/iio:deviceX/in_tempX_calibemissivity
+What: /sys/bus/iio/devices/iio:deviceX/in_tempY_calibemissivity
What: /sys/bus/iio/devices/iio:deviceX/in_temp_object_calibemissivity
-What: /sys/bus/iio/devices/iio:deviceX/in_tempX_object_calibemissivity
+What: /sys/bus/iio/devices/iio:deviceX/in_tempY_object_calibemissivity
KernelVersion: 4.1
Contact: linux-iio@vger.kernel.org
Description:
@@ -1887,17 +1886,17 @@ Description:
is considered as one sample for <type>[_name]_sampling_frequency.
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_co2_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_co2_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_co2_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_ethanol_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_ethanol_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_ethanol_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_h2_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_h2_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_h2_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_o2_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_o2_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_o2_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_voc_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationY_voc_raw
KernelVersion: 4.3
Contact: linux-iio@vger.kernel.org
Description:
@@ -1905,9 +1904,9 @@ Description:
after application of scale and offset are percents.
What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
-What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_resistanceY_raw
What: /sys/bus/iio/devices/iio:deviceX/out_resistance_raw
-What: /sys/bus/iio/devices/iio:deviceX/out_resistanceX_raw
+What: /sys/bus/iio/devices/iio:deviceX/out_resistanceY_raw
KernelVersion: 4.3
Contact: linux-iio@vger.kernel.org
Description:
@@ -2096,7 +2095,7 @@ Description:
One of the following thermocouple types: B, E, J, K, N, R, S, T.
What: /sys/bus/iio/devices/iio:deviceX/in_temp_object_calibambient
-What: /sys/bus/iio/devices/iio:deviceX/in_tempX_object_calibambient
+What: /sys/bus/iio/devices/iio:deviceX/in_tempY_object_calibambient
KernelVersion: 5.10
Contact: linux-iio@vger.kernel.org
Description:
@@ -2172,9 +2171,9 @@ Description:
- a range specified as "[min step max]"
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageX_sampling_frequency
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_sampling_frequency
What: /sys/bus/iio/devices/iio:deviceX/in_powerY_sampling_frequency
-What: /sys/bus/iio/devices/iio:deviceX/in_currentZ_sampling_frequency
+What: /sys/bus/iio/devices/iio:deviceX/in_currentY_sampling_frequency
KernelVersion: 5.20
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad-sigma-delta b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad-sigma-delta
new file mode 100644
index 000000000000..a5a8a579f4f3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad-sigma-delta
@@ -0,0 +1,23 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_sys_calibration
+KernelVersion: 5.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, initiates the system calibration procedure. This is done on a
+ single channel at a time. Write '1' to start the calibration.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_sys_calibration_mode_available
+KernelVersion: 5.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, returns a list with the possible calibration modes.
+ There are two available options:
+ "zero_scale" - calibrate to zero scale
+ "full_scale" - calibrate to full scale
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_sys_calibration_mode
+KernelVersion: 5.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, sets up the calibration mode used in the system calibration
+ procedure. Reading returns the current calibration mode.
+ Writing sets the system calibration mode.
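[Editor's note: not part of the patch — purely a sketch of the calibration flow described above, assuming a hypothetical iio:device0 exposing these attributes on channel 0.]

    base = "/sys/bus/iio/devices/iio:device0"             # hypothetical device
    with open(base + "/in_voltage0_sys_calibration_mode", "w") as f:
        f.write("zero_scale")                              # or "full_scale"
    with open(base + "/in_voltage0_sys_calibration", "w") as f:
        f.write("1")                                       # start the system calibration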
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192 b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192
index f8315202c8f0..28be1cabf112 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192
@@ -19,33 +19,9 @@ Description:
the bridge can be disconnected (when it is not being used
using the bridge_switch_en attribute.
-What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration
-KernelVersion:
-Contact: linux-iio@vger.kernel.org
-Description:
- Initiates the system calibration procedure. This is done on a
- single channel at a time. Write '1' to start the calibration.
-
What: /sys/bus/iio/devices/iio:deviceX/in_voltage2-voltage2_shorted_raw
KernelVersion:
Contact: linux-iio@vger.kernel.org
Description:
Measure voltage from AIN2 pin connected to AIN(+)
and AIN(-) shorted.
-
-What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration_mode_available
-KernelVersion:
-Contact: linux-iio@vger.kernel.org
-Description:
- Reading returns a list with the possible calibration modes.
- There are two available options:
- "zero_scale" - calibrate to zero scale
- "full_scale" - calibrate to full scale
-
-What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration_mode
-KernelVersion:
-Contact: linux-iio@vger.kernel.org
-Description:
- Sets up the calibration mode used in the system calibration
- procedure. Reading returns the current calibration mode.
- Writing sets the system calibration mode.
diff --git a/Documentation/ABI/testing/sysfs-class-platform-profile b/Documentation/ABI/testing/sysfs-class-platform-profile
new file mode 100644
index 000000000000..dc72adfb830a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-platform-profile
@@ -0,0 +1,48 @@
+What: /sys/class/platform-profile/platform-profile-X/name
+Date: March 2025
+KernelVersion: 6.14
+Description: Name of the class device given by the driver.
+
+ RO
+
+What: /sys/class/platform-profile/platform-profile-X/choices
+Date: March 2025
+KernelVersion: 6.14
+Description: This file contains a space-separated list of profiles supported
+ for this device.
+
+ Drivers must use the following standard profile-names:
+
+ ==================== ========================================
+ low-power Low power consumption
+ cool Cooler operation
+ quiet Quieter operation
+ balanced Balance between low power consumption
+ and performance
+ balanced-performance Balance between performance and low
+ power consumption with a slight bias
+ towards performance
+ performance High performance operation
+ custom Driver defined custom profile
+ ==================== ========================================
+
+ RO
+
+What: /sys/class/platform-profile/platform-profile-X/profile
+Date: March 2025
+KernelVersion: 6.14
+Description: Reading this file gives the currently selected profile for this
+ device. Writing this file with one of the strings listed in the
+ choices file changes the profile to the new value.
+
+ This file can be monitored for changes by polling for POLLPRI.
+ POLLPRI will be signaled on any change, whether that change comes
+ from a userspace write or from another source such as a hotkey
+ triggered profile change handled either directly by the
+ embedded controller or fully inside the kernel.
+
+ This file may also emit the string 'custom' to indicate
+ that the driver is using a driver defined custom profile.
+
+ RW
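A quick hedged sketch of how the class interface above could be exercised from a shell, assuming a single registered handler exposed as platform-profile-0 (the choices output is illustrative)::

    # cat /sys/class/platform-profile/platform-profile-0/choices
    low-power balanced performance
    # echo performance > /sys/class/platform-profile/platform-profile-0/profile
    # cat /sys/class/platform-profile/platform-profile-0/profile
    performance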
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 45180b62d426..2a5c1a09a28f 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -407,10 +407,30 @@ Description:
Access: Read, Write
+ Reading this returns the current active value, e.g. 'Standard'.
+ Check charge_types to get the values supported by the battery.
+
Valid values:
"Unknown", "N/A", "Trickle", "Fast", "Standard",
"Adaptive", "Custom", "Long Life", "Bypass"
+What: /sys/class/power_supply/<supply_name>/charge_types
+Date: December 2024
+Contact: linux-pm@vger.kernel.org
+Description:
+ Identical to charge_type but reading returns a list of supported
+ charge-types with the currently active type surrounded by square
+ brackets, e.g.: "Fast [Standard] Long_Life".
+
+ power_supply class devices may support both charge_type and
+ charge_types for backward compatibility. In this case both will
+ always have the same active value and the active value can be
+ changed by writing either property.
+
+ Note charge-types which contain a space such as "Long Life" will
+ have the space replaced by a '_' resulting in e.g. "Long_Life".
+ When writing charge-types both variants are accepted.
+
What: /sys/class/power_supply/<supply_name>/charge_term_current
Date: July 2014
Contact: linux-pm@vger.kernel.org
@@ -433,7 +453,7 @@ Description:
Valid values:
"Unknown", "Good", "Overheat", "Dead",
- "Over voltage", "Unspecified failure", "Cold",
+ "Over voltage", "Under voltage", "Unspecified failure", "Cold",
"Watchdog timer expire", "Safety timer expire",
"Over current", "Calibration required", "Warm",
"Cool", "Hot", "No battery"
@@ -793,3 +813,12 @@ Description:
Access: Read
Valid values: 1-31
+
+What: /sys/class/power_supply/<supply_name>/extensions/<extension_name>
+Date: March 2025
+Contact: linux-pm@vger.kernel.org
+Description:
+ Reports the extensions registered to the power supply.
+ Each entry is a link to the device which registered the extension.
+
+ Access: Read
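As a small illustrative sketch (supply and extension names are hypothetical), the registered extensions can be listed with::

    # ls /sys/class/power_supply/BAT0/extensions/
    some_extension

Each entry shown is a symlink pointing at the device which registered that extension.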
diff --git a/Documentation/ABI/testing/sysfs-class-power-max1720x b/Documentation/ABI/testing/sysfs-class-power-max1720x
new file mode 100644
index 000000000000..7d895bfda9ce
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power-max1720x
@@ -0,0 +1,32 @@
+What: /sys/class/power_supply/max1720x/temp_ain1
+Date: January 2025
+KernelVersion: 6.14
+Contact: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Description:
+ Reports the current temperature reading from AIN1 thermistor.
+
+ Access: Read
+
+ Valid values: Represented in 1/10 Degrees Celsius
+
+What: /sys/class/power_supply/max1720x/temp_ain2
+Date: January 2025
+KernelVersion: 6.14
+Contact: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Description:
+ Reports the current temperature reading from AIN2 thermistor.
+
+ Access: Read
+
+ Valid values: Represented in 1/10 Degrees Celsius
+
+What: /sys/class/power_supply/max1720x/temp_int
+Date: January 2025
+KernelVersion: 6.14
+Contact: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Description:
+ Reports the current temperature reading from internal die.
+
+ Access: Read
+
+ Valid values: Represented in 1/10 Degrees Celsius
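A hedged reading example for the attributes above (the value shown is illustrative); since values are in tenths of a degree Celsius, 250 corresponds to 25.0 degrees::

    # cat /sys/class/power_supply/max1720x/temp_ain1
    250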
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index f1b90cf1249b..b057eddefbfc 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -355,10 +355,15 @@ Description: If 'target' is written to the 'type' file, writing to or
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/matching
Date: Dec 2022
Contact: SeongJae Park <sj@kernel.org>
-Description: Writing 'Y' or 'N' to this file sets whether to filter out
- pages that do or do not match to the 'type' and 'memcg_path',
- respectively. Filter out means the action of the scheme will
- not be applied to.
+Description: Writing 'Y' or 'N' to this file sets whether the filter is for
+ the memory of the 'type', or all except the 'type'.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/allow
+Date: Jan 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing 'Y' or 'N' to this file sets whether to allow or reject
+ applying the scheme's action to the memory that satisfies the
+ 'type' and the 'matching' of the directory.
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/nr_tried
Date: Mar 2022
@@ -384,6 +389,12 @@ Contact: SeongJae Park <sj@kernel.org>
Description: Reading this file returns the total size of regions that the
action of the scheme has successfully applied in bytes.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/sz_ops_filter_passed
+Date: Dec 2024
+Contact: SeongJae Park <sj@kernel.org>
+Description: Reading this file returns the total size, in bytes, of the memory
+ that passed the DAMON operations layer-handled filters of the scheme.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/qt_exceeds
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
@@ -424,3 +435,10 @@ Contact: SeongJae Park <sj@kernel.org>
Description: Reading this file returns the 'age' of a memory region that
corresponding DAMON-based Operation Scheme's action has tried
to be applied.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/tried_regions/<R>/sz_filter_passed
+Date: Dec 2024
+Contact: SeongJae Park <sj@kernel.org>
+Description: Reading this file returns the size, in bytes, of the memory in the
+ region that passed the DAMON operations layer-handled filters of
+ the scheme.
diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-pmc b/Documentation/ABI/testing/sysfs-platform-mellanox-pmc
new file mode 100644
index 000000000000..29b3f9c58e00
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-mellanox-pmc
@@ -0,0 +1,64 @@
+HID Driver Description
+MLNXBFD0 mlxbf-pmc Performance counters (BlueField-1)
+MLNXBFD1 mlxbf-pmc Performance counters (BlueField-2)
+MLNXBFD2 mlxbf-pmc Performance counters (BlueField-3)
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/event_list
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ List of events supported by the counters in the specific block.
+ It is used to extract the event number or ID associated with
+ each event.
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/event<N>
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ Event monitored by corresponding counter. This is used to
+ program or read back the event that should be or is currently
+ being monitored by counter<N>.
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/counter<N>
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ Counter value of the event being monitored. This is used to
+ read the counter value of the event which was programmed using
+ event<N>. This is also used to clear or reset the counter value
+ by writing 0 to the counter sysfs.
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/enable
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ Start or stop counters. This is used to start the counters
+ for monitoring the programmed events and also to stop the
+ counters after the desired duration. Writing value 1 will
+ start all the counters in the block, and writing 0 will
+ stop all the counters together.
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/<reg>
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ Value of register. This is used to read or reset the registers
+ where various performance statistics are counted for each block.
+ Writing 0 to the sysfs will clear the counter, writing any other
+ value is not allowed.
+
+What: /sys/bus/platform/devices/<HID>/hwmon/hwmonX/<block>/count_clock
+Date: Mar 2025
+KernelVersion: 6.14
+Contact: "Shravan Kumar Ramani <shravankr@nvidia.com>"
+Description:
+ Use a counter for counting cycles. This is used to repurpose/dedicate
+ any of the counters in the block to counting cycles. Each counter is
+ represented by a bit (bit 0 for counter0, bit1 for counter1 and so on)
+ and setting the corresponding bit will reserve that specific counter
+ for counting cycles and override the event<N> setting.
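A rough usage sketch tying the attributes above together, assuming a hypothetical block directory named ``tile0`` and an event ID of 0x6 taken from ``event_list`` (both are placeholders, not guaranteed names)::

    # cd /sys/bus/platform/devices/MLNXBFD2/hwmon/hwmon0/tile0
    # cat event_list            # find the event number of interest
    # echo 0x6 > event0         # program counter0 to monitor that event
    # echo 1 > enable           # start all counters in the block
    # cat counter0              # read the accumulated count
    # echo 0 > enable           # stop the counters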
diff --git a/Documentation/ABI/testing/sysfs-platform_profile b/Documentation/ABI/testing/sysfs-platform_profile
index baf1d125f9f8..125324ab53a9 100644
--- a/Documentation/ABI/testing/sysfs-platform_profile
+++ b/Documentation/ABI/testing/sysfs-platform_profile
@@ -33,3 +33,8 @@ Description: Reading this file gives the current selected profile for this
source such as e.g. a hotkey triggered profile change handled
either directly by the embedded-controller or fully handled
inside the kernel.
+
+ This file may also emit the string 'custom' to indicate
+ that multiple platform profile drivers are in use but
+ have different values. This string cannot be written to
+ this interface and is solely for informational purposes.
diff --git a/Documentation/ABI/testing/sysfs-pps-gen b/Documentation/ABI/testing/sysfs-pps-gen
new file mode 100644
index 000000000000..2519207b88fd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-pps-gen
@@ -0,0 +1,43 @@
+What: /sys/class/pps-gen/
+Date: February 2025
+KernelVersion: 6.13
+Contact: Rodolfo Giometti <giometti@enneenne.com>
+Description:
+ The /sys/class/pps-gen/ directory contains files and
+ directories that provide a unified interface to the PPS
+ generators.
+
+What: /sys/class/pps-gen/pps-genX/
+Date: February 2025
+KernelVersion: 6.13
+Contact: Rodolfo Giometti <giometti@enneenne.com>
+Description:
+ The /sys/class/pps-gen/pps-genX/ directory is related to the X-th
+ PPS generator in the system. Each directory contains files to
+ manage and control its PPS generator.
+
+What: /sys/class/pps-gen/pps-genX/enable
+Date: February 2025
+KernelVersion: 6.13
+Contact: Rodolfo Giometti <giometti@enneenne.com>
+Description:
+ This write-only file enables or disables generation of the
+ PPS signal.
+
+What: /sys/class/pps-gen/pps-genX/system
+Date: February 2025
+KernelVersion: 6.13
+Contact: Rodolfo Giometti <giometti@enneenne.com>
+Description:
+ This read-only file returns "1" if the generator takes the
+ timing from the system clock, while it returns "0" if not
+ (i.e. from a peripheral device clock).
+
+What: /sys/class/pps-gen/pps-genX/time
+Date: February 2025
+KernelVersion: 6.13
+Contact: Rodolfo Giometti <giometti@enneenne.com>
+Description:
+ This read-only file contains the current time stored in the
+ generator clock, as two integers representing the seconds and
+ nanoseconds of the current time.
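A short hedged walk-through of the generator controls documented above, assuming the first generator pps-gen0 (the values shown are illustrative)::

    # echo 1 > /sys/class/pps-gen/pps-gen0/enable
    # cat /sys/class/pps-gen/pps-gen0/system
    1
    # cat /sys/class/pps-gen/pps-gen0/time
    1739577600 123456789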
diff --git a/Documentation/PCI/endpoint/pci-test-howto.rst b/Documentation/PCI/endpoint/pci-test-howto.rst
index 909f770a07d6..aafc17ef3fd3 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.rst
+++ b/Documentation/PCI/endpoint/pci-test-howto.rst
@@ -81,8 +81,8 @@ device, the following commands can be used::
# echo 0x104c > functions/pci_epf_test/func1/vendorid
# echo 0xb500 > functions/pci_epf_test/func1/deviceid
- # echo 16 > functions/pci_epf_test/func1/msi_interrupts
- # echo 8 > functions/pci_epf_test/func1/msix_interrupts
+ # echo 32 > functions/pci_epf_test/func1/msi_interrupts
+ # echo 2048 > functions/pci_epf_test/func1/msix_interrupts
Binding pci-epf-test Device to EP Controller
@@ -123,113 +123,83 @@ above::
Using Endpoint Test function Device
-----------------------------------
-pcitest.sh added in tools/pci/ can be used to run all the default PCI endpoint
-tests. To compile this tool the following commands should be used::
+The Kselftest added in tools/testing/selftests/pci_endpoint can be used to run
+all the default PCI endpoint tests. To build the Kselftest for the PCI endpoint
+subsystem, the following commands should be used::
# cd <kernel-dir>
- # make -C tools/pci
+ # make -C tools/testing/selftests/pci_endpoint
or if you desire to compile and install in your system::
# cd <kernel-dir>
- # make -C tools/pci install
+ # make -C tools/testing/selftests/pci_endpoint INSTALL_PATH=/usr/bin install
-The tool and script will be located in <rootfs>/usr/bin/
+The test will be located in <rootfs>/usr/bin/
-
-pcitest.sh Output
-~~~~~~~~~~~~~~~~~
+Kselftest Output
+~~~~~~~~~~~~~~~~
::
- # pcitest.sh
- BAR tests
-
- BAR0: OKAY
- BAR1: OKAY
- BAR2: OKAY
- BAR3: OKAY
- BAR4: NOT OKAY
- BAR5: NOT OKAY
-
- Interrupt tests
-
- SET IRQ TYPE TO LEGACY: OKAY
- LEGACY IRQ: NOT OKAY
- SET IRQ TYPE TO MSI: OKAY
- MSI1: OKAY
- MSI2: OKAY
- MSI3: OKAY
- MSI4: OKAY
- MSI5: OKAY
- MSI6: OKAY
- MSI7: OKAY
- MSI8: OKAY
- MSI9: OKAY
- MSI10: OKAY
- MSI11: OKAY
- MSI12: OKAY
- MSI13: OKAY
- MSI14: OKAY
- MSI15: OKAY
- MSI16: OKAY
- MSI17: NOT OKAY
- MSI18: NOT OKAY
- MSI19: NOT OKAY
- MSI20: NOT OKAY
- MSI21: NOT OKAY
- MSI22: NOT OKAY
- MSI23: NOT OKAY
- MSI24: NOT OKAY
- MSI25: NOT OKAY
- MSI26: NOT OKAY
- MSI27: NOT OKAY
- MSI28: NOT OKAY
- MSI29: NOT OKAY
- MSI30: NOT OKAY
- MSI31: NOT OKAY
- MSI32: NOT OKAY
- SET IRQ TYPE TO MSI-X: OKAY
- MSI-X1: OKAY
- MSI-X2: OKAY
- MSI-X3: OKAY
- MSI-X4: OKAY
- MSI-X5: OKAY
- MSI-X6: OKAY
- MSI-X7: OKAY
- MSI-X8: OKAY
- MSI-X9: NOT OKAY
- MSI-X10: NOT OKAY
- MSI-X11: NOT OKAY
- MSI-X12: NOT OKAY
- MSI-X13: NOT OKAY
- MSI-X14: NOT OKAY
- MSI-X15: NOT OKAY
- MSI-X16: NOT OKAY
- [...]
- MSI-X2047: NOT OKAY
- MSI-X2048: NOT OKAY
-
- Read Tests
-
- SET IRQ TYPE TO MSI: OKAY
- READ ( 1 bytes): OKAY
- READ ( 1024 bytes): OKAY
- READ ( 1025 bytes): OKAY
- READ (1024000 bytes): OKAY
- READ (1024001 bytes): OKAY
-
- Write Tests
-
- WRITE ( 1 bytes): OKAY
- WRITE ( 1024 bytes): OKAY
- WRITE ( 1025 bytes): OKAY
- WRITE (1024000 bytes): OKAY
- WRITE (1024001 bytes): OKAY
-
- Copy Tests
-
- COPY ( 1 bytes): OKAY
- COPY ( 1024 bytes): OKAY
- COPY ( 1025 bytes): OKAY
- COPY (1024000 bytes): OKAY
- COPY (1024001 bytes): OKAY
+ # pci_endpoint_test
+ TAP version 13
+ 1..16
+ # Starting 16 tests from 9 test cases.
+ # RUN pci_ep_bar.BAR0.BAR_TEST ...
+ # OK pci_ep_bar.BAR0.BAR_TEST
+ ok 1 pci_ep_bar.BAR0.BAR_TEST
+ # RUN pci_ep_bar.BAR1.BAR_TEST ...
+ # OK pci_ep_bar.BAR1.BAR_TEST
+ ok 2 pci_ep_bar.BAR1.BAR_TEST
+ # RUN pci_ep_bar.BAR2.BAR_TEST ...
+ # OK pci_ep_bar.BAR2.BAR_TEST
+ ok 3 pci_ep_bar.BAR2.BAR_TEST
+ # RUN pci_ep_bar.BAR3.BAR_TEST ...
+ # OK pci_ep_bar.BAR3.BAR_TEST
+ ok 4 pci_ep_bar.BAR3.BAR_TEST
+ # RUN pci_ep_bar.BAR4.BAR_TEST ...
+ # OK pci_ep_bar.BAR4.BAR_TEST
+ ok 5 pci_ep_bar.BAR4.BAR_TEST
+ # RUN pci_ep_bar.BAR5.BAR_TEST ...
+ # OK pci_ep_bar.BAR5.BAR_TEST
+ ok 6 pci_ep_bar.BAR5.BAR_TEST
+ # RUN pci_ep_basic.CONSECUTIVE_BAR_TEST ...
+ # OK pci_ep_basic.CONSECUTIVE_BAR_TEST
+ ok 7 pci_ep_basic.CONSECUTIVE_BAR_TEST
+ # RUN pci_ep_basic.LEGACY_IRQ_TEST ...
+ # OK pci_ep_basic.LEGACY_IRQ_TEST
+ ok 8 pci_ep_basic.LEGACY_IRQ_TEST
+ # RUN pci_ep_basic.MSI_TEST ...
+ # OK pci_ep_basic.MSI_TEST
+ ok 9 pci_ep_basic.MSI_TEST
+ # RUN pci_ep_basic.MSIX_TEST ...
+ # OK pci_ep_basic.MSIX_TEST
+ ok 10 pci_ep_basic.MSIX_TEST
+ # RUN pci_ep_data_transfer.memcpy.READ_TEST ...
+ # OK pci_ep_data_transfer.memcpy.READ_TEST
+ ok 11 pci_ep_data_transfer.memcpy.READ_TEST
+ # RUN pci_ep_data_transfer.memcpy.WRITE_TEST ...
+ # OK pci_ep_data_transfer.memcpy.WRITE_TEST
+ ok 12 pci_ep_data_transfer.memcpy.WRITE_TEST
+ # RUN pci_ep_data_transfer.memcpy.COPY_TEST ...
+ # OK pci_ep_data_transfer.memcpy.COPY_TEST
+ ok 13 pci_ep_data_transfer.memcpy.COPY_TEST
+ # RUN pci_ep_data_transfer.dma.READ_TEST ...
+ # OK pci_ep_data_transfer.dma.READ_TEST
+ ok 14 pci_ep_data_transfer.dma.READ_TEST
+ # RUN pci_ep_data_transfer.dma.WRITE_TEST ...
+ # OK pci_ep_data_transfer.dma.WRITE_TEST
+ ok 15 pci_ep_data_transfer.dma.WRITE_TEST
+ # RUN pci_ep_data_transfer.dma.COPY_TEST ...
+ # OK pci_ep_data_transfer.dma.COPY_TEST
+ ok 16 pci_ep_data_transfer.dma.COPY_TEST
+ # PASSED: 16 / 16 tests passed.
+ # Totals: pass:16 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+
+Testcase 16 (pci_ep_data_transfer.dma.COPY_TEST) will fail for most DMA-capable
+endpoint controllers due to the absence of MEMCPY over DMA. For such
+controllers, it is advisable to skip this testcase using the following
+command::
+
+ # pci_endpoint_test -f pci_ep_bar -f pci_ep_basic -v memcpy -T COPY_TEST -v dma
diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst
index f61c01fc376e..210c194d4a7b 100644
--- a/Documentation/accounting/delay-accounting.rst
+++ b/Documentation/accounting/delay-accounting.rst
@@ -100,29 +100,29 @@ Get delays, since system boot, for pid 10::
# ./getdelays -d -p 10
(output similar to next case)
-Get sum of delays, since system boot, for all pids with tgid 5::
+Get sum and peak of delays, since system boot, for all pids with tgid 242::
- # ./getdelays -d -t 5
+ bash-4.4# ./getdelays -d -t 242
print delayacct stats ON
- TGID 5
-
-
- CPU count real total virtual total delay total delay average
- 8 7000000 6872122 3382277 0.423ms
- IO count delay total delay average
- 0 0 0.000ms
- SWAP count delay total delay average
- 0 0 0.000ms
- RECLAIM count delay total delay average
- 0 0 0.000ms
- THRASHING count delay total delay average
- 0 0 0.000ms
- COMPACT count delay total delay average
- 0 0 0.000ms
- WPCOPY count delay total delay average
- 0 0 0.000ms
- IRQ count delay total delay average
- 0 0 0.000ms
+ TGID 242
+
+
+ CPU count real total virtual total delay total delay average delay max delay min
+ 39 156000000 156576579 2111069 0.054ms 0.212296ms 0.031307ms
+ IO count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
+ SWAP count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
+ RECLAIM count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
+ THRASHING count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
+ COMPACT count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
+ WPCOPY count delay total delay average delay max delay min
+ 156 11215873 0.072ms 0.207403ms 0.033913ms
+ IRQ count delay total delay average delay max delay min
+ 0 0 0.000ms 0.000000ms 0.000000ms
Get IO accounting for pid 1, it works only with -p::
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 73f2b3b0f4a5..fb8752b42ec8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2824,17 +2824,21 @@
nvhe: Standard nVHE-based mode, without support for
protected guests.
- protected: nVHE-based mode with support for guests whose
- state is kept private from the host.
+ protected: Mode with support for guests whose state is
+ kept private from the host, using VHE or
+ nVHE depending on HW support.
nested: VHE-based mode with support for nested
- virtualization. Requires at least ARMv8.3
- hardware.
+ virtualization. Requires at least ARMv8.4
+ hardware (with FEAT_NV2).
Defaults to VHE/nVHE based on hardware support. Setting
mode to "protected" will disable kexec and hibernation
- for the host. "nested" is experimental and should be
- used with extreme caution.
+ for the host. To force nVHE on VHE hardware, add
+ "arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" to the
+ command-line.
+ "nested" is experimental and should be used with
+ extreme caution.
kvm-arm.vgic_v3_group0_trap=
[KVM,ARM,EARLY] Trap guest accesses to GICv3 group-0
@@ -3495,8 +3499,8 @@
[KNL] Set the initial state for the memory hotplug
onlining policy. If not specified, the default value is
set according to the
- CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config
- option.
+ CONFIG_MHP_DEFAULT_ONLINE_TYPE kernel config
+ options.
See Documentation/admin-guide/mm/memory-hotplug.rst.
memmap=exactmap [KNL,X86,EARLY] Enable setting of an exact
@@ -4830,7 +4834,7 @@
'1' – force enabled
'x' – unchanged
For example,
- pci=config_acs=10x
+ pci=config_acs=10x@pci:0:0
would configure all devices that support
ACS to enable P2P Request Redirect, disable
Translation Blocking, and leave Source
@@ -7161,6 +7165,14 @@
comma-separated list of trace events to enable. See
also Documentation/trace/events.rst
+ To enable events from specific modules, use the :mod: keyword:
+
+ trace_event=:mod:<module>
+
+ The value before :mod: will only enable specific events
+ that are part of the module. See the above mentioned
+ document for more information.
+
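For instance (the module name is chosen only for illustration), booting with the following would enable every trace event provided by the btrfs module::

    trace_event=:mod:btrfs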
trace_instance=[instance-info]
[FTRACE] Create a ring buffer instance early in boot up.
This will be listed in:
@@ -7295,6 +7307,13 @@
See Documentation/admin-guide/mm/transhuge.rst
for more details.
+ transparent_hugepage_tmpfs= [KNL]
+ Format: [always|within_size|advise|never]
+ Can be used to control the default hugepage allocation policy
+ for the tmpfs mount.
+ See Documentation/admin-guide/mm/transhuge.rst
+ for more details.
+
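As a hedged example of the parameter format documented above, the tmpfs default could be switched at boot with::

    transparent_hugepage_tmpfs=within_size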
trusted.source= [KEYS]
Format: <string>
This parameter identifies the trust source as a backend
diff --git a/Documentation/admin-guide/media/ipu3.rst b/Documentation/admin-guide/media/ipu3.rst
index 83b3cd03b35c..9c190942932e 100644
--- a/Documentation/admin-guide/media/ipu3.rst
+++ b/Documentation/admin-guide/media/ipu3.rst
@@ -98,7 +98,7 @@ frames in packed raw Bayer format to IPU3 CSI2 receiver.
# and that ov5670 sensor is connected to i2c bus 10 with address 0x36
export SDEV=$(media-ctl -d $MDEV -e "ov5670 10-0036")
- # Establish the link for the media devices using media-ctl [#f3]_
+ # Establish the link for the media devices using media-ctl
media-ctl -d $MDEV -l "ov5670:0 -> ipu3-csi2 0:0[1]"
# Set the format for the media devices
@@ -589,12 +589,8 @@ preserved.
References
==========
-.. [#f5] drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
-
.. [#f1] https://github.com/intel/nvt
.. [#f2] http://git.ideasonboard.org/yavta.git
-.. [#f3] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
-
.. [#f4] ImgU limitation requires an additional 16x16 for all input resolutions
diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst
index c4dddf6733cd..ede14b679d02 100644
--- a/Documentation/admin-guide/mm/damon/start.rst
+++ b/Documentation/admin-guide/mm/damon/start.rst
@@ -42,32 +42,45 @@ the execution. ::
$ git clone https://github.com/sjp38/masim; cd masim; make
$ sudo damo start "./masim ./configs/stairs.cfg --quiet"
- $ sudo ./damo show
- 0 addr [85.541 TiB , 85.541 TiB ) (57.707 MiB ) access 0 % age 10.400 s
- 1 addr [85.541 TiB , 85.542 TiB ) (413.285 MiB) access 0 % age 11.400 s
- 2 addr [127.649 TiB , 127.649 TiB) (57.500 MiB ) access 0 % age 1.600 s
- 3 addr [127.649 TiB , 127.649 TiB) (32.500 MiB ) access 0 % age 500 ms
- 4 addr [127.649 TiB , 127.649 TiB) (9.535 MiB ) access 100 % age 300 ms
- 5 addr [127.649 TiB , 127.649 TiB) (8.000 KiB ) access 60 % age 0 ns
- 6 addr [127.649 TiB , 127.649 TiB) (6.926 MiB ) access 0 % age 1 s
- 7 addr [127.998 TiB , 127.998 TiB) (120.000 KiB) access 0 % age 11.100 s
- 8 addr [127.998 TiB , 127.998 TiB) (8.000 KiB ) access 40 % age 100 ms
- 9 addr [127.998 TiB , 127.998 TiB) (4.000 KiB ) access 0 % age 11 s
- total size: 577.590 MiB
- $ sudo ./damo stop
+ $ sudo damo report access
+ heatmap: 641111111000000000000000000000000000000000000000000000[...]33333333333333335557984444[...]7
+ # min/max temperatures: -1,840,000,000, 370,010,000, column size: 3.925 MiB
+ 0 addr 86.182 TiB size 8.000 KiB access 0 % age 14.900 s
+ 1 addr 86.182 TiB size 8.000 KiB access 60 % age 0 ns
+ 2 addr 86.182 TiB size 3.422 MiB access 0 % age 4.100 s
+ 3 addr 86.182 TiB size 2.004 MiB access 95 % age 2.200 s
+ 4 addr 86.182 TiB size 29.688 MiB access 0 % age 14.100 s
+ 5 addr 86.182 TiB size 29.516 MiB access 0 % age 16.700 s
+ 6 addr 86.182 TiB size 29.633 MiB access 0 % age 17.900 s
+ 7 addr 86.182 TiB size 117.652 MiB access 0 % age 18.400 s
+ 8 addr 126.990 TiB size 62.332 MiB access 0 % age 9.500 s
+ 9 addr 126.990 TiB size 13.980 MiB access 0 % age 5.200 s
+ 10 addr 126.990 TiB size 9.539 MiB access 100 % age 3.700 s
+ 11 addr 126.990 TiB size 16.098 MiB access 0 % age 6.400 s
+ 12 addr 127.987 TiB size 132.000 KiB access 0 % age 2.900 s
+ total size: 314.008 MiB
+ $ sudo damo stop
The first command of the above example downloads and builds an artificial
memory access generator program called ``masim``. The second command asks DAMO
-to execute the artificial generator process start via the given command and
-make DAMON monitors the generator process. The third command retrieves the
-current snapshot of the monitored access pattern of the process from DAMON and
-shows the pattern in a human readable format.
-
-Each line of the output shows which virtual address range (``addr [XX, XX)``)
-of the process is how frequently (``access XX %``) accessed for how long time
-(``age XX``). For example, the fifth region of ~9 MiB size is being most
-frequently accessed for last 300 milliseconds. Finally, the fourth command
-stops DAMON.
+to start the program via the given command and make DAMON monitor the newly
+started process. The third command retrieves the current snapshot of the
+monitored access pattern of the process from DAMON and shows the pattern in a
+human readable format.
+
+The first line of the output shows the relative access temperature (hotness) of
+the regions in a single-row heatmap format. Each column on the heatmap
+represents regions of the same size on the monitored virtual address space. The
+position of the column on the row and the number on the column represent the
+relative location and access temperature of the region. ``[...]`` means
+unmapped huge regions on the virtual address spaces. The second line shows
+additional information for better understanding of the heatmap.
+
+Each line of the output from the third line shows which virtual address range
+(``addr XX size XX``) of the process is how frequently (``access XX %``)
+accessed for how long (``age XX``). For example, the eleventh region of
+~9.5 MiB size is being most frequently accessed for the last 3.7 seconds.
+Finally, the fourth command stops DAMON.
Note that DAMON can monitor not only virtual address spaces but multiple types
of address spaces including the physical address space.
@@ -95,7 +108,7 @@ Visualizing Recorded Patterns
You can visualize the pattern in a heatmap, showing which memory region
(x-axis) got accessed when (y-axis) and how frequently (number).::
- $ sudo damo report heats --heatmap stdout
+ $ sudo damo report heatmap
22222222222222222222222222222222222222211111111111111111111111111111111111111100
44444444444444444444444444444444444444434444444444444444444444444444444444443200
44444444444444444444444444444444444444433444444444444444444444444444444444444200
@@ -160,6 +173,6 @@ Data Access Pattern Aware Memory Management
Below command makes every memory region of size >=4K that has not accessed for
>=60 seconds in your workload to be swapped out. ::
- $ sudo damo schemes --damos_access_rate 0 0 --damos_sz_region 4K max \
- --damos_age 60s max --damos_action pageout \
- <pid of your workload>
+ $ sudo damo start --damos_access_rate 0 0 --damos_sz_region 4K max \
+ --damos_age 60s max --damos_action pageout \
+ <pid of your workload>
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index d9be9f7caa7d..47a44bd348ab 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -26,12 +26,6 @@ DAMON provides below interfaces for different users.
writing kernel space DAMON application programs for you. You can even extend
DAMON for various address spaces. For detail, please refer to the interface
:doc:`document </mm/damon/api>`.
-- *debugfs interface. (DEPRECATED!)*
- :ref:`This <debugfs_interface>` is almost identical to :ref:`sysfs interface
- <sysfs_interface>`. This is deprecated, so users should move to the
- :ref:`sysfs interface <sysfs_interface>`. If you depend on this and cannot
- move, please report your usecase to damon@lists.linux.dev and
- linux-mm@kvack.org.
.. _sysfs_interface:
@@ -89,10 +83,10 @@ comma (",").
│ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value
│ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low
│ │ │ │ │ │ │ :ref:`filters <sysfs_filters>`/nr_filters
- │ │ │ │ │ │ │ │ 0/type,matching,memcg_id
- │ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds
+ │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx
+ │ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds
│ │ │ │ │ │ │ :ref:`tried_regions <sysfs_schemes_tried_regions>`/total_bytes
- │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age
+ │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed
│ │ │ │ │ │ │ │ ...
│ │ │ │ │ │ ...
│ │ │ │ ...
@@ -412,59 +406,62 @@ number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each filter. The filters are evaluated
in the numeric order.
-Each filter directory contains six files, namely ``type``, ``matcing``,
-``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. To ``type``
-file, you can write one of five special keywords: ``anon`` for anonymous pages,
-``memcg`` for specific memory cgroup, ``young`` for young pages, ``addr`` for
-specific address range (an open-ended interval), or ``target`` for specific
-DAMON monitoring target filtering. In case of the memory cgroup filtering, you
-can specify the memory cgroup of the interest by writing the path of the memory
-cgroup from the cgroups mount point to ``memcg_path`` file. In case of the
-address range filtering, you can specify the start and end address of the range
-to ``addr_start`` and ``addr_end`` files, respectively. For the DAMON
-monitoring target filtering, you can specify the index of the target between
-the list of the DAMON context's monitoring targets list to ``target_idx`` file.
-You can write ``Y`` or ``N`` to ``matching`` file to filter out pages that does
-or does not match to the type, respectively. Then, the scheme's action will
-not be applied to the pages that specified to be filtered out.
+Each filter directory contains seven files, namely ``type``, ``matching``,
+``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``.
+To ``type`` file, you can write one of five special keywords: ``anon`` for
+anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young
+pages, ``addr`` for specific address range (an open-ended interval), or
+``target`` for specific DAMON monitoring target filtering. The meanings of the
+types are the same as described in the :ref:`design doc
+<damon_design_damos_filters>`.
+
+In case of the memory cgroup filtering, you can specify the memory cgroup of
+interest by writing the path of the memory cgroup from the cgroups mount
+point to ``memcg_path`` file. In case of the address range filtering, you can
+specify the start and end address of the range to ``addr_start`` and
+``addr_end`` files, respectively. For the DAMON monitoring target filtering,
+you can specify the index of the target between the list of the DAMON context's
+monitoring targets list to ``target_idx`` file.
+
+You can write ``Y`` or ``N`` to ``matching`` file to specify whether the filter
+is for memory that matches the ``type``. You can write ``Y`` or ``N`` to
+``allow`` file to specify if applying the action to the memory that satisfies
+the ``type`` and ``matching`` should be allowed or not.
For example, below restricts a DAMOS action to be applied to only non-anonymous
pages of all memory cgroups except ``/having_care_already``.::
# echo 2 > nr_filters
- # # filter out anonymous pages
+ # # disallow anonymous pages
echo anon > 0/type
echo Y > 0/matching
+ echo N > 0/allow
# # further filter out all cgroups except one at '/having_care_already'
echo memcg > 1/type
echo /having_care_already > 1/memcg_path
echo Y > 1/matching
+ echo N > 1/allow
-Note that ``anon`` and ``memcg`` filters are currently supported only when
-``paddr`` :ref:`implementation <sysfs_context>` is being used.
-
-Also, memory regions that are filtered out by ``addr`` or ``target`` filters
-are not counted as the scheme has tried to those, while regions that filtered
-out by other type filters are counted as the scheme has tried to. The
-difference is applied to :ref:`stats <damos_stats>` and
-:ref:`tried regions <sysfs_schemes_tried_regions>`.
+Refer to the :ref:`DAMOS filters design documentation
+<damon_design_damos_filters>` for more details, including how multiple filters
+with different ``allow`` settings work together, when each of the filters is
+supported, and how the stats differ.
.. _sysfs_schemes_stats:
schemes/<N>/stats/
------------------
-DAMON counts the total number and bytes of regions that each scheme is tried to
-be applied, the two numbers for the regions that each scheme is successfully
-applied, and the total number of the quota limit exceeds. This statistics can
-be used for online analysis or tuning of the schemes.
+DAMON counts statistics for each scheme. These statistics can be used for
+online analysis or tuning of the schemes. Refer to the :ref:`design doc
+<damon_design_damos_stat>` for more details about the stats.
The statistics can be retrieved by reading the files under ``stats`` directory
-(``nr_tried``, ``sz_tried``, ``nr_applied``, ``sz_applied``, and
-``qt_exceeds``), respectively. The files are not updated in real time, so you
-should ask DAMON sysfs interface to update the content of the files for the
-stats by writing a special keyword, ``update_schemes_stats`` to the relevant
-``kdamonds/<N>/state`` file.
+(``nr_tried``, ``sz_tried``, ``nr_applied``, ``sz_applied``,
+``sz_ops_filter_passed``, and ``qt_exceeds``), respectively. The files are not
+updated in real time, so you should ask DAMON sysfs interface to update the
+content of the files for the stats by writing a special keyword,
+``update_schemes_stats`` to the relevant ``kdamonds/<N>/state`` file.
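A minimal hedged example of refreshing and reading the stats, assuming kdamond 0, context 0 and scheme 0::

    # cd /sys/kernel/mm/damon/admin/kdamonds/0
    # echo update_schemes_stats > state
    # cat contexts/0/schemes/0/stats/sz_ops_filter_passed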
.. _sysfs_schemes_tried_regions:
@@ -501,10 +498,10 @@ set the ``access pattern`` as their interested pattern that they want to query.
tried_regions/<N>/
------------------
-In each region directory, you will find four files (``start``, ``end``,
-``nr_accesses``, and ``age``). Reading the files will show the start and end
-addresses, ``nr_accesses``, and ``age`` of the region that corresponding
-DAMON-based operation scheme ``action`` has tried to be applied.
+In each region directory, you will find five files (``start``, ``end``,
+``nr_accesses``, ``age``, and ``sz_filter_passed``). Reading the files will
+show the properties of the region to which the corresponding DAMON-based
+operation scheme ``action`` has tried to be applied.
Example
~~~~~~~
@@ -600,306 +597,3 @@ fields are as usual. It shows the index of the DAMON context (``ctx_idx=X``)
of the scheme in the list of the contexts of the context's kdamond, the index
of the scheme (``scheme_idx=X``) in the list of the schemes of the context, in
addition to the output of ``damon_aggregated`` tracepoint.
-
-
-.. _debugfs_interface:
-
-debugfs Interface (DEPRECATED!)
-===============================
-
-.. note::
-
- THIS IS DEPRECATED!
-
- DAMON debugfs interface is deprecated, so users should move to the
- :ref:`sysfs interface <sysfs_interface>`. If you depend on this and cannot
- move, please report your usecase to damon@lists.linux.dev and
- linux-mm@kvack.org.
-
-DAMON exports nine files, ``DEPRECATED``, ``attrs``, ``target_ids``,
-``init_regions``, ``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``,
-``mk_contexts`` and ``rm_contexts`` under its debugfs directory,
-``<debugfs>/damon/``.
-
-
-``DEPRECATED`` is a read-only file for the DAMON debugfs interface deprecation
-notice. Reading it returns the deprecation notice, as below::
-
- # cat DEPRECATED
- DAMON debugfs interface is deprecated, so users should move to DAMON_SYSFS. If you cannot, please report your usecase to damon@lists.linux.dev and linux-mm@kvack.org.
-
-
-Attributes
-----------
-
-Users can get and set the ``sampling interval``, ``aggregation interval``,
-``update interval``, and min/max number of monitoring target regions by
-reading from and writing to the ``attrs`` file. To know about the monitoring
-attributes in detail, please refer to the :doc:`/mm/damon/design`. For
-example, below commands set those values to 5 ms, 100 ms, 1,000 ms, 10 and
-1000, and then check it again::
-
- # cd <debugfs>/damon
- # echo 5000 100000 1000000 10 1000 > attrs
- # cat attrs
- 5000 100000 1000000 10 1000
-
-
-Target IDs
-----------
-
-Some types of address spaces supports multiple monitoring target. For example,
-the virtual memory address spaces monitoring can have multiple processes as the
-monitoring targets. Users can set the targets by writing relevant id values of
-the targets to, and get the ids of the current targets by reading from the
-``target_ids`` file. In case of the virtual address spaces monitoring, the
-values should be pids of the monitoring target processes. For example, below
-commands set processes having pids 42 and 4242 as the monitoring targets and
-check it again::
-
- # cd <debugfs>/damon
- # echo 42 4242 > target_ids
- # cat target_ids
- 42 4242
-
-Users can also monitor the physical memory address space of the system by
-writing a special keyword, "``paddr\n``" to the file. Because physical address
-space monitoring doesn't support multiple targets, reading the file will show a
-fake value, ``42``, as below::
-
- # cd <debugfs>/damon
- # echo paddr > target_ids
- # cat target_ids
- 42
-
-Note that setting the target ids doesn't start the monitoring.
-
-
-Initial Monitoring Target Regions
----------------------------------
-
-In case of the virtual address space monitoring, DAMON automatically sets and
-updates the monitoring target regions so that entire memory mappings of target
-processes can be covered. However, users can want to limit the monitoring
-region to specific address ranges, such as the heap, the stack, or specific
-file-mapped area. Or, some users can know the initial access pattern of their
-workloads and therefore want to set optimal initial regions for the 'adaptive
-regions adjustment'.
-
-In contrast, DAMON do not automatically sets and updates the monitoring target
-regions in case of physical memory monitoring. Therefore, users should set the
-monitoring target regions by themselves.
-
-In such cases, users can explicitly set the initial monitoring target regions
-as they want, by writing proper values to the ``init_regions`` file. The input
-should be a sequence of three integers separated by white spaces that represent
-one region in below form.::
-
- <target idx> <start address> <end address>
-
-The ``target idx`` should be the index of the target in ``target_ids`` file,
-starting from ``0``, and the regions should be passed in address order. For
-example, below commands will set a couple of address ranges, ``1-100`` and
-``100-200`` as the initial monitoring target region of pid 42, which is the
-first one (index ``0``) in ``target_ids``, and another couple of address
-ranges, ``20-40`` and ``50-100`` as that of pid 4242, which is the second one
-(index ``1``) in ``target_ids``.::
-
- # cd <debugfs>/damon
- # cat target_ids
- 42 4242
- # echo "0 1 100 \
- 0 100 200 \
- 1 20 40 \
- 1 50 100" > init_regions
-
-Note that this sets the initial monitoring target regions only. In case of
-virtual memory monitoring, DAMON will automatically updates the boundary of the
-regions after one ``update interval``. Therefore, users should set the
-``update interval`` large enough in this case, if they don't want the
-update.
-
-
-Schemes
--------
-
-Users can get and set the DAMON-based operation :ref:`schemes
-<damon_design_damos>` by reading from and writing to ``schemes`` debugfs file.
-Reading the file also shows the statistics of each scheme. To the file, each
-of the schemes should be represented in each line in below form::
-
- <target access pattern> <action> <quota> <watermarks>
-
-You can disable schemes by simply writing an empty string to the file.
-
-Target Access Pattern
-~~~~~~~~~~~~~~~~~~~~~
-
-The target access :ref:`pattern <damon_design_damos_access_pattern>` of the
-scheme. The ``<target access pattern>`` is constructed with three ranges in
-below form::
-
- min-size max-size min-acc max-acc min-age max-age
-
-Specifically, bytes for the size of regions (``min-size`` and ``max-size``),
-number of monitored accesses per aggregate interval for access frequency
-(``min-acc`` and ``max-acc``), number of aggregate intervals for the age of
-regions (``min-age`` and ``max-age``) are specified. Note that the ranges are
-closed interval.
-
-Action
-~~~~~~
-
-The ``<action>`` is a predefined integer for memory management :ref:`actions
-<damon_design_damos_action>`. The mapping between the ``<action>`` values and
-the memory management actions is as below. For the detailed meaning of the
-action and DAMON operations set supporting each action, please refer to the
-list on :ref:`design doc <damon_design_damos_action>`.
-
- - 0: ``willneed``
- - 1: ``cold``
- - 2: ``pageout``
- - 3: ``hugepage``
- - 4: ``nohugepage``
- - 5: ``stat``
-
-Quota
-~~~~~
-
-Users can set the :ref:`quotas <damon_design_damos_quotas>` of the given scheme
-via the ``<quota>`` in below form::
-
- <ms> <sz> <reset interval> <priority weights>
-
-This makes DAMON to try to use only up to ``<ms>`` milliseconds for applying
-the action to memory regions of the ``target access pattern`` within the
-``<reset interval>`` milliseconds, and to apply the action to only up to
-``<sz>`` bytes of memory regions within the ``<reset interval>``. Setting both
-``<ms>`` and ``<sz>`` zero disables the quota limits.
-
-For the :ref:`prioritization <damon_design_damos_quotas_prioritization>`, users
-can set the weights for the three properties in ``<priority weights>`` in below
-form::
-
- <size weight> <access frequency weight> <age weight>
-
-Watermarks
-~~~~~~~~~~
-
-Users can specify :ref:`watermarks <damon_design_damos_watermarks>` of the
-given scheme via ``<watermarks>`` in below form::
-
- <metric> <check interval> <high mark> <middle mark> <low mark>
-
-``<metric>`` is a predefined integer for the metric to be checked. The
-supported numbers and their meanings are as below.
-
- - 0: Ignore the watermarks
- - 1: System's free memory rate (per thousand)
-
-The value of the metric is checked every ``<check interval>`` microseconds.
-
-If the value is higher than ``<high mark>`` or lower than ``<low mark>``, the
-scheme is deactivated. If the value is lower than ``<mid mark>``, the scheme
-is activated.
-
-.. _damos_stats:
-
-Statistics
-~~~~~~~~~~
-
-It also counts the total number and bytes of regions that each scheme is tried
-to be applied, the two numbers for the regions that each scheme is successfully
-applied, and the total number of the quota limit exceeds. This statistics can
-be used for online analysis or tuning of the schemes.
-
-The statistics can be shown by reading the ``schemes`` file. Reading the file
-will show each scheme you entered in each line, and the five numbers for the
-statistics will be added at the end of each line.
-
-Example
-~~~~~~~
-
-Below commands applies a scheme saying "If a memory region of size in [4KiB,
-8KiB] is showing accesses per aggregate interval in [0, 5] for aggregate
-interval in [10, 20], page out the region. For the paging out, use only up to
-10ms per second, and also don't page out more than 1GiB per second. Under the
-limitation, page out memory regions having longer age first. Also, check the
-free memory rate of the system every 5 seconds, start the monitoring and paging
-out when the free memory rate becomes lower than 50%, but stop it if the free
-memory rate becomes larger than 60%, or lower than 30%".::
-
- # cd <debugfs>/damon
- # scheme="4096 8192 0 5 10 20 2" # target access pattern and action
- # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas
- # scheme+=" 0 0 100" # prioritization weights
- # scheme+=" 1 5000000 600 500 300" # watermarks
- # echo "$scheme" > schemes
-
-
-Turning On/Off
---------------
-
-Setting the files as described above doesn't incur effect unless you explicitly
-start the monitoring. You can start, stop, and check the current status of the
-monitoring by writing to and reading from the ``monitor_on_DEPRECATED`` file.
-Writing ``on`` to the file starts the monitoring of the targets with the
-attributes. Writing ``off`` to the file stops those. DAMON also stops if
-every target process is terminated. Below example commands turn on, off, and
-check the status of DAMON::
-
- # cd <debugfs>/damon
- # echo on > monitor_on_DEPRECATED
- # echo off > monitor_on_DEPRECATED
- # cat monitor_on_DEPRECATED
- off
-
-Please note that you cannot write to the above-mentioned debugfs files while
-the monitoring is turned on. If you write to the files while DAMON is running,
-an error code such as ``-EBUSY`` will be returned.
-
-
-Monitoring Thread PID
----------------------
-
-DAMON does requested monitoring with a kernel thread called ``kdamond``. You
-can get the pid of the thread by reading the ``kdamond_pid`` file. When the
-monitoring is turned off, reading the file returns ``none``. ::
-
- # cd <debugfs>/damon
- # cat monitor_on_DEPRECATED
- off
- # cat kdamond_pid
- none
- # echo on > monitor_on_DEPRECATED
- # cat kdamond_pid
- 18594
-
-
-Using Multiple Monitoring Threads
----------------------------------
-
-One ``kdamond`` thread is created for each monitoring context. You can create
-and remove monitoring contexts for multiple ``kdamond`` required use case using
-the ``mk_contexts`` and ``rm_contexts`` files.
-
-Writing the name of the new context to the ``mk_contexts`` file creates a
-directory of the name on the DAMON debugfs directory. The directory will have
-DAMON debugfs files for the context. ::
-
- # cd <debugfs>/damon
- # ls foo
- # ls: cannot access 'foo': No such file or directory
- # echo foo > mk_contexts
- # ls foo
- # attrs init_regions kdamond_pid schemes target_ids
-
-If the context is not needed anymore, you can remove it and the corresponding
-directory by putting the name of the context to the ``rm_contexts`` file. ::
-
- # echo foo > rm_contexts
- # ls foo
- # ls: cannot access 'foo': No such file or directory
-
-Note that ``mk_contexts``, ``rm_contexts``, and ``monitor_on_DEPRECATED`` files
-are in the root directory only.
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index cb2c080f400c..33c886f3d198 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -280,8 +280,8 @@ The following files are currently defined:
blocks; configure auto-onlining.
The default value depends on the
- CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel configuration
- option.
+ CONFIG_MHP_DEFAULT_ONLINE_TYPE kernel configuration
+ options.
See the ``state`` property of memory blocks for details.
``block_size_bytes`` read-only: the size in bytes of a memory block.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 8872203df088..dff8d5985f0f 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -332,6 +332,12 @@ allocation policy for the internal shmem mount by using the kernel parameter
seven valid policies for shmem (``always``, ``within_size``, ``advise``,
``never``, ``deny``, and ``force``).
+Similarly to ``transparent_hugepage_shmem``, you can control the default
+hugepage allocation policy for the tmpfs mount by using the kernel parameter
+``transparent_hugepage_tmpfs=<policy>``, where ``<policy>`` is one of the
+four valid policies for tmpfs (``always``, ``within_size``, ``advise``,
+``never``). The tmpfs mount default policy is ``never``.
+
In the same manner as ``thp_anon`` controls each supported anonymous THP
size, ``thp_shmem`` controls each supported shmem THP size. ``thp_shmem``
has the same format as ``thp_anon``, but also supports the policy
@@ -352,8 +358,21 @@ default to ``never``.
Hugepages in tmpfs/shmem
========================
-You can control hugepage allocation policy in tmpfs with mount option
-``huge=``. It can have following values:
+Traditionally, tmpfs only supported a single huge page size ("PMD"). Today,
+it also supports smaller sizes just like anonymous memory, often referred
+to as "multi-size THP" (mTHP). Huge pages of any size are commonly
+represented in the kernel as "large folios".
+
+While there is fine control over the huge page sizes to use for the internal
+shmem mount (see below), ordinary tmpfs mounts will make use of all available
+huge page sizes without any control over the exact sizes, behaving more like
+other file systems.
+
+tmpfs mounts
+------------
+
+The THP allocation policy for tmpfs mounts can be adjusted using the mount
+option: ``huge=``. It can have the following values:
always
Attempt to allocate huge pages every time we need a new page;
@@ -363,24 +382,24 @@ never
within_size
Only allocate huge page if it will be fully within i_size.
- Also respect fadvise()/madvise() hints;
+ Also respect madvise() hints;
advise
- Only allocate huge pages if requested with fadvise()/madvise();
+ Only allocate huge pages if requested with madvise();
+
+Remember that the kernel may use huge pages of all available sizes, and
+that the fine-grained per-size control available for the internal shmem
+mount (see below) does not apply here.
-The default policy is ``never``.
+The default policy in the past was ``never``, but it can now be adjusted
+using the kernel parameter ``transparent_hugepage_tmpfs=<policy>``.
``mount -o remount,huge= /mountpoint`` works fine after mount: remounting
``huge=never`` will not attempt to break up huge pages at all, just stop more
from being allocated.
-There's also sysfs knob to control hugepage allocation policy for internal
-shmem mount: /sys/kernel/mm/transparent_hugepage/shmem_enabled. The mount
-is used for SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or
-MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem.
-
-In addition to policies listed above, shmem_enabled allows two further
-values:
+In addition to policies listed above, the sysfs knob
+/sys/kernel/mm/transparent_hugepage/shmem_enabled will affect the
+allocation policy of tmpfs mounts, when set to the following values:
deny
For use in emergencies, to force the huge option off from
@@ -388,13 +407,24 @@ deny
force
Force the huge option on for all - very useful for testing;
-Shmem can also use "multi-size THP" (mTHP) by adding a new sysfs knob to
-control mTHP allocation:
-'/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled',
-and its value for each mTHP is essentially consistent with the global
-setting. An 'inherit' option is added to ensure compatibility with these
-global settings. Conversely, the options 'force' and 'deny' are dropped,
-which are rather testing artifacts from the old ages.
+shmem / internal tmpfs
+----------------------
+The internal tmpfs mount is used for SysV SHM, memfds, shared anonymous
+mmaps (of /dev/zero or MAP_ANONYMOUS), GPU drivers' DRM objects, and Ashmem.
+
+To control the THP allocation policy for this internal tmpfs mount, the
+sysfs knob /sys/kernel/mm/transparent_hugepage/shmem_enabled and the knobs
+per THP size in
+'/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled'
+can be used.
+
+The global knob has the same semantics as the ``huge=`` mount options
+for tmpfs mounts, except that the different huge page sizes can be controlled
+individually, and will only use the setting of the global knob when the
+per-size knob is set to 'inherit'.
+
+The options 'force' and 'deny' are dropped for the individual sizes, as they
+are rather testing artifacts from the old days.
always
Attempt to allocate <size> huge pages every time we need a new page;
@@ -408,10 +438,10 @@ never
within_size
Only allocate <size> huge page if it will be fully within i_size.
- Also respect fadvise()/madvise() hints;
+ Also respect madvise() hints;
advise
- Only allocate <size> huge pages if requested with fadvise()/madvise();
+ Only allocate <size> huge pages if requested with madvise();
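A hedged shell sketch of the knobs described above (the 64kB size directory is only an example; the available size directories depend on the architecture)::

    # echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
    # echo inherit > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled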
Need of application restart
===========================
@@ -561,6 +591,16 @@ swpin
is incremented every time a huge page is swapped in from a non-zswap
swap device in one piece.
+swpin_fallback
+ is incremented if swapin fails to allocate or charge a huge page
+ and instead falls back to using huge pages with lower orders or
+ small pages.
+
+swpin_fallback_charge
+ is incremented if swapin fails to charge a huge page and instead
+ falls back to using huge pages with lower orders or small pages
+ even though the allocation was successful.
+
swpout
is incremented every time a huge page is swapped out to a non-zswap
swap device in one piece without splitting.
diff --git a/Documentation/admin-guide/workload-tracing.rst b/Documentation/admin-guide/workload-tracing.rst
index b2e254ec8ee8..6be38c1b9c5b 100644
--- a/Documentation/admin-guide/workload-tracing.rst
+++ b/Documentation/admin-guide/workload-tracing.rst
@@ -83,7 +83,7 @@ scripts/ver_linux is a good way to check if your system already has
the necessary tools::
sudo apt-get build-essentials flex bison yacc
- sudo apt install libelf-dev systemtap-sdt-dev libaudit-dev libslang2-dev libperl-dev libdw-dev
+ sudo apt install libelf-dev systemtap-sdt-dev libslang2-dev libperl-dev libdw-dev
cscope is a good tool to browse kernel sources. Let's install it now::
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index b42fea07c5ce..f074f6219f5c 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -198,7 +198,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
-| ARM | MMU-500 | #841119,826419 | N/A |
+| ARM | MMU-500 | #841119,826419 | ARM_SMMU_MMU_500_CPRE_ERRATA|
+| | | #562869,1047329 | |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-600 | #1076982,1209401| N/A |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index 955fbcd19ce9..f273ea15a8e8 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -293,3 +293,13 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED`: Misaligned vector accesses are
not supported at all and will generate a misaligned address fault.
+
+* :c:macro:`RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0`: A bitmask containing the
+ T-Head vendor extensions that are compatible with the
+ :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA` base system behavior.
+
+ * T-HEAD
+
+ * :c:macro:`RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR`: The xtheadvector vendor
+ extension is supported in the T-Head ISA extensions spec starting from
+ commit a18c801634 ("Add T-Head VECTOR vendor extension. ").
diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst
index 51665a3e6a50..1e0e7358e14a 100644
--- a/Documentation/block/ublk.rst
+++ b/Documentation/block/ublk.rst
@@ -333,6 +333,4 @@ References
.. [#userspace_readme] https://github.com/ming1/ubdsrv/blob/master/README
-.. [#stefan] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
-
.. [#xiaoguang] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst
index 0c636c8b7aa5..683bc6d09f00 100644
--- a/Documentation/core-api/min_heap.rst
+++ b/Documentation/core-api/min_heap.rst
@@ -4,6 +4,8 @@
Min Heap API
============
+:Author: Kuan-Wei Chiu <visitorckw@gmail.com>
+
Introduction
============
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 77e0ece2b1d6..f6a3eef4fe7f 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -42,8 +42,8 @@ call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
to retrieve the tag of an entry. Tagged pointers use the same bits that
are used to distinguish value entries from normal pointers, so you must
-decide whether they want to store value entries or tagged pointers in
-any particular XArray.
+decide whether you want to store value entries or tagged pointers in any
+particular XArray.
The XArray does not support storing IS_ERR() pointers as some
conflict with value entries or internal entries.
@@ -52,8 +52,9 @@ An unusual feature of the XArray is the ability to create entries which
occupy a range of indices. Once stored to, looking up any index in
the range will return the same entry as looking up any other index in
the range. Storing to any index will store to all of them. Multi-index
-entries can be explicitly split into smaller entries, or storing ``NULL``
-into any entry will cause the XArray to forget about the range.
+entries can be explicitly split into smaller entries. Unsetting (using
+xa_erase() or xa_store() with ``NULL``) any entry will cause the XArray
+to forget about the range.
Normal API
==========
@@ -63,13 +64,14 @@ for statically allocated XArrays or xa_init() for dynamically
allocated ones. A freshly-initialised XArray contains a ``NULL``
pointer at every index.
-You can then set entries using xa_store() and get entries
-using xa_load(). xa_store will overwrite any entry with the
-new entry and return the previous entry stored at that index. You can
-use xa_erase() instead of calling xa_store() with a
-``NULL`` entry. There is no difference between an entry that has never
-been stored to, one that has been erased and one that has most recently
-had ``NULL`` stored to it.
+You can then set entries using xa_store() and get entries using
+xa_load(). xa_store() will overwrite any entry with the new entry and
+return the previous entry stored at that index. You can unset entries
+using xa_erase() or by setting the entry to ``NULL`` using xa_store().
+There is no difference between an entry that has never been stored to
+and one that has been erased with xa_erase(); an entry that has most
+recently had ``NULL`` stored to it is also equivalent except if the
+XArray was initialized with ``XA_FLAGS_ALLOC``.
You can conditionally replace an entry at an index by using
xa_cmpxchg(). Like cmpxchg(), it will only succeed if
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
deleted file mode 100644
index 82edbaaa3f85..000000000000
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Altera SOCFPGA System Manager
-
-Required properties:
-- compatible : "altr,sys-mgr"
-- reg : Should contain 1 register ranges(address and length)
-- cpu1-start-addr : CPU1 start address in hex.
-
-Example:
- sysmgr@ffd08000 {
- compatible = "altr,sys-mgr";
- reg = <0xffd08000 0x1000>;
- cpu1-start-addr = <0xffd080c4>;
- };
-
-ARM64 - Stratix10
-Required properties:
-- compatible : "altr,sys-mgr-s10"
-- reg : Should contain 1 register range(address and length)
- for system manager register.
-
-Example:
- sysmgr@ffd12000 {
- compatible = "altr,sys-mgr-s10";
- reg = <0xffd12000 0x228>;
- };
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
index 04a8c37b4aff..742dc4e25d3b 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
@@ -38,6 +38,12 @@ properties:
enum:
- arm,coresight-dummy-source
+ arm,static-trace-id:
+ description: If the dummy source needs a static trace id, use this to set it.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 111
+
out-ports:
$ref: /schemas/graph.yaml#/properties/ports
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-static-replicator.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-static-replicator.yaml
index 1892a091ac35..a6f793ea03b6 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-static-replicator.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-static-replicator.yaml
@@ -45,7 +45,22 @@ properties:
patternProperties:
'^port@[01]$':
description: Output connections to CoreSight Trace bus
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ filter-source:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the CoreSight trace source device matching the
+ hard-coded filtering for this port
+
+ remote-endpoint: true
required:
- compatible
@@ -72,6 +87,7 @@ examples:
reg = <0>;
replicator_out_port0: endpoint {
remote-endpoint = <&etb_in_port>;
+ filter-source = <&tpdm_video>;
};
};
@@ -79,6 +95,7 @@ examples:
reg = <1>;
replicator_out_port1: endpoint {
remote-endpoint = <&tpiu_in_port>;
+ filter-source = <&tpdm_mdss>;
};
};
};
diff --git a/Documentation/devicetree/bindings/arm/arm,embedded-trace-extension.yaml b/Documentation/devicetree/bindings/arm/arm,embedded-trace-extension.yaml
index f725e6940993..9c2c9ac9705a 100644
--- a/Documentation/devicetree/bindings/arm/arm,embedded-trace-extension.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,embedded-trace-extension.yaml
@@ -23,7 +23,7 @@ description: |
properties:
$nodename:
- pattern: "^ete([0-9a-f]+)$"
+ pattern: "^ete(-[0-9]+)?$"
compatible:
items:
- const: arm,embedded-trace-extension
@@ -55,13 +55,13 @@ examples:
# An ETE node without legacy CoreSight connections
- |
- ete0 {
+ ete-0 {
compatible = "arm,embedded-trace-extension";
cpu = <&cpu_0>;
};
# An ETE node with legacy CoreSight connections
- |
- ete1 {
+ ete-1 {
compatible = "arm,embedded-trace-extension";
cpu = <&cpu_1>;
diff --git a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
index 2f92b8ab08fa..01333ac111fb 100644
--- a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
+++ b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml
@@ -74,6 +74,7 @@ properties:
- description: AST2600 based boards
items:
- enum:
+ - ampere,mtjefferson-bmc
- ampere,mtmitchell-bmc
- aspeed,ast2600-evb
- aspeed,ast2600-evb-a1
@@ -91,6 +92,7 @@ properties:
- ibm,everest-bmc
- ibm,fuji-bmc
- ibm,rainier-bmc
+ - ibm,sbp1-bmc
- ibm,system1-bmc
- ibm,tacoma-bmc
- inventec,starscream-bmc
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.yaml b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
index 7160ec80ac1b..0ec29366e6c2 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.yaml
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
@@ -180,6 +180,13 @@ properties:
- const: atmel,sama5d4
- const: atmel,sama5
+ - description: Microchip SAMA7D65 Curiosity Board
+ items:
+ - const: microchip,sama7d65-curiosity
+ - const: microchip,sama7d65
+ - const: microchip,sama7d6
+ - const: microchip,sama7
+
- items:
- const: microchip,sama7g5ek # SAMA7G5 Evaluation Kit
- const: microchip,sama7g5
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 76e2b7978250..1a173e92bb13 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -13,6 +13,7 @@ PIT Timer required properties:
PIT64B Timer required properties:
- compatible: Should be "microchip,sam9x60-pit64b" or
"microchip,sam9x7-pit64b", "microchip,sam9x60-pit64b"
+ "microchip,sama7d65-pit64b", "microchip,sam9x60-pit64b"
- reg: Should contain registers location and length
- interrupts: Should contain interrupt for PIT64B timer
- clocks: Should contain the available clock sources for PIT64B timer.
@@ -27,12 +28,13 @@ Its subnodes can be:
- watchdog: compatible should be "atmel,at91rm9200-wdt"
RAMC SDRAM/DDR Controller required properties:
-- compatible: Should be "atmel,at91rm9200-sdramc", "syscon"
- "atmel,at91sam9260-sdramc",
- "atmel,at91sam9g45-ddramc",
- "atmel,sama5d3-ddramc",
- "microchip,sam9x60-ddramc",
- "microchip,sama7g5-uddrc",
+- compatible: Should be "atmel,at91rm9200-sdramc", "syscon" or
+ "atmel,at91sam9260-sdramc" or
+ "atmel,at91sam9g45-ddramc" or
+ "atmel,sama5d3-ddramc" or
+ "microchip,sam9x60-ddramc" or
+ "microchip,sama7g5-uddrc" or
+ "microchip,sama7d65-uddrc", "microchip,sama7g5-uddrc" or
"microchip,sam9x7-ddramc", "atmel,sama5d3-ddramc".
- reg: Should contain registers location and length
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml
index 07892cbdd23c..354bb1420cdd 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml
@@ -34,6 +34,7 @@ properties:
- enum:
- netgear,r8000p
- tplink,archer-c2300-v1
+ - zyxel,ex3510b
- const: brcm,bcm4906
- const: brcm,bcm4908
- const: brcm,bcmbca
@@ -115,6 +116,7 @@ properties:
items:
- enum:
- brcm,bcm96846
+ - genexis,xg6846b
- const: brcm,bcm6846
- const: brcm,bcmbca
diff --git a/Documentation/devicetree/bindings/arm/blaize.yaml b/Documentation/devicetree/bindings/arm/blaize.yaml
new file mode 100644
index 000000000000..af39e2756407
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/blaize.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/blaize.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Blaize Platforms
+
+maintainers:
+ - James Cowgill <james.cowgill@blaize.com>
+ - Matt Redfearn <matt.redfearn@blaize.com>
+ - Neil Jones <neil.jones@blaize.com>
+ - Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>
+
+description: |
+ Blaize Platforms using SoCs designed by Blaize Inc.
+
+ The products based on the BLZP1600 SoC:
+
+ - BLZP1600-SoM: SoM (System on Module)
+ - BLZP1600-CB2: Development board CB2 based on BLZP1600-SoM
+
+ BLZP1600 SoC integrates a dual core ARM Cortex A53 cluster
+ and a Blaize Graph Streaming Processor for AI and ML workloads,
+ plus a suite of connectivity and other peripherals.
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: Blaize BLZP1600 based boards
+ items:
+ - enum:
+ - blaize,blzp1600-cb2
+ - const: blaize,blzp1600
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 6e0dcf4307f1..0db2cbd7891f 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -1091,6 +1091,7 @@ properties:
- dmo,imx8mp-data-modul-edm-sbc # i.MX8MP eDM SBC
- emcraft,imx8mp-navqp # i.MX8MP Emcraft Systems NavQ+ Kit
- fsl,imx8mp-evk # i.MX8MP EVK Board
+ - fsl,imx8mp-evk-revb4 # i.MX8MP EVK Rev B4 Board
- gateworks,imx8mp-gw71xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw72xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw73xx-2x # i.MX8MP Gateworks Board
@@ -1106,6 +1107,15 @@ properties:
- ysoft,imx8mp-iota2-lumpy # Y Soft i.MX8MP IOTA2 Lumpy Board
- const: fsl,imx8mp
+ - description: ABB Boards with i.MX8M Plus Modules from ADLink
+ items:
+ - enum:
+ - abb,imx8mp-aristanetos3-adpismarc # i.MX8MP ABB SoM on PI SMARC Board
+ - abb,imx8mp-aristanetos3-helios # i.MX8MP ABB SoM on helios Board
+ - abb,imx8mp-aristanetos3-proton2s # i.MX8MP ABB SoM on proton2s Board
+ - const: abb,imx8mp-aristanetos3-som # i.MX8MP ABB SoM
+ - const: fsl,imx8mp
+
- description: Avnet (MSC Branded) Boards with SM2S i.MX8M Plus Modules
items:
- const: avnet,sm2s-imx8mp-14N0600E-ep1 # SM2S-IMX8PLUS-14N0600E on SM2-MB-EP1 Carrier Board
@@ -1262,6 +1272,7 @@ properties:
items:
- enum:
- fsl,imx8qm-mek # i.MX8QM MEK Board
+ - fsl,imx8qm-mek-revd # i.MX8QM MEK Rev D Board
- toradex,apalis-imx8 # Apalis iMX8 Modules
- toradex,apalis-imx8-v1.1 # Apalis iMX8 V1.1 Modules
- const: fsl,imx8qm
@@ -1290,6 +1301,7 @@ properties:
- enum:
- einfochips,imx8qxp-ai_ml # i.MX8QXP AI_ML Board
- fsl,imx8qxp-mek # i.MX8QXP MEK Board
+ - fsl,imx8qxp-mek-wcpu # i.MX8QXP MEK WCPU Board
- const: fsl,imx8qxp
- description: i.MX8DXL based Boards
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 1d4bb50fcd8d..3ce34d68c213 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -239,6 +239,34 @@ properties:
- enum:
- mediatek,mt8183-pumpkin
- const: mediatek,mt8183
+ - description: Google Chinchou (Asus Chromebook CZ1104CM2A/CZ1204CM2A)
+ items:
+ - const: google,chinchou-sku0
+ - const: google,chinchou-sku2
+ - const: google,chinchou-sku4
+ - const: google,chinchou-sku5
+ - const: google,chinchou
+ - const: mediatek,mt8186
+ - description: Google Chinchou (Asus Chromebook CZ1104FM2A/CZ1204FM2A/CZ1104CM2A/CZ1204CM2A)
+ items:
+ - const: google,chinchou-sku1
+ - const: google,chinchou-sku3
+ - const: google,chinchou-sku6
+ - const: google,chinchou-sku7
+ - const: google,chinchou-sku17
+ - const: google,chinchou-sku20
+ - const: google,chinchou-sku22
+ - const: google,chinchou-sku23
+ - const: google,chinchou
+ - const: mediatek,mt8186
+ - description: Google Chinchou360 (Asus Chromebook CZ1104FM2A/CZ1204FM2A Flip)
+ items:
+ - const: google,chinchou-sku16
+ - const: google,chinchou-sku18
+ - const: google,chinchou-sku19
+ - const: google,chinchou-sku21
+ - const: google,chinchou
+ - const: mediatek,mt8186
- description: Google Magneton (Lenovo IdeaPad Slim 3 Chromebook (14M868))
items:
- const: google,steelix-sku393219
@@ -263,6 +291,19 @@ properties:
- const: google,steelix-sku196608
- const: google,steelix
- const: mediatek,mt8186
+ - description: Google Starmie (ASUS Chromebook Enterprise CM30 (CM3001))
+ items:
+ - const: google,starmie-sku0
+ - const: google,starmie-sku2
+ - const: google,starmie-sku3
+ - const: google,starmie
+ - const: mediatek,mt8186
+ - description: Google Starmie (ASUS Chromebook Enterprise CM30 (CM3001))
+ items:
+ - const: google,starmie-sku1
+ - const: google,starmie-sku4
+ - const: google,starmie
+ - const: mediatek,mt8186
- description: Google Steelix (Lenovo 300e Yoga Chromebook Gen 4)
items:
- enum:
@@ -307,6 +348,19 @@ properties:
- enum:
- mediatek,mt8186-evb
- const: mediatek,mt8186
+ - description: Google Ciri (Lenovo Chromebook Duet (11", 9))
+ items:
+ - enum:
+ - google,ciri-sku0
+ - google,ciri-sku1
+ - google,ciri-sku2
+ - google,ciri-sku3
+ - google,ciri-sku4
+ - google,ciri-sku5
+ - google,ciri-sku6
+ - google,ciri-sku7
+ - const: google,ciri
+ - const: mediatek,mt8188
- items:
- enum:
- mediatek,mt8188-evb
@@ -316,12 +370,6 @@ properties:
- const: google,hayato-rev1
- const: google,hayato
- const: mediatek,mt8192
- - description: Google Hayato rev5
- items:
- - const: google,hayato-rev5-sku2
- - const: google,hayato-sku2
- - const: google,hayato
- - const: mediatek,mt8192
- description: Google Spherion (Acer Chromebook 514)
items:
- const: google,spherion-rev3
@@ -330,11 +378,6 @@ properties:
- const: google,spherion-rev0
- const: google,spherion
- const: mediatek,mt8192
- - description: Google Spherion rev4 (Acer Chromebook 514)
- items:
- - const: google,spherion-rev4
- - const: google,spherion
- - const: mediatek,mt8192
- items:
- enum:
- mediatek,mt8192-evb
diff --git a/Documentation/devicetree/bindings/arm/qcom-soc.yaml b/Documentation/devicetree/bindings/arm/qcom-soc.yaml
index d0751a572af3..a77d68dcad4e 100644
--- a/Documentation/devicetree/bindings/arm/qcom-soc.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom-soc.yaml
@@ -23,7 +23,7 @@ description: |
select:
properties:
compatible:
- pattern: "^qcom,.*(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1e)[0-9]+.*$"
+ pattern: "^qcom,.*(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sar|sc|sd[amx]|sm|x1[ep])[0-9]+.*$"
required:
- compatible
@@ -31,7 +31,8 @@ properties:
compatible:
oneOf:
# Preferred naming style for compatibles of SoC components:
- - pattern: "^qcom,(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1e)[0-9]+(pro)?-.*$"
+ - pattern: "^qcom,(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1[ep])[0-9]+(pro)?-.*$"
+ - pattern: "^qcom,sar[0-9]+[a-z]?-.*$"
- pattern: "^qcom,(sa|sc)8[0-9]+[a-z][a-z]?-.*$"
# Legacy namings - variations of existing patterns/compatibles are OK,
@@ -39,9 +40,9 @@ properties:
- pattern: "^qcom,[ak]pss-wdt-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm)[0-9]+.*$"
- pattern: "^qcom,gcc-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm)[0-9]+.*$"
- pattern: "^qcom,mmcc-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm)[0-9]+.*$"
- - pattern: "^qcom,pcie-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1e)[0-9]+.*$"
+ - pattern: "^qcom,pcie-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1[ep])[0-9]+.*$"
- pattern: "^qcom,rpm-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm)[0-9]+.*$"
- - pattern: "^qcom,scm-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1e)[0-9]+.*$"
+ - pattern: "^qcom,scm-(apq|ipq|mdm|msm|qcm|qcs|q[dr]u|sa|sc|sd[amx]|sm|x1[ep])[0-9]+.*$"
- enum:
- qcom,dsi-ctrl-6g-qcm2290
- qcom,gpucc-sdm630
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 9679fed7259b..618a87693ac1 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -19,29 +19,42 @@ description: |
apq8016
apq8026
+ apq8064
apq8074
apq8084
+ apq8094
apq8096
ipq4018
+ ipq4019
ipq5018
ipq5332
+ ipq5424
ipq6018
+ ipq8064
ipq8074
ipq9574
mdm9615
msm8226
+ msm8660
msm8916
+ msm8917
+ msm8926
+ msm8929
msm8939
msm8953
msm8956
msm8960
msm8974
+ msm8974pro
msm8976
msm8992
msm8994
msm8996
+ msm8996pro
msm8998
qcs404
+ qcs615
+ qcs8300
qcs8550
qcm2290
qcm6490
@@ -53,6 +66,7 @@ description: |
sa8155p
sa8540p
sa8775p
+ sar2130p
sc7180
sc7280
sc8180x
@@ -84,7 +98,10 @@ description: |
sm8450
sm8550
sm8650
+ sm8750
+ x1e78100
x1e80100
+ x1p42100
There are many devices in the list below that run the standard ChromeOS
bootloader setup and use the open source depthcharge bootloader to boot the
@@ -252,6 +269,11 @@ properties:
- items:
- enum:
+ - xiaomi,riva
+ - const: qcom,msm8917
+
+ - items:
+ - enum:
- motorola,potter
- xiaomi,daisy
- xiaomi,mido
@@ -354,6 +376,11 @@ properties:
- items:
- enum:
+ - qcom,ipq5424-rdp466
+ - const: qcom,ipq5424
+
+ - items:
+ - enum:
- mikrotik,rb3011
- qcom,ipq8064-ap148
- const: qcom,ipq8064
@@ -408,6 +435,12 @@ properties:
- qcom,qru1000-idp
- const: qcom,qru1000
+ - description: Qualcomm AR2 Gen1 platform
+ items:
+ - enum:
+ - qcom,qar2130p
+ - const: qcom,sar2130p
+
- items:
- enum:
- acer,aspire1
@@ -822,8 +855,10 @@ properties:
- items:
- enum:
+ - huawei,gaokun3
- lenovo,thinkpad-x13s
- microsoft,arcata
+ - microsoft,blackrock
- qcom,sc8280xp-crd
- qcom,sc8280xp-qrd
- const: qcom,sc8280xp
@@ -900,6 +935,16 @@ properties:
- items:
- enum:
+ - qcom,qcs8300-ride
+ - const: qcom,qcs8300
+
+ - items:
+ - enum:
+ - qcom,qcs615-ride
+ - const: qcom,qcs615
+
+ - items:
+ - enum:
- qcom,sa8155p-adp
- const: qcom,sa8155p
@@ -1066,6 +1111,18 @@ properties:
- items:
- enum:
+ - qcom,sm8750-mtp
+ - qcom,sm8750-qrd
+ - const: qcom,sm8750
+
+ - items:
+ - enum:
+ - qcom,x1e001de-devkit
+ - const: qcom,x1e001de
+ - const: qcom,x1e80100
+
+ - items:
+ - enum:
- lenovo,thinkpad-t14s
- const: qcom,x1e78100
- const: qcom,x1e80100
@@ -1074,6 +1131,7 @@ properties:
- enum:
- asus,vivobook-s15
- dell,xps13-9345
+ - hp,omnibook-x14
- lenovo,yoga-slim7x
- microsoft,romulus13
- microsoft,romulus15
@@ -1081,6 +1139,11 @@ properties:
- qcom,x1e80100-qcp
- const: qcom,x1e80100
+ - items:
+ - enum:
+ - qcom,x1p42100-crd
+ - const: qcom,x1p42100
+
# Board compatibles go above
qcom,msm-id:
@@ -1158,6 +1221,7 @@ allOf:
- qcom,apq8026
- qcom,apq8094
- qcom,apq8096
+ - qcom,msm8917
- qcom,msm8939
- qcom,msm8953
- qcom,msm8956
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 753199a12923..522a6f0450ea 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -81,6 +81,17 @@ properties:
- const: azw,beelink-a1
- const: rockchip,rk3328
+ - description: BigTreeTech CB2 Manta M4/8P
+ items:
+ - const: bigtreetech,cb2-manta
+ - const: bigtreetech,cb2
+ - const: rockchip,rk3566
+
+ - description: BigTreeTech Pi 2
+ items:
+ - const: bigtreetech,pi2
+ - const: rockchip,rk3566
+
- description: bq Curie 2 tablet
items:
- const: mundoreader,bq-curie2
@@ -167,6 +178,13 @@ properties:
- const: engicam,px30-core
- const: rockchip,px30
+ - description: Firefly Core-3588J-based boards
+ items:
+ - enum:
+ - firefly,itx-3588j
+ - const: firefly,core-3588j
+ - const: rockchip,rk3588
+
- description: Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard
items:
- const: firefly,px30-jd4-core-mb
@@ -597,6 +615,11 @@ properties:
- const: google,veyron
- const: rockchip,rk3288
+ - description: H96 Max V58 TV Box
+ items:
+ - const: haochuangyi,h96-max-v58
+ - const: rockchip,rk3588
+
- description: Haoyu MarsBoard RK3066
items:
- const: haoyu,marsboard-rk3066
@@ -812,6 +835,12 @@ properties:
- const: radxa,e20c
- const: rockchip,rk3528
+ - description: Radxa E52C
+ items:
+ - const: radxa,e52c
+ - const: rockchip,rk3582
+ - const: rockchip,rk3588s
+
- description: Radxa Rock
items:
- const: radxa,rock
@@ -1006,6 +1035,21 @@ properties:
- const: rockchip,rk3399-sapphire-excavator
- const: rockchip,rk3399
+ - description: Rockchip RK3566 BOX Evaluation Demo board
+ items:
+ - const: rockchip,rk3566-box-demo
+ - const: rockchip,rk3566
+
+ - description: Rockchip RK3568 Evaluation board
+ items:
+ - const: rockchip,rk3568-evb1-v10
+ - const: rockchip,rk3568
+
+ - description: Rockchip RK3576 Evaluation board
+ items:
+ - const: rockchip,rk3576-evb1-v10
+ - const: rockchip,rk3576
+
- description: Rockchip RK3588 Evaluation board
items:
- const: rockchip,rk3588-evb1-v10
@@ -1026,6 +1070,23 @@ properties:
- const: rockchip,rk3588-toybrick-x0
- const: rockchip,rk3588
+ - description: Sinovoip RK3308 Banana Pi P2 Pro
+ items:
+ - const: sinovoip,rk3308-bpi-p2pro
+ - const: rockchip,rk3308
+
+ - description: Sinovoip RK3568 Banana Pi R2 Pro
+ items:
+ - const: sinovoip,rk3568-bpi-r2pro
+ - const: rockchip,rk3568
+
+ - description: Sonoff iHost Smart Home Hub
+ items:
+ - const: itead,sonoff-ihost
+ - enum:
+ - rockchip,rv1126
+ - rockchip,rv1109
+
- description: Theobroma Systems PX30-uQ7 with Haikou baseboard
items:
- const: tsd,px30-ringneck-haikou
@@ -1075,9 +1136,11 @@ properties:
- const: xunlong,orangepi-3b
- const: rockchip,rk3566
- - description: Xunlong Orange Pi 5 Plus
+ - description: Xunlong Orange Pi 5 Max/Plus
items:
- - const: xunlong,orangepi-5-plus
+ - enum:
+ - xunlong,orangepi-5-max
+ - xunlong,orangepi-5-plus
- const: rockchip,rk3588
- description: Xunlong Orange Pi R1 Plus / LTS
@@ -1099,33 +1162,6 @@ properties:
- const: zkmagic,a95x-z2
- const: rockchip,rk3318
- - description: Rockchip RK3566 BOX Evaluation Demo board
- items:
- - const: rockchip,rk3566-box-demo
- - const: rockchip,rk3566
-
- - description: Rockchip RK3568 Evaluation board
- items:
- - const: rockchip,rk3568-evb1-v10
- - const: rockchip,rk3568
-
- - description: Sinovoip RK3308 Banana Pi P2 Pro
- items:
- - const: sinovoip,rk3308-bpi-p2pro
- - const: rockchip,rk3308
-
- - description: Sinovoip RK3568 Banana Pi R2 Pro
- items:
- - const: sinovoip,rk3568-bpi-r2pro
- - const: rockchip,rk3568
-
- - description: Sonoff iHost Smart Home Hub
- items:
- - const: itead,sonoff-ihost
- - enum:
- - rockchip,rv1126
- - rockchip,rv1109
-
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
index b5ba5ffc36d6..fab29f95d8e6 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
@@ -240,6 +240,9 @@ properties:
items:
- enum:
- samsung,c1s # Samsung Galaxy Note20 5G (SM-N981B)
+ - samsung,r8s # Samsung Galaxy S20 FE (SM-G780F)
+ - samsung,x1s # Samsung Galaxy S20 5G (SM-G981B)
+ - samsung,x1slte # Samsung Galaxy S20 (SM-G980F)
- const: samsung,exynos990
- description: Exynos Auto v9 based boards
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index 703d4b574398..b6c56d4ce6b9 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -91,6 +91,13 @@ properties:
- const: dh,stm32mp153c-dhcor-som
- const: st,stm32mp153
+ - description: Octavo OSD32MP153 System-in-Package based boards
+ items:
+ - enum:
+ - lxa,stm32mp153c-tac-gen3 # Linux Automation TAC (Generation 3)
+ - const: oct,stm32mp153x-osd32
+ - const: st,stm32mp153
+
- items:
- enum:
- shiratech,stm32mp157a-iot-box # IoT Box
diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
index 03b1941eaa33..e5effbb4a606 100644
--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
@@ -20,6 +20,7 @@ description: |
properties:
compatible:
enum:
+ - qcom,ipq5424-llcc
- qcom,qcs615-llcc
- qcom,qcs8300-llcc
- qcom,qdu1000-llcc
@@ -42,11 +43,11 @@ properties:
- qcom,x1e80100-llcc
reg:
- minItems: 2
+ minItems: 1
maxItems: 10
reg-names:
- minItems: 2
+ minItems: 1
maxItems: 10
interrupts:
@@ -71,6 +72,21 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5424-llcc
+ then:
+ properties:
+ reg:
+ items:
+ - description: LLCC0 base register region
+ reg-names:
+ items:
+ - const: llcc0_base
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sar1130p-llcc
- qcom,sar2130p-llcc
then:
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 67700440e23b..11e40d225b9f 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -293,6 +293,13 @@ properties:
PD negotiation till BC1.2 detection completes.
default: 0
+ pd-revision:
+ description: Specifies the maximum USB PD revision and version supported by
+ the connector. This property is specified in the following order:
+ <revision_major, revision_minor, version_major, version_minor>.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ maxItems: 4
+
dependencies:
sink-vdos-v1: [ sink-vdos ]
sink-vdos: [ sink-vdos-v1 ]
diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
index 0304f074cf08..08fe6a707a37 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
@@ -13,12 +13,14 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs8300-inline-crypto-engine
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine
- qcom,sc7280-inline-crypto-engine
- qcom,sm8450-inline-crypto-engine
- qcom,sm8550-inline-crypto-engine
- qcom,sm8650-inline-crypto-engine
+ - qcom,sm8750-inline-crypto-engine
- const: qcom,inline-crypto-engine
reg:
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
index 048b769a73c0..5e6f8b642545 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
@@ -17,12 +17,17 @@ properties:
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
+ - qcom,ipq5332-trng
+ - qcom,ipq5424-trng
+ - qcom,ipq9574-trng
+ - qcom,qcs8300-trng
- qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng
- qcom,sm8450-trng
- qcom,sm8550-trng
- qcom,sm8650-trng
+ - qcom,sm8750-trng
- const: qcom,trng
reg:
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index 62310add2e44..3ed56d9d378e 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -45,6 +45,7 @@ properties:
- items:
- enum:
+ - qcom,qcs8300-qce
- qcom,sa8775p-qce
- qcom,sc7280-qce
- qcom,sm6350-qce
@@ -53,6 +54,7 @@ properties:
- qcom,sm8450-qce
- qcom,sm8550-qce
- qcom,sm8650-qce
+ - qcom,sm8750-qce
- const: qcom,sm8150-qce
- const: qcom,qce
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 84d68b8cfccc..416fe263ac92 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -148,10 +148,10 @@ examples:
/* TMDS Output */
hdmi_tx_tmds_port: port@1 {
- reg = <1>;
+ reg = <1>;
- hdmi_tx_tmds_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
index 3791c9f4ebab..05442d437755 100644
--- a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
@@ -82,21 +82,21 @@ examples:
power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_HDMI_TX>;
reg-io-width = <1>;
ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
-
- hdmi_tx_from_pvi: endpoint {
- remote-endpoint = <&pvi_to_hdmi_tx>;
- };
- };
-
- port@1 {
- reg = <1>;
- hdmi_tx_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&pvi_to_hdmi_tx>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&hdmi0_con>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml b/Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
index 4ed7a799ba26..1acad99f3965 100644
--- a/Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
@@ -27,7 +27,9 @@ properties:
- fsl,imx8mm-mipi-dsim
- fsl,imx8mp-mipi-dsim
- items:
- - const: fsl,imx8mn-mipi-dsim
+ - enum:
+ - fsl,imx7d-mipi-dsim
+ - fsl,imx8mn-mipi-dsim
- const: fsl,imx8mm-mipi-dsim
reg:
@@ -241,40 +243,40 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
dsi@13900000 {
- compatible = "samsung,exynos5433-mipi-dsi";
- reg = <0x13900000 0xC0>;
- interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&mipi_phy 1>;
- phy-names = "dsim";
- clocks = <&cmu_disp CLK_PCLK_DSIM0>,
- <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8>,
- <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0>,
- <&cmu_disp CLK_SCLK_RGB_VCLK_TO_DSIM0>,
- <&cmu_disp CLK_SCLK_DSIM0>;
- clock-names = "bus_clk",
- "phyclk_mipidphy0_bitclkdiv8",
- "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0",
- "sclk_mipi";
- power-domains = <&pd_disp>;
- vddcore-supply = <&ldo6_reg>;
- vddio-supply = <&ldo7_reg>;
- samsung,burst-clock-frequency = <512000000>;
- samsung,esc-clock-frequency = <16000000>;
- samsung,pll-clock-frequency = <24000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&te_irq>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- dsi_to_mic: endpoint {
- remote-endpoint = <&mic_to_dsi>;
- };
- };
- };
+ compatible = "samsung,exynos5433-mipi-dsi";
+ reg = <0x13900000 0xC0>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&mipi_phy 1>;
+ phy-names = "dsim";
+ clocks = <&cmu_disp CLK_PCLK_DSIM0>,
+ <&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8>,
+ <&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0>,
+ <&cmu_disp CLK_SCLK_RGB_VCLK_TO_DSIM0>,
+ <&cmu_disp CLK_SCLK_DSIM0>;
+ clock-names = "bus_clk",
+ "phyclk_mipidphy0_bitclkdiv8",
+ "phyclk_mipidphy0_rxclkesc0",
+ "sclk_rgb_vclk_to_dsim0",
+ "sclk_mipi";
+ power-domains = <&pd_disp>;
+ vddcore-supply = <&ldo6_reg>;
+ vddio-supply = <&ldo7_reg>;
+ samsung,burst-clock-frequency = <512000000>;
+ samsung,esc-clock-frequency = <16000000>;
+ samsung,pll-clock-frequency = <24000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&te_irq>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dsi_to_mic: endpoint {
+ remote-endpoint = <&mic_to_dsi>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
index 47ddba5c41af..5d2089dc596e 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,aal.yaml
@@ -104,30 +104,30 @@ examples:
#size-cells = <2>;
aal@14015000 {
- compatible = "mediatek,mt8173-disp-aal";
- reg = <0 0x14015000 0 0x1000>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
- clocks = <&mmsys CLK_MM_DISP_AAL>;
- mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x5000 0x1000>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- aal0_in: endpoint {
- remote-endpoint = <&ccorr0_out>;
- };
- };
-
- port@1 {
- reg = <1>;
- aal0_out: endpoint {
- remote-endpoint = <&gamma0_in>;
- };
- };
- };
- };
+ compatible = "mediatek,mt8173-disp-aal";
+ reg = <0 0x14015000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_AAL>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x5000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&ccorr0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&gamma0_in>;
+ };
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml
index 9ea796a033b2..4f110635afb6 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ovl.yaml
@@ -26,6 +26,7 @@ properties:
- mediatek,mt8173-disp-ovl
- mediatek,mt8183-disp-ovl
- mediatek,mt8192-disp-ovl
+ - mediatek,mt8195-disp-ovl
- mediatek,mt8195-mdp3-ovl
- items:
- enum:
@@ -38,14 +39,15 @@ properties:
- const: mediatek,mt8173-disp-ovl
- items:
- enum:
- - mediatek,mt8188-disp-ovl
- - mediatek,mt8195-disp-ovl
- - const: mediatek,mt8183-disp-ovl
- - items:
- - enum:
- mediatek,mt8186-disp-ovl
- mediatek,mt8365-disp-ovl
- const: mediatek,mt8192-disp-ovl
+ - items:
+ - const: mediatek,mt8188-disp-ovl
+ - const: mediatek,mt8195-disp-ovl
+ - items:
+ - const: mediatek,mt8188-mdp3-ovl
+ - const: mediatek,mt8195-mdp3-ovl
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index a9636b76854d..ffbd1dc9470e 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -418,63 +418,63 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
- #include <dt-bindings/clock/qcom,gcc-sdm845.h>
- #include <dt-bindings/power/qcom-rpmpd.h>
-
- dsi@ae94000 {
- compatible = "qcom,sc7180-dsi-ctrl", "qcom,mdss-dsi-ctrl";
- reg = <0x0ae94000 0x400>;
- reg-names = "dsi_ctrl";
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- interrupt-parent = <&mdss>;
- interrupts = <4>;
-
- clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
- <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
- <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
- <&dispcc DISP_CC_MDSS_ESC0_CLK>,
- <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&dispcc DISP_CC_MDSS_AXI_CLK>;
- clock-names = "byte",
- "byte_intf",
- "pixel",
- "core",
- "iface",
- "bus";
-
- phys = <&dsi0_phy>;
- phy-names = "dsi";
-
- assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
- assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
-
- power-domains = <&rpmhpd SC7180_CX>;
- operating-points-v2 = <&dsi_opp_table>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- dsi0_in: endpoint {
- remote-endpoint = <&dpu_intf1_out>;
- };
- };
-
- port@1 {
- reg = <1>;
- dsi0_out: endpoint {
- remote-endpoint = <&sn65dsi86_in>;
- data-lanes = <0 1 2 3>;
- qcom,te-source = "mdp_vsync_e";
- };
- };
- };
- };
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ dsi@ae94000 {
+ compatible = "qcom,sc7180-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ phys = <&dsi0_phy>;
+ phy-names = "dsi";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
+
+ power-domains = <&rpmhpd SC7180_CX>;
+ operating-points-v2 = <&dsi_opp_table>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&sn65dsi86_in>;
+ data-lanes = <0 1 2 3>;
+ qcom,te-source = "mdp_vsync_e";
+ };
+ };
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
index 69d13867b7cf..fc9abf090f0d 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
@@ -74,28 +74,28 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
-
- dsi-phy@ae94400 {
- compatible = "qcom,dsi-phy-10nm";
- reg = <0x0ae94400 0x200>,
- <0x0ae94600 0x280>,
- <0x0ae94a00 0x1e0>;
- reg-names = "dsi_phy",
- "dsi_phy_lane",
- "dsi_pll";
-
- #clock-cells = <1>;
- #phy-cells = <0>;
-
- vdds-supply = <&vdda_mipi_dsi0_pll>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "iface", "ref";
-
- qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 0>;
- qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 0>;
- qcom,phy-drive-ldo-level = <400>;
- };
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+
+ dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0x0ae94400 0x200>,
+ <0x0ae94600 0x280>,
+ <0x0ae94a00 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ vdds-supply = <&vdda_mipi_dsi0_pll>;
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 0>;
+ qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 0>;
+ qcom,phy-drive-ldo-level = <400>;
+ };
...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
index 29bbc2f1c766..206a9a4b3845 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
@@ -56,24 +56,24 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
- dsi-phy@ae94400 {
- compatible = "qcom,dsi-phy-14nm";
- reg = <0x0ae94400 0x200>,
- <0x0ae94600 0x280>,
- <0x0ae94a00 0x1e0>;
- reg-names = "dsi_phy",
- "dsi_phy_lane",
- "dsi_pll";
+ dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-14nm";
+ reg = <0x0ae94400 0x200>,
+ <0x0ae94600 0x280>,
+ <0x0ae94a00 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
- #clock-cells = <1>;
- #phy-cells = <0>;
+ #clock-cells = <1>;
+ #phy-cells = <0>;
- vcca-supply = <&vcca_reg>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "iface", "ref";
- };
+ vcca-supply = <&vcca_reg>;
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ };
...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
index 7e6687cb002b..93570052992a 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
@@ -45,26 +45,26 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
- dsi-phy@fd922a00 {
- compatible = "qcom,dsi-phy-20nm";
- reg = <0xfd922a00 0xd4>,
- <0xfd922b00 0x2b0>,
- <0xfd922d80 0x7b>;
- reg-names = "dsi_pll",
- "dsi_phy",
- "dsi_phy_regulator";
+ dsi-phy@fd922a00 {
+ compatible = "qcom,dsi-phy-20nm";
+ reg = <0xfd922a00 0xd4>,
+ <0xfd922b00 0x2b0>,
+ <0xfd922d80 0x7b>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
- #clock-cells = <1>;
- #phy-cells = <0>;
+ #clock-cells = <1>;
+ #phy-cells = <0>;
- vcca-supply = <&vcca_reg>;
- vddio-supply = <&vddio_reg>;
+ vcca-supply = <&vcca_reg>;
+ vddio-supply = <&vddio_reg>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "iface", "ref";
- };
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ };
...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
index a55c2445d189..371befa9f9d2 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
@@ -51,25 +51,25 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
- dsi-phy@fd922a00 {
- compatible = "qcom,dsi-phy-28nm-lp";
- reg = <0xfd922a00 0xd4>,
- <0xfd922b00 0x2b0>,
- <0xfd922d80 0x7b>;
- reg-names = "dsi_pll",
- "dsi_phy",
- "dsi_phy_regulator";
+ dsi-phy@fd922a00 {
+ compatible = "qcom,dsi-phy-28nm-lp";
+ reg = <0xfd922a00 0xd4>,
+ <0xfd922b00 0x2b0>,
+ <0xfd922d80 0x7b>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
- #clock-cells = <1>;
- #phy-cells = <0>;
+ #clock-cells = <1>;
+ #phy-cells = <0>;
- vddio-supply = <&vddio_reg>;
+ vddio-supply = <&vddio_reg>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "iface", "ref";
- };
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ };
...
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
index 7e764eac3ef3..321470435e65 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
@@ -54,23 +54,23 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/clock/qcom,dispcc-sm8250.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,dispcc-sm8250.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
- dsi-phy@ae94400 {
- compatible = "qcom,dsi-phy-7nm";
- reg = <0x0ae94400 0x200>,
- <0x0ae94600 0x280>,
- <0x0ae94900 0x260>;
- reg-names = "dsi_phy",
- "dsi_phy_lane",
- "dsi_pll";
+ dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-7nm";
+ reg = <0x0ae94400 0x200>,
+ <0x0ae94600 0x280>,
+ <0x0ae94900 0x260>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
- #clock-cells = <1>;
- #phy-cells = <0>;
+ #clock-cells = <1>;
+ #phy-cells = <0>;
- vdds-supply = <&vreg_l5a_0p88>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
- <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "iface", "ref";
- };
+ vdds-supply = <&vreg_l5a_0p88>;
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
index 4536bb2f971f..a90a8b3f1a9e 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
@@ -78,7 +78,6 @@ examples:
"mdp1-mem",
"cpu-cfg";
-
resets = <&dispcc_core_bcr>;
power-domains = <&dispcc_gdsc>;
@@ -129,7 +128,7 @@ examples:
port@0 {
reg = <0>;
dpu_intf0_out: endpoint {
- remote-endpoint = <&mdss0_dp0_in>;
+ remote-endpoint = <&mdss0_dp0_in>;
};
};
};
@@ -209,8 +208,8 @@ examples:
};
port@1 {
- reg = <1>;
- mdss0_dp_out: endpoint { };
+ reg = <1>;
+ mdss0_dp_out: endpoint { };
};
};
diff --git a/Documentation/devicetree/bindings/display/renesas,cmm.yaml b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
index 561efaaa5a91..fc4933c343cd 100644
--- a/Documentation/devicetree/bindings/display/renesas,cmm.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
@@ -58,10 +58,10 @@ examples:
#include <dt-bindings/power/r8a7796-sysc.h>
cmm0: cmm@fea40000 {
- compatible = "renesas,r8a7796-cmm",
- "renesas,rcar-gen3-cmm";
- reg = <0xfea40000 0x1000>;
- power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
- clocks = <&cpg CPG_MOD 711>;
- resets = <&cpg 711>;
+ compatible = "renesas,r8a7796-cmm",
+ "renesas,rcar-gen3-cmm";
+ reg = <0xfea40000 0x1000>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ clocks = <&cpg CPG_MOD 711>;
+ resets = <&cpg 711>;
};
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
deleted file mode 100644
index cd17684aaab5..000000000000
--- a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Analog Devices AXI-DMAC DMA controller
-
-Required properties:
- - compatible: Must be "adi,axi-dmac-1.00.a".
- - reg: Specification for the controllers memory mapped register map.
- - interrupts: Specification for the controllers interrupt.
- - clocks: Phandle and specifier to the controllers AXI interface clock
- - #dma-cells: Must be 1.
-
-Required sub-nodes:
- - adi,channels: This sub-node must contain a sub-node for each DMA channel. For
- the channel sub-nodes the following bindings apply. They must match the
- configuration options of the peripheral as it was instantiated.
-
-Required properties for adi,channels sub-node:
- - #size-cells: Must be 0
- - #address-cells: Must be 1
-
-Required channel sub-node properties:
- - reg: Which channel this node refers to.
- - adi,source-bus-width,
- adi,destination-bus-width: Width of the source or destination bus in bits.
- - adi,source-bus-type,
- adi,destination-bus-type: Type of the source or destination bus. Must be one
- of the following:
- 0 (AXI_DMAC_TYPE_AXI_MM): Memory mapped AXI interface
- 1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
- 2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
-
-Deprecated optional channel properties:
- - adi,length-width: Width of the DMA transfer length register.
- - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
- transfers.
- - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
-
-DMA clients connected to the AXI-DMAC DMA controller must use the format
-described in the dma.txt file using a one-cell specifier. The value of the
-specifier refers to the DMA channel index.
-
-Example:
-
-dma: dma@7c420000 {
- compatible = "adi,axi-dmac-1.00.a";
- reg = <0x7c420000 0x10000>;
- interrupts = <0 57 0>;
- clocks = <&clkc 16>;
- #dma-cells = <1>;
-
- adi,channels {
- #size-cells = <0>;
- #address-cells = <1>;
-
- dma-channel@0 {
- reg = <0>;
- adi,source-bus-width = <32>;
- adi,source-bus-type = <ADI_AXI_DMAC_TYPE_MM_AXI>;
- adi,destination-bus-width = <64>;
- adi,destination-bus-type = <ADI_AXI_DMAC_TYPE_FIFO>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.yaml b/Documentation/devicetree/bindings/dma/adi,axi-dmac.yaml
new file mode 100644
index 000000000000..63b6fb0423c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/adi,axi-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AXI-DMAC DMA controller
+
+description: |
+ FPGA-based DMA controller designed for use with high-speed converter hardware.
+
+ http://analogdevicesinc.github.io/hdl/library/axi_dmac/index.html
+
+maintainers:
+ - Nuno Sa <nuno.sa@analog.com>
+
+additionalProperties: false
+
+properties:
+ compatible:
+ const: adi,axi-dmac-1.00.a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 1
+
+ adi,channels:
+ deprecated: true
+ type: object
+ description:
+ This sub-node must contain a sub-node for each DMA channel. This node is
+ only required for IP versions older than 4.3.a and should otherwise be
+ omitted.
+ additionalProperties: false
+
+ properties:
+ "#size-cells":
+ const: 0
+ "#address-cells":
+ const: 1
+
+ patternProperties:
+ "^dma-channel@[0-9a-f]+$":
+ type: object
+ description:
+ DMA channel properties based on HDL compile-time configuration.
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
+ adi,source-bus-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Width of the source bus in bits.
+ enum: [8, 16, 32, 64, 128]
+
+ adi,destination-bus-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Width of the destination bus in bits.
+ enum: [8, 16, 32, 64, 128]
+
+ adi,source-bus-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Type of the source bus.
+
+ 0: Memory mapped AXI interface
+ 1: Streaming AXI interface
+ 2: FIFO interface
+ enum: [0, 1, 2]
+
+ adi,destination-bus-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Type of the destination bus (see adi,source-bus-type).
+ enum: [0, 1, 2]
+
+ adi,length-width:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Width of the DMA transfer length register.
+
+ adi,cyclic:
+ deprecated: true
+ type: boolean
+ description:
+ Must be set if the channel supports hardware cyclic DMA transfers.
+
+ adi,2d:
+ deprecated: true
+ type: boolean
+ description:
+ Must be set if the channel supports hardware 2D DMA transfers.
+
+ required:
+ - reg
+ - adi,source-bus-width
+ - adi,destination-bus-width
+ - adi,source-bus-type
+ - adi,destination-bus-type
+
+ required:
+ - "#size-cells"
+ - "#address-cells"
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - "#dma-cells"
+
+examples:
+ - |
+ dma-controller@7c420000 {
+ compatible = "adi,axi-dmac-1.00.a";
+ reg = <0x7c420000 0x10000>;
+ interrupts = <0 57 0>;
+ clocks = <&clkc 16>;
+ #dma-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
index 02d5bd035409..9b5180c0a7c4 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
@@ -22,7 +22,9 @@ properties:
number.
compatible:
- const: allwinner,sun4i-a10-dma
+ enum:
+ - allwinner,sun4i-a10-dma
+ - allwinner,suniv-f1c100s-dma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml
new file mode 100644
index 000000000000..9ca1c5d1f00f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/atmel,sama5d4-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip AT91 Extensible Direct Memory Access Controller
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Charan Pedumuru <charan.pedumuru@microchip.com>
+
+description:
+ The DMA Controller (XDMAC) is an AHB-protocol central direct memory access
+ controller. It performs peripheral data transfers and memory move operations
+ over one or two bus ports through a unidirectional communication channel.
+ Each channel is fully programmable and provides both peripheral and
+ memory-to-memory transfers. The channel features are configurable at
+ implementation time.
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - atmel,sama5d4-dma
+ - microchip,sama7g5-dma
+ - items:
+ - enum:
+ - microchip,sam9x60-dma
+ - microchip,sam9x7-dma
+ - const: atmel,sama5d4-dma
+
+ "#dma-cells":
+ description: |
+ Represents the number of integer cells in the `dmas` property of client
+ devices. The single cell specifies the channel configuration register:
+ - bit 13: SIF (Source Interface Identifier) for memory interface.
+ - bit 14: DIF (Destination Interface Identifier) for peripheral interface.
+ - bits 30-24: PERID (Peripheral Identifier).
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: dma_clk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - "#dma-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/dma/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ dma-controller@f0008000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0008000 0x1000>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+ clock-names = "dma_clk";
+ };
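The SIF/DIF/PERID layout of the single specifier cell is easiest to see from a client node. The sketch below is adapted from the text binding removed further down in this series and uses the AT91_XDMAC_DT_* helpers from dt-bindings/dma/at91.h; the I2C device, the dma1 label and the PERID values are purely illustrative:

    i2c@f8024000 {
        compatible = "atmel,at91sam9x5-i2c";
        reg = <0xf8024000 0x4000>;
        interrupts = <34 4 6>;
        /* cell = memory interface 0, peripheral interface 1, peripheral id 6/7 */
        dmas = <&dma1 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
                       | AT91_XDMAC_DT_PERID(6))>,
               <&dma1 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
                       | AT91_XDMAC_DT_PERID(7))>;
        dma-names = "tx", "rx";
    };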
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
deleted file mode 100644
index 76d649b3a25d..000000000000
--- a/Documentation/devicetree/bindings/dma/atmel-xdma.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-* Atmel Extensible Direct Memory Access Controller (XDMAC)
-
-* XDMA Controller
-Required properties:
-- compatible: Should be "atmel,sama5d4-dma", "microchip,sam9x60-dma" or
- "microchip,sama7g5-dma" or
- "microchip,sam9x7-dma", "atmel,sama5d4-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain DMA interrupt.
-- #dma-cells: Must be <1>, used to represent the number of integer cells in
-the dmas property of client devices.
- - The 1st cell specifies the channel configuration register:
- - bit 13: SIF, source interface identifier, used to get the memory
- interface identifier,
- - bit 14: DIF, destination interface identifier, used to get the peripheral
- interface identifier,
- - bit 30-24: PERID, peripheral identifier.
-
-Example:
-
-dma1: dma-controller@f0004000 {
- compatible = "atmel,sama5d4-dma";
- reg = <0xf0004000 0x200>;
- interrupts = <50 4 0>;
- #dma-cells = <1>;
-};
-
-
-* DMA clients
-DMA clients connected to the Atmel XDMA controller must use the format
-described in the dma.txt file, using a one-cell specifier for each channel.
-The two cells in order are:
-1. A phandle pointing to the DMA controller.
-2. Channel configuration register. Configurable fields are:
- - bit 13: SIF, source interface identifier, used to get the memory
- interface identifier,
- - bit 14: DIF, destination interface identifier, used to get the peripheral
- interface identifier,
- - bit 30-24: PERID, peripheral identifier.
-
-Example:
-
-i2c2: i2c@f8024000 {
- compatible = "atmel,at91sam9x5-i2c";
- reg = <0xf8024000 0x4000>;
- interrupts = <34 4 6>;
- dmas = <&dma1
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(6))>,
- <&dma1
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(7))>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index d54140f18d34..4f925469533e 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -26,9 +26,13 @@ properties:
- fsl,imx93-edma3
- fsl,imx93-edma4
- fsl,imx95-edma5
+ - nxp,s32g2-edma
- items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
+ - items:
+ - const: nxp,s32g3-edma
+ - const: nxp,s32g2-edma
reg:
minItems: 1
@@ -221,6 +225,36 @@ allOf:
properties:
power-domains: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nxp,s32g2-edma
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: dmamux0
+ - const: dmamux1
+ interrupts:
+ minItems: 3
+ maxItems: 3
+ interrupt-names:
+ items:
+ - const: tx-0-15
+ - const: tx-16-31
+ - const: err
+ reg:
+ minItems: 3
+ maxItems: 3
+ "#dma-cells":
+ const: 2
+ dma-channels:
+ const: 32
+
unevaluatedProperties: false
examples:
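Putting the nxp,s32g2-edma constraints above together, a controller node would be shaped roughly as follows; the register addresses, interrupt numbers, clock specifiers and the assumed eDMA-plus-two-DMAMUX register layout are placeholders rather than values taken from this binding:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    dma-controller@40144000 {
        compatible = "nxp,s32g2-edma";
        /* placeholder addresses: eDMA engine and two DMAMUX register blocks */
        reg = <0x40144000 0x24000>,
              <0x4012c000 0x3000>,
              <0x40130000 0x3000>;
        #dma-cells = <2>;
        dma-channels = <32>;
        interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "tx-0-15", "tx-16-31", "err";
        clocks = <&clks 63>, <&clks 64>;
        clock-names = "dmamux0", "dmamux1";
    };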
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
index 877147e95ecc..d3f8c269916c 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
@@ -13,9 +13,6 @@ description: |
maintainers:
- Jon Hunter <jonathanh@nvidia.com>
-allOf:
- - $ref: dma-controller.yaml#
-
properties:
compatible:
oneOf:
@@ -29,7 +26,19 @@ properties:
- const: nvidia,tegra186-adma
reg:
- maxItems: 1
+ description:
+ The 'page' region describes the address space of the page
+ used for accessing the DMA channel registers. The 'global'
+ region describes the address space of the global DMA registers.
+ In the absence of the 'reg-names' property, there must be a
+ single entry that covers the address space of the global DMA
+ registers and the DMA channel registers.
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ minItems: 1
+ maxItems: 2
interrupts:
description: |
@@ -63,6 +72,49 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: dma-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra210-adma
+ then:
+ properties:
+ reg:
+ items:
+ - description: Full address space range of DMA registers.
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra186-adma
+ then:
+ anyOf:
+ - properties:
+ reg:
+ items:
+ - description: Full address space range of DMA registers.
+ - properties:
+ reg:
+ items:
+ - description: Channel Page address space range of DMA registers.
+ reg-names:
+ items:
+ - const: page
+ - properties:
+ reg:
+ items:
+ - description: Channel Page address space range of DMA registers.
+ - description: Global Page address space range of DMA registers.
+ reg-names:
+ items:
+ - const: page
+ - const: global
+
additionalProperties: false
examples:
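The three reg layouts the new schema permits for Tegra186/Tegra194 can be summarised as property fragments; the addresses and sizes below are placeholders only:

    /* single entry covering both the global and channel page registers */
    reg = <0x02930000 0x30000>;

    /* channel page registers only */
    reg = <0x02940000 0x20000>;
    reg-names = "page";

    /* separate channel page and global register regions */
    reg = <0x02940000 0x20000>,
          <0x02930000 0x10000>;
    reg-names = "page", "global";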
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index 4ad56a409b9c..7052468b15c8 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -25,7 +25,9 @@ properties:
- items:
- enum:
- qcom,qcm2290-gpi-dma
+ - qcom,qcs8300-gpi-dma
- qcom,qdu1000-gpi-dma
+ - qcom,sa8775p-gpi-dma
- qcom,sar2130p-gpi-dma
- qcom,sc7280-gpi-dma
- qcom,sdx75-gpi-dma
@@ -35,10 +37,12 @@ properties:
- qcom,sm8450-gpi-dma
- qcom,sm8550-gpi-dma
- qcom,sm8650-gpi-dma
+ - qcom,sm8750-gpi-dma
- qcom,x1e80100-gpi-dma
- const: qcom,sm6350-gpi-dma
- items:
- enum:
+ - qcom,qcs615-gpi-dma
- qcom,sdm670-gpi-dma
- qcom,sm6125-gpi-dma
- qcom,sm8150-gpi-dma
diff --git a/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml
index f26c914a3a9a..b7bca1a83769 100644
--- a/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml
+++ b/Documentation/devicetree/bindings/dma/stm32/st,stm32-dmamux.yaml
@@ -15,6 +15,16 @@ allOf:
properties:
"#dma-cells":
const: 3
+ description: |
+ Each cell represents the following:
+ 1. The mux input number/line for the request
+ 2. Bitfield representing DMA channel configuration that is passed
+ to the real DMA controller
+ 3. Bitfield representing device dependent DMA features passed to
+ the real DMA controller
+
+ For bitfield definitions of cells 2 and 3, see the associated
+ bindings doc for the actual DMA controller in st,stm32-dma.yaml.
compatible:
const: st,stm32h7-dmamux
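A client of the DMAMUX then encodes all three cells described above in each dmas entry; the node name, the dmamux1 label and the values below are placeholders, and the exact bitfield meanings are defined in st,stm32-dma.yaml:

    some-client {
        /* request line 39, channel configuration 0x400, feature bitfield 0x5 (illustrative) */
        dmas = <&dmamux1 39 0x400 0x5>;
        dma-names = "rx";
    };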
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index 27b8e1636560..b5bc842c5a0e 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -34,6 +34,7 @@ properties:
- ti,am62a-dmss-bcdma-csirx
- ti,am64-dmss-bcdma
- ti,j721s2-dmss-bcdma-csi
+ - ti,j722s-dmss-bcdma-csi
reg:
minItems: 3
@@ -196,7 +197,9 @@ allOf:
properties:
compatible:
contains:
- const: ti,j721s2-dmss-bcdma-csi
+ enum:
+ - ti,j721s2-dmss-bcdma-csi
+ - ti,j722s-dmss-bcdma-csi
then:
properties:
ti,sci-rm-range-bchan: false
diff --git a/Documentation/devicetree/bindings/dts-coding-style.rst b/Documentation/devicetree/bindings/dts-coding-style.rst
index 8a68331075a0..4772ded8a987 100644
--- a/Documentation/devicetree/bindings/dts-coding-style.rst
+++ b/Documentation/devicetree/bindings/dts-coding-style.rst
@@ -162,14 +162,17 @@ Example::
status = "okay";
}
-Indentation
------------
+Indentation and wrapping
+------------------------
-1. Use indentation according to Documentation/process/coding-style.rst.
+1. Use indentation and wrap lines according to
+ Documentation/process/coding-style.rst.
2. Each entry in arrays with multiple cells, e.g. "reg" with two IO addresses,
shall be enclosed in <>.
-3. For arrays spanning across lines, it is preferred to align the continued
- entries with opening < from the first line.
+3. For arrays spanning across lines, it is preferred to split on an item
+ boundary and align the continued entries with the opening < from the first
+ line. Usually avoid splitting individual items unless they significantly
+ exceed the line wrap limit.
Example::
@@ -177,6 +180,9 @@ Example::
compatible = "qcom,sm8550-tsens", "qcom,tsens-v2";
reg = <0x0 0x0c271000 0x0 0x1000>,
<0x0 0x0c222000 0x0 0x1000>;
+ /* Lines exceeding coding style line wrap limit: */
+ interconnects = <&aggre1_noc MASTER_USB3_0 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
};
Organizing DTSI and DTS
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
index 2ee030000007..8cdaac8011ba 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml
@@ -26,6 +26,7 @@ properties:
- qcom,scm-ipq4019
- qcom,scm-ipq5018
- qcom,scm-ipq5332
+ - qcom,scm-ipq5424
- qcom,scm-ipq6018
- qcom,scm-ipq806x
- qcom,scm-ipq8074
@@ -42,6 +43,7 @@ properties:
- qcom,scm-msm8996
- qcom,scm-msm8998
- qcom,scm-qcm2290
+ - qcom,scm-qcs615
- qcom,scm-qcs8300
- qcom,scm-qdu1000
- qcom,scm-sa8255p
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index abd4aa335fbc..9318817ea135 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3188-mali
- rockchip,rk3228-mali
- samsung,exynos4210-mali
+ - st,stih410-mali
- stericsson,db8500-mali
- xlnx,zynqmp-mali
- const: arm,mali-400
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
index 280ed479ef5a..84d949392012 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
@@ -37,10 +37,17 @@ properties:
interrupts:
maxItems: 1
+ interrupt-names:
+ items:
+ - enum: [INT1, INT2]
+
+dependencies:
+ interrupts: [ interrupt-names ]
+ interrupt-names: [ interrupts ]
+
required:
- compatible
- reg
- - interrupts
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -61,6 +68,7 @@ examples:
reg = <0x2a>;
interrupt-parent = <&gpio0>;
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT1";
};
};
- |
@@ -79,5 +87,6 @@ examples:
spi-cpha;
interrupt-parent = <&gpio0>;
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT2";
};
};
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
index 66ea894dbe55..f07c70e51c45 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kx022a.yaml
@@ -4,23 +4,26 @@
$id: http://devicetree.org/schemas/iio/accel/kionix,kx022a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ROHM/Kionix KX022A, KX132-1211 and KX132ACR-LBZ Accelerometers
+title: ROHM/Kionix KX022A, KX132/134-1211 and KX132/134ACR-LBZ Accelerometers
maintainers:
- Matti Vaittinen <mazziesaccount@gmail.com>
description: |
KX022A, KX132ACR-LBZ and KX132-1211 are 3-axis accelerometers supporting
- +/- 2G, 4G, 8G and 16G ranges, variable output data-rates and a
- hardware-fifo buffering. These accelerometers can be accessed either
- via I2C or SPI.
+ +/- 2G, 4G, 8G and 16G ranges. The KX134ACR-LBZ and KX134-1211 support
+ +/- 8G, 16G, 32G and 64G. All the sensors also have variable output
+ data-rates and hardware-fifo buffering. These accelerometers can be
+ accessed either via I2C or SPI.
properties:
compatible:
enum:
- kionix,kx022a
- kionix,kx132-1211
+ - kionix,kx134-1211
- rohm,kx132acr-lbz
+ - rohm,kx134acr-lbz
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml b/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml
index 783c7ddfcd90..c175f4c4cbdb 100644
--- a/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml
@@ -14,12 +14,20 @@ description: |
SPI and I2C interface.
https://www.nxp.com/docs/en/data-sheet/FXLS8962AF.pdf
https://www.nxp.com/docs/en/data-sheet/FXLS8964AF.pdf
+ https://www.nxp.com/docs/en/data-sheet/FXLS8967AF.pdf
+ https://www.nxp.com/docs/en/data-sheet/FXLS8974CF.pdf
properties:
compatible:
- enum:
- - nxp,fxls8962af
- - nxp,fxls8964af
+ oneOf:
+ - enum:
+ - nxp,fxls8962af
+ - nxp,fxls8964af
+ - items:
+ - enum:
+ - nxp,fxls8967af
+ - nxp,fxls8974cf
+ - const: nxp,fxls8962af
reg:
maxItems: 1
@@ -38,6 +46,11 @@ properties:
drive-open-drain:
type: boolean
+ wakeup-source:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable wake on accelerometer event
+
required:
- compatible
- reg
@@ -61,6 +74,7 @@ examples:
interrupt-parent = <&gpio0>;
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "INT1";
+ wakeup-source;
};
};
- |
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
index e413a9d8d2a2..96e01a97dd95 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
@@ -19,49 +19,82 @@ description: |
https://www.analog.com/media/en/technical-documentation/data-sheets/ad4020-4021-4022.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4001.pdf
https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4003.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7685.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7686.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7687.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7688.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7690.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7691.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7693.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7942.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7946.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7980.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7982.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7983.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7984.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7988-1_7988-5.pdf
$ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
oneOf:
- - const: adi,ad4000
+ - enum:
+ - adi,ad4000
+ - adi,ad4001
+ - adi,ad4002
+ - adi,ad4003
+ - adi,ad4020
+ - adi,adaq4001
+ - adi,adaq4003
+ - adi,ad7687
+ - adi,ad7691
+ - adi,ad7942
+ - adi,ad7946
+ - adi,ad7983
- items:
- enum:
- adi,ad4004
- adi,ad4008
- const: adi,ad4000
-
- - const: adi,ad4001
- items:
- enum:
- adi,ad4005
- const: adi,ad4001
-
- - const: adi,ad4002
- items:
- enum:
- adi,ad4006
- adi,ad4010
- const: adi,ad4002
-
- - const: adi,ad4003
- items:
- enum:
- adi,ad4007
- adi,ad4011
- const: adi,ad4003
-
- - const: adi,ad4020
- items:
- enum:
- adi,ad4021
- adi,ad4022
- const: adi,ad4020
-
- - const: adi,adaq4001
-
- - const: adi,adaq4003
+ - items:
+ - enum:
+ - adi,ad7685
+ - adi,ad7686
+ - adi,ad7980
+ - adi,ad7988-1
+ - adi,ad7988-5
+ - const: adi,ad7983
+ - items:
+ - enum:
+ - adi,ad7688
+ - adi,ad7693
+ - const: adi,ad7687
+ - items:
+ - enum:
+ - adi,ad7690
+ - adi,ad7982
+ - adi,ad7984
+ - const: adi,ad7691
reg:
maxItems: 1
@@ -133,6 +166,22 @@ required:
- ref-supply
allOf:
+ # Single-channel PulSAR devices have SDI either tied to VIO, GND, or host CS.
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7687
+ - adi,ad7691
+ - adi,ad7942
+ - adi,ad7946
+ - adi,ad7983
+ then:
+ properties:
+ adi,sdi-pin:
+ enum: [ high, low, cs ]
+ default: cs
# The configuration register can only be accessed if SDI is connected to MOSI
- if:
required:
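For the newly added single-channel PulSAR parts, the SDI strapping is expressed with the existing adi,sdi-pin property; an abbreviated sketch for an AD7983 with SDI tied to the host chip select (other required supplies and SPI controller properties are omitted, and the frequency is a placeholder):

    adc@0 {
        compatible = "adi,ad7983";
        reg = <0>;
        spi-max-frequency = <80000000>;
        adi,sdi-pin = "cs";
        ref-supply = <&adc_vref>;
    };

Since "cs" is the default set by the new if/then clause, the property could equally be left out here; it is spelled out only for illustration.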
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
index 310f046e139f..7d2229dee444 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
@@ -134,8 +134,9 @@ patternProperties:
description:
Describes the common mode channel for single channels. 0xFF is REFGND
and 0xFE is COM. Macros are available for these values in
- dt-bindings/iio/adi,ad4695.h. Values 1 to 15 correspond to INx inputs.
- Only odd numbered INx inputs can be used as common mode channels.
+ dt-bindings/iio/adc/adi,ad4695.h. Values 1 to 15 correspond to INx
+ inputs. Only odd numbered INx inputs can be used as common mode
+ channels.
enum: [1, 3, 5, 7, 9, 11, 13, 15, 0xFE, 0xFF]
default: 0xFF
@@ -209,7 +210,7 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
- #include <dt-bindings/iio/adi,ad4695.h>
+ #include <dt-bindings/iio/adc/adi,ad4695.h>
spi {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
index 35ed04350e28..7146a654ae38 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
@@ -37,6 +37,17 @@ properties:
description: IRQ line for the ADC
maxItems: 1
+ rdy-gpios:
+ description:
+ GPIO reading the R̅D̅Y̅ line. Having such a GPIO is technically optional but
+ highly recommended because DOUT/R̅D̅Y̅ toggles during SPI transfers (in its
+ DOUT aka MISO role) and so usually triggers a spurious interrupt. The
+ distinction between such a spurious event and a real one can only be made
+ by reading such a GPIO. (There is a register telling the same
+ information, but accessing that one needs a SPI transfer which then
+ triggers another interrupt event.)
+ maxItems: 1
+
'#address-cells':
const: 1
@@ -111,6 +122,7 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
@@ -121,6 +133,7 @@ examples:
spi-max-frequency = <5000000>;
interrupts = <25 2>;
interrupt-parent = <&gpio>;
+ rdy-gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
refin1-supply = <&adc_vref>;
clocks = <&ad7124_mclk>;
clock-names = "mclk";
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
index ad15cf9bc2ff..21ee319d4675 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7173.yaml
@@ -135,6 +135,17 @@ properties:
'#clock-cells':
const: 0
+ rdy-gpios:
+ description:
+ GPIO reading the R̅D̅Y̅ line. Having such a GPIO is technically optional but
+ highly recommended because DOUT/R̅D̅Y̅ toggles during SPI transfers (in its
+ DOUT aka MISO role) and so usually triggers a spurious interrupt. The
+ distinction between such a spurious event and a real one can only be made
+ by reading such a GPIO. (There is a register telling the same
+ information, but accessing that one needs a SPI transfer which then
+ triggers another interrupt event.)
+ maxItems: 1
+
patternProperties:
"^channel@[0-9a-f]$":
type: object
@@ -443,6 +454,7 @@ examples:
interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "rdy";
interrupt-parent = <&gpio>;
+ rdy-gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
spi-max-frequency = <5000000>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 66dd1c549bd3..0bd2c6906c83 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -106,6 +106,17 @@ properties:
description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
type: boolean
+ rdy-gpios:
+ description:
+ GPIO reading the R̅D̅Y̅ line. Having such a GPIO is technically optional but
+ highly recommended because DOUT/R̅D̅Y̅ toggles during SPI transfers (in its
+ DOUT aka MISO role) and so usually triggers a spurious interrupt. The
+ distinction between such a spurious event and a real one can only be made
+ by reading such a GPIO. (There is a register telling the same
+ information, but accessing that one needs a SPI transfer which then
+ triggers another interrupt event.)
+ maxItems: 1
+
patternProperties:
"^channel@[0-9a-f]+$":
type: object
@@ -181,6 +192,7 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
@@ -195,6 +207,7 @@ examples:
clock-names = "mclk";
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
+ rdy-gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
aincom-supply = <&aincom>;
dvdd-supply = <&dvdd>;
avdd-supply = <&avdd>;
@@ -207,6 +220,7 @@ examples:
};
};
- |
+ #include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
@@ -224,6 +238,7 @@ examples:
#clock-cells = <0>;
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
+ rdy-gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
aincom-supply = <&aincom>;
dvdd-supply = <&dvdd>;
avdd-supply = <&avdd>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
index be2616ff9af6..5c8df45bfab0 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
@@ -63,6 +63,17 @@ properties:
marked GPIO_ACTIVE_LOW.
maxItems: 1
+ rdy-gpios:
+ description:
+ GPIO reading the R̅D̅Y̅ line. Having such a GPIO is technically optional but
+ highly recommended because DOUT/R̅D̅Y̅ toggles during SPI transfers (in its
+ DOUT aka MISO role) and so usually triggers a spurious interrupt. The
+ distinction between such a spurious event and a real one can only be made
+ by reading such a GPIO. (There is a register telling the same
+ information, but accessing that one needs a SPI transfer which then
+ triggers another interrupt event.)
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
index ba86c7b7d622..40341d541726 100644
--- a/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
@@ -17,12 +17,15 @@ description: |
properties:
compatible:
- items:
- - enum:
- - renesas,r9a07g043-adc # RZ/G2UL and RZ/Five
- - renesas,r9a07g044-adc # RZ/G2L
- - renesas,r9a07g054-adc # RZ/V2L
- - const: renesas,rzg2l-adc
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a07g043-adc # RZ/G2UL and RZ/Five
+ - renesas,r9a07g044-adc # RZ/G2L
+ - renesas,r9a07g054-adc # RZ/V2L
+ - const: renesas,rzg2l-adc
+ - items:
+ - const: renesas,r9a08g045-adc # RZ/G3S
reg:
maxItems: 1
@@ -57,6 +60,9 @@ properties:
'#size-cells':
const: 0
+ "#io-channel-cells":
+ const: 1
+
required:
- compatible
- reg
@@ -68,7 +74,7 @@ required:
- reset-names
patternProperties:
- "^channel@[0-7]$":
+ "^channel@[0-8]$":
$ref: adc.yaml
type: object
description: |
@@ -78,6 +84,8 @@ patternProperties:
reg:
description: |
The channel number.
+ minimum: 0
+ maximum: 8
required:
- reg
@@ -92,18 +100,25 @@ allOf:
const: renesas,r9a07g043-adc
then:
patternProperties:
- "^channel@[2-7]$": false
+ "^channel@[2-8]$": false
"^channel@[0-1]$":
properties:
reg:
- minimum: 0
maximum: 1
- else:
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a07g044-adc
+ - renesas,r9a07g054-adc
+ then:
patternProperties:
+ "^channel@[8]$": false
"^channel@[0-7]$":
properties:
reg:
- minimum: 0
maximum: 7
additionalProperties: false
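With the pattern widened to channel@[0-8], the ninth input on the RZ/G3S variant is described as an ordinary channel node; in the minimal sketch below the unit address is a placeholder and the required clock, reset and interrupt properties are omitted:

    adc@10058000 {
        compatible = "renesas,r9a08g045-adc";
        reg = <0x10058000 0x1000>;
        #address-cells = <1>;
        #size-cells = <0>;

        channel@8 {
            reg = <8>;
        };
    };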
diff --git a/Documentation/devicetree/bindings/iio/chemical/bosch,bme680.yaml b/Documentation/devicetree/bindings/iio/chemical/bosch,bme680.yaml
new file mode 100644
index 000000000000..fe98ec44f081
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/bosch,bme680.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/chemical/bosch,bme680.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch BME680 Gas sensor
+
+maintainers:
+ - Vasileios Amoiridis <vassilisamir@gmail.com>
+
+description: >
+ BME680 is a gas sensor which combines relative humidity, barometric pressure,
+ ambient temperature and gas (VOC - Volatile Organic Compounds) measurements.
+
+ https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme680-ds001.pdf
+
+properties:
+ compatible:
+ const: bosch,bme680
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+ vddio-supply: true
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ co2-sensor@77 {
+ compatible = "bosch,bme680";
+ reg = <0x77>;
+ vddio-supply = <&vddio>;
+ vdd-supply = <&vdd>;
+ };
+ };
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ co2-sensor@0 {
+ compatible = "bosch,bme680";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+ vddio-supply = <&vddio>;
+ vdd-supply = <&vdd>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml
index 79cb4b78a88a..2bd89e0aa46b 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml
@@ -91,7 +91,7 @@ examples:
vrefn-supply = <&dac_vrefn>;
reset-gpios = <&gpio_bd 16 GPIO_ACTIVE_LOW>;
clear-gpios = <&gpio_bd 17 GPIO_ACTIVE_LOW>;
- ldac-gpios = <&gpio_bd 18 GPIO_ACTIVE_HIGH>;
+ ldac-gpios = <&gpio_bd 18 GPIO_ACTIVE_LOW>;
};
};
...
diff --git a/Documentation/devicetree/bindings/iio/dac/rohm,bd79703.yaml b/Documentation/devicetree/bindings/iio/dac/rohm,bd79703.yaml
new file mode 100644
index 000000000000..941a49c93943
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/rohm,bd79703.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2024 ROHM Semiconductor.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/rohm,bd79703.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD79703 DAC
+
+maintainers:
+ - Matti Vaittinen <mazziesaccount@gmail.com>
+
+description: |
+ The ROHM BD79703 is a 6 channel, 8-bit DAC.
+ Datasheet can be found here:
+ https://fscdn.rohm.com/en/products/databook/datasheet/ic/data_converter/dac/bd79702fv-lb_bd79703fv-lb-e.pdf
+
+properties:
+ compatible:
+ const: rohm,bd79703
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 30000000
+
+ vfs-supply:
+ description:
+ The regulator to use as a full scale voltage. The voltage should be between 2.7V and VCC.
+
+ vcc-supply:
+ description:
+ The regulator supplying the operating voltage. Should be between 2.7V and 5.5V.
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - vfs-supply
+ - vcc-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@0 {
+ compatible = "rohm,bd79703";
+ reg = <0>;
+ spi-max-frequency = <30000000>;
+ vcc-supply = <&vcc>;
+ vfs-supply = <&vref>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
index e3eec38897bf..7a1a74fec281 100644
--- a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.yaml
@@ -11,24 +11,30 @@ maintainers:
properties:
compatible:
- enum:
- - adi,adis16375
- - adi,adis16480
- - adi,adis16485
- - adi,adis16488
- - adi,adis16490
- - adi,adis16495-1
- - adi,adis16495-2
- - adi,adis16495-3
- - adi,adis16497-1
- - adi,adis16497-2
- - adi,adis16497-3
- - adi,adis16545-1
- - adi,adis16545-2
- - adi,adis16545-3
- - adi,adis16547-1
- - adi,adis16547-2
- - adi,adis16547-3
+ oneOf:
+ - enum:
+ - adi,adis16375
+ - adi,adis16480
+ - adi,adis16485
+ - adi,adis16486
+ - adi,adis16488
+ - adi,adis16489
+ - adi,adis16490
+ - adi,adis16495-1
+ - adi,adis16495-2
+ - adi,adis16495-3
+ - adi,adis16497-1
+ - adi,adis16497-2
+ - adi,adis16497-3
+ - adi,adis16545-1
+ - adi,adis16545-2
+ - adi,adis16545-3
+ - adi,adis16547-1
+ - adi,adis16547-2
+ - adi,adis16547-3
+ - items:
+ - const: adi,adis16487
+ - const: adi,adis16485
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
index 3b0a2d8b2e91..2cf8a0c7eb4c 100644
--- a/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
@@ -37,6 +37,7 @@ properties:
to "INT2" if INT2 pin should be used instead
drive-open-drain:
+ type: boolean
description: |
set if the specified interrupt pin should be configured as
open drain. If not set, defaults to push-pull.
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi270.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi270.yaml
index 7b0cde1c9b0a..860a6c1fea3c 100644
--- a/Documentation/devicetree/bindings/iio/imu/bosch,bmi270.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi270.yaml
@@ -41,6 +41,7 @@ properties:
- INT2
drive-open-drain:
+ type: boolean
description:
set if the specified interrupt pins should be configured as
open drain. If not set, defaults to push-pull.
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
index 64ef26e19669..7bf8294a8f2e 100644
--- a/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
@@ -38,6 +38,7 @@ properties:
- INT2
drive-open-drain:
+ type: boolean
description:
set if the specified interrupt pin should be configured as
open drain. If not set, defaults to push-pull.
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
index f91954870a44..0bce71529e34 100644
--- a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
oneOf:
- enum:
+ - invensense,iam20380
- invensense,iam20680
- invensense,icm20608
- invensense,icm20609
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml
deleted file mode 100644
index 4f66fd47b016..000000000000
--- a/Documentation/devicetree/bindings/iio/light/rohm,bu27008.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/iio/light/rohm,bu27008.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: ROHM BU27008 color sensor
-
-maintainers:
- - Matti Vaittinen <mazziesaccount@gmail.com>
-
-description:
- The ROHM BU27008 is a sensor with 5 photodiodes (red, green, blue, clear
- and IR) with four configurable channels. Red and green being always
- available and two out of the rest three (blue, clear, IR) can be
- selected to be simultaneously measured. Typical application is adjusting
- LCD backlight of TVs, mobile phones and tablet PCs.
-
-properties:
- compatible:
- const: rohm,bu27008
-
- reg:
- maxItems: 1
-
- interrupts:
- maxItems: 1
-
- vdd-supply: true
-
-required:
- - compatible
- - reg
-
-additionalProperties: false
-
-examples:
- - |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- light-sensor@38 {
- compatible = "rohm,bu27008";
- reg = <0x38>;
- };
- };
-
-...
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
deleted file mode 100644
index bed42d5d0d94..000000000000
--- a/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/iio/light/rohm,bu27010.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: ROHM BU27010 color sensor
-
-maintainers:
- - Matti Vaittinen <mazziesaccount@gmail.com>
-
-description: |
- The ROHM BU27010 is a sensor with 6 photodiodes (red, green, blue, clear,
- IR and flickering detection) with five configurable channels. Red, green
- and flickering detection being always available and two out of the rest
- three (blue, clear, IR) can be selected to be simultaneously measured.
- Typical application is adjusting LCD/OLED backlight of TVs, mobile phones
- and tablet PCs.
-
-properties:
- compatible:
- const: rohm,bu27010
-
- reg:
- maxItems: 1
-
- interrupts:
- maxItems: 1
-
- vdd-supply: true
-
-required:
- - compatible
- - reg
- - vdd-supply
-
-additionalProperties: false
-
-examples:
- - |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- light-sensor@38 {
- compatible = "rohm,bu27010";
- reg = <0x38>;
- vdd-supply = <&vdd>;
- };
- };
diff --git a/Documentation/devicetree/bindings/iio/light/ti,opt4060.yaml b/Documentation/devicetree/bindings/iio/light/ti,opt4060.yaml
new file mode 100644
index 000000000000..568fb2a9b7a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/ti,opt4060.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/ti,opt4060.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments OPT4060 RGBW Color Sensor
+
+maintainers:
+ - Per-Daniel Olsson <perdaniel.olsson@axis.com>
+
+description:
+ Texas Instruments RGBW high resolution color sensor over I2C.
+ https://www.ti.com/lit/gpn/opt4060
+
+properties:
+ compatible:
+ enum:
+ - ti,opt4060
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@44 {
+ compatible = "ti,opt4060";
+ reg = <0x44>;
+ vdd-supply = <&vdd_reg>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
index cb201cecfa1a..706b7e24f182 100644
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
@@ -55,12 +55,16 @@ properties:
If not set, defaults to push-pull configuration.
type: boolean
+ spi-max-frequency:
+ maximum: 10000000
+
required:
- compatible
- vddd-supply
- vdda-supply
allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
- if:
properties:
compatible:
@@ -73,6 +77,16 @@ allOf:
then:
properties:
interrupts: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - bosch,bmp085
+ - bosch,bmp180
+ then:
+ properties:
+ spi-max-frequency: false
additionalProperties: false
@@ -93,3 +107,18 @@ examples:
vdda-supply = <&bar>;
};
};
+ - |
+ # include <dt-bindings/gpio/gpio.h>
+ # include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pressure@0 {
+ compatible = "bosch,bmp280";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ vddd-supply = <&foo>;
+ vdda-supply = <&bar>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
index 47aac8794b68..517a4ac1bea3 100644
--- a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
+++ b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
@@ -26,6 +26,9 @@ properties:
- items:
- enum:
- mediatek,mt6873-keypad
+ - mediatek,mt8183-keypad
+ - mediatek,mt8365-keypad
+ - mediatek,mt8516-keypad
- const: mediatek,mt6779-keypad
reg:
diff --git a/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml b/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
index 60f09caa0e4c..b95435bd6a9b 100644
--- a/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
+++ b/Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
@@ -25,6 +25,7 @@ properties:
compatible:
enum:
- mediatek,mt6323-keys
+ - mediatek,mt6328-keys
- mediatek,mt6331-keys
- mediatek,mt6357-keys
- mediatek,mt6358-keys
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 251410aabf38..83bcf0575cd3 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
- qcom,qcm2290-cpu-bwmon
+ - qcom,qcs615-cpu-bwmon
- qcom,qcs8300-cpu-bwmon
- qcom,sa8775p-cpu-bwmon
- qcom,sc7180-cpu-bwmon
@@ -37,10 +38,12 @@ properties:
- qcom,sm8250-cpu-bwmon
- qcom,sm8550-cpu-bwmon
- qcom,sm8650-cpu-bwmon
+ - qcom,sm8750-cpu-bwmon
- qcom,x1e80100-cpu-bwmon
- const: qcom,sdm845-bwmon # BWMON v4, unified register space
- items:
- enum:
+ - qcom,qcs615-llcc-bwmon
- qcom,qcs8300-llcc-bwmon
- qcom,sa8775p-llcc-bwmon
- qcom,sc7180-llcc-bwmon
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
index 21dae0b92819..4ac0863205b3 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -33,6 +33,7 @@ properties:
- qcom,sm6375-cpucp-l3
- qcom,sm8250-epss-l3
- qcom,sm8350-epss-l3
+ - qcom,sm8650-epss-l3
- const: qcom,epss-l3
reg:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sm8750-rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sm8750-rpmh.yaml
new file mode 100644
index 000000000000..a816acc301e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sm8750-rpmh.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sm8750-rpmh.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPMh Network-On-Chip Interconnect on SM8750
+
+maintainers:
+ - Abel Vesa <abel.vesa@linaro.org>
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+description: |
+ RPMh interconnect providers support system bandwidth requirements through
+ RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+ able to communicate with the BCM through the Resource State Coordinator (RSC)
+ associated with each execution environment. Provider nodes must point to at
+ least one RPMh device child node pertaining to their RSC and each provider
+ can map to multiple RPMh resources.
+
+ See also: include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
+
+properties:
+ compatible:
+ enum:
+ - qcom,sm8750-aggre1-noc
+ - qcom,sm8750-aggre2-noc
+ - qcom,sm8750-clk-virt
+ - qcom,sm8750-cnoc-main
+ - qcom,sm8750-config-noc
+ - qcom,sm8750-gem-noc
+ - qcom,sm8750-lpass-ag-noc
+ - qcom,sm8750-lpass-lpiaon-noc
+ - qcom,sm8750-lpass-lpicx-noc
+ - qcom,sm8750-mc-virt
+ - qcom,sm8750-mmss-noc
+ - qcom,sm8750-nsp-noc
+ - qcom,sm8750-pcie-anoc
+ - qcom,sm8750-system-noc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+required:
+ - compatible
+
+allOf:
+ - $ref: qcom,rpmh-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-clk-virt
+ - qcom,sm8750-mc-virt
+ then:
+ properties:
+ reg: false
+ else:
+ required:
+ - reg
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-pcie-anoc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre-NOC PCIe AXI clock
+ - description: cfg-NOC PCIe a-NOC AHB clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-aggre1-noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre UFS PHY AXI clock
+ - description: aggre USB3 PRIM AXI clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-aggre2-noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: RPMH CC IPA clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8750-aggre1-noc
+ - qcom,sm8750-aggre2-noc
+ - qcom,sm8750-pcie-anoc
+ then:
+ required:
+ - clocks
+ else:
+ properties:
+ clocks: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ clk_virt: interconnect-0 {
+ compatible = "qcom,sm8750-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sm8750-aggre1-noc";
+ reg = <0x016e0000 0x16400>;
+ #interconnect-cells = <2>;
+ clocks = <&gcc_phy_axi_clk>, <&gcc_prim_axi_clk>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
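On the consumer side, each path is specified with two cells per endpoint (port id plus tag). The device node and the MASTER_*/SLAVE_* macro names below are illustrative assumptions; the real identifiers come from include/dt-bindings/interconnect/qcom,sm8750-rpmh.h:

    some-device {
        interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>;
        interconnect-names = "ufs-ddr";
    };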
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
index a2846e493497..7173c4b5a228 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
@@ -110,8 +110,8 @@ properties:
interrupts:
description: Interrupt source of the parent interrupt controller on
- secondary GICs, or VGIC maintenance interrupt on primary GIC (see
- below).
+ secondary GICs, or VGIC maintenance interrupt on primary GIC (see "GICv2
+ with virtualization extensions" paragraph in the "reg" property).
maxItems: 1
cpu-offset:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml
index 73e8b9a39bd7..86516cd44b9d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml
@@ -52,11 +52,11 @@ additionalProperties: false
examples:
- |
interrupt-controller@1e6c0080 {
- compatible = "aspeed,ast2400-vic";
- reg = <0x1e6c0080 0x80>;
- interrupt-controller;
- #interrupt-cells = <1>;
- valid-sources = <0xffffffff 0x0007ffff>;
+ compatible = "aspeed,ast2400-vic";
+ reg = <0x1e6c0080 0x80>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-sources = <0xffffffff 0x0007ffff>;
};
...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml
index 786f2426399b..0fcbe304cd05 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml
@@ -130,23 +130,23 @@ required:
examples:
- |
irq0_intc: interrupt-controller@f0406800 {
- compatible = "brcm,bcm7120-l2-intc";
- interrupt-parent = <&intc>;
- #interrupt-cells = <1>;
- reg = <0xf0406800 0x8>;
- interrupt-controller;
- interrupts = <0x0 0x42 0x0>, <0x0 0x40 0x0>;
- brcm,int-map-mask = <0xeb8>, <0x140>;
- brcm,int-fwd-mask = <0x7>;
+ compatible = "brcm,bcm7120-l2-intc";
+ interrupt-parent = <&intc>;
+ #interrupt-cells = <1>;
+ reg = <0xf0406800 0x8>;
+ interrupt-controller;
+ interrupts = <0x0 0x42 0x0>, <0x0 0x40 0x0>;
+ brcm,int-map-mask = <0xeb8>, <0x140>;
+ brcm,int-fwd-mask = <0x7>;
};
- |
irq1_intc: interrupt-controller@10000020 {
- compatible = "brcm,bcm3380-l2-intc";
- reg = <0x10000024 0x4>, <0x1000002c 0x4>,
- <0x10000020 0x4>, <0x10000028 0x4>;
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&cpu_intc>;
- interrupts = <2>;
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x10000024 0x4>, <0x1000002c 0x4>,
+ <0x10000020 0x4>, <0x10000028 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml b/Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml
index b2adc7174177..dca16e202da9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml
@@ -14,9 +14,8 @@ allOf:
description: |
The Microchip LAN966x outband interrupt controller (OIC) maps the internal
- interrupt sources of the LAN966x device to an external interrupt.
- When the LAN966x device is used as a PCI device, the external interrupt is
- routed to the PCI interrupt.
+ interrupt sources of the LAN966x device to a PCI interrupt when the LAN966x
+ device is used as a PCI device.
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
index a54da66a89e7..f06b40f88778 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
@@ -26,6 +26,8 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs615-pdc
+ - qcom,qcs8300-pdc
- qcom,qdu1000-pdc
- qcom,sa8255p-pdc
- qcom,sa8775p-pdc
@@ -47,6 +49,7 @@ properties:
- qcom,sm8450-pdc
- qcom,sm8550-pdc
- qcom,sm8650-pdc
+ - qcom,sm8750-pdc
- qcom,x1e80100-pdc
- const: qcom,pdc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
index 84976f17a4a1..c23b5c09fdb9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
@@ -142,7 +142,7 @@ examples:
<&cpu2_intc 11>,
<&cpu3_intc 11>,
<&cpu4_intc 11>;
- reg = <0x28000000 0x4000>;
+ reg = <0x24000000 0x4000>;
interrupt-controller;
#interrupt-cells = <0>;
msi-controller;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 7e1451f9786a..3dfe425909d1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -59,6 +59,7 @@ properties:
- enum:
- canaan,k210-plic
- sifive,fu540-c000-plic
+ - spacemit,k1-plic
- starfive,jh7100-plic
- starfive,jh7110-plic
- const: sifive,plic-1.0.0
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt
index 422d6908f8b2..422d6908f8b2 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu.txt
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index c1e11bc6b7a0..032fdc27127b 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -61,6 +61,7 @@ properties:
- qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
+ - qcom,sm8750-smmu-500
- qcom,x1e80100-smmu-500
- const: qcom,smmu-500
- const: arm,mmu-500
@@ -88,6 +89,7 @@ properties:
items:
- enum:
- qcom,qcm2290-smmu-500
+ - qcom,qcs615-smmu-500
- qcom,sa8255p-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sar2130p-smmu-500
@@ -102,6 +104,7 @@ properties:
- qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
+ - qcom,sm8750-smmu-500
- qcom,x1e80100-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
@@ -122,6 +125,7 @@ properties:
- qcom,msm8996-smmu-v2
- qcom,sc7180-smmu-v2
- qcom,sdm630-smmu-v2
+ - qcom,sdm670-smmu-v2
- qcom,sdm845-smmu-v2
- qcom,sm6350-smmu-v2
- qcom,sm7150-smmu-v2
@@ -474,6 +478,7 @@ allOf:
items:
- enum:
- qcom,qcm2290-smmu-500
+ - qcom,qcs615-smmu-500
- qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500
- const: qcom,adreno-smmu
@@ -550,6 +555,23 @@ allOf:
- description: GPU SNoC bus clock
- description: GPU AHB clock
+ - if:
+ properties:
+ compatible:
+ items:
+ - const: qcom,sm8750-smmu-500
+ - const: qcom,adreno-smmu
+ - const: qcom,smmu-500
+ - const: arm,mmu-500
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: hlos
+ clocks:
+ items:
+ - description: HLOS vote clock
+
# Disallow clocks for all other platforms with specific compatibles
- if:
properties:
@@ -559,7 +581,6 @@ allOf:
- cavium,smmu-v2
- marvell,ap806-smmu-500
- nvidia,smmu-500
- - qcom,qcs615-smmu-500
- qcom,qcs8300-smmu-500
- qcom,qdu1000-smmu-500
- qcom,sa8255p-smmu-500
diff --git a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
index f8cebc9e8cd9..5ae9a628261f 100644
--- a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
@@ -21,6 +21,7 @@ properties:
- items:
- enum:
- qcom,msm8916-iommu
+ - qcom,msm8917-iommu
- qcom,msm8953-iommu
- const: qcom,msm-iommu-v1
- items:
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
index 621dde0e45d8..6ce41d11ff5e 100644
--- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
@@ -25,6 +25,7 @@ properties:
- rockchip,rk3568-iommu
- items:
- enum:
+ - rockchip,rk3576-iommu
- rockchip,rk3588-iommu
- const: rockchip,rk3568-iommu
diff --git a/Documentation/devicetree/bindings/mailbox/google,gs101-mbox.yaml b/Documentation/devicetree/bindings/mailbox/google,gs101-mbox.yaml
new file mode 100644
index 000000000000..e249db4c1fbc
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/google,gs101-mbox.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2024 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/google,gs101-mbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos Mailbox Controller
+
+maintainers:
+ - Tudor Ambarus <tudor.ambarus@linaro.org>
+
+description:
+ The Samsung Exynos mailbox controller, used on Google GS101 SoC, has 16 flag
+ bits for hardware interrupt generation and a shared register for passing
+ mailbox messages. When the controller is used by the ACPM interface
+ the shared register is ignored and the mailbox controller acts as a doorbell.
+ The controller just raises the interrupt to the firmware after the
+ ACPM interface has written the message to SRAM.
+
+properties:
+ compatible:
+ const: google,gs101-mbox
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+
+ interrupts:
+ description: IRQ line for the RX mailbox.
+ maxItems: 1
+
+ '#mbox-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/google,gs101.h>
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ap2apm_mailbox: mailbox@17610000 {
+ compatible = "google,gs101-mbox";
+ reg = <0x17610000 0x1000>;
+ clocks = <&cmu_apm CLK_GOUT_APM_MAILBOX_APM_AP_PCLK>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH 0>;
+ #mbox-cells = <0>;
+ };
+ };
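Because #mbox-cells is 0, a client such as the ACPM interface referenced above points at the doorbell with a bare phandle; the client node and its compatible string in this sketch are assumptions:

    firmware {
        power-management {
            compatible = "google,gs101-acpm-ipc";
            mboxes = <&ap2apm_mailbox>;
        };
    };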
diff --git a/Documentation/devicetree/bindings/mailbox/microchip,sbi-ipc.yaml b/Documentation/devicetree/bindings/mailbox/microchip,sbi-ipc.yaml
new file mode 100644
index 000000000000..8ed67ea7c883
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/microchip,sbi-ipc.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/microchip,sbi-ipc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Inter-processor communication (IPC) mailbox controller
+
+maintainers:
+ - Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+
+description:
+ The Microchip Inter-processor Communication (IPC) facilitates
+ message passing between processors using an interrupt signaling
+ mechanism.
+
+properties:
+ compatible:
+ oneOf:
+ - description:
+ Intended for use by software running in supervisor privileged
+ mode (s-mode). This SBI interface is compatible with the Mi-V
+ Inter-hart Communication (IHC) IP.
+ const: microchip,sbi-ipc
+
+ - description:
+ Intended for use by the SBI implementation in machine mode
+ (m-mode), this compatible string is for the MIV_IHC Soft-IP.
+ const: microchip,miv-ihc-rtl-v2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 5
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 5
+ items:
+ enum:
+ - hart-0
+ - hart-1
+ - hart-2
+ - hart-3
+ - hart-4
+ - hart-5
+
+ "#mbox-cells":
+ description: >
+ For "microchip,sbi-ipc", the cell represents the global "logical"
+ channel IDs. The meaning of channel IDs are platform firmware dependent.
+
+ For "microchip,miv-ihc-rtl-v2", the cell represents the physical
+ channel and does not vary based on the platform firmware.
+ const: 1
+
+ microchip,ihc-chan-disabled-mask:
+ description: >
+ Represents the enable/disable state of the bi-directional IHC
+ channels within the MIV-IHC IP configuration.
+
+ A bit set to '1' indicates that the corresponding channel is disabled,
+ and any read or write operations to that channel will return zero.
+
+ A bit set to '0' indicates that the corresponding channel is enabled
+ and will be accessible through its dedicated address range registers.
+
+ The actual enable/disable state of each channel is determined by the
+ IP block’s configuration.
+ $ref: /schemas/types.yaml#/definitions/uint16
+ maximum: 0x7fff
+ default: 0
+
+required:
+ - compatible
+ - interrupts
+ - interrupt-names
+ - "#mbox-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,sbi-ipc
+ then:
+ properties:
+ reg:
+ not: {}
+ description:
+ The 'microchip,sbi-ipc' operates in a programming model
+ that does not require memory-mapped I/O (MMIO) registers
+ since it uses SBI ecalls provided by the m-mode/firmware
+ SBI implementation to access hardware registers.
+ microchip,ihc-chan-disabled-mask: false
+ else:
+ required:
+ - reg
+ - microchip,ihc-chan-disabled-mask
+
+additionalProperties: false
+
+examples:
+ - |
+ mailbox {
+ compatible = "microchip,sbi-ipc";
+ interrupt-parent = <&plic>;
+ interrupts = <180>, <179>, <178>;
+ interrupt-names = "hart-1", "hart-2", "hart-3";
+ #mbox-cells = <1>;
+ };
+ - |
+ mailbox@50000000 {
+ compatible = "microchip,miv-ihc-rtl-v2";
+ microchip,ihc-chan-disabled-mask = /bits/ 16 <0>;
+ reg = <0x50000000 0x1c000>;
+ interrupt-parent = <&plic>;
+ interrupts = <180>, <179>, <178>;
+ interrupt-names = "hart-1", "hart-2", "hart-3";
+ #mbox-cells = <1>;
+ };
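A consumer selects a channel through the single specifier cell; for the SBI flavour the value is a firmware-defined logical channel ID, while for the raw MIV_IHC it addresses the physical channel. The client node below and the ihc label on the mailbox are illustrative only:

    ipc-client {
        /* logical channel 2 is a placeholder value */
        mboxes = <&ihc 2>;
        mbox-names = "ipc";
    };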
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index 9d2dfd85b207..78f68dacd028 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- qcom,ipq5018-apcs-apps-global
- qcom,ipq5332-apcs-apps-global
+ - qcom,ipq5424-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,ipq9574-apcs-apps-global
- const: qcom,ipq6018-apcs-apps-global
@@ -44,6 +45,7 @@ properties:
- const: qcom,msm8994-apcs-kpss-global
- items:
- enum:
+ - qcom,qcs615-apss-shared
- qcom,sc7180-apss-shared
- qcom,sc8180x-apss-shared
- qcom,sm8150-apss-shared
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml b/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
index a4f06bbdfe49..8ba5177ac631 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/allwinner,sun50i-h6-vpu-g2.yaml#
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson-ir-tx.yaml b/Documentation/devicetree/bindings/media/amlogic,meson-ir-tx.yaml
index 377acce93423..6da8a6aded23 100644
--- a/Documentation/devicetree/bindings/media/amlogic,meson-ir-tx.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-ir-tx.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/amlogic,meson-ir-tx.yaml#
diff --git a/Documentation/devicetree/bindings/media/amphion,vpu.yaml b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
index 9801de3ed84e..5a920d9e78c7 100644
--- a/Documentation/devicetree/bindings/media/amphion,vpu.yaml
+++ b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/amphion,vpu.yaml#
diff --git a/Documentation/devicetree/bindings/media/fsl,imx6ull-pxp.yaml b/Documentation/devicetree/bindings/media/fsl,imx6ull-pxp.yaml
index 84a5e894ace4..3f47744459aa 100644
--- a/Documentation/devicetree/bindings/media/fsl,imx6ull-pxp.yaml
+++ b/Documentation/devicetree/bindings/media/fsl,imx6ull-pxp.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/fsl,imx6ull-pxp.yaml#
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
index bf05ca48601a..fa69bd21c8da 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx290.yaml
@@ -33,6 +33,8 @@ properties:
- sony,imx290lqr # Colour
- sony,imx290llr # Monochrome
- sony,imx327lqr # Colour
+ - sony,imx462lqr # Colour
+ - sony,imx462llr # Monochrome
- const: sony,imx290
deprecated: true
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
index b401c67e3ba0..d726d141a434 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/mediatek,vcodec-decoder.yaml#
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
index b45743d0a9ec..110e8f5f1f9e 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/mediatek,vcodec-encoder.yaml#
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
index a500a585c692..5865e6f0be89 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/mediatek,vcodec-subdev-decoder.yaml#
diff --git a/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml b/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml
index 59b805ca47c5..ede086d55add 100644
--- a/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/microchip,sama5d4-vdec.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/microchip,sama5d4-vdec.yaml#
@@ -36,12 +35,12 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/at91.h>
- #include <dt-bindings/interrupt-controller/irq.h>
-
- vdec0: vdec@300000 {
- compatible = "microchip,sama5d4-vdec";
- reg = <0x00300000 0x100000>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
- };
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ vdec@300000 {
+ compatible = "microchip,sama5d4-vdec";
+ reg = <0x00300000 0x100000>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ };
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
index 4d5348d456a1..f43b91984f01 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
@@ -21,6 +21,7 @@ properties:
enum:
- fsl,imx8mn-isi
- fsl,imx8mp-isi
+ - fsl,imx8ulp-isi
- fsl,imx93-isi
reg:
@@ -75,6 +76,7 @@ allOf:
contains:
enum:
- fsl,imx8mn-isi
+ - fsl,imx8ulp-isi
- fsl,imx93-isi
then:
properties:
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml b/Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml
index 3d58f02b0c5d..19528262810a 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/nxp,imx8mq-vpu.yaml#
@@ -44,26 +43,26 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/imx8mq-clock.h>
- #include <dt-bindings/power/imx8mq-power.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx8mq-clock.h>
+ #include <dt-bindings/power/imx8mq-power.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
- vpu_g1: video-codec@38300000 {
- compatible = "nxp,imx8mq-vpu-g1";
- reg = <0x38300000 0x10000>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_VPU_G1_ROOT>;
- power-domains = <&vpu_blk_ctrl IMX8MQ_VPUBLK_PD_G1>;
- };
+ video-codec@38300000 {
+ compatible = "nxp,imx8mq-vpu-g1";
+ reg = <0x38300000 0x10000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_VPU_G1_ROOT>;
+ power-domains = <&vpu_blk_ctrl IMX8MQ_VPUBLK_PD_G1>;
+ };
- |
- #include <dt-bindings/clock/imx8mq-clock.h>
- #include <dt-bindings/power/imx8mq-power.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx8mq-clock.h>
+ #include <dt-bindings/power/imx8mq-power.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
- vpu_g2: video-codec@38300000 {
- compatible = "nxp,imx8mq-vpu-g2";
- reg = <0x38310000 0x10000>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_VPU_G2_ROOT>;
- power-domains = <&vpu_blk_ctrl IMX8MQ_VPUBLK_PD_G2>;
- };
+ video-codec@38300000 {
+ compatible = "nxp,imx8mq-vpu-g2";
+ reg = <0x38310000 0x10000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_VPU_G2_ROOT>;
+ power-domains = <&vpu_blk_ctrl IMX8MQ_VPUBLK_PD_G2>;
+ };
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
index 9cc0a968a401..3469a43f00d4 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8916-camss.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/qcom,msm8916-camss.yaml#
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8916-venus.yaml b/Documentation/devicetree/bindings/media/qcom,msm8916-venus.yaml
index 9410f13ca97c..da140c2e3d3f 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8916-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8916-venus.yaml
@@ -45,6 +45,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
video-encoder:
@@ -57,13 +58,12 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
required:
- compatible
- iommus
- - video-decoder
- - video-encoder
unevaluatedProperties: false
@@ -83,12 +83,4 @@ examples:
power-domains = <&gcc VENUS_GDSC>;
iommus = <&apps_iommu 5>;
memory-region = <&venus_mem>;
-
- video-decoder {
- compatible = "venus-decoder";
- };
-
- video-encoder {
- compatible = "venus-encoder";
- };
};
diff --git a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
index 5cb0e337ea6e..644646de338a 100644
--- a/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,msm8996-camss.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/qcom,msm8996-camss.yaml#
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
index 5cec1d077cda..83c4a5d95f02 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
@@ -70,6 +70,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
video-encoder:
@@ -82,14 +83,13 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
required:
- compatible
- power-domain-names
- iommus
- - video-decoder
- - video-encoder
unevaluatedProperties: false
@@ -114,12 +114,4 @@ examples:
"vcodec0_core", "vcodec0_bus";
iommus = <&apps_smmu 0x0c00 0x60>;
memory-region = <&venus_mem>;
-
- video-decoder {
- compatible = "venus-decoder";
- };
-
- video-encoder {
- compatible = "venus-encoder";
- };
};
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml
new file mode 100644
index 000000000000..e11141b812a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml
@@ -0,0 +1,425 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,sc7280-camss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC7280 CAMSS ISP
+
+maintainers:
+ - Azam Sadiq Pasha Kapatrala Syed <akapatra@quicinc.com>
+ - Hariram Purushothaman <hariramp@quicinc.com>
+
+description:
+ The CAMSS IP is a CSI decoder and ISP present on Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,sc7280-camss
+
+ reg:
+ maxItems: 15
+
+ reg-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy3
+ - const: csiphy4
+ - const: vfe0
+ - const: vfe1
+ - const: vfe2
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ clocks:
+ maxItems: 33
+
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: cpas_ahb
+ - const: csiphy0
+ - const: csiphy0_timer
+ - const: csiphy1
+ - const: csiphy1_timer
+ - const: csiphy2
+ - const: csiphy2_timer
+ - const: csiphy3
+ - const: csiphy3_timer
+ - const: csiphy4
+ - const: csiphy4_timer
+ - const: gcc_camera_ahb
+ - const: gcc_cam_hf_axi
+ - const: icp_ahb
+ - const: vfe0
+ - const: vfe0_axi
+ - const: vfe0_cphy_rx
+ - const: vfe0_csid
+ - const: vfe1
+ - const: vfe1_axi
+ - const: vfe1_cphy_rx
+ - const: vfe1_csid
+ - const: vfe2
+ - const: vfe2_axi
+ - const: vfe2_cphy_rx
+ - const: vfe2_csid
+ - const: vfe_lite0
+ - const: vfe_lite0_cphy_rx
+ - const: vfe_lite0_csid
+ - const: vfe_lite1
+ - const: vfe_lite1_cphy_rx
+ - const: vfe_lite1_csid
+
+ interrupts:
+ maxItems: 15
+
+ interrupt-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy3
+ - const: csiphy4
+ - const: vfe0
+ - const: vfe1
+ - const: vfe2
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: ahb
+ - const: hf_0
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ items:
+ - description: IFE0 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE1 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE2 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: Titan GDSC - Titan ISP Block, Global Distributed Switch Controller.
+
+ power-domain-names:
+ items:
+ - const: ife0
+ - const: ife1
+ - const: ife2
+ - const: top
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ description:
+ CSI input ports.
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSIPHY 0.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSIPHY 1.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSIPHY 2.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ port@3:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSIPHY 3.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+ port@4:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSIPHY 4.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - interconnects
+ - interconnect-names
+ - iommus
+ - power-domains
+ - power-domain-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,camcc-sc7280.h>
+ #include <dt-bindings/clock/qcom,gcc-sc7280.h>
+ #include <dt-bindings/interconnect/qcom,sc7280.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isp@acb3000 {
+ compatible = "qcom,sc7280-camss";
+
+ reg = <0x0 0x0acb3000 0x0 0x1000>,
+ <0x0 0x0acba000 0x0 0x1000>,
+ <0x0 0x0acc1000 0x0 0x1000>,
+ <0x0 0x0acc8000 0x0 0x1000>,
+ <0x0 0x0accf000 0x0 0x1000>,
+ <0x0 0x0ace0000 0x0 0x2000>,
+ <0x0 0x0ace2000 0x0 0x2000>,
+ <0x0 0x0ace4000 0x0 0x2000>,
+ <0x0 0x0ace6000 0x0 0x2000>,
+ <0x0 0x0ace8000 0x0 0x2000>,
+ <0x0 0x0acaf000 0x0 0x4000>,
+ <0x0 0x0acb6000 0x0 0x4000>,
+ <0x0 0x0acbd000 0x0 0x4000>,
+ <0x0 0x0acc4000 0x0 0x4000>,
+ <0x0 0x0accb000 0x0 0x4000>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY3_CLK>,
+ <&camcc CAM_CC_CSI3PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY4_CLK>,
+ <&camcc CAM_CC_CSI4PHYTIMER_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>,
+ <&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&camcc CAM_CC_ICP_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_AXI_CLK>,
+ <&camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&camcc CAM_CC_IFE_2_CLK>,
+ <&camcc CAM_CC_IFE_2_AXI_CLK>,
+ <&camcc CAM_CC_IFE_2_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_2_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_1_CSID_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy3",
+ "csiphy3_timer",
+ "csiphy4",
+ "csiphy4_timer",
+ "gcc_camera_ahb",
+ "gcc_cam_hf_axi",
+ "icp_ahb",
+ "vfe0",
+ "vfe0_axi",
+ "vfe0_cphy_rx",
+ "vfe0_csid",
+ "vfe1",
+ "vfe1_axi",
+ "vfe1_cphy_rx",
+ "vfe1_csid",
+ "vfe2",
+ "vfe2_axi",
+ "vfe2_cphy_rx",
+ "vfe2_csid",
+ "vfe_lite0",
+ "vfe_lite0_cphy_rx",
+ "vfe_lite0_csid",
+ "vfe_lite1",
+ "vfe_lite1_cphy_rx",
+ "vfe_lite1_csid";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &cnoc2 SLAVE_CAMERA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_CAMNOC_HF QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "ahb",
+ "hf_0";
+
+ iommus = <&apps_smmu 0x800 0x4e0>;
+
+ power-domains = <&camcc CAM_CC_IFE_0_GDSC>,
+ <&camcc CAM_CC_IFE_1_GDSC>,
+ <&camcc CAM_CC_IFE_2_GDSC>,
+ <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ power-domain-names = "ife0",
+ "ife1",
+ "ife2",
+ "top";
+
+ vdda-phy-supply = <&vreg_l10c_0p88>;
+ vdda-pll-supply = <&vreg_l6b_1p2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
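Editor's note: the ports node in the example above is left empty. A hedged sketch of how one CSIPHY input could be populated, consistent with the port@0 schema above, is shown below; the sensor endpoint label is hypothetical.

    port@0 {
        reg = <0>;
        endpoint {
            data-lanes = <1 2 3 4>;                /* one to four lanes per the schema */
            remote-endpoint = <&cam_sensor_out>;   /* hypothetical sensor endpoint */
        };
    };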
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
index 10c334e6b3dc..413c5b4ee650 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
@@ -68,6 +68,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
video-encoder:
@@ -80,14 +81,13 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
required:
- compatible
- power-domain-names
- iommus
- - video-decoder
- - video-encoder
unevaluatedProperties: false
@@ -125,14 +125,6 @@ examples:
memory-region = <&video_mem>;
- video-decoder {
- compatible = "venus-decoder";
- };
-
- video-encoder {
- compatible = "venus-encoder";
- };
-
video-firmware {
iommus = <&apps_smmu 0x21a2 0x0>;
};
diff --git a/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
index c0bc31709873..9936f0132417 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc8280xp-camss.yaml
@@ -328,26 +328,26 @@ examples:
vdda-phy-supply = <&vreg_l6d>;
vdda-pll-supply = <&vreg_l4d>;
- interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 758 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 759 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 760 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 761 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 762 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 764 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csid1_lite",
"vfe_lite1",
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
index 584106e275f6..68d8670557f5 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm660-camss.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/qcom,sdm660-camss.yaml#
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
index ec4380a0a03f..289494f561e5 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-camss.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/qcom,sdm845-camss.yaml#
@@ -296,16 +295,16 @@ examples:
"vfe_lite_cphy_rx",
"vfe_lite_src";
- interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csid0",
"csid1",
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
index 6228fd2b3246..c839cb1ebc09 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
@@ -70,6 +70,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
video-core1:
@@ -82,14 +83,13 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
required:
- compatible
- power-domain-names
- iommus
- - video-core0
- - video-core1
unevaluatedProperties: false
@@ -119,12 +119,4 @@ examples:
iommus = <&apps_smmu 0x10a0 0x8>,
<&apps_smmu 0x10b0 0x0>;
memory-region = <&venus_mem>;
-
- video-core0 {
- compatible = "venus-decoder";
- };
-
- video-core1 {
- compatible = "venus-encoder";
- };
};
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
index fa5073c0fd1e..a372d991e652 100644
--- a/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sm8250-camss.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/qcom,sm8250-camss.yaml#
@@ -329,20 +328,20 @@ examples:
vdda-phy-supply = <&vreg_l5a_0p88>;
vdda-pll-supply = <&vreg_l9a_1p2>;
- interrupts = <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csiphy0",
"csiphy1",
"csiphy2",
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
index f66033ae8b59..da54493220c9 100644
--- a/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
@@ -73,6 +73,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
video-encoder:
@@ -85,6 +86,7 @@ properties:
required:
- compatible
+ deprecated: true
additionalProperties: false
required:
@@ -95,8 +97,6 @@ required:
- iommus
- resets
- reset-names
- - video-decoder
- - video-encoder
unevaluatedProperties: false
@@ -132,12 +132,4 @@ examples:
resets = <&gcc GCC_VIDEO_AXI0_CLK_ARES>,
<&videocc VIDEO_CC_MVS0C_CLK_ARES>;
reset-names = "bus", "core";
-
- video-decoder {
- compatible = "venus-decoder";
- };
-
- video-encoder {
- compatible = "venus-encoder";
- };
};
diff --git a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
index 947ad699cc5e..d246f5d38427 100644
--- a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/rockchip,rk3568-vepu.yaml#
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index 719aeb2dc593..8c2501634080 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/rockchip-vpu.yaml#
@@ -92,18 +91,18 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/rk3288-cru.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/power/rk3288-power.h>
-
- vpu: video-codec@ff9a0000 {
- compatible = "rockchip,rk3288-vpu";
- reg = <0xff9a0000 0x800>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vepu", "vdpu";
- clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
- clock-names = "aclk", "hclk";
- power-domains = <&power RK3288_PD_VIDEO>;
- iommus = <&vpu_mmu>;
- };
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3288-power.h>
+
+ video-codec@ff9a0000 {
+ compatible = "rockchip,rk3288-vpu";
+ reg = <0xff9a0000 0x800>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "hclk";
+ power-domains = <&power RK3288_PD_VIDEO>;
+ iommus = <&vpu_mmu>;
+ };
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml b/Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml
index 87731f3ce7bd..7b03a77adbce 100644
--- a/Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml
+++ b/Documentation/devicetree/bindings/media/st,stm32-dcmipp.yaml
@@ -12,7 +12,9 @@ maintainers:
properties:
compatible:
- const: st,stm32mp13-dcmipp
+ enum:
+ - st,stm32mp13-dcmipp
+ - st,stm32mp25-dcmipp
reg:
maxItems: 1
@@ -21,11 +23,24 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
+ items:
+ - description: bus clock
+ - description: csi clock
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: kclk
+ - const: mclk
+ minItems: 1
resets:
maxItems: 1
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
@@ -39,7 +54,7 @@ properties:
properties:
bus-type:
- enum: [5, 6]
+ enum: [4, 5, 6]
default: 5
bus-width:
@@ -50,9 +65,6 @@ properties:
hsync-active: true
vsync-active: true
- required:
- - pclk-sample
-
required:
- compatible
- reg
@@ -61,6 +73,35 @@ required:
- resets
- port
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32mp13-dcmipp
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+ port:
+ properties:
+ endpoint:
+ properties:
+ bus-type:
+ enum: [5, 6]
+ else:
+ properties:
+ clocks:
+ minItems: 2
+
+ clock-names:
+ minItems: 2
+
additionalProperties: false
examples:
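Editor's note: under the conditional above, an STM32MP25 instance carries both the bus and CSI kernel clocks and may use the parallel bus-type 4. A minimal hedged sketch follows; the unit address, interrupt number and clock/reset specifiers are placeholders, not values taken from this binding.

    dcmipp@4c002000 {                          /* placeholder unit address */
        compatible = "st,stm32mp25-dcmipp";
        reg = <0x4c002000 0x1000>;
        interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;     /* placeholder SPI number */
        resets = <&rcc DCMIPP_R>;                           /* placeholder reset specifier */
        clocks = <&rcc CK_KER_DCMIPP>, <&rcc CK_KER_CSI>;   /* placeholder clock specifiers */
        clock-names = "kclk", "mclk";          /* two clocks: required for stm32mp25 */

        port {
            endpoint {
                remote-endpoint = <&csi_out>;  /* hypothetical CSI output endpoint */
                bus-type = <4>;                /* only valid on stm32mp25 per the allOf above */
            };
        };
    };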
diff --git a/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml b/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
new file mode 100644
index 000000000000..33bedfe41924
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/st,stm32mp25-csi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 CSI controller
+
+description:
+ The STM32 CSI controller allows connecting a CSI-based
+ camera to the DCMIPP camera pipeline.
+
+maintainers:
+ - Alain Volmat <alain.volmat@foss.st.com>
+
+properties:
+ compatible:
+ enum:
+ - st,stm32mp25-csi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: txesc
+ - const: csi2phy
+
+ resets:
+ maxItems: 1
+
+ vdd-supply:
+ description: Digital core power supply (0.91V)
+
+ vdda18-supply:
+ description: System analog power supply (1.8V)
+
+ access-controllers:
+ minItems: 1
+ maxItems: 2
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port node
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ items:
+ - const: 1
+ - const: 2
+
+ required:
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Output port node
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/media/video-interfaces.h>
+ #include <dt-bindings/reset/st,stm32mp25-rcc.h>
+ csi@48020000 {
+ compatible = "st,stm32mp25-csi";
+ reg = <0x48020000 0x2000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc CSI_R>;
+ clocks = <&rcc CK_KER_CSI>, <&rcc CK_KER_CSITXESC>, <&rcc CK_KER_CSIPHY>;
+ clock-names = "pclk", "txesc", "csi2phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&imx335_ep>;
+ data-lanes = <1 2>;
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&dcmipp_0>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.yaml b/Documentation/devicetree/bindings/media/video-interfaces.yaml
index 26e3e7d7c67b..038e85b45bef 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.yaml
+++ b/Documentation/devicetree/bindings/media/video-interfaces.yaml
@@ -210,6 +210,27 @@ properties:
lane-polarities property is omitted, the value must be interpreted as 0
(normal). This property is valid for serial busses only.
+ line-orders:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ items:
+ enum:
+ - 0 # ABC
+ - 1 # ACB
+ - 2 # BAC
+ - 3 # BCA
+ - 4 # CAB
+ - 5 # CBA
+ description:
+ An array of line orders of the CSI-2 C-PHY data lanes. The order of the
+ lanes is the same as in the data-lanes property. Valid values are 0-5, as
+ defined in the MIPI Discovery and Configuration (DisCo) Specification for
+ Imaging. The array must have the same length as the data-lanes property.
+ If the line-orders property is omitted, the value shall be interpreted as
+ 0 (ABC). This property is valid for CSI-2 C-PHY busses only.
+
strobe:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 0, 1 ]
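Editor's note: a hedged endpoint sketch illustrating the new line-orders property on a three-trio C-PHY link; the sensor endpoint label is hypothetical and the values are illustrative only.

    endpoint {
        remote-endpoint = <&sensor_out>;   /* hypothetical sensor endpoint */
        data-lanes = <1 2 3>;              /* three C-PHY trios */
        line-orders = <0 5 0>;             /* ABC, CBA, ABC (one entry per data-lanes entry) */
    };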
diff --git a/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml b/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
index 0c511ab906bf..8b937f90a1fb 100644
--- a/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
@@ -52,10 +52,3 @@ examples:
#qca,ddr-wb-channel-cells = <1>;
};
-
- interrupt-controller {
- // ...
- qca,ddr-wb-channel-interrupts = <2>, <3>, <4>, <5>;
- qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>,
- <&ddr_ctrl 0>, <&ddr_ctrl 1>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt6397.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt6397.yaml
index 86451f151a6a..6a89b479d10f 100644
--- a/Documentation/devicetree/bindings/mfd/mediatek,mt6397.yaml
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt6397.yaml
@@ -36,6 +36,7 @@ properties:
- enum:
- mediatek,mt6323
- mediatek,mt6331 # "mediatek,mt6331" for PMIC MT6331 and MT6332.
+ - mediatek,mt6328
- mediatek,mt6358
- mediatek,mt6359
- mediatek,mt6397
@@ -88,6 +89,7 @@ properties:
oneOf:
- enum:
- mediatek,mt6323-regulator
+ - mediatek,mt6328-regulator
- mediatek,mt6358-regulator
- mediatek,mt6359-regulator
- mediatek,mt6397-regulator
diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
deleted file mode 100644
index d2eada5044b2..000000000000
--- a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Cadence NAND controller
-
-Required properties:
- - compatible : "cdns,hp-nfc"
- - reg : Contains two entries, each of which is a tuple consisting of a
- physical address and length. The first entry is the address and
- length of the controller register set. The second entry is the
- address and length of the Slave DMA data port.
- - reg-names: should contain "reg" and "sdma"
- - #address-cells: should be 1. The cell encodes the chip select connection.
- - #size-cells : should be 0.
- - interrupts : The interrupt number.
- - clocks: phandle of the controller core clock (nf_clk).
-
-Optional properties:
- - dmas: shall reference DMA channel associated to the NAND controller
- - cdns,board-delay-ps : Estimated Board delay. The value includes the total
- round trip delay for the signals and is used for deciding on values
- associated with data read capture. The example formula for SDR mode is
- the following:
- board delay = RE#PAD delay + PCB trace to device + PCB trace from device
- + DQ PAD delay
-
-Child nodes represent the available NAND chips.
-
-Required properties of NAND chips:
- - reg: shall contain the native Chip Select ids from 0 to max supported by
- the cadence nand flash controller
-
-See Documentation/devicetree/bindings/mtd/nand-controller.yaml for more details on
-generic bindings.
-
-Example:
-
-nand_controller: nand-controller@60000000 {
- compatible = "cdns,hp-nfc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x60000000 0x10000>, <0x80000000 0x10000>;
- reg-names = "reg", "sdma";
- clocks = <&nf_clk>;
- cdns,board-delay-ps = <4830>;
- interrupts = <2 0>;
- nand@0 {
- reg = <0>;
- label = "nand-1";
- };
- nand@1 {
- reg = <1>;
- label = "nand-2";
- };
-
-};
diff --git a/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml b/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml
new file mode 100644
index 000000000000..0bed37a994c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/cdns,hp-nfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence NAND controller
+
+maintainers:
+ - Niravkumar L Rabara <niravkumar.l.rabara@intel.com>
+
+allOf:
+ - $ref: nand-controller.yaml
+
+properties:
+ compatible:
+ items:
+ - const: cdns,hp-nfc
+
+ reg:
+ items:
+ - description: Controller register set
+ - description: Slave DMA data port register set
+
+ reg-names:
+ items:
+ - const: reg
+ - const: sdma
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ cdns,board-delay-ps:
+ description: |
+ Estimated board delay. The value includes the total round-trip
+ delay for the signals and is used for deciding on values associated
+ with data read capture. The example formula for SDR mode is the
+ following.
+ board delay = RE#PAD delay + PCB trace to device + PCB trace from device
+ + DQ PAD delay
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ nand-controller@10b80000 {
+ compatible = "cdns,hp-nfc";
+ reg = <0x10b80000 0x10000>,
+ <0x10840000 0x10000>;
+ reg-names = "reg", "sdma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&nf_clk>;
+ cdns,board-delay-ps = <4830>;
+
+ nand@0 {
+ reg = <0>;
+ };
+ };
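Editor's note: the cdns,board-delay-ps value is simply the sum of the contributions listed in the formula above. As a hedged illustration, the example's 4830 ps could be decomposed as follows; all of the individual numbers are hypothetical, not measured values.

    board delay = RE#PAD delay + PCB trace to device + PCB trace from device + DQ PAD delay
                =    1100 ps   +      1300 ps        +        1300 ps        +   1130 ps
                =    4830 ps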
diff --git a/Documentation/devicetree/bindings/mtd/davinci-nand.txt b/Documentation/devicetree/bindings/mtd/davinci-nand.txt
deleted file mode 100644
index eb8e2ff4dbd2..000000000000
--- a/Documentation/devicetree/bindings/mtd/davinci-nand.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-Device tree bindings for Texas instruments Davinci/Keystone NAND controller
-
-This file provides information, what the device node for the davinci/keystone
-NAND interface contains.
-
-Documentation:
-Davinci DM646x - https://www.ti.com/lit/ug/sprueq7c/sprueq7c.pdf
-Kestone - https://www.ti.com/lit/ug/sprugz3a/sprugz3a.pdf
-
-Required properties:
-
-- compatible: "ti,davinci-nand"
- "ti,keystone-nand"
-
-- reg: Contains 2 offset/length values:
- - offset and length for the access window.
- - offset and length for accessing the AEMIF
- control registers.
-
-- ti,davinci-chipselect: number of chipselect. Indicates on the
- davinci_nand driver which chipselect is used
- for accessing the nand.
- Can be in the range [0-3].
-
-Recommended properties :
-
-- ti,davinci-mask-ale: mask for ALE. Needed for executing address
- phase. These offset will be added to the base
- address for the chip select space the NAND Flash
- device is connected to.
- If not set equal to 0x08.
-
-- ti,davinci-mask-cle: mask for CLE. Needed for executing command
- phase. These offset will be added to the base
- address for the chip select space the NAND Flash
- device is connected to.
- If not set equal to 0x10.
-
-- ti,davinci-mask-chipsel: mask for chipselect address. Needed to mask
- addresses for given chipselect.
-
-- nand-ecc-mode: operation mode of the NAND ecc mode. ECC mode
- valid values for davinci driver:
- - "none"
- - "soft"
- - "hw"
-
-- ti,davinci-ecc-bits: used ECC bits, currently supported 1 or 4.
-
-- nand-bus-width: buswidth 8 or 16. If not present 8.
-
-- nand-on-flash-bbt: use flash based bad block table support. OOB
- identifier is saved in OOB area. If not present
- false.
-
-Deprecated properties:
-
-- ti,davinci-ecc-mode: operation mode of the NAND ecc mode. ECC mode
- valid values for davinci driver:
- - "none"
- - "soft"
- - "hw"
-
-- ti,davinci-nand-buswidth: buswidth 8 or 16. If not present 8.
-
-- ti,davinci-nand-use-bbt: use flash based bad block table support. OOB
- identifier is saved in OOB area. If not present
- false.
-
-Nand device bindings may contain additional sub-nodes describing partitions of
-the address space. See mtd.yaml for more detail. The NAND Flash timing
-values must be programmed in the chip select’s node of AEMIF
-memory-controller (see Documentation/devicetree/bindings/memory-controllers/
-davinci-aemif.txt).
-
-Example(da850 EVM ):
-
-nand_cs3@62000000 {
- compatible = "ti,davinci-nand";
- reg = <0x62000000 0x807ff
- 0x68000000 0x8000>;
- ti,davinci-chipselect = <1>;
- ti,davinci-mask-ale = <0>;
- ti,davinci-mask-cle = <0>;
- ti,davinci-mask-chipsel = <0>;
- nand-ecc-mode = "hw";
- ti,davinci-ecc-bits = <4>;
- nand-on-flash-bbt;
-
- partition@180000 {
- label = "ubifs";
- reg = <0x180000 0x7e80000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
index 6e3afb42926e..335f8204aa1e 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
@@ -96,6 +96,10 @@ properties:
If "broken-flash-reset" is present then having this property does not
make any difference.
+ vcc-supply:
+ description:
+ Power supply for the SPI NOR flash.
+
spi-cpol: true
spi-cpha: true
diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
index 0ff32bd00bf6..5c6b628c608d 100644
--- a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
+++ b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
@@ -16,8 +16,9 @@ description: |
properties:
compatible:
- items:
- - const: microchip,48l640
+ enum:
+ - fujitsu,mb85rs128ty
+ - microchip,48l640
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mtd/nuvoton,ma35d1-nand.yaml b/Documentation/devicetree/bindings/mtd/nuvoton,ma35d1-nand.yaml
new file mode 100644
index 000000000000..0b651450a8f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nuvoton,ma35d1-nand.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/nuvoton,ma35d1-nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton MA35D1 NAND Flash Interface (NFI) Controller
+
+maintainers:
+ - Hui-Ping Chen <hpchen0nvt@gmail.com>
+
+allOf:
+ - $ref: nand-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,ma35d1-nand-controller
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ $ref: raw-nand-chip.yaml
+ properties:
+ reg:
+ minimum: 0
+ maximum: 1
+
+ nand-ecc-step-size:
+ enum: [512, 1024]
+
+ nand-ecc-strength:
+ enum: [8, 12, 24]
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ nand-controller@401A0000 {
+ compatible = "nuvoton,ma35d1-nand-controller";
+ reg = <0x0 0x401A0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NAND_GATE>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ nand-on-flash-bbt;
+ nand-ecc-step-size = <512>;
+ nand-ecc-strength = <8>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ uboot@0 {
+ label = "nand-uboot";
+ read-only;
+ reg = <0x0 0x300000>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mtd/ti,davinci-nand.yaml b/Documentation/devicetree/bindings/mtd/ti,davinci-nand.yaml
new file mode 100644
index 000000000000..ed24b0ea86e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/ti,davinci-nand.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/ti,davinci-nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI DaVinci NAND controller
+
+maintainers:
+ - Marcus Folkesson <marcus.folkesson@gmail.com>
+
+allOf:
+ - $ref: nand-controller.yaml
+
+properties:
+ compatible:
+ enum:
+ - ti,davinci-nand
+ - ti,keystone-nand
+
+ reg:
+ items:
+ - description: Access window.
+ - description: AEMIF control registers.
+
+ partitions:
+ $ref: /schemas/mtd/partitions/partitions.yaml
+
+ ti,davinci-chipselect:
+ description:
+ Number of the chipselect. Indicates to the davinci_nand driver which
+ chipselect is used for accessing the NAND.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ ti,davinci-mask-ale:
+ description:
+ Mask for ALE. Needed for executing the address phase. This offset will be
+ added to the base address of the chip select space the NAND Flash
+ device is connected to.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0x08
+
+ ti,davinci-mask-cle:
+ description:
+ Mask for CLE. Needed for executing the command phase. This offset will be
+ added to the base address of the chip select space the NAND Flash device
+ is connected to.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0x10
+
+ ti,davinci-mask-chipsel:
+ description:
+ Mask for the chipselect address. Needed to mask addresses for the given
+ chipselect.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ ti,davinci-ecc-bits:
+ description: Used ECC bits.
+ enum: [1, 4]
+
+ ti,davinci-ecc-mode:
+ description: Operation mode of the NAND ECC mode.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [none, soft, hw, on-die]
+ deprecated: true
+
+ ti,davinci-nand-buswidth:
+ description: Bus width to the NAND chip.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16]
+ default: 8
+ deprecated: true
+
+ ti,davinci-nand-use-bbt:
+ type: boolean
+ description:
+ Use flash based bad block table support. OOB identifier is saved in OOB
+ area.
+ deprecated: true
+
+required:
+ - compatible
+ - reg
+ - ti,davinci-chipselect
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ nand-controller@2000000,0 {
+ compatible = "ti,davinci-nand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x02000000 0x02000000>,
+ <1 0x00000000 0x00008000>;
+
+ ti,davinci-chipselect = <1>;
+ ti,davinci-mask-ale = <0>;
+ ti,davinci-mask-cle = <0>;
+ ti,davinci-mask-chipsel = <0>;
+
+ ti,davinci-nand-buswidth = <16>;
+ ti,davinci-ecc-mode = "hw";
+ ti,davinci-ecc-bits = <4>;
+ ti,davinci-nand-use-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot env";
+ reg = <0 0x020000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
index f117471fb06f..e7ee0d9efed8 100644
--- a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
@@ -22,12 +22,12 @@ properties:
oneOf:
- items:
- enum:
- - qcom,qcs8300-ethqos
- - const: qcom,sa8775p-ethqos
+ - qcom,qcs615-ethqos
+ - const: qcom,qcs404-ethqos
- items:
- enum:
- - qcom,qcs615-ethqos
- - const: qcom,sm8150-ethqos
+ - qcom,qcs8300-ethqos
+ - const: qcom,sa8775p-ethqos
- enum:
- qcom,qcs404-ethqos
- qcom,sa8775p-ethqos
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 80845c722ae4..d37f544ab8aa 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -20,18 +20,22 @@ properties:
- qcom,apq8064-qfprom
- qcom,apq8084-qfprom
- qcom,ipq5332-qfprom
+ - qcom,ipq5424-qfprom
- qcom,ipq6018-qfprom
- qcom,ipq8064-qfprom
- qcom,ipq8074-qfprom
- qcom,ipq9574-qfprom
- qcom,msm8226-qfprom
- qcom,msm8916-qfprom
+ - qcom,msm8917-qfprom
- qcom,msm8974-qfprom
- qcom,msm8976-qfprom
- qcom,msm8996-qfprom
- qcom,msm8998-qfprom
- qcom,qcm2290-qfprom
- qcom,qcs404-qfprom
+ - qcom,qcs615-qfprom
+ - qcom,qcs8300-qfprom
- qcom,sc7180-qfprom
- qcom,sc7280-qfprom
- qcom,sc8280xp-qfprom
diff --git a/Documentation/devicetree/bindings/nvmem/rmem.yaml b/Documentation/devicetree/bindings/nvmem/rmem.yaml
index 1ec0d09bcafa..85f9f5de3906 100644
--- a/Documentation/devicetree/bindings/nvmem/rmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rmem.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
items:
- enum:
+ - mobileye,eyeq5-bootloader-config
- raspberrypi,bootloader-config
- raspberrypi,bootloader-public-key
- const: nvmem-rmem
diff --git a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
index ec5e424bb3c8..75ab552f6ecd 100644
--- a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
+++ b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
@@ -22,6 +22,7 @@ allOf:
properties:
compatible:
enum:
+ - allwinner,sun50i-a100-operating-points
- allwinner,sun50i-h6-operating-points
- allwinner,sun50i-h616-operating-points
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml
index a8b34f58f8f4..cddbe21f99f2 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml
@@ -17,11 +17,11 @@ description:
properties:
clocks:
minItems: 3
- maxItems: 4
+ maxItems: 5
clock-names:
minItems: 3
- maxItems: 4
+ maxItems: 5
num-lanes:
const: 1
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
index 84ca12e8b25b..0b3526de1d62 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
@@ -22,6 +22,7 @@ properties:
- fsl,imx8mm-pcie-ep
- fsl,imx8mq-pcie-ep
- fsl,imx8mp-pcie-ep
+ - fsl,imx8q-pcie-ep
- fsl,imx95-pcie-ep
clocks:
@@ -78,6 +79,20 @@ allOf:
properties:
compatible:
enum:
+ - fsl,imx8q-pcie-ep
+ then:
+ properties:
+ reg:
+ maxItems: 2
+ reg-names:
+ items:
+ - const: dbi
+ - const: addr_space
+
+ - if:
+ properties:
+ compatible:
+ enum:
- fsl,imx95-pcie-ep
then:
properties:
@@ -103,13 +118,21 @@ allOf:
properties:
clocks:
minItems: 4
+ maxItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_aux
- else:
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx8mm-pcie-ep
+ - fsl,imx8mp-pcie-ep
+ then:
properties:
clocks:
maxItems: 3
@@ -119,6 +142,20 @@ allOf:
- const: pcie_bus
- const: pcie_aux
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx8q-pcie-ep
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ items:
+ - const: dbi
+ - const: mstr
+ - const: slv
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index 1e05c560d797..4c76cd3f98a9 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -40,10 +40,11 @@ properties:
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
+ - description: PCIe reference clock.
clock-names:
minItems: 3
- maxItems: 4
+ maxItems: 5
interrupts:
items:
@@ -127,7 +128,7 @@ allOf:
then:
properties:
clocks:
- minItems: 4
+ maxItems: 4
clock-names:
items:
- const: pcie
@@ -140,11 +141,10 @@ allOf:
compatible:
enum:
- fsl,imx8mq-pcie
- - fsl,imx95-pcie
then:
properties:
clocks:
- minItems: 4
+ maxItems: 4
clock-names:
items:
- const: pcie
@@ -200,6 +200,23 @@ allOf:
- const: mstr
- const: slv
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx95-pcie
+ then:
+ properties:
+ clocks:
+ maxItems: 5
+ clock-names:
+ items:
+ - const: pcie
+ - const: pcie_bus
+ - const: pcie_phy
+ - const: pcie_aux
+ - const: ref
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt b/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
deleted file mode 100644
index b40fb5d15d3d..000000000000
--- a/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-NXP Layerscape PCIe Gen4 controller
-
-This PCIe controller is based on the Mobiveil PCIe IP and thus inherits all
-the common properties defined in mobiveil-pcie.txt.
-
-Required properties:
-- compatible: should contain the platform identifier such as:
- "fsl,lx2160a-pcie"
-- reg: base addresses and lengths of the PCIe controller register blocks.
- "csr_axi_slave": Bridge config registers
- "config_axi_slave": PCIe controller registers
-- interrupts: A list of interrupt outputs of the controller. Must contain an
- entry for each entry in the interrupt-names property.
-- interrupt-names: It could include the following entries:
- "intr": The interrupt that is asserted for controller interrupts
- "aer": Asserted for aer interrupt when chip support the aer interrupt with
- none MSI/MSI-X/INTx mode,but there is interrupt line for aer.
- "pme": Asserted for pme interrupt when chip support the pme interrupt with
- none MSI/MSI-X/INTx mode,but there is interrupt line for pme.
-- dma-coherent: Indicates that the hardware IP block can ensure the coherency
- of the data transferred from/to the IP block. This can avoid the software
- cache flush/invalid actions, and improve the performance significantly.
-- msi-parent : See the generic MSI binding described in
- Documentation/devicetree/bindings/interrupt-controller/msi.txt.
-
-Example:
-
- pcie@3400000 {
- compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
- reg-names = "csr_axi_slave", "config_axi_slave";
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
- <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
- interrupt-names = "aer", "pme", "intr";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- apio-wins = <8>;
- ppio-wins = <8>;
- dma-coherent;
- bus-range = <0x0 0xff>;
- msi-parent = <&its>;
- ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
- <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- };
diff --git a/Documentation/devicetree/bindings/pci/mbvl,gpex40-pcie.yaml b/Documentation/devicetree/bindings/pci/mbvl,gpex40-pcie.yaml
new file mode 100644
index 000000000000..d286b77921e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mbvl,gpex40-pcie.yaml
@@ -0,0 +1,173 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/mbvl,gpex40-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mobiveil AXI PCIe Host Bridge
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ Mobiveil's GPEX 4.0 is a PCIe Gen4 host bridge IP. This configurable IP
+ has up to 8 outbound and inbound windows for address translation.
+
+ The NXP Layerscape PCIe Gen4 controller (deprecated) is based on Mobiveil's GPEX 4.0.
+
+properties:
+ compatible:
+ enum:
+ - fsl,lx2160a-pcie
+ - mbvl,gpex40-pcie
+
+ reg:
+ items:
+ - description: PCIe controller registers
+ - description: Bridge config registers
+ - description: GPIO registers to control slot power
+ - description: MSI registers
+ minItems: 2
+
+ reg-names:
+ items:
+ - const: csr_axi_slave
+ - const: config_axi_slave
+ - const: gpio_slave
+ - const: apb_csr
+ minItems: 2
+
+ apio-wins:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ number of requested APIO outbound windows
+ 1. Config window
+ 2. Memory window
+ default: 2
+ maximum: 256
+
+ ppio-wins:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: number of requested PPIO inbound windows
+ default: 1
+ maximum: 256
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 3
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 3
+
+ dma-coherent: true
+
+ msi-parent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,lx2160a-pcie
+ then:
+ properties:
+ reg:
+ maxItems: 2
+
+ reg-names:
+ maxItems: 2
+
+ interrupts:
+ minItems: 3
+
+ interrupt-names:
+ items:
+ - const: aer
+ - const: pme
+ - const: intr
+ else:
+ properties:
+ dma-coherent: false
+ msi-parent: false
+ interrupts:
+ maxItems: 1
+ interrupt-names: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pcie@b0000000 {
+ compatible = "mbvl,gpex40-pcie";
+ reg = <0xb0000000 0x00010000>,
+ <0xa0000000 0x00001000>,
+ <0xff000000 0x00200000>,
+ <0xb0010000 0x00001000>;
+ reg-names = "csr_axi_slave",
+ "config_axi_slave",
+ "gpio_slave",
+ "apb_csr";
+ ranges = <0x83000000 0 0x00000000 0xa8000000 0 0x8000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ apio-wins = <2>;
+ ppio-wins = <1>;
+ bus-range = <0x00 0xff>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 0 &pci_express 0>,
+ <0 0 0 1 &pci_express 1>,
+ <0 0 0 2 &pci_express 2>,
+ <0 0 0 3 &pci_express 3>;
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie@3400000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ dma-coherent;
+ bus-range = <0x00 0xff>;
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
index 2e1547569702..103574d18dbc 100644
--- a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -50,6 +50,8 @@ properties:
items:
pattern: '^fic[0-3]$'
+ dma-coherent: true
+
ranges:
minItems: 1
maxItems: 3
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
deleted file mode 100644
index 64156993e052..000000000000
--- a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-* Mobiveil AXI PCIe Root Port Bridge DT description
-
-Mobiveil's GPEX 4.0 is a PCIe Gen4 root port bridge IP. This configurable IP
-has up to 8 outbound and inbound windows for the address translation.
-
-Required properties:
-- #address-cells: Address representation for root ports, set to <3>
-- #size-cells: Size representation for root ports, set to <2>
-- #interrupt-cells: specifies the number of cells needed to encode an
- interrupt source. The value must be 1.
-- compatible: Should contain "mbvl,gpex40-pcie"
-- reg: Should contain PCIe registers location and length
- Mandatory:
- "config_axi_slave": PCIe controller registers
- "csr_axi_slave" : Bridge config registers
- Optional:
- "gpio_slave" : GPIO registers to control slot power
- "apb_csr" : MSI registers
-
-- device_type: must be "pci"
-- apio-wins : number of requested apio outbound windows
- default 2 outbound windows are configured -
- 1. Config window
- 2. Memory window
-- ppio-wins : number of requested ppio inbound windows
- default 1 inbound memory window is configured.
-- bus-range: PCI bus numbers covered
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an
- interrupt source. The value must be 1.
-- interrupts: The interrupt line of the PCIe controller
- last cell of this field is set to 4 to
- denote it as IRQ_TYPE_LEVEL_HIGH type interrupt.
-- interrupt-map-mask,
- interrupt-map: standard PCI properties to define the mapping of the
- PCI interface to interrupt numbers.
-- ranges: ranges for the PCI memory regions (I/O space region is not
- supported by hardware)
- Please refer to the standard PCI bus binding document for a more
- detailed explanation
-
-
-Example:
-++++++++
- pcie0: pcie@a0000000 {
- #address-cells = <3>;
- #size-cells = <2>;
- compatible = "mbvl,gpex40-pcie";
- reg = <0xa0000000 0x00001000>,
- <0xb0000000 0x00010000>,
- <0xff000000 0x00200000>,
- <0xb0010000 0x00001000>;
- reg-names = "config_axi_slave",
- "csr_axi_slave",
- "gpio_slave",
- "apb_csr";
- device_type = "pci";
- apio-wins = <2>;
- ppio-wins = <1>;
- bus-range = <0x00000000 0x000000ff>;
- interrupt-controller;
- interrupt-parent = <&gic>;
- #interrupt-cells = <1>;
- interrupts = < 0 89 4 >;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 0 &pci_express 0>,
- <0 0 0 1 &pci_express 1>,
- <0 0 0 2 &pci_express 2>,
- <0 0 0 3 &pci_express 3>;
- ranges = < 0x83000000 0 0x00000000 0xa8000000 0 0x8000000>;
-
- };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
index 2b5498a35dcc..dbce671ba011 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
@@ -57,9 +57,10 @@ properties:
interrupts:
minItems: 8
- maxItems: 8
+ maxItems: 9
interrupt-names:
+ minItems: 8
items:
- const: msi0
- const: msi1
@@ -69,6 +70,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
resets:
minItems: 1
@@ -139,9 +141,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index bd87f6b49d68..7235d6554cfb 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -32,6 +32,10 @@ properties:
- qcom,pcie-sdm845
- qcom,pcie-sdx55
- items:
+ - enum:
+ - qcom,pcie-ipq5424
+ - const: qcom,pcie-ipq9574
+ - items:
- const: qcom,pcie-msm8998
- const: qcom,pcie-msm8996
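
The new entry above lets an IPQ5424 root complex reuse the IPQ9574 programming model through a fallback compatible. A hypothetical node head, trimmed to the identification properties (the unit address is a placeholder):

    pcie@20000000 {
        compatible = "qcom,pcie-ipq5424", "qcom,pcie-ipq9574";
        /* placeholder unit address; reg, clocks, interrupts and the other
           required host-bridge properties are omitted for brevity */
        device_type = "pci";
    };
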
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index 989fb0fa2577..b63a759ec2d7 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -17,6 +17,7 @@ properties:
enum:
- xlnx,versal-cpm-host-1.00
- xlnx,versal-cpm5-host
+ - xlnx,versal-cpm5-host1
reg:
items:
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
index d3cd7997879f..1b3de6678c08 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
enum:
- rockchip,rk3568-naneng-combphy
+ - rockchip,rk3576-naneng-combphy
- rockchip,rk3588-naneng-combphy
reg:
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 58ce2d91d28c..f60804687412 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -15,12 +15,21 @@ description:
properties:
compatible:
- enum:
- - qcom,ipq6018-qmp-pcie-phy
- - qcom,ipq8074-qmp-gen3-pcie-phy
- - qcom,ipq8074-qmp-pcie-phy
- - qcom,ipq9574-qmp-gen3x1-pcie-phy
- - qcom,ipq9574-qmp-gen3x2-pcie-phy
+ oneOf:
+ - enum:
+ - qcom,ipq6018-qmp-pcie-phy
+ - qcom,ipq8074-qmp-gen3-pcie-phy
+ - qcom,ipq8074-qmp-pcie-phy
+ - qcom,ipq9574-qmp-gen3x1-pcie-phy
+ - qcom,ipq9574-qmp-gen3x2-pcie-phy
+ - items:
+ - enum:
+ - qcom,ipq5424-qmp-gen3x1-pcie-phy
+ - const: qcom,ipq9574-qmp-gen3x1-pcie-phy
+ - items:
+ - enum:
+ - qcom,ipq5424-qmp-gen3x2-pcie-phy
+ - const: qcom,ipq9574-qmp-gen3x2-pcie-phy
reg:
items:
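
Likewise on the PHY side, the IPQ5424 QMP PCIe PHYs are expressed with the IPQ9574 entries as fallback. A hypothetical gen3x2 node sketch; the register address and #phy-cells value are assumptions, and clocks, resets and supplies are omitted:

    pcie_phy: phy@84000 {
        compatible = "qcom,ipq5424-qmp-gen3x2-pcie-phy",
                     "qcom,ipq9574-qmp-gen3x2-pcie-phy";
        reg = <0x00084000 0x1000>;   /* placeholder address and size */
        #phy-cells = <0>;            /* assumed; see the binding's own example */
        /* clocks, resets and supplies omitted for brevity */
    };
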
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index 4aed4b5d65ec..39851ba9de43 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -18,6 +18,7 @@ properties:
oneOf:
- items:
- enum:
+ - qcom,ipq5424-qusb2-phy
- qcom,ipq6018-qusb2-phy
- qcom,ipq8074-qusb2-phy
- qcom,ipq9574-qusb2-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 13fdf5f1beba..89391649e0b5 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -16,8 +16,10 @@ description:
properties:
compatible:
enum:
+ - qcom,qcs615-qmp-gen3x1-pcie-phy
- qcom,sa8775p-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x4-pcie-phy
+ - qcom,sar2130p-qmp-gen3x2-pcie-phy
- qcom,sc8180x-qmp-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
@@ -32,6 +34,7 @@ properties:
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
- qcom,sm8350-qmp-gen3x1-pcie-phy
+ - qcom,sm8350-qmp-gen3x2-pcie-phy
- qcom,sm8450-qmp-gen3x1-pcie-phy
- qcom,sm8450-qmp-gen4x2-pcie-phy
- qcom,sm8550-qmp-gen3x2-pcie-phy
@@ -139,6 +142,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sar2130p-qmp-gen3x2-pcie-phy
- qcom,sc8180x-qmp-pcie-phy
- qcom,sdm845-qhp-pcie-phy
- qcom,sdm845-qmp-pcie-phy
@@ -149,6 +153,7 @@ allOf:
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
- qcom,sm8350-qmp-gen3x1-pcie-phy
+ - qcom,sm8350-qmp-gen3x2-pcie-phy
- qcom,sm8450-qmp-gen3x1-pcie-phy
- qcom,sm8450-qmp-gen3x2-pcie-phy
- qcom,sm8550-qmp-gen3x2-pcie-phy
@@ -167,6 +172,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs615-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index baf5134ea3d8..a1b55168e050 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,ipq5424-qmp-usb3-phy
- qcom,ipq6018-qmp-usb3-phy
- qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
@@ -89,6 +90,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5424-qmp-usb3-phy
- qcom,ipq6018-qmp-usb3-phy
- qcom,ipq8074-qmp-usb3-phy
- qcom,ipq9574-qmp-usb3-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index 2d0d7e9e6431..358a6736a951 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,sar2130p-qmp-usb3-dp-phy
- qcom,sc7180-qmp-usb3-dp-phy
- qcom,sc7280-qmp-usb3-dp-phy
- qcom,sc8180x-qmp-usb3-dp-phy
@@ -127,6 +128,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sar2130p-qmp-usb3-dp-phy
- qcom,sc8280xp-qmp-usb43dp-phy
- qcom,sm6350-qmp-usb3-dp-phy
- qcom,sm8550-qmp-usb3-dp-phy
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index 450240570314..990b78765427 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -44,6 +44,7 @@ properties:
- allwinner,sun8i-r40-pinctrl
- allwinner,sun8i-v3-pinctrl
- allwinner,sun8i-v3s-pinctrl
+ - allwinner,sun8i-v853-pinctrl
- allwinner,sun9i-a80-pinctrl
- allwinner,sun9i-a80-r-pinctrl
- allwinner,sun20i-d1-pinctrl
@@ -183,6 +184,18 @@ allOf:
properties:
compatible:
enum:
+ - allwinner,sun8i-v853-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 8
+ maxItems: 8
+
+ - if:
+ properties:
+ compatible:
+ enum:
- allwinner,sun20i-d1-pinctrl
then:
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml
index 1bb386b42039..a7ede29c1444 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml
@@ -145,40 +145,40 @@ additionalProperties:
examples:
- |
- #include <dt-bindings/clock/at91.h>
- #include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/pinctrl/at91.h>
-
- pinctrl@fffff400 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
- ranges = <0xfffff400 0xfffff400 0x600>;
-
- atmel,mux-mask = <
- /* A B */
- 0xffffffff 0xffc00c3b /* pioA */
- 0xffffffff 0x7fff3ccf /* pioB */
- 0xffffffff 0x007fffff /* pioC */
- >;
-
- dbgu {
- pinctrl_dbgu: dbgu-0 {
- atmel,pins =
- <AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
- AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
- };
- };
-
- pioA: gpio@fffff400 {
- compatible = "atmel,at91rm9200-gpio";
- reg = <0xfffff400 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
- #gpio-cells = <2>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <2>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
- };
- };
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/pinctrl/at91.h>
+
+ pinctrl@fffff400 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at91rm9200-pinctrl", "simple-mfd";
+ ranges = <0xfffff400 0xfffff400 0x600>;
+
+ atmel,mux-mask = <
+ /* A B */
+ 0xffffffff 0xffc00c3b /* pioA */
+ 0xffffffff 0x7fff3ccf /* pioB */
+ 0xffffffff 0x007fffff /* pioC */
+ >;
+
+ dbgu {
+ dbgu-0 {
+ atmel,pins =
+ <AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+ };
+ };
+
+ gpio@fffff400 {
+ compatible = "atmel,at91rm9200-gpio";
+ reg = <0xfffff400 0x200>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7988-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7988-pinctrl.yaml
new file mode 100644
index 000000000000..26dfe7e7735a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7988-pinctrl.yaml
@@ -0,0 +1,575 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,mt7988-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MT7988 Pin Controller
+
+maintainers:
+ - Sean Wang <sean.wang@kernel.org>
+
+description:
+ The MediaTek MT7988 pin controller is used to control SoC pins.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt7988-pinctrl
+
+ reg:
+ minItems: 7
+ maxItems: 7
+
+ reg-names:
+ items:
+ - const: gpio
+ - const: iocfg_tr
+ - const: iocfg_br
+ - const: iocfg_rb
+ - const: iocfg_lb
+ - const: iocfg_tl
+ - const: eint
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-ranges:
+ minItems: 1
+ maxItems: 5
+ description:
+ GPIO valid number range.
+
+ interrupt-controller: true
+
+ interrupts:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 2
+
+allOf:
+ - $ref: pinctrl.yaml#
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - gpio-controller
+ - "#gpio-cells"
+
+patternProperties:
+ '-pins$':
+ type: object
+ additionalProperties: false
+
+ properties:
+ mux:
+ type: object
+ additionalProperties: false
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+ description: |
+ pinmux configuration nodes.
+
+ The following table shows the effective values of "group", "function"
+ properties and chip pinout pins
+
+ groups function pins (in pin#)
+ ---------------------------------------------------------------------
+ "tops_jtag0_0" "jtag" 0, 1, 2, 3, 4
+ "wo0_jtag" "jtag" 50, 51, 52, 53, 54
+ "wo1_jtag" "jtag" 50, 51, 52, 53, 54
+ "wo2_jtag" "jtag" 50, 51, 52, 53, 54
+ "jtag" "jtag" 58, 59, 60, 61, 62
+ "tops_jtag0_1" "jtag" 58, 59, 60, 61, 62
+ "int_usxgmii" "int_usxgmii" 2, 3
+ "pwm0" "pwm" 57
+ "pwm1" "pwm" 21
+ "pwm2" "pwm" 80
+ "pwm2_0" "pwm" 58
+ "pwm3" "pwm" 81
+ "pwm3_0" "pwm" 59
+ "pwm4" "pwm" 82
+ "pwm4_0" "pwm" 60
+ "pwm5" "pwm" 83
+ "pwm5_0" "pwm" 61
+ "pwm6" "pwm" 69
+ "pwm6_0" "pwm" 62
+ "pwm7" "pwm" 70
+ "pwm7_0" "pwm" 4
+ "dfd" "dfd" 0, 1, 2, 3, 4
+ "xfi_phy0_i2c0" "i2c" 0, 1
+ "xfi_phy1_i2c0" "i2c" 0, 1
+ "xfi_phy_pll_i2c0" "i2c" 3, 4
+ "xfi_phy_pll_i2c1" "i2c" 3, 4
+ "i2c0_0" "i2c" 5, 6
+ "i2c1_sfp" "i2c" 5, 6
+ "xfi_pextp_phy0_i2c" "i2c" 5, 6
+ "xfi_pextp_phy1_i2c" "i2c" 5, 6
+ "i2c0_1" "i2c" 15, 16
+ "u30_phy_i2c0" "i2c" 15, 16
+ "u32_phy_i2c0" "i2c" 15, 16
+ "xfi_phy0_i2c1" "i2c" 15, 16
+ "xfi_phy1_i2c1" "i2c" 15, 16
+ "xfi_phy_pll_i2c2" "i2c" 15, 16
+ "i2c1_0" "i2c" 17, 18
+ "u30_phy_i2c1" "i2c" 17, 18
+ "u32_phy_i2c1" "i2c" 17, 18
+ "xfi_phy_pll_i2c3" "i2c" 17, 18
+ "sgmii0_i2c" "i2c" 17, 18
+ "sgmii1_i2c" "i2c" 17, 18
+ "i2c1_2" "i2c" 69, 70
+ "i2c2_0" "i2c" 69, 70
+ "i2c2_1" "i2c" 71, 72
+ "mdc_mdio0" "eth" 5, 6
+ "2p5g_ext_mdio" "eth" 28, 29
+ "gbe_ext_mdio" "eth" 30, 31
+ "mdc_mdio1" "eth" 69, 70
+ "pcie_wake_n0_0" "pcie" 7
+ "pcie_clk_req_n0_0" "pcie" 8
+ "pcie_wake_n3_0" "pcie" 9
+ "pcie_clk_req_n3" "pcie" 10
+ "pcie_clk_req_n0_1" "pcie" 10
+ "pcie_p0_phy_i2c" "pcie" 7, 8
+ "pcie_p1_phy_i2c" "pcie" 7, 8
+ "pcie_p3_phy_i2c" "pcie" 9, 10
+ "pcie_p2_phy_i2c" "pcie" 7, 8
+ "ckm_phy_i2c" "pcie" 9, 10
+ "pcie_wake_n0_1" "pcie" 13
+ "pcie_wake_n3_1" "pcie" 14
+ "pcie_2l_0_pereset" "pcie" 19
+ "pcie_1l_1_pereset" "pcie" 20
+ "pcie_clk_req_n2_1" "pcie" 63
+ "pcie_2l_1_pereset" "pcie" 73
+ "pcie_1l_0_pereset" "pcie" 74
+ "pcie_wake_n1_0" "pcie" 75
+ "pcie_clk_req_n1" "pcie" 76
+ "pcie_wake_n2_0" "pcie" 77
+ "pcie_clk_req_n2_0" "pcie" 78
+ "pcie_wake_n2_1" "pcie" 79
+ "pmic" "pmic" 11
+ "watchdog" "watchdog" 12
+ "spi0_wp_hold" "spi" 22, 23
+ "spi0" "spi" 24, 25, 26, 27
+ "spi1" "spi" 28, 29, 30, 31
+ "spi2" "spi" 32, 33, 34, 35
+ "spi2_wp_hold" "spi" 36, 37
+ "snfi" "flash" 22, 23, 24, 25, 26, 27
+ "emmc_45" "flash" 21, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
+ "sdcard" "flash" 32, 33, 34, 35, 36, 37
+ "emmc_51" "flash" 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49
+ "uart2" "uart" 0, 1, 2, 3
+ "tops_uart0_0" "uart" 22, 23
+ "uart2_0" "uart" 28, 29, 30, 31
+ "uart1_0" "uart" 32, 33, 34, 35
+ "uart2_1" "uart" 32, 33, 34, 35
+ "net_wo0_uart_txd_0" "uart" 28
+ "net_wo1_uart_txd_0" "uart" 29
+ "net_wo2_uart_txd_0" "uart" 30
+ "tops_uart1_0" "uart" 28, 29
+ "tops_uart0_1" "uart" 30, 31
+ "tops_uart1_1" "uart" 36, 37
+ "uart0" "uart" 55, 56
+ "tops_uart0_2" "uart" 55, 56
+ "uart2_2" "uart" 50, 51, 52, 53
+ "uart1_1" "uart" 58, 59, 60, 61
+ "uart2_3" "uart" 58, 59, 60, 61
+ "uart1_2" "uart" 80, 81, 82, 83
+ "uart1_2_lite" "uart" 80, 81
+ "tops_uart1_2" "uart" 80, 81
+ "net_wo0_uart_txd_1" "uart" 80
+ "net_wo1_uart_txd_1" "uart" 81
+ "net_wo2_uart_txd_1" "uart" 82
+ "udi" "udi" 32, 33, 34, 35, 36
+ "i2s" "audio" 50, 51, 52, 53, 54
+ "pcm" "audio" 50, 51, 52, 53
+ "gbe0_led1" "led" 58
+ "gbe1_led1" "led" 59
+ "gbe2_led1" "led" 60
+ "gbe3_led1" "led" 61
+ "2p5gbe_led1" "led" 62
+ "gbe0_led0" "led" 64
+ "gbe1_led0" "led" 65
+ "gbe2_led0" "led" 66
+ "gbe3_led0" "led" 67
+ "2p5gbe_led0" "led" 68
+ "drv_vbus_p1" "usb" 63
+ "drv_vbus" "usb" 79
+
+ properties:
+ function:
+ description:
+ A string containing the name of the function to mux to the group.
+ enum: [audio, dfd, eth, flash, i2c, int_usxgmii, jtag, led, pcie, pmic, pwm, spi,
+ uart, udi, usb, watchdog]
+ groups:
+ description:
+ An array of strings. Each string contains the name of a group.
+
+ required:
+ - function
+ - groups
+
+ allOf:
+ - if:
+ properties:
+ function:
+ const: audio
+ then:
+ properties:
+ groups:
+ enum: [i2s, pcm]
+ - if:
+ properties:
+ function:
+ const: jtag
+ then:
+ properties:
+ groups:
+ enum: [jtag, tops_jtag0_0, tops_jtag0_1, wo0_jtag, wo1_jtag, wo2_jtag]
+ - if:
+ properties:
+ function:
+ const: int_usxgmii
+ then:
+ properties:
+ groups:
+ const: int_usxgmii
+ - if:
+ properties:
+ function:
+ const: dfd
+ then:
+ properties:
+ groups:
+ const: dfd
+ - if:
+ properties:
+ function:
+ const: flash
+ then:
+ properties:
+ groups:
+ enum: [emmc_45, emmc_51, sdcard, snfi]
+ - if:
+ properties:
+ function:
+ const: eth
+ then:
+ properties:
+ groups:
+ enum: [2p5g_ext_mdio, gbe_ext_mdio, mdc_mdio0, mdc_mdio1]
+ - if:
+ properties:
+ function:
+ const: i2c
+ then:
+ properties:
+ groups:
+ enum: [xfi_phy0_i2c0, xfi_phy1_i2c0, xfi_phy_pll_i2c0,
+ xfi_phy_pll_i2c1, i2c0_0, i2c1_sfp, xfi_pextp_phy0_i2c,
+ xfi_pextp_phy1_i2c, i2c0_1, u30_phy_i2c0, u32_phy_i2c0,
+ xfi_phy0_i2c1, xfi_phy1_i2c1, xfi_phy_pll_i2c2, i2c1_0,
+ u30_phy_i2c1, u32_phy_i2c1, xfi_phy_pll_i2c3, sgmii0_i2c,
+ sgmii1_i2c, i2c1_2, i2c2_0, i2c2_1]
+ - if:
+ properties:
+ function:
+ const: led
+ then:
+ properties:
+ groups:
+ enum: [2p5gbe_led0, 2p5gbe_led1, gbe0_led0, gbe0_led1, gbe1_led0, gbe1_led1,
+ gbe2_led0, gbe2_led1, gbe3_led0, gbe3_led1, wf5g_led0, wf5g_led1]
+ - if:
+ properties:
+ function:
+ const: pcie
+ then:
+ properties:
+ groups:
+ items:
+ enum: [pcie_wake_n0_0, pcie_clk_req_n0_0, pcie_wake_n3_0,
+ pcie_clk_req_n3, pcie_p0_phy_i2c, pcie_p1_phy_i2c,
+ pcie_p3_phy_i2c, pcie_p2_phy_i2c, ckm_phy_i2c,
+ pcie_wake_n0_1, pcie_wake_n3_1, pcie_2l_0_pereset,
+ pcie_1l_1_pereset, pcie_clk_req_n2_1, pcie_2l_1_pereset,
+ pcie_1l_0_pereset, pcie_wake_n1_0, pcie_clk_req_n1,
+ pcie_wake_n2_0, pcie_clk_req_n2_0, pcie_wake_n2_1,
+ pcie_clk_req_n0_1]
+ maxItems: 3
+ - if:
+ properties:
+ function:
+ const: pmic
+ then:
+ properties:
+ groups:
+ const: pmic
+ - if:
+ properties:
+ function:
+ const: pwm
+ then:
+ properties:
+ groups:
+ items:
+ enum: [pwm0, pwm1, pwm2, pwm2_0, pwm3, pwm3_0, pwm4, pwm4_0, pwm5, pwm5_0,
+ pwm6, pwm6_0, pwm7, pwm7_0]
+ maxItems: 2
+ - if:
+ properties:
+ function:
+ const: spi
+ then:
+ properties:
+ groups:
+ items:
+ enum: [spi0, spi0_wp_hold, spi1, spi2, spi2_wp_hold]
+ maxItems: 2
+ - if:
+ properties:
+ function:
+ const: uart
+ then:
+ properties:
+ groups:
+ items:
+ enum: [net_wo0_uart_txd_0, net_wo0_uart_txd_1, net_wo1_uart_txd_0,
+ net_wo1_uart_txd_1, net_wo2_uart_txd_0, net_wo2_uart_txd_1,
+ tops_uart0_0, tops_uart0_1, tops_uart0_2, tops_uart1_0,
+ tops_uart1_1, tops_uart1_2, uart0, uart1_0, uart1_1, uart1_2,
+ uart1_2_lite, uart2, uart2_0, uart2_1, uart2_3]
+ maxItems: 2
+ - if:
+ properties:
+ function:
+ const: watchdog
+ then:
+ properties:
+ groups:
+ const: watchdog
+ - if:
+ properties:
+ function:
+ const: udi
+ then:
+ properties:
+ groups:
+ const: udi
+ - if:
+ properties:
+ function:
+ const: usb
+ then:
+ properties:
+ groups:
+ items:
+ enum: [drv_vbus, drv_vbus_p1]
+ maxItems: 1
+
+ patternProperties:
+ '^conf(-[-a-z]*)?$':
+ type: object
+ additionalProperties: false
+ description:
+ pinconf configuration nodes.
+ $ref: /schemas/pinctrl/pincfg-node.yaml
+
+ properties:
+ pins:
+ description:
+ An array of strings. Each string contains the name of a pin.
+ items:
+ enum: [UART2_RXD, UART2_TXD, UART2_CTS, UART2_RTS, GPIO_A, SMI_0_MDC,
+ SMI_0_MDIO, PCIE30_2L_0_WAKE_N, PCIE30_2L_0_CLKREQ_N,
+ PCIE30_1L_1_WAKE_N, PCIE30_1L_1_CLKREQ_N, GPIO_P, WATCHDOG,
+ GPIO_RESET, GPIO_WPS, PMIC_I2C_SCL, PMIC_I2C_SDA, I2C_1_SCL,
+ I2C_1_SDA, PCIE30_2L_0_PRESET_N, PCIE30_1L_1_PRESET_N, PWMD1,
+ SPI0_WP, SPI0_HOLD, SPI0_CSB, SPI0_MISO, SPI0_MOSI, SPI0_CLK,
+ SPI1_CSB, SPI1_MISO, SPI1_MOSI, SPI1_CLK, SPI2_CLK, SPI2_MOSI,
+ SPI2_MISO, SPI2_CSB, SPI2_HOLD, SPI2_WP, EMMC_RSTB, EMMC_DSL,
+ EMMC_CK, EMMC_CMD, EMMC_DATA_7, EMMC_DATA_6, EMMC_DATA_5,
+ EMMC_DATA_4, EMMC_DATA_3, EMMC_DATA_2, EMMC_DATA_1,
+ EMMC_DATA_0, PCM_FS_I2S_LRCK, PCM_CLK_I2S_BCLK,
+ PCM_DRX_I2S_DIN, PCM_DTX_I2S_DOUT, PCM_MCK_I2S_MCLK,
+ UART0_RXD, UART0_TXD, PWMD0, JTAG_JTDI, JTAG_JTDO, JTAG_JTMS,
+ JTAG_JTCLK, JTAG_JTRST_N, USB_DRV_VBUS_P1, LED_A, LED_B, LED_C,
+ LED_D, LED_E, GPIO_B, GPIO_C, I2C_2_SCL, I2C_2_SDA,
+ PCIE30_2L_1_PRESET_N, PCIE30_1L_0_PRESET_N,
+ PCIE30_2L_1_WAKE_N, PCIE30_2L_1_CLKREQ_N,
+ PCIE30_1L_0_WAKE_N, PCIE30_1L_0_CLKREQ_N, USB_DRV_VBUS_P0,
+ UART1_RXD, UART1_TXD, UART1_CTS, UART1_RTS]
+ maxItems: 84
+
+ bias-disable: true
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ description: normal pull up.
+ - enum: [100, 101, 102, 103]
+ description:
+ PUPD/R1/R0 pull up type. See MTK_PUPD_SET_R1R0 defines in
+ dt-bindings/pinctrl/mt65xx.h.
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ description: normal pull down.
+ - enum: [100, 101, 102, 103]
+ description:
+ PUPD/R1/R0 pull down type. See MTK_PUPD_SET_R1R0 defines in
+ dt-bindings/pinctrl/mt65xx.h.
+
+ input-enable: true
+
+ input-disable: true
+
+ output-enable: true
+
+ output-low: true
+
+ output-high: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ mediatek,pull-up-adv:
+ description: |
+ Valid arguments for 'mediatek,pull-up-adv' are '0', '1', '2', '3'
+ Pull up settings for 2 pull resistors, R0 and R1. Valid arguments
+ are described as below:
+ 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+ 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+ 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+ 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ mediatek,pull-down-adv:
+ description: |
+ Valid arguments for 'mediatek,pull-down-adv' are '0', '1', '2', '3'
+ Pull down settings for 2 pull resistors, R0 and R1. Valid arguments
+ are described as below:
+ 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+ 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+ 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+ 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ required:
+ - pins
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/pinctrl/mt65xx.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pio: pinctrl@1001f000 {
+ compatible = "mediatek,mt7988-pinctrl";
+ reg = <0 0x1001f000 0 0x1000>,
+ <0 0x11c10000 0 0x1000>,
+ <0 0x11d00000 0 0x1000>,
+ <0 0x11d20000 0 0x1000>,
+ <0 0x11e00000 0 0x1000>,
+ <0 0x11f00000 0 0x1000>,
+ <0 0x1000b000 0 0x1000>;
+ reg-names = "gpio", "iocfg_tr",
+ "iocfg_br", "iocfg_rb",
+ "iocfg_lb", "iocfg_tl", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 84>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <2>;
+
+ i2c0_pins: i2c0-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c0_1";
+ };
+ };
+
+ mdio0_pins: mdio0-pins {
+ mux {
+ function = "eth";
+ groups = "mdc_mdio0";
+ };
+
+ conf {
+ pins = "SMI_0_MDC", "SMI_0_MDIO";
+ drive-strength = <8>;
+ };
+ };
+
+ mmc0_pins_emmc_51: mmc0-emmc-51-pins {
+ mux {
+ function = "flash";
+ groups = "emmc_51";
+ };
+ };
+
+ mmc0_pins_sdcard: mmc0-sdcard-pins {
+ mux {
+ function = "flash";
+ groups = "sdcard";
+ };
+ };
+
+ pcie0_pins: pcie0-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_2l_0_pereset", "pcie_clk_req_n0_0",
+ "pcie_wake_n0_0";
+ };
+ };
+
+ pcie1_pins: pcie1-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_2l_1_pereset", "pcie_clk_req_n1",
+ "pcie_wake_n1_0";
+ };
+ };
+
+ pcie2_pins: pcie2-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_1l_0_pereset", "pcie_clk_req_n2_0",
+ "pcie_wake_n2_0";
+ };
+ };
+
+ pcie3_pins: pcie3-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_1l_1_pereset", "pcie_clk_req_n3",
+ "pcie_wake_n3_0";
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ mux {
+ function = "uart";
+ groups = "uart0";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq5424-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq5424-tlmm.yaml
index 5e64a232fc7a..df284d3645c1 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq5424-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq5424-tlmm.yaml
@@ -79,8 +79,8 @@ $defs:
qdss_cti_trig_out_b0, qdss_cti_trig_in_b1, qdss_cti_trig_out_b1,
qdss_traceclk_a, qdss_tracectl_a, qdss_tracedata_a, qspi_clk,
qspi_cs, qspi_data, resout, rx0, rx1, rx2, sdc_clk, sdc_cmd,
- sdc_data, spi0, spi1, spi10, spi11, tsens_max, uart0, uart1,
- wci_txd, wci_rxd, wsi_clk, wsi_data ]
+ sdc_data, spi0_cs, spi0_clk, spi0_miso, spi0_mosi, spi1, spi10,
+ spi11, tsens_max, uart0, uart1, wci_txd, wci_rxd, wsi_clk, wsi_data ]
required:
- pins
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8917-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,msm8917-pinctrl.yaml
new file mode 100644
index 000000000000..16d0c010e581
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8917-pinctrl.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,msm8917-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8917 TLMM pin controller
+
+maintainers:
+ - Barnabas Czeman <barnabas.czeman@mainlining.org>
+
+description:
+ Top Level Mode Multiplexer pin controller in Qualcomm MSM8917 SoC.
+
+properties:
+ compatible:
+ const: qcom,msm8917-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 66
+
+ gpio-line-names:
+ maxItems: 134
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-msm8917-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-msm8917-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-msm8917-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use below standard properties.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-2][0-9]|13[0-3])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc1_rclk, sdc2_clk,
+ sdc2_cmd, sdc2_data, qdsd_clk, qdsd_cmd, qdsd_data0,
+ qdsd_data1, qdsd_data2, qdsd_data3 ]
+ minItems: 1
+ maxItems: 16
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ enum: [ accel_int, adsp_ext, alsp_int, atest_bbrx0, atest_bbrx1,
+ atest_char, atest_char0, atest_char1, atest_char2,
+ atest_char3, atest_combodac_to_gpio_native,
+ atest_gpsadc_dtest0_native, atest_gpsadc_dtest1_native,
+ atest_tsens, atest_wlan0, atest_wlan1, audio_ref,
+ audio_reset, bimc_dte0, bimc_dte1, blsp6_spi, blsp8_spi,
+ blsp_i2c1, blsp_i2c2, blsp_i2c3, blsp_i2c4, blsp_i2c5,
+ blsp_i2c6, blsp_i2c7, blsp_i2c8, blsp_spi1, blsp_spi2,
+ blsp_spi3, blsp_spi4, blsp_spi5, blsp_spi6, blsp_spi7,
+ blsp_spi8, blsp_uart1, blsp_uart2, blsp_uart3, blsp_uart4,
+ blsp_uart5, blsp_uart6, blsp_uart7, blsp_uart8, cam0_ldo,
+ cam1_rst, cam1_standby, cam2_rst, cam2_standby, cam_mclk,
+ cci_async, cci_i2c, cci_timer0, cci_timer1, cdc_pdm0,
+ codec_int1, codec_int2, codec_mad, coex_uart, cri_trng,
+ cri_trng0, cri_trng1, dbg_out, dmic0_clk, dmic0_data,
+ ebi_cdc, ebi_ch0, ext_lpass, forced_usb, fp_gpio, fp_int,
+ gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a, gcc_gp2_clk_b,
+ gcc_gp3_clk_a, gcc_gp3_clk_b, gcc_plltest, gcc_tlmm, gpio,
+ gsm0_tx, key_focus, key_snapshot, key_volp, ldo_en,
+ ldo_update, lpass_slimbus, lpass_slimbus0, lpass_slimbus1,
+ m_voc, mag_int, mdp_vsync, mipi_dsi0, modem_tsync, nav_pps,
+ nav_pps_in_a, nav_pps_in_b, nav_tsync, nfc_pwr, ov_ldo,
+ pa_indicator, pbs0, pbs1, pbs2, pri_mi2s, pri_mi2s_mclk_a,
+ pri_mi2s_mclk_b, pri_mi2s_ws, prng_rosc,
+ pwr_crypto_enabled_a, pwr_crypto_enabled_b,
+ pwr_modem_enabled_a, pwr_modem_enabled_b, pwr_nav_enabled_a,
+ pwr_nav_enabled_b, qdss_cti_trig_in_a0, qdss_cti_trig_in_a1,
+ qdss_cti_trig_in_b0, qdss_cti_trig_in_b1,
+ qdss_cti_trig_out_a0, qdss_cti_trig_out_a1,
+ qdss_cti_trig_out_b0, qdss_cti_trig_out_b1, qdss_traceclk_a,
+ qdss_traceclk_b, qdss_tracectl_a, qdss_tracectl_b,
+ qdss_tracedata_a, qdss_tracedata_b, sd_write, sdcard_det,
+ sec_mi2s, sec_mi2s_mclk_a, sec_mi2s_mclk_b, sensor_rst,
+ smb_int, ssbi_wtr1, ts_resout, ts_sample, uim1_clk,
+ uim1_data, uim1_present, uim1_reset, uim2_clk, uim2_data,
+ uim2_present, uim2_reset, uim_batt, us_emitter, us_euro,
+ wcss_bt, wcss_fm, wcss_wlan, wcss_wlan0, wcss_wlan1,
+ wcss_wlan2, webcam_rst, webcam_standby, wsa_io, wsa_irq ]
+
+ required:
+ - pins
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8917-pinctrl";
+ reg = <0x01000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ gpio-ranges = <&tlmm 0 0 134>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1-uart2-sleep-state {
+ pins = "gpio4", "gpio5";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ spi1-default-state {
+ spi-pins {
+ pins = "gpio0", "gpio1", "gpio3";
+ function = "blsp_spi1";
+
+ drive-strength = <12>;
+ bias-disable;
+ };
+
+ cs-pins {
+ pins = "gpio2";
+ function = "gpio";
+
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml
index fc6c65fea73b..90bd49d87d2e 100644
--- a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1315e-pinctrl.yaml
@@ -159,30 +159,30 @@ additionalProperties: false
examples:
- |
- pinctrl@4e000 {
- compatible = "realtek,rtd1315e-pinctrl";
- reg = <0x4e000 0x130>;
-
- emmc-hs200-pins {
- pins = "emmc_clk",
- "emmc_cmd",
- "emmc_data_0",
- "emmc_data_1",
- "emmc_data_2",
- "emmc_data_3",
- "emmc_data_4",
- "emmc_data_5",
- "emmc_data_6",
- "emmc_data_7";
- function = "emmc";
- realtek,drive-strength-p = <0x2>;
- realtek,drive-strength-n = <0x2>;
- };
-
- i2c-0-pins {
- pins = "gpio_12",
- "gpio_13";
- function = "i2c0";
- drive-strength = <4>;
- };
- };
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1315e-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml
index f07361d60acd..b6211c8544ca 100644
--- a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1319d-pinctrl.yaml
@@ -158,30 +158,30 @@ additionalProperties: false
examples:
- |
- pinctrl@4e000 {
- compatible = "realtek,rtd1319d-pinctrl";
- reg = <0x4e000 0x130>;
-
- emmc-hs200-pins {
- pins = "emmc_clk",
- "emmc_cmd",
- "emmc_data_0",
- "emmc_data_1",
- "emmc_data_2",
- "emmc_data_3",
- "emmc_data_4",
- "emmc_data_5",
- "emmc_data_6",
- "emmc_data_7";
- function = "emmc";
- realtek,drive-strength-p = <0x2>;
- realtek,drive-strength-n = <0x2>;
- };
-
- i2c-0-pins {
- pins = "gpio_12",
- "gpio_13";
- function = "i2c0";
- drive-strength = <4>;
- };
- };
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1319d-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml
index 671e4ec84624..e88bc649cc73 100644
--- a/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/realtek,rtd1619b-pinctrl.yaml
@@ -157,30 +157,30 @@ additionalProperties: false
examples:
- |
- pinctrl@4e000 {
- compatible = "realtek,rtd1619b-pinctrl";
- reg = <0x4e000 0x130>;
-
- emmc-hs200-pins {
- pins = "emmc_clk",
- "emmc_cmd",
- "emmc_data_0",
- "emmc_data_1",
- "emmc_data_2",
- "emmc_data_3",
- "emmc_data_4",
- "emmc_data_5",
- "emmc_data_6",
- "emmc_data_7";
- function = "emmc";
- realtek,drive-strength-p = <0x2>;
- realtek,drive-strength-n = <0x2>;
- };
-
- i2c-0-pins {
- pins = "gpio_12",
- "gpio_13";
- function = "i2c0";
- drive-strength = <4>;
- };
- };
+ pinctrl@4e000 {
+ compatible = "realtek,rtd1619b-pinctrl";
+ reg = <0x4e000 0x130>;
+
+ emmc-hs200-pins {
+ pins = "emmc_clk",
+ "emmc_cmd",
+ "emmc_data_0",
+ "emmc_data_1",
+ "emmc_data_2",
+ "emmc_data_3",
+ "emmc_data_4",
+ "emmc_data_5",
+ "emmc_data_6",
+ "emmc_data_7";
+ function = "emmc";
+ realtek,drive-strength-p = <0x2>;
+ realtek,drive-strength-n = <0x2>;
+ };
+
+ i2c-0-pins {
+ pins = "gpio_12",
+ "gpio_13";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index a1805b6e3f63..768bb3c2b456 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -26,6 +26,7 @@ properties:
- renesas,r9a07g043-pinctrl # RZ/G2UL{Type-1,Type-2} and RZ/Five
- renesas,r9a07g044-pinctrl # RZ/G2{L,LC}
- renesas,r9a08g045-pinctrl # RZ/G3S
+ - renesas,r9a09g047-pinctrl # RZ/G3E
- renesas,r9a09g057-pinctrl # RZ/V2H(P)
- items:
@@ -125,7 +126,7 @@ additionalProperties:
drive-push-pull: true
renesas,output-impedance:
description:
- Output impedance for pins on the RZ/V2H(P) SoC. The value provided by this
+ Output impedance for pins on the RZ/{G3E,V2H(P)} SoC. The value provided by this
property corresponds to register bit values that can be set in the PFC_IOLH_mn
register, which adjusts the drive strength value and is pin-dependent.
$ref: /schemas/types.yaml#/definitions/uint32
@@ -142,7 +143,9 @@ allOf:
properties:
compatible:
contains:
- const: renesas,r9a09g057-pinctrl
+ enum:
+ - renesas,r9a09g047-pinctrl
+ - renesas,r9a09g057-pinctrl
then:
properties:
resets:
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 6a23d845f1f2..80a2b1934849 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -44,6 +44,7 @@ properties:
- rockchip,rk3328-pinctrl
- rockchip,rk3368-pinctrl
- rockchip,rk3399-pinctrl
+ - rockchip,rk3562-pinctrl
- rockchip,rk3568-pinctrl
- rockchip,rk3576-pinctrl
- rockchip,rk3588-pinctrl
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,pinctrl-zynq.yaml b/Documentation/devicetree/bindings/pinctrl/xlnx,pinctrl-zynq.yaml
index de6c10ba36c4..70548cb37ada 100644
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,pinctrl-zynq.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,pinctrl-zynq.yaml
@@ -180,38 +180,31 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/pinctrl/pinctrl-zynq.h>
- pinctrl0: pinctrl@700 {
- compatible = "xlnx,pinctrl-zynq";
- reg = <0x700 0x200>;
- syscon = <&slcr>;
-
- pinctrl_uart1_default: uart1-default {
- mux {
- groups = "uart1_10_grp";
- function = "uart1";
- };
-
- conf {
- groups = "uart1_10_grp";
- slew-rate = <0>;
- power-source = <IO_STANDARD_LVCMOS18>;
- };
-
- conf-rx {
- pins = "MIO49";
- bias-high-impedance;
- };
-
- conf-tx {
- pins = "MIO48";
- bias-disable;
- };
- };
+ pinctrl@700 {
+ compatible = "xlnx,pinctrl-zynq";
+ reg = <0x700 0x200>;
+ syscon = <&slcr>;
+
+ uart1-default {
+ mux {
+ groups = "uart1_10_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_10_grp";
+ slew-rate = <0>;
+ power-source = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO49";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO48";
+ bias-disable;
+ };
+ };
};
-
- uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1_default>;
- };
-
-...
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.yaml b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
index ec1f6f669e50..4dd4f59bbbec 100644
--- a/Documentation/devicetree/bindings/power/domain-idle-state.yaml
+++ b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
@@ -54,6 +54,11 @@ patternProperties:
(i.e. idle states node with entry-method property is set to "psci")
must specify this property.
+ idle-state-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ A string used as a descriptive name for the idle state.
+
required:
- compatible
- entry-latency-us
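
The new idle-state-name property gives tooling and logs a human-readable label for a domain idle state. A minimal sketch, with illustrative latency, residency and name values:

    domain-idle-states {
        cluster_pd: cluster-sleep-0 {
            compatible = "domain-idle-state";
            idle-state-name = "cluster-power-down";  /* illustrative name */
            entry-latency-us = <500>;                /* illustrative figures */
            exit-latency-us = <600>;
            min-residency-us = <2700>;
        };
    };
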
diff --git a/Documentation/devicetree/bindings/power/raspberrypi,bcm2835-power.yaml b/Documentation/devicetree/bindings/power/raspberrypi,bcm2835-power.yaml
new file mode 100644
index 000000000000..57579f70264c
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/raspberrypi,bcm2835-power.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/raspberrypi,bcm2835-power.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 power domain
+
+maintainers:
+ - Alexander Aring <alex.aring@gmail.com>
+ - Florian Fainelli <florian.fainelli@broadcom.com>
+
+description:
+ The Raspberry Pi power domain manages power for various subsystems
+ in the Raspberry Pi BCM2835 SoC.
+
+properties:
+ compatible:
+ enum:
+ - raspberrypi,bcm2835-power
+
+ firmware:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Reference to the RPi firmware device node
+
+ "#power-domain-cells":
+ const: 1
+
+required:
+ - compatible
+ - firmware
+ - "#power-domain-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ power-controller {
+ compatible = "raspberrypi,bcm2835-power";
+ firmware = <&firmware>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml b/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
index 8c58e12cdb60..0735ceb7c103 100644
--- a/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
+++ b/Documentation/devicetree/bindings/power/reset/atmel,sama5d2-shdwc.yaml
@@ -22,6 +22,9 @@ properties:
- enum:
- atmel,sama5d2-shdwc
- microchip,sam9x60-shdwc
+ - items:
+ - const: microchip,sam9x7-shdwc
+ - const: microchip,sam9x60-shdwc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
index 131b7e57d22f..07adf88997b4 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
@@ -21,6 +21,7 @@ properties:
- ti,bq24192i
- ti,bq24196
- ti,bq24296
+ - ti,bq24297
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/supply/gpio-charger.yaml b/Documentation/devicetree/bindings/power/supply/gpio-charger.yaml
index 89f8e2bcb2d7..25826bfc289c 100644
--- a/Documentation/devicetree/bindings/power/supply/gpio-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/gpio-charger.yaml
@@ -58,6 +58,10 @@ properties:
charge-current-limit-gpios property. Bit 1 second to last
GPIO and so on.
+ charge-current-limit-default-microamp:
+ description: Default charge current limit. Must be listed in
+ charge-current-limit-mapping.
+
required:
- compatible
@@ -72,6 +76,7 @@ anyOf:
dependencies:
charge-current-limit-gpios: [ charge-current-limit-mapping ]
charge-current-limit-mapping: [ charge-current-limit-gpios ]
+ charge-current-limit-default-microamp: [ charge-current-limit-mapping ]
additionalProperties: false
@@ -91,4 +96,5 @@ examples:
charge-current-limit-mapping = <2500000 0x00>, // 2.5 A => both GPIOs low
<700000 0x01>, // 700 mA => GPIO A.12 high
<0 0x02>; // 0 mA => GPIO A.11 high
+ charge-current-limit-default-microamp = <700000>;
};
diff --git a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
index 29d536541152..06595a953659 100644
--- a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
+++ b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
@@ -17,12 +17,18 @@ description: |
panels, etc., and a rechargeable Lithium-Ion/Polymer battery.
Specifications about the charger can be found at:
+ https://www.analog.com/en/products/ltc4162-l.html
+ https://www.analog.com/en/products/ltc4162-f.html
https://www.analog.com/en/products/ltc4162-s.html
+ https://www.analog.com/en/products/ltc4015.html
properties:
compatible:
enum:
+ - lltc,ltc4015
+ - lltc,ltc4162-f
- lltc,ltc4162-l
+ - lltc,ltc4162-s
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
index 085e2504d0dc..14242de7fc08 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
@@ -19,6 +19,7 @@ properties:
- maxim,max17047
- maxim,max17050
- maxim,max17055
+ - maxim,max77705-battery
- maxim,max77849-battery
reg:
diff --git a/Documentation/devicetree/bindings/power/supply/st,stc3117.yaml b/Documentation/devicetree/bindings/power/supply/st,stc3117.yaml
new file mode 100644
index 000000000000..e486131a27a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/st,stc3117.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/st,stc3117.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STC3117 Fuel Gauge Unit Power Supply
+
+maintainers:
+ - Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>
+ - Bhavin Sharma <bhavin.sharma@siliconsignals.io>
+
+description: |
+ The STC3117 includes the STMicroelectronics OptimGauge algorithm.
+ It provides accurate battery state-of-charge (SOC) monitoring, tracks
+ battery parameter changes with operation conditions, temperature,
+ and aging, and allows the application to get a battery state-of-health
+ (SOH) indication.
+
+ An alarm output signals low SOC or low voltage conditions and also
+ indicates fault conditions like a missing or swapped battery.
+
+ Datasheet is available at
+ https://www.st.com/resource/en/datasheet/stc3117.pdf
+
+allOf:
+ - $ref: power-supply.yaml#
+
+properties:
+ compatible:
+ enum:
+ - st,stc3117
+
+ reg:
+ maxItems: 1
+
+ monitored-battery:
+ description: |
+ The fuel gauge uses the following battery properties:
+ - charge-full-design-microamp-hours
+ - voltage-min-design-microvolt
+ - voltage-max-design-microvolt
+
+ shunt-resistor-micro-ohms:
+ description: Current sense resistor
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - monitored-battery
+ - shunt-resistor-micro-ohms
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery@70 {
+ compatible = "st,stc3117";
+ reg = <0x70>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ monitored-battery = <&bat>;
+ shunt-resistor-micro-ohms = <10000>;
+ };
+ };
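
The monitored-battery phandle in the example points at a battery node from which the fuel gauge reads the three properties listed above. A hypothetical simple-battery node with illustrative figures:

    bat: battery {
        compatible = "simple-battery";
        charge-full-design-microamp-hours = <2600000>;  /* illustrative */
        voltage-min-design-microvolt = <3300000>;       /* illustrative */
        voltage-max-design-microvolt = <4200000>;       /* illustrative */
    };
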
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
index 7fe401a06805..a66007951d58 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sa8775p-pas.yaml
@@ -15,12 +15,25 @@ description:
properties:
compatible:
- enum:
- - qcom,sa8775p-adsp-pas
- - qcom,sa8775p-cdsp0-pas
- - qcom,sa8775p-cdsp1-pas
- - qcom,sa8775p-gpdsp0-pas
- - qcom,sa8775p-gpdsp1-pas
+ oneOf:
+ - items:
+ - enum:
+ - qcom,qcs8300-adsp-pas
+ - const: qcom,sa8775p-adsp-pas
+ - items:
+ - enum:
+ - qcom,qcs8300-cdsp-pas
+ - const: qcom,sa8775p-cdsp0-pas
+ - items:
+ - enum:
+ - qcom,qcs8300-gpdsp-pas
+ - const: qcom,sa8775p-gpdsp0-pas
+ - enum:
+ - qcom,sa8775p-adsp-pas
+ - qcom,sa8775p-cdsp0-pas
+ - qcom,sa8775p-cdsp1-pas
+ - qcom,sa8775p-gpdsp0-pas
+ - qcom,sa8775p-gpdsp1-pas
reg:
maxItems: 1
@@ -63,8 +76,9 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sa8775p-adsp-pas
+ contains:
+ enum:
+ - qcom,sa8775p-adsp-pas
then:
properties:
power-domains:
@@ -79,9 +93,10 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sa8775p-cdsp0-pas
- - qcom,sa8775p-cdsp1-pas
+ contains:
+ enum:
+ - qcom,sa8775p-cdsp0-pas
+ - qcom,sa8775p-cdsp1-pas
then:
properties:
power-domains:
@@ -98,9 +113,10 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sa8775p-gpdsp0-pas
- - qcom,sa8775p-gpdsp1-pas
+ contains:
+ enum:
+ - qcom,sa8775p-gpdsp0-pas
+ - qcom,sa8775p-gpdsp1-pas
then:
properties:
power-domains:
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 8edc8261241a..2c72f148a74b 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -26,6 +26,18 @@ description: |
allOf:
- $ref: /schemas/cpu.yaml#
- $ref: extensions.yaml
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - thead,c906
+ - thead,c910
+ - thead,c920
+ then:
+ properties:
+ thead,vlenb: false
properties:
compatible:
@@ -46,6 +58,7 @@ properties:
- sifive,u7
- sifive,u74
- sifive,u74-mc
+ - spacemit,x60
- thead,c906
- thead,c908
- thead,c910
@@ -95,6 +108,13 @@ properties:
description:
The blocksize in bytes for the Zicboz cache operations.
+ thead,vlenb:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ VLEN/8, the vector register length in bytes. This property is required on
+ thead systems where the vector register length is not identical on all harts, or
+ the vlenb CSR is not available.
+
# RISC-V has multiple properties for cache op block sizes as the sizes
# differ between individual CBO extensions
cache-op-block-size: false
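
A sketch of a hart node using the new property together with the conditional above: thead,vlenb is only accepted on the listed T-Head cores, and a 128-bit VLEN gives vlenb = 16. The hart id and ISA strings are illustrative, not taken from a real board; xtheadvector is the vendor extension added to extensions.yaml just below.

    cpu@0 {
        device_type = "cpu";
        compatible = "thead,c906", "riscv";
        reg = <0>;
        riscv,isa-base = "rv64i";
        riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
                               "xtheadvector";
        thead,vlenb = <16>;   /* VLEN/8 for a 128-bit vector unit */
    };
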
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index 9c7dd7e75e0c..a63b994e0763 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -621,6 +621,10 @@ properties:
latency, as ratified in commit 56ed795 ("Update
riscv-crypto-spec-vector.adoc") of riscv-crypto.
+ # vendor extensions, each extension sorted alphanumerically under the
+ # vendor they belong to. Vendors are sorted alphanumerically as well.
+
+ # Andes
- const: xandespmu
description:
The Andes Technology performance monitor extension for counter overflow
@@ -628,6 +632,12 @@ properties:
Registers in the AX45MP datasheet.
https://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
+ # T-HEAD
+ - const: xtheadvector
+ description:
+ The T-HEAD specific 0.7.1 vector implementation as written in
+ https://github.com/T-head-Semi/thead-extension-spec/blob/95358cb2cca9489361c61d335e03d3134b14133f/xtheadvector.adoc.
+
allOf:
# Zcb depends on Zca
- if:
diff --git a/Documentation/devicetree/bindings/riscv/spacemit.yaml b/Documentation/devicetree/bindings/riscv/spacemit.yaml
new file mode 100644
index 000000000000..52e55077af1a
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/spacemit.yaml
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/riscv/spacemit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT SoC-based boards
+
+maintainers:
+ - Yangyu Chen <cyy@cyyself.name>
+ - Yixun Lan <dlan@gentoo.org>
+
+description:
+ SpacemiT SoC-based boards
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - bananapi,bpi-f3
+ - const: spacemit,k1
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml b/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml
index a14b52178c4b..2599b847f406 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml
+++ b/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml
@@ -14,9 +14,13 @@ maintainers:
properties:
compatible:
- enum:
- - fsl,imx1-rtc
- - fsl,imx21-rtc
+ oneOf:
+ - const: fsl,imx1-rtc
+ - const: fsl,imx21-rtc
+ - items:
+ - enum:
+ - fsl,imx31-rtc
+ - const: fsl,imx21-rtc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
index 692aa05500fd..0bde2379e864 100644
--- a/Documentation/devicetree/bindings/serial/8250.yaml
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -111,7 +111,9 @@ properties:
- mediatek,mt7623-btif
- const: mediatek,mtk-btif
- items:
- - const: mrvl,mmp-uart
+ - enum:
+ - mrvl,mmp-uart
+ - spacemit,k1-uart
- const: intel,xscale-uart
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml
index 88871480018e..ab39b95dae40 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.yaml
@@ -23,6 +23,8 @@ properties:
maxItems: 1
interrupts:
+ description:
+ When missing, the device driver uses polling instead.
maxItems: 1
clocks:
@@ -76,7 +78,6 @@ properties:
required:
- compatible
- reg
- - interrupts
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
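
With interrupts now optional for the SC16IS7xx, a node may simply omit the property and the driver polls the device instead. A hedged I2C sketch (the address and clock phandle are placeholders):

    i2c {
            #address-cells = <1>;
            #size-cells = <0>;

            serial@4d {
                    compatible = "nxp,sc16is752";
                    reg = <0x4d>;
                    clocks = <&sc16is7xx_xtal>;
                    /* no interrupts property: the driver falls back to polling */
            };
    };
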
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 51d9fb0f4763..8e82999e6acb 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -83,6 +83,11 @@ properties:
- const: renesas,scif-r9a09g057 # RZ/V2H(P)
+ - items:
+ - enum:
+ - renesas,scif-r9a09g047 # RZ/G3E
+ - const: renesas,scif-r9a09g057 # RZ/V2H fallback
+
reg:
maxItems: 1
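
The added renesas,scif entry lets the RZ/G3E SCIF reuse the RZ/V2H(P) programming model through a fallback compatible. A compatible-only sketch (the unit address is a placeholder; interrupts, clocks, resets and power-domains are omitted):

    serial@10011000 {
            compatible = "renesas,scif-r9a09g047", "renesas,scif-r9a09g057";
            reg = <0x10011000 0x400>;
    };
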
diff --git a/Documentation/devicetree/bindings/soc/altera/altr,sys-mgr.yaml b/Documentation/devicetree/bindings/soc/altera/altr,sys-mgr.yaml
new file mode 100644
index 000000000000..d56ff4c05ae5
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/altera/altr,sys-mgr.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/altera/altr,sys-mgr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera SOCFPGA System Manager
+
+maintainers:
+ - Dinh Nguyen <dinguyen@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - description: Cyclone5/Arria5/Arria10
+ const: altr,sys-mgr
+ - description: Stratix10 SoC
+ items:
+ - const: altr,sys-mgr-s10
+ - const: altr,sys-mgr
+
+ reg:
+ maxItems: 1
+
+ cpu1-start-addr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: CPU1 start address in hex
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: altr,sys-mgr-s10
+ then:
+ properties:
+ cpu1-start-addr: false
+
+additionalProperties: false
+
+examples:
+ - |
+ sysmgr@ffd08000 {
+ compatible = "altr,sys-mgr";
+ reg = <0xffd08000 0x1000>;
+ cpu1-start-addr = <0xffd080c4>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-hhi-sysctrl.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-hhi-sysctrl.yaml
index c6bce40946d4..3dc66f1de023 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-hhi-sysctrl.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,meson-gx-hhi-sysctrl.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
items:
- enum:
+ - amlogic,meson-hhi-sysctrl
- amlogic,meson-gx-hhi-sysctrl
- amlogic,meson-gx-ao-sysctrl
- amlogic,meson-axg-hhi-sysctrl
@@ -40,6 +41,19 @@ allOf:
properties:
compatible:
enum:
+ - amlogic,meson-hhi-sysctrl
+ then:
+ properties:
+ clock-controller:
+ $ref: /schemas/clock/amlogic,meson8-clkc.yaml#
+
+ pinctrl: false
+ phy: false
+
+ - if:
+ properties:
+ compatible:
+ enum:
- amlogic,meson-gx-hhi-sysctrl
- amlogic,meson-axg-hhi-sysctrl
then:
diff --git a/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt b/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt
deleted file mode 100644
index 30942cf7992b..000000000000
--- a/Documentation/devicetree/bindings/soc/bcm/raspberrypi,bcm2835-power.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Raspberry Pi power domain driver
-
-Required properties:
-
-- compatible: Should be "raspberrypi,bcm2835-power".
-- firmware: Reference to the RPi firmware device node.
-- #power-domain-cells: Should be <1>, we providing multiple power domains.
-
-The valid defines for power domain are:
-
- RPI_POWER_DOMAIN_I2C0
- RPI_POWER_DOMAIN_I2C1
- RPI_POWER_DOMAIN_I2C2
- RPI_POWER_DOMAIN_VIDEO_SCALER
- RPI_POWER_DOMAIN_VPU1
- RPI_POWER_DOMAIN_HDMI
- RPI_POWER_DOMAIN_USB
- RPI_POWER_DOMAIN_VEC
- RPI_POWER_DOMAIN_JPEG
- RPI_POWER_DOMAIN_H264
- RPI_POWER_DOMAIN_V3D
- RPI_POWER_DOMAIN_ISP
- RPI_POWER_DOMAIN_UNICAM0
- RPI_POWER_DOMAIN_UNICAM1
- RPI_POWER_DOMAIN_CCP2RX
- RPI_POWER_DOMAIN_CSI2
- RPI_POWER_DOMAIN_CPI
- RPI_POWER_DOMAIN_DSI0
- RPI_POWER_DOMAIN_DSI1
- RPI_POWER_DOMAIN_TRANSPOSER
- RPI_POWER_DOMAIN_CCP2TX
- RPI_POWER_DOMAIN_CDP
- RPI_POWER_DOMAIN_ARM
-
-Example:
-
-power: power {
- compatible = "raspberrypi,bcm2835-power";
- firmware = <&firmware>;
- #power-domain-cells = <1>;
-};
-
-Example for using power domain:
-
-&usb {
- power-domains = <&power RPI_POWER_DOMAIN_USB>;
-};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
index e63f800c6caa..41fbbe059d80 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
@@ -25,6 +25,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs615-aoss-qmp
- qcom,qcs8300-aoss-qmp
- qcom,qdu1000-aoss-qmp
- qcom,sa8255p-aoss-qmp
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index a8da7573cdaf..225c0f07ae94 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -360,19 +360,21 @@ properties:
- renesas,white-hawk-cpu # White Hawk CPU board (RTP8A779G0ASKB0FC0SA000)
- const: renesas,r8a779g0
- - description: R-Car V4H (R8A779G2)
- items:
- - enum:
- - renesas,white-hawk-single # White Hawk Single board (RTP8A779G2ASKB0F10SA001)
- - const: renesas,r8a779g2
- - const: renesas,r8a779g0
-
- items:
- enum:
- renesas,white-hawk-breakout # White Hawk BreakOut board (RTP8A779G0ASKB0SB0SA000)
- const: renesas,white-hawk-cpu
- const: renesas,r8a779g0
+ - description: R-Car V4H (R8A779G[23])
+ items:
+ - enum:
+ - renesas,white-hawk-single # White Hawk Single board (RTP8A779G[23]ASKB0F10SA001)
+ - enum:
+ - renesas,r8a779g2 # ES2.x
+ - renesas,r8a779g3 # ES3.x
+ - const: renesas,r8a779g0
+
- description: R-Car V4M (R8A779H0)
items:
- enum:
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 7eca9e1ad6a3..61f38b68a4a3 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -23,6 +23,7 @@ properties:
- rockchip,rk3576-bigcore-grf
- rockchip,rk3576-cci-grf
- rockchip,rk3576-gpu-grf
+ - rockchip,rk3576-hdptxphy-grf
- rockchip,rk3576-litcore-grf
- rockchip,rk3576-npu-grf
- rockchip,rk3576-php-grf
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
index 6cdfe7e059a3..8e6d051d8c97 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
@@ -55,6 +55,7 @@ properties:
- samsung,exynos7885-pmu
- samsung,exynos8895-pmu
- samsung,exynos9810-pmu
+ - samsung,exynos990-pmu
- samsung,exynosautov9-pmu
- samsung,exynosautov920-pmu
- tesla,fsd-pmu
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
index f80fcbc3128b..5b046932fbc3 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
@@ -64,6 +64,7 @@ properties:
samsung,mode:
$ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
description:
Selects USI function (which serial protocol to use). Refer to
<include/dt-bindings/soc/samsung,exynos-usi.h> for valid USI mode values.
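
The samsung,mode value is now restricted to the four USI protocol selections (0..3), which correspond to the constants in the shared header. A partial sketch showing only the properties relevant to this change (clocks, ranges and the child protocol node are omitted):

    #include <dt-bindings/soc/samsung,exynos-usi.h>

    usi0: usi@138200c0 {
            compatible = "samsung,exynos850-usi";
            reg = <0x138200c0 0x20>;
            samsung,sysreg = <&sysreg_peri 0x1010>;
            samsung,mode = <USI_V2_UART>;   /* must now be one of 0, 1, 2 or 3 */
    };
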
diff --git a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
index 3ca220582897..a75aef240629 100644
--- a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
@@ -21,6 +21,10 @@ properties:
- samsung,exynos3-sysreg
- samsung,exynos4-sysreg
- samsung,exynos5-sysreg
+ - samsung,exynos8895-fsys0-sysreg
+ - samsung,exynos8895-fsys1-sysreg
+ - samsung,exynos8895-peric0-sysreg
+ - samsung,exynos8895-peric1-sysreg
- samsung,exynosautov920-peric0-sysreg
- samsung,exynosautov920-peric1-sysreg
- tesla,fsd-cam-sysreg
@@ -79,6 +83,10 @@ allOf:
- samsung,exynos850-cmgp-sysreg
- samsung,exynos850-peri-sysreg
- samsung,exynos850-sysreg
+ - samsung,exynos8895-fsys0-sysreg
+ - samsung,exynos8895-fsys1-sysreg
+ - samsung,exynos8895-peric0-sysreg
+ - samsung,exynos8895-peric1-sysreg
then:
required:
- clocks
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
index 3cb1471cc6b6..927b3200e29e 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
@@ -92,6 +92,16 @@ properties:
description: |
This property is as per sci-pm-domain.txt.
+ clocks:
+ items:
+ - description: ICSSG_CORE Clock
+ - description: ICSSG_IEP Clock
+ - description: ICSSG_RGMII_MHZ_250 Clock
+ - description: ICSSG_RGMII_MHZ_50 Clock
+ - description: ICSSG_RGMII_MHZ_5 Clock
+ - description: ICSSG_UART Clock
+ - description: ICSSG_ICLK Clock
+
patternProperties:
memories@[a-f0-9]+$:
diff --git a/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml b/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
index f3f32540779c..f1beae84cad1 100644
--- a/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
@@ -36,12 +36,14 @@ unevaluatedProperties: false
examples:
- |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec@34 {
- compatible = "adi,ssm2518";
- reg = <0x34>;
- gpios = <&gpio 5 0>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec@34 {
+ compatible = "adi,ssm2518";
+ reg = <0x34>;
+ gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index ebc9097f936a..ccae64ce3071 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -23,6 +23,7 @@ properties:
- allwinner,sun8i-h3-codec
- allwinner,sun8i-v3s-codec
- allwinner,sun50i-h616-codec
+ - allwinner,suniv-f1c100s-codec
reg:
maxItems: 1
@@ -77,6 +78,7 @@ properties:
- MIC1
- MIC2
- MIC3
+ - MIC
# Microphone Biases from the SoC
- HBIAS
@@ -87,6 +89,8 @@ properties:
- Headset Mic
- Line In
- Line Out
+ - Right FM In
+ - Left FM In
- Mic
- Speaker
@@ -270,6 +274,33 @@ allOf:
- const: rx
- const: tx
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,suniv-f1c100s-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Right FM In
+ - Left FM In
+ - Mic
+ - Speaker
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
index 3b0b743e49c4..6676406bf2de 100644
--- a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
+++ b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
- awinic,aw88081
+ - awinic,aw88083
- awinic,aw88261
- awinic,aw88395
- awinic,aw88399
@@ -58,6 +59,7 @@ allOf:
contains:
enum:
- awinic,aw88081
+ - awinic,aw88083
- awinic,aw88261
then:
properties:
diff --git a/Documentation/devicetree/bindings/sound/everest,es71x4.yaml b/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
index fd1b32812228..efe9f3fd3778 100644
--- a/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
@@ -53,10 +53,10 @@ unevaluatedProperties: false
examples:
- |
- codec {
- compatible = "everest,es7134";
- #sound-dai-cells = <0>;
- VDD-supply = <&vdd_supply>;
- };
+ codec {
+ compatible = "everest,es7134";
+ #sound-dai-cells = <0>;
+ VDD-supply = <&vdd_supply>;
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/everest,es7241.yaml b/Documentation/devicetree/bindings/sound/everest,es7241.yaml
index f179af758730..e5cfb40f1ef2 100644
--- a/Documentation/devicetree/bindings/sound/everest,es7241.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es7241.yaml
@@ -54,14 +54,15 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- codec {
- compatible = "everest,es7241";
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
- VDDP-supply = <&vddp_supply>;
- VDDA-supply = <&vdda_supply>;
- VDDD-supply = <&vddd_supply>;
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ codec {
+ compatible = "everest,es7241";
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ VDDP-supply = <&vddp_supply>;
+ VDDA-supply = <&vdda_supply>;
+ VDDD-supply = <&vddd_supply>;
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
index 0782f3f9947f..c454110f4281 100644
--- a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -87,20 +87,20 @@ examples:
#include <dt-bindings/clock/imx8mn-clock.h>
easrc: easrc@300c0000 {
- compatible = "fsl,imx8mn-easrc";
- reg = <0x300c0000 0x10000>;
- interrupts = <0x0 122 0x4>;
- clocks = <&clk IMX8MN_CLK_ASRC_ROOT>;
- clock-names = "mem";
- dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>,
- <&sdma2 18 23 0> , <&sdma2 19 23 0>,
- <&sdma2 20 23 0> , <&sdma2 21 23 0>,
- <&sdma2 22 23 0> , <&sdma2 23 23 0>;
- dma-names = "ctx0_rx", "ctx0_tx",
- "ctx1_rx", "ctx1_tx",
- "ctx2_rx", "ctx2_tx",
- "ctx3_rx", "ctx3_tx";
- firmware-name = "imx/easrc/easrc-imx8mn.bin";
- fsl,asrc-rate = <8000>;
- fsl,asrc-format = <2>;
+ compatible = "fsl,imx8mn-easrc";
+ reg = <0x300c0000 0x10000>;
+ interrupts = <0x0 122 0x4>;
+ clocks = <&clk IMX8MN_CLK_ASRC_ROOT>;
+ clock-names = "mem";
+ dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>,
+ <&sdma2 18 23 0> , <&sdma2 19 23 0>,
+ <&sdma2 20 23 0> , <&sdma2 21 23 0>,
+ <&sdma2 22 23 0> , <&sdma2 23 23 0>;
+ dma-names = "ctx0_rx", "ctx0_tx",
+ "ctx1_rx", "ctx1_tx",
+ "ctx2_rx", "ctx2_tx",
+ "ctx3_rx", "ctx3_tx";
+ firmware-name = "imx/easrc/easrc-imx8mn.bin";
+ fsl,asrc-rate = <8000>;
+ fsl,asrc-format = <2>;
};
diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.yaml b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
index c1e9803fc113..c47b7a097490 100644
--- a/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
@@ -25,6 +25,7 @@ properties:
- fsl,imx8mm-micfil
- fsl,imx8mp-micfil
- fsl,imx93-micfil
+ - fsl,imx943-micfil
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
index 030ccc173130..8c22e8348b14 100644
--- a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
@@ -23,6 +23,8 @@ properties:
- fsl,imx8qm-mqs
- fsl,imx8qxp-mqs
- fsl,imx93-mqs
+ - fsl,imx943-aonmix-mqs
+ - fsl,imx943-wakeupmix-mqs
- fsl,imx95-aonmix-mqs
- fsl,imx95-netcmix-mqs
diff --git a/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml b/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
index 5e2801014221..f68d0e0ecfe5 100644
--- a/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
@@ -140,21 +140,21 @@ examples:
#include <dt-bindings/reset/imx8mp-reset.h>
xcvr: xcvr@30cc0000 {
- compatible = "fsl,imx8mp-xcvr";
- reg = <0x30cc0000 0x800>,
- <0x30cc0800 0x400>,
- <0x30cc0c00 0x080>,
- <0x30cc0e00 0x080>;
- reg-names = "ram", "regs", "rxfifo", "txfifo";
- interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_IPG>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_PHY>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT>;
- clock-names = "ipg", "phy", "spba", "pll_ipg";
- dmas = <&sdma2 30 2 0>, <&sdma2 31 2 0>;
- dma-names = "rx", "tx";
- resets = <&audiomix_reset 0>;
+ compatible = "fsl,imx8mp-xcvr";
+ reg = <0x30cc0000 0x800>,
+ <0x30cc0800 0x400>,
+ <0x30cc0c00 0x080>,
+ <0x30cc0e00 0x080>;
+ reg-names = "ram", "regs", "rxfifo", "txfifo";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_IPG>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_PHY>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT>;
+ clock-names = "ipg", "phy", "spba", "pll_ipg";
+ dmas = <&sdma2 30 2 0>, <&sdma2 31 2 0>;
+ dma-names = "rx", "tx";
+ resets = <&audiomix_reset 0>;
};
diff --git a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
index 76b6f2cf25df..dca617860938 100644
--- a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
@@ -72,19 +72,19 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/interrupt-controller/irq.h>
- #define KEEM_BAY_PSS_AUX_I2S3
- #define KEEM_BAY_PSS_I2S3
- i2s3: i2s@20140000 {
- compatible = "intel,keembay-i2s";
- #sound-dai-cells = <0>;
- reg = <0x20140000 0x200>, /* I2S registers */
- <0x202a00a4 0x4>; /* I2S gen configuration */
- reg-names = "i2s-regs", "i2s_gen_cfg";
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "osc", "apb_clk";
- clocks = <&scmi_clk KEEM_BAY_PSS_AUX_I2S3>, <&scmi_clk KEEM_BAY_PSS_I2S3>;
- dmas = <&axi_dma0 29>, <&axi_dma0 33>;
- dma-names = "tx", "rx";
- };
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #define KEEM_BAY_PSS_AUX_I2S3
+ #define KEEM_BAY_PSS_I2S3
+ i2s@20140000 {
+ compatible = "intel,keembay-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x20140000 0x200>, /* I2S registers */
+ <0x202a00a4 0x4>; /* I2S gen configuration */
+ reg-names = "i2s-regs", "i2s_gen_cfg";
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "osc", "apb_clk";
+ clocks = <&scmi_clk KEEM_BAY_PSS_AUX_I2S3>, <&scmi_clk KEEM_BAY_PSS_I2S3>;
+ dmas = <&axi_dma0 29>, <&axi_dma0 33>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
index ba482747f0e6..362e729b51b4 100644
--- a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
@@ -14,11 +14,15 @@ allOf:
properties:
compatible:
- enum:
- - mediatek,mt8188-es8326
- - mediatek,mt8188-mt6359-evb
- - mediatek,mt8188-nau8825
- - mediatek,mt8188-rt5682s
+ oneOf:
+ - enum:
+ - mediatek,mt8188-es8326
+ - mediatek,mt8188-mt6359-evb
+ - mediatek,mt8188-nau8825
+ - mediatek,mt8188-rt5682s
+ - items:
+ - const: mediatek,mt8390-mt6359-evk
+ - const: mediatek,mt8188-mt6359-evb
audio-routing:
description:
@@ -56,6 +60,8 @@ patternProperties:
- ETDM2_OUT_BE
- ETDM3_OUT_BE
- PCM1_BE
+ - DL_SRC_BE
+ - UL_SRC_BE
codec:
description: Holds subnode which indicates codec dai.
diff --git a/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml b/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
index 952768b35902..6946177e391a 100644
--- a/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
+++ b/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
@@ -55,16 +55,18 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- audio-codec@2a {
- compatible = "neofidelity,ntp8918";
- #sound-dai-cells = <0>;
- reg = <0x2a>;
- clocks = <&clkc 150>, <&clkc 151>, <&clkc 152>;
- clock-names = "wck", "scl", "bck";
- reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@2a {
+ compatible = "neofidelity,ntp8918";
+ #sound-dai-cells = <0>;
+ reg = <0x2a>;
+ clocks = <&clkc 150>, <&clkc 151>, <&clkc 152>;
+ clock-names = "wck", "scl", "bck";
+ reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ };
};
- };
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml
new file mode 100644
index 000000000000..39333ea05646
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/realtek,rt5682.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek rt5682 and rt5682i codecs
+
+maintainers:
+ - Bard Liao <bardliao@realtek.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - realtek,rt5682
+ - realtek,rt5682i
+
+ reg:
+ maxItems: 1
+ description: I2C address of the device.
+
+ interrupts:
+ maxItems: 1
+ description: The CODEC's interrupt output.
+
+ realtek,dmic1-data-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # dmic1 data is not used
+ - 1 # using GPIO2 pin as dmic1 data pin
+ - 2 # using GPIO5 pin as dmic1 data pin
+ description:
+ Specify which GPIO pin is used as the DMIC1 data pin.
+
+ realtek,dmic1-clk-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # using GPIO1 pin as dmic1 clock pin
+ - 1 # using GPIO3 pin as dmic1 clock pin
+ description:
+ Specify which GPIO pin is used as the DMIC1 clock pin.
+
+ realtek,jd-src:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # No JD is used
+ - 1 # using JD1 as JD source
+ description:
+ Specify which JD source is used.
+
+ realtek,ldo1-en-gpios:
+ description:
+ The GPIO that controls the CODEC's LDO1_EN pin.
+
+ realtek,btndet-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The debounce delay for the push button.
+ The delay time is the realtek,btndet-delay value multiplied by 8.192 ms.
+ If absent, the default is 16.
+
+ realtek,dmic-clk-rate-hz:
+ description:
+ Set the clock rate (Hz) required by the particular DMIC.
+
+ realtek,dmic-delay-ms:
+ description:
+ Set the delay time (ms) required by the particular DMIC.
+
+ realtek,dmic-clk-driving-high:
+ type: boolean
+ description:
+ Set high drive strength for the DMIC clock output.
+
+ clocks:
+ items:
+ - description: phandle and clock specifier for codec MCLK.
+
+ clock-names:
+ items:
+ - const: mclk
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ minItems: 2
+ maxItems: 2
+ description: Name given for DAI word clock and bit clock outputs.
+
+ "#sound-dai-cells":
+ const: 1
+
+ AVDD-supply:
+ description: Regulator supplying analog power through the AVDD pin.
+
+ MICVDD-supply:
+ description: Regulator supplying power for the microphone bias through
+ the MICVDD pin.
+
+ VBAT-supply:
+ description: Regulator supplying battery power through the VBAT pin.
+
+ DBVDD-supply:
+ description: Regulator supplying I/O power through the DBVDD pin.
+
+ LDO1-IN-supply:
+ description: Regulator supplying power to the digital core and charge
+ pump through the LDO1_IN pin.
+
+required:
+ - compatible
+ - reg
+ - AVDD-supply
+ - VBAT-supply
+ - MICVDD-supply
+ - DBVDD-supply
+ - LDO1-IN-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "realtek,rt5682";
+ reg = <0x1a>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio 2 GPIO_ACTIVE_HIGH>;
+ realtek,dmic1-data-pin = <1>;
+ realtek,dmic1-clk-pin = <1>;
+ realtek,jd-src = <1>;
+
+ #clock-cells = <1>;
+ clock-output-names = "rt5682-dai-wclk", "rt5682-dai-bclk";
+
+ clocks = <&osc>;
+ clock-names = "mclk";
+
+ AVDD-supply = <&avdd_reg>;
+ VBAT-supply = <&vbat_reg>;
+ MICVDD-supply = <&micvdd_reg>;
+ DBVDD-supply = <&dbvdd_reg>;
+ LDO1-IN-supply = <&ldo1_in_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 6d0d1514cd42..e8a2acb92646 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -112,12 +112,6 @@ properties:
description: List of necessary clock names.
# details are defined below
- post-init-providers:
- description: At least if rsnd is using DPCM connection on Audio-Graph-Card2,
- fw_devlink might doesn't have enough information to break the cycle. rsnd
- driver will not be probed in such case. Same problem might occur with
- Multi-CPU/Codec or Codec2Codec.
-
# ports is below
port:
$ref: audio-graph-port.yaml#/definitions/port-base
diff --git a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
index f4610eaed1e1..e4cdbf2202b9 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
@@ -19,6 +19,7 @@ properties:
- renesas,r9a07g043-ssi # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ssi # RZ/G2{L,LC}
- renesas,r9a07g054-ssi # RZ/V2L
+ - renesas,r9a08g045-ssi # RZ/G3S
- const: renesas,rz-ssi
reg:
@@ -57,24 +58,6 @@ properties:
dmas:
minItems: 1
maxItems: 2
- description:
- The first cell represents a phandle to dmac.
- The second cell specifies the encoded MID/RID values of the SSI port
- connected to the DMA client and the slave channel configuration
- parameters.
- bits[0:9] - Specifies MID/RID value of a SSI channel as below
- MID/RID value of SSI rx0 = 0x256
- MID/RID value of SSI tx0 = 0x255
- MID/RID value of SSI rx1 = 0x25a
- MID/RID value of SSI tx1 = 0x259
- MID/RID value of SSI rt2 = 0x25f
- MID/RID value of SSI rx3 = 0x262
- MID/RID value of SSI tx3 = 0x261
- bit[10] - HIEN = 1, Detects a request in response to the rising edge
- of the signal
- bit[11] - LVL = 0, Detects based on the edge
- bits[12:14] - AM = 2, Bus cycle mode
- bit[15] - TM = 0, Single transfer mode
dma-names:
oneOf:
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
deleted file mode 100644
index 5e1d08de18a5..000000000000
--- a/Documentation/devicetree/bindings/sound/rt5682.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-RT5682 audio CODEC
-
-This device supports I2C only.
-
-Required properties:
-
-- compatible : "realtek,rt5682" or "realtek,rt5682i"
-
-- reg : The I2C address of the device.
-
-- AVDD-supply: phandle to the regulator supplying analog power through the
- AVDD pin
-
-- MICVDD-supply: phandle to the regulator supplying power for the microphone
- bias through the MICVDD pin. Either MICVDD or VBAT should be present.
-
-- VBAT-supply: phandle to the regulator supplying battery power through the
- VBAT pin. Either MICVDD or VBAT should be present.
-
-- DBVDD-supply: phandle to the regulator supplying I/O power through the DBVDD
- pin.
-
-- LDO1-IN-supply: phandle to the regulator supplying power to the digital core
- and charge pump through the LDO1_IN pin.
-
-Optional properties:
-
-- interrupts : The CODEC's interrupt output.
-
-- realtek,dmic1-data-pin
- 0: dmic1 is not used
- 1: using GPIO2 pin as dmic1 data pin
- 2: using GPIO5 pin as dmic1 data pin
-
-- realtek,dmic1-clk-pin
- 0: using GPIO1 pin as dmic1 clock pin
- 1: using GPIO3 pin as dmic1 clock pin
-
-- realtek,jd-src
- 0: No JD is used
- 1: using JD1 as JD source
-
-- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
-
-- realtek,btndet-delay
- The debounce delay for push button.
- The delay time is realtek,btndet-delay value multiple of 8.192 ms.
- If absent, the default is 16.
-
-- #clock-cells : Should be set to '<1>', wclk and bclk sources provided.
-- clock-output-names : Name given for DAI clocks output.
-
-- clocks : phandle and clock specifier for codec MCLK.
-- clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-
-- realtek,dmic-clk-rate-hz : Set the clock rate (hz) for the requirement of
- the particular DMIC.
-
-- realtek,dmic-delay-ms : Set the delay time (ms) for the requirement of
- the particular DMIC.
-
-- realtek,dmic-clk-driving-high : Set the high driving of the DMIC clock out.
-
-- #sound-dai-cells: Should be set to '<1>'.
-
-Pins on the device (for linking into audio routes) for RT5682:
-
- * DMIC L1
- * DMIC R1
- * IN1P
- * HPOL
- * HPOR
-
-Example:
-
-rt5682 {
- compatible = "realtek,rt5682i";
- reg = <0x1a>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(U, 6) IRQ_TYPE_LEVEL_HIGH>;
- realtek,ldo1-en-gpios =
- <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_HIGH>;
- realtek,dmic1-data-pin = <1>;
- realtek,dmic1-clk-pin = <1>;
- realtek,jd-src = <1>;
- realtek,btndet-delay = <16>;
-
- #clock-cells = <1>;
- clock-output-names = "rt5682-dai-wclk", "rt5682-dai-bclk";
-
- clocks = <&osc>;
- clock-names = "mclk";
-
- AVDD-supply = <&avdd_reg>;
- MICVDD-supply = <&micvdd_reg>;
- DBVDD-supply = <&dbvdd_reg>;
- LDO1-IN-supply = <&ldo1_in_reg>;
-};
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml b/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
index 5aa00617291c..1f0e6787a746 100644
--- a/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,pcm1681.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/sound/ti,pcm1681.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Texas Instruments PCM1681 8-channel PWM Processor
+title: Texas Instruments PCM1681 8-channel Digital-to-Analog Converter
maintainers:
- Shenghao Ding <shenghao-ding@ti.com>
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
index dd5b08e3d7a1..d89b4255b51c 100644
--- a/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
@@ -159,19 +159,21 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- /* example for two devices with interrupt support */
- #address-cells = <1>;
- #size-cells = <0>;
- pcm6240: audio-codec@48 {
- compatible = "ti,pcm6240";
- reg = <0x48>, /* primary-device */
- <0x4b>; /* secondary-device */
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio1>;
- interrupts = <15>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ /* example for two devices with interrupt support */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@48 {
+ compatible = "ti,pcm6240";
+ reg = <0x48>, /* primary-device */
+ <0x4b>; /* secondary-device */
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2562.yaml b/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
index 8bc3b0c7531e..3763ca16b91f 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
@@ -65,17 +65,19 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@4c {
- compatible = "ti,tas2562";
- reg = <0x4c>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- shutdown-gpios = <&gpio1 15 0>;
- ti,imon-slot-no = <0>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@4c {
+ compatible = "ti,tas2562";
+ reg = <0x4c>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ shutdown-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
index 362c2e6154f0..5e7aea43aced 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
@@ -69,19 +69,21 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@41 {
- compatible = "ti,tas2770";
- reg = <0x41>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- reset-gpio = <&gpio1 15 0>;
- shutdown-gpios = <&gpio1 14 0>;
- ti,imon-slot-no = <0>;
- ti,vmon-slot-no = <2>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@41 {
+ compatible = "ti,tas2770";
+ reg = <0x41>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ reset-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
index 976238689249..5ea1cdc593b5 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
@@ -101,22 +101,24 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- /* example with quad tas2781s, such as tablet or pad device */
- #address-cells = <1>;
- #size-cells = <0>;
- quad_tas2781: tas2781@38 {
- compatible = "ti,tas2781";
- reg = <0x38>, /* Audio slot 0 */
- <0x3a>, /* Audio slot 1 */
- <0x39>, /* Audio slot 2 */
- <0x3b>; /* Audio slot 3 */
-
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio1>;
- interrupts = <15>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ /* example with quad tas2781s, such as tablet or pad device */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@38 {
+ compatible = "ti,tas2781";
+ reg = <0x38>, /* Audio slot 0 */
+ <0x3a>, /* Audio slot 1 */
+ <0x39>, /* Audio slot 2 */
+ <0x3b>; /* Audio slot 3 */
+
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
index 530bc3937847..5447482179c1 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
@@ -62,21 +62,23 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@38 {
- compatible = "ti,tas2764";
- reg = <0x38>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- reset-gpios = <&gpio1 15 0>;
- shutdown-gpios = <&gpio1 15 0>;
- ti,imon-slot-no = <0>;
- ti,vmon-slot-no = <2>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@38 {
+ compatible = "ti,tas2764";
+ reg = <0x38>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml b/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
index 2f917238db95..74f7d02b424b 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
@@ -112,22 +112,24 @@ unevaluatedProperties: false
examples:
- |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- codec@2a {
- compatible = "ti,tas5717";
- reg = <0x2a>;
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 15 0>;
- pdn-gpios = <&gpio1 15 0>;
- AVDD-supply = <&avdd_supply>;
- DVDD-supply = <&dvdd_supply>;
- HPVDD-supply = <&hpvdd_supply>;
- PVDD_AB-supply = <&pvdd_ab_supply>;
- PVDD_CD-supply = <&pvdd_cd_supply>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@2a {
+ compatible = "ti,tas5717";
+ reg = <0x2a>;
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ pdn-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ AVDD-supply = <&avdd_supply>;
+ DVDD-supply = <&dvdd_supply>;
+ HPVDD-supply = <&hpvdd_supply>;
+ PVDD_AB-supply = <&pvdd_ab_supply>;
+ PVDD_CD-supply = <&pvdd_cd_supply>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index d9322704f358..a7236f7db4ec 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -47,7 +47,9 @@ properties:
- const: allwinner,sun8i-v3s-system-control
- const: allwinner,sun8i-h3-system-control
- items:
- - const: allwinner,sun50i-h6-system-control
+ - enum:
+ - allwinner,sun50i-a100-system-control
+ - allwinner,sun50i-h6-system-control
- const: allwinner,sun50i-a64-system-control
reg:
diff --git a/Documentation/devicetree/bindings/sram/qcom,imem.yaml b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
index 9b06bcd01957..2711f90d9664 100644
--- a/Documentation/devicetree/bindings/sram/qcom,imem.yaml
+++ b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,apq8064-imem
- qcom,msm8226-imem
- qcom,msm8974-imem
+ - qcom,msm8976-imem
- qcom,qcs404-imem
- qcom,qcs8300-imem
- qcom,qdu1000-imem
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index ed5de0f92a9e..b9829bb22cc0 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -80,6 +80,7 @@ properties:
- description: v2 of TSENS with combined interrupt
items:
- enum:
+ - qcom,ipq6018-tsens
- qcom,ipq9574-tsens
- const: qcom,ipq8074-tsens
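
The qcom-tsens addition lets the IPQ6018 sensor bind through the IPQ8074 combined-interrupt v2 fallback. A hedged sketch — the register addresses, interrupt number and sensor count are placeholders:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    thermal-sensor@4a9000 {
            compatible = "qcom,ipq6018-tsens", "qcom,ipq8074-tsens";
            reg = <0x4a9000 0x1000>, <0x4a8000 0x1000>;
            interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
            interrupt-names = "combined";
            #qcom,sensors = <16>;
            #thermal-sensor-cells = <1>;
    };
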
diff --git a/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml b/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
index e2607377cbae..9898dc7ea97b 100644
--- a/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
+++ b/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
@@ -21,6 +21,7 @@ properties:
- items:
- enum:
- fsl,imx25-gpt
+ - fsl,imx35-gpt
- fsl,imx50-gpt
- fsl,imx51-gpt
- fsl,imx53-gpt
@@ -31,6 +32,7 @@ properties:
- enum:
- fsl,imx6sl-gpt
- fsl,imx6sx-gpt
+ - fsl,imx7d-gpt
- fsl,imx8mp-gpt
- fsl,imxrt1050-gpt
- fsl,imxrt1170-gpt
@@ -38,7 +40,6 @@ properties:
- items:
- enum:
- fsl,imx6ul-gpt
- - fsl,imx7d-gpt
- const: fsl,imx6sx-gpt
reg:
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index b42d43d2de48..76d83aea4e2b 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -31,6 +31,7 @@ properties:
- enum:
- canaan,k210-clint # Canaan Kendryte K210
- sifive,fu540-c000-clint # SiFive FU540
+ - spacemit,k1-clint # SpacemiT K1
- starfive,jh7100-clint # StarFive JH7100
- starfive,jh7110-clint # StarFive JH7110
- starfive,jh8100-clint # StarFive JH8100
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 6bdcd055e763..fadbd3c041c8 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -55,8 +55,6 @@ properties:
- atmel,atsha204a
# BPA-RS600: Power Supply
- blutek,bpa-rs600
- # Bosch Sensortec pressure, temperature, humididty and VOC sensor
- - bosch,bme680
# CM32181: Ambient Light Sensor
- capella,cm32181
# CM3232: Ambient Light Sensor
@@ -147,6 +145,8 @@ properties:
- injoinic,ip5207
# Injoinic IP5209 2.4A Power Bank IC with I2C
- injoinic,ip5209
+ # Injoinic IP5306 2.1A Power Bank IC with I2C option
+ - injoinic,ip5306
# Inspur Power System power supply unit version 1
- inspur,ipsps1
# Intel common redudant power supply crps185
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index cde334e3206b..a03fff5df5ef 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -26,6 +26,7 @@ properties:
- qcom,msm8994-ufshc
- qcom,msm8996-ufshc
- qcom,msm8998-ufshc
+ - qcom,qcs615-ufshc
- qcom,qcs8300-ufshc
- qcom,sa8775p-ufshc
- qcom,sc7180-ufshc
@@ -243,6 +244,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs615-ufshc
- qcom,sm6115-ufshc
- qcom,sm6125-ufshc
then:
diff --git a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
index f04f9f61fa9f..1949a15e73d2 100644
--- a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
@@ -50,12 +50,12 @@ examples:
#include <dt-bindings/power/r8a779f0-sysc.h>
ufs: ufs@e686000 {
- compatible = "renesas,r8a779f0-ufs";
- reg = <0xe6860000 0x100>;
- interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 1514>, <&ufs30_clk>;
- clock-names = "fck", "ref_clk";
- freq-table-hz = <200000000 200000000>, <38400000 38400000>;
- power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>;
- resets = <&cpg 1514>;
+ compatible = "renesas,r8a779f0-ufs";
+ reg = <0xe6860000 0x100>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1514>, <&ufs30_clk>;
+ clock-names = "fck", "ref_clk";
+ freq-table-hz = <200000000 200000000>, <38400000 38400000>;
+ power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>;
+ resets = <&cpg 1514>;
};
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index 720879820f66..b4e744ebffd1 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -112,19 +112,19 @@ examples:
#include <dt-bindings/clock/exynos7-clk.h>
ufs: ufs@15570000 {
- compatible = "samsung,exynos7-ufs";
- reg = <0x15570000 0x100>,
- <0x15570100 0x100>,
- <0x15571000 0x200>,
- <0x15572000 0x300>;
- reg-names = "hci", "vs_hci", "unipro", "ufsp";
- interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
- <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
- clock-names = "core_clk", "sclk_unipro_main";
- pinctrl-names = "default";
- pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
- phys = <&ufs_phy>;
- phy-names = "ufs-phy";
+ compatible = "samsung,exynos7-ufs";
+ reg = <0x15570000 0x100>,
+ <0x15570100 0x100>,
+ <0x15571000 0x200>,
+ <0x15572000 0x300>;
+ reg-names = "hci", "vs_hci", "unipro", "ufsp";
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+ <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+ clock-names = "core_clk", "sclk_unipro_main";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ phys = <&ufs_phy>;
+ phy-names = "ufs-phy";
};
...
diff --git a/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml b/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
index a86bcd95100e..7f22f9c031b2 100644
--- a/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
+++ b/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
@@ -113,27 +113,27 @@ examples:
- |
#include <dt-bindings/clock/aspeed-clock.h>
vhub: usb-vhub@1e6a0000 {
- compatible = "aspeed,ast2500-usb-vhub";
- reg = <0x1e6a0000 0x300>;
- interrupts = <5>;
- clocks = <&syscon ASPEED_CLK_GATE_USBPORT1CLK>;
- aspeed,vhub-downstream-ports = <5>;
- aspeed,vhub-generic-endpoints = <15>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usb2ad_default>;
-
- vhub-vendor-id = <0x1d6b>;
- vhub-product-id = <0x0107>;
- vhub-device-revision = <0x0100>;
- vhub-strings {
- #address-cells = <1>;
- #size-cells = <0>;
-
- string@409 {
- reg = <0x409>;
- manufacturer = "ASPEED";
- product = "USB Virtual Hub";
- serial-number = "0000";
- };
+ compatible = "aspeed,ast2500-usb-vhub";
+ reg = <0x1e6a0000 0x300>;
+ interrupts = <5>;
+ clocks = <&syscon ASPEED_CLK_GATE_USBPORT1CLK>;
+ aspeed,vhub-downstream-ports = <5>;
+ aspeed,vhub-generic-endpoints = <15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb2ad_default>;
+
+ vhub-vendor-id = <0x1d6b>;
+ vhub-product-id = <0x0107>;
+ vhub-device-revision = <0x0100>;
+ vhub-strings {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ string@409 {
+ reg = <0x409>;
+ manufacturer = "ASPEED";
+ product = "USB Virtual Hub";
+ serial-number = "0000";
};
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/brcm,bdc.yaml b/Documentation/devicetree/bindings/usb/brcm,bdc.yaml
index 9e561fee98f1..f9375c69e86b 100644
--- a/Documentation/devicetree/bindings/usb/brcm,bdc.yaml
+++ b/Documentation/devicetree/bindings/usb/brcm,bdc.yaml
@@ -41,10 +41,10 @@ additionalProperties: false
examples:
- |
- usb@f0b02000 {
- compatible = "brcm,bdc-udc-v2";
- reg = <0xf0b02000 0xfc4>;
- interrupts = <0x0 0x60 0x0>;
- phys = <&usbphy_0 0x0>;
- clocks = <&sw_usbd>;
- };
+ usb@f0b02000 {
+ compatible = "brcm,bdc-udc-v2";
+ reg = <0xf0b02000 0xfc4>;
+ interrupts = <0x0 0x60 0x0>;
+ phys = <&usbphy_0 0x0>;
+ clocks = <&sw_usbd>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
index e44e88d993d0..1033b7a4b8f9 100644
--- a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
+++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
@@ -56,21 +56,21 @@ examples:
/* 2.0 hub on port 1 */
hub_2_0: hub@1 {
- compatible = "usb4b4,6504";
- reg = <1>;
- peer-hub = <&hub_3_0>;
- reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
- vdd-supply = <&reg_1v2_usb>;
- vdd2-supply = <&reg_3v3_usb>;
+ compatible = "usb4b4,6504";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_1v2_usb>;
+ vdd2-supply = <&reg_3v3_usb>;
};
/* 3.0 hub on port 2 */
hub_3_0: hub@2 {
- compatible = "usb4b4,6506";
- reg = <2>;
- peer-hub = <&hub_2_0>;
- reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
- vdd-supply = <&reg_1v2_usb>;
- vdd2-supply = <&reg_3v3_usb>;
+ compatible = "usb4b4,6506";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_1v2_usb>;
+ vdd2-supply = <&reg_3v3_usb>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index a5f2e3442a0e..e83d30a91b88 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -192,7 +192,7 @@ unevaluatedProperties: false
examples:
- |
- usb@101c0000 {
+ usb@101c0000 {
compatible = "rockchip,rk3066-usb", "snps,dwc2";
reg = <0x10180000 0x40000>;
interrupts = <18>;
@@ -200,6 +200,6 @@ examples:
clock-names = "otg";
phys = <&usbphy>;
phy-names = "usb2-phy";
- };
+ };
...
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
index 8b25b9a01ced..e3a7df91f7f1 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -87,21 +87,21 @@ examples:
#size-cells = <0>;
typec-mux@42 {
- compatible = "fcs,fsa4480";
- reg = <0x42>;
+ compatible = "fcs,fsa4480";
+ reg = <0x42>;
- interrupts-extended = <&tlmm 2 IRQ_TYPE_LEVEL_LOW>;
+ interrupts-extended = <&tlmm 2 IRQ_TYPE_LEVEL_LOW>;
- vcc-supply = <&vreg_bob>;
+ vcc-supply = <&vreg_bob>;
- mode-switch;
- orientation-switch;
+ mode-switch;
+ orientation-switch;
- port {
- fsa4480_ept: endpoint {
- remote-endpoint = <&typec_controller>;
+ port {
+ fsa4480_ept: endpoint {
+ remote-endpoint = <&typec_controller>;
+ };
};
- };
};
};
...
diff --git a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
index 8a5f837eff94..e588514fab2d 100644
--- a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
+++ b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
@@ -20,6 +20,7 @@ properties:
items:
- enum:
- nxp,cbdtu02043
+ - onnn,fsusb42
- onnn,fsusb43l10x
- pericom,pi3usb102
- ti,tmuxhs4212
diff --git a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
index d3511f48cd55..1a75544a8c31 100644
--- a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
@@ -58,20 +58,20 @@ examples:
#define KEEM_BAY_A53_AUX_USB_SUSPEND
usb {
- compatible = "intel,keembay-dwc3";
- clocks = <&scmi_clk KEEM_BAY_A53_AUX_USB>,
- <&scmi_clk KEEM_BAY_A53_AUX_USB_REF>,
- <&scmi_clk KEEM_BAY_A53_AUX_USB_ALT_REF>,
- <&scmi_clk KEEM_BAY_A53_AUX_USB_SUSPEND>;
- clock-names = "async_master", "ref", "alt_ref", "suspend";
- ranges;
- #address-cells = <1>;
- #size-cells = <1>;
-
- usb@34000000 {
- compatible = "snps,dwc3";
- reg = <0x34000000 0x10000>;
- interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
- dr_mode = "peripheral";
- };
+ compatible = "intel,keembay-dwc3";
+ clocks = <&scmi_clk KEEM_BAY_A53_AUX_USB>,
+ <&scmi_clk KEEM_BAY_A53_AUX_USB_REF>,
+ <&scmi_clk KEEM_BAY_A53_AUX_USB_ALT_REF>,
+ <&scmi_clk KEEM_BAY_A53_AUX_USB_SUSPEND>;
+ clock-names = "async_master", "ref", "alt_ref", "suspend";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usb@34000000 {
+ compatible = "snps,dwc3";
+ reg = <0x34000000 0x10000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "peripheral";
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/ite,it5205.yaml b/Documentation/devicetree/bindings/usb/ite,it5205.yaml
index 36ec4251b5f2..889710733de5 100644
--- a/Documentation/devicetree/bindings/usb/ite,it5205.yaml
+++ b/Documentation/devicetree/bindings/usb/ite,it5205.yaml
@@ -54,19 +54,19 @@ examples:
#size-cells = <0>;
typec-mux@48 {
- compatible = "ite,it5205";
- reg = <0x48>;
+ compatible = "ite,it5205";
+ reg = <0x48>;
- mode-switch;
- orientation-switch;
+ mode-switch;
+ orientation-switch;
- vcc-supply = <&mt6359_vibr_ldo_reg>;
+ vcc-supply = <&mt6359_vibr_ldo_reg>;
- port {
- it5205_usbss_sbu: endpoint {
- remote-endpoint = <&typec_controller>;
+ port {
+ it5205_usbss_sbu: endpoint {
+ remote-endpoint = <&typec_controller>;
+ };
};
- };
};
};
...
diff --git a/Documentation/devicetree/bindings/usb/maxim,max33359.yaml b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml
index 20b62228371b..3de4dc40b791 100644
--- a/Documentation/devicetree/bindings/usb/maxim,max33359.yaml
+++ b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml
@@ -13,8 +13,12 @@ description: Maxim TCPCI Type-C PD controller
properties:
compatible:
- enum:
- - maxim,max33359
+ oneOf:
+ - enum:
+ - maxim,max33359
+ - items:
+ - const: maxim,max77759-tcpci
+ - const: maxim,max33359
reg:
maxItems: 1
@@ -70,6 +74,7 @@ examples:
PDO_FIXED_DUAL_ROLE)
PDO_FIXED(9000, 2000, 0)>;
sink-bc12-completion-time-ms = <500>;
+ pd-revision = /bits/ 8 <0x03 0x01 0x01 0x08>;
};
};
};
diff --git a/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml b/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
index 8e0f4ecc010d..6edb1fc5044e 100644
--- a/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
+++ b/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
@@ -50,18 +50,18 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- #include <dt-bindings/interrupt-controller/irq.h>
- spi {
- #address-cells = <1>;
- #size-cells = <0>;
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
- udc@0 {
- compatible = "maxim,max3420-udc";
- reg = <0>;
- interrupt-parent = <&gpio>;
- interrupts = <0 IRQ_TYPE_EDGE_FALLING>, <10 IRQ_TYPE_EDGE_BOTH>;
- interrupt-names = "udc", "vbus";
- spi-max-frequency = <12500000>;
- };
- };
+ udc@0 {
+ compatible = "maxim,max3420-udc";
+ reg = <0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>, <10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "udc", "vbus";
+ spi-max-frequency = <12500000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml
index 90296613b3a5..c0e313c70bba 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra210-xusb.yaml
@@ -189,7 +189,7 @@ examples:
#size-cells = <0>;
ethernet@1 {
- compatible = "usb955,9ff";
- reg = <1>;
+ compatible = "usb955,9ff";
+ reg = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index 935e204b607b..a2b3cf625e5b 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -16,6 +16,7 @@ properties:
- qcom,ipq4019-dwc3
- qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
+ - qcom,ipq5424-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8064-dwc3
- qcom,ipq8074-dwc3
@@ -26,6 +27,7 @@ properties:
- qcom,msm8998-dwc3
- qcom,qcm2290-dwc3
- qcom,qcs404-dwc3
+ - qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3
- qcom,sa8775p-dwc3
@@ -341,6 +343,7 @@ allOf:
contains:
enum:
- qcom,qcm2290-dwc3
+ - qcom,qcs615-dwc3
- qcom,sar2130p-dwc3
- qcom,sc8180x-dwc3
- qcom,sc8180x-dwc3-mp
@@ -453,8 +456,10 @@ allOf:
then:
properties:
interrupts:
+ minItems: 3
maxItems: 4
interrupt-names:
+ minItems: 3
items:
- const: pwr_event
- const: dp_hs_phy_irq
@@ -469,6 +474,7 @@ allOf:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
- qcom,msm8994-dwc3
+ - qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3
- qcom,sa8775p-dwc3
diff --git a/Documentation/devicetree/bindings/usb/renesas,rzv2m-usb3drd.yaml b/Documentation/devicetree/bindings/usb/renesas,rzv2m-usb3drd.yaml
index ff625600d9af..b87e139c29e5 100644
--- a/Documentation/devicetree/bindings/usb/renesas,rzv2m-usb3drd.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,rzv2m-usb3drd.yaml
@@ -104,26 +104,26 @@ examples:
#size-cells = <1>;
usb3host: usb@85060000 {
- compatible = "renesas,r9a09g011-xhci",
- "renesas,rzv2m-xhci";
- reg = <0x85060000 0x2000>;
- interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD R9A09G011_USB_ACLK_H>,
- <&cpg CPG_MOD R9A09G011_USB_PCLK>;
- clock-names = "axi", "reg";
- power-domains = <&cpg>;
- resets = <&cpg R9A09G011_USB_ARESETN_H>;
+ compatible = "renesas,r9a09g011-xhci",
+ "renesas,rzv2m-xhci";
+ reg = <0x85060000 0x2000>;
+ interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A09G011_USB_ACLK_H>,
+ <&cpg CPG_MOD R9A09G011_USB_PCLK>;
+ clock-names = "axi", "reg";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A09G011_USB_ARESETN_H>;
};
usb3peri: usb3peri@85070000 {
- compatible = "renesas,r9a09g011-usb3-peri",
- "renesas,rzv2m-usb3-peri";
- reg = <0x85070000 0x400>;
- interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD R9A09G011_USB_ACLK_P>,
- <&cpg CPG_MOD R9A09G011_USB_PCLK>;
- clock-names = "axi", "reg";
- power-domains = <&cpg>;
- resets = <&cpg R9A09G011_USB_ARESETN_P>;
+ compatible = "renesas,r9a09g011-usb3-peri",
+ "renesas,rzv2m-usb3-peri";
+ reg = <0x85070000 0x400>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A09G011_USB_ACLK_P>,
+ <&cpg CPG_MOD R9A09G011_USB_PCLK>;
+ clock-names = "axi", "reg";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A09G011_USB_ARESETN_P>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
index b2b811a0ade8..4e56e4ffeaf2 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
@@ -132,19 +132,19 @@ examples:
usb-role-switch;
ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- usb3_hs_ep: endpoint {
- remote-endpoint = <&hs_ep>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ usb3_hs_ep: endpoint {
+ remote-endpoint = <&hs_ep>;
};
- port@1 {
- reg = <1>;
- usb3_role_switch: endpoint {
- remote-endpoint = <&hd3ss3220_out_ep>;
- };
+ };
+ port@1 {
+ reg = <1>;
+ usb3_role_switch: endpoint {
+ remote-endpoint = <&hd3ss3220_out_ep>;
};
+ };
};
};
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
index b23ef29bf794..980f325341d4 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -26,6 +26,7 @@ properties:
- renesas,usbhs-r9a07g043 # RZ/G2UL and RZ/Five
- renesas,usbhs-r9a07g044 # RZ/G2{L,LC}
- renesas,usbhs-r9a07g054 # RZ/V2L
+ - renesas,usbhs-r9a08g045 # RZ/G3S
- const: renesas,rzg2l-usbhs
- items:
@@ -130,6 +131,7 @@ allOf:
- renesas,usbhs-r9a07g043
- renesas,usbhs-r9a07g044
- renesas,usbhs-r9a07g054
+ - renesas,usbhs-r9a08g045
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
new file mode 100644
index 000000000000..c956053fd036
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
@@ -0,0 +1,415 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/snps,dwc3-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare USB3 Controller common properties
+
+maintainers:
+ - Felipe Balbi <balbi@kernel.org>
+
+description:
+ Defines the properties of the DWC3 core as being embedded in either a
+ vendor-specific implementation or as a standalone component.
+
+allOf:
+ - $ref: usb-drd.yaml#
+ - if:
+ properties:
+ dr_mode:
+ const: peripheral
+
+ required:
+ - dr_mode
+ then:
+ $ref: usb.yaml#
+ else:
+ $ref: usb-xhci.yaml#
+
+properties:
+ extcon:
+ maxItems: 1
+ deprecated: true
+
+ usb-phy:
+ minItems: 1
+ items:
+ - description: USB2/HS PHY
+ - description: USB3/SS PHY
+
+ phys:
+ minItems: 1
+ maxItems: 19
+
+ phy-names:
+ minItems: 1
+ maxItems: 19
+ oneOf:
+ - items:
+ enum: [ usb2-phy, usb3-phy ]
+ - items:
+ pattern: "^usb(2-([0-9]|1[0-4])|3-[0-3])$"
+
+ snps,usb2-lpm-disable:
+ description: Indicate if we don't want to enable USB2 HW LPM for host
+ mode.
+ type: boolean
+
+ snps,usb3_lpm_capable:
+ description: Determines if platform is USB3 LPM capable
+ type: boolean
+
+ snps,usb2-gadget-lpm-disable:
+ description: Indicate if we don't want to enable USB2 HW LPM for gadget
+ mode.
+ type: boolean
+
+ snps,dis-start-transfer-quirk:
+ description:
+ When set, disable isoc START TRANSFER command failure SW work-around
+ for DWC_usb31 version 1.70a-ea06 and prior.
+ type: boolean
+
+ snps,disable_scramble_quirk:
+ description:
+ True when SW should disable data scrambling. Only really useful for FPGA
+ builds.
+ type: boolean
+
+ snps,has-lpm-erratum:
+ description: True when DWC3 was configured with LPM Erratum enabled
+ type: boolean
+
+ snps,lpm-nyet-threshold:
+ description: LPM NYET threshold
+ $ref: /schemas/types.yaml#/definitions/uint8
+
+ snps,u2exit_lfps_quirk:
+ description: Set if we want to enable u2exit lfps quirk
+ type: boolean
+
+ snps,u2ss_inp3_quirk:
+ description: Set if we enable P3 OK for U2/SS Inactive quirk
+ type: boolean
+
+ snps,req_p1p2p3_quirk:
+ description:
+ When set, the core will always request for P1/P2/P3 transition sequence.
+ type: boolean
+
+ snps,del_p1p2p3_quirk:
+ description:
+ When set core will delay P1/P2/P3 until a certain amount of 8B10B errors
+ occur.
+ type: boolean
+
+ snps,del_phy_power_chg_quirk:
+ description: When set core will delay PHY power change from P0 to P1/P2/P3.
+ type: boolean
+
+ snps,lfps_filter_quirk:
+ description: When set core will filter LFPS reception.
+ type: boolean
+
+ snps,rx_detect_poll_quirk:
+ description:
+ when set core will disable a 400us delay to start Polling LFPS after
+ RX.Detect.
+ type: boolean
+
+ snps,tx_de_emphasis_quirk:
+ description: When set core will set Tx de-emphasis value
+ type: boolean
+
+ snps,tx_de_emphasis:
+ description:
+ The value driven to the PHY is controlled by the LTSSM during USB3
+ Compliance mode.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ enum:
+ - 0 # -6dB de-emphasis
+ - 1 # -3.5dB de-emphasis
+ - 2 # No de-emphasis
+
+ snps,dis_u3_susphy_quirk:
+ description: When set core will disable USB3 suspend phy
+ type: boolean
+
+ snps,dis_u2_susphy_quirk:
+ description: When set core will disable USB2 suspend phy
+ type: boolean
+
+ snps,dis_enblslpm_quirk:
+ description:
+ When set clears the enblslpm in GUSB2PHYCFG, disabling the suspend signal
+ to the PHY.
+ type: boolean
+
+ snps,dis-u1-entry-quirk:
+ description: Set if link entering into U1 needs to be disabled
+ type: boolean
+
+ snps,dis-u2-entry-quirk:
+ description: Set if link entering into U2 needs to be disabled
+ type: boolean
+
+ snps,dis_rxdet_inp3_quirk:
+ description:
+ When set core will disable receiver detection in PHY P3 power state.
+ type: boolean
+
+ snps,dis-u2-freeclk-exists-quirk:
+ description:
+ When set, clear the u2_freeclk_exists in GUSB2PHYCFG, specify that USB2
+ PHY doesn't provide a free-running PHY clock.
+ type: boolean
+
+ snps,dis-del-phy-power-chg-quirk:
+ description:
+ When set core will change PHY power from P0 to P1/P2/P3 without delay.
+ type: boolean
+
+ snps,dis-tx-ipgap-linecheck-quirk:
+ description: When set, disable u2mac linestate check during HS transmit
+ type: boolean
+
+ snps,parkmode-disable-ss-quirk:
+ description:
+ When set, all SuperSpeed bus instances in park mode are disabled.
+ type: boolean
+
+ snps,parkmode-disable-hs-quirk:
+ description:
+ When set, all HighSpeed bus instances in park mode are disabled.
+ type: boolean
+
+ snps,dis_metastability_quirk:
+ description:
+ When set, disable metastability workaround. CAUTION! Use only if you are
+ absolutely sure of it.
+ type: boolean
+
+ snps,dis-split-quirk:
+ description:
+ When set, change the way URBs are handled by the driver. Needed to
+ avoid -EPROTO errors with usbhid on some devices (Hikey 970).
+ type: boolean
+
+ snps,gfladj-refclk-lpm-sel-quirk:
+ description:
+ When set, run the SOF/ITP counter based on ref_clk.
+ type: boolean
+
+ snps,resume-hs-terminations:
+ description:
+ Fix the issue of HS terminations CRC error on resume by enabling this
+ quirk. When set, all the termsel, xcvrsel, opmode becomes 0 during end
+ of resume. This option is to support certain legacy ULPI PHYs.
+ type: boolean
+
+ snps,ulpi-ext-vbus-drv:
+ description:
+ Some ULPI USB PHY does not support internal VBUS supply, and driving
+ the CPEN pin, requires the configuration of the ulpi DRVVBUSEXTERNAL
+ bit. When set, the xhci host will configure the USB2 PHY drives VBUS
+ with an external supply.
+ type: boolean
+
+ snps,is-utmi-l1-suspend:
+ description:
+ True when DWC3 asserts output signal utmi_l1_suspend_n, false when
+ asserts utmi_sleep_n.
+ type: boolean
+
+ snps,hird-threshold:
+ description: HIRD threshold
+ $ref: /schemas/types.yaml#/definitions/uint8
+
+ snps,hsphy_interface:
+ description:
+ High-Speed PHY interface selection between UTMI+ and ULPI when the
+ DWC_USB3_HSPHY_INTERFACE has value 3.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [utmi, ulpi]
+
+ snps,quirk-frame-length-adjustment:
+ description:
+ Value for GFLADJ_30MHZ field of GFLADJ register for post-silicon frame
+ length adjustment when the fladj_30mhz_sdbnd signal is invalid or
+ incorrect.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 0x3f
+
+ snps,ref-clock-period-ns:
+ description:
+ Value for REFCLKPER field of GUCTL register for reference clock period in
+ nanoseconds, when the hardware set default does not match the actual
+ clock.
+
+ This binding is deprecated. Instead, provide an appropriate reference clock.
+ minimum: 8
+ maximum: 62
+ deprecated: true
+
+ snps,rx-thr-num-pkt:
+ description:
+ USB RX packet threshold count. In host mode, this field specifies
+ the space that must be available in the RX FIFO before the core can
+ start the corresponding USB RX transaction (burst).
+ In device mode, this field specifies the space that must be
+ available in the RX FIFO before the core can send ERDY for a
+ flow-controlled endpoint. It is only used for SuperSpeed.
+ The valid values for this field are from 1 to 15. (DWC3 SuperSpeed
+ USB 3.0 Controller Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 15
+
+ snps,rx-max-burst:
+ description:
+ Max USB RX burst size. In host mode, this field specifies the
+ Maximum Bulk IN burst the DWC_usb3 core can perform. When the system
+ bus is slower than the USB, RX FIFO can overrun during a long burst.
+ You can program a smaller value to this field to limit the RX burst
+ size that the core can perform. It only applies to SS Bulk,
+ Isochronous, and Interrupt IN endpoints in the host mode.
+ In device mode, this field specifies the NUMP value that is sent in
+ ERDY for an OUT endpoint.
+ The valid values for this field are from 1 to 16. (DWC3 SuperSpeed
+ USB 3.0 Controller Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,tx-thr-num-pkt:
+ description:
+ USB TX packet threshold count. This field specifies the number of
+ packets that must be in the TXFIFO before the core can start
+ transmission for the corresponding USB transaction (burst).
+ This count is valid in both host and device modes. It is only used
+ for SuperSpeed operation.
+ Valid values are from 1 to 15. (DWC3 SuperSpeed USB 3.0 Controller
+ Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 15
+
+ snps,tx-max-burst:
+ description:
+ Max USB TX burst size. When the system bus is slower than the USB,
+ TX FIFO can underrun during a long burst. Program a smaller value
+ to this field to limit the TX burst size that the core can execute.
+ In Host mode, it only applies to SS Bulk, Isochronous, and Interrupt
+ OUT endpoints. This value is not used in device mode.
+ Valid values are from 1 to 16. (DWC3 SuperSpeed USB 3.0 Controller
+ Databook)
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,rx-thr-num-pkt-prd:
+ description:
+ Periodic ESS RX packet threshold count (host mode only). Set this and
+ snps,rx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.4) to enable periodic ESS RX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,rx-max-burst-prd:
+ description:
+ Max periodic ESS RX burst size (host mode only). Set this and
+ snps,rx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.4) to enable periodic ESS RX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,tx-thr-num-pkt-prd:
+ description:
+ Periodic ESS TX packet threshold count (host mode only). Set this and
+ snps,tx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.3) to enable periodic ESS TX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,tx-max-burst-prd:
+ description:
+ Max periodic ESS TX burst size (host mode only). Set this and
+ snps,tx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.3) to enable periodic ESS TX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ tx-fifo-resize:
+ description: Determines if the TX fifos can be dynamically resized depending
+ on the number of IN endpoints used and if bursting is supported. This
+ may help improve bandwidth on platforms with higher system latencies, as
+ increased fifo space allows for the controller to prefetch data into its
+ internal memory.
+ type: boolean
+
+ tx-fifo-max-num:
+ description: Specifies the max number of packets the txfifo resizing logic
+ can account for when higher endpoint bursting is used. (bMaxBurst > 6) The
+ higher the number, the more fifo space the txfifo resizing logic will
+ allocate for that endpoint.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 3
+
+ snps,incr-burst-type-adjustment:
+ description:
+ Value for INCR burst type of GSBUSCFG0 register, undefined length INCR
+ burst type enable and INCRx type. A single value means INCRX burst mode
+ enabled. If more than one value specified, undefined length INCR burst
+ type will be enabled with burst lengths utilized up to the maximum
+ of the values passed in this property.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ enum: [1, 4, 8, 16, 32, 64, 128, 256]
+
+ num-hc-interrupters:
+ maximum: 8
+ default: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ This port is used with the 'usb-role-switch' property to connect the
+ dwc3 to type C connector.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ description:
+ Those ports should be used with any connector to the data bus of this
+ controller using the OF graph bindings specified if the "usb-role-switch"
+ property is used.
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: High Speed (HS) data bus.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) data bus.
+
+ wakeup-source:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable USB remote wakeup.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: true
+...
+
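The schema above only describes the devicetree side. As a purely illustrative aid, here is a minimal sketch of how a driver could read a few of these properties with the generic device-property helpers; the property names and value ranges come from the schema above, while the function name and the choice of properties are hypothetical and not taken from the in-tree dwc3 driver::

  #include <linux/device.h>
  #include <linux/property.h>
  #include <linux/types.h>

  /* Hypothetical sketch: boolean quirks map to device_property_read_bool(),
   * numeric tunables to device_property_read_u8()/_u32() within the limits
   * stated in the schema.
   */
  static void example_read_dwc3_props(struct device *dev)
  {
          bool dis_u3_susphy;
          u8 rx_thr_num_pkt = 0;
          u32 fladj = 0;

          /* type: boolean -> true when the property is present */
          dis_u3_susphy = device_property_read_bool(dev,
                                  "snps,dis_u3_susphy_quirk");

          /* uint8, valid range 1..15 per the schema */
          device_property_read_u8(dev, "snps,rx-thr-num-pkt",
                                  &rx_thr_num_pkt);

          /* uint32, 0..0x3f per the schema */
          device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
                                   &fladj);

          dev_dbg(dev, "dis_u3_susphy=%d rx_thr=%u fladj=%u\n",
                  dis_u3_susphy, rx_thr_num_pkt, fladj);
  }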
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
index 1cd0ca90127d..4380bb6fa2f0 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -15,18 +15,7 @@ description:
compatible string.
allOf:
- - $ref: usb-drd.yaml#
- - if:
- properties:
- dr_mode:
- const: peripheral
-
- required:
- - dr_mode
- then:
- $ref: usb.yaml#
- else:
- $ref: usb-xhci.yaml#
+ - $ref: snps,dwc3-common.yaml#
properties:
compatible:
@@ -70,32 +59,9 @@ properties:
dma-coherent: true
- extcon:
- maxItems: 1
- deprecated: true
-
iommus:
maxItems: 1
- usb-phy:
- minItems: 1
- items:
- - description: USB2/HS PHY
- - description: USB3/SS PHY
-
- phys:
- minItems: 1
- maxItems: 19
-
- phy-names:
- minItems: 1
- maxItems: 19
- oneOf:
- - items:
- enum: [ usb2-phy, usb3-phy ]
- - items:
- pattern: "^usb(2-([0-9]|1[0-4])|3-[0-3])$"
-
power-domains:
description:
The DWC3 has 2 power-domains. The power management unit (PMU) and
@@ -109,361 +75,6 @@ properties:
resets:
minItems: 1
- snps,usb2-lpm-disable:
- description: Indicate if we don't want to enable USB2 HW LPM for host
- mode.
- type: boolean
-
- snps,usb3_lpm_capable:
- description: Determines if platform is USB3 LPM capable
- type: boolean
-
- snps,usb2-gadget-lpm-disable:
- description: Indicate if we don't want to enable USB2 HW LPM for gadget
- mode.
- type: boolean
-
- snps,dis-start-transfer-quirk:
- description:
- When set, disable isoc START TRANSFER command failure SW work-around
- for DWC_usb31 version 1.70a-ea06 and prior.
- type: boolean
-
- snps,disable_scramble_quirk:
- description:
- True when SW should disable data scrambling. Only really useful for FPGA
- builds.
- type: boolean
-
- snps,has-lpm-erratum:
- description: True when DWC3 was configured with LPM Erratum enabled
- type: boolean
-
- snps,lpm-nyet-threshold:
- description: LPM NYET threshold
- $ref: /schemas/types.yaml#/definitions/uint8
-
- snps,u2exit_lfps_quirk:
- description: Set if we want to enable u2exit lfps quirk
- type: boolean
-
- snps,u2ss_inp3_quirk:
- description: Set if we enable P3 OK for U2/SS Inactive quirk
- type: boolean
-
- snps,req_p1p2p3_quirk:
- description:
- When set, the core will always request for P1/P2/P3 transition sequence.
- type: boolean
-
- snps,del_p1p2p3_quirk:
- description:
- When set core will delay P1/P2/P3 until a certain amount of 8B10B errors
- occur.
- type: boolean
-
- snps,del_phy_power_chg_quirk:
- description: When set core will delay PHY power change from P0 to P1/P2/P3.
- type: boolean
-
- snps,lfps_filter_quirk:
- description: When set core will filter LFPS reception.
- type: boolean
-
- snps,rx_detect_poll_quirk:
- description:
- when set core will disable a 400us delay to start Polling LFPS after
- RX.Detect.
- type: boolean
-
- snps,tx_de_emphasis_quirk:
- description: When set core will set Tx de-emphasis value
- type: boolean
-
- snps,tx_de_emphasis:
- description:
- The value driven to the PHY is controlled by the LTSSM during USB3
- Compliance mode.
- $ref: /schemas/types.yaml#/definitions/uint8
- enum:
- - 0 # -6dB de-emphasis
- - 1 # -3.5dB de-emphasis
- - 2 # No de-emphasis
-
- snps,dis_u3_susphy_quirk:
- description: When set core will disable USB3 suspend phy
- type: boolean
-
- snps,dis_u2_susphy_quirk:
- description: When set core will disable USB2 suspend phy
- type: boolean
-
- snps,dis_enblslpm_quirk:
- description:
- When set clears the enblslpm in GUSB2PHYCFG, disabling the suspend signal
- to the PHY.
- type: boolean
-
- snps,dis-u1-entry-quirk:
- description: Set if link entering into U1 needs to be disabled
- type: boolean
-
- snps,dis-u2-entry-quirk:
- description: Set if link entering into U2 needs to be disabled
- type: boolean
-
- snps,dis_rxdet_inp3_quirk:
- description:
- When set core will disable receiver detection in PHY P3 power state.
- type: boolean
-
- snps,dis-u2-freeclk-exists-quirk:
- description:
- When set, clear the u2_freeclk_exists in GUSB2PHYCFG, specify that USB2
- PHY doesn't provide a free-running PHY clock.
- type: boolean
-
- snps,dis-del-phy-power-chg-quirk:
- description:
- When set core will change PHY power from P0 to P1/P2/P3 without delay.
- type: boolean
-
- snps,dis-tx-ipgap-linecheck-quirk:
- description: When set, disable u2mac linestate check during HS transmit
- type: boolean
-
- snps,parkmode-disable-ss-quirk:
- description:
- When set, all SuperSpeed bus instances in park mode are disabled.
- type: boolean
-
- snps,parkmode-disable-hs-quirk:
- description:
- When set, all HighSpeed bus instances in park mode are disabled.
- type: boolean
-
- snps,dis_metastability_quirk:
- description:
- When set, disable metastability workaround. CAUTION! Use only if you are
- absolutely sure of it.
- type: boolean
-
- snps,dis-split-quirk:
- description:
- When set, change the way URBs are handled by the driver. Needed to
- avoid -EPROTO errors with usbhid on some devices (Hikey 970).
- type: boolean
-
- snps,gfladj-refclk-lpm-sel-quirk:
- description:
- When set, run the SOF/ITP counter based on ref_clk.
- type: boolean
-
- snps,resume-hs-terminations:
- description:
- Fix the issue of HS terminations CRC error on resume by enabling this
- quirk. When set, all the termsel, xcvrsel, opmode becomes 0 during end
- of resume. This option is to support certain legacy ULPI PHYs.
- type: boolean
-
- snps,ulpi-ext-vbus-drv:
- description:
- Some ULPI USB PHY does not support internal VBUS supply, and driving
- the CPEN pin, requires the configuration of the ulpi DRVVBUSEXTERNAL
- bit. When set, the xhci host will configure the USB2 PHY drives VBUS
- with an external supply.
- type: boolean
-
- snps,is-utmi-l1-suspend:
- description:
- True when DWC3 asserts output signal utmi_l1_suspend_n, false when
- asserts utmi_sleep_n.
- type: boolean
-
- snps,hird-threshold:
- description: HIRD threshold
- $ref: /schemas/types.yaml#/definitions/uint8
-
- snps,hsphy_interface:
- description:
- High-Speed PHY interface selection between UTMI+ and ULPI when the
- DWC_USB3_HSPHY_INTERFACE has value 3.
- $ref: /schemas/types.yaml#/definitions/string
- enum: [utmi, ulpi]
-
- snps,quirk-frame-length-adjustment:
- description:
- Value for GFLADJ_30MHZ field of GFLADJ register for post-silicon frame
- length adjustment when the fladj_30mhz_sdbnd signal is invalid or
- incorrect.
- $ref: /schemas/types.yaml#/definitions/uint32
- minimum: 0
- maximum: 0x3f
-
- snps,ref-clock-period-ns:
- description:
- Value for REFCLKPER field of GUCTL register for reference clock period in
- nanoseconds, when the hardware set default does not match the actual
- clock.
-
- This binding is deprecated. Instead, provide an appropriate reference clock.
- minimum: 8
- maximum: 62
- deprecated: true
-
- snps,rx-thr-num-pkt:
- description:
- USB RX packet threshold count. In host mode, this field specifies
- the space that must be available in the RX FIFO before the core can
- start the corresponding USB RX transaction (burst).
- In device mode, this field specifies the space that must be
- available in the RX FIFO before the core can send ERDY for a
- flow-controlled endpoint. It is only used for SuperSpeed.
- The valid values for this field are from 1 to 15. (DWC3 SuperSpeed
- USB 3.0 Controller Databook)
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 15
-
- snps,rx-max-burst:
- description:
- Max USB RX burst size. In host mode, this field specifies the
- Maximum Bulk IN burst the DWC_usb3 core can perform. When the system
- bus is slower than the USB, RX FIFO can overrun during a long burst.
- You can program a smaller value to this field to limit the RX burst
- size that the core can perform. It only applies to SS Bulk,
- Isochronous, and Interrupt IN endpoints in the host mode.
- In device mode, this field specifies the NUMP value that is sent in
- ERDY for an OUT endpoint.
- The valid values for this field are from 1 to 16. (DWC3 SuperSpeed
- USB 3.0 Controller Databook)
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- snps,tx-thr-num-pkt:
- description:
- USB TX packet threshold count. This field specifies the number of
- packets that must be in the TXFIFO before the core can start
- transmission for the corresponding USB transaction (burst).
- This count is valid in both host and device modes. It is only used
- for SuperSpeed operation.
- Valid values are from 1 to 15. (DWC3 SuperSpeed USB 3.0 Controller
- Databook)
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 15
-
- snps,tx-max-burst:
- description:
- Max USB TX burst size. When the system bus is slower than the USB,
- TX FIFO can underrun during a long burst. Program a smaller value
- to this field to limit the TX burst size that the core can execute.
- In Host mode, it only applies to SS Bulk, Isochronous, and Interrupt
- OUT endpoints. This value is not used in device mode.
- Valid values are from 1 to 16. (DWC3 SuperSpeed USB 3.0 Controller
- Databook)
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- snps,rx-thr-num-pkt-prd:
- description:
- Periodic ESS RX packet threshold count (host mode only). Set this and
- snps,rx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
- programming guide section 1.2.4) to enable periodic ESS RX threshold.
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- snps,rx-max-burst-prd:
- description:
- Max periodic ESS RX burst size (host mode only). Set this and
- snps,rx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
- programming guide section 1.2.4) to enable periodic ESS RX threshold.
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- snps,tx-thr-num-pkt-prd:
- description:
- Periodic ESS TX packet threshold count (host mode only). Set this and
- snps,tx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
- programming guide section 1.2.3) to enable periodic ESS TX threshold.
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- snps,tx-max-burst-prd:
- description:
- Max periodic ESS TX burst size (host mode only). Set this and
- snps,tx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
- programming guide section 1.2.3) to enable periodic ESS TX threshold.
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 1
- maximum: 16
-
- tx-fifo-resize:
- description: Determines if the TX fifos can be dynamically resized depending
- on the number of IN endpoints used and if bursting is supported. This
- may help improve bandwidth on platforms with higher system latencies, as
- increased fifo space allows for the controller to prefetch data into its
- internal memory.
- type: boolean
-
- tx-fifo-max-num:
- description: Specifies the max number of packets the txfifo resizing logic
- can account for when higher endpoint bursting is used. (bMaxBurst > 6) The
- higher the number, the more fifo space the txfifo resizing logic will
- allocate for that endpoint.
- $ref: /schemas/types.yaml#/definitions/uint8
- minimum: 3
-
- snps,incr-burst-type-adjustment:
- description:
- Value for INCR burst type of GSBUSCFG0 register, undefined length INCR
- burst type enable and INCRx type. A single value means INCRX burst mode
- enabled. If more than one value specified, undefined length INCR burst
- type will be enabled with burst lengths utilized up to the maximum
- of the values passed in this property.
- $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 1
- maxItems: 8
- uniqueItems: true
- items:
- enum: [1, 4, 8, 16, 32, 64, 128, 256]
-
- num-hc-interrupters:
- maximum: 8
- default: 1
-
- port:
- $ref: /schemas/graph.yaml#/properties/port
- description:
- This port is used with the 'usb-role-switch' property to connect the
- dwc3 to type C connector.
-
- ports:
- $ref: /schemas/graph.yaml#/properties/ports
- description:
- Those ports should be used with any connector to the data bus of this
- controller using the OF graph bindings specified if the "usb-role-switch"
- property is used.
-
- properties:
- port@0:
- $ref: /schemas/graph.yaml#/properties/port
- description: High Speed (HS) data bus.
-
- port@1:
- $ref: /schemas/graph.yaml#/properties/port
- description: Super Speed (SS) data bus.
-
- wakeup-source:
- $ref: /schemas/types.yaml#/definitions/flag
- description:
- Enable USB remote wakeup.
-
unevaluatedProperties: false
required:
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
index 54c6586cb56d..bec1c8047bc0 100644
--- a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
@@ -56,26 +56,26 @@ examples:
#size-cells = <0>;
hd3ss3220@47 {
- compatible = "ti,hd3ss3220";
- reg = <0x47>;
- interrupt-parent = <&gpio6>;
- interrupts = <3>;
+ compatible = "ti,hd3ss3220";
+ reg = <0x47>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <3>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- hd3ss3220_in_ep: endpoint {
- remote-endpoint = <&ss_ep>;
- };
- };
- port@1 {
- reg = <1>;
- hd3ss3220_out_ep: endpoint {
- remote-endpoint = <&usb3_role_switch>;
- };
- };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ hd3ss3220_in_ep: endpoint {
+ remote-endpoint = <&ss_ep>;
+ };
};
+ port@1 {
+ reg = <1>;
+ hd3ss3220_out_ep: endpoint {
+ remote-endpoint = <&usb3_role_switch>;
+ };
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/usb/ti,tusb73x0-pci.yaml b/Documentation/devicetree/bindings/usb/ti,tusb73x0-pci.yaml
index ddda734f36fb..c4a91b3d6612 100644
--- a/Documentation/devicetree/bindings/usb/ti,tusb73x0-pci.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,tusb73x0-pci.yaml
@@ -48,8 +48,8 @@ examples:
device_type = "pci";
usb@0 {
- compatible = "pci104c,8241";
- reg = <0x0 0x0 0x0 0x0 0x0>;
- ti,pwron-active-high;
+ compatible = "pci104c,8241";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ ti,pwron-active-high;
};
};
diff --git a/Documentation/devicetree/bindings/usb/ti,usb8020b.yaml b/Documentation/devicetree/bindings/usb/ti,usb8020b.yaml
index 8ef117793e11..61217da8b2f3 100644
--- a/Documentation/devicetree/bindings/usb/ti,usb8020b.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,usb8020b.yaml
@@ -51,19 +51,19 @@ examples:
/* 2.0 hub on port 1 */
hub_2_0: hub@1 {
- compatible = "usb451,8027";
- reg = <1>;
- peer-hub = <&hub_3_0>;
- reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
- vdd-supply = <&usb_hub_fixed_3v3>;
+ compatible = "usb451,8027";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&usb_hub_fixed_3v3>;
};
/* 3.0 hub on port 2 */
hub_3_0: hub@2 {
- compatible = "usb451,8025";
- reg = <2>;
- peer-hub = <&hub_2_0>;
- reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
- vdd-supply = <&usb_hub_fixed_3v3>;
+ compatible = "usb451,8025";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&usb_hub_fixed_3v3>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml
index c2e29bd61e11..bce730a5e237 100644
--- a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml
@@ -51,17 +51,17 @@ examples:
/* 2.0 hub on port 1 */
hub_2_0: hub@1 {
- compatible = "usb451,8142";
- reg = <1>;
- peer-hub = <&hub_3_0>;
- reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ compatible = "usb451,8142";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
/* 3.0 hub on port 2 */
hub_3_0: hub@2 {
- compatible = "usb451,8140";
- reg = <2>;
- peer-hub = <&hub_2_0>;
- reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ compatible = "usb451,8140";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 60592ebe4382..5079ca6ce1d1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -218,6 +218,8 @@ patternProperties:
description: Shenzhen BigTree Tech Co., LTD
"^bitmain,.*":
description: Bitmain Technologies
+ "^blaize,.*":
+ description: Blaize, Inc.
"^blutek,.*":
description: BluTek Power
"^boe,.*":
@@ -575,6 +577,8 @@ patternProperties:
description: Gemtek Technology Co., Ltd.
"^genesys,.*":
description: Genesys Logic, Inc.
+ "^genexis,.*":
+ description: Genexis BV/AB
"^geniatech,.*":
description: Geniatech, Inc.
"^giantec,.*":
@@ -1338,6 +1342,8 @@ patternProperties:
description: Siemens AG
"^sifive,.*":
description: SiFive, Inc.
+ "^siflower,.*":
+ description: Shanghai Siflower Communication Co.
"^sigma,.*":
description: Sigma Designs, Inc.
"^sii,.*":
diff --git a/Documentation/devicetree/bindings/w1/maxim,ds2482.yaml b/Documentation/devicetree/bindings/w1/maxim,ds2482.yaml
index 422becc6e1fa..fe6b3f9a3f8b 100644
--- a/Documentation/devicetree/bindings/w1/maxim,ds2482.yaml
+++ b/Documentation/devicetree/bindings/w1/maxim,ds2482.yaml
@@ -25,6 +25,8 @@ properties:
reg:
maxItems: 1
+ vcc-supply: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 34896a39fa91..49e2b807db0b 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,kpss-wdt-ipq4019
- qcom,apss-wdt-ipq5018
- qcom,apss-wdt-ipq5332
+ - qcom,apss-wdt-ipq5424
- qcom,apss-wdt-ipq9574
- qcom,apss-wdt-msm8226
- qcom,apss-wdt-msm8974
diff --git a/Documentation/devicetree/of_unittest.rst b/Documentation/devicetree/of_unittest.rst
index 8864b52d1195..a6c05962add3 100644
--- a/Documentation/devicetree/of_unittest.rst
+++ b/Documentation/devicetree/of_unittest.rst
@@ -50,15 +50,13 @@ from 'scripts/dtc/of_unittest_expect --help'.
The Device Tree Source file (drivers/of/unittest-data/testcases.dts) contains
the test data required for executing the unit tests automated in
-drivers/of/unittest.c. Currently, following Device Tree Source Include files
-(.dtsi) are included in testcases.dts::
+drivers/of/unittest.c. See the content of the folder::
- drivers/of/unittest-data/tests-interrupts.dtsi
- drivers/of/unittest-data/tests-platform.dtsi
- drivers/of/unittest-data/tests-phandle.dtsi
- drivers/of/unittest-data/tests-match.dtsi
+ drivers/of/unittest-data/tests-*.dtsi
-When the kernel is build with OF_SELFTEST enabled, then the following make
+for the Device Tree Source Include files (.dtsi) included in testcases.dts.
+
+When the kernel is built with CONFIG_OF_UNITTEST enabled, then the following make
rule::
$(obj)/%.dtb: $(src)/%.dts FORCE
diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
index bba40158dd5c..8e50b900d51c 100644
--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -272,7 +272,7 @@ The available attributes are:
echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
Async mode without interrupts (caller must poll) can be enabled by
- writing 'async' to it::
+ writing 'async' to it (please see Caveat)::
echo async > /sys/bus/dsa/drivers/crypto/sync_mode
@@ -283,6 +283,13 @@ The available attributes are:
The default mode is 'sync'.
+ Caveat: since the only mechanism that iaa_crypto currently implements
+ for async polling without interrupts is via the 'sync' mode as
+ described earlier, writing 'async' to
+ '/sys/bus/dsa/drivers/crypto/sync_mode' will internally enable the
+ 'sync' mode. This is to ensure correct iaa_crypto behavior until true
+ async polling without interrupts is enabled in iaa_crypto.
+
.. _iaa_default_config:
IAA Default Configuration
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index d594d0ea0e9d..d75728eb05f8 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -404,7 +404,6 @@ PHY
devm_usb_get_phy()
devm_usb_get_phy_by_node()
devm_usb_get_phy_by_phandle()
- devm_usb_put_phy()
PINCTRL
devm_pinctrl_get()
diff --git a/Documentation/driver-api/extcon.rst b/Documentation/driver-api/extcon.rst
new file mode 100644
index 000000000000..d3217b9cdcd5
--- /dev/null
+++ b/Documentation/driver-api/extcon.rst
@@ -0,0 +1,255 @@
+=======================
+Extcon Device Subsystem
+=======================
+
+Overview
+========
+
+The Extcon (External Connector) subsystem provides a unified framework for
+managing external connectors in Linux systems. It allows drivers to report
+the state of external connectors and provides a standardized interface for
+userspace to query and monitor these states.
+
+Extcon is particularly useful in modern devices with multiple connectivity
+options, such as smartphones, tablets, and laptops. It helps manage various
+types of connectors, including:
+
+1. USB connectors (e.g., USB-C, micro-USB)
+2. Charging ports (e.g., fast charging, wireless charging)
+3. Audio jacks (e.g., 3.5mm headphone jack)
+4. Video outputs (e.g., HDMI, DisplayPort)
+5. Docking stations
+
+Real-world examples:
+
+1. Smartphone USB-C port:
+ A single USB-C port on a smartphone can serve multiple functions. Extcon
+ can manage the different states of this port, such as:
+ - USB data connection
+ - Charging (various types like fast charging, USB Power Delivery)
+ - Audio output (USB-C headphones)
+ - Video output (USB-C to HDMI adapter)
+
+2. Laptop docking station:
+ When a laptop is connected to a docking station, multiple connections are
+ made simultaneously. Extcon can handle the state changes for:
+ - Power delivery
+ - External displays
+ - USB hub connections
+ - Ethernet connectivity
+
+3. Wireless charging pad:
+ Extcon can manage the state of a wireless charging connection, allowing
+ the system to respond appropriately when a device is placed on or removed
+ from the charging pad.
+
+4. Smart TV HDMI ports:
+ In a smart TV, Extcon can manage multiple HDMI ports, detecting when
+ devices are connected or disconnected, and potentially identifying the
+ type of device (e.g., gaming console, set-top box, Blu-ray player).
+
+The Extcon framework simplifies the development of drivers for these complex
+scenarios by providing a standardized way to report and query connector
+states, handle mutually exclusive connections, and manage connector
+properties. This allows for more robust and flexible handling of external
+connections in modern devices.
+
+Key Components
+==============
+
+extcon_dev
+----------
+
+The core structure representing an Extcon device::
+
+ struct extcon_dev {
+ const char *name;
+ const unsigned int *supported_cable;
+ const u32 *mutually_exclusive;
+
+ /* Internal data */
+ struct device dev;
+ unsigned int id;
+ struct raw_notifier_head nh_all;
+ struct raw_notifier_head *nh;
+ struct list_head entry;
+ int max_supported;
+ spinlock_t lock;
+ u32 state;
+
+ /* Sysfs related */
+ struct device_type extcon_dev_type;
+ struct extcon_cable *cables;
+ struct attribute_group attr_g_muex;
+ struct attribute **attrs_muex;
+ struct device_attribute *d_attrs_muex;
+ };
+
+Key fields:
+
+- ``name``: Name of the Extcon device
+- ``supported_cable``: Array of supported cable types
+- ``mutually_exclusive``: Array defining mutually exclusive cable types
+ This field is crucial for enforcing hardware constraints. It's an array of
+ 32-bit unsigned integers, where each element represents a set of mutually
+ exclusive cable types. The array should be terminated with a 0.
+
+ For example:
+
+ ::
+
+ static const u32 mutually_exclusive[] = {
+ BIT(0) | BIT(1), /* Cable 0 and 1 are mutually exclusive */
+ BIT(2) | BIT(3) | BIT(4), /* Cables 2, 3, and 4 are mutually exclusive */
+ 0 /* Terminator */
+ };
+
+ In this example, cables 0 and 1 cannot be connected simultaneously, and
+ cables 2, 3, and 4 are also mutually exclusive. This is useful for
+ scenarios like a single port that can either be USB or HDMI, but not both
+ at the same time.
+
+ The Extcon core uses this information to prevent invalid combinations of
+ cable states, ensuring that the reported states are always consistent
+ with the hardware capabilities.
+
+- ``state``: Current state of the device (bitmap of connected cables)
+
+
+extcon_cable
+------------
+
+Represents an individual cable managed by an Extcon device::
+
+ struct extcon_cable {
+ struct extcon_dev *edev;
+ int cable_index;
+ struct attribute_group attr_g;
+ struct device_attribute attr_name;
+ struct device_attribute attr_state;
+ struct attribute *attrs[3];
+ union extcon_property_value usb_propval[EXTCON_PROP_USB_CNT];
+ union extcon_property_value chg_propval[EXTCON_PROP_CHG_CNT];
+ union extcon_property_value jack_propval[EXTCON_PROP_JACK_CNT];
+ union extcon_property_value disp_propval[EXTCON_PROP_DISP_CNT];
+ DECLARE_BITMAP(usb_bits, EXTCON_PROP_USB_CNT);
+ DECLARE_BITMAP(chg_bits, EXTCON_PROP_CHG_CNT);
+ DECLARE_BITMAP(jack_bits, EXTCON_PROP_JACK_CNT);
+ DECLARE_BITMAP(disp_bits, EXTCON_PROP_DISP_CNT);
+ };
+
+Core Functions
+==============
+
+.. kernel-doc:: drivers/extcon/extcon.c
+ :identifiers: extcon_get_state
+
+.. kernel-doc:: drivers/extcon/extcon.c
+ :identifiers: extcon_set_state
+
+.. kernel-doc:: drivers/extcon/extcon.c
+ :identifiers: extcon_set_state_sync
+
+.. kernel-doc:: drivers/extcon/extcon.c
+ :identifiers: extcon_get_property
+
+
+Sysfs Interface
+===============
+
+Extcon devices expose the following sysfs attributes:
+
+- ``name``: Name of the Extcon device
+- ``state``: Current state of all supported cables
+- ``cable.N/name``: Name of the Nth supported cable
+- ``cable.N/state``: State of the Nth supported cable
+
+Usage Example
+-------------
+
+.. code-block:: c
+
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/extcon.h>
+
+ struct my_extcon_data {
+ struct extcon_dev *edev;
+ struct device *dev;
+ };
+
+ static const unsigned int my_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+ };
+
+ static int my_extcon_probe(struct platform_device *pdev)
+ {
+ struct my_extcon_data *data;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+
+ /* Initialize extcon device */
+ data->edev = devm_extcon_dev_allocate(data->dev, my_extcon_cable);
+ if (IS_ERR(data->edev)) {
+ dev_err(data->dev, "Failed to allocate extcon device\n");
+ return PTR_ERR(data->edev);
+ }
+
+ /* Register extcon device */
+ ret = devm_extcon_dev_register(data->dev, data->edev);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to register extcon device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ /* Example: Set initial state */
+ extcon_set_state_sync(data->edev, EXTCON_USB, true);
+
+ dev_info(data->dev, "My extcon driver probed successfully\n");
+ return 0;
+ }
+
+ static int my_extcon_remove(struct platform_device *pdev)
+ {
+ struct my_extcon_data *data = platform_get_drvdata(pdev);
+
+ /* Example: Clear state before removal */
+ extcon_set_state_sync(data->edev, EXTCON_USB, false);
+
+ dev_info(data->dev, "My extcon driver removed\n");
+ return 0;
+ }
+
+ static const struct of_device_id my_extcon_of_match[] = {
+ { .compatible = "my,extcon-device", },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, my_extcon_of_match);
+
+ static struct platform_driver my_extcon_driver = {
+ .driver = {
+ .name = "my-extcon-driver",
+ .of_match_table = my_extcon_of_match,
+ },
+ .probe = my_extcon_probe,
+ .remove = my_extcon_remove,
+ };
+
+ module_platform_driver(my_extcon_driver);
+
+This example demonstrates:
+---------------------------
+
+- Defining supported cable types (USB and USB Host in this case).
+- Allocating and registering an extcon device.
+- Setting an initial state for a cable (USB connected in this example).
+- Clearing the state when the driver is removed.
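The usage example above shows the provider side. As a complementary, minimal sketch (not taken from any in-tree driver), this is roughly how a consumer could look up an extcon device, query a cable state and get notified of changes; the extcon calls are existing kernel APIs, while the device name "my-extcon" and the surrounding driver structure are assumptions::

  #include <linux/err.h>
  #include <linux/extcon.h>
  #include <linux/notifier.h>
  #include <linux/platform_device.h>

  struct my_consumer_data {
          struct extcon_dev *edev;
          struct notifier_block nb;
  };

  static int my_consumer_usb_notifier(struct notifier_block *nb,
                                      unsigned long event, void *ptr)
  {
          /* 'event' carries the new cable state: 0 detached, 1 attached */
          pr_info("EXTCON_USB is now %s\n", event ? "attached" : "detached");
          return NOTIFY_OK;
  }

  static int my_consumer_probe(struct platform_device *pdev)
  {
          struct my_consumer_data *data;
          int state;

          data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
          if (!data)
                  return -ENOMEM;

          /* Look up the provider by name ("my-extcon" is an assumption) */
          data->edev = extcon_get_extcon_dev("my-extcon");
          if (IS_ERR_OR_NULL(data->edev))
                  return -EPROBE_DEFER;

          /* One-off query of the current cable state */
          state = extcon_get_state(data->edev, EXTCON_USB);
          dev_info(&pdev->dev, "initial EXTCON_USB state: %d\n", state);

          /* Be notified about future attach/detach events */
          data->nb.notifier_call = my_consumer_usb_notifier;
          return devm_extcon_register_notifier(&pdev->dev, data->edev,
                                               EXTCON_USB, &data->nb);
  }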
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 7f83e05769b4..16e2c4ec3c01 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -86,6 +86,7 @@ Subsystem-specific APIs
dmaengine/index
dpll
edac
+ extcon
firmware/index
fpga/index
frame-buffer
diff --git a/Documentation/driver-api/media/tx-rx.rst b/Documentation/driver-api/media/tx-rx.rst
index dd09484df1d3..c71003f74b1c 100644
--- a/Documentation/driver-api/media/tx-rx.rst
+++ b/Documentation/driver-api/media/tx-rx.rst
@@ -50,7 +50,7 @@ The :ref:`V4L2_CID_LINK_FREQ <v4l2-cid-link-freq>` control is used to tell the
receiver the frequency of the bus (i.e. it is not the same as the symbol rate).
``.enable_streams()`` and ``.disable_streams()`` callbacks
-^^^^^^^^^^^^^^^^^^^^^^^^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The struct v4l2_subdev_pad_ops->enable_streams() and struct
v4l2_subdev_pad_ops->disable_streams() callbacks are used by the receiver driver
@@ -79,14 +79,15 @@ where
* - link_freq
- The value of the ``V4L2_CID_LINK_FREQ`` integer64 menu item.
* - nr_of_lanes
- - Number of data lanes used on the CSI-2 link. This can
- be obtained from the OF endpoint configuration.
+ - Number of data lanes used on the CSI-2 link.
* - 2
- Data is transferred on both rising and falling edge of the signal.
* - bits_per_sample
- Number of bits per sample.
* - k
- - 16 for D-PHY and 7 for C-PHY
+ - 16 for D-PHY and 7 for C-PHY.
+
+Information on whether D-PHY or C-PHY is used, and the value of ``nr_of_lanes``, can be obtained from the OF endpoint configuration.
.. note::
diff --git a/Documentation/driver-api/pps.rst b/Documentation/driver-api/pps.rst
index 78dded03e5d8..71ad04c82d6c 100644
--- a/Documentation/driver-api/pps.rst
+++ b/Documentation/driver-api/pps.rst
@@ -202,6 +202,46 @@ Sometimes one needs to be able not only to catch PPS signals but to produce
them also. For example, running a distributed simulation, which requires
computers' clock to be synchronized very tightly.
+To do so, the pps-gen class has been added. PPS generators can be
+registered in the kernel by defining a struct pps_gen_source_info as
+follows::
+
+ static struct pps_gen_source_info pps_gen_dummy_info = {
+ .name = "dummy",
+ .use_system_clock = true,
+ .get_time = pps_gen_dummy_get_time,
+ .enable = pps_gen_dummy_enable,
+ };
+
+Here use_system_clock states whether the generator uses the system
+clock to generate its pulses or whether they come from a peripheral
+device clock. The get_time() method is used to query the time stored
+in the generator clock, while the enable() method is used to enable or
+disable the PPS pulse generation.
+
+Then calling the function pps_gen_register_source() in your
+initialization routine as follows creates a new generator in the
+system::
+
+ pps_gen = pps_gen_register_source(&pps_gen_dummy_info);
+
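To make the two callbacks mentioned above a bit more concrete, here is a minimal sketch of what a dummy generator's methods could look like. The prototypes are assumed to match the pps_gen_source_info example above (see the in-tree drivers under drivers/pps/generators/ for the authoritative signatures); the bodies are illustrative only::

  #include <linux/pps_gen_kernel.h>   /* assumed header for the pps-gen class */
  #include <linux/timekeeping.h>

  /* Report the current time of the clock this generator runs on; here we
   * assume the system clock (use_system_clock = true in the info above).
   */
  static int pps_gen_dummy_get_time(struct pps_gen_device *pps_gen,
                                    struct timespec64 *time)
  {
          ktime_get_real_ts64(time);
          return 0;
  }

  /* Start or stop the pulse output; a real driver would program hardware. */
  static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
  {
          pr_debug("dummy PPS generator %sabled\n", enable ? "en" : "dis");
          return 0;
  }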
+Generators SYSFS support
+------------------------
+
+If the SYSFS filesystem is enabled in the kernel it provides a new class::
+
+ $ ls /sys/class/pps-gen/
+ pps-gen0/ pps-gen1/ pps-gen2/
+
+Every directory corresponds to a PPS generator defined in the system,
+and inside it you find several files::
+
+ $ ls -F /sys/class/pps-gen/pps-gen0/
+ dev enable name power/ subsystem@ system time uevent
+
+To enable the PPS signal generation you can use the command below::
+
+ $ echo 1 > /sys/class/pps-gen/pps-gen0/enable
Parallel port generator
------------------------
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 273281474c09..bf2be96cc2d6 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -126,7 +126,7 @@ Manage scsi_dev_info_list, which tracks blacklisted and whitelisted
devices.
.. kernel-doc:: drivers/scsi/scsi_devinfo.c
- :internal:
+ :export:
drivers/scsi/scsi_ioctl.c
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -162,7 +162,6 @@ statistics and to pass information directly to the lowlevel driver. I.E.
plumbing to manage /proc/scsi/\*
.. kernel-doc:: drivers/scsi/scsi_proc.c
- :internal:
drivers/scsi/scsi_netlink.c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -193,7 +192,7 @@ else, sequentially scan LUNs up until some maximum is reached, or a LUN
is seen that cannot have a device attached to it.
.. kernel-doc:: drivers/scsi/scsi_scan.c
- :internal:
+ :export:
drivers/scsi/scsi_sysctl.c
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/filesystems/bcachefs/SubmittingPatches.rst b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
new file mode 100644
index 000000000000..026b12ae0d6a
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
@@ -0,0 +1,98 @@
+Submitting patches to bcachefs:
+===============================
+
+Patches must be tested before being submitted, either with the xfstests suite
+[0], or the full bcachefs test suite in ktest [1], depending on what's being
+touched. Note that ktest wraps xfstests and will be an easier method to running
+it for most users; it includes single-command wrappers for all the mainstream
+in-kernel local filesystems.
+
+Patches will undergo more testing after being merged (including
+lockdep/kasan/preempt/etc. variants); these are not generally required to be
+run by the submitter - but do put some thought into what you're changing and
+which tests might be relevant, e.g. if you're dealing with tricky memory
+layout work, run kasan; if you're doing locking work, run lockdep. ktest
+includes single-command variants for the debug build types you'll most
+likely need.
+
+The exception to this rule is incomplete WIP/RFC patches: if you're working on
+something nontrivial, it's encouraged to send out a WIP patch to let people
+know what you're doing and make sure you're on the right track. Just make sure
+it includes a brief note as to what's done and what's incomplete, to avoid
+confusion.
+
+Rigorous checkpatch.pl adherence is not required (many of its warnings are
+considered out of date), but try not to deviate too much without reason.
+
+Focus on writing code that reads well and is organized well; code should be
+aesthetically pleasing.
+
+CI:
+===
+
+Instead of running your tests locally, when running the full test suite it's
+preferable to let a server farm do it in parallel, and then have the results
+in a nice test dashboard (which can tell you which failures are new, and
+presents results in a git log view, avoiding the need for most bisecting).
+
+That exists [2], and community members may request an account. If you work for
+a big tech company, you'll need to help out with server costs to get access -
+but the CI is not restricted to running bcachefs tests: it runs any ktest test
+(which generally makes it easy to wrap other tests that can run in qemu).
+
+Other things to think about:
+============================
+
+- How will we debug this code? Is there sufficient introspection to diagnose
+ when something starts acting wonky on a user machine?
+
+ We don't necessarily need every single field of every data structure visible
+ with introspection, but having the important fields of all the core data
+ types wired up makes debugging drastically easier - a bit of thoughtful
+ foresight greatly reduces the need to have people build custom kernels with
+ debug patches.
+
+ More broadly, think about all the debug tooling that might be needed.
+
+- Does it make the codebase more or less of a mess? Can we also try to do some
+ organizing, too?
+
+- Do new tests need to be written? New assertions? How do we know and verify
+ that the code is correct, and what happens if something goes wrong?
+
+ We don't yet have automated code coverage analysis or easy fault injection -
+ but for now, pretend we did and ask what they might tell us.
+
+ Assertions are hugely important, given that we don't yet have a systems
+ language that can do ergonomic embedded correctness proofs. Hitting an assert
+ in testing is much better than wandering off into undefined behaviour la-la
+ land - use them. Use them judiciously, and not as a replacement for proper
+ error handling, but use them.
+
+- Does it need to be performance tested? Should we add new performance counters?
+
+ bcachefs has a set of persistent runtime counters which can be viewed with
+ the 'bcachefs fs top' command; this should give users a basic idea of what
+ their filesystem is currently doing. If you're doing a new feature or looking
+ at old code, think if anything should be added.
+
+- If it's a new on disk format feature - have upgrades and downgrades been
+ tested? (Automated tests exist but aren't in the CI, due to the hassle of
+ disk image management; coordinate to have them run.)
+
+Mailing list, IRC:
+==================
+
+Patches should hit the list [3], but much discussion and code review happens on
+IRC as well [4]; many people appreciate the more conversational approach and
+quicker feedback.
+
+Additionally, we have a lively user community doing excellent QA work, which
+exists primarily on IRC. Please make use of that resource; user feedback is
+important for any nontrivial feature, and documenting it in commit messages
+would be a good idea.
+
+[0]: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
+[1]: https://evilpiepirate.org/git/ktest.git/
+[2]: https://evilpiepirate.org/~testdashboard/ci/
+[3]: linux-bcachefs@vger.kernel.org
+[4]: irc.oftc.net#bcache, #bcachefs-dev
diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst
index 95fc4b90739e..7db4d7ceab58 100644
--- a/Documentation/filesystems/bcachefs/index.rst
+++ b/Documentation/filesystems/bcachefs/index.rst
@@ -9,4 +9,5 @@ bcachefs Documentation
:numbered:
CodingStyle
+ SubmittingPatches
errorcodes
diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst
index dc35da8b8792..f7f977ffbf8d 100644
--- a/Documentation/filesystems/debugfs.rst
+++ b/Documentation/filesystems/debugfs.rst
@@ -211,18 +211,16 @@ seq_file content.
There are a couple of other directory-oriented helper functions::
- struct dentry *debugfs_rename(struct dentry *old_dir,
- struct dentry *old_dentry,
- struct dentry *new_dir,
- const char *new_name);
+ int debugfs_change_name(struct dentry *dentry,
+ const char *fmt, ...);
struct dentry *debugfs_create_symlink(const char *name,
struct dentry *parent,
const char *target);
-A call to debugfs_rename() will give a new name to an existing debugfs
-file, possibly in a different directory. The new_name must not exist prior
-to the call; the return value is old_dentry with updated information.
+A call to debugfs_change_name() will give a new name to an existing debugfs
+file, always in the same directory. The new name must not exist prior
+to the call; the return value is 0 on success and -E... on failure.
Symbolic links can be created with debugfs_create_symlink().
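As a tiny, hypothetical illustration of the renaming helper described above (only debugfs_change_name() and its printf-style argument list come from the text; the surrounding names are made up)::

  #include <linux/debugfs.h>

  /* Rename a per-device debugfs entry after its id changed; the entry
   * stays in the same directory, as described above.
   */
  static int mydrv_rename_debugfs(struct dentry *dentry, int new_id)
  {
          return debugfs_change_name(dentry, "mydev-%d", new_id);
  }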
There is one important thing that all debugfs users must take into account:
diff --git a/Documentation/filesystems/fuse-io-uring.rst b/Documentation/filesystems/fuse-io-uring.rst
new file mode 100644
index 000000000000..d73dd0dbd238
--- /dev/null
+++ b/Documentation/filesystems/fuse-io-uring.rst
@@ -0,0 +1,99 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+FUSE-over-io-uring design documentation
+=======================================
+
+This documentation covers the basic details of how the fuse
+kernel/userspace communication through io-uring is configured
+and how it works. For generic details about FUSE see fuse.rst.
+
+This document also covers the current interface, which is
+still in development and might change.
+
+Limitations
+===========
+As of now not all request types are supported through io-uring; userspace
+is required to also handle requests through /dev/fuse after io-uring setup
+is complete, specifically notifications (initiated from the daemon side)
+and interrupts.
+
+Fuse io-uring configuration
+===========================
+
+Fuse kernel requests are queued through the classical /dev/fuse
+read/write interface - until io-uring setup is complete.
+
+In order to set up fuse-over-io-uring fuse-server (user-space)
+needs to submit SQEs (opcode = IORING_OP_URING_CMD) to the /dev/fuse
+connection file descriptor. Initial submit is with the sub command
+FUSE_URING_REQ_REGISTER, which will just register entries to be
+available in the kernel.
+
+Once at least one entry per queue is submitted, the kernel starts
+to enqueue requests onto the ring queues.
+Note, every CPU core has its own fuse-io-uring queue.
+Userspace handles the CQE/fuse-request and submits the result as
+subcommand FUSE_URING_REQ_COMMIT_AND_FETCH - the kernel completes
+the request and also marks the entry available again. If there are
+pending requests waiting, the next request is immediately submitted
+to the daemon again.
+
+Initial SQE
+-----------::
+
+ | | FUSE filesystem daemon
+ | |
+ | | >io_uring_submit()
+ | | IORING_OP_URING_CMD /
+ | | FUSE_URING_CMD_REGISTER
+ | | [wait cqe]
+ | | >io_uring_wait_cqe() or
+ | | >io_uring_submit_and_wait()
+ | |
+ | >fuse_uring_cmd() |
+ | >fuse_uring_register() |
+
+
+Sending requests with CQEs
+--------------------------::
+
+ | | FUSE filesystem daemon
+ | | [waiting for CQEs]
+ | "rm /mnt/fuse/file" |
+ | |
+ | >sys_unlink() |
+ | >fuse_unlink() |
+ | [allocate request] |
+ | >fuse_send_one() |
+ | ... |
+ | >fuse_uring_queue_fuse_req |
+ | [queue request on fg queue] |
+ | >fuse_uring_add_req_to_ring_ent() |
+ | ... |
+ | >fuse_uring_copy_to_ring() |
+ | >io_uring_cmd_done() |
+ | >request_wait_answer() |
+ | [sleep on req->waitq] |
+ | | [receives and handles CQE]
+ | | [submit result and fetch next]
+ | | >io_uring_submit()
+ | | IORING_OP_URING_CMD/
+ | | FUSE_URING_CMD_COMMIT_AND_FETCH
+ | >fuse_uring_cmd() |
+ | >fuse_uring_commit_fetch() |
+ | >fuse_uring_commit() |
+ | >fuse_uring_copy_from_ring() |
+ | [ copy the result to the fuse req] |
+ | >fuse_uring_req_end() |
+ | >fuse_request_end() |
+ | [wake up req->waitq] |
+ | >fuse_uring_next_fuse_req |
+ | [wait or handle next req] |
+ | |
+ | [req->waitq woken up] |
+ | <fuse_unlink() |
+ | <sys_unlink() |
+
+
+
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 44e9e77ffe0d..2636f2a41bd3 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -98,6 +98,7 @@ Documentation for filesystem implementations.
hpfs
fuse
fuse-io
+ fuse-io-uring
inotify
isofs
nilfs2
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index f5e3676db954..d20a32b77b60 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -17,7 +17,8 @@ dentry_operations
prototypes::
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -30,6 +31,8 @@ prototypes::
struct vfsmount *(*d_automount)(struct path *path);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
locking rules:
@@ -49,6 +52,8 @@ d_dname: no no no no
d_automount: no no yes no
d_manage: no no yes (ref-walk) maybe
d_real no no yes no
+d_unalias_trylock yes no no no
+d_unalias_unlock yes no no no
================== =========== ======== ============== ========
inode_operations
diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst
index bd1967e2eab3..79808b37d745 100644
--- a/Documentation/filesystems/nfs/localio.rst
+++ b/Documentation/filesystems/nfs/localio.rst
@@ -218,64 +218,30 @@ NFS Client and Server Interlock
===============================
LOCALIO provides the nfs_uuid_t object and associated interfaces to
-allow proper network namespace (net-ns) and NFSD object refcounting:
-
- We don't want to keep a long-term counted reference on each NFSD's
- net-ns in the client because that prevents a server container from
- completely shutting down.
-
- So we avoid taking a reference at all and rely on the per-cpu
- reference to the server (detailed below) being sufficient to keep
- the net-ns active. This involves allowing the NFSD's net-ns exit
- code to iterate all active clients and clear their ->net pointers
- (which are needed to find the per-cpu-refcount for the nfsd_serv).
-
- Details:
-
- - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head
- that can be used to find the client. It does add the 16-byte
- uuid_t to nfs_client so it is bigger than needed (given that
- uuid_t is only used during the initial NFS client and server
- LOCALIO handshake to determine if they are local to each other).
- If that is really a problem we can find a fix.
-
- - When the nfs server confirms that the uuid_t is local, it moves
- the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net.
-
- - When each server's net-ns is shutting down - in a "pre_exit"
- handler, all these nfs_uuid_t have their ->net cleared. There is
- an rcu_synchronize() call between pre_exit() handlers and exit()
- handlers so any caller that sees nfs_uuid_t ->net as not NULL can
- safely manage the per-cpu-refcount for nfsd_serv.
-
- - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it
- can safely dereference ->net in a private rcu_read_lock() section
- to allow safe access to the associated nfsd_net and nfsd_serv.
-
-So LOCALIO required the introduction and use of NFSD's percpu_ref to
-interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each
-nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and
+allow proper network namespace (net-ns) and NFSD object refcounting.
+
+LOCALIO required the introduction and use of NFSD's percpu nfsd_net_ref
+to interlock nfsd_shutdown_net() and nfsd_open_local_fh(), to ensure
+each net-ns is not destroyed while in use by nfsd_open_local_fh(), and
warrants a more detailed explanation:
- nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its
+ nfsd_open_local_fh() uses nfsd_net_try_get() before opening its
nfsd_file handle and then the caller (NFS client) must drop the
- reference for the nfsd_file and associated nn->nfsd_serv using
- nfs_file_put_local() once it has completed its IO.
+ reference for the nfsd_file and associated net-ns using
+ nfsd_file_put_local() once it has completed its IO.
This interlock working relies heavily on nfsd_open_local_fh() being
afforded the ability to safely deal with the possibility that the
NFSD's net-ns (and nfsd_net by association) may have been destroyed
- by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only
- possible given the nfs_uuid_t ->net pointer managemenet detailed
- above.
-
-All told, this elaborate interlock of the NFS client and server has been
-verified to fix an easy to hit crash that would occur if an NFSD
-instance running in a container, with a LOCALIO client mounted, is
-shutdown. Upon restart of the container and associated NFSD the client
-would go on to crash due to NULL pointer dereference that occurred due
-to the LOCALIO client's attempting to nfsd_open_local_fh(), using
-nn->nfsd_serv, without having a proper reference on nn->nfsd_serv.
+ by nfsd_destroy_serv() via nfsd_shutdown_net().
+
+This interlock of the NFS client and server has been verified to fix an
+easy to hit crash that would occur if an NFSD instance running in a
+container, with a LOCALIO client mounted, is shutdown. Upon restart of
+the container and associated NFSD, the client would go on to crash due
+to NULL pointer dereference that occurred due to the LOCALIO client's
+attempting to nfsd_open_local_fh() without having a proper reference on
+NFSD's net-ns.
NFS Client issues IO instead of Server
======================================
@@ -306,10 +272,26 @@ is issuing IO to the underlying local filesystem that it is sharing with
the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and
fs/nfs/localio.c:nfs_local_commit().
+With normal NFS that makes use of RPC to issue IO to the server, if an
+application uses O_DIRECT the NFS client will bypass the pagecache but
+the NFS server will not. The NFS server's use of buffered IO allows
+applications to be less precise with their alignment when issuing IO to
+the NFS client. But if all applications properly align their IO, LOCALIO
+can be configured to use end-to-end O_DIRECT semantics from the NFS
+client to the underlying local filesystem that it is sharing with
+the NFS server, by setting the 'localio_O_DIRECT_semantics' nfs module
+parameter to Y, e.g.:
+
+ echo Y > /sys/module/nfs/parameters/localio_O_DIRECT_semantics
+
+Once enabled, it will cause LOCALIO to use end-to-end O_DIRECT semantics
+(but again, this may cause IO to fail if applications do not properly
+align their IO).
+
Security
========
-Localio is only supported when UNIX-style authentication (AUTH_UNIX, aka
+LOCALIO is only supported when UNIX-style authentication (AUTH_UNIX, aka
AUTH_SYS) is used.
Care is taken to ensure the same NFS security mechanisms are used
@@ -324,6 +306,24 @@ client is afforded this same level of access (albeit in terms of the NFS
protocol via SUNRPC). No other namespaces (user, mount, etc) have been
altered or purposely extended from the server to the client.
+Module Parameters
+=================
+
+/sys/module/nfs/parameters/localio_enabled (bool)
+controls if LOCALIO is enabled, defaults to Y. If client and server are
+local but 'localio_enabled' is set to N then LOCALIO will not be used.
+
+/sys/module/nfs/parameters/localio_O_DIRECT_semantics (bool)
+controls if O_DIRECT extends down to the underlying filesystem, defaults
+to N. Application IO must be logical blocksize aligned, otherwise
+O_DIRECT will fail.
+
+/sys/module/nfsv3/parameters/nfs3_localio_probe_throttle (uint)
+controls if NFSv3 read and write IOs will trigger (re)enabling of
+LOCALIO every N (nfs3_localio_probe_throttle) IOs, defaults to 0
+(disabled). Must be a power of 2; the admin keeps all the pieces if they
+misconfigure it (too low a value or not a power of 2).
+
Testing
=======
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index c1c121055204..1639e78e3146 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1141,3 +1141,19 @@ pointer are gone.
set_blocksize() takes opened struct file instead of struct block_device now
and it *must* be opened exclusive.
+
+---
+
+**mandatory**
+
+->d_revalidate() gets two extra arguments - the inode of the parent directory
+and the name our dentry is expected to have. Both are stable (dir is pinned
+in the non-RCU case and will stay around during the call in the RCU case, and
+the name is guaranteed to stay unchanging). Your instance doesn't have to use
+either, but it often helps to avoid a lot of painful boilerplate.
+Note that while name->name is stable, it is not necessarily NUL-terminated -
+name->name[name->len] may (and often will) be '/' rather than '\0', since
+in the normal case it points into the pathname being looked up.
+NOTE: if you need something like the full path from the root of the
+filesystem, you are still on your own - this assists with simple cases,
+but it's not magic.
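+
+As a purely illustrative sketch (hypothetical filesystem; the
+examplefs_name_still_valid() helper is invented), an instance using the
+new arguments could look like::
+
+  static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
+                                    struct dentry *dentry, unsigned int flags)
+  {
+          if (flags & LOOKUP_RCU)
+                  return -ECHILD;
+
+          /* dir and name are stable here; check them against backing state */
+          return examplefs_name_still_valid(dir, name, dentry) ? 1 : 0;
+  }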
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 6a882c57a7e7..09f0aed5a08b 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -48,6 +48,7 @@ fixes/update part 1.1 Stefani Seibold <stefani@seibold.net> June 9 2009
3.11 /proc/<pid>/patch_state - Livepatch patch operation state
3.12 /proc/<pid>/arch_status - Task architecture specific information
3.13 /proc/<pid>/fd - List of symlinks to open files
+ 3.14 /proc/<pid>/ksm_stat - Information about the process's ksm status.
4 Configuring procfs
4.1 Mount options
@@ -484,14 +485,15 @@ Memory Area, or VMA) there is a series of lines such as the following::
THPeligible: 0
VmFlags: rd ex mr mw me dw
-The first of these lines shows the same information as is displayed for the
-mapping in /proc/PID/maps. Following lines show the size of the mapping
-(size); the size of each page allocated when backing a VMA (KernelPageSize),
-which is usually the same as the size in the page table entries; the page size
-used by the MMU when backing a VMA (in most cases, the same as KernelPageSize);
-the amount of the mapping that is currently resident in RAM (RSS); the
-process' proportional share of this mapping (PSS); and the number of clean and
-dirty shared and private pages in the mapping.
+The first of these lines shows the same information as is displayed for
+the mapping in /proc/PID/maps. Following lines show the size of the
+mapping (size); the size of each page allocated when backing a VMA
+(KernelPageSize), which is usually the same as the size in the page table
+entries; the page size used by the MMU when backing a VMA (in most cases,
+the same as KernelPageSize); the amount of the mapping that is currently
+resident in RAM (RSS); the process's proportional share of this mapping
+(PSS); and the number of clean and dirty shared and private pages in the
+mapping.
The "proportional set size" (PSS) of a process is the count of pages it has
in memory, where each page is divided by the number of processes sharing it.
@@ -2232,6 +2234,74 @@ The number of open files for the process is stored in 'size' member
of stat() output for /proc/<pid>/fd for fast access.
-------------------------------------------------------
+3.14 /proc/<pid>/ksm_stat - Information about the process's ksm status
+-----------------------------------------------------------------------
+When CONFIG_KSM is enabled, each process has this file which displays
+information about its KSM merging status.
+
+Example
+~~~~~~~
+
+::
+
+ / # cat /proc/self/ksm_stat
+ ksm_rmap_items 0
+ ksm_zero_pages 0
+ ksm_merging_pages 0
+ ksm_process_profit 0
+ ksm_merge_any: no
+ ksm_mergeable: no
+
+Description
+~~~~~~~~~~~
+
+ksm_rmap_items
+^^^^^^^^^^^^^^
+
+The number of ksm_rmap_item structures in use. The structure
+ksm_rmap_item stores the reverse mapping information for virtual
+addresses. KSM will generate a ksm_rmap_item for each ksm-scanned page of
+the process.
+
+ksm_zero_pages
+^^^^^^^^^^^^^^
+
+When /sys/kernel/mm/ksm/use_zero_pages is enabled, it represents how many
+empty pages are merged with kernel zero pages by KSM.
+
+ksm_merging_pages
+^^^^^^^^^^^^^^^^^
+
+It represents how many pages of this process are involved in KSM merging
+(not including ksm_zero_pages). It is the same as what
+/proc/<pid>/ksm_merging_pages shows.
+
+ksm_process_profit
+^^^^^^^^^^^^^^^^^^
+
+The profit that KSM brings (saved bytes). KSM can save memory by merging
+identical pages, but it can also consume additional memory, because it
+needs to generate a number of rmap_items to save each scanned page's brief
+rmap information. Some of these pages may be merged, but some may turn out
+not to be mergeable even after being checked several times; the memory
+consumed for those is unprofitable.
+
+ksm_merge_any
+^^^^^^^^^^^^^
+
+It specifies whether the process's mm has been added by prctl() into the
+candidate list of KSM or not, and whether KSM scanning is fully enabled at
+the process level.
+
+ksm_mergeable
+^^^^^^^^^^^^^
+
+It specifies whether any VMAs of the process's mm are currently
+applicable to KSM.
+
+More information about KSM can be found in
+Documentation/admin-guide/mm/ksm.rst.
+
Chapter 4: Configuring procfs
=============================
@@ -2261,7 +2331,7 @@ arguments are now protected against local eavesdroppers.
hidepid=invisible or hidepid=2 means hidepid=1 plus all /proc/<pid>/ will be
fully invisible to other users. It doesn't mean that it hides a fact whether a
process with a specific pid value exists (it can be learned by other means, e.g.
-by "kill -0 $PID"), but it hides process' uid and gid, which may be learned by
+by "kill -0 $PID"), but it hides process's uid and gid, which may be learned by
stat()'ing /proc/<pid>/ otherwise. It greatly complicates an intruder's task of
gathering information about running processes, whether some daemon runs with
elevated privileges, whether other user runs some sensitive program, whether
diff --git a/Documentation/filesystems/squashfs.rst b/Documentation/filesystems/squashfs.rst
index 4af8d6207509..45653b3228f9 100644
--- a/Documentation/filesystems/squashfs.rst
+++ b/Documentation/filesystems/squashfs.rst
@@ -6,7 +6,7 @@ Squashfs 4.0 Filesystem
Squashfs is a compressed read-only filesystem for Linux.
-It uses zlib, lz4, lzo, or xz compression to compress files, inodes and
+It uses zlib, lz4, lzo, xz or zstd compression to compress files, inodes and
directories. Inodes in the system are very small and all blocks are packed to
minimise data overhead. Block sizes greater than 4K are supported up to a
maximum of 1Mbytes (default block size 128K).
@@ -16,8 +16,8 @@ use (i.e. in cases where a .tar.gz file may be used), and in constrained
block device/memory systems (e.g. embedded systems) where low overhead is
needed.
-Mailing list: squashfs-devel@lists.sourceforge.net
-Web site: www.squashfs.org
+Mailing list (kernel code): linux-fsdevel@vger.kernel.org
+Web site: github.com/plougher/squashfs-tools
1. Filesystem Features
----------------------
@@ -58,11 +58,9 @@ inodes have different sizes).
As squashfs is a read-only filesystem, the mksquashfs program must be used to
create populated squashfs filesystems. This and other squashfs utilities
-can be obtained from http://www.squashfs.org. Usage instructions can be
-obtained from this site also.
-
-The squashfs-tools development tree is now located on kernel.org
- git://git.kernel.org/pub/scm/fs/squashfs/squashfs-tools.git
+are very likely packaged by your Linux distribution (called squashfs-tools).
+The source code can be obtained from github.com/plougher/squashfs-tools.
+Usage instructions can also be obtained from this site.
2.1 Mount options
-----------------
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 0b18af3f954e..31eea688609a 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -1251,7 +1251,8 @@ defined:
.. code-block:: c
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -1264,6 +1265,8 @@ defined:
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
};
``d_revalidate``
@@ -1427,6 +1430,25 @@ defined:
For non-regular files, the 'dentry' argument is returned.
+``d_unalias_trylock``
+ if present, will be called by d_splice_alias() before moving a
+ preexisting attached alias. Returning false prevents __d_move(),
+ making d_splice_alias() fail with -ESTALE.
+
+ Rationale: setting FS_RENAME_DOES_D_MOVE will prevent d_move()
+ and d_exchange() calls from the outside of filesystem methods;
+ however, it does not guarantee that attached dentries won't
+ be renamed or moved by d_splice_alias() finding a preexisting
+ alias for a directory inode. Normally we would not care;
+ however, something that wants to stabilize the entire path to
+ root over a blocking operation might need that. See 9p for one
+ (and hopefully only) example.
+
+``d_unalias_unlock``
+ should be paired with ``d_unalias_trylock``; that one is called after
+ __d_move() call in __d_unalias().
+
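+A filesystem that needs to keep the path to root stable over a blocking
+operation (9p being the motivating case) could wire these up along the
+following lines; EXAMPLEFS_SB() and the rename_sem member are invented
+here purely for illustration::
+
+  static bool examplefs_d_unalias_trylock(const struct dentry *alias)
+  {
+          /* refuse the cross-directory move if a rename is in flight */
+          return down_write_trylock(&EXAMPLEFS_SB(alias->d_sb)->rename_sem);
+  }
+
+  static void examplefs_d_unalias_unlock(const struct dentry *alias)
+  {
+          up_write(&EXAMPLEFS_SB(alias->d_sb)->rename_sem);
+  }
+
+  const struct dentry_operations examplefs_dentry_ops = {
+          .d_unalias_trylock = examplefs_d_unalias_trylock,
+          .d_unalias_unlock  = examplefs_d_unalias_unlock,
+  };
+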
+
Each dentry has a pointer to its parent dentry, as well as a hash list
of child dentries. Child dentries are basically like files in a
directory.
diff --git a/Documentation/gpu/zynqmp.rst b/Documentation/gpu/zynqmp.rst
index f57bfa0ad6ec..1a6f9193de22 100644
--- a/Documentation/gpu/zynqmp.rst
+++ b/Documentation/gpu/zynqmp.rst
@@ -144,6 +144,4 @@ Internals
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_dp.c
-.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_dpsub.c
-
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_kms.c
diff --git a/Documentation/iio/ad4695.rst b/Documentation/iio/ad4695.rst
index 33ed29b7c98a..9ec8bf466c15 100644
--- a/Documentation/iio/ad4695.rst
+++ b/Documentation/iio/ad4695.rst
@@ -101,7 +101,7 @@ The macro comes from:
.. code-block::
- #include <dt-bindings/iio/adi,ad4695.h>
+ #include <dt-bindings/iio/adc/adi,ad4695.h>
Pairing two INx pins
^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/iio/adis16480.rst b/Documentation/iio/adis16480.rst
index bc78fa04d958..4a2d40e0daa7 100644
--- a/Documentation/iio/adis16480.rst
+++ b/Documentation/iio/adis16480.rst
@@ -12,7 +12,10 @@ This driver supports Analog Device's IMUs on SPI bus.
* `ADIS16375 <https://www.analog.com/ADIS16375>`_
* `ADIS16480 <https://www.analog.com/ADIS16480>`_
* `ADIS16485 <https://www.analog.com/ADIS16485>`_
+* `ADIS16486 <https://www.analog.com/ADIS16486>`_
+* `ADIS16487 <https://www.analog.com/ADIS16487>`_
* `ADIS16488 <https://www.analog.com/ADIS16488>`_
+* `ADIS16489 <https://www.analog.com/ADIS16489>`_
* `ADIS16490 <https://www.analog.com/ADIS16490>`_
* `ADIS16495 <https://www.analog.com/ADIS16495>`_
* `ADIS16497 <https://www.analog.com/ADIS16497>`_
diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst
index 074dbbf7ba0a..5710f5b9e958 100644
--- a/Documentation/iio/index.rst
+++ b/Documentation/iio/index.rst
@@ -29,3 +29,4 @@ Industrial I/O Kernel Drivers
adxl380
bno055
ep93xx_adc
+ opt4060
diff --git a/Documentation/iio/opt4060.rst b/Documentation/iio/opt4060.rst
new file mode 100644
index 000000000000..eb155089b6d2
--- /dev/null
+++ b/Documentation/iio/opt4060.rst
@@ -0,0 +1,61 @@
+==============================
+OPT4060 driver
+==============================
+
+1. Overview
+=============================
+
+This driver supports the Texas Instruments OPT4060 RGBW high resolution color
+sensor over I2C.
+https://www.ti.com/lit/gpn/opt4060
+
+The driver supports:
+- Raw values for red, green, blue and clear.
+- Illuminance values.
+- Scaled color values for red, green and blue.
+- IIO events for thresholds.
+- IIO triggered buffer using both its own data ready trigger and triggers from
+other drivers.
+
+2. Illuminance calculation
+=============================
+
+Illuminance is calculated using the wide spectrum green channel.
+
+lux = GREEN_RAW x 2.15e-3
+
+The value is accessed from:
+/sys/bus/iio/devices/iio:deviceX/in_illuminance_input
+
+See section 8.4.5.2 in the data sheet for additional details.
+
+3. Color scale values
+=============================
+
+The sensor has different sensitivities for the different color components, and
+compensating factors are exposed by the driver.
+
+The values are accessed from:
+/sys/bus/iio/devices/iio:deviceX/in_intensity_red_scale
+/sys/bus/iio/devices/iio:deviceX/in_intensity_green_scale
+/sys/bus/iio/devices/iio:deviceX/in_intensity_blue_scale
+
+A userspace application can multiply the raw values with the scale values so
+that for a particular test light source, typically white, the measurement
+intensity is the same across the different color channels. This is calculated
+in the following way:
+
+R = RED_RAW x SCALE_RED(2.4)
+G = GREEN_RAW x SCALE_GREEN(1.0)
+B = BLUE_RAW x SCALE_BLUE(1.3)
+
+The data sheet suggests using the scaled values to normalize the scaled R, G
+and B values. This is useful to get a value for the ratio between colors
+independent of light intensity. A userspace application can do this in the
+following way:
+
+R_NORMALIZED = R / (R + G + B)
+G_NORMALIZED = G / (R + G + B)
+B_NORMALIZED = B / (R + G + B)
+
+See section 8.4.5.2 in the data sheet for additional details.
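+
+As a rough sketch only, a userspace application could read the scale and
+raw attributes and compute the normalized values as below. The raw
+attribute names (``in_intensity_<color>_raw``) and the device index are
+assumptions here, and error handling is omitted::
+
+  #include <stdio.h>
+
+  static double read_sysfs(const char *path)
+  {
+          double v = 0;
+          FILE *f = fopen(path, "r");
+
+          fscanf(f, "%lf", &v);
+          fclose(f);
+          return v;
+  }
+
+  int main(void)
+  {
+          const char *dev = "/sys/bus/iio/devices/iio:device0";
+          const char *color[] = { "red", "green", "blue" };
+          double raw[3], scale[3], sum = 0;
+          char p[256];
+
+          for (int i = 0; i < 3; i++) {
+                  snprintf(p, sizeof(p), "%s/in_intensity_%s_raw", dev, color[i]);
+                  raw[i] = read_sysfs(p);
+                  snprintf(p, sizeof(p), "%s/in_intensity_%s_scale", dev, color[i]);
+                  scale[i] = read_sysfs(p);
+                  sum += raw[i] * scale[i];
+          }
+          for (int i = 0; i < 3; i++)
+                  printf("%s normalized: %f\n", color[i], raw[i] * scale[i] / sum);
+          return 0;
+  }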
diff --git a/Documentation/kbuild/gendwarfksyms.rst b/Documentation/kbuild/gendwarfksyms.rst
new file mode 100644
index 000000000000..e4beaae7e456
--- /dev/null
+++ b/Documentation/kbuild/gendwarfksyms.rst
@@ -0,0 +1,308 @@
+=======================
+DWARF module versioning
+=======================
+
+1. Introduction
+===============
+
+When CONFIG_MODVERSIONS is enabled, symbol versions for modules
+are typically calculated from preprocessed source code using the
+**genksyms** tool. However, this is incompatible with languages such
+as Rust, where the source code has insufficient information about
+the resulting ABI. With CONFIG_GENDWARFKSYMS (and CONFIG_DEBUG_INFO)
+selected, **gendwarfksyms** is used instead to calculate symbol versions
+from the DWARF debugging information, which contains the necessary
+details about the final module ABI.
+
+1.1. Usage
+==========
+
+gendwarfksyms accepts a list of object files on the command line, and a
+list of symbol names (one per line) in standard input::
+
+ Usage: gendwarfksyms [options] elf-object-file ... < symbol-list
+
+ Options:
+ -d, --debug Print debugging information
+ --dump-dies Dump DWARF DIE contents
+ --dump-die-map Print debugging information about die_map changes
+ --dump-types Dump type strings
+ --dump-versions Dump expanded type strings used for symbol versions
+ -s, --stable Support kABI stability features
+ -T, --symtypes file Write a symtypes file
+ -h, --help Print this message
+
+
+2. Type information availability
+================================
+
+While symbols are typically exported in the same translation unit (TU)
+where they're defined, it's also perfectly fine for a TU to export
+external symbols. For example, this is done when calculating symbol
+versions for exports in stand-alone assembly code.
+
+To ensure the compiler emits the necessary DWARF type information in the
+TU where symbols are actually exported, gendwarfksyms adds a pointer
+to exported symbols in the `EXPORT_SYMBOL()` macro using the following
+macro::
+
+ #define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
+
+
+When a symbol pointer is found in DWARF, gendwarfksyms can use its
+type for calculating symbol versions even if the symbol is defined
+elsewhere. The name of the symbol pointer is expected to start with
+`__gendwarfksyms_ptr_`, followed by the name of the exported symbol.
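+
+For example, for an exported symbol `foo`, the macro above would expand to
+roughly the following (sketch only, modulo the exact `EXPORT_SYMBOL()`
+plumbing)::
+
+  static typeof(foo) *__gendwarfksyms_ptr_foo __used
+          __section(".discard.gendwarfksyms") = &foo;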
+
+3. Symtypes output format
+=========================
+
+Similarly to genksyms, gendwarfksyms supports writing a symtypes
+file for each processed object that contains types for exported
+symbols and each referenced type that was used in calculating symbol
+versions. These files can be useful when trying to determine what
+exactly caused symbol versions to change between builds. To generate
+symtypes files during a kernel build, set `KBUILD_SYMTYPES=1`.
+
+Matching the existing format, the first column of each line contains
+either a type reference or a symbol name. Type references have a
+one-letter prefix followed by "#" and the name of the type. Four
+reference types are supported::
+
+ e#<type> = enum
+ s#<type> = struct
+ t#<type> = typedef
+ u#<type> = union
+
+Type names with spaces in them are wrapped in single quotes, e.g.::
+
+ s#'core::result::Result<u8, core::num::error::ParseIntError>'
+
+The rest of the line contains a type string. Unlike genksyms, which
+produces C-style type strings, gendwarfksyms uses the same simple parsed
+DWARF format produced by **--dump-dies**, but with type references
+instead of fully expanded strings.
+
+4. Maintaining a stable kABI
+============================
+
+Distribution maintainers often need the ability to make ABI compatible
+changes to kernel data structures due to LTS updates or backports. Using
+the traditional `#ifndef __GENKSYMS__` to hide these changes from symbol
+versioning won't work when processing object files. To support this
+use case, gendwarfksyms provides kABI stability features designed to
+hide changes that won't affect the ABI when calculating versions. These
+features are all gated behind the **--stable** command line flag and are
+not used in the mainline kernel. To use stable features during a kernel
+build, set `KBUILD_GENDWARFKSYMS_STABLE=1`.
+
+Examples for using these features are provided in the
+**scripts/gendwarfksyms/examples** directory, including helper macros
+for source code annotation. Note that as these features are only used to
+transform the inputs for symbol versioning, the user is responsible for
+ensuring that their changes actually won't break the ABI.
+
+4.1. kABI rules
+===============
+
+kABI rules allow distributions to fine-tune certain parts
+of gendwarfksyms output and thus control how symbol
+versions are calculated. These rules are defined in the
+`.discard.gendwarfksyms.kabi_rules` section of the object file and
+consist of simple null-terminated strings with the following structure::
+
+ version\0type\0target\0value\0
+
+This string sequence is repeated as many times as needed to express all
+the rules. The fields are as follows:
+
+- `version`: Ensures backward compatibility for future changes to the
+ structure. Currently expected to be "1".
+- `type`: Indicates the type of rule being applied.
+- `target`: Specifies the target of the rule, typically the fully
+ qualified name of the DWARF Debugging Information Entry (DIE).
+- `value`: Provides rule-specific data.
+
+The following helper macro, for example, can be used to specify rules
+in the source code::
+
+ #define __KABI_RULE(hint, target, value) \
+ static const char __PASTE(__gendwarfksyms_rule_, \
+ __COUNTER__)[] __used __aligned(1) \
+ __section(".discard.gendwarfksyms.kabi_rules") = \
+ "1\0" #hint "\0" #target "\0" #value
+
+
+Currently, only the rules discussed in this section are supported, but
+the format is extensible enough to allow further rules to be added as
+need arises.
+
+4.1.1. Managing definition visibility
+=====================================
+
+A declaration can change into a full definition when additional includes
+are pulled into the translation unit. This changes the versions of any
+symbol that references the type even if the ABI remains unchanged. As
+it may not be possible to drop includes without breaking the build, the
+`declonly` rule can be used to specify a type as declaration-only, even
+if the debugging information contains the full definition.
+
+The rule fields are expected to be as follows:
+
+- `type`: "declonly"
+- `target`: The fully qualified name of the target data structure
+ (as shown in **--dump-dies** output).
+- `value`: This field is ignored.
+
+Using the `__KABI_RULE` macro, this rule can be defined as::
+
+ #define KABI_DECLONLY(fqn) __KABI_RULE(declonly, fqn, )
+
+Example usage::
+
+ struct s {
+ /* definition */
+ };
+
+ KABI_DECLONLY(s);
+
+4.1.2. Adding enumerators
+=========================
+
+For enums, all enumerators and their values are included in calculating
+symbol versions, which becomes a problem if we later need to add more
+enumerators without changing symbol versions. The `enumerator_ignore`
+rule allows us to hide named enumerators from the input.
+
+The rule fields are expected to be as follows:
+
+- `type`: "enumerator_ignore"
+- `target`: The fully qualified name of the target enum
+ (as shown in **--dump-dies** output) and the name of the
+ enumerator field separated by a space.
+- `value`: This field is ignored.
+
+Using the `__KABI_RULE` macro, this rule can be defined as::
+
+ #define KABI_ENUMERATOR_IGNORE(fqn, field) \
+ __KABI_RULE(enumerator_ignore, fqn field, )
+
+Example usage::
+
+ enum e {
+ A, B, C, D,
+ };
+
+ KABI_ENUMERATOR_IGNORE(e, B);
+ KABI_ENUMERATOR_IGNORE(e, C);
+
+If the enum additionally includes an end marker and new values must
+be added in the middle, we may need to use the old value for the last
+enumerator when calculating versions. The `enumerator_value` rule allows
+us to override the value of an enumerator for version calculation:
+
+- `type`: "enumerator_value"
+- `target`: The fully qualified name of the target enum
+ (as shown in **--dump-dies** output) and the name of the
+ enumerator field separated by a space.
+- `value`: Integer value used for the field.
+
+Using the `__KABI_RULE` macro, this rule can be defined as::
+
+ #define KABI_ENUMERATOR_VALUE(fqn, field, value) \
+ __KABI_RULE(enumerator_value, fqn field, value)
+
+Example usage::
+
+ enum e {
+ A, B, C, LAST,
+ };
+
+ KABI_ENUMERATOR_IGNORE(e, C);
+ KABI_ENUMERATOR_VALUE(e, LAST, 2);
+
+4.3. Adding structure members
+=============================
+
+Perhaps the most common ABI compatible change is adding a member to a
+kernel data structure. When changes to a structure are anticipated,
+distribution maintainers can pre-emptively reserve space in the
+structure and take it into use later without breaking the ABI. If
+changes are needed to data structures without reserved space, existing
+alignment holes can potentially be used instead. While kABI rules could
+be added for these types of changes, using unions is typically a more
+natural method. This section describes gendwarfksyms support for using
+reserved space in data structures and hiding members that don't change
+the ABI when calculating symbol versions.
+
+4.3.1. Reserving space and replacing members
+============================================
+
+Space is typically reserved for later use by appending integer types, or
+arrays, to the end of the data structure, but any type can be used. Each
+reserved member needs a unique name, but as the actual purpose is usually
+not known at the time the space is reserved, for convenience, names that
+start with `__kabi_` are left out when calculating symbol versions::
+
+ struct s {
+ long a;
+ long __kabi_reserved_0; /* reserved for future use */
+ };
+
+The reserved space can be taken into use by wrapping the member in a
+union, which includes the original type and the replacement member::
+
+ struct s {
+ long a;
+ union {
+ long __kabi_reserved_0; /* original type */
+ struct b b; /* replaced field */
+ };
+ };
+
+If the `__kabi_` naming scheme was used when reserving space, the name
+of the first member of the union must start with `__kabi_reserved`. This
+ensures the original type is used when calculating versions, but the name
+is again left out. The rest of the union is ignored.
+
+If we're replacing a member that doesn't follow this naming convention,
+we also need to preserve the original name to avoid changing versions,
+which we can do by changing the first union member's name to start with
+`__kabi_renamed` followed by the original name.
+
+The examples include `KABI_(RESERVE|USE|REPLACE)*` macros that help
+simplify the process and also ensure the replacement member is correctly
+aligned and its size won't exceed the reserved space.
+
+4.3.2. Hiding members
+=====================
+
+Predicting which structures will require changes during the support
+timeframe isn't always possible, in which case one might have to resort
+to placing new members into existing alignment holes::
+
+ struct s {
+ int a;
+ /* a 4-byte alignment hole */
+ unsigned long b;
+ };
+
+
+While this won't change the size of the data structure, one needs to
+be able to hide the added members from symbol versioning. Similarly
+to reserved fields, this can be accomplished by wrapping the added
+member in a union where one of the fields has a name starting with
+`__kabi_ignored`::
+
+ struct s {
+ int a;
+ union {
+ char __kabi_ignored_0;
+ int n;
+ };
+ unsigned long b;
+ };
+
+With **--stable**, both versions produce the same symbol version.
diff --git a/Documentation/kbuild/index.rst b/Documentation/kbuild/index.rst
index cee2f99f734b..e82af05cd652 100644
--- a/Documentation/kbuild/index.rst
+++ b/Documentation/kbuild/index.rst
@@ -21,6 +21,7 @@ Kernel Build System
reproducible-builds
gcc-plugins
llvm
+ gendwarfksyms
.. only:: subproject and html
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index 101de236cd0c..a42f00d8cb90 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -423,6 +423,26 @@ Symbols From the Kernel (vmlinux + modules)
1) It lists all exported symbols from vmlinux and all modules.
2) It lists the CRC if CONFIG_MODVERSIONS is enabled.
+Version Information Formats
+---------------------------
+
+ Exported symbols have information stored in __ksymtab or __ksymtab_gpl
+ sections. Symbol names and namespaces are stored in __ksymtab_strings,
+ using a format similar to the string table used for ELF. If
+ CONFIG_MODVERSIONS is enabled, the CRCs corresponding to exported
+ symbols will be added to the __kcrctab or __kcrctab_gpl.
+
+ If CONFIG_BASIC_MODVERSIONS is enabled (default with
+ CONFIG_MODVERSIONS), imported symbols will have their symbol name and
+ CRC stored in the __versions section of the importing module. This
+ mode only supports symbols of length up to 64 bytes.
+
+ If CONFIG_EXTENDED_MODVERSIONS is enabled (required to enable both
+ CONFIG_MODVERSIONS and CONFIG_RUST at the same time), imported symbols
+ will have their symbol name recorded in the __version_ext_names
+ section as a series of concatenated, null-terminated strings. CRCs for
+ these symbols will be recorded in the __version_ext_crcs section.
+
Symbols and External Modules
----------------------------
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index f9c50525bdbf..e28c6a1b40ae 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -203,6 +203,8 @@ This scheme, however, cannot preserve the quality of the output if the
assumption is not guaranteed.
+.. _damon_design_adaptive_regions_adjustment:
+
Adaptive Regions Adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -264,6 +266,61 @@ tracepoints. For more details, please refer to the documentations for
respectively.
+.. _damon_design_monitoring_params_tuning_guide:
+
+Monitoring Parameters Tuning Guide
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In short, set ``aggregation interval`` to capture a meaningful amount of
+accesses for the purpose. The amount of accesses can be measured using
+``nr_accesses`` and ``age`` of regions in the aggregated monitoring results
+snapshot. The default value of the interval, ``100ms``, turns out to be too
+short in many cases. Set ``sampling interval`` proportional to
+``aggregation interval``. By default, ``1/20`` is recommended as the ratio.
+
+``Aggregation interval`` should be set to a time interval within which the
+workload can make a meaningful amount of accesses for the monitoring purpose.
+If the interval is too short, only a small number of accesses are captured.
+As a result, the monitoring results make it look as if everything is accessed
+equally rarely. For many purposes, that would be useless. If it is too long,
+however, the time to converge regions with the :ref:`regions adjustment
+mechanism <damon_design_adaptive_regions_adjustment>` can be too long,
+depending on the time scale of the given purpose. This could happen if the
+workload is actually making only rare accesses but the user assumes too high
+an amount of accesses for the monitoring purpose. For such cases, the target
+amount of accesses to capture per ``aggregation interval`` should be carefully
+reconsidered. Also, note that the captured amount of accesses is represented
+with not only ``nr_accesses``, but also ``age``. For example, even if every
+region in the monitoring results shows zero ``nr_accesses``, regions could
+still be distinguished using ``age`` values as the recency information.
+
+Hence the optimum value of ``aggregation interval`` depends on the access
+intensiveness of the workload. The user should tune the interval based on the
+amount of accesses captured in each aggregated snapshot of the monitoring
+results.
+
+Note that the default value of the interval is 100 milliseconds, which is too
+short in many cases, especially on large systems.
+
+``Sampling interval`` defines the resolution of each aggregation. If it is set
+too large, monitoring results will look like every region was equally rarely
+accessed, or equally frequently accessed. That is, regions become
+indistinguishable based on access pattern, and therefore the results will be
+useless in many use cases. If ``sampling interval`` is too small, it will not
+degrade the resolution, but will increase the monitoring overhead. If it
+already provides a resolution of the monitoring results that is sufficient for
+the given purpose, it shouldn't be unnecessarily lowered further. It is
+recommended to be set proportional to ``aggregation interval``. By default,
+the ratio is set as ``1/20``, and that is still recommended.
+
+Refer to the below document for an example tuning based on the above guide.
+
+.. toctree::
+ :maxdepth: 1
+
+ monitoring_intervals_tuning_example
+
+
.. _damon_design_damos:
Operation Schemes
@@ -504,9 +561,34 @@ have a list of latency-critical processes.
To let users optimize DAMOS schemes with such special knowledge, DAMOS provides
a feature called DAMOS filters. The feature allows users to set an arbitrary
-number of filters for each scheme. Each filter specifies the type of target
-memory, and whether it should exclude the memory of the type (filter-out), or
-all except the memory of the type (filter-in).
+number of filters for each scheme. Each filter specifies
+
+- a type of memory (``type``),
+- whether it is for the memory of the type or all except the type
+ (``matching``), and
+- whether it is to allow (include) or reject (exclude) applying
+ the scheme's action to the memory (``allow``).
+
+When multiple filters are installed, each filter is evaluated in the installed
+order. If a part of memory matches one of the filters, the remaining filters
+are ignored. If the memory passes through the filters evaluation stage because
+it matches none of the filters, applying the scheme's action to it is allowed,
+the same as the behavior when no filter exists.
+
+For example, let's assume 1) a filter for allowing anonymous pages and 2)
+another filter for rejecting young pages are installed in that order. If a
+page of a region that is eligible for the scheme's action is an anonymous
+page, the scheme's action will be applied to the page regardless of whether
+it is young or not, since it matches the first allow-filter. If the page is
+not anonymous but young, the scheme's action will not be applied, since the
+second reject-filter blocks it. If the page is neither anonymous nor young,
+the page will pass through the filters evaluation stage since there is no
+matching filter, and the action will be applied to the page.
+
+Note that the action is applied in the same way to memory that is explicitly
+filter-allowed and to memory that passed the filters evaluation stage without
+matching any filter. It means that installing allow-filters at the end of the
+list makes no practical difference and only adds filter-checking overhead.
For efficient handling of filters, some types of filters are handled by the
core layer, while others are handled by operations set. In the latter case,
@@ -516,7 +598,7 @@ filter are not counted as the scheme has tried to the region. In contrast, if
a memory regions is filtered by an operations set layer-handled filter, it is
counted as the scheme has tried. This difference affects the statistics.
-Below types of filters are currently supported.
+The below filter ``type`` values are currently supported.
- anonymous page
- Applied to pages that containing data that not stored in files.
@@ -539,6 +621,60 @@ To know how user-space can set the watermarks via :ref:`DAMON sysfs interface
<sysfs_interface>`, refer to :ref:`filters <sysfs_filters>` part of the
documentation.
+.. _damon_design_damos_stat:
+
+Statistics
+~~~~~~~~~~
+
+Statistics of DAMOS behaviors that are designed to help with monitoring,
+tuning and debugging of DAMOS.
+
+DAMOS accounts the below statistics for each scheme, from the beginning of the
+scheme's execution.
+
+- ``nr_tried``: Total number of regions that the scheme was tried to be
+  applied to.
+- ``sz_tried``: Total size of regions that the scheme was tried to be
+  applied to.
+- ``sz_ops_filter_passed``: Total bytes that passed operations set
+  layer-handled DAMOS filters.
+- ``nr_applied``: Total number of regions that the scheme was applied to.
+- ``sz_applied``: Total size of regions that the scheme was applied to.
+- ``qt_exceeds``: Total number of times the quota of the scheme has been
+  exceeded.
+
+"A scheme is tried to be applied to a region" means the DAMOS core logic
+determined the region is eligible for the scheme's :ref:`action
+<damon_design_damos_action>`. The :ref:`access pattern
+<damon_design_damos_access_pattern>`, :ref:`quotas
+<damon_design_damos_quotas>`, :ref:`watermarks
+<damon_design_damos_watermarks>`, and :ref:`filters
+<damon_design_damos_filters>` that are handled by the core logic could affect
+this. The core logic will only ask the underlying :ref:`operation set
+<damon_operations_set>` to apply the action to the region, so whether the
+action is really applied or not is unclear. That's why it is called "tried".
+
+"A scheme is applied to a region" means the :ref:`operation set
+<damon_operations_set>` has applied the action to at least a part of the
+region. The :ref:`filters <damon_design_damos_filters>` that are handled by the
+operation set, and the types of the :ref:`action <damon_design_damos_action>`
+and the pages of the region can affect this. For example, if a filter is set
+to exclude anonymous pages and the region has only anonymous pages, or if the
+action is ``pageout`` while all pages of the region are unreclaimable, applying
+the action to the region will fail.
+
+To know how user-space can read the stats via :ref:`DAMON sysfs interface
+<sysfs_interface>`, refer to :ref:`stats <sysfs_stats>` part of the
+documentation.
+
+Regions Walking
+~~~~~~~~~~~~~~~
+
+A DAMOS feature allowing users to access each region that a DAMOS action has
+just been applied to. Using this feature, the DAMON :ref:`API
+<damon_design_api>` allows users to access the full properties of the regions,
+including the access monitoring results and the amount of the region's
+internal memory that passed the DAMOS filters. The :ref:`DAMON sysfs interface
+<sysfs_interface>` also allows users to read the data via special :ref:`files
+<sysfs_schemes_tried_regions>`.
+
+.. _damon_design_api:
Application Programming Interface
---------------------------------
@@ -573,15 +709,11 @@ General Purpose User Interface Modules
DAMON modules that provide user space ABIs for general purpose DAMON usage in
runtime.
-DAMON user interface modules, namely 'DAMON sysfs interface' and 'DAMON debugfs
-interface' are DAMON API user kernel modules that provide ABIs to the
-user-space. Please note that DAMON debugfs interface is currently deprecated.
-
-Like many other ABIs, the modules create files on sysfs and debugfs, allow
-users to specify their requests to and get the answers from DAMON by writing to
-and reading from the files. As a response to such I/O, DAMON user interface
-modules control DAMON and retrieve the results as user requested via the DAMON
-API, and return the results to the user-space.
+Like many other ABIs, the modules create files on pseudo file systems like
+'sysfs', allow users to specify their requests to and get the answers from
+DAMON by writing to and reading from the files. As a response to such I/O,
+DAMON user interface modules control DAMON and retrieve the results as user
+requested via the DAMON API, and return the results to the user-space.
The ABIs are designed to be used for user space applications development,
rather than human beings' fingers. Human users are recommended to use such
@@ -590,8 +722,9 @@ Github (https://github.com/damonitor/damo), Pypi
(https://pypistats.org/packages/damo), and Fedora
(https://packages.fedoraproject.org/pkgs/python-damo/damo/).
-Please refer to the ABI :doc:`document </admin-guide/mm/damon/usage>` for
-details of the interfaces.
+Currently, one module of this type, namely 'DAMON sysfs interface', is
+available. Please refer to the ABI :ref:`doc <sysfs_interface>` for details of
+the interfaces.
Special-Purpose Access-aware Kernel Modules
@@ -599,8 +732,8 @@ Special-Purpose Access-aware Kernel Modules
DAMON modules that provide user space ABI for specific purpose DAMON usage.
-DAMON sysfs/debugfs user interfaces are for full control of all DAMON features
-in runtime. For each special-purpose system-wide data access-aware system
+DAMON user interface modules are for full control of all DAMON features in
+runtime. For each special-purpose system-wide data access-aware system
operations such as proactive reclamation or LRU lists balancing, the interfaces
could be simplified by removing unnecessary knobs for the specific purpose, and
extended for boot-time and even compile time control. Default values of DAMON
diff --git a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst
new file mode 100644
index 000000000000..334a854efb40
--- /dev/null
+++ b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst
@@ -0,0 +1,247 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================================
+DAMON Monitoring Interval Parameters Tuning Example
+====================================================
+
+DAMON's monitoring parameters need tuning based on the given workload and the
+monitoring purpose. There is a :ref:`tuning guide
+<damon_design_monitoring_params_tuning_guide>` for that. This document
+provides an example tuning based on the guide.
+
+Setup
+=====
+
+For the below example, DAMON of Linux kernel v6.11 and `damo
+<https://github.com/damonitor/damo>`_ (DAMON user-space tool) v2.5.9 were used
+to monitor and visualize access patterns on the physical address space of a
+system running a real-world server workload.
+
+5ms/100ms intervals: Too Short Interval
+=======================================
+
+Let's start by capturing the access pattern snapshot on the physical address
+space of the system using DAMON, with the default interval parameters (5
+milliseconds and 100 milliseconds for the sampling and the aggregation
+intervals, respectively). Wait ten minutes between the start of DAMON and
+the capturing of the snapshot, to show meaningful time-wise access patterns.
+::
+
+ # damo start
+ # sleep 600
+ # damo record --snapshot 0 1
+ # damo stop
+
+Then, list the DAMON-found regions of different access patterns, sorted by the
+"access temperature". "Access temperature" is a metric representing the
+access-hotness of a region. It is calculated as a weighted sum of the access
+frequency and the age of the region. If the access frequency is 0 %, the
+temperature is multiplied by minus one. That is, if a region is not accessed,
+it gets a negative temperature, which gets lower the longer the region stays
+unaccessed. The sorting is in temperature-ascending order, so the region at
+the top of the list is the coldest, and the one at the bottom is the hottest
+one. ::
+
+ # damo report access --sort_regions_by temperature
+ 0 addr 16.052 GiB size 5.985 GiB access 0 % age 5.900 s # coldest
+ 1 addr 22.037 GiB size 6.029 GiB access 0 % age 5.300 s
+ 2 addr 28.065 GiB size 6.045 GiB access 0 % age 5.200 s
+ 3 addr 10.069 GiB size 5.983 GiB access 0 % age 4.500 s
+ 4 addr 4.000 GiB size 6.069 GiB access 0 % age 4.400 s
+ 5 addr 62.008 GiB size 3.992 GiB access 0 % age 3.700 s
+ 6 addr 56.795 GiB size 5.213 GiB access 0 % age 3.300 s
+ 7 addr 39.393 GiB size 6.096 GiB access 0 % age 2.800 s
+ 8 addr 50.782 GiB size 6.012 GiB access 0 % age 2.800 s
+ 9 addr 34.111 GiB size 5.282 GiB access 0 % age 2.300 s
+ 10 addr 45.489 GiB size 5.293 GiB access 0 % age 1.800 s # hottest
+ total size: 62.000 GiB
+
+The list shows no seemingly hot regions, and only minimal access pattern
+diversity. Every region has zero access frequency. The number of regions is
+10, which is the default ``min_nr_regions`` value. The size of each region is
+also nearly identical. We can suspect this is because the “adaptive regions
+adjustment” mechanism was not working well. As the guide suggested, we can get
+the relative hotness of regions using ``age`` as the recency information.
+That would be better than nothing, but given the fact that the longest age is
+only about 6 seconds while we waited about ten minutes, it is unclear how
+useful this will be.
+
+The histogram visualization of the results, showing the total size of regions
+in each temperature range, also shows no interesting distribution pattern. ::
+
+ # damo report access --style temperature-sz-hist
+ <temperature> <total size>
+ [-,590,000,000, -,549,000,000) 5.985 GiB |********** |
+ [-,549,000,000, -,508,000,000) 12.074 GiB |********************|
+ [-,508,000,000, -,467,000,000) 0 B | |
+ [-,467,000,000, -,426,000,000) 12.052 GiB |********************|
+ [-,426,000,000, -,385,000,000) 0 B | |
+ [-,385,000,000, -,344,000,000) 3.992 GiB |******* |
+ [-,344,000,000, -,303,000,000) 5.213 GiB |********* |
+ [-,303,000,000, -,262,000,000) 12.109 GiB |********************|
+ [-,262,000,000, -,221,000,000) 5.282 GiB |********* |
+ [-,221,000,000, -,180,000,000) 0 B | |
+ [-,180,000,000, -,139,000,000) 5.293 GiB |********* |
+ total size: 62.000 GiB
+
+In short, the parameters provide poor quality monitoring results for hot
+regions detection. According to the :ref:`guide
+<damon_design_monitoring_params_tuning_guide>`, this is due to the too short
+aggregation interval.
+
+100ms/2s intervals: Starts Showing Small Hot Regions
+====================================================
+
+Following the guide, increase the intervals 20 times (100 milliseconds and 2
+seconds for sampling and aggregation intervals, respectively). ::
+
+ # damo start -s 100ms -a 2s
+ # sleep 600
+ # damo record --snapshot 0 1
+ # damo stop
+ # damo report access --sort_regions_by temperature
+ 0 addr 10.180 GiB size 6.117 GiB access 0 % age 7 m 8 s # coldest
+ 1 addr 49.275 GiB size 6.195 GiB access 0 % age 6 m 14 s
+ 2 addr 62.421 GiB size 3.579 GiB access 0 % age 6 m 4 s
+ 3 addr 40.154 GiB size 6.127 GiB access 0 % age 5 m 40 s
+ 4 addr 16.296 GiB size 6.182 GiB access 0 % age 5 m 32 s
+ 5 addr 34.254 GiB size 5.899 GiB access 0 % age 5 m 24 s
+ 6 addr 46.281 GiB size 2.995 GiB access 0 % age 5 m 20 s
+ 7 addr 28.420 GiB size 5.835 GiB access 0 % age 5 m 6 s
+ 8 addr 4.000 GiB size 6.180 GiB access 0 % age 4 m 16 s
+ 9 addr 22.478 GiB size 5.942 GiB access 0 % age 3 m 58 s
+ 10 addr 55.470 GiB size 915.645 MiB access 0 % age 3 m 6 s
+ 11 addr 56.364 GiB size 6.056 GiB access 0 % age 2 m 8 s
+ 12 addr 56.364 GiB size 4.000 KiB access 95 % age 16 s
+ 13 addr 49.275 GiB size 4.000 KiB access 100 % age 8 m 24 s # hottest
+ total size: 62.000 GiB
+ # damo report access --style temperature-sz-hist
+ <temperature> <total size>
+ [-42,800,000,000, -33,479,999,000) 22.018 GiB |***************** |
+ [-33,479,999,000, -24,159,998,000) 27.090 GiB |********************|
+ [-24,159,998,000, -14,839,997,000) 6.836 GiB |****** |
+ [-14,839,997,000, -5,519,996,000) 6.056 GiB |***** |
+ [-5,519,996,000, 3,800,005,000) 4.000 KiB |* |
+ [3,800,005,000, 13,120,006,000) 0 B | |
+ [13,120,006,000, 22,440,007,000) 0 B | |
+ [22,440,007,000, 31,760,008,000) 0 B | |
+ [31,760,008,000, 41,080,009,000) 0 B | |
+ [41,080,009,000, 50,400,010,000) 0 B | |
+ [50,400,010,000, 59,720,011,000) 4.000 KiB |* |
+ total size: 62.000 GiB
+
+DAMON found two distinct 4 KiB regions that are pretty hot. The regions are
+also well aged. The hottest 4 KiB region kept its access frequency for about
+8 minutes, and the coldest region saw no access for about 7 minutes.
+The distribution on the histogram also looks like it has a pattern.
+
+In particular, the finding of the 4 KiB regions among the 62 GiB total memory
+shows DAMON’s adaptive regions adjustment is working as designed.
+
+Still, the number of regions is close to ``min_nr_regions``, and the sizes of
+the cold regions are similar. The results are clearly improved, but there is
+still room for improvement.
+
+400ms/8s intervals: Pretty Improved Results
+===========================================
+
+Increase the intervals four times (400 milliseconds and 8 seconds
+for sampling and aggregation intervals, respectively). ::
+
+ # damo start -s 400ms -a 8s
+ # sleep 600
+ # damo record --snapshot 0 1
+ # damo stop
+ # damo report access --sort_regions_by temperature
+ 0 addr 64.492 GiB size 1.508 GiB access 0 % age 6 m 48 s # coldest
+ 1 addr 21.749 GiB size 5.674 GiB access 0 % age 6 m 8 s
+ 2 addr 27.422 GiB size 5.801 GiB access 0 % age 6 m
+ 3 addr 49.431 GiB size 8.675 GiB access 0 % age 5 m 28 s
+ 4 addr 33.223 GiB size 5.645 GiB access 0 % age 5 m 12 s
+ 5 addr 58.321 GiB size 6.170 GiB access 0 % age 5 m 4 s
+ [...]
+ 25 addr 6.615 GiB size 297.531 MiB access 15 % age 0 ns
+ 26 addr 9.513 GiB size 12.000 KiB access 20 % age 0 ns
+ 27 addr 9.511 GiB size 108.000 KiB access 25 % age 0 ns
+ 28 addr 9.513 GiB size 20.000 KiB access 25 % age 0 ns
+ 29 addr 9.511 GiB size 12.000 KiB access 30 % age 0 ns
+ 30 addr 9.520 GiB size 4.000 KiB access 40 % age 0 ns
+ [...]
+ 41 addr 9.520 GiB size 4.000 KiB access 80 % age 56 s
+ 42 addr 9.511 GiB size 12.000 KiB access 100 % age 6 m 16 s
+ 43 addr 58.321 GiB size 4.000 KiB access 100 % age 6 m 24 s
+ 44 addr 9.512 GiB size 4.000 KiB access 100 % age 6 m 48 s
+ 45 addr 58.106 GiB size 4.000 KiB access 100 % age 6 m 48 s # hottest
+ total size: 62.000 GiB
+ # damo report access --style temperature-sz-hist
+ <temperature> <total size>
+ [-40,800,000,000, -32,639,999,000) 21.657 GiB |********************|
+ [-32,639,999,000, -24,479,998,000) 17.938 GiB |***************** |
+ [-24,479,998,000, -16,319,997,000) 16.885 GiB |**************** |
+ [-16,319,997,000, -8,159,996,000) 586.879 MiB |* |
+ [-8,159,996,000, 5,000) 4.946 GiB |***** |
+ [5,000, 8,160,006,000) 260.000 KiB |* |
+ [8,160,006,000, 16,320,007,000) 0 B | |
+ [16,320,007,000, 24,480,008,000) 0 B | |
+ [24,480,008,000, 32,640,009,000) 0 B | |
+ [32,640,009,000, 40,800,010,000) 16.000 KiB |* |
+ [40,800,010,000, 48,960,011,000) 8.000 KiB |* |
+ total size: 62.000 GiB
+
+The number of regions having different access patterns has significantly
+increased. The size of each region is also more varied. The total size of
+regions with non-zero access frequency has also significantly increased. Maybe
+this is already good enough to make some meaningful memory management
+efficiency changes.
+
+800ms/16s intervals: Another bias
+=================================
+
+Further double the intervals (800 milliseconds and 16 seconds for sampling
+and aggregation intervals, respectively). The results are further improved for
+hot regions detection, but cold regions detection starts to look degraded. ::
+
+ # damo start -s 800ms -a 16s
+ # sleep 600
+ # damo record --snapshot 0 1
+ # damo stop
+ # damo report access --sort_regions_by temperature
+ 0 addr 64.781 GiB size 1.219 GiB access 0 % age 4 m 48 s
+ 1 addr 24.505 GiB size 2.475 GiB access 0 % age 4 m 16 s
+ 2 addr 26.980 GiB size 504.273 MiB access 0 % age 4 m
+ 3 addr 29.443 GiB size 2.462 GiB access 0 % age 4 m
+ 4 addr 37.264 GiB size 5.645 GiB access 0 % age 4 m
+ 5 addr 31.905 GiB size 5.359 GiB access 0 % age 3 m 44 s
+ [...]
+ 20 addr 8.711 GiB size 40.000 KiB access 5 % age 2 m 40 s
+ 21 addr 27.473 GiB size 1.970 GiB access 5 % age 4 m
+ 22 addr 48.185 GiB size 4.625 GiB access 5 % age 4 m
+ 23 addr 47.304 GiB size 902.117 MiB access 10 % age 4 m
+ 24 addr 8.711 GiB size 4.000 KiB access 100 % age 4 m
+ 25 addr 20.793 GiB size 3.713 GiB access 5 % age 4 m 16 s
+ 26 addr 8.773 GiB size 4.000 KiB access 100 % age 4 m 16 s
+ total size: 62.000 GiB
+ # damo report access --style temperature-sz-hist
+ <temperature> <total size>
+ [-28,800,000,000, -23,359,999,000) 12.294 GiB |***************** |
+ [-23,359,999,000, -17,919,998,000) 9.753 GiB |************* |
+ [-17,919,998,000, -12,479,997,000) 15.131 GiB |********************|
+ [-12,479,997,000, -7,039,996,000) 0 B | |
+ [-7,039,996,000, -1,599,995,000) 7.506 GiB |********** |
+ [-1,599,995,000, 3,840,006,000) 6.127 GiB |********* |
+ [3,840,006,000, 9,280,007,000) 0 B | |
+ [9,280,007,000, 14,720,008,000) 136.000 KiB |* |
+ [14,720,008,000, 20,160,009,000) 40.000 KiB |* |
+ [20,160,009,000, 25,600,010,000) 11.188 GiB |*************** |
+ [25,600,010,000, 31,040,011,000) 4.000 KiB |* |
+ total size: 62.000 GiB
+
+It found more regions having non-zero access frequency. The number of regions
+is still much higher than ``min_nr_regions``, but it is reduced from that of
+the previous setup. And the distribution now appears somewhat biased toward
+hot regions.
+
+Conclusion
+==========
+
+With the above experimental tuning results, we can conclude that the theory
+and the guide make sense for at least this workload, and could be applied to
+similar cases.
diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index 1d416658d7f5..81417fa2ed20 100644
--- a/Documentation/mm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
@@ -531,6 +531,10 @@ are extra requirements for accessing them:
new page table has been installed in the same location and filled with
entries. Writers normally need to take the PTE lock and revalidate that the
PMD entry still refers to the same PTE-level page table.
+ If the writer does not care whether it is the same PTE-level page table, it
+  can take the PMD lock and revalidate that the contents of the PMD entry still meet
+ the requirements. In particular, this also happens in :c:func:`!retract_page_tables`
+ when handling :c:macro:`!MADV_COLLAPSE`.
To access PTE-level page tables, a helper like :c:func:`!pte_offset_map_lock` or
:c:func:`!pte_offset_map` can be used depending on stability requirements.
diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst
index 581446d4a4eb..8e1ceb0a6619 100644
--- a/Documentation/mm/split_page_table_lock.rst
+++ b/Documentation/mm/split_page_table_lock.rst
@@ -62,7 +62,7 @@ Support of split page table lock by an architecture
===================================================
There's no need in special enabling of PTE split page table lock: everything
-required is done by pagetable_pte_ctor() and pagetable_pte_dtor(), which
+required is done by pagetable_pte_ctor() and pagetable_dtor(), which
must be called on PTE table allocation / freeing.
Make sure the architecture doesn't use slab allocator for page table
@@ -73,7 +73,7 @@ PMD split lock only makes sense if you have more than two page table
levels.
PMD split lock enabling requires pagetable_pmd_ctor() call on PMD table
-allocation and pagetable_pmd_dtor() on freeing.
+allocation and pagetable_dtor() on freeing.
Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index 62519d38c58b..b018ce346392 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -699,10 +699,10 @@ RAW socket option CAN_RAW_JOIN_FILTERS
The CAN_RAW socket can set multiple CAN identifier specific filters that
lead to multiple filters in the af_can.c filter processing. These filters
-are indenpendent from each other which leads to logical OR'ed filters when
+are independent from each other which leads to logical OR'ed filters when
applied (see :ref:`socketcan-rawfilter`).
-This socket option joines the given CAN filters in the way that only CAN
+This socket option joins the given CAN filters in the way that only CAN
frames are passed to user space that matched *all* given CAN filters. The
semantic for the applied filters is therefore changed to a logical AND.
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index dc45c0211353..03e1d3610333 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -41,7 +41,7 @@ blackhole_timeout - INTEGER (seconds)
MPTCP is re-enabled and will reset to the initial value when the
blackhole issue goes away.
- 0 to disable the blackhole detection.
+ 0 to disable the blackhole detection. This is a per-namespace sysctl.
Default: 3600
diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst
index 6083210ab2a4..f970a2be271a 100644
--- a/Documentation/networking/napi.rst
+++ b/Documentation/networking/napi.rst
@@ -362,7 +362,7 @@ It is expected that ``irq-suspend-timeout`` will be set to a value much larger
than ``gro_flush_timeout`` as ``irq-suspend-timeout`` should suspend IRQs for
the duration of one userland processing cycle.
-While it is not stricly necessary to use ``napi_defer_hard_irqs`` and
+While it is not strictly necessary to use ``napi_defer_hard_irqs`` and
``gro_flush_timeout`` to use IRQ suspension, their use is strongly
recommended.
diff --git a/Documentation/power/video.rst b/Documentation/power/video.rst
index 337a2ba9f32f..8ab2458d1304 100644
--- a/Documentation/power/video.rst
+++ b/Documentation/power/video.rst
@@ -190,7 +190,7 @@ Toshiba Portege 3020CT s3_mode (3)
Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK)
Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK)
Toshiba Satellite 4090XCDT ??? [#f1]_
-Toshiba Satellite P10-554 s3_bios,s3_mode (4)[#f3]_
+Toshiba Satellite P10-554 s3_bios,s3_mode (4) [#f3]_
Toshiba M30 (2) xor X with nvidia driver using internal AGP
Uniwill 244IIO ??? [#f1]_
=============================== ===============================================
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 82b5e378eebf..a0beca805362 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -59,7 +59,6 @@ iptables 1.4.2 iptables -V
openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
Sphinx\ [#f1]_ 2.4.4 sphinx-build --version
-cpio any cpio --version
GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
mkimage (optional) 2017.01 mkimage --version
@@ -536,11 +535,6 @@ mcelog
- <https://www.mcelog.org/>
-cpio
-----
-
-- <https://www.gnu.org/software/cpio/>
-
Networking
**********
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index 6d83df3dd74e..c4672d7df2f7 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -242,9 +242,9 @@ The following briefly shows how a waking task is scheduled and executed.
task was inserted directly from ``ops.select_cpu()``). ``ops.enqueue()``
can make one of the following decisions:
- * Immediately insert the task into either the global or local DSQ by
- calling ``scx_bpf_dsq_insert()`` with ``SCX_DSQ_GLOBAL`` or
- ``SCX_DSQ_LOCAL``, respectively.
+ * Immediately insert the task into either the global or a local DSQ by
+ calling ``scx_bpf_dsq_insert()`` with one of the following options:
+ ``SCX_DSQ_GLOBAL``, ``SCX_DSQ_LOCAL``, or ``SCX_DSQ_LOCAL_ON | cpu``.
* Immediately insert the task into a custom DSQ by calling
``scx_bpf_dsq_insert()`` with a DSQ ID which is smaller than 2^63.
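+
+For illustration, a minimal and purely hypothetical ``ops.enqueue()``
+implementation that always uses the global DSQ could look roughly like the
+in-tree ``tools/sched_ext`` examples. The header path, the
+``BPF_STRUCT_OPS()`` macro, and the ``SCX_*`` constants are assumed from those
+examples and may differ in other trees::
+
+  #include <scx/common.bpf.h>
+
+  char _license[] SEC("license") = "GPL";
+
+  void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
+  {
+          /* Insert at the end of the global DSQ with the default slice. */
+          scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+  }
+
+  SEC(".struct_ops.link")
+  struct sched_ext_ops example_ops = {
+          .enqueue        = (void *)example_enqueue,
+          .name           = "example",
+  };
+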
diff --git a/Documentation/scsi/scsi_eh.rst b/Documentation/scsi/scsi_eh.rst
index 104d09e9af09..36cff176c5e6 100644
--- a/Documentation/scsi/scsi_eh.rst
+++ b/Documentation/scsi/scsi_eh.rst
@@ -54,13 +54,13 @@ invoking hostt->queuecommand() or the block layer will time it out.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For all non-EH commands, scsi_done() is the completion callback. It
-just calls blk_complete_request() to delete the block layer timer and
-raise SCSI_SOFTIRQ
+just calls blk_mq_complete_request() to delete the block layer timer and
+raise BLOCK_SOFTIRQ.
-SCSI_SOFTIRQ handler scsi_softirq calls scsi_decide_disposition() to
-determine what to do with the command. scsi_decide_disposition()
-looks at the scmd->result value and sense data to determine what to do
-with the command.
+The BLOCK_SOFTIRQ indirectly calls scsi_complete(), which calls
+scsi_decide_disposition() to determine what to do with the command.
+scsi_decide_disposition() looks at the scmd->result value and sense
+data to determine what to do with the command.
- SUCCESS
@@ -110,7 +110,7 @@ The timeout handler is scsi_timeout(). When a timeout occurs, this function
retry which failed), when retries are exceeded, or when the EH deadline is
expired. In these cases Step #3 is taken.
- 3. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
+ 3. scsi_eh_scmd_add(scmd) is invoked for the
command. See [1-4] for more information.
1.3 Asynchronous command aborts
@@ -277,7 +277,6 @@ scmd->allowed.
:ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
:LOCKING: none
@@ -317,7 +316,7 @@ scmd->allowed.
``scsi_eh_get_sense``
This action is taken for each error-completed
- (!SCSI_EH_CANCEL_CMD) commands without valid sense data. Most
+ command without valid sense data. Most
SCSI transports/LLDDs automatically acquire sense data on
command failures (autosense). Autosense is recommended for
performance reasons and as sense information could get out of
@@ -347,30 +346,6 @@ scmd->allowed.
- otherwise
No action.
- 3. If !list_empty(&eh_work_q), invoke scsi_eh_abort_cmds().
-
- ``scsi_eh_abort_cmds``
-
- This action is taken for each timed out command when
- no_async_abort is enabled in the host template.
- hostt->eh_abort_handler() is invoked for each scmd. The
- handler returns SUCCESS if it has succeeded to make LLDD and
- all related hardware forget about the scmd.
-
- If a timedout scmd is successfully aborted and the sdev is
- either offline or ready, scsi_eh_finish_cmd() is invoked for
- the scmd. Otherwise, the scmd is left in eh_work_q for
- higher-severity actions.
-
- Note that both offline and ready status mean that the sdev is
- ready to process new scmds, where processing also implies
- immediate failing; thus, if a sdev is in one of the two
- states, no further recovery action is needed.
-
- Device readiness is tested using scsi_eh_tur() which issues
- TEST_UNIT_READY command. Note that the scmd must have been
- aborted successfully before reusing it for TEST_UNIT_READY.
-
4. If !list_empty(&eh_work_q), invoke scsi_eh_ready_devs()
``scsi_eh_ready_devs``
@@ -384,7 +359,7 @@ scmd->allowed.
For each sdev which has failed scmds with valid sense data
of which scsi_check_sense()'s verdict is FAILED,
- START_STOP_UNIT command is issued w/ start=1. Note that
+ START STOP UNIT command is issued w/ start=1. Note that
as we explicitly choose error-completed scmds, it is known
that lower layers have forgotten about the scmd and we can
reuse it for STU.
@@ -478,9 +453,6 @@ except for #1 must be implemented by eh_strategy_handler().
- shost->host_failed is zero.
- - Each scmd is in such a state that scsi_setup_cmd_retry() on the
- scmd doesn't make any difference.
-
- shost->eh_cmd_q is cleared.
- Each scmd->eh_entry is cleared.
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 2df29b92e196..3cd6dce98e74 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -101,7 +101,7 @@ supplied functions" below.
Those functions in group b) are listed in a section entitled "Interface
functions" below. Their function pointers are placed in the members of
"struct scsi_host_template", an instance of which is passed to
-scsi_host_alloc() [#]_. Those interface functions that the LLD does not
+scsi_host_alloc(). Those interface functions that the LLD does not
wish to supply should have NULL placed in the corresponding member of
struct scsi_host_template. Defining an instance of struct
scsi_host_template at file scope will cause NULL to be placed in function
@@ -112,12 +112,9 @@ Those usages in group c) should be handled with care, especially in a
that are shared with the mid level and other layers.
All functions defined within an LLD and all data defined at file scope
-should be static. For example the slave_alloc() function in an LLD
+should be static. For example the sdev_init() function in an LLD
called "xxx" could be defined as
-``static int xxx_slave_alloc(struct scsi_device * sdev) { /* code */ }``
-
-.. [#] the scsi_host_alloc() function is a replacement for the rather vaguely
- named scsi_register() function in most situations.
+``static int xxx_sdev_init(struct scsi_device * sdev) { /* code */ }``
Hotplug initialization model
@@ -149,21 +146,21 @@ scsi devices of which only the first 2 respond::
scsi_add_host() ---->
scsi_scan_host() -------+
|
- slave_alloc()
- slave_configure() --> scsi_change_queue_depth()
+ sdev_init()
+ sdev_configure() --> scsi_change_queue_depth()
|
- slave_alloc()
- slave_configure()
+ sdev_init()
+ sdev_configure()
|
- slave_alloc() ***
- slave_destroy() ***
+ sdev_init() ***
+ sdev_destroy() ***
*** For scsi devices that the mid level tries to scan but do not
- respond, a slave_alloc(), slave_destroy() pair is called.
+ respond, a sdev_init(), sdev_destroy() pair is called.
If the LLD wants to adjust the default queue settings, it can invoke
-scsi_change_queue_depth() in its slave_configure() routine.
+scsi_change_queue_depth() in its sdev_configure() routine.
When an HBA is being removed it could be as part of an orderly shutdown
associated with the LLD module being unloaded (e.g. with the "rmmod"
@@ -176,8 +173,8 @@ same::
===----------------------=========-----------------===------
scsi_remove_host() ---------+
|
- slave_destroy()
- slave_destroy()
+ sdev_destroy()
+ sdev_destroy()
scsi_host_put()
It may be useful for a LLD to keep track of struct Scsi_Host instances
@@ -202,8 +199,8 @@ An LLD can use this sequence to make the mid level aware of a SCSI device::
===-------------------=========--------------------===------
scsi_add_device() ------+
|
- slave_alloc()
- slave_configure() [--> scsi_change_queue_depth()]
+ sdev_init()
+ sdev_configure() [--> scsi_change_queue_depth()]
In a similar fashion, an LLD may become aware that a SCSI device has been
removed (unplugged) or the connection to it has been interrupted. Some
@@ -218,12 +215,12 @@ upper layers with this sequence::
===----------------------=========-----------------===------
scsi_remove_device() -------+
|
- slave_destroy()
+ sdev_destroy()
It may be useful for an LLD to keep track of struct scsi_device instances
-(a pointer is passed as the parameter to slave_alloc() and
-slave_configure() callbacks). Such instances are "owned" by the mid-level.
-struct scsi_device instances are freed after slave_destroy().
+(a pointer is passed as the parameter to sdev_init() and
+sdev_configure() callbacks). Such instances are "owned" by the mid-level.
+struct scsi_device instances are freed after sdev_destroy().
Reference Counting
@@ -302,14 +299,12 @@ Summary:
- scsi_host_alloc - return a new scsi_host instance whose refcount==1
- scsi_host_get - increments Scsi_Host instance's refcount
- scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
- - scsi_register - create and register a scsi host adapter instance.
- scsi_remove_device - detach and remove a SCSI device
- scsi_remove_host - detach and remove all SCSI devices owned by host
- scsi_report_bus_reset - report scsi _bus_ reset observed
- scsi_scan_host - scan SCSI bus
- scsi_track_queue_full - track successive QUEUE_FULL events
- scsi_unblock_requests - allow further commands to be queued to given host
- - scsi_unregister - [calls scsi_host_put()]
Details::
@@ -331,7 +326,7 @@ Details::
* bus scan when an HBA is added (i.e. scsi_scan_host()). So it
* should only be called if the HBA becomes aware of a new scsi
* device (lu) after scsi_scan_host() has completed. If successful
- * this call can lead to slave_alloc() and slave_configure() callbacks
+ * this call can lead to sdev_init() and sdev_configure() callbacks
* into the LLD.
*
* Defined in: drivers/scsi/scsi_scan.c
@@ -374,8 +369,8 @@ Details::
* Might block: no
*
* Notes: Can be invoked any time on a SCSI device controlled by this
- * LLD. [Specifically during and after slave_configure() and prior to
- * slave_destroy().] Can safely be invoked from interrupt code.
+ * LLD. [Specifically during and after sdev_configure() and prior to
+ * sdev_destroy().] Can safely be invoked from interrupt code.
*
* Defined in: drivers/scsi/scsi.c [see source code for more notes]
*
@@ -475,27 +470,6 @@ Details::
/**
- * scsi_register - create and register a scsi host adapter instance.
- * @sht: pointer to scsi host template
- * @privsize: extra bytes to allocate in hostdata array (which is the
- * last member of the returned Scsi_Host instance)
- *
- * Returns pointer to new Scsi_Host instance or NULL on failure
- *
- * Might block: yes
- *
- * Notes: When this call returns to the LLD, the SCSI bus scan on
- * this host has _not_ yet been done.
- * The hostdata array (by default zero length) is a per host scratch
- * area for the LLD.
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
- struct Scsi_Host * scsi_register(struct scsi_host_template * sht,
- int privsize)
-
-
- /**
* scsi_remove_device - detach and remove a SCSI device
* @sdev: a pointer to a scsi device instance
*
@@ -506,7 +480,7 @@ Details::
* Notes: If an LLD becomes aware that a scsi device (lu) has
* been removed but its host is still present then it can request
* the removal of that scsi device. If successful this call will
- * lead to the slave_destroy() callback being invoked. sdev is an
+ * lead to the sdev_destroy() callback being invoked. sdev is an
* invalid pointer after this call.
*
* Defined in: drivers/scsi/scsi_sysfs.c .
@@ -524,7 +498,7 @@ Details::
*
* Notes: Should only be invoked if the "hotplug initialization
* model" is being used. It should be called _prior_ to
- * scsi_unregister().
+ * calling scsi_host_put().
*
* Defined in: drivers/scsi/hosts.c .
**/
@@ -601,43 +575,24 @@ Details::
void scsi_unblock_requests(struct Scsi_Host * shost)
- /**
- * scsi_unregister - unregister and free memory used by host instance
- * @shp: pointer to scsi host instance to unregister.
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Notes: Should not be invoked if the "hotplug initialization
- * model" is being used. Called internally by exit_this_scsi_driver()
- * in the "passive initialization model". Hence a LLD has no need to
- * call this function directly.
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
- void scsi_unregister(struct Scsi_Host * shp)
-
-
-
Interface Functions
===================
Interface functions are supplied (defined) by LLDs and their function
pointers are placed in an instance of struct scsi_host_template which
-is passed to scsi_host_alloc() [or scsi_register() / init_this_scsi_driver()].
+is passed to scsi_host_alloc().
Some are mandatory. Interface functions should be declared static. The
-accepted convention is that driver "xyz" will declare its slave_configure()
+accepted convention is that driver "xyz" will declare its sdev_configure()
function as::
- static int xyz_slave_configure(struct scsi_device * sdev);
+ static int xyz_sdev_configure(struct scsi_device * sdev);
and so forth for all interface functions listed below.
-A pointer to this function should be placed in the 'slave_configure' member
+A pointer to this function should be placed in the 'sdev_configure' member
of a "struct scsi_host_template" instance. A pointer to such an instance
-should be passed to the mid level's scsi_host_alloc() [or scsi_register() /
-init_this_scsi_driver()].
+should be passed to the mid level's scsi_host_alloc().
+
The interface functions are also described in the include/scsi/scsi_host.h
file immediately above their definition point in "struct scsi_host_template".
@@ -657,9 +612,9 @@ Summary:
- ioctl - driver can respond to ioctls
- proc_info - supports /proc/scsi/{driver_name}/{host_no}
- queuecommand - queue scsi command, invoke 'done' on completion
- - slave_alloc - prior to any commands being sent to a new device
- - slave_configure - driver fine tuning for given device after attach
- - slave_destroy - given device is about to be shut down
+ - sdev_init - prior to any commands being sent to a new device
+ - sdev_configure - driver fine tuning for given device after attach
+ - sdev_destroy - given device is about to be shut down
Details::
@@ -728,11 +683,7 @@ Details::
*
* Calling context: kernel thread
*
- * Notes: If 'no_async_abort' is defined this callback
- * will be invoked from scsi_eh thread. No other commands
- * will then be queued on current host during eh.
- * Otherwise it will be called whenever scsi_timeout()
- * is called due to a command timeout.
+ * Notes: This is called only for a command that has timed out.
*
* Optionally defined in: LLD
**/
@@ -817,10 +768,6 @@ Details::
* The SCSI_IOCTL_PROBE_HOST ioctl yields the string returned by this
* function (or struct Scsi_Host::name if this function is not
* available).
- * In a similar manner, init_this_scsi_driver() outputs to the console
- * each host's "info" (or name) for the driver it is registering.
- * Also if proc_info() is not supplied, the output of this function
- * is used instead.
*
* Optionally defined in: LLD
**/
@@ -960,7 +907,7 @@ Details::
/**
- * slave_alloc - prior to any commands being sent to a new device
+ * sdev_init - prior to any commands being sent to a new device
* (i.e. just prior to scan) this call is made
* @sdp: pointer to new device (about to be scanned)
*
@@ -975,24 +922,24 @@ Details::
* prior to its initial scan. The corresponding scsi device may not
* exist but the mid level is just about to scan for it (i.e. send
* and INQUIRY command plus ...). If a device is found then
- * slave_configure() will be called while if a device is not found
- * slave_destroy() is called.
+ * sdev_configure() will be called while if a device is not found
+ * sdev_destroy() is called.
* For more details see the include/scsi/scsi_host.h file.
*
* Optionally defined in: LLD
**/
- int slave_alloc(struct scsi_device *sdp)
+ int sdev_init(struct scsi_device *sdp)
/**
- * slave_configure - driver fine tuning for given device just after it
+ * sdev_configure - driver fine tuning for given device just after it
* has been first scanned (i.e. it responded to an
* INQUIRY)
* @sdp: device that has just been attached
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is taken offline. [offline devices will _not_ have
- * slave_destroy() called on them so clean up resources.]
+ * sdev_destroy() called on them so clean up resources.]
*
* Locks: none
*
@@ -1004,11 +951,11 @@ Details::
*
* Optionally defined in: LLD
**/
- int slave_configure(struct scsi_device *sdp)
+ int sdev_configure(struct scsi_device *sdp)
/**
- * slave_destroy - given device is about to be shut down. All
+ * sdev_destroy - given device is about to be shut down. All
* activity has ceased on this device.
* @sdp: device that is about to be shut down
*
@@ -1023,12 +970,12 @@ Details::
* by this driver for given device should be freed now. No further
* commands will be sent for this sdp instance. [However the device
* could be re-attached in the future in which case a new instance
- * of struct scsi_device would be supplied by future slave_alloc()
- * and slave_configure() calls.]
+ * of struct scsi_device would be supplied by future sdev_init()
+ * and sdev_configure() calls.]
*
* Optionally defined in: LLD
**/
- void slave_destroy(struct scsi_device *sdp)
+ void sdev_destroy(struct scsi_device *sdp)
@@ -1039,7 +986,7 @@ struct scsi_host_template
There is one "struct scsi_host_template" instance per LLD [#]_. It is
typically initialized as a file scope static in a driver's header file. That
way members that are not explicitly initialized will be set to 0 or NULL.
-Member of interest:
+Members of interest:
name
- name of driver (may contain spaces, please limit to
@@ -1055,6 +1002,13 @@ Member of interest:
- primary callback that the mid level uses to inject
SCSI commands into an LLD.
+ vendor_id
+ - a unique value that identifies the vendor supplying
+ the LLD for the Scsi_Host. Used most often in validating
+ vendor-specific message requests. Value consists of an
+ identifier type and a vendor-specific value.
+ See scsi_netlink.h for a description of valid formats.
+
The structure is defined and commented in include/scsi/scsi_host.h
.. [#] In extreme situations a single driver may have several instances
@@ -1095,9 +1049,6 @@ of interest:
- maximum number of commands that can be queued on devices
controlled by the host. Overridden by LLD calls to
scsi_change_queue_depth().
- no_async_abort
- - 1=>Asynchronous aborts are not supported
- - 0=>Timed-out commands will be aborted asynchronously
hostt
- pointer to driver's struct scsi_host_template from which
this struct Scsi_Host instance was spawned
@@ -1106,22 +1057,10 @@ of interest:
transportt
- pointer to driver's struct scsi_transport_template instance
(if any). FC and SPI transports currently supported.
- sh_list
- - a double linked list of pointers to all struct Scsi_Host
- instances (currently ordered by ascending host_no)
- my_devices
- - a double linked list of pointers to struct scsi_device
- instances that belong to this host.
hostdata[0]
- area reserved for LLD at end of struct Scsi_Host. Size
- is set by the second argument (named 'xtr_bytes') to
- scsi_host_alloc() or scsi_register().
- vendor_id
- - a unique value that identifies the vendor supplying
- the LLD for the Scsi_Host. Used most often in validating
- vendor-specific message requests. Value consists of an
- identifier type and a vendor-specific value.
- See scsi_netlink.h for a description of valid formats.
+ is set by the second argument (named 'privsize') to
+ scsi_host_alloc().
The scsi_host structure is defined in include/scsi/scsi_host.h
@@ -1143,30 +1082,11 @@ Members of interest:
cmnd
- array containing SCSI command
- cmnd_len
+ cmd_len
- length (in bytes) of SCSI command
sc_data_direction
- direction of data transfer in data phase. See
"enum dma_data_direction" in include/linux/dma-mapping.h
- request_bufflen
- - number of data bytes to transfer (0 if no data phase)
- use_sg
- - ==0 -> no scatter gather list, hence transfer data
- to/from request_buffer
- - >0 -> scatter gather list (actually an array) in
- request_buffer with use_sg elements
- request_buffer
- - either contains data buffer or scatter gather list
- depending on the setting of use_sg. Scatter gather
- elements are defined by 'struct scatterlist' found
- in include/linux/scatterlist.h .
- done
- - function pointer that should be invoked by LLD when the
- SCSI command is completed (successfully or otherwise).
- Should only be called by an LLD if the LLD has accepted
- the command (i.e. queuecommand() returned or will return
- 0). The LLD may invoke 'done' prior to queuecommand()
- finishing.
result
- should be set by LLD prior to calling 'done'. A value
of 0 implies a successfully completed command (and all
@@ -1189,13 +1109,13 @@ Members of interest:
device
- pointer to scsi_device object that this command is
associated with.
- resid
+ resid_len (access by calling scsi_set_resid() / scsi_get_resid())
- an LLD should set this unsigned integer to the requested
transfer length (i.e. 'request_bufflen') less the number
- of bytes that are actually transferred. 'resid' is
+ of bytes that are actually transferred. 'resid_len' is
preset to 0 so an LLD can ignore it if it cannot detect
underruns (overruns should not be reported). An LLD
- should set 'resid' prior to invoking 'done'. The most
+ should set 'resid_len' prior to invoking 'done'. The most
interesting case is data transfers from a SCSI target
device (e.g. READs) that underrun.
underflow
@@ -1204,10 +1124,10 @@ Members of interest:
figure. Not many LLDs implement this check and some that
do just output an error message to the log rather than
report a DID_ERROR. Better for an LLD to implement
- 'resid'.
+ 'resid_len'.
-It is recommended that a LLD set 'resid' on data transfers from a SCSI
-target device (e.g. READs). It is especially important that 'resid' is set
+It is recommended that a LLD set 'resid_len' on data transfers from a SCSI
+target device (e.g. READs). It is especially important that 'resid_len' is set
when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
(and possibly RECOVERED ERROR). In these cases if a LLD is in doubt how much
data has been received then the safest approach is to indicate no bytes have
@@ -1217,7 +1137,7 @@ a LLD might use these helpers::
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate only three 512
-bytes blocks has been received 'resid' could be set like this::
+bytes blocks have been received 'resid_len' could be set like this::
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
diff --git a/Documentation/sound/designs/midi-2.0.rst b/Documentation/sound/designs/midi-2.0.rst
index 086487ca7ab1..71a343c93fe7 100644
--- a/Documentation/sound/designs/midi-2.0.rst
+++ b/Documentation/sound/designs/midi-2.0.rst
@@ -293,6 +293,17 @@ Rawmidi API Extensions
status 0x05). When UMP core receives such a message, it updates the
UMP EP info and the corresponding sequencer clients as well.
+* The legacy rawmidi device number is found in the new `tied_device`
+  field of the UMP rawmidi info. Conversely, the UMP rawmidi device
+  number is found in the `tied_device` field of the legacy rawmidi
+  info.
+
+* Each substream of the legacy rawmidi may be enabled / disabled
+ dynamically depending on the UMP FB state.
+ When the selected substream is inactive, it's indicated by the bit
+ 0x10 (`SNDRV_RAWMIDI_INFO_STREAM_INACTIVE`) in the `flags` field of
+ the legacy rawmidi info.
+
Control API Extensions
======================
@@ -377,6 +388,13 @@ Sequencer API Extensions
announcement to the ALSA sequencer system port, similarly like the
normal port change notification.
+* There are two extended event types for notifying the UMP Endpoint and
+ Function Block changes via the system announcement port:
+ type 68 (`SNDRV_SEQ_EVENT_UMP_EP_CHANGE`) and type 69
+ (`SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE`). They take the new type,
+ `snd_seq_ev_ump_notify` in the payload, indicating the client number
+ and the FB number that are changed.
+
MIDI2 USB Gadget Function Driver
================================
diff --git a/Documentation/sunrpc/xdr/nfs4_1.x b/Documentation/sunrpc/xdr/nfs4_1.x
new file mode 100644
index 000000000000..ca95150a3a29
--- /dev/null
+++ b/Documentation/sunrpc/xdr/nfs4_1.x
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010 IETF Trust and the persons identified
+ * as the document authors. All rights reserved.
+ *
+ * The document authors are identified in RFC 3530 and
+ * RFC 5661.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer in the documentation and/or other
+ * materials provided with the distribution.
+ *
+ * - Neither the name of Internet Society, IETF or IETF
+ * Trust, nor the names of specific contributors, may be
+ * used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
+ * AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+pragma header nfs4;
+
+/*
+ * Basic typedefs for RFC 1832 data type definitions
+ */
+typedef hyper int64_t;
+typedef unsigned int uint32_t;
+
+/*
+ * Basic data types
+ */
+typedef uint32_t bitmap4<>;
+
+/*
+ * Timeval
+ */
+struct nfstime4 {
+ int64_t seconds;
+ uint32_t nseconds;
+};
+
+
+/*
+ * The following content was extracted from draft-ietf-nfsv4-delstid
+ */
+
+typedef bool fattr4_offline;
+
+
+const FATTR4_OFFLINE = 83;
+
+
+struct open_arguments4 {
+ bitmap4 oa_share_access;
+ bitmap4 oa_share_deny;
+ bitmap4 oa_share_access_want;
+ bitmap4 oa_open_claim;
+ bitmap4 oa_create_mode;
+};
+
+
+enum open_args_share_access4 {
+ OPEN_ARGS_SHARE_ACCESS_READ = 1,
+ OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
+ OPEN_ARGS_SHARE_ACCESS_BOTH = 3
+};
+
+
+enum open_args_share_deny4 {
+ OPEN_ARGS_SHARE_DENY_NONE = 0,
+ OPEN_ARGS_SHARE_DENY_READ = 1,
+ OPEN_ARGS_SHARE_DENY_WRITE = 2,
+ OPEN_ARGS_SHARE_DENY_BOTH = 3
+};
+
+
+enum open_args_share_access_want4 {
+ OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG = 3,
+ OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG = 4,
+ OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL = 5,
+ OPEN_ARGS_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL
+ = 17,
+ OPEN_ARGS_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED
+ = 18,
+ OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
+ OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21
+};
+
+
+enum open_args_open_claim4 {
+ OPEN_ARGS_OPEN_CLAIM_NULL = 0,
+ OPEN_ARGS_OPEN_CLAIM_PREVIOUS = 1,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR = 2,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV = 3,
+ OPEN_ARGS_OPEN_CLAIM_FH = 4,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6
+};
+
+
+enum open_args_createmode4 {
+ OPEN_ARGS_CREATEMODE_UNCHECKED4 = 0,
+ OPEN_ARGS_CREATE_MODE_GUARDED = 1,
+ OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
+ OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3
+};
+
+
+typedef open_arguments4 fattr4_open_arguments;
+pragma public fattr4_open_arguments;
+
+
+%/*
+% * Determine what OPEN supports.
+% */
+const FATTR4_OPEN_ARGUMENTS = 86;
+
+
+
+
+const OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010;
+
+
+/*
+ * attributes for the delegation times being
+ * cached and served by the "client"
+ */
+typedef nfstime4 fattr4_time_deleg_access;
+typedef nfstime4 fattr4_time_deleg_modify;
+pragma public fattr4_time_deleg_access;
+pragma public fattr4_time_deleg_modify;
+
+
+%/*
+% * New RECOMMENDED Attribute for
+% * delegation caching of times
+% */
+const FATTR4_TIME_DELEG_ACCESS = 84;
+const FATTR4_TIME_DELEG_MODIFY = 85;
+
+
+
+/* new flags for share_access field of OPEN4args */
+const OPEN4_SHARE_ACCESS_WANT_DELEG_MASK = 0xFF00;
+const OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE = 0x0000;
+const OPEN4_SHARE_ACCESS_WANT_READ_DELEG = 0x0100;
+const OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG = 0x0200;
+const OPEN4_SHARE_ACCESS_WANT_ANY_DELEG = 0x0300;
+const OPEN4_SHARE_ACCESS_WANT_NO_DELEG = 0x0400;
+const OPEN4_SHARE_ACCESS_WANT_CANCEL = 0x0500;
+
+const OPEN4_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 0x10000;
+const OPEN4_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 0x20000;
+const OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 0x100000;
+const OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 0x200000;
+
+enum open_delegation_type4 {
+ OPEN_DELEGATE_NONE = 0,
+ OPEN_DELEGATE_READ = 1,
+ OPEN_DELEGATE_WRITE = 2,
+ OPEN_DELEGATE_NONE_EXT = 3, /* new to v4.1 */
+ OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5
+};
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 759907c20e75..2d88a2acacc0 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -55,6 +55,30 @@ command::
# echo 'irq:*' > /sys/kernel/tracing/set_event
+The set_event file may also be used to enable only the events associated with
+a specific module::
+
+ # echo ':mod:<module>' > /sys/kernel/tracing/set_event
+
+This will enable all events in the module ``<module>``. If the module is not
+yet loaded, the string will be saved, and when a module that matches
+``<module>`` is loaded, the matching events will be enabled at that point.
+
+The text before ``:mod:`` will be parsed to select specific events that the
+module creates::
+
+ # echo '<match>:mod:<module>' > /sys/kernel/tracing/set_event
+
+The above will enable any system or event that ``<match>`` matches. If
+``<match>`` is ``"*"`` then it will match all events.
+
+To enable only a specific event within a system::
+
+ # echo '<system>:<event>:mod:<module>' > /sys/kernel/tracing/set_event
+
+If ``<event>`` is ``"*"`` then it will match all events within the system
+for a given module.
+
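+The same strings may also be written programmatically. Below is a minimal,
+hypothetical C sketch (the module name ``mydrv`` is only an example, and
+tracefs is assumed to be mounted at ``/sys/kernel/tracing``) that enables
+every event created by that module::
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          const char *cmd = "*:mod:mydrv";  /* hypothetical module name */
+          int fd = open("/sys/kernel/tracing/set_event", O_WRONLY);
+
+          /* Per the description above, the string is saved for later
+           * if the module is not loaded yet. */
+          if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0) {
+                  perror("set_event");
+                  return 1;
+          }
+          close(fd);
+          return 0;
+  }
+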
2.2 Via the 'enable' toggle
---------------------------
diff --git a/Documentation/translations/sp_SP/index.rst b/Documentation/translations/sp_SP/index.rst
index aae7018b0d1a..2b50283e1608 100644
--- a/Documentation/translations/sp_SP/index.rst
+++ b/Documentation/translations/sp_SP/index.rst
@@ -7,7 +7,7 @@ Traducción al español
\kerneldocCJKoff
-:maintainer: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+:maintainer: Carlos Bilbao <carlos.bilbao@kernel.org>
.. _sp_disclaimer:
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
index 50f6f0b6bf11..9d7cb51be493 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
@@ -26,12 +26,7 @@ DAMON 为不同的用户提供了下面这些接口。
使用它,用户可以通过读取和写入特殊的sysfs文件来使用DAMON的主要功能。因此,你可以编写和使
用你个性化的DAMON sysfs包装程序,代替你读/写sysfs文件。 `DAMON用户空间工具
<https://github.com/damonitor/damo>`_ 就是这种程序的一个例子 它同时支持虚拟和物理地址
- 空间的监测。注意,这个界面只提供简单的监测结果 :ref:`统计 <damos_stats>`。对于详细的监测
- 结果,DAMON提供了一个:ref:`跟踪点 <tracepoint>`。
-- *debugfs interface.*
- :ref:`这 <debugfs_interface>` 几乎与:ref:`sysfs interface <sysfs_interface>` 接
- 口相同。这将在下一个LTS内核发布后被移除,所以用户应该转移到
- :ref:`sysfs interface <sysfs_interface>`。
+ 空间的监测。
- *内核空间编程接口。*
:doc:`这 </mm/damon/api>` 这是为内核空间程序员准备的。使用它,用户可以通过为你编写内
核空间的DAMON应用程序,最灵活有效地利用DAMON的每一个功能。你甚至可以为各种地址空间扩展DAMON。
@@ -335,247 +330,6 @@ tried_regions/<N>/
请注意,我们强烈建议使用用户空间的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那样手动读写文件。以上只是一个例子。
-debugfs接口
-===========
-
-.. note::
-
- DAMON debugfs接口将在下一个LTS内核发布后被移除,所以用户应该转移到
- :ref:`sysfs接口<sysfs_interface>`。
-
-DAMON导出了八个文件, ``attrs``, ``target_ids``, ``init_regions``,
-``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``, ``mk_contexts`` 和
-``rm_contexts`` under its debugfs directory, ``<debugfs>/damon/``.
-
-
-属性
-----
-
-用户可以通过读取和写入 ``attrs`` 文件获得和设置 ``采样间隔`` 、 ``聚集间隔`` 、 ``更新间隔``
-以及监测目标区域的最小/最大数量。要详细了解监测属性,请参考 `:doc:/mm/damon/design` 。例如,
-下面的命令将这些值设置为5ms、100ms、1000ms、10和1000,然后再次检查::
-
- # cd <debugfs>/damon
- # echo 5000 100000 1000000 10 1000 > attrs
- # cat attrs
- 5000 100000 1000000 10 1000
-
-
-目标ID
-------
-
-一些类型的地址空间支持多个监测目标。例如,虚拟内存地址空间的监测可以有多个进程作为监测目标。用户
-可以通过写入目标的相关id值来设置目标,并通过读取 ``target_ids`` 文件来获得当前目标的id。在监
-测虚拟地址空间的情况下,这些值应该是监测目标进程的pid。例如,下面的命令将pid为42和4242的进程设
-为监测目标,并再次检查::
-
- # cd <debugfs>/damon
- # echo 42 4242 > target_ids
- # cat target_ids
- 42 4242
-
-用户还可以通过在文件中写入一个特殊的关键字 "paddr\n" 来监测系统的物理内存地址空间。因为物理地
-址空间监测不支持多个目标,读取文件会显示一个假值,即 ``42`` ,如下图所示::
-
- # cd <debugfs>/damon
- # echo paddr > target_ids
- # cat target_ids
- 42
-
-请注意,设置目标ID并不启动监测。
-
-
-初始监测目标区域
-----------------
-
-在虚拟地址空间监测的情况下,DAMON自动设置和更新监测的目标区域,这样就可以覆盖目标进程的整个
-内存映射。然而,用户可能希望将监测区域限制在特定的地址范围内,如堆、栈或特定的文件映射区域。
-或者,一些用户可以知道他们工作负载的初始访问模式,因此希望为“自适应区域调整”设置最佳初始区域。
-
-相比之下,DAMON在物理内存监测的情况下不会自动设置和更新监测目标区域。因此,用户应该自己设置
-监测目标区域。
-
-在这种情况下,用户可以通过在 ``init_regions`` 文件中写入适当的值,明确地设置他们想要的初
-始监测目标区域。输入应该是一个由三个整数组成的队列,用空格隔开,代表一个区域的形式如下::
-
- <target idx> <start address> <end address>
-
-目标idx应该是 ``target_ids`` 文件中目标的索引,从 ``0`` 开始,区域应该按照地址顺序传递。
-例如,下面的命令将设置几个地址范围, ``1-100`` 和 ``100-200`` 作为pid 42的初始监测目标
-区域,这是 ``target_ids`` 中的第一个(索引 ``0`` ),另外几个地址范围, ``20-40`` 和
-``50-100`` 作为pid 4242的地址,这是 ``target_ids`` 中的第二个(索引 ``1`` )::
-
- # cd <debugfs>/damon
- # cat target_ids
- 42 4242
- # echo "0 1 100 \
- 0 100 200 \
- 1 20 40 \
- 1 50 100" > init_regions
-
-请注意,这只是设置了初始的监测目标区域。在虚拟内存监测的情况下,DAMON会在一个 ``更新间隔``
-后自动更新区域的边界。因此,在这种情况下,如果用户不希望更新的话,应该把 ``更新间隔`` 设
-置得足够大。
-
-
-方案
-----
-
-对于通常的基于DAMON的数据访问感知的内存管理优化,用户只是希望系统对特定访问模式的内存区域应用内
-存管理操作。DAMON从用户那里接收这种形式化的操作方案,并将这些方案应用到目标进程中。
-
-用户可以通过读取和写入 ``scheme`` debugfs文件来获得和设置这些方案。读取该文件还可以显示每个
-方案的统计数据。在文件中,每一个方案都应该在每一行中以下列形式表示出来::
-
- <target access pattern> <action> <quota> <watermarks>
-
-你可以通过简单地在文件中写入一个空字符串来禁用方案。
-
-目标访问模式
-~~~~~~~~~~~~
-
-``<目标访问模式>`` 是由三个范围构成的,形式如下::
-
- min-size max-size min-acc max-acc min-age max-age
-
-具体来说,区域大小的字节数( `min-size` 和 `max-size` ),访问频率的每聚合区间的监测访问次
-数( `min-acc` 和 `max-acc` ),区域年龄的聚合区间数( `min-age` 和 `max-age` )都被指定。
-请注意,这些范围是封闭区间。
-
-动作
-~~~~
-
-``<action>`` 是一个预定义的内存管理动作的整数,DAMON将应用于具有目标访问模式的区域。支持
-的数字和它们的含义如下::
-
- - 0: Call ``madvise()`` for the region with ``MADV_WILLNEED``
- - 1: Call ``madvise()`` for the region with ``MADV_COLD``
- - 2: Call ``madvise()`` for the region with ``MADV_PAGEOUT``
- - 3: Call ``madvise()`` for the region with ``MADV_HUGEPAGE``
- - 4: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE``
- - 5: Do nothing but count the statistics
-
-配额
-~~~~
-
-每个 ``动作`` 的最佳 ``目标访问模式`` 取决于工作负载,所以不容易找到。更糟糕的是,将某个
-动作的方案设置得过于激进会导致严重的开销。为了避免这种开销,用户可以通过下面表格中的 ``<quota>``
-来限制方案的时间和大小配额::
-
- <ms> <sz> <reset interval> <priority weights>
-
-这使得DAMON在 ``<reset interval>`` 毫秒内,尽量只用 ``<ms>`` 毫秒的时间对 ``目标访
-问模式`` 的内存区域应用动作,并在 ``<reset interval>`` 内只对最多<sz>字节的内存区域应
-用动作。将 ``<ms>`` 和 ``<sz>`` 都设置为零,可以禁用配额限制。
-
-当预计超过配额限制时,DAMON会根据 ``目标访问模式`` 的大小、访问频率和年龄,对发现的内存
-区域进行优先排序。为了实现个性化的优先级,用户可以在 ``<优先级权重>`` 中设置这三个属性的
-权重,具体形式如下::
-
- <size weight> <access frequency weight> <age weight>
-
-水位
-~~~~
-
-有些方案需要根据系统特定指标的当前值来运行,如自由内存比率。对于这种情况,用户可以为该条
-件指定水位。::
-
- <metric> <check interval> <high mark> <middle mark> <low mark>
-
-``<metric>`` 是一个预定义的整数,用于要检查的度量。支持的数字和它们的含义如下。
-
- - 0: 忽视水位
- - 1: 系统空闲内存率 (千分比)
-
-每隔 ``<检查间隔>`` 微秒检查一次公制的值。
-
-如果该值高于 ``<高标>`` 或低于 ``<低标>`` ,该方案被停用。如果该值低于 ``<中标>`` ,
-该方案将被激活。
-
-统计数据
-~~~~~~~~
-
-它还统计每个方案被尝试应用的区域的总数量和字节数,每个方案被成功应用的区域的两个数量,以
-及超过配额限制的总数量。这些统计数据可用于在线分析或调整方案。
-
-统计数据可以通过读取方案文件来显示。读取该文件将显示你在每一行中输入的每个 ``方案`` ,
-统计的五个数字将被加在每一行的末尾。
-
-例子
-~~~~
-
-下面的命令应用了一个方案:”如果一个大小为[4KiB, 8KiB]的内存区域在[10, 20]的聚合时间
-间隔内显示出每一个聚合时间间隔[0, 5]的访问量,请分页出该区域。对于分页,每秒最多只能使
-用10ms,而且每秒分页不能超过1GiB。在这一限制下,首先分页出具有较长年龄的内存区域。另外,
-每5秒钟检查一次系统的可用内存率,当可用内存率低于50%时开始监测和分页,但如果可用内存率
-大于60%,或低于30%,则停止监测“::
-
- # cd <debugfs>/damon
- # scheme="4096 8192 0 5 10 20 2" # target access pattern and action
- # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas
- # scheme+=" 0 0 100" # prioritization weights
- # scheme+=" 1 5000000 600 500 300" # watermarks
- # echo "$scheme" > schemes
-
-
-开关
-----
-
-除非你明确地启动监测,否则如上所述的文件设置不会产生效果。你可以通过写入和读取 ``monitor_on_DEPRECATED``
-文件来启动、停止和检查监测的当前状态。写入 ``on`` 该文件可以启动对有属性的目标的监测。写入
-``off`` 该文件则停止这些目标。如果每个目标进程被终止,DAMON也会停止。下面的示例命令开启、关
-闭和检查DAMON的状态::
-
- # cd <debugfs>/damon
- # echo on > monitor_on_DEPRECATED
- # echo off > monitor_on_DEPRECATED
- # cat monitor_on_DEPRECATED
- off
-
-请注意,当监测开启时,你不能写到上述的debugfs文件。如果你在DAMON运行时写到这些文件,将会返
-回一个错误代码,如 ``-EBUSY`` 。
-
-
-监测线程PID
------------
-
-DAMON通过一个叫做kdamond的内核线程来进行请求监测。你可以通过读取 ``kdamond_pid`` 文件获
-得该线程的 ``pid`` 。当监测被 ``关闭`` 时,读取该文件不会返回任何信息::
-
- # cd <debugfs>/damon
- # cat monitor_on_DEPRECATED
- off
- # cat kdamond_pid
- none
- # echo on > monitor_on_DEPRECATED
- # cat kdamond_pid
- 18594
-
-
-使用多个监测线程
-----------------
-
-每个监测上下文都会创建一个 ``kdamond`` 线程。你可以使用 ``mk_contexts`` 和 ``rm_contexts``
-文件为多个 ``kdamond`` 需要的用例创建和删除监测上下文。
-
-将新上下文的名称写入 ``mk_contexts`` 文件,在 ``DAMON debugfs`` 目录上创建一个该名称的目录。
-该目录将有该上下文的 ``DAMON debugfs`` 文件::
-
- # cd <debugfs>/damon
- # ls foo
- # ls: cannot access 'foo': No such file or directory
- # echo foo > mk_contexts
- # ls foo
- # attrs init_regions kdamond_pid schemes target_ids
-
-如果不再需要上下文,你可以通过把上下文的名字放到 ``rm_contexts`` 文件中来删除它和相应的目录::
-
- # echo foo > rm_contexts
- # ls foo
- # ls: cannot access 'foo': No such file or directory
-
-注意, ``mk_contexts`` 、 ``rm_contexts`` 和 ``monitor_on_DEPRECATED`` 文件只在根目录下。
-
监测结果的监测点
================
diff --git a/Documentation/translations/zh_CN/devicetree/of_unittest.rst b/Documentation/translations/zh_CN/devicetree/of_unittest.rst
index 11eb08ca8866..5c1a8e0cfd16 100644
--- a/Documentation/translations/zh_CN/devicetree/of_unittest.rst
+++ b/Documentation/translations/zh_CN/devicetree/of_unittest.rst
@@ -40,7 +40,7 @@ OF Selftest被设计用来测试提供给设备驱动开发者的接口(includ
drivers/of/unittest-data/tests-phandle.dtsi
drivers/of/unittest-data/tests-match.dtsi
-当内核在启用OF_SELFTEST的情况下被构建时,那么下面的make规则::
+当内核在启用CONFIG_OF_UNITTEST的情况下被构建时,那么下面的make规则::
$(obj)/%.dtb: $(src)/%.dts FORCE
$(call if_changed_dep, dtc)
diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
index fbbbbad59ee4..d3fd4f850793 100644
--- a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
@@ -26,12 +26,7 @@ DAMON 爲不同的用戶提供了下面這些接口。
使用它,用戶可以通過讀取和寫入特殊的sysfs文件來使用DAMON的主要功能。因此,你可以編寫和使
用你個性化的DAMON sysfs包裝程序,代替你讀/寫sysfs文件。 `DAMON用戶空間工具
<https://github.com/damonitor/damo>`_ 就是這種程序的一個例子 它同時支持虛擬和物理地址
- 空間的監測。注意,這個界面只提供簡單的監測結果 :ref:`統計 <damos_stats>`。對於詳細的監測
- 結果,DAMON提供了一個:ref:`跟蹤點 <tracepoint>`。
-- *debugfs interface.*
- :ref:`這 <debugfs_interface>` 幾乎與:ref:`sysfs interface <sysfs_interface>` 接
- 口相同。這將在下一個LTS內核發佈後被移除,所以用戶應該轉移到
- :ref:`sysfs interface <sysfs_interface>`。
+ 空間的監測。
- *內核空間編程接口。*
:doc:`這 </mm/damon/api>` 這是爲內核空間程序員準備的。使用它,用戶可以通過爲你編寫內
核空間的DAMON應用程序,最靈活有效地利用DAMON的每一個功能。你甚至可以爲各種地址空間擴展DAMON。
@@ -335,247 +330,6 @@ tried_regions/<N>/
請注意,我們強烈建議使用用戶空間的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那樣手動讀寫文件。以上只是一個例子。
-debugfs接口
-===========
-
-.. note::
-
- DAMON debugfs接口將在下一個LTS內核發佈後被移除,所以用戶應該轉移到
- :ref:`sysfs接口<sysfs_interface>`。
-
-DAMON導出了八個文件, ``attrs``, ``target_ids``, ``init_regions``,
-``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``, ``mk_contexts`` 和
-``rm_contexts`` under its debugfs directory, ``<debugfs>/damon/``.
-
-
-屬性
-----
-
-用戶可以通過讀取和寫入 ``attrs`` 文件獲得和設置 ``採樣間隔`` 、 ``聚集間隔`` 、 ``更新間隔``
-以及監測目標區域的最小/最大數量。要詳細瞭解監測屬性,請參考 `:doc:/mm/damon/design` 。例如,
-下面的命令將這些值設置爲5ms、100ms、1000ms、10和1000,然後再次檢查::
-
- # cd <debugfs>/damon
- # echo 5000 100000 1000000 10 1000 > attrs
- # cat attrs
- 5000 100000 1000000 10 1000
-
-
-目標ID
-------
-
-一些類型的地址空間支持多個監測目標。例如,虛擬內存地址空間的監測可以有多個進程作爲監測目標。用戶
-可以通過寫入目標的相關id值來設置目標,並通過讀取 ``target_ids`` 文件來獲得當前目標的id。在監
-測虛擬地址空間的情況下,這些值應該是監測目標進程的pid。例如,下面的命令將pid爲42和4242的進程設
-爲監測目標,並再次檢查::
-
- # cd <debugfs>/damon
- # echo 42 4242 > target_ids
- # cat target_ids
- 42 4242
-
-用戶還可以通過在文件中寫入一個特殊的關鍵字 "paddr\n" 來監測系統的物理內存地址空間。因爲物理地
-址空間監測不支持多個目標,讀取文件會顯示一個假值,即 ``42`` ,如下圖所示::
-
- # cd <debugfs>/damon
- # echo paddr > target_ids
- # cat target_ids
- 42
-
-請注意,設置目標ID並不啓動監測。
-
-
-初始監測目標區域
-----------------
-
-在虛擬地址空間監測的情況下,DAMON自動設置和更新監測的目標區域,這樣就可以覆蓋目標進程的整個
-內存映射。然而,用戶可能希望將監測區域限制在特定的地址範圍內,如堆、棧或特定的文件映射區域。
-或者,一些用戶可以知道他們工作負載的初始訪問模式,因此希望爲“自適應區域調整”設置最佳初始區域。
-
-相比之下,DAMON在物理內存監測的情況下不會自動設置和更新監測目標區域。因此,用戶應該自己設置
-監測目標區域。
-
-在這種情況下,用戶可以通過在 ``init_regions`` 文件中寫入適當的值,明確地設置他們想要的初
-始監測目標區域。輸入應該是一個由三個整數組成的隊列,用空格隔開,代表一個區域的形式如下::
-
- <target idx> <start address> <end address>
-
-目標idx應該是 ``target_ids`` 文件中目標的索引,從 ``0`` 開始,區域應該按照地址順序傳遞。
-例如,下面的命令將設置幾個地址範圍, ``1-100`` 和 ``100-200`` 作爲pid 42的初始監測目標
-區域,這是 ``target_ids`` 中的第一個(索引 ``0`` ),另外幾個地址範圍, ``20-40`` 和
-``50-100`` 作爲pid 4242的地址,這是 ``target_ids`` 中的第二個(索引 ``1`` )::
-
- # cd <debugfs>/damon
- # cat target_ids
- 42 4242
- # echo "0 1 100 \
- 0 100 200 \
- 1 20 40 \
- 1 50 100" > init_regions
-
-請注意,這只是設置了初始的監測目標區域。在虛擬內存監測的情況下,DAMON會在一個 ``更新間隔``
-後自動更新區域的邊界。因此,在這種情況下,如果用戶不希望更新的話,應該把 ``更新間隔`` 設
-置得足夠大。
-
-
-方案
-----
-
-對於通常的基於DAMON的數據訪問感知的內存管理優化,用戶只是希望系統對特定訪問模式的內存區域應用內
-存管理操作。DAMON從用戶那裏接收這種形式化的操作方案,並將這些方案應用到目標進程中。
-
-用戶可以通過讀取和寫入 ``scheme`` debugfs文件來獲得和設置這些方案。讀取該文件還可以顯示每個
-方案的統計數據。在文件中,每一個方案都應該在每一行中以下列形式表示出來::
-
- <target access pattern> <action> <quota> <watermarks>
-
-你可以通過簡單地在文件中寫入一個空字符串來禁用方案。
-
-目標訪問模式
-~~~~~~~~~~~~
-
-``<目標訪問模式>`` 是由三個範圍構成的,形式如下::
-
- min-size max-size min-acc max-acc min-age max-age
-
-具體來說,區域大小的字節數( `min-size` 和 `max-size` ),訪問頻率的每聚合區間的監測訪問次
-數( `min-acc` 和 `max-acc` ),區域年齡的聚合區間數( `min-age` 和 `max-age` )都被指定。
-請注意,這些範圍是封閉區間。
-
-動作
-~~~~
-
-``<action>`` 是一個預定義的內存管理動作的整數,DAMON將應用於具有目標訪問模式的區域。支持
-的數字和它們的含義如下::
-
- - 0: Call ``madvise()`` for the region with ``MADV_WILLNEED``
- - 1: Call ``madvise()`` for the region with ``MADV_COLD``
- - 2: Call ``madvise()`` for the region with ``MADV_PAGEOUT``
- - 3: Call ``madvise()`` for the region with ``MADV_HUGEPAGE``
- - 4: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE``
- - 5: Do nothing but count the statistics
-
-配額
-~~~~
-
-每個 ``動作`` 的最佳 ``目標訪問模式`` 取決於工作負載,所以不容易找到。更糟糕的是,將某個
-動作的方案設置得過於激進會導致嚴重的開銷。爲了避免這種開銷,用戶可以通過下面表格中的 ``<quota>``
-來限制方案的時間和大小配額::
-
- <ms> <sz> <reset interval> <priority weights>
-
-這使得DAMON在 ``<reset interval>`` 毫秒內,儘量只用 ``<ms>`` 毫秒的時間對 ``目標訪
-問模式`` 的內存區域應用動作,並在 ``<reset interval>`` 內只對最多<sz>字節的內存區域應
-用動作。將 ``<ms>`` 和 ``<sz>`` 都設置爲零,可以禁用配額限制。
-
-當預計超過配額限制時,DAMON會根據 ``目標訪問模式`` 的大小、訪問頻率和年齡,對發現的內存
-區域進行優先排序。爲了實現個性化的優先級,用戶可以在 ``<優先級權重>`` 中設置這三個屬性的
-權重,具體形式如下::
-
- <size weight> <access frequency weight> <age weight>
-
-水位
-~~~~
-
-有些方案需要根據系統特定指標的當前值來運行,如自由內存比率。對於這種情況,用戶可以爲該條
-件指定水位。::
-
- <metric> <check interval> <high mark> <middle mark> <low mark>
-
-``<metric>`` 是一個預定義的整數,用於要檢查的度量。支持的數字和它們的含義如下。
-
- - 0: 忽視水位
- - 1: 系統空閒內存率 (千分比)
-
-每隔 ``<檢查間隔>`` 微秒檢查一次公制的值。
-
-如果該值高於 ``<高標>`` 或低於 ``<低標>`` ,該方案被停用。如果該值低於 ``<中標>`` ,
-該方案將被激活。
-
-統計數據
-~~~~~~~~
-
-它還統計每個方案被嘗試應用的區域的總數量和字節數,每個方案被成功應用的區域的兩個數量,以
-及超過配額限制的總數量。這些統計數據可用於在線分析或調整方案。
-
-統計數據可以通過讀取方案文件來顯示。讀取該文件將顯示你在每一行中輸入的每個 ``方案`` ,
-統計的五個數字將被加在每一行的末尾。
-
-例子
-~~~~
-
-下面的命令應用了一個方案:”如果一個大小爲[4KiB, 8KiB]的內存區域在[10, 20]的聚合時間
-間隔內顯示出每一個聚合時間間隔[0, 5]的訪問量,請分頁出該區域。對於分頁,每秒最多隻能使
-用10ms,而且每秒分頁不能超過1GiB。在這一限制下,首先分頁出具有較長年齡的內存區域。另外,
-每5秒鐘檢查一次系統的可用內存率,當可用內存率低於50%時開始監測和分頁,但如果可用內存率
-大於60%,或低於30%,則停止監測“::
-
- # cd <debugfs>/damon
- # scheme="4096 8192 0 5 10 20 2" # target access pattern and action
- # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas
- # scheme+=" 0 0 100" # prioritization weights
- # scheme+=" 1 5000000 600 500 300" # watermarks
- # echo "$scheme" > schemes
-
-
-開關
-----
-
-除非你明確地啓動監測,否則如上所述的文件設置不會產生效果。你可以通過寫入和讀取 ``monitor_on_DEPRECATED``
-文件來啓動、停止和檢查監測的當前狀態。寫入 ``on`` 該文件可以啓動對有屬性的目標的監測。寫入
-``off`` 該文件則停止這些目標。如果每個目標進程被終止,DAMON也會停止。下面的示例命令開啓、關
-閉和檢查DAMON的狀態::
-
- # cd <debugfs>/damon
- # echo on > monitor_on_DEPRECATED
- # echo off > monitor_on_DEPRECATED
- # cat monitor_on_DEPRECATED
- off
-
-請注意,當監測開啓時,你不能寫到上述的debugfs文件。如果你在DAMON運行時寫到這些文件,將會返
-回一個錯誤代碼,如 ``-EBUSY`` 。
-
-
-監測線程PID
------------
-
-DAMON通過一個叫做kdamond的內核線程來進行請求監測。你可以通過讀取 ``kdamond_pid`` 文件獲
-得該線程的 ``pid`` 。當監測被 ``關閉`` 時,讀取該文件不會返回任何信息::
-
- # cd <debugfs>/damon
- # cat monitor_on_DEPRECATED
- off
- # cat kdamond_pid
- none
- # echo on > monitor_on_DEPRECATED
- # cat kdamond_pid
- 18594
-
-
-使用多個監測線程
-----------------
-
-每個監測上下文都會創建一個 ``kdamond`` 線程。你可以使用 ``mk_contexts`` 和 ``rm_contexts``
-文件爲多個 ``kdamond`` 需要的用例創建和刪除監測上下文。
-
-將新上下文的名稱寫入 ``mk_contexts`` 文件,在 ``DAMON debugfs`` 目錄上創建一個該名稱的目錄。
-該目錄將有該上下文的 ``DAMON debugfs`` 文件::
-
- # cd <debugfs>/damon
- # ls foo
- # ls: cannot access 'foo': No such file or directory
- # echo foo > mk_contexts
- # ls foo
- # attrs init_regions kdamond_pid schemes target_ids
-
-如果不再需要上下文,你可以通過把上下文的名字放到 ``rm_contexts`` 文件中來刪除它和相應的目錄::
-
- # echo foo > rm_contexts
- # ls foo
- # ls: cannot access 'foo': No such file or directory
-
-注意, ``mk_contexts`` 、 ``rm_contexts`` 和 ``monitor_on_DEPRECATED`` 文件只在根目錄下。
-
監測結果的監測點
================
diff --git a/Documentation/usb/usbip_protocol.rst b/Documentation/usb/usbip_protocol.rst
index adc158967cc6..3da1df3d94f5 100644
--- a/Documentation/usb/usbip_protocol.rst
+++ b/Documentation/usb/usbip_protocol.rst
@@ -285,17 +285,17 @@ OP_REP_IMPORT:
+-----------+--------+------------+---------------------------------------------------+
| 0x138 | 2 | | bcdDevice |
+-----------+--------+------------+---------------------------------------------------+
-| 0x139 | 1 | | bDeviceClass |
+| 0x13A | 1 | | bDeviceClass |
+-----------+--------+------------+---------------------------------------------------+
-| 0x13A | 1 | | bDeviceSubClass |
+| 0x13B | 1 | | bDeviceSubClass |
+-----------+--------+------------+---------------------------------------------------+
-| 0x13B | 1 | | bDeviceProtocol |
+| 0x13C | 1 | | bDeviceProtocol |
+-----------+--------+------------+---------------------------------------------------+
-| 0x13C | 1 | | bConfigurationValue |
+| 0x13D | 1 | | bConfigurationValue |
+-----------+--------+------------+---------------------------------------------------+
-| 0x13D | 1 | | bNumConfigurations |
+| 0x13E | 1 | | bNumConfigurations |
+-----------+--------+------------+---------------------------------------------------+
-| 0x13E | 1 | | bNumInterfaces |
+| 0x13F | 1 | | bNumInterfaces |
+-----------+--------+------------+---------------------------------------------------+
The following four commands have a common basic header called
diff --git a/Documentation/userspace-api/check_exec.rst b/Documentation/userspace-api/check_exec.rst
new file mode 100644
index 000000000000..05dfe3b56f71
--- /dev/null
+++ b/Documentation/userspace-api/check_exec.rst
@@ -0,0 +1,144 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright © 2024 Microsoft Corporation
+
+===================
+Executability check
+===================
+
+The ``AT_EXECVE_CHECK`` :manpage:`execveat(2)` flag, and the
+``SECBIT_EXEC_RESTRICT_FILE`` and ``SECBIT_EXEC_DENY_INTERACTIVE`` securebits
+are intended for script interpreters and dynamic linkers to enforce a
+consistent execution security policy handled by the kernel. See the
+`samples/check-exec/inc.c`_ example.
+
+Whether an interpreter should check these securebits or not depends on the
+security risk of running malicious scripts with respect to the execution
+environment, and whether the kernel can check if a script is trustworthy or
+not. For instance, Python scripts running on a server can use arbitrary
+syscalls and access arbitrary files. Such interpreters should then be
+enlighten to use these securebits and let users define their security policy.
+However, a JavaScript engine running in a web browser should already be
+sandboxed and then should not be able to harm the user's environment.
+
+Script interpreters or dynamic linkers built for tailored execution environments
+(e.g. hardened Linux distributions or hermetic container images) could use
+``AT_EXECVE_CHECK`` without checking the related securebits if backward
+compatibility is handled by something else (e.g. atomic update ensuring that
+all legitimate libraries are allowed to be executed). It is then recommended
+for script interpreters and dynamic linkers to check the securebits at run time
+by default, but also to provide the ability for custom builds to behave as if
+``SECBIT_EXEC_RESTRICT_FILE`` or ``SECBIT_EXEC_DENY_INTERACTIVE`` were always
+set to 1 (i.e. always enforce restrictions).
+
+AT_EXECVE_CHECK
+===============
+
+Passing the ``AT_EXECVE_CHECK`` flag to :manpage:`execveat(2)` only performs a
+check on a regular file and returns 0 if execution of this file would be
+allowed, ignoring the file format and thus the related interpreter dependencies
+(e.g. ELF libraries, script's shebang).
+
+Programs should always perform this check to apply kernel-level checks against
+files that are not directly executed by the kernel but passed to a user space
+interpreter instead. All files that contain executable code, from the point of
+view of the interpreter, should be checked. However, the result of this check
+should only be enforced according to ``SECBIT_EXEC_RESTRICT_FILE`` or
+``SECBIT_EXEC_DENY_INTERACTIVE``.
+
+The main purpose of this flag is to improve the security and consistency of an
+execution environment to ensure that direct file execution (e.g.
+``./script.sh``) and indirect file execution (e.g. ``sh script.sh``) lead to
+the same result. For instance, this can be used to check if a file is
+trustworthy according to the caller's environment.
+
+In a secure environment, libraries and any executable dependencies should also
+be checked. For instance, dynamic linking should make sure that all libraries
+are allowed for execution to avoid trivial bypass (e.g. using ``LD_PRELOAD``).
+For such secure execution environment to make sense, only trusted code should
+be executable, which also requires integrity guarantees.
+
+To avoid race conditions leading to time-of-check to time-of-use issues,
+``AT_EXECVE_CHECK`` should be used with ``AT_EMPTY_PATH`` to check against a
+file descriptor instead of a path.
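+
+For illustration, here is a minimal sketch (not the sample itself) of such a
+check against an already opened file descriptor, assuming the
+``AT_EXECVE_CHECK`` and ``AT_EMPTY_PATH`` definitions from ``linux/fcntl.h``
+and issuing the system call directly::
+
+    #include <linux/fcntl.h>        /* AT_EMPTY_PATH, AT_EXECVE_CHECK */
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    /* Hypothetical helper: returns 0 if the kernel would allow executing
+     * the file behind fd, or -1 with errno set.  Nothing is executed. */
+    static int check_exec_fd(int fd)
+    {
+        return syscall(__NR_execveat, fd, "", (char *const *)NULL,
+                       (char *const *)NULL, AT_EMPTY_PATH | AT_EXECVE_CHECK);
+    }
+
+Whether a failed check must actually deny interpretation is decided by the
+securebits described below.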
+
+SECBIT_EXEC_RESTRICT_FILE and SECBIT_EXEC_DENY_INTERACTIVE
+==========================================================
+
+When ``SECBIT_EXEC_RESTRICT_FILE`` is set, a process should only interpret or
+execute a file if a call to :manpage:`execveat(2)` with the related file
+descriptor and the ``AT_EXECVE_CHECK`` flag succeeds.
+
+This secure bit may be set by user session managers, service managers,
+container runtimes, sandboxer tools, etc. Except for test environments, the
+related ``SECBIT_EXEC_RESTRICT_FILE_LOCKED`` bit should also be set.
+
+Programs should only enforce consistent restrictions according to the
+securebits but without relying on any other user-controlled configuration.
+Indeed, the use case for these securebits is to only trust executable code
+vetted by the system configuration (through the kernel), so we should be
+careful to not let untrusted users control this configuration.
+
+However, script interpreters may still use user configuration such as
+environment variables as long as it is not a way to disable the securebits
+checks. For instance, the ``PATH`` and ``LD_PRELOAD`` variables can be set by
+a script's caller. Changing these variables may lead to unintended code
+executions, but only from vetted executable programs, which is OK. For this to
+make sense, the system should provide a consistent security policy to avoid
+arbitrary code execution, e.g. by enforcing a write xor execute policy.
+
+When ``SECBIT_EXEC_DENY_INTERACTIVE`` is set, a process should never interpret
+interactive user commands (e.g. scripts). However, if such commands are passed
+through a file descriptor (e.g. stdin), their content should be interpreted
+if a call to :manpage:`execveat(2)` with the related file descriptor and the
+``AT_EXECVE_CHECK`` flag succeeds.
+
+For instance, script interpreters called with a script snippet as argument
+should always deny such execution if ``SECBIT_EXEC_DENY_INTERACTIVE`` is set.
+
+This secure bit may be set by user session managers, service managers,
+container runtimes, sandboxer tools, etc. Except for test environments, the
+related ``SECBIT_EXEC_DENY_INTERACTIVE_LOCKED`` bit should also be set.
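+
+As a sketch (assuming the new ``SECBIT_EXEC_*`` definitions are available in
+``linux/securebits.h``), an interpreter can read the current securebits with
+:manpage:`prctl(2)` to decide whether the result of an ``AT_EXECVE_CHECK``
+call must be enforced::
+
+    #include <linux/securebits.h>   /* SECBIT_EXEC_RESTRICT_FILE */
+    #include <sys/prctl.h>
+
+    static int exec_restrict_file_enabled(void)
+    {
+        int bits = prctl(PR_GET_SECUREBITS);
+
+        if (bits < 0)
+            return 0;   /* assume permissive, e.g. on older kernels */
+        return !!(bits & SECBIT_EXEC_RESTRICT_FILE);
+    }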
+
+Here is the expected behavior for a script interpreter according to the
+combination of these exec securebits:
+
+1. ``SECBIT_EXEC_RESTRICT_FILE=0`` and ``SECBIT_EXEC_DENY_INTERACTIVE=0``
+
+ Always interpret scripts, and allow arbitrary user commands (default).
+
+ No threat, everyone and everything is trusted, but we can get ahead of
+ potential issues thanks to the call to :manpage:`execveat(2)` with
+ ``AT_EXECVE_CHECK`` which should always be performed but ignored by the
+ script interpreter. Indeed, this check is still important to enable system
+ administrators to verify requests (e.g. with audit) and prepare for
+ migration to a secure mode.
+
+2. ``SECBIT_EXEC_RESTRICT_FILE=1`` and ``SECBIT_EXEC_DENY_INTERACTIVE=0``
+
+ Deny interpretation of scripts that are not executable, but allow
+ arbitrary user commands.
+
+ The threat is (potential) malicious scripts run by trusted (and not fooled)
+ users. That can protect against unintended script executions (e.g. ``sh
+ /tmp/*.sh``). This makes sense for (semi-restricted) user sessions.
+
+3. ``SECBIT_EXEC_RESTRICT_FILE=0`` and ``SECBIT_EXEC_DENY_INTERACTIVE=1``
+
+ Always interpret scripts, but deny arbitrary user commands.
+
+ This use case may be useful for secure services (i.e. without an interactive
+ user session) where scripts' integrity is verified (e.g. with IMA/EVM or
+ dm-verity/IPE) but where access rights might not be ready yet. Indeed,
+ arbitrary interactive commands would be much more difficult to check.
+
+4. ``SECBIT_EXEC_RESTRICT_FILE=1`` and ``SECBIT_EXEC_DENY_INTERACTIVE=1``
+
+ Deny interpretation of scripts that are not executable, and also deny
+ any arbitrary user commands.
+
+ The threat is malicious scripts run by untrusted users (but trusted code).
+ This makes sense for system services that may only execute trusted scripts.
+
+.. Links
+.. _samples/check-exec/inc.c:
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/samples/check-exec/inc.c
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 274cc7546efc..b1395d94b3fd 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -35,6 +35,7 @@ Security-related interfaces
mfd_noexec
spec_ctrl
tee
+ check_exec
Devices and I/O
===============
@@ -63,6 +64,7 @@ Everything else
vduse
futex2
perf_ring_buffer
+ ntsync
.. only:: subproject and html
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 243f1f1b554a..6d1465315df3 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -283,6 +283,7 @@ Code Seq# Include File Comments
'p' 80-9F linux/ppdev.h user-space parport
<mailto:tim@cyberelk.net>
'p' A1-A5 linux/pps.h LinuxPPS
+'p' B1-B3 linux/pps_gen.h LinuxPPS
<mailto:giometti@linux.it>
'q' 00-1F linux/serio.h
'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK
@@ -311,6 +312,7 @@ Code Seq# Include File Comments
<mailto:oe@port.de>
'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
'|' 00-7F linux/media.h
+'|' 80-9F samples/ Any sample and example drivers
0x80 00-1F linux/fb.h
0x81 00-1F linux/vduse.h
0x89 00-06 arch/x86/include/asm/sockios.h
diff --git a/Documentation/userspace-api/ntsync.rst b/Documentation/userspace-api/ntsync.rst
new file mode 100644
index 000000000000..25e7c4aef968
--- /dev/null
+++ b/Documentation/userspace-api/ntsync.rst
@@ -0,0 +1,385 @@
+===================================
+NT synchronization primitive driver
+===================================
+
+This page documents the user-space API for the ntsync driver.
+
+ntsync is a support driver for emulation of NT synchronization
+primitives by user-space NT emulators. It exists because implementation
+in user-space, using existing tools, cannot match Windows performance
+while offering accurate semantics. It is implemented entirely in
+software, and does not drive any hardware device.
+
+This interface is meant as a compatibility tool only, and should not
+be used for general synchronization. Instead use generic, versatile
+interfaces such as futex(2) and poll(2).
+
+Synchronization primitives
+==========================
+
+The ntsync driver exposes three types of synchronization primitives:
+semaphores, mutexes, and events.
+
+A semaphore holds a single volatile 32-bit counter, and a static 32-bit
+integer denoting the maximum value. It is considered signaled (that is,
+can be acquired without contention, or will wake up a waiting thread)
+when the counter is nonzero. The counter is decremented by one when a
+wait is satisfied. Both the initial and maximum count are established
+when the semaphore is created.
+
+A mutex holds a volatile 32-bit recursion count, and a volatile 32-bit
+identifier denoting its owner. A mutex is considered signaled when its
+owner is zero (indicating that it is not owned). The recursion count is
+incremented when a wait is satisfied, and ownership is set to the given
+identifier.
+
+A mutex also holds an internal flag denoting whether its previous owner
+has died; such a mutex is said to be abandoned. Owner death is not
+tracked automatically based on thread death, but rather must be
+communicated using ``NTSYNC_IOC_MUTEX_KILL``. An abandoned mutex is
+inherently considered unowned.
+
+Except for the "unowned" semantics of zero, the actual value of the
+owner identifier is not interpreted by the ntsync driver at all. The
+intended use is to store a thread identifier; however, the ntsync
+driver does not actually validate that a calling thread provides
+consistent or unique identifiers.
+
+An event is similar to a semaphore with a maximum count of one. It holds
+a volatile boolean state denoting whether it is signaled or not. There
+are two types of events, auto-reset and manual-reset. An auto-reset
+event is designaled when a wait is satisfied; a manual-reset event is
+not. The event type is specified when the event is created.
+
+Unless specified otherwise, all operations on an object are atomic and
+totally ordered with respect to other operations on the same object.
+
+Objects are represented by files. When all file descriptors to an
+object are closed, that object is deleted.
+
+Char device
+===========
+
+The ntsync driver creates a single char device /dev/ntsync. Each file
+description opened on the device represents a unique instance intended
+to back an individual NT virtual machine. Objects created by one ntsync
+instance may only be used with other objects created by the same
+instance.
+
+ioctl reference
+===============
+
+All operations on the device are done through ioctls. There are four
+structures used in ioctl calls::
+
+ struct ntsync_sem_args {
+ __u32 count;
+ __u32 max;
+ };
+
+ struct ntsync_mutex_args {
+ __u32 owner;
+ __u32 count;
+ };
+
+ struct ntsync_event_args {
+ __u32 signaled;
+ __u32 manual;
+ };
+
+ struct ntsync_wait_args {
+ __u64 timeout;
+ __u64 objs;
+ __u32 count;
+ __u32 owner;
+ __u32 index;
+ __u32 alert;
+ __u32 flags;
+ __u32 pad;
+ };
+
+Depending on the ioctl, members of the structure may be used as input,
+output, or not at all.
+
+The ioctls on the device file are as follows:
+
+.. c:macro:: NTSYNC_IOC_CREATE_SEM
+
+ Create a semaphore object. Takes a pointer to struct
+ :c:type:`ntsync_sem_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``count``
+ - Initial count of the semaphore.
+ * - ``max``
+ - Maximum count of the semaphore.
+
+ Fails with ``EINVAL`` if ``count`` is greater than ``max``.
+ On success, returns a file descriptor for the created semaphore.
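+
+ As an illustration only (a sketch, assuming ``linux/ntsync.h`` provides the
+ definitions above), a semaphore with an initial count of 0 and a maximum
+ count of 1 could be created as follows::
+
+     #include <linux/ntsync.h>
+     #include <sys/ioctl.h>
+
+     /* Returns the new semaphore's file descriptor, or -1 on error. */
+     int create_binary_sem(int instance_fd)
+     {
+             struct ntsync_sem_args args = {
+                     .count = 0,     /* initially nonsignaled */
+                     .max = 1,
+             };
+
+             return ioctl(instance_fd, NTSYNC_IOC_CREATE_SEM, &args);
+     }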
+
+.. c:macro:: NTSYNC_IOC_CREATE_MUTEX
+
+ Create a mutex object. Takes a pointer to struct
+ :c:type:`ntsync_mutex_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``count``
+ - Initial recursion count of the mutex.
+ * - ``owner``
+ - Initial owner of the mutex.
+
+ If ``owner`` is nonzero and ``count`` is zero, or if ``owner`` is
+ zero and ``count`` is nonzero, the function fails with ``EINVAL``.
+ On success, returns a file descriptor for the created mutex.
+
+.. c:macro:: NTSYNC_IOC_CREATE_EVENT
+
+ Create an event object. Takes a pointer to struct
+ :c:type:`ntsync_event_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``signaled``
+ - If nonzero, the event is initially signaled, otherwise
+ nonsignaled.
+ * - ``manual``
+ - If nonzero, the event is a manual-reset event, otherwise
+ auto-reset.
+
+ On success, returns a file descriptor for the created event.
+
+The ioctls on the individual objects are as follows:
+
+.. c:macro:: NTSYNC_IOC_SEM_POST
+
+ Post to a semaphore object. Takes a pointer to a 32-bit integer,
+ which on input holds the count to be added to the semaphore, and on
+ output contains its previous count.
+
+ If adding to the semaphore's current count would raise the latter
+ past the semaphore's maximum count, the ioctl fails with
+ ``EOVERFLOW`` and the semaphore is not affected. If raising the
+ semaphore's count causes it to become signaled, eligible threads
+ waiting on this semaphore will be woken and the semaphore's count
+ decremented appropriately.
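+
+ For example (a sketch, where ``sem_fd`` is assumed to be a semaphore file
+ descriptor obtained from ``NTSYNC_IOC_CREATE_SEM``)::
+
+     __u32 count = 1;        /* input: count to add; output: previous count */
+
+     if (ioctl(sem_fd, NTSYNC_IOC_SEM_POST, &count) == 0) {
+             /* "count" now holds the count before the post. */
+     } else {
+             /* e.g. EOVERFLOW if the maximum count would be exceeded */
+     }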
+
+.. c:macro:: NTSYNC_IOC_MUTEX_UNLOCK
+
+ Release a mutex object. Takes a pointer to struct
+ :c:type:`ntsync_mutex_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``owner``
+ - Specifies the owner trying to release this mutex.
+ * - ``count``
+ - On output, contains the previous recursion count.
+
+ If ``owner`` is zero, the ioctl fails with ``EINVAL``. If ``owner``
+ is not the current owner of the mutex, the ioctl fails with
+ ``EPERM``.
+
+ The mutex's count will be decremented by one. If decrementing the
+ mutex's count causes it to become zero, the mutex is marked as
+ unowned and signaled, and eligible threads waiting on it will be
+ woken as appropriate.
+
+.. c:macro:: NTSYNC_IOC_SET_EVENT
+
+ Signal an event object. Takes a pointer to a 32-bit integer, which on
+ output contains the previous state of the event.
+
+ Eligible threads will be woken, and auto-reset events will be
+ designaled appropriately.
+
+.. c:macro:: NTSYNC_IOC_RESET_EVENT
+
+ Designal an event object. Takes a pointer to a 32-bit integer, which
+ on output contains the previous state of the event.
+
+.. c:macro:: NTSYNC_IOC_PULSE_EVENT
+
+ Wake threads waiting on an event object while leaving it in an
+ unsignaled state. Takes a pointer to a 32-bit integer, which on
+ output contains the previous state of the event.
+
+ A pulse operation can be thought of as a set followed by a reset,
+ performed as a single atomic operation. If two threads are waiting on
+ an auto-reset event which is pulsed, only one will be woken. If two
+ threads are waiting on a manual-reset event which is pulsed, both will
+ be woken. However, in both cases, the event will be unsignaled
+ afterwards, and a simultaneous read operation will always report the
+ event as unsignaled.
+
+.. c:macro:: NTSYNC_IOC_READ_SEM
+
+ Read the current state of a semaphore object. Takes a pointer to
+ struct :c:type:`ntsync_sem_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``count``
+ - On output, contains the current count of the semaphore.
+ * - ``max``
+ - On output, contains the maximum count of the semaphore.
+
+.. c:macro:: NTSYNC_IOC_READ_MUTEX
+
+ Read the current state of a mutex object. Takes a pointer to struct
+ :c:type:`ntsync_mutex_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``owner``
+ - On output, contains the current owner of the mutex, or zero
+ if the mutex is not currently owned.
+ * - ``count``
+ - On output, contains the current recursion count of the mutex.
+
+ If the mutex is marked as abandoned, the function fails with
+ ``EOWNERDEAD``. In this case, ``count`` and ``owner`` are set to
+ zero.
+
+.. c:macro:: NTSYNC_IOC_READ_EVENT
+
+ Read the current state of an event object. Takes a pointer to struct
+ :c:type:`ntsync_event_args`, which is used as follows:
+
+ .. list-table::
+
+ * - ``signaled``
+ - On output, contains the current state of the event.
+ * - ``manual``
+ - On output, contains 1 if the event is a manual-reset event,
+ and 0 otherwise.
+
+.. c:macro:: NTSYNC_IOC_KILL_OWNER
+
+ Mark a mutex as unowned and abandoned if it is owned by the given
+ owner. Takes an input-only pointer to a 32-bit integer denoting the
+ owner. If the owner is zero, the ioctl fails with ``EINVAL``. If the
+ owner does not own the mutex, the function fails with ``EPERM``.
+
+ Eligible threads waiting on the mutex will be woken as appropriate
+ (and such waits will fail with ``EOWNERDEAD``, as described below).
+
+.. c:macro:: NTSYNC_IOC_WAIT_ANY
+
+ Poll on any of a list of objects, atomically acquiring at most one.
+ Takes a pointer to struct :c:type:`ntsync_wait_args`, which is
+ used as follows:
+
+ .. list-table::
+
+ * - ``timeout``
+ - Absolute timeout in nanoseconds. If ``NTSYNC_WAIT_REALTIME``
+ is set, the timeout is measured against the REALTIME clock;
+ otherwise it is measured against the MONOTONIC clock. If the
+ timeout is equal to or earlier than the current time, the
+ function returns immediately without sleeping. If ``timeout``
+ is U64_MAX, the function will sleep until an object is
+ signaled, and will not fail with ``ETIMEDOUT``.
+ * - ``objs``
+ - Pointer to an array of ``count`` file descriptors
+ (specified as an integer so that the structure has the same
+ size regardless of architecture). If any object is
+ invalid, the function fails with ``EINVAL``.
+ * - ``count``
+ - Number of objects specified in the ``objs`` array.
+ If greater than ``NTSYNC_MAX_WAIT_COUNT``, the function fails
+ with ``EINVAL``.
+ * - ``owner``
+ - Mutex owner identifier. If any object in ``objs`` is a mutex,
+ the ioctl will attempt to acquire that mutex on behalf of
+ ``owner``. If ``owner`` is zero, the ioctl fails with
+ ``EINVAL``.
+ * - ``index``
+ - On success, contains the index (into ``objs``) of the object
+ which was signaled. If ``alert`` was signaled instead,
+ this contains ``count``.
+ * - ``alert``
+ - Optional event object file descriptor. If nonzero, this
+ specifies an "alert" event object which, if signaled, will
+ terminate the wait. If nonzero, the identifier must point to a
+ valid event.
+ * - ``flags``
+ - Zero or more flags. Currently the only flag is
+ ``NTSYNC_WAIT_REALTIME``, which causes the timeout to be
+ measured against the REALTIME clock instead of MONOTONIC.
+ * - ``pad``
+ - Unused, must be set to zero.
+
+ This function attempts to acquire one of the given objects. If unable
+ to do so, it sleeps until an object becomes signaled, subsequently
+ acquiring it, or the timeout expires. In the latter case the ioctl
+ fails with ``ETIMEDOUT``. The function only acquires one object, even
+ if multiple objects are signaled.
+
+ A semaphore is considered to be signaled if its count is nonzero, and
+ is acquired by decrementing its count by one. A mutex is considered
+ to be signaled if it is unowned or if its owner matches the ``owner``
+ argument, and is acquired by incrementing its recursion count by one
+ and setting its owner to the ``owner`` argument. An auto-reset event
+ is acquired by designaling it; a manual-reset event is not affected
+ by acquisition.
+
+ Acquisition is atomic and totally ordered with respect to other
+ operations on the same object. If two wait operations (with different
+ ``owner`` identifiers) are queued on the same mutex, only one is
+ signaled. If two wait operations are queued on the same semaphore,
+ and a value of one is posted to it, only one is signaled.
+
+ If an abandoned mutex is acquired, the ioctl fails with
+ ``EOWNERDEAD``. Although this is a failure return, the function may
+ otherwise be considered successful. The mutex is marked as owned by
+ the given owner (with a recursion count of 1) and as no longer
+ abandoned, and ``index`` is still set to the index of the mutex.
+
+ The ``alert`` argument is an "extra" event which can terminate the
+ wait, independently of all other objects.
+
+ It is valid to pass the same object more than once, including by
+ passing the same event in the ``objs`` array and in ``alert``. If a
+ wakeup occurs due to that object being signaled, ``index`` is set to
+ the lowest index corresponding to that object.
+
+ The function may fail with ``EINTR`` if a signal is received.
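+
+ A minimal sketch of such a wait on two objects with a one-second deadline,
+ assuming the ``linux/ntsync.h`` definitions above (the descriptor on which
+ the ioctl is issued is left as a parameter here)::
+
+     #include <linux/ntsync.h>
+     #include <stdint.h>
+     #include <sys/ioctl.h>
+     #include <time.h>
+
+     /* Returns the index of the acquired object, or -1 on error. */
+     int wait_any_one_second(int fd, int obj1, int obj2, __u32 owner)
+     {
+             int objs[2] = { obj1, obj2 };
+             struct timespec now;
+             struct ntsync_wait_args wait = { 0 };
+
+             clock_gettime(CLOCK_MONOTONIC, &now);
+             wait.timeout = (now.tv_sec + 1) * 1000000000ULL + now.tv_nsec;
+             wait.objs = (__u64)(uintptr_t)objs;
+             wait.count = 2;
+             wait.owner = owner;
+
+             if (ioctl(fd, NTSYNC_IOC_WAIT_ANY, &wait) < 0)
+                     return -1;      /* e.g. ETIMEDOUT or EOWNERDEAD */
+             return wait.index;
+     }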
+
+.. c:macro:: NTSYNC_IOC_WAIT_ALL
+
+ Poll on a list of objects, atomically acquiring all of them. Takes a
+ pointer to struct :c:type:`ntsync_wait_args`, which is used
+ identically to ``NTSYNC_IOC_WAIT_ANY``, except that ``index`` is
+ always filled with zero on success if not woken via alert.
+
+ This function attempts to simultaneously acquire all of the given
+ objects. If unable to do so, it sleeps until all objects become
+ simultaneously signaled, subsequently acquiring them, or the timeout
+ expires. In the latter case the ioctl fails with ``ETIMEDOUT`` and no
+ objects are modified.
+
+ Objects may become signaled and subsequently designaled (through
+ acquisition by other threads) while this thread is sleeping. Only
+ once all objects are simultaneously signaled does the ioctl acquire
+ them and return. The entire acquisition is atomic and totally ordered
+ with respect to other operations on any of the given objects.
+
+ If an abandoned mutex is acquired, the ioctl fails with
+ ``EOWNERDEAD``. Similarly to ``NTSYNC_IOC_WAIT_ANY``, all objects are
+ nevertheless marked as acquired. Note that if multiple mutex objects
+ are specified, there is no way to know which were marked as
+ abandoned.
+
+ As with "any" waits, the ``alert`` argument is an "extra" event which
+ can terminate the wait. Critically, however, an "all" wait will
+ succeed if all members in ``objs`` are signaled, *or* if ``alert`` is
+ signaled. In the latter case ``index`` will be set to ``count``. As
+ with "any" waits, if both conditions are fulfilled, the former takes
+ priority, and objects in ``objs`` will be acquired.
+
+ Unlike ``NTSYNC_IOC_WAIT_ANY``, it is not valid to pass the same
+ object more than once, nor is it valid to pass the same object in
+ ``objs`` and in ``alert``. If this is attempted, the function fails
+ with ``EINVAL``.
diff --git a/Documentation/userspace-api/sysfs-platform_profile.rst b/Documentation/userspace-api/sysfs-platform_profile.rst
index 4fccde2e4563..7f013356118a 100644
--- a/Documentation/userspace-api/sysfs-platform_profile.rst
+++ b/Documentation/userspace-api/sysfs-platform_profile.rst
@@ -40,3 +40,41 @@ added. Drivers which wish to introduce new profile names must:
1. Explain why the existing profile names cannot be used.
2. Add the new profile name, along with a clear description of the
expected behaviour, to the sysfs-platform_profile ABI documentation.
+
+"Custom" profile support
+========================
+The platform_profile class also supports profiles advertising a "custom"
+profile. This is intended to be set by drivers when the settings in the
+driver have been modified in a way that no standard profile represents the
+current state.
+
+Multiple driver support
+=======================
+When multiple drivers on a system advertise a platform profile handler, the
+platform profile handler core will only advertise the profiles that are
+common between all drivers to the ``/sys/firmware/acpi`` interfaces.
+
+This is to ensure there is no ambiguity about what the profile names mean
+when not all handlers support a given profile.
+
+Individual drivers will register a 'platform_profile' class device that has
+similar semantics as the ``/sys/firmware/acpi/platform_profile`` interface.
+
+To discover which driver is associated with a platform profile handler the
+user can read the ``name`` attribute of the class device.
+
+To discover available profiles from the class interface the user can read the
+``choices`` attribute.
+
+If a user wants to select a profile for a specific driver, they can do so
+by writing to the ``profile`` attribute of the driver's class device.
+
+This will allow users to set different profiles for different drivers on the
+same system. If the profiles selected by individual drivers differ, the
+platform profile handler core will display the profile 'custom' to indicate
+that the profiles are not the same.
+
+While the ``platform_profile`` attribute has the value ``custom``, writing a
+common profile from ``platform_profile_choices`` to the platform_profile
+attribute of the platform profile handler core will set the profile for all
+drivers.
diff --git a/Documentation/virt/hyperv/hibernation.rst b/Documentation/virt/hyperv/hibernation.rst
new file mode 100644
index 000000000000..4ff27f4a317a
--- /dev/null
+++ b/Documentation/virt/hyperv/hibernation.rst
@@ -0,0 +1,336 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Hibernating Guest VMs
+=====================
+
+Background
+----------
+Linux supports the ability to hibernate itself in order to save power.
+Hibernation is sometimes called suspend-to-disk, as it writes a memory
+image to disk and puts the hardware into the lowest possible power
+state. Upon resume from hibernation, the hardware is restarted and the
+memory image is restored from disk so that it can resume execution
+where it left off. See the "Hibernation" section of
+Documentation/admin-guide/pm/sleep-states.rst.
+
+Hibernation is usually done on devices with a single user, such as a
+personal laptop. For example, the laptop goes into hibernation when
+the cover is closed, and resumes when the cover is opened again.
+Hibernation and resume happen on the same hardware, and Linux kernel
+code orchestrating the hibernation steps assumes that the hardware
+configuration is not changed while in the hibernated state.
+
+Hibernation can be initiated within Linux by writing "disk" to
+/sys/power/state or by invoking the reboot system call with the
+appropriate arguments. This functionality may be wrapped by user space
+commands such as "systemctl hibernate" that are run directly from a
+command line or in response to events such as the laptop lid closing.
+
+Considerations for Guest VM Hibernation
+---------------------------------------
+Linux guests on Hyper-V can also be hibernated, in which case the
+hardware is the virtual hardware provided by Hyper-V to the guest VM.
+Only the targeted guest VM is hibernated, while other guest VMs and
+the underlying Hyper-V host continue to run normally. While the
+underlying Windows Hyper-V and physical hardware on which it is
+running might also be hibernated using hibernation functionality in
+the Windows host, host hibernation and its impact on guest VMs is not
+in scope for this documentation.
+
+Resuming a hibernated guest VM can be more challenging than with
+physical hardware because VMs make it very easy to change the hardware
+configuration between the hibernation and resume. Even when the resume
+is done on the same VM that hibernated, the memory size might be
+changed, or virtual NICs or SCSI controllers might be added or
+removed. Virtual PCI devices assigned to the VM might be added or
+removed. Most such changes cause the resume steps to fail, though
+adding a new virtual NIC, SCSI controller, or vPCI device should work.
+
+Additional complexity can ensue because the disks of the hibernated VM
+can be moved to another newly created VM that otherwise has the same
+virtual hardware configuration. While it is desirable for resume from
+hibernation to succeed after such a move, there are challenges. See
+details on this scenario and its limitations in the "Resuming on a
+Different VM" section below.
+
+Hyper-V also provides ways to move a VM from one Hyper-V host to
+another. Hyper-V tries to ensure processor model and Hyper-V version
+compatibility using VM Configuration Versions, and prevents moves to
+a host that isn't compatible. Linux adapts to host and processor
+differences by detecting them at boot time, but such detection is not
+done when resuming execution in the hibernation image. If a VM is
+hibernated on one host, then resumed on a host with a different processor
+model or Hyper-V version, settings recorded in the hibernation image
+may not match the new host. Because Linux does not detect such
+mismatches when resuming the hibernation image, undefined behavior
+and failures could result.
+
+
+Enabling Guest VM Hibernation
+-----------------------------
+Hibernation of a Hyper-V guest VM is disabled by default because
+hibernation is incompatible with memory hot-add, as provided by the
+Hyper-V balloon driver. If hot-add is used and the VM hibernates, it
+hibernates with more memory than it started with. But when the VM
+resumes from hibernation, Hyper-V gives the VM only the originally
+assigned memory, and the memory size mismatch causes resume to fail.
+
+To enable a Hyper-V VM for hibernation, the Hyper-V administrator must
+enable the ACPI virtual S4 sleep state in the ACPI configuration that
+Hyper-V provides to the guest VM. Such enablement is accomplished by
+modifying a WMI property of the VM, the steps for which are outside
+the scope of this documentation but are available on the web.
+Enablement is treated as the indicator that the administrator
+prioritizes Linux hibernation in the VM over hot-add, so the Hyper-V
+balloon driver in Linux disables hot-add. Enablement is indicated if
+the contents of /sys/power/disk contain "platform" as an option. The
+enablement is also visible in /sys/bus/vmbus/hibernation. See function
+hv_is_hibernation_supported().
+
+Linux supports ACPI sleep states on x86, but not on arm64. So Linux
+guest VM hibernation is not available on Hyper-V for arm64.
+
+Initiating Guest VM Hibernation
+-------------------------------
+Guest VMs can self-initiate hibernation using the standard Linux
+methods of writing "disk" to /sys/power/state or the reboot system
+call. As an additional layer, Linux guests on Hyper-V support the
+"Shutdown" integration service, via which a Hyper-V administrator can
+tell a Linux VM to hibernate using a command outside the VM. The
+command generates a request to the Hyper-V shutdown driver in Linux,
+which sends the uevent "EVENT=hibernate". See kernel functions
+shutdown_onchannelcallback() and send_hibernate_uevent(). A udev rule
+must be provided in the VM that handles this event and initiates
+hibernation.
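+
+For reference, self-initiating hibernation from within the guest can be a
+simple write of "disk" to /sys/power/state (a sketch, with no error
+reporting beyond the return value)::
+
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    static int hibernate_now(void)
+    {
+            int fd = open("/sys/power/state", O_WRONLY);
+
+            if (fd < 0)
+                    return -1;
+            if (write(fd, "disk", 4) != 4) {
+                    close(fd);
+                    return -1;
+            }
+            return close(fd);
+    }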
+
+Handling VMBus Devices During Hibernation & Resume
+--------------------------------------------------
+The VMBus bus driver, and the individual VMBus device drivers,
+implement suspend and resume functions that are called as part of the
+Linux orchestration of hibernation and of resuming from hibernation.
+The overall approach is to leave in place the data structures for the
+primary VMBus channels and their associated Linux devices, such as
+SCSI controllers and others, so that they are captured in the
+hibernation image. This approach allows any state associated with the
+device to be persisted across the hibernation/resume. When the VM
+resumes, the devices are re-offered by Hyper-V and are connected to
+the data structures that already exist in the resumed hibernation
+image.
+
+VMBus devices are identified by class and instance GUID. (See section
+"VMBus device creation/deletion" in
+Documentation/virt/hyperv/vmbus.rst.) Upon resume from hibernation,
+the resume functions expect that the devices offered by Hyper-V have
+the same class/instance GUIDs as the devices present at the time of
+hibernation. Having the same class/instance GUIDs allows the offered
+devices to be matched to the primary VMBus channel data structures in
+the memory of the now resumed hibernation image. If any devices are
+offered that don't match primary VMBus channel data structures that
+already exist, they are processed normally as newly added devices. If
+primary VMBus channels that exist in the resumed hibernation image are
+not matched with a device offered in the resumed VM, the resume
+sequence waits for 10 seconds, then proceeds. But the unmatched device
+is likely to cause errors in the resumed VM.
+
+When resuming existing primary VMBus channels, the newly offered
+relids might be different because relids can change on each VM boot,
+even if the VM configuration hasn't changed. The VMBus bus driver
+resume function matches the class/instance GUIDs, and updates the
+relids in case they have changed.
+
+VMBus sub-channels are not persisted in the hibernation image. Each
+VMBus device driver's suspend function must close any sub-channels
+prior to hibernation. Closing a sub-channel causes Hyper-V to send a
+RESCIND_CHANNELOFFER message, which Linux processes by freeing the
+channel data structures so that all vestiges of the sub-channel are
+removed. By contrast, primary channels are marked closed and their
+ring buffers are freed, but Hyper-V does not send a rescind message,
+so the channel data structure continues to exist. Upon resume, the
+device driver's resume function re-allocates the ring buffer and
+re-opens the existing channel. It then communicates with Hyper-V to
+re-open sub-channels from scratch.
+
+The Linux ends of Hyper-V sockets are forced closed at the time of
+hibernation. The guest can't force closing the host end of the socket,
+but any host-side actions on the host end will produce an error.
+
+VMBus devices use the same suspend function for the "freeze" and the
+"poweroff" phases, and the same resume function for the "thaw" and
+"restore" phases. See the "Entering Hibernation" section of
+Documentation/driver-api/pm/devices.rst for the sequencing of the
+phases.
+
+Detailed Hibernation Sequence
+-----------------------------
+1. The Linux power management (PM) subsystem prepares for
+ hibernation by freezing user space processes and allocating
+ memory to hold the hibernation image.
+2. As part of the "freeze" phase, Linux PM calls the "suspend"
+ function for each VMBus device in turn. As described above, this
+ function removes sub-channels, and leaves the primary channel in
+ a closed state.
+3. Linux PM calls the "suspend" function for the VMBus bus, which
+ closes any Hyper-V socket channels and unloads the top-level
+ VMBus connection with the Hyper-V host.
+4. Linux PM disables non-boot CPUs, creates the hibernation image in
+ the previously allocated memory, then re-enables non-boot CPUs.
+ The hibernation image contains the memory data structures for the
+ closed primary channels, but no sub-channels.
+5. As part of the "thaw" phase, Linux PM calls the "resume" function
+ for the VMBus bus, which re-establishes the top-level VMBus
+ connection and requests that Hyper-V re-offer the VMBus devices.
+ As offers are received for the primary channels, the relids are
+ updated as previously described.
+6. Linux PM calls the "resume" function for each VMBus device. Each
+ device re-opens its primary channel, and communicates with Hyper-V
+ to re-establish sub-channels if appropriate. The sub-channels
+ are re-created as new channels since they were previously removed
+ entirely in Step 2.
+7. With VMBus devices now working again, Linux PM writes the
+ hibernation image from memory to disk.
+8. Linux PM repeats Steps 2 and 3 above as part of the "poweroff"
+ phase. VMBus channels are closed and the top-level VMBus
+ connection is unloaded.
+9. Linux PM disables non-boot CPUs, and then enters ACPI sleep state
+ S4. Hibernation is now complete.
+
+Detailed Resume Sequence
+------------------------
+1. The guest VM boots into a fresh Linux OS instance. During boot,
+ the top-level VMBus connection is established, and synthetic
+ devices are enabled. This happens via the normal paths that don't
+ involve hibernation.
+2. Linux PM hibernation code checks swap space to find and read
+ the hibernation image into memory. If there is no hibernation
+ image, then this boot becomes a normal boot.
+3. If this is a resume from hibernation, the "freeze" phase is used
+ to shut down VMBus devices and unload the top-level VMBus
+ connection in the running fresh OS instance, just like Steps 2
+ and 3 in the hibernation sequence.
+4. Linux PM disables non-boot CPUs, and transfers control to the
+ read-in hibernation image. In the now-running hibernation image,
+ non-boot CPUs are restarted.
+5. As part of the "resume" phase, Linux PM repeats Steps 5 and 6
+ from the hibernation sequence. The top-level VMBus connection is
+ re-established, and offers are received and matched to primary
+ channels in the image. Relids are updated. VMBus device resume
+ functions re-open primary channels and re-create sub-channels.
+6. Linux PM exits the hibernation resume sequence and the VM is now
+ running normally from the hibernation image.
+
+Key-Value Pair (KVP) Pseudo-Device Anomalies
+--------------------------------------------
+The VMBus KVP device behaves differently from other pseudo-devices
+offered by Hyper-V. When the KVP primary channel is closed, Hyper-V
+sends a rescind message, which causes all vestiges of the device to be
+removed. But Hyper-V then re-offers the device, causing it to be newly
+re-created. The removal and re-creation occurs during the "freeze"
+phase of hibernation, so the hibernation image contains the re-created
+KVP device. Similar behavior occurs during the "freeze" phase of the
+resume sequence while still in the fresh OS instance. But in both
+cases, the top-level VMBus connection is subsequently unloaded, which
+causes the device to be discarded on the Hyper-V side. So no harm is
+done and everything still works.
+
+Virtual PCI devices
+-------------------
+Virtual PCI devices are physical PCI devices that are mapped directly
+into the VM's physical address space so the VM can interact directly
+with the hardware. vPCI devices include those accessed via what Hyper-V
+calls "Discrete Device Assignment" (DDA), as well as SR-IOV NIC
+Virtual Functions (VF) devices. See Documentation/virt/hyperv/vpci.rst.
+
+Hyper-V DDA devices are offered to guest VMs after the top-level VMBus
+connection is established, just like VMBus synthetic devices. They are
+statically assigned to the VM, and their instance GUIDs don't change
+unless the Hyper-V administrator makes changes to the configuration.
+DDA devices are represented in Linux as virtual PCI devices that have
+a VMBus identity as well as a PCI identity. Consequently, Linux guest
+hibernation first handles DDA devices as VMBus devices in order to
+manage the VMBus channel. But then they are also handled as PCI
+devices using the hibernation functions implemented by their native
+PCI driver.
+
+SR-IOV NIC VFs also have a VMBus identity as well as a PCI
+identity, and overall are processed similarly to DDA devices. A
+difference is that VFs are not offered to the VM during initial boot
+of the VM. Instead, the VMBus synthetic NIC driver first starts
+operating and communicates to Hyper-V that it is prepared to accept a
+VF, and then the VF offer is made. However, the VMBus connection
+might later be unloaded and then re-established without the VM being
+rebooted, as happens in Steps 3 and 5 in the Detailed Hibernation
+Sequence above and in the Detailed Resume Sequence. In such a case,
+the VFs likely became part of the VM during initial boot, so when the
+VMBus connection is re-established, the VFs are offered on the
+re-established connection without intervention by the synthetic NIC driver.
+
+UIO Devices
+-----------
+A VMBus device can be exposed to user space using the Hyper-V UIO
+driver (uio_hv_generic.c) so that a user space driver can control and
+operate the device. However, the VMBus UIO driver does not support the
+suspend and resume operations needed for hibernation. If a VMBus
+device is configured to use the UIO driver, hibernating the VM fails
+and Linux continues to run normally. The most common use of the Hyper-V
+UIO driver is for DPDK networking, but there are other uses as well.
+
+Resuming on a Different VM
+--------------------------
+This scenario occurs in the Azure public cloud, where a hibernated
+customer VM exists only as saved configuration and disks -- the VM no
+longer exists on any Hyper-V host. When the customer VM is resumed, a
+new Hyper-V VM with identical configuration is created, likely on a
+different Hyper-V host. That new Hyper-V VM becomes the resumed
+customer VM, and the steps the Linux kernel takes to resume from the
+hibernation image must work in that new VM.
+
+While the disks and their contents are preserved from the original VM,
+the Hyper-V-provided VMBus instance GUIDs of the disk controllers and
+other synthetic devices would typically be different. The difference
+would cause the resume from hibernation to fail, so several things are
+done to solve this problem:
+
+* For VMBus synthetic devices that support only a single instance,
+ Hyper-V always assigns the same instance GUIDs. For example, the
+ Hyper-V mouse, the shutdown pseudo-device, the time sync pseudo
+ device, etc., always have the same instance GUID, both for local
+ Hyper-V installs as well as in the Azure cloud.
+
+* VMBus synthetic SCSI controllers may have multiple instances in a
+ VM, and in the general case instance GUIDs vary from VM to VM.
+ However, Azure VMs always have exactly two synthetic SCSI
+ controllers, and Azure code overrides the normal Hyper-V behavior
+ so these controllers are always assigned the same two instance
+ GUIDs. Consequently, when a customer VM is resumed on a newly
+ created VM, the instance GUIDs match. But this guarantee does not
+ hold for local Hyper-V installs.
+
+* Similarly, VMBus synthetic NICs may have multiple instances in a
+ VM, and the instance GUIDs vary from VM to VM. Again, Azure code
+ overrides the normal Hyper-V behavior so that the instance GUID
+ of a synthetic NIC in a customer VM does not change, even if the
+ customer VM is deallocated or hibernated, and then re-constituted
+ on a newly created VM. As with SCSI controllers, this behavior
+ does not hold for local Hyper-V installs.
+
+* vPCI devices do not have the same instance GUIDs when resuming
+ from hibernation on a newly created VM. Consequently, Azure does
+ not support hibernation for VMs that have DDA devices such as
+ NVMe controllers or GPUs. For SR-IOV NIC VFs, Azure removes the
+ VF from the VM before it hibernates so that the hibernation image
+ does not contain a VF device. When the VM is resumed it
+ instantiates a new VF, rather than trying to match against a VF
+ that is present in the hibernation image. Because Azure must
+ remove any VFs before initiating hibernation, Azure VM
+ hibernation must be initiated externally from the Azure Portal or
+ Azure CLI, which in turn uses the Shutdown integration service to
+ tell Linux to do the hibernation. If hibernation is self-initiated
+ within the Azure VM, VFs remain in the hibernation image, and are
+ not resumed properly.
+
+In summary, Azure takes special actions to remove VFs and to ensure
+that VMBus device instance GUIDs match on a new/different VM, allowing
+hibernation to work for most general-purpose Azure VM sizes. While
+similar special actions could be taken when resuming on a different VM
+on a local Hyper-V install, orchestrating such actions is not provided
+out-of-the-box by local Hyper-V and so requires custom scripting.
diff --git a/Documentation/virt/hyperv/index.rst b/Documentation/virt/hyperv/index.rst
index 79bc4080329e..c84c40fd61c9 100644
--- a/Documentation/virt/hyperv/index.rst
+++ b/Documentation/virt/hyperv/index.rst
@@ -11,4 +11,5 @@ Hyper-V Enlightenments
vmbus
clocks
vpci
+ hibernation
coco
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 3514c4018f40..2b52eb77e29c 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1419,7 +1419,7 @@ fetch) is injected in the guest.
S390:
^^^^^
-Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set.
+Returns -EINVAL or -EEXIST if the VM has the KVM_VM_S390_UCONTROL flag set.
Returns -EINVAL if called on a protected VM.
4.36 KVM_SET_TSS_ADDR
@@ -1825,15 +1825,18 @@ emulate them efficiently. The fields in each entry are defined as follows:
the values returned by the cpuid instruction for
this function/index combination
-The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
-as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
-support. Instead it is reported via::
+x2APIC (CPUID leaf 1, ecx[21]) and TSC deadline timer (CPUID leaf 1, ecx[24])
+may be returned as true, but they depend on KVM_CREATE_IRQCHIP for in-kernel
+emulation of the local APIC. TSC deadline timer support is also reported via::
ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+Enabling x2APIC in KVM_SET_CPUID2 requires KVM_CREATE_IRQCHIP as KVM doesn't
+support forwarding x2APIC MSR accesses to userspace, i.e. KVM does not support
+emulating x2APIC in userspace.
4.47 KVM_PPC_GET_PVINFO
-----------------------
@@ -7673,6 +7676,7 @@ branch to guests' 0x200 interrupt vector.
:Architectures: x86
:Parameters: args[0] defines which exits are disabled
:Returns: 0 on success, -EINVAL when args[0] contains invalid exits
+ or if any vCPUs have already been created
Valid bits in args[0] are::
diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index 31f14ec4a65b..31a9576c07af 100644
--- a/Documentation/virt/kvm/devices/vcpu.rst
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -142,8 +142,8 @@ the cpu field to the processor id.
:Architectures: ARM64
-2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_VTIMER, KVM_ARM_VCPU_TIMER_IRQ_PTIMER
------------------------------------------------------------------------------
+2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_{VTIMER,PTIMER,HVTIMER,HPTIMER}
+-----------------------------------------------------------------------
:Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
pointer to an int
@@ -159,10 +159,12 @@ A value describing the architected timer interrupt number when connected to an
in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
attribute overrides the default values (see below).
-============================= ==========================================
-KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27)
-KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30)
-============================= ==========================================
+============================== ==========================================
+KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27)
+KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30)
+KVM_ARM_VCPU_TIMER_IRQ_HVTIMER The EL2 virtual timer intid (default: 28)
+KVM_ARM_VCPU_TIMER_IRQ_HPTIMER The EL2 physical timer intid (default: 26)
+============================== ==========================================
Setting the same PPI for different timers will prevent the VCPUs from running.
Setting the interrupt number on a VCPU configures all VCPUs created at that
diff --git a/Documentation/wmi/driver-development-guide.rst b/Documentation/wmi/driver-development-guide.rst
index 676873c98680..f7e1089a0559 100644
--- a/Documentation/wmi/driver-development-guide.rst
+++ b/Documentation/wmi/driver-development-guide.rst
@@ -41,6 +41,10 @@ helps in understanding how the WMI device is supposed to work. The path of the A
method associated with a given WMI device can be retrieved using the ``lswmi`` utility
as mentioned above.
+If you are attempting to port a driver to Linux and are working on a Windows
+system, `WMIExplorer <https://github.com/vinaypamnani/wmie2>`_ can be useful
+for inspecting available WMI methods and invoking them directly.
+
Basic WMI driver structure
--------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c04ea006eb5..25c86f47353d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -824,7 +824,7 @@ F: drivers/media/platform/sunxi/sun4i-csi/
ALLWINNER A31 CSI DRIVER
M: Yong Deng <yong.deng@magewell.com>
-M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+M: Paul Kocialkowski <paulk@sys-base.io>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -832,7 +832,7 @@ F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
F: drivers/media/platform/sunxi/sun6i-csi/
ALLWINNER A31 ISP DRIVER
-M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+M: Paul Kocialkowski <paulk@sys-base.io>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -841,7 +841,7 @@ F: drivers/staging/media/sunxi/sun6i-isp/
F: drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h
ALLWINNER A31 MIPI CSI-2 BRIDGE DRIVER
-M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+M: Paul Kocialkowski <paulk@sys-base.io>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media.git
@@ -884,7 +884,7 @@ F: drivers/thermal/sun8i_thermal.c
ALLWINNER VPU DRIVER
M: Maxime Ripard <mripard@kernel.org>
-M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+M: Paul Kocialkowski <paulk@sys-base.io>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/staging/media/sunxi/cedrus/
@@ -987,6 +987,12 @@ L: linux-edac@vger.kernel.org
S: Supported
F: drivers/ras/amd/atl/*
+AMD AE4DMA DRIVER
+M: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/amd/ae4dma/
+
AMD AXI W1 DRIVER
M: Kris Chaplin <kris.chaplin@amd.com>
R: Thomas Delev <thomas.delev@amd.com>
@@ -1084,7 +1090,7 @@ F: drivers/video/fbdev/geode/
AMD HSMP DRIVER
M: Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
-R: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+R: Carlos Bilbao <carlos.bilbao@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/arch/x86/amd_hsmp.rst
@@ -1148,7 +1154,7 @@ F: include/linux/pds/
AMD PMC DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
L: platform-driver-x86@vger.kernel.org
-S: Maintained
+S: Supported
F: drivers/platform/x86/amd/pmc/
AMD PMF DRIVER
@@ -1179,8 +1185,8 @@ F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
M: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
L: dmaengine@vger.kernel.org
-S: Maintained
-F: drivers/dma/ptdma/
+S: Supported
+F: drivers/dma/amd/ptdma/
AMD QDMA DRIVER
M: Nishad Saraf <nishads@amd.com>
@@ -1318,7 +1324,7 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
F: Documentation/iio/ad4695.rst
F: drivers/iio/adc/ad4695.c
-F: include/dt-bindings/iio/adi,ad4695.h
+F: include/dt-bindings/iio/adc/adi,ad4695.h
ANALOG DEVICES INC AD7091R DRIVER
M: Marcelo Schmitt <marcelo.schmitt@analog.com>
@@ -2203,7 +2209,6 @@ F: sound/soc/codecs/cs42l84.*
F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
-M: Hector Martin <marcan@marcan.st>
M: Sven Peter <sven@svenpeter.dev>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: asahi@lists.linux.dev
@@ -2309,6 +2314,15 @@ F: arch/arm64/boot/dts/bitmain/
F: drivers/clk/clk-bm1880.c
F: drivers/pinctrl/pinctrl-bm1880.c
+ARM/BLAIZE ARCHITECTURE
+M: James Cowgill <james.cowgill@blaize.com>
+M: Matt Redfearn <matt.redfearn@blaize.com>
+M: Neil Jones <neil.jones@blaize.com>
+M: Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>
+S: Maintained
+F: Documentation/devicetree/bindings/arm/blaize.yaml
+F: arch/arm64/boot/dts/blaize/
+
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Andre Przywara <andre.przywara@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2855,7 +2869,7 @@ ARM/NXP S32G ARCHITECTURE
R: Chester Lin <chester62515@gmail.com>
R: Matthias Brugger <mbrugger@suse.com>
R: Ghennadi Procopciuc <ghennadi.procopciuc@oss.nxp.com>
-L: NXP S32 Linux Team <s32@nxp.com>
+R: NXP S32 Linux Team <s32@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/freescale/s32g*.dts*
@@ -3055,6 +3069,7 @@ F: drivers/*/*s3c24*
F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
F: drivers/clocksource/samsung_pwm_timer.c
+F: drivers/mailbox/exynos-mailbox.c
F: drivers/memory/samsung/
F: drivers/pwm/pwm-samsung.c
F: drivers/soc/samsung/
@@ -3939,6 +3954,7 @@ M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcachefs@vger.kernel.org
S: Supported
C: irc://irc.oftc.net/bcache
+P: Documentation/filesystems/bcachefs/SubmittingPatches.rst
T: git https://evilpiepirate.org/git/bcachefs.git
F: fs/bcachefs/
F: Documentation/filesystems/bcachefs/
@@ -4086,6 +4102,7 @@ S: Supported
W: http://www.bluez.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
+F: Documentation/ABI/stable/sysfs-class-bluetooth
F: include/net/bluetooth/
F: net/bluetooth/
@@ -4800,6 +4817,7 @@ F: drivers/scsi/mpi3mr/
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
+M: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.broadcom.com
@@ -5043,7 +5061,7 @@ F: drivers/media/platform/cadence/cdns-csi2*
CADENCE NAND DRIVER
L: linux-mtd@lists.infradead.org
S: Orphan
-F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+F: Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml
F: drivers/mtd/nand/raw/cadence-nand-controller.c
CADENCE USB3 DRD IP DRIVER
@@ -5182,6 +5200,7 @@ M: Serge Hallyn <serge@hallyn.com>
L: linux-security-module@vger.kernel.org
S: Supported
F: include/linux/capability.h
+F: include/trace/events/capability.h
F: include/uapi/linux/capability.h
F: kernel/capability.c
F: security/commoncap.c
@@ -5354,6 +5373,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: drivers/char/
F: drivers/misc/
F: include/linux/miscdevice.h
+F: samples/rust/rust_misc_device.rs
X: drivers/char/agp/
X: drivers/char/hw_random/
X: drivers/char/ipmi/
@@ -5469,9 +5489,12 @@ F: include/linux/platform_data/cros_usbpd_notify.h
CHROMEOS EC USB TYPE-C DRIVER
M: Prashant Malani <pmalani@chromium.org>
+M: Benson Leung <bleung@chromium.org>
+M: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/platform/chrome/cros_ec_typec.*
+F: drivers/platform/chrome/cros_typec_altmode.*
F: drivers/platform/chrome/cros_typec_switch.c
F: drivers/platform/chrome/cros_typec_vdm.*
@@ -5487,6 +5510,13 @@ L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/watchdog/cros_ec_wdt.c
+CHROMEOS UCSI DRIVER
+M: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
+M: Łukasz Bartosik <ukaszb@chromium.org>
+L: chrome-platform@lists.linux.dev
+S: Maintained
+F: drivers/usb/typec/ucsi/cros_ec_ucsi.c
+
CHRONTEL CH7322 CEC DRIVER
M: Joe Tessler <jrt@google.com>
L: linux-media@vger.kernel.org
@@ -5535,8 +5565,8 @@ L: patches@opensource.cirrus.com
S: Supported
W: https://github.com/CirrusLogic/linux-drivers/wiki
T: git https://github.com/CirrusLogic/linux-drivers.git
-F: drivers/firmware/cirrus/*
-F: include/linux/firmware/cirrus/*
+F: drivers/firmware/cirrus/
+F: include/linux/firmware/cirrus/
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -5827,7 +5857,7 @@ F: drivers/usb/atm/cxacru.c
CONFIDENTIAL COMPUTING THREAT MODEL FOR X86 VIRTUALIZATION (SNP/TDX)
M: Elena Reshetova <elena.reshetova@intel.com>
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
S: Maintained
F: Documentation/security/snp-tdx-threat-model.rst
@@ -6092,6 +6122,17 @@ S: Maintained
F: Documentation/filesystems/cramfs.rst
F: fs/cramfs/
+CRC LIBRARY
+M: Eric Biggers <ebiggers@kernel.org>
+R: Ard Biesheuvel <ardb@kernel.org>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux.git crc-next
+F: Documentation/staging/crc*
+F: arch/*/lib/crc*
+F: include/linux/crc*
+F: lib/crc*
+
CREATIVE SB0540
M: Bastien Nocera <hadess@hadess.net>
L: linux-input@vger.kernel.org
@@ -6371,6 +6412,7 @@ F: Documentation/mm/damon/
F: include/linux/damon.h
F: include/trace/events/damon.h
F: mm/damon/
+F: samples/damon/
F: tools/testing/selftests/damon/
DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
@@ -6691,19 +6733,14 @@ L: linux-rtc@vger.kernel.org
S: Maintained
F: drivers/rtc/rtc-sd2405al.c
-DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
+DH ELECTRONICS DHSOM SOM AND BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
-L: kernel@dh-electronics.com
-S: Maintained
-F: arch/arm/boot/dts/nxp/imx/imx6*-dhcom-*
-F: arch/arm/boot/dts/nxp/imx/imx6*-dhcor-*
-
-DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
M: Marek Vasut <marex@denx.de>
L: kernel@dh-electronics.com
S: Maintained
-F: arch/arm/boot/dts/st/stm32mp1*-dhcom-*
-F: arch/arm/boot/dts/st/stm32mp1*-dhcor-*
+N: dhcom
+N: dhcor
+N: dhsom
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource@diasemi.com>
@@ -7062,6 +7099,7 @@ F: include/linux/component.h
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
R: "Rafael J. Wysocki" <rafael@kernel.org>
+R: Danilo Krummrich <dakr@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
F: Documentation/core-api/kobject.rst
@@ -7072,8 +7110,14 @@ F: include/linux/debugfs.h
F: include/linux/fwnode.h
F: include/linux/kobj*
F: include/linux/property.h
+F: include/linux/sysfs.h
F: lib/kobj*
F: rust/kernel/device.rs
+F: rust/kernel/device_id.rs
+F: rust/kernel/devres.rs
+F: rust/kernel/driver.rs
+F: rust/kernel/platform.rs
+F: samples/rust/rust_driver_platform.rs
DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS)
M: Nishanth Menon <nm@ti.com>
@@ -7278,7 +7322,7 @@ F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
F: drivers/gpu/drm/panel/panel-lg-sw43408.c
DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
-M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+M: Paul Kocialkowski <paulk@sys-base.io>
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/logicvc/
@@ -8681,6 +8725,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon.git
F: Documentation/devicetree/bindings/extcon/
+F: Documentation/driver-api/extcon.rst
F: Documentation/firmware-guide/acpi/extcon-intel-int3496.rst
F: drivers/extcon/
F: include/linux/extcon.h
@@ -8995,14 +9040,16 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/joystick/fsia6b.c
-FOCUSRITE SCARLETT2 MIXER DRIVER (Scarlett Gen 2+ and Clarett)
+FOCUSRITE CONTROL PROTOCOL/SCARLETT2 MIXER DRIVERS (Scarlett Gen 2+, Clarett, and Vocaster)
M: Geoffrey D. Bennett <g@b4.vu>
L: linux-sound@vger.kernel.org
S: Maintained
-W: https://github.com/geoffreybennett/scarlett-gen2
-B: https://github.com/geoffreybennett/scarlett-gen2/issues
-T: git https://github.com/geoffreybennett/scarlett-gen2.git
+W: https://github.com/geoffreybennett/linux-fcp
+B: https://github.com/geoffreybennett/linux-fcp/issues
+T: git https://github.com/geoffreybennett/linux-fcp.git
+F: include/uapi/sound/fcp.h
F: include/uapi/sound/scarlett2.h
+F: sound/usb/fcp.c
F: sound/usb/mixer_scarlett2.c
FORCEDETH GIGABIT ETHERNET DRIVER
@@ -9371,7 +9418,7 @@ F: fs/freevxfs/
FREEZER
M: "Rafael J. Wysocki" <rafael@kernel.org>
-M: Pavel Machek <pavel@ucw.cz>
+M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/power/freezing-of-tasks.rst
@@ -9594,6 +9641,13 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media.git
F: drivers/media/radio/radio-gemtek*
+GENDWARFKSYMS
+M: Sami Tolvanen <samitolvanen@google.com>
+L: linux-modules@vger.kernel.org
+L: linux-kbuild@vger.kernel.org
+S: Maintained
+F: scripts/gendwarfksyms/
+
GENERIC ARCHITECTURE TOPOLOGY
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-kernel@vger.kernel.org
@@ -9799,9 +9853,12 @@ F: drivers/firmware/google/
GOOGLE TENSOR SoC SUPPORT
M: Peter Griffin <peter.griffin@linaro.org>
+R: André Draszik <andre.draszik@linaro.org>
+R: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.oftc.net/pixel6-kernel-dev
F: Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
F: arch/arm64/boot/dts/exynos/google/
F: drivers/clk/samsung/clk-gs101.c
@@ -9821,7 +9878,7 @@ S: Maintained
F: drivers/staging/gpib/
GPIO ACPI SUPPORT
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Mika Westerberg <westeri@kernel.org>
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
@@ -10076,7 +10133,8 @@ F: include/trace/events/handshake.h
F: net/handshake/
HANTRO VPU CODEC DRIVER
-M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+M: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+M: Benjamin Gaignard <benjamin.gaignard@collabora.com>
M: Philipp Zabel <p.zabel@pengutronix.de>
L: linux-media@vger.kernel.org
L: linux-rockchip@lists.infradead.org
@@ -10195,7 +10253,7 @@ F: drivers/video/fbdev/hgafb.c
HIBERNATION (aka Software Suspend, aka swsusp)
M: "Rafael J. Wysocki" <rafael@kernel.org>
-M: Pavel Machek <pavel@ucw.cz>
+M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
@@ -10716,10 +10774,8 @@ F: Documentation/devicetree/bindings/bus/microsoft,vmbus.yaml
F: Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst
F: Documentation/virt/hyperv
F: arch/arm64/hyperv
-F: arch/arm64/include/asm/hyperv-tlfs.h
F: arch/arm64/include/asm/mshyperv.h
F: arch/x86/hyperv
-F: arch/x86/include/asm/hyperv-tlfs.h
F: arch/x86/include/asm/mshyperv.h
F: arch/x86/include/asm/trace/hyperv.h
F: arch/x86/kernel/cpu/mshyperv.c
@@ -10735,9 +10791,13 @@ F: drivers/pci/controller/pci-hyperv.c
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
-F: include/asm-generic/hyperv-tlfs.h
F: include/asm-generic/mshyperv.h
F: include/clocksource/hyperv_timer.h
+F: include/hyperv/hvgdk.h
+F: include/hyperv/hvgdk_ext.h
+F: include/hyperv/hvgdk_mini.h
+F: include/hyperv/hvhdk.h
+F: include/hyperv/hvhdk_mini.h
F: include/linux/hyperv.h
F: include/net/mana
F: include/uapi/linux/hyperv.h
@@ -11271,7 +11331,7 @@ S: Orphan
F: drivers/video/fbdev/imsttfb.c
INDEX OF FURTHER KERNEL DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
S: Maintained
F: Documentation/process/kernel-docs.rst
@@ -11485,9 +11545,8 @@ F: drivers/mfd/intel_pmc_bxt.c
F: include/linux/mfd/intel_pmc_bxt.h
INTEL C600 SERIES SAS CONTROLLER DRIVER
-M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
L: linux-scsi@vger.kernel.org
-S: Supported
+S: Orphan
T: git git://git.code.sf.net/p/intel-sas/isci
F: drivers/scsi/isci/
@@ -12661,8 +12720,8 @@ F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
-F: tools/testing/selftests/kvm/*/aarch64/
-F: tools/testing/selftests/kvm/aarch64/
+F: tools/testing/selftests/kvm/*/arm64/
+F: tools/testing/selftests/kvm/arm64/
KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch)
M: Tianrui Zhao <zhaotianrui@loongson.cn>
@@ -12733,8 +12792,8 @@ F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
F: drivers/s390/char/uvdevice.c
F: tools/testing/selftests/drivers/s390x/uvdevice/
-F: tools/testing/selftests/kvm/*/s390x/
-F: tools/testing/selftests/kvm/s390x/
+F: tools/testing/selftests/kvm/*/s390/
+F: tools/testing/selftests/kvm/s390/
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Sean Christopherson <seanjc@google.com>
@@ -12751,8 +12810,8 @@ F: arch/x86/include/uapi/asm/svm.h
F: arch/x86/include/uapi/asm/vmx.h
F: arch/x86/kvm/
F: arch/x86/kvm/*/
-F: tools/testing/selftests/kvm/*/x86_64/
-F: tools/testing/selftests/kvm/x86_64/
+F: tools/testing/selftests/kvm/*/x86/
+F: tools/testing/selftests/kvm/x86/
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13065,8 +13124,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: scripts/leaking_addresses.pl
LED SUBSYSTEM
-M: Pavel Machek <pavel@ucw.cz>
M: Lee Jones <lee@kernel.org>
+M: Pavel Machek <pavel@kernel.org>
L: linux-leds@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/leds.git
@@ -14565,6 +14624,14 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media.git
F: drivers/media/dvb-frontends/stv6111*
+MEDIA DRIVERS FOR STM32 - CSI
+M: Alain Volmat <alain.volmat@foss.st.com>
+L: linux-media@vger.kernel.org
+S: Supported
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
+F: drivers/media/platform/st/stm32/stm32-csi.c
+
MEDIA DRIVERS FOR STM32 - DCMI / DCMIPP
M: Hugues Fruchet <hugues.fruchet@foss.st.com>
M: Alain Volmat <alain.volmat@foss.st.com>
@@ -15139,7 +15206,15 @@ L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: mm/mlock.c
F: mm/mmap.c
+F: mm/mprotect.c
+F: mm/mremap.c
+F: mm/mseal.c
+F: mm/vma.c
+F: mm/vma.h
+F: mm/vma_internal.h
+F: tools/testing/vma/
MEMORY TECHNOLOGY DEVICES (MTD)
M: Miquel Raynal <miquel.raynal@bootlin.com>
@@ -16210,6 +16285,7 @@ F: drivers/scsi/sun3_scsi_vme.c
NCSI LIBRARY
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
+R: Paul Fertser <fercerpav@gmail.com>
S: Maintained
F: net/ncsi/
@@ -16386,6 +16462,22 @@ F: include/net/dsa.h
F: net/dsa/
F: tools/testing/selftests/drivers/net/dsa/
+NETWORKING [ETHTOOL]
+M: Andrew Lunn <andrew@lunn.ch>
+M: Jakub Kicinski <kuba@kernel.org>
+F: Documentation/netlink/specs/ethtool.yaml
+F: Documentation/networking/ethtool-netlink.rst
+F: include/linux/ethtool*
+F: include/uapi/linux/ethtool*
+F: net/ethtool/
+F: tools/testing/selftests/drivers/net/*/ethtool*
+
+NETWORKING [ETHTOOL CABLE TEST]
+M: Andrew Lunn <andrew@lunn.ch>
+F: net/ethtool/cabletest.c
+F: tools/testing/selftests/drivers/net/*/ethtool*
+K: cable_test
+
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
@@ -16544,6 +16636,8 @@ F: tools/testing/selftests/net/mptcp/
NETWORKING [TCP]
M: Eric Dumazet <edumazet@google.com>
+M: Neal Cardwell <ncardwell@google.com>
+R: Kuniyuki Iwashima <kuniyu@amazon.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/net_cachelines/tcp_sock.rst
@@ -16571,6 +16665,31 @@ F: include/net/tls.h
F: include/uapi/linux/tls.h
F: net/tls/*
+NETWORKING [SOCKETS]
+M: Eric Dumazet <edumazet@google.com>
+M: Kuniyuki Iwashima <kuniyu@amazon.com>
+M: Paolo Abeni <pabeni@redhat.com>
+M: Willem de Bruijn <willemb@google.com>
+S: Maintained
+F: include/linux/sock_diag.h
+F: include/linux/socket.h
+F: include/linux/sockptr.h
+F: include/net/sock.h
+F: include/net/sock_reuseport.h
+F: include/uapi/linux/socket.h
+F: net/core/*sock*
+F: net/core/scm.c
+F: net/socket.c
+
+NETWORKING [UNIX SOCKETS]
+M: Kuniyuki Iwashima <kuniyu@amazon.com>
+S: Maintained
+F: include/net/af_unix.h
+F: include/net/netns/unix.h
+F: include/uapi/linux/unix_diag.h
+F: net/unix/
+F: tools/testing/selftests/net/af_unix/
+
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manishc@marvell.com>
M: Rahul Verma <rahulv@marvell.com>
@@ -16672,8 +16791,8 @@ F: arch/nios2/
NITRO ENCLAVES (NE)
M: Alexandru Ciobotaru <alcioa@amazon.com>
+R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
L: linux-kernel@vger.kernel.org
-L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
S: Supported
W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/
F: Documentation/virt/ne_overview.rst
@@ -16684,8 +16803,8 @@ F: samples/nitro_enclaves/
NITRO SECURE MODULE (NSM)
M: Alexander Graf <graf@amazon.com>
+R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
L: linux-kernel@vger.kernel.org
-L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
S: Supported
W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/
F: drivers/misc/nsm.c
@@ -16704,7 +16823,7 @@ F: include/linux/tick.h
F: kernel/time/tick*.*
NOKIA N900 CAMERA SUPPORT (ET8EK8 SENSOR, AD5820 FOCUS)
-M: Pavel Machek <pavel@ucw.cz>
+M: Pavel Machek <pavel@kernel.org>
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
S: Maintained
@@ -16781,6 +16900,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git
F: Documentation/filesystems/ntfs3.rst
F: fs/ntfs3/
+NTSYNC SYNCHRONIZATION PRIMITIVE DRIVER
+M: Elizabeth Figura <zfigura@codeweavers.com>
+L: wine-devel@winehq.org
+S: Supported
+F: Documentation/userspace-api/ntsync.rst
+F: drivers/misc/ntsync.c
+F: include/uapi/linux/ntsync.h
+F: tools/testing/selftests/drivers/ntsync/
+
NUBUS SUBSYSTEM
M: Finn Thain <fthain@linux-m68k.org>
L: linux-m68k@lists.linux-m68k.org
@@ -16862,6 +16990,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/srini/nvmem.git
F: Documentation/ABI/stable/sysfs-bus-nvmem
F: Documentation/devicetree/bindings/nvmem/
F: drivers/nvmem/
+F: include/dt-bindings/nvmem/
F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
@@ -17569,6 +17698,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/ABI/testing/sysfs-firmware-ofw
F: drivers/of/
F: include/linux/of*.h
+F: rust/kernel/of.rs
F: scripts/dtc/
F: tools/testing/selftests/dt/
K: of_overlay_notifier_
@@ -17625,6 +17755,7 @@ L: netdev@vger.kernel.org
L: dev@openvswitch.org
S: Maintained
W: http://openvswitch.org
+F: Documentation/networking/openvswitch.rst
F: include/uapi/linux/openvswitch.h
F: net/openvswitch/
F: tools/testing/selftests/net/openvswitch/
@@ -17975,7 +18106,7 @@ M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in>
M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: Documentation/devicetree/bindings/pci/mbvl,gpex40-pcie.yaml
F: drivers/pci/controller/mobiveil/pcie-mobiveil*
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
@@ -17999,7 +18130,6 @@ M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
PCI DRIVER FOR PLDA PCIE IP
@@ -18077,7 +18207,7 @@ F: Documentation/PCI/endpoint/*
F: Documentation/misc-devices/pci-endpoint-test.rst
F: drivers/misc/pci_endpoint_test.c
F: drivers/pci/endpoint/
-F: tools/pci/
+F: tools/testing/selftests/pci_endpoint/
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
M: Mahesh J Salgaonkar <mahesh@linux.ibm.com>
@@ -18170,6 +18300,8 @@ F: include/asm-generic/pci*
F: include/linux/of_pci.h
F: include/linux/pci*
F: include/uapi/linux/pci*
+F: rust/kernel/pci.rs
+F: samples/rust/rust_driver_pci.rs
PCIE BANDWIDTH CONTROLLER
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
@@ -18498,8 +18630,8 @@ M: Fabio Estevam <festevam@gmail.com>
M: Shawn Guo <shawnguo@kernel.org>
M: Jacky Bai <ping.bai@nxp.com>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
+R: NXP S32 Linux Team <s32@nxp.com>
L: linux-gpio@vger.kernel.org
-L: NXP S32 Linux Team <s32@nxp.com>
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/fsl,*
F: Documentation/devicetree/bindings/pinctrl/nxp,s32*
@@ -18783,11 +18915,13 @@ L: linuxpps@ml.enneenne.com (subscribers-only)
S: Maintained
W: http://wiki.enneenne.com/index.php/LinuxPPS_support
F: Documentation/ABI/testing/sysfs-pps
+F: Documentation/ABI/testing/sysfs-pps-gen
F: Documentation/devicetree/bindings/pps/pps-gpio.yaml
F: Documentation/driver-api/pps.rst
F: drivers/pps/
F: include/linux/pps*.h
F: include/uapi/linux/pps.h
+F: include/uapi/linux/pps_gen.h
PRESSURE STALL INFORMATION (PSI)
M: Johannes Weiner <hannes@cmpxchg.org>
@@ -19362,7 +19496,7 @@ F: drivers/misc/fastrpc.c
F: include/uapi/misc/fastrpc.h
QUALCOMM HEXAGON ARCHITECTURE
-M: Brian Cain <bcain@quicinc.com>
+M: Brian Cain <brian.cain@oss.qualcomm.com>
L: linux-hexagon@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bcain/linux.git
@@ -19643,7 +19777,7 @@ F: drivers/ras/amd/fmpm.c
RASPBERRY PI PISP BACK END
M: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
-L: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+R: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/raspberrypi,pispbe.yaml
@@ -19766,6 +19900,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: Documentation/RCU/
F: include/linux/rcu*
F: kernel/rcu/
+F: rust/kernel/sync/rcu.rs
X: Documentation/RCU/torture.rst
X: include/linux/srcu*.h
X: kernel/rcu/srcu*.c
@@ -20152,7 +20287,7 @@ F: net/rfkill/
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
M: Herbert Xu <herbert@gondor.apana.org.au>
-L: netdev@vger.kernel.org
+L: linux-crypto@vger.kernel.org
S: Maintained
F: include/linux/rhashtable-types.h
F: include/linux/rhashtable.h
@@ -20259,6 +20394,15 @@ F: drivers/perf/riscv_pmu.c
F: drivers/perf/riscv_pmu_legacy.c
F: drivers/perf/riscv_pmu_sbi.c
+RISC-V SPACEMIT SoC Support
+M: Yixun Lan <dlan@gentoo.org>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+T: git https://github.com/spacemit-com/linux
+F: arch/riscv/boot/dts/spacemit/
+N: spacemit
+K: spacemit
+
RISC-V THEAD SoC SUPPORT
M: Drew Fustini <drew@pdp7.com>
M: Guo Ren <guoren@kernel.org>
@@ -20369,6 +20513,11 @@ L: linux-serial@vger.kernel.org
S: Odd Fixes
F: drivers/tty/serial/rp2.*
+ROHM BD79703 DAC
+M: Matti Vaittinen <mazziesaccount@gmail.com>
+S: Supported
+F: drivers/iio/dac/rohm-bd79703.c
+
ROHM BD99954 CHARGER IC
M: Matti Vaittinen <mazziesaccount@gmail.com>
S: Supported
@@ -20397,7 +20546,6 @@ ROHM BU270xx LIGHT SENSOR DRIVERs
M: Matti Vaittinen <mazziesaccount@gmail.com>
L: linux-iio@vger.kernel.org
S: Supported
-F: drivers/iio/light/rohm-bu27008.c
F: drivers/iio/light/rohm-bu27034.c
ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
@@ -20739,8 +20887,7 @@ F: arch/s390/include/uapi/asm/zcrypt.h
F: drivers/s390/crypto/
S390 ZFCP DRIVER
-M: Steffen Maier <maier@linux.ibm.com>
-M: Benjamin Block <bblock@linux.ibm.com>
+M: Nihar Panda <niharp@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
F: drivers/s390/scsi/zfcp_*
@@ -20795,6 +20942,15 @@ F: arch/arm64/boot/dts/exynos/exynos850*
F: drivers/clk/samsung/clk-exynos850.c
F: include/dt-bindings/clock/exynos850.h
+SAMSUNG EXYNOS MAILBOX DRIVER
+M: Tudor Ambarus <tudor.ambarus@linaro.org>
+L: linux-kernel@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/mailbox/google,gs101-mbox.yaml
+F: drivers/mailbox/exynos-mailbox.c
+F: include/linux/mailbox/exynos-message.h
+
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
@@ -21321,6 +21477,7 @@ M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
L: linux-sound@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/slimbus/
+F: Documentation/driver-api/slimbus.rst
F: drivers/slimbus/
F: include/linux/slimbus.h
@@ -22101,7 +22258,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/sp2*
SPANISH DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
R: Avadhut Naik <avadhut.naik@amd.com>
S: Maintained
F: Documentation/translations/sp_SP/
@@ -22251,6 +22408,14 @@ T: git git://linuxtv.org/media.git
F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
F: drivers/media/i2c/st-mipid02.c
+ST STC3117 FUEL GAUGE DRIVER
+M: Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>
+M: Bhavin Sharma <bhavin.sharma@siliconsignals.io>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/power/supply/st,stc3117.yaml
+F: drivers/power/supply/stc3117_fuel_gauge.c
+
ST STM32 FIREWALL
M: Gatien Chevallier <gatien.chevallier@foss.st.com>
S: Maintained
@@ -22395,7 +22560,6 @@ F: drivers/phy/starfive/phy-jh7110-dphy-rx.c
STARFIVE JH7110 DPHY TX DRIVER
M: Keith Zhao <keith.zhao@starfivetech.com>
-M: Shengyang Chen <shengyang.chen@starfivetech.com>
S: Supported
F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml
F: drivers/phy/starfive/phy-jh7110-dphy-tx.c
@@ -22685,7 +22849,7 @@ F: drivers/sh/
SUSPEND TO RAM
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <len.brown@intel.com>
-M: Pavel Machek <pavel@ucw.cz>
+M: Pavel Machek <pavel@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
@@ -23630,6 +23794,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*
+F: include/linux/pruss_driver.h
TI LM49xxx FAMILY ASoC CODEC DRIVERS
M: M R Swami Reddy <mr.swami.reddy@ti.com>
@@ -24524,7 +24689,11 @@ L: linux-media@vger.kernel.org
S: Maintained
W: http://www.ideasonboard.org/uvc/
T: git git://linuxtv.org/media.git
+F: Documentation/userspace-api/media/drivers/uvcvideo.rst
+F: Documentation/userspace-api/media/v4l/metafmt-uvc.rst
+F: drivers/media/common/uvc.c
F: drivers/media/usb/uvc/
+F: include/linux/usb/uvc.h
F: include/uapi/linux/uvcvideo.h
USB WEBCAM GADGET
@@ -25109,21 +25278,6 @@ F: include/uapi/linux/vsockmon.h
F: net/vmw_vsock/
F: tools/testing/vsock/
-VMA
-M: Andrew Morton <akpm@linux-foundation.org>
-M: Liam R. Howlett <Liam.Howlett@oracle.com>
-M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
-R: Jann Horn <jannh@google.com>
-L: linux-mm@kvack.org
-S: Maintained
-W: https://www.linux-mm.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
-F: mm/vma.c
-F: mm/vma.h
-F: mm/vma_internal.h
-F: tools/testing/vma/
-
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
@@ -25628,11 +25782,13 @@ F: arch/x86/entry/vdso/
XARRAY
M: Matthew Wilcox <willy@infradead.org>
L: linux-fsdevel@vger.kernel.org
+L: linux-mm@kvack.org
S: Supported
F: Documentation/core-api/xarray.rst
F: include/linux/idr.h
F: include/linux/xarray.h
F: lib/idr.c
+F: lib/test_xarray.c
F: lib/xarray.c
F: tools/testing/radix-tree
@@ -26112,7 +26268,7 @@ K: zstd
ZSWAP COMPRESSED SWAP CACHING
M: Johannes Weiner <hannes@cmpxchg.org>
-M: Yosry Ahmed <yosryahmed@google.com>
+M: Yosry Ahmed <yosry.ahmed@linux.dev>
M: Nhat Pham <nphamcs@gmail.com>
R: Chengming Zhou <chengming.zhou@linux.dev>
L: linux-mm@kvack.org
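
A note on the record format used throughout the MAINTAINERS hunks above, for readers unfamiliar with it (the authoritative description is the header of the MAINTAINERS file itself): M: is a maintainer, R: a designated reviewer, L: a mailing list, S: the support status, T:/W:/B:/C: point at the git tree, web page, bug tracker and chat channel, F: names files or directories (a trailing slash covers everything below it), X: excludes paths otherwise matched, N: is a regex matched against file names, and K: is a keyword regex matched against patch or file content. A purely hypothetical entry showing the shape:

	EXAMPLE WIDGET DRIVER (hypothetical entry, for illustration only)
	M:	Jane Doe <jane@example.org>
	L:	linux-example@vger.kernel.org
	S:	Maintained
	F:	drivers/misc/example-widget.c
	K:	example_widget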
diff --git a/Makefile b/Makefile
index 4117cc79748b..89628e354ca7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 13
+PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 4d7c46f50382..50c82187e60e 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
+#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
/*
* These are used to set parameters in the core dumps.
@@ -137,10 +137,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
-#define SET_PERSONALITY(EX) \
- set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
- ? PER_LINUX_32BIT : PER_LINUX)
-
extern int alpha_l1i_cacheshape;
extern int alpha_l1d_cacheshape;
extern int alpha_l2_cacheshape;
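
The two elf.h hunks above are halves of one change: the SET_PERSONALITY() override that granted PER_LINUX_32BIT to EF_ALPHA_32BIT binaries is gone, and elf_check_arch() now rejects such binaries outright. A minimal sketch of how a binfmt loader consumes that macro (not the kernel's exact code; the real check sits in fs/binfmt_elf.c):

	/* Hedged sketch: how a loader uses elf_check_arch(). With the
	 * hunk above, an Alpha binary carrying EF_ALPHA_32BIT now fails
	 * this test and exec returns ENOEXEC, instead of the old
	 * behaviour of running under a 31-bit address-limit personality. */
	static int example_check_binary(const struct elfhdr *hdr)
	{
		if (!elf_check_arch(hdr))
			return -ENOEXEC;	/* loader refuses the binary */
		return 0;
	}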
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 635f0a5f5bbd..02e8817a8921 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -360,7 +360,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
extern void paging_init(void);
-/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
+/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 55bb1c09fd39..5dce5518a211 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -8,23 +8,19 @@
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
-#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
-
/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
-#define STACK_TOP \
- (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+#define STACK_TOP (0x00120000000UL)
#define STACK_TOP_MAX 0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE \
- ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
/* This is dead. Everything has been moved to thread_info. */
struct thread_struct { };
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index ca3d9c732b61..6e577228e175 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -331,10 +331,7 @@ cia_prepare_tbia_workaround(int window)
long i;
/* Use minimal 1K map. */
- ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
- if (!ppte)
- panic("%s: Failed to allocate %u bytes align=0x%x\n",
- __func__, CIA_BROKEN_TBIA_SIZE, 32768);
+ ppte = memblock_alloc_or_panic(CIA_BROKEN_TBIA_SIZE, 32768);
pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b22248044bf0..b1bfbd11980d 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -81,10 +81,7 @@ mk_resource_name(int pe, int port, char *str)
char *name;
sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
- name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
- if (!name)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- strlen(tmp) + 1);
+ name = memblock_alloc_or_panic(strlen(tmp) + 1, SMP_CACHE_BYTES);
strcpy(name, tmp);
return name;
@@ -119,10 +116,7 @@ alloc_io7(unsigned int pe)
return NULL;
}
- io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
- if (!io7)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*io7));
+ io7 = memblock_alloc_or_panic(sizeof(*io7), SMP_CACHE_BYTES);
io7->pe = pe;
raw_spin_lock_init(&io7->irq_lock);
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 86185021f75a..a08e8edef1a4 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1210,8 +1210,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
return ret;
}
-/* Get an address range which is currently unmapped. Similar to the
- generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+/* Get an address range which is currently unmapped. */
static unsigned long
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
@@ -1230,13 +1229,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
- unsigned long limit;
-
- /* "32 bit" actually means 31 bit, since pointers sign extend. */
- if (current->personality & ADDR_LIMIT_32BIT)
- limit = 0x80000000;
- else
- limit = TASK_SIZE;
+ unsigned long limit = TASK_SIZE;
if (len > limit)
return -ENOMEM;
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 4458eb7f44f0..8e9b4ac86b7e 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -391,10 +391,7 @@ alloc_pci_controller(void)
{
struct pci_controller *hose;
- hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
- if (!hose)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*hose));
+ hose = memblock_alloc_or_panic(sizeof(*hose), SMP_CACHE_BYTES);
*hose_tail = hose;
hose_tail = &hose->next;
@@ -405,13 +402,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-
- if (!ptr)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
-
- return ptr;
+ return memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 7fcf3e9b7103..681f56089d9c 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -71,14 +71,8 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
if (align < mem_size)
align = mem_size;
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
+ arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES);
+ arena->ptes = memblock_alloc_or_panic(mem_size, align);
spin_lock_init(&arena->lock);
arena->hose = hose;
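
Several of the Alpha hunks above (core_cia.c, core_marvel.c, pci.c, pci_iommu.c) are the same mechanical conversion: the open-coded "allocate, check, panic" sequence becomes a single memblock_alloc_or_panic() call. The real definition lives in include/linux/memblock.h and may carry extra caller bookkeeping; the body below is an assumption, while the name and arguments come straight from the hunks:

	/* Hedged sketch of memblock_alloc_or_panic(): allocate @size bytes
	 * of boot memory at @align alignment, and panic rather than return
	 * NULL, so callers no longer need their own failure check. */
	static inline void *memblock_alloc_or_panic_sketch(phys_addr_t size,
							   phys_addr_t align)
	{
		void *ptr = memblock_alloc(size, align);

		if (!ptr)
			panic("%s: Failed to allocate %llu bytes\n", __func__,
			      (unsigned long long)size);
		return ptr;
	}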
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 9a238e7536ae..3d32165043f8 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -10,7 +10,6 @@
#include <linux/preempt.h>
#include <asm/fpu.h>
#include <asm/thread_info.h>
-#include <asm/fpu.h>
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 4fe618446e4c..61c2198b1359 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -42,7 +42,7 @@ pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ ret = __pgd_alloc(mm, 0);
init = pgd_offset(&init_mm, 0UL);
if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 62da5827f471..f27e6b90428e 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -18,6 +18,7 @@ config ARC
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select ARCH_32BIT_OFF_T
select BUILDTIME_TABLE_SORT
+ select GENERIC_BUILTIN_DTB
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_DIRECT_REMAP
@@ -550,11 +551,11 @@ config ARC_DBG_JUMP_LABEL
part of static keys (jump labels) related code.
endif
-config ARC_BUILTIN_DTB_NAME
+config BUILTIN_DTB_NAME
string "Built in DTB"
+ default "nsim_700"
help
- Set the name of the DTB to embed in the vmlinux binary
- Leaving it blank selects the "nsim_700" dtb.
+ Set the name of the DTB to embed in the vmlinux binary.
endmenu # "ARC Architecture Configuration"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index fb98478ed1ab..0c5e6e6314f2 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -82,9 +82,6 @@ KBUILD_CFLAGS += $(cflags-y)
KBUILD_AFLAGS += $(KBUILD_CFLAGS)
KBUILD_LDFLAGS += $(ldflags-y)
-# w/o this dtb won't embed into kernel binary
-core-y += arch/arc/boot/dts/
-
core-y += arch/arc/plat-sim/
core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 48704dfdf75c..ee5664f0640d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -1,13 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-# Built-in dtb
-builtindtb-y := nsim_700
-ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),)
- builtindtb-y := $(CONFIG_ARC_BUILTIN_DTB_NAME)
-endif
-
-obj-y += $(builtindtb-y).dtb.o
-dtb-y := $(builtindtb-y).dtb
+dtb-y := $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME))
# for CONFIG_OF_ALL_DTBS test
dtb- := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 319bbe270322..a7cd526dd7ca 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -23,7 +23,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS101=y
CONFIG_ARC_CACHE_LINE_SHIFT=5
-CONFIG_ARC_BUILTIN_DTB_NAME="axs101"
+CONFIG_BUILTIN_DTB_NAME="axs101"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 8c1f1a111a17..afa6a348f444 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -22,7 +22,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_BUILTIN_DTB_NAME="axs103"
+CONFIG_BUILTIN_DTB_NAME="axs103"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 75cab9f25b5b..2bfa6371953c 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -22,7 +22,7 @@ CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
-CONFIG_ARC_BUILTIN_DTB_NAME="axs103_idu"
+CONFIG_BUILTIN_DTB_NAME="axs103_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index 8c3ed5d6e6c3..3a1577112078 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -14,7 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
+CONFIG_BUILTIN_DTB_NAME="haps_hs"
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_COMPACTION is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 6fc98c1b9b36..a3cf940b1f5b 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -16,7 +16,7 @@ CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SMP=y
-CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"
+CONFIG_BUILTIN_DTB_NAME="haps_hs_idu"
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 9e79154b5535..1558e8e87767 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -20,7 +20,7 @@ CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
CONFIG_LINUX_LINK_BASE=0x90000000
CONFIG_LINUX_RAM_BASE=0x80000000
-CONFIG_ARC_BUILTIN_DTB_NAME="hsdk"
+CONFIG_BUILTIN_DTB_NAME="hsdk"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 51092c39e360..f8b3235d9a65 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -17,7 +17,7 @@ CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_ISA_ARCOMPACT=y
-CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
+CONFIG_BUILTIN_DTB_NAME="nsim_700"
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 70c17bca4939..ee45dc0877fb 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -19,7 +19,7 @@ CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
+CONFIG_BUILTIN_DTB_NAME="nsimosci"
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 59a3b6642fe7..e0a309970c20 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -19,7 +19,7 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
+CONFIG_BUILTIN_DTB_NAME="nsimosci_hs"
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 1419fc946a08..88325b8b49cf 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -16,7 +16,7 @@ CONFIG_MODULES=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
+CONFIG_BUILTIN_DTB_NAME="nsimosci_hs_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 5aba3d850fa2..865fbc19ef03 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -26,7 +26,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARC_PLAT_TB10X=y
CONFIG_ARC_CACHE_LINE_SHIFT=5
CONFIG_HZ=250
-CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+CONFIG_BUILTIN_DTB_NAME="abilis_tb100_dvk"
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 50c343913825..03d9ac20baa9 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
+CONFIG_BUILTIN_DTB_NAME="vdk_hs38"
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 6d9e1d9f71d2..c09488992f13 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,7 +15,7 @@ CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
+CONFIG_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 096b8ef58edb..dfae070fe8d5 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -53,19 +53,14 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ pgd_t *ret = __pgd_alloc(mm, 0);
if (ret) {
int num, num2;
- num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
- memzero(ret, num * sizeof(pgd_t));
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
num2 = VMALLOC_SIZE / PGDIR_SIZE;
memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
-
- memzero(ret + num + num2,
- (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
-
}
return ret;
}
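
Both pgd_alloc() conversions above (arch/alpha/mm/init.c earlier and arch/arc here) replace a bare __get_free_page(GFP_KERNEL | __GFP_ZERO) with the generic __pgd_alloc(mm, order); since the generic helper hands back zeroed pages, ARC can also drop its explicit memzero() calls. A rough sketch of what the helper amounts to — an assumption, as the real implementation in include/asm-generic/pgalloc.h goes through the page-table constructor/accounting path rather than __get_free_pages():

	/* Hedged sketch: allocate a zeroed pgd spanning 2^order pages for
	 * @mm. Arch code then only copies in the kernel mappings it needs,
	 * as the ARC hunk above still does with memcpy(). */
	static inline pgd_t *__pgd_alloc_sketch(struct mm_struct *mm,
						unsigned int order)
	{
		return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	}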
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index d2f5ceaaed1b..3b2d8b1bd271 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -200,7 +200,6 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
struct disasm_state state;
- char buf[TASK_COMM_LEN];
/* handle user mode only and only if enabled by sysadmin */
if (!user_mode(regs) || !unaligned_enabled)
@@ -212,11 +211,11 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
" performance significantly\n. To enable further"
" logging of such instances, please \n"
" echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
- get_task_comm(buf, current), task_pid_nr(current));
+ current->comm, task_pid_nr(current));
} else {
/* Add rate limiting if it gets down to it */
pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
- get_task_comm(buf, current), task_pid_nr(current),
+ current->comm, task_pid_nr(current),
address, regs->ret);
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 202397be76d8..835b5f100e92 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,8 +5,11 @@ config ARM
select ARCH_32BIT_OFF_T
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CACHE_LINE_SIZE if OF
select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ select ARCH_HAS_CRC32 if KERNEL_MODE_NEON
+ select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DMA_ALLOC if MMU
@@ -1751,5 +1754,3 @@ config ARCH_HIBERNATION_POSSIBLE
default y if ARCH_SUSPEND_POSSIBLE
endmenu
-
-source "arch/arm/Kconfig.assembler"
diff --git a/arch/arm/Kconfig.assembler b/arch/arm/Kconfig.assembler
deleted file mode 100644
index 5cb31aae1188..000000000000
--- a/arch/arm/Kconfig.assembler
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config AS_VFP_VMRS_FPINST
- def_bool $(as-instr,.fpu vfpv2\nvmrs r0$(comma)FPINST)
- help
- Supported by binutils >= 2.24 and LLVM integrated assembler.
diff --git a/arch/arm/boot/dts/allwinner/suniv-f1c100s-licheepi-nano.dts b/arch/arm/boot/dts/allwinner/suniv-f1c100s-licheepi-nano.dts
index 43896723a994..472ded0aafcf 100644
--- a/arch/arm/boot/dts/allwinner/suniv-f1c100s-licheepi-nano.dts
+++ b/arch/arm/boot/dts/allwinner/suniv-f1c100s-licheepi-nano.dts
@@ -62,6 +62,14 @@
status = "okay";
};
+&codec {
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Headphone", "HPCOM",
+ "MIC", "Mic";
+ status = "okay";
+};
+
&usb_otg {
dr_mode = "otg";
status = "okay";
diff --git a/arch/arm/boot/dts/allwinner/suniv-f1c100s.dtsi b/arch/arm/boot/dts/allwinner/suniv-f1c100s.dtsi
index 3c61d59ab5f8..e4b41bc93852 100644
--- a/arch/arm/boot/dts/allwinner/suniv-f1c100s.dtsi
+++ b/arch/arm/boot/dts/allwinner/suniv-f1c100s.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/suniv-ccu-f1c100s.h>
#include <dt-bindings/reset/suniv-ccu-f1c100s.h>
+#include <dt-bindings/dma/sun4i-a10.h>
/ {
#address-cells = <1>;
@@ -159,6 +160,15 @@
status = "disabled";
};
+ dma: dma-controller@1c02000 {
+ compatible = "allwinner,suniv-f1c100s-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <18>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
+ #dma-cells = <2>;
+ };
+
ccu: clock@1c20000 {
compatible = "allwinner,suniv-f1c100s-ccu";
reg = <0x01c20000 0x400>;
@@ -326,5 +336,19 @@
resets = <&ccu RST_BUS_UART2>;
status = "disabled";
};
+
+ codec: codec@1c23c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,suniv-f1c100s-codec";
+ reg = <0x01c23c00 0x400>;
+ interrupts = <21>;
+ clocks = <&ccu CLK_BUS_CODEC>, <&ccu CLK_CODEC>;
+ clock-names = "apb", "codec";
+ dmas = <&dma SUN4I_DMA_NORMAL 12>,
+ <&dma SUN4I_DMA_NORMAL 12>;
+ dma-names = "rx", "tx";
+ resets = <&ccu RST_BUS_CODEC>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/amlogic/meson.dtsi b/arch/arm/boot/dts/amlogic/meson.dtsi
index 8cb0fc78b2af..28ec2c821cdc 100644
--- a/arch/arm/boot/dts/amlogic/meson.dtsi
+++ b/arch/arm/boot/dts/amlogic/meson.dtsi
@@ -255,8 +255,6 @@
usb0: usb@c9040000 {
compatible = "snps,dwc2";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0xc9040000 0x40000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb0_phy>;
@@ -270,8 +268,6 @@
usb1: usb@c90c0000 {
compatible = "snps,dwc2";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0xc90c0000 0x40000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb1_phy>;
diff --git a/arch/arm/boot/dts/aspeed/Makefile b/arch/arm/boot/dts/aspeed/Makefile
index c4f064e4b073..2e5f4833a073 100644
--- a/arch/arm/boot/dts/aspeed/Makefile
+++ b/arch/arm/boot/dts/aspeed/Makefile
@@ -6,6 +6,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-amd-daytonax.dtb \
aspeed-bmc-amd-ethanolx.dtb \
aspeed-bmc-ampere-mtjade.dtb \
+ aspeed-bmc-ampere-mtjefferson.dtb \
aspeed-bmc-ampere-mtmitchell.dtb \
aspeed-bmc-arm-stardragon4800-rep2.dtb \
aspeed-bmc-asrock-e3c246d4i.dtb \
@@ -40,6 +41,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-ibm-rainier.dtb \
aspeed-bmc-ibm-rainier-1s4u.dtb \
aspeed-bmc-ibm-rainier-4u.dtb \
+ aspeed-bmc-ibm-sbp1.dtb \
aspeed-bmc-ibm-system1.dtb \
aspeed-bmc-intel-s2600wf.dtb \
aspeed-bmc-inspur-fp5280g2.dtb \
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjefferson.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjefferson.dts
new file mode 100644
index 000000000000..c435359a4bd9
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjefferson.dts
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Ampere Computing LLC.
+
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/gpio/aspeed-gpio.h>
+
+/ {
+ model = "Ampere Mt. Jefferson BMC";
+ compatible = "ampere,mtjefferson-bmc", "aspeed,ast2600";
+
+ aliases {
+ i2c20 = &i2c4_bus70_chn0;
+ i2c22 = &i2c4_bus70_chn2;
+
+ /*
+ * I2C OCP alias port
+ */
+ i2c30 = &ocpslot;
+
+ /*
+ * I2C NVMe alias port
+ */
+ i2c48 = &nvmeslot_0;
+ i2c49 = &nvmeslot_1;
+ i2c50 = &nvmeslot_2;
+ i2c51 = &nvmeslot_3;
+ i2c52 = &nvmeslot_4;
+ i2c53 = &nvmeslot_5;
+ i2c54 = &nvmeslot_6;
+ i2c55 = &nvmeslot_7;
+ i2c56 = &nvmeslot_8;
+ i2c57 = &nvmeslot_9;
+ i2c58 = &nvmeslot_10;
+ i2c59 = &nvmeslot_11;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: video {
+ size = <0x04000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ vga_memory: region@bf000000 {
+ no-map;
+ compatible = "shared-dma-pool";
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ };
+ };
+
+ voltage_mon_reg: voltage-mon-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "ltc2497_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-bmc-ready {
+ gpios = <&gpio0 ASPEED_GPIO(W, 5) (GPIO_ACTIVE_HIGH | GPIO_TRANSITORY)>;
+ };
+
+ led-sw-heartbeat {
+ gpios = <&gpio0 ASPEED_GPIO(N, 3) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-identify {
+ gpios = <&gpio0 ASPEED_GPIO(S, 3) GPIO_ACTIVE_HIGH>;
+ };
+
+ led-fault {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>,
+ <&adc_i2c_2 0>, <&adc_i2c_2 1>,
+ <&adc_i2c_2 2>, <&adc_i2c_2 3>,
+ <&adc_i2c_2 4>, <&adc_i2c_2 5>,
+ <&adc_i2c_2 6>, <&adc_i2c_2 7>,
+ <&adc_i2c_2 8>, <&adc_i2c_2 9>,
+ <&adc_i2c_2 10>, <&adc_i2c_2 11>,
+ <&adc_i2c_2 12>, <&adc_i2c_2 13>,
+ <&adc_i2c_2 14>, <&adc_i2c_2 15>,
+ <&adc_i2c_0 0>, <&adc_i2c_0 1>,
+ <&adc_i2c_0 2>, <&adc_i2c_0 3>,
+ <&adc_i2c_0 4>, <&adc_i2c_0 5>,
+ <&adc_i2c_0 6>, <&adc_i2c_0 7>,
+ <&adc_i2c_0 8>, <&adc_i2c_0 9>,
+ <&adc_i2c_0 10>, <&adc_i2c_0 11>,
+ <&adc_i2c_0 12>;
+ };
+};
+
+&mdio0 {
+ status = "okay";
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+};
+
+&mac0 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1_default>;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ use-ncsi;
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-64.dtsi"
+ };
+
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout-64-alt.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+ bus-frequency = <1000000>;
+ multi-master;
+ mctp-controller;
+
+ mctp@10 {
+ compatible = "mctp-i2c-controller";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
+ i2c-mux-idle-disconnect;
+
+ i2c4_bus70_chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c256";
+ reg = <0x52>;
+ pagesize = <32>;
+ };
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+ temperature-sensor@49 {
+ compatible = "ti,tmp75";
+ reg = <0x49>;
+ };
+ temperature-sensor@4a{
+ compatible = "ti,tmp75";
+ reg = <0x4a>;
+ };
+ temperature-sensor@4b {
+ compatible = "ti,tmp464";
+ reg = <0x4b>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0x0>;
+ status = "disabled";
+ };
+ channel@1 {
+ reg = <0x1>;
+ status = "disabled";
+ };
+ channel@2 {
+ reg = <0x2>;
+ status = "disabled";
+ };
+ channel@3 {
+ reg = <0x3>;
+ status = "disabled";
+ };
+ channel@4 {
+ reg = <0x4>;
+ };
+ };
+ temperature-sensor@4d {
+ compatible = "ti,tmp75";
+ reg = <0x4d>;
+ };
+ temperature-sensor@4e {
+ compatible = "ti,tmp75";
+ reg = <0x4e>;
+ };
+ temperature-sensor@4f {
+ compatible = "ti,tmp75";
+ reg = <0x4f>;
+ };
+ temperature-sensor@28 {
+ compatible = "nuvoton,nct7802";
+ reg = <0x28>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@1 { /* RTD1 */
+ reg = <1>;
+ sensor-type = "temperature";
+ temperature-mode = "thermistor";
+ };
+ };
+ adc_i2c_0: adc@14 {
+ compatible = "lltc,ltc2497";
+ reg = <0x14>;
+ vref-supply = <&voltage_mon_reg>;
+ #io-channel-cells = <1>;
+ };
+ };
+
+ i2c4_bus70_chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ adc_i2c_2: adc@14 {
+ compatible = "lltc,ltc2497";
+ reg = <0x14>;
+ vref-supply = <&voltage_mon_reg>;
+ #io-channel-cells = <1>;
+ };
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
+ i2c-mux-idle-disconnect;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ i2c-mux@71 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+
+ nvmeslot_8: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ };
+ nvmeslot_9: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ };
+ nvmeslot_10: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ };
+ nvmeslot_11: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+ };
+ };
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72>;
+ i2c-mux-idle-disconnect;
+
+ nvmeslot_4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4>;
+ };
+ nvmeslot_5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x5>;
+ };
+ nvmeslot_6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6>;
+ };
+ nvmeslot_7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7>;
+ };
+ };
+
+ i2c-mux@74 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ i2c-mux-idle-disconnect;
+
+ ocpslot: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ ocpslot_temp: temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0x0>;
+ status = "disabled";
+ };
+ channel@1 {
+ reg = <0x1>;
+ };
+ };
+ };
+
+ nvmeslot_0: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4>;
+ };
+ nvmeslot_1: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x5>;
+ };
+ nvmeslot_2: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6>;
+ };
+ nvmeslot_3: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7>;
+ };
+ };
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ temperature-sensor@4f {
+ compatible = "ti,tmp75";
+ reg = <0x4f>;
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ fan-controller@5c {
+ compatible = "onnn,adt7462";
+ reg = <0x5c>;
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ };
+
+ temperature-sensor@18 {
+ compatible = "jedec,jc-42.4-temp";
+ reg = <0x18>;
+ };
+
+ temperature-sensor@1a {
+ compatible = "jedec,jc-42.4-temp";
+ reg = <0x1a>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+};
+
+&i2c11 {
+ status = "okay";
+ ssif-bmc@10 {
+ compatible = "ssif-bmc";
+ reg = <0x10>;
+ };
+};
+
+&i2c14 {
+ status = "okay";
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+
+ bmc_ast2600_cpu: temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+};
+
+&i2c15 {
+ status = "okay";
+ gpio_expander1: gpio-expander@22 {
+ compatible = "nxp,pca9535";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "presence-ocp1","presence-ocp2",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","",
+ "","";
+ };
+};
+
+&adc0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
+ &pinctrl_adc2_default>;
+};
+
+&vhub {
+ status = "okay";
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","cpu-type-detect","i2c2-reset-n","i2c6-reset-n","i2c5-reset-n",
+ /*B0-B7*/ "","","","","host0-sysreset-n","host0-pmin-n","fru-rd-complete",
+ "chassis-id-sel",
+ /*C0-C7*/ "s0-vrd-fault-n","","bmc-debug-mode","","cpld-3v3-irq-n","","vrd-sel",
+ "spd-sel",
+ /*D0-D7*/ "presence-ps0","presence-ps1","hsc-12vmain-alt2-n","ext-high-temp-n",
+ "","","","",
+ /*E0-E7*/ "eth-phy-rst-n","eth-phy-int-n","","","","","","",
+ /*F0-F7*/ "s0-pcp-oc-warn-n","","power-chassis-control",
+ "cpu-bios-recover","s0-heartbeat","hs-scout-proc-hot","s0-vr-hot-n","",
+ /*G0-G7*/ "","","hsc-12vmain-alt1-n","","","bp-cpld-program-en","led-fp-sta-gr",
+ "led-fp-sta-amb",
+ /*H0-H7*/ "jtag-program-sel","jtag-cmpl2","wd-disable-n","power-chassis-good","","",
+ "","",
+ /*I0-I7*/ "","","","","","","power-button","rtc-battery-voltage-read-enable",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","reset-button","","","",
+ /*M0-M7*/ "nmi-n","s0-ddr-save","soc-spi-nor-access","presence-cpu0","s0-rtc-lock",
+ "","","",
+ /*N0-N7*/ "hpm-fw-recovery","hpm-stby-rst-n","jtag-sel-s0","led-sw-hb",
+ "jtag-dbgr-prsnt-n","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "ps0-ac-loss-n","ps1-ac-loss-n","","","led-fault","user-mode","jtag-srst-n",
+ "led-bmc-hb",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","identify-button","led-identify","","spi-nor-access","host0-ready","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "s0-hightemp-n","s0-fault-alert","s0-sys-auth-failure-n",
+ "host0-reboot-ack-n","s0-fw-boot-ok","host0-shd-req-n",
+ "host0-shd-ack-n","s0-overtemp-n",
+ /*W0-W7*/ "ocp-aux-pwren","ocp-main-pwren","ocp-pgood","",
+ "bmc-ok","bmc-ready","spi0-program-sel","spi0-backup-sel",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","vrd-prg-en-n","","","","host0-special-boot",
+ /*Z0-Z7*/ "","ps0-pgood","ps1-pgood","","","","","";
+
+ ocp-aux-pwren-hog {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(W, 0) GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "ocp-aux-pwren";
+ };
+
+};
+
+&gpio1 {
+ gpio-line-names =
+ /*18A0-18A7*/ "","","","","","","","",
+ /*18B0-18B7*/ "","","","","s0-soc-pgood","vga-ft-press-n","emmc-rst-n","s01-uart1-sel",
+ /*18C0-18C7*/ "uart1-mode0","uart1-mode1","uart2-mode0","uart2-mode1",
+ "","","","",
+ /*18D0-18D7*/ "","","","","","","","",
+ /*18E0-18E3*/ "","","","";
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
index 0295f5adcfbc..2b336aa0146d 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts
@@ -677,6 +677,12 @@
#size-cells = <0>;
#gpio-cells = <2>;
+ gpio-line-names =
+ "ext-vref-sel","","presence-hdd-bp5-n","presence-hdd-bp6-n",
+ "","bmc-riser-en-n","bmc-ocp1-en-n","bmc-ocp0-en-n",
+ "","","","",
+ "","","","";
+
bmc-ocp0-en-hog {
gpio-hog;
gpios = <7 GPIO_ACTIVE_LOW>;
@@ -684,6 +690,16 @@
line-name = "bmc-ocp0-en-n";
};
};
+
+ fan-controller0@20 {
+ compatible = "maxim,max31790";
+ reg = <0x20>;
+ };
+
+ fan-controller1@2f {
+ compatible = "maxim,max31790";
+ reg = <0x2f>;
+ };
};
&i2c9 {
@@ -958,7 +974,7 @@
"fan-fault","psu-fault",
"","",
"","",
- "","",
+ "gpi0","gpi1",
"","",
"","",
"","",
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts
index 82835e96317d..c151984289bc 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts
@@ -50,14 +50,14 @@
i2c45 = &i2c0mux5ch1;
i2c46 = &i2c0mux5ch2;
i2c47 = &i2c0mux5ch3;
- i2c48 = &i2c30mux0ch0;
- i2c49 = &i2c30mux0ch1;
- i2c50 = &i2c30mux0ch2;
- i2c51 = &i2c30mux0ch3;
- i2c52 = &i2c30mux0ch4;
- i2c53 = &i2c30mux0ch5;
- i2c54 = &i2c30mux0ch6;
- i2c55 = &i2c30mux0ch7;
+ i2c48 = &i2c5mux0ch0;
+ i2c49 = &i2c5mux0ch1;
+ i2c50 = &i2c5mux0ch2;
+ i2c51 = &i2c5mux0ch3;
+ i2c52 = &i2c5mux0ch4;
+ i2c53 = &i2c5mux0ch5;
+ i2c54 = &i2c5mux0ch6;
+ i2c55 = &i2c5mux0ch7;
};
chosen {
@@ -153,6 +153,13 @@
status = "okay";
};
+&mac2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ncsi3_default>;
+ use-ncsi;
+};
+
&mac3 {
status = "okay";
pinctrl-names = "default";
@@ -185,6 +192,7 @@
reg = <0x71>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux0ch0: i2c@0 {
#address-cells = <1>;
@@ -213,6 +221,7 @@
reg = <0x72>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux1ch0: i2c@0 {
#address-cells = <1>;
@@ -242,79 +251,6 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- i2c-mux@70 {
- compatible = "nxp,pca9548";
- reg = <0x70>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c30mux0ch0: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- };
- i2c30mux0ch1: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
- i2c30mux0ch2: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- };
- i2c30mux0ch3: i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
- };
- i2c30mux0ch4: i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
- };
- i2c30mux0ch5: i2c@5 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <5>;
- };
- i2c30mux0ch6: i2c@6 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <6>;
- // HDD FRU EEPROM
- eeprom@52 {
- compatible = "atmel,24c64";
- reg = <0x52>;
- };
- };
- i2c30mux0ch7: i2c@7 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <7>;
-
- power-sensor@40 {
- compatible = "ti,ina230";
- reg = <0x40>;
- shunt-resistor = <2000>;
- };
- power-sensor@41 {
- compatible = "ti,ina230";
- reg = <0x41>;
- shunt-resistor = <2000>;
- };
- power-sensor@44 {
- compatible = "ti,ina230";
- reg = <0x44>;
- shunt-resistor = <2000>;
- };
- power-sensor@45 {
- compatible = "ti,ina230";
- reg = <0x45>;
- shunt-resistor = <2000>;
- };
- };
- };
};
i2c0mux1ch3: i2c@3 {
#address-cells = <1>;
@@ -328,6 +264,7 @@
reg = <0x73>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux2ch0: i2c@0 {
#address-cells = <1>;
@@ -356,6 +293,7 @@
reg = <0x75>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux3ch0: i2c@0 {
#address-cells = <1>;
@@ -384,6 +322,7 @@
reg = <0x76>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux4ch0: i2c@0 {
#address-cells = <1>;
@@ -426,6 +365,7 @@
reg = <0x77>;
#address-cells = <1>;
#size-cells = <0>;
+ i2c-mux-idle-disconnect;
i2c0mux5ch0: i2c@0 {
#address-cells = <1>;
@@ -512,12 +452,12 @@
power-monitor@42 {
compatible = "lltc,ltc4287";
reg = <0x42>;
- shunt-resistor-micro-ohms = <200>;
+ shunt-resistor-micro-ohms = <100>;
};
power-monitor@43 {
compatible = "lltc,ltc4287";
reg = <0x43>;
- shunt-resistor-micro-ohms = <200>;
+ shunt-resistor-micro-ohms = <100>;
};
};
i2c1mux0ch5: i2c@5 {
@@ -593,8 +533,6 @@
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
- interrupt-parent = <&gpio0>;
- interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
};
// Module 1 IOEXP
@@ -603,8 +541,6 @@
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
- interrupt-parent = <&gpio0>;
- interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
};
// HMC IOEXP
@@ -613,8 +549,6 @@
reg = <0x27>;
gpio-controller;
#gpio-cells = <2>;
- interrupt-parent = <&gpio0>;
- interrupts = <ASPEED_GPIO(B, 4) IRQ_TYPE_LEVEL_LOW>;
};
// Module 0 EEPROM
@@ -640,6 +574,81 @@
&i2c5 {
status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ i2c5mux0ch0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+ i2c5mux0ch1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c5mux0ch2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ i2c5mux0ch3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ i2c5mux0ch4: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+ i2c5mux0ch5: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+ i2c5mux0ch6: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ // HDD FRU EEPROM
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+ i2c5mux0ch7: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+
+ power-sensor@40 {
+ compatible = "ti,ina230";
+ reg = <0x40>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@41 {
+ compatible = "ti,ina230";
+ reg = <0x41>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@44 {
+ compatible = "ti,ina230";
+ reg = <0x44>;
+ shunt-resistor = <2000>;
+ };
+ power-sensor@45 {
+ compatible = "ti,ina230";
+ reg = <0x45>;
+ shunt-resistor = <2000>;
+ };
+ };
+ };
};
&i2c6 {
@@ -834,9 +843,9 @@
};
// OCP NIC1 FRU EEPROM
- eeprom@50 {
+ eeprom@52 {
compatible = "atmel,24c64";
- reg = <0x50>;
+ reg = <0x52>;
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
index cf3f807a38fe..9cb511a846e3 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts
@@ -135,10 +135,6 @@
use-ncsi;
};
-&rtc {
- status = "okay";
-};
-
&fmc {
status = "okay";
@@ -397,12 +393,6 @@
reg = <0x31>;
gpio-controller;
#gpio-cells = <2>;
-
- gpio-line-names =
- "","","","",
- "","","presence-cmm","",
- "","","","",
- "","","","";
};
// PTTV FRU
@@ -426,12 +416,6 @@
reg = <0x31>;
gpio-controller;
#gpio-cells = <2>;
-
- gpio-line-names =
- "","","","",
- "","","presence-cmm","",
- "","","","",
- "","","","";
};
// Aegis FRU
@@ -506,6 +490,11 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
};
};
};
@@ -565,7 +554,7 @@
/*B0-B7*/ "","","","",
"bmc-spi-mux-select-0","led-identify","","",
/*C0-C7*/ "reset-cause-platrst","","","","",
- "cpu0-err-alert","","",
+ "power-hsc-good","power-chassis-good","",
/*D0-D7*/ "","","sol-uart-select","","","","","",
/*E0-E7*/ "","","","","","","","",
/*F0-F7*/ "","","","","","","","",
@@ -584,14 +573,16 @@
/*O0-O7*/ "","","","","","","","",
/*P0-P7*/ "power-button","power-host-control",
"reset-button","","led-power","","","",
- /*Q0-Q7*/ "","","","","","power-chassis-control","","",
+ /*Q0-Q7*/
+ "","","","",
+ "","power-chassis-control","","uart-switch-button",
/*R0-R7*/ "","","","","","","","",
/*S0-S7*/ "","","","","","","","",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","led-identify-gate","",
/*V0-V7*/ "","","","",
"rtc-battery-voltage-read-enable","",
- "power-chassis-good","",
+ "","",
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
@@ -672,7 +663,7 @@
"presence-asic-modules-0","rt-cpu0-p1-force-enable",
"presence-asic-modules-1","bios-debug-msg-disable",
"","uart-control-buffer-select",
- "","ac-control-n",
+ "presence-cmm","ac-control-n",
/*G0-G3 line 96-103*/
"FM_CPU_CORETYPE2","",
"FM_CPU_CORETYPE1","",
@@ -684,7 +675,7 @@
"FM_BOARD_REV_ID2","",
"FM_BOARD_REV_ID1","",
/*H0-H3 line 112-119*/
- "FM_BOARD_REV_ID0","",
+ "FM_BOARD_REV_ID0","reset-control-cmos-clear",
"","","","","","",
/*H4-H7 line 120-127*/
"","",
@@ -699,7 +690,7 @@
/*I4-I7 line 136-143*/
"","","","","","","","",
/*J0-J3 line 144-151*/
- "","","","","","","","",
+ "","","power-card-enable","","","","","",
/*J4-J7 line 152-159*/
"SLOT_ID_BCB_0","",
"SLOT_ID_BCB_1","",
@@ -715,9 +706,15 @@
"cpu0-thermtrip-alert","",
"reset-cause-pcie","",
/*L4-L7 line 184-191*/
- "pvdd11-ocp-alert","","","","","","","",
+ "pvdd11-ocp-alert","",
+ "power-fault-n","",
+ "asic0-card-type-detection0-n","",
+ "asic0-card-type-detection1-n","",
/*M0-M3 line 192-199*/
- "","","","","","","","",
+ "asic0-card-type-detection2-n","",
+ "uart-switch-lsb","",
+ "uart-switch-msb","",
+ "","",
/*M4-M7 line 200-207*/
"","","","","","","","",
/*N0-N3 line 208-215*/
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
index 41e2246cfbd1..ef96b17becb2 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts
@@ -23,6 +23,32 @@
i2c19 = &imux19;
i2c20 = &imux20;
i2c21 = &imux21;
+ i2c22 = &imux22;
+ i2c23 = &imux23;
+ i2c24 = &imux24;
+ i2c25 = &imux25;
+ i2c26 = &imux26;
+ i2c27 = &imux27;
+ i2c28 = &imux28;
+ i2c29 = &imux29;
+ i2c30 = &imux30;
+ i2c31 = &imux31;
+ i2c32 = &imux32;
+ i2c33 = &imux33;
+ i2c34 = &imux34;
+ i2c35 = &imux35;
+ i2c36 = &imux36;
+ i2c37 = &imux37;
+ i2c38 = &imux38;
+ i2c39 = &imux39;
+ i2c40 = &imux40;
+ i2c41 = &imux41;
+ i2c42 = &imux42;
+ i2c43 = &imux43;
+ i2c44 = &imux44;
+ i2c45 = &imux45;
+ i2c46 = &imux46;
+ i2c47 = &imux47;
spi1 = &spi_gpio;
};
@@ -75,6 +101,11 @@
gpios = <&leds_gpio 10 GPIO_ACTIVE_LOW>;
default-state = "off";
};
+
+ led-5 {
+ label = "bmc_ready_noled";
+ gpios = <&sgpiom0 141 (GPIO_ACTIVE_HIGH|GPIO_TRANSITORY)>;
+ };
};
spi_gpio: spi {
@@ -182,6 +213,63 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <238 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "PWRGD_P24V_SMPWROK", "P1V5_PWROK",
+ "P3V3_PWROK", "P5V_PWROK",
+ "P12V_SCM_PWROK", "P12V_PWROK",
+ "P24V_PWROK", "P48V_HSC_PWROK",
+ "ERR_GPIO_IRQ", "TMP75_ALERT_N",
+ "BMC_PWROK", "P12V_INA230_ALERT_N",
+ "P24V_INA230_ALERT_N","",
+ "P48V_HSC_ALERT_N", "P1V05_PWROK";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <240 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P1V05_PWR_FAIL", "P1V5_PWR_FAIL",
+ "P24V_PWR_FAIL", "P24V_SM_PWR_FAIL",
+ "IRQ_NW0/1/2_N", "IRQ_NW3/4/5_N",
+ "RTC_INT_N_R", "ERR_GPIO_IRQ",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <242 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "", "",
+ "", "",
+ "", "",
+ "", "",
+ "RACKMON_A_1", "RACKMON_A_2",
+ "RACKMON_B_1", "RACKMON_B_2",
+ "", "",
+ "", "";
+ };
};
&i2c1 {
@@ -213,10 +301,11 @@
#size-cells = <0>;
i2c-mux-idle-disconnect;
- imux16: i2c@0 {
+ // FCB 1
+ imux16: i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <0>;
+ reg = <1>;
eeprom@50 {
compatible = "atmel,24c128";
@@ -258,12 +347,88 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
- };
- imux17: i2c@1 {
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <218 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN1_PWRGD_R", "P48V_FAN2_PWRGD_R",
+ "P48V_FAN3_PWRGD_R", "P48V_FAN4_PWRGD_R",
+ "FCB_1_P48V_ZONE0_PWRGD_R", "FCB_1_P48V_ZONE1_PWRGD_R",
+ "FCB_1_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <218 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN1_ALERT_N", "INA238_FAN2_ALERT_N",
+ "INA238_FAN3_ALERT_N", "INA238_FAN4_ALERT_N",
+ "FCB_1_TMP75_ALERT_N", "",
+ "", "",
+ "FAN1_PRSNT", "FAN2_PRSNT",
+ "FAN3_PRSNT", "FAN4_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <218 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN1_IL_TACH_ALERT", "FAN1_OL_TACH_ALERT",
+ "FAN2_IL_TACH_ALERT", "FAN2_OL_TACH_ALERT",
+ "FAN3_IL_TACH_ALERT", "FAN3_OL_TACH_ALERT",
+ "FAN4_IL_TACH_ALERT", "FAN4_IL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <218 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_1_P1V0_POWER_FAIL", "FCB_1_P1V8_POWER_FAIL",
+ "FCB_1_P48V_ZONE0_POWER_FAIL", "FAN1_POWER_FAIL",
+ "FAN2_POWER_FAIL", "FAN3_POWER_FAIL",
+ "FAN4_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+ // FCB 2
+ imux17: i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <1>;
+ reg = <0>;
eeprom@50 {
compatible = "atmel,24c128";
@@ -305,12 +470,88 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
- };
- imux18: i2c@2 {
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <220 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN5_PWRGD_R", "P48V_FAN6_PWRGD_R",
+ "P48V_FAN7_PWRGD_R", "P48V_FAN8_PWRGD_R",
+ "FCB_2_P48V_ZONE0_PWRGD_R", "FCB_2_P48V_ZONE1_PWRGD_R",
+ "FCB_2_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <220 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN5_ALERT_N", "INA238_FAN6_ALERT_N",
+ "INA238_FAN7_ALERT_N", "INA238_FAN8_ALERT_N",
+ "FCB_2_TMP75_ALERT_N", "",
+ "", "",
+ "FAN5_PRSNT", "FAN6_PRSNT",
+ "FAN7_PRSNT", "FAN8_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <220 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN5_IL_TACH_ALERT", "FAN5_OL_TACH_ALERT",
+ "FAN6_IL_TACH_ALERT", "FAN6_OL_TACH_ALERT",
+ "FAN7_IL_TACH_ALERT", "FAN7_OL_TACH_ALERT",
+ "FAN8_IL_TACH_ALERT", "FAN8_IL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <220 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_2_P1V0_POWER_FAIL", "FCB_2_P1V8_POWER_FAIL",
+ "FCB_2_P48V_ZONE0_POWER_FAIL", "FAN5_POWER_FAIL",
+ "FAN6_POWER_FAIL", "FAN7_POWER_FAIL",
+ "FAN8_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+ // FCB 3
+ imux18: i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <2>;
+ reg = <3>;
eeprom@50 {
compatible = "atmel,24c128";
@@ -352,12 +593,88 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
- };
- imux19: i2c@3 {
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <230 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN9_PWRGD_R", "P48V_FAN10_PWRGD_R",
+ "P48V_FAN11_PWRGD_R", "P48V_FAN12_PWRGD_R",
+ "FCB_3_P48V_ZONE0_PWRGD_R", "FCB_3_P48V_ZONE1_PWRGD_R",
+ "FCB_3_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <230 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN9_ALERT_N", "INA238_FAN10_ALERT_N",
+ "INA238_FAN11_ALERT_N", "INA238_FAN12_ALERT_N",
+ "FCB_3_TMP75_ALERT_N", "",
+ "", "",
+ "FAN9_PRSNT", "FAN10_PRSNT",
+ "FAN11_PRSNT", "FAN12_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <230 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN9_IL_TACH_ALERT", "FAN9_OL_TACH_ALERT",
+ "FAN10_IL_TACH_ALERT", "FAN10_OL_TACH_ALERT",
+ "FAN11_IL_TACH_ALERT", "FAN11_OL_TACH_ALERT",
+ "FAN12_IL_TACH_ALERT", "FAN12_IL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <230 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_3_P1V0_POWER_FAIL", "FCB_3_P1V8_POWER_FAIL",
+ "FCB_3_P48V_ZONE0_POWER_FAIL", "FAN9_POWER_FAIL",
+ "FAN10_POWER_FAIL", "FAN11_POWER_FAIL",
+ "FAN12_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+ // FCB 4
+ imux19: i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <3>;
+ reg = <2>;
eeprom@50 {
compatible = "atmel,24c128";
@@ -399,9 +716,85 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
- };
- imux20: i2c@5 {
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <232 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN13_PWRGD_R", "P48V_FAN14_PWRGD_R",
+ "P48V_FAN15_PWRGD_R", "P48V_FAN16_PWRGD_R",
+ "FCB_4_P48V_ZONE0_PWRGD_R", "FCB_4_P48V_ZONE1_PWRGD_R",
+ "FCB_4_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <232 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN13_ALERT_N", "INA238_FAN14_ALERT_N",
+ "INA238_FAN15_ALERT_N", "INA238_FAN16_ALERT_N",
+ "FCB_4_TMP75_ALERT_N", "",
+ "", "",
+ "FAN13_PRSNT", "FAN14_PRSNT",
+ "FAN15_PRSNT", "FAN16_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <232 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN13_IL_TACH_ALERT", "FAN13_OL_TACH_ALERT",
+ "FAN14_IL_TACH_ALERT", "FAN14_OL_TACH_ALERT",
+ "FAN15_IL_TACH_ALERT", "FAN15_OL_TACH_ALERT",
+ "FAN16_IL_TACH_ALERT", "FAN16_IL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <232 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_4_P1V0_POWER_FAIL", "FCB_4_P1V8_POWER_FAIL",
+ "FCB_4_P48V_ZONE0_POWER_FAIL", "FAN13_POWER_FAIL",
+ "FAN14_POWER_FAIL", "FAN15_POWER_FAIL",
+ "FAN16_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+ // FCB 5
+ imux20: i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
@@ -445,9 +838,85 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
- };
- imux21: i2c@4 {
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <254 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN20_PWRGD_R", "P48V_FAN19_PWRGD_R",
+ "P48V_FAN18_PWRGD_R", "P48V_FAN17_PWRGD_R",
+ "FCB_5_P48V_ZONE0_PWRGD_R", "FCB_5_P48V_ZONE1_PWRGD_R",
+ "FCB_5_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <254 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN20_ALERT_N", "INA238_FAN19_ALERT_N",
+ "INA238_FAN18_ALERT_N", "INA238_FAN17_ALERT_N",
+ "FCB_5_TMP75_ALERT_N", "",
+ "", "",
+ "FAN20_PRSNT", "FAN19_PRSNT",
+ "FAN18_PRSNT", "FAN17_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <254 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN20_IL_TACH_ALERT", "FAN20_OL_TACH_ALERT",
+ "FAN19_IL_TACH_ALERT", "FAN19_OL_TACH_ALERT",
+ "FAN18_IL_TACH_ALERT", "FAN18_OL_TACH_ALERT",
+ "FAN17_IL_TACH_ALERT", "FAN17_OL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <254 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_5_P1V0_POWER_FAIL", "FCB_5_P1V8_POWER_FAIL",
+ "FCB_5_P48V_ZONE0_POWER_FAIL", "FAN20_POWER_FAIL",
+ "FAN19_POWER_FAIL", "FAN18_POWER_FAIL",
+ "FAN17_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+ // FCB 6
+ imux21: i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
@@ -491,24 +960,316 @@
compatible = "ti,tmp75";
reg = <0x4b>;
};
+
+ gpio@11 {
+ compatible = "nxp,pca9555";
+ reg = <0x11>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <252 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "P48V_FAN24_PWRGD_R", "P48V_FAN23_PWRGD_R",
+ "P48V_FAN22_PWRGD_R", "P48V_FAN21_PWRGD_R",
+ "FCB_6_P48V_ZONE0_PWRGD_R", "FCB_6_P48V_ZONE1_PWRGD_R",
+ "FCB_6_PWRGD_P3V3_R", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@12 {
+ compatible = "nxp,pca9555";
+ reg = <0x12>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <252 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "INA238_FAN24_ALERT_N", "INA238_FAN23_ALERT_N",
+ "INA238_FAN22_ALERT_N", "INA238_FAN21_ALERT_N",
+ "FCB_6_TMP75_ALERT_N", "",
+ "", "",
+ "FAN24_PRSNT", "FAN23_PRSNT",
+ "FAN22_PRSNT", "FAN21_PRSNT",
+ "", "",
+ "", "";
+ };
+
+ gpio@13 {
+ compatible = "nxp,pca9555";
+ reg = <0x13>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <252 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FAN24_IL_TACH_ALERT", "FAN24_OL_TACH_ALERT",
+ "FAN23_IL_TACH_ALERT", "FAN23_OL_TACH_ALERT",
+ "FAN22_IL_TACH_ALERT", "FAN22_OL_TACH_ALERT",
+ "FAN21_IL_TACH_ALERT", "FAN21_OL_TACH_ALERT",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+
+ gpio@17 {
+ compatible = "nxp,pca9555";
+ reg = <0x17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&sgpiom0>;
+ interrupts = <252 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-line-names =
+ "FCB_6_P1V0_POWER_FAIL", "FCB_6_P1V8_POWER_FAIL",
+ "FCB_6_P48V_ZONE0_POWER_FAIL", "FAN24_POWER_FAIL",
+ "FAN23_POWER_FAIL", "FAN22_POWER_FAIL",
+ "FAN21_POWER_FAIL", "",
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+
+ imux22: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ imux23: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
};
};
};
&i2c3 {
status = "okay";
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9545";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux24: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux25: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux26: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux27: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ };
};
&i2c4 {
status = "okay";
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9545";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux28: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux29: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux30: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux31: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ };
};
&i2c5 {
status = "okay";
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9545";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux32: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux33: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux34: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux35: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ };
};
&i2c6 {
status = "okay";
+
+ i2c-mux@72 {
+ compatible = "nxp,pca9545";
+ reg = <0x72>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux36: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux37: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux38: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux39: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+ };
};
&i2c7 {
@@ -531,14 +1292,111 @@
compatible = "nxp,pcf8563";
reg = <0x51>;
};
+
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
};
&i2c12 {
status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux40: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux41: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux42: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux43: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
};
&i2c13 {
status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imux44: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux45: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux46: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ imux47: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
};
&i2c14 {
@@ -627,7 +1485,6 @@
gpio-line-names =
/*"input pin","output pin"*/
/*A0 - A7*/
- "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN_N",
"PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN_N",
"PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN_N",
"PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN_N",
@@ -635,8 +1492,8 @@
"PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN_N",
"PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN_N",
"PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN_N",
- /*B0 - B7*/
"PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN_N",
+ /*B0 - B7*/
"PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN_N",
"PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN_N",
"PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN_N",
@@ -644,80 +1501,80 @@
"PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN_N",
"PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN_N",
"PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN_N",
+ "PRSNT_MTIA_BLADE16_N","PWREN_MTIA_BLADE16_EN_N",
/*C0 - C7*/
- "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN_N",
"PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN_N",
"PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN_N",
"PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN_N",
"PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN_N",
"PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN_N",
- "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN_N",
- "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN_N",
+ "PRSNT_NW_BLADE6_N","PWREN_NW_BLADE6_EN_N",
+ "PRSNT_FCB_1_N","PWREN_MTIA_BLADE1_HSC_EN_N",
+ "PRSNT_FCB_2_N","PWREN_MTIA_BLADE2_HSC_EN_N",
/*D0 - D7*/
- "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN_N",
- "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN_N",
- "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE4_HSC_EN_N",
- "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE5_HSC_EN_N",
- "PWRGD_MTIA_BLADE0_PWROK_N","PWREN_MTIA_BLADE6_HSC_EN_N",
+ "PRSNT_FCB_3_N","PWREN_MTIA_BLADE3_HSC_EN_N",
+ "PRSNT_FCB_4_N","PWREN_MTIA_BLADE4_HSC_EN_N",
+ "PRSNT_FCB_6_N","PWREN_MTIA_BLADE5_HSC_EN_N",
+ "PRSNT_FCB_5_N","PWREN_MTIA_BLADE6_HSC_EN_N",
"PWRGD_MTIA_BLADE1_PWROK_N","PWREN_MTIA_BLADE7_HSC_EN_N",
"PWRGD_MTIA_BLADE2_PWROK_N","PWREN_MTIA_BLADE8_HSC_EN_N",
"PWRGD_MTIA_BLADE3_PWROK_N","PWREN_MTIA_BLADE9_HSC_EN_N",
- /*E0 - E7*/
"PWRGD_MTIA_BLADE4_PWROK_N","PWREN_MTIA_BLADE10_HSC_EN_N",
+ /*E0 - E7*/
"PWRGD_MTIA_BLADE5_PWROK_N","PWREN_MTIA_BLADE11_HSC_EN_N",
"PWRGD_MTIA_BLADE6_PWROK_N","PWREN_MTIA_BLADE12_HSC_EN_N",
"PWRGD_MTIA_BLADE7_PWROK_N","PWREN_MTIA_BLADE13_HSC_EN_N",
"PWRGD_MTIA_BLADE8_PWROK_N","PWREN_MTIA_BLADE14_HSC_EN_N",
"PWRGD_MTIA_BLADE9_PWROK_N","PWREN_MTIA_BLADE15_HSC_EN_N",
- "PWRGD_MTIA_BLADE10_PWROK_N","PWREN_NW_BLADE0_HSC_EN_N",
+ "PWRGD_MTIA_BLADE10_PWROK_N","PWREN_MTIA_BLADE16_HSC_EN_N",
"PWRGD_MTIA_BLADE11_PWROK_N","PWREN_NW_BLADE1_HSC_EN_N",
- /*F0 - F7*/
"PWRGD_MTIA_BLADE12_PWROK_N","PWREN_NW_BLADE2_HSC_EN_N",
+ /*F0 - F7*/
"PWRGD_MTIA_BLADE13_PWROK_N","PWREN_NW_BLADE3_HSC_EN_N",
"PWRGD_MTIA_BLADE14_PWROK_N","PWREN_NW_BLADE4_HSC_EN_N",
"PWRGD_MTIA_BLADE15_PWROK_N","PWREN_NW_BLADE5_HSC_EN_N",
- "PWRGD_NW_BLADE0_PWROK_N","PWREN_FCB_TOP_0_EN_N",
- "PWRGD_NW_BLADE1_PWROK_N","PWREN_FCB_TOP_1_EN_N",
- "PWRGD_NW_BLADE2_PWROK_N","PWREN_FCB_MIDDLE_0_EN_N",
- "PWRGD_NW_BLADE3_PWROK_N","PWREN_FCB_MIDDLE_1_EN_N",
+ "PWRGD_MTIA_BLADE16_PWROK_N","PWREN_NW_BLADE6_HSC_EN_N",
+ "PWRGD_NW_BLADE1_PWROK_N","PWREN_SGPIO_FCB_2_EN_N",
+ "PWRGD_NW_BLADE2_PWROK_N","PWREN_SGPIO_FCB_1_EN_N",
+ "PWRGD_NW_BLADE3_PWROK_N","PWREN_SGPIO_FCB_4_EN_N",
+ "PWRGD_NW_BLADE4_PWROK_N","PWREN_SGPIO_FCB_3_EN_N",
/*G0 - G7*/
- "PWRGD_NW_BLADE4_PWROK_N","PWREN_FCB_BOTTOM_1_EN_N",
- "PWRGD_NW_BLADE5_PWROK_N","PWREN_FCB_BOTTOM_0_EN_N",
- "PWRGD_FCB_TOP_0_PWROK_N","FM_CMM_AC_CYCLE_N",
- "PWRGD_FCB_TOP_1_PWROK_N","MGMT_SFP_TX_DIS",
- "PWRGD_FCB_MIDDLE_0_PWROK_N","FM_MDIO_SW_SEL",
- "PWRGD_FCB_MIDDLE_1_PWROK_N","FM_P24V_SMPWR_EN",
- "PWRGD_FCB_BOTTOM_1_PWROK_N","",
- "PWRGD_FCB_BOTTOM_0_PWROK_N","",
+ "PWRGD_NW_BLADE5_PWROK_N","PWREN_SGPIO_FCB_5_EN_N",
+ "PWRGD_NW_BLADE6_PWROK_N","PWREN_SGPIO_FCB_6_EN_N",
+ "PWRGD_FCB_1","FM_BMC_RST_RTCRST_R",
+ "PWRGD_FCB_2","",
+ "PWRGD_FCB_3","FM_MDIO_SW_SEL",
+ "PWRGD_FCB_4","FM_P24V_SMPWR_EN",
+ "PWRGD_FCB_6","",
+ "PWRGD_FCB_5","",
/*H0 - H7*/
- "LEAK_DETECT_MTIA_BLADE0_N","",
"LEAK_DETECT_MTIA_BLADE1_N","",
"LEAK_DETECT_MTIA_BLADE2_N","",
"LEAK_DETECT_MTIA_BLADE3_N","",
"LEAK_DETECT_MTIA_BLADE4_N","",
"LEAK_DETECT_MTIA_BLADE5_N","",
"LEAK_DETECT_MTIA_BLADE6_N","",
- "LEAK_DETECT_MTIA_BLADE7_N","",
+ "LEAK_DETECT_MTIA_BLADE7_N","ERR_INJECT_CMM_PWR_FAIL_N",
+ "LEAK_DETECT_MTIA_BLADE8_N","",
/*I0 - I7*/
- "LEAK_DETECT_MTIA_BLADE8_N","RST_I2CRST_FCB_BOTTOM_1_N",
- "LEAK_DETECT_MTIA_BLADE9_N","RST_I2CRST_FCB_BOTTOM_0_N",
- "LEAK_DETECT_MTIA_BLADE10_N","RST_I2CRST_FCB_MIDDLE_0_N",
- "LEAK_DETECT_MTIA_BLADE11_N","RST_I2CRST_FCB_MIDDLE_1_N",
- "LEAK_DETECT_MTIA_BLADE12_N","RST_I2CRST_FCB_TOP_0_N",
- "LEAK_DETECT_MTIA_BLADE13_N","RST_I2CRST_FCB_TOP_1_N",
- "LEAK_DETECT_MTIA_BLADE14_N","BMC_READY",
- "LEAK_DETECT_MTIA_BLADE15_N","FM_88E6393X_BIN_UPDATE_EN_N",
+ "LEAK_DETECT_MTIA_BLADE9_N","RST_I2CRST_FCB_5_N",
+ "LEAK_DETECT_MTIA_BLADE10_N","RST_I2CRST_FCB_6_N",
+ "LEAK_DETECT_MTIA_BLADE11_N","RST_I2CRST_FCB_4_N",
+ "LEAK_DETECT_MTIA_BLADE12_N","RST_I2CRST_FCB_3_N",
+ "LEAK_DETECT_MTIA_BLADE13_N","RST_I2CRST_FCB_2_N",
+ "LEAK_DETECT_MTIA_BLADE14_N","RST_I2CRST_FCB_1_N",
+ "LEAK_DETECT_MTIA_BLADE15_N","BMC_READY",
+ "LEAK_DETECT_MTIA_BLADE16_N","FM_88E6393X_BIN_UPDATE_EN_N",
/*J0 - J7*/
- "LEAK_DETECT_NW_BLADE0_N","WATER_VALVE_CLOSED_N",
- "LEAK_DETECT_NW_BLADE1_N","",
+ "LEAK_DETECT_NW_BLADE1_N","WATER_VALVE_CLOSED_N",
"LEAK_DETECT_NW_BLADE2_N","",
"LEAK_DETECT_NW_BLADE3_N","",
"LEAK_DETECT_NW_BLADE4_N","",
"LEAK_DETECT_NW_BLADE5_N","",
- "PWRGD_MTIA_BLADE0_HSC_PWROK_N","",
+ "LEAK_DETECT_NW_BLADE6_N","",
"PWRGD_MTIA_BLADE1_HSC_PWROK_N","",
- /*K0 - K7*/
"PWRGD_MTIA_BLADE2_HSC_PWROK_N","",
+ /*K0 - K7*/
"PWRGD_MTIA_BLADE3_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE4_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE5_HSC_PWROK_N","",
@@ -725,49 +1582,50 @@
"PWRGD_MTIA_BLADE7_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE8_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE9_HSC_PWROK_N","",
- /*L0 - L7*/
"PWRGD_MTIA_BLADE10_HSC_PWROK_N","",
+ /*L0 - L7*/
"PWRGD_MTIA_BLADE11_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE12_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE13_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE14_HSC_PWROK_N","",
"PWRGD_MTIA_BLADE15_HSC_PWROK_N","",
- "PWRGD_NW_BLADE0_HSC_PWROK_N","",
+ "PWRGD_MTIA_BLADE16_HSC_PWROK_N","",
"PWRGD_NW_BLADE1_HSC_PWROK_N","",
- /*M0 - M7*/
"PWRGD_NW_BLADE2_HSC_PWROK_N","",
+ /*M0 - M7*/
"PWRGD_NW_BLADE3_HSC_PWROK_N","",
"PWRGD_NW_BLADE4_HSC_PWROK_N","",
"PWRGD_NW_BLADE5_HSC_PWROK_N","",
+ "PWRGD_NW_BLADE6_HSC_PWROK_N","",
"RPU_READY","",
"IT_GEAR_RPU_LINK_N","",
"IT_GEAR_LEAK","",
"WATER_VALVE_CLOSED_N","",
/*N0 - N7*/
- "VALVE_STS0","",
- "VALVE_STS1","",
- "PCA9555_IRQ0_N","",
+ "VALVE_STATUS_0","",
+ "VALVE_STATUS_1","",
"PCA9555_IRQ1_N","",
+ "PCA9555_IRQ2_N","",
"CR_TOGGLE_BOOT_N","",
- "IRQ_FCB_TOP0_N","",
- "IRQ_FCB_TOP1_N","",
+ "IRQ_FCB_1_N","",
+ "IRQ_FCB_2_N","",
"CMM_CABLE_CARTRIDGE_PRSNT_BOT_N","",
/*O0 - O7*/
"CMM_CABLE_CARTRIDGE_PRSNT_TOP_N","",
"BOT_BCB_CABLE_PRSNT_N","",
"TOP_BCB_CABLE_PRSNT_N","",
- "IRQ_FCB_MID0_N","",
- "IRQ_FCB_MID1_N","",
+ "IRQ_FCB_3_N","",
+ "IRQ_FCB_4_N","",
"CHASSIS_LEAK0_DETECT_N","",
"CHASSIS_LEAK1_DETECT_N","",
- "VALVE_RMON_A_1","",
+ "PCA9555_IRQ3_N","",
/*P0 - P7*/
- "VALVE_RMON_A_2","",
- "VALVE_RMON_B_1","",
- "VALVE_RMON_B_2","",
+ "PCA9555_IRQ4_N","",
+ "PCA9555_IRQ5_N","",
+ "CMM_AC_PWR_BTN_N","",
"RPU_READY_SPARE","",
"IT_GEAR_LEAK_SPARE","",
"IT_GEAR_RPU_LINK_SPARE_N","",
- "IRQ_FCB_BOT0_N","",
- "IRQ_FCB_BOT0_N","";
+ "IRQ_FCB_6_N","",
+ "IRQ_FCB_5_N","";
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
index 98477792aa00..29f224bccd63 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
@@ -17,6 +17,27 @@
serial6 = &uart7;
serial7 = &uart8;
serial8 = &uart9;
+
+ i2c16 = &imux16;
+ i2c17 = &imux17;
+ i2c18 = &imux18;
+ i2c19 = &imux19;
+ i2c20 = &imux20;
+ i2c21 = &imux21;
+ i2c22 = &imux22;
+ i2c23 = &imux23;
+ i2c24 = &imux24;
+ i2c25 = &imux25;
+ i2c26 = &imux26;
+ i2c27 = &imux27;
+ i2c28 = &imux28;
+ i2c29 = &imux29;
+ i2c30 = &imux30;
+ i2c31 = &imux31;
+ i2c32 = &imux32;
+ i2c33 = &imux33;
+ i2c34 = &imux34;
+ i2c35 = &imux35;
};
chosen {
@@ -32,7 +53,25 @@
compatible = "iio-hwmon";
io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
<&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
- <&adc1 0>, <&adc1 1>;
+ <&adc1 0>, <&adc1 1>, <&adc1 7>;
+ };
+
+ spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&gpio0 ASPEED_GPIO(X, 3) GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(X, 0) GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <33000000>;
+ };
};
};
@@ -83,6 +122,13 @@
aspeed,ext-pulse-duration = <256>;
};
+&wdt2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst2_default>;
+ aspeed,reset-type = "system";
+};
+
&mac2 {
status = "okay";
pinctrl-names = "default";
@@ -105,15 +151,17 @@
status = "okay";
m25p,fast-read;
label = "bmc";
- spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
spi-max-frequency = <50000000>;
-#include "openbmc-flash-layout-64.dtsi"
+#include "openbmc-flash-layout-128.dtsi"
};
flash@1 {
status = "okay";
m25p,fast-read;
- label = "bmc2";
- spi-rx-bus-width = <4>;
+ label = "alt-bmc";
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
spi-max-frequency = <50000000>;
};
};
@@ -129,9 +177,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -146,9 +223,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -163,9 +269,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -180,9 +315,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -197,9 +361,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -214,9 +407,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -231,9 +453,38 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
@@ -248,48 +499,363 @@
reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
};
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
power-sensor@40 {
- compatible = "adi,adm1278";
+ compatible = "adi,adm1281";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <500>;
};
};
&i2c8 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-frequency = <400000>;
i2c-mux@70 {
compatible = "nxp,pca9544";
- idle-state = <0>;
- i2c-mux-idle-disconnect;
reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ imux16: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux17: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux18: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux19: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
};
};
&i2c9 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-frequency = <400000>;
i2c-mux@71 {
compatible = "nxp,pca9544";
- idle-state = <0>;
- i2c-mux-idle-disconnect;
reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ imux20: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux21: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux22: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+
+ imux23: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@49 {
+ compatible = "nxp,pca9537";
+ reg = <0x49>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
};
};
&i2c10 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-frequency = <400000>;
+ i2c-mux@74 {
+ compatible = "nxp,pca9544";
+ reg = <0x74>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ imux28: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@20 {
+ compatible = "nxp,pca9506";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pca9506";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "","","","",
+ "NIC0_MAIN_PWR_EN",
+ "NIC1_MAIN_PWR_EN",
+ "NIC2_MAIN_PWR_EN",
+ "NIC3_MAIN_PWR_EN",
+ "","","","","","","","",
+ "","","","","","","","",
+ "","","","","","","","";
+ };
+ };
+
+ imux29: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
};
&i2c11 {
status = "okay";
power-sensor@10 {
- compatible = "adi, adm1272";
+ compatible = "adi,adm1272";
reg = <0x10>;
};
power-sensor@12 {
- compatible = "adi, adm1272";
+ compatible = "adi,adm1272";
reg = <0x12>;
};
@@ -298,6 +864,20 @@
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <98 IRQ_TYPE_LEVEL_LOW>;
+ gpio-line-names = "P48V_OCP_GPIO1", "P48V_OCP_GPIO2",
+ "P48V_OCP_GPIO3", "FAN_BOARD_0_REVISION_0_R",
+ "FAN_BOARD_0_REVISION_1_R",
+ "FAN_BOARD_1_REVISION_0_R",
+ "FAN_BOARD_1_REVISION_1_R", "RST_MUX_R_N",
+ "RST_LED_CONTROL_FAN_BOARD_0_N",
+ "RST_LED_CONTROL_FAN_BOARD_1_N",
+ "RST_IOEXP_FAN_BOARD_0_N",
+ "RST_IOEXP_FAN_BOARD_1_N",
+ "PWRGD_LOAD_SWITCH_FAN_BOARD_0_R",
+ "PWRGD_LOAD_SWITCH_FAN_BOARD_1_R",
+ "", "";
};
gpio@21 {
@@ -305,6 +885,19 @@
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <98 IRQ_TYPE_LEVEL_LOW>;
+ gpio-line-names = "HSC_OCP_SLOT_ODD_GPIO1",
+ "HSC_OCP_SLOT_ODD_GPIO2",
+ "HSC_OCP_SLOT_ODD_GPIO3",
+ "HSC_OCP_SLOT_EVEN_GPIO1",
+ "HSC_OCP_SLOT_EVEN_GPIO2",
+ "HSC_OCP_SLOT_EVEN_GPIO3",
+ "ADC_TYPE_0_R", "ADC_TYPE_1_R",
+ "MEDUSA_BOARD_REV_0", "MEDUSA_BOARD_REV_1",
+ "MEDUSA_BOARD_REV_2", "MEDUSA_BOARD_TYPE",
+ "DELTA_MODULE_TYPE", "P12V_HSC_TYPE",
+ "", "";
};
gpio@22 {
@@ -312,6 +905,16 @@
reg = <0x22>;
gpio-controller;
#gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <98 IRQ_TYPE_LEVEL_LOW>;
+ gpio-line-names = "CARD_TYPE_SLOT1", "CARD_TYPE_SLOT2",
+ "CARD_TYPE_SLOT3", "CARD_TYPE_SLOT4",
+ "CARD_TYPE_SLOT5", "CARD_TYPE_SLOT6",
+ "CARD_TYPE_SLOT7", "CARD_TYPE_SLOT8",
+ "OC_P48V_HSC_0_N", "FLT_P48V_HSC_0_N",
+ "OC_P48V_HSC_1_N", "FLT_P48V_HSC_1_N",
+ "EN_P48V_AUX_0", "EN_P48V_AUX_1",
+ "PWRGD_P12V_AUX_0", "PWRGD_P12V_AUX_1";
};
gpio@23 {
@@ -319,6 +922,16 @@
reg = <0x23>;
gpio-controller;
#gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <98 IRQ_TYPE_LEVEL_LOW>;
+ gpio-line-names = "HSC1_ALERT1_R_N", "HSC2_ALERT1_R_N",
+ "HSC3_ALERT1_R_N", "HSC4_ALERT1_R_N",
+ "HSC5_ALERT1_R_N", "HSC6_ALERT1_R_N",
+ "HSC7_ALERT1_R_N", "HSC8_ALERT1_R_N",
+ "HSC1_ALERT2_R_N", "HSC2_ALERT2_R_N",
+ "HSC3_ALERT2_R_N", "HSC4_ALERT2_R_N",
+ "HSC5_ALERT2_R_N", "HSC6_ALERT2_R_N",
+ "HSC7_ALERT2_R_N", "HSC8_ALERT2_R_N";
};
temperature-sensor@48 {
@@ -331,39 +944,84 @@
reg = <0x49>;
};
- temperature-sensor@4a {
- compatible = "ti,tmp75";
- reg = <0x4a>;
- };
-
- temperature-sensor@4b {
- compatible = "ti,tmp75";
- reg = <0x4b>;
- };
-
eeprom@54 {
- compatible = "atmel,24c256";
+ compatible = "atmel,24c128";
reg = <0x54>;
};
};
&i2c12 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-frequency = <400000>;
- temperature-sensor@48 {
- compatible = "ti,tmp75";
- reg = <0x48>;
- };
+ i2c-mux@70 {
+ compatible = "nxp,pca9544";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
- eeprom@50 {
- compatible = "atmel,24c128";
- reg = <0x50>;
- };
+ imux34: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ };
+
+ rtc@6f {
+ compatible = "nuvoton,nct3018y";
+ reg = <0x6f>;
+ };
- rtc@6f {
- compatible = "nuvoton,nct3018y";
- reg = <0x6f>;
+ gpio@20 {
+ compatible = "nxp,pca9506";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@21 {
+ compatible = "nxp,pca9506";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@22 {
+ compatible = "nxp,pca9506";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@23 {
+ compatible = "nxp,pca9506";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ imux35: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
};
@@ -380,24 +1038,26 @@
};
&i2c14 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-frequency = <400000>;
adc@1d {
compatible = "ti,adc128d818";
reg = <0x1d>;
- ti,mode = /bits/ 8 <2>;
+ ti,mode = /bits/ 8 <1>;
};
- adc@35 {
+ adc@36 {
compatible = "ti,adc128d818";
- reg = <0x35>;
- ti,mode = /bits/ 8 <2>;
+ reg = <0x36>;
+ ti,mode = /bits/ 8 <1>;
};
adc@37 {
compatible = "ti,adc128d818";
reg = <0x37>;
- ti,mode = /bits/ 8 <2>;
+ ti,mode = /bits/ 8 <1>;
};
power-sensor@40 {
@@ -440,43 +1100,67 @@
reg = <0x51>;
};
- i2c-mux@71 {
- compatible = "nxp,pca9846";
+ i2c-mux@73 {
+ compatible = "nxp,pca9544";
+ reg = <0x73>;
#address-cells = <1>;
#size-cells = <0>;
-
- idle-state = <0>;
i2c-mux-idle-disconnect;
- reg = <0x71>;
- i2c@0 {
+ imux32: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adc@35 {
+ compatible = "maxim,max11617";
+ reg = <0x35>;
+ };
+ };
+
+ imux33: i2c@1 {
+ reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ adc@35 {
+ compatible = "maxim,max11617";
+ reg = <0x35>;
+ };
+ };
+ };
+
+ i2c-mux@74 {
+ compatible = "nxp,pca9546";
+ reg = <0x74>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c-mux-idle-disconnect;
+
+ imux30: i2c@0 {
reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
adc@1f {
compatible = "ti,adc128d818";
reg = <0x1f>;
- ti,mode = /bits/ 8 <2>;
+ ti,mode = /bits/ 8 <1>;
};
pwm@20{
- compatible = "max31790";
+ compatible = "maxim,max31790";
reg = <0x20>;
- #address-cells = <1>;
- #size-cells = <0>;
};
gpio@22{
compatible = "ti,tca6424";
reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
- pwm@23{
- compatible = "max31790";
- reg = <0x23>;
- #address-cells = <1>;
- #size-cells = <0>;
+ pwm@2f{
+ compatible = "maxim,max31790";
+ reg = <0x2f>;
};
adc@33 {
@@ -499,34 +1183,32 @@
};
};
- i2c@1 {
+ imux31: i2c@1 {
+ reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0>;
adc@1f {
compatible = "ti,adc128d818";
reg = <0x1f>;
- ti,mode = /bits/ 8 <2>;
+ ti,mode = /bits/ 8 <1>;
};
pwm@20{
- compatible = "max31790";
+ compatible = "maxim,max31790";
reg = <0x20>;
- #address-cells = <1>;
- #size-cells = <0>;
};
gpio@22{
compatible = "ti,tca6424";
reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
- pwm@23{
- compatible = "max31790";
- reg = <0x23>;
- #address-cells = <1>;
- #size-cells = <0>;
+ pwm@2f{
+ compatible = "maxim,max31790";
+ reg = <0x2f>;
};
adc@33 {
@@ -549,56 +1231,89 @@
};
};
};
+};
- i2c-mux@73 {
+&i2c15 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ multi-master;
+ bus-frequency = <400000>;
+
+ mctp@10 {
+ compatible = "mctp-i2c-controller";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+
+ i2c-mux@72 {
compatible = "nxp,pca9544";
+ reg = <0x72>;
#address-cells = <1>;
#size-cells = <0>;
- idle-state = <0>;
- i2c-mux-idle-disconnect;
- reg = <0x73>;
-
- i2c@0 {
+ imux24: i2c@0 {
+ reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0>;
+ mctp-controller;
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
- adc@35 {
- compatible = "maxim,max11617";
- reg = <0x35>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
};
};
- i2c@1 {
+ imux25: i2c@1 {
+ reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0>;
+ mctp-controller;
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
- adc@35 {
- compatible = "maxim,max11617";
- reg = <0x35>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
};
};
- };
-};
-&i2c15 {
- status = "okay";
- mctp-controller;
- multi-master;
- bus-frequency = <400000>;
+ imux26: i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mctp-controller;
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
- mctp@10 {
- compatible = "mctp-i2c-controller";
- reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
- };
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
- i2c-mux@72 {
- compatible = "nxp,pca9544";
- idle-state = <0>;
- i2c-mux-idle-disconnect;
- reg = <0x72>;
+ imux27: i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mctp-controller;
+ temperature-sensor@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
};
};
@@ -612,10 +1327,10 @@
&adc1 {
status = "okay";
- pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default>;
+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+ &pinctrl_adc15_default>;
};
-
&ehci0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts
index dfe5cc3edb52..bc4c46235421 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts
@@ -207,7 +207,8 @@
/*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","",
"factory-reset-toggle","",
/*G0-G7*/ "","","","","","","","",
- /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","",
+ /*H0-H7*/ "","led-bmc-ingraham0","led-rear-enc-id0","led-rear-enc-fault0","","","",
+ "",
/*I0-I7*/ "","","","","","","bmc-secure-boot","",
/*J0-J7*/ "","","","","","","","",
/*K0-K7*/ "","","","","","","","",
@@ -215,7 +216,7 @@
/*M0-M7*/ "","","","","","","","",
/*N0-N7*/ "","","","","","","","",
/*O0-O7*/ "","","","usb-power","","","","",
- /*P0-P7*/ "","","","","pcieslot-power","","","",
+ /*P0-P7*/ "","","","","led-pcieslot-power","","","",
/*Q0-Q7*/ "cfam-reset","","regulator-standby-faulted","","","","","",
/*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","",
"",
@@ -739,7 +740,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm10";
@@ -747,7 +748,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm11";
@@ -755,7 +756,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm12";
@@ -763,7 +764,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm13";
@@ -771,7 +772,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm14";
@@ -779,7 +780,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm15";
@@ -876,7 +877,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm26";
@@ -884,7 +885,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm27";
@@ -892,7 +893,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm28";
@@ -900,7 +901,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm29";
@@ -908,7 +909,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm30";
@@ -916,7 +917,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm31";
@@ -1005,7 +1006,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "pcieslot7";
@@ -1013,7 +1014,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "pcieslot8";
@@ -1021,7 +1022,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "pcieslot9";
@@ -1029,7 +1030,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "pcieslot10";
@@ -1037,7 +1038,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "pcieslot11";
@@ -1045,7 +1046,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "tpm-wilson";
@@ -1231,8 +1232,9 @@
#gpio-cells = <2>;
gpio-line-names =
- "", "", "", "", "", "", "", "",
- "", "", "", "", "", "", "power-config-full-load", "";
+ "", "", "", "", "", "", "P10_DCM0_PRES", "P10_DCM1_PRES",
+ "", "", "", "", "PRESENT_VRM_DCM0_N", "PRESENT_VRM_DCM1_N",
+ "power-config-full-load", "";
};
led-controller@61 {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
index 513077a1f4be..9961508ee872 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
@@ -353,6 +353,33 @@
"presence-base-op",
"";
};
+
+ led-controller@63 {
+ compatible = "nxp,pca9552";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-vrm-c12",
+ "presence-vrm-c13",
+ "presence-vrm-c15",
+ "presence-vrm-c16",
+ "presence-vrm-c17",
+ "presence-vrm-c18",
+ "presence-vrm-c20",
+ "presence-vrm-c21",
+ "presence-vrm-c54",
+ "presence-vrm-c55",
+ "presence-vrm-c57",
+ "presence-vrm-c58",
+ "presence-vrm-c59",
+ "presence-vrm-c60",
+ "presence-vrm-c62",
+ "presence-vrm-c63";
+ };
};
&i2c1 {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
index c24e464e5faa..9a43fc7bcebe 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
@@ -355,6 +355,33 @@
"presence-base-op",
"";
};
+
+ led-controller@63 {
+ compatible = "nxp,pca9552";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-vrm-c12",
+ "presence-vrm-c13",
+ "presence-vrm-c15",
+ "presence-vrm-c16",
+ "presence-vrm-c17",
+ "presence-vrm-c18",
+ "presence-vrm-c20",
+ "presence-vrm-c21",
+ "presence-vrm-c54",
+ "presence-vrm-c55",
+ "presence-vrm-c57",
+ "presence-vrm-c58",
+ "presence-vrm-c59",
+ "presence-vrm-c60",
+ "presence-vrm-c62",
+ "presence-vrm-c63";
+ };
};
&i2c1 {
@@ -949,7 +976,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "pcieslot-c10";
@@ -957,7 +984,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "pcieslot-c11";
@@ -1058,7 +1085,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm10";
@@ -1066,7 +1093,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm11";
@@ -1074,7 +1101,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm12";
@@ -1082,7 +1109,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm13";
@@ -1090,7 +1117,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm14";
@@ -1098,7 +1125,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm15";
@@ -1195,7 +1222,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm26";
@@ -1203,7 +1230,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm27";
@@ -1211,7 +1238,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm28";
@@ -1219,7 +1246,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm29";
@@ -1227,7 +1254,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm30";
@@ -1235,7 +1262,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm31";
@@ -1332,7 +1359,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm42";
@@ -1340,7 +1367,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm43";
@@ -1348,7 +1375,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm44";
@@ -1356,7 +1383,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm45";
@@ -1364,7 +1391,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm46";
@@ -1372,7 +1399,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm47";
@@ -1469,7 +1496,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "ddimm58";
@@ -1477,7 +1504,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "ddimm59";
@@ -1485,7 +1512,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "ddimm60";
@@ -1493,7 +1520,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "ddimm61";
@@ -1501,7 +1528,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "ddimm62";
@@ -1509,7 +1536,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "ddimm63";
@@ -1598,7 +1625,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "vrm6";
@@ -1606,7 +1633,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "vrm7";
@@ -1614,7 +1641,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "vrm12";
@@ -1622,7 +1649,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "vrm13";
@@ -1630,7 +1657,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "vrm14";
@@ -1638,7 +1665,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "vrm15";
@@ -1727,7 +1754,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "vrm2";
@@ -1735,7 +1762,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "vrm3";
@@ -1743,7 +1770,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "vrm8";
@@ -1751,7 +1778,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "vrm9";
@@ -1759,7 +1786,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@14 {
+ led@e {
reg = <14>;
default-state = "keep";
label = "vrm10";
@@ -1767,7 +1794,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@15 {
+ led@f {
reg = <15>;
default-state = "keep";
label = "vrm11";
@@ -2118,7 +2145,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@10 {
+ led@a {
reg = <10>;
default-state = "keep";
label = "fan0";
@@ -2126,7 +2153,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@11 {
+ led@b {
reg = <11>;
default-state = "keep";
label = "fan1";
@@ -2134,7 +2161,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@12 {
+ led@c {
reg = <12>;
default-state = "keep";
label = "fan2";
@@ -2142,7 +2169,7 @@
type = <PCA955X_TYPE_LED>;
};
- led@13 {
+ led@d {
reg = <13>;
default-state = "keep";
label = "fan3";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
index 0776b72c2199..638a2c1c7892 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts
@@ -109,22 +109,22 @@
compatible = "gpio-leds";
/* BMC Card fault LED at the back */
- bmc-ingraham0 {
+ led-bmc-ingraham0 {
gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
};
/* Enclosure ID LED at the back */
- rear-enc-id0 {
+ led-rear-enc-id0 {
gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>;
};
/* Enclosure fault LED at the back */
- rear-enc-fault0 {
+ led-rear-enc-fault0 {
gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
};
/* PCIE slot power LED */
- pcieslot-power {
+ led-pcieslot-power {
gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
};
};
@@ -203,7 +203,7 @@
/*E0-E7*/ "","","","","","","","",
/*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","","factory-reset-toggle","",
/*G0-G7*/ "","","","","","","","",
- /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","",
+ /*H0-H7*/ "","led-bmc-ingraham0","led-rear-enc-id0","led-rear-enc-fault0","","","","",
/*I0-I7*/ "","","","","","","bmc-secure-boot","",
/*J0-J7*/ "","","","","","","","",
/*K0-K7*/ "","","","","","","","",
@@ -211,7 +211,7 @@
/*M0-M7*/ "","","","","","","","",
/*N0-N7*/ "","","","","","","","",
/*O0-O7*/ "","","","usb-power","","","","",
- /*P0-P7*/ "","","","","pcieslot-power","","","",
+ /*P0-P7*/ "","","","","led-pcieslot-power","","","",
/*Q0-Q7*/ "cfam-reset","","regulator-standby-faulted","","","","","",
/*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","","",
/*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
@@ -1280,8 +1280,9 @@
#gpio-cells = <2>;
gpio-line-names =
- "", "", "", "", "", "", "", "",
- "", "", "", "", "", "", "power-config-full-load", "";
+ "", "", "", "", "", "", "P10_DCM0_PRES", "P10_DCM1_PRES",
+ "", "", "", "", "PRESENT_VRM_DCM0_N", "PRESENT_VRM_DCM1_N",
+ "power-config-full-load", "";
};
pca_pres2: pca9552@61 {
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-sbp1.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-sbp1.dts
new file mode 100644
index 000000000000..8d98be3d5f2e
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-sbp1.dts
@@ -0,0 +1,6086 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2024 IBM Corp.
+/dts-v1/;
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/i2c/i2c.h>
+#include "aspeed-g6.dtsi"
+
+/ {
+ model = "IBM SBP1";
+ compatible = "ibm,sbp1-bmc", "aspeed,ast2600";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ device_type = "memory";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-power {
+ label = "LED_BMC_READY";
+ gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ retain-state-suspended;
+ panic-indicator;
+ };
+
+ led-id-tpm {
+ label = "LED_ID_TPM";
+ gpios = <&smb_pex_vr_ctrl 12 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-bat {
+ label = "LED_ID_BAT";
+ gpios = <&smb_pex_vr_ctrl 16 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-mgmt-port2 {
+ label = "LED_ID_MGMT_PORT2";
+ gpios = <&smb_pex_vr_ctrl 17 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-mgmt-port1 {
+ label = "LED_ID_MGMT_PORT1";
+ gpios = <&smb_pex_vr_ctrl 18 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-nic1-port1 {
+ label = "LED_ID_NIC1_PORT1";
+ gpios = <&smb_pex_vr_ctrl 22 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-nic1-port2 {
+ label = "LED_ID_NIC1_PORT2";
+ gpios = <&smb_pex_vr_ctrl 23 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-nic2-port1 {
+ label = "LED_ID_NIC2_PORT1";
+ gpios = <&smb_pex_vr_ctrl 24 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-nic2-port2 {
+ label = "LED_ID_NIC2_PORT2";
+ gpios = <&smb_pex_vr_ctrl 25 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-m2-ssd2 {
+ label = "LED_ID_M2_SSD2";
+ gpios = <&smb_pex_vr_ctrl 36 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-m2-ssd1 {
+ label = "LED_ID_M2_SSD1";
+ gpios = <&smb_pex_vr_ctrl 37 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dwr-frnt-p {
+ label = "LED_ID_DWR_FRNT_P";
+ gpios = <&smb_svc_pex_cpu3_led 37 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_BLUE>;
+
+ default-state = "on";
+ retain-state-suspended;
+ retain-state-shutdown;
+ };
+
+ led-pwr-dwr-frnt {
+ label = "LED_PWR_DWR_FRNT";
+ gpios = <&smb_svc_pex_cpu3_led 36 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+
+ retain-state-suspended;
+ retain-state-shutdown;
+ };
+
+ led-pwr-dwr-back {
+ label = "LED_PWR_DWR_BACK";
+ gpios = <&smb_pex_vr_ctrl 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+
+ retain-state-suspended;
+ retain-state-shutdown;
+ };
+
+ led-id-dwr-back-p {
+ label = "LED_ID_DWR_BACK_P";
+ gpios = <&smb_pex_vr_ctrl 35 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_BLUE>;
+
+ default-state = "on";
+ retain-state-suspended;
+ retain-state-shutdown;
+ };
+
+ led-id-cpu0 {
+ label = "LED_ID_CPU0";
+ gpios = <&smb_svc_pex_cpu0_led 39 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-cpu1 {
+ label = "LED_ID_CPU1";
+ gpios = <&smb_svc_pex_cpu1_led 39 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-cpu2 {
+ label = "LED_ID_CPU2";
+ gpios = <&smb_svc_pex_cpu2_led 39 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-cpu3 {
+ label = "LED_ID_CPU3";
+ gpios = <&smb_svc_pex_cpu3_led 39 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0e2 {
+ label = "LED_ID_DIMM_C0E2";
+ gpios = <&smb_svc_pex_cpu0_led 20 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0e1 {
+ label = "LED_ID_DIMM_C0E1";
+ gpios = <&smb_svc_pex_cpu0_led 21 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0f2 {
+ label = "LED_ID_DIMM_C0F2";
+ gpios = <&smb_svc_pex_cpu0_led 22 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0f1 {
+ label = "LED_ID_DIMM_C0F1";
+ gpios = <&smb_svc_pex_cpu0_led 23 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0g2 {
+ label = "LED_ID_DIMM_C0G2";
+ gpios = <&smb_svc_pex_cpu0_led 24 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0g1 {
+ label = "LED_ID_DIMM_C0G1";
+ gpios = <&smb_svc_pex_cpu0_led 25 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0h2 {
+ label = "LED_ID_DIMM_C0H2";
+ gpios = <&smb_svc_pex_cpu0_led 26 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0h1 {
+ label = "LED_ID_DIMM_C0H1";
+ gpios = <&smb_svc_pex_cpu0_led 27 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0a2 {
+ label = "LED_ID_DIMM_C0A2";
+ gpios = <&smb_svc_pex_cpu0_led 28 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0a1 {
+ label = "LED_ID_DIMM_C0A1";
+ gpios = <&smb_svc_pex_cpu0_led 29 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0b2 {
+ label = "LED_ID_DIMM_C0B2";
+ gpios = <&smb_svc_pex_cpu0_led 30 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0b1 {
+ label = "LED_ID_DIMM_C0B1";
+ gpios = <&smb_svc_pex_cpu0_led 31 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0c2 {
+ label = "LED_ID_DIMM_C0C2";
+ gpios = <&smb_svc_pex_cpu0_led 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0c1 {
+ label = "LED_ID_DIMM_C0C1";
+ gpios = <&smb_svc_pex_cpu0_led 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0d2 {
+ label = "LED_ID_DIMM_C0D2";
+ gpios = <&smb_svc_pex_cpu0_led 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c0d1 {
+ label = "LED_ID_DIMM_C0D1";
+ gpios = <&smb_svc_pex_cpu0_led 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1e2 {
+ label = "LED_ID_DIMM_C1E2";
+ gpios = <&smb_svc_pex_cpu1_led 20 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1e1 {
+ label = "LED_ID_DIMM_C1E1";
+ gpios = <&smb_svc_pex_cpu1_led 21 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1f2 {
+ label = "LED_ID_DIMM_C1F2";
+ gpios = <&smb_svc_pex_cpu1_led 22 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1f1 {
+ label = "LED_ID_DIMM_C1F1";
+ gpios = <&smb_svc_pex_cpu1_led 23 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1g2 {
+ label = "LED_ID_DIMM_C1G2";
+ gpios = <&smb_svc_pex_cpu1_led 24 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1g1 {
+ label = "LED_ID_DIMM_C1G1";
+ gpios = <&smb_svc_pex_cpu1_led 25 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1h2 {
+ label = "LED_ID_DIMM_C1H2";
+ gpios = <&smb_svc_pex_cpu1_led 26 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1h1 {
+ label = "LED_ID_DIMM_C1H1";
+ gpios = <&smb_svc_pex_cpu1_led 27 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1a2 {
+ label = "LED_ID_DIMM_C1A2";
+ gpios = <&smb_svc_pex_cpu1_led 28 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1a1 {
+ label = "LED_ID_DIMM_C1A1";
+ gpios = <&smb_svc_pex_cpu1_led 29 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1b2 {
+ label = "LED_ID_DIMM_C1B2";
+ gpios = <&smb_svc_pex_cpu1_led 30 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1b1 {
+ label = "LED_ID_DIMM_C1B1";
+ gpios = <&smb_svc_pex_cpu1_led 31 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1c2 {
+ label = "LED_ID_DIMM_C1C2";
+ gpios = <&smb_svc_pex_cpu1_led 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1c1 {
+ label = "LED_ID_DIMM_C1C1";
+ gpios = <&smb_svc_pex_cpu1_led 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1d2 {
+ label = "LED_ID_DIMM_C1D2";
+ gpios = <&smb_svc_pex_cpu1_led 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c1d1 {
+ label = "LED_ID_DIMM_C1D1";
+ gpios = <&smb_svc_pex_cpu1_led 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2e2 {
+ label = "LED_ID_DIMM_C2E2";
+ gpios = <&smb_svc_pex_cpu2_led 20 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2e1 {
+ label = "LED_ID_DIMM_C2E1";
+ gpios = <&smb_svc_pex_cpu2_led 21 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2f2 {
+ label = "LED_ID_DIMM_C2F2";
+ gpios = <&smb_svc_pex_cpu2_led 22 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2f1 {
+ label = "LED_ID_DIMM_C2F1";
+ gpios = <&smb_svc_pex_cpu2_led 23 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2g2 {
+ label = "LED_ID_DIMM_C2G2";
+ gpios = <&smb_svc_pex_cpu2_led 24 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2g1 {
+ label = "LED_ID_DIMM_C2G1";
+ gpios = <&smb_svc_pex_cpu2_led 25 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2h2 {
+ label = "LED_ID_DIMM_C2H2";
+ gpios = <&smb_svc_pex_cpu2_led 26 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2h1 {
+ label = "LED_ID_DIMM_C2H1";
+ gpios = <&smb_svc_pex_cpu2_led 27 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2a2 {
+ label = "LED_ID_DIMM_C2A2";
+ gpios = <&smb_svc_pex_cpu2_led 28 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2a1 {
+ label = "LED_ID_DIMM_C2A1";
+ gpios = <&smb_svc_pex_cpu2_led 29 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2b2 {
+ label = "LED_ID_DIMM_C2B2";
+ gpios = <&smb_svc_pex_cpu2_led 30 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2b1 {
+ label = "LED_ID_DIMM_C2B1";
+ gpios = <&smb_svc_pex_cpu2_led 31 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2c2 {
+ label = "LED_ID_DIMM_C2C2";
+ gpios = <&smb_svc_pex_cpu2_led 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2c1 {
+ label = "LED_ID_DIMM_C2C1";
+ gpios = <&smb_svc_pex_cpu2_led 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2d2 {
+ label = "LED_ID_DIMM_C2D2";
+ gpios = <&smb_svc_pex_cpu2_led 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c2d1 {
+ label = "LED_ID_DIMM_C2D1";
+ gpios = <&smb_svc_pex_cpu2_led 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3e2 {
+ label = "LED_ID_DIMM_C3E2";
+ gpios = <&smb_svc_pex_cpu3_led 20 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3e1 {
+ label = "LED_ID_DIMM_C3E1";
+ gpios = <&smb_svc_pex_cpu3_led 21 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3f2 {
+ label = "LED_ID_DIMM_C3F2";
+ gpios = <&smb_svc_pex_cpu3_led 22 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3f1 {
+ label = "LED_ID_DIMM_C3F1";
+ gpios = <&smb_svc_pex_cpu3_led 23 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3g2 {
+ label = "LED_ID_DIMM_C3G2";
+ gpios = <&smb_svc_pex_cpu3_led 24 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3g1 {
+ label = "LED_ID_DIMM_C3G1";
+ gpios = <&smb_svc_pex_cpu3_led 25 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3h2 {
+ label = "LED_ID_DIMM_C3H2";
+ gpios = <&smb_svc_pex_cpu3_led 26 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3h1 {
+ label = "LED_ID_DIMM_C3H1";
+ gpios = <&smb_svc_pex_cpu3_led 27 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3a2 {
+ label = "LED_ID_DIMM_C3A2";
+ gpios = <&smb_svc_pex_cpu3_led 28 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3a1 {
+ label = "LED_ID_DIMM_C3A1";
+ gpios = <&smb_svc_pex_cpu3_led 29 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3b2 {
+ label = "LED_ID_DIMM_C3B2";
+ gpios = <&smb_svc_pex_cpu3_led 30 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3b1 {
+ label = "LED_ID_DIMM_C3B1";
+ gpios = <&smb_svc_pex_cpu3_led 31 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3c2 {
+ label = "LED_ID_DIMM_C3C2";
+ gpios = <&smb_svc_pex_cpu3_led 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3c1 {
+ label = "LED_ID_DIMM_C3C1";
+ gpios = <&smb_svc_pex_cpu3_led 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3d2 {
+ label = "LED_ID_DIMM_C3D2";
+ gpios = <&smb_svc_pex_cpu3_led 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-c3d1 {
+ label = "LED_ID_DIMM_C3D1";
+ gpios = <&smb_svc_pex_cpu3_led 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd01 {
+ label = "LED_ID_RSSD01";
+ gpios = <&smb_svc_pex_rssd01_16 0 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd02 {
+ label = "LED_ID_RSSD02";
+ gpios = <&smb_svc_pex_rssd01_16 1 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd03 {
+ label = "LED_ID_RSSD03";
+ gpios = <&smb_svc_pex_rssd01_16 2 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd04 {
+ label = "LED_ID_RSSD04";
+ gpios = <&smb_svc_pex_rssd01_16 3 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd05 {
+ label = "LED_ID_RSSD05";
+ gpios = <&smb_svc_pex_rssd01_16 4 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd06 {
+ label = "LED_ID_RSSD06";
+ gpios = <&smb_svc_pex_rssd01_16 5 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd07 {
+ label = "LED_ID_RSSD07";
+ gpios = <&smb_svc_pex_rssd01_16 6 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd08 {
+ label = "LED_ID_RSSD08";
+ gpios = <&smb_svc_pex_rssd01_16 7 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd09 {
+ label = "LED_ID_RSSD09";
+ gpios = <&smb_svc_pex_rssd01_16 8 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd10 {
+ label = "LED_ID_RSSD10";
+ gpios = <&smb_svc_pex_rssd01_16 9 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd11 {
+ label = "LED_ID_RSSD11";
+ gpios = <&smb_svc_pex_rssd01_16 10 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd12 {
+ label = "LED_ID_RSSD12";
+ gpios = <&smb_svc_pex_rssd01_16 11 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd13 {
+ label = "LED_ID_RSSD13";
+ gpios = <&smb_svc_pex_rssd01_16 12 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd14 {
+ label = "LED_ID_RSSD14";
+ gpios = <&smb_svc_pex_rssd01_16 13 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd15 {
+ label = "LED_ID_RSSD15";
+ gpios = <&smb_svc_pex_rssd01_16 14 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd16 {
+ label = "LED_ID_RSSD16";
+ gpios = <&smb_svc_pex_rssd01_16 15 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd17 {
+ label = "LED_ID_RSSD17";
+ gpios = <&smb_svc_pex_rssd17_32 0 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd18 {
+ label = "LED_ID_RSSD18";
+ gpios = <&smb_svc_pex_rssd17_32 1 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd19 {
+ label = "LED_ID_RSSD19";
+ gpios = <&smb_svc_pex_rssd17_32 2 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd20 {
+ label = "LED_ID_RSSD20";
+ gpios = <&smb_svc_pex_rssd17_32 3 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd21 {
+ label = "LED_ID_RSSD21";
+ gpios = <&smb_svc_pex_rssd17_32 4 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd22 {
+ label = "LED_ID_RSSD22";
+ gpios = <&smb_svc_pex_rssd17_32 5 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd23 {
+ label = "LED_ID_RSSD23";
+ gpios = <&smb_svc_pex_rssd17_32 6 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd24 {
+ label = "LED_ID_RSSD24";
+ gpios = <&smb_svc_pex_rssd17_32 7 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd25 {
+ label = "LED_ID_RSSD25";
+ gpios = <&smb_svc_pex_rssd17_32 8 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd26 {
+ label = "LED_ID_RSSD26";
+ gpios = <&smb_svc_pex_rssd17_32 9 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd27 {
+ label = "LED_ID_RSSD27";
+ gpios = <&smb_svc_pex_rssd17_32 10 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd28 {
+ label = "LED_ID_RSSD28";
+ gpios = <&smb_svc_pex_rssd17_32 11 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd29 {
+ label = "LED_ID_RSSD29";
+ gpios = <&smb_svc_pex_rssd17_32 12 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd30 {
+ label = "LED_ID_RSSD30";
+ gpios = <&smb_svc_pex_rssd17_32 13 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd31 {
+ label = "LED_ID_RSSD31";
+ gpios = <&smb_svc_pex_rssd17_32 14 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-dimm-rssd32 {
+ label = "LED_ID_RSSD32";
+ gpios = <&smb_svc_pex_rssd17_32 15 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm01 {
+ label = "LED_ID_FAN_ASM01";
+ gpios = <&smb_svc_pex_rssd01_16 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm02 {
+ label = "LED_ID_FAN_ASM02";
+ gpios = <&smb_svc_pex_rssd01_16 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm03 {
+ label = "LED_ID_FAN_ASM03";
+ gpios = <&smb_svc_pex_rssd01_16 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm04 {
+ label = "LED_ID_FAN_ASM04";
+ gpios = <&smb_svc_pex_rssd01_16 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm05 {
+ label = "LED_ID_FAN_ASM05";
+ gpios = <&smb_svc_pex_rssd01_16 36 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm06 {
+ label = "LED_ID_FAN_ASM06";
+ gpios = <&smb_svc_pex_rssd01_16 37 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm07 {
+ label = "LED_ID_FAN_ASM07";
+ gpios = <&smb_svc_pex_rssd17_32 32 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm08 {
+ label = "LED_ID_FAN_ASM08";
+ gpios = <&smb_svc_pex_rssd17_32 33 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm09 {
+ label = "LED_ID_FAN_ASM09";
+ gpios = <&smb_svc_pex_rssd17_32 34 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm10 {
+ label = "LED_ID_FAN_ASM10";
+ gpios = <&smb_svc_pex_rssd17_32 35 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm11 {
+ label = "LED_ID_FAN_ASM11";
+ gpios = <&smb_svc_pex_rssd17_32 36 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+
+ led-id-fan-asm12 {
+ label = "LED_ID_FAN_ASM12";
+ gpios = <&smb_svc_pex_rssd17_32 37 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_YELLOW>;
+ };
+ };
+
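+ /*
+ * iio-hwmon bridge: exposes the ADC channels listed below (mostly via
+ * the voltage-divider nodes defined further down) as hwmon voltage sensors.
+ */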
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&p12v_vd 0>, <&p5v_aux_vd 0>, <&p5v_bmc_aux_vd 0>, <&p3v3_aux_vd 0>,
+ <&p3v3_bmc_aux_vd 0>, <&p1v8_bmc_aux_vd 0>, <&adc1 4>, <&adc0 2>, <&adc1 0>,
+ <&p2v5_aux_vd 0>, <&p3v3_rtc_vd 0>;
+ };
+
+ p12v_vd: voltage-divider1 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 3>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1127/127 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
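+ /* The raw reading is scaled back up by full-ohms / output-ohms to recover the rail voltage. */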
+ output-ohms = <15>;
+ full-ohms = <133>;
+ };
+
+ p5v_aux_vd: voltage-divider2 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 5>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1365/365 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <50>;
+ full-ohms = <187>;
+ };
+
+ p5v_bmc_aux_vd: voltage-divider3 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 3>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1365/365 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <50>;
+ full-ohms = <187>;
+ };
+
+ p3v3_aux_vd: voltage-divider4 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 2>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1698/698 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <14>;
+ full-ohms = <34>;
+ };
+
+ p3v3_bmc_aux_vd: voltage-divider5 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 7>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 1698/698 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <14>;
+ full-ohms = <34>;
+ };
+
+ p1v8_bmc_aux_vd: voltage-divider6 {
+ compatible = "voltage-divider";
+ io-channels = <&adc0 6>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 4000/3000 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <3>;
+ full-ohms = <4>;
+ };
+
+ p2v5_aux_vd: voltage-divider7 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 1>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 2100/1100 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <11>;
+ full-ohms = <21>;
+ };
+
+ p3v3_rtc_vd: voltage-divider8 {
+ compatible = "voltage-divider";
+ io-channels = <&adc1 7>;
+ #io-channel-cells = <1>;
+
+ /*
+ * Scale the system voltage by 231000/100000 to fit the ADC range.
+ * Use a small numerator to prevent integer overflow.
+ */
+ output-ohms = <100>;
+ full-ohms = <231>;
+ };
+
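+ /* On-board 10k NTC thermistors, sampled through BMC ADC0 channels */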
+ thermistor0: thermistor-0 {
+ compatible = "epcos,b57891s0103";
+ pullup-uv = <3300000>;
+ pullup-ohm = <10000>;
+ pulldown-ohm = <0>;
+ io-channels = <&adc0 0>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermistor1: thermistor-1 {
+ compatible = "epcos,b57891s0103";
+ pullup-uv = <3300000>;
+ pullup-ohm = <10000>;
+ pulldown-ohm = <0>;
+ io-channels = <&adc0 1>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermistor2: thermistor-2 {
+ compatible = "epcos,b57891s0103";
+ pullup-uv = <3300000>;
+ pullup-ohm = <10000>;
+ pulldown-ohm = <0>;
+ io-channels = <&adc0 4>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermistor3: thermistor-3 {
+ compatible = "epcos,b57891s0103";
+ pullup-uv = <3300000>;
+ pullup-ohm = <10000>;
+ pulldown-ohm = <0>;
+ io-channels = <&adc0 5>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ p12v: fixedregulator-p12v {
+ compatible = "regulator-fixed";
+ regulator-name = "p12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ p3v3_bmc_aux: fixedregulator-p3v3-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p3v3_bmc_aux";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ p1v8_bmc_aux: fixedregulator-p1v8-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p1v8_bmc_aux";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ p1v2_bmc_aux: fixedregulator-p1v2-bmc-aux {
+ compatible = "regulator-fixed";
+ regulator-name = "p1v2_bmc_aux";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
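+ /*
+ * The regulator-output nodes below expose board power rails to userspace
+ * (regulator userspace-consumer binding) without an in-kernel consumer.
+ */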
+ p12v-a-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p12v_a>;
+ };
+
+ p12v-b-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p12v_b>;
+ };
+
+ p12v-c-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p12v_c>;
+ };
+
+ p12v-d-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p12v_d>;
+ };
+
+ pvccinfaon-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccinfaon_cpu0>;
+ };
+
+ pvccfa-ehv-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_cpu0>;
+ };
+
+ pvnn-main-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvnn_main_cpu0>;
+ };
+
+ pvccin-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccin_cpu0>;
+ };
+
+ pvccfa-ehv-fivra-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_fivra_cpu0>;
+ };
+
+ pvccd-hv-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccd_hv_cpu0>;
+ };
+
+ pvpp-hbm-cpu0-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvpp_hbm_cpu0>;
+ };
+
+ pvccinfaon-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccinfaon_cpu1>;
+ };
+
+ pvccfa-ehv-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_cpu1>;
+ };
+
+ pvnn-main-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvnn_main_cpu1>;
+ };
+
+ pvccin-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccin_cpu1>;
+ };
+
+ pvccfa-ehv-fivra-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_fivra_cpu1>;
+ };
+
+ pvccd-hv-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccd_hv_cpu1>;
+ };
+
+ pvpp-hbm-cpu1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvpp_hbm_cpu1>;
+ };
+
+ pvccinfaon-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccinfaon_cpu2>;
+ };
+
+ pvccfa-ehv-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_cpu2>;
+ };
+
+ pvnn-main-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvnn_main_cpu2>;
+ };
+
+ pvccin-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccin_cpu2>;
+ };
+
+ pvccfa-ehv-fivra-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_fivra_cpu2>;
+ };
+
+ pvccd-hv-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccd_hv_cpu2>;
+ };
+
+ pvpp-hbm-cpu2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvpp_hbm_cpu2>;
+ };
+
+ pvccinfaon-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccinfaon_cpu3>;
+ };
+
+ pvccfa-ehv-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_cpu3>;
+ };
+
+ pvnn-main-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvnn_main_cpu3>;
+ };
+
+ pvccin-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccin_cpu3>;
+ };
+
+ pvccfa-ehv-fivra-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccfa_ehv_fivra_cpu3>;
+ };
+
+ pvccd-hv-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvccd_hv_cpu3>;
+ };
+
+ pvpp-hbm-cpu3-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvpp_hbm_cpu3>;
+ };
+
+ p1v05-pch-aux-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p1v05_pch_aux>;
+ };
+
+ p1v8-pch-aux-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p1v8_pch_aux>;
+ };
+
+ p3v3-pch-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p3v3_pch>;
+ };
+
+ p5v-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p5v>;
+ };
+
+ smb-m2-ssb-ssd2 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_smb_m2_ssb_ssd2>;
+ };
+
+ smb-m2-ssb-ssd1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_smb_m2_ssb_ssd1>;
+ };
+
+ ssb-rssd01-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd01>;
+ };
+
+ ssb-rssd01-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd01>;
+ };
+
+ ssb-rssd02-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd02>;
+ };
+
+ ssb-rssd02-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd02>;
+ };
+
+ ssb-rssd03-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd03>;
+ };
+
+ ssb-rssd03-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd03>;
+ };
+
+ ssb-rssd04-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd04>;
+ };
+
+ ssb-rssd04-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd04>;
+ };
+
+ ssb-rssd05-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd05>;
+ };
+
+ ssb-rssd05-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd05>;
+ };
+
+ ssb-rssd06-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd06>;
+ };
+
+ ssb-rssd06-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd06>;
+ };
+
+ ssb-rssd07-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd07>;
+ };
+
+ ssb-rssd07-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd07>;
+ };
+
+ ssb-rssd08-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd08>;
+ };
+
+ ssb-rssd08-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd08>;
+ };
+
+ ssb-rssd09-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd09>;
+ };
+
+ ssb-rssd09-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd09>;
+ };
+
+ ssb-rssd10-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd10>;
+ };
+
+ ssb-rssd10-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd10>;
+ };
+
+ ssb-rssd11-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd11>;
+ };
+
+ ssb-rssd11-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd11>;
+ };
+
+ ssb-rssd12-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd12>;
+ };
+
+ ssb-rssd12-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd12>;
+ };
+
+ ssb-rssd13-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd13>;
+ };
+
+ ssb-rssd13-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd13>;
+ };
+
+ ssb-rssd14-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd14>;
+ };
+
+ ssb-rssd14-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd14>;
+ };
+
+ ssb-rssd15-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd15>;
+ };
+
+ ssb-rssd15-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd15>;
+ };
+
+ ssb-rssd16-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd16>;
+ };
+
+ ssb-rssd16-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd16>;
+ };
+
+ ssb-rssd17-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd17>;
+ };
+
+ ssb-rssd17-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd17>;
+ };
+
+ ssb-rssd18-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd18>;
+ };
+
+ ssb-rssd18-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd18>;
+ };
+
+ ssb-rssd19-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd19>;
+ };
+
+ ssb-rssd19-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd19>;
+ };
+
+ ssb-rssd20-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd20>;
+ };
+
+ ssb-rssd20-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd20>;
+ };
+
+ ssb-rssd21-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd21>;
+ };
+
+ ssb-rssd21-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd21>;
+ };
+
+ ssb-rssd22-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd22>;
+ };
+
+ ssb-rssd22-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd22>;
+ };
+
+ ssb-rssd23-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd23>;
+ };
+
+ ssb-rssd23-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd23>;
+ };
+
+ ssb-rssd24-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd24>;
+ };
+
+ ssb-rssd24-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd24>;
+ };
+
+ ssb-rssd25-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd25>;
+ };
+
+ ssb-rssd25-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd25>;
+ };
+
+ ssb-rssd26-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd26>;
+ };
+
+ ssb-rssd26-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd26>;
+ };
+
+ ssb-rssd27-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd27>;
+ };
+
+ ssb-rssd27-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd27>;
+ };
+
+ ssb-rssd28-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd28>;
+ };
+
+ ssb-rssd28-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd28>;
+ };
+
+ ssb-rssd29-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd29>;
+ };
+
+ ssb-rssd29-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd29>;
+ };
+
+ ssb-rssd30-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd30>;
+ };
+
+ ssb-rssd30-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd30>;
+ };
+
+ ssb-rssd31-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd31>;
+ };
+
+ ssb-rssd31-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd31>;
+ };
+
+ ssb-rssd32-sw0 {
+ compatible = "regulator-output";
+ vout-supply = <&sw0_ssb_rssd32>;
+ };
+
+ ssb-rssd32-sw1 {
+ compatible = "regulator-output";
+ vout-supply = <&sw1_ssb_rssd32>;
+ };
+
+ p3v3-nic-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p3v3_nic>;
+ };
+
+ p1v8-nic-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p1v8_nic>;
+ };
+
+ p1v2-nic-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&p1v2_nic>;
+ };
+
+ pvcore-nic1-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvcore_nic1>;
+ };
+
+ pvcore-nic2-consumer {
+ compatible = "regulator-output";
+ vout-supply = <&pvcore_nic2>;
+ };
+};
+
+&peci0 {
+ status = "okay";
+};
+
+&vuart1 {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
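+ /* Snoop host BIOS POST codes written to LPC I/O ports 0x80 and 0x81 */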
+ snoop-ports = <0x80>, <0x81>;
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+#include "openbmc-flash-layout-64.dtsi"
+ };
+
+ flash@1 {
+ status = "okay";
+ m25p,fast-read;
+ label = "alt-bmc";
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+#include "openbmc-flash-layout-64-alt.dtsi"
+ };
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
+&uart5 {
+ status = "disabled";
+};
+
+&gpio1 {
+ status = "disabled";
+};
+
+&video {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_vgahs_default &pinctrl_vgavs_default>;
+};
+
+&mdio2 {
+ status = "okay";
+
+ ethphy2: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(V, 7) GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+};
+
+&mdio3 {
+ status = "okay";
+
+ ethphy3: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(G, 2) GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+};
+
+&mac2 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii3_default>;
+};
+
+&mac3 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii4_default>;
+};
+
+&adc0 {
+ status = "okay";
+ vref-supply = <&p1v8_bmc_aux>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default
+ &pinctrl_adc1_default
+ &pinctrl_adc2_default
+ &pinctrl_adc3_default
+ &pinctrl_adc4_default
+ &pinctrl_adc5_default
+ &pinctrl_adc6_default
+ &pinctrl_adc7_default>;
+};
+
+&adc1 {
+ status = "okay";
+ vref-supply = <&p1v8_bmc_aux>;
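+ /* Battery sensing uses ADC1 channel 7, which reads the RTC battery via voltage-divider8 */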
+ aspeed,battery-sensing;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc8_default
+ &pinctrl_adc9_default
+ &pinctrl_adc10_default
+ &pinctrl_adc11_default
+ &pinctrl_adc12_default
+ &pinctrl_adc13_default
+ &pinctrl_adc15_default>;
+};
+
+&kcs3 {
+ status = "okay";
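+ /* Host IPMI KCS interface at LPC I/O port 0xca2 */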
+ aspeed,lpc-io-reg = <0xca2>;
+};
+
+&gpio0 {
+ status = "okay";
+ gpio-line-names =
+ /* A0 - A7 */
+ "", "", "", "", "", "", "", "",
+ /* B0 - B7 */
+ "", "", "FM_ADR_TRIGGER_R_N", "RST_PLTRST_BUF_N", "BMC_TPM_RESET_N", "BMC_TPM_IRQ_N",
+ "PCH_TPM_RESET_N", "PCH_TPM_IRQ_N",
+ /* C0 - C7 */
+ "", "", "", "", "", "", "", "",
+ /* D0 - D7 */
+ "", "", "", "", "", "", "", "",
+ /* E0 - E7 */
+ "", "", "", "", "", "", "", "",
+ /* F0 - F7 */
+ "", "", "", "BMC_MUX_CPU1_RST_INT_N", "BMC_MUX_CPU2_RST_INT_N", "", "", "",
+ /* G0 - G7 */
+ "FM_SSD_CLK_DRVR1_EN", "FM_CK440Q_DEV_EN", "BMC_MAC1_RESET_N", "FM_DB2000_DEV_EN",
+ "FM_CPU_RMCA_LVT3_N", "FM_CPU_CATERR_LVT3_N", "FM_DBP_PRESENT_N", "",
+ /* H0 - H7 */
+ "SMB_SVC_PEX_RSSD17_32_INT", "LED_BMC_RDY", "RST_DBP_N", "", "", "", "", "",
+ /* I0 - I7 */
+ "JTAG_MUX_MODE_SEL", "JTAG_MUX_TRANS_ENBL", "JTAG_MUX_LSP_SEL5", "JTAG_MUX_MSTR_SEL",
+ "JTAG_MUX_LSP_SEL3", "", "JTAG_MUX_ENBL_N", "JTAG_MUX_RST_N",
+ /* J0 - J7 */
+ "", "", "", "", "", "", "", "",
+ /* K0 - K7 */
+ "", "", "", "", "", "", "", "",
+ /* L0 - L7 */
+ "", "", "", "", "RST_RTCRST_N", "RST_SRTCRST_N", "", "",
+ /* M0 - M7 */
+ "BMC_UART1_CTS_N", "BMC_UART1_DCD_N", "BMC_UART1_DSR_N", "BMC_UART1_RI_N",
+ "BMC_UART1_DTR_N", "BMC_UART1_RTS_N", "", "",
+ /* N0 - N7 */
+ "IRQ_BMC_PCH_NMI", "", "FM_PCH_BMC_THERMTRIP_N", "FM_BIOS_POST_CMPLT_N", "RST_PLTRST_N",
+ "FM_FLASH_SEC_OVRD", "FM_SMI_ACTIVE_N", "PWRGD_DBP",
+ /* O0 - O7 */
+ "CATERR_CPU2_EN", "H_LVT1_THERMTRIP_N", "CATERR_CPU3_EN", "SMB_SVC_PEX_CPU0_LED_INT",
+ "H_LVT1_MEMTRIP_N", "", "CATERR_CPU1_EN", "FM_PCH_ADR_COMPLETE_N",
+ /* P0 - P7 */
+ "PWRGD_SYS_PWROK", "PWRGD_PCH_PWROK", "BMC_MUX_CPU3_RST_INT_N", "BMC_MUX_SVC_RSSD_INT",
+ "FM_SLPS4_N", "IRQ_SML0_ALERT_N", "FM_SLPS3_N", "LED_BMC_HB",
+ /* Q0 - Q7 */
+ "", "PEX_BMC_RST", "PEX_VR_CTRL_RST", "PEX_NIC_RST", "PEX_CPU0_LED_RST", "PEX_CPU1_LED_RST",
+ "PEX_CPU2_LED_RST", "PEX_CPU3_LED_RST",
+ /* R0 - R7 */
+ "BMC_MUX_FANSSB_RSSD17_32_RST_INT_N", "BMC_MUX_FANPWM_RSSD01_16_RST_INT_N",
+ "BMC_MUX_SVC_VR_RST_INT_N", "BMC_MUX_NIC_RST_INT_N", "BMC_MUX_SVC_EXP_RST_INT_N",
+ "FM_CPU_ERR2_LVT3_N", "BMC_MUX_CPU0_RST_INT_N", "BMC_MUX_M2_RST_INT_N",
+ /* S0 - S7 */
+ "SMB_SVC_PEX_RSSD01_16_INT", "RST_PCH_RSMRST_R_N", "", "", "BMC_ROT_FPGA_RESET_N",
+ "FM_SSD_CLK_DRVR0_EN", "", "",
+ /* T0 - T7 */
+ "", "", "", "", "", "", "", "",
+ /* U0 - U7 */
+ "", "", "", "", "", "", "", "",
+ /* V0 - V7 */
+ "BMC_PEX_IRQ_INT", "RTC_BATT_TEST", "SMB_PEX_VR_CTRL_INT", "SMB_SVC_PEX_CPU3_LED_INT",
+ "PWRGD_CPUPWRGD", "SMB_SVC_PEX_CPU2_LED_INT", "SMB_SVC_PEX_CPU1_LED_INT",
+ "BMC_MAC0_RESET_N",
+ /* W0 - W7 */
+ "", "", "", "", "", "", "", "",
+ /* X0 - X7 */
+ "", "", "", "", "", "", "", "",
+ /* Y0 - Y7 */
+ "FM_THROTTLE_N", "FM_PASSWORD_CLEAR_N", "H_LVT3_CATERR_DLY_N", "FM_CPU_OL_INT_R_N", "", "",
+ "", "",
+ /* Z0 - Z7 */
+ "FM_CPU_ERR0_LVT3_N", "FM_CPU_ERR1_LVT3_N", "BMC_MUX_VR_PCH_CPU_RST_INT_N",
+ "JTAG_MUX_LSP_SEL1", "", "JTAG_MUX_LSP_SEL4", "JTAG_MUX_LSP_SEL2", "";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio0_unbiased_default>;
+};
+
+&pinctrl {
+ pinctrl_gpio0_unbiased_default: gpio_default {
+ pins = "AB15", "AD14", "R23", "A18", "AD24", "AD15", "AE14", "AC15", "U25", "AA24",
+ "V24", "W26", "AA23", "V26", "U24", "V25", "AE15", "C15", "F15";
+ bias-disable;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ bmc_mux_nic: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 3) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_nic: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 3) GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <19 1>, <22 6>, <30 6>, <38 2>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "IRQ_NIC2_OVT_WRNG", "FM_NIC2_ALLSTANDBY_N", "IRQ_NIC2_OVT_SHTDN",
+ "SMB_VR_PVCORE_NIC2_ALERT_N", "FM_NIC2_PERST1_N",
+ "SMB_NIC2_ALERT_N", "FM_NIC2_PERST3_N", "FM_NIC2_PERST2_N",
+ /* GPORT1 */
+ "FM_NIC1_RST_N", "FM_NIC1_PERST0_N", "FM_NIC1_PERST2_N",
+ "FM_NIC1_PERST3_N", "SMB_NIC1_ALERT_N", "FM_NIC1_PERST1_N",
+ "SMB_VR_PVCORE_NIC1_ALERT_N", "IRQ_NIC1_OVT_SHTDN",
+ /* GPORT2 */
+ "SMB_VR_P3V3_NIC_ALERT_N", "FM_NIC2_FLASH_PRSNT",
+ "FM_NIC1_FLASH_PRSNT", "",
+ /* GPORT3 */
+ "FM_NIC2_PERST0_N", "FM_NIC2_RST_N", "", "", "", "", "", "",
+ /* GPORT4 */
+ "FM_NIC1_ALLSTANDBY_N", "IRQ_NIC1_OVT_WRNG", "", "", "", "", "", "",
+ /* GPORT5 */
+ "SMB_VR_P1V8_NIC_ALERT_N", "SMB_VR_P1V2_NIC_ALERT_N", "", "";
+
+ pinctrl-0 = <&U62160_pins>;
+ pinctrl-names = "default";
+ U62160_pins: cfg-pins {
+ pins = "gp03", "gp16", "gp20", "gp50", "gp51";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvcore_nic2: ir38263-pvcore-nic2@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "pvcore_nic2";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvcore_nic1: ir38263-pvcore-nic1@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "pvcore_nic1";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ p3v3_nic: ir38263-p3v3-nic@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "p3v3_nic";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ p1v2_nic: ir38263-p1v2-nic@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "p1v2_nic";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ p1v8_nic: ir38263-p1v8-nic@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "p1v8_nic";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ i2cmux1: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 7) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_m2_ssb_ssd1: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p3v3_aux>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "m2_ssb_ssd1:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_smb_m2_ssb_ssd1: sw0 {
+ shunt-resistor-micro-ohms = <12000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <2800000>;
+ regulator-name = "p3v3_m2_ssd1";
+ regulator-enable-ramp-delay = <10000>;
+ };
+ };
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_m2_ssb_ssd2: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <39 IRQ_TYPE_LEVEL_LOW>;
+ vss1-supply = <&p3v3_aux>;
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "m2_ssb_ssd2:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_smb_m2_ssb_ssd2: sw0 {
+ shunt-resistor-micro-ohms = <12000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <2800000>;
+ regulator-name = "p3v3_m2_ssd2";
+ regulator-enable-ramp-delay = <10000>;
+ };
+ };
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c4 {
+ status = "okay";
+ multi-master;
+ bus-frequency = <1000000>;
+
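+ /* IPMB: the BMC also listens as an I2C slave at 7-bit address 0x10 on this bus */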
+ bmc-slave@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+
+ i2c-protocol;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ i2cmux2: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(Z, 2) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ p1v05_pch_aux: ir38263-p1v05-pch-aux@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "p1v05_pch_aux";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ p1v8_pch_aux: ir38060-p1v8-pch-aux@40 {
+ compatible = "infineon,ir38060";
+ reg = <0x40>;
+
+ regulator-name = "p1v8_pch_aux";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c14 {
+ status = "okay";
+
+ i2cmux13: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 6) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_cpu0_event: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&smb_svc_pex_cpu0_led 16 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <14 2>, <21 1>, <25 3>, <33 1>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "PWRGD_CHD_CPU0", "PWRGD_CHC_CPU0",
+ "PWRGD_CHB_CPU0", "PWRGD_CHA_CPU0",
+ "PWRGD_CHE_CPU0", "PWRGD_CHF_CPU0",
+ "PWRGD_CHG_CPU0", "PWRGD_CHH_CPU0",
+ /* GPORT1 */
+ "SMB_VR_PVPP_HBM_CPU0_ALERT_N", "SMB_VR_PVCCINFAON_CPU0_ALERT_N",
+ "SMB_VR_PVNN_MAIN_CPU0_ALERT_N", "SMB_VR_PVCCD_HV_CPU0_ALERT_N",
+ "SMB_VR_PVCCIN_CPU0_ALERT_N", "SEL_SMB_DIMM_CPU0",
+ "", "",
+ /* GPORT2 */
+ "PWRGD_LVC3_CPU0_AB_DRAM_G", "PWRGD_LVC3_CPU0_CD_DRAM_G",
+ "PWRGD_LVC3_CPU0_EF_DRAM_G", "PWRGD_LVC3_CPU0_GH_DRAM_G",
+ /* GPORT3 */
+ "FM_CPU0_DISABLE_COD_N", "",
+ "RST_LVC3_CPU0_RESET_N", "PWRGD_LVC3_CPU0_PWRGOOD",
+ "PWRGD_PLT_AUX_CPU0_LVT3", "",
+ "", "",
+ /* GPORT4 */
+ "H_LVT3_CPU0_PROCHOT_N", "H_LVT3_CPU0_MEMHOT_IN_N",
+ "H_LVT3_CPU0_MEMHOT_OUT_N", "H_LVT3_CPU0_MEMTRIP_OUT_N",
+ "H_LVT3_CPU0_THERMTRIP_OUT_N", "",
+ "H_LVT3_CPU0_NMI", "FM_S3M_CPU0_CD_INIT_ERROR",
+ /* GPORT5 */
+ "FM_CPU0_PKG_ID0", "FM_CPU0_PKG_ID1",
+ "FM_CPU0_PROC_ID0", "FM_CPU0_PROC_ID1";
+
+ pinctrl-0 = <&U62080_pins>;
+ pinctrl-names = "default";
+ U62080_pins: cfg-pins {
+ pins = "gp10", "gp11", "gp12", "gp13", "gp14";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvccinfaon-pvccfa-cpu0@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu0_event>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccinfaon_cpu0: vout0 {
+ regulator-name = "pvccinfaon_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_cpu0: vout1 {
+ regulator-name = "pvccfa_ehv_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ tda38640-pvnn-main-cpu0@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu0_event>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvnn_main_cpu0: vout {
+ regulator-name = "pvnn_main_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mp2973-pvccin-pvccfa-cpu0@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu0_event>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccin_cpu0: vout0 {
+ regulator-name = "pvccin_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_fivra_cpu0: vout1 {
+ regulator-name = "pvccfa_ehv_fivra_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvccd-hv-cpu0@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu0_event>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ infineon,en-pin-fixed-level;
+
+ regulators {
+ pvccd_hv_cpu0: vout {
+ regulator-name = "pvccd_hv_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvpp-hbm-cpu0@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu0_event>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvpp_hbm_cpu0: vout {
+ regulator-name = "pvpp_hbm_cpu0";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ i2cmux4: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(F, 3) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_cpu1_event: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&smb_svc_pex_cpu1_led 16 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <14 2>, <21 1>, <25 3>, <33 1>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "PWRGD_CHD_CPU1", "PWRGD_CHC_CPU1",
+ "PWRGD_CHB_CPU1", "PWRGD_CHA_CPU1",
+ "PWRGD_CHE_CPU1", "PWRGD_CHF_CPU1",
+ "PWRGD_CHG_CPU1", "PWRGD_CHH_CPU1",
+ /* GPORT1 */
+ "SMB_VR_PVPP_HBM_CPU1_ALERT_N", "SMB_VR_PVCCINFAON_CPU1_ALERT_N",
+ "SMB_VR_PVNN_MAIN_CPU1_ALERT_N", "SMB_VR_PVCCD_HV_CPU1_ALERT_N",
+ "SMB_VR_PVCCIN_CPU1_ALERT_N", "SEL_SMB_DIMM_CPU1",
+ "", "",
+ /* GPORT2 */
+ "PWRGD_LVC3_CPU1_AB_DRAM_G", "PWRGD_LVC3_CPU1_CD_DRAM_G",
+ "PWRGD_LVC3_CPU1_EF_DRAM_G", "PWRGD_LVC3_CPU1_GH_DRAM_G",
+ /* GPORT3 */
+ "FM_CPU1_DISABLE_COD_N", "",
+ "RST_LVC3_CPU1_RESET_N", "PWRGD_LVC3_CPU1_PWRGOOD",
+ "PWRGD_PLT_AUX_CPU1_LVT3", "",
+ "", "",
+ /* GPORT4 */
+ "H_LVT3_CPU1_PROCHOT_N", "H_LVT3_CPU1_MEMHOT_IN_N",
+ "H_LVT3_CPU1_MEMHOT_OUT_N", "H_LVT3_CPU1_MEMTRIP_OUT_N",
+ "H_LVT3_CPU1_THERMTRIP_OUT_N", "",
+ "H_LVT3_CPU1_NMI", "FM_S3M_CPU1_CD_INIT_ERROR",
+ /* GPORT5 */
+ "FM_CPU1_PKG_ID0", "FM_CPU1_PKG_ID1",
+ "FM_CPU1_PROC_ID0", "FM_CPU1_PROC_ID1";
+
+ pinctrl-0 = <&U62090_pins>;
+ pinctrl-names = "default";
+ U62090_pins: cfg-pins {
+ pins = "gp10", "gp11", "gp12", "gp13", "gp14";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvccinfaon-pvccfa-cpu1@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu1_event>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccinfaon_cpu1: vout0 {
+ regulator-name = "pvccinfaon_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_cpu1: vout1 {
+ regulator-name = "pvccfa_ehv_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ tda38640-pvnn-main-cpu1@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu1_event>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvnn_main_cpu1: vout {
+ regulator-name = "pvnn_main_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mp2973-pvccin-pvccfa-cpu1@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu1_event>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccin_cpu1: vout0 {
+ regulator-name = "pvccin_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_fivra_cpu1: vout1 {
+ regulator-name = "pvccfa_ehv_fivra_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvccd-hv-cpu1@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu1_event>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ infineon,en-pin-fixed-level;
+
+ regulators {
+ pvccd_hv_cpu1: vout {
+ regulator-name = "pvccd_hv_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvpp-hbm-cpu1@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu1_event>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvpp_hbm_cpu1: vout {
+ regulator-name = "pvpp_hbm_cpu1";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ i2cmux3: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_cpu2_event: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&smb_svc_pex_cpu2_led 16 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <14 2>, <21 1>, <25 3>, <33 1>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "PWRGD_CHD_CPU2", "PWRGD_CHC_CPU2",
+ "PWRGD_CHB_CPU2", "PWRGD_CHA_CPU2",
+ "PWRGD_CHE_CPU2", "PWRGD_CHF_CPU2",
+ "PWRGD_CHG_CPU2", "PWRGD_CHH_CPU2",
+ /* GPORT1 */
+ "SMB_VR_PVPP_HBM_CPU2_ALERT_N", "SMB_VR_PVCCINFAON_CPU2_ALERT_N",
+ "SMB_VR_PVNN_MAIN_CPU2_ALERT_N", "SMB_VR_PVCCD_HV_CPU2_ALERT_N",
+ "SMB_VR_PVCCIN_CPU2_ALERT_N", "SEL_SMB_DIMM_CPU2",
+ "", "",
+ /* GPORT2 */
+ "PWRGD_LVC3_CPU2_AB_DRAM_G", "PWRGD_LVC3_CPU2_CD_DRAM_G",
+ "PWRGD_LVC3_CPU2_EF_DRAM_G", "PWRGD_LVC3_CPU2_GH_DRAM_G",
+ /* GPORT3 */
+ "FM_CPU2_DISABLE_COD_N", "",
+ "RST_LVC3_CPU2_RESET_N", "PWRGD_LVC3_CPU2_PWRGOOD",
+ "PWRGD_PLT_AUX_CPU2_LVT3", "",
+ "", "",
+ /* GPORT4 */
+ "H_LVT3_CPU2_PROCHOT_N", "H_LVT3_CPU2_MEMHOT_IN_N",
+ "H_LVT3_CPU2_MEMHOT_OUT_N", "H_LVT3_CPU2_MEMTRIP_OUT_N",
+ "H_LVT3_CPU2_THERMTRIP_OUT_N", "",
+ "H_LVT3_CPU2_NMI", "FM_S3M_CPU2_CD_INIT_ERROR",
+ /* GPORT5 */
+ "FM_CPU2_PKG_ID0", "FM_CPU2_PKG_ID1",
+ "FM_CPU2_PROC_ID0", "FM_CPU2_PROC_ID1";
+
+ pinctrl-0 = <&U62100_pins>;
+ pinctrl-names = "default";
+ U62100_pins: cfg-pins {
+ pins = "gp10", "gp11", "gp12", "gp13", "gp14";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvccinfaon-pvccfa-cpu2@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu2_event>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccinfaon_cpu2: vout0 {
+ regulator-name = "pvccinfaon_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_cpu2: vout1 {
+ regulator-name = "pvccfa_ehv_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ tda38640-pvnn-main-cpu2@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu2_event>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvnn_main_cpu2: vout {
+ regulator-name = "pvnn_main_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mp2973-pvccin-pvccfa-cpu2@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu2_event>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccin_cpu2: vout0 {
+ regulator-name = "pvccin_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_fivra_cpu2: vout1 {
+ regulator-name = "pvccfa_ehv_fivra_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvccd-hv-cpu2@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu2_event>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ infineon,en-pin-fixed-level;
+
+ regulators {
+ pvccd_hv_cpu2: vout {
+ regulator-name = "pvccd_hv_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvpp-hbm-cpu2@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu2_event>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvpp_hbm_cpu2: vout {
+ regulator-name = "pvpp_hbm_cpu2";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c12 {
+ status = "okay";
+
+ i2cmux22: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(P, 2) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_cpu3_event: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&smb_svc_pex_cpu3_led 16 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <14 2>, <21 1>, <25 3>, <33 1>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "PWRGD_CHD_CPU3", "PWRGD_CHC_CPU3",
+ "PWRGD_CHB_CPU3", "PWRGD_CHA_CPU3",
+ "PWRGD_CHE_CPU3", "PWRGD_CHF_CPU3",
+ "PWRGD_CHG_CPU3", "PWRGD_CHH_CPU3",
+ /* GPORT1 */
+ "SMB_VR_PVPP_HBM_CPU3_ALERT_N", "SMB_VR_PVCCINFAON_CPU3_ALERT_N",
+ "SMB_VR_PVNN_MAIN_CPU3_ALERT_N", "SMB_VR_PVCCD_HV_CPU3_ALERT_N",
+ "SMB_VR_PVCCIN_CPU3_ALERT_N", "SEL_SMB_DIMM_CPU3",
+ "", "",
+ /* GPORT2 */
+ "PWRGD_LVC3_CPU3_AB_DRAM_G", "PWRGD_LVC3_CPU3_CD_DRAM_G",
+ "PWRGD_LVC3_CPU3_EF_DRAM_G", "PWRGD_LVC3_CPU3_GH_DRAM_G",
+ /* GPORT3 */
+ "FM_CPU3_DISABLE_COD_N", "",
+ "RST_LVC3_CPU3_RESET_N", "PWRGD_LVC3_CPU3_PWRGOOD",
+ "PWRGD_PLT_AUX_CPU3_LVT3", "",
+ "", "",
+ /* GPORT4 */
+ "H_LVT3_CPU3_PROCHOT_N", "H_LVT3_CPU3_MEMHOT_IN_N",
+ "H_LVT3_CPU3_MEMHOT_OUT_N", "H_LVT3_CPU3_MEMTRIP_OUT_N",
+ "H_LVT3_CPU3_THERMTRIP_OUT_N", "",
+ "H_LVT3_CPU3_NMI", "FM_S3M_CPU3_CD_INIT_ERROR",
+ /* GPORT5 */
+ "FM_CPU3_PKG_ID0", "FM_CPU3_PKG_ID1",
+ "FM_CPU3_PROC_ID0", "FM_CPU3_PROC_ID1";
+
+ pinctrl-0 = <&U62110_pins>;
+ pinctrl-names = "default";
+ U62110_pins: cfg-pins {
+ pins = "gp10", "gp11", "gp12", "gp13", "gp14";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pvccinfaon-pvccfa-cpu3@58 {
+ compatible = "mps,mp2971";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu3_event>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccinfaon_cpu3: vout0 {
+ regulator-name = "pvccinfaon_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_cpu3: vout1 {
+ regulator-name = "pvccfa_ehv_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ tda38640-pvnn-main-cpu3@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu3_event>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvnn_main_cpu3: vout {
+ regulator-name = "pvnn_main_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mp2973-pvccin-pvccfa-cpu3@58 {
+ compatible = "mps,mp2973";
+ reg = <0x58>;
+ interrupt-parent = <&smb_pex_cpu3_event>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvccin_cpu3: vout0 {
+ regulator-name = "pvccin_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ pvccfa_ehv_fivra_cpu3: vout1 {
+ regulator-name = "pvccfa_ehv_fivra_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvccd-hv-cpu3@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu3_event>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ infineon,en-pin-fixed-level;
+
+ regulators {
+ pvccd_hv_cpu3: vout {
+ regulator-name = "pvccd_hv_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tda38640-pvpp-hbm-cpu3@40 {
+ compatible = "infineon,tda38640";
+ reg = <0x40>;
+ interrupt-parent = <&smb_pex_cpu3_event>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ pvpp_hbm_cpu3: vout {
+ regulator-name = "pvpp_hbm_cpu3";
+ regulator-enable-ramp-delay = <200>;
+ };
+ };
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2c15 {
+ status = "okay";
+
+ i2cmux14: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 1) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux15: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 11 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux16: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 2 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux17: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 0 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux18: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 3 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux19: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 9 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_rssd17_32: pinctrl@20 {
+ compatible = "cypress,cy8c9560";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&bmc_pex_irq>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&bmc_pex_irq 19 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <48 12>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "RSSD17_SMBRST_N", "RSSD18_SMBRST_N",
+ "RSSD19_SMBRST_N", "RSSD20_SMBRST_N",
+ "RSSD21_SMBRST_N", "RSSD22_SMBRST_N",
+ "RSSD23_SMBRST_N", "RSSD24_SMBRST_N",
+ /* GPORT1 */
+ "RSSD25_SMBRST_N", "RSSD26_SMBRST_N",
+ "RSSD27_SMBRST_N", "RSSD28_SMBRST_N",
+ "RSSD29_SMBRST_N", "RSSD30_SMBRST_N",
+ "RSSD31_SMBRST_N", "RSSD32_SMBRST_N",
+ /* GPORT2 */
+ "RSSD17_PWRDIS", "RSSD18_PWRDIS",
+ "RSSD19_PWRDIS", "RSSD20_PWRDIS",
+ /* GPORT3 */
+ "RSSD21_PWRDIS", "RSSD22_PWRDIS",
+ "RSSD23_PWRDIS", "RSSD24_PWRDIS",
+ "RSSD25_PWRDIS", "RSSD26_PWRDIS",
+ "RSSD27_PWRDIS", "RSSD28_PWRDIS",
+ /* GPORT4 */
+ "RSSD29_PWRDIS", "RSSD30_PWRDIS",
+ "RSSD31_PWRDIS", "RSSD32_PWRDIS",
+ "RSSD17_RESET_N", "RSSD18_RESET_N",
+ "RSSD19_RESET_N", "RSSD20_RESET_N",
+ /* GPORT5 */
+ "RSSD21_RESET_N", "RSSD22_RESET_N",
+ "RSSD23_RESET_N", "RSSD24_RESET_N",
+ "RSSD25_RESET_N", "RSSD26_RESET_N",
+ "RSSD27_RESET_N", "RSSD28_RESET_N",
+ /* GPORT6 */
+ "RSSD29_RESET_N", "RSSD30_RESET_N",
+ "RSSD31_RESET_N", "RSSD32_RESET_N",
+ "", "",
+ "", "",
+ /* GPORT7 */
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux20: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 4 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux21: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 5 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+ };
+};
+
+&i2c8 {
+ status = "okay";
+
+ i2cmux5: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 0) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux6: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 16 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux7: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 7 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux8: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 1 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux9: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 10 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux10: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+ };
+ };
+
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_pex_rssd_01_16: pinctrl@20 {
+ compatible = "cypress,cy8c9560";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-parent = <&bmc_pex_irq>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&bmc_pex_irq 18 GPIO_ACTIVE_HIGH>;
+
+ gpio-reserved-ranges = <48 12>;
+
+ gpio-line-names =
+ /* GPORT0 */
+ "RSSD01_SMBRST_N", "RSSD02_SMBRST_N",
+ "RSSD03_SMBRST_N", "RSSD04_SMBRST_N",
+ "RSSD05_SMBRST_N", "RSSD06_SMBRST_N",
+ "RSSD07_SMBRST_N", "RSSD08_SMBRST_N",
+ /* GPORT1 */
+ "RSSD09_SMBRST_N", "RSSD10_SMBRST_N",
+ "RSSD11_SMBRST_N", "RSSD12_SMBRST_N",
+ "RSSD13_SMBRST_N", "RSSD14_SMBRST_N",
+ "RSSD15_SMBRST_N", "RSSD16_SMBRST_N",
+ /* GPORT2 */
+ "RSSD01_PWRDIS", "RSSD02_PWRDIS",
+ "RSSD03_PWRDIS", "RSSD04_PWRDIS",
+ /* GPORT3 */
+ "RSSD05_PWRDIS", "RSSD06_PWRDIS",
+ "RSSD07_PWRDIS", "RSSD08_PWRDIS",
+ "RSSD09_PWRDIS", "RSSD10_PWRDIS",
+ "RSSD11_PWRDIS", "RSSD12_PWRDIS",
+ /* GPORT4 */
+ "RSSD13_PWRDIS", "RSSD14_PWRDIS",
+ "RSSD15_PWRDIS", "RSSD16_PWRDIS",
+ "RSSD01_RESET_N", "RSSD02_RESET_N",
+ "RSSD03_RESET_N", "RSSD04_RESET_N",
+ /* GPORT5 */
+ "RSSD05_RESET_N", "RSSD06_RESET_N",
+ "RSSD07_RESET_N", "RSSD08_RESET_N",
+ "RSSD09_RESET_N", "RSSD10_RESET_N",
+ "RSSD11_RESET_N", "RSSD12_RESET_N",
+ /* GPORT6 */
+ "RSSD13_RESET_N", "RSSD14_RESET_N",
+ "RSSD15_RESET_N", "RSSD16_RESET_N",
+ "", "",
+ "", "",
+ /* GPORT7 */
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ };
+ };
+
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux11: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 12 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cmux12: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&bmc_pex_irq 14 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_aux>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+ };
+};
+
+&i2c13 {
+ status = "okay";
+
+ i2cmux23: mux@77 {
+ compatible = "maxim,max7357";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 4) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ };
+};
+
+&i2cmux23 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_pex_vr_ctrl: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(V, 2) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 2) GPIO_ACTIVE_HIGH>;
+ gpio-line-names =
+ /* GPORT0 */
+ "BCM0_INPUT_DISABLE_N", "SMB_VR_P3V3_AUX_ALERT_N",
+ "SMB_PEX_CPU1_EVENT_INT", "SMB_PEX_CPU2_EVENT_INT",
+ "DPIC0_VOLTAGE_DETECTB_N", "DPIC0_VOLTAGE_DETECTA_N",
+ "DPIC1_VOLTAGE_DETECTA_N", "DPIC1_VOLTAGE_DETECTB_N",
+ /* GPORT1 */
+ "SMB_PEX_NIC_INT", "SMB_VR_P1V05_PCH_AUX_ALERT_N",
+ "SMB_PEX_CPU0_EVENT_INT", "SMB_PEX_CPU3_EVENT_INT",
+ "LED_ID_TPM", "PLUG_DETECT_TPM",
+ "PLUG_DETECT_M2_SSD_CARRIER1", "RST_M2_SSD1_PERST_N",
+ /* GPORT2 */
+ "LED_ID_BAT", "LED_ID_MGMT_PORT2",
+ "LED_ID_MGMT_PORT1", "SMB_VR_P5V_AUX_ALERT_N",
+ /* GPORT3 */
+ "SMB_VR_AUX_SSB_ALERT_N", "BCM1_INPUT_DISABLE_N",
+ "LED_ID_NIC1_PORT1", "LED_ID_NIC1_PORT2",
+ "LED_ID_NIC2_PORT1", "LED_ID_NIC2_PORT2",
+ "RST_M2_SSD2_PERST_N", "PLUG_DETECT_M2_SSD2",
+ /* GPORT4 */
+ "PLUG_DETECT_BAT", "PLUG_DETECT_M2_SSD1",
+ "M2_SSD1_SSB_ALERT_N", "BCM2_INPUT_DISABLE_N",
+ "SMB_VR_P1V8_PCH_AUX_ALERT_N", "BCM3_INPUT_DISABLE_N",
+ "LED_PWR_DWR_BACK", "LED_ID_DWR_BACK_P",
+ /* GPORT5 */
+ "LED_ID_M2_SSD2", "LED_ID_M2_SSD1",
+ "PLUG_DETECT_M2_SSD_CARRIER2", "M2_SSD2_SSB_ALERT_N";
+
+ pinctrl-0 = <&U62120_input &U62120_input_pullup>;
+ pinctrl-names = "default";
+ U62120_input: input-pins {
+ pins = "gp10";
+ function = "gpio";
+ input-enable;
+ bias-disable;
+ };
+ U62120_input_pullup: input-pullup-pins {
+ pins = "gp01", "gp02", "gp03", "gp11", "gp12", "gp13",
+ "gp23", "gp30", "gp40", "gp42", "gp44", "gp53";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bmc_pex_irq: pinctrl@20 {
+ compatible = "cypress,cy8c9520";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(V, 0) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 1) GPIO_ACTIVE_HIGH>;
+ gpio-line-names =
+ /* GPORT0 */
+ "SMB_MUX_PWM_FANGRP2_RST_INT_N", "SMB_MUX_SSB_FANGRP2_RST_INT_N",
+ "SMB_MUX_PWM_FANGRP1_RST_INT_N", "SMB_MUX_SSB_RSSD01_08_RST_INT_N",
+ "SMB_MUX_RSSD01_08_RST_INT_N", "SMB_MUX_RSSD09_16_RST_INT_N",
+ "SMB_PEX_RSSD01_16_INT", "SMB_MUX_SSB_FANGRP1_RST_INT_N",
+ /* GPORT1 */
+ "SMB_SVC_PEX_FAN_ALERT_INT", "SMB_MUX_SSB_RSSD09_16_RST_INT_N",
+ "SMB_MUX_SSB_RSSD17_24_RST_INT_N", "SMB_MUX_PWM_FANGRP0_RST_INT_N",
+ "SMB_MUX_RSSD17_24_RST_INT_N", "SMB_PEX_RSSD17_32_INT",
+ "SMB_MUX_RSSD25_32_RST_INT_N", "SMB_MUX_SSB_RSSD25_32_RST_INT_N",
+ /* GPORT2 */
+ "SMB_MUX_SSB_FANGRP0_RST_INT_N", "PEX_FAN_ALERT_RST",
+ "PEX_RSSD01_16_RST", "PEX_RSSD17_32_RST";
+ pinctrl-0 = <&U60000_pins>;
+ pinctrl-names = "default";
+ U60000_pins: cfg-pins {
+ pins = "gp06", "gp10", "gp15";
+ function = "gpio";
+ input-enable;
+ bias-disable;
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2cmux24: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd-supply = <&p3v3_bmc_aux>;
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ vcc-supply = <&p3v3_bmc_aux>;
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2cmux25: mux@70 {
+ compatible = "maxim,max7357";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&i2cmux25 {
+ reset-gpios = <&gpio0 ASPEED_GPIO(R, 2) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ p5v_aux: ir38263-p5v-aux@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ regulator-name = "p5v_aux";
+ regulator-enable-ramp-delay = <2000>;
+ vin-supply = <&p12v>;
+ vbus-supply = <&p3v3_bmc_aux>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ p3v3_aux: ir38263-p3v3-aux@40 {
+ compatible = "infineon,ir38263";
+ reg = <0x40>;
+
+ vin-supply = <&p12v>;
+ regulator-name = "p3v3_aux";
+ /*
+ * 2msec for regulator + 18msec for board capacitance
+ * Note: Every IC has a PTC which slowly charges the bypass
+ * cap.
+ */
+ regulator-enable-ramp-delay = <200000>;
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ aux_ssb: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_pex_vr_ctrl>;
+ interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+ vss1-supply = <&p5v_aux>;
+ vss2-supply = <&p3v3_aux>;
+ regulators {
+ p5v: sw0 {
+ regulator-name = "p5v";
+ shunt-resistor-micro-ohms = <12000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <100000>;
+ };
+ p3v3_pch: sw1 {
+ regulator-name = "p3v3_pch";
+ shunt-resistor-micro-ohms = <12000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <100000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pli1209bc_p12v_a: regulator@5f {
+ compatible = "vicor,pli1209bc";
+ reg = <0x5f>;
+ regulators {
+ p12v_a: vout2 {
+ regulator-name = "bcm0";
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pli1209bc_p12v_b: regulator@5f {
+ compatible = "vicor,pli1209bc";
+ reg = <0x5f>;
+ regulators {
+ p12v_b: vout2 {
+ regulator-name = "bcm1";
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pli1209bc_p12v_c: regulator@5f {
+ compatible = "vicor,pli1209bc";
+ reg = <0x5f>;
+ regulators {
+ p12v_c: vout2 {
+ regulator-name = "bcm2";
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pli1209bc_p12v_d: regulator@5f {
+ compatible = "vicor,pli1209bc";
+ reg = <0x5f>;
+ regulators {
+ p12v_d: vout2 {
+ regulator-name = "bcm3";
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux24 {
+ reset-gpios = <&gpio0 ASPEED_GPIO(P, 3) (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ smb_svc_pex_rssd01_16: pinctrl@20 {
+ compatible = "cypress,cy8c9560";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(S, 0) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&smb_svc_pex_cpu0_led 17 GPIO_ACTIVE_HIGH>;
+ gpio-line-names =
+ /* GPORT0 */
+ "LED_ID_RSSD01", "LED_ID_RSSD02",
+ "LED_ID_RSSD03", "LED_ID_RSSD04",
+ "LED_ID_RSSD05", "LED_ID_RSSD06",
+ "LED_ID_RSSD07", "LED_ID_RSSD08",
+ /* GPORT1 */
+ "LED_ID_RSSD09", "LED_ID_RSSD10",
+ "LED_ID_RSSD11", "LED_ID_RSSD12",
+ "LED_ID_RSSD13", "LED_ID_RSSD14",
+ "LED_ID_RSSD15", "LED_ID_RSSD16",
+ /* GPORT2 */
+ "RSSD01_PRESENT_N", "RSSD02_PRESENT_N",
+ "RSSD03_PRESENT_N", "RSSD04_PRESENT_N",
+ /* GPORT3 */
+ "RSSD05_PRESENT_N", "RSSD06_PRESENT_N",
+ "RSSD07_PRESENT_N", "RSSD08_PRESENT_N",
+ "RSSD09_PRESENT_N", "RSSD10_PRESENT_N",
+ "RSSD11_PRESENT_N", "RSSD12_PRESENT_N",
+ /* GPORT4 */
+ "RSSD13_PRESENT_N", "RSSD14_PRESENT_N",
+ "RSSD15_PRESENT_N", "RSSD16_PRESENT_N",
+ "LED_ID_FAN_ASM01", "LED_ID_FAN_ASM02",
+ "LED_ID_FAN_ASM03", "LED_ID_FAN_ASM04",
+ /* GPORT5 */
+ "LED_ID_FAN_ASM05", "LED_ID_FAN_ASM06",
+ "PLUG_DETECT_FAN_ASM01", "PLUG_DETECT_FAN_ASM02",
+ "PLUG_DETECT_FAN_ASM03", "PLUG_DETECT_FAN_ASM04",
+ "PLUG_DETECT_FAN_ASM05", "PLUG_DETECT_FAN_ASM06",
+ /* GPORT6 */
+ "SSB_RSSD01_ALERT_N", "SSB_RSSD02_ALERT_N",
+ "SSB_RSSD03_ALERT_N", "SSB_RSSD04_ALERT_N",
+ "SSB_RSSD05_ALERT_N", "SSB_RSSD06_ALERT_N",
+ "SSB_RSSD07_ALERT_N", "SSB_RSSD08_ALERT_N",
+ /* GPORT7 */
+ "SSB_RSSD09_ALERT_N", "SSB_RSSD10_ALERT_N",
+ "SSB_RSSD11_ALERT_N", "SSB_RSSD12_ALERT_N",
+ "SSB_RSSD13_ALERT_N", "SSB_RSSD14_ALERT_N",
+ "SSB_RSSD15_ALERT_N", "SSB_RSSD16_ALERT_N";
+ pinctrl-0 = <&U65200_pins>;
+ pinctrl-names = "default";
+ U65200_pins: cfg-pins {
+ pins = "gp60", "gp61", "gp62",
+ "gp63", "gp64", "gp65", "gp66",
+ "gp67", "gp70", "gp71", "gp72",
+ "gp73", "gp74", "gp75", "gp76", "gp77";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_svc_pex_rssd17_32: pinctrl@20 {
+ compatible = "cypress,cy8c9560";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(H, 0) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&smb_svc_pex_cpu1_led 17 GPIO_ACTIVE_HIGH>;
+ gpio-line-names =
+ /* GPORT0 */
+ "LED_ID_RSSD17", "LED_ID_RSSD18",
+ "LED_ID_RSSD19", "LED_ID_RSSD20",
+ "LED_ID_RSSD21", "LED_ID_RSSD22",
+ "LED_ID_RSSD23", "LED_ID_RSSD24",
+ /* GPORT1 */
+ "LED_ID_RSSD25", "LED_ID_RSSD26",
+ "LED_ID_RSSD27", "LED_ID_RSSD28",
+ "LED_ID_RSSD29", "LED_ID_RSSD30",
+ "LED_ID_RSSD31", "LED_ID_RSSD32",
+ /* GPORT2 */
+ "RSSD17_PRESENT_N", "RSSD18_PRESENT_N",
+ "RSSD19_PRESENT_N", "RSSD20_PRESENT_N",
+ /* GPORT3 */
+ "RSSD21_PRESENT_N", "RSSD22_PRESENT_N",
+ "RSSD23_PRESENT_N", "RSSD24_PRESENT_N",
+ "RSSD25_PRESENT_N", "RSSD26_PRESENT_N",
+ "RSSD27_PRESENT_N", "RSSD28_PRESENT_N",
+ /* GPORT4 */
+ "RSSD29_PRESENT_N", "RSSD30_PRESENT_N",
+ "RSSD31_PRESENT_N", "RSSD32_PRESENT_N",
+ "LED_ID_FAN_ASM07", "LED_ID_FAN_ASM08",
+ "LED_ID_FAN_ASM09", "LED_ID_FAN_ASM10",
+ /* GPORT5 */
+ "LED_ID_FAN_ASM11", "LED_ID_FAN_ASM12",
+ "PLUG_DETECT_FAN_ASM07", "PLUG_DETECT_FAN_ASM08",
+ "PLUG_DETECT_FAN_ASM09", "PLUG_DETECT_FAN_ASM10",
+ "PLUG_DETECT_FAN_ASM11", "PLUG_DETECT_FAN_ASM12",
+ /* GPORT6 */
+ "SSB_RSSD17_ALERT_N", "SSB_RSSD18_ALERT_N",
+ "SSB_RSSD19_ALERT_N", "SSB_RSSD20_ALERT_N",
+ "SSB_RSSD21_ALERT_N", "SSB_RSSD22_ALERT_N",
+ "SSB_RSSD23_ALERT_N", "SSB_RSSD24_ALERT_N",
+ /* GPORT7 */
+ "SSB_RSSD25_ALERT_N", "SSB_RSSD26_ALERT_N",
+ "SSB_RSSD27_ALERT_N", "SSB_RSSD28_ALERT_N",
+ "SSB_RSSD29_ALERT_N", "SSB_RSSD30_ALERT_N",
+ "SSB_RSSD31_ALERT_N", "SSB_RSSD32_ALERT_N";
+ pinctrl-0 = <&U65300_pins>;
+ pinctrl-names = "default";
+ U65300_pins: cfg-pins {
+ pins = "gp60", "gp61", "gp62",
+ "gp63", "gp64", "gp65", "gp66",
+ "gp67", "gp70", "gp71", "gp72",
+ "gp73", "gp74", "gp75", "gp76",
+ "gp77";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_svc_pex_cpu1_led: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(V, 6) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 5) GPIO_ACTIVE_HIGH>;
+ gpio-reserved-ranges = <18 2>, <36 2>;
+ gpio-line-names =
+ /* GPORT0 */
+ "PLUG_DETECT_DIMM_C1E2", "PLUG_DETECT_DIMM_C1E1",
+ "PLUG_DETECT_DIMM_C1F2", "PLUG_DETECT_DIMM_C1F1",
+ "PLUG_DETECT_DIMM_C1G2", "PLUG_DETECT_DIMM_C1G1",
+ "PLUG_DETECT_DIMM_C1H2", "PLUG_DETECT_DIMM_C1H1",
+ /* GPORT1 */
+ "PLUG_DETECT_DIMM_C1D1", "PLUG_DETECT_DIMM_C1D2",
+ "PLUG_DETECT_DIMM_C1C1", "PLUG_DETECT_DIMM_C1C2",
+ "PLUG_DETECT_DIMM_C1B1", "PLUG_DETECT_DIMM_C1B2",
+ "PLUG_DETECT_DIMM_C1A1", "PLUG_DETECT_DIMM_C1A2",
+ /* GPORT2 */
+ "PEX_CPU1_EVENT_RST", "SVC_PEX_RSSD17_32_RST",
+ "", "",
+ /* GPORT3 */
+ "LED_ID_DIMM_C1E2", "LED_ID_DIMM_C1E1",
+ "LED_ID_DIMM_C1F2", "LED_ID_DIMM_C1F1",
+ "LED_ID_DIMM_C1G2", "LED_ID_DIMM_C1G1",
+ "LED_ID_DIMM_C1H2", "LED_ID_DIMM_C1H1",
+ /* GPORT4 */
+ "LED_ID_DIMM_C1A2", "LED_ID_DIMM_C1A1",
+ "LED_ID_DIMM_C1B2", "LED_ID_DIMM_C1B1",
+ "LED_ID_DIMM_C1C2", "LED_ID_DIMM_C1C1",
+ "LED_ID_DIMM_C1D2", "LED_ID_DIMM_C1D1",
+ /* GPORT5 */
+ "", "",
+ "FM_CPU1_SKTOCC_N", "LED_ID_CPU1";
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_svc_pex_fan_alert: pinctrl@20 {
+ compatible = "cypress,cy8c9560";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&bmc_pex_irq>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_aux>;
+ reset-gpios = <&bmc_pex_irq 17 GPIO_ACTIVE_HIGH>;
+ gpio-reserved-ranges = <24 3>, <51 9>;
+ gpio-line-names =
+ /* GPORT0 */
+ "FAN01_SSB_ALERT_N", "FAN02_SSB_ALERT_N",
+ "FAN03_SSB_ALERT_N", "FAN04_SSB_ALERT_N",
+ "FAN05_SSB_ALERT_N", "FAN06_SSB_ALERT_N",
+ "FAN07_SSB_ALERT_N", "FAN08_SSB_ALERT_N",
+ /* GPORT1 */
+ "FAN09_SSB_ALERT_N", "FAN10_SSB_ALERT_N",
+ "FAN11_SSB_ALERT_N", "FAN12_SSB_ALERT_N",
+ "FAN13_SSB_ALERT_N", "FAN14_SSB_ALERT_N",
+ "FAN15_SSB_ALERT_N", "FAN16_SSB_ALERT_N",
+ /* GPORT2 */
+ "FAN17_SSB_ALERT_N", "FAN18_SSB_ALERT_N",
+ "FAN19_SSB_ALERT_N", "FAN20_SSB_ALERT_N",
+ /* GPORT3 */
+ "FAN21_SSB_ALERT_N", "FAN22_SSB_ALERT_N",
+ "FAN23_SSB_ALERT_N", "FAN24_SSB_ALERT_N",
+ "", "",
+ "", "FAN01_PWM_ALERT_N",
+ /* GPORT4 */
+ "FAN02_PWM_ALERT_N", "FAN03_PWM_ALERT_N",
+ "FAN04_PWM_ALERT_N", "FAN05_PWM_ALERT_N",
+ "FAN06_PWM_ALERT_N", "FAN07_PWM_ALERT_N",
+ "FAN08_PWM_ALERT_N", "FAN09_PWM_ALERT_N",
+ /* GPORT5 */
+ "FAN10_PWM_ALERT_N", "FAN11_PWM_ALERT_N",
+ "FAN12_PWM_ALERT_N", "FAN13_PWM_ALERT_N",
+ "FAN14_PWM_ALERT_N", "FAN15_PWM_ALERT_N",
+ "FAN16_PWM_ALERT_N", "FAN17_PWM_ALERT_N",
+ /* GPORT6 */
+ "FAN18_PWM_ALERT_N", "FAN19_PWM_ALERT_N",
+ "FAN20_PWM_ALERT_N", "FAN21_PWM_ALERT_N",
+ "FAN22_PWM_ALERT_N", "FAN23_PWM_ALERT_N",
+ "FAN24_PWM_ALERT_N", "",
+ /* GPORT7 */
+ "", "",
+ "", "",
+ "", "",
+ "", "";
+ pinctrl-0 = <&U65600_pins>;
+ pinctrl-names = "default";
+ U65600_pins: cfg-pins {
+ pins = "gp00", "gp01", "gp02",
+ "gp03", "gp04", "gp05", "gp06",
+ "gp07", "gp10", "gp11", "gp12",
+ "gp13", "gp14", "gp15", "gp16",
+ "gp17", "gp20", "gp21", "gp22",
+ "gp23", "gp30", "gp31", "gp32",
+ "gp33", "gp37", "gp40", "gp41",
+ "gp42", "gp43", "gp44", "gp45",
+ "gp46", "gp47", "gp50", "gp51",
+ "gp52", "gp53", "gp54", "gp55",
+ "gp56", "gp57", "gp60", "gp61",
+ "gp62", "gp63", "gp64", "gp65",
+ "gp66";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_svc_pex_cpu2_led: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(V, 5) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 6) GPIO_ACTIVE_HIGH>;
+ gpio-reserved-ranges = <17 3>, <36 2>;
+ gpio-line-names =
+ /* GPORT0 */
+ "PLUG_DETECT_DIMM_C2E2", "PLUG_DETECT_DIMM_C2E1",
+ "PLUG_DETECT_DIMM_C2F2", "PLUG_DETECT_DIMM_C2F1",
+ "PLUG_DETECT_DIMM_C2G2", "PLUG_DETECT_DIMM_C2G1",
+ "PLUG_DETECT_DIMM_C2H2", "PLUG_DETECT_DIMM_C2H1",
+ /* GPORT1 */
+ "PLUG_DETECT_DIMM_C2D1", "PLUG_DETECT_DIMM_C2D2",
+ "PLUG_DETECT_DIMM_C2C1", "PLUG_DETECT_DIMM_C2C2",
+ "PLUG_DETECT_DIMM_C2B1", "PLUG_DETECT_DIMM_C2B2",
+ "PLUG_DETECT_DIMM_C2A1", "PLUG_DETECT_DIMM_C2A2",
+ /* GPORT2 */
+ "PEX_CPU2_EVENT_RST", "",
+ "", "",
+ /* GPORT3 */
+ "LED_ID_DIMM_C2E2", "LED_ID_DIMM_C2E1",
+ "LED_ID_DIMM_C2F2", "LED_ID_DIMM_C2F1",
+ "LED_ID_DIMM_C2G2", "LED_ID_DIMM_C2G1",
+ "LED_ID_DIMM_C2H2", "LED_ID_DIMM_C2H1",
+ /* GPORT4 */
+ "LED_ID_DIMM_C2A2", "LED_ID_DIMM_C2A1",
+ "LED_ID_DIMM_C2B2", "LED_ID_DIMM_C2B1",
+ "LED_ID_DIMM_C2C2", "LED_ID_DIMM_C2C1",
+ "LED_ID_DIMM_C2D2", "LED_ID_DIMM_C2D1",
+ /* GPORT5 */
+ "", "",
+ "FM_CPU2_SKTOCC_N", "LED_ID_CPU2";
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smb_svc_pex_cpu3_led: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(V, 3) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 7) GPIO_ACTIVE_HIGH>;
+ gpio-reserved-ranges = <17 3>;
+ gpio-line-names =
+ /* GPORT0 */
+ "PLUG_DETECT_DIMM_C3E2", "PLUG_DETECT_DIMM_C3E1",
+ "PLUG_DETECT_DIMM_C3F2", "PLUG_DETECT_DIMM_C3F1",
+ "PLUG_DETECT_DIMM_C3G2", "PLUG_DETECT_DIMM_C3G1",
+ "PLUG_DETECT_DIMM_C3H2", "PLUG_DETECT_DIMM_C3H1",
+ /* GPORT1 */
+ "PLUG_DETECT_DIMM_C3D1", "PLUG_DETECT_DIMM_C3D2",
+ "PLUG_DETECT_DIMM_C3C1", "PLUG_DETECT_DIMM_C3C2",
+ "PLUG_DETECT_DIMM_C3B1", "PLUG_DETECT_DIMM_C3B2",
+ "PLUG_DETECT_DIMM_C3A1", "PLUG_DETECT_DIMM_C3A2",
+ /* GPORT2 */
+ "PEX_CPU3_EVENT_RST", "",
+ "", "",
+ /* GPORT3 */
+ "LED_ID_DIMM_C3E2", "LED_ID_DIMM_C3E1",
+ "LED_ID_DIMM_C3F2", "LED_ID_DIMM_C3F1",
+ "LED_ID_DIMM_C3G2", "LED_ID_DIMM_C3G1",
+ "LED_ID_DIMM_C3H2", "LED_ID_DIMM_C3H1",
+ /* GPORT4 */
+ "LED_ID_DIMM_C3A2", "LED_ID_DIMM_C3A1",
+ "LED_ID_DIMM_C3B2", "LED_ID_DIMM_C3B1",
+ "LED_ID_DIMM_C3C2", "LED_ID_DIMM_C3C1",
+ "LED_ID_DIMM_C3D2", "LED_ID_DIMM_C3D1",
+ /* GPORT5 */
+ "LED_PWR_DWR_FRNT", "LED_ID_DWR_FRNT_P",
+ "FM_CPU3_SKTOCC_N", "LED_ID_CPU3";
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ smb_svc_pex_cpu0_led: pinctrl@20 {
+ compatible = "cypress,cy8c9540";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <ASPEED_GPIO(O, 3) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ vdd-supply = <&p3v3_bmc_aux>;
+ reset-gpios = <&gpio0 ASPEED_GPIO(Q, 4) GPIO_ACTIVE_HIGH>;
+ gpio-reserved-ranges = <18 2>, <36 2>;
+ gpio-line-names =
+ /* GPORT0 */
+ "PLUG_DETECT_DIMM_C0E2", "PLUG_DETECT_DIMM_C0E1",
+ "PLUG_DETECT_DIMM_C0F2", "PLUG_DETECT_DIMM_C0F1",
+ "PLUG_DETECT_DIMM_C0G2", "PLUG_DETECT_DIMM_C0G1",
+ "PLUG_DETECT_DIMM_C0H2", "PLUG_DETECT_DIMM_C0H1",
+ /* GPORT1 */
+ "PLUG_DETECT_DIMM_C0D1", "PLUG_DETECT_DIMM_C0D2",
+ "PLUG_DETECT_DIMM_C0C1", "PLUG_DETECT_DIMM_C0C2",
+ "PLUG_DETECT_DIMM_C0B1", "PLUG_DETECT_DIMM_C0B2",
+ "PLUG_DETECT_DIMM_C0A1", "PLUG_DETECT_DIMM_C0A2",
+ /* GPORT2 */
+ "PEX_CPU0_EVENT_RST", "SVC_PEX_RSSD01_16_RST",
+ "", "",
+ /* GPORT3 */
+ "LED_ID_DIMM_C0E2", "LED_ID_DIMM_C0E1",
+ "LED_ID_DIMM_C0F2", "LED_ID_DIMM_C0F1",
+ "LED_ID_DIMM_C0G2", "LED_ID_DIMM_C0G1",
+ "LED_ID_DIMM_C0H2", "LED_ID_DIMM_C0H1",
+ /* GPORT4 */
+ "LED_ID_DIMM_C0A2", "LED_ID_DIMM_C0A1",
+ "LED_ID_DIMM_C0B2", "LED_ID_DIMM_C0B1",
+ "LED_ID_DIMM_C0C2", "LED_ID_DIMM_C0C1",
+ "LED_ID_DIMM_C0D2", "LED_ID_DIMM_C0D1",
+ /* GPORT5 */
+ "", "",
+ "FM_CPU0_SKTOCC_N", "LED_ID_CPU0";
+ };
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ p1v2_bmc_aux_mon: pmic@60 {
+ compatible = "maxim,max8952";
+ reg = <0x60>;
+ max8952,default-mode = <3>;
+ max8952,dvs-mode-microvolt = <1100000>, <1100000>,
+ <1100000>, <1100000>;
+ max8952,sync-freq = <0>;
+ max8952,ramp-speed = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&i2cmux8 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan10_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan10_ssb: sw0 {
+ regulator-name = "fan10_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan12_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan12_ssb: sw0 {
+ regulator-name = "fan12_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan14_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan14_ssb: sw0 {
+ regulator-name = "fan14_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan16_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan16_ssb: sw0 {
+ regulator-name = "fan16_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan18_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan18_ssb: sw0 {
+ regulator-name = "fan18_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan20_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan20_ssb: sw0 {
+ regulator-name = "fan20_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan22_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan22_ssb: sw0 {
+ regulator-name = "fan22_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan24_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan24_ssb: sw0 {
+ regulator-name = "fan24_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux7 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan17_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan17_ssb: sw0 {
+ regulator-name = "fan17_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan19_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan19_ssb: sw0 {
+ regulator-name = "fan19_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan21_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan21_ssb: sw0 {
+ regulator-name = "fan21_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan23_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <22 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan23_ssb: sw0 {
+ regulator-name = "fan23_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan02_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan02_ssb: sw0 {
+ regulator-name = "fan02_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan04_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan04_ssb: sw0 {
+ regulator-name = "fan04_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan06_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan06_ssb: sw0 {
+ regulator-name = "fan06_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan08_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan08_ssb: sw0 {
+ regulator-name = "fan08_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux6 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan01_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan01_ssb: sw0 {
+ regulator-name = "fan01_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan03_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan03_ssb: sw0 {
+ regulator-name = "fan03_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan05_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan05_ssb: sw0 {
+ regulator-name = "fan05_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan07_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan07_ssb: sw0 {
+ regulator-name = "fan07_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan09_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan09_ssb: sw0 {
+ regulator-name = "fan09_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan11_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan11_ssb: sw0 {
+ regulator-name = "fan11_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan13_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan13_ssb: sw0 {
+ regulator-name = "fan13_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan15_ssb: regulator@3a {
+ compatible = "maxim,max5978";
+ reg = <0x3a>;
+ vss1-supply = <&p12v>;
+ interrupt-parent = <&smb_svc_pex_fan_alert>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw0_fan15_ssb: sw0 {
+ regulator-name = "fan15_supply";
+ shunt-resistor-micro-ohms = <10000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <3400000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux9 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd19: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <46 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd19:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd19: sw0 {
+ regulator-name = "rssd19_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd19: sw1 {
+ regulator-name = "rssd19_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd18: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <45 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd18:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd18: sw0 {
+ regulator-name = "rssd18_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd18: sw1 {
+ regulator-name = "rssd18_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd17: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd17:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd17: sw0 {
+ regulator-name = "rssd17_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd17: sw1 {
+ regulator-name = "rssd17_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd20: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <47 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd20:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd20: sw0 {
+ regulator-name = "rssd20_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd20: sw1 {
+ regulator-name = "rssd20_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd21: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <48 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd21:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd21: sw0 {
+ regulator-name = "rssd21_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd21: sw1 {
+ regulator-name = "rssd21_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd22: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <49 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd22:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd22: sw0 {
+ regulator-name = "rssd22_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd22: sw1 {
+ regulator-name = "rssd22_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd24: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <51 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd24:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd24: sw0 {
+ regulator-name = "rssd24_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd24: sw1 {
+ regulator-name = "rssd24_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd23: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <50 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd23:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd23: sw0 {
+ regulator-name = "rssd23_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd23: sw1 {
+ regulator-name = "rssd23_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux10 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd25: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <52 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd25:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd25: sw0 {
+ regulator-name = "rssd25_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd25: sw1 {
+ regulator-name = "rssd25_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd26: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <53 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd26:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd26: sw0 {
+ regulator-name = "rssd26_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd26: sw1 {
+ regulator-name = "rssd26_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd27: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <54 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd27:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd27: sw0 {
+ regulator-name = "rssd27_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd27: sw1 {
+ regulator-name = "rssd27_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd32: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd32:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd32: sw0 {
+ regulator-name = "rssd32_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd32: sw1 {
+ regulator-name = "rssd32_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd31: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <58 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd31:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd31: sw0 {
+ regulator-name = "rssd31_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd31: sw1 {
+ regulator-name = "rssd31_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd30: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <57 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd30:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd30: sw0 {
+ regulator-name = "rssd30_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd30: sw1 {
+ regulator-name = "rssd30_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd29: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <56 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd29:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd29: sw0 {
+ regulator-name = "rssd29_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd29: sw1 {
+ regulator-name = "rssd29_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd28: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd17_32>;
+ interrupts = <55 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd28:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd28: sw0 {
+ regulator-name = "rssd28_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd28: sw1 {
+ regulator-name = "rssd28_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux18 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd03: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <46 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd03:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd03: sw0 {
+ regulator-name = "rssd03_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd03: sw1 {
+ regulator-name = "rssd03_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd02: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <45 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd02:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd02: sw0 {
+ regulator-name = "rssd02_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd02: sw1 {
+ regulator-name = "rssd02_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd01: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd01:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd01: sw0 {
+ regulator-name = "rssd01_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd01: sw1 {
+ regulator-name = "rssd01_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd04: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <47 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd04:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd04: sw0 {
+ regulator-name = "rssd04_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd04: sw1 {
+ regulator-name = "rssd04_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd05: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <48 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd05:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd05: sw0 {
+ regulator-name = "rssd05_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd05: sw1 {
+ regulator-name = "rssd05_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd08: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <51 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd08:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd08: sw0 {
+ regulator-name = "rssd08_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd08: sw1 {
+ regulator-name = "rssd08_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd07: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <50 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd07:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd07: sw0 {
+ regulator-name = "rssd07_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd07: sw1 {
+ regulator-name = "rssd07_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd06: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <49 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd06:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd06: sw0 {
+ regulator-name = "rssd06_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd06: sw1 {
+ regulator-name = "rssd06_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
+
+&i2cmux19 {
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd14: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <57 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd14:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd14: sw0 {
+ regulator-name = "rssd14_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd14: sw1 {
+ regulator-name = "rssd14_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd13: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <56 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd13:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd13: sw0 {
+ regulator-name = "rssd13_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd13: sw1 {
+ regulator-name = "rssd13_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd12: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <55 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd12:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd12: sw0 {
+ regulator-name = "rssd12_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd12: sw1 {
+ regulator-name = "rssd12_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd11: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <54 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd11:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd11: sw0 {
+ regulator-name = "rssd11_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd11: sw1 {
+ regulator-name = "rssd11_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@4 {
+ reg = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd10: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <53 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd10:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd10: sw0 {
+ regulator-name = "rssd10_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd10: sw1 {
+ regulator-name = "rssd10_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@5 {
+ reg = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd09: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <52 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd09:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd09: sw0 {
+ regulator-name = "rssd09_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd09: sw1 {
+ regulator-name = "rssd09_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@6 {
+ reg = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd15: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <58 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd15:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd15: sw0 {
+ regulator-name = "rssd15_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd15: sw1 {
+ regulator-name = "rssd15_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+ i2c@7 {
+ reg = <7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssb_rssd16: regulator@3a {
+ compatible = "maxim,max5970";
+ reg = <0x3a>;
+ interrupt-parent = <&smb_svc_pex_rssd01_16>;
+ interrupts = <59 IRQ_TYPE_LEVEL_LOW>;
+
+ vss1-supply = <&p3v3_aux>;
+ vss2-supply = <&p12v>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "rssd16:green:power";
+ default-state = "off";
+ };
+ };
+
+ regulators {
+ sw0_ssb_rssd16: sw0 {
+ regulator-name = "rssd16_12v";
+ shunt-resistor-micro-ohms = <9000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <4500000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ sw1_ssb_rssd16: sw1 {
+ regulator-name = "rssd16_3v3";
+ shunt-resistor-micro-ohms = <100000>;
+ regulator-over-current-protection;
+ regulator-oc-protection-microamp = <410000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
index f3efecc7eb8d..360b9ce3c850 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts
@@ -370,17 +370,17 @@
/*K0-K7*/ "","","","","","","","",
/*L0-L7*/ "","","","","","","","bmc-ready",
/*M0-M7*/ "","","","","","","","",
- /*N0-N7*/ "","","","","","","","",
+ /*N0-N7*/ "fpga-debug-enable","","","","","","","",
/*O0-O7*/ "","","","","","","","",
/*P0-P7*/ "","","","","","","","bmc-hb",
- /*Q0-Q7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","pch-ready","",
/*R0-R7*/ "","","","","","","","",
/*S0-S7*/ "","","","","","","rear-enc-fault0","rear-enc-id0",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
/*V0-V7*/ "","rtc-battery-voltage-read-enable","","power-chassis-control","","","","",
/*W0-W7*/ "","","","","","","","",
- /*X0-X7*/ "","power-chassis-good","","","","","","",
+ /*X0-X7*/ "fpga-pgood","power-chassis-good","pch-pgood","","","","","",
/*Y0-Y7*/ "","","","","","","","",
/*Z0-Z7*/ "","","","","","","","";
};
@@ -398,6 +398,12 @@
clk-phase-mmc-hs200 = <180>, <180>;
};
+&sgpiom0 {
+ status = "okay";
+ ngpios = <128>;
+ bus-frequency = <1000000>;
+};
+
&ibt {
status = "okay";
};
@@ -464,6 +470,15 @@
aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
};
+&peci0 {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>, <0x81>;
+};
+
&i2c0 {
status = "okay";
@@ -666,22 +681,22 @@
status = "okay";
power-supply@58 {
- compatible = "ibm,cffps";
+ compatible = "intel,crps185";
reg = <0x58>;
};
power-supply@59 {
- compatible = "ibm,cffps";
+ compatible = "intel,crps185";
reg = <0x59>;
};
power-supply@5a {
- compatible = "ibm,cffps";
+ compatible = "intel,crps185";
reg = <0x5a>;
};
power-supply@5b {
- compatible = "ibm,cffps";
+ compatible = "intel,crps185";
reg = <0x5b>;
};
};
@@ -1007,6 +1022,7 @@
&i2c8 {
status = "okay";
+ bus-frequency = <400000>;
i2c-mux@71 {
compatible = "nxp,pca9548";
@@ -1468,6 +1484,7 @@
&i2c15 {
status = "okay";
+ bus-frequency = <400000>;
i2c-mux@71 {
compatible = "nxp,pca9548";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-quanta-s6q.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-quanta-s6q.dts
index 983853eedaef..fd361cf073c2 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-quanta-s6q.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-quanta-s6q.dts
@@ -381,7 +381,7 @@
#size-cells = <0>;
reg = <5>;
- U190_fru@51 {
+ eeprom@51 {
compatible = "atmel,24c128";
reg = <0x51>;
pagesize = <32>;
@@ -460,7 +460,7 @@
status = "okay";
/* MB FRU (U173) @ 0xA2 */
- mb_fru: mb_fru@51 {
+ mb_fru: eeprom@51 {
compatible = "atmel,24c128";
reg = <0x51>;
pagesize = <32>;
@@ -472,7 +472,7 @@
reg = <0x4a>;
};
- FP_U4_fru@52 {
+ eeprom@52 {
compatible = "atmel,24c02";
reg = <0x52>;
pagesize = <16>;
@@ -593,7 +593,7 @@
status = "okay";
/* SCM FRU (U19) @ 0xA2 */
- scm_fru: scm_fru@51 {
+ scm_fru: eeprom@51 {
compatible = "atmel,24c128";
reg = <0x51>;
pagesize = <32>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman-rx20.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman-rx20.dts
index b8f0b08018a3..98f3e0437704 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman-rx20.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman-rx20.dts
@@ -154,7 +154,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
- at24@50 {
+ eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
@@ -196,7 +196,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
- at24@50 {
+ eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
@@ -205,7 +205,7 @@
};
};
};
- at24@50 {
+ eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman.dtsi b/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman.dtsi
index 1a5b25b2ea29..16815eede710 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-vegman.dtsi
@@ -291,7 +291,7 @@
/* SMB_BMC_MGMT_LVC3 */
status = "okay";
- at24@50 {
+ eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
diff --git a/arch/arm/boot/dts/broadcom/Makefile b/arch/arm/boot/dts/broadcom/Makefile
index 5881bcc95eba..d23cf466127b 100644
--- a/arch/arm/boot/dts/broadcom/Makefile
+++ b/arch/arm/boot/dts/broadcom/Makefile
@@ -36,6 +36,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
bcm2835-rpi-zero.dtb \
bcm2835-rpi-zero-w.dtb
dtb-$(CONFIG_ARCH_BCMBCA) += \
+ bcm6846-genexis-xg6846b.dtb \
bcm947622.dtb \
bcm963138.dtb \
bcm963138dvt.dtb \
diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
index 0bf5106f7012..08abfdc63d18 100644
--- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
@@ -59,6 +59,9 @@
&gmac0 {
status = "okay";
+
+ nvmem-cells = <&macaddr_board_config_66>;
+ nvmem-cell-names = "mac-address";
};
&gmac1 {
@@ -102,8 +105,25 @@
};
partition@800000 {
+ compatible = "linux,ubi";
label = "ubi";
reg = <0x800000 0x7780000>;
+
+ volumes {
+ ubi-volume-board-config {
+ volname = "board-config";
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ macaddr_board_config_66: macaddr@66 {
+ reg = <0x66 0x6>;
+ };
+ };
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/broadcom/bcm53340-ubnt-unifi-switch8.dts b/arch/arm/boot/dts/broadcom/bcm53340-ubnt-unifi-switch8.dts
index 975f854f652f..08cf1220b655 100644
--- a/arch/arm/boot/dts/broadcom/bcm53340-ubnt-unifi-switch8.dts
+++ b/arch/arm/boot/dts/broadcom/bcm53340-ubnt-unifi-switch8.dts
@@ -32,7 +32,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
compatible = "m25p80";
diff --git a/arch/arm/boot/dts/broadcom/bcm6846-genexis-xg6846b.dts b/arch/arm/boot/dts/broadcom/bcm6846-genexis-xg6846b.dts
new file mode 100644
index 000000000000..a3616fb7b3a8
--- /dev/null
+++ b/arch/arm/boot/dts/broadcom/bcm6846-genexis-xg6846b.dts
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 Linus Walleij <linus.walleij@linaro.org>
+ */
+
+/dts-v1/;
+
+#include "bcm6846.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Genexis XG6846B Ethernet layer 2/3 router";
+ compatible = "genexis,xg6846b", "brcm,bcm6846", "brcm,bcmbca";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ /* Micron D9PTK 256 MB RAM */
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x10000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ secondary-boot@0 {
+ no-map;
+ reg = <0x00000000 0x00008000>;
+ };
+ pmc3-firmware@8000 {
+ no-map;
+ reg = <0x00008000 0x00100000>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys-polled";
+ poll-interval = <20000>;
+
+ /* Called "canyon rescue button" in the vendor DTB */
+ button-restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio0 41 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&gpio2 {
+ status = "okay";
+ /* In total, 79 GPIOs are available */
+ ngpios = <15>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&leds {
+ status = "okay";
+ brcm,serial-shift-bits = <16>;
+
+ led@0 {
+ reg = <0>;
+ active-low;
+ function = "ext";
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ reg = <1>;
+ active-low;
+ function = "ext";
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@3 {
+ reg = <3>;
+ active-low;
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@4 {
+ reg = <4>;
+ active-low;
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ reg = <5>;
+ active-low;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@6 {
+ reg = <6>;
+ active-low;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@15 {
+ reg = <15>;
+ active-low;
+ function = LED_FUNCTION_USB;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@7 {
+ /* Activity 03 */
+ reg = <7>;
+ active-low;
+ function = "lan1";
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@8 {
+ /* Activity 04 */
+ reg = <8>;
+ active-low;
+ function = "lan1";
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@9 {
+ /* Activity 03 */
+ reg = <9>;
+ active-low;
+ function = "lan2";
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@10 {
+ /* Activity 04 */
+ reg = <10>;
+ active-low;
+ function = "lan2";
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@11 {
+ /* Activity 03 */
+ reg = <11>;
+ active-low;
+ function = "lan3";
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@12 {
+ /* Activity 04 */
+ reg = <12>;
+ active-low;
+ function = "lan3";
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@13 {
+ /* Activity 03 */
+ reg = <13>;
+ active-low;
+ function = "lan4";
+ color = <LED_COLOR_ID_AMBER>;
+ };
+
+ led@14 {
+ /* Activity 04 */
+ reg = <14>;
+ active-low;
+ function = "lan4";
+ color = <LED_COLOR_ID_GREEN>;
+ };
+};
+
+&hsspi {
+ status = "okay";
+};
+
+&nand_controller {
+ brcm,wp-not-connected;
+ status = "okay";
+};
+
+&nandcs {
+ nand-on-flash-bbt;
+ brcm,nand-ecc-use-strap;
+
+ /* Winbond W29N02GV, 256MB with 128KB erase blocks */
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ loader@0 {
+ label = "loader";
+ reg = <0x00000000 0x00400000>;
+ };
+ image@400000 {
+ label = "image";
+ reg = <0x00400000 0x0fb00000>;
+ };
+ /* 0x0ff00000-0x0fffffff: bad block list */
+ };
+};
+
+&mdio {
+ status = "okay";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+ phy21: ethernet-phy@21 {
+ reg = <21>;
+ };
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm6846.dtsi b/arch/arm/boot/dts/broadcom/bcm6846.dtsi
index ee361cb00b7c..e0e06af3fe89 100644
--- a/arch/arm/boot/dts/broadcom/bcm6846.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm6846.dtsi
@@ -99,6 +99,91 @@
#size-cells = <1>;
ranges = <0 0xff800000 0x800000>;
+ watchdog@480 {
+ compatible = "brcm,bcm6345-wdt";
+ reg = <0x480 0x10>;
+ };
+
+ /* GPIOs 0 .. 31 */
+ gpio0: gpio@500 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x500 0x04>, <0x520 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 32 .. 63 */
+ gpio1: gpio@504 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x504 0x04>, <0x524 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 64 .. 95 */
+ gpio2: gpio@508 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x508 0x04>, <0x528 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 96 .. 127 */
+ gpio3: gpio@50c {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x50c 0x04>, <0x52c 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 128 .. 159 */
+ gpio4: gpio@510 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x510 0x04>, <0x530 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 160 .. 191 */
+ gpio5: gpio@514 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x514 0x04>, <0x534 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 192 .. 223 */
+ gpio6: gpio@518 {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x518 0x04>, <0x538 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
+ /* GPIOs 224 .. 255 */
+ gpio7: gpio@51c {
+ compatible = "brcm,bcm6345-gpio";
+ reg = <0x51c 0x04>, <0x53c 0x04>;
+ reg-names = "dirout", "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
uart0: serial@640 {
compatible = "brcm,bcm6345-uart";
reg = <0x640 0x1b>;
@@ -108,6 +193,19 @@
status = "disabled";
};
+ rng@b80 {
+ compatible = "brcm,iproc-rng200";
+ reg = <0xb80 0x28>;
+ };
+
+ leds: led-controller@800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm63138-leds";
+ reg = <0x800 0xdc>;
+ status = "disabled";
+ };
+
hsspi: spi@1000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -133,5 +231,27 @@
reg = <0>;
};
};
+
+ mdio: mdio@2060 {
+ compatible = "brcm,bcm6846-mdio";
+ reg = <0x02060 0x10>, <0x5a068 0x4>;
+ reg-names = "mdio", "mdio_indir_rw";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pl081_dma: dma-controller@59000 {
+ compatible = "arm,pl081", "arm,primecell";
+ // The magic B105F00D info is missing
+ arm,primecell-periphid = <0x00041081>;
+ reg = <0x59000 0x1000>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ memcpy-burst-size = <256>;
+ memcpy-bus-width = <32>;
+ clocks = <&periph_clk>;
+ clock-names = "apb_pclk";
+ #dma-cells = <2>;
+ };
};
};
diff --git a/arch/arm/boot/dts/broadcom/bcm953012hr.dts b/arch/arm/boot/dts/broadcom/bcm953012hr.dts
index b070b69466bd..b728cd54715e 100644
--- a/arch/arm/boot/dts/broadcom/bcm953012hr.dts
+++ b/arch/arm/boot/dts/broadcom/bcm953012hr.dts
@@ -74,7 +74,6 @@
&spi_nor {
status = "okay";
spi-max-frequency = <62500000>;
- m25p,default-addr-width = <3>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm953012k.dts b/arch/arm/boot/dts/broadcom/bcm953012k.dts
index f1e6bcaa1edd..27c0992f1855 100644
--- a/arch/arm/boot/dts/broadcom/bcm953012k.dts
+++ b/arch/arm/boot/dts/broadcom/bcm953012k.dts
@@ -84,7 +84,6 @@
&spi_nor {
status = "okay";
spi-max-frequency = <62500000>;
- m25p,default-addr-width = <3>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958522er.dts b/arch/arm/boot/dts/broadcom/bcm958522er.dts
index 15f023656df0..2f20f86bd31c 100644
--- a/arch/arm/boot/dts/broadcom/bcm958522er.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958522er.dts
@@ -135,7 +135,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958525er.dts b/arch/arm/boot/dts/broadcom/bcm958525er.dts
index 9b9c225a1fb3..980c03f74a19 100644
--- a/arch/arm/boot/dts/broadcom/bcm958525er.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958525er.dts
@@ -135,7 +135,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958525xmc.dts b/arch/arm/boot/dts/broadcom/bcm958525xmc.dts
index ca9311452739..440bb2d617f2 100644
--- a/arch/arm/boot/dts/broadcom/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958525xmc.dts
@@ -151,7 +151,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958622hr.dts b/arch/arm/boot/dts/broadcom/bcm958622hr.dts
index 9db3c851451a..116f3a7c3bc6 100644
--- a/arch/arm/boot/dts/broadcom/bcm958622hr.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958622hr.dts
@@ -139,7 +139,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958623hr.dts b/arch/arm/boot/dts/broadcom/bcm958623hr.dts
index 32786e7c4e12..fc6ab73ecf56 100644
--- a/arch/arm/boot/dts/broadcom/bcm958623hr.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958623hr.dts
@@ -143,7 +143,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958625hr.dts b/arch/arm/boot/dts/broadcom/bcm958625hr.dts
index 74263d98de73..a9b6aa04d573 100644
--- a/arch/arm/boot/dts/broadcom/bcm958625hr.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958625hr.dts
@@ -150,7 +150,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm958625k.dts b/arch/arm/boot/dts/broadcom/bcm958625k.dts
index 69ebc7a913a7..7996116fc923 100644
--- a/arch/arm/boot/dts/broadcom/bcm958625k.dts
+++ b/arch/arm/boot/dts/broadcom/bcm958625k.dts
@@ -154,7 +154,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/broadcom/bcm988312hr.dts b/arch/arm/boot/dts/broadcom/bcm988312hr.dts
index e96bc3f2d5cf..663a3f27b6e4 100644
--- a/arch/arm/boot/dts/broadcom/bcm988312hr.dts
+++ b/arch/arm/boot/dts/broadcom/bcm988312hr.dts
@@ -139,7 +139,6 @@
&qspi {
status = "okay";
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
index 6b6e77596ffa..b108265e9bde 100644
--- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
@@ -440,7 +440,7 @@
clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
- reset-names = "stmmaceth", "ahb";
+ reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
};
@@ -460,7 +460,7 @@
clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
- reset-names = "stmmaceth", "ahb";
+ reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
};
@@ -480,7 +480,7 @@
clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
- reset-names = "stmmaceth", "ahb";
+ reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_mcvevk.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_mcvevk.dts
index ceaec29770c6..c1e1264bcb09 100644
--- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_mcvevk.dts
+++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_mcvevk.dts
@@ -50,8 +50,6 @@
stmpe1: stmpe811@41 {
compatible = "st,stmpe811";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0x41>;
id = <0>;
blocks = <0x5>;
diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts
index d37a982e8571..97622febc44e 100644
--- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_socdk.dts
@@ -151,12 +151,6 @@
&spi0 {
status = "okay";
-
- spidev@0 {
- compatible = "rohm,dh2228fv";
- reg = <0>;
- spi-max-frequency = <1000000>;
- };
};
&usb1 {
diff --git a/arch/arm/boot/dts/marvell/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/marvell/mmp2-olpc-xo-1-75.dts
index 55ea87870af3..86c425b72fa7 100644
--- a/arch/arm/boot/dts/marvell/mmp2-olpc-xo-1-75.dts
+++ b/arch/arm/boot/dts/marvell/mmp2-olpc-xo-1-75.dts
@@ -113,8 +113,8 @@
"Headphones", "HPOR",
"MIC2", "Mic Jack";
widgets = "Headphone", "Headphones", "Microphone", "Mic Jack";
- hp-det-gpio = <&gpio 97 GPIO_ACTIVE_HIGH>;
- mic-det-gpio = <&gpio 96 GPIO_ACTIVE_HIGH>;
+ hp-det-gpios = <&gpio 97 GPIO_ACTIVE_HIGH>;
+ mic-det-gpios = <&gpio 96 GPIO_ACTIVE_HIGH>;
};
soc {
diff --git a/arch/arm/boot/dts/mediatek/mt7623.dtsi b/arch/arm/boot/dts/mediatek/mt7623.dtsi
index 814586abc297..fd7a89cc337d 100644
--- a/arch/arm/boot/dts/mediatek/mt7623.dtsi
+++ b/arch/arm/boot/dts/mediatek/mt7623.dtsi
@@ -308,7 +308,7 @@
clock-names = "spi", "wrap";
};
- cir: cir@10013000 {
+ cir: ir-receiver@10013000 {
compatible = "mediatek,mt7623-cir";
reg = <0 0x10013000 0 0x1000>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/microchip/Makefile b/arch/arm/boot/dts/microchip/Makefile
index 470fe46433a9..79cd38fdc7da 100644
--- a/arch/arm/boot/dts/microchip/Makefile
+++ b/arch/arm/boot/dts/microchip/Makefile
@@ -12,6 +12,7 @@ DTC_FLAGS_at91-sama5d2_xplained := -@
DTC_FLAGS_at91-sama5d3_eds := -@
DTC_FLAGS_at91-sama5d3_xplained := -@
DTC_FLAGS_at91-sama5d4_xplained := -@
+DTC_FLAGS_at91-sama7d65_curiosity := -@
DTC_FLAGS_at91-sama7g54_curiosity := -@
DTC_FLAGS_at91-sama7g5ek := -@
dtb-$(CONFIG_SOC_AT91RM9200) += \
@@ -90,6 +91,8 @@ dtb-$(CONFIG_SOC_SAM_V7) += \
at91-sama5d4_xplained.dtb \
at91-sama5d4ek.dtb \
at91-vinco.dtb
+dtb-$(CONFIG_SOC_SAMA7D65) += \
+ at91-sama7d65_curiosity.dtb
dtb-$(CONFIG_SOC_SAMA7G5) += \
at91-sama7g54_curiosity.dtb \
at91-sama7g5ek.dtb
diff --git a/arch/arm/boot/dts/microchip/at91-sam9x75_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sam9x75_curiosity.dts
index 87b6ea97590b..1a6a909a5043 100644
--- a/arch/arm/boot/dts/microchip/at91-sam9x75_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sam9x75_curiosity.dts
@@ -88,8 +88,6 @@
};
&i2c6 {
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx6_default>;
i2c-analog-filter;
@@ -200,6 +198,52 @@
};
};
+&flx7 {
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+ status = "okay";
+};
+
+&i2c7 {
+ dmas = <0>, <0>;
+ i2c-analog-filter;
+ i2c-digital-filter;
+ i2c-digital-filter-width-ns = <35>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flx7_default>;
+ status = "okay";
+
+ power-monitor@10 {
+ compatible = "microchip,pac1934";
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@1 {
+ reg = <0x1>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDD3V3";
+ };
+
+ channel@2 {
+ reg = <0x2>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "DCDC4";
+ };
+
+ channel@3 {
+ reg = <0x3>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDDCORE";
+ };
+
+ channel@4 {
+ reg = <0x4>;
+ shunt-resistor-micro-ohms = <10000>;
+ label = "VDDIODDR";
+ };
+ };
+};
+
&i2s {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2s_default>;
@@ -233,6 +277,12 @@
<AT91_PIOA 24 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 25 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
+
+ pinctrl_flx7_default: flx7-default {
+ atmel,pins =
+ <AT91_PIOC 0 AT91_PERIPH_C AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOC 1 AT91_PERIPH_C AT91_PINCTRL_PULL_UP>;
+ };
};
gpio-keys {
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
index 15239834d886..35a933eec573 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
@@ -197,6 +197,7 @@
&sdmmc0 {
bus-width = <4>;
+ no-1-8-v;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
status = "okay";
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
index b6684bf67d3e..7be215781549 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
@@ -514,6 +514,7 @@
&sdmmc0 {
bus-width = <4>;
+ no-1-8-v;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
disable-wp;
diff --git a/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
new file mode 100644
index 000000000000..0f86360fb733
--- /dev/null
+++ b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * at91-sama7d65_curiosity.dts - Device Tree file for SAMA7D65 Curiosity board
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Romain Sioen <romain.sioen@microchip.com>
+ *
+ */
+/dts-v1/;
+#include "sama7d65-pinfunc.h"
+#include "sama7d65.dtsi"
+#include <dt-bindings/mfd/atmel-flexcom.h>
+#include <dt-bindings/pinctrl/at91.h>
+
+/ {
+ model = "Microchip SAMA7D65 Curiosity";
+ compatible = "microchip,sama7d65-curiosity", "microchip,sama7d65",
+ "microchip,sama7d6", "microchip,sama7";
+
+ aliases {
+ serial0 = &uart6;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@60000000 {
+ device_type = "memory";
+ reg = <0x60000000 0x40000000>;
+ };
+};
+
+&flx6 {
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+ status = "okay";
+};
+
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart6_default>;
+ status = "okay";
+};
+
+&main_xtal {
+ clock-frequency = <24000000>;
+};
+
+&pioa {
+ pinctrl_sdmmc1_default: sdmmc1-default {
+ cmd-data {
+ pinmux = <PIN_PB22__SDMMC1_CMD>,
+ <PIN_PB24__SDMMC1_DAT0>,
+ <PIN_PB25__SDMMC1_DAT1>,
+ <PIN_PB26__SDMMC1_DAT2>,
+ <PIN_PB27__SDMMC1_DAT3>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+
+ ck-cd-rstn-vddsel {
+ pinmux = <PIN_PB23__SDMMC1_CK>,
+ <PIN_PB21__SDMMC1_RSTN>,
+ <PIN_PB30__SDMMC1_1V8SEL>,
+ <PIN_PB29__SDMMC1_CD>,
+ <PIN_PB28__SDMMC1_WP>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart6_default: uart6-default {
+ pinmux = <PIN_PD18__FLEXCOM6_IO0>,
+ <PIN_PD19__FLEXCOM6_IO1>;
+ bias-disable;
+ };
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdmmc1_default>;
+ status = "okay";
+};
+
+&slow_xtal {
+ clock-frequency = <32768>;
+};
diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi
index 36944e18a329..b8b2c1ddf3f1 100644
--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi
+++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi
@@ -197,6 +197,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -268,6 +270,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -768,6 +772,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -839,6 +845,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -910,6 +918,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -981,6 +991,8 @@
compatible = "microchip,sam9x60-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
clock-names = "spi_clk";
dmas = <&dma0
diff --git a/arch/arm/boot/dts/microchip/sam9x7.dtsi b/arch/arm/boot/dts/microchip/sam9x7.dtsi
index beb1f34b38d3..b217a908f525 100644
--- a/arch/arm/boot/dts/microchip/sam9x7.dtsi
+++ b/arch/arm/boot/dts/microchip/sam9x7.dtsi
@@ -132,6 +132,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -151,6 +153,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -201,6 +205,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -220,6 +226,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -312,6 +320,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -362,6 +372,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -533,6 +545,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -583,6 +597,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -633,6 +649,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 11>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -683,6 +701,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -702,6 +722,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -752,6 +774,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -771,6 +795,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -821,6 +847,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -840,6 +868,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -890,6 +920,8 @@
compatible = "microchip,sam9x7-spi", "atmel,at91rm9200-spi";
reg = <0x400 0x200>;
interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
clock-names = "spi_clk";
dmas = <&dma0
@@ -909,6 +941,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -984,6 +1018,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <15 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
@@ -1034,6 +1070,8 @@
compatible = "microchip,sam9x7-i2c", "microchip,sam9x60-i2c";
reg = <0x600 0x200>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
dmas = <&dma0
(AT91_XDMAC_DT_MEM_IF(0) |
diff --git a/arch/arm/boot/dts/microchip/sama7d65-pinfunc.h b/arch/arm/boot/dts/microchip/sama7d65-pinfunc.h
new file mode 100644
index 000000000000..c591f333cacb
--- /dev/null
+++ b/arch/arm/boot/dts/microchip/sama7d65-pinfunc.h
@@ -0,0 +1,947 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+#define PINMUX_PIN(no, func, ioset) \
+(((no) & 0xffff) | (((func) & 0xf) << 16) | (((ioset) & 0xff) << 20))
+
+#define PIN_PA0 0
+#define PIN_PA0__GPIO PINMUX_PIN(PIN_PA0, 0, 0)
+#define PIN_PA0__SDMMC0_CK PINMUX_PIN(PIN_PA0, 1, 1)
+#define PIN_PA0__FLEXCOM3_IO0 PINMUX_PIN(PIN_PA0, 2, 1)
+#define PIN_PA0__NWER0 PINMUX_PIN(PIN_PA0, 3, 1)
+
+#define PIN_PA1 1
+#define PIN_PA1__GPIO PINMUX_PIN(PIN_PA1, 0, 0)
+#define PIN_PA1__SDMMC0_CMD PINMUX_PIN(PIN_PA1, 1, 1)
+#define PIN_PA1__FLEXCOM3_IO1 PINMUX_PIN(PIN_PA1, 2, 1)
+#define PIN_PA1__A21 PINMUX_PIN(PIN_PA1, 3, 1)
+
+#define PIN_PA2 2
+#define PIN_PA2__GPIO PINMUX_PIN(PIN_PA2, 0, 0)
+#define PIN_PA2__SDMMC0_RSTN PINMUX_PIN(PIN_PA2, 1, 1)
+#define PIN_PA2__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA2, 2, 1)
+#define PIN_PA2__A22 PINMUX_PIN(PIN_PA2, 3, 1)
+
+#define PIN_PA3 3
+#define PIN_PA3__GPIO PINMUX_PIN(PIN_PA3, 0, 0)
+#define PIN_PA3__SDMMC0_DAT0 PINMUX_PIN(PIN_PA3, 1, 1)
+#define PIN_PA3__FLEXCOM3_IO3 PINMUX_PIN(PIN_PA3, 2, 1)
+#define PIN_PA3__D0 PINMUX_PIN(PIN_PA3, 3, 1)
+
+#define PIN_PA4 4
+#define PIN_PA4__GPIO PINMUX_PIN(PIN_PA4, 0, 0)
+#define PIN_PA4__SDMMC0_DAT1 PINMUX_PIN(PIN_PA4, 1, 1)
+#define PIN_PA4__FLEXCOM3_IO4 PINMUX_PIN(PIN_PA4, 2, 1)
+#define PIN_PA4__D1 PINMUX_PIN(PIN_PA4, 3, 1)
+
+#define PIN_PA5 5
+#define PIN_PA5__GPIO PINMUX_PIN(PIN_PA5, 0, 0)
+#define PIN_PA5__SDMMC0_DAT4 PINMUX_PIN(PIN_PA5, 1, 1)
+#define PIN_PA5__FLEXCOM2_IO0 PINMUX_PIN(PIN_PA5, 2, 3)
+#define PIN_PA5__D4 PINMUX_PIN(PIN_PA5, 3, 1)
+#define PIN_PA5__TCLK4 PINMUX_PIN(PIN_PA5, 6, 3)
+
+#define PIN_PA6 6
+#define PIN_PA6__GPIO PINMUX_PIN(PIN_PA6, 0, 0)
+#define PIN_PA6__SDMMC0_DAT5 PINMUX_PIN(PIN_PA6, 1, 1)
+#define PIN_PA6__FLEXCOM2_IO1 PINMUX_PIN(PIN_PA6, 2, 3)
+#define PIN_PA6__D5 PINMUX_PIN(PIN_PA6, 3, 1)
+#define PIN_PA6__TIOB4 PINMUX_PIN(PIN_PA6, 6, 3)
+
+#define PIN_PA7 7
+#define PIN_PA7__GPIO PINMUX_PIN(PIN_PA7, 0, 0)
+#define PIN_PA7__SDMMC0_DAT6 PINMUX_PIN(PIN_PA7, 1, 1)
+#define PIN_PA7__FLEXCOM2_IO2 PINMUX_PIN(PIN_PA7, 2, 3)
+#define PIN_PA7__D6 PINMUX_PIN(PIN_PA7, 3, 1)
+#define PIN_PA7__TIOA4 PINMUX_PIN(PIN_PA7, 6, 3)
+
+#define PIN_PA8 8
+#define PIN_PA8__GPIO PINMUX_PIN(PIN_PA8, 0, 0)
+#define PIN_PA8__SDMMC0_DAT7 PINMUX_PIN(PIN_PA8, 1, 1)
+#define PIN_PA8__FLEXCOM2_IO3 PINMUX_PIN(PIN_PA8, 2, 3)
+#define PIN_PA8__D7 PINMUX_PIN(PIN_PA8, 3, 1)
+#define PIN_PA8__TIOA5 PINMUX_PIN(PIN_PA8, 6, 3)
+
+#define PIN_PA9 9
+#define PIN_PA9__GPIO PINMUX_PIN(PIN_PA9, 0, 0)
+#define PIN_PA9__SDMMC0_DAT2 PINMUX_PIN(PIN_PA9, 1, 1)
+#define PIN_PA9__FLEXCOM0_IO2 PINMUX_PIN(PIN_PA9, 2, 1)
+#define PIN_PA9__D2 PINMUX_PIN(PIN_PA9, 3, 1)
+#define PIN_PA9__TIOB5 PINMUX_PIN(PIN_PA9, 6, 3)
+
+#define PIN_PA10 10
+#define PIN_PA10__GPIO PINMUX_PIN(PIN_PA10, 0, 0)
+#define PIN_PA10__SDMMC0_DAT3 PINMUX_PIN(PIN_PA10, 1, 1)
+#define PIN_PA10__FLEXCOM0_IO3 PINMUX_PIN(PIN_PA10, 2, 1)
+#define PIN_PA10__D3 PINMUX_PIN(PIN_PA10, 3, 1)
+#define PIN_PA10__TCLK5 PINMUX_PIN(PIN_PA10, 6, 3)
+
+#define PIN_PA11 11
+#define PIN_PA11__GPIO PINMUX_PIN(PIN_PA11, 0, 0)
+#define PIN_PA11__SDMMC0_DS PINMUX_PIN(PIN_PA11, 1, 1)
+#define PIN_PA11__FLEXCOM0_IO4 PINMUX_PIN(PIN_PA11, 2, 1)
+#define PIN_PA11__NANDRDY PINMUX_PIN(PIN_PA11, 3, 1)
+#define PIN_PA11__TIOB3 PINMUX_PIN(PIN_PA11, 6, 3)
+
+#define PIN_PA12 12
+#define PIN_PA12__GPIO PINMUX_PIN(PIN_PA12, 0, 0)
+#define PIN_PA12__FLEXCOM0_IO0 PINMUX_PIN(PIN_PA12, 2, 1)
+#define PIN_PA12__NRD PINMUX_PIN(PIN_PA12, 3, 1)
+#define PIN_PA12__PCK0 PINMUX_PIN(PIN_PA12, 4, 1)
+#define PIN_PA12__EXT_IRQ0 PINMUX_PIN(PIN_PA12, 5, 1)
+#define PIN_PA12__TIOA3 PINMUX_PIN(PIN_PA12, 6, 3)
+
+#define PIN_PA13 13
+#define PIN_PA13__GPIO PINMUX_PIN(PIN_PA13, 0, 0)
+#define PIN_PA13__FLEXCOM0_IO1 PINMUX_PIN(PIN_PA13, 2, 1)
+#define PIN_PA13__NCS0 PINMUX_PIN(PIN_PA13, 3, 1)
+#define PIN_PA13__PCK1 PINMUX_PIN(PIN_PA13, 4, 1)
+#define PIN_PA13__TCLK3 PINMUX_PIN(PIN_PA13, 6, 3)
+
+#define PIN_PA14 14
+#define PIN_PA14__GPIO PINMUX_PIN(PIN_PA14, 0, 0)
+#define PIN_PA14__FLEXCOM4_IO4 PINMUX_PIN(PIN_PA14, 1, 1)
+#define PIN_PA14__SDMMC0_WP PINMUX_PIN(PIN_PA14, 2, 1)
+#define PIN_PA14__FLEXCOM3_IO0 PINMUX_PIN(PIN_PA14, 3, 4)
+
+#define PIN_PA15 15
+#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
+#define PIN_PA15__FLEXCOM4_IO3 PINMUX_PIN(PIN_PA15, 1, 1)
+#define PIN_PA15__SDMMC0_1V8SEL PINMUX_PIN(PIN_PA15, 2, 1)
+#define PIN_PA15__FLEXCOM3_IO1 PINMUX_PIN(PIN_PA15, 3, 4)
+
+#define PIN_PA16 16
+#define PIN_PA16__GPIO PINMUX_PIN(PIN_PA16, 0, 0)
+#define PIN_PA16__FLEXCOM4_IO2 PINMUX_PIN(PIN_PA16, 1, 1)
+#define PIN_PA16__SDMMC0_CD PINMUX_PIN(PIN_PA16, 2, 1)
+#define PIN_PA16__PCK2 PINMUX_PIN(PIN_PA16, 4, 1)
+#define PIN_PA16__EXT_IRQ1 PINMUX_PIN(PIN_PA16, 5, 1)
+
+#define PIN_PA17 17
+#define PIN_PA17__GPIO PINMUX_PIN(PIN_PA17, 0, 0)
+#define PIN_PA17__FLEXCOM4_IO1 PINMUX_PIN(PIN_PA17, 1, 1)
+
+#define PIN_PA18 18
+#define PIN_PA18__GPIO PINMUX_PIN(PIN_PA18, 0, 0)
+#define PIN_PA18__FLEXCOM4_IO0 PINMUX_PIN(PIN_PA18, 1, 1)
+
+#define PIN_PA19 19
+#define PIN_PA19__GPIO PINMUX_PIN(PIN_PA19, 0, 0)
+#define PIN_PA19__TK0 PINMUX_PIN(PIN_PA19, 1, 1)
+#define PIN_PA19__FLEXCOM4_IO5 PINMUX_PIN(PIN_PA19, 3, 1)
+#define PIN_PA19__PWML0 PINMUX_PIN(PIN_PA19, 4, 3)
+
+#define PIN_PA20 20
+#define PIN_PA20__GPIO PINMUX_PIN(PIN_PA20, 0, 0)
+#define PIN_PA20__TD0 PINMUX_PIN(PIN_PA20, 1, 1)
+#define PIN_PA20__FLEXCOM3_IO4 PINMUX_PIN(PIN_PA20, 2, 2)
+#define PIN_PA20__FLEXCOM4_IO6 PINMUX_PIN(PIN_PA20, 3, 1)
+#define PIN_PA20__PWMH0 PINMUX_PIN(PIN_PA20, 4, 3)
+
+#define PIN_PA21 21
+#define PIN_PA21__GPIO PINMUX_PIN(PIN_PA21, 0, 0)
+#define PIN_PA21__TF0 PINMUX_PIN(PIN_PA21, 1, 1)
+#define PIN_PA21__FLEXCOM3_IO3 PINMUX_PIN(PIN_PA21, 2, 2)
+#define PIN_PA21__PWML1 PINMUX_PIN(PIN_PA21, 4, 3)
+
+#define PIN_PA22 22
+#define PIN_PA22__GPIO PINMUX_PIN(PIN_PA22, 0, 0)
+#define PIN_PA22__RD0 PINMUX_PIN(PIN_PA22, 1, 1)
+#define PIN_PA22__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA22, 2, 2)
+#define PIN_PA22__PDMC0_DS1 PINMUX_PIN(PIN_PA22, 3, 1)
+#define PIN_PA22__PWMH1 PINMUX_PIN(PIN_PA22, 4, 3)
+
+#define PIN_PA23 23
+#define PIN_PA23__GPIO PINMUX_PIN(PIN_PA23, 0, 0)
+#define PIN_PA23__RK0 PINMUX_PIN(PIN_PA23, 1, 1)
+#define PIN_PA23__FLEXCOM3_IO1 PINMUX_PIN(PIN_PA23, 2, 2)
+#define PIN_PA23__PDMC0_CLK PINMUX_PIN(PIN_PA23, 3, 1)
+#define PIN_PA23__PWML2 PINMUX_PIN(PIN_PA23, 4, 3)
+
+#define PIN_PA24 24
+#define PIN_PA24__GPIO PINMUX_PIN(PIN_PA24, 0, 0)
+#define PIN_PA24__RF0 PINMUX_PIN(PIN_PA24, 1, 1)
+#define PIN_PA24__FLEXCOM3_IO0 PINMUX_PIN(PIN_PA24, 2, 2)
+#define PIN_PA24__PDMC0_DS0 PINMUX_PIN(PIN_PA24, 3, 1)
+#define PIN_PA24__PWMH2 PINMUX_PIN(PIN_PA24, 4, 3)
+
+#define PIN_PA25 25
+#define PIN_PA25__GPIO PINMUX_PIN(PIN_PA25, 0, 0)
+#define PIN_PA25__G0_TXCTL PINMUX_PIN(PIN_PA25, 1, 1)
+#define PIN_PA25__FLEXCOM6_IO2 PINMUX_PIN(PIN_PA25, 2, 1)
+
+#define PIN_PA26 26
+#define PIN_PA26__GPIO PINMUX_PIN(PIN_PA26, 0, 0)
+#define PIN_PA26__G0_TX0 PINMUX_PIN(PIN_PA26, 1, 1)
+#define PIN_PA26__FLEXCOM6_IO3 PINMUX_PIN(PIN_PA26, 2, 1)
+
+#define PIN_PA27 27
+#define PIN_PA27__GPIO PINMUX_PIN(PIN_PA27, 0, 0)
+#define PIN_PA27__G0_TX1 PINMUX_PIN(PIN_PA27, 1, 1)
+#define PIN_PA27__FLEXCOM6_IO4 PINMUX_PIN(PIN_PA27, 2, 1)
+
+#define PIN_PA28 28
+#define PIN_PA28__GPIO PINMUX_PIN(PIN_PA28, 0, 0)
+#define PIN_PA28__G0_RXCTL PINMUX_PIN(PIN_PA28, 1, 1)
+#define PIN_PA28__FLEXCOM6_IO0 PINMUX_PIN(PIN_PA28, 2, 1)
+
+#define PIN_PA29 29
+#define PIN_PA29__GPIO PINMUX_PIN(PIN_PA29, 0, 0)
+#define PIN_PA29__G0_RX0 PINMUX_PIN(PIN_PA29, 1, 1)
+#define PIN_PA29__FLEXCOM6_IO1 PINMUX_PIN(PIN_PA29, 2, 1)
+
+#define PIN_PA30 30
+#define PIN_PA30__GPIO PINMUX_PIN(PIN_PA30, 0, 0)
+#define PIN_PA30__G0_RX1 PINMUX_PIN(PIN_PA30, 1, 1)
+#define PIN_PA30__FLEXCOM8_IO0 PINMUX_PIN(PIN_PA30, 2, 1)
+
+#define PIN_PA31 31
+#define PIN_PA31__GPIO PINMUX_PIN(PIN_PA31, 0, 0)
+#define PIN_PA31__G0_MDC PINMUX_PIN(PIN_PA31, 1, 1)
+#define PIN_PA31__FLEXCOM8_IO1 PINMUX_PIN(PIN_PA31, 2, 1)
+
+#define PIN_PB0 32
+#define PIN_PB0__GPIO PINMUX_PIN(PIN_PB0, 0, 0)
+#define PIN_PB0__G0_MDIO PINMUX_PIN(PIN_PB0, 1, 1)
+#define PIN_PB0__FLEXCOM8_IO3 PINMUX_PIN(PIN_PB0, 2, 2)
+
+#define PIN_PB1 33
+#define PIN_PB1__GPIO PINMUX_PIN(PIN_PB1, 0, 0)
+#define PIN_PB1__G0_REFCK PINMUX_PIN(PIN_PB1, 1, 2)
+#define PIN_PB1__FLEXCOM8_IO2 PINMUX_PIN(PIN_PB1, 2, 1)
+
+#define PIN_PB2 34
+#define PIN_PB2__GPIO PINMUX_PIN(PIN_PB2, 0, 0)
+#define PIN_PB2__G0_RX2 PINMUX_PIN(PIN_PB2, 1, 1)
+#define PIN_PB2__FLEXCOM8_IO4 PINMUX_PIN(PIN_PB2, 2, 1)
+#define PIN_PB2__G0_RXER PINMUX_PIN(PIN_PB2, 3, 2)
+#define PIN_PB2__RK0 PINMUX_PIN(PIN_PB2, 4, 2)
+
+#define PIN_PB3 35
+#define PIN_PB3__GPIO PINMUX_PIN(PIN_PB3, 0, 0)
+#define PIN_PB3__G0_RXCK PINMUX_PIN(PIN_PB3, 1, 1)
+#define PIN_PB3__FLEXCOM10_IO2 PINMUX_PIN(PIN_PB3, 2, 2)
+#define PIN_PB3__TK0 PINMUX_PIN(PIN_PB3, 4, 2)
+
+#define PIN_PB4 36
+#define PIN_PB4__GPIO PINMUX_PIN(PIN_PB4, 0, 0)
+#define PIN_PB4__G0_TX2 PINMUX_PIN(PIN_PB4, 1, 1)
+#define PIN_PB4__FLEXCOM10_IO3 PINMUX_PIN(PIN_PB4, 2, 2)
+#define PIN_PB4__TF0 PINMUX_PIN(PIN_PB4, 4, 2)
+
+#define PIN_PB5 37
+#define PIN_PB5__GPIO PINMUX_PIN(PIN_PB5, 0, 0)
+#define PIN_PB5__G0_TX3 PINMUX_PIN(PIN_PB5, 1, 1)
+#define PIN_PB5__FLEXCOM10_IO4 PINMUX_PIN(PIN_PB5, 2, 1)
+#define PIN_PB5__TD0 PINMUX_PIN(PIN_PB5, 4, 2)
+
+#define PIN_PB6 38
+#define PIN_PB6__GPIO PINMUX_PIN(PIN_PB6, 0, 0)
+#define PIN_PB6__G0_RX3 PINMUX_PIN(PIN_PB6, 1, 1)
+#define PIN_PB6__FLEXCOM10_IO0 PINMUX_PIN(PIN_PB6, 2, 2)
+#define PIN_PB6__RD0 PINMUX_PIN(PIN_PB6, 4, 2)
+
+#define PIN_PB7 39
+#define PIN_PB7__GPIO PINMUX_PIN(PIN_PB7, 0, 0)
+#define PIN_PB7__G0_TSUCOMP PINMUX_PIN(PIN_PB7, 1, 1)
+#define PIN_PB7__FLEXCOM10_IO1 PINMUX_PIN(PIN_PB7, 2, 2)
+#define PIN_PB7__ADTRG PINMUX_PIN(PIN_PB7, 3, 1)
+#define PIN_PB7__RF0 PINMUX_PIN(PIN_PB7, 4, 2)
+
+#define PIN_PB8 40
+#define PIN_PB8__GPIO PINMUX_PIN(PIN_PB8, 0, 0)
+#define PIN_PB8__QSPI0_IO3 PINMUX_PIN(PIN_PB8, 1, 1)
+#define PIN_PB8__PCK3 PINMUX_PIN(PIN_PB8, 2, 1)
+#define PIN_PB8__FLEXCOM2_IO1 PINMUX_PIN(PIN_PB8, 4, 2)
+
+#define PIN_PB9 41
+#define PIN_PB9__GPIO PINMUX_PIN(PIN_PB9, 0, 0)
+#define PIN_PB9__QSPI0_IO2 PINMUX_PIN(PIN_PB9, 1, 1)
+#define PIN_PB9__FLEXCOM2_IO0 PINMUX_PIN(PIN_PB9, 4, 2)
+#define PIN_PB9__PWMEXTRG0 PINMUX_PIN(PIN_PB9, 5, 1)
+
+#define PIN_PB10 42
+#define PIN_PB10__GPIO PINMUX_PIN(PIN_PB10, 0, 0)
+#define PIN_PB10__QSPI0_IO1 PINMUX_PIN(PIN_PB10, 1, 1)
+#define PIN_PB10__FLEXCOM2_IO4 PINMUX_PIN(PIN_PB10, 4, 2)
+#define PIN_PB10__PWMEXTRG1 PINMUX_PIN(PIN_PB10, 5, 1)
+
+#define PIN_PB11 43
+#define PIN_PB11__GPIO PINMUX_PIN(PIN_PB11, 0, 0)
+#define PIN_PB11__QSPI0_IO0 PINMUX_PIN(PIN_PB11, 1, 1)
+#define PIN_PB11__FLEXCOM2_IO5 PINMUX_PIN(PIN_PB11, 4, 2)
+#define PIN_PB11__PWML3 PINMUX_PIN(PIN_PB11, 5, 1)
+#define PIN_PB11__TIOB3 PINMUX_PIN(PIN_PB11, 6, 2)
+
+#define PIN_PB12 44
+#define PIN_PB12__GPIO PINMUX_PIN(PIN_PB12, 0, 0)
+#define PIN_PB12__QSPI0_CS PINMUX_PIN(PIN_PB12, 1, 1)
+#define PIN_PB12__FLEXCOM2_IO3 PINMUX_PIN(PIN_PB12, 4, 2)
+#define PIN_PB12__PWMFI1 PINMUX_PIN(PIN_PB12, 6, 1)
+#define PIN_PB12__TIOA3 PINMUX_PIN(PIN_PB12, 6, 2)
+
+#define PIN_PB13 45
+#define PIN_PB13__GPIO PINMUX_PIN(PIN_PB13, 0, 0)
+#define PIN_PB13__QSPI0_SCK PINMUX_PIN(PIN_PB13, 1, 1)
+#define PIN_PB13__FLEXCOM2_IO2 PINMUX_PIN(PIN_PB13, 4, 2)
+#define PIN_PB13__PWMFI0 PINMUX_PIN(PIN_PB13, 5, 1)
+#define PIN_PB13__TCLK3 PINMUX_PIN(PIN_PB13, 6, 2)
+
+#define PIN_PB14 46
+#define PIN_PB14__GPIO PINMUX_PIN(PIN_PB14, 0, 0)
+#define PIN_PB14__QSPI0_SCKN PINMUX_PIN(PIN_PB14, 1, 1)
+#define PIN_PB14__QSPI1_SCK PINMUX_PIN(PIN_PB14, 2, 1)
+#define PIN_PB14__I2SMCC0_CK PINMUX_PIN(PIN_PB14, 3, 3)
+#define PIN_PB14__FLEXCOM10_IO5 PINMUX_PIN(PIN_PB14, 4, 1)
+#define PIN_PB14__PWMH3 PINMUX_PIN(PIN_PB14, 5, 1)
+#define PIN_PB14__FLEXCOM2_IO1 PINMUX_PIN(PIN_PB14, 7, 4)
+
+#define PIN_PB15 47
+#define PIN_PB15__GPIO PINMUX_PIN(PIN_PB15, 0, 0)
+#define PIN_PB15__QSPI0_IO4 PINMUX_PIN(PIN_PB15, 1, 1)
+#define PIN_PB15__QSPI1_IO0 PINMUX_PIN(PIN_PB15, 2, 1)
+#define PIN_PB15__I2SMCC0_WS PINMUX_PIN(PIN_PB15, 3, 3)
+#define PIN_PB15__FLEXCOM10_IO6 PINMUX_PIN(PIN_PB15, 4, 1)
+#define PIN_PB15__PWML0 PINMUX_PIN(PIN_PB15, 5, 1)
+#define PIN_PB15__TCLK4 PINMUX_PIN(PIN_PB15, 6, 2)
+#define PIN_PB15__FLEXCOM2_IO0 PINMUX_PIN(PIN_PB15, 7, 4)
+
+#define PIN_PB16 48
+#define PIN_PB16__GPIO PINMUX_PIN(PIN_PB16, 0, 0)
+#define PIN_PB16__QSPI0_IO5 PINMUX_PIN(PIN_PB16, 1, 1)
+#define PIN_PB16__QSPI1_IO1 PINMUX_PIN(PIN_PB16, 2, 1)
+#define PIN_PB16__I2SMCC0_DIN0 PINMUX_PIN(PIN_PB16, 3, 3)
+#define PIN_PB16__FLEXCOM10_IO4 PINMUX_PIN(PIN_PB16, 4, 1)
+#define PIN_PB16__PWMH0 PINMUX_PIN(PIN_PB16, 5, 1)
+#define PIN_PB16__TIOB4 PINMUX_PIN(PIN_PB16, 6, 2)
+
+#define PIN_PB17 49
+#define PIN_PB17__GPIO PINMUX_PIN(PIN_PB17, 0, 0)
+#define PIN_PB17__QSPI0_IO6 PINMUX_PIN(PIN_PB17, 1, 1)
+#define PIN_PB17__QSPI1_IO2 PINMUX_PIN(PIN_PB17, 2, 1)
+#define PIN_PB17__I2SMCC0_DOUT0 PINMUX_PIN(PIN_PB17, 3, 3)
+#define PIN_PB17__FLEXCOM10_IO3 PINMUX_PIN(PIN_PB17, 4, 1)
+#define PIN_PB17__PWML1 PINMUX_PIN(PIN_PB17, 5, 1)
+#define PIN_PB17__TIOA4 PINMUX_PIN(PIN_PB17, 6, 2)
+
+#define PIN_PB18 50
+#define PIN_PB18__GPIO PINMUX_PIN(PIN_PB18, 0, 0)
+#define PIN_PB18__QSPI0_IO7 PINMUX_PIN(PIN_PB18, 1, 1)
+#define PIN_PB18__QSPI1_IO3 PINMUX_PIN(PIN_PB18, 2, 1)
+#define PIN_PB18__I2SMCC0_MCK PINMUX_PIN(PIN_PB18, 3, 3)
+#define PIN_PB18__FLEXCOM10_IO2 PINMUX_PIN(PIN_PB18, 4, 1)
+#define PIN_PB18__PWMH1 PINMUX_PIN(PIN_PB18, 5, 1)
+#define PIN_PB18__TIOA5 PINMUX_PIN(PIN_PB18, 6, 2)
+
+#define PIN_PB19 51
+#define PIN_PB19__GPIO PINMUX_PIN(PIN_PB19, 0, 0)
+#define PIN_PB19__QSPI0_DQS PINMUX_PIN(PIN_PB19, 1, 1)
+#define PIN_PB19__EXT_IRQ1 PINMUX_PIN(PIN_PB19, 2, 2)
+#define PIN_PB19__PCK4 PINMUX_PIN(PIN_PB19, 3, 1)
+#define PIN_PB19__FLEXCOM10_IO1 PINMUX_PIN(PIN_PB19, 4, 1)
+#define PIN_PB19__PWML2 PINMUX_PIN(PIN_PB19, 5, 1)
+#define PIN_PB19__TIOB5 PINMUX_PIN(PIN_PB19, 6, 2)
+
+#define PIN_PB20 52
+#define PIN_PB20__GPIO PINMUX_PIN(PIN_PB20, 0, 0)
+#define PIN_PB20__QSPI0_INT PINMUX_PIN(PIN_PB20, 1, 1)
+#define PIN_PB20__QSPI1_CS PINMUX_PIN(PIN_PB20, 2, 1)
+#define PIN_PB20__FLEXCOM10_IO0 PINMUX_PIN(PIN_PB20, 4, 1)
+#define PIN_PB20__PWMH2 PINMUX_PIN(PIN_PB20, 5, 1)
+#define PIN_PB20__TCLK5 PINMUX_PIN(PIN_PB20, 6, 2)
+
+#define PIN_PB21 53
+#define PIN_PB21__GPIO PINMUX_PIN(PIN_PB21, 0, 0)
+#define PIN_PB21__SDMMC1_RSTN PINMUX_PIN(PIN_PB21, 1, 1)
+#define PIN_PB21__FLEXCOM6_IO4 PINMUX_PIN(PIN_PB21, 2, 2)
+#define PIN_PB21__TIOB2 PINMUX_PIN(PIN_PB21, 3, 2)
+#define PIN_PB21__ADTRG PINMUX_PIN(PIN_PB21, 4, 2)
+#define PIN_PB21__EXT_IRQ0 PINMUX_PIN(PIN_PB21, 5, 2)
+
+#define PIN_PB22 54
+#define PIN_PB22__GPIO PINMUX_PIN(PIN_PB22, 0, 0)
+#define PIN_PB22__SDMMC1_CMD PINMUX_PIN(PIN_PB22, 1, 1)
+#define PIN_PB22__FLEXCOM6_IO3 PINMUX_PIN(PIN_PB22, 2, 2)
+#define PIN_PB22__TCLK2 PINMUX_PIN(PIN_PB22, 3, 2)
+
+#define PIN_PB23 55
+#define PIN_PB23__GPIO PINMUX_PIN(PIN_PB23, 0, 0)
+#define PIN_PB23__SDMMC1_CK PINMUX_PIN(PIN_PB23, 1, 1)
+#define PIN_PB23__FLEXCOM6_IO2 PINMUX_PIN(PIN_PB23, 2, 2)
+#define PIN_PB23__TIOA2 PINMUX_PIN(PIN_PB23, 3, 2)
+
+#define PIN_PB24 56
+#define PIN_PB24__GPIO PINMUX_PIN(PIN_PB24, 0, 0)
+#define PIN_PB24__SDMMC1_DAT0 PINMUX_PIN(PIN_PB24, 1, 1)
+#define PIN_PB24__FLEXCOM6_IO0 PINMUX_PIN(PIN_PB24, 2, 2)
+
+#define PIN_PB25 57
+#define PIN_PB25__GPIO PINMUX_PIN(PIN_PB25, 0, 0)
+#define PIN_PB25__SDMMC1_DAT1 PINMUX_PIN(PIN_PB25, 1, 1)
+#define PIN_PB25__FLEXCOM6_IO1 PINMUX_PIN(PIN_PB25, 2, 2)
+#define PIN_PB25__TIOB2 PINMUX_PIN(PIN_PB25, 3, 1)
+
+#define PIN_PB26 58
+#define PIN_PB26__GPIO PINMUX_PIN(PIN_PB26, 0, 0)
+#define PIN_PB26__SDMMC1_DAT2 PINMUX_PIN(PIN_PB26, 1, 1)
+#define PIN_PB26__FLEXCOM8_IO0 PINMUX_PIN(PIN_PB26, 2, 3)
+#define PIN_PB26__TCLK2 PINMUX_PIN(PIN_PB26, 3, 1)
+
+#define PIN_PB27 59
+#define PIN_PB27__GPIO PINMUX_PIN(PIN_PB27, 0, 0)
+#define PIN_PB27__SDMMC1_DAT3 PINMUX_PIN(PIN_PB27, 1, 1)
+#define PIN_PB27__FLEXCOM8_IO1 PINMUX_PIN(PIN_PB27, 2, 3)
+#define PIN_PB27__TIOA2 PINMUX_PIN(PIN_PB27, 3, 1)
+
+#define PIN_PB28 60
+#define PIN_PB28__GPIO PINMUX_PIN(PIN_PB28, 0, 0)
+#define PIN_PB28__SDMMC1_WP PINMUX_PIN(PIN_PB28, 1, 1)
+#define PIN_PB28__FLEXCOM1_IO0 PINMUX_PIN(PIN_PB28, 3, 3)
+#define PIN_PB28__D15 PINMUX_PIN(PIN_PB28, 5, 1)
+
+#define PIN_PB29 61
+#define PIN_PB29__GPIO PINMUX_PIN(PIN_PB29, 0, 0)
+#define PIN_PB29__SDMMC1_CD PINMUX_PIN(PIN_PB29, 1, 1)
+#define PIN_PB29__I2SMCC0_MCK PINMUX_PIN(PIN_PB29, 2, 1)
+#define PIN_PB29__FLEXCOM1_IO1 PINMUX_PIN(PIN_PB29, 3, 3)
+#define PIN_PB29__D14 PINMUX_PIN(PIN_PB29, 5, 2)
+
+#define PIN_PB30 62
+#define PIN_PB30__GPIO PINMUX_PIN(PIN_PB30, 0, 0)
+#define PIN_PB30__SDMMC1_1V8SEL PINMUX_PIN(PIN_PB30, 1, 1)
+#define PIN_PB30__I2SMCC1_MCK PINMUX_PIN(PIN_PB30, 2, 2)
+#define PIN_PB30__FLEXCOM1_IO2 PINMUX_PIN(PIN_PB30, 3, 3)
+#define PIN_PB30__TIOA1 PINMUX_PIN(PIN_PB30, 4, 1)
+#define PIN_PB30__NCS1 PINMUX_PIN(PIN_PB30, 5, 1)
+
+#define PIN_PB31 63
+#define PIN_PB31__GPIO PINMUX_PIN(PIN_PB31, 0, 0)
+#define PIN_PB31__PCK7 PINMUX_PIN(PIN_PB31, 1, 2)
+#define PIN_PB31__I2SMCC1_DIN1 PINMUX_PIN(PIN_PB31, 2, 1)
+#define PIN_PB31__FLEXCOM1_IO3 PINMUX_PIN(PIN_PB31, 3, 3)
+#define PIN_PB31__TCLK1 PINMUX_PIN(PIN_PB31, 4, 1)
+#define PIN_PB31__NWE PINMUX_PIN(PIN_PB31, 5, 2)
+
+#define PIN_PC0 64
+#define PIN_PC0__GPIO PINMUX_PIN(PIN_PC0, 0, 0)
+#define PIN_PC0__PCK6 PINMUX_PIN(PIN_PC0, 1, 2)
+#define PIN_PC0__I2SMCC1_DIN2 PINMUX_PIN(PIN_PC0, 2, 1)
+#define PIN_PC0__FLEXCOM9_IO4 PINMUX_PIN(PIN_PC0, 3, 2)
+#define PIN_PC0__TIOB1 PINMUX_PIN(PIN_PC0, 4, 1)
+#define PIN_PC0__NWR1 PINMUX_PIN(PIN_PC0, 5, 1)
+
+#define PIN_PC1 65
+#define PIN_PC1__GPIO PINMUX_PIN(PIN_PC1, 0, 0)
+#define PIN_PC1__PCK5 PINMUX_PIN(PIN_PC1, 1, 1)
+#define PIN_PC1__FLEXCOM9_IO2 PINMUX_PIN(PIN_PC1, 3, 2)
+#define PIN_PC1__SMCK PINMUX_PIN(PIN_PC1, 5, 1)
+
+#define PIN_PC2 66
+#define PIN_PC2__GPIO PINMUX_PIN(PIN_PC2, 0, 0)
+#define PIN_PC2__EXT_IRQ0 PINMUX_PIN(PIN_PC2, 1, 3)
+#define PIN_PC2__FLEXCOM9_IO3 PINMUX_PIN(PIN_PC2, 3, 2)
+#define PIN_PC2__A11 PINMUX_PIN(PIN_PC2, 5, 1)
+
+#define PIN_PC3 67
+#define PIN_PC3__GPIO PINMUX_PIN(PIN_PC3, 0, 0)
+#define PIN_PC3__SPDIF_RX PINMUX_PIN(PIN_PC3, 1, 2)
+#define PIN_PC3__FLEXCOM9_IO0 PINMUX_PIN(PIN_PC3, 3, 2)
+#define PIN_PC3__FLEXCOM0_IO4 PINMUX_PIN(PIN_PC3, 4, 2)
+#define PIN_PC3__A10 PINMUX_PIN(PIN_PC3, 5, 1)
+
+#define PIN_PC4 68
+#define PIN_PC4__GPIO PINMUX_PIN(PIN_PC4, 0, 0)
+#define PIN_PC4__SPDIF_TX PINMUX_PIN(PIN_PC4, 1, 2)
+#define PIN_PC4__FLEXCOM9_IO1 PINMUX_PIN(PIN_PC4, 3, 2)
+#define PIN_PC4__FLEXCOM0_IO3 PINMUX_PIN(PIN_PC4, 4, 2)
+#define PIN_PC4__D0 PINMUX_PIN(PIN_PC4, 5, 2)
+
+#define PIN_PC5 69
+#define PIN_PC5__GPIO PINMUX_PIN(PIN_PC5, 0, 0)
+#define PIN_PC5__I3CC_SDASPUE PINMUX_PIN(PIN_PC5, 1, 1)
+#define PIN_PC5__I2SMCC1_DIN3 PINMUX_PIN(PIN_PC5, 2, 1)
+#define PIN_PC5__FLEXCOM0_IO2 PINMUX_PIN(PIN_PC5, 4, 2)
+#define PIN_PC5__D1 PINMUX_PIN(PIN_PC5, 5, 2)
+
+#define PIN_PC6 70
+#define PIN_PC6__GPIO PINMUX_PIN(PIN_PC6, 0, 0)
+#define PIN_PC6__I3CC_SCL PINMUX_PIN(PIN_PC6, 1, 1)
+#define PIN_PC6__FLEXCOM0_IO1 PINMUX_PIN(PIN_PC6, 4, 2)
+#define PIN_PC6__D4 PINMUX_PIN(PIN_PC6, 5, 2)
+
+#define PIN_PC7 71
+#define PIN_PC7__GPIO PINMUX_PIN(PIN_PC7, 0, 0)
+#define PIN_PC7__I3CC_SDA PINMUX_PIN(PIN_PC7, 1, 1)
+#define PIN_PC7__FLEXCOM0_IO0 PINMUX_PIN(PIN_PC7, 4, 2)
+#define PIN_PC7__D5 PINMUX_PIN(PIN_PC7, 5, 2)
+
+#define PIN_PC8 72
+#define PIN_PC8__GPIO PINMUX_PIN(PIN_PC8, 0, 0)
+#define PIN_PC8__I2SMCC0_DIN1 PINMUX_PIN(PIN_PC8, 1, 1)
+#define PIN_PC8__PDMC0_DS1 PINMUX_PIN(PIN_PC8, 2, 2)
+#define PIN_PC8__I2SMCC1_DOUT1 PINMUX_PIN(PIN_PC8, 3, 1)
+#define PIN_PC8__FLEXCOM9_IO0 PINMUX_PIN(PIN_PC8, 4, 1)
+#define PIN_PC8__D6 PINMUX_PIN(PIN_PC8, 5, 2)
+
+#define PIN_PC9 73
+#define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
+#define PIN_PC9__I2SMCC0_DIN2 PINMUX_PIN(PIN_PC9, 1, 1)
+#define PIN_PC9__PDMC0_CLK PINMUX_PIN(PIN_PC9, 2, 2)
+#define PIN_PC9__I2SMCC1_DOUT2 PINMUX_PIN(PIN_PC9, 3, 1)
+#define PIN_PC9__FLEXCOM9_IO1 PINMUX_PIN(PIN_PC9, 4, 1)
+#define PIN_PC9__D7 PINMUX_PIN(PIN_PC9, 5, 2)
+
+#define PIN_PC10 74
+#define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
+#define PIN_PC10__I2SMCC0_DIN3 PINMUX_PIN(PIN_PC10, 1, 1)
+#define PIN_PC10__PDMC0_DS0 PINMUX_PIN(PIN_PC10, 2, 2)
+#define PIN_PC10__I2SMCC1_DOUT3 PINMUX_PIN(PIN_PC10, 3, 1)
+#define PIN_PC10__FLEXCOM9_IO2 PINMUX_PIN(PIN_PC10, 4, 1)
+#define PIN_PC10__D2 PINMUX_PIN(PIN_PC10, 5, 2)
+
+#define PIN_PC11 75
+#define PIN_PC11__GPIO PINMUX_PIN(PIN_PC11, 0, 0)
+#define PIN_PC11__I2SMCC0_DOUT1 PINMUX_PIN(PIN_PC11, 1, 1)
+#define PIN_PC11__PDMC1_DS0 PINMUX_PIN(PIN_PC11, 2, 1)
+#define PIN_PC11__FLEXCOM9_IO3 PINMUX_PIN(PIN_PC11, 4, 1)
+#define PIN_PC11__D3 PINMUX_PIN(PIN_PC11, 5, 2)
+
+#define PIN_PC12 76
+#define PIN_PC12__GPIO PINMUX_PIN(PIN_PC12, 0, 0)
+#define PIN_PC12__I2SMCC0_DOUT2 PINMUX_PIN(PIN_PC12, 1, 1)
+#define PIN_PC12__PDMC1_CLK PINMUX_PIN(PIN_PC12, 2, 1)
+#define PIN_PC12__FLEXCOM9_IO4 PINMUX_PIN(PIN_PC12, 4, 1)
+#define PIN_PC12__A9 PINMUX_PIN(PIN_PC12, 5, 1)
+
+#define PIN_PC13 77
+#define PIN_PC13__GPIO PINMUX_PIN(PIN_PC13, 0, 0)
+#define PIN_PC13__I2SMCC0_DOUT3 PINMUX_PIN(PIN_PC13, 1, 1)
+#define PIN_PC13__PDMC1_DS1 PINMUX_PIN(PIN_PC13, 2, 1)
+#define PIN_PC13__A8 PINMUX_PIN(PIN_PC13, 5, 1)
+
+#define PIN_PC14 78
+#define PIN_PC14__GPIO PINMUX_PIN(PIN_PC14, 0, 0)
+#define PIN_PC14__I2SMCC1_DIN0 PINMUX_PIN(PIN_PC14, 1, 1)
+#define PIN_PC14__SPDIF_RX PINMUX_PIN(PIN_PC14, 2, 3)
+#define PIN_PC14__FLEXCOM1_IO0 PINMUX_PIN(PIN_PC14, 3, 2)
+#define PIN_PC14__A7 PINMUX_PIN(PIN_PC14, 5, 1)
+
+#define PIN_PC15 79
+#define PIN_PC15__GPIO PINMUX_PIN(PIN_PC15, 0, 0)
+#define PIN_PC15__I2SMCC1_WS PINMUX_PIN(PIN_PC15, 1, 1)
+#define PIN_PC15__PDMC1_DS1 PINMUX_PIN(PIN_PC15, 2, 2)
+#define PIN_PC15__FLEXCOM1_IO1 PINMUX_PIN(PIN_PC15, 3, 2)
+#define PIN_PC15__A6 PINMUX_PIN(PIN_PC15, 5, 1)
+
+#define PIN_PC16 80
+#define PIN_PC16__GPIO PINMUX_PIN(PIN_PC16, 0, 0)
+#define PIN_PC16__I2SMCC1_CK PINMUX_PIN(PIN_PC16, 1, 1)
+#define PIN_PC16__PDMC1_CLK PINMUX_PIN(PIN_PC16, 2, 2)
+#define PIN_PC16__FLEXCOM1_IO2 PINMUX_PIN(PIN_PC16, 3, 2)
+#define PIN_PC16__TIOA1 PINMUX_PIN(PIN_PC16, 4, 2)
+#define PIN_PC16__A5 PINMUX_PIN(PIN_PC16, 5, 1)
+
+#define PIN_PC17 81
+#define PIN_PC17__GPIO PINMUX_PIN(PIN_PC17, 0, 0)
+#define PIN_PC17__I2SMCC1_DOUT0 PINMUX_PIN(PIN_PC17, 1, 1)
+#define PIN_PC17__PDMC1_DS0 PINMUX_PIN(PIN_PC17, 2, 2)
+#define PIN_PC17__FLEXCOM1_IO3 PINMUX_PIN(PIN_PC17, 3, 2)
+#define PIN_PC17__TCLK1 PINMUX_PIN(PIN_PC17, 4, 2)
+#define PIN_PC17__A4 PINMUX_PIN(PIN_PC17, 5, 1)
+
+#define PIN_PC18 82
+#define PIN_PC18__GPIO PINMUX_PIN(PIN_PC18, 0, 0)
+#define PIN_PC18__I2SMCC0_DIN0 PINMUX_PIN(PIN_PC18, 1, 1)
+#define PIN_PC18__SPDIF_TX PINMUX_PIN(PIN_PC18, 2, 3)
+#define PIN_PC18__FLEXCOM1_IO4 PINMUX_PIN(PIN_PC18, 3, 2)
+#define PIN_PC18__TIOB1 PINMUX_PIN(PIN_PC18, 4, 2)
+#define PIN_PC18__A3 PINMUX_PIN(PIN_PC18, 5, 1)
+
+#define PIN_PC19 83
+#define PIN_PC19__GPIO PINMUX_PIN(PIN_PC19, 0, 0)
+#define PIN_PC19__I2SMCC0_WS PINMUX_PIN(PIN_PC19, 1, 1)
+#define PIN_PC19__PCK6 PINMUX_PIN(PIN_PC19, 2, 1)
+#define PIN_PC19__A2 PINMUX_PIN(PIN_PC19, 5, 1)
+
+#define PIN_PC20 84
+#define PIN_PC20__GPIO PINMUX_PIN(PIN_PC20, 0, 0)
+#define PIN_PC20__I2SMCC0_DOUT0 PINMUX_PIN(PIN_PC20, 1, 1)
+#define PIN_PC20__A1 PINMUX_PIN(PIN_PC20, 5, 1)
+
+#define PIN_PC21 85
+#define PIN_PC21__GPIO PINMUX_PIN(PIN_PC21, 0, 0)
+#define PIN_PC21__I2SMCC0_CK PINMUX_PIN(PIN_PC21, 1, 1)
+#define PIN_PC21__PCK7 PINMUX_PIN(PIN_PC21, 2, 1)
+#define PIN_PC21__A0 PINMUX_PIN(PIN_PC21, 5, 1)
+
+#define PIN_PC22 86
+#define PIN_PC22__GPIO PINMUX_PIN(PIN_PC22, 0, 0)
+#define PIN_PC22__NTRST PINMUX_PIN(PIN_PC22, 1, 1)
+#define PIN_PC22__NWAIT PINMUX_PIN(PIN_PC22, 5, 1)
+
+#define PIN_PC23 87
+#define PIN_PC23__GPIO PINMUX_PIN(PIN_PC23, 0, 0)
+#define PIN_PC23__TCK_SWCLK PINMUX_PIN(PIN_PC23, 1, 1)
+
+#define PIN_PC24 88
+#define PIN_PC24__GPIO PINMUX_PIN(PIN_PC24, 0, 0)
+#define PIN_PC24__TMS_SWDIO PINMUX_PIN(PIN_PC24, 1, 1)
+
+#define PIN_PC25 89
+#define PIN_PC25__GPIO PINMUX_PIN(PIN_PC25, 0, 0)
+#define PIN_PC25__TDI PINMUX_PIN(PIN_PC25, 1, 1)
+
+#define PIN_PC26 90
+#define PIN_PC26__GPIO PINMUX_PIN(PIN_PC26, 0, 0)
+#define PIN_PC26__TDO PINMUX_PIN(PIN_PC26, 1, 1)
+#define PIN_PC26__A15 PINMUX_PIN(PIN_PC26, 5, 1)
+
+#define PIN_PC27 91
+#define PIN_PC27__GPIO PINMUX_PIN(PIN_PC27, 0, 0)
+#define PIN_PC27__SDMMC2_CMD PINMUX_PIN(PIN_PC27, 1, 1)
+#define PIN_PC27__FLEXCOM8_IO0 PINMUX_PIN(PIN_PC27, 2, 2)
+#define PIN_PC27__TD1 PINMUX_PIN(PIN_PC27, 4, 2)
+#define PIN_PC27__D8 PINMUX_PIN(PIN_PC27, 5, 1)
+
+#define PIN_PC28 92
+#define PIN_PC28__GPIO PINMUX_PIN(PIN_PC28, 0, 0)
+#define PIN_PC28__SDMMC2_CK PINMUX_PIN(PIN_PC28, 1, 1)
+#define PIN_PC28__FLEXCOM8_IO1 PINMUX_PIN(PIN_PC28, 2, 2)
+#define PIN_PC28__TF1 PINMUX_PIN(PIN_PC28, 4, 2)
+#define PIN_PC28__D9 PINMUX_PIN(PIN_PC28, 5, 1)
+
+#define PIN_PC29 93
+#define PIN_PC29__GPIO PINMUX_PIN(PIN_PC29, 0, 0)
+#define PIN_PC29__SDMMC2_DAT0 PINMUX_PIN(PIN_PC29, 1, 1)
+#define PIN_PC29__FLEXCOM8_IO2 PINMUX_PIN(PIN_PC29, 2, 2)
+#define PIN_PC29__TK1 PINMUX_PIN(PIN_PC29, 4, 2)
+#define PIN_PC29__D10 PINMUX_PIN(PIN_PC29, 5, 1)
+#define PIN_PC29__TCLK0 PINMUX_PIN(PIN_PC29, 6, 1)
+
+#define PIN_PC30 94
+#define PIN_PC30__GPIO PINMUX_PIN(PIN_PC30, 0, 0)
+#define PIN_PC30__SDMMC2_DAT1 PINMUX_PIN(PIN_PC30, 1, 1)
+#define PIN_PC30__FLEXCOM8_IO3 PINMUX_PIN(PIN_PC30, 2, 2)
+#define PIN_PC30__RD1 PINMUX_PIN(PIN_PC30, 4, 2)
+#define PIN_PC30__D11 PINMUX_PIN(PIN_PC30, 5, 1)
+#define PIN_PC30__TIOA0 PINMUX_PIN(PIN_PC30, 6, 1)
+
+#define PIN_PC31 95
+#define PIN_PC31__GPIO PINMUX_PIN(PIN_PC31, 0, 0)
+#define PIN_PC31__SDMMC2_DAT2 PINMUX_PIN(PIN_PC31, 1, 1)
+#define PIN_PC31__FLEXCOM8_IO4 PINMUX_PIN(PIN_PC31, 2, 2)
+#define PIN_PC31__PCK0 PINMUX_PIN(PIN_PC31, 3, 2)
+#define PIN_PC31__RK1 PINMUX_PIN(PIN_PC31, 4, 2)
+#define PIN_PC31__D12 PINMUX_PIN(PIN_PC31, 5, 1)
+#define PIN_PC31__TIOB0 PINMUX_PIN(PIN_PC31, 6, 1)
+
+#define PIN_PD0 96
+#define PIN_PD0__GPIO PINMUX_PIN(PIN_PD0, 0, 0)
+#define PIN_PD0__SDMMC2_DAT3 PINMUX_PIN(PIN_PD0, 1, 1)
+#define PIN_PD0__PCK1 PINMUX_PIN(PIN_PD0, 3, 2)
+#define PIN_PD0__RF1 PINMUX_PIN(PIN_PD0, 4, 2)
+#define PIN_PD0__D13 PINMUX_PIN(PIN_PD0, 5, 1)
+
+#define PIN_PD1 97
+#define PIN_PD1__GPIO PINMUX_PIN(PIN_PD1, 0, 0)
+#define PIN_PD1__SDMMC2_WP PINMUX_PIN(PIN_PD1, 1, 1)
+#define PIN_PD1__FLEXCOM1_IO5 PINMUX_PIN(PIN_PD1, 2, 1)
+#define PIN_PD1__LCDC_HSYNC PINMUX_PIN(PIN_PD1, 3, 2)
+#define PIN_PD1__FLEXCOM3_IO0 PINMUX_PIN(PIN_PD1, 4, 3)
+
+#define PIN_PD2 98
+#define PIN_PD2__GPIO PINMUX_PIN(PIN_PD2, 0, 0)
+#define PIN_PD2__SDMMC2_CD PINMUX_PIN(PIN_PD2, 1, 1)
+#define PIN_PD2__FLEXCOM1_IO6 PINMUX_PIN(PIN_PD2, 2, 1)
+#define PIN_PD2__LCDC_VSYNC PINMUX_PIN(PIN_PD2, 3, 2)
+#define PIN_PD2__FLEXCOM3_IO1 PINMUX_PIN(PIN_PD2, 4, 3)
+
+#define PIN_PD3 99
+#define PIN_PD3__GPIO PINMUX_PIN(PIN_PD3, 0, 0)
+#define PIN_PD3__SDMMC2_1V8SEL PINMUX_PIN(PIN_PD3, 1, 1)
+#define PIN_PD3__FLEXCOM1_IO4 PINMUX_PIN(PIN_PD3, 2, 1)
+#define PIN_PD3__TIOA0 PINMUX_PIN(PIN_PD3, 3, 2)
+#define PIN_PD3__FLEXCOM3_IO2 PINMUX_PIN(PIN_PD3, 4, 3)
+#define PIN_PD3__EXT_IRQ1 PINMUX_PIN(PIN_PD3, 5, 3)
+
+#define PIN_PD4 100
+#define PIN_PD4__GPIO PINMUX_PIN(PIN_PD4, 0, 0)
+#define PIN_PD4__LCDC_HSYNC PINMUX_PIN(PIN_PD4, 1, 1)
+#define PIN_PD4__FLEXCOM1_IO2 PINMUX_PIN(PIN_PD4, 2, 1)
+#define PIN_PD4__TIOB0 PINMUX_PIN(PIN_PD4, 3, 2)
+#define PIN_PD4__FLEXCOM7_IO1 PINMUX_PIN(PIN_PD4, 4, 3)
+
+#define PIN_PD5 101
+#define PIN_PD5__GPIO PINMUX_PIN(PIN_PD5, 0, 0)
+#define PIN_PD5__LCDC_VSYNC PINMUX_PIN(PIN_PD5, 1, 1)
+#define PIN_PD5__FLEXCOM1_IO3 PINMUX_PIN(PIN_PD5, 2, 1)
+#define PIN_PD5__TCLK0 PINMUX_PIN(PIN_PD5, 3, 2)
+#define PIN_PD5__FLEXCOM7_IO0 PINMUX_PIN(PIN_PD5, 4, 3)
+
+#define PIN_PD6 102
+#define PIN_PD6__GPIO PINMUX_PIN(PIN_PD6, 0, 0)
+#define PIN_PD6__LCDC_PWM PINMUX_PIN(PIN_PD6, 1, 1)
+#define PIN_PD6__FLEXCOM1_IO1 PINMUX_PIN(PIN_PD6, 2, 1)
+#define PIN_PD6__FLEXCOM7_IO2 PINMUX_PIN(PIN_PD6, 4, 3)
+
+#define PIN_PD7 103
+#define PIN_PD7__GPIO PINMUX_PIN(PIN_PD7, 0, 0)
+#define PIN_PD7__LCDC_DISP PINMUX_PIN(PIN_PD7, 1, 1)
+#define PIN_PD7__FLEXCOM1_IO0 PINMUX_PIN(PIN_PD7, 2, 1)
+#define PIN_PD7__FLEXCOM7_IO3 PINMUX_PIN(PIN_PD7, 4, 3)
+
+#define PIN_PD8 104
+#define PIN_PD8__GPIO PINMUX_PIN(PIN_PD8, 0, 0)
+#define PIN_PD8__CANTX0 PINMUX_PIN(PIN_PD8, 1, 1)
+#define PIN_PD8__FLEXCOM7_IO0 PINMUX_PIN(PIN_PD8, 2, 1)
+
+#define PIN_PD9 105
+#define PIN_PD9__GPIO PINMUX_PIN(PIN_PD9, 0, 0)
+#define PIN_PD9__CANRX0 PINMUX_PIN(PIN_PD9, 1, 1)
+#define PIN_PD9__FLEXCOM7_IO1 PINMUX_PIN(PIN_PD9, 2, 1)
+
+#define PIN_PD10 106
+#define PIN_PD10__GPIO PINMUX_PIN(PIN_PD10, 0, 0)
+#define PIN_PD10__CANTX1 PINMUX_PIN(PIN_PD10, 1, 1)
+#define PIN_PD10__FLEXCOM7_IO2 PINMUX_PIN(PIN_PD10, 2, 1)
+#define PIN_PD10__TIOA1 PINMUX_PIN(PIN_PD10, 3, 3)
+
+#define PIN_PD11 107
+#define PIN_PD11__GPIO PINMUX_PIN(PIN_PD11, 0, 0)
+#define PIN_PD11__CANRX1 PINMUX_PIN(PIN_PD11, 1, 1)
+#define PIN_PD11__FLEXCOM7_IO3 PINMUX_PIN(PIN_PD11, 2, 1)
+#define PIN_PD11__TCLK1 PINMUX_PIN(PIN_PD11, 3, 3)
+
+#define PIN_PD12 108
+#define PIN_PD12__GPIO PINMUX_PIN(PIN_PD12, 0, 0)
+#define PIN_PD12__CANTX2 PINMUX_PIN(PIN_PD12, 1, 1)
+#define PIN_PD12__FLEXCOM7_IO4 PINMUX_PIN(PIN_PD12, 2, 1)
+#define PIN_PD12__TIOB1 PINMUX_PIN(PIN_PD12, 3, 3)
+#define PIN_PD12__PCK2 PINMUX_PIN(PIN_PD12, 4, 2)
+#define PIN_PD12__FLEXCOM3_IO3 PINMUX_PIN(PIN_PD12, 5, 3)
+
+#define PIN_PD13 109
+#define PIN_PD13__GPIO PINMUX_PIN(PIN_PD13, 0, 0)
+#define PIN_PD13__CANRX2 PINMUX_PIN(PIN_PD13, 1, 1)
+#define PIN_PD13__FLEXCOM5_IO4 PINMUX_PIN(PIN_PD13, 2, 1)
+#define PIN_PD13__TIOA2 PINMUX_PIN(PIN_PD13, 3, 3)
+#define PIN_PD13__PCK3 PINMUX_PIN(PIN_PD13, 4, 2)
+
+#define PIN_PD14 110
+#define PIN_PD14__GPIO PINMUX_PIN(PIN_PD14, 0, 0)
+#define PIN_PD14__CANTX3 PINMUX_PIN(PIN_PD14, 1, 1)
+#define PIN_PD14__FLEXCOM5_IO2 PINMUX_PIN(PIN_PD14, 2, 1)
+#define PIN_PD14__TIOB2 PINMUX_PIN(PIN_PD14, 3, 3)
+
+#define PIN_PD15 111
+#define PIN_PD15__GPIO PINMUX_PIN(PIN_PD15, 0, 0)
+#define PIN_PD15__CANRX3 PINMUX_PIN(PIN_PD15, 1, 1)
+#define PIN_PD15__FLEXCOM5_IO3 PINMUX_PIN(PIN_PD15, 2, 1)
+#define PIN_PD15__TCLK2 PINMUX_PIN(PIN_PD15, 3, 3)
+
+#define PIN_PD16 112
+#define PIN_PD16__GPIO PINMUX_PIN(PIN_PD16, 0, 0)
+#define PIN_PD16__CANTX4 PINMUX_PIN(PIN_PD16, 1, 1)
+#define PIN_PD16__FLEXCOM5_IO0 PINMUX_PIN(PIN_PD16, 2, 1)
+
+#define PIN_PD17 113
+#define PIN_PD17__GPIO PINMUX_PIN(PIN_PD17, 0, 0)
+#define PIN_PD17__CANRX4 PINMUX_PIN(PIN_PD17, 1, 1)
+#define PIN_PD17__FLEXCOM5_IO1 PINMUX_PIN(PIN_PD17, 2, 1)
+
+#define PIN_PD18 114
+#define PIN_PD18__GPIO PINMUX_PIN(PIN_PD18, 0, 0)
+#define PIN_PD18__FLEXCOM6_IO0 PINMUX_PIN(PIN_PD18, 2, 4)
+#define PIN_PD18__CANTX1 PINMUX_PIN(PIN_PD18, 3, 2)
+#define PIN_PD18__PCK4 PINMUX_PIN(PIN_PD18, 4, 2)
+
+#define PIN_PD19 115
+#define PIN_PD19__GPIO PINMUX_PIN(PIN_PD19, 0, 0)
+#define PIN_PD19__FLEXCOM6_IO1 PINMUX_PIN(PIN_PD19, 2, 4)
+#define PIN_PD19__CANRX1 PINMUX_PIN(PIN_PD19, 3, 2)
+#define PIN_PD19__PCK2 PINMUX_PIN(PIN_PD19, 4, 3)
+
+#define PIN_PD20 116
+#define PIN_PD20__GPIO PINMUX_PIN(PIN_PD20, 0, 0)
+#define PIN_PD20__FLEXCOM6_IO2 PINMUX_PIN(PIN_PD20, 2, 4)
+#define PIN_PD20__I2SMCC1_MCK PINMUX_PIN(PIN_PD20, 3, 2)
+#define PIN_PD20__PCK3 PINMUX_PIN(PIN_PD20, 4, 3)
+
+#define PIN_PD21 117
+#define PIN_PD21__GPIO PINMUX_PIN(PIN_PD21, 0, 0)
+#define PIN_PD21__G1_TXCTL PINMUX_PIN(PIN_PD21, 1, 2)
+#define PIN_PD21__FLEXCOM6_IO2 PINMUX_PIN(PIN_PD21, 2, 3)
+#define PIN_PD21__TK1 PINMUX_PIN(PIN_PD21, 3, 1)
+
+#define PIN_PD22 118
+#define PIN_PD22__GPIO PINMUX_PIN(PIN_PD22, 0, 0)
+#define PIN_PD22__G1_TX0 PINMUX_PIN(PIN_PD22, 1, 1)
+#define PIN_PD22__FLEXCOM6_IO3 PINMUX_PIN(PIN_PD22, 2, 3)
+#define PIN_PD22__TF1 PINMUX_PIN(PIN_PD22, 3, 1)
+
+#define PIN_PD23 119
+#define PIN_PD23__GPIO PINMUX_PIN(PIN_PD23, 0, 0)
+#define PIN_PD23__G1_TX1 PINMUX_PIN(PIN_PD23, 1, 1)
+#define PIN_PD23__FLEXCOM6_IO4 PINMUX_PIN(PIN_PD23, 2, 3)
+#define PIN_PD23__TD1 PINMUX_PIN(PIN_PD23, 3, 1)
+
+#define PIN_PD24 120
+#define PIN_PD24__GPIO PINMUX_PIN(PIN_PD24, 0, 0)
+#define PIN_PD24__G1_RXCTL PINMUX_PIN(PIN_PD24, 1, 1)
+#define PIN_PD24__FLEXCOM6_IO0 PINMUX_PIN(PIN_PD24, 2, 3)
+#define PIN_PD24__RD1 PINMUX_PIN(PIN_PD24, 3, 1)
+#define PIN_PD24__PDMC0_DS1 PINMUX_PIN(PIN_PD24, 5, 3)
+
+#define PIN_PD25 121
+#define PIN_PD25__GPIO PINMUX_PIN(PIN_PD25, 0, 0)
+#define PIN_PD25__G1_MDC PINMUX_PIN(PIN_PD25, 1, 1)
+#define PIN_PD25__FLEXCOM6_IO1 PINMUX_PIN(PIN_PD25, 2, 3)
+#define PIN_PD25__RK1 PINMUX_PIN(PIN_PD25, 3, 1)
+#define PIN_PD25__PDMC0_CLK PINMUX_PIN(PIN_PD25, 5, 3)
+
+#define PIN_PD26 122
+#define PIN_PD26__GPIO PINMUX_PIN(PIN_PD26, 0, 0)
+#define PIN_PD26__G1_MDIO PINMUX_PIN(PIN_PD26, 1, 1)
+#define PIN_PD26__FLEXCOM7_IO4 PINMUX_PIN(PIN_PD26, 2, 2)
+#define PIN_PD26__RF1 PINMUX_PIN(PIN_PD26, 3, 1)
+#define PIN_PD26__I2SMCC1_DIN2 PINMUX_PIN(PIN_PD26, 4, 2)
+#define PIN_PD26__PDMC0_DS0 PINMUX_PIN(PIN_PD26, 5, 3)
+
+#define PIN_PD27 123
+#define PIN_PD27__GPIO PINMUX_PIN(PIN_PD27, 0, 0)
+#define PIN_PD27__G1_RX0 PINMUX_PIN(PIN_PD27, 1, 1)
+#define PIN_PD27__FLEXCOM7_IO0 PINMUX_PIN(PIN_PD27, 2, 2)
+#define PIN_PD27__SPDIF_RX PINMUX_PIN(PIN_PD27, 3, 1)
+#define PIN_PD27__I2SMCC1_DIN3 PINMUX_PIN(PIN_PD27, 4, 2)
+
+#define PIN_PD28 124
+#define PIN_PD28__GPIO PINMUX_PIN(PIN_PD28, 0, 0)
+#define PIN_PD28__G1_RX1 PINMUX_PIN(PIN_PD28, 1, 1)
+#define PIN_PD28__FLEXCOM7_IO1 PINMUX_PIN(PIN_PD28, 2, 2)
+#define PIN_PD28__SPDIF_TX PINMUX_PIN(PIN_PD28, 3, 1)
+#define PIN_PD28__I2SMCC1_DIN1 PINMUX_PIN(PIN_PD28, 4, 2)
+
+#define PIN_PD29 125
+#define PIN_PD29__GPIO PINMUX_PIN(PIN_PD29, 0, 0)
+#define PIN_PD29__G1_REFCK PINMUX_PIN(PIN_PD29, 1, 2)
+#define PIN_PD29__FLEXCOM7_IO2 PINMUX_PIN(PIN_PD29, 2, 2)
+#define PIN_PD29__I2SMCC1_DOUT3 PINMUX_PIN(PIN_PD29, 3, 2)
+
+#define PIN_PD30 126
+#define PIN_PD30__GPIO PINMUX_PIN(PIN_PD30, 0, 0)
+#define PIN_PD30__G1_RX2 PINMUX_PIN(PIN_PD30, 1, 1)
+#define PIN_PD30__FLEXCOM7_IO3 PINMUX_PIN(PIN_PD30, 2, 2)
+#define PIN_PD30__I2SMCC1_DOUT1 PINMUX_PIN(PIN_PD30, 3, 2)
+#define PIN_PD30__PDMC1_DS1 PINMUX_PIN(PIN_PD30, 4, 3)
+#define PIN_PD30__G1_RXER PINMUX_PIN(PIN_PD30, 5, 2)
+
+#define PIN_PD31 127
+#define PIN_PD31__GPIO PINMUX_PIN(PIN_PD31, 0, 0)
+#define PIN_PD31__G1_RX3 PINMUX_PIN(PIN_PD31, 1, 1)
+#define PIN_PD31__FLEXCOM5_IO4 PINMUX_PIN(PIN_PD31, 2, 2)
+#define PIN_PD31__I2SMCC1_DOUT2 PINMUX_PIN(PIN_PD31, 3, 3)
+#define PIN_PD31__PDMC1_DS0 PINMUX_PIN(PIN_PD31, 4, 3)
+
+#define PIN_PE0 128
+#define PIN_PE0__GPIO PINMUX_PIN(PIN_PE0, 0, 0)
+#define PIN_PE0__G1_TX2 PINMUX_PIN(PIN_PE0, 1, 1)
+#define PIN_PE0__FLEXCOM5_IO2 PINMUX_PIN(PIN_PE0, 2, 2)
+#define PIN_PE0__I2SMCC1_DIN0 PINMUX_PIN(PIN_PE0, 3, 2)
+#define PIN_PE0__PDMC1_CLK PINMUX_PIN(PIN_PE0, 4, 3)
+
+#define PIN_PE1 129
+#define PIN_PE1__GPIO PINMUX_PIN(PIN_PE1, 0, 0)
+#define PIN_PE1__G1_TX3 PINMUX_PIN(PIN_PE1, 1, 1)
+#define PIN_PE1__FLEXCOM5_IO3 PINMUX_PIN(PIN_PE1, 2, 2)
+#define PIN_PE1__I2SMCC1_WS PINMUX_PIN(PIN_PE1, 3, 2)
+#define PIN_PE1__PDMC0_DS1 PINMUX_PIN(PIN_PE1, 4, 4)
+
+#define PIN_PE2 130
+#define PIN_PE2__GPIO PINMUX_PIN(PIN_PE2, 0, 0)
+#define PIN_PE2__G1_RXCK PINMUX_PIN(PIN_PE2, 1, 1)
+#define PIN_PE2__FLEXCOM5_IO1 PINMUX_PIN(PIN_PE2, 2, 2)
+#define PIN_PE2__I2SMCC1_CK PINMUX_PIN(PIN_PE2, 3, 2)
+#define PIN_PE2__PDMC0_CLK PINMUX_PIN(PIN_PE2, 4, 4)
+
+#define PIN_PE3 131
+#define PIN_PE3__GPIO PINMUX_PIN(PIN_PE3, 0, 0)
+#define PIN_PE3__G1_TSUCOMP PINMUX_PIN(PIN_PE3, 1, 1)
+#define PIN_PE3__FLEXCOM5_IO0 PINMUX_PIN(PIN_PE3, 2, 2)
+#define PIN_PE3__I2SMCC1_DOUT0 PINMUX_PIN(PIN_PE3, 3, 2)
+#define PIN_PE3__PDMC0_DS0 PINMUX_PIN(PIN_PE3, 4, 4)
+
+#define PIN_PE4 132
+#define PIN_PE4__GPIO PINMUX_PIN(PIN_PE4, 0, 0)
+#define PIN_PE4__LCDC_DAT0 PINMUX_PIN(PIN_PE4, 1, 1)
+#define PIN_PE4__FLEXCOM2_IO2 PINMUX_PIN(PIN_PE4, 2, 1)
+#define PIN_PE4__PWML0 PINMUX_PIN(PIN_PE4, 3, 2)
+#define PIN_PE4__TIOA3 PINMUX_PIN(PIN_PE4, 4, 1)
+#define PIN_PE4__I2SMCC0_DIN1 PINMUX_PIN(PIN_PE4, 5, 2)
+
+#define PIN_PE5 133
+#define PIN_PE5__GPIO PINMUX_PIN(PIN_PE5, 0, 0)
+#define PIN_PE5__LCDC_DAT1 PINMUX_PIN(PIN_PE5, 1, 1)
+#define PIN_PE5__FLEXCOM2_IO3 PINMUX_PIN(PIN_PE5, 2, 1)
+#define PIN_PE5__PWMH0 PINMUX_PIN(PIN_PE5, 3, 2)
+#define PIN_PE5__TIOB3 PINMUX_PIN(PIN_PE5, 4, 1)
+#define PIN_PE5__I2SMCC0_DIN2 PINMUX_PIN(PIN_PE5, 5, 2)
+
+#define PIN_PE6 134
+#define PIN_PE6__GPIO PINMUX_PIN(PIN_PE6, 0, 0)
+#define PIN_PE6__LCDC_DAT2 PINMUX_PIN(PIN_PE6, 1, 1)
+#define PIN_PE6__FLEXCOM2_IO4 PINMUX_PIN(PIN_PE6, 2, 1)
+#define PIN_PE6__PWML1 PINMUX_PIN(PIN_PE6, 3, 2)
+#define PIN_PE6__TCLK3 PINMUX_PIN(PIN_PE6, 4, 1)
+#define PIN_PE6__I2SMCC0_DIN3 PINMUX_PIN(PIN_PE6, 5, 2)
+
+#define PIN_PE7 135
+#define PIN_PE7__GPIO PINMUX_PIN(PIN_PE7, 0, 0)
+#define PIN_PE7__LCDC_DAT3 PINMUX_PIN(PIN_PE7, 1, 1)
+#define PIN_PE7__FLEXCOM2_IO5 PINMUX_PIN(PIN_PE7, 2, 1)
+#define PIN_PE7__PWMH1 PINMUX_PIN(PIN_PE7, 3, 2)
+#define PIN_PE7__TIOA4 PINMUX_PIN(PIN_PE7, 4, 1)
+#define PIN_PE7__I2SMCC0_DOUT1 PINMUX_PIN(PIN_PE7, 5, 2)
+
+#define PIN_PE8 136
+#define PIN_PE8__GPIO PINMUX_PIN(PIN_PE8, 0, 0)
+#define PIN_PE8__LCDC_DAT4 PINMUX_PIN(PIN_PE8, 1, 1)
+#define PIN_PE8__FLEXCOM2_IO0 PINMUX_PIN(PIN_PE8, 2, 1)
+#define PIN_PE8__PWML2 PINMUX_PIN(PIN_PE8, 3, 2)
+#define PIN_PE8__TIOB4 PINMUX_PIN(PIN_PE8, 4, 1)
+#define PIN_PE8__I2SMCC0_CK PINMUX_PIN(PIN_PE8, 5, 2)
+
+#define PIN_PE9 137
+#define PIN_PE9__GPIO PINMUX_PIN(PIN_PE9, 0, 0)
+#define PIN_PE9__LCDC_DAT5 PINMUX_PIN(PIN_PE9, 1, 1)
+#define PIN_PE9__FLEXCOM2_IO1 PINMUX_PIN(PIN_PE9, 2, 1)
+#define PIN_PE9__PWMH2 PINMUX_PIN(PIN_PE9, 3, 2)
+#define PIN_PE9__TCLK4 PINMUX_PIN(PIN_PE9, 4, 1)
+#define PIN_PE9__I2SMCC0_WS PINMUX_PIN(PIN_PE9, 5, 2)
+
+#define PIN_PE10 138
+#define PIN_PE10__GPIO PINMUX_PIN(PIN_PE10, 0, 0)
+#define PIN_PE10__LCDC_DAT6 PINMUX_PIN(PIN_PE10, 1, 1)
+#define PIN_PE10__FLEXCOM2_IO6 PINMUX_PIN(PIN_PE10, 2, 1)
+#define PIN_PE10__PWML3 PINMUX_PIN(PIN_PE10, 3, 2)
+#define PIN_PE10__TIOA5 PINMUX_PIN(PIN_PE10, 4, 1)
+#define PIN_PE10__I2SMCC0_DOUT2 PINMUX_PIN(PIN_PE10, 5, 2)
+
+#define PIN_PE11 139
+#define PIN_PE11__GPIO PINMUX_PIN(PIN_PE11, 0, 0)
+#define PIN_PE11__LCDC_DAT7 PINMUX_PIN(PIN_PE11, 1, 1)
+#define PIN_PE11__PWMH3 PINMUX_PIN(PIN_PE11, 3, 2)
+#define PIN_PE11__TIOB5 PINMUX_PIN(PIN_PE11, 4, 1)
+#define PIN_PE11__I2SMCC0_DOUT3 PINMUX_PIN(PIN_PE11, 5, 2)
+
+#define PIN_PE12 140
+#define PIN_PE12__GPIO PINMUX_PIN(PIN_PE12, 0, 0)
+#define PIN_PE12__LCDC_DEN PINMUX_PIN(PIN_PE12, 1, 1)
+#define PIN_PE12__PCK3 PINMUX_PIN(PIN_PE12, 2, 4)
+#define PIN_PE12__PWMEXTRG0 PINMUX_PIN(PIN_PE12, 3, 2)
+#define PIN_PE12__TCLK5 PINMUX_PIN(PIN_PE12, 4, 1)
+#define PIN_PE12__I2SMCC0_DIN0 PINMUX_PIN(PIN_PE12, 5, 2)
+
+#define PIN_PE13 141
+#define PIN_PE13__GPIO PINMUX_PIN(PIN_PE13, 0, 0)
+#define PIN_PE13__LCDC_PCK PINMUX_PIN(PIN_PE13, 1, 1)
+#define PIN_PE13__PCK4 PINMUX_PIN(PIN_PE13, 2, 3)
+#define PIN_PE13__PWMEXTRG1 PINMUX_PIN(PIN_PE13, 3, 2)
+#define PIN_PE13__I2SMCC0_DOUT0 PINMUX_PIN(PIN_PE13, 5, 2)
diff --git a/arch/arm/boot/dts/microchip/sama7d65.dtsi b/arch/arm/boot/dts/microchip/sama7d65.dtsi
new file mode 100644
index 000000000000..854b30d15dcd
--- /dev/null
+++ b/arch/arm/boot/dts/microchip/sama7d65.dtsi
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * sama7d65.dtsi - Device Tree Include file for SAMA7D65 SoC
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <Ryan.Wanner@microchip.com>
+ *
+ */
+
+#include <dt-bindings/clock/at91.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/mfd/at91-usart.h>
+
+/ {
+ model = "Microchip SAMA7D65 family SoC";
+ compatible = "microchip,sama7d65";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a7";
+ reg = <0x0>;
+ device_type = "cpu";
+ clocks = <&pmc PMC_TYPE_CORE PMC_CPUPLL>;
+ clock-names = "cpu";
+ };
+ };
+
+ clocks {
+ main_xtal: clock-mainxtal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ slow_xtal: clock-slowxtal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pioa: pinctrl@e0014000 {
+ compatible = "microchip,sama7d65-pinctrl", "microchip,sama7g5-pinctrl";
+ reg = <0xe0014000 0x800>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pmc: clock-controller@e0018000 {
+ compatible = "microchip,sama7d65-pmc", "syscon";
+ reg = <0xe0018000 0x200>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <2>;
+ clocks = <&clk32k 1>, <&clk32k 0>, <&main_xtal>;
+ clock-names = "td_slck", "md_slck", "main_xtal";
+ };
+
+ clk32k: clock-controller@e001d500 {
+ compatible = "microchip,sama7d65-sckc", "microchip,sam9x60-sckc";
+ reg = <0xe001d500 0x4>;
+ clocks = <&slow_xtal>;
+ #clock-cells = <1>;
+ };
+
+ sdmmc1: mmc@e1208000 {
+ compatible = "microchip,sama7d65-sdhci", "microchip,sam9x60-sdhci";
+ reg = <0xe1208000 0x400>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 76>, <&pmc PMC_TYPE_GCK 76>;
+ clock-names = "hclock", "multclk";
+ assigned-clocks = <&pmc PMC_TYPE_GCK 76>;
+ assigned-clock-rates = <200000000>;
+ assigned-clock-parents = <&pmc PMC_TYPE_CORE PMC_MCK1>;
+ status = "disabled";
+ };
+
+ pit64b0: timer@e1800000 {
+ compatible = "microchip,sama7d65-pit64b", "microchip,sam9x60-pit64b";
+ reg = <0xe1800000 0x100>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 66>, <&pmc PMC_TYPE_GCK 66>;
+ clock-names = "pclk", "gclk";
+ };
+
+ pit64b1: timer@e1804000 {
+ compatible = "microchip,sama7d65-pit64b", "microchip,sam9x60-pit64b";
+ reg = <0xe1804000 0x100>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 67>, <&pmc PMC_TYPE_GCK 67>;
+ clock-names = "pclk", "gclk";
+ };
+
+ flx6: flexcom@e2020000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe2020000 0x200>;
+ ranges = <0x0 0xe2020000 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 40>;
+ status = "disabled";
+
+ uart6: serial@200 {
+ compatible = "microchip,sama7d65-usart", "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 40>;
+ clock-names = "usart";
+ atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+ };
+
+ gic: interrupt-controller@e8c11000 {
+ compatible = "arm,cortex-a7-gic";
+ reg = <0xe8c11000 0x1000>,
+ <0xe8c12000 0x2000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
index 9f64c85e1c20..c3501786d600 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-gbs.dts
@@ -661,7 +661,7 @@
clock-frequency = <100000>;
status = "okay";
- mb_fru@50 {
+ eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
};
@@ -704,7 +704,7 @@
reg = <0x5d>;
status = "okay";
};
- fan_fru@51 {
+ eeprom@51 {
compatible = "atmel,24c64";
reg = <0x51>;
};
@@ -714,7 +714,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
- hsbp_fru@52 {
+ eeprom@52 {
compatible = "atmel,24c64";
reg = <0x52>;
status = "okay";
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
index 087f4ac43187..f67ede148209 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-runbmc-olympus.dts
@@ -824,7 +824,7 @@
reg = <0x4a>;
status = "okay";
};
- m24128_fru@51 {
+ eeprom@51 {
compatible = "atmel,24c128";
reg = <0x51>;
pagesize = <64>;
diff --git a/arch/arm/boot/dts/nvidia/tegra124-nyan.dtsi b/arch/arm/boot/dts/nvidia/tegra124-nyan.dtsi
index 8125c1b3e8d7..974c76f007db 100644
--- a/arch/arm/boot/dts/nvidia/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra124-nyan.dtsi
@@ -716,6 +716,7 @@
regulator-name = "+5V_USB_HS";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
gpio = <&gpio TEGRA_GPIO(N, 4) GPIO_ACTIVE_HIGH>;
enable-active-high;
gpio-open-drain;
@@ -727,6 +728,7 @@
regulator-name = "+5V_USB_SS";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
gpio = <&gpio TEGRA_GPIO(N, 5) GPIO_ACTIVE_HIGH>;
enable-active-high;
gpio-open-drain;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
index 7cd17b43b4b2..06545a6052f7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
@@ -160,7 +160,7 @@
};
};
- mdio_gpio: mdio-gpio {
+ mdio_gpio: mdio {
compatible = "virtual,mdio-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_swmdio>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-zii-scu2-mezz.dts b/arch/arm/boot/dts/nxp/imx/imx51-zii-scu2-mezz.dts
index 625f9ac671ae..26eb7a9506e4 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-zii-scu2-mezz.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-zii-scu2-mezz.dts
@@ -37,7 +37,7 @@
regulator-max-microvolt = <5000000>;
};
- mdio_gpio: mdio-gpio {
+ mdio_gpio: mdio {
compatible = "virtual,mdio-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_swmdio>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
index c1ae7c47b442..aa1adcc74019 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
@@ -94,7 +94,7 @@
mdio-gpio0 = &mdio0;
};
- mdio0: mdio-gpio {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */
<&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
index 1c72da417011..dffab5aa8b9c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
@@ -691,7 +691,7 @@
adv_7280: adv7280@21 {
compatible = "adi,adv7280";
- adv,force-bt656-4;
+ adi,force-bt656-4;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ipu1_csi0>;
reg = <0x21>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
index dc8298f6db34..960e83f5e904 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabresd.dtsi
@@ -113,8 +113,8 @@
"DMICDAT", "DMIC";
mux-int-port = <2>;
mux-ext-port = <3>;
- hp-det-gpio = <&gpio7 8 GPIO_ACTIVE_LOW>;
- mic-det-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio7 8 GPIO_ACTIVE_LOW>;
+ mic-det-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
};
backlight_lvds: backlight-lvds {
@@ -804,6 +804,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg>;
disable-over-current;
+ dr_mode = "otg";
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
index d2200c9db25a..45bcfd7faf9d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl.dtsi
@@ -711,8 +711,8 @@
reg_vdd3p0: regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <3150000>;
+ regulator-min-microvolt = <2625000>;
+ regulator-max-microvolt = <3400000>;
regulator-always-on;
anatop-reg-offset = <0x120>;
anatop-vol-bit-shift = <8>;
@@ -806,6 +806,7 @@
reg = <0x020c9000 0x1000>;
interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_USBPHY1>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
@@ -814,6 +815,7 @@
reg = <0x020ca000 0x1000>;
interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_USBPHY2>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
index 55cdfa7ea206..036705b783f4 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts
@@ -108,7 +108,7 @@
"IN3R", "AMIC";
mux-int-port = <2>;
mux-ext-port = <3>;
- hp-det-gpio = <&gpio4 19 GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio4 19 GPIO_ACTIVE_LOW>;
};
panel {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sl.dtsi
index 941a2f185056..7381fb7f8912 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sl.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sl.dtsi
@@ -546,8 +546,8 @@
reg_vdd3p0: regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <3150000>;
+ regulator-min-microvolt = <2625000>;
+ regulator-max-microvolt = <3400000>;
regulator-always-on;
anatop-reg-offset = <0x120>;
anatop-vol-bit-shift = <8>;
@@ -640,6 +640,7 @@
reg = <0x020c9000 0x1000>;
interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_USBPHY1>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
@@ -648,6 +649,7 @@
reg = <0x020ca000 0x1000>;
interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_USBPHY2>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
index 05d6827ea2af..814401486792 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6sll-evk.dts
@@ -157,7 +157,7 @@
"IN3R", "AMIC";
mux-int-port = <2>;
mux-ext-port = <3>;
- hp-det-gpio = <&gpio4 24 GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
index 1beac42c1a27..67cf09e63a63 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi
@@ -167,7 +167,7 @@
"IN3R", "AMIC";
mux-int-port = <2>;
mux-ext-port = <6>;
- hp-det-gpio = <&gpio1 17 GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
panel {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
index a9550f115f82..5132b575b001 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
@@ -637,8 +637,8 @@
reg_vdd3p0: regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <3150000>;
+ regulator-min-microvolt = <2625000>;
+ regulator-max-microvolt = <3400000>;
regulator-always-on;
anatop-reg-offset = <0x120>;
anatop-vol-bit-shift = <8>;
@@ -731,6 +731,7 @@
reg = <0x020c9000 0x1000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_USBPHY1>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
@@ -739,6 +740,7 @@
reg = <0x020ca000 0x1000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_USBPHY2>;
+ phy-3p0-supply = <&reg_vdd3p0>;
fsl,anatop = <&anatop>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
index b74ee8948a78..0e839bbfea08 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
@@ -68,7 +68,7 @@
audio-cpu = <&sai2>;
audio-codec = <&codec>;
audio-asrc = <&asrc>;
- hp-det-gpio = <&gpio5 4 0>;
+ hp-det-gpios = <&gpio5 4 0>;
audio-routing =
"Headphone Jack", "HP_L",
"Headphone Jack", "HP_R",
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
index e1c401f468e1..576a7df505d3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
@@ -87,34 +87,6 @@
<&adc2 0>, <&adc2 1>, <&adc2 2>, <&adc2 3>;
};
- reg_sd1_vmmc: regulator-sd1-vmmc {
- compatible = "regulator-fixed";
- regulator-name = "VCC3V3_SD1";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- reg_fec1_pwdn: regulator-fec1-pwdn {
- compatible = "regulator-fixed";
- regulator-name = "PWDN_FEC1";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_fec2_pwdn: regulator-fec2-pwdn {
- compatible = "regulator-fixed";
- regulator-name = "PWDN_FEC2";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
compatible = "regulator-fixed";
regulator-name = "VBUS_USBOTG1";
@@ -141,6 +113,7 @@
gpio = <&pca9555 12 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
+ vin-supply = <&reg_mba_5v>;
};
reg_mpcie_3v3: regulator-mpcie-3v3 {
@@ -151,6 +124,7 @@
gpio = <&pca9555 10 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
+ vin-supply = <&reg_mba_3v3>;
};
reg_mba_12v0: regulator-mba-12v0 {
@@ -162,13 +136,18 @@
enable-active-high;
};
- reg_lvds_transmitter: regulator-lvds-transmitter {
+ reg_mba_5v: regulator-mba-5v {
compatible = "regulator-fixed";
- regulator-name = "#SHTDN_LVDS";
+ regulator-name = "VCC5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_mba_3v3: regulator-mba-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&pca9555 1 GPIO_ACTIVE_HIGH>;
- enable-active-high;
};
reg_vref_1v8: regulator-vref-1v8 {
@@ -186,14 +165,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
- };
-
- reg_vcc_3v3: regulator-vcc-3v3 {
- compatible = "regulator-fixed";
- regulator-name = "VCC3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ vin-supply = <&reg_mba_3v3>;
};
sound {
@@ -239,7 +211,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
phy-mode = "rgmii-id";
- phy-supply = <&reg_fec1_pwdn>;
phy-handle = <&ethphy1_0>;
fsl,magic-packet;
status = "okay";
@@ -260,6 +231,8 @@
reset-gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <500>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
};
};
};
@@ -318,7 +291,7 @@
lm75: temperature-sensor@49 {
compatible = "national,lm75a";
reg = <0x49>;
- vs-supply = <&reg_vcc_3v3>;
+ vs-supply = <&reg_mba_3v3>;
};
};
@@ -351,7 +324,7 @@
interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <2>;
- vcc-supply = <&reg_vcc_3v3>;
+ vcc-supply = <&reg_mba_3v3>;
};
};
@@ -668,7 +641,7 @@
pinctrl-2 = <&pinctrl_usdhc1_200mhz>, <&pinctrl_usdhc1_gpio>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
- vmmc-supply = <&reg_sd1_vmmc>;
+ vmmc-supply = <&reg_mba_3v3>;
bus-width = <4>;
no-1-8-v;
no-sdio;
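The MBa7 changes above drop the always-on PWDN_FEC1/PWDN_FEC2 fixed "regulators" and instead reuse the same pins, gpio1 9 here and gpio2 31 in the imx7d-mba7.dts hunk below, as the Ethernet PHY interrupt lines, presumably because they are interrupt outputs rather than power controls; the SD card and I2C peripherals are rewired to the new reg_mba_3v3/reg_mba_5v supplies. The resulting PHY child node looks roughly like this sketch; only the label comes from the phy-handle above, the compatible and reg are assumed:

	ethphy1_0: ethernet-phy@0 {
		compatible = "ethernet-phy-ieee802.3-c22";	/* assumed */
		reg = <0>;					/* assumed MDIO address */
		reset-gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
		reset-assert-us = <1000>;
		reset-deassert-us = <500>;
		interrupt-parent = <&gpio1>;	/* the pin previously claimed by PWDN_FEC1 */
		interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
	};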
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
index 028961eb7108..aa8f65cd4adf 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
@@ -135,6 +135,7 @@
lm75a: temperature-sensor@48 {
compatible = "national,lm75a";
reg = <0x48>;
+ vs-supply = <&vgen4_reg>;
};
/* NXP SE97BTP with temperature sensor + eeprom, TQMa7x 02xx */
@@ -150,7 +151,6 @@
reg = <0x50>;
pagesize = <32>;
vcc-supply = <&vgen4_reg>;
- status = "okay";
};
at24c02: eeprom@56 {
@@ -158,7 +158,6 @@
reg = <0x56>;
pagesize = <16>;
vcc-supply = <&vgen4_reg>;
- status = "okay";
};
ds1339: rtc@68 {
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-mba7.dts b/arch/arm/boot/dts/nxp/imx/imx7d-mba7.dts
index 0443faa3dfae..e3ee16f1aaa9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-mba7.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-mba7.dts
@@ -21,7 +21,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii-id";
- phy-supply = <&reg_fec2_pwdn>;
phy-handle = <&ethphy2_0>;
fsl,magic-packet;
status = "okay";
@@ -42,6 +41,8 @@
reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <500>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
index f712537fca16..6cde84636900 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
@@ -169,7 +169,7 @@
model = "wm8960-audio";
audio-cpu = <&sai1>;
audio-codec = <&codec>;
- hp-det-gpio = <&gpio2 28 GPIO_ACTIVE_HIGH>;
+ hp-det-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>;
audio-routing =
"Headphone Jack", "HP_L",
"Headphone Jack", "HP_R",
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
index d0f6120b665d..39530eb580ea 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
@@ -427,8 +427,9 @@
interrupt-names = "global",
"doorbell";
- interconnects = <&system_noc MASTER_PCIE &mc_virt SLAVE_EBI_CH0>;
- interconnect-names = "pcie-mem";
+ interconnects = <&system_noc MASTER_PCIE &mc_virt SLAVE_EBI_CH0>,
+ <&mem_noc MASTER_AMPSS_M0 &system_noc SLAVE_PCIE_0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
resets = <&gcc GCC_PCIE_BCR>;
reset-names = "core";
@@ -613,6 +614,8 @@
iommus = <&apps_smmu 0x1a0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_hsphy>, <&usb_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
index 3bc67bb8c1eb..6b23ee676c9e 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
@@ -335,6 +335,10 @@
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "global", "doorbell";
+ interconnects = <&system_noc MASTER_PCIE_0 &mc_virt SLAVE_EBI1>,
+ <&mem_noc MASTER_APPSS_PROC &system_noc SLAVE_PCIE_0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
resets = <&gcc GCC_PCIE_BCR>;
reset-names = "core";
@@ -526,6 +530,8 @@
iommus = <&apps_smmu 0x1a0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_hsphy>, <&usb_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
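Both the SDX55 and SDX65 PCIe nodes now carry the usual pair of interconnect paths instead of a single one. A sketch of what the two names cover (the path descriptions are the conventional interpretation, not spelled out in the patch itself):

	/* "pcie-mem": bandwidth vote for PCIe-initiated traffic towards DDR
	 * "cpu-pcie": keeps the CPU-to-PCIe path alive for register/config access
	 */
	interconnects = <&system_noc MASTER_PCIE_0 &mc_virt SLAVE_EBI1>,
			<&mem_noc MASTER_APPSS_PROC &system_noc SLAVE_PCIE_0>;
	interconnect-names = "pcie-mem", "cpu-pcie";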
diff --git a/arch/arm/boot/dts/renesas/r7s72100.dtsi b/arch/arm/boot/dts/renesas/r7s72100.dtsi
index b831bbc431ef..1a866dbaf5e9 100644
--- a/arch/arm/boot/dts/renesas/r7s72100.dtsi
+++ b/arch/arm/boot/dts/renesas/r7s72100.dtsi
@@ -238,6 +238,8 @@
<GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&mstp10_clks R7S72100_CLK_SPI0>;
+ dmas = <&dmac 0x2d21>, <&dmac 0x2d22>;
+ dma-names = "tx", "rx";
power-domains = <&cpg_clocks>;
num-cs = <1>;
#address-cells = <1>;
@@ -253,6 +255,8 @@
<GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&mstp10_clks R7S72100_CLK_SPI1>;
+ dmas = <&dmac 0x2d25>, <&dmac 0x2d26>;
+ dma-names = "tx", "rx";
power-domains = <&cpg_clocks>;
num-cs = <1>;
#address-cells = <1>;
@@ -268,6 +272,8 @@
<GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&mstp10_clks R7S72100_CLK_SPI2>;
+ dmas = <&dmac 0x2d29>, <&dmac 0x2d2a>;
+ dma-names = "tx", "rx";
power-domains = <&cpg_clocks>;
num-cs = <1>;
#address-cells = <1>;
@@ -283,6 +289,8 @@
<GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&mstp10_clks R7S72100_CLK_SPI3>;
+ dmas = <&dmac 0x2d2d>, <&dmac 0x2d2e>;
+ dma-names = "tx", "rx";
power-domains = <&cpg_clocks>;
num-cs = <1>;
#address-cells = <1>;
@@ -298,6 +306,8 @@
<GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&mstp10_clks R7S72100_CLK_SPI4>;
+ dmas = <&dmac 0x2d31>, <&dmac 0x2d32>;
+ dma-names = "tx", "rx";
power-domains = <&cpg_clocks>;
num-cs = <1>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
index 9bc05961577d..70e3091062f9 100644
--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
@@ -300,12 +300,31 @@
regulator-max-microvolt = <2800000>;
};
+ earmic_bias_reg: voltage-regulator-6 {
+ compatible = "regulator-fixed";
+ regulator-name = "EAR_MICBIAS_LDO_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpm0 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
sound: sound {
compatible = "samsung,midas-audio";
model = "TAB3";
mic-bias-supply = <&mic_bias_reg>;
submic-bias-supply = <&submic_bias_reg>;
+ lineout-sel-gpios = <&gpj1 2 GPIO_ACTIVE_HIGH>;
+
+ headset-mic-bias-supply = <&earmic_bias_reg>;
+ headset-detect-gpios = <&gpx0 4 GPIO_ACTIVE_LOW>;
+ headset-key-gpios = <&gpx3 6 GPIO_ACTIVE_LOW>;
+ samsung,headset-4pole-threshold-microvolt = <710 2000>;
+ samsung,headset-button-threshold-microvolt = <0 130 260>;
+ io-channel-names = "headset-detect";
+ io-channels = <&adc 0>;
+
audio-routing = "HP", "HPOUT1L",
"HP", "HPOUT1R",
@@ -351,6 +370,11 @@
};
};
+&adc {
+ vdd-supply = <&ldo3_reg>;
+ status = "okay";
+};
+
&bus_acp {
devfreq = <&bus_dmc>;
status = "okay";
@@ -511,12 +535,11 @@
wm1811: audio-codec@1a {
compatible = "wlf,wm1811";
reg = <0x1a>;
- clocks = <&pmu_system_controller 0>;
- clock-names = "MCLK1";
+ clocks = <&pmu_system_controller 0>,
+ <&s5m8767_osc S2MPS11_CLK_BT>;
+ clock-names = "MCLK1", "MCLK2";
interrupt-controller;
#interrupt-cells = <2>;
- interrupt-parent = <&gpx3>;
- interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
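On the Tab 3, headset handling moves away from the WM1811 codec's own jack-detect interrupt (the gpx3 6 line removed above) into the midas-audio machine driver: plug detection comes from a GPIO, and an ADC channel classifies 3- versus 4-pole plugs and the headset buttons against the threshold tables added above. A condensed sketch of the provider/consumer pairing these hunks introduce:

	&adc {
		vdd-supply = <&ldo3_reg>;
		status = "okay";			/* provider: SoC ADC, channel 0 */
	};

	sound {
		io-channels = <&adc 0>;
		io-channel-names = "headset-detect";
		headset-detect-gpios = <&gpx0 4 GPIO_ACTIVE_LOW>;
		headset-key-gpios = <&gpx3 6 GPIO_ACTIVE_LOW>;	/* same pin, repurposed from the codec IRQ */
	};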
diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile
index eab3a9bd435f..b7d5d305cbbe 100644
--- a/arch/arm/boot/dts/st/Makefile
+++ b/arch/arm/boot/dts/st/Makefile
@@ -39,6 +39,7 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32mp151c-mect1s.dtb \
stm32mp153c-dhcom-drc02.dtb \
stm32mp153c-dhcor-drc-compact.dtb \
+ stm32mp153c-lxa-tac-gen3.dtb \
stm32mp153c-mecio1r1.dtb \
stm32mp157a-avenger96.dtb \
stm32mp157a-dhcor-avenger96.dtb \
diff --git a/arch/arm/boot/dts/st/stih410-b2260.dts b/arch/arm/boot/dts/st/stih410-b2260.dts
index 240b62040000..736b1e059b0a 100644
--- a/arch/arm/boot/dts/st/stih410-b2260.dts
+++ b/arch/arm/boot/dts/st/stih410-b2260.dts
@@ -206,5 +206,9 @@
sata1: sata@9b28000 {
status = "okay";
};
+
+ gpu: gpu@9f00000 {
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/st/stih410.dtsi b/arch/arm/boot/dts/st/stih410.dtsi
index a69231854f78..d56343f44fda 100644
--- a/arch/arm/boot/dts/st/stih410.dtsi
+++ b/arch/arm/boot/dts/st/stih410.dtsi
@@ -285,5 +285,39 @@
resets = <&softreset STIH407_LPM_SOFTRESET>;
hdmi-phandle = <&sti_hdmi>;
};
+
+ gpu: gpu@9f00000 {
+ compatible = "st,stih410-mali", "arm,mali-400";
+ reg = <0x9f00000 0x10000>;
+ /* LIMA driver needs 2 clocks, use the same for both */
+ clocks = <&clk_s_c0_flexgen CLK_ICN_GPU>,
+ <&clk_s_c0_flexgen CLK_ICN_GPU>;
+ clock-names = "bus", "core";
+ assigned-clocks = <&clk_s_c0_flexgen CLK_ICN_GPU>;
+ assigned-clock-rates = <400000000>;
+ resets = <&softreset STIH407_GPU_SOFTRESET>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp",
+ "gpmmu",
+ "pp0",
+ "ppmmu0",
+ "pp1",
+ "ppmmu1",
+ "pp2",
+ "ppmmu2",
+ "pp3",
+ "ppmmu3";
+
+ status = "disabled";
+ };
};
};
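The Mali-400 node is added to the SoC dtsi disabled by default; a board that actually powers and clocks the GPU only needs to flip it on, as the B2260 hunk earlier in this patch does. Board-side, that amounts to the following sketch:

	&gpu {
		status = "okay";
	};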
diff --git a/arch/arm/boot/dts/st/stm32mp131.dtsi b/arch/arm/boot/dts/st/stm32mp131.dtsi
index e1a764d269d2..0019d12c3d3d 100644
--- a/arch/arm/boot/dts/st/stm32mp131.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp131.dtsi
@@ -261,6 +261,11 @@
dma-names = "up";
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
timer@5 {
compatible = "st,stm32h7-timer-trigger";
reg = <5>;
@@ -281,6 +286,11 @@
dma-names = "up";
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
timer@6 {
compatible = "st,stm32h7-timer-trigger";
reg = <6>;
@@ -1196,6 +1206,11 @@
access-controllers = <&etzpc 23>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1221,6 +1236,11 @@
access-controllers = <&etzpc 24>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1246,6 +1266,11 @@
access-controllers = <&etzpc 25>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1276,6 +1301,11 @@
access-controllers = <&etzpc 26>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1304,6 +1334,11 @@
access-controllers = <&etzpc 27>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1332,6 +1367,11 @@
access-controllers = <&etzpc 28>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
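Each STM32 TIM instance now describes a third child function next to the existing pwm and timer-trigger nodes: a counter interface for the counter subsystem, again left disabled at the SoC level. Boards then enable only the functions they wire up, as the STM32MP135F-DK, EV1 and DK hunks below do; note that those boards leave the parent timers node disabled and switch on just the children. The pattern, with placeholder label and pinctrl names:

	&timers3 {
		status = "disabled";		/* parent left disabled, as in the board hunks below */

		counter {
			status = "okay";	/* quadrature/counter interface */
		};

		pwm {
			pinctrl-names = "default";
			pinctrl-0 = <&pwm3_pins_a>;	/* placeholder */
			status = "okay";
		};
	};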
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dk.dts b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
index 3a276589fef7..19a32f7d4d7d 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dk.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dk.dts
@@ -440,6 +440,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
/* PWM output on pin 7 of the expansion connector (CN8.7) using TIM3_CH4 func */
pinctrl-0 = <&pwm3_pins_a>;
@@ -456,6 +459,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
/* PWM output on pin 31 of the expansion connector (CN8.31) using TIM4_CH2 func */
pinctrl-0 = <&pwm4_pins_a>;
@@ -472,6 +478,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
/* PWM output on pin 32 of the expansion connector (CN8.32) using TIM8_CH3 func */
pinctrl-0 = <&pwm8_pins_a>;
@@ -486,6 +495,9 @@
&timers14 {
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
/* PWM output on pin 33 of the expansion connector (CN8.33) using TIM14_CH1 func */
pinctrl-0 = <&pwm14_pins_a>;
diff --git a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
index 5edbc790d1d2..6236ce2a6968 100644
--- a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
@@ -85,8 +85,8 @@
vddcpu: buck1 { /* VDD_CPU_1V2 */
regulator-name = "vddcpu";
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <1250000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-always-on;
regulator-initial-mode = <0>;
regulator-over-current-protection;
@@ -201,17 +201,17 @@
pagesize = <64>;
};
- eeprom0wl: eeprom@58 {
- compatible = "st,24256e-wl"; /* ST M24256E WL page of 0x50 */
- pagesize = <64>;
- reg = <0x58>;
- };
-
rv3032: rtc@51 {
compatible = "microcrystal,rv3032";
reg = <0x51>;
interrupts-extended = <&gpioi 0 IRQ_TYPE_EDGE_FALLING>;
};
+
+ eeprom0wl: eeprom@58 {
+ compatible = "st,24256e-wl"; /* ST M24256E WL page of 0x50 */
+ pagesize = <64>;
+ reg = <0x58>;
+ };
};
&iwdg2 {
diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
index b28dc90926bd..b9a87fbe971d 100644
--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
@@ -129,7 +129,7 @@
reg = <0x4c001000 0x400>;
st,proc-id = <0>;
interrupts-extended =
- <&exti 61 1>,
+ <&exti 61 IRQ_TYPE_LEVEL_HIGH>,
<&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "rx", "tx";
clocks = <&rcc IPCC>;
@@ -578,6 +578,11 @@
access-controllers = <&etzpc 20>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
timer@5 {
compatible = "st,stm32h7-timer-trigger";
reg = <5>;
@@ -599,6 +604,11 @@
access-controllers = <&etzpc 21>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
timer@6 {
compatible = "st,stm32h7-timer-trigger";
reg = <6>;
@@ -618,6 +628,11 @@
access-controllers = <&etzpc 22>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -643,6 +658,11 @@
access-controllers = <&etzpc 23>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -668,6 +688,11 @@
access-controllers = <&etzpc 24>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1116,6 +1141,11 @@
access-controllers = <&etzpc 54>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
@@ -1144,11 +1174,17 @@
access-controllers = <&etzpc 55>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
status = "disabled";
};
+
timer@15 {
compatible = "st,stm32h7-timer-trigger";
reg = <15>;
@@ -1171,6 +1207,11 @@
access-controllers = <&etzpc 56>;
status = "disabled";
+ counter {
+ compatible = "st,stm32-timer-counter";
+ status = "disabled";
+ };
+
pwm {
compatible = "st,stm32-pwm";
#pwm-cells = <3>;
diff --git a/arch/arm/boot/dts/st/stm32mp153c-lxa-tac-gen3.dts b/arch/arm/boot/dts/st/stm32mp153c-lxa-tac-gen3.dts
new file mode 100644
index 000000000000..a40b0eae8da3
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp153c-lxa-tac-gen3.dts
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2021 Rouven Czerwinski, Pengutronix
+ * Copyright (C) 2023, 2024 Leonard Göhrs, Pengutronix
+ */
+
+/dts-v1/;
+
+#include "stm32mp153.dtsi"
+#include "stm32mp15xc-lxa-tac.dtsi"
+
+/ {
+ model = "Linux Automation Test Automation Controller (TAC) Gen 3";
+ compatible = "lxa,stm32mp153c-tac-gen3", "oct,stm32mp153x-osd32", "st,stm32mp153";
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&v3v3>;
+
+ brightness-levels = <0 31 63 95 127 159 191 223 255>;
+ default-brightness-level = <7>;
+ pwms = <&led_pwm 3 1000000 0>;
+ };
+
+ reg_iobus_12v: regulator-iobus-12v {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12v>;
+ gpio = <&gpioh 13 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <12000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-name = "12V_IOBUS";
+ };
+
+ led-controller-1 {
+ compatible = "pwm-leds-multicolor";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+ max-brightness = <65535>;
+
+ led-red {
+ active-low;
+ color = <LED_COLOR_ID_RED>;
+ pwms = <&led_pwm 0 1000000 0>;
+ };
+
+ led-green {
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ pwms = <&led_pwm 2 1000000 0>;
+ };
+
+ led-blue {
+ active-low;
+ color = <LED_COLOR_ID_BLUE>;
+ pwms = <&led_pwm 1 1000000 0>;
+ };
+ };
+ };
+
+ led-controller-2 {
+ compatible = "gpio-leds";
+
+ led-5 {
+ label = "tac:green:iobus";
+ gpios = <&gpiog 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-6 {
+ label = "tac:green:can";
+ gpios = <&gpiof 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-7 {
+ label = "tac:green:out0";
+ gpios = <&gpiob 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-8 {
+ label = "tac:green:out1";
+ gpios = <&gpiog 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-9 {
+ label = "tac:green:uarttx";
+ gpios = <&gpiod 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-10 {
+ label = "tac:green:uartrx";
+ gpios = <&gpiof 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-11 {
+ label = "tac:green:usbh1";
+ gpios = <&gpioc 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-12 {
+ label = "tac:green:usbh2";
+ gpios = <&gpiod 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-13 {
+ label = "tac:green:usbh3";
+ gpios = <&gpiob 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-14 {
+ label = "tac:green:usbg";
+ gpios = <&gpiod 14 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "usb-gadget";
+ };
+
+ led-15 {
+ label = "tac:green:dutpwr";
+ gpios = <&gpioa 15 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&board_adc1_ain_pins>;
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vrefbuf>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,adc-channels = <2 5 9 10 13 14 15 18>;
+ st,min-sample-time-nsecs = <5000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@2 {
+ reg = <2>;
+ label = "OUT_0_FB";
+ };
+
+ channel@5 {
+ reg = <5>;
+ label = "IOBUS_CURR_FB";
+ };
+
+ channel@9 {
+ reg = <9>;
+ label = "IOBUS_VOLT_FB";
+ };
+
+ channel@10 {
+ reg = <10>;
+ label = "OUT_1_FB";
+ };
+
+ channel@13 {
+ reg = <13>;
+ label = "HOST_CURR_FB";
+ };
+
+ channel@14 {
+ reg = <14>;
+ label = "HOST_3_CURR_FB";
+ };
+
+ channel@15 {
+ reg = <15>;
+ label = "HOST_1_CURR_FB";
+ };
+
+ channel@18 {
+ reg = <18>;
+ label = "HOST_2_CURR_FB";
+ };
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <12>;
+ st,min-sample-time-nsecs = <500000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@12 {
+ reg = <12>;
+ label = "TEMP_INTERNAL";
+ };
+ };
+};
+
+&gpioa {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "ETH_GPIO1", "ETH_INT", "", "", "", /* 5 */
+ "", "", "", "BOOTROM_LED", "ETH_LAB_LEDRP", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioc {
+ gpio-line-names = "", "DUT_PWR_DISCH", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", ""; /* 10 */
+};
+
+&gpioe {
+ gpio-line-names = "TP35", "", "", "", "CAN_1_120R", /* 0 */
+ "", "", "USER_BTN2", "DUT_PWR_EN", "UART_TX_EN", /* 5 */
+ "UART_RX_EN", "TP24", "", "TP25", "TP26", /* 10 */
+ "TP27"; /* 15 */
+};
+
+&gpiog {
+ gpio-line-names = "ETH_RESET", "", "", "", "", /* 0 */
+ "IOBUS_FLT_FB", "", "USER_LED2", "ETH1_PPS_A", "CAN_0_120R", /* 5 */
+ "POWER_ADC_RESET", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&m_can2 {
+ termination-gpios = <&gpioe 4 GPIO_ACTIVE_HIGH>;
+ termination-ohms = <120>;
+};
+
+&pinctrl {
+ board_adc1_ain_pins: board-adc1-ain-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* ADC1_INP2 */
+ <STM32_PINMUX('B', 1, ANALOG)>, /* ADC1_INP5 */
+ <STM32_PINMUX('B', 0, ANALOG)>, /* ADC1_INP9 */
+ <STM32_PINMUX('C', 0, ANALOG)>, /* ADC1_INP10 */
+ <STM32_PINMUX('C', 3, ANALOG)>, /* ADC1_INP13 */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ADC1_INP14 */
+ <STM32_PINMUX('A', 3, ANALOG)>, /* ADC1_INP15 */
+ <STM32_PINMUX('A', 4, ANALOG)>; /* ADC1_INP18 */
+ };
+ };
+};
+
+&spi2 {
+ adc@0 {
+ compatible = "ti,lmp92064";
+ reg = <0>;
+
+ reset-gpios = <&gpiog 10 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ shunt-resistor-micro-ohms = <15000>;
+ spi-max-frequency = <5000000>;
+ vdd-supply = <&reg_pb_3v3>;
+ vdig-supply = <&reg_pb_3v3>;
+ };
+};
+
+&timers8 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+
+ led_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm8_pins_b>;
+ pinctrl-1 = <&pwm8_sleep_pins_b>;
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ev1.dts b/arch/arm/boot/dts/st/stm32mp157c-ev1.dts
index 9eb9a1bf4f2c..8f99c30f1af1 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ev1.dts
@@ -306,6 +306,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm2_pins_a>;
pinctrl-1 = <&pwm2_sleep_pins_a>;
@@ -321,6 +324,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm8_pins_a>;
pinctrl-1 = <&pwm8_sleep_pins_a>;
@@ -336,6 +342,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm12_pins_a>;
pinctrl-1 = <&pwm12_sleep_pins_a>;
diff --git a/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen1.dts b/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen1.dts
index 81f254fb88b0..e72e42eb0eb4 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen1.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen1.dts
@@ -35,6 +35,76 @@
};
};
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc1_ain_pins_a>;
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vrefbuf>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,adc-channels = <0 1 2 5 9 10 13 15>;
+ st,min-sample-time-nsecs = <5000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@0 {
+ reg = <0>;
+ label = "HOST_2_CURR_FB";
+ };
+
+ channel@1 {
+ reg = <1>;
+ label = "HOST_3_CURR_FB";
+ };
+
+ channel@2 {
+ reg = <2>;
+ label = "OUT_0_FB";
+ };
+
+ channel@5 {
+ reg = <5>;
+ label = "IOBUS_CURR_FB";
+ };
+
+ channel@9 {
+ reg = <9>;
+ label = "IOBUS_VOLT_FB";
+ };
+
+ channel@10 {
+ reg = <10>;
+ label = "OUT_1_FB";
+ };
+
+ channel@13 {
+ reg = <13>;
+ label = "HOST_CURR_FB";
+ };
+
+ channel@15 {
+ reg = <15>;
+ label = "HOST_1_CURR_FB";
+ };
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <12>;
+ st,min-sample-time-nsecs = <500000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@12 {
+ reg = <12>;
+ label = "TEMP_INTERNAL";
+ };
+ };
+};
+
&gpioa {
gpio-line-names = "", "", "STACK_CS2", "", "STACK_CS3", /* 0 */
"ETH_GPIO1", "ETH_INT", "", "", "", /* 5 */
@@ -48,6 +118,20 @@
"", ""; /* 10 */
};
+&gpioe {
+ gpio-line-names = "TP35", "", "", "", "CAN_1_120R", /* 0 */
+ "", "", "USER_BTN2", "TP48", "UART_TX_EN", /* 5 */
+ "UART_RX_EN", "TP24", "", "TP25", "TP26", /* 10 */
+ "TP27"; /* 15 */
+};
+
+&gpiog {
+ gpio-line-names = "ETH_RESET", "", "", "", "", /* 0 */
+ "IOBUS_FLT_FB", "", "USER_LED2", "ETH1_PPS_A", "CAN_0_120R", /* 5 */
+ "TP49", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
&gpu {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen2.dts b/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen2.dts
index 4cc177031661..2ae281725a48 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen2.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-lxa-tac-gen2.dts
@@ -121,6 +121,76 @@
};
};
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc1_ain_pins_a>;
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vrefbuf>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,adc-channels = <0 1 2 5 9 10 13 15>;
+ st,min-sample-time-nsecs = <5000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@0 {
+ reg = <0>;
+ label = "HOST_2_CURR_FB";
+ };
+
+ channel@1 {
+ reg = <1>;
+ label = "HOST_3_CURR_FB";
+ };
+
+ channel@2 {
+ reg = <2>;
+ label = "OUT_0_FB";
+ };
+
+ channel@5 {
+ reg = <5>;
+ label = "IOBUS_CURR_FB";
+ };
+
+ channel@9 {
+ reg = <9>;
+ label = "IOBUS_VOLT_FB";
+ };
+
+ channel@10 {
+ reg = <10>;
+ label = "OUT_1_FB";
+ };
+
+ channel@13 {
+ reg = <13>;
+ label = "HOST_CURR_FB";
+ };
+
+ channel@15 {
+ reg = <15>;
+ label = "HOST_1_CURR_FB";
+ };
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <12>;
+ st,min-sample-time-nsecs = <500000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@12 {
+ reg = <12>;
+ label = "TEMP_INTERNAL";
+ };
+ };
+};
+
&gpioa {
gpio-line-names = "", "", "DUT_PWR_EN", "", "STACK_CS3", /* 0 */
"ETH_GPIO1", "ETH_INT", "", "", "", /* 5 */
@@ -134,6 +204,20 @@
"", ""; /* 10 */
};
+&gpioe {
+ gpio-line-names = "TP35", "", "", "", "CAN_1_120R", /* 0 */
+ "", "", "USER_BTN2", "TP48", "UART_TX_EN", /* 5 */
+ "UART_RX_EN", "TP24", "", "TP25", "TP26", /* 10 */
+ "TP27"; /* 15 */
+};
+
+&gpiog {
+ gpio-line-names = "ETH_RESET", "", "", "", "", /* 0 */
+ "IOBUS_FLT_FB", "", "USER_LED2", "ETH1_PPS_A", "CAN_0_120R", /* 5 */
+ "TP49", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
&gpu {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi b/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
index c87fd96cbd91..be0c355d3105 100644
--- a/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xc-lxa-tac.dtsi
@@ -16,12 +16,20 @@
/ {
aliases {
+ can0 = &m_can1;
+ can1 = &m_can2;
ethernet0 = &ethernet0;
ethernet1 = &port_uplink;
ethernet2 = &port_dut;
+ i2c0 = &i2c1;
+ i2c1 = &i2c4;
+ i2c2 = &i2c5;
mmc1 = &sdmmc2;
serial0 = &uart4;
serial1 = &usart3;
+ spi0 = &spi2;
+ spi1 = &spi4;
+ spi2 = &spi5;
};
chosen {
@@ -142,76 +150,6 @@
baseboard_eeprom: &sip_eeprom {
};
-&adc {
- pinctrl-names = "default";
- pinctrl-0 = <&adc1_ain_pins_a>;
- vdd-supply = <&vdd>;
- vdda-supply = <&vdda>;
- vref-supply = <&vrefbuf>;
- status = "okay";
-
- adc1: adc@0 {
- st,adc-channels = <0 1 2 5 9 10 13 15>;
- st,min-sample-time-nsecs = <5000>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- channel@0 {
- reg = <0>;
- label = "HOST_2_CURR_FB";
- };
-
- channel@1 {
- reg = <1>;
- label = "HOST_3_CURR_FB";
- };
-
- channel@2 {
- reg = <2>;
- label = "OUT_0_FB";
- };
-
- channel@5 {
- reg = <5>;
- label = "IOBUS_CURR_FB";
- };
-
- channel@9 {
- reg = <9>;
- label = "IOBUS_VOLT_FB";
- };
-
- channel@10 {
- reg = <10>;
- label = "OUT_1_FB";
- };
-
- channel@13 {
- reg = <13>;
- label = "HOST_CURR_FB";
- };
-
- channel@15 {
- reg = <15>;
- label = "HOST_1_CURR_FB";
- };
- };
-
- adc2: adc@100 {
- st,adc-channels = <12>;
- st,min-sample-time-nsecs = <500000>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- channel@12 {
- reg = <12>;
- label = "TEMP_INTERNAL";
- };
- };
-};
-
&crc1 {
status = "okay";
};
@@ -265,13 +203,6 @@ baseboard_eeprom: &sip_eeprom {
"ETH_LAB_LEDRN"; /* 15 */
};
-&gpioe {
- gpio-line-names = "TP35", "", "", "", "CAN_1_120R", /* 0 */
- "", "", "USER_BTN2", "TP48", "UART_TX_EN", /* 5 */
- "UART_RX_EN", "TP24", "", "TP25", "TP26", /* 10 */
- "TP27"; /* 15 */
-};
-
&gpiof {
gpio-line-names = "TP36", "TP37", "", "", "OLED_CS", /* 0 */
"", "", "", "", "", /* 5 */
@@ -279,13 +210,6 @@ baseboard_eeprom: &sip_eeprom {
""; /* 15 */
};
-&gpiog {
- gpio-line-names = "ETH_RESET", "", "", "", "", /* 0 */
- "IOBUS_FLT_FB", "", "USER_LED2", "ETH1_PPS_A", "CAN_0_120R", /* 5 */
- "TP49", "", "", "", "", /* 10 */
- ""; /* 15 */
-};
-
&gpioh {
gpio-line-names = "", "", "OUT_1", "OUT_0", "OLED_RESET", /* 0 */
"", "", "", "", "", /* 5 */
@@ -379,10 +303,6 @@ baseboard_eeprom: &sip_eeprom {
};
};
-&rtc {
- status = "okay";
-};
-
&sdmmc2 {
pinctrl-names = "default", "opendrain", "sleep";
pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
@@ -576,6 +496,10 @@ baseboard_eeprom: &sip_eeprom {
vusb_d-supply = <&vdd_usb>;
vusb_a-supply = <&reg18>;
+ g-rx-fifo-size = <512>;
+ g-np-tx-fifo-size = <32>;
+ g-tx-fifo-size = <128 128 64 16 16 16 16 16>;
+
dr_mode = "peripheral";
status = "okay";
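The TAC baseboard also gains explicit FIFO sizing for its dwc2 OTG controller in gadget mode. A sketch with the role of each property spelled out; the node label is assumed, the values are the ones from the hunk above:

	&usbotg_hs {
		g-rx-fifo-size = <512>;		/* shared receive FIFO */
		g-np-tx-fifo-size = <32>;	/* non-periodic transmit FIFO */
		g-tx-fifo-size = <128 128 64 16 16 16 16 16>;	/* per-IN-endpoint transmit FIFOs */
		dr_mode = "peripheral";
	};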
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
index bb4f8a0b937f..abe2dfe70636 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
@@ -6,18 +6,6 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/pwm/pwm.h>
-/ {
- aliases {
- serial0 = &uart4;
- serial1 = &usart3;
- serial2 = &uart8;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
&adc {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
index 171d7c7658fa..0fb4e55843b9 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
@@ -7,16 +7,6 @@
#include <dt-bindings/pwm/pwm.h>
/ {
- aliases {
- serial0 = &uart4;
- serial1 = &usart3;
- serial2 = &uart8;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
clk_ext_audio_codec: clock-codec {
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
index b5bc53accd6b..01c693cc0344 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
@@ -7,16 +7,6 @@
#include <dt-bindings/pwm/pwm.h>
/ {
- aliases {
- serial0 = &uart4;
- serial1 = &usart3;
- serial2 = &uart8;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
led {
compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
index 74a11ccc5333..142d4a8731f8 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
@@ -14,6 +14,13 @@
ethernet1 = &ksz8851;
rtc0 = &hwrtc;
rtc1 = &rtc;
+ serial0 = &uart4;
+ serial1 = &uart8;
+ serial2 = &usart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
};
memory@c0000000 {
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
index f7634c51efb2..a5511b1f0ce3 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
@@ -570,6 +570,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm1_pins_a>;
pinctrl-1 = <&pwm1_sleep_pins_a>;
@@ -585,6 +588,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm3_pins_a>;
pinctrl-1 = <&pwm3_sleep_pins_a>;
@@ -600,6 +606,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm4_pins_a &pwm4_pins_b>;
pinctrl-1 = <&pwm4_sleep_pins_a &pwm4_sleep_pins_b>;
@@ -615,6 +624,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm5_pins_a>;
pinctrl-1 = <&pwm5_sleep_pins_a>;
@@ -630,6 +642,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
timer@5 {
status = "okay";
};
@@ -639,6 +654,9 @@
/delete-property/dmas;
/delete-property/dma-names;
status = "disabled";
+ counter {
+ status = "okay";
+ };
pwm {
pinctrl-0 = <&pwm12_pins_a>;
pinctrl-1 = <&pwm12_sleep_pins_a>;
diff --git a/arch/arm/boot/dts/ti/omap/am437x-l4.dtsi b/arch/arm/boot/dts/ti/omap/am437x-l4.dtsi
index 824b9415ebbe..fd4634f8c629 100644
--- a/arch/arm/boot/dts/ti/omap/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am437x-l4.dtsi
@@ -180,8 +180,7 @@
<0x9058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
@@ -698,8 +697,7 @@
<0x22058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
@@ -726,8 +724,7 @@
<0x24058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
@@ -1385,8 +1382,7 @@
<0xa6058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
@@ -1413,8 +1409,7 @@
<0xa8058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
@@ -1441,8 +1436,7 @@
<0xaa058 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
- SYSC_OMAP2_SOFTRESET |
- SYSC_OMAP2_AUTOIDLE)>;
+ SYSC_OMAP2_SOFTRESET)>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
diff --git a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
index 6e67d99832ac..ba7fdaae9c6e 100644
--- a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
@@ -12,6 +12,7 @@
ranges = <0x00000000 0x4a000000 0x100000>, /* segment 0 */
<0x00100000 0x4a100000 0x100000>, /* segment 1 */
<0x00200000 0x4a200000 0x100000>; /* segment 2 */
+ dma-ranges;
segment@0 { /* 0x4a000000 */
compatible = "simple-pm-bus";
@@ -557,6 +558,7 @@
<0x0007e000 0x0017e000 0x001000>, /* ap 124 */
<0x00059000 0x00159000 0x001000>, /* ap 125 */
<0x0005a000 0x0015a000 0x001000>; /* ap 126 */
+ dma-ranges;
target-module@2000 { /* 0x4a102000, ap 27 3c.0 */
compatible = "ti,sysc";
diff --git a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
index 2ee3ddd64020..1b18ed8c1f7a 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
@@ -446,6 +446,7 @@
pinctrl-names = "default";
pinctrl-0 = <
&hsusb2_2_pins
+ &mcspi3hog_pins
>;
hsusb2_2_pins: hsusb2-2-pins {
@@ -459,6 +460,15 @@
>;
};
+ mcspi3hog_pins: mcspi3hog-pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25dc, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* etk_d0 */
+ OMAP3630_CORE2_IOPAD(0x25de, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* etk_d1 */
+ OMAP3630_CORE2_IOPAD(0x25e0, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* etk_d2 */
+ OMAP3630_CORE2_IOPAD(0x25e2, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* etk_d3 */
+ >;
+ };
+
spi_gpio_pins: spi-gpio-pinmux-pins {
pinctrl-single,pins = <
OMAP3630_CORE2_IOPAD(0x25d8, PIN_OUTPUT | MUX_MODE4) /* clk */
@@ -591,8 +601,10 @@
interrupts = <0 IRQ_TYPE_EDGE_FALLING>; /* GPIO_160 */
gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* GPIO_160 */
ti,x-plate-ohms = <600>;
- touchscreen-size-x = <480>;
- touchscreen-size-y = <640>;
+ touchscreen-size-x = <0xf00>;
+ touchscreen-size-y = <0xf00>;
+ touchscreen-min-x = <0x100>;
+ touchscreen-min-y = <0x100>;
touchscreen-max-pressure = <1000>;
touchscreen-fuzz-x = <3>;
touchscreen-fuzz-y = <8>;
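The GTA04 touchscreen hunk switches the reported coordinate range from display pixels (480x640) to the controller's raw units, with a margin cut in on every edge; mapping those raw coordinates onto the panel is then presumably left to userspace calibration. The complete node ends up roughly like this sketch; the compatible and reg are assumed, since only the changed properties appear in the hunk:

	touchscreen@48 {
		compatible = "ti,tsc2007";		/* assumed */
		reg = <0x48>;				/* assumed */
		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;	/* GPIO_160 */
		gpios = <&gpio6 0 GPIO_ACTIVE_LOW>;
		ti,x-plate-ohms = <600>;
		touchscreen-min-x = <0x100>;
		touchscreen-min-y = <0x100>;
		touchscreen-size-x = <0xf00>;
		touchscreen-size-y = <0xf00>;
		touchscreen-max-pressure = <1000>;
		touchscreen-fuzz-x = <3>;
		touchscreen-fuzz-y = <8>;
	};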
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f25eadcba5e6..87841e5cafe2 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -230,6 +230,7 @@ CONFIG_RN5T618_POWER=m
CONFIG_SENSORS_MC13783_ADC=y
CONFIG_SENSORS_GPIO_FAN=y
CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_JC42=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_PWM_FAN=y
CONFIG_SENSORS_SY7636A=y
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index f5eeac9c65c3..acd16204f8d7 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -107,7 +107,6 @@ CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
CONFIG_CRC_ITU_T=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 758276027dbc..37e3baa33b67 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
CONFIG_SOC_SAMA5D4=y
+CONFIG_SOC_SAMA7D65=y
CONFIG_SOC_SAMA7G5=y
CONFIG_SOC_LAN966=y
CONFIG_ARCH_BCM=y
@@ -1306,7 +1307,6 @@ CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 3a166c2f02bd..6de45d7f6078 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -428,6 +428,7 @@ CONFIG_POWER_RESET_GPIO=y
CONFIG_BATTERY_BQ27XXX=m
CONFIG_CHARGER_ISP1704=m
CONFIG_CHARGER_TWL4030=m
+CONFIG_CHARGER_TWL6030=m
CONFIG_CHARGER_BQ2415X=m
CONFIG_CHARGER_BQ24190=m
CONFIG_CHARGER_BQ24735=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 38916ac4bce4..de0ac8f521d7 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -652,7 +652,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig
index 1a2e93c8ee71..ea7ddf640ba7 100644
--- a/arch/arm/configs/sama7_defconfig
+++ b/arch/arm/configs/sama7_defconfig
@@ -12,6 +12,7 @@ CONFIG_EXPERT=y
# CONFIG_IO_URING is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_ARCH_AT91=y
+CONFIG_SOC_SAMA7D65=y
CONFIG_SOC_SAMA7G5=y
CONFIG_ATMEL_CLOCKSOURCE_TCB=y
# CONFIG_CACHE_L2X0 is not set
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 0e380e450a62..fd28f3176c6b 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -192,6 +192,7 @@ CONFIG_DW_DMAC=y
CONFIG_RZN1_DMAMUX=y
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=y
+CONFIG_RZ_DMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_ARCH_EMEV2=y
CONFIG_ARCH_R8A7794=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 3baec075d1ef..77048b5e1ec4 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -6,23 +6,25 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
# CONFIG_UID16 is not set
+# CONFIG_POSIX_TIMERS is not set
CONFIG_BASE_SMALL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
-# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
# CONFIG_MMU is not set
CONFIG_ARCH_STM32=y
CONFIG_CPU_V7M_NUM_IRQ=240
CONFIG_SET_MEM_PARAM=y
CONFIG_DRAM_BASE=0x90000000
-CONFIG_FLASH_MEM_BASE=0x08000000
-CONFIG_FLASH_SIZE=0x00200000
# CONFIG_ATAGS is not set
CONFIG_XIP_KERNEL=y
CONFIG_XIP_PHYS_ADDR=0x08008000
+# CONFIG_SUSPEND is not set
+# CONFIG_GCC_PLUGINS is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
# CONFIG_VM_EVENT_COUNTERS is not set
@@ -63,6 +65,7 @@ CONFIG_DMADEVICES=y
CONFIG_STM32_DMA=y
CONFIG_STM32_DMAMUX=y
CONFIG_STM32_MDMA=y
+# CONFIG_COMMON_CLK_STM32MP is not set
CONFIG_IIO=y
CONFIG_STM32_ADC_CORE=y
CONFIG_STM32_ADC=y
@@ -74,10 +77,7 @@ CONFIG_NLS=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_CRYPTO=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 5ff49a5e9afc..32650c8431d9 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -222,30 +222,5 @@ config CRYPTO_CHACHA20_NEON
Architecture: arm using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_CRC32_ARM_CE
- tristate "CRC32C and CRC32"
- depends on KERNEL_MODE_NEON
- depends on CRC32
- select CRYPTO_HASH
- help
- CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
- and CRC32 CRC algorithm (IEEE 802.3)
-
- Architecture: arm using:
- - CRC and/or PMULL instructions
-
- Drivers: crc32-arm-ce and crc32c-arm-ce
-
-config CRYPTO_CRCT10DIF_ARM_CE
- tristate "CRCT10DIF"
- depends on KERNEL_MODE_NEON
- depends on CRC_T10DIF
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- Architecture: arm using:
- - PMULL (Polynomial Multiply Long) instructions
-
endmenu
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 13e62c7c25dc..3d0e23ff9e74 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -20,8 +20,6 @@ obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
@@ -37,8 +35,6 @@ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
-crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
-crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha-neon-y := chacha-scalar-core.o chacha-glue.o
chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
poly1305-arm-y := poly1305-core.o poly1305-glue.o
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
deleted file mode 100644
index 20b4dff13e3a..000000000000
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/cpufeature.h>
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
-
-#define PMULL_MIN_LEN 64L /* minimum size of buffer
- * for crc32_pmull_le_16 */
-#define SCALE_F 16L /* size of NEON register */
-
-asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc);
-asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len);
-
-asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc);
-asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len);
-
-static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len);
-static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len);
-
-static int crc32_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 0;
- return 0;
-}
-
-static int crc32c_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
- return 0;
-}
-
-static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = *mctx;
- return 0;
-}
-
-static int crc32_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32c_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32c_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(*crc, out);
- return 0;
-}
-
-static int crc32c_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(~*crc, out);
- return 0;
-}
-
-static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if (crypto_simd_usable()) {
- if ((u32)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
-
- *crc = fallback_crc32(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
- }
-
- if (length > 0)
- *crc = fallback_crc32(*crc, data, length);
-
- return 0;
-}
-
-static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if (crypto_simd_usable()) {
- if ((u32)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
-
- *crc = fallback_crc32c(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32c_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
- }
-
- if (length > 0)
- *crc = fallback_crc32c(*crc, data, length);
-
- return 0;
-}
-
-static struct shash_alg crc32_pmull_algs[] = { {
- .setkey = crc32_setkey,
- .init = crc32_init,
- .update = crc32_update,
- .final = crc32_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32_cra_init,
- .base.cra_name = "crc32",
- .base.cra_driver_name = "crc32-arm-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-}, {
- .setkey = crc32_setkey,
- .init = crc32_init,
- .update = crc32c_update,
- .final = crc32c_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32c_cra_init,
- .base.cra_name = "crc32c",
- .base.cra_driver_name = "crc32c-arm-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init crc32_pmull_mod_init(void)
-{
- if (elf_hwcap2 & HWCAP2_PMULL) {
- crc32_pmull_algs[0].update = crc32_pmull_update;
- crc32_pmull_algs[1].update = crc32c_pmull_update;
-
- if (elf_hwcap2 & HWCAP2_CRC32) {
- fallback_crc32 = crc32_armv8_le;
- fallback_crc32c = crc32c_armv8_le;
- } else {
- fallback_crc32 = crc32_le;
- fallback_crc32c = __crc32c_le;
- }
- } else if (!(elf_hwcap2 & HWCAP2_CRC32)) {
- return -ENODEV;
- }
-
- return crypto_register_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static void __exit crc32_pmull_mod_exit(void)
-{
- crypto_unregister_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = {
- { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
-};
-MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
-
-module_init(crc32_pmull_mod_init);
-module_exit(crc32_pmull_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_DESCRIPTION("Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("crc32");
-MODULE_ALIAS_CRYPTO("crc32c");
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
deleted file mode 100644
index a8b74523729e..000000000000
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/crc-t10dif.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-
-asmlinkage u16 crc_t10dif_pmull64(u16 init_crc, const u8 *buf, size_t len);
-asmlinkage void crc_t10dif_pmull8(u16 init_crc, const u8 *buf, size_t len,
- u8 out[16]);
-
-static int crct10dif_init(struct shash_desc *desc)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
- return 0;
-}
-
-static int crct10dif_update_ce(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull64(*crc, data, length);
- kernel_neon_end();
- } else {
- *crc = crc_t10dif_generic(*crc, data, length);
- }
-
- return 0;
-}
-
-static int crct10dif_update_neon(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crcp = shash_desc_ctx(desc);
- u8 buf[16] __aligned(16);
- u16 crc = *crcp;
-
- if (length > CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- crc_t10dif_pmull8(crc, data, length, buf);
- kernel_neon_end();
-
- crc = 0;
- data = buf;
- length = sizeof(buf);
- }
-
- *crcp = crc_t10dif_generic(crc, data, length);
- return 0;
-}
-
-static int crct10dif_final(struct shash_desc *desc, u8 *out)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *(u16 *)out = *crc;
- return 0;
-}
-
-static struct shash_alg algs[] = {{
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = crct10dif_init,
- .update = crct10dif_update_neon,
- .final = crct10dif_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
-
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-arm-neon",
- .base.cra_priority = 150,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = crct10dif_init,
- .update = crct10dif_update_ce,
- .final = crct10dif_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
-
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-arm-ce",
- .base.cra_priority = 200,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}};
-
-static int __init crc_t10dif_mod_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
-
- return crypto_register_shashes(algs, 1 + !!(elf_hwcap2 & HWCAP2_PMULL));
-}
-
-static void __exit crc_t10dif_mod_exit(void)
-{
- crypto_unregister_shashes(algs, 1 + !!(elf_hwcap2 & HWCAP2_PMULL));
-}
-
-module_init(crc_t10dif_mod_init);
-module_exit(crc_t10dif_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_DESCRIPTION("Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index e3ea34558ada..ecbc100d22a5 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -26,4 +26,10 @@
#define __read_mostly __section(".data..read_mostly")
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+int cache_line_size(void);
+#endif
+#endif
+
#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index b9dbe1d4c8fe..b01c59076b84 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -83,6 +83,14 @@ static inline unsigned int read_ccsidr(void)
asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
return val;
}
+
+static inline unsigned int read_clidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (val));
+ return val;
+}
#else /* CONFIG_CPU_V7M */
#include <linux/io.h>
#include "asm/v7m.h"
@@ -96,6 +104,11 @@ static inline unsigned int read_ccsidr(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
}
+
+static inline unsigned int read_clidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CLIDR);
+}
#endif
#endif
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
index 4befe8d2ae19..7cbe001bf9cc 100644
--- a/arch/arm/include/asm/ecard.h
+++ b/arch/arm/include/asm/ecard.h
@@ -195,7 +195,7 @@ void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
unsigned long offset, unsigned long maxsize);
#define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr)
-extern struct bus_type ecard_bus_type;
+extern const struct bus_type ecard_bus_type;
#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f40d06ad5d2a..ea4fbe7b17f6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -26,14 +26,7 @@
#else /* !CONFIG_MMU */
-#include <linux/swap.h>
#include <asm/tlbflush.h>
-
-static inline void __tlb_remove_table(void *_table)
-{
- free_page_and_swap_cache((struct page *)_table);
-}
-
#include <asm-generic/tlb.h>
static inline void
@@ -41,8 +34,6 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
-
#ifndef CONFIG_ARM_LPAE
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
@@ -61,7 +52,6 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
#ifdef CONFIG_ARM_LPAE
struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pagetable_pmd_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
#endif
}
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index 157ea3426158..85ccc422d4d0 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -9,16 +9,6 @@
#ifndef __ASM_VFP_H
#define __ASM_VFP_H
-#ifndef CONFIG_AS_VFP_VMRS_FPINST
-#define FPSID cr0
-#define FPSCR cr1
-#define MVFR1 cr6
-#define MVFR0 cr7
-#define FPEXC cr8
-#define FPINST cr9
-#define FPINST2 cr10
-#endif
-
/* FPSID bits */
#define FPSID_IMPLEMENTER_BIT (24)
#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT)
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index ba0d4cb5377e..e2e1d5a3727a 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -8,7 +8,6 @@
#include <asm/vfp.h>
-#ifdef CONFIG_AS_VFP_VMRS_FPINST
.macro VFPFMRX, rd, sysreg, cond
vmrs\cond \rd, \sysreg
.endm
@@ -16,16 +15,6 @@
.macro VFPFMXR, sysreg, rd, cond
vmsr\cond \sysreg, \rd
.endm
-#else
- @ Macros to allow building with old toolkits (with no VFP support)
- .macro VFPFMRX, rd, sysreg, cond
- MRC\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMRX \rd, \sysreg
- .endm
-
- .macro VFPFMXR, sysreg, rd, cond
- MCR\cond p10, 7, \rd, \sysreg, cr0, 0 @ FMXR \sysreg, \rd
- .endm
-#endif
@ read all the working registers back into the VFP
.macro VFPFLDMIA, base, tmp
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index aaae31b8c4a5..b3333d070390 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -40,6 +40,7 @@ obj-y += entry-armv.o
endif
obj-$(CONFIG_MMU) += bugs.o
+obj-$(CONFIG_OF) += cacheinfo.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
diff --git a/arch/arm/kernel/cacheinfo.c b/arch/arm/kernel/cacheinfo.c
new file mode 100644
index 000000000000..e1469b641780
--- /dev/null
+++ b/arch/arm/kernel/cacheinfo.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM cacheinfo support
+ *
+ * Copyright (C) 2023 Linaro Ltd.
+ * Copyright (C) 2015 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cacheinfo.h>
+#include <linux/of.h>
+
+#include <asm/cachetype.h>
+#include <asm/cputype.h>
+#include <asm/system_info.h>
+
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+#define MAX_CACHE_LEVEL 7 /* Max 7 levels supported */
+
+#define CTR_FORMAT_MASK GENMASK(31, 29)
+#define CTR_FORMAT_ARMV6 0
+#define CTR_FORMAT_ARMV7 4
+#define CTR_CWG_MASK GENMASK(27, 24)
+#define CTR_DSIZE_LEN_MASK GENMASK(13, 12)
+#define CTR_ISIZE_LEN_MASK GENMASK(1, 0)
+
+/* Also valid for v7m */
+static inline int cache_line_size_cp15(void)
+{
+ u32 ctr = read_cpuid_cachetype();
+ u32 format = FIELD_GET(CTR_FORMAT_MASK, ctr);
+
+ if (format == CTR_FORMAT_ARMV7) {
+ u32 cwg = FIELD_GET(CTR_CWG_MASK, ctr);
+
+ return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
+ } else if (WARN_ON_ONCE(format != CTR_FORMAT_ARMV6)) {
+ return ARCH_DMA_MINALIGN;
+ }
+
+ return 8 << max(FIELD_GET(CTR_ISIZE_LEN_MASK, ctr),
+ FIELD_GET(CTR_DSIZE_LEN_MASK, ctr));
+}
+
+int cache_line_size(void)
+{
+ if (coherency_max_size != 0)
+ return coherency_max_size;
+
+ /* CP15 is optional / implementation defined before ARMv6 */
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return ARCH_DMA_MINALIGN;
+
+ return cache_line_size_cp15();
+}
+EXPORT_SYMBOL_GPL(cache_line_size);
+
+static inline enum cache_type get_cache_type(int level)
+{
+ u32 clidr;
+
+ if (level > MAX_CACHE_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+
+ clidr = read_clidr();
+
+ return CLIDR_CTYPE(clidr, level);
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ this_leaf->level = level;
+ this_leaf->type = type;
+}
+
+static int detect_cache_level(unsigned int *level_p, unsigned int *leaves_p)
+{
+ unsigned int ctype, level, leaves;
+ u32 ctr, format;
+
+ /* CLIDR is not present before ARMv7/v7m */
+ if (cpu_architecture() < CPU_ARCH_ARMv7)
+ return -EOPNOTSUPP;
+
+ /* Don't try reading CLIDR if CTR declares old format */
+ ctr = read_cpuid_cachetype();
+ format = FIELD_GET(CTR_FORMAT_MASK, ctr);
+ if (format != CTR_FORMAT_ARMV7)
+ return -EOPNOTSUPP;
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+ ctype = get_cache_type(level);
+ if (ctype == CACHE_TYPE_NOCACHE) {
+ level--;
+ break;
+ }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ }
+
+ *level_p = level;
+ *leaves_p = leaves;
+
+ return 0;
+}
+
+int early_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ return detect_cache_level(&this_cpu_ci->num_levels, &this_cpu_ci->num_leaves);
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ unsigned int level, leaves;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int fw_level;
+ int ret;
+
+ ret = detect_cache_level(&level, &leaves);
+ if (ret)
+ return ret;
+
+ fw_level = of_find_last_cache_level(cpu);
+
+ if (level < fw_level) {
+ /*
+ * Some external caches are not specified in CLIDR; the
+ * information may be available in the device tree. Only
+ * unified external caches are considered here.
+ */
+ leaves += (fw_level - level);
+ level = fw_level;
+ }
+
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ enum cache_type type;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ unsigned int arch = cpu_architecture();
+
+ /* CLIDR is not present before ARMv7/v7m */
+ if (arch < CPU_ARCH_ARMv7)
+ return -EOPNOTSUPP;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ type = get_cache_type(level);
+ if (type == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, type, level);
+ }
+ }
+
+ return 0;
+}
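
As a sanity check of the CLIDR_CTYPE() bit arithmetic and the CTR cache
writeback granule (CWG) decoding introduced in the new cacheinfo.c above, the
same expressions can be exercised with a small host-side C snippet. The
register values below are made up for illustration and are not taken from the
patch.

#include <stdio.h>
#include <stdint.h>

/* Same field arithmetic as the patch: 3 bits of cache type per level */
#define CLIDR_CTYPE_SHIFT(level)	(3 * ((level) - 1))
#define CLIDR_CTYPE(clidr, level)	(((clidr) >> CLIDR_CTYPE_SHIFT(level)) & 7)

int main(void)
{
	/* Hypothetical CLIDR: L1 = separate I/D (0b011), L2 = unified (0b100) */
	uint32_t clidr = (0x3 << 0) | (0x4 << 3);
	/* Hypothetical CTR.CWG = 4, i.e. a 4 << 4 = 64-byte writeback granule */
	uint32_t cwg = 4;

	printf("L1 ctype = %u (3 = separate I/D)\n", (unsigned)CLIDR_CTYPE(clidr, 1));
	printf("L2 ctype = %u (4 = unified)\n", (unsigned)CLIDR_CTYPE(clidr, 2));
	printf("L3 ctype = %u (0 = no cache)\n", (unsigned)CLIDR_CTYPE(clidr, 3));
	printf("cache_line_size() would return %u bytes\n", 4u << cwg);
	return 0;
}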
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 905b1b191546..db8be609fab2 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -16,7 +16,7 @@
static unsigned int isa_membase, isa_portbase, isa_portshift;
-static struct ctl_table ctl_isa_vars[] = {
+static const struct ctl_table ctl_isa_vars[] = {
{
.procname = "membase",
.data = &isa_membase,
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e6a857bf0ce6..a41c93988d2c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -880,10 +880,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
*/
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
- res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n",
- __func__, sizeof(*res));
+ res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(res_end);
@@ -891,10 +888,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&iomem_resource, res);
}
- res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*res));
+ res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = start;
res->end = res_end;
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 0ca5aae1bcc3..007874320937 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -45,3 +45,9 @@ ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
endif
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_CRC32_ARCH) += crc32-arm.o
+crc32-arm-y := crc32-glue.o crc32-core.o
+
+obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm.o
+crc-t10dif-arm-y := crc-t10dif-glue.o crc-t10dif-core.o
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/lib/crc-t10dif-core.S
index 2bbf2df9c1e2..2bbf2df9c1e2 100644
--- a/arch/arm/crypto/crct10dif-ce-core.S
+++ b/arch/arm/lib/crc-t10dif-core.S
diff --git a/arch/arm/lib/crc-t10dif-glue.c b/arch/arm/lib/crc-t10dif-glue.c
new file mode 100644
index 000000000000..d24dee62670e
--- /dev/null
+++ b/arch/arm/lib/crc-t10dif-glue.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/simd.h>
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+static DEFINE_STATIC_KEY_FALSE(have_neon);
+static DEFINE_STATIC_KEY_FALSE(have_pmull);
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
+
+asmlinkage u16 crc_t10dif_pmull64(u16 init_crc, const u8 *buf, size_t len);
+asmlinkage void crc_t10dif_pmull8(u16 init_crc, const u8 *buf, size_t len,
+ u8 out[16]);
+
+u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
+{
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
+ if (static_branch_likely(&have_pmull)) {
+ if (crypto_simd_usable()) {
+ kernel_neon_begin();
+ crc = crc_t10dif_pmull64(crc, data, length);
+ kernel_neon_end();
+ return crc;
+ }
+ } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
+ static_branch_likely(&have_neon) &&
+ crypto_simd_usable()) {
+ u8 buf[16] __aligned(16);
+
+ kernel_neon_begin();
+ crc_t10dif_pmull8(crc, data, length, buf);
+ kernel_neon_end();
+
+ crc = 0;
+ data = buf;
+ length = sizeof(buf);
+ }
+ }
+ return crc_t10dif_generic(crc, data, length);
+}
+EXPORT_SYMBOL(crc_t10dif_arch);
+
+static int __init crc_t10dif_arm_init(void)
+{
+ if (elf_hwcap & HWCAP_NEON) {
+ static_branch_enable(&have_neon);
+ if (elf_hwcap2 & HWCAP2_PMULL)
+ static_branch_enable(&have_pmull);
+ }
+ return 0;
+}
+arch_initcall(crc_t10dif_arm_init);
+
+static void __exit crc_t10dif_arm_exit(void)
+{
+}
+module_exit(crc_t10dif_arm_exit);
+
+bool crc_t10dif_is_optimized(void)
+{
+ return static_key_enabled(&have_neon);
+}
+EXPORT_SYMBOL(crc_t10dif_is_optimized);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions");
+MODULE_LICENSE("GPL v2");
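
With the crypto shash driver removed, CRC-T10DIF acceleration is now exposed
as a plain library hook. The sketch below shows how the generic CRC-T10DIF
library is expected to dispatch to the crc_t10dif_arch() hook exported above;
the helper name here is illustrative and the exact wrapper in the generic
library may differ.

/*
 * Sketch only: with CONFIG_CRC_T10DIF_ARCH enabled, generic callers end up
 * in crc_t10dif_arch() above, otherwise in the table-driven generic code.
 */
static inline u16 crc_t10dif_dispatch(u16 crc, const u8 *data, size_t len)
{
	if (IS_ENABLED(CONFIG_CRC_T10DIF_ARCH))
		return crc_t10dif_arch(crc, data, len);
	return crc_t10dif_generic(crc, data, len);
}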
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/lib/crc32-core.S
index 88f9edf94e95..6f674f30c70b 100644
--- a/arch/arm/crypto/crc32-ce-core.S
+++ b/arch/arm/lib/crc32-core.S
@@ -48,7 +48,6 @@
*/
#include <linux/linkage.h>
-#include <linux/cfi_types.h>
#include <asm/assembler.h>
.text
@@ -297,11 +296,11 @@ ARM_BE8(rev16 r3, r3 )
.endm
.align 5
-SYM_TYPED_FUNC_START(crc32_armv8_le)
+SYM_FUNC_START(crc32_armv8_le)
__crc32
SYM_FUNC_END(crc32_armv8_le)
.align 5
-SYM_TYPED_FUNC_START(crc32c_armv8_le)
+SYM_FUNC_START(crc32c_armv8_le)
__crc32 c
SYM_FUNC_END(crc32c_armv8_le)
diff --git a/arch/arm/lib/crc32-glue.c b/arch/arm/lib/crc32-glue.c
new file mode 100644
index 000000000000..2c30ba3d80e6
--- /dev/null
+++ b/arch/arm/lib/crc32-glue.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/simd.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+static DEFINE_STATIC_KEY_FALSE(have_crc32);
+static DEFINE_STATIC_KEY_FALSE(have_pmull);
+
+#define PMULL_MIN_LEN 64 /* min size of buffer for pmull functions */
+
+asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+static u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
+{
+ if (static_branch_likely(&have_crc32))
+ return crc32_armv8_le(crc, p, len);
+ return crc32_le_base(crc, p, len);
+}
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (len >= PMULL_MIN_LEN + 15 &&
+ static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+ size_t n = -(uintptr_t)p & 15;
+
+ /* align p to 16-byte boundary */
+ if (n) {
+ crc = crc32_le_scalar(crc, p, n);
+ p += n;
+ len -= n;
+ }
+ n = round_down(len, 16);
+ kernel_neon_begin();
+ crc = crc32_pmull_le(p, n, crc);
+ kernel_neon_end();
+ p += n;
+ len -= n;
+ }
+ return crc32_le_scalar(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+static u32 crc32c_le_scalar(u32 crc, const u8 *p, size_t len)
+{
+ if (static_branch_likely(&have_crc32))
+ return crc32c_armv8_le(crc, p, len);
+ return crc32c_le_base(crc, p, len);
+}
+
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (len >= PMULL_MIN_LEN + 15 &&
+ static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+ size_t n = -(uintptr_t)p & 15;
+
+ /* align p to 16-byte boundary */
+ if (n) {
+ crc = crc32c_le_scalar(crc, p, n);
+ p += n;
+ len -= n;
+ }
+ n = round_down(len, 16);
+ kernel_neon_begin();
+ crc = crc32c_pmull_le(p, n, crc);
+ kernel_neon_end();
+ p += n;
+ len -= n;
+ }
+ return crc32c_le_scalar(crc, p, len);
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_arm_init(void)
+{
+ if (elf_hwcap2 & HWCAP2_CRC32)
+ static_branch_enable(&have_crc32);
+ if (elf_hwcap2 & HWCAP2_PMULL)
+ static_branch_enable(&have_pmull);
+ return 0;
+}
+arch_initcall(crc32_arm_init);
+
+static void __exit crc32_arm_exit(void)
+{
+}
+module_exit(crc32_arm_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (elf_hwcap2 & (HWCAP2_CRC32 | HWCAP2_PMULL))
+ return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions");
+MODULE_LICENSE("GPL v2");
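
Both crc32_le_arch() and crc32c_le_arch() above use -(uintptr_t)p & 15 to
compute how many leading bytes must go through the scalar helper before p is
16-byte aligned for the PMULL path. A small host-side check with example
addresses (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	/* Hypothetical buffer start addresses */
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x100f, 0x1010 };

	for (unsigned int i = 0; i < 4; i++) {
		size_t head = -addrs[i] & 15;

		/* Expected: 0, 15, 1, 0 leading bytes respectively */
		printf("%#lx -> %zu byte(s) to the next 16-byte boundary\n",
		       (unsigned long)addrs[i], head);
	}
	return 0;
}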
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 344f5305f69a..04bd91c72521 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -58,6 +58,17 @@ config SOC_SAMA5D4
help
Select this if you are using one of Microchip's SAMA5D4 family SoC.
+config SOC_SAMA7D65
+ bool "SAMA7D65 family"
+ depends on ARCH_MULTI_V7
+ select HAVE_AT91_GENERATED_CLK
+ select HAVE_AT91_SAM9X60_PLL
+ select HAVE_AT91_USB_CLK
+ select HAVE_AT91_UTMI
+ select SOC_SAMA7
+ help
+ Select this if you are using one of Microchip's SAMA7D65 family SoC.
+
config SOC_SAMA7G5
bool "SAMA7G5 family"
depends on ARCH_MULTI_V7
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index b9b995f8a36e..05a1547642b6 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -598,7 +598,21 @@ static int at91_suspend_finish(unsigned long val)
return 0;
}
-static void at91_pm_switch_ba_to_vbat(void)
+/**
+ * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
+ * to automatic/hardware mode.
+ *
+ * The Backup Unit Power Switch can be managed either by software or hardware.
+ * Enabling hardware mode allows the automatic transition of power between
+ * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
+ * availability of these power sources.
+ *
+ * If the Backup Unit Power Switch is already in automatic mode, no action is
+ * required. If it is in software-controlled mode, it is switched to automatic
+ * mode to enhance safety and eliminate the need for toggling between power
+ * sources.
+ */
+static void at91_pm_switch_ba_to_auto(void)
{
unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
unsigned int val;
@@ -609,24 +623,19 @@ static void at91_pm_switch_ba_to_vbat(void)
val = readl(soc_pm.data.sfrbu + offset);
- /* Already on VBAT. */
- if (!(val & soc_pm.sfrbu_regs.pswbu.state))
+ /* Already on auto/hardware. */
+ if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
return;
- val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
- val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
+ val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
+ val |= soc_pm.sfrbu_regs.pswbu.key;
writel(val, soc_pm.data.sfrbu + offset);
-
- /* Wait for update. */
- val = readl(soc_pm.data.sfrbu + offset);
- while (val & soc_pm.sfrbu_regs.pswbu.state)
- val = readl(soc_pm.data.sfrbu + offset);
}
static void at91_pm_suspend(suspend_state_t state)
{
if (soc_pm.data.mode == AT91_PM_BACKUP) {
- at91_pm_switch_ba_to_vbat();
+ at91_pm_switch_ba_to_auto();
cpu_suspend(0, at91_suspend_finish);
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 3312ef93355d..a5bf5554800f 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -289,7 +289,7 @@ static struct gpiod_lookup_table nokia770_irq_gpio_table = {
GPIO_LOOKUP("gpio-0-15", 15, "ads7846_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for retu IRQ */
- GPIO_LOOKUP("gpio-48-63", 15, "retu_irq",
+ GPIO_LOOKUP("gpio-48-63", 14, "retu_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for tahvo IRQ */
GPIO_LOOKUP("gpio-32-47", 8, "tahvo_irq",
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 2441d96b7144..a4785302b7ae 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -523,7 +523,7 @@ int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm)
* Set the powerdomain @pwrdm's next power state to @pwrst. The powerdomain
* may not enter this state immediately if the preconditions for this state
* have not been satisfied. Returns -EINVAL if the powerdomain pointer is
- * null or if the power state is invalid for the powerdomin, or returns 0
+ * null or if the power state is invalid for the powerdomain, or returns 0
* upon success.
*/
int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 0c8d9000df5a..dd930e3a61a4 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -31,10 +31,10 @@
/*
* Constants
*/
-#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
-#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
-#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
-#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
+#define SHARPSL_CHARGE_ON_TIME_INTERVAL (secs_to_jiffies(60))
+#define SHARPSL_CHARGE_FINISH_TIME (secs_to_jiffies(10*60))
+#define SHARPSL_BATCHK_TIME (secs_to_jiffies(15))
+#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
#define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
#define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 9f7454b8efa7..2cde4c83b7f9 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -1124,7 +1124,7 @@ static int ecard_match(struct device *_dev, const struct device_driver *_drv)
return ret;
}
-struct bus_type ecard_bus_type = {
+const struct bus_type ecard_bus_type = {
.name = "ecard",
.dev_groups = ecard_dev_groups,
.match = ecard_match,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f5b7a16c5803..f02f872ea8a9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -726,13 +726,8 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
- void *ptr = memblock_alloc(sz, sz);
+ return memblock_alloc_or_panic(sz, sz);
- if (!ptr)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, sz, sz);
-
- return ptr;
}
static void *__init late_alloc(unsigned long sz)
@@ -1027,10 +1022,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
if (!nr)
return;
- svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
- if (!svm)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, sizeof(*svm) * nr, __alignof__(*svm));
+ svm = memblock_alloc_or_panic(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
@@ -1052,10 +1044,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
struct vm_struct *vm;
struct static_vm *svm;
- svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
- if (!svm)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, sizeof(*svm), __alignof__(*svm));
+ svm = memblock_alloc_or_panic(sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index c415f3859b20..1a8f6914ee59 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -162,10 +162,7 @@ void __init paging_init(const struct machine_desc *mdesc)
mpu_setup();
/* allocate the zero page. */
- zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ zero_page = (void *)memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
bootmem_init();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index f8e9bc58a84f..4eb81b7ed03a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -17,11 +17,11 @@
#include "mm.h"
#ifdef CONFIG_ARM_LPAE
-#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
-#define __pgd_free(pgd) kfree(pgd)
+#define _pgd_alloc(mm) kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO)
+#define _pgd_free(mm, pgd) kfree(pgd)
#else
-#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
-#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
+#define _pgd_alloc(mm) __pgd_alloc(mm, 2)
+#define _pgd_free(mm, pgd) __pgd_free(mm, pgd)
#endif
/*
@@ -35,12 +35,10 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
- new_pgd = __pgd_alloc();
+ new_pgd = _pgd_alloc(mm);
if (!new_pgd)
goto no_pgd;
- memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
/*
* Copy over the kernel and IO PGD entries
*/
@@ -134,7 +132,7 @@ no_pmd:
no_pud:
p4d_free(mm, new_p4d);
no_p4d:
- __pgd_free(new_pgd);
+ _pgd_free(mm, new_pgd);
no_pgd:
return NULL;
}
@@ -207,5 +205,5 @@ no_pgd:
p4d_free(mm, p4d);
}
#endif
- __pgd_free(pgd_base);
+ _pgd_free(mm, pgd_base);
}
diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h
index 32090b0fb250..a2f0c47e0ce7 100644
--- a/arch/arm/vfp/vfpinstr.h
+++ b/arch/arm/vfp/vfpinstr.h
@@ -62,8 +62,6 @@
#define FPSCR_C (1 << 29)
#define FPSCR_V (1 << 28)
-#ifdef CONFIG_AS_VFP_VMRS_FPINST
-
#define fmrx(_vfp_) ({ \
u32 __v; \
asm volatile (".fpu vfpv2\n" \
@@ -78,26 +76,6 @@
: : "r" (_var_) : "cc"); \
})
-#else
-
-#define vfpreg(_vfp_) #_vfp_
-
-#define fmrx(_vfp_) ({ \
- u32 __v; \
- asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
- "cr0, 0 @ fmrx %0, " #_vfp_ \
- : "=r" (__v) : : "cc"); \
- __v; \
-})
-
-#define fmxr(_vfp_, _var_) ({ \
- asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
- "cr0, 0 @ fmxr " #_vfp_ ", %0" \
- : : "r" (_var_) : "cc"); \
-})
-
-#endif
-
u32 vfp_single_cpdo(u32 inst, u32 fpscr);
u32 vfp_single_cprt(u32 inst, u32 fpscr, struct pt_regs *regs);
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index d44867fc0c5e..7803d50b90f8 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -168,7 +168,7 @@ static void vfp_thread_copy(struct thread_info *thread)
/*
* When this function is called with the following 'cmd's, the following
* is true while this function is being run:
- * THREAD_NOFTIFY_SWTICH:
+ * THREAD_NOTIFY_SWITCH:
* - the previously running thread will not be scheduled onto another CPU.
* - the next thread to be run (v) will not be running on another CPU.
* - thread->cpu is the local CPU number
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 83017757be8e..fcdd0ed3eca8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,8 @@ config ARM64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
+ select ARCH_HAS_CRC32
+ select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 02007256709e..02f9248f7c84 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -101,6 +101,11 @@ config ARCH_BITMAIN
help
This enables support for the Bitmain SoC Family.
+config ARCH_BLAIZE
+ bool "Blaize SoC Platforms"
+ help
+ This enables support for the Blaize SoC family.
+
config ARCH_EXYNOS
bool "Samsung Exynos SoC family"
select COMMON_CLK_SAMSUNG
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 21cd3a87f385..79b73a21ddc2 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -10,6 +10,7 @@ subdir-y += apm
subdir-y += apple
subdir-y += arm
subdir-y += bitmain
+subdir-y += blaize
subdir-y += broadcom
subdir-y += cavium
subdir-y += exynos
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
index 29ac7716c7a5..a24adba201af 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
@@ -101,6 +101,39 @@
#size-cells = <1>;
ranges = <0 0 0 0x3fffffff>;
+ syscon: syscon@3000000 {
+ compatible = "allwinner,sun50i-a100-system-control",
+ "allwinner,sun50i-a64-system-control";
+ reg = <0x03000000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ sram_a1: sram@20000 {
+ compatible = "mmio-sram";
+ reg = <0x00020000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x00020000 0x4000>;
+ };
+
+ sram_c: sram@24000 {
+ compatible = "mmio-sram";
+ reg = <0x024000 0x21000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x024000 0x21000>;
+ };
+
+ sram_a2: sram@100000 {
+ compatible = "mmio-sram";
+ reg = <0x0100000 0x14000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0100000 0x14000>;
+ };
+ };
+
ccu: clock@3001000 {
compatible = "allwinner,sun50i-a100-ccu";
reg = <0x03001000 0x1000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 379c2c8466f5..86d44349e095 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -390,6 +390,8 @@
&tcon0 {
pinctrl-names = "default";
pinctrl-0 = <&lcd_rgb666_pins>;
+ assigned-clocks = <&ccu CLK_TCON0>;
+ assigned-clock-parents = <&ccu CLK_PLL_VIDEO0_2X>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index b407e1dd08a7..ec055510af8b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -369,6 +369,8 @@
&tcon0 {
pinctrl-names = "default";
pinctrl-0 = <&lcd_rgb666_pins>;
+ assigned-clocks = <&ccu CLK_TCON0>;
+ assigned-clock-parents = <&ccu CLK_PLL_VIDEO0_2X>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index a5c3920e0f04..0fecf0abb204 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -445,6 +445,8 @@
clock-names = "ahb", "tcon-ch0";
clock-output-names = "tcon-data-clock";
#clock-cells = <0>;
+ assigned-clocks = <&ccu CLK_TCON0>;
+ assigned-clock-parents = <&ccu CLK_PLL_MIPI>;
resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>;
reset-names = "lcd", "lvds";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
index bafd3e803106..17e6aef67aaf 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h313-tanix-tx1.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-h616.dtsi"
+#include "sun50i-h616-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_swvp.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_swvp.dts
index 0d837d3e65a5..34ccf8138f7b 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_swvp.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_swvp.dts
@@ -63,7 +63,6 @@
status = "okay";
phy-mode = "rgmii";
phy-addr = <0xffffffff>;
- snps,max-mtu = <0x0>;
};
&gmac1 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 52d57773a77f..1736bd2e96e2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -178,9 +178,8 @@
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
index c1470416faad..7dffeb5931c9 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
@@ -102,8 +102,7 @@
};
&sd_emmc_a {
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts
index 92c425d0259c..ff9145d49090 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts
@@ -21,8 +21,7 @@
};
&sd_emmc_a {
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 7e7dc87ede2d..b52a830efcce 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -134,9 +134,8 @@
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
index d4858afa0e9c..feb31207773f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
@@ -72,8 +72,7 @@
};
&sd_emmc_a {
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts
index d02b80d77378..6c8bec1853ac 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q201.dts
@@ -21,8 +21,7 @@
};
&sd_emmc_a {
- brcmf: wifi@1 {
+ sdio: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm4329-fmac";
};
};
diff --git a/arch/arm64/boot/dts/blaize/Makefile b/arch/arm64/boot/dts/blaize/Makefile
new file mode 100644
index 000000000000..7e10b3199e6c
--- /dev/null
+++ b/arch/arm64/boot/dts/blaize/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0+
+dtb-$(CONFIG_ARCH_BLAIZE) += blaize-blzp1600-cb2.dtb
diff --git a/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts b/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts
new file mode 100644
index 000000000000..7e3cef2ed352
--- /dev/null
+++ b/arch/arm64/boot/dts/blaize/blaize-blzp1600-cb2.dts
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024 Blaize, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "blaize-blzp1600-som.dtsi"
+
+/ {
+ model = "Blaize BLZP1600 SoM1600P CB2 Development Board";
+
+ compatible = "blaize,blzp1600-cb2", "blaize,blzp1600";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ gpio_expander: gpio@74 {
+ compatible = "ti,tca9539";
+ reg = <0x74>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "RSP_PIN_7", /* GPIO_0 */
+ "RSP_PIN_11", /* GPIO_1 */
+ "RSP_PIN_13", /* GPIO_2 */
+ "RSP_PIN_15", /* GPIO_3 */
+ "RSP_PIN_27", /* GPIO_4 */
+ "RSP_PIN_29", /* GPIO_5 */
+ "RSP_PIN_31", /* GPIO_6 */
+ "RSP_PIN_33", /* GPIO_7 */
+ "RSP_PIN_37", /* GPIO_8 */
+ "RSP_PIN_16", /* GPIO_9 */
+ "RSP_PIN_18", /* GPIO_10 */
+ "RSP_PIN_22", /* GPIO_11 */
+ "RSP_PIN_28", /* GPIO_12 */
+ "RSP_PIN_32", /* GPIO_13 */
+ "RSP_PIN_36", /* GPIO_14 */
+ "TP31"; /* GPIO_15 */
+ };
+
+ gpio_expander_m2: gpio@75 {
+ compatible = "ti,tca9539";
+ reg = <0x75>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "M2_W_DIS1_N", /* GPIO_0 */
+ "M2_W_DIS2_N", /* GPIO_1 */
+ "M2_UART_WAKE_N", /* GPIO_2 */
+ "M2_COEX3", /* GPIO_3 */
+ "M2_COEX_RXD", /* GPIO_4 */
+ "M2_COEX_TXD", /* GPIO_5 */
+ "M2_VENDOR_PIN40", /* GPIO_6 */
+ "M2_VENDOR_PIN42", /* GPIO_7 */
+ "M2_VENDOR_PIN38", /* GPIO_8 */
+ "M2_SDIO_RST_N", /* GPIO_9 */
+ "M2_SDIO_WAKE_N", /* GPIO_10 */
+ "M2_PETN1", /* GPIO_11 */
+ "M2_PERP1", /* GPIO_12 */
+ "M2_PERN1", /* GPIO_13 */
+ "UIM_SWP", /* GPIO_14 */
+ "UART1_TO_RSP"; /* GPIO_15 */
+ };
+};
diff --git a/arch/arm64/boot/dts/blaize/blaize-blzp1600-som.dtsi b/arch/arm64/boot/dts/blaize/blaize-blzp1600-som.dtsi
new file mode 100644
index 000000000000..bfdff5953edd
--- /dev/null
+++ b/arch/arm64/boot/dts/blaize/blaize-blzp1600-som.dtsi
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024 Blaize, Inc. All rights reserved.
+ */
+
+#include "blaize-blzp1600.dtsi"
+
+/ {
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x1 0x0>;
+ };
+};
+
+/* i2c4 bus is available only on the SoM, not on the board */
+&i2c4 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi b/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi
new file mode 100644
index 000000000000..7d399e6a532f
--- /dev/null
+++ b/arch/arm64/boot/dts/blaize/blaize-blzp1600.dtsi
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024 Blaize, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x1>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ l2: l2-cache0 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ };
+ };
+
+ firmware {
+ scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0x82002000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ shmem = <&scmi0_shm>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_rst: protocol@16 {
+ reg = <0x16>;
+ #reset-cells = <1>;
+ };
+ };
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* SCMI reserved buffer space on DDR space */
+ scmi0_shm: scmi-shmem@800 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x800 0x0 0x80>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = /* Physical Secure PPI */
+ <GIC_PPI 13 (GIC_CPU_MASK_RAW(0x3) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ /* Physical Non-Secure PPI */
+ <GIC_PPI 14 (GIC_CPU_MASK_RAW(0x3) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ /* Hypervisor PPI */
+ <GIC_PPI 10 (GIC_CPU_MASK_RAW(0x3) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ /* Virtual PPI */
+ <GIC_PPI 11 (GIC_CPU_MASK_RAW(0x3) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc@200000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2 0x0 0x850000>;
+
+ gic: interrupt-controller@410000 {
+ compatible = "arm,gic-400";
+ reg = <0x410000 0x20000>,
+ <0x420000 0x20000>,
+ <0x440000 0x20000>,
+ <0x460000 0x20000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_RAW(0x3) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ uart0: serial@4d0000 {
+ compatible = "ns16550a";
+ reg = <0x4d0000 0x1000>;
+ clocks = <&scmi_clk 59>;
+ resets = <&scmi_rst 59>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ uart1: serial@4e0000 {
+ compatible = "ns16550a";
+ reg = <0x4e0000 0x1000>;
+ clocks = <&scmi_clk 60>;
+ resets = <&scmi_rst 60>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@4f0000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x4f0000 0x1000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 54>;
+ resets = <&scmi_rst 54>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@500000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x500000 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 55>;
+ resets = <&scmi_rst 55>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@510000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x510000 0x1000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 56>;
+ resets = <&scmi_rst 56>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@520000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x520000 0x1000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 57>;
+ resets = <&scmi_rst 57>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@530000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x530000 0x1000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 58>;
+ resets = <&scmi_rst 58>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ arm_cc712: crypto@550000 {
+ compatible = "arm,cryptocell-712-ree";
+ reg = <0x550000 0x1000>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 7>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 92565e9781ad..3d0efb93b06d 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -7,6 +7,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \
bcm2711-rpi-4-b.dtb \
bcm2711-rpi-cm4-io.dtb \
bcm2712-rpi-5-b.dtb \
+ bcm2712-d-rpi-5-b.dtb \
bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-d-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-d-rpi-5-b.dts
new file mode 100644
index 000000000000..7de24d60bcd1
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2712-d-rpi-5-b.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/dts-v1/;
+
+#include "bcm2712-rpi-5-b.dts"
+
+&gio_aon {
+ brcm,gpio-bank-widths = <15 6>;
+
+ gpio-line-names =
+ "RP1_SDA", // AON_GPIO_00
+ "RP1_SCL", // AON_GPIO_01
+ "RP1_RUN", // AON_GPIO_02
+ "SD_IOVDD_SEL", // AON_GPIO_03
+ "SD_PWR_ON", // AON_GPIO_04
+ "SD_CDET_N", // AON_GPIO_05
+ "SD_FLG_N", // AON_GPIO_06
+ "", // AON_GPIO_07
+ "2712_WAKE", // AON_GPIO_08
+ "2712_STAT_LED", // AON_GPIO_09
+ "", // AON_GPIO_10
+ "", // AON_GPIO_11
+ "PMIC_INT", // AON_GPIO_12
+ "UART_TX_FS", // AON_GPIO_13
+ "UART_RX_FS", // AON_GPIO_14
+ "", // AON_GPIO_15
+ "", // AON_GPIO_16
+
+ // Pad bank0 out to 32 entries
+ "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
+
+ "HDMI0_SCL", // AON_SGPIO_00
+ "HDMI0_SDA", // AON_SGPIO_01
+ "HDMI1_SCL", // AON_SGPIO_02
+ "HDMI1_SDA", // AON_SGPIO_03
+ "PMIC_SCL", // AON_SGPIO_04
+ "PMIC_SDA"; // AON_SGPIO_05
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
index 2bdbb6780242..fbc56309660f 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
@@ -62,3 +62,45 @@
sd-uhs-ddr50;
sd-uhs-sdr104;
};
+
+&soc {
+ firmware: firmware {
+ compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mboxes = <&mailbox>;
+ dma-ranges;
+
+ firmware_clocks: clocks {
+ compatible = "raspberrypi,firmware-clocks";
+ #clock-cells = <1>;
+ };
+
+ reset: reset {
+ compatible = "raspberrypi,firmware-reset";
+ #reset-cells = <1>;
+ };
+ };
+
+ power: power {
+ compatible = "raspberrypi,bcm2835-power";
+ firmware = <&firmware>;
+ #power-domain-cells = <1>;
+ };
+};
+
+&hvs {
+ clocks = <&firmware_clocks 4>, <&firmware_clocks 16>;
+ clock-names = "core", "disp";
+};
+
+&hdmi0 {
+ clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 0>, <&clk_27MHz>;
+ clock-names = "hdmi", "bvb", "audio", "cec";
+};
+
+&hdmi1 {
+ clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>;
+ clock-names = "hdmi", "bvb", "audio", "cec";
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index 26a29e5e5078..689c82b7f596 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -221,11 +221,6 @@
#mbox-cells = <0>;
};
- local_intc: interrupt-controller@7cd00000 {
- compatible = "brcm,bcm2836-l1-intc";
- reg = <0x7cd00000 0x100>;
- };
-
uart10: serial@7d001000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x7d001000 0x200>;
@@ -265,6 +260,172 @@
interrupt-controller;
#interrupt-cells = <3>;
};
+
+ aon_intr: interrupt-controller@7d510600 {
+ compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
+ reg = <0x7d510600 0x30>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ pixelvalve0: pixelvalve@7c410000 {
+ compatible = "brcm,bcm2712-pixelvalve0";
+ reg = <0x7c410000 0x100>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pixelvalve1: pixelvalve@7c411000 {
+ compatible = "brcm,bcm2712-pixelvalve1";
+ reg = <0x7c411000 0x100>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ mop: mop@7c500000 {
+ compatible = "brcm,bcm2712-mop";
+ reg = <0x7c500000 0x28>;
+ interrupt-parent = <&disp_intr>;
+ interrupts = <1>;
+ };
+
+ moplet: moplet@7c501000 {
+ compatible = "brcm,bcm2712-moplet";
+ reg = <0x7c501000 0x20>;
+ interrupt-parent = <&disp_intr>;
+ interrupts = <0>;
+ };
+
+ disp_intr: interrupt-controller@7c502000 {
+ compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
+ reg = <0x7c502000 0x30>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ dvp: clock@7c700000 {
+ compatible = "brcm,brcm2711-dvp";
+ reg = <0x7c700000 0x10>;
+ clocks = <&clk_108MHz>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ ddc0: i2c@7d508200 {
+ compatible = "brcm,brcmstb-i2c";
+ reg = <0x7d508200 0x58>;
+ interrupt-parent = <&bsc_irq>;
+ interrupts = <1>;
+ clock-frequency = <97500>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ ddc1: i2c@7d508280 {
+ compatible = "brcm,brcmstb-i2c";
+ reg = <0x7d508280 0x58>;
+ interrupt-parent = <&bsc_irq>;
+ interrupts = <2>;
+ clock-frequency = <97500>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ bsc_irq: interrupt-controller@7d508380 {
+ compatible = "brcm,bcm7271-l2-intc";
+ reg = <0x7d508380 0x10>;
+ interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ main_irq: interrupt-controller@7d508400 {
+ compatible = "brcm,bcm7271-l2-intc";
+ reg = <0x7d508400 0x10>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ hdmi0: hdmi@7c701400 {
+ compatible = "brcm,bcm2712-hdmi0";
+ reg = <0x7c701400 0x300>,
+ <0x7c701000 0x200>,
+ <0x7c701d00 0x300>,
+ <0x7c702000 0x80>,
+ <0x7c703800 0x200>,
+ <0x7c704000 0x800>,
+ <0x7c700100 0x80>,
+ <0x7d510800 0x100>,
+ <0x7c720000 0x100>;
+ reg-names = "hdmi",
+ "dvp",
+ "phy",
+ "rm",
+ "packet",
+ "metadata",
+ "csc",
+ "cec",
+ "hd";
+ resets = <&dvp 1>;
+ interrupt-parent = <&aon_intr>;
+ interrupts = <1>, <2>, <3>,
+ <7>, <8>;
+ interrupt-names = "cec-tx", "cec-rx", "cec-low",
+ "hpd-connected", "hpd-removed";
+ ddc = <&ddc0>;
+ };
+
+ hdmi1: hdmi@7c706400 {
+ compatible = "brcm,bcm2712-hdmi1";
+ reg = <0x7c706400 0x300>,
+ <0x7c706000 0x200>,
+ <0x7c706d00 0x300>,
+ <0x7c707000 0x80>,
+ <0x7c708800 0x200>,
+ <0x7c709000 0x800>,
+ <0x7c700180 0x80>,
+ <0x7d511000 0x100>,
+ <0x7c720000 0x100>;
+ reg-names = "hdmi",
+ "dvp",
+ "phy",
+ "rm",
+ "packet",
+ "metadata",
+ "csc",
+ "cec",
+ "hd";
+ resets = <&dvp 2>;
+ interrupt-parent = <&aon_intr>;
+ interrupts = <11>, <12>, <13>,
+ <14>, <15>;
+ interrupt-names = "cec-tx", "cec-rx", "cec-low",
+ "hpd-connected", "hpd-removed";
+ ddc = <&ddc1>;
+ };
+ };
+
+ axi: axi {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ranges = <0x00 0x00000000 0x00 0x00000000 0x10 0x00000000>,
+ <0x10 0x00000000 0x10 0x00000000 0x01 0x00000000>,
+ <0x14 0x00000000 0x14 0x00000000 0x04 0x00000000>,
+ <0x18 0x00000000 0x18 0x00000000 0x04 0x00000000>,
+ <0x1c 0x00000000 0x1c 0x00000000 0x04 0x00000000>;
+
+ dma-ranges = <0x00 0x00000000 0x00 0x00000000 0x10 0x00000000>,
+ <0x10 0x00000000 0x10 0x00000000 0x01 0x00000000>,
+ <0x14 0x00000000 0x14 0x00000000 0x04 0x00000000>,
+ <0x18 0x00000000 0x18 0x00000000 0x04 0x00000000>,
+ <0x1c 0x00000000 0x1c 0x00000000 0x04 0x00000000>;
+
+ vc4: gpu {
+ compatible = "brcm,bcm2712-vc6";
+ };
};
timer {
@@ -280,4 +441,26 @@
<GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>;
};
+
+ clk_27MHz: clk-27M {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <27000000>;
+ clock-output-names = "27MHz-clock";
+ };
+
+ clk_108MHz: clk-108M {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <108000000>;
+ clock-output-names = "108MHz-clock";
+ };
+
+ hvs: hvs@107c580000 {
+ compatible = "brcm,bcm2712-hvs";
+ reg = <0x10 0x7c580000 0x0 0x1a000>;
+ interrupt-parent = <&disp_intr>;
+ interrupts = <2>, <9>, <16>;
+ interrupt-names = "ch0-eof", "ch1-eof", "ch2-eof";
+ };
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/Makefile b/arch/arm64/boot/dts/broadcom/bcmbca/Makefile
index 27741b71ba9e..9a8461d91c8c 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/Makefile
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_BCMBCA) += \
bcm4906-netgear-r8000p.dtb \
bcm4906-tplink-archer-c2300-v1.dtb \
+ bcm4906-zyxel-ex3510b.dtb \
bcm4908-asus-gt-ac5300.dtb \
bcm4908-netgear-raxe500.dtb \
bcm94908.dtb \
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
index 999d93730240..a5f9ec92bd5e 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts
@@ -144,16 +144,20 @@
#size-cells = <1>;
partition@0 {
- compatible = "nvmem-cells";
label = "cferom";
reg = <0x0 0x100000>;
-
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0x100000>;
- base_mac_addr: mac@106a0 {
- reg = <0x106a0 0x6>;
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ base_mac_addr: mac@106a0 {
+ reg = <0x106a0 0x6>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-zyxel-ex3510b.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-zyxel-ex3510b.dts
new file mode 100644
index 000000000000..54e453bd09f7
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-zyxel-ex3510b.dts
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "bcm4906.dtsi"
+
+/ {
+ compatible = "zyxel,ex3510b", "brcm,bcm4906", "brcm,bcm4908", "brcm,bcmbca";
+ model = "Zyxel EX3510-B";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x20000000>;
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <100>;
+
+ key-wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
+ };
+
+ key-reset {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio0 23 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&leds {
+ pinctrl-0 = <&pins_led_0_a>, <&pins_led_2_a>, <&pins_led_3_a>,
+ <&pins_led_4_a>, <&pins_led_10_a>, <&pins_led_12_a>,
+ <&pins_led_14_a>, <&pins_led_15_a>, <&pins_led_21_a>;
+ pinctrl-names = "default";
+
+ led@0 {
+ reg = <0x0>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ function = LED_FUNCTION_WAN_ONLINE;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <0x3>;
+ function = LED_FUNCTION_WAN_ONLINE;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@4 {
+ reg = <0x4>;
+ function = LED_FUNCTION_USB;
+ color = <LED_COLOR_ID_GREEN>;
+ trigger-sources = <&ohci_port1>, <&ohci_port2>,
+ <&ehci_port1>, <&ehci_port2>,
+ <&xhci_port1>, <&xhci_port2>;
+ linux,default-trigger = "usbport";
+ };
+
+ led@a {
+ reg = <0xa>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "default-on";
+ };
+
+ led@c {
+ reg = <0xc>;
+ function = LED_FUNCTION_LAN;
+ color = <LED_COLOR_ID_GREEN>;
+ active-low;
+ };
+
+ led@e {
+ reg = <0xe>;
+ function = LED_FUNCTION_WPS;
+ color = <LED_COLOR_ID_GREEN>;
+ active-low;
+ };
+
+ led@f {
+ reg = <0xf>;
+ function = LED_FUNCTION_WPS;
+ color = <LED_COLOR_ID_RED>;
+ active-low;
+ };
+
+ led@15 {
+ reg = <0x15>;
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_GREEN>;
+ active-low;
+ };
+};
+
+&enet {
+ nvmem-cells = <&base_mac_addr>;
+ nvmem-cell-names = "mac-address";
+};
+
+&usb_phy {
+ brcm,ioc = <1>;
+ brcm,ipp = <1>;
+ status = "okay";
+};
+
+&ehci {
+ status = "okay";
+};
+
+&ohci {
+ status = "okay";
+};
+
+&xhci {
+ status = "okay";
+};
+
+&ports {
+ port@0 {
+ label = "lan1";
+ };
+
+ port@1 {
+ label = "lan2";
+ };
+
+ port@2 {
+ label = "lan3";
+ };
+
+ port@3 {
+ label = "lan4";
+ };
+
+ port@7 {
+ reg = <7>;
+ phy-mode = "internal";
+ phy-handle = <&phy12>;
+ label = "wan";
+ };
+};
+
+&nand_controller {
+ status = "okay";
+};
+
+&nandcs {
+ brcm,nand-oob-sector-size = <27>;
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ partitions {
+ compatible = "brcm,bcm4908-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ compatible = "nvmem-cells";
+ label = "cferom";
+ reg = <0x0 0x100000>;
+ read-only;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ base_mac_addr: mac@106a0 {
+ reg = <0x106a0 0x6>;
+ };
+ };
+
+ partition@100000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x100000 0x5f80000>;
+ };
+
+ partition@6080000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x6080000 0x5f80000>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index 8b924812322c..613ba7ee43d6 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -30,7 +30,7 @@
compatible = "brcm,brahma-b53";
reg = <0x0>;
enable-method = "spin-table";
- cpu-release-addr = <0x0 0xfff8>;
+ cpu-release-addr = <0x0 0xff8>;
next-level-cache = <&l2>;
};
@@ -39,7 +39,7 @@
compatible = "brcm,brahma-b53";
reg = <0x1>;
enable-method = "spin-table";
- cpu-release-addr = <0x0 0xfff8>;
+ cpu-release-addr = <0x0 0xff8>;
next-level-cache = <&l2>;
};
@@ -48,7 +48,7 @@
compatible = "brcm,brahma-b53";
reg = <0x2>;
enable-method = "spin-table";
- cpu-release-addr = <0x0 0xfff8>;
+ cpu-release-addr = <0x0 0xff8>;
next-level-cache = <&l2>;
};
@@ -57,7 +57,7 @@
compatible = "brcm,brahma-b53";
reg = <0x3>;
enable-method = "spin-table";
- cpu-release-addr = <0x0 0xfff8>;
+ cpu-release-addr = <0x0 0xff8>;
next-level-cache = <&l2>;
};
@@ -68,6 +68,16 @@
};
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ cfe-stub@0 {
+ reg = <0x0 0x0 0x0 0x1000>;
+ };
+ };
+
axi@81000000 {
compatible = "simple-bus";
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
index f43cfe66b6af..5939d342aec7 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
@@ -137,7 +137,6 @@
spi-cpha;
spi-cpol;
pl022,interface = <0>;
- pl022,slave-tx-disable = <0>;
pl022,com-mode = <0>;
pl022,rx-level-trig = <1>;
pl022,tx-level-trig = <1>;
@@ -200,7 +199,6 @@
};
&qspi {
- bspi-sel = <0>;
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
index c50df1d02797..0e134a94e142 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-xmc.dts
@@ -151,7 +151,6 @@
#size-cells = <1>;
compatible = "m25p80";
spi-max-frequency = <62500000>;
- m25p,default-addr-width = <3>;
reg = <0x0 0x0>;
partition@0 {
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index cfd9fd23a1c2..5a4b81faff20 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -134,7 +134,6 @@
brcm,pcie-ob;
brcm,pcie-ob-oarr-size;
brcm,pcie-ob-axi-offset = <0x00000000>;
- brcm,pcie-ob-window-size = <256>;
status = "disabled";
@@ -165,7 +164,6 @@
brcm,pcie-ob;
brcm,pcie-ob-oarr-size;
brcm,pcie-ob-axi-offset = <0x30000000>;
- brcm,pcie-ob-window-size = <256>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
index 7a934499b235..f6f4bc650a94 100644
--- a/arch/arm64/boot/dts/exynos/Makefile
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -8,6 +8,10 @@ dtb-$(CONFIG_ARCH_EXYNOS) += \
exynos7885-jackpotlte.dtb \
exynos850-e850-96.dtb \
exynos8895-dreamlte.dtb \
+ exynos9810-starlte.dtb \
exynos990-c1s.dtb \
+ exynos990-r8s.dtb \
+ exynos990-x1s.dtb \
+ exynos990-x1slte.dtb \
exynosautov9-sadk.dtb \
exynosautov920-sadk.dtb
diff --git a/arch/arm64/boot/dts/exynos/exynos850-e850-96.dts b/arch/arm64/boot/dts/exynos/exynos850-e850-96.dts
index f074df8982b3..7d70a32e75b2 100644
--- a/arch/arm64/boot/dts/exynos/exynos850-e850-96.dts
+++ b/arch/arm64/boot/dts/exynos/exynos850-e850-96.dts
@@ -45,17 +45,9 @@
};
};
- /*
- * RAM: 4 GiB (eMCP):
- * - 2 GiB at 0x80000000
- * - 2 GiB at 0x880000000
- *
- * 0xbab00000..0xbfffffff: secure memory (85 MiB).
- */
memory@80000000 {
device_type = "memory";
- reg = <0x0 0x80000000 0x3ab00000>,
- <0x0 0xc0000000 0x40000000>,
+ reg = <0x0 0x80000000 0x80000000>,
<0x8 0x80000000 0x80000000>;
};
@@ -146,6 +138,11 @@
#size-cells = <1>;
ranges;
+ secure_mem: memory@bab00000 {
+ reg = <0x0 0xbab00000 0x5500000>;
+ no-map;
+ };
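
The memory node above now exposes the two full 2 GiB banks, and the secure carve-out moves into reserved-memory as a no-map region instead. A quick check that the 0x5500000 size matches the 85 MiB / 0xbab00000..0xbfffffff figures quoted in the removed comment:

# Sanity-check sketch for the secure_mem region above.
base, size = 0xbab00000, 0x5500000
print(size // (1024 * 1024))   # 85 (MiB)
print(hex(base + size - 1))    # 0xbfffffff
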
+
ramoops@f0000000 {
compatible = "ramoops";
reg = <0x0 0xf0000000 0x200000>;
diff --git a/arch/arm64/boot/dts/exynos/exynos8895.dtsi b/arch/arm64/boot/dts/exynos/exynos8895.dtsi
index 9f9ac5359879..36657abfc615 100644
--- a/arch/arm64/boot/dts/exynos/exynos8895.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos8895.dtsi
@@ -38,7 +38,17 @@
<&cpu3>;
};
- /* There's no PMU model for the Mongoose cores */
+ mongoose-m2-pmu {
+ compatible = "samsung,mongoose-pmu";
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu4>,
+ <&cpu5>,
+ <&cpu6>,
+ <&cpu7>;
+ };
cpus {
#address-cells = <1>;
@@ -218,6 +228,19 @@
"usi1", "usi2", "usi3";
};
+ serial_0: serial@10430000 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x10430000 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_UART_DBG_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_bus>;
+ samsung,uart-fifosize = <256>;
+ status = "disabled";
+ };
+
pinctrl_peric0: pinctrl@104d0000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x104d0000 0x1000>;
@@ -250,12 +273,69 @@
"usi10", "usi11", "usi12", "usi13";
};
+ serial_1: serial@10830000 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x10830000 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_UART_BT_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_UART_BT_EXT_UCLK>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_bus>;
+ samsung,uart-fifosize = <256>;
+ status = "disabled";
+ };
+
pinctrl_peric1: pinctrl@10980000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x10980000 0x1000>;
interrupts = <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
};
+ hsi2c_1: i2c@10990000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10990000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c1_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ hsi2c_2: i2c@109a0000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x109a0000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c2_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ hsi2c_3: i2c@109b0000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x109b0000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c3_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ hsi2c_4: i2c@109c0000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x109c0000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c4_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
spi_0: spi@109d0000 {
compatible = "samsung,exynos8895-spi",
"samsung,exynos850-spi";
diff --git a/arch/arm64/boot/dts/exynos/exynos9810-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos9810-pinctrl.dtsi
new file mode 100644
index 000000000000..88091bf09e4e
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos9810-pinctrl.dtsi
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung's Exynos 9810 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2024 Markuss Broks <markuss.broks@gmail.com>
+ * Copyright (c) 2024 Maksym Holovach <nergzd@nergzd723.xyz>
+ */
+
+#include "exynos-pinctrl.h"
+
+&pinctrl_alive {
+ etc1: etc1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpa0: gpa0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+ };
+
+ gpa1: gpa1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+ };
+
+ gpa2: gpa2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+ };
+
+ gpa3: gpa3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+ };
+
+ gpa4: gpa4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpq0: gpq0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_aud {
+ gpb0: gpb0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpb1: gpb1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpb2: gpb2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_chub {
+ gph0: gph0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gph1: gph1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_cmgp {
+ gpm0: gpm0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm1: gpm1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm2: gpm2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm3: gpm3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm4: gpm4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm5: gpm5-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm6: gpm6-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm7: gpm7-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm10: gpm10-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm11: gpm11-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm12: gpm12-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm13: gpm13-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm14: gpm14-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm15: gpm15-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm16: gpm16-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm17: gpm17-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm40: gpm40-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm41: gpm41-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm42: gpm42-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpm43: gpm43-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+&pinctrl_fsys0 {
+ gpf0: gpf0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_fsys1 {
+ gpf1: gpf1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf2: gpf2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_peric0 {
+ gpg0: gpg0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg1: gpg1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg2: gpg2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp0: gpp0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp1: gpp1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp2: gpp2-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp3: gpp3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_peric1 {
+ gpc0: gpc0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc1: gpc1-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd0: gpd0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg3: gpg3-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp4: gpp4-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp5: gpp5-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpp6: gpp6-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_vts {
+ gpt0: gpt0-gpio-bank {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos9810-starlte.dts b/arch/arm64/boot/dts/exynos/exynos9810-starlte.dts
new file mode 100644
index 000000000000..fc0ddfee4cd6
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos9810-starlte.dts
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Galaxy S9 (starlte/SM-G960F) device tree source
+ *
+ * Copyright (c) 2024 Markuss Broks <markuss.broks@gmail.com>
+ * Copyright (c) 2024 Maksym Holovach <nergzd@nergzd723.xyz>
+ */
+
+/dts-v1/;
+#include "exynos9810.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Samsung Galaxy S9 (SM-G960F)";
+ compatible = "samsung,starlte", "samsung,exynos9810";
+ chassis-type = "handset";
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@cc000000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0xcc000000 (1440 * 2960 * 4)>;
+ width = <1440>;
+ height = <2960>;
+ stride = <(1440 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&key_power &key_voldown &key_volup &key_wink>;
+ pinctrl-names = "default";
+
+ power-key {
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpa2 4 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ voldown-key {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&gpa0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ volup-key {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&gpa0 3 GPIO_ACTIVE_LOW>;
+ };
+
+ /* Used for the assistant in the stock firmware. Map it as a camera button for now */
+ wink-key {
+ label = "Camera";
+ linux,code = <KEY_CAMERA>;
+ gpios = <&gpa0 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x3c800000>,
+ <0x0 0xc0000000 0x20000000>,
+ <0x0 0xe1900000 0x1e700000>,
+ <0x8 0x80000000 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@cc000000 {
+ reg = <0x0 0xcc000000 (1440 * 2960 * 4)>;
+ no-map;
+ };
+ };
+};
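
The framebuffer reg and the matching reserved-memory entry are sized with a (width * height * 4) expression, i.e. exactly one 32-bit-per-pixel frame at the panel resolution. A quick check of that value:

# Sanity-check sketch: one ARGB8888 frame at 1440x2960.
width, height, bytes_per_pixel = 1440, 2960, 4
print(hex(width * height * bytes_per_pixel))   # 0x1042800 bytes
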
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ key_power: key-power-pins {
+ samsung,pins = "gpa2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa0-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_wink: key-wink-pins {
+ samsung,pins = "gpa0-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos9810.dtsi b/arch/arm64/boot/dts/exynos/exynos9810.dtsi
new file mode 100644
index 000000000000..01eba31f7ca3
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos9810.dtsi
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Exynos 9810 SoC device tree source
+ *
+ * Copyright (c) 2024 Markuss Broks <markuss.broks@gmail.com>
+ * Copyright (c) 2024 Maksym Holovach <nergzd@nergzd723.xyz>
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "samsung,exynos9810";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&gic>;
+
+ aliases {
+ pinctrl0 = &pinctrl_alive;
+ pinctrl1 = &pinctrl_aud;
+ pinctrl2 = &pinctrl_chub;
+ pinctrl3 = &pinctrl_cmgp;
+ pinctrl4 = &pinctrl_fsys0;
+ pinctrl5 = &pinctrl_fsys1;
+ pinctrl6 = &pinctrl_peric0;
+ pinctrl7 = &pinctrl_peric1;
+ pinctrl8 = &pinctrl_vts;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ core2 {
+ cpu = <&cpu2>;
+ };
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+ core1 {
+ cpu = <&cpu5>;
+ };
+ core2 {
+ cpu = <&cpu6>;
+ };
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x1>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x2>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x3>;
+ enable-method = "psci";
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "samsung,mongoose-m3";
+ reg = <0x100>;
+ enable-method = "psci";
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "samsung,mongoose-m3";
+ reg = <0x101>;
+ enable-method = "psci";
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "samsung,mongoose-m3";
+ reg = <0x102>;
+ enable-method = "psci";
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "samsung,mongoose-m3";
+ reg = <0x103>;
+ enable-method = "psci";
+ };
+ };
+
+ oscclk: osc-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "oscclk";
+ };
+
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ };
+
+ pmu-mongoose-m3 {
+ compatible = "samsung,mongoose-pmu";
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu4>,
+ <&cpu5>,
+ <&cpu6>,
+ <&cpu7>;
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_off = <0x84000002>;
+ cpu_on = <0xc4000003>;
+ cpu_suspend = <0xc4000001>;
+ };
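
The psci node spells out raw function IDs instead of a versioned compatible. Under the SMC Calling Convention these encode a fast call, the SMC32/SMC64 convention, the standard-service owner and a function number; a small decode sketch (field layout per SMCCC, not from this file):

# Sanity-check sketch: decode the SMCCC IDs used by the psci node above.
def decode(fid):
    return {"fast_call": bool(fid >> 31 & 1),
            "smc64": bool(fid >> 30 & 1),
            "owner": fid >> 24 & 0x3f,     # 4 = standard secure service (PSCI)
            "function": fid & 0xffff}
for name, fid in (("cpu_off", 0x84000002), ("cpu_on", 0xc4000003),
                  ("cpu_suspend", 0xc4000001)):
    print(name, decode(fid))
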
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x0 0x0 0x20000000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chipid@10000000 {
+ compatible = "samsung,exynos9810-chipid",
+ "samsung,exynos850-chipid";
+ reg = <0x10000000 0x100>;
+ };
+
+ gic: interrupt-controller@10101000 {
+ compatible = "arm,gic-400";
+ reg = <0x10101000 0x1000>,
+ <0x10102000 0x1000>,
+ <0x10104000 0x2000>,
+ <0x10106000 0x2000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+ #address-cells = <0>;
+ #size-cells = <1>;
+ };
+
+ pinctrl_peric0: pinctrl@10430000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x10430000 0x1000>;
+ interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_peric1: pinctrl@10830000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x10830000 0x1000>;
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_fsys0: pinctrl@11050000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x11050000 0x1000>;
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_fsys1: pinctrl@11430000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x11430000 0x1000>;
+ interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_vts: pinctrl@13880000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x13880000 0x1000>;
+ };
+
+ pinctrl_chub: pinctrl@13a80000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x13a80000 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_alive: pinctrl@14050000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x14050000 0x1000>;
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos9810-wakeup-eint",
+ "samsung,exynos850-wakeup-eint",
+ "samsung,exynos7-wakeup-eint";
+ };
+ };
+
+ pmu_system_controller: system-controller@14060000 {
+ compatible = "samsung,exynos9810-pmu",
+ "samsung,exynos7-pmu", "syscon";
+ reg = <0x14060000 0x10000>;
+ };
+
+ pinctrl_cmgp: pinctrl@14220000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x14220000 0x1000>;
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos9810-wakeup-eint",
+ "samsung,exynos850-wakeup-eint",
+ "samsung,exynos7-wakeup-eint";
+ };
+ };
+
+ pinctrl_aud: pinctrl@17c60000 {
+ compatible = "samsung,exynos9810-pinctrl";
+ reg = <0x17c60000 0x1000>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ /* Hypervisor Virtual Timer interrupt is not wired to GIC */
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ /*
+ * Non-updatable, broken stock Samsung bootloader does not
+ * configure CNTFRQ_EL0
+ */
+ clock-frequency = <26000000>;
+ };
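
Because the bootloader leaves CNTFRQ_EL0 unprogrammed, the timer frequency has to come from this clock-frequency property (26 MHz, the same rate as oscclk). For scale, the tick period that implies:

# Sanity-check sketch: arch-timer tick period at the 26 MHz set above.
freq_hz = 26_000_000
print(1e9 / freq_hz)   # ~38.46 ns per tick
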
+};
+
+#include "exynos9810-pinctrl.dtsi"
+#include "arm/samsung/exynos-syscon-restart.dtsi"
diff --git a/arch/arm64/boot/dts/exynos/exynos990-r8s.dts b/arch/arm64/boot/dts/exynos/exynos990-r8s.dts
new file mode 100644
index 000000000000..6bae3c0ecc1c
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos990-r8s.dts
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Galaxy S20 FE (r8s/SM-G780F) device tree source
+ *
+ * Copyright (c) 2024, Denzeel Oliva <wachiturroxd150@gmail.com>
+ */
+
+/dts-v1/;
+#include "exynos990.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung Galaxy S20 FE";
+ compatible = "samsung,r8s", "samsung,exynos990";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer0: framebuffer@f1000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0xf1000000 0 (1080 * 2400 * 4)>;
+ width = <1080>;
+ height = <2400>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x3ab00000>,
+ /* Memory hole */
+ <0x0 0xc1200000 0x0 0x1ee00000>,
+ /* Memory hole */
+ <0x0 0xe1900000 0x0 0x1e700000>,
+ /* Memory hole - last block */
+ <0x8 0x80000000 0x0 0xc0000000>;
+ };
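
The memory node lists four discontiguous banks, with the "Memory hole" comments marking firmware carve-outs between them. Summing the sizes gives the RAM actually handed to Linux:

# Sanity-check sketch: total memory exposed by the four banks above.
banks = [0x3ab00000, 0x1ee00000, 0x1e700000, 0xc0000000]
total = sum(banks)
print(total, round(total / 2**30, 2))   # 5234688000 bytes, ~4.88 GiB
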
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ cont_splash_mem: framebuffer@f1000000 {
+ reg = <0 0xf1000000 0 0x13c6800>;
+ no-map;
+ };
+
+ abox_reserved: audio@f7fb0000 {
+ reg = <0 0xf7fb0000 0 0x2a50000>;
+ no-map;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&key_power &key_voldown &key_volup>;
+ pinctrl-names = "default";
+
+ power-key {
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpa2 4 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ voldown-key {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&gpa0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ volup-key {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&gpa0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ key_power: key-power-pins {
+ samsung,pins = "gpa2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa0-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos990-x1s-common.dtsi b/arch/arm64/boot/dts/exynos/exynos990-x1s-common.dtsi
new file mode 100644
index 000000000000..55fa8e9e05db
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos990-x1s-common.dtsi
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Galaxy S20 Series device tree source
+ *
+ * Copyright (c) 2024, Umer Uddin <umer.uddin@mentallysanemainliners.org>
+ */
+
+/dts-v1/;
+#include "exynos990.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer0: framebuffer@f1000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0xf1000000 0 (1440 * 3200 * 4)>;
+ width = <1440>;
+ height = <3200>;
+ stride = <(1440 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ cont_splash_mem: framebuffer@f1000000 {
+ reg = <0 0xf1000000 0 0x1194000>;
+ no-map;
+ };
+
+ abox_reserved: audio@f7fb0000 {
+ reg = <0 0xf7fb0000 0 0x2a50000>;
+ no-map;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&key_power &key_voldown &key_volup>;
+ pinctrl-names = "default";
+
+ power-key {
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpa2 4 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ voldown-key {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&gpa0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ volup-key {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&gpa0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&oscclk {
+ clock-frequency = <26000000>;
+};
+
+&pinctrl_alive {
+ key_power: key-power-pins {
+ samsung,pins = "gpa2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa0-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa0-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos990-x1s.dts b/arch/arm64/boot/dts/exynos/exynos990-x1s.dts
new file mode 100644
index 000000000000..1ae881015e0c
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos990-x1s.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Galaxy S20 5G (x1s/SM-G981B) device tree source
+ *
+ * Copyright (c) 2024, Umer Uddin <umer.uddin@mentallysanemainliners.org>
+ */
+
+/dts-v1/;
+#include "exynos990-x1s-common.dtsi"
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ model = "Samsung Galaxy S20 5G";
+ compatible = "samsung,x1s", "samsung,exynos990";
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x3ab00000>,
+ /* Memory hole */
+ <0x0 0xc1200000 0x0 0x1ee00000>,
+ /* Memory hole */
+ <0x0 0xe1900000 0x0 0x1e700000>,
+ /* Memory hole */
+ <0x8 0x80000000 0x2 0x7e800000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos990-x1slte.dts b/arch/arm64/boot/dts/exynos/exynos990-x1slte.dts
new file mode 100644
index 000000000000..d3720996ba93
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos990-x1slte.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Samsung Galaxy S20 (x1slte/SM-G980F) device tree source
+ *
+ * Copyright (c) 2024, Umer Uddin <umer.uddin@mentallysanemainliners.org>
+ */
+
+/dts-v1/;
+#include "exynos990-x1s-common.dtsi"
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ model = "Samsung Galaxy S20";
+ compatible = "samsung,x1slte", "samsung,exynos990";
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x3ab00000>,
+ /* Memory hole */
+ <0x0 0xc1200000 0x0 0x1ee00000>,
+ /* Memory hole */
+ <0x0 0xe1900000 0x0 0x1e700000>,
+ /* Memory hole */
+ <0x8 0x80000000 0x1 0x7ec00000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos990.dtsi b/arch/arm64/boot/dts/exynos/exynos990.dtsi
index c1986f00e443..9d017dbed952 100644
--- a/arch/arm64/boot/dts/exynos/exynos990.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos990.dtsi
@@ -5,6 +5,7 @@
* Copyright (c) 2024, Igor Belwon <igor.belwon@mentallysanemainliners.org>
*/
+#include <dt-bindings/clock/samsung,exynos990.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
@@ -46,7 +47,14 @@
<&cpu5>;
};
- /* There's no PMU model for cluster2, which are the Mongoose cores. */
+ mongoose-m5-pmu {
+ compatible = "samsung,mongoose-pmu";
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-affinity = <&cpu6>,
+ <&cpu7>;
+ };
cpus {
#address-cells = <1>;
@@ -199,6 +207,23 @@
interrupts = <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>;
};
+ cmu_hsi0: clock-controller@10a00000 {
+ compatible = "samsung,exynos990-cmu-hsi0";
+ reg = <0x10a00000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_BUS>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_USB31DRD>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_USBDP_DEBUG>,
+ <&cmu_top CLK_DOUT_CMU_HSI0_DPGTC>;
+ clock-names = "oscclk",
+ "bus",
+ "usb31drd",
+ "usbdp_debug",
+ "dpgtc";
+ };
+
pinctrl_hsi1: pinctrl@13040000 {
compatible = "samsung,exynos990-pinctrl";
reg = <0x13040000 0x1000>;
@@ -227,10 +252,33 @@
};
};
+ pmu_system_controller: system-controller@15860000 {
+ compatible = "samsung,exynos990-pmu",
+ "samsung,exynos7-pmu", "syscon";
+ reg = <0x15860000 0x10000>;
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&pmu_system_controller>;
+ offset = <0x3a00>; /* SWRESET */
+ mask = <0x2>; /* SWRESET_TRIGGER */
+ value = <0x2>;
+ };
+ };
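
The syscon-reboot child turns a restart request into a masked write into the PMU regmap: value is written under mask at offset (SWRESET / SWRESET_TRIGGER per the comments). A minimal sketch of that read-modify-write, assuming nothing beyond the three properties above:

# Sanity-check sketch of the masked write requested by the reboot node above.
OFFSET, MASK, VALUE = 0x3a00, 0x2, 0x2
def update_bits(regs, offset, mask, value):
    regs[offset] = (regs.get(offset, 0) & ~mask) | (value & mask)
pmu = {}
update_bits(pmu, OFFSET, MASK, VALUE)
print(hex(pmu[OFFSET]))   # 0x2 - SWRESET_TRIGGER set
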
+
pinctrl_cmgp: pinctrl@15c30000 {
compatible = "samsung,exynos990-pinctrl";
reg = <0x15c30000 0x1000>;
};
+
+ cmu_top: clock-controller@1a330000 {
+ compatible = "samsung,exynos990-cmu-top";
+ reg = <0x1a330000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>;
+ clock-names = "oscclk";
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
index c759134c909e..eb446cdc4ab6 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
@@ -183,6 +183,26 @@
"noc";
};
+ watchdog_cl0: watchdog@10060000 {
+ compatible = "samsung,exynosautov920-wdt";
+ reg = <0x10060000 0x100>;
+ interrupts = <GIC_SPI 953 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&xtcxo>, <&xtcxo>;
+ clock-names = "watchdog", "watchdog_src";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ samsung,cluster-index = <0>;
+ };
+
+ watchdog_cl1: watchdog@10070000 {
+ compatible = "samsung,exynosautov920-wdt";
+ reg = <0x10070000 0x100>;
+ interrupts = <GIC_SPI 952 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&xtcxo>, <&xtcxo>;
+ clock-names = "watchdog", "watchdog_src";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ samsung,cluster-index = <1>;
+ };
+
gic: interrupt-controller@10400000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
@@ -193,6 +213,69 @@
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
+ spdma0: dma-controller@10180000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x10180000 0x1000>;
+ interrupts = <GIC_SPI 918 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ spdma1: dma-controller@10190000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x10190000 0x1000>;
+ interrupts = <GIC_SPI 917 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ pdma0: dma-controller@101a0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x101a0000 0x1000>;
+ interrupts = <GIC_SPI 916 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ pdma1: dma-controller@101b0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x101b0000 0x1000>;
+ interrupts = <GIC_SPI 915 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ pdma2: dma-controller@101c0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x101c0000 0x1000>;
+ interrupts = <GIC_SPI 914 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ pdma3: dma-controller@101d0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x101d0000 0x1000>;
+ interrupts = <GIC_SPI 913 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ pdma4: dma-controller@101e0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x101e0000 0x1000>;
+ interrupts = <GIC_SPI 912 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu_misc CLK_MOUT_MISC_NOC_USER>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
cmu_peric0: clock-controller@10800000 {
compatible = "samsung,exynosautov920-cmu-peric0";
reg = <0x10800000 0x8000>;
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
index 387fb779bd29..e58881c61d53 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
+++ b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
@@ -10,6 +10,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/usb/pd.h>
#include "gs101-pinctrl.h"
#include "gs101.dtsi"
@@ -90,6 +91,89 @@
&hsi2c_12 {
status = "okay";
/* TODO: add the devices once drivers exist */
+
+ usb-typec@25 {
+ compatible = "maxim,max77759-tcpci", "maxim,max33359";
+ reg = <0x25>;
+ interrupts-extended = <&gpa8 2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&typec_int>;
+ pinctrl-names = "default";
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ self-powered;
+ try-power-role = "sink";
+ op-sink-microwatt = <2600000>;
+ slow-charger-loop;
+ /*
+ * max77759 operating in reverse boost mode (0xA) can
+ * source up to 1.5A while extboost can only do ~1A.
+ * Since extboost is the primary path, advertise 900mA.
+ */
+ source-pdos = <PDO_FIXED(5000, 900,
+ (PDO_FIXED_SUSPEND
+ | PDO_FIXED_USB_COMM
+ | PDO_FIXED_DATA_SWAP
+ | PDO_FIXED_DUAL_ROLE))>;
+ sink-pdos = <PDO_FIXED(5000, 3000,
+ (PDO_FIXED_DATA_SWAP
+ | PDO_FIXED_USB_COMM
+ | PDO_FIXED_HIGHER_CAP
+ | PDO_FIXED_DUAL_ROLE))
+ PDO_FIXED(9000, 2200, 0)
+ PDO_PPS_APDO(5000, 11000, 3000)>;
+ sink-vdos = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
+ IDH_PTYPE_DFP_HOST, 2, 0x18d1)
+ VDO_CERT(0x0)
+ VDO_PRODUCT(0x4ee1, 0x0)
+ VDO_UFP(UFP_VDO_VER1_2,
+ (DEV_USB2_CAPABLE
+ | DEV_USB3_CAPABLE),
+ UFP_RECEPTACLE, 0,
+ AMA_VCONN_NOT_REQ, 0,
+ UFP_ALTMODE_NOT_SUPP,
+ UFP_USB32_GEN1)
+ /* padding */ 0
+ VDO_DFP(DFP_VDO_VER1_1,
+ (HOST_USB2_CAPABLE
+ | HOST_USB3_CAPABLE),
+ DFP_RECEPTACLE, 0)>;
+ sink-vdos-v1 = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
+ 0, 0, 0x18d1)
+ VDO_CERT(0x0)
+ VDO_PRODUCT(0x4ee1, 0x0)>;
+ /*
+ * Until bootloader is updated to set those two when
+ * console is enabled, we disable PD here.
+ */
+ pd-disable;
+ typec-power-opmode = "default";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdrd31_phy_orien_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&usbdrd31_dwc3_role_switch>;
+ };
+ };
+ };
+ };
+ };
};
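
The connector advertises only a 5 V / 900 mA fixed source PDO (the comment explains the ~1 A extboost limit behind that figure) while op-sink-microwatt asks for 2.6 W when sinking. The arithmetic those values imply at 5 V:

# Sanity-check sketch for the source-pdos / op-sink-microwatt values above.
source_mv, source_ma, op_sink_uw = 5000, 900, 2_600_000
print(source_mv * source_ma // 1000)   # 4500 mW offered when sourcing
print(op_sink_uw // source_mv)         # 520 mA drawn at 5 V to meet op-sink-microwatt
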
&pinctrl_far_alive {
@@ -106,6 +190,13 @@
samsung,pin-pud = <GS101_PIN_PULL_NONE>;
samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
};
+
+ typec_int: typec-int-pins {
+ samsung,pins = "gpa8-2";
+ samsung,pin-function = <GS101_PIN_FUNC_EINT>;
+ samsung,pin-pud = <GS101_PIN_PULL_UP>;
+ samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
+ };
};
&pinctrl_gpio_alive {
@@ -142,9 +233,16 @@
role-switch-default-mode = "peripheral";
maximum-speed = "super-speed-plus";
status = "okay";
+
+ port {
+ usbdrd31_dwc3_role_switch: endpoint {
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
};
&usbdrd31_phy {
+ orientation-switch;
/* TODO: Update these once PMIC is implemented */
pll-supply = <&reg_placeholder>;
dvdd-usb20-supply = <&reg_placeholder>;
@@ -153,6 +251,12 @@
vdda-usbdp-supply = <&reg_placeholder>;
vddh-usbdp-supply = <&reg_placeholder>;
status = "okay";
+
+ port {
+ usbdrd31_phy_orien_switch: endpoint {
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+ };
};
&usi_uart {
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index 302c5beb224a..c5335dd59dfe 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -1267,7 +1267,7 @@
usbdrd31_phy: phy@11100000 {
compatible = "google,gs101-usb31drd-phy";
- reg = <0x11100000 0x0100>,
+ reg = <0x11100000 0x0200>,
<0x110f0000 0x0800>,
<0x110e0000 0x2800>;
reg-names = "phy", "pcs", "pma";
@@ -1302,6 +1302,9 @@
interrupts = <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH 0>;
phys = <&usbdrd31_phy 0>, <&usbdrd31_phy 1>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,has-lpm-erratum;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 42e6482a31cb..839432153cc7 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -165,6 +165,11 @@ imx8mn-tqma8mqnl-mba8mx-usbotg-dtbs += imx8mn-tqma8mqnl-mba8mx.dtb imx8mn-tqma8m
dtb-$(CONFIG_ARCH_MXC) += imx8mn-tqma8mqnl-mba8mx-lvds-tm070jvhg33.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-tqma8mqnl-mba8mx-usbotg.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-aristainetos3-adpismarc.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-aristainetos3-helios.dtb
+imx8mp-aristainetos3-helios-lvds-dtbs += imx8mp-aristainetos3-helios.dtb imx8mp-aristainetos3-helios-lvds.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-aristainetos3-helios-lvds.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-aristainetos3-proton2s.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-data-modul-edm-sbc.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-debix-model-a.dtb
@@ -211,8 +216,16 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-ivy.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-mallow.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-wifi-yavia.dtb
+imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtbo
+imx8mp-evk-lvds0-imx-lvds-hdmi-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds0-imx-lvds-hdmi.dtbo
+imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtbo
+imx8mp-evk-lvds1-imx-lvds-hdmi-dtbs += imx8mp-evk.dtb imx8mp-evk-lvds1-imx-lvds-hdmi.dtbo
imx8mp-evk-mx8-dlvds-lcd1-dtbs += imx8mp-evk.dtb imx8mp-evk-mx8-dlvds-lcd1.dtbo
imx8mp-evk-pcie-ep-dtbs += imx8mp-evk.dtb imx8mp-evk-pcie-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds0-imx-lvds-hdmi.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-lvds1-imx-lvds-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-mx8-dlvds-lcd1.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk-pcie-ep.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phg.dts b/arch/arm64/boot/dts/freescale/imx8mm-phg.dts
index 75bbedc6164c..a134b1833649 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phg.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phg.dts
@@ -82,7 +82,7 @@
};
panel {
- compatible = "panel-lvds";
+ compatible = "auo,g084sn05", "panel-lvds";
width-mm = <170>;
height-mm = <28>;
data-mapping = "jeida-18";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-display.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-display.dtsi
index 7675583a6b67..98dec3c42060 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-display.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2-display.dtsi
@@ -4,6 +4,34 @@
*/
/ {
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer-panel0 {
+ compatible = "simple-framebuffer";
+ clocks = <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>, /* lcdif */
+ <&clk IMX8MN_CLK_DISP_APB_ROOT>,
+ <&clk IMX8MN_CLK_DISP_AXI_ROOT>,
+ <&clk IMX8MN_VIDEO_PLL1>,
+ <&clk IMX8MN_CLK_DISP_AXI_ROOT>, /* pgc_dispmix */
+ <&clk IMX8MN_CLK_DISP_APB_ROOT>,
+ <&clk IMX8MN_CLK_DISP_AXI>,
+ <&clk IMX8MN_CLK_DISP_APB>,
+ <&clk IMX8MN_SYS_PLL2_1000M>,
+ <&clk IMX8MN_SYS_PLL1_800M>,
+ <&clk IMX8MN_CLK_DSI_CORE>, /* mipi_dsi */
+ <&clk IMX8MN_CLK_DSI_PHY_REF>;
+
+ power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_LCDIF>,
+ <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_DSI>;
+ dvdd-supply = <&reg_3v3_dvdd>;
+ avdd-supply = <&reg_v3v3_avdd>;
+ status = "disabled";
+ };
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm1 0 700000 0>; /* 700000 ns = 1337Hz */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-adpismarc.dts b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-adpismarc.dts
new file mode 100644
index 000000000000..6a688510dad9
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-adpismarc.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Heiko Schocher <hs@denx.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "imx8mp-aristainetos3a-som-v1.dtsi"
+
+&{/} {
+ model = "Aristainetos3 ADLink PI SMARC carrier";
+ compatible = "abb,imx8mp-aristanetos3-adpismarc",
+ "abb,imx8mp-aristanetos3-som",
+ "fsl,imx8mp";
+};
+
+&flexcan1 {
+ status = "okay";
+};
+
+&i2c2 {
+ gpio8: pinctrl@3e {
+ compatible = "semtech,sx1509q";
+ reg = <0x3e>;
+
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ semtech,probe-reset;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&gpio6>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios-lvds.dtso b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios-lvds.dtso
new file mode 100644
index 000000000000..9d1f3b4ccc79
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios-lvds.dtso
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Heiko Schocher <hs@denx.de>
+ */
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ model = "Aristainetos3 helios carrier with LVDS";
+ compatible = "abb,imx8mp-aristanetos3-helios",
+ "abb,imx8mp-aristanetos3-som",
+ "fsl,imx8mp";
+
+ panel_lvds: panel-lvds {
+ compatible = "lg,lb070wv8";
+ power-supply = <&reg_vcc_disp>;
+ backlight = <&lvds_backlight>;
+
+ port {
+ in_lvds0: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+ };
+
+ reg_vcc_disp: regulator-disp {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd0_vcc_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "disp_power_en_2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
+
+ lvdssel-hog {
+ gpio-hog;
+ gpios = <23 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "LVDSSEL";
+ };
+};
+
+&hdmi_blk_ctrl {
+ status = "disabled";
+};
+
+&hdmi_pvi {
+ status = "disabled";
+};
+
+&hdmi_tx {
+ status = "disabled";
+};
+
+&hdmi_tx_phy {
+ status = "disabled";
+};
+
+&irqsteer_hdmi {
+ status = "disabled";
+};
+
+&ldb_lvds_ch0 {
+ remote-endpoint = <&in_lvds0>;
+};
+
+&lcdif1 {
+ status = "disabled";
+};
+
+&lcdif2 {
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "disabled";
+};
+
+&lvds_backlight {
+ status = "okay";
+};
+
+&lvds_bridge {
+ /* IMX8MP_CLK_MEDIA_LDB = IMX8MP_CLK_MEDIA_DISP2_PIX * 7 */
+ assigned-clock-rates = <232820000>;
+ status = "okay";
+};
+
+&media_blk_ctrl {
+ /*
+ * currently it is not possible to configure the display clocks
+ * automatically, so we set them manually
+ */
+ assigned-clock-rates = <500000000>, <200000000>, <0>,
+ /* IMX8MP_CLK_MEDIA_DISP2_PIX = pixelclk of lvds panel */
+ <33260000>, <0>,
+ /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_LDB * 2 */
+ <465640000>;
+};
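
The overlay pins the whole LVDS clock chain to fixed rates derived from the panel pixel clock, following the relations in the comments (LDB = pixel clock x 7, video PLL = LDB x 2). A quick consistency check of the three literals:

# Sanity-check sketch for the assigned-clock-rates in this overlay.
pixel = 33_260_000            # IMX8MP_CLK_MEDIA_DISP2_PIX, panel pixel clock
ldb = pixel * 7               # IMX8MP_CLK_MEDIA_LDB
pll = ldb * 2                 # IMX8MP_VIDEO_PLL1
print(ldb, pll)               # 232820000 465640000, matching the values above
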
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios.dts b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios.dts
new file mode 100644
index 000000000000..a4e649a8239b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-helios.dts
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Heiko Schocher <hs@denx.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mp-aristainetos3a-som-v1.dtsi"
+
+&{/} {
+ model = "Aristainetos3 helios carrier";
+ compatible = "abb,imx8mp-aristanetos3-helios",
+ "abb,imx8mp-aristanetos3-som",
+ "fsl,imx8mp";
+
+ led-controller {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ function-enumerator = <20>;
+ gpios = <&pca6416 12 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_YELLOW>;
+ function-enumerator = <20>;
+ gpios = <&pca6416 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led-2 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ function-enumerator = <20>;
+ gpios = <&pca6416 14 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led-3 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ function-enumerator = <20>;
+ gpios = <&pca6416 15 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+};
+
+&ethphy1 {
+ status = "disabled";
+};
+
+&fec {
+ status = "disabled";
+};
+
+&i2c1 {
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ };
+};
+
+&i2c3 {
+ pca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "DIN0_CON",
+ "DIN1_CON",
+ "DIN2_CON",
+ "DIN3_CON",
+ "DIN4_CON",
+ "DIN5_CON",
+ "DIN6_CON",
+ "DIN7_CON",
+ "PM102_RES",
+ "COMx_RES",
+ "BPL_RES",
+ "PC_RES",
+ "LED_RED",
+ "LED_YELLOW",
+ "LED_GREEN",
+ "LED_BLUE";
+ };
+
+ rtc@68 {
+ compatible = "st,m41t00";
+ reg = <0x68>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-proton2s.dts b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-proton2s.dts
new file mode 100644
index 000000000000..2a736dbe96b4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3-proton2s.dts
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Heiko Schocher <hs@denx.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "imx8mp-aristainetos3a-som-v1.dtsi"
+
+&{/} {
+ model = "Aristainetos3 proton2s carrier";
+ compatible = "abb,imx8mp-aristanetos3-proton2s",
+ "abb,imx8mp-aristanetos3-som",
+ "fsl,imx8mp";
+
+ watchdog {
+ /* MAX6371KA */
+ compatible = "linux,wdt-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_watchdog_gpio>;
+ always-running;
+ gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+ hw_algo = "level";
+ /* Reset triggers in 3..9 seconds */
+ hw_margin_ms = <1500>;
+ };
+};
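
The gpio watchdog is a MAX6371 that resets the board if it is not serviced within a 3..9 s window; hw_margin_ms = 1500 makes the kernel kick it every 1.5 s, i.e. with a 2x margin against the shortest window. A one-line check of that margin:

# Sanity-check sketch for the hw_margin_ms chosen above.
hw_margin_ms, min_window_ms = 1500, 3000
print(min_window_ms / hw_margin_ms)   # 2.0x margin against the shortest reset window
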
+
+&ethphy1 {
+ status = "disabled";
+};
+
+&eqos {
+ max-speed = <100>;
+};
+
+&ecspi1 {
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+};
+
+&fec {
+ status = "disabled";
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_proton2s>;
+
+ gpio-line-names =
+ "", "", "", "", "", "", "", "POWER",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio6 {
+ gpio-line-names =
+ "RELAY0", "RELAY1", "RELAY2", "HEATER",
+ "FAN", "SPARE", "CLEAR", "FAULT",
+ "", "", "", "", "", "", "", "", "";
+};
+
+&i2c2 {
+ tlc59108@40 {
+ compatible = "ti,tlc59108";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0x0>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ function-enumerator = <20>;
+ };
+
+ led@1 {
+ reg = <0x1>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ function-enumerator = <20>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ function-enumerator = <21>;
+ };
+
+ led@3 {
+ reg = <0x3>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ function-enumerator = <21>;
+ };
+
+ led@4 {
+ reg = <0x4>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ function-enumerator = <21>;
+ };
+
+ led@5 {
+ reg = <0x5>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ function-enumerator = <22>;
+ };
+
+ led@6 {
+ reg = <0x6>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ function-enumerator = <22>;
+ };
+
+ led@7 {
+ reg = <0x7>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ function-enumerator = <22>;
+ };
+ };
+
+ rtc1: rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+};
+
+&uart1 {
+ pinctrl-0 = <&pinctrl_uart1>;
+};
+
+&uart2 {
+ pinctrl-0 = <&pinctrl_uart2>;
+};
+
+&uart3 {
+ pinctrl-0 = <&pinctrl_uart3>;
+};
+
+&uart4 {
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-active-low;
+ rs485-rts-delay = <0 0>;
+ rts-gpios = <&gpio3 9 GPIO_ACTIVE_HIGH>;
+};
+
+&usdhc1 {
+ status = "disabled";
+};
+
+&wdog1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3a-som-v1.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3a-som-v1.dtsi
new file mode 100644
index 000000000000..231e480acfd4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-aristainetos3a-som-v1.dtsi
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Heiko Schocher <hs@denx.de>
+ */
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "imx8mp.dtsi"
+
+/ {
+ model = "ADLINK LEC-iMX8MP-Q-N-4G-32G";
+ compatible = "abb,imx8mp-aristanetos3-som", "fsl,imx8mp";
+
+ aliases {
+ ethernet0 = &eqos;
+ ethernet1 = &fec;
+ mmc0 = &usdhc3; /* eMMC */
+ mmc1 = &usdhc2; /* MicroSD */
+ };
+
+ chosen {
+ bootargs = "console=ttymxc1,115200 earlycon=ec_imx6q,0x30890000,115200";
+ stdout-path = &uart2;
+ };
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+
+ port {
+ usb_dr_connector: endpoint {
+ remote-endpoint = <&usb3_dwc>;
+ };
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_led>;
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_YELLOW>;
+ function-enumerator = <0>;
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ lvds_backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds_bklt_en>;
+ pwms = <&pwm2 0 50000 0>;
+ enable-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ brightness-levels = <0 100>;
+ num-interpolated-steps = <100>;
+ default-brightness-level = <80>;
+ status = "disabled";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ /* Memory size 512 MiB..8 GiB will be filled by U-Boot */
+ reg = <0x0 0x40000000 0 0x08000000>;
+ };
+
+ pcie0_refclk: clock-pcie-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ reg_can1_stby: regulator-can1-stby {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_reg>;
+ gpio = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "can1-stby";
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2_reg>;
+ enable-active-high;
+ gpio = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "can2-stby";
+ };
+
+ reg_dp83867_2v5: regulator-enet {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio7 15 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "enet_2v5";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_usb1_host_vbus: regulator-usb1-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_vbus>;
+ enable-active-high;
+ gpio = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "usb1_host_vbus";
+ regulator-always-on;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2_vmmc>;
+ enable-active-high;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; /* SD2_RESET */
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_3V3_SD";
+ off-on-delay-us = <12000>;
+ startup-delay-us = <100>;
+ vin-supply = <&buck4>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+&clk {
+ clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+ assigned-clocks = <&clk IMX8MP_CLK_A53_SRC>,
+ <&clk IMX8MP_CLK_A53_CORE>,
+ <&clk IMX8MP_CLK_NOC>,
+ <&clk IMX8MP_CLK_NOC_IO>,
+ <&clk IMX8MP_CLK_GIC>,
+ <&clk IMX8MP_CLK_AUDIO_AHB>,
+ <&clk IMX8MP_CLK_AUDIO_AXI_SRC>,
+ <&clk IMX8MP_AUDIO_PLL1>,
+ <&clk IMX8MP_AUDIO_PLL2>,
+ <&clk IMX8MP_VIDEO_PLL1>;
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs2>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW &gpio1 6 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+/* eth0 */
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos_rgmii>;
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+ snps,force_thresh_dma_mode;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: eqos-ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_75_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_75_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ ti,dp83867-rxctrl-strap-quirk;
+ interrupt-parent = <&gpio4>;
+ interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <5>;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x8>;
+ };
+
+ queue4 {
+ snps,dcb-algorithm;
+ snps,priority = <0xf0>;
+ };
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <5>;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ snps,map-to-dma-channel = <0>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ snps,map-to-dma-channel = <1>;
+ };
+
+ queue2 {
+ snps,dcb-algorithm;
+ snps,priority = <0x4>;
+ snps,map-to-dma-channel = <2>;
+ };
+
+ queue3 {
+ snps,dcb-algorithm;
+ snps,priority = <0x8>;
+ snps,map-to-dma-channel = <3>;
+ };
+
+ queue4 {
+ snps,dcb-algorithm;
+ snps,priority = <0xf0>;
+ snps,map-to-dma-channel = <4>;
+ };
+ };
+};
+
+/* eth1 */
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec_rgmii>;
+ phy-handle = <&ethphy1>;
+ phy-mode = "rgmii-id";
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_75_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_75_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,min-output-impedance;
+ ti,dp83867-rxctrl-strap-quirk;
+ eee-broken-1000t;
+ };
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_can1_stby>;
+ status = "disabled";
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ xceiver-supply = <&reg_can2_stby>;
+ status = "disabled";
+};
+
+&hdmi_blk_ctrl {
+ status = "okay";
+};
+
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ status = "okay";
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio5 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ pmic: pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ /*
+ * i.MX 8M Plus Data Sheet for Consumer Products
+ * 3.1.4 Operating ranges
+ * MIMX8ML8CVNKZAB
+ */
+ regulators {
+ buck1: BUCK1 { /* VDD_SOC (dual-phase with BUCK3) */
+ regulator-name = "buck1";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-ramp-delay = <3125>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck2: BUCK2 { /* VDD_ARM */
+ regulator-name = "buck2";
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-ramp-delay = <3125>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck4: BUCK4 { /* VDD_3V3 */
+ regulator-name = "buck4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck5: BUCK5 { /* VDD_1V8 */
+ regulator-name = "buck5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck6: BUCK6 { /* NVCC_DRAM_1V1 */
+ regulator-name = "buck6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo1: LDO1 { /* NVCC_SNVS_1V8 */
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo2: LDO2 { /* VDDA_1V8 */
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo3: LDO3 { /* VDDA_1V8 */
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo4: LDO4 { /* PMIC_LDO4 */
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo5: LDO5 { /* NVCC_SD2 */
+ regulator-name = "ldo5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio5 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c5 {
+ #address-cells = <1>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c5>;
+ status = "okay";
+};
+
+&i2c6 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c6>;
+ pinctrl-1 = <&pinctrl_i2c6_gpio>;
+ scl-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ /* TPM - ST33TPHF2XI2C U2301 */
+ tpm: tpm@2e {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm_irq>;
+ compatible = "st,st33ktpm2xi2c", "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+
+ label = "tpm";
+ interrupt-parent = <&gpio3>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+
+ /* SX1509(0) U2605 */
+ gpio6: pinctrl@3f {
+ compatible = "semtech,sx1509q";
+ reg = <0x3f>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ semtech,probe-reset;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&gpio1>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ /* RTC U2607 */
+ rtc0: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ };
+
+ /* SX1509(1) U2606 */
+ gpio7: pinctrl@70 {
+ compatible = "semtech,sx1509q";
+ reg = <0x70>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ semtech,probe-reset;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&gpio4>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+
+ gpio6-cfg {
+ pins = "gpio6";
+ output-high;
+ };
+
+ gpio7-cfg {
+ pins = "gpio7";
+ output-high;
+ };
+ };
+};
+
+&irqsteer_hdmi {
+ status = "okay";
+};
+
+&lcdif1 {
+ status = "disabled";
+};
+
+&lcdif2 {
+ status = "disabled";
+};
+
+/* HDMI */
+&lcdif3 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ status = "disabled";
+};
+
+&mipi_dsi {
+ status = "disabled";
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio4 20 GPIO_ACTIVE_LOW>;
+ fsl,tx-deemph-gen1 = <0x1f>;
+ fsl,max-link-speed = <3>;
+ status = "okay";
+};
+
+&pcie_phy {
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
+ clocks = <&pcie0_refclk>;
+ clock-names = "ref";
+ status = "okay";
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+ #pwm-cells = <3>;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usb3_phy0 {
+ status = "okay";
+};
+
+&usb3_0 {
+ status = "okay";
+};
+
+&usb_dwc3_0 {
+ adp-disable;
+ hnp-disable;
+ srp-disable;
+ dr_mode = "otg";
+ usb-role-switch;
+ role-switch-default-mode = "peripheral";
+ status = "okay";
+
+ port {
+ usb3_dwc: endpoint {
+ remote-endpoint = <&usb_dr_connector>;
+ };
+ };
+};
+
+&usb3_phy1 {
+ status = "okay";
+};
+
+&usb3_1 {
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
+/* SD slot */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ vmmc-supply = <&buck4>;
+ vqmmc-supply = <&buck5>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_ecspi1: aristainetos3-ecspi1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x82
+ MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x82
+ MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x82
+ MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x40000
+ >;
+ };
+
+ pinctrl_ecspi1_cs2: aristainetos3-ecspi1-cs2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x40000
+ >;
+ };
+
+ pinctrl_ecspi2: aristainetos3-ecspi2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x82
+ MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x82
+ MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x82
+ MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x40000
+ >;
+ };
+
+ pinctrl_eqos_rgmii: aristainetos3-eqos-rgmii-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f
+ MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x19
+ >;
+ };
+
+ pinctrl_fec_rgmii: aristainetos3-fec-rgmii-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x3
+ MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x3
+ MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x91
+ MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x91
+ MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x91
+ MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x91
+ MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x91
+ MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x1f
+ MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x19
+ >;
+ };
+
+ pinctrl_flexcan1: aristainetos3-flexcan1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_RX__CAN1_RX 0x154
+ MX8MP_IOMUXC_SPDIF_TX__CAN1_TX 0x154
+ >;
+ };
+
+ pinctrl_flexcan1_reg: aristainetos3-flexcan1-reg-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x154
+ >;
+ };
+
+ pinctrl_flexcan2: aristainetos3-flexcan2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_MCLK__CAN2_RX 0x154
+ MX8MP_IOMUXC_SAI5_RXD3__CAN2_TX 0x154
+ >;
+ };
+
+ pinctrl_flexcan2_reg: aristainetos3-flexcan2-reg-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x154
+ >;
+ };
+
+ pinctrl_gpio3_hog: aristainetos3-gpio3-hog-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXD2__GPIO3_IO23 0xd6
+ >;
+ };
+
+ pinctrl_gpio_led: aristainetos3-gpio-led-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x19
+ >;
+ };
+
+ pinctrl_gpio_proton2s: aristainetos3-gpio-proton2s-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07 0x19
+ >;
+ };
+
+ pinctrl_hdmi: aristainetos3-hdmi-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c3
+ MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c3
+ MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000019
+ MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000019
+ >;
+ };
+
+ pinctrl_i2c1: aristainetos3-i2c1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c1_gpio: aristainetos3-i2c1-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x1c3
+ MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x1c3
+ >;
+ };
+
+ pinctrl_i2c2: aristainetos3-i2c2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2_gpio: aristainetos3-i2c2-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1c3
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1c3
+ >;
+ };
+
+ pinctrl_i2c3: aristainetos3-i2c3-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3_gpio: aristainetos3-i2c3-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x1c3
+ MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x1c3
+ >;
+ };
+
+ pinctrl_i2c5: aristainetos3-i2c5-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXD0__I2C5_SCL 0x400001c3
+ MX8MP_IOMUXC_SAI5_MCLK__I2C5_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c6: aristainetos3-i2c6-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXFS__I2C6_SCL 0x400001c3
+ MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c6_gpio: aristainetos3-i2c6-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x1c3
+ MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x1c3
+ >;
+ };
+
+ pinctrl_lcd0_vcc_en: aristainetos3-lcd0-vcc-en-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0xd6
+ >;
+ };
+
+ pinctrl_lvds_bklt_en: aristainetos3-lvds-bklt-en-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0xd6
+ >;
+ };
+
+ pinctrl_pcie: aristainetos3-pcie-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x61
+ MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x41
+ >;
+ };
+
+ pinctrl_pmic: aristainetos3-pmic-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x41
+ >;
+ };
+
+ pinctrl_pwm1: aristainetos3-pwm1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO01__PWM1_OUT 0x116
+ >;
+ };
+
+ pinctrl_pwm2: aristainetos3-pwm2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO11__PWM2_OUT 0x116
+ >;
+ };
+
+ pinctrl_tpm_irq: aristainetos3-tpm-irq-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_DQS__GPIO3_IO14 0xd6
+ >;
+ };
+
+ pinctrl_uart1: aristainetos3-uart1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140
+ MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart2: aristainetos3-uart2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x140
+ MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x140
+ MX8MP_IOMUXC_SD1_DATA5__UART2_DCE_CTS 0x140
+ >;
+ };
+
+ pinctrl_uart3: aristainetos3-uart3-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_ALE__UART3_DCE_RX 0x140
+ MX8MP_IOMUXC_NAND_CE0_B__UART3_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart4: aristainetos3-uart4-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x140
+ MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x140
+ MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09 0x140
+ MX8MP_IOMUXC_NAND_DATA02__UART4_DCE_CTS 0x140
+ >;
+ };
+
+ pinctrl_usb1_vbus: aristainetos3-usb1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x19
+ >;
+ };
+
+ pinctrl_usdhc1: aristainetos3-usdhc1-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x190
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d0
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d0
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d0
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d0
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: aristainetos3-usdhc1-100mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x194
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d4
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d4
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d4
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d4
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: aristainetos3-usdhc1-200mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x196
+ MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d6
+ MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d6
+ MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d6
+ MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d6
+ MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc2: aristainetos3-usdhc2-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: aristainetos3-usdhc2-100mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: aristainetos3-usdhc2-200mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: aristainetos3-usdhc2-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x40000080
+ >;
+ };
+
+ pinctrl_usdhc2_vmmc: aristainetos3-usdhc2-vmmc-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_usdhc3: aristainetos3-usdhc3-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: aristainetos3-usdhc3-100mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: aristainetos3-usdhc3-200mhz-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_watchdog_gpio: aristainetos3-wdog-gpio-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x19
+ >;
+ };
+
+ pinctrl_wdog: aristainetos3-wdog-grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-imx-lvds-hdmi-common.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-evk-imx-lvds-hdmi-common.dtsi
new file mode 100644
index 000000000000..44b30e9b3fde
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-imx-lvds-hdmi-common.dtsi
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ lvds-hdmi-connector {
+ compatible = "hdmi-connector";
+ label = "J2";
+ type = "a";
+
+ port {
+ lvds2hdmi_connector_in: endpoint {
+ remote-endpoint = <&it6263_out>;
+ };
+ };
+ };
+};
+
+&lcdif2 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtso b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtso
new file mode 100644
index 000000000000..4008d2fd36d6
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-dlvds-hdmi-channel0.dtso
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include "imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi"
+
+&it6263 {
+ ports {
+ port@0 {
+ reg = <0>;
+ dual-lvds-odd-pixels;
+
+ it6263_lvds_link1: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dual-lvds-even-pixels;
+
+ it6263_lvds_link2: endpoint {
+ remote-endpoint = <&ldb_lvds_ch1>;
+ };
+ };
+ };
+};
+
+&lvds_bridge {
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&it6263_lvds_link1>;
+ };
+ };
+
+ port@2 {
+ ldb_lvds_ch1: endpoint {
+ remote-endpoint = <&it6263_lvds_link2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi
new file mode 100644
index 000000000000..6eae7477abf8
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mp-evk-imx-lvds-hdmi-common.dtsi"
+
+&i2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ it6263: hdmi@4c {
+ compatible = "ite,it6263";
+ reg = <0x4c>;
+ data-mapping = "jeida-24";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds_en>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+ ivdd-supply = <&reg_buck5>;
+ ovdd-supply = <&reg_vext_3v3>;
+ txavcc18-supply = <&reg_buck5>;
+ txavcc33-supply = <&reg_vext_3v3>;
+ pvcc1-supply = <&reg_buck5>;
+ pvcc2-supply = <&reg_buck5>;
+ avcc-supply = <&reg_vext_3v3>;
+ anvdd-supply = <&reg_buck5>;
+ apvdd-supply = <&reg_buck5>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+
+ it6263_out: endpoint {
+ remote-endpoint = <&lvds2hdmi_connector_in>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi.dtso b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi.dtso
new file mode 100644
index 000000000000..9e11f261ad13
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds0-imx-lvds-hdmi.dtso
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include "imx8mp-evk-lvds0-imx-lvds-hdmi-common.dtsi"
+
+&it6263 {
+ ports {
+ port@0 {
+ reg = <0>;
+
+ it6263_lvds_link1: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+ };
+};
+
+&lvds_bridge {
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&it6263_lvds_link1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtso b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtso
new file mode 100644
index 000000000000..af2e73e36a1b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-dlvds-hdmi-channel0.dtso
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include "imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi"
+
+&it6263 {
+ ports {
+ port@0 {
+ reg = <0>;
+ dual-lvds-even-pixels;
+
+ it6263_lvds_link1: endpoint {
+ remote-endpoint = <&ldb_lvds_ch1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dual-lvds-odd-pixels;
+
+ it6263_lvds_link2: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+ };
+};
+
+&lvds_bridge {
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&it6263_lvds_link2>;
+ };
+ };
+
+ port@2 {
+ ldb_lvds_ch1: endpoint {
+ remote-endpoint = <&it6263_lvds_link1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi
new file mode 100644
index 000000000000..8cc9d361c2a4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mp-evk-imx-lvds-hdmi-common.dtsi"
+
+&i2c3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ it6263: hdmi@4c {
+ compatible = "ite,it6263";
+ reg = <0x4c>;
+ data-mapping = "jeida-24";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds_en>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+ ivdd-supply = <&reg_buck5>;
+ ovdd-supply = <&reg_vext_3v3>;
+ txavcc18-supply = <&reg_buck5>;
+ txavcc33-supply = <&reg_vext_3v3>;
+ pvcc1-supply = <&reg_buck5>;
+ pvcc2-supply = <&reg_buck5>;
+ avcc-supply = <&reg_vext_3v3>;
+ anvdd-supply = <&reg_buck5>;
+ apvdd-supply = <&reg_buck5>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+
+ it6263_out: endpoint {
+ remote-endpoint = <&lvds2hdmi_connector_in>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi.dtso b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi.dtso
new file mode 100644
index 000000000000..527a893a71b2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk-lvds1-imx-lvds-hdmi.dtso
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2024 NXP
+ */
+
+#include "imx8mp-evk-lvds1-imx-lvds-hdmi-common.dtsi"
+
+&it6263 {
+ ports {
+ port@0 {
+ reg = <0>;
+
+ it6263_lvds_link1: endpoint {
+ remote-endpoint = <&ldb_lvds_ch1>;
+ };
+ };
+ };
+};
+
+&lvds_bridge {
+ ports {
+ port@2 {
+ ldb_lvds_ch1: endpoint {
+ remote-endpoint = <&it6263_lvds_link1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index d26930f1a9e9..68e12a752edd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -938,6 +938,12 @@
>;
};
+ pinctrl_lvds_en: lvdsengrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x1c0
+ >;
+ };
+
pinctrl_pcie0: pcie0grp {
fsl,pins = <
MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B 0x60 /* open drain, pull up */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
index 30962922b361..2c75da5f064f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
@@ -52,7 +52,7 @@
&lvds_bridge {
/* IMX8MP_CLK_MEDIA_LDB = IMX8MP_CLK_MEDIA_DISP2_PIX * 7 */
- assigned-clock-rates = <482300000>;
+ assigned-clock-rates = <490000000>;
status = "okay";
ports {
@@ -70,10 +70,10 @@
*/
assigned-clock-rates = <500000000>, <200000000>, <0>,
/* IMX8MP_CLK_MEDIA_DISP2_PIX = pixelclk of lvds panel */
- <68900000>,
+ <70000000>,
<500000000>,
- /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_LDB * 2 */
- <964600000>;
+ /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_LDB */
+ <490000000>;
};
&pwm4 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index b268ba7a0e12..9d8e7231b7c6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -172,7 +172,7 @@
"Headphones", "HP_OUT",
"Builtin Speaker", "Speaker Amp OUTR",
"Speaker Amp INR", "LINE_OUT";
- simple-audio-card,hp-det-gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,hp-det-gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
simple-audio-card,cpu {
sound-dai = <&sai2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index 1b39514d5c12..bb37a32ce461 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -241,7 +241,7 @@
"Headset Mic", "MICBIAS",
"IN3R", "Headset Mic",
"DMICDAT", "Digital Mic";
- simple-audio-card,hp-det-gpio = <&gpio3 9 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,hp-det-gpios = <&gpio3 9 GPIO_ACTIVE_HIGH>;
simple-audio-card,cpu {
sound-dai = <&sai2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 0c960efd9b3d..c7bbba45f368 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -517,8 +517,6 @@
eeprom@a4 {
compatible = "zii,rave-sp-eeprom";
reg = <0xa4 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
zii,eeprom-name = "main-eeprom";
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index 0e12dcd0d4d1..8491eb53120e 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -595,6 +595,9 @@
};
&wdog3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
status = "okay";
};
@@ -932,4 +935,9 @@
>;
};
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0x31e
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts
index 236a44c1782a..f556b6569a68 100644
--- a/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts
@@ -219,6 +219,89 @@
interrupt-parent = <&gpio3>;
interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
};
+
+ pmic@25 {
+ compatible = "nxp,pca9452";
+ reg = <0x25>;
+ interrupt-parent = <&pcal6524>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <610000>;
+ regulator-max-microvolt = <950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <670000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck4: BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <1060000>;
+ regulator-max-microvolt = <1140000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: LDO3 {
+ regulator-name = "LDO3";
+ regulator-min-microvolt = <1710000>;
+ regulator-max-microvolt = <1890000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <840000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
};
&lpi2c3 {
@@ -284,6 +367,9 @@
};
&wdog3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
status = "okay";
};
@@ -465,4 +551,10 @@
MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0x31e
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
index 20ec5b3c21f4..75e67115d52f 100644
--- a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
@@ -221,6 +221,11 @@
>;
};
+ p3t1085: temperature-sensor@48 {
+ compatible = "nxp,p3t1085";
+ reg = <0x48>;
+ };
+
ptn5110: tcpc@50 {
compatible = "nxp,ptn5110", "tcpci";
reg = <0x50>;
@@ -454,6 +459,9 @@
};
&wdog3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
status = "okay";
};
@@ -641,4 +649,10 @@
MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0x31e
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
index 599df32976e2..8e939d716aac 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
@@ -627,8 +627,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- /* SION | HYS | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000191e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
@@ -659,8 +659,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- /* SION | HYS | FSEL_2 | DSE X4 */
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000191e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index 0b4b3bb866d0..2e953a05c590 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -597,8 +597,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e
- /* SION | HYS | FSEL_2 | DSE X4 */
- MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000191e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000
MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000
@@ -629,8 +629,8 @@
fsl,pins = <
/* PD | FSEL_2 | DSE X4 */
MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e
- /* SION | HYS | FSEL_2 | DSE X4 */
- MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e
+ /* SION | HYS | ODE | FSEL_2 | DSE X4 */
+ MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000191e
/* HYS | FSEL_0 | DSE no drive */
MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000
MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
index 6086cb7fa5a0..8bc066c3760c 100644
--- a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
@@ -22,6 +22,7 @@
compatible = "fsl,imx95-19x19-evk", "fsl,imx95";
aliases {
+ ethernet0 = &enetc_port0;
gpio0 = &gpio1;
gpio1 = &gpio2;
gpio2 = &gpio3;
@@ -193,6 +194,14 @@
};
};
+&enetc_port0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enetc0>;
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+};
+
&flexspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexspi1>;
@@ -338,6 +347,25 @@
status = "okay";
};
+&netcmix_blk_ctrl {
+ status = "okay";
+};
+
+&netc_blk_ctrl {
+ status = "okay";
+};
+
+&netc_emdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_emdio>;
+ status = "okay";
+
+ ethphy0: ethernet-phy@1 {
+ reg = <1>;
+ realtek,clkout-disable;
+ };
+};
+
&pcie0 {
pinctrl-0 = <&pinctrl_pcie0>;
pinctrl-names = "default";
@@ -429,6 +457,30 @@
};
&scmi_iomuxc {
+ pinctrl_emdio: emdiogrp {
+ fsl,pins = <
+ IMX95_PAD_ENET1_MDC__NETCMIX_TOP_NETC_MDC 0x57e
+ IMX95_PAD_ENET1_MDIO__NETCMIX_TOP_NETC_MDIO 0x97e
+ >;
+ };
+
+ pinctrl_enetc0: enetc0grp {
+ fsl,pins = <
+ IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3 0x57e
+ IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2 0x57e
+ IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1 0x57e
+ IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0 0x57e
+ IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL 0x57e
+ IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK 0x58e
+ IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL 0x57e
+ IMX95_PAD_ENET1_RXC__NETCMIX_TOP_ETH0_RGMII_RX_CLK 0x58e
+ IMX95_PAD_ENET1_RD0__NETCMIX_TOP_ETH0_RGMII_RD0 0x57e
+ IMX95_PAD_ENET1_RD1__NETCMIX_TOP_ETH0_RGMII_RD1 0x57e
+ IMX95_PAD_ENET1_RD2__NETCMIX_TOP_ETH0_RGMII_RD2 0x57e
+ IMX95_PAD_ENET1_RD3__NETCMIX_TOP_ETH0_RGMII_RD3 0x57e
+ >;
+ };
+
pinctrl_flexspi1: flexspi1grp {
fsl,pins = <
IMX95_PAD_XSPI1_SS0_B__FLEXSPI1_A_SS0_B 0x3fe
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index e9c7a8265d71..6b8470cb3461 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -1697,6 +1697,99 @@
status = "disabled";
};
+ netc_blk_ctrl: system-controller@4cde0000 {
+ compatible = "nxp,imx95-netc-blk-ctrl";
+ reg = <0x0 0x4cde0000 0x0 0x10000>,
+ <0x0 0x4cdf0000 0x0 0x10000>,
+ <0x0 0x4c81000c 0x0 0x18>;
+ reg-names = "ierb", "prb", "netcmix";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ power-domains = <&scmi_devpd IMX95_PD_NETC>;
+ assigned-clocks = <&scmi_clk IMX95_CLK_ENET>,
+ <&scmi_clk IMX95_CLK_ENETREF>;
+ assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD2>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD0>;
+ assigned-clock-rates = <666666666>, <250000000>;
+ clocks = <&scmi_clk IMX95_CLK_ENET>;
+ clock-names = "ipg";
+ status = "disabled";
+
+ netc_bus0: pcie@4ca00000 {
+ compatible = "pci-host-ecam-generic";
+ reg = <0x0 0x4ca00000 0x0 0x100000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x0 0x0>;
+ msi-map = <0x0 &its 0x60 0x1>, //ENETC0 PF
+ <0x10 &its 0x61 0x1>, //ENETC0 VF0
+ <0x20 &its 0x62 0x1>, //ENETC0 VF1
+ <0x40 &its 0x63 0x1>, //ENETC1 PF
+ <0x80 &its 0x64 0x1>, //ENETC2 PF
+ <0x90 &its 0x65 0x1>, //ENETC2 VF0
+ <0xa0 &its 0x66 0x1>, //ENETC2 VF1
+ <0xc0 &its 0x67 0x1>; //NETC Timer
+ /* ENETC0~2 and Timer BAR0 - non-prefetchable memory */
+ ranges = <0x82000000 0x0 0x4cc00000 0x0 0x4cc00000 0x0 0xe0000
+ /* Timer BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x4cd00000 0x0 0x4cd00000 0x0 0x10000
+ /* ENETC0~2: VF0-1 BAR0 - non-prefetchable memory */
+ 0x82000000 0x0 0x4cd20000 0x0 0x4cd20000 0x0 0x60000
+ /* ENETC0~2: VF0-1 BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x4cd80000 0x0 0x4cd80000 0x0 0x60000>;
+
+ enetc_port0: ethernet@0,0 {
+ compatible = "pci1131,e101";
+ reg = <0x000000 0 0 0 0>;
+ clocks = <&scmi_clk IMX95_CLK_ENETREF>;
+ clock-names = "ref";
+ status = "disabled";
+ };
+
+ enetc_port1: ethernet@8,0 {
+ compatible = "pci1131,e101";
+ reg = <0x004000 0 0 0 0>;
+ clocks = <&scmi_clk IMX95_CLK_ENETREF>;
+ clock-names = "ref";
+ status = "disabled";
+ };
+
+ enetc_port2: ethernet@10,0 {
+ compatible = "pci1131,e101";
+ reg = <0x008000 0 0 0 0>;
+ status = "disabled";
+ };
+
+ netc_timer: ethernet@18,0 {
+ reg = <0x00c000 0 0 0 0>;
+ status = "disabled";
+ };
+ };
+
+ netc_bus1: pcie@4cb00000 {
+ compatible = "pci-host-ecam-generic";
+ reg = <0x0 0x4cb00000 0x0 0x100000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x1 0x1>;
+ /* EMDIO BAR0 - non-prefetchable memory */
+ ranges = <0x82000000 0x0 0x4cce0000 0x0 0x4cce0000 0x0 0x20000
+ /* EMDIO BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x4cd10000 0x0 0x4cd10000 0x0 0x10000>;
+
+ netc_emdio: mdio@0,0 {
+ compatible = "pci1131,ee00";
+ reg = <0x010000 0 0 0 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
ddr-pmu@4e090dc0 {
compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu";
reg = <0x0 0x4e090dc0 0x0 0x200>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index a589954c29e2..f8b56d443850 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -727,8 +727,6 @@
compatible = "arm,pl022", "arm,primecell";
reg = <0x0 0xf7106000 0x0 0x1000>;
interrupts = <0 50 4>;
- bus-id = <0>;
- enable-dma = <0>;
clocks = <&sys_ctrl HI6220_SPI_CLK>, <&sys_ctrl HI6220_SPI_CLK>;
clock-names = "sspclk", "apb_pclk";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index 2a5eeb21da47..1235ba5a9865 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -101,10 +101,13 @@
compatible = "arm,gic-400", "arm,cortex-a15-gic";
#interrupt-cells = <3>;
interrupt-controller;
+ interrupt-parent = <&intc>;
reg = <0x0 0xfffc1000 0x0 0x1000>,
<0x0 0xfffc2000 0x0 0x2000>,
<0x0 0xfffc4000 0x0 0x2000>,
<0x0 0xfffc6000 0x0 0x2000>;
+ /* VGIC maintenance interrupt */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
clocks {
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
index 1162978329c1..51c6e19e40b8 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex5.dtsi
@@ -222,6 +222,26 @@
status = "disabled";
};
+ gpio0: gpio@ffc03200 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xffc03200 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ resets = <&rst GPIO0_RESET>;
+ status = "disabled";
+
+ porta: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <24>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
gpio1: gpio@10c03300 {
compatible = "snps,dw-apb-gpio";
reg = <0x10c03300 0x100>;
@@ -314,7 +334,7 @@
reg-io-width = <4>;
num-cs = <4>;
clocks = <&clkmgr AGILEX5_L4_MAIN_CLK>;
- dmas = <&dmac0 2>, <&dmac0 3>;
+ dmas = <&dmac0 16>, <&dmac0 17>;
dma-names = "tx", "rx";
status = "disabled";
@@ -331,6 +351,8 @@
reg-io-width = <4>;
num-cs = <4>;
clocks = <&clkmgr AGILEX5_L4_MAIN_CLK>;
+ dmas = <&dmac0 20>, <&dmac0 21>;
+ dma-names = "tx", "rx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 1e0ab35cc686..2b5e45d2c5a6 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -214,6 +214,7 @@
sata-port@1 {
phys = <&cp0_comphy3 1>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-mochabin.dts b/arch/arm64/boot/dts/marvell/armada-7040-mochabin.dts
index 7af949092b91..6bdc4f1e6939 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-mochabin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-mochabin.dts
@@ -433,11 +433,13 @@
/* 7 + 12 SATA connector (J24) */
sata-port@0 {
phys = <&cp0_comphy2 0>;
+ status = "okay";
};
/* M.2-2250 B-key (J39) */
sata-port@1 {
phys = <&cp0_comphy3 1>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 7005a32a6e1e..225a54ab688d 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -475,6 +475,7 @@
sata-port@1 {
phys = <&cp1_comphy0 1>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index 2ec19d364e62..9d45e881a97d 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -145,9 +145,12 @@
sata-port@0 {
phys = <&cp0_comphy1 0>;
+ status = "okay";
};
+
sata-port@1 {
phys = <&cp0_comphy3 1>;
+ status = "okay";
};
};
@@ -304,11 +307,9 @@
sata-port@0 {
phys = <&cp1_comphy1 0>;
- phy-names = "cp1-sata0-0-phy";
};
sata-port@1 {
phys = <&cp1_comphy3 1>;
- phy-names = "cp1-sata0-1-phy";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index e88ff5b179c8..0d4a5fd9503f 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -245,6 +245,7 @@
/* CPM Lane 5 - U29 */
sata-port@1 {
phys = <&cp0_comphy5 1>;
+ status = "okay";
};
};
@@ -344,13 +345,11 @@
/* CPS Lane 1 - U32 */
sata-port@0 {
phys = <&cp1_comphy1 0>;
- phy-names = "cp1-sata0-0-phy";
};
/* CPS Lane 3 - U31 */
sata-port@1 {
phys = <&cp1_comphy3 1>;
- phy-names = "cp1-sata0-1-phy";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts b/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
index 3e5e0651ce68..9c25a88581e4 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
@@ -408,10 +408,12 @@
sata-port@0 {
phys = <&cp0_comphy2 0>;
+ status = "okay";
};
sata-port@1 {
phys = <&cp0_comphy5 1>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 7e595ac80043..161beec0b6b0 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -347,10 +347,12 @@
sata-port@0 {
reg = <0>;
+ status = "disabled";
};
sata-port@1 {
reg = <1>;
+ status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb-B.dts b/arch/arm64/boot/dts/marvell/cn9130-crb-B.dts
index 0904cb0309ae..34194745f79e 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-crb-B.dts
+++ b/arch/arm64/boot/dts/marvell/cn9130-crb-B.dts
@@ -28,6 +28,7 @@
status = "okay";
/* Generic PHY, providing serdes lanes */
phys = <&cp0_comphy2 0>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
index b1ea7dcaed17..47234d0858dd 100644
--- a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
+++ b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
@@ -435,7 +435,7 @@
managed = "in-band-status";
phy-mode = "sgmii";
phy = <&cp1_phy0>;
- phys = <&cp0_comphy3 1>;
+ phys = <&cp1_comphy3 1>;
status = "okay";
};
@@ -444,7 +444,7 @@
managed = "in-band-status";
phy-mode = "sgmii";
phy = <&cp1_phy1>;
- phys = <&cp0_comphy5 2>;
+ phys = <&cp1_comphy5 2>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/cn9131-db.dtsi b/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
index ad7360c83048..626042fce7e2 100644
--- a/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9131-db.dtsi
@@ -127,6 +127,7 @@
sata-port@1 {
/* Generic PHY, providing serdes lanes */
phys = <&cp1_comphy5 1>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/marvell/cn9132-db.dtsi b/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
index e753cfdac697..f91fc69905b8 100644
--- a/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9132-db.dtsi
@@ -175,6 +175,7 @@
sata-port@0 {
/* Generic PHY, providing serdes lanes */
phys = <&cp2_comphy2 0>;
+ status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 8fd7b2bb7a15..b763b73788a4 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -17,10 +17,13 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-mini.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-emmc.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-nand.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-nor.dtbo
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-sata.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-bananapi-bpi-r3-sd.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986a-rfb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7986b-rfb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4-emmc.dtbo
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt7988a-bananapi-bpi-r4-sd.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8167-pumpkin.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb
@@ -55,10 +58,15 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku32.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku0.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku176.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-pumpkin.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-chinchou-sku0.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-chinchou-sku1.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-chinchou-sku16.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393216.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393217.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-magneton-sku393218.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-rusty-sku196608.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-starmie-sku0.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-starmie-sku1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-steelix-sku131072.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-steelix-sku131073.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-tentacool-sku327681.dtb
@@ -69,10 +77,16 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-voltorb-sku589824.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-corsola-voltorb-sku589825.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8186-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku0.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku1.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku2.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku3.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku4.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku5.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku6.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8188-geralt-ciri-sku7.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-hayato-r1.dtb
-dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-hayato-r5-sku2.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-spherion-r0.dtb
-dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-asurada-spherion-r4.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-dojo-r1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-tomato-r1.dtb
@@ -90,3 +104,4 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
# Device tree overlays support
DTC_FLAGS_mt7986a-bananapi-bpi-r3 := -@
DTC_FLAGS_mt7986a-bananapi-bpi-r3-mini := -@
+DTC_FLAGS_mt7988a-bananapi-bpi-r4 := -@
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index c84c47c1352f..0449686bd06b 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -115,6 +115,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default>;
pinctrl-1 = <&eth_sleep>;
+ mediatek,mac-wol;
status = "okay";
mdio {
diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
index 8e1b8c85c6ed..150ad84d5d2b 100644
--- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
@@ -8,6 +8,7 @@
compatible = "mediatek,mt6359";
interrupt-controller;
#interrupt-cells = <2>;
+ #sound-dai-cells = <1>;
pmic_adc: adc {
compatible = "mediatek,mt6359-auxadc";
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sata.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sata.dtso
new file mode 100644
index 000000000000..f7dd52981977
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-sata.dtso
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Author: Frank Wunderlich <frank-w@public-files.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+&{/} {
+ compatible = "bananapi,bpi-r3", "mediatek,mt7986a";
+
+ reg_sata12v: regulator-sata12v {
+ compatible = "regulator-fixed";
+ regulator-name = "sata12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&pio 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_sata5v: regulator-sata5v {
+ compatible = "regulator-fixed";
+ regulator-name = "sata5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&reg_sata12v>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-emmc.dtso b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-emmc.dtso
new file mode 100644
index 000000000000..3e320b2f83d5
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-emmc.dtso
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Author: Frank Wunderlich <frank-w@public-files.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+/ {
+ compatible = "bananapi,bpi-r4", "mediatek,mt7988a";
+};
+
+&{/soc/mmc@11230000} {
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_emmc_51>;
+ pinctrl-1 = <&mmc0_pins_emmc_51>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ hs400-ds-delay = <0x12814>;
+ vqmmc-supply = <&reg_1p8v>;
+ vmmc-supply = <&reg_3p3v>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-sd.dtso b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-sd.dtso
new file mode 100644
index 000000000000..663c6345dd31
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4-sd.dtso
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ * Author: Frank Wunderlich <frank-w@public-files.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "bananapi,bpi-r4", "mediatek,mt7988a";
+};
+
+&{/soc/mmc@11230000} {
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_sdcard>;
+ pinctrl-1 = <&mmc0_pins_sdcard>;
+ cd-gpios = <&pio 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ max-frequency = <52000000>;
+ cap-sd-highspeed;
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_3p3v>;
+ no-mmc;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
index efc4ad0b08b8..6623112c24c7 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7988a-bananapi-bpi-r4.dts
@@ -2,10 +2,408 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/richtek,rt5190a-regulator.h>
+
#include "mt7988a.dtsi"
/ {
compatible = "bananapi,bpi-r4", "mediatek,mt7988a";
model = "Banana Pi BPI-R4";
chassis-type = "embedded";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&cpu0 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu1 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu2 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu3 {
+ proc-supply = <&rt5190_buck3>;
+};
+
+&cpu_thermal {
+ trips {
+ cpu_trip_hot: hot {
+ temperature = <120000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_trip_active_high: active-high {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_active_med: active-med {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_active_low: active-low {
+ temperature = <40000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+};
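The hot and active trips added above have no consumer within this hunk; in a complete thermal policy they are referenced from a cooling-maps block that binds each active trip to a cooling device. A minimal sketch of that wiring, assuming a hypothetical pwm-fan fed by the SoC PWM this board enables further down (the fan node, its PWM period and cooling levels are illustrative assumptions, not part of this patch):

/ {
	/* Assumed fan node, not defined in this patch. */
	fan0: pwm-fan {
		compatible = "pwm-fan";
		pwms = <&pwm 0 50000>;	/* channel 0, 50 us period (assumed) */
		cooling-levels = <0 128 255>;
		#cooling-cells = <2>;
	};
};

&cpu_thermal {
	cooling-maps {
		map-active-high {
			trip = <&cpu_trip_active_high>;
			cooling-device = <&fan0 2 2>;
		};

		map-active-med {
			trip = <&cpu_trip_active_med>;
			cooling-device = <&fan0 1 1>;
		};

		map-active-low {
			trip = <&cpu_trip_active_low>;
			cooling-device = <&fan0 0 0>;
		};
	};
};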
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+
+ rt5190a_64: rt5190a@64 {
+ compatible = "richtek,rt5190a";
+ reg = <0x64>;
+ vin2-supply = <&rt5190_buck1>;
+ vin3-supply = <&rt5190_buck1>;
+ vin4-supply = <&rt5190_buck1>;
+
+ regulators {
+ rt5190_buck1: buck1 {
+ regulator-name = "rt5190a-buck1";
+ regulator-min-microvolt = <5090000>;
+ regulator-max-microvolt = <5090000>;
+ regulator-allowed-modes =
+ <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ buck2 {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ rt5190_buck3: buck3 {
+ regulator-name = "vproc";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ };
+ buck4 {
+ regulator-name = "rt5190a-buck4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes =
+ <RT5190A_OPMODE_AUTO>, <RT5190A_OPMODE_FPWM>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ ldo {
+ regulator-name = "rt5190a-ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_1_pins>;
+ status = "okay";
+
+ pca9545: i2c-mux@70 {
+ compatible = "nxp,pca9545";
+ reg = <0x70>;
+ reset-gpios = <&pio 5 GPIO_ACTIVE_LOW>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ size = <256>;
+ };
+
+ };
+
+ i2c_sfp1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c_sfp2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ };
+};
+
+/* mPCIe SIM2 */
+&pcie0 {
+ status = "okay";
+};
+
+/* mPCIe SIM3 */
+&pcie1 {
+ status = "okay";
+};
+
+/* M.2 key-B SIM1 */
+&pcie2 {
+ status = "okay";
+};
+
+/* M.2 key-M SSD */
+&pcie3 {
+ status = "okay";
+};
+
+&pio {
+ mdio0_pins: mdio0-pins {
+ mux {
+ function = "eth";
+ groups = "mdc_mdio0";
+ };
+
+ conf {
+ pins = "SMI_0_MDC", "SMI_0_MDIO";
+ drive-strength = <8>;
+ };
+ };
+
+ i2c0_pins: i2c0-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c0_1";
+ };
+ };
+
+ i2c1_pins: i2c1-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c1_0";
+ };
+ };
+
+ i2c1_sfp_pins: i2c1-sfp-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c1_sfp";
+ };
+ };
+
+ i2c2_0_pins: i2c2-g0-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c2_0";
+ };
+ };
+
+ i2c2_1_pins: i2c2-g1-pins {
+ mux {
+ function = "i2c";
+ groups = "i2c2_1";
+ };
+ };
+
+ gbe0_led0_pins: gbe0-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe0_led0";
+ };
+ };
+
+ gbe1_led0_pins: gbe1-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe1_led0";
+ };
+ };
+
+ gbe2_led0_pins: gbe2-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe2_led0";
+ };
+ };
+
+ gbe3_led0_pins: gbe3-led0-pins {
+ mux {
+ function = "led";
+ groups = "gbe3_led0";
+ };
+ };
+
+ gbe0_led1_pins: gbe0-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe0_led1";
+ };
+ };
+
+ gbe1_led1_pins: gbe1-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe1_led1";
+ };
+ };
+
+ gbe2_led1_pins: gbe2-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe2_led1";
+ };
+ };
+
+ gbe3_led1_pins: gbe3-led1-pins {
+ mux {
+ function = "led";
+ groups = "gbe3_led1";
+ };
+ };
+
+ i2p5gbe_led0_pins: 2p5gbe-led0-pins {
+ mux {
+ function = "led";
+ groups = "2p5gbe_led0";
+ };
+ };
+
+ i2p5gbe_led1_pins: 2p5gbe-led1-pins {
+ mux {
+ function = "led";
+ groups = "2p5gbe_led1";
+ };
+ };
+
+ mmc0_pins_emmc_45: mmc0-emmc-45-pins {
+ mux {
+ function = "flash";
+ groups = "emmc_45";
+ };
+ };
+
+ mmc0_pins_emmc_51: mmc0-emmc-51-pins {
+ mux {
+ function = "flash";
+ groups = "emmc_51";
+ };
+ };
+
+ mmc0_pins_sdcard: mmc0-sdcard-pins {
+ mux {
+ function = "flash";
+ groups = "sdcard";
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ mux {
+ function = "uart";
+ groups = "uart0";
+ };
+ };
+
+ snfi_pins: snfi-pins {
+ mux {
+ function = "flash";
+ groups = "snfi";
+ };
+ };
+
+ spi0_pins: spi0-pins {
+ mux {
+ function = "spi";
+ groups = "spi0";
+ };
+ };
+
+ spi0_flash_pins: spi0-flash-pins {
+ mux {
+ function = "spi";
+ groups = "spi0", "spi0_wp_hold";
+ };
+ };
+
+ spi1_pins: spi1-pins {
+ mux {
+ function = "spi";
+ groups = "spi1";
+ };
+ };
+
+ spi2_pins: spi2-pins {
+ mux {
+ function = "spi";
+ groups = "spi2";
+ };
+ };
+
+ spi2_flash_pins: spi2-flash-pins {
+ mux {
+ function = "spi";
+ groups = "spi2", "spi2_wp_hold";
+ };
+ };
+};
+
+&pwm {
+ status = "okay";
+};
+
+&serial0 {
+ status = "okay";
+};
+
+&ssusb1 {
+ status = "okay";
+};
+
+&tphy {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
index c9649b815276..88b56a24efca 100644
--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
@@ -3,6 +3,8 @@
#include <dt-bindings/clock/mediatek,mt7988-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <dt-bindings/reset/mediatek,mt7988-resets.h>
/ {
compatible = "mediatek,mt7988a";
@@ -14,32 +16,70 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "arm,cortex-a73";
reg = <0x0>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&mcusys CLK_MCU_ARM_DIV_SEL>,
+ <&topckgen CLK_TOP_XTAL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "arm,cortex-a73";
reg = <0x1>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&mcusys CLK_MCU_ARM_DIV_SEL>,
+ <&topckgen CLK_TOP_XTAL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
};
- cpu@2 {
+ cpu2: cpu@2 {
compatible = "arm,cortex-a73";
reg = <0x2>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&mcusys CLK_MCU_ARM_DIV_SEL>,
+ <&topckgen CLK_TOP_XTAL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
};
- cpu@3 {
+ cpu3: cpu@3 {
compatible = "arm,cortex-a73";
reg = <0x3>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&mcusys CLK_MCU_ARM_DIV_SEL>,
+ <&topckgen CLK_TOP_XTAL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <850000>;
+ };
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <850000>;
+ };
+ opp-1500000000 {
+ opp-hz = /bits/ 64 <1500000000>;
+ opp-microvolt = <850000>;
+ };
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <900000>;
+ };
};
};
@@ -61,6 +101,18 @@
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 320 KiB reserved for ARM Trusted Firmware (BL31 and BL32) */
+ secmon@43000000 {
+ reg = <0 0x43000000 0 0x50000>;
+ no-map;
+ };
+ };
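As a quick check on the comment above: 0x50000 bytes = 5 × 65,536 = 327,680 bytes = 320 KiB, so the reservation size matches the stated BL31/BL32 footprint, and the no-map property keeps the window starting at 0x43000000 out of the kernel's linear mapping.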
+
soc {
compatible = "simple-bus";
ranges;
@@ -84,6 +136,7 @@
compatible = "mediatek,mt7988-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
topckgen: clock-controller@1001b000 {
@@ -99,13 +152,66 @@
#reset-cells = <1>;
};
- clock-controller@1001e000 {
+ apmixedsys: clock-controller@1001e000 {
compatible = "mediatek,mt7988-apmixedsys";
reg = <0 0x1001e000 0 0x1000>;
#clock-cells = <1>;
};
- pwm@10048000 {
+ pio: pinctrl@1001f000 {
+ compatible = "mediatek,mt7988-pinctrl";
+ reg = <0 0x1001f000 0 0x1000>,
+ <0 0x11c10000 0 0x1000>,
+ <0 0x11d00000 0 0x1000>,
+ <0 0x11d20000 0 0x1000>,
+ <0 0x11e00000 0 0x1000>,
+ <0 0x11f00000 0 0x1000>,
+ <0 0x1000b000 0 0x1000>;
+ reg-names = "gpio", "iocfg_tr",
+ "iocfg_br", "iocfg_rb",
+ "iocfg_lb", "iocfg_tl", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 84>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <2>;
+
+ pcie0_pins: pcie0-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_2l_0_pereset", "pcie_clk_req_n0_0",
+ "pcie_wake_n0_0";
+ };
+ };
+
+ pcie1_pins: pcie1-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_2l_1_pereset", "pcie_clk_req_n1",
+ "pcie_wake_n1_0";
+ };
+ };
+
+ pcie2_pins: pcie2-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_1l_0_pereset", "pcie_clk_req_n2_0",
+ "pcie_wake_n2_0";
+ };
+ };
+
+ pcie3_pins: pcie3-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie_1l_1_pereset", "pcie_clk_req_n3",
+ "pcie_wake_n3_0";
+ };
+ };
+ };
+
+ pwm: pwm@10048000 {
compatible = "mediatek,mt7988-pwm";
reg = <0 0x10048000 0 0x1000>;
clocks = <&infracfg CLK_INFRA_66M_PWM_BCK>,
@@ -124,7 +230,13 @@
status = "disabled";
};
- serial@11000000 {
+ mcusys: mcusys@100e0000 {
+ compatible = "mediatek,mt7988-mcusys", "syscon";
+ reg = <0 0x100e0000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ serial0: serial@11000000 {
compatible = "mediatek,mt7988-uart", "mediatek,mt6577-uart";
reg = <0 0x11000000 0 0x100>;
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
@@ -157,11 +269,12 @@
status = "disabled";
};
- i2c@11003000 {
+ i2c0: i2c@11003000 {
compatible = "mediatek,mt7981-i2c";
reg = <0 0x11003000 0 0x1000>,
<0 0x10217080 0 0x80>;
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clock-div = <1>;
clocks = <&infracfg CLK_INFRA_I2C_BCK>,
<&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
clock-names = "main", "dma";
@@ -170,11 +283,12 @@
status = "disabled";
};
- i2c@11004000 {
+ i2c1: i2c@11004000 {
compatible = "mediatek,mt7981-i2c";
reg = <0 0x11004000 0 0x1000>,
<0 0x10217100 0 0x80>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clock-div = <1>;
clocks = <&infracfg CLK_INFRA_I2C_BCK>,
<&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
clock-names = "main", "dma";
@@ -183,11 +297,12 @@
status = "disabled";
};
- i2c@11005000 {
+ i2c2: i2c@11005000 {
compatible = "mediatek,mt7981-i2c";
reg = <0 0x11005000 0 0x1000>,
<0 0x10217180 0 0x80>;
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clock-div = <1>;
clocks = <&infracfg CLK_INFRA_I2C_BCK>,
<&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
clock-names = "main", "dma";
@@ -196,6 +311,17 @@
status = "disabled";
};
+ lvts: lvts@1100a000 {
+ compatible = "mediatek,mt7988-lvts-ap";
+ #thermal-sensor-cells = <1>;
+ reg = <0 0x1100a000 0 0x1000>;
+ clocks = <&infracfg CLK_INFRA_26M_THERM_SYSTEM>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&infracfg MT7988_INFRA_RST1_THERM_CTRL_SWRST>;
+ nvmem-cells = <&lvts_calibration>;
+ nvmem-cell-names = "lvts-calib-data-1";
+ };
+
usb@11190000 {
compatible = "mediatek,mt7988-xhci", "mediatek,mtk-xhci";
reg = <0 0x11190000 0 0x2e00>,
@@ -208,9 +334,10 @@
<&infracfg CLK_INFRA_133M_USB_HCK>,
<&infracfg CLK_INFRA_USB_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ status = "disabled";
};
- usb@11200000 {
+ ssusb1: usb@11200000 {
compatible = "mediatek,mt7988-xhci", "mediatek,mtk-xhci";
reg = <0 0x11200000 0 0x2e00>,
<0 0x11203e00 0 0x0100>;
@@ -222,6 +349,203 @@
<&infracfg CLK_INFRA_133M_USB_HCK_CK_P1>,
<&infracfg CLK_INFRA_USB_XHCI_CK_P1>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ phys = <&tphyu2port0 PHY_TYPE_USB2>,
+ <&tphyu3port0 PHY_TYPE_USB3>;
+ status = "disabled";
+ };
+
+ mmc0: mmc@11230000 {
+ compatible = "mediatek,mt7988-mmc";
+ reg = <0 0x11230000 0 0x1000>,
+ <0 0x11d60000 0 0x1000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&infracfg CLK_INFRA_MSDC400>,
+ <&infracfg CLK_INFRA_MSDC2_HCK>,
+ <&infracfg CLK_INFRA_66M_MSDC_0_HCK>,
+ <&infracfg CLK_INFRA_133M_MSDC_0_HCK>;
+ assigned-clocks = <&topckgen CLK_TOP_EMMC_250M_SEL>,
+ <&topckgen CLK_TOP_EMMC_400M_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_NET1PLL_D5_D2>,
+ <&apmixedsys CLK_APMIXED_MSDCPLL>;
+ clock-names = "source", "hclk", "axi_cg", "ahb_cg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie2: pcie@11280000 {
+ compatible = "mediatek,mt7986-pcie",
+ "mediatek,mt8192-pcie";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0 0x11280000 0 0x2000>;
+ reg-names = "pcie-mac";
+ linux,pci-domain = <3>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0x00 0x20000000 0x00
+ 0x20000000 0x00 0x00200000>,
+ <0x82000000 0x00 0x20200000 0x00
+ 0x20200000 0x00 0x07e00000>;
+ clocks = <&infracfg CLK_INFRA_PCIE_PIPE_P2>,
+ <&infracfg CLK_INFRA_PCIE_GFMUX_TL_P2>,
+ <&infracfg CLK_INFRA_PCIE_PERI_26M_CK_P2>,
+ <&infracfg CLK_INFRA_133M_PCIE_CK_P2>;
+ clock-names = "pl_250m", "tl_26m", "peri_26m",
+ "top_133m";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_pins>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc2 0>,
+ <0 0 0 2 &pcie_intc2 1>,
+ <0 0 0 3 &pcie_intc2 2>,
+ <0 0 0 4 &pcie_intc2 3>;
+ pcie_intc2: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ pcie3: pcie@11290000 {
+ compatible = "mediatek,mt7986-pcie",
+ "mediatek,mt8192-pcie";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0 0x11290000 0 0x2000>;
+ reg-names = "pcie-mac";
+ linux,pci-domain = <2>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0x00 0x28000000 0x00
+ 0x28000000 0x00 0x00200000>,
+ <0x82000000 0x00 0x28200000 0x00
+ 0x28200000 0x00 0x07e00000>;
+ clocks = <&infracfg CLK_INFRA_PCIE_PIPE_P3>,
+ <&infracfg CLK_INFRA_PCIE_GFMUX_TL_P3>,
+ <&infracfg CLK_INFRA_PCIE_PERI_26M_CK_P3>,
+ <&infracfg CLK_INFRA_133M_PCIE_CK_P3>;
+ clock-names = "pl_250m", "tl_26m", "peri_26m",
+ "top_133m";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_pins>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc3 0>,
+ <0 0 0 2 &pcie_intc3 1>,
+ <0 0 0 3 &pcie_intc3 2>,
+ <0 0 0 4 &pcie_intc3 3>;
+ pcie_intc3: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ pcie0: pcie@11300000 {
+ compatible = "mediatek,mt7986-pcie",
+ "mediatek,mt8192-pcie";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0 0x11300000 0 0x2000>;
+ reg-names = "pcie-mac";
+ linux,pci-domain = <0>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0x00 0x30000000 0x00
+ 0x30000000 0x00 0x00200000>,
+ <0x82000000 0x00 0x30200000 0x00
+ 0x30200000 0x00 0x07e00000>;
+ clocks = <&infracfg CLK_INFRA_PCIE_PIPE_P0>,
+ <&infracfg CLK_INFRA_PCIE_GFMUX_TL_P0>,
+ <&infracfg CLK_INFRA_PCIE_PERI_26M_CK_P0>,
+ <&infracfg CLK_INFRA_133M_PCIE_CK_P0>;
+ clock-names = "pl_250m", "tl_26m", "peri_26m",
+ "top_133m";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_pins>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ pcie_intc0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ pcie1: pcie@11310000 {
+ compatible = "mediatek,mt7986-pcie",
+ "mediatek,mt8192-pcie";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0 0x11310000 0 0x2000>;
+ reg-names = "pcie-mac";
+ linux,pci-domain = <1>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0x00 0x38000000 0x00
+ 0x38000000 0x00 0x00200000>,
+ <0x82000000 0x00 0x38200000 0x00
+ 0x38200000 0x00 0x07e00000>;
+ clocks = <&infracfg CLK_INFRA_PCIE_PIPE_P1>,
+ <&infracfg CLK_INFRA_PCIE_GFMUX_TL_P1>,
+ <&infracfg CLK_INFRA_PCIE_PERI_26M_CK_P1>,
+ <&infracfg CLK_INFRA_133M_PCIE_CK_P1>;
+ clock-names = "pl_250m", "tl_26m", "peri_26m",
+ "top_133m";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_pins>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+ pcie_intc1: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
+ tphy: t-phy@11c50000 {
+ compatible = "mediatek,mt7986-tphy",
+ "mediatek,generic-tphy-v2";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ tphyu2port0: usb-phy@11c50000 {
+ reg = <0 0x11c50000 0 0x700>;
+ clocks = <&infracfg CLK_INFRA_USB_UTMI_CK_P1>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ };
+
+ tphyu3port0: usb-phy@11c50700 {
+ reg = <0 0x11c50700 0 0x900>;
+ clocks = <&infracfg CLK_INFRA_USB_PIPE_CK_P1>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ };
};
clock-controller@11f40000 {
@@ -236,6 +560,10 @@
reg = <0 0x11f50000 0 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
+
+ lvts_calibration: calib@918 {
+ reg = <0x918 0x28>;
+ };
};
clock-controller@15000000 {
@@ -253,6 +581,21 @@
};
};
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+ thermal-sensors = <&lvts 0>;
+ trips {
+ cpu_trip_crit: crit {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
index eee64461421f..b5d4b5baf478 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -931,7 +931,7 @@
interrupt-controller;
#interrupt-cells = <2>;
- clock: mt6397clock {
+ clock: clocks {
compatible = "mediatek,mt6397-clk";
#clock-cells = <1>;
};
@@ -942,11 +942,10 @@
#gpio-cells = <2>;
};
- regulator: mt6397regulator {
+ regulators {
compatible = "mediatek,mt6397-regulator";
mt6397_vpca15_reg: buck_vpca15 {
- regulator-compatible = "buck_vpca15";
regulator-name = "vpca15";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -956,7 +955,6 @@
};
mt6397_vpca7_reg: buck_vpca7 {
- regulator-compatible = "buck_vpca7";
regulator-name = "vpca7";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -966,7 +964,6 @@
};
mt6397_vsramca15_reg: buck_vsramca15 {
- regulator-compatible = "buck_vsramca15";
regulator-name = "vsramca15";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -975,7 +972,6 @@
};
mt6397_vsramca7_reg: buck_vsramca7 {
- regulator-compatible = "buck_vsramca7";
regulator-name = "vsramca7";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -984,7 +980,6 @@
};
mt6397_vcore_reg: buck_vcore {
- regulator-compatible = "buck_vcore";
regulator-name = "vcore";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -993,7 +988,6 @@
};
mt6397_vgpu_reg: buck_vgpu {
- regulator-compatible = "buck_vgpu";
regulator-name = "vgpu";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -1002,7 +996,6 @@
};
mt6397_vdrm_reg: buck_vdrm {
- regulator-compatible = "buck_vdrm";
regulator-name = "vdrm";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1400000>;
@@ -1011,7 +1004,6 @@
};
mt6397_vio18_reg: buck_vio18 {
- regulator-compatible = "buck_vio18";
regulator-name = "vio18";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
@@ -1020,18 +1012,15 @@
};
mt6397_vtcxo_reg: ldo_vtcxo {
- regulator-compatible = "ldo_vtcxo";
regulator-name = "vtcxo";
regulator-always-on;
};
mt6397_va28_reg: ldo_va28 {
- regulator-compatible = "ldo_va28";
regulator-name = "va28";
};
mt6397_vcama_reg: ldo_vcama {
- regulator-compatible = "ldo_vcama";
regulator-name = "vcama";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -1039,18 +1028,15 @@
};
mt6397_vio28_reg: ldo_vio28 {
- regulator-compatible = "ldo_vio28";
regulator-name = "vio28";
regulator-always-on;
};
mt6397_vusb_reg: ldo_vusb {
- regulator-compatible = "ldo_vusb";
regulator-name = "vusb";
};
mt6397_vmc_reg: ldo_vmc {
- regulator-compatible = "ldo_vmc";
regulator-name = "vmc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -1058,7 +1044,6 @@
};
mt6397_vmch_reg: ldo_vmch {
- regulator-compatible = "ldo_vmch";
regulator-name = "vmch";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
@@ -1066,7 +1051,6 @@
};
mt6397_vemc_3v3_reg: ldo_vemc3v3 {
- regulator-compatible = "ldo_vemc3v3";
regulator-name = "vemc_3v3";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
@@ -1074,7 +1058,6 @@
};
mt6397_vgp1_reg: ldo_vgp1 {
- regulator-compatible = "ldo_vgp1";
regulator-name = "vcamd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -1082,7 +1065,6 @@
};
mt6397_vgp2_reg: ldo_vgp2 {
- regulator-compatible = "ldo_vgp2";
regulator-name = "vcamio";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -1090,7 +1072,6 @@
};
mt6397_vgp3_reg: ldo_vgp3 {
- regulator-compatible = "ldo_vgp3";
regulator-name = "vcamaf";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -1098,7 +1079,6 @@
};
mt6397_vgp4_reg: ldo_vgp4 {
- regulator-compatible = "ldo_vgp4";
regulator-name = "vgp4";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3300000>;
@@ -1106,7 +1086,6 @@
};
mt6397_vgp5_reg: ldo_vgp5 {
- regulator-compatible = "ldo_vgp5";
regulator-name = "vgp5";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3000000>;
@@ -1114,7 +1093,6 @@
};
mt6397_vgp6_reg: ldo_vgp6 {
- regulator-compatible = "ldo_vgp6";
regulator-name = "vgp6";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -1123,7 +1101,6 @@
};
mt6397_vibr_reg: ldo_vibr {
- regulator-compatible = "ldo_vibr";
regulator-name = "vibr";
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <3300000>;
@@ -1131,7 +1108,7 @@
};
};
- rtc: mt6397rtc {
+ rtc: rtc {
compatible = "mediatek,mt6397-rtc";
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index bb4671c18e3b..9fffed0ef4bf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -307,11 +307,10 @@
interrupt-controller;
#interrupt-cells = <2>;
- mt6397regulator: mt6397regulator {
+ regulators {
compatible = "mediatek,mt6397-regulator";
mt6397_vpca15_reg: buck_vpca15 {
- regulator-compatible = "buck_vpca15";
regulator-name = "vpca15";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -320,7 +319,6 @@
};
mt6397_vpca7_reg: buck_vpca7 {
- regulator-compatible = "buck_vpca7";
regulator-name = "vpca7";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -329,7 +327,6 @@
};
mt6397_vsramca15_reg: buck_vsramca15 {
- regulator-compatible = "buck_vsramca15";
regulator-name = "vsramca15";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -338,7 +335,6 @@
};
mt6397_vsramca7_reg: buck_vsramca7 {
- regulator-compatible = "buck_vsramca7";
regulator-name = "vsramca7";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -347,7 +343,6 @@
};
mt6397_vcore_reg: buck_vcore {
- regulator-compatible = "buck_vcore";
regulator-name = "vcore";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -356,7 +351,6 @@
};
mt6397_vgpu_reg: buck_vgpu {
- regulator-compatible = "buck_vgpu";
regulator-name = "vgpu";
regulator-min-microvolt = < 700000>;
regulator-max-microvolt = <1350000>;
@@ -365,7 +359,6 @@
};
mt6397_vdrm_reg: buck_vdrm {
- regulator-compatible = "buck_vdrm";
regulator-name = "vdrm";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1400000>;
@@ -374,7 +367,6 @@
};
mt6397_vio18_reg: buck_vio18 {
- regulator-compatible = "buck_vio18";
regulator-name = "vio18";
regulator-min-microvolt = <1620000>;
regulator-max-microvolt = <1980000>;
@@ -383,19 +375,16 @@
};
mt6397_vtcxo_reg: ldo_vtcxo {
- regulator-compatible = "ldo_vtcxo";
regulator-name = "vtcxo";
regulator-always-on;
};
mt6397_va28_reg: ldo_va28 {
- regulator-compatible = "ldo_va28";
regulator-name = "va28";
regulator-always-on;
};
mt6397_vcama_reg: ldo_vcama {
- regulator-compatible = "ldo_vcama";
regulator-name = "vcama";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <2800000>;
@@ -403,18 +392,15 @@
};
mt6397_vio28_reg: ldo_vio28 {
- regulator-compatible = "ldo_vio28";
regulator-name = "vio28";
regulator-always-on;
};
mt6397_vusb_reg: ldo_vusb {
- regulator-compatible = "ldo_vusb";
regulator-name = "vusb";
};
mt6397_vmc_reg: ldo_vmc {
- regulator-compatible = "ldo_vmc";
regulator-name = "vmc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -422,7 +408,6 @@
};
mt6397_vmch_reg: ldo_vmch {
- regulator-compatible = "ldo_vmch";
regulator-name = "vmch";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
@@ -430,7 +415,6 @@
};
mt6397_vemc_3v3_reg: ldo_vemc3v3 {
- regulator-compatible = "ldo_vemc3v3";
regulator-name = "vemc_3v3";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
@@ -438,7 +422,6 @@
};
mt6397_vgp1_reg: ldo_vgp1 {
- regulator-compatible = "ldo_vgp1";
regulator-name = "vcamd";
regulator-min-microvolt = <1220000>;
regulator-max-microvolt = <3300000>;
@@ -446,7 +429,6 @@
};
mt6397_vgp2_reg: ldo_vgp2 {
- regulator-compatible = "ldo_vgp2";
regulator-name = "vcamio";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
@@ -454,7 +436,6 @@
};
mt6397_vgp3_reg: ldo_vgp3 {
- regulator-compatible = "ldo_vgp3";
regulator-name = "vcamaf";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3300000>;
@@ -462,7 +443,6 @@
};
mt6397_vgp4_reg: ldo_vgp4 {
- regulator-compatible = "ldo_vgp4";
regulator-name = "vgp4";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3300000>;
@@ -470,7 +450,6 @@
};
mt6397_vgp5_reg: ldo_vgp5 {
- regulator-compatible = "ldo_vgp5";
regulator-name = "vgp5";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3000000>;
@@ -478,7 +457,6 @@
};
mt6397_vgp6_reg: ldo_vgp6 {
- regulator-compatible = "ldo_vgp6";
regulator-name = "vgp6";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3300000>;
@@ -486,7 +464,6 @@
};
mt6397_vibr_reg: ldo_vibr {
- regulator-compatible = "ldo_vibr";
regulator-name = "vibr";
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
index 65860b33c01f..3935d83a047e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
@@ -26,6 +26,10 @@
hid-descr-addr = <0x0001>;
};
+&mt6358codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+};
+
&qca_wifi {
qcom,ath10k-calibration-variant = "GO_DAMU";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
index e8241587949b..561770fcf69e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
@@ -12,3 +12,18 @@
chassis-type = "laptop";
compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183";
};
+
+&i2c0 {
+ touchscreen@40 {
+ compatible = "hid-over-i2c";
+ reg = <0x40>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
+
+ post-power-on-delay-ms = <70>;
+ hid-descr-addr = <0x0001>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
index 76d33540166f..c942e461a177 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
@@ -6,6 +6,21 @@
/dts-v1/;
#include "mt8183-kukui-jacuzzi.dtsi"
+&i2c0 {
+ touchscreen@40 {
+ compatible = "hid-over-i2c";
+ reg = <0x40>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
+
+ post-power-on-delay-ms = <70>;
+ hid-descr-addr = <0x0001>;
+ };
+};
+
&i2c2 {
trackpad@2c {
compatible = "hid-over-i2c";
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
index 49e053b932e7..80888bd4ad82 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -39,8 +39,6 @@
pp3300_panel: pp3300-panel {
compatible = "regulator-fixed";
regulator-name = "pp3300_panel";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
pinctrl-names = "default";
pinctrl-0 = <&pp3300_panel_pins>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 4b974bb781b1..e1495f1900a7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -269,11 +269,6 @@
};
};
-&dpi0 {
- /* TODO Re-enable after DP to Type-C port muxing can be described */
- status = "disabled";
-};
-
&gic {
mediatek,broken-save-restore-fw;
};
@@ -944,13 +939,13 @@
};
&thermal_zones {
- tboard1 {
+ tboard1-thermal {
polling-delay = <1000>; /* milliseconds */
polling-delay-passive = <0>; /* milliseconds */
thermal-sensors = <&tboard_thermistor1>;
};
- tboard2 {
+ tboard2-thermal {
polling-delay = <1000>; /* milliseconds */
polling-delay-passive = <0>; /* milliseconds */
thermal-sensors = <&tboard_thermistor2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
index 61a6f66914b8..dbdee604edab 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
@@ -522,10 +522,6 @@
status = "okay";
};
-&dsi0 {
- status = "disabled";
-};
-
&dpi0 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dpi_func_pins>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 1afeeb1155f5..0aa34e5bbaaa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1024,7 +1024,8 @@
};
keyboard: keyboard@10010000 {
- compatible = "mediatek,mt6779-keypad";
+ compatible = "mediatek,mt8183-keypad",
+ "mediatek,mt6779-keypad";
reg = <0 0x10010000 0 0x1000>;
interrupts = <GIC_SPI 186 IRQ_TYPE_EDGE_FALLING>;
clocks = <&clk26m>;
@@ -1834,6 +1835,7 @@
resets = <&mmsys MT8183_MMSYS_SW0_RST_B_DISP_DSI0>;
phys = <&mipi_tx0>;
phy-names = "dphy";
+ status = "disabled";
};
dpi0: dpi@14015000 {
@@ -1845,6 +1847,7 @@
<&mmsys CLK_MM_DPI_MM>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
+ status = "disabled";
port {
dpi_out: endpoint { };
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku0.dts
new file mode 100644
index 000000000000..5d012bc4ff0d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku0.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-chinchou.dtsi"
+
+/ {
+ model = "Google chinchou CZ1104CM2A/CZ1204CM2A";
+ compatible = "google,chinchou-sku0", "google,chinchou-sku2",
+ "google,chinchou-sku4", "google,chinchou-sku5",
+ "google,chinchou", "mediatek,mt8186";
+};
+
+&gpio_keys {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku1.dts
new file mode 100644
index 000000000000..9d6e62af6944
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku1.dts
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-chinchou.dtsi"
+
+/ {
+ model = "Google chinchou CZ1104FM2A/CZ1204FM2A/CZ1104CM2A/CZ1204CM2A";
+ compatible = "google,chinchou-sku1", "google,chinchou-sku3",
+ "google,chinchou-sku6", "google,chinchou-sku7",
+ "google,chinchou-sku17", "google,chinchou-sku20",
+ "google,chinchou-sku22", "google,chinchou-sku23",
+ "google,chinchou", "mediatek,mt8186";
+};
+
+&gpio_keys {
+ status = "disabled";
+};
+
+&i2c1 {
+ i2c-scl-internal-delay-ns = <10000>;
+
+ touchscreen: touchscreen@41 {
+ compatible = "ilitek,ili2901";
+ reg = <0x41>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vccio-supply = <&pp1800_tchscr_report_disable>;
+ vcc33-supply = <&pp3300_z2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku16.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku16.dts
new file mode 100644
index 000000000000..eb377de1fcde
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou-sku16.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-chinchou.dtsi"
+
+/ {
+ model = "Google chinchou CZ1104FM2A/CZ1204FM2A";
+ compatible = "google,chinchou-sku16", "google,chinchou-sku18",
+ "google,chinchou-sku19", "google,chinchou-sku21",
+ "google,chinchou", "mediatek,mt8186";
+};
+
+&i2c1 {
+ i2c-scl-internal-delay-ns = <10000>;
+
+ touchscreen: touchscreen@41 {
+ compatible = "ilitek,ili2901";
+ reg = <0x41>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vccio-supply = <&pp1800_tchscr_report_disable>;
+ vcc33-supply = <&pp3300_z2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou.dtsi
new file mode 100644
index 000000000000..800792157021
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-chinchou.dtsi
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola.dtsi"
+
+/ {
+ /delete-node/ speaker-codec;
+
+ pp1000_edpbrdg: regulator-pp1000-edpbrdg {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1000_edpbrdg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&en_pp1000_edpbrdg>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&pio 29 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pp3300_z2>;
+ };
+
+ pp1800_edpbrdg_dx: regulator-pp1800-edpbrdg-dx {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_edpbrdg_dx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&en_pp1800_edpbrdg>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&pio 30 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&mt6366_vio18_reg>;
+ };
+
+ pp3300_edp_dx: regulator-pp3300-edp-dx {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_edp_dx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&en_pp3300_edpbrdg>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&pio 31 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pp3300_z2>;
+ };
+
+ pp1800_tchscr_report_disable: regulator-pp1800-tchscr-report-disable {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_tchscr_report_disable";
+ pinctrl-names = "default";
+ regulator-boot-on;
+ pinctrl-0 = <&touch_pin_report>;
+ gpio = <&pio 37 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&dsi_out {
+ remote-endpoint = <&anx7625_in>;
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ anx_bridge: anx7625@58 {
+ compatible = "analogix,anx7625";
+ reg = <0x58>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&anx7625_pins>;
+ enable-gpios = <&pio 96 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 98 GPIO_ACTIVE_HIGH>;
+ vdd10-supply = <&pp1000_edpbrdg>;
+ vdd18-supply = <&pp1800_edpbrdg_dx>;
+ vdd33-supply = <&pp3300_edp_dx>;
+ analogix,lane0-swing = /bits/ 8 <0x70 0x30>;
+ analogix,lane1-swing = /bits/ 8 <0x70 0x30>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ anx7625_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ anx7625_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+ aux-bus {
+ panel: panel {
+ compatible = "edp-panel";
+ power-supply = <&pp3300_disp_x>;
+ backlight = <&backlight_lcd0>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&anx7625_out>;
+ };
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ /delete-node/ trackpad@15;
+
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+ interrupts-extended = <&pio 11 IRQ_TYPE_LEVEL_LOW>;
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+ vdd-supply = <&pp3300_s3>;
+ wakeup-source;
+ };
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+ /delete-node/ codec@1a;
+
+ rt5650: rt5650@1a {
+ compatible = "realtek,rt5650";
+ reg = <0x1a>;
+ avdd-supply = <&mt6366_vio18_reg>;
+ cpvdd-supply = <&mt6366_vio18_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&speaker_codec_pins_default>;
+ cbj-sleeve-gpios = <&pio 150 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&pio>;
+ interrupts = <17 IRQ_TYPE_EDGE_BOTH>;
+ #sound-dai-cells = <0>;
+ realtek,dmic1-data-pin = <2>;
+ realtek,jd-mode = <2>;
+ };
+};
+
+&i2c_tunnel {
+ /delete-node/ sbs-battery@b;
+
+ battery: sbs-battery@f {
+ compatible = "sbs,sbs-battery";
+ reg = <0xf>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <1>;
+ };
+};
+
+&keyboard_controller {
+ keypad,num-columns = <15>;
+
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T8 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T9 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+ MATRIX_KEY(0x00, 0x01, 0) /* T11 */
+ MATRIX_KEY(0x01, 0x05, 0) /* T12 */
+ >;
+
+ linux,keymap = <
+ CROS_STD_MAIN_KEYMAP
+ MATRIX_KEY(0x00, 0x02, KEY_BACK) /* T1 */
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH) /* T2 */
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM) /* T3 */
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE) /* T4 */
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ) /* T5 */
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN) /* T6 */
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP) /* T7 */
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE) /* T8 */
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN) /* T9 */
+ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP) /* T10 */
+ MATRIX_KEY(0x00, 0x01, KEY_MICMUTE) /* T11 */
+ MATRIX_KEY(0x01, 0x05, KEY_CONTROLPANEL) /* T12 */
+ MATRIX_KEY(0x03, 0x05, KEY_PREVIOUSSONG) /* T13 */
+ MATRIX_KEY(0x00, 0x09, KEY_PLAYPAUSE) /* T14 */
+ MATRIX_KEY(0x00, 0x0b, KEY_NEXTSONG) /* T15 */
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) /* Search */
+ MATRIX_KEY(0x01, 0x0e, KEY_LEFTCTRL) /* Left Control */
+ MATRIX_KEY(0x06, 0x0d, KEY_LEFTALT) /* Left Alt */
+ MATRIX_KEY(0x03, 0x0e, KEY_RIGHTCTRL) /* Right Control */
+ MATRIX_KEY(0x06, 0x0a, KEY_BACKSLASH) /* Backslash */
+ >;
+};
+
+&mmc1_pins_default {
+ pins-clk {
+ drive-strength = <8>;
+ };
+
+ pins-cmd-dat {
+ drive-strength = <8>;
+ };
+};
+
+&mmc1_pins_uhs {
+ pins-clk {
+ drive-strength = <8>;
+ };
+
+ pins-cmd-dat {
+ drive-strength = <8>;
+ };
+};
+
+&pen_insert {
+ wakeup-event-action = <EV_ACT_ANY>;
+};
+
+&pio {
+ anx7625_pins: anx7625-pins {
+ pins-int {
+ pinmux = <PINMUX_GPIO9__FUNC_GPIO9>;
+ input-enable;
+ bias-disable;
+ };
+
+ pins-reset {
+ pinmux = <PINMUX_GPIO98__FUNC_GPIO98>;
+ output-low;
+ };
+
+ pins-power-en {
+ pinmux = <PINMUX_GPIO96__FUNC_GPIO96>;
+ output-low;
+ };
+ };
+
+ en_pp1000_edpbrdg: pp1000-edpbrdg-en-pins {
+ pins-vreg-en {
+ pinmux = <PINMUX_GPIO29__FUNC_GPIO29>;
+ output-low;
+ };
+ };
+
+ en_pp1800_edpbrdg: pp1800-edpbrdg-en-pins {
+ pins-vreg-en {
+ pinmux = <PINMUX_GPIO30__FUNC_GPIO30>;
+ output-low;
+ };
+ };
+
+ en_pp3300_edpbrdg: pp3300-edpbrdg-en-pins {
+ pins-vreg-en {
+ pinmux = <PINMUX_GPIO31__FUNC_GPIO31>;
+ output-low;
+ };
+ };
+
+ touch_pin_report: pin-report-pins {
+ pins-touch-en {
+ pinmux = <PINMUX_GPIO37__FUNC_GPIO37>;
+ output-low;
+ };
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8186-mt6366-rt5650-sound";
+ model = "mt8186_rt5650";
+ mediatek,adsp = <&adsp>;
+
+ audio-routing =
+ "Headphone", "HPOL",
+ "Headphone", "HPOR",
+ "IN1P", "Headset Mic",
+ "IN1N", "Headset Mic",
+ "Speakers", "SPOL",
+ "Speakers", "SPOR",
+ "HDMI1", "TX";
+
+ hs-playback-dai-link {
+ codec {
+ sound-dai = <&rt5650>;
+ };
+ };
+
+ hs-capture-dai-link {
+ codec {
+ sound-dai = <&rt5650>;
+ };
+ };
+
+ spk-share-dai-link {
+ };
+
+ spk-hdmi-playback-dai-link {
+ codec {
+ sound-dai = <&it6505dptx>;
+ };
+ };
+};
+
+&touchscreen_pins {
+ /delete-node/ pins-report-sw;
+};
+
+&wifi_enable_pin {
+ pins-wifi-enable {
+ pinmux = <PINMUX_GPIO51__FUNC_GPIO51>;
+ };
+};
+
+&wifi_pwrseq {
+ reset-gpios = <&pio 51 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku0.dts
new file mode 100644
index 000000000000..23e194579bf2
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku0.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-starmie.dtsi"
+
+/ {
+ model = "Google Starmie sku0 board";
+ compatible = "google,starmie-sku0", "google,starmie-sku2",
+ "google,starmie-sku3", "google,starmie",
+ "mediatek,mt8186";
+};
+
+&panel {
+ compatible = "starry,ili9882t";
+};
+
+&i2c1 {
+ touchscreen: touchscreen@41 {
+ compatible = "ilitek,ili9882t";
+ reg = <0x41>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ panel = <&panel>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vccio-supply = <&mt6366_vio18_reg>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku1.dts
new file mode 100644
index 000000000000..214b972c9357
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie-sku1.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola-starmie.dtsi"
+
+/ {
+ model = "Google Starmie sku1 board";
+ compatible = "google,starmie-sku1", "google,starmie-sku4",
+ "google,starmie", "mediatek,mt8186";
+};
+
+&panel {
+ compatible = "starry,himax83102-j02", "himax,hx83102";
+};
+
+&i2c1 {
+ touchscreen_himax: touchscreen@4f {
+ compatible = "hid-over-i2c";
+ reg = <0x4f>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ vdd-supply = <&mt6366_vio18_reg>;
+ panel = <&panel>;
+ post-power-on-delay-ms = <450>;
+ hid-descr-addr = <0x0001>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi
new file mode 100644
index 000000000000..5ea8bdc00e81
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-starmie.dtsi
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8186-corsola.dtsi"
+
+/ {
+ en_pp6000_mipi_disp_150ma: en-pp6000-mipi-disp-150ma {
+ compatible = "regulator-fixed";
+ regulator-name = "en_pp6000_mipi_disp_150ma";
+ gpio = <&pio 154 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&en_pp6000_mipi_disp_150ma_fixed_pins>;
+ };
+
+ /*
+ * Starmie does not have a 3.3V display regulator. It is replaced
+ * with a 6V module for enabling the panel, re-using the eDP GPIOs.
+ */
+ /delete-node/ pp3300_disp_x;
+ en_pp6000_mipi_disp: en-regulator-pp6000-mipi-disp {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_panel_fixed_pins>;
+ gpios = <&pio 153 GPIO_ACTIVE_HIGH>;
+ regulator-name = "en_pp6000_mipi_disp";
+ enable-active-high;
+ regulator-enable-ramp-delay = <3000>;
+ vin-supply = <&pp3300_z2>;
+ };
+
+ tboard_thermistor1: thermal-sensor1 {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&auxadc 0>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = < (-5000) 1492
+ 0 1413
+ 5000 1324
+ 10000 1227
+ 15000 1121
+ 20000 1017
+ 25000 900
+ 30000 797
+ 35000 698
+ 40000 606
+ 45000 522
+ 50000 449
+ 55000 383
+ 60000 327
+ 65000 278
+ 70000 236
+ 75000 201
+ 80000 171
+ 85000 145
+ 90000 163
+ 95000 124
+ 100000 91
+ 105000 78
+ 110000 67
+ 115000 58
+ 120000 50
+ 125000 44>;
+ };
+
+ tboard_thermistor2: thermal-sensor2 {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&auxadc 1>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = < (-5000) 1492
+ 0 1413
+ 5000 1324
+ 10000 1227
+ 15000 1121
+ 20000 1017
+ 25000 900
+ 30000 797
+ 35000 698
+ 40000 606
+ 45000 522
+ 50000 449
+ 55000 383
+ 60000 327
+ 65000 278
+ 70000 236
+ 75000 201
+ 80000 171
+ 85000 145
+ 90000 163
+ 95000 124
+ 100000 91
+ 105000 78
+ 110000 67
+ 115000 58
+ 120000 50
+ 125000 44>;
+ };
+};
+
+/*
+ * Starmie does not have an EC keyboard. Remove the default keyboard
+ * controller and replace it with the driver for the side switches.
+ */
+/delete-node/ &keyboard_controller;
+
+&cros_ec {
+ cbas: cbas {
+ compatible = "google,cros-cbas";
+ };
+
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+};
+
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel: panel@0 {
+ /* compatible will be set in board dts */
+ reg = <0>;
+ enable-gpios = <&pio 98 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default_pins>;
+ avdd-supply = <&en_pp6000_mipi_disp>;
+ avee-supply = <&en_pp6000_mipi_disp_150ma>;
+ pp1800-supply = <&mt6366_vio18_reg>;
+ backlight = <&backlight_lcd0>;
+ rotation = <270>;
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+};
+
+&dsi_out {
+ remote-endpoint = <&panel_in>;
+};
+
+&i2c0 {
+ status = "disabled";
+};
+
+&i2c2 {
+ status = "disabled";
+};
+
+&i2c4 {
+ status = "disabled";
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+};
+
+&mmc1_pins_default {
+ pins-clk {
+ drive-strength = <8>;
+ };
+
+ pins-cmd-dat {
+ drive-strength = <8>;
+ };
+};
+
+&mmc1_pins_uhs {
+ pins-clk {
+ drive-strength = <8>;
+ };
+
+ pins-cmd-dat {
+ drive-strength = <8>;
+ };
+};
+
+&pen_insert {
+ wakeup-event-action = <EV_ACT_ANY>;
+};
+
+&pio {
+ /* 185 lines */
+ gpio-line-names = "TP",
+ "TP",
+ "TP",
+ "I2S0_HP_DI",
+ "I2S3_DP_SPKR_DO",
+ "SAR_INT_ODL",
+ "BT_WAKE_AP_ODL",
+ "WIFI_INT_ODL",
+ "DPBRDG_INT_ODL",
+ "NC",
+ "EC_AP_HPD_OD",
+ "NC",
+ "TCHSCR_INT_1V8_ODL",
+ "EC_AP_INT_ODL",
+ "EC_IN_RW_ODL",
+ "GSC_AP_INT_ODL",
+ /*
+ * AP_FLASH_WP_L is part of the crossystem ABI. Rev1 schematics
+ * call it AP_WP_ODL.
+ */
+ "AP_FLASH_WP_L",
+ "HP_INT_ODL",
+ "PEN_EJECT_OD",
+ "NC",
+ "NC",
+ "UCAM_SEN_EN",
+ "NC",
+ "NC",
+ "NC",
+ "I2S2_DP_SPK_MCK",
+ "I2S2_DP_SPKR_BCK",
+ "I2S2_DP_SPKR_LRCK",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "UART_GSC_TX_AP_RX",
+ "UART_AP_TX_GSC_RX",
+ "UART_DBGCON_TX_ADSP_RX",
+ "UART_ADSP_TX_DBGCON_RX",
+ "NC",
+ "TCHSCR_REPORT_DISABLE",
+ "NC",
+ "EN_PP1800_DPBRDG",
+ "SPI_AP_CLK_EC",
+ "SPI_AP_CS_EC_L",
+ "SPI_AP_DO_EC_DI",
+ "SPI_AP_DI_EC_DO",
+ "SPI_AP_CLK_GSC",
+ "SPI_AP_CS_GSC_L",
+ "SPI_AP_DO_GSC_DI",
+ "SPI_AP_DI_GSC_DO",
+ "UART_DBGCON_TX_SCP_RX",
+ "UART_SCP_TX_DBGCON_RX",
+ "EN_PP1200_CAM_X",
+ "WLAN_MODULE_RST_L",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "I2S1_HP_DO",
+ "I2S1_HP_BCK",
+ "I2S1_HP_LRCK",
+ "I2S1_HP_MCK",
+ "TCHSCR_RST_1V8_L",
+ "SPI_AP_CLK_ROM",
+ "SPI_AP_CS_ROM_L",
+ "SPI_AP_DO_ROM_DI",
+ "SPI_AP_DI_ROM_DO",
+ "NC",
+ "NC",
+ "EMMC_STRB",
+ "EMMC_CLK",
+ "EMMC_CMD",
+ "EMMC_RST_L",
+ "EMMC_DATA0",
+ "EMMC_DATA1",
+ "EMMC_DATA2",
+ "EMMC_DATA3",
+ "EMMC_DATA4",
+ "EMMC_DATA5",
+ "EMMC_DATA6",
+ "EMMC_DATA7",
+ "AP_KPCOL0",
+ "NC",
+ "NC",
+ "NC",
+ "TP",
+ "SDIO_CLK",
+ "SDIO_CMD",
+ "SDIO_DATA0",
+ "SDIO_DATA1",
+ "SDIO_DATA2",
+ "SDIO_DATA3",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "MIPI_BL_PWM_1V8",
+ "DISP_RST_1V8_L",
+ "MIPI_DPI_CLK",
+ "MIPI_DPI_VSYNC",
+ "MIPI_DPI_HSYNC",
+ "MIPI_DPI_DE",
+ "MIPI_DPI_D0",
+ "MIPI_DPI_D1",
+ "MIPI_DPI_D2",
+ "MIPI_DPI_D3",
+ "MIPI_DPI_D4",
+ "MIPI_DPI_D5",
+ "MIPI_DPI_D6",
+ "MIPI_DPI_DA7",
+ "MIPI_DPI_D8",
+ "MIPI_DPI_D9",
+ "MIPI_DPI_D10",
+ "MIPI_DPI_D11",
+ "PCM_BT_CLK",
+ "PCM_BT_SYNC",
+ "PCM_BT_DI",
+ "PCM_BT_DO",
+ "JTAG_TMS_TP",
+ "JTAG_TCK_TP",
+ "JTAG_TDI_TP",
+ "JTAG_TDO_TP",
+ "JTAG_TRSTN_TP",
+ "NC",
+ "NC",
+ "UCAM_DET_ODL",
+ "NC",
+ "NC",
+ "AP_I2C_TCHSCR_SCL_1V8",
+ "AP_I2C_TCHSCR_SDA_1V8",
+ "NC",
+ "NC",
+ "AP_I2C_DPBRDG_SCL_1V8",
+ "AP_I2C_DPBRDG_SDA_1V8",
+ "NC",
+ "NC",
+ "AP_I2C_AUD_SCL_1V8",
+ "AP_I2C_AUD_SDA_1V8",
+ "AP_I2C_DISP_SCL_1V8",
+ "AP_I2C_DISP_SDA_1V8",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "SCP_I2C_SENSOR_SCL_1V8",
+ "SCP_I2C_SENSOR_SDA_1V8",
+ "AP_EC_WARM_RST_REQ",
+ "AP_XHCI_INIT_DONE",
+ "USB3_HUB_RST_L",
+ "EN_SPKR",
+ "BEEP_ON",
+ "AP_DISP_BKLTEN",
+ "EN_PP6000_MIPI_DISP",
+ "EN_PP6000_MIPI_DISP_150MA",
+ "BT_KILL_1V8_L",
+ "WIFI_KILL_1V8_L",
+ "PWRAP_SPI0_CSN",
+ "PWRAP_SPI0_CK",
+ "PWRAP_SPI0_MO",
+ "PWRAP_SPI0_MI",
+ "SRCLKENA0",
+ "SRCLKENA1",
+ "SCP_VREQ_VAO",
+ "AP_RTC_CLK32K",
+ "AP_PMIC_WDTRST_L",
+ "AUD_CLK_MOSI",
+ "AUD_SYNC_MOSI",
+ "AUD_DAT_MOSI0",
+ "AUD_DAT_MOSI1",
+ "AUD_CLK_MISO",
+ "AUD_SYNC_MISO",
+ "AUD_DAT_MISO0",
+ "AUD_DAT_MISO1",
+ "NC",
+ "NC",
+ "NC",
+ "DPBRDG_RST_L",
+ "LTE_W_DISABLE_L",
+ "LTE_SAR_DETECT_L",
+ "EN_PP3300_LTE_X",
+ "LTE_PWR_OFF_L",
+ "LTE_RESET_L",
+ "TP",
+ "TP";
+
+ dpi_default_pins: dpi-default-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO103__FUNC_GPIO103>,
+ <PINMUX_GPIO104__FUNC_GPIO104>,
+ <PINMUX_GPIO105__FUNC_GPIO105>,
+ <PINMUX_GPIO106__FUNC_GPIO106>,
+ <PINMUX_GPIO107__FUNC_GPIO107>,
+ <PINMUX_GPIO108__FUNC_GPIO108>,
+ <PINMUX_GPIO109__FUNC_GPIO109>,
+ <PINMUX_GPIO110__FUNC_GPIO110>,
+ <PINMUX_GPIO111__FUNC_GPIO111>,
+ <PINMUX_GPIO112__FUNC_GPIO112>,
+ <PINMUX_GPIO113__FUNC_GPIO113>,
+ <PINMUX_GPIO114__FUNC_GPIO114>,
+ <PINMUX_GPIO101__FUNC_GPIO101>,
+ <PINMUX_GPIO100__FUNC_GPIO100>,
+ <PINMUX_GPIO102__FUNC_GPIO102>,
+ <PINMUX_GPIO99__FUNC_GPIO99>;
+ drive-strength = <10>;
+ output-low;
+ };
+ };
+
+ dpi_func_pins: dpi-func-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO103__FUNC_DPI_DATA0>,
+ <PINMUX_GPIO104__FUNC_DPI_DATA1>,
+ <PINMUX_GPIO105__FUNC_DPI_DATA2>,
+ <PINMUX_GPIO106__FUNC_DPI_DATA3>,
+ <PINMUX_GPIO107__FUNC_DPI_DATA4>,
+ <PINMUX_GPIO108__FUNC_DPI_DATA5>,
+ <PINMUX_GPIO109__FUNC_DPI_DATA6>,
+ <PINMUX_GPIO110__FUNC_DPI_DATA7>,
+ <PINMUX_GPIO111__FUNC_DPI_DATA8>,
+ <PINMUX_GPIO112__FUNC_DPI_DATA9>,
+ <PINMUX_GPIO113__FUNC_DPI_DATA10>,
+ <PINMUX_GPIO114__FUNC_DPI_DATA11>,
+ <PINMUX_GPIO101__FUNC_DPI_HSYNC>,
+ <PINMUX_GPIO100__FUNC_DPI_VSYNC>,
+ <PINMUX_GPIO102__FUNC_DPI_DE>,
+ <PINMUX_GPIO99__FUNC_DPI_PCLK>;
+ drive-strength = <10>;
+ };
+ };
+
+ en_pp6000_mipi_disp_150ma_fixed_pins: en-pp6000-mipi-disp-150ma-fixed-pins {
+ pins-en {
+ pinmux = <PINMUX_GPIO154__FUNC_GPIO154>;
+ output-low;
+ };
+ };
+
+ panel_default_pins: panel-default-pins {
+ pins-en {
+ pinmux = <PINMUX_GPIO98__FUNC_GPIO98>;
+ output-low;
+ };
+ };
+};
+
+&usb_c1 {
+ status = "disabled";
+};
+
+&thermal_zones {
+ tboard1-thermal {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&tboard_thermistor1>;
+ };
+
+ tboard2-thermal {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&tboard_thermistor2>;
+ };
+};
+
+&wifi_pwrseq {
+ reset-gpios = <&pio 51 1>;
+};
+
+/*
+ * The battery on Starmie uses a different address than the default.
+ * Remove the old node so the "battery" alias can be reused.
+ */
+/delete-node/ &battery;
+&i2c_tunnel {
+ battery: sbs-battery@f {
+ compatible = "sbs,sbs-battery";
+ reg = <0xf>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index cfcc7909dfe6..cebb134331fb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -424,6 +424,7 @@
ovdd-supply = <&mt6366_vsim2_reg>;
pwr18-supply = <&pp1800_dpbrdg_dx>;
reset-gpios = <&pio 177 GPIO_ACTIVE_LOW>;
+ extcon = <&usbc_extcon>;
ports {
#address-cells = <1>;
@@ -1275,7 +1276,7 @@
interrupts-extended = <&pio 201 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
- mt6366codec: codec {
+ mt6366codec: audio-codec {
compatible = "mediatek,mt6366-sound", "mediatek,mt6358-sound";
Avdd-supply = <&mt6366_vaud28_reg>;
mediatek,dmic-mode = <1>; /* one-wire */
@@ -1656,6 +1657,11 @@
try-power-role = "source";
};
};
+
+ usbc_extcon: extcon0 {
+ compatible = "google,extcon-usbc-cros-ec";
+ google,usb-port-id = <0>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
index d3c3c2a40adc..b91f88ffae0e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
@@ -1577,6 +1577,8 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x420 2>;
status = "disabled";
usb_host0: usb@11200000 {
@@ -1590,8 +1592,6 @@
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
interrupts = <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH 0>;
- mediatek,syscon-wakeup = <&pericfg 0x420 2>;
- wakeup-source;
status = "disabled";
};
};
@@ -1643,6 +1643,8 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x424 2>;
status = "disabled";
usb_host1: usb@11280000 {
@@ -1656,8 +1658,6 @@
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck","xhci_ck";
interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>;
- mediatek,syscon-wakeup = <&pericfg 0x424 2>;
- wakeup-source;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku0.dts
new file mode 100644
index 000000000000..79d6d12394b9
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku0.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku0 board";
+ compatible = "google,ciri-sku0", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "boe,nv110wum-l60", "himax,hx83102";
+};
+
+&sound {
+ compatible = "mediatek,mt8188-rt5682s";
+ model = "mt8188_m98390_5682";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "IN1P", "Headset Mic",
+ "Left Spk", "Front Left BE_OUT",
+ "Right Spk", "Front Right BE_OUT";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku1.dts
new file mode 100644
index 000000000000..ef5ea9d12b1d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku1.dts
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku1 board";
+ compatible = "google,ciri-sku1", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "ivo,t109nw41", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ audio-codec@1a;
+
+ es8326: audio-codec@19 {
+ compatible = "everest,es8326";
+ reg = <0x19>;
+ interrupts-extended = <&pio 108 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_codec_pins>;
+ #sound-dai-cells = <0>;
+ everest,jack-pol = [0e];
+ everest,interrupt-clk = [00];
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-es8326";
+ model = "mt8188_m98390_8326";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "MIC1", "Headset Mic",
+ "Left Spk", "Front Left BE_OUT",
+ "Right Spk", "Front Right BE_OUT";
+
+ dai-link-2 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+
+ dai-link-3 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku2.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku2.dts
new file mode 100644
index 000000000000..ef56786fc2be
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku2.dts
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku2 board";
+ compatible = "google,ciri-sku2", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "boe,nv110wum-l60", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ audio-codec@1a;
+
+ es8326: audio-codec@19 {
+ compatible = "everest,es8326";
+ reg = <0x19>;
+ interrupts-extended = <&pio 108 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_codec_pins>;
+ #sound-dai-cells = <0>;
+ everest,jack-pol = [0e];
+ everest,interrupt-clk = [00];
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-es8326";
+ model = "mt8188_m98390_8326";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "MIC1", "Headset Mic",
+ "Left Spk", "Front Left BE_OUT",
+ "Right Spk", "Front Right BE_OUT";
+
+ dai-link-2 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+
+ dai-link-3 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku3.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku3.dts
new file mode 100644
index 000000000000..524f7f0064c1
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku3.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku3 board";
+ compatible = "google,ciri-sku3", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "ivo,t109nw41", "himax,hx83102";
+};
+
+&sound {
+ compatible = "mediatek,mt8188-rt5682s";
+ model = "mt8188_m98390_5682";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "IN1P", "Headset Mic",
+ "Left Spk", "Front Left BE_OUT",
+ "Right Spk", "Front Right BE_OUT";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku4.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku4.dts
new file mode 100644
index 000000000000..ea953d7e1543
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku4.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku4 board (rev4)";
+ compatible = "google,ciri-sku4", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "boe,nv110wum-l60", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ amplifier@38;
+ /delete-node/ amplifier@39;
+
+ tas2563: amplifier@4f {
+ compatible = "ti,tas2563", "ti,tas2781";
+ reg = <0x4f>, <0x4c>; /* left / right channel */
+ reset-gpios = <&pio 118 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-rt5682s";
+ model = "mt8188_tas2563_5682";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "IN1P", "Headset Mic";
+
+ dai-link-1 {
+ codec {
+ sound-dai = <&tas2563>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku5.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku5.dts
new file mode 100644
index 000000000000..bf87201ccf27
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku5.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku5 board (rev4)";
+ compatible = "google,ciri-sku5", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "ivo,t109nw41", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ audio-codec@1a;
+ /delete-node/ amplifier@38;
+ /delete-node/ amplifier@39;
+
+ es8326: audio-codec@19 {
+ compatible = "everest,es8326";
+ reg = <0x19>;
+ interrupts-extended = <&pio 108 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_codec_pins>;
+ #sound-dai-cells = <0>;
+ everest,jack-pol = [0e];
+ everest,interrupt-clk = [00];
+ };
+
+ tas2563: amplifier@4f {
+ compatible = "ti,tas2563", "ti,tas2781";
+ reg = <0x4f>, <0x4c>; /* left / right channel */
+ reset-gpios = <&pio 118 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-es8326";
+ model = "mt8188_tas2563_8326";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "MIC1", "Headset Mic";
+
+ dai-link-1 {
+ codec {
+ sound-dai = <&tas2563>;
+ };
+ };
+
+ dai-link-2 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+
+ dai-link-3 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku6.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku6.dts
new file mode 100644
index 000000000000..17d7359dfb6a
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku6.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku6 board (rev4)";
+ compatible = "google,ciri-sku6", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "boe,nv110wum-l60", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ audio-codec@1a;
+ /delete-node/ amplifier@38;
+ /delete-node/ amplifier@39;
+
+ es8326: audio-codec@19 {
+ compatible = "everest,es8326";
+ reg = <0x19>;
+ interrupts-extended = <&pio 108 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_codec_pins>;
+ #sound-dai-cells = <0>;
+ everest,jack-pol = [0e];
+ everest,interrupt-clk = [00];
+ };
+
+ tas2563: amplifier@4f {
+ compatible = "ti,tas2563", "ti,tas2781";
+ reg = <0x4f>, <0x4c>; /* left / right channel */
+ reset-gpios = <&pio 118 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-es8326";
+ model = "mt8188_tas2563_8326";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "MIC1", "Headset Mic";
+
+ dai-link-1 {
+ codec {
+ sound-dai = <&tas2563>;
+ };
+ };
+
+ dai-link-2 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+
+ dai-link-3 {
+ codec {
+ sound-dai = <&es8326>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku7.dts b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku7.dts
new file mode 100644
index 000000000000..825015b452d5
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri-sku7.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2024 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt-ciri.dtsi"
+
+/ {
+ model = "Google Ciri sku7 board (rev4)";
+ compatible = "google,ciri-sku7", "google,ciri", "mediatek,mt8188";
+};
+
+&dsi_panel {
+ compatible = "ivo,t109nw41", "himax,hx83102";
+};
+
+&i2c0 {
+ /delete-node/ amplifier@38;
+ /delete-node/ amplifier@39;
+
+ tas2563: amplifier@4f {
+ compatible = "ti,tas2563", "ti,tas2781";
+ reg = <0x4f>, <0x4c>; /* left / right channel */
+ reset-gpios = <&pio 118 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&sound {
+ compatible = "mediatek,mt8188-rt5682s";
+ model = "mt8188_tas2563_5682";
+
+ audio-routing =
+ "ETDM1_OUT", "ETDM_SPK_PIN",
+ "ETDM2_OUT", "ETDM_HP_PIN",
+ "ETDM1_IN", "ETDM_SPK_PIN",
+ "ETDM2_IN", "ETDM_HP_PIN",
+ "ADDA Capture", "MTKAIF_PIN",
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR",
+ "IN1P", "Headset Mic";
+
+ dai-link-1 {
+ codec {
+ sound-dai = <&tas2563>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri.dtsi b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri.dtsi
new file mode 100644
index 000000000000..6815c435a57e
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt-ciri.dtsi
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2023 Google LLC
+ */
+/dts-v1/;
+#include "mt8188-geralt.dtsi"
+
+&aud_etdm_hp_on {
+ pins-mclk {
+ pinmux = <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>;
+ };
+};
+
+&aud_etdm_hp_off {
+ pins-mclk {
+ pinmux = <PINMUX_GPIO114__FUNC_B_GPIO114>;
+ bias-pull-down;
+ input-enable;
+ };
+};
+
+&i2c0 {
+ rt5682s: audio-codec@1a {
+ compatible = "realtek,rt5682s";
+ reg = <0x1a>;
+ interrupts-extended = <&pio 108 IRQ_TYPE_EDGE_BOTH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_codec_pins>;
+ #sound-dai-cells = <1>;
+
+ AVDD-supply = <&mt6359_vio18_ldo_reg>;
+ DBVDD-supply = <&mt6359_vio18_ldo_reg>;
+ LDO1-IN-supply = <&mt6359_vio18_ldo_reg>;
+ MICVDD-supply = <&pp3300_s3>;
+ realtek,jd-src = <1>;
+ };
+
+ max98390_38: amplifier@38 {
+ compatible = "maxim,max98390";
+ reg = <0x38>;
+ sound-name-prefix = "Front Right";
+ reset-gpios = <&pio 118 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&speaker_en>;
+ #sound-dai-cells = <0>;
+ };
+
+ max98390_39: amplifier@39 {
+ compatible = "maxim,max98390";
+ reg = <0x39>;
+ sound-name-prefix = "Front Left";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2c_tunnel {
+ /*
+ * The virtual battery I2C addr is 0xf on Ciri, so we describe it
+ * manually instead of including 'arm/cros-ec-sbs.dtsi'.
+ */
+ battery: sbs-battery@f {
+ compatible = "sbs,sbs-battery";
+ reg = <0xf>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <1>;
+ };
+};
+
+&mipi_tx_config0 {
+ drive-strength-microamp = <5200>;
+};
+
+&mt6359_vm18_ldo_reg {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-microvolt-offset = <100000>;
+};
+
+&sound {
+ dai-link-0 {
+ link-name = "ETDM1_IN_BE";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+ };
+
+ dai-link-1 {
+ link-name = "ETDM1_OUT_BE";
+ dai-format = "i2s";
+ mediatek,clk-provider = "cpu";
+
+ codec {
+ sound-dai = <&max98390_38>,
+ <&max98390_39>;
+ };
+ };
+
+ dai-link-2 {
+ link-name = "ETDM2_IN_BE";
+ mediatek,clk-provider = "cpu";
+
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
+ };
+
+ dai-link-3 {
+ link-name = "ETDM2_OUT_BE";
+ mediatek,clk-provider = "cpu";
+
+ codec {
+ sound-dai = <&rt5682s 0>;
+ };
+ };
+
+ dai-link-4 {
+ link-name = "DPTX_BE";
+
+ codec {
+ sound-dai = <&dp_tx>;
+ };
+ };
+};
+
+&pio {
+ gpio-line-names =
+ "GSC_AP_INT_ODL",
+ "AP_DISP_BKLTEN",
+ "",
+ "EN_PPVAR_MIPI_DISP",
+ "EN_PPVAR_MIPI_DISP_150MA",
+ "TCHSCR_RST_1V8_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "I2S_SPKR_DATAOUT",
+ "EN_PP3300_WLAN_X",
+ "WIFI_KILL_1V8_L",
+ "BT_KILL_1V8_L",
+ "AP_FLASH_WP_L", /* ... is crossystem ABI. Rev1 schematics call it AP_WP_ODL. */
+ "",
+ "",
+ "WCAM_PWDN_L",
+ "WCAM_RST_L",
+ "UCAM_PWDM_L",
+ "UCAM_RST_L",
+ "WCAM_24M_CLK",
+ "UCAM_24M_CLK",
+ "MT6319_INT",
+ "DISP_RST_1V8_L",
+ "DSIO_DSI_TE",
+ "",
+ "TP",
+ "MIPI_BL_PWM_1V8",
+ "",
+ "UART_AP_TX_GSC_RX",
+ "UART_GSC_TX_AP_RX",
+ "UART_SSPM_TX_DBGCON_RX",
+ "UART_DBGCON_TX_SSPM_RX",
+ "UART_ADSP_TX_DBGCON_RX",
+ "UART_DBGCON_TX_ADSP_RX",
+ "JTAG_AP_TMS",
+ "JTAG_AP_TCK",
+ "JTAG_AP_TDI",
+ "JTAG_AP_TDO",
+ "JTAG_AP_TRST",
+ "AP_KPCOL0",
+ "TP",
+ "",
+ "TP",
+ "EC_AP_HPD_OD",
+ "PCIE_WAKE_1V8_ODL",
+ "PCIE_RST_1V8_L",
+ "PCIE_CLKREQ_1V8_ODL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_I2C_AUD_SCL_1V8",
+ "AP_I2C_AUD_SDA_1V8",
+ "AP_I2C_TPM_SCL_1V8",
+ "AP_I2C_TPM_SDA_1V8",
+ "AP_I2C_TCHSCR_SCL_1V8",
+ "AP_I2C_TCHSCR_SDA_1V8",
+ "AP_I2C_PMIC_SAR_SCL_1V8",
+ "AP_I2C_PMIC_SAR_SDA_1V8",
+ "AP_I2C_EC_HID_KB_SCL_1V8",
+ "AP_I2C_EC_HID_KB_SDA_1V8",
+ "AP_I2C_UCAM_SCL_1V8",
+ "AP_I2C_UCAM_SDA_1V8",
+ "AP_I2C_WCAM_SCL_1V8",
+ "AP_I2C_WCAM_SDA_1V8",
+ "SPI_AP_CS_EC_L",
+ "SPI_AP_CLK_EC",
+ "SPI_AP_DO_EC_DI",
+ "SPI_AP_DI_EC_DO",
+ "TP",
+ "TP",
+ "SPI_AP_CS_TCHSCR_L",
+ "SPI_AP_CLK_TCHSCR",
+ "SPI_AP_DO_TCHSCR_DI",
+ "SPI_AP_DI_TCHSCR_DO",
+ "TP",
+ "TP",
+ "TP",
+ "TP",
+ "",
+ "",
+ "",
+ "TP",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "PWRAP_SPI_CS_L",
+ "PWRAP_SPI_CK",
+ "PWRAP_SPI_MOSI",
+ "PWRAP_SPI_MISO",
+ "SRCLKENA0",
+ "SRCLKENA1",
+ "SCP_VREQ_VAO",
+ "AP_RTC_CLK32K",
+ "AP_PMIC_WDTRST_L",
+ "AUD_CLK_MOSI",
+ "AUD_SYNC_MOSI",
+ "AUD_DAT_MOSI0",
+ "AUD_DAT_MOSI1",
+ "AUD_DAT_MISO0",
+ "AUD_DAT_MISO1",
+ "",
+ "HP_INT_ODL",
+ "SPKR_INT_ODL",
+ "I2S_HP_DATAIN",
+ "EN_SPKR",
+ "I2S_SPKR_MCLK",
+ "I2S_SPKR_BCLK",
+ "I2S_HP_MCLK",
+ "I2S_HP_BCLK",
+ "I2S_HP_LRCK",
+ "I2S_HP_DATAOUT",
+ "RST_SPKR_L",
+ "I2S_SPKR_LRCK",
+ "I2S_SPKR_DATAIN",
+ "",
+ "",
+ "",
+ "",
+ "SPI_AP_CLK_ROM",
+ "SPI_AP_CS_ROM_L",
+ "SPI_AP_DO_ROM_DI",
+ "SPI_AP_DI_ROM_DO",
+ "TP",
+ "TP",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "EN_PP2800A_UCAM_X",
+ "EN_PP1200_UCAM_X",
+ "EN_PP2800A_WCAM_X",
+ "EN_PP1100_WCAM_X",
+ "TCHSCR_INT_1V8_L",
+ "",
+ "MT7921_PMU_EN_1V8",
+ "",
+ "AP_EC_WARM_RST_REQ",
+ "EC_AP_HID_INT_ODL",
+ "EC_AP_INT_ODL",
+ "AP_XHCI_INIT_DONE",
+ "EMMC_DAT7",
+ "EMMC_DAT6",
+ "EMMC_DAT5",
+ "EMMC_DAT4",
+ "EMMC_RST_L",
+ "EMMC_CMD",
+ "EMMC_CLK",
+ "EMMC_DAT3",
+ "EMMC_DAT2",
+ "EMMC_DAT1",
+ "EMMC_DAT0",
+ "EMMC_DSL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "USB3_HUB_RST_L",
+ "EC_AP_RSVD0_ODL",
+ "",
+ "",
+ "SPMI_SCL",
+ "SPMI_SDA";
+
+ audio_codec_pins: audio-codec-pins {
+ pins-hp-int-odl {
+ pinmux = <PINMUX_GPIO108__FUNC_B_GPIO108>;
+ input-enable;
+ };
+ };
+
+ speaker_en: speaker-en-pins {
+ pins-en-spkr {
+ pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi b/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi
new file mode 100644
index 000000000000..b6abecbcfa81
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "mt8188.dtsi"
+#include "mt6359.dtsi"
+
+/ {
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ mmc0 = &mmc0;
+ serial0 = &uart0;
+ };
+
+ backlight_lcd0: backlight-lcd0 {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 1023>;
+ default-brightness-level = <576>;
+ enable-gpios = <&pio 1 GPIO_ACTIVE_HIGH>;
+ num-interpolated-steps = <1023>;
+ power-supply = <&ppvar_sys>;
+ pwms = <&disp_pwm0 0 500000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ dmic-codec {
+ compatible = "dmic-codec";
+ num-channels = <2>;
+ wakeup-delay-ms = <100>;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ /* The size will be filled in by the bootloader */
+ reg = <0 0x40000000 0 0>;
+ };
+
+ /* system wide LDO 1.8V power rail */
+ pp1800_ldo_z1: regulator-pp1800-ldo-z1 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_ldo_z1";
+ /* controlled by PP3300_Z1 */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&pp3300_z1>;
+ };
+
+ /* separately switched 3.3V power rail */
+ pp3300_s3: regulator-pp3300-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_s3";
+ /* controlled by PMIC */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&pp3300_z1>;
+ };
+
+ /* system wide 3.3V power rail */
+ pp3300_z1: regulator-pp3300-z1 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_z1";
+ /* controlled by PP3300_LDO_Z5 & EN_PWR_Z1 */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp3300_wlan: regulator-pp3300-wlan {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_wlan";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&pio 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&wlan_en>;
+ pinctrl-names = "default";
+ vin-supply = <&pp3300_z1>;
+ };
+
+ /* system wide 4.2V power rail */
+ pp4200_s5: regulator-pp4200-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp4200_s5";
+ /* controlled by EC */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4200000>;
+ regulator-max-microvolt = <4200000>;
+ vin-supply = <&ppvar_sys>;
+ };
+
+ /* system wide 5.0V power rail */
+ pp5000_z1: regulator-pp5000-z1 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp5000_z1";
+ /* controlled by EC */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp5000_usb_vbus: regulator-pp5000-usb-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "pp5000_usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&pio 150 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pp5000_z1>;
+ };
+
+ /* system wide semi-regulated power rail from battery or USB */
+ ppvar_sys: regulator-ppvar-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvar_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ppvar_mipi_disp_avdd: regulator-ppvar-mipi-disp-avdd {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvar_mipi_disp_avdd";
+ enable-active-high;
+ gpio = <&pio 3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mipi_disp_avdd_en>;
+ vin-supply = <&pp5000_z1>;
+ };
+
+ ppvar_mipi_disp_avee: regulator-ppvar-mipi-disp-avee {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvar_mipi_disp_avee";
+ regulator-enable-ramp-delay = <10000>;
+ enable-active-high;
+ gpio = <&pio 4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mipi_disp_avee_en>;
+ vin-supply = <&pp5000_z1>;
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ apu_mem: memory@55000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x55000000 0 0x1400000>;
+ };
+
+ adsp_mem: memory@60000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60000000 0 0xf00000>;
+ no-map;
+ };
+
+ afe_dma_mem: memory@60f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60f00000 0 0x100000>;
+ no-map;
+ };
+
+ adsp_dma_mem: memory@61000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x61000000 0 0x100000>;
+ no-map;
+ };
+ };
+};
+
+&adsp {
+ memory-region = <&adsp_dma_mem>, <&adsp_mem>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&adsp_uart_pins>;
+ status = "okay";
+};
+
+&afe {
+ memory-region = <&afe_dma_mem>;
+ mediatek,etdm-out1-cowork-source = <0>; /* in1 */
+ mediatek,etdm-in2-cowork-source = <3>; /* out2 */
+ status = "okay";
+};
+
+&auxadc {
+ status = "okay";
+};
+
+&cam_vcore {
+ domain-supply = <&mt6359_vproc1_buck_reg>;
+};
+
+/*
+ * Geralt is the reference design and doesn't have target TDP.
+ * Ciri is (currently) the only device following Geralt, and its
+ * TDP target is 90 degrees.
+ */
+&cpu_little0_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_little1_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_little2_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_little3_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_big0_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_big1_alert0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&disp_dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ dsi_panel: panel@0 {
+ /* Compatible string for different panels can be found in each device dts */
+ reg = <0>;
+ enable-gpios = <&pio 25 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mipi_dsi_pins>;
+
+ backlight = <&backlight_lcd0>;
+ avdd-supply = <&ppvar_mipi_disp_avdd>;
+ avee-supply = <&ppvar_mipi_disp_avee>;
+ pp1800-supply = <&mt6359_vm18_ldo_reg>;
+ rotation = <270>;
+
+ status = "okay";
+
+ port {
+ dsi_panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&dsi_panel_in>;
+ };
+ };
+};
+
+&disp_pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pwm0_pins>;
+ status = "okay";
+};
+
+&disp_pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pwm1_pins>;
+};
+
+&dp_intf1 {
+ status = "okay";
+
+ port {
+ dp_intf1_out: endpoint {
+ remote-endpoint = <&dptx_in>;
+ };
+ };
+};
+
+&dp_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dp_tx_hpd>;
+ #sound-dai-cells = <0>;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dptx_in: endpoint {
+ remote-endpoint = <&dp_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dptx_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&gpu {
+ mali-supply = <&mt6359_vproc2_buck_reg>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ tpm@50 {
+ compatible = "google,cr50";
+ reg = <0x50>;
+ interrupts-extended = <&pio 0 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsc_int>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&mfg0 {
+ domain-supply = <&mt6359_vproc2_buck_reg>;
+};
+
+&mfg1 {
+ domain-supply = <&mt6359_vsram_others_ldo_reg>;
+};
+
+&mipi_tx_config0 {
+ status = "okay";
+};
+
+&mmc0 {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ cap-mmc-hw-reset;
+ hs400-ds-delay = <0x1481b>;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sd;
+ no-sdio;
+ non-removable;
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ supports-cqe;
+ vmmc-supply = <&mt6359_vemc_1_ldo_reg>;
+ vqmmc-supply = <&mt6359_vufs_ldo_reg>;
+ status = "okay";
+};
+
+&mt6359codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+ mediatek,mic-type-0 = <2>; /* DMIC */
+ mediatek,mic-type-2 = <2>; /* DMIC */
+};
+
+&mt6359_vcore_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vgpu11_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vgpu11_sshub_buck_reg {
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <550000>;
+ regulator-always-on;
+};
+
+&mt6359_vio28_ldo_reg {
+ /delete-property/ regulator-always-on;
+};
+
+&mt6359_vm18_ldo_reg {
+ /delete-property/ regulator-always-on;
+};
+
+&mt6359_vmodem_buck_reg {
+ regulator-min-microvolt = <775000>;
+ regulator-max-microvolt = <775000>;
+};
+
+&mt6359_vpa_buck_reg {
+ regulator-max-microvolt = <3100000>;
+};
+
+&mt6359_vproc2_buck_reg {
+ /*
+ * Called "ppvar_dvdd_gpu" in the schematic. Renamed to
+ * "ppvar_dvdd_vgpu" here to match mtk-regulator-coupler requirements.
+ */
+ regulator-name = "ppvar_dvdd_vgpu";
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <800000>;
+ regulator-coupled-with = <&mt6359_vsram_others_ldo_reg>;
+ regulator-coupled-max-spread = <6250>;
+};
+
+&mt6359_vpu_buck_reg {
+ regulator-always-on;
+};
+
+&mt6359_vrf12_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vsram_md_ldo_reg {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+};
+
+&mt6359_vsram_others_ldo_reg {
+ regulator-name = "pp0850_dvdd_sram_gpu";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <800000>;
+ regulator-coupled-with = <&mt6359_vproc2_buck_reg>;
+ regulator-coupled-max-spread = <6250>;
+};
+
+&mt6359_vufs_ldo_reg {
+ regulator-always-on;
+};
+
+&nor_flash {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nor_pins>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <52000000>;
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins>;
+ status = "okay";
+};
+
+&pciephy {
+ status = "okay";
+};
+
+&pio {
+ gpio-line-names =
+ "gsc_int",
+ "AP_DISP_BKLTEN",
+ "",
+ "EN_PPVAR_MIPI_DISP",
+ "EN_PPVAR_MIPI_DISP_150MA",
+ "TCHSCR_RST_1V8_L",
+ "TCHSRC_REPORT_DISABLE",
+ "",
+ "",
+ "",
+ "",
+ "I2S_SPKR_DATAOUT",
+ "EN_PP3300_WLAN_X",
+ "WIFI_KILL_1V8_L",
+ "BT_KILL_1V8_L",
+ "AP_FLASH_WP_L", /* ... is crossystem ABI. Rev1 schematics call it AP_WP_ODL. */
+ "",
+ "EDP_HPD_1V8",
+ "WCAM_PWDN_L",
+ "WCAM_RST_L",
+ "UCAM_PWDM_L",
+ "UCAM_RST_L",
+ "WCAM_24M_CLK",
+ "UCAM_24M_CLK",
+ "MT6319_INT",
+ "DISP_RST_1V8_L",
+ "DSIO_DSI_TE",
+ "EN_PP3300_EDP_DISP_X",
+ "TP",
+ "MIPI_BL_PWM_1V8",
+ "EDP_BL_PWM_1V8",
+ "UART_AP_TX_GSC_RX",
+ "UART_GSC_TX_AP_RX",
+ "UART_SSPM_TX_DBGCON_RX",
+ "UART_DBGCON_TX_SSPM_RX",
+ "UART_ADSP_TX_DBGCON_RX",
+ "UART_DBGCON_TX_ADSP_RX",
+ "JTAG_AP_TMS",
+ "JTAG_AP_TCK",
+ "JTAG_AP_TDI",
+ "JTAG_AP_TDO",
+ "JTAG_AP_TRST",
+ "AP_KPCOL0",
+ "TP",
+ "BEEP_ON_OD",
+ "TP",
+ "EC_AP_HPD_OD",
+ "PCIE_WAKE_1V8_ODL",
+ "PCIE_RST_1V8_L",
+ "PCIE_CLKREQ_1V8_ODL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_I2C_AUD_SCL_1V8",
+ "AP_I2C_AUD_SDA_1V8",
+ "AP_I2C_TPM_SCL_1V8",
+ "AP_I2C_TPM_SDA_1V8",
+ "AP_I2C_TCHSCR_SCL_1V8",
+ "AP_I2C_TCHSCR_SDA_1V8",
+ "AP_I2C_PMIC_SAR_SCL_1V8",
+ "AP_I2C_PMIC_SAR_SDA_1V8",
+ "AP_I2C_EC_HID_KB_SCL_1V8",
+ "AP_I2C_EC_HID_KB_SDA_1V8",
+ "AP_I2C_UCAM_SCL_1V8",
+ "AP_I2C_UCAM_SDA_1V8",
+ "AP_I2C_WCAM_SCL_1V8",
+ "AP_I2C_WCAM_SDA_1V8",
+ "SPI_AP_CS_EC_L",
+ "SPI_AP_CLK_EC",
+ "SPI_AP_DO_EC_DI",
+ "SPI_AP_DI_EC_DO",
+ "TP",
+ "TP",
+ "SPI_AP_CS_TCHSCR_L",
+ "SPI_AP_CLK_TCHSCR",
+ "SPI_AP_DO_TCHSCR_DI",
+ "SPI_AP_DI_TCHSCR_DO",
+ "TP",
+ "TP",
+ "TP",
+ "TP",
+ "",
+ "",
+ "",
+ "TP",
+ "",
+ "SAR_INT_ODL",
+ "",
+ "",
+ "",
+ "PWRAP_SPI_CS_L",
+ "PWRAP_SPI_CK",
+ "PWRAP_SPI_MOSI",
+ "PWRAP_SPI_MISO",
+ "SRCLKENA0",
+ "SRCLKENA1",
+ "SCP_VREQ_VAO",
+ "AP_RTC_CLK32K",
+ "AP_PMIC_WDTRST_L",
+ "AUD_CLK_MOSI",
+ "AUD_SYNC_MOSI",
+ "AUD_DAT_MOSI0",
+ "AUD_DAT_MOSI1",
+ "AUD_DAT_MISO0",
+ "AUD_DAT_MISO1",
+ "SD_CD_ODL",
+ "HP_INT_ODL",
+ "SPKR_INT_ODL",
+ "I2S_HP_DATAIN",
+ "EN_SPKR",
+ "I2S_SPKR_MCLK",
+ "I2S_SPKR_BCLK",
+ "I2S_HP_MCLK",
+ "I2S_HP_BCLK",
+ "I2S_HP_LRCK",
+ "I2S_HP_DATAOUT",
+ "RST_SPKR_L",
+ "I2S_SPKR_LRCK",
+ "I2S_SPKR_DATAIN",
+ "",
+ "",
+ "",
+ "",
+ "SPI_AP_CLK_ROM",
+ "SPI_AP_CS_ROM_L",
+ "SPI_AP_DO_ROM_DI",
+ "SPI_AP_DI_ROM_DO",
+ "TP",
+ "TP",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "EN_PP2800A_UCAM_X",
+ "EN_PP1200_UCAM_X",
+ "EN_PP2800A_WCAM_X",
+ "EN_PP1100_WCAM_X",
+ "TCHSCR_INT_1V8_L",
+ "EN_PP3300_MIPI_TCHSCR_X",
+ "MT7921_PMU_EN_1V8",
+ "EN_PP3300_EDP_TCHSCR_X",
+ "AP_EC_WARM_RST_REQ",
+ "EC_AP_HID_INT_ODL",
+ "EC_AP_INT_ODL",
+ "AP_XHCI_INIT_DONE",
+ "EMMC_DAT7",
+ "EMMC_DAT6",
+ "EMMC_DAT5",
+ "EMMC_DAT4",
+ "EMMC_RST_L",
+ "EMMC_CMD",
+ "EMMC_CLK",
+ "EMMC_DAT3",
+ "EMMC_DAT2",
+ "EMMC_DAT1",
+ "EMMC_DAT0",
+ "EMMC_DSL",
+ "SD_CMD",
+ "SD_CLK",
+ "SD_DAT0",
+ "SD_DAT1",
+ "SD_DAT2",
+ "SD_DAT3",
+ "",
+ "",
+ "USB3_HUB_RST_L",
+ "EC_AP_RSVD0_ODL",
+ "",
+ "",
+ "SPMI_SCL",
+ "SPMI_SDA";
+
+ adsp_uart_pins: adsp-uart-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO35__FUNC_O_ADSP_UTXD0>,
+ <PINMUX_GPIO36__FUNC_I1_ADSP_URXD0>;
+ };
+ };
+
+ aud_etdm_hp_on: aud-etdm-hp-on-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>,
+ <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>,
+ <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>,
+ <PINMUX_GPIO117__FUNC_O_I2SO2_D0>;
+ };
+ };
+
+ aud_etdm_hp_off: aud-etdm-hp-off-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO110__FUNC_B_GPIO110>,
+ <PINMUX_GPIO115__FUNC_B_GPIO115>,
+ <PINMUX_GPIO116__FUNC_B_GPIO116>,
+ <PINMUX_GPIO117__FUNC_B_GPIO117>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ aud_etdm_spk_on: aud-etdm-spk-on-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO11__FUNC_O_I2SO1_D0>,
+ <PINMUX_GPIO113__FUNC_B0_TDMIN_BCK>,
+ <PINMUX_GPIO119__FUNC_B0_TDMIN_LRCK>,
+ <PINMUX_GPIO120__FUNC_I0_TDMIN_DI>;
+ drive-strength = <8>;
+ };
+ };
+
+ aud_etdm_spk_off: aud-etdm-spk-off-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO11__FUNC_B_GPIO11>,
+ <PINMUX_GPIO113__FUNC_B_GPIO113>,
+ <PINMUX_GPIO119__FUNC_B_GPIO119>,
+ <PINMUX_GPIO120__FUNC_B_GPIO120>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ aud_mtkaif_on: aud-mtkaif-on-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>,
+ <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>,
+ <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>,
+ <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>,
+ <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>,
+ <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>;
+ };
+ };
+
+ aud_mtkaif_off: aud-mtkaif-off-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO101__FUNC_B_GPIO101>,
+ <PINMUX_GPIO102__FUNC_B_GPIO102>,
+ <PINMUX_GPIO103__FUNC_B_GPIO103>,
+ <PINMUX_GPIO104__FUNC_B_GPIO104>,
+ <PINMUX_GPIO105__FUNC_B_GPIO105>,
+ <PINMUX_GPIO106__FUNC_B_GPIO106>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ cros_ec_int: cros-ec-int-pins {
+ pins-ec-ap-int-odl {
+ pinmux = <PINMUX_GPIO149__FUNC_B_GPIO149>;
+ input-enable;
+ };
+ };
+
+ disp_pwm0_pins: disp-pwm0-pins {
+ pins-disp-pwm0 {
+ pinmux = <PINMUX_GPIO29__FUNC_O_DISP_PWM0>;
+ output-high;
+ };
+ };
+
+ disp_pwm1_pins: disp-pwm1-pins {
+ pins-disp-pwm1 {
+ pinmux = <PINMUX_GPIO30__FUNC_O_DISP_PWM1>;
+ output-high;
+ };
+ };
+
+ dp_tx_hpd: dp-tx-hpd-pins {
+ pins-dp-tx-hpd {
+ pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>;
+ };
+ };
+
+ gsc_int: gsc-int-pins {
+ pins-gsc-ap-int-odl {
+ pinmux = <PINMUX_GPIO0__FUNC_B_GPIO0>;
+ input-enable;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>,
+ <PINMUX_GPIO55__FUNC_B1_SCL0>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>,
+ <PINMUX_GPIO57__FUNC_B1_SCL1>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>,
+ <PINMUX_GPIO59__FUNC_B1_SCL2>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+ };
+
+ i2c3_pins: i2c3-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>,
+ <PINMUX_GPIO61__FUNC_B1_SCL3>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>,
+ <PINMUX_GPIO63__FUNC_B1_SCL4>;
+ };
+ };
+
+ i2c5_pins: i2c5-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>,
+ <PINMUX_GPIO65__FUNC_B1_SCL5>;
+ };
+ };
+
+ i2c6_pins: i2c6-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>,
+ <PINMUX_GPIO67__FUNC_B1_SCL6>;
+ };
+ };
+
+ mipi_disp_avdd_en: mipi-disp-avdd-en-pins {
+ pins-en-ppvar-mipi-disp {
+ pinmux = <PINMUX_GPIO3__FUNC_B_GPIO3>;
+ output-low;
+ };
+ };
+
+ mipi_disp_avee_en: mipi-disp-avee-en-pins {
+ pins-en-ppvar-mipi-disp-150ma {
+ pinmux = <PINMUX_GPIO4__FUNC_B_GPIO4>;
+ output-low;
+ };
+ };
+
+ mipi_dsi_pins: mipi-dsi-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO1__FUNC_B_GPIO1>,
+ <PINMUX_GPIO25__FUNC_B_GPIO25>;
+ output-low;
+ };
+ };
+
+ mmc0_pins_default: mmc0-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0-uhs-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-ds {
+ pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ nor_pins: nor-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO127__FUNC_B0_SPINOR_IO0>,
+ <PINMUX_GPIO125__FUNC_O_SPINOR_CK>,
+ <PINMUX_GPIO128__FUNC_B0_SPINOR_IO1>;
+ bias-pull-down;
+ };
+
+ pins-cs {
+ pinmux = <PINMUX_GPIO126__FUNC_O_SPINOR_CS>;
+ bias-pull-up;
+ };
+ };
+
+ pcie_pins: pcie-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO47__FUNC_I1_WAKEN>,
+ <PINMUX_GPIO48__FUNC_O_PERSTN>,
+ <PINMUX_GPIO49__FUNC_B1_CLKREQN>;
+ };
+ };
+
+ spi0_pins: spi0-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>,
+ <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>,
+ <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>,
+ <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi1_pins_default: spi1-default-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>,
+ <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>,
+ <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>,
+ <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi1_pins_sleep: spi1-sleep-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO75__FUNC_B_GPIO75>,
+ <PINMUX_GPIO76__FUNC_B_GPIO76>,
+ <PINMUX_GPIO77__FUNC_B_GPIO77>,
+ <PINMUX_GPIO78__FUNC_B_GPIO78>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ spi2_pins: spi2-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>,
+ <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>,
+ <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>,
+ <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>;
+ bias-disable;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ pins-bus {
+ pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>,
+ <PINMUX_GPIO32__FUNC_I1_URXD0>;
+ bias-pull-up;
+ };
+ };
+
+ wlan_en: wlan-en-pins {
+ pins-en-pp3300-wlan {
+ pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>;
+ output-low;
+ };
+ };
+};
+
+&pmic {
+ interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&sound {
+ pinctrl-names = "aud_etdm_hp_on", "aud_etdm_hp_off",
+ "aud_etdm_spk_on", "aud_etdm_spk_off",
+ "aud_mtkaif_on", "aud_mtkaif_off";
+ pinctrl-0 = <&aud_etdm_hp_on>;
+ pinctrl-1 = <&aud_etdm_hp_off>;
+ pinctrl-2 = <&aud_etdm_spk_on>;
+ pinctrl-3 = <&aud_etdm_spk_off>;
+ pinctrl-4 = <&aud_mtkaif_on>;
+ pinctrl-5 = <&aud_mtkaif_off>;
+ mediatek,adsp = <&adsp>;
+ /* The audio-routing is defined in each board dts */
+
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ status = "okay";
+
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0>;
+ interrupts-extended = <&pio 149 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cros_ec_int>;
+ spi-max-frequency = <3000000>;
+
+ i2c_tunnel: i2c-tunnel {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cbas {
+ compatible = "google,cros-cbas";
+ };
+ };
+};
+
+&spi1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi1_pins_default>;
+ pinctrl-1 = <&spi1_pins_sleep>;
+ status = "okay";
+};
+
+&spi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&u3phy0 {
+ status = "okay";
+};
+
+&u3phy1 {
+ status = "okay";
+};
+
+&u3phy2 {
+ status = "okay";
+};
+
+/* USB detachable base */
+&xhci0 {
+ /* controlled by EC */
+ vbus-supply = <&pp3300_z1>;
+ status = "okay";
+};
+
+/* USB3 hub */
+&xhci1 {
+ vusb33-supply = <&pp3300_s3>;
+ vbus-supply = <&pp5000_usb_vbus>;
+ status = "okay";
+};
+
+/* USB BT */
+&xhci2 {
+ /* no power supply since MT7921's power is controlled by PCIe */
+ /* MT7921's USB BT has issues with USB2 LPM */
+ usb2-lpm-disable;
+ status = "okay";
+};
+
+#include <arm/cros-ec-keyboard.dtsi>
+
+&keyboard_controller {
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T8 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T9 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+ >;
+
+ linux,keymap = <
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x01, 0x04, KEY_MICMUTE)
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
+ CROS_STD_MAIN_KEYMAP
+ >;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index faccc7f16259..338120930b81 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -2125,6 +2125,11 @@
reg = <0x1ac 0x40>;
};
+ gpu_speedbin: gpu-speedbin@581 {
+ reg = <0x581 0x1>;
+ bits = <0 3>;
+ };
+
socinfo-data1@7a0 {
reg = <0x7a0 0x4>;
};
@@ -2143,6 +2148,8 @@
<GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "job", "mmu", "gpu";
+ nvmem-cells = <&gpu_speedbin>;
+ nvmem-cell-names = "speed-bin";
operating-points-v2 = <&gpu_opp_table>;
power-domains = <&spm MT8188_POWER_DOMAIN_MFG2>,
<&spm MT8188_POWER_DOMAIN_MFG3>,
@@ -2488,7 +2495,7 @@
};
ovl0: ovl@1c000000 {
- compatible = "mediatek,mt8188-disp-ovl", "mediatek,mt8183-disp-ovl";
+ compatible = "mediatek,mt8188-disp-ovl", "mediatek,mt8195-disp-ovl";
reg = <0 0x1c000000 0 0x1000>;
clocks = <&vdosys0 CLK_VDO0_DISP_OVL0>;
interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts
deleted file mode 100644
index cd86ad9ba28a..000000000000
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-hayato-r5-sku2.dts
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright 2022 Google LLC
- */
-/dts-v1/;
-#include "mt8192-asurada.dtsi"
-
-/ {
- model = "Google Hayato rev5";
- chassis-type = "convertible";
- compatible = "google,hayato-rev5-sku2", "google,hayato-sku2",
- "google,hayato", "mediatek,mt8192";
-};
-
-&keyboard_controller {
- function-row-physmap = <
- MATRIX_KEY(0x00, 0x02, 0) /* T1 */
- MATRIX_KEY(0x03, 0x02, 0) /* T2 */
- MATRIX_KEY(0x02, 0x02, 0) /* T3 */
- MATRIX_KEY(0x01, 0x02, 0) /* T4 */
- MATRIX_KEY(0x03, 0x04, 0) /* T5 */
- MATRIX_KEY(0x02, 0x04, 0) /* T6 */
- MATRIX_KEY(0x01, 0x04, 0) /* T7 */
- MATRIX_KEY(0x02, 0x09, 0) /* T8 */
- MATRIX_KEY(0x01, 0x09, 0) /* T9 */
- MATRIX_KEY(0x00, 0x04, 0) /* T10 */
- >;
- linux,keymap = <
- MATRIX_KEY(0x00, 0x02, KEY_BACK)
- MATRIX_KEY(0x03, 0x02, KEY_FORWARD)
- MATRIX_KEY(0x02, 0x02, KEY_REFRESH)
- MATRIX_KEY(0x01, 0x02, KEY_FULL_SCREEN)
- MATRIX_KEY(0x03, 0x04, KEY_SCALE)
- MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
- MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
- MATRIX_KEY(0x02, 0x09, KEY_MUTE)
- MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
- MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
-
- CROS_STD_MAIN_KEYMAP
- >;
-};
-
-&rt5682 {
- compatible = "realtek,rt5682s";
-};
-
-&sound {
- compatible = "mediatek,mt8192_mt6359_rt1015p_rt5682s";
-
- speaker-codecs {
- sound-dai = <&rt1015p>;
- };
-
- headset-codec {
- sound-dai = <&rt5682 0>;
- };
-};
-
-&touchscreen {
- compatible = "hid-over-i2c";
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
- vdd-supply = <&pp3300_u>;
-};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts b/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts
deleted file mode 100644
index 5e9e598bab90..000000000000
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r4.dts
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright 2022 Google LLC
- */
-/dts-v1/;
-#include "mt8192-asurada.dtsi"
-#include <dt-bindings/leds/common.h>
-
-/ {
- model = "Google Spherion (rev4)";
- chassis-type = "laptop";
- compatible = "google,spherion-rev4", "google,spherion",
- "mediatek,mt8192";
-
- pwmleds {
- compatible = "pwm-leds";
-
- led {
- function = LED_FUNCTION_KBD_BACKLIGHT;
- color = <LED_COLOR_ID_WHITE>;
- pwms = <&cros_ec_pwm 0>;
- max-brightness = <1023>;
- };
- };
-};
-
-&cros_ec_pwm {
- status = "okay";
-};
-
-&keyboard_controller {
- function-row-physmap = <
- MATRIX_KEY(0x00, 0x02, 0) /* T1 */
- MATRIX_KEY(0x03, 0x02, 0) /* T2 */
- MATRIX_KEY(0x02, 0x02, 0) /* T3 */
- MATRIX_KEY(0x01, 0x02, 0) /* T4 */
- MATRIX_KEY(0x03, 0x04, 0) /* T5 */
- MATRIX_KEY(0x02, 0x04, 0) /* T6 */
- MATRIX_KEY(0x01, 0x04, 0) /* T7 */
- MATRIX_KEY(0x02, 0x09, 0) /* T8 */
- MATRIX_KEY(0x01, 0x09, 0) /* T9 */
- MATRIX_KEY(0x00, 0x04, 0) /* T10 */
- >;
- linux,keymap = <
- MATRIX_KEY(0x00, 0x02, KEY_BACK)
- MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
- MATRIX_KEY(0x02, 0x02, KEY_FULL_SCREEN)
- MATRIX_KEY(0x01, 0x02, KEY_SCALE)
- MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
- MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
- MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
- MATRIX_KEY(0x02, 0x09, KEY_MUTE)
- MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
- MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
-
- CROS_STD_MAIN_KEYMAP
- >;
-};
-
-&rt5682 {
- compatible = "realtek,rt5682s";
-};
-
-&sound {
- compatible = "mediatek,mt8192_mt6359_rt1015p_rt5682s";
-
- speaker-codecs {
- sound-dai = <&rt1015p>;
- };
-
- headset-codec {
- sound-dai = <&rt5682 0>;
- };
-};
-
-&touchscreen {
- compatible = "elan,ekth3500";
-};
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 8dda8b63765b..dd0d07fbe61a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1418,7 +1418,6 @@
regulators {
mt6315_6_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
@@ -1428,7 +1427,6 @@
};
mt6315_6_vbuck3: vbuck3 {
- regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
@@ -1445,7 +1443,6 @@
regulators {
mt6315_7_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index 2c7b2223ee76..5056e07399e2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -1285,7 +1285,6 @@
regulators {
mt6315_6_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
@@ -1303,7 +1302,6 @@
regulators {
mt6315_7_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index 31d424b8fc7c..1f59b5786b81 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -109,6 +109,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default_pins>;
pinctrl-1 = <&eth_sleep_pins>;
+ mediatek,mac-wol;
status = "okay";
mdio {
@@ -137,7 +138,6 @@
richtek,vinovp-microvolt = <14500000>;
otg_vbus_regulator: usb-otg-vbus-regulator {
- regulator-compatible = "usb-otg-vbus";
regulator-name = "usb-otg-vbus";
regulator-min-microvolt = <4425000>;
regulator-max-microvolt = <5825000>;
@@ -149,7 +149,6 @@
LDO_VIN3-supply = <&mt6360_buck2>;
mt6360_buck1: buck1 {
- regulator-compatible = "BUCK1";
regulator-name = "mt6360,buck1";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1300000>;
@@ -160,7 +159,6 @@
};
mt6360_buck2: buck2 {
- regulator-compatible = "BUCK2";
regulator-name = "mt6360,buck2";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1300000>;
@@ -171,7 +169,6 @@
};
mt6360_ldo1: ldo1 {
- regulator-compatible = "LDO1";
regulator-name = "mt6360,ldo1";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
@@ -180,7 +177,6 @@
};
mt6360_ldo2: ldo2 {
- regulator-compatible = "LDO2";
regulator-name = "mt6360,ldo2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
@@ -189,7 +185,6 @@
};
mt6360_ldo3: ldo3 {
- regulator-compatible = "LDO3";
regulator-name = "mt6360,ldo3";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
@@ -198,7 +193,6 @@
};
mt6360_ldo5: ldo5 {
- regulator-compatible = "LDO5";
regulator-name = "mt6360,ldo5";
regulator-min-microvolt = <2700000>;
regulator-max-microvolt = <3600000>;
@@ -207,7 +201,6 @@
};
mt6360_ldo6: ldo6 {
- regulator-compatible = "LDO6";
regulator-name = "mt6360,ldo6";
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <2100000>;
@@ -216,7 +209,6 @@
};
mt6360_ldo7: ldo7 {
- regulator-compatible = "LDO7";
regulator-name = "mt6360,ldo7";
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <2100000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index ade685ed2190..f013dbad9dc4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -1611,9 +1611,6 @@
phy-names = "pcie-phy";
power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P1>;
- resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P1_SWRST>;
- reset-names = "mac";
-
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
@@ -3138,7 +3135,7 @@
};
ovl0: ovl@1c000000 {
- compatible = "mediatek,mt8195-disp-ovl", "mediatek,mt8183-disp-ovl";
+ compatible = "mediatek,mt8195-disp-ovl";
reg = <0 0x1c000000 0 0x1000>;
interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
index 7d90112a7e27..44c61094c4d5 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
@@ -21,6 +21,7 @@
aliases {
serial0 = &uart0;
+ ethernet = &ethernet;
};
chosen {
diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
index 9c91fe8ea0f9..2bf8c9d02b6e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
@@ -449,7 +449,8 @@
};
keypad: keypad@10010000 {
- compatible = "mediatek,mt6779-keypad";
+ compatible = "mediatek,mt8365-keypad",
+ "mediatek,mt6779-keypad";
reg = <0 0x10010000 0 0x1000>;
wakeup-source;
interrupts = <GIC_SPI 124 IRQ_TYPE_EDGE_FALLING>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
index 13f2e0e3fa8a..04e4a2f73799 100644
--- a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
@@ -93,6 +93,24 @@
compatible = "shared-dma-pool";
reg = <0 0x57000000 0 0x1400000>; /* 20 MB */
};
+
+ adsp_mem: memory@60000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60000000 0 0xf00000>;
+ no-map;
+ };
+
+ afe_dma_mem: memory@60f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60f00000 0 0x100000>;
+ no-map;
+ };
+
+ adsp_dma_mem: memory@61000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x61000000 0 0x100000>;
+ no-map;
+ };
};
common_fixed_5v: regulator-0 {
@@ -210,6 +228,16 @@
};
};
+&adsp {
+ memory-region = <&adsp_dma_mem>, <&adsp_mem>;
+ status = "okay";
+};
+
+&afe {
+ memory-region = <&afe_dma_mem>;
+ status = "okay";
+};
+
&gpu {
mali-supply = <&mt6359_vproc2_buck_reg>;
status = "okay";
@@ -932,6 +960,26 @@
status = "okay";
};
+&sound {
+ compatible = "mediatek,mt8390-mt6359-evk", "mediatek,mt8188-mt6359-evb";
+ model = "mt8390-evk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_default_pins>;
+ audio-routing =
+ "Headphone", "Headphone L",
+ "Headphone", "Headphone R";
+ mediatek,adsp = <&adsp>;
+ status = "okay";
+
+ dai-link-0 {
+ link-name = "DL_SRC_BE";
+
+ codec {
+ sound-dai = <&pmic 0>;
+ };
+ };
+};
+
&spi2 {
pinctrl-0 = <&spi2_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index 5f16fb820580..5950194c9ccb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -835,7 +835,6 @@
regulators {
mt6315_6_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
@@ -852,7 +851,6 @@
regulators {
mt6315_7_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <546000>;
regulator-max-microvolt = <787000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
index e2e75b8ff918..4985b65925a9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
@@ -271,6 +271,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default_pins>;
pinctrl-1 = <&eth_sleep_pins>;
+ mediatek,mac-wol;
status = "okay";
mdio {
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
index 14ec970c4e49..41dc34837b02 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
@@ -812,7 +812,6 @@
regulators {
mt6315_6_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
@@ -829,7 +828,6 @@
regulators {
mt6315_7_vbuck1: vbuck1 {
- regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
index d0b03dc4d3f4..b5e753759465 100644
--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -144,10 +144,10 @@
#size-cells = <2>;
ranges;
- /* 128 KiB reserved for ARM Trusted Firmware (BL31) */
+ /* 192 KiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_reserved: secmon@43000000 {
no-map;
- reg = <0 0x43000000 0 0x20000>;
+ reg = <0 0x43000000 0 0x30000>;
};
};
@@ -206,7 +206,7 @@
compatible = "mediatek,mt8516-wdt",
"mediatek,mt6589-wdt";
reg = <0 0x10007000 0 0x1000>;
- interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
#reset-cells = <1>;
};
@@ -220,6 +220,17 @@
clock-names = "clk13m", "bus";
};
+ keypad: keypad@10002000 {
+ compatible = "mediatek,mt8516-keypad",
+ "mediatek,mt6779-keypad";
+ reg = <0 0x10002000 0 0x1000>;
+ wakeup-source;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&clk26m>;
+ clock-names = "kpd";
+ status = "disabled";
+ };
+
syscfg_pctl: syscfg-pctl@10005000 {
compatible = "syscon";
reg = <0 0x10005000 0 0x1000>;
@@ -268,7 +279,7 @@
interrupt-parent = <&gic>;
interrupt-controller;
reg = <0 0x10310000 0 0x1000>,
- <0 0x10320000 0 0x1000>,
+ <0 0x1032f000 0 0x2000>,
<0 0x10340000 0 0x2000>,
<0 0x10360000 0 0x2000>;
interrupts = <GIC_PPI 9
@@ -344,6 +355,7 @@
reg = <0 0x11009000 0 0x90>,
<0 0x11000180 0 0x80>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <2>;
clocks = <&topckgen CLK_TOP_I2C0>,
<&topckgen CLK_TOP_APDMA>;
clock-names = "main", "dma";
@@ -358,6 +370,7 @@
reg = <0 0x1100a000 0 0x90>,
<0 0x11000200 0 0x80>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <2>;
clocks = <&topckgen CLK_TOP_I2C1>,
<&topckgen CLK_TOP_APDMA>;
clock-names = "main", "dma";
@@ -372,6 +385,7 @@
reg = <0 0x1100b000 0 0x90>,
<0 0x11000280 0 0x80>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <2>;
clocks = <&topckgen CLK_TOP_I2C2>,
<&topckgen CLK_TOP_APDMA>;
clock-names = "main", "dma";
diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
index ec8dfb3d1c6d..a356db5fcc5f 100644
--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
@@ -47,7 +47,6 @@
};
&i2c0 {
- clock-div = <2>;
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
status = "okay";
@@ -156,7 +155,6 @@
};
&i2c2 {
- clock-div = <2>;
pinctrl-names = "default";
pinctrl-0 = <&i2c2_pins_a>;
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index 984c85eab41a..2601b43b2d8c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -3815,7 +3815,7 @@
compatible = "nvidia,tegra234-sce-fabric";
reg = <0x0 0xb600000 0x0 0x40000>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
- status = "okay";
+ status = "disabled";
};
rce-fabric@be00000 {
@@ -3900,7 +3900,7 @@
assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
resets = <&bpmp TEGRA234_RESET_SPI2>;
reset-names = "spi";
- dmas = <&gpcdma 19>, <&gpcdma 19>;
+ dmas = <&gpcdma 16>, <&gpcdma 16>;
dma-names = "rx", "tx";
dma-coherent;
status = "disabled";
@@ -3995,7 +3995,7 @@
};
dce-fabric@de00000 {
- compatible = "nvidia,tegra234-sce-fabric";
+ compatible = "nvidia,tegra234-dce-fabric";
reg = <0x0 0xde00000 0x0 0x40000>;
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>;
status = "okay";
@@ -4018,6 +4018,8 @@
#redistributor-regions = <1>;
#interrupt-cells = <3>;
interrupt-controller;
+
+ #address-cells = <0>;
};
smmu_iso: iommu@10000000 {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 6ca8db4b8afe..140b0b2abfb5 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -3,6 +3,8 @@ dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
apq8016-sbc-usb-host-dtbs := apq8016-sbc.dtb apq8016-sbc-usb-host.dtbo
+dtb-$(CONFIG_ARCH_QCOM) += sar2130p-qar2130p.dtb
+
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc-usb-host.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc-d3-camera-mezzanine.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8016-schneider-hmibsc.dtb
@@ -16,6 +18,7 @@ dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp441.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp442.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp468.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq5332-rdp474.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq5424-rdp466.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq6018-cp01-c1.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk10-c1.dtb
@@ -59,6 +62,7 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86518.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86528.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt88047.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8917-xiaomi-riva.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8929-wingtech-wt82918hd.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-huawei-kiwi.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8939-longcheer-l9100.dtb
@@ -110,7 +114,9 @@ dtb-$(CONFIG_ARCH_QCOM) += qcm6490-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcm6490-shift-otter.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
+dtb-$(CONFIG_ARCH_QCOM) += qcs615-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs6490-rb3gen2.dtb
+dtb-$(CONFIG_ARCH_QCOM) += qcs8300-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs8550-aim300-aiot.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs9100-ride.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs9100-ride-r3.dtb
@@ -195,8 +201,10 @@ dtb-$(CONFIG_ARCH_QCOM) += sc7280-crd-r3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8180x-lenovo-flex-5g.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8180x-primus.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-huawei-gaokun3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-lenovo-thinkpad-x13s.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-arcata.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc8280xp-microsoft-blackrock.dtb
dtb-$(CONFIG_ARCH_QCOM) += sda660-inforce-ifc6560.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm450-lenovo-tbx605f.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm450-motorola-ali.dtb
@@ -278,10 +286,14 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk-display-card.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8750-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8750-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e001de-devkit.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-dell-xps13-9345.dtb
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-hp-omnibook-x14.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus13.dtb
dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus15.dtb
diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
index d3c3e215a15c..ca3da95730bd 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
@@ -180,7 +180,7 @@
};
rng: rng@e3000 {
- compatible = "qcom,prng-ee";
+ compatible = "qcom,ipq5332-trng", "qcom,trng";
reg = <0x000e3000 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
diff --git a/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts b/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts
new file mode 100644
index 000000000000..b6e4bb3328b3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq5424-rdp466.dts
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * IPQ5424 RDP466 board device tree source
+ *
+ * Copyright (c) 2024 The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "ipq5424.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. IPQ5424 RDP466";
+ compatible = "qcom,ipq5424-rdp466", "qcom,ipq5424";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ vreg_misc_3p3: regulator-usb-3p3 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-name = "usb_hs_vdda_3p3";
+ };
+
+ vreg_misc_1p8: regulator-usb-1p8 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-name = "vdda_1p8_usb";
+ };
+
+ vreg_misc_0p925: regulator-usb-0p925 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-name = "vdd_core_usb";
+ };
+};
+
+&dwc_0 {
+ dr_mode = "host";
+};
+
+&dwc_1 {
+ dr_mode = "host";
+};
+
+&qusb_phy_0 {
+ vdd-supply = <&vreg_misc_0p925>;
+ vdda-pll-supply = <&vreg_misc_1p8>;
+ vdda-phy-dpdm-supply = <&vreg_misc_3p3>;
+
+ status = "okay";
+};
+
+&qusb_phy_1 {
+ vdd-supply = <&vreg_misc_0p925>;
+ vdda-pll-supply = <&vreg_misc_1p8>;
+ vdda-phy-dpdm-supply = <&vreg_misc_3p3>;
+
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32000>;
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_default_state>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ flash@0 {
+ compatible = "micron,n25q128a11", "jedec,spi-nor";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&ssphy_0 {
+ vdda-pll-supply = <&vreg_misc_1p8>;
+ vdda-phy-supply = <&vreg_misc_0p925>;
+
+ status = "okay";
+};
+
+&tlmm {
+ spi0_default_state: spi0-default-state {
+ clk-pins {
+ pins = "gpio6";
+ function = "spi0_clk";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+
+ cs-pins {
+ pins = "gpio7";
+ function = "spi0_cs";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ miso-pins {
+ pins = "gpio8";
+ function = "spi0_miso";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+
+ mosi-pins {
+ pins = "gpio9";
+ function = "spi0_mosi";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+ };
+
+ sdc_default_state: sdc-default-state {
+ clk-pins {
+ pins = "gpio5";
+ function = "sdc_clk";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "gpio4";
+ function = "sdc_cmd";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "sdc_data";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usb2 {
+ status = "okay";
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&xo_board {
+ clock-frequency = <24000000>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/ipq5424.dtsi b/arch/arm64/boot/dts/qcom/ipq5424.dtsi
new file mode 100644
index 000000000000..7034d378b1ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq5424.dtsi
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * IPQ5424 device tree source
+ *
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
+#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&intc>;
+
+ clocks {
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ xo_board: xo-board-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ enable-method = "psci";
+ reg = <0x100>;
+ next-level-cache = <&l2_100>;
+
+ l2_100: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ enable-method = "psci";
+ reg = <0x200>;
+ next-level-cache = <&l2_200>;
+
+ l2_200: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ enable-method = "psci";
+ reg = <0x300>;
+ next-level-cache = <&l2_300>;
+
+ l2_300: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-ipq5424", "qcom,scm";
+ qcom,dload-mode = <&tcsr 0x25100>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmu-dsu {
+ compatible = "arm,dsu-pmu";
+ interrupts = <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>;
+ cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ tz@8a600000 {
+ reg = <0x0 0x8a600000 0x0 0x200000>;
+ no-map;
+ };
+
+ smem@8a800000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x8a800000 0x0 0x32000>;
+ no-map;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x10 0>;
+
+ rng: rng@4c3000 {
+ compatible = "qcom,ipq5424-trng", "qcom,trng";
+ reg = <0 0x004c3000 0 0x1000>;
+ clocks = <&gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "core";
+ };
+
+ system-cache-controller@800000 {
+ compatible = "qcom,ipq5424-llcc";
+ reg = <0 0x00800000 0 0x200000>;
+ reg-names = "llcc0_base";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq5424-tlmm";
+ reg = <0 0x01000000 0 0x300000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 50>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart1_pins: uart1-state {
+ pins = "gpio43", "gpio44";
+ function = "uart1";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
+ gcc: clock-controller@1800000 {
+ compatible = "qcom,ipq5424-gcc";
+ reg = <0 0x01800000 0 0x40000>;
+ clocks = <&xo_board>,
+ <&sleep_clk>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #interconnect-cells = <1>;
+ };
+
+ tcsr_mutex: hwlock@1905000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0 0x01905000 0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-ipq5424", "syscon";
+ reg = <0 0x01937000 0 0x2a000>;
+ };
+
+ qupv3: geniqup@1ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0 0x01ac0000 0 0x2000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_AHB_MST_CLK>,
+ <&gcc GCC_QUPV3_AHB_SLV_CLK>;
+ clock-names = "m-ahb", "s-ahb";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ uart1: serial@1a84000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0 0x01a84000 0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_UART1_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ spi0: spi@1a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0 0x01a90000 0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_SPI0_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@1a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0 0x01a94000 0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_SPI1_CLK>;
+ clock-names = "se";
+ interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ sdhc: mmc@7804000 {
+ compatible = "qcom,ipq5424-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x07804000 0 0x1000>, <0 0x07805000 0 0x1000>;
+ reg-names = "hc", "cqhci";
+
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "core", "xo";
+
+ status = "disabled";
+ };
+
+ intc: interrupt-controller@f200000 {
+ compatible = "arm,gic-v3";
+ reg = <0 0xf200000 0 0x10000>, /* GICD */
+ <0 0xf240000 0 0x80000>; /* GICR * 4 regions */
+ #interrupt-cells = <0x3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ mbi-ranges = <672 128>;
+ msi-controller;
+ };
+
+ watchdog@f410000 {
+ compatible = "qcom,apss-wdt-ipq5424", "qcom,kpss-wdt";
+ reg = <0 0x0f410000 0 0x1000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&sleep_clk>;
+ };
+
+ qusb_phy_1: phy@71000 {
+ compatible = "qcom,ipq5424-qusb2-phy";
+ reg = <0 0x00071000 0 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB1_PHY_CFG_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_QUSB2_1_PHY_BCR>;
+ status = "disabled";
+ };
+
+ usb2: usb2@1e00000 {
+ compatible = "qcom,ipq5424-dwc3", "qcom,dwc3";
+ reg = <0 0x01ef8800 0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_USB1_MASTER_CLK>,
+ <&gcc GCC_USB1_SLEEP_CLK>,
+ <&gcc GCC_USB1_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB1_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_CNOC_USB_CLK>;
+
+ clock-names = "core",
+ "sleep",
+ "mock_utmi",
+ "iface",
+ "cfg_noc";
+
+ assigned-clocks = <&gcc GCC_USB1_MASTER_CLK>,
+ <&gcc GCC_USB1_MOCK_UTMI_CLK>;
+ assigned-clock-rates = <200000000>,
+ <24000000>;
+
+ interrupts-extended = <&intc GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 388 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "qusb2_phy",
+ "dm_hs_phy_irq",
+ "dp_hs_phy_irq";
+
+ resets = <&gcc GCC_USB1_BCR>;
+ qcom,select-utmi-as-pipe-clk;
+ status = "disabled";
+
+ dwc_1: usb@1e00000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x01e00000 0 0xe000>;
+ clocks = <&gcc GCC_USB1_MOCK_UTMI_CLK>;
+ clock-names = "ref";
+ interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&qusb_phy_1>;
+ phy-names = "usb2-phy";
+ tx-fifo-resize;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ };
+ };
+
+ qusb_phy_0: phy@7b000 {
+ compatible = "qcom,ipq5424-qusb2-phy";
+ reg = <0 0x0007b000 0 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
+ status = "disabled";
+ };
+
+ ssphy_0: phy@7d000 {
+ compatible = "qcom,ipq5424-qmp-usb3-phy";
+ reg = <0 0x0007d000 0 0xa00>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB0_AUX_CLK>,
+ <&xo_board>,
+ <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_USB0_PIPE_CLK>;
+ clock-names = "aux",
+ "ref",
+ "cfg_ahb",
+ "pipe";
+
+ resets = <&gcc GCC_USB0_PHY_BCR>,
+ <&gcc GCC_USB3PHY_0_PHY_BCR>;
+ reset-names = "phy",
+ "phy_phy";
+
+ #clock-cells = <0>;
+ clock-output-names = "usb0_pipe_clk";
+
+ status = "disabled";
+ };
+
+ usb3: usb3@8a00000 {
+ compatible = "qcom,ipq5424-dwc3", "qcom,dwc3";
+ reg = <0 0x08af8800 0 0x400>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_USB0_MASTER_CLK>,
+ <&gcc GCC_USB0_SLEEP_CLK>,
+ <&gcc GCC_USB0_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_CNOC_USB_CLK>;
+
+ clock-names = "core",
+ "sleep",
+ "mock_utmi",
+ "iface",
+ "cfg_noc";
+
+ assigned-clocks = <&gcc GCC_USB0_MASTER_CLK>,
+ <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+ assigned-clock-rates = <200000000>,
+ <24000000>;
+
+ interrupts-extended = <&intc GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "qusb2_phy",
+ "dm_hs_phy_irq",
+ "dp_hs_phy_irq";
+
+ resets = <&gcc GCC_USB_BCR>;
+ status = "disabled";
+
+ dwc_0: usb@8a00000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x08a00000 0 0xcd00>;
+ clocks = <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+ clock-names = "ref";
+ interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&qusb_phy_0>, <&ssphy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ tx-fifo-resize;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ };
+ };
+
+ timer@f420000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0 0xf420000 0 0x1000>;
+ ranges = <0 0 0 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ frame@f421000 {
+ reg = <0xf421000 0x1000>,
+ <0xf422000 0x1000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <0>;
+ };
+
+ frame@f423000 {
+ reg = <0xf423000 0x1000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <1>;
+ status = "disabled";
+ };
+
+ frame@f425000 {
+ reg = <0xf425000 0x1000>,
+ <0xf426000 0x1000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <2>;
+ status = "disabled";
+ };
+
+ frame@f427000 {
+ reg = <0xf427000 0x1000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <3>;
+ status = "disabled";
+ };
+
+ frame@f429000 {
+ reg = <0xf429000 0x1000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <4>;
+ status = "disabled";
+ };
+
+ frame@f42b000 {
+ reg = <0xf42b000 0x1000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <5>;
+ status = "disabled";
+ };
+
+ frame@f42d000 {
+ reg = <0xf42d000 0x1000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ frame-number = <6>;
+ status = "disabled";
+ };
+ };
+
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
index 91e104b0f865..ae12f069f26f 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
@@ -3,7 +3,7 @@
* IPQ9574 RDP board common device tree source
*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/dts-v1/;
@@ -164,6 +164,26 @@
status = "okay";
};
+/*
+ * The bootstrap pins on the board select the XO clock frequency
+ * (48 MHz or 96 MHz, depending on the RDP board type). These settings
+ * apply the matching dividers so that the reference clock output from
+ * WiFi to the CMN PLL is 48 MHz.
+ */
+&ref_48mhz_clk {
+ clock-div = <1>;
+ clock-mult = <1>;
+};
+
+/*
+ * The frequency of xo_board_clk is fixed at 24 MHz; it is derived from
+ * the 48 MHz WiFi output clock divided by 2.
+ */
&xo_board_clk {
- clock-frequency = <24000000>;
+ clock-div = <2>;
+ clock-mult = <1>;
+};
+
+&xo_clk {
+ clock-frequency = <48000000>;
};
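
Note (illustration only, not part of this patch): on a hypothetical RDP variant whose bootstrap pins select a 96 MHz WiFi output clock, the same fixed-factor chain would be configured roughly as sketched below, so the CMN PLL still receives a 48 MHz reference and xo_board_clk still resolves to 48 MHz / 2 = 24 MHz. The node labels (xo_clk, ref_48mhz_clk) are the ones introduced in ipq9574.dtsi above; the 96 MHz value is the alternative mentioned in the comment.

/* hypothetical 96 MHz RDP variant */
&xo_clk {
	clock-frequency = <96000000>;
};

/* divide 96 MHz down to the 48 MHz reference expected by the CMN PLL */
&ref_48mhz_clk {
	clock-div = <2>;
	clock-mult = <1>;
};

/* xo_board_clk keeps clock-div = <2>, i.e. 48 MHz / 2 = 24 MHz as on 48 MHz boards */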
diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
index 1bb8d96c9a82..165ebbb59511 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
@@ -8,6 +8,7 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "ipq9574-rdp-common.dtsi"
/ {
@@ -15,6 +16,45 @@
compatible = "qcom,ipq9574-ap-al02-c7", "qcom,ipq9574";
};
+&pcie1_phy {
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-0 = <&pcie1_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 26 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 27 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pcie2_phy {
+ status = "okay";
+};
+
+&pcie2 {
+ pinctrl-0 = <&pcie2_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 29 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pcie3_phy {
+ status = "okay";
+};
+
+&pcie3 {
+ pinctrl-0 = <&pcie3_default>;
+ pinctrl-names = "default";
+
+ perst-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 33 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
&sdhc_1 {
pinctrl-0 = <&sdc_default_state>;
pinctrl-names = "default";
@@ -28,6 +68,79 @@
};
&tlmm {
+
+ pcie1_default: pcie1-default-state {
+ clkreq-n-pins {
+ pins = "gpio25";
+ function = "pcie1_clk";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio26";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio27";
+ function = "pcie1_wake";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ pcie2_default: pcie2-default-state {
+ clkreq-n-pins {
+ pins = "gpio28";
+ function = "pcie2_clk";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio29";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio30";
+ function = "pcie2_wake";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ pcie3_default: pcie3-default-state {
+ clkreq-n-pins {
+ pins = "gpio31";
+ function = "pcie3_clk";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio32";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ output-low;
+ };
+
+ wake-n-pins {
+ pins = "gpio33";
+ function = "pcie3_wake";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
sdc_default_state: sdc-default-state {
clk-pins {
pins = "gpio5";
diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
index d1fd35ebc4a2..942290028972 100644
--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
@@ -3,10 +3,11 @@
* IPQ9574 SoC device tree source
*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <dt-bindings/clock/qcom,apss-ipq.h>
+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
#include <dt-bindings/clock/qcom,ipq9574-gcc.h>
#include <dt-bindings/interconnect/qcom,ipq9574.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -19,12 +20,24 @@
#size-cells = <2>;
clocks {
+ ref_48mhz_clk: ref-48mhz-clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&xo_clk>;
+ #clock-cells = <0>;
+ };
+
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
};
xo_board_clk: xo-board-clk {
+ compatible = "fixed-factor-clock";
+ clocks = <&ref_48mhz_clk>;
+ #clock-cells = <0>;
+ };
+
+ xo_clk: xo-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
};
@@ -226,8 +239,54 @@
reg = <0x00060000 0x6000>;
};
+ pcie0_phy: phy@84000 {
+ compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+ reg = <0x00084000 0x1000>;
+
+ clocks = <&gcc GCC_PCIE0_AUX_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE0_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie0_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie2_phy: phy@8c000 {
+ compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+ reg = <0x0008c000 0x2000>;
+
+ clocks = <&gcc GCC_PCIE2_AUX_CLK>,
+ <&gcc GCC_PCIE2_AHB_CLK>,
+ <&gcc GCC_PCIE2_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE2_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE2_PHY_BCR>,
+ <&gcc GCC_PCIE2PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie2_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
rng: rng@e3000 {
- compatible = "qcom,prng-ee";
+ compatible = "qcom,ipq9574-trng", "qcom,trng";
reg = <0x000e3000 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
@@ -243,6 +302,64 @@
status = "disabled";
};
+ pcie3_phy: phy@f4000 {
+ compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+ reg = <0x000f4000 0x2000>;
+
+ clocks = <&gcc GCC_PCIE3_AUX_CLK>,
+ <&gcc GCC_PCIE3_AHB_CLK>,
+ <&gcc GCC_PCIE3_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE3_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE3_PHY_BCR>,
+ <&gcc GCC_PCIE3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie3_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ pcie1_phy: phy@fc000 {
+ compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+ reg = <0x000fc000 0x1000>;
+
+ clocks = <&gcc GCC_PCIE1_AUX_CLK>,
+ <&gcc GCC_PCIE1_AHB_CLK>,
+ <&gcc GCC_PCIE1_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "pipe";
+
+ assigned-clocks = <&gcc GCC_PCIE1_AUX_CLK>;
+ assigned-clock-rates = <20000000>;
+
+ resets = <&gcc GCC_PCIE1_PHY_BCR>,
+ <&gcc GCC_PCIE1PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ #clock-cells = <0>;
+ clock-output-names = "gcc_pcie1_pipe_clk_src";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ cmn_pll: clock-controller@9b000 {
+ compatible = "qcom,ipq9574-cmn-pll";
+ reg = <0x0009b000 0x800>;
+ clocks = <&ref_48mhz_clk>,
+ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
+ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
+ clock-names = "ref", "ahb", "sys";
+ #clock-cells = <1>;
+ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
+ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
+ };
+
qfprom: efuse@a4000 {
compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
reg = <0x000a4000 0x5a1>;
@@ -309,10 +426,10 @@
clocks = <&xo_board_clk>,
<&sleep_clk>,
<0>,
- <0>,
- <0>,
- <0>,
- <0>,
+ <&pcie0_phy>,
+ <&pcie1_phy>,
+ <&pcie2_phy>,
+ <&pcie3_phy>,
<0>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -756,6 +873,326 @@
status = "disabled";
};
};
+
+ pcie1: pcie@10000000 {
+ compatible = "qcom,pcie-ipq9574";
+ reg = <0x10000000 0xf1d>,
+ <0x10000f20 0xa8>,
+ <0x10001000 0x1000>,
+ <0x000f8000 0x4000>,
+ <0x10100000 0x1000>;
+ reg-names = "dbi", "elbi", "atu", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x10200000 0x0 0x100000>,
+ <0x02000000 0x0 0x10300000 0x10300000 0x0 0x7d00000>;
+
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 49 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 84 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 85 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE1_AXI_M_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_CLK>,
+ <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE1_RCHNG_CLK>,
+ <&gcc GCC_PCIE1_AHB_CLK>,
+ <&gcc GCC_PCIE1_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ resets = <&gcc GCC_PCIE1_PIPE_ARES>,
+ <&gcc GCC_PCIE1_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE1_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE1_AXI_S_ARES>,
+ <&gcc GCC_PCIE1_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE1_AXI_M_ARES>,
+ <&gcc GCC_PCIE1_AUX_ARES>,
+ <&gcc GCC_PCIE1_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE1 &gcc SLAVE_ANOC_PCIE1>,
+ <&gcc MASTER_SNOC_PCIE1 &gcc SLAVE_SNOC_PCIE1>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+ status = "disabled";
+ };
+
+ pcie3: pcie@18000000 {
+ compatible = "qcom,pcie-ipq9574";
+ reg = <0x18000000 0xf1d>,
+ <0x18000f20 0xa8>,
+ <0x18001000 0x1000>,
+ <0x000f0000 0x4000>,
+ <0x18100000 0x1000>;
+ reg-names = "dbi", "elbi", "atu", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <3>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,
+ <0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>;
+
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 189 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 190 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 191 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 192 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE3_AXI_M_CLK>,
+ <&gcc GCC_PCIE3_AXI_S_CLK>,
+ <&gcc GCC_PCIE3_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE3_RCHNG_CLK>,
+ <&gcc GCC_PCIE3_AHB_CLK>,
+ <&gcc GCC_PCIE3_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ resets = <&gcc GCC_PCIE3_PIPE_ARES>,
+ <&gcc GCC_PCIE3_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE3_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE3_AXI_S_ARES>,
+ <&gcc GCC_PCIE3_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE3_AXI_M_ARES>,
+ <&gcc GCC_PCIE3_AUX_ARES>,
+ <&gcc GCC_PCIE3_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie3_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE3 &gcc SLAVE_ANOC_PCIE3>,
+ <&gcc MASTER_SNOC_PCIE3 &gcc SLAVE_SNOC_PCIE3>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+ status = "disabled";
+ };
+
+ pcie2: pcie@20000000 {
+ compatible = "qcom,pcie-ipq9574";
+ reg = <0x20000000 0xf1d>,
+ <0x20000f20 0xa8>,
+ <0x20001000 0x1000>,
+ <0x00088000 0x4000>,
+ <0x20100000 0x1000>;
+ reg-names = "dbi", "elbi", "atu", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <2>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <2>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x20200000 0x0 0x100000>,
+ <0x02000000 0x0 0x20300000 0x20300000 0x0 0x7d00000>;
+
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 164 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 165 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 186 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 187 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE2_AXI_M_CLK>,
+ <&gcc GCC_PCIE2_AXI_S_CLK>,
+ <&gcc GCC_PCIE2_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE2_RCHNG_CLK>,
+ <&gcc GCC_PCIE2_AHB_CLK>,
+ <&gcc GCC_PCIE2_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ resets = <&gcc GCC_PCIE2_PIPE_ARES>,
+ <&gcc GCC_PCIE2_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE2_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE2_AXI_S_ARES>,
+ <&gcc GCC_PCIE2_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE2_AXI_M_ARES>,
+ <&gcc GCC_PCIE2_AUX_ARES>,
+ <&gcc GCC_PCIE2_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie2_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE2 &gcc SLAVE_ANOC_PCIE2>,
+ <&gcc MASTER_SNOC_PCIE2 &gcc SLAVE_SNOC_PCIE2>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+ status = "disabled";
+ };
+
+ pcie0: pci@28000000 {
+ compatible = "qcom,pcie-ipq9574";
+ reg = <0x28000000 0xf1d>,
+ <0x28000f20 0xa8>,
+ <0x28001000 0x1000>,
+ <0x00080000 0x4000>,
+ <0x28100000 0x1000>;
+ reg-names = "dbi", "elbi", "atu", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x28200000 0x0 0x100000>,
+ <0x02000000 0x0 0x28300000 0x28300000 0x0 0x7d00000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 75 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 78 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 79 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE0_AXI_M_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
+ <&gcc GCC_PCIE0_RCHNG_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_AUX_CLK>;
+ clock-names = "axi_m",
+ "axi_s",
+ "axi_bridge",
+ "rchng",
+ "ahb",
+ "aux";
+
+ resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_S_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_S_ARES>,
+ <&gcc GCC_PCIE0_AXI_M_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_M_ARES>,
+ <&gcc GCC_PCIE0_AUX_ARES>,
+ <&gcc GCC_PCIE0_AHB_ARES>;
+ reset-names = "pipe",
+ "sticky",
+ "axi_s_sticky",
+ "axi_s",
+ "axi_m_sticky",
+ "axi_m",
+ "aux",
+ "ahb";
+
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+ interconnects = <&gcc MASTER_ANOC_PCIE0 &gcc SLAVE_ANOC_PCIE0>,
+ <&gcc MASTER_SNOC_PCIE0 &gcc SLAVE_SNOC_PCIE0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+ status = "disabled";
+ };
+
};
thermal-zones {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
index 5ce8f1350abc..caad1dead2e0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
@@ -321,6 +321,41 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdss_default>;
+ pinctrl-1 = <&mdss_sleep>;
+
+ panel@0 {
+ compatible = "samsung,s6e88a0-ams427ap24";
+ reg = <0>;
+
+ vdd3-supply = <&pm8916_l17>;
+ vci-supply = <&pm8916_l6>;
+ reset-gpios = <&tlmm 25 GPIO_ACTIVE_LOW>;
+ flip-horizontal;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&panel_in>;
+};
+
&mpss_mem {
reg = <0x0 0x86800000 0x0 0x5a00000>;
};
@@ -330,6 +365,13 @@
linux,code = <KEY_VOLUMEDOWN>;
};
+&pm8916_rpm_regulators {
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+};
+
&pm8916_vib {
status = "okay";
};
@@ -425,6 +467,22 @@
bias-disable;
};
+ mdss_default: mdss-default-state {
+ pins = "gpio25";
+ function = "gpio";
+
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ mdss_sleep: mdss-sleep-state {
+ pins = "gpio25";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
muic_i2c_default: muic-i2c-default-state {
pins = "gpio105", "gpio106";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 5e558bcc9d87..8f35c9af1878 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -125,7 +125,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ clock-frequency = <32764>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts b/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts
new file mode 100644
index 000000000000..f1d22535fedd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917-xiaomi-riva.dts
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Barnabas Czeman
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "msm8917.dtsi"
+#include "pm8937.dtsi"
+
+/delete-node/ &qseecom_mem;
+
+/ {
+ model = "Xiaomi Redmi 5A (riva)";
+ compatible = "xiaomi,riva", "qcom,msm8917";
+ chassis-type = "handset";
+
+ qcom,msm-id = <QCOM_ID_MSM8917 0>;
+ qcom,board-id = <0x1000b 2>, <0x2000b 2>;
+
+ battery: battery {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <3000000>;
+ energy-full-design-microwatt-hours = <11500000>;
+ constant-charge-current-max-microamp = <1000000>;
+ constant-charge-voltage-max-microvolt = <4400000>;
+ precharge-current-microamp = <256000>;
+ charge-term-current-microamp = <60000>;
+ voltage-min-design-microvolt = <3400000>;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ stdout-path = "framebuffer0";
+
+ framebuffer0: framebuffer@90001000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0x90001000 0x0 (720 * 1280 * 3)>;
+ width = <720>;
+ height = <1280>;
+ stride = <(720 * 3)>;
+ format = "r8g8b8";
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>;
+ power-domains = <&gcc MDSS_GDSC>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&gpio_keys_default>;
+ pinctrl-names = "default";
+
+ key-volup {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&tlmm 91 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ };
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ qseecom_mem: qseecom@84a00000 {
+ reg = <0x0 0x84a00000 0x0 0x1900000>;
+ no-map;
+ };
+
+ framebuffer_mem: memory@90001000 {
+ reg = <0x0 0x90001000 0x0 (720 * 1280 * 3)>;
+ no-map;
+ };
+ };
+};
+
+&blsp1_i2c3 {
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5306";
+ reg = <0x38>;
+ interrupts-extended = <&tlmm 65 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&tsp_int_rst_default>;
+ pinctrl-names = "default";
+ vcc-supply = <&pm8937_l10>;
+ iovcc-supply = <&pm8937_l5>;
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+ };
+};
+
+&blsp2_i2c1 {
+ status = "okay";
+
+ bq27426@55 {
+ compatible = "ti,bq27426";
+ reg = <0x55>;
+ monitored-battery = <&battery>;
+ };
+
+ bq25601@6b {
+ compatible = "ti,bq25601";
+ reg = <0x6b>;
+ interrupts-extended = <&tlmm 61 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-0 = <&bq25601_int_default>;
+ pinctrl-names = "default";
+ input-voltage-limit-microvolt = <4400000>;
+ input-current-limit-microamp = <1000000>;
+ monitored-battery = <&battery>;
+ };
+};
+
+&pm8937_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&rpm_requests {
+ regulators-0 {
+ compatible = "qcom,rpm-pm8937-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+
+ vdd_l1_l19-supply = <&pm8937_s3>;
+ vdd_l2_l23-supply = <&pm8937_s3>;
+ vdd_l3-supply = <&pm8937_s3>;
+ vdd_l4_l5_l6_l7_l16-supply = <&pm8937_s4>;
+ vdd_l8_l11_l12_l17_l22-supply = <&vph_pwr>;
+ vdd_l9_l10_l13_l14_l15_l18-supply = <&vph_pwr>;
+
+ pm8937_s1: s1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pm8937_s3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8937_s4: s4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pm8937_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8937_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8937_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8937_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8937_l8: l8 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ pm8937_l9: l9 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8937_l10: l10 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8937_l11: l11 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ pm8937_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8937_l13: l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pm8937_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8937_l15: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pm8937_l16: l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8937_l17: l17 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ pm8937_l19: l19 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ pm8937_l22: l22 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ pm8937_l23: l23 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ };
+
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8937_l8>;
+ vqmmc-supply = <&pm8937_l5>;
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 67 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&pm8937_l11>;
+ vqmmc-supply = <&pm8937_l12>;
+ pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;
+ pinctrl-names = "default", "sleep";
+
+ status = "okay";
+};
+
+&sleep_clk {
+ clock-frequency = <32768>;
+};
+
+&tlmm {
+ bq25601_int_default: bq25601-int-default-state {
+ pins = "gpio61";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ gpio_keys_default: gpio-keys-default-state {
+ pins = "gpio91";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ sdc2_cd_default: sdc2-cd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tsp_int_rst_default: tsp-int-rst-default-state {
+ pins = "gpio64", "gpio65";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
+
+&wcnss {
+ vddpx-supply = <&pm8937_l5>;
+
+ status = "okay";
+};
+
+&wcnss_iris {
+ compatible = "qcom,wcn3620";
+ vddxo-supply = <&pm8937_l7>;
+ vddrfa-supply = <&pm8937_l19>;
+ vddpa-supply = <&pm8937_l9>;
+ vdddig-supply = <&pm8937_l5>;
+};
+
+&wcnss_mem {
+ status = "okay";
+};
+
+&xo_board {
+ clock-frequency = <19200000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
new file mode 100644
index 000000000000..7bf58dd0146e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -0,0 +1,1954 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <dt-bindings/clock/qcom,gcc-msm8917.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@100 {
+ compatible = "arm,cortex-a53";
+ reg = <0x100>;
+ device_type = "cpu";
+ next-level-cache = <&l2_0>;
+ enable-method = "psci";
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
+
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ };
+ };
+
+ cpu1: cpu@101 {
+ compatible = "arm,cortex-a53";
+ reg = <0x101>;
+ device_type = "cpu";
+ next-level-cache = <&l2_0>;
+ enable-method = "psci";
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
+ };
+
+ cpu2: cpu@102 {
+ compatible = "arm,cortex-a53";
+ reg = <0x102>;
+ device_type = "cpu";
+ next-level-cache = <&l2_0>;
+ enable-method = "psci";
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
+ };
+
+ cpu3: cpu@103 {
+ compatible = "arm,cortex-a53";
+ reg = <0x103>;
+ device_type = "cpu";
+ next-level-cache = <&l2_0>;
+ enable-method = "psci";
+ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ domain-idle-states {
+ cluster_sleep_0: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000053>;
+ entry-latency-us = <700>;
+ exit-latency-us = <1000>;
+ min-residency-us = <6500>;
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ cpu_sleep_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "standalone-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <125>;
+ exit-latency-us = <180>;
+ min-residency-us = <595>;
+ local-timer-stop;
+ };
+ };
+
+ cpu_opp_table: opp-table-cpu {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+ };
+
+ opp-1094400000 {
+ opp-hz = /bits/ 64 <1094400000>;
+ };
+
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+ };
+
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm-msm8916", "qcom,scm";
+ clocks = <&gcc GCC_CRYPTO_CLK>,
+ <&gcc GCC_CRYPTO_AXI_CLK>,
+ <&gcc GCC_CRYPTO_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ #reset-cells = <1>;
+
+ qcom,dload-mode = <&tcsr 0x6100>;
+ };
+ };
+
+ memory@80000000 {
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0x80000000 0 0>;
+ device_type = "memory";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cluster_pd: power-domain-cluster {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&cluster_sleep_0>;
+ };
+
+ cpu_pd0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>;
+ };
+
+ cpu_pd1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>;
+ };
+
+ cpu_pd2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>;
+ };
+
+ cpu_pd3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>;
+ };
+ };
+
+ rpm: remoteproc {
+ compatible = "qcom,msm8917-rpm-proc", "qcom,rpm-proc";
+
+ smd-edge {
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests: rpm-requests {
+ compatible = "qcom,rpm-msm8917", "qcom,smd-rpm";
+ qcom,smd-channels = "rpm_requests";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8917", "qcom,rpmcc";
+ #clock-cells = <1>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ };
+
+ rpmpd: power-controller {
+ compatible = "qcom,msm8917-rpmpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmpd_opp_table>;
+
+ rpmpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmpd_opp_ret: opp1 {
+ opp-level = <RPM_SMD_LEVEL_RETENTION>;
+ };
+
+ rpmpd_opp_ret_plus: opp2 {
+ opp-level = <RPM_SMD_LEVEL_RETENTION_PLUS>;
+ };
+
+ rpmpd_opp_min_svs: opp3 {
+ opp-level = <RPM_SMD_LEVEL_MIN_SVS>;
+ };
+
+ rpmpd_opp_low_svs: opp4 {
+ opp-level = <RPM_SMD_LEVEL_LOW_SVS>;
+ };
+
+ rpmpd_opp_svs: opp5 {
+ opp-level = <RPM_SMD_LEVEL_SVS>;
+ };
+
+ rpmpd_opp_svs_plus: opp6 {
+ opp-level = <RPM_SMD_LEVEL_SVS_PLUS>;
+ };
+
+ rpmpd_opp_nom: opp7 {
+ opp-level = <RPM_SMD_LEVEL_NOM>;
+ };
+
+ rpmpd_opp_nom_plus: opp8 {
+ opp-level = <RPM_SMD_LEVEL_NOM_PLUS>;
+ };
+
+ rpmpd_opp_turbo: opp9 {
+ opp-level = <RPM_SMD_LEVEL_TURBO>;
+ };
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ qseecom_mem: qseecom@85b00000 {
+ reg = <0x0 0x85b00000 0x0 0x800000>;
+ no-map;
+ };
+
+ smem@86300000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x86300000 0x0 0x100000>;
+ no-map;
+
+ hwlocks = <&tcsr_mutex 3>;
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ };
+
+ reserved@86400000 {
+ reg = <0x0 0x86400000 0x0 0x400000>;
+ no-map;
+ };
+
+ rmtfs@92100000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0 0x92100000 0x0 0x180000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ };
+
+ adsp_mem: adsp {
+ size = <0x0 0x1100000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x0 0x86800000 0x0 0x8000000>;
+ no-map;
+ status = "disabled";
+ };
+
+ mba_mem: mba {
+ size = <0x0 0x100000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x0 0x86800000 0x0 0x8000000>;
+ no-map;
+ status = "disabled";
+ };
+
+ venus_mem: venus {
+ size = <0x0 0x400000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x0 0x86800000 0x0 0x8000000>;
+ no-map;
+ status = "disabled";
+ };
+
+ wcnss_mem: wcnss {
+ size = <0x0 0x700000>;
+ alignment = <0x0 0x100000>;
+ alloc-ranges = <0x0 0x86800000 0x0 0x8000000>;
+ no-map;
+ status = "disabled";
+ };
+ };
+
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <GIC_SPI 291 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-wcnss {
+ compatible = "qcom,smp2p";
+ qcom,smem = <451>, <431>;
+
+ interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+
+ wcnss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ wcnss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smsm {
+ compatible = "qcom,smsm";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mboxes = <0>, <&apcs 13>, <0>, <&apcs 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ hexagon_smsm: hexagon@1 {
+ reg = <1>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wcnss_smsm: wcnss@6 {
+ reg = <6>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ ranges = <0 0 0 0xffffffff>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ rpm_msg_ram: sram@60000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x00060000 0x8000>;
+ };
+
+ usb_hs_phy: phy@6c000 {
+ compatible = "qcom,usb-hs-28nm-femtophy";
+ reg = <0x0006c000 0x200>;
+ #phy-cells = <0>;
+ clocks = <&xo_board>,
+ <&gcc GCC_USB_HS_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+ clock-names = "ref", "ahb", "sleep";
+ resets = <&gcc GCC_QUSB2_PHY_BCR>,
+ <&gcc GCC_USB2_HS_PHY_ONLY_BCR>;
+ reset-names = "phy", "por";
+ status = "disabled";
+ };
+
+ qfprom: qfprom@a4000 {
+ compatible = "qcom,msm8917-qfprom", "qcom,qfprom";
+ reg = <0x000a4000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tsens_base1: base1@1d8 {
+ reg = <0x1d8 1>;
+ bits = <0 8>;
+ };
+
+ tsens_s5_p1: s5-p1@1d9 {
+ reg = <0x1d9 1>;
+ bits = <0 6>;
+ };
+
+ tsens_s5_p2: s5-p2@1d9 {
+ reg = <0x1d9 2>;
+ bits = <6 6>;
+ };
+
+ tsens_s6_p1: s6-p1@1da {
+ reg = <0x1da 2>;
+ bits = <4 6>;
+ };
+
+ tsens_s6_p2: s6-p2@1db {
+ reg = <0x1db 1>;
+ bits = <2 6>;
+ };
+
+ tsens_s7_p1: s7-p1@1dc {
+ reg = <0x1dc 1>;
+ bits = <0 6>;
+ };
+
+ tsens_s7_p2: s7-p2@1dc {
+ reg = <0x1dc 2>;
+ bits = <6 6>;
+ };
+
+ tsens_s8_p1: s8-p1@1dd {
+ reg = <0x1dd 2>;
+ bits = <4 6>;
+ };
+
+ tsens_s8_p2: s8-p2@1de {
+ reg = <0x1de 1>;
+ bits = <2 6>;
+ };
+
+ tsens_base2: base2@1df {
+ reg = <0x1df 1>;
+ bits = <0 8>;
+ };
+
+ tsens_mode: mode@210 {
+ reg = <0x210 1>;
+ bits = <0 3>;
+ };
+
+ tsens_s0_p1: s0-p1@210 {
+ reg = <0x210 2>;
+ bits = <3 6>;
+ };
+
+ tsens_s0_p2: s0-p2@211 {
+ reg = <0x211 1>;
+ bits = <1 6>;
+ };
+
+ tsens_s1_p1: s1-p1@211 {
+ reg = <0x211 2>;
+ bits = <7 6>;
+ };
+
+ tsens_s1_p2: s1-p2@212 {
+ reg = <0x212 2>;
+ bits = <5 6>;
+ };
+
+ tsens_s2_p1: s2-p1@213 {
+ reg = <0x213 2>;
+ bits = <3 6>;
+ };
+
+ tsens_s2_p2: s2-p2@214 {
+ reg = <0x214 1>;
+ bits = <1 6>;
+ };
+
+ tsens_s3_p1: s3-p1@214 {
+ reg = <0x214 2>;
+ bits = <7 6>;
+ };
+
+ tsens_s3_p2: s3-p2@215 {
+ reg = <0x215 2>;
+ bits = <5 6>;
+ };
+
+ tsens_s4_p1: s4-p1@216 {
+ reg = <0x216 2>;
+ bits = <3 6>;
+ };
+
+ tsens_s4_p2: s4-p2@217 {
+ reg = <0x217 1>;
+ bits = <1 6>;
+ };
+
+ tsens_s9_p1: s9-p1@230 {
+ reg = <0x230 1>;
+ bits = <0 6>;
+ };
+
+ tsens_s9_p2: s9-p2@230 {
+ reg = <0x230 2>;
+ bits = <6 6>;
+ };
+
+ tsens_s10_p1: s10-p1@231 {
+ reg = <0x231 2>;
+ bits = <4 6>;
+ };
+
+ tsens_s10_p2: s10-p2@232 {
+ reg = <0x232 1>;
+ bits = <2 6>;
+ };
+ };
+
+ rng@e3000 {
+ compatible = "qcom,prng";
+ reg = <0x000e3000 0x1000>;
+ clocks = <&gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "core";
+ };
+
+ tsens: thermal-sensor@4a9000 {
+ compatible = "qcom,msm8937-tsens", "qcom,tsens-v1";
+ reg = <0x004a9000 0x1000>,
+ <0x004a8000 0x1000>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
+ nvmem-cells = <&tsens_mode>,
+ <&tsens_base1>, <&tsens_base2>,
+ <&tsens_s0_p1>, <&tsens_s0_p2>,
+ <&tsens_s1_p1>, <&tsens_s1_p2>,
+ <&tsens_s2_p1>, <&tsens_s2_p2>,
+ <&tsens_s3_p1>, <&tsens_s3_p2>,
+ <&tsens_s4_p1>, <&tsens_s4_p2>,
+ <&tsens_s5_p1>, <&tsens_s5_p2>,
+ <&tsens_s6_p1>, <&tsens_s6_p2>,
+ <&tsens_s7_p1>, <&tsens_s7_p2>,
+ <&tsens_s8_p1>, <&tsens_s8_p2>,
+ <&tsens_s9_p1>, <&tsens_s9_p2>,
+ <&tsens_s10_p1>, <&tsens_s10_p2>;
+ nvmem-cell-names = "mode",
+ "base1", "base2",
+ "s0_p1", "s0_p2",
+ "s1_p1", "s1_p2",
+ "s2_p1", "s2_p2",
+ "s3_p1", "s3_p2",
+ "s4_p1", "s4_p2",
+ "s5_p1", "s5_p2",
+ "s6_p1", "s6_p2",
+ "s7_p1", "s7_p2",
+ "s8_p1", "s8_p2",
+ "s9_p1", "s9_p2",
+ "s10_p1", "s10_p2";
+ #qcom,sensors = <11>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ restart@4ab000 {
+ compatible = "qcom,pshold";
+ reg = <0x004ab000 0x4>;
+ };
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8917-pinctrl";
+ reg = <0x01000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ gpio-ranges = <&tlmm 0 0 134>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1_i2c2_default: blsp1-i2c2-default-state {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c2_sleep: blsp1-i2c2-sleep-state {
+ pins = "gpio6", "gpio7";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c3_default: blsp1-i2c3-default-state {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c3_sleep: blsp1-i2c3-sleep-state {
+ pins = "gpio10", "gpio11";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c4_default: blsp1-i2c4-default-state {
+ pins = "gpio14", "gpio15";
+ function = "blsp_i2c4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c4_sleep: blsp1-i2c4-sleep-state {
+ pins = "gpio14", "gpio15";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c1_default: blsp2-i2c1-default-state {
+ pins = "gpio18", "gpio19";
+ function = "blsp_i2c5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c1_sleep: blsp2-i2c1-sleep-state {
+ pins = "gpio18", "gpio19";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_spi3_default: blsp1-spi3-default-state {
+ cs-pins {
+ pins = "gpio10";
+ function = "blsp_spi3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ spi-pins {
+ pins = "gpio8", "gpio9", "gpio11";
+ function = "blsp_spi3";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ };
+
+ blsp1_spi3_sleep: blsp1-spi3-sleep-state {
+ cs-pins {
+ pins = "gpio10";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ spi-pins {
+ pins = "gpio8", "gpio9", "gpio11";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ blsp2_spi2_default: blsp2-spi2-default-state {
+ cs0-pins {
+ pins = "gpio47";
+ function = "blsp_spi6";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cs1-pins {
+ pins = "gpio22";
+ function = "blsp_spi6";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ spi-pins {
+ pins = "gpio20", "gpio21", "gpio23";
+ function = "blsp_spi6";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_spi2_sleep: blsp2-spi2-sleep-state {
+ cs0-pins {
+ pins = "gpio47";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cs1-pins {
+ pins = "gpio22";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ spi-pins {
+ pins = "gpio20", "gpio21", "gpio23";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ blsp1_uart1_default: blsp1-uart1-default-state {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "blsp_uart1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_uart1_sleep: blsp1-uart1-sleep-state {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_uart2_default: blsp1-uart2-default-state {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_uart2_sleep: blsp1-uart2-sleep-state {
+ pins = "gpio4", "gpio5";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ sdc1_default: sdc1-default-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_sleep: sdc1-sleep-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_default: sdc2-default-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+
+ sdc2_sleep: sdc2-sleep-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ wcnss_pin_a: wcnss-active-state {
+ wcss-wlan-pins {
+ pins = "gpio79", "gpio80";
+ function = "wcss_wlan";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan0-pins {
+ pins = "gpio78";
+ function = "wcss_wlan0";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan1-pins {
+ pins = "gpio77";
+ function = "wcss_wlan1";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ wcss-wlan2-pins {
+ pins = "gpio76";
+ function = "wcss_wlan2";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ gcc: clock-controller@1800000 {
+ compatible = "qcom,gcc-msm8917";
+ reg = <0x01800000 0x80000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&xo_board>,
+ <&sleep_clk>,
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy 0>;
+ clock-names = "xo",
+ "sleep_clk",
+ "dsi0pll",
+ "dsi0pllbyte";
+ };
+
+ tcsr_mutex: hwlock@1905000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x01905000 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-msm8917", "syscon";
+ reg = <0x01937000 0x30000>;
+ };
+
+ mdss: display-subsystem@1a00000 {
+ compatible = "qcom,mdss";
+ reg = <0x01a00000 0x1000>,
+ <0x01ab0000 0x1040>;
+ reg-names = "mdss_phys", "vbif_phys";
+ ranges;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface",
+ "bus",
+ "vsync";
+
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ status = "disabled";
+
+ mdp: display-controller@1a01000 {
+ compatible = "qcom,msm8917-mdp5", "qcom,mdp5";
+ reg = <0x01a01000 0x89000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface",
+ "bus",
+ "core",
+ "vsync";
+
+ iommus = <&apps_iommu 0x15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdp5_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+ };
+ };
+
+ mdss_dsi0: dsi@1a94000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0x01a94000 0x300>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ assigned-clocks = <&gcc BYTE0_CLK_SRC>,
+ <&gcc PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>;
+ clock-names = "mdp_core",
+ "iface",
+ "bus",
+ "byte",
+ "pixel",
+ "core";
+ phys = <&mdss_dsi0_phy>;
+
+ operating-points-v2 = <&mdss_dsi0_opp_table>;
+ power-domains = <&rpmpd MSM8917_VDDCX>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&mdp5_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss_dsi0_out: endpoint {
+ };
+ };
+ };
+
+ mdss_dsi0_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-125000000 {
+ opp-hz = /bits/ 64 <125000000>;
+ required-opps = <&rpmpd_opp_svs>;
+ };
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmpd_opp_nom>;
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@1a94a00 {
+ compatible = "qcom,dsi-phy-28nm-8937";
+ reg = <0x01a94a00 0xd4>,
+ <0x01a94400 0x280>,
+ <0x01a94b80 0x30>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "ref";
+ };
+ };
+
+ apps_iommu: iommu@1e20000 {
+ compatible = "qcom,msm8917-iommu", "qcom,msm-iommu-v1";
+ ranges = <0 0x01e20000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface", "bus";
+
+ qcom,iommu-secure-id = <17>;
+
+ /* VFE */
+ iommu-ctx@14000 {
+ compatible = "qcom,msm-iommu-v1-ns";
+ reg = <0x14000 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* MDP_0 */
+ iommu-ctx@15000 {
+ compatible = "qcom,msm-iommu-v1-ns";
+ reg = <0x15000 0x1000>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ /* VENUS_NS */
+ iommu-ctx@16000 {
+ compatible = "qcom,msm-iommu-v1-ns";
+ reg = <0x16000 0x1000>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpu_iommu: iommu@1f08000 {
+ compatible = "qcom,msm8917-iommu", "qcom,msm-iommu-v1";
+ ranges = <0 0x01f08000 0x10000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_GFX_TCU_CLK>;
+ clock-names = "iface", "bus";
+ qcom,iommu-secure-id = <18>;
+
+ iommu-ctx@0 {
+ compatible = "qcom,msm-iommu-v2-ns";
+ reg = <0 0x1000>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpu: gpu@1c00000 {
+ compatible = "qcom,adreno-306.32", "qcom,adreno";
+ reg = <0x01c00000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names = "core",
+ "iface",
+ "mem_iface",
+ "alt_mem_iface",
+ "gfx3d";
+ clocks = <&gcc GCC_OXILI_GFX3D_CLK>,
+ <&gcc GCC_OXILI_AHB_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_BIMC_GPU_CLK>,
+ <&gcc GFX3D_CLK_SRC>;
+ power-domains = <&gcc OXILI_GX_GDSC>;
+ operating-points-v2 = <&gpu_opp_table>;
+ #cooling-cells = <2>;
+
+ iommus = <&gpu_iommu 0>;
+
+ status = "disabled";
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-19200000 {
+ opp-hz = /bits/ 64 <19200000>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ };
+
+ opp-484800000 {
+ opp-hz = /bits/ 64 <484800000>;
+ };
+
+ opp-523200000 {
+ opp-hz = /bits/ 64 <523200000>;
+ };
+
+ opp-598000000 {
+ opp-hz = /bits/ 64 <598000000>;
+ };
+ };
+ };
+
+ spmi_bus: spmi@200f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0200f000 0x001000>,
+ <0x02400000 0x800000>,
+ <0x02c00000 0x800000>,
+ <0x03800000 0x200000>,
+ <0x0200a000 0x002100>;
+ reg-names = "core",
+ "chnls",
+ "obsrvr",
+ "intr",
+ "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ bam_dmux_dma: dma-controller@4044000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x04044000 0x19000>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+
+ num-channels = <6>;
+ qcom,num-ees = <1>;
+ qcom,powered-remotely;
+
+ status = "disabled";
+ };
+
+ sdhc_1: mmc@7824900 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07824900 0x500>,
+ <0x07824000 0x800>;
+ reg-names = "hc", "core";
+
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "core", "xo";
+ pinctrl-0 = <&sdc1_default>;
+ pinctrl-1 = <&sdc1_sleep>;
+ pinctrl-names = "default", "sleep";
+ power-domains = <&rpmpd MSM8917_VDDCX>;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-ddr-1_8v;
+ bus-width = <8>;
+ non-removable;
+ status = "disabled";
+ };
+
+ sdhc_2: mmc@7864900 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07864900 0x500>,
+ <0x07864000 0x800>;
+ reg-names = "hc", "core";
+
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "core", "xo";
+ pinctrl-0 = <&sdc2_default>;
+ pinctrl-1 = <&sdc2_sleep>;
+ pinctrl-names = "default", "sleep";
+ power-domains = <&rpmpd MSM8917_VDDCX>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ blsp1_dma: dma-controller@7884000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07884000 0x1f000>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "bam_clk";
+ qcom,controlled-remotely;
+ #dma-cells = <1>;
+ num-channels = <12>;
+ qcom,num-ees = <4>;
+ qcom,ee = <0>;
+ };
+
+ blsp2_dma: dma-controller@7ac4000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07ac4000 0x1d000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ qcom,controlled-remotely;
+ #dma-cells = <1>;
+ num-channels = <10>;
+ qcom,num-ees = <4>;
+ qcom,ee = <0>;
+ };
+
+ blsp1_uart1: serial@78af000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078af000 0x200>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 0>, <&blsp1_dma 1>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ };
+
+ blsp1_uart2: serial@78b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b0000 0x200>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 2>, <&blsp1_dma 3>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_uart2_default>;
+ pinctrl-1 = <&blsp1_uart2_sleep>;
+ pinctrl-names = "default", "sleep";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@78b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 6>, <&blsp1_dma 7>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c3: i2c@78b7000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 8>, <&blsp1_dma 9>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_i2c3_default>;
+ pinctrl-1 = <&blsp1_i2c3_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi3: spi@78b7000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 8>, <&blsp1_dma 9>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_spi3_default>;
+ pinctrl-1 = <&blsp1_spi3_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c4: i2c@78b8000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b8000 0x500>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 10>, <&blsp1_dma 11>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp1_i2c4_default>;
+ pinctrl-1 = <&blsp1_i2c4_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c1: i2c@7af5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07af5000 0x600>;
+ interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp2_dma 4>, <&blsp2_dma 5>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi2: spi@7af6000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07af6000 0x600>;
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp2_dma 6>, <&blsp2_dma 7>;
+ dma-names = "tx", "rx";
+ pinctrl-0 = <&blsp2_spi2_default>;
+ pinctrl-1 = <&blsp2_spi2_sleep>;
+ pinctrl-names = "default", "sleep";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ usb: usb@78db000 {
+ compatible = "qcom,ci-hdrc";
+ reg = <0x078db000 0x200>,
+ <0x078db200 0x200>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_USB_HS_AHB_CLK>,
+ <&gcc GCC_USB_HS_SYSTEM_CLK>;
+ clock-names = "iface", "core";
+ assigned-clocks = <&gcc GCC_USB_HS_SYSTEM_CLK>;
+ assigned-clock-rates = <80000000>;
+ resets = <&gcc GCC_USB_HS_BCR>;
+ reset-names = "core";
+ phy_type = "ulpi";
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ ahb-burst-config = <0>;
+ phy-names = "usb-phy";
+ phys = <&usb_hs_phy>;
+ status = "disabled";
+ #reset-cells = <1>;
+ };
+
+ wcnss: remoteproc@a204000 {
+ compatible = "qcom,pronto-v3-pil", "qcom,pronto";
+ reg = <0x0a204000 0x2000>,
+ <0x0a202000 0x1000>,
+ <0x0a21b000 0x3000>;
+ reg-names = "ccu", "dxe", "pmu";
+
+ memory-region = <&wcnss_mem>;
+
+ interrupts-extended = <&intc GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&wcnss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ power-domains = <&rpmpd MSM8917_VDDCX>,
+ <&rpmpd MSM8917_VDDMX>;
+ power-domain-names = "cx", "mx";
+
+ qcom,smem-states = <&wcnss_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ pinctrl-0 = <&wcnss_pin_a>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+
+ wcnss_iris: iris {
+ clocks = <&rpmcc RPM_SMD_RF_CLK2>;
+ clock-names = "xo";
+ };
+
+ smd-edge {
+ interrupts = <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs 17>;
+ qcom,smd-edge = <6>;
+ qcom,remote-pid = <4>;
+
+ label = "pronto";
+
+ wcnss_ctrl: wcnss {
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,mmio = <&wcnss>;
+
+ wcnss_bt: bluetooth {
+ compatible = "qcom,wcnss-bt";
+ };
+
+ wcnss_wifi: wifi {
+ compatible = "qcom,wcnss-wlan";
+
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx", "rx";
+
+ qcom,smem-states = <&apps_smsm 10>, <&apps_smsm 9>;
+ qcom,smem-state-names = "tx-enable",
+ "tx-rings-empty";
+ };
+ };
+ };
+ };
+
+ intc: interrupt-controller@b000000 {
+ compatible = "qcom,msm-qgic2";
+ reg = <0x0b000000 0x1000>,
+ <0x0b002000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ apcs: mailbox@b011000 {
+ compatible = "qcom,msm8939-apcs-kpss-global", "syscon";
+ reg = <0x0b011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&a53pll>, <&gcc GPLL0_EARLY>, <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ clock-names = "pll", "aux", "ref";
+ #clock-cells = <0>;
+ };
+
+ a53pll: clock@b016000 {
+ compatible = "qcom,msm8939-a53pll";
+ reg = <0x0b016000 0x40>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ #clock-cells = <0>;
+ operating-points-v2 = <&pll_opp_table>;
+
+ pll_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+ };
+
+ opp-1094400000 {
+ opp-hz = /bits/ 64 <1094400000>;
+ };
+
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+ };
+
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ };
+ };
+ };
+
+ watchdog@b017000 {
+ compatible = "qcom,apss-wdt-qcs404", "qcom,kpss-wdt";
+ reg = <0x0b017000 0x1000>;
+ clocks = <&sleep_clk>;
+ };
+
+ timer@b120000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0b120000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ frame@b121000 {
+ reg = <0x0b121000 0x1000>,
+ <0x0b122000 0x1000>;
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ frame@b123000 {
+ reg = <0x0b123000 0x1000>;
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@b124000 {
+ reg = <0x0b124000 0x1000>;
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@b125000 {
+ reg = <0x0b125000 0x1000>;
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@b126000 {
+ reg = <0x0b126000 0x1000>;
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@b127000 {
+ reg = <0x0b127000 0x1000>;
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@b128000 {
+ reg = <0x0b128000 0x1000>;
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ thermal_zones: thermal-zones {
+ aoss-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 0>;
+
+ trips {
+ aoss_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 3>;
+
+ trips {
+ camera_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cpuss1-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 4>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpuss1_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpuss1_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpuss1_alert1: trip-point1 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpuss1_crit: cpuss1-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 5>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpu0_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_alert1: trip-point1 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu0_crit: cpu-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu1-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 6>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpu1_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu1_alert1: trip-point1 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_crit: cpu-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu2-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 7>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpu2_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu2_alert1: trip-point1 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_crit: cpu-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu3-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 8>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpu3_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu3_alert1: trip-point1 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_crit: cpu-crit {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 9>;
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ gpu_alert: trip-point0 {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_crit: gpu-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdm-core-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 1>;
+
+ trips {
+ mdm_core_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ q6-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens 2>;
+
+ trips {
+ q6_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
index 7a6f1eeaa3fc..7cd5660de1b3 100644
--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
@@ -34,7 +34,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ clock-frequency = <32764>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
index 29e79ae0849d..1aca11daf83c 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
@@ -2,12 +2,13 @@
/*
* Copyright (c) 2015, Huawei Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023, Petr Vorel <petr.vorel@gmail.com>
+ * Copyright (c) 2021-2024, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
#include "msm8994.dtsi"
+#include "pm8994.dtsi"
/ {
model = "Huawei Nexus 6P";
@@ -46,6 +47,24 @@
no-map;
};
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ button-vol-up {
+ label = "volume up";
+ gpios = <&pm8994_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&pm8994_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+ status = "okay";
};
&blsp1_uart2 {
diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
index 10cd244dea4f..4c983b10dd92 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
@@ -387,11 +387,6 @@
interrupts = <&tlmm 96 IRQ_TYPE_EDGE_FALLING>;
- button_num = <8>;
- touchpad_num = <0>;
- wheel_num = <0>;
- slider_num = <0>;
-
vcc-supply = <&vreg_l18a_2p85>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 1acb0f159511..b5cbdd620bb9 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -34,7 +34,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ clock-frequency = <32764>;
clock-output-names = "sleep_clk";
};
};
@@ -437,6 +437,15 @@
#size-cells = <1>;
ranges;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "qusb2_phy",
+ "hs_phy_irq",
+ "ss_phy_irq";
+
clocks = <&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_SLEEP_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
index f8e9d90afab0..dbad8f57f2fa 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
@@ -64,7 +64,7 @@
};
led@1 {
- reg = <0>;
+ reg = <1>;
chan-name = "button-backlight1";
led-cur = /bits/ 8 <0x32>;
max-cur = /bits/ 8 <0xc8>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index b379623c1b8a..4719e1fc70d2 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -3065,9 +3065,14 @@
#size-cells = <1>;
ranges;
- interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hs_phy_irq", "ss_phy_irq";
+ interrupt-names = "pwr_event",
+ "qusb2_phy",
+ "hs_phy_irq",
+ "ss_phy_irq";
clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
index 0094e0ef058b..3f8b9eafe164 100644
--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -70,6 +70,12 @@
status = "disabled";
};
+ pm660l_flash: led-controller@d300 {
+ compatible = "qcom,pm660l-flash-led", "qcom,spmi-flash-led";
+ reg = <0xd300>;
+ status = "disabled";
+ };
+
pm660l_wled: leds@d800 {
compatible = "qcom,pm660l-wled";
reg = <0xd800>, <0xd900>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index a74a7ff660d2..d2568686a098 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -49,8 +49,6 @@
pon: pon@800 {
compatible = "qcom,pm8998-pon";
reg = <0x0800>;
- mode-bootloader = <0x2>;
- mode-recovery = <0x1>;
pon_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
diff --git a/arch/arm64/boot/dts/qcom/pm8937.dtsi b/arch/arm64/boot/dts/qcom/pm8937.dtsi
new file mode 100644
index 000000000000..42b3575b36ff
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8937.dtsi
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023, Dang Huynh <danct12@riseup.net>
+ */
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+ thermal-zones {
+ pm8937-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8937_temp>;
+
+ trips {
+ trip0 {
+ temperature = <105000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pmic@0 {
+ compatible = "qcom,pm8937", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pon@800 {
+ compatible = "qcom,pm8916-pon";
+ reg = <0x800>;
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+
+ pm8937_pwrkey: pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_POWER>;
+ };
+
+ pm8937_resin: resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ status = "disabled";
+ };
+ };
+
+ pm8937_gpios: gpio@c000 {
+ compatible = "qcom,pm8937-gpio", "qcom,spmi-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ gpio-ranges = <&pm8937_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pm8937_mpps: mpps@a000 {
+ compatible = "qcom,pm8937-mpp", "qcom,spmi-mpp";
+ reg = <0xa000>;
+ gpio-controller;
+ gpio-ranges = <&pm8937_mpps 0 0 4>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pm8937_temp: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
+ io-channels = <&pm8937_vadc VADC_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8937_vadc: adc@3100 {
+ compatible = "qcom,spmi-vadc";
+ reg = <0x3100>;
+ interrupts = <0 0x31 0 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+ channel@8 {
+ reg = <VADC_DIE_TEMP>;
+ };
+
+ channel@9 {
+ reg = <VADC_REF_625MV>;
+ };
+
+ channel@a {
+ reg = <VADC_REF_1250MV>;
+ };
+
+ channel@c {
+ reg = <VADC_SPARE1>;
+ };
+
+ channel@e {
+ reg = <VADC_GND_REF>;
+ };
+
+ channel@f {
+ reg = <VADC_VDD_VADC>;
+ };
+ };
+
+ rtc@6000 {
+ compatible = "qcom,pm8941-rtc";
+ reg = <0x6000>, <0x6100>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
+ pmic@1 {
+ compatible = "qcom,pm8937", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8937_spmi_regulators: regulators {
+ compatible = "qcom,pm8937-regulators";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmd8028.dtsi b/arch/arm64/boot/dts/qcom/pmd8028.dtsi
new file mode 100644
index 000000000000..a00913e28a4c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmd8028.dtsi
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+ thermal-zones {
+ pmd8028-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&pmd8028_temp_alarm>;
+
+ trips {
+ pmd8028_trip0: trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ pmd8028_trip1: trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ pmd8028_trip2: trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pmd8028: pmic@4 {
+ compatible = "qcom,pmd8028", "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmd8028_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x4 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmd8028_gpios: gpio@8800 {
+ compatible = "qcom,pmd8028-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmd8028_gpios 0 0 4>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 4aff437263a2..3d3b1cd97cc3 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -84,6 +84,23 @@
#address-cells = <1>;
#size-cells = <0>;
+ labibb {
+ compatible = "qcom,pmi8950-lab-ibb",
+ "qcom,pmi8998-lab-ibb";
+
+ ibb: ibb {
+ interrupts = <0x3 0xdc 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xdc 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sc-err", "ocp";
+ };
+
+ lab: lab {
+ interrupts = <0x3 0xde 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xde 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sc-err", "ocp";
+ };
+ };
+
pmi8950_pwm: pwm {
compatible = "qcom,pmi8950-pwm";
#pwm-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pmih0108.dtsi b/arch/arm64/boot/dts/qcom/pmih0108.dtsi
new file mode 100644
index 000000000000..1c875995d881
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmih0108.dtsi
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+ thermal-zones {
+ pmih0108-thermal {
+ polling-delay-passive = <100>;
+ thermal-sensors = <&pmih0108_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pmih0108: pmic@7 {
+ compatible = "qcom,pmih0108", "qcom,spmi-pmic";
+ reg = <0x7 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmih0108_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x7 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmih0108_gpios: gpio@8800 {
+ compatible = "qcom,pmih0108-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pmih0108_gpios 0 0 18>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pmih0108_eusb2_repeater: phy@fd00 {
+ compatible = "qcom,pm8550b-eusb2-repeater";
+ reg = <0xfd00>;
+ #phy-cells = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
index f0ed15458dd7..565752af2204 100644
--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
@@ -76,6 +76,14 @@
status = "disabled";
};
+ pmk8350_sdam_1: nvram@7000 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x7000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x7000 0x100>;
+ };
+
pmk8350_sdam_2: nvram@7100 {
compatible = "qcom,spmi-sdam";
reg = <0x7100>;
@@ -89,6 +97,70 @@
};
};
+ pmk8350_sdam_5: nvram@7400 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x7400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x7400 0x100>;
+ };
+
+ pmk8350_sdam_13: nvram@7c00 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x7c00>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x7c00 0x100>;
+ };
+
+ pmk8350_sdam_14: nvram@7d00 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x7d00>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x7d00 0x100>;
+ };
+
+ pmk8350_sdam_21: nvram@8400 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x8400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x8400 0x100>;
+ };
+
+ pmk8350_sdam_22: nvram@8500 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x8500>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x8500 0x100>;
+ };
+
+ pmk8350_sdam_23: nvram@8600 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x8600>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x8600 0x100>;
+ };
+
+ pmk8350_sdam_41: nvram@9800 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x9800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x9800 0x100>;
+ };
+
+ pmk8350_sdam_46: nvram@9d00 {
+ compatible = "qcom,spmi-sdam";
+ reg = <0x9d00>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x9d00 0x100>;
+ };
+
pmk8350_gpios: gpio@b000 {
compatible = "qcom,pmk8350-gpio", "qcom,spmi-gpio";
reg = <0xb000>;
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
index fdc62f1b1c5a..769c66cb5d19 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts
@@ -108,6 +108,36 @@
};
};
+ vreg_afvdd_2p8: regulator-afvdd-2p8 {
+ compatible = "regulator-fixed";
+ regulator-name = "AFVDD_2P8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&tlmm 68 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_ois_avdd0_1p8: regulator-ois-avdd0-1p8 {
+ compatible = "regulator-fixed";
+ regulator-name = "OIS_AVDD0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&tlmm 157 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vreg_bob>;
+ };
+
+ vreg_ois_dvdd_1p1: regulator-ois-dvdd-1p1 {
+ compatible = "regulator-fixed";
+ regulator-name = "OIS_DVDD_1P1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ gpio = <&tlmm 97 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vreg_s8b>;
+ };
+
reserved-memory {
cont_splash_mem: cont-splash@e1000000 {
reg = <0x0 0xe1000000 0x0 0x2300000>;
@@ -134,36 +164,6 @@
};
};
- ois_avdd0_1p8: regulator-ois-avdd0-1p8 {
- compatible = "regulator-fixed";
- regulator-name = "OIS_AVDD0_1P8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- gpio = <&tlmm 157 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vreg_bob>;
- };
-
- ois_dvdd_1p1: regulator-ois-dvdd-1p1 {
- compatible = "regulator-fixed";
- regulator-name = "OIS_DVDD_1P1";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
- gpio = <&tlmm 97 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vreg_s8b>;
- };
-
- afvdd_2p8: regulator-afvdd-2p8 {
- compatible = "regulator-fixed";
- regulator-name = "AFVDD_2P8";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- gpio = <&tlmm 68 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vreg_bob>;
- };
-
thermal-zones {
camera-thermal {
polling-delay-passive = <0>;
@@ -556,6 +556,47 @@
};
};
+&cci0 {
+ status = "okay";
+};
+
+&cci0_i2c0 {
+ /* IMX800 @ 1a */
+
+ eeprom@50 {
+ compatible = "puya,p24c256c", "atmel,24c256";
+ reg = <0x50>;
+ vcc-supply = <&vreg_l6p>;
+ read-only;
+ };
+};
+
+&cci0_i2c1 {
+ /* IMX858 @ 29 */
+
+ eeprom@54 {
+ compatible = "giantec,gt24p128f", "atmel,24c128";
+ reg = <0x54>;
+ vcc-supply = <&vreg_l6p>;
+ read-only;
+ };
+};
+
+&cci1 {
+ status = "okay";
+};
+
+&cci1_i2c1 {
+ /* S5KJN1SQ03 @ 10 */
+
+ eeprom@51 {
+ compatible = "giantec,gt24p128f", "atmel,24c128";
+ reg = <0x51>;
+ vcc-supply = <&vreg_l6p>;
+ read-only;
+ };
+};
+
&dispcc {
/* Disable for now so simple-framebuffer continues working */
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
index c5fb153614e1..9209efcc49b5 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts
@@ -258,6 +258,8 @@
regulator-name = "vreg_l6b_1p2";
regulator-min-microvolt = <1140000>;
regulator-max-microvolt = <1260000>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM RPMH_REGULATOR_MODE_HPM>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -265,6 +267,8 @@
regulator-name = "vreg_l7b_2p952";
regulator-min-microvolt = <2400000>;
regulator-max-microvolt = <3544000>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM RPMH_REGULATOR_MODE_HPM>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -279,6 +283,8 @@
regulator-name = "vreg_l9b_1p2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1304000>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM RPMH_REGULATOR_MODE_HPM>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -467,6 +473,8 @@
regulator-name = "vreg_l10c_0p88";
regulator-min-microvolt = <720000>;
regulator-max-microvolt = <1050000>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM RPMH_REGULATOR_MODE_HPM>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
index 4667e47a74bc..75930f957696 100644
--- a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
@@ -942,8 +942,6 @@
qcom,squelch-detector-bp = <(-2090)>;
- orientation-switch;
-
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index 215ba146207a..5a9df6b12305 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -28,7 +28,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ clock-frequency = <32764>;
};
};
@@ -694,6 +694,8 @@
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dr_mode = "otg";
};
};
@@ -731,6 +733,8 @@
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dr_mode = "peripheral";
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs615-ride.dts b/arch/arm64/boot/dts/qcom/qcs615-ride.dts
new file mode 100644
index 000000000000..2b5aa3c66867
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs615-ride.dts
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "qcs615.dtsi"
+#include "pm8150.dtsi"
+/ {
+ model = "Qualcomm Technologies, Inc. QCS615 Ride";
+ compatible = "qcom,qcs615-ride", "qcom,qcs615";
+ chassis-type = "embedded";
+
+ aliases {
+ mmc0 = &sdhc_1;
+ mmc1 = &sdhc_2;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ xo_board_clk: xo-board-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <38400000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ regulator-usb2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "USB2_VBUS";
+ gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&usb2_en>;
+ pinctrl-names = "default";
+ enable-active-high;
+ regulator-always-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vreg_s3a: smps3 {
+ regulator-name = "vreg_s3a";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <650000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4a: smps4 {
+ regulator-name = "vreg_s4a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1829000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5a: smps5 {
+ regulator-name = "vreg_s5a";
+ regulator-min-microvolt = <1896000>;
+ regulator-max-microvolt = <2040000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a: smps6 {
+ regulator-name = "vreg_s6a";
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1404000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1a: ldo1 {
+ regulator-name = "vreg_l1a";
+ regulator-min-microvolt = <488000>;
+ regulator-max-microvolt = <852000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a: ldo2 {
+ regulator-name = "vreg_l2a";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a: ldo3 {
+ regulator-name = "vreg_l3a";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1248000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a: ldo5 {
+ regulator-name = "vreg_l5a";
+ regulator-min-microvolt = <875000>;
+ regulator-max-microvolt = <975000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a: ldo7 {
+ regulator-name = "vreg_l7a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8a: ldo8 {
+ regulator-name = "vreg_l8a";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a: ldo10 {
+ regulator-name = "vreg_l10a";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11a: ldo11 {
+ regulator-name = "vreg_l11a";
+ regulator-min-microvolt = <1232000>;
+ regulator-max-microvolt = <1260000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12a: ldo12 {
+ regulator-name = "vreg_l12a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1890000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a: ldo13 {
+ regulator-name = "vreg_l13a";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3230000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a: ldo15 {
+ regulator-name = "vreg_l15a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a: ldo16 {
+ regulator-name = "vreg_l16a";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a: ldo17 {
+ regulator-name = "vreg_l17a";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gcc {
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+};
+
+&pm8150_gpios {
+ usb2_en: usb2-en-state {
+ pins = "gpio10";
+ function = "normal";
+ output-enable;
+ power-source = <0>;
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&rpmhcc {
+ clocks = <&xo_board_clk>;
+};
+
+&sdhc_1 {
+ pinctrl-0 = <&sdc1_state_on>;
+ pinctrl-1 = <&sdc1_state_off>;
+ pinctrl-names = "default", "sleep";
+
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ vmmc-supply = <&vreg_l17a>;
+ vqmmc-supply = <&vreg_s4a>;
+
+ non-removable;
+ no-sd;
+ no-sdio;
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ pinctrl-0 = <&sdc2_state_on>;
+ pinctrl-1 = <&sdc2_state_off>;
+ pinctrl-names = "default", "sleep";
+
+ bus-width = <4>;
+ cd-gpios = <&tlmm 99 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&vreg_l10a>;
+ vqmmc-supply = <&vreg_s4a>;
+
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&usb_1_hsphy {
+ vdd-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l12a>;
+ vdda-phy-dpdm-supply = <&vreg_l13a>;
+
+ status = "okay";
+};
+
+&usb_qmpphy {
+ vdda-phy-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l12a>;
+
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_hsphy_2 {
+ vdd-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l12a>;
+ vdda-phy-dpdm-supply = <&vreg_l13a>;
+
+ status = "okay";
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 123 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l17a>;
+ vcc-max-microamp = <600000>;
+ vccq2-supply = <&vreg_s4a>;
+ vccq2-max-microamp = <600000>;
+
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l5a>;
+ vdda-pll-supply = <&vreg_l12a>;
+
+ status = "okay";
+};
+
+&watchdog {
+ clocks = <&sleep_clk>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs615.dtsi b/arch/arm64/boot/dts/qcom/qcs615.dtsi
new file mode 100644
index 000000000000..f4abfad474ea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs615.dtsi
@@ -0,0 +1,3670 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,qcs615-gcc.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
+#include <dt-bindings/interconnect/qcom,qcs615-rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_0>;
+ #cooling-cells = <2>;
+
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_100>;
+
+ l2_100: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_200>;
+
+ l2_200: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_300>;
+
+ l2_300: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu4: cpu@400 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd4>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_400>;
+
+ l2_400: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu5: cpu@500 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd5>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+ next-level-cache = <&l2_500>;
+
+ l2_500: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu6: cpu@600 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd6>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1740>;
+ dynamic-power-coefficient = <404>;
+ next-level-cache = <&l2_600>;
+ #cooling-cells = <2>;
+
+ l2_600: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu7: cpu@700 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a76";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ power-domains = <&cpu_pd7>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1740>;
+ dynamic-power-coefficient = <404>;
+ next-level-cache = <&l2_700>;
+
+ l2_700: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+
+ core5 {
+ cpu = <&cpu5>;
+ };
+
+ core6 {
+ cpu = <&cpu6>;
+ };
+
+ core7 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+
+ dummy_eud: dummy-sink {
+ compatible = "arm,coresight-dummy-sink";
+
+ in-ports {
+ port {
+ eud_in: endpoint {
+ remote-endpoint = <&replicator_swao_out1>;
+ };
+ };
+ };
+ };
+
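+ /* CPU idle states: the "silver" entries are used by the Cortex-A55 power domains, the "gold" entries by the Cortex-A76 ones (see the psci node below) */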
+ idle-states {
+ entry-method = "psci";
+
+ little_cpu_sleep_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ little_cpu_sleep_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <915>;
+ min-residency-us = <4001>;
+ local-timer-stop;
+ };
+
+ big_cpu_sleep_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <523>;
+ exit-latency-us = <1244>;
+ min-residency-us = <2207>;
+ local-timer-stop;
+ };
+
+ big_cpu_sleep_1: cpu-sleep-1-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <526>;
+ exit-latency-us = <1854>;
+ min-residency-us = <5555>;
+ local-timer-stop;
+ };
+ };
+
+ domain-idle-states {
+ cluster_sleep_0: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <2752>;
+ exit-latency-us = <3048>;
+ min-residency-us = <6118>;
+ };
+
+ cluster_sleep_1: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41001344>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <4562>;
+ min-residency-us = <8467>;
+ };
+
+ cluster_sleep_2: cluster-sleep-2 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x4100b344>;
+ entry-latency-us = <3638>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9826>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-qcs615", "qcom,scm";
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ camnoc_virt: interconnect-0 {
+ compatible = "qcom,qcs615-camnoc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ ipa_virt: interconnect-1 {
+ compatible = "qcom,qcs615-ipa-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect-2 {
+ compatible = "qcom,qcs615-mc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
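+ /* OPP table shared by the QUP SPI/UART serial engines via operating-points-v2 */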
+ qup_opp_table: opp-table-qup {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-128000000 {
+ opp-hz = /bits/ 64 <128000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+
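+ /* Hierarchical PSCI power domains: every CPU domain is a child of cluster_pd, which carries the cluster-sleep-* domain idle states */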
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cpu_pd0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd4: power-domain-cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd5: power-domain-cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd6: power-domain-cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cpu_pd7: power-domain-cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cluster_pd: power-domain-cluster {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&cluster_sleep_0
+ &cluster_sleep_1
+ &cluster_sleep_2>;
+ };
+ };
+
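+ /* SMEM shared-memory carveout, protected by TCSR hardware mutex 3 */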
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ smem_region: smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x86000000 0x0 0x200000>;
+ no-map;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,qcs615-gcc";
+ reg = <0 0x00100000 0 0x1f0000>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ qfprom: efuse@780000 {
+ compatible = "qcom,qcs615-qfprom", "qcom,qfprom";
+ reg = <0x0 0x00780000 0x0 0x7000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qusb2_hstx_trim: hstx-trim@1fb {
+ reg = <0x1fb 0x1>;
+ bits = <1 4>;
+ };
+ };
+
+ sdhc_1: mmc@7c4000 {
+ compatible = "qcom,qcs615-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x0 0x007c4000 0x0 0x1000>,
+ <0x0 0x007c5000 0x0 0x1000>,
+ <0x0 0x007c8000 0x0 0x8000>;
+ reg-names = "hc",
+ "cqhci",
+ "ice";
+
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq",
+ "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_SDCC1_ICE_CORE_CLK>;
+ clock-names = "iface",
+ "core",
+ "xo",
+ "ice";
+
+ resets = <&gcc GCC_SDCC1_BCR>;
+
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc1_opp_table>;
+ iommus = <&apps_smmu 0x02c0 0x0>;
+ interconnects = <&aggre1_noc MASTER_SDCC_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "sdhc-ddr",
+ "cpu-sdhc";
+
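+ /* SoC-specific DLL and DDR-mode tuning values for the SDHCI controller */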
+ qcom,dll-config = <0x000f642c>;
+ qcom,ddr-config = <0x80040868>;
+ supports-cqe;
+ dma-coherent;
+
+ status = "disabled";
+
+ sdhc1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-50000000 {
+ opp-hz = /bits/ 64 <50000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-384000000 {
+ opp-hz = /bits/ 64 <384000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ gpi_dma0: dma-controller@800000 {
+ compatible = "qcom,qcs615-gpi-dma", "qcom,sdm845-gpi-dma";
+ reg = <0x0 0x800000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <8>;
+ dma-channel-mask = <0xf>;
+ iommus = <&apps_smmu 0xd6 0x0>;
+ status = "disabled";
+ };
+
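+ /* QUP wrapper 0: the serial engines below share register ranges (i2c2, spi2 and uart2 all sit at 0x888000), so a board enables at most one function per engine */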
+ qupv3_id_0: geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x008c0000 0x0 0x6000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+ iommus = <&apps_smmu 0xc3 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ uart0: serial@880000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0x0 0x00880000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart0_tx>, <&qup_uart0_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@884000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x884000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c1_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ dmas = <&gpi_dma0 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ i2c2: i2c@888000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x888000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c2_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi2: spi@888000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00888000 0x0 0x4000>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
+ pinctrl-names = "default";
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart2: serial@888000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x00888000 0x0 0x4000>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart2_cts>, <&qup_uart2_rts>,
+ <&qup_uart2_tx>, <&qup_uart2_rx>;
+ pinctrl-names = "default";
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@88c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x88c000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c3_data_clk>;
+ pinctrl-names = "default";
+ interconnects = <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ dmas = <&gpi_dma0 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+ };
+
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,qcs615-gpi-dma", "qcom,sdm845-gpi-dma";
+ reg = <0x0 0xa00000 0x0 0x60000>;
+ #dma-cells = <3>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <8>;
+ dma-channel-mask = <0xf>;
+ iommus = <&apps_smmu 0x376 0x0>;
+ status = "disabled";
+ };
+
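+ /* QUP wrapper 1: hosts i2c4/spi4/uart4, i2c5, i2c6/spi6/uart6 and i2c7/spi7/uart7 */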
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0xac0000 0x0 0x2000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+ iommus = <&apps_smmu 0x363 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ i2c4: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c4_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi4: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi4_data_clk>, <&qup_spi4_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart4: serial@a80000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa80000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart4_cts>, <&qup_uart4_rts>,
+ <&qup_uart4_tx>, <&qup_uart4_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa84000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c5_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ i2c6: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c6_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi6: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart6: serial@a88000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa88000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart6_cts>, <&qup_uart6_rts>,
+ <&qup_uart6_tx>, <&qup_uart6_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0xa8c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_i2c7_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ spi7: spi@a8c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0xa8c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_spi7_data_clk>, <&qup_spi7_cs>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+ status = "disabled";
+ };
+
+ uart7: serial@a8c000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0xa8c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart7_cts>, <&qup_uart7_rts>,
+ <&qup_uart7_tx>, <&qup_uart7_rx>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&aggre1_noc MASTER_BLSP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table>;
+ status = "disabled";
+ };
+ };
+
+ config_noc: interconnect@1500000 {
+ reg = <0x0 0x01500000 0x0 0x5080>;
+ compatible = "qcom,qcs615-config-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1620000 {
+ reg = <0x0 0x01620000 0x0 0x1f300>;
+ compatible = "qcom,qcs615-system-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@1700000 {
+ reg = <0x0 0x01700000 0x0 0x3f200>;
+ compatible = "qcom,qcs615-aggre1-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ reg = <0x0 0x01740000 0x0 0x1c100>;
+ compatible = "qcom,qcs615-mmss-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ ufs_mem_hc: ufshc@1d84000 {
+ compatible = "qcom,qcs615-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
+ reg = <0x0 0x01d84000 0x0 0x3000>,
+ <0x0 0x01d90000 0x0 0x8000>;
+ reg-names = "std",
+ "ice";
+
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>;
+ clock-names = "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "rst";
+
+ operating-points-v2 = <&ufs_opp_table>;
+ interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "ufs-ddr",
+ "cpu-ufs";
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ iommus = <&apps_smmu 0x300 0x0>;
+ dma-coherent;
+
+ lanes-per-direction = <1>;
+
+ phys = <&ufs_mem_phy>;
+ phy-names = "ufsphy";
+
+ #reset-cells = <1>;
+
+ status = "disabled";
+
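+ /* Each opp-hz entry below lists one 64-bit rate per clock in "clocks" above; unused clocks are given as 0 */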
+ ufs_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-50000000 {
+ opp-hz = /bits/ 64 <50000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <37500000>,
+ /bits/ 64 <75000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <75000000>,
+ /bits/ 64 <150000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <150000000>,
+ /bits/ 64 <300000000>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>,
+ /bits/ 64 <0>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,qcs615-qmp-ufs-phy", "qcom,sm6115-qmp-ufs-phy";
+ reg = <0x0 0x01d87000 0x0 0xe00>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
+ <&gcc GCC_UFS_MEM_CLKREF_CLK>;
+ clock-names = "ref",
+ "ref_aux",
+ "qref";
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x0 0x01f40000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+ tcsr: syscon@1fc0000 {
+ compatible = "qcom,qcs615-tcsr", "syscon";
+ reg = <0x0 0x01fc0000 0x0 0x30000>;
+ };
+
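+ /* TLMM pin controller: provides the qup_* and sdc* pin states referenced by the serial engines and SDHCI controllers above */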
+ tlmm: pinctrl@3100000 {
+ compatible = "qcom,qcs615-tlmm";
+ reg = <0x0 0x03100000 0x0 0x300000>,
+ <0x0 0x03500000 0x0 0x300000>,
+ <0x0 0x03d00000 0x0 0x300000>;
+ reg-names = "east",
+ "west",
+ "south";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&tlmm 0 0 124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ wakeup-parent = <&pdc>;
+
+ qup_i2c1_data_clk: qup-i2c1-data-clk-state {
+ pins = "gpio4", "gpio5";
+ function = "qup0";
+ };
+
+ qup_i2c2_data_clk: qup-i2c2-data-clk-state {
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ };
+
+ qup_i2c3_data_clk: qup-i2c3-data-clk-state {
+ pins = "gpio18", "gpio19";
+ function = "qup0";
+ };
+
+ qup_i2c4_data_clk: qup-i2c4-data-clk-state {
+ pins = "gpio20", "gpio21";
+ function = "qup1";
+ };
+
+ qup_i2c5_data_clk: qup-i2c5-data-clk-state {
+ pins = "gpio14", "gpio15";
+ function = "qup1";
+ };
+
+ qup_i2c6_data_clk: qup-i2c6-data-clk-state {
+ pins = "gpio6", "gpio7";
+ function = "qup1";
+ };
+
+ qup_i2c7_data_clk: qup-i2c7-data-clk-state {
+ pins = "gpio10", "gpio11";
+ function = "qup1";
+ };
+
+ qup_spi2_data_clk: qup-spi2-data-clk-state {
+ pins = "gpio0", "gpio1", "gpio2";
+ function = "qup0";
+ };
+
+ qup_spi2_cs: qup-spi2-cs-state {
+ pins = "gpio3";
+ function = "qup0";
+ };
+
+ qup_spi2_cs_gpio: qup-spi2-cs-gpio-state {
+ pins = "gpio3";
+ function = "gpio";
+ };
+
+ qup_spi4_data_clk: qup-spi4-data-clk-state {
+ pins = "gpio20", "gpio21", "gpio22";
+ function = "qup1";
+ };
+
+ qup_spi4_cs: qup-spi4-cs-state {
+ pins = "gpio23";
+ function = "qup1";
+ };
+
+ qup_spi4_cs_gpio: qup-spi4-cs-gpio-state {
+ pins = "gpio23";
+ function = "gpio";
+ };
+
+ qup_spi6_data_clk: qup-spi6-data-clk-state {
+ pins = "gpio6", "gpio7", "gpio8";
+ function = "qup1";
+ };
+
+ qup_spi6_cs: qup-spi6-cs-state {
+ pins = "gpio9";
+ function = "qup1";
+ };
+
+ qup_spi6_cs_gpio: qup-spi6-cs-gpio-state {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ qup_spi7_data_clk: qup-spi7-data-clk-state {
+ pins = "gpio10", "gpio11", "gpio12";
+ function = "qup1";
+ };
+
+ qup_spi7_cs: qup-spi7-cs-state {
+ pins = "gpio13";
+ function = "qup1";
+ };
+
+ qup_spi7_cs_gpio: qup-spi7-cs-gpio-state {
+ pins = "gpio13";
+ function = "gpio";
+ };
+
+ qup_uart0_tx: qup-uart0-tx-state {
+ pins = "gpio16";
+ function = "qup0";
+ };
+
+ qup_uart0_rx: qup-uart0-rx-state {
+ pins = "gpio17";
+ function = "qup0";
+ };
+
+ qup_uart2_cts: qup-uart2-cts-state {
+ pins = "gpio0";
+ function = "qup0";
+ };
+
+ qup_uart2_rts: qup-uart2-rts-state {
+ pins = "gpio1";
+ function = "qup0";
+ };
+
+ qup_uart2_tx: qup-uart2-tx-state {
+ pins = "gpio2";
+ function = "qup0";
+ };
+
+ qup_uart2_rx: qup-uart2-rx-state {
+ pins = "gpio3";
+ function = "qup0";
+ };
+
+ qup_uart4_cts: qup-uart4-cts-state {
+ pins = "gpio20";
+ function = "qup1";
+ };
+
+ qup_uart4_rts: qup-uart4-rts-state {
+ pins = "gpio21";
+ function = "qup1";
+ };
+
+ qup_uart4_tx: qup-uart4-tx-state {
+ pins = "gpio22";
+ function = "qup1";
+ };
+
+ qup_uart4_rx: qup-uart4-rx-state {
+ pins = "gpio23";
+ function = "qup1";
+ };
+
+ qup_uart6_cts: qup-uart6-cts-state {
+ pins = "gpio6";
+ function = "qup1";
+ };
+
+ qup_uart6_rts: qup-uart6-rts-state {
+ pins = "gpio7";
+ function = "qup1";
+ };
+
+ qup_uart6_tx: qup-uart6-tx-state {
+ pins = "gpio8";
+ function = "qup1";
+ };
+
+ qup_uart6_rx: qup-uart6-rx-state {
+ pins = "gpio9";
+ function = "qup1";
+ };
+
+ qup_uart7_cts: qup-uart7-cts-state {
+ pins = "gpio10";
+ function = "qup1";
+ };
+
+ qup_uart7_rts: qup-uart7-rts-state {
+ pins = "gpio11";
+ function = "qup1";
+ };
+
+ qup_uart7_tx: qup-uart7-tx-state {
+ pins = "gpio12";
+ function = "qup1";
+ };
+
+ qup_uart7_rx: qup-uart7-rx-state {
+ pins = "gpio13";
+ function = "qup1";
+ };
+
+ sdc1_state_on: sdc1-on-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_state_off: sdc1-off-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_state_on: sdc2-on-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+
+ sdc2_state_off: sdc2-off-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+ };
+
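+ /* CoreSight trace network: the STM, per-CPU ETMs and TPDM sources are funnelled into the SWAO ETF, whose replicator feeds the dummy EUD sink declared earlier */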
+ stm@6002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0x0 0x06002000 0x0 0x1000>,
+ <0x0 0x16280000 0x0 0x180000>;
+ reg-names = "stm-base",
+ "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel_in0_in7>;
+ };
+ };
+ };
+ };
+
+ tpda@6004000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x06004000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tpda_qdss_in0: endpoint {
+ remote-endpoint = <&tpdm_center_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ tpda_qdss_in4: endpoint {
+ remote-endpoint = <&funnel_monaq_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ tpda_qdss_in5: endpoint {
+ remote-endpoint = <&funnel_ddr_0_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ tpda_qdss_in6: endpoint {
+ remote-endpoint = <&funnel_turing_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ tpda_qdss_in7: endpoint {
+ remote-endpoint = <&tpdm_vsense_out>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+
+ tpda_qdss_in8: endpoint {
+ remote-endpoint = <&tpdm_dcc_out>;
+ };
+ };
+
+ port@9 {
+ reg = <9>;
+
+ tpda_qdss_in9: endpoint {
+ remote-endpoint = <&tpdm_prng_out>;
+ };
+ };
+
+ port@b {
+ reg = <11>;
+
+ tpda_qdss_in11: endpoint {
+ remote-endpoint = <&tpdm_qm_out>;
+ };
+ };
+
+ port@c {
+ reg = <12>;
+
+ tpda_qdss_in12: endpoint {
+ remote-endpoint = <&tpdm_west_out>;
+ };
+ };
+
+ port@d {
+ reg = <13>;
+
+ tpda_qdss_in13: endpoint {
+ remote-endpoint = <&tpdm_pimem_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_qdss_out: endpoint {
+ remote-endpoint = <&funnel_qatb_in>;
+ };
+ };
+ };
+ };
+
+ funnel@6005000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06005000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ funnel_qatb_in: endpoint {
+ remote-endpoint = <&tpda_qdss_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_qatb_out: endpoint {
+ remote-endpoint = <&funnel_in0_in6>;
+ };
+ };
+ };
+ };
+
+ cti@6010000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06010000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6011000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06011000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6012000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06012000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6013000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06013000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6014000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06014000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6015000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06015000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6016000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06016000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6017000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06017000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6018000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06018000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6019000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06019000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601a000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601c000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601d000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601e000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601e000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@601f000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0601f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ funnel@6041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06041000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ funnel_in0_in6: endpoint {
+ remote-endpoint = <&funnel_qatb_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel_in0_in7: endpoint {
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_in0_out: endpoint {
+ remote-endpoint = <&funnel_merg_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@6042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@3 {
+ reg = <3>;
+
+ funnel_in1_in3: endpoint {
+ remote-endpoint = <&replicator_swao_out0>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ funnel_in1_in4: endpoint {
+ remote-endpoint = <&tpdm_wcss_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel_in1_in7: endpoint {
+ remote-endpoint = <&funnel_apss_merg_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_in1_out: endpoint {
+ remote-endpoint = <&funnel_merg_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@6045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ funnel_merg_in0: endpoint {
+ remote-endpoint = <&funnel_in0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ funnel_merg_in1: endpoint {
+ remote-endpoint = <&funnel_in1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_merg_out: endpoint {
+ remote-endpoint = <&tmc_etf_in>;
+ };
+ };
+ };
+ };
+
+ replicator@6046000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x06046000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator0_in: endpoint {
+ remote-endpoint = <&tmc_etf_out>;
+ };
+ };
+ };
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ replicator0_out1: endpoint {
+ remote-endpoint = <&replicator1_in>;
+ };
+ };
+ };
+ };
+
+ tmc@6047000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x06047000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmc_etf_in: endpoint {
+ remote-endpoint = <&funnel_merg_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tmc_etf_out: endpoint {
+ remote-endpoint = <&replicator0_in>;
+ };
+ };
+ };
+ };
+
+ replicator@604a000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x0604a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator1_in: endpoint {
+ remote-endpoint = <&replicator0_out1>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ replicator1_out: endpoint {
+ remote-endpoint = <&funnel_swao_in6>;
+ };
+ };
+ };
+ };
+
+ cti@683b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0683b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@6840000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06840000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_vsense_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@684c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x0684c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_prng_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in9>;
+ };
+ };
+ };
+ };
+
+ tpdm@6850000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06850000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_pimem_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in13>;
+ };
+ };
+ };
+ };
+
+ tpdm@6860000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06860000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_turing_out: endpoint {
+ remote-endpoint = <&funnel_turing_in>;
+ };
+ };
+ };
+ };
+
+ funnel@6861000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06861000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ funnel_turing_in: endpoint {
+ remote-endpoint = <&tpdm_turing_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_turing_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in6>;
+ };
+ };
+ };
+ };
+
+ cti@6867000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06867000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@6870000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06870000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_dcc_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in8>;
+ };
+ };
+ };
+ };
+
+ tpdm@699c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x0699c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_wcss_out: endpoint {
+ remote-endpoint = <&funnel_in1_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@69c0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x069c0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_monaq_out: endpoint {
+ remote-endpoint = <&funnel_monaq_in>;
+ };
+ };
+ };
+ };
+
+ funnel@69c3000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x069c3000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ funnel_monaq_in: endpoint {
+ remote-endpoint = <&tpdm_monaq_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_monaq_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@69d0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x069d0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_qm_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in11>;
+ };
+ };
+ };
+ };
+
+ tpdm@6a00000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06a00000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_ddr_out: endpoint {
+ remote-endpoint = <&funnel_ddr_0_in>;
+ };
+ };
+ };
+ };
+
+ cti@6a02000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06a02000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6a03000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06a03000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6a10000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06a10000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6a11000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06a11000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ funnel@6a05000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06a05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ funnel_ddr_0_in: endpoint {
+ remote-endpoint = <&tpdm_ddr_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_ddr_0_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in5>;
+ };
+ };
+ };
+ };
+
+ tpda@6b01000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x06b01000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tpda_swao_in0: endpoint {
+ remote-endpoint = <&tpdm_swao0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ tpda_swao_in1: endpoint {
+ remote-endpoint = <&tpdm_swao1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_swao_out: endpoint {
+ remote-endpoint = <&funnel_swao_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@6b02000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06b02000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_swao0_out: endpoint {
+ remote-endpoint = <&tpda_swao_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@6b03000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06b03000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tpdm_swao1_out: endpoint {
+ remote-endpoint = <&tpda_swao_in1>;
+ };
+ };
+ };
+ };
+
+ cti@6b04000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6b05000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6b06000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06b06000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6b07000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06b07000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ funnel@6b08000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06b08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ funnel_swao_in6: endpoint {
+ remote-endpoint = <&replicator1_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel_swao_in7: endpoint {
+ remote-endpoint = <&tpda_swao_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_swao_out: endpoint {
+ remote-endpoint = <&tmc_etf_swao_in>;
+ };
+ };
+ };
+ };
+
+ tmc@6b09000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x06b09000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmc_etf_swao_in: endpoint {
+ remote-endpoint = <&funnel_swao_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tmc_etf_swao_out: endpoint {
+ remote-endpoint = <&replicator_swao_in>;
+ };
+ };
+ };
+ };
+
+ replicator@6b0a000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x06b0a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator_swao_in: endpoint {
+ remote-endpoint = <&tmc_etf_swao_out>;
+ };
+ };
+ };
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ replicator_swao_out0: endpoint {
+ remote-endpoint = <&funnel_in1_in3>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ replicator_swao_out1: endpoint {
+ remote-endpoint = <&eud_in>;
+ };
+ };
+ };
+ };
+
+ cti@6b21000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06b21000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@6b48000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06b48000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_west_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in12>;
+ };
+ };
+ };
+ };
+
+ cti@6c13000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06c13000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6c20000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06c20000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ tpdm@6c28000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06c28000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_center_out: endpoint {
+ remote-endpoint = <&tpda_qdss_in0>;
+ };
+ };
+ };
+ };
+
+ cti@6c29000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06c29000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6c2a000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06c2a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@7020000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07020000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7040000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07040000 0x0 0x1000>;
+ cpu = <&cpu0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel_apss_in0>;
+ };
+ };
+ };
+ };
+
+ cti@7120000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07120000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7140000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07140000 0x0 0x1000>;
+ cpu = <&cpu1>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel_apss_in1>;
+ };
+ };
+ };
+ };
+
+ cti@7220000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07220000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7240000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07240000 0x0 0x1000>;
+ cpu = <&cpu2>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel_apss_in2>;
+ };
+ };
+ };
+ };
+
+ cti@7320000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07320000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7340000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07340000 0x0 0x1000>;
+ cpu = <&cpu3>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel_apss_in3>;
+ };
+ };
+ };
+ };
+
+ cti@7420000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07420000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7440000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07440000 0x0 0x1000>;
+ cpu = <&cpu4>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm4_out: endpoint {
+ remote-endpoint = <&funnel_apss_in4>;
+ };
+ };
+ };
+ };
+
+ cti@7520000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07520000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7540000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07540000 0x0 0x1000>;
+ cpu = <&cpu5>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm5_out: endpoint {
+ remote-endpoint = <&funnel_apss_in5>;
+ };
+ };
+ };
+ };
+
+ cti@7620000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07620000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7640000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07640000 0x0 0x1000>;
+ cpu = <&cpu6>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm6_out: endpoint {
+ remote-endpoint = <&funnel_apss_in6>;
+ };
+ };
+ };
+ };
+
+ cti@7720000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07720000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ etm@7740000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x07740000 0x0 0x1000>;
+ cpu = <&cpu7>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm7_out: endpoint {
+ remote-endpoint = <&funnel_apss_in7>;
+ };
+ };
+ };
+ };
+
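+ /* APSS funnel: merges the eight per-CPU ETM trace outputs before they join the main trace path */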
+ funnel@7800000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x07800000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ funnel_apss_in0: endpoint {
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ funnel_apss_in1: endpoint {
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ funnel_apss_in2: endpoint {
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ funnel_apss_in3: endpoint {
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ funnel_apss_in4: endpoint {
+ remote-endpoint = <&etm4_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ funnel_apss_in5: endpoint {
+ remote-endpoint = <&etm5_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ funnel_apss_in6: endpoint {
+ remote-endpoint = <&etm6_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel_apss_in7: endpoint {
+ remote-endpoint = <&etm7_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_apss_out: endpoint {
+ remote-endpoint = <&funnel_apss_merg_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@7810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x07810000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ funnel_apss_merg_in0: endpoint {
+ remote-endpoint = <&funnel_apss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ funnel_apss_merg_in2: endpoint {
+ remote-endpoint = <&tpda_olc_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ funnel_apss_merg_in3: endpoint {
+ remote-endpoint = <&tpda_llm_silver_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ funnel_apss_merg_in4: endpoint {
+ remote-endpoint = <&tpda_llm_gold_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ funnel_apss_merg_in5: endpoint {
+ remote-endpoint = <&tpda_apss_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_apss_merg_out: endpoint {
+ remote-endpoint = <&funnel_in1_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@7830000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x07830000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_olc_out: endpoint {
+ remote-endpoint = <&tpda_olc_in>;
+ };
+ };
+ };
+ };
+
+ tpda@7832000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x07832000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tpda_olc_in: endpoint {
+ remote-endpoint = <&tpdm_olc_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_olc_out: endpoint {
+ remote-endpoint = <&funnel_apss_merg_in2>;
+ };
+ };
+ };
+ };
+
+ tpdm@7860000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x07860000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_apss_out: endpoint {
+ remote-endpoint = <&tpda_apss_in>;
+ };
+ };
+ };
+ };
+
+ tpda@7862000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x07862000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tpda_apss_in: endpoint {
+ remote-endpoint = <&tpdm_apss_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_apss_out: endpoint {
+ remote-endpoint = <&funnel_apss_merg_in5>;
+ };
+ };
+ };
+ };
+
+ tpdm@78a0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x078a0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_llm_silver_out: endpoint {
+ remote-endpoint = <&tpda_llm_silver_in>;
+ };
+ };
+ };
+ };
+
+ tpdm@78b0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x078b0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ tpdm_llm_gold_out: endpoint {
+ remote-endpoint = <&tpda_llm_gold_in>;
+ };
+ };
+ };
+ };
+
+ tpda@78c0000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x078c0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tpda_llm_silver_in: endpoint {
+ remote-endpoint = <&tpdm_llm_silver_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_llm_silver_out: endpoint {
+ remote-endpoint = <&funnel_apss_merg_in3>;
+ };
+ };
+ };
+ };
+
+ tpda@78d0000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x078d0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tpda_llm_gold_in: endpoint {
+ remote-endpoint = <&tpdm_llm_gold_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tpda_llm_gold_out: endpoint {
+ remote-endpoint = <&funnel_apss_merg_in4>;
+ };
+ };
+ };
+ };
+
+ cti@78e0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x078e0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@78f0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x078f0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@7900000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x07900000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ pmu@90b6300 {
+ compatible = "qcom,qcs615-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0x0 0x090b6300 0x0 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+
+ cpu_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <12896000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <14928000>;
+ };
+ };
+ };
+
+ pmu@90cd000 {
+ compatible = "qcom,qcs615-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
+ reg = <0x0 0x090cd000 0x0 0x1000>;
+ interrupts = <GIC_SPI 667 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&llcc_bwmon_opp_table>;
+
+ llcc_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <800000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <1200000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <1804800>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <2188800>;
+ };
+
+ opp-4 {
+ opp-peak-kBps = <2726400>;
+ };
+
+ opp-5 {
+ opp-peak-kBps = <3072000>;
+ };
+
+ opp-6 {
+ opp-peak-kBps = <4070400>;
+ };
+
+ opp-7 {
+ opp-peak-kBps = <5414400>;
+ };
+
+ opp-8 {
+ opp-peak-kBps = <6220800>;
+ };
+ };
+ };
+
+ sdhc_2: mmc@8804000 {
+ compatible = "qcom,qcs615-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x0 0x08804000 0x0 0x1000>;
+ reg-names = "hc";
+
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq",
+ "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface",
+ "core",
+ "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc2_opp_table>;
+ iommus = <&apps_smmu 0x02a0 0x0>;
+ resets = <&gcc GCC_SDCC2_BCR>;
+ interconnects = <&aggre1_noc MASTER_SDCC_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "sdhc-ddr",
+ "cpu-sdhc";
+
+ qcom,dll-config = <0x0007642c>;
+ qcom,ddr-config = <0x80040868>;
+ dma-coherent;
+
+ status = "disabled";
+
+ sdhc2_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-50000000 {
+ opp-hz = /bits/ 64 <50000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-202000000 {
+ opp-hz = /bits/ 64 <202000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ dc_noc: interconnect@9160000 {
+ reg = <0x0 0x09160000 0x0 0x3200>;
+ compatible = "qcom,qcs615-dc-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ llcc: system-cache-controller@9200000 {
+ compatible = "qcom,qcs615-llcc";
+ reg = <0x0 0x09200000 0x0 0x50000>,
+ <0x0 0x09600000 0x0 0x50000>;
+ reg-names = "llcc0_base",
+ "llcc_broadcast_base";
+ };
+
+ gem_noc: interconnect@9680000 {
+ reg = <0x0 0x09680000 0x0 0x3e200>;
+ compatible = "qcom,qcs615-gem-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,qcs615-pdc", "qcom,pdc";
+ reg = <0x0 0x0b220000 0x0 0x30000>,
+ <0x0 0x17c000f0 0x0 0x64>;
+ qcom,pdc-ranges = <0 480 94>, <94 609 31>, <125 63 1>;
+ interrupt-parent = <&intc>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ aoss_qmp: power-controller@c300000 {
+ compatible = "qcom,qcs615-aoss-qmp", "qcom,aoss-qmp";
+ reg = <0x0 0x0c300000 0x0 0x400>;
+ interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apss_shared 0>;
+
+ #clock-cells = <0>;
+ #power-domain-cells = <1>;
+ };
+
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0x0 0x0c3f0000 0x0 0x400>;
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,qcs615-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x15000000 0x0 0x80000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ dma-coherent;
+
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0 0x0c440000 0x0 0x1100>,
+ <0x0 0x0c600000 0x0 0x2000000>,
+ <0x0 0x0e600000 0x0 0x100000>,
+ <0x0 0x0e700000 0x0 0xa0000>,
+ <0x0 0x0c40a000 0x0 0x26000>;
+ reg-names = "core",
+ "chnls",
+ "obsrvr",
+ "intr",
+ "cnfg";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "periph_irq";
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ qcom,channel = <0>;
+ qcom,ee = <0>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x17a00000 0x0 0x10000>, /* GICD */
+ <0x0 0x17a60000 0x0 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ };
+
+ apss_shared: mailbox@17c00000 {
+ compatible = "qcom,qcs615-apss-shared",
+ "qcom,sdm845-apss-shared";
+ reg = <0x0 0x17c00000 0x0 0x1000>;
+ #mbox-cells = <1>;
+ };
+
+ watchdog: watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-qcs615", "qcom,kpss-wdt";
+ reg = <0x0 0x17c10000 0x0 0x1000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ timer@17c20000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x17c20000 0x0 0x1000>;
+ ranges = <0 0 0 0x20000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ frame@17c21000 {
+ reg = <0x17c21000 0x1000>,
+ <0x17c22000 0x1000>;
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ frame@17c23000 {
+ reg = <0x17c23000 0x1000>;
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c25000 {
+ reg = <0x17c25000 0x1000>;
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c27000 {
+ reg = <0x17c27000 0x1000>;
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c29000 {
+ reg = <0x17c29000 0x1000>;
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c2b000 {
+ reg = <0x17c2b000 0x1000>;
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c2d000 {
+ reg = <0x17c2d000 0x1000>;
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@18200000 {
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x18200000 0x0 0x10000>,
+ <0x0 0x18210000 0x0 0x10000>,
+ <0x0 0x18220000 0x0 0x10000>;
+ reg-names = "drv-0",
+ "drv-1",
+ "drv-2";
+
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+
+ qcom,drv-id = <2>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
+
+ label = "apps_rsc";
+ power-domains = <&cluster_pd>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,qcs615-rpmh-clk";
+ clock-names = "xo";
+
+ #clock-cells = <1>;
+ };
+
+ rpmhpd: power-controller {
+ compatible = "qcom,qcs615-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp-0 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp-1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp-2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp-3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp-4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp-5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp-6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp-7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp-8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp-9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+ };
+
+ usb_1_hsphy: phy@88e2000 {
+ compatible = "qcom,qcs615-qusb2-phy";
+ reg = <0x0 0x88e2000 0x0 0x180>;
+
+ clocks = <&gcc GCC_AHB2PHY_WEST_CLK>, <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2_hstx_trim>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_hsphy_2: phy@88e3000 {
+ compatible = "qcom,qcs615-qusb2-phy";
+ reg = <0x0 0x088e3000 0x0 0x180>;
+
+ clocks = <&gcc GCC_AHB2PHY_WEST_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "cfg_ahb",
+ "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_qmpphy: phy@88e6000 {
+ compatible = "qcom,qcs615-qmp-usb3-phy";
+ reg = <0x0 0x88e6000 0x0 0x1000>;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&gcc GCC_AHB2PHY_WEST_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux",
+ "ref",
+ "cfg_ahb",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_SP0_BCR>,
+ <&gcc GCC_USB3PHY_PHY_PRIM_SP0_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ qcom,tcsr-reg = <&tcsr 0xb244>;
+
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ #clock-cells = <0>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_1: usb@a6f8800 {
+ compatible = "qcom,qcs615-dwc3", "qcom,dwc3";
+ reg = <0x0 0x0a6f8800 0x0 0x400>;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 9 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+ "ss_phy_irq";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ usb_1_dwc3: usb@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x0a600000 0x0 0xcd00>;
+
+ iommus = <&apps_smmu 0x140 0x0>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&usb_1_hsphy>, <&usb_qmpphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,usb3_lpm_capable;
+ };
+ };
+
+ usb_2: usb@a8f8800 {
+ compatible = "qcom,qcs615-dwc3", "qcom,dwc3";
+ reg = <0x0 0x0a8f8800 0x0 0x400>;
+
+ clocks = <&gcc GCC_CFG_NOC_USB2_SEC_AXI_CLK>,
+ <&gcc GCC_USB20_SEC_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB2_SEC_AXI_CLK>,
+ <&gcc GCC_USB20_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB20_SEC_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB2_PRIM_CLKREF_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
+
+ assigned-clocks = <&gcc GCC_USB20_SEC_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 663 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 662 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq";
+
+ power-domains = <&gcc USB20_SEC_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB20_SEC_BCR>;
+
+ qcom,select-utmi-as-pipe-clk;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ usb_2_dwc3: usb@a800000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x0a800000 0x0 0xcd00>;
+
+ iommus = <&apps_smmu 0xe0 0x0>;
+ interrupts = <GIC_SPI 664 IRQ_TYPE_LEVEL_HIGH>;
+
+ phys = <&usb_hsphy_2>;
+ phy-names = "usb2-phy";
+
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+
+ maximum-speed = "high-speed";
+ };
+ };
+ };
+
+ arch_timer: timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
index 27695bd54220..7a36c90ad4ec 100644
--- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
+++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts
@@ -9,6 +9,7 @@
#define PM7250B_SID 8
#define PM7250B_SID1 9
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sc7280.dtsi"
@@ -744,6 +745,46 @@
};
};
+&pm8350c_pwm {
+ nvmem = <&pmk8350_sdam_21>,
+ <&pmk8350_sdam_22>;
+ nvmem-names = "lpg_chan_sdam",
+ "lut_sdam";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "okay";
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <3>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ panic-indicator;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <1>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+};
+
&pmk8350_rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qcs8300-ride.dts b/arch/arm64/boot/dts/qcom/qcs8300-ride.dts
new file mode 100644
index 000000000000..b5c9f89b3435
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8300-ride.dts
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "qcs8300.dtsi"
+/ {
+ model = "Qualcomm Technologies, Inc. QCS8300 Ride";
+ compatible = "qcom,qcs8300-ride", "qcom,qcs8300";
+ chassis-type = "embedded";
+
+ aliases {
+ serial0 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pmm8654au-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vreg_s4a: smps4 {
+ regulator-name = "vreg_s4a";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s9a: smps9 {
+ regulator-name = "vreg_s9a";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a: ldo3 {
+ regulator-name = "vreg_l3a";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4a: ldo4 {
+ regulator-name = "vreg_l4a";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a: ldo5 {
+ regulator-name = "vreg_l5a";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a: ldo6 {
+ regulator-name = "vreg_l6a";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a: ldo7 {
+ regulator-name = "vreg_l7a";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8a: ldo8 {
+ regulator-name = "vreg_l8a";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a: ldo9 {
+ regulator-name = "vreg_l9a";
+ regulator-min-microvolt = <2970000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pmm8654au-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vreg_s5c: smps5 {
+ regulator-name = "vreg_s5c";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <500000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c: ldo4 {
+ regulator-name = "vreg_l4c";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c: ldo6 {
+ regulator-name = "vreg_l6c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c: ldo7 {
+ regulator-name = "vreg_l7c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c: ldo9 {
+ regulator-name = "vreg_l9c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&ethernet0 {
+ phy-mode = "2500base-x";
+ phy-handle = <&phy0>;
+
+ pinctrl-0 = <&ethernet0_default>;
+ pinctrl-names = "default";
+
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,ps-speed = <1000>;
+
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: phy@8 {
+ compatible = "ethernet-phy-id31c3.1c33";
+ reg = <0x8>;
+ device_type = "ethernet-phy";
+ interrupts-extended = <&tlmm 4 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tlmm 31 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <11000>;
+ reset-deassert-us = <70000>;
+ };
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <4>;
+ snps,rx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,route-up;
+ snps,priority = <0x1>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x1>;
+ snps,route-ptp;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x2>;
+ snps,route-avcp;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,map-to-dma-channel = <0x3>;
+ snps,priority = <0xc>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ snps,tx-sched-sp;
+
+ queue0 {
+ snps,dcb-algorithm;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ };
+
+ queue2 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+
+ queue3 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3e800>;
+ snps,low_credit = <0xffc18000>;
+ };
+ };
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/qcs8300/adsp.mbn";
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/qcs8300/cdsp0.mbn";
+ status = "okay";
+};
+
+&remoteproc_gpdsp {
+ firmware-name = "qcom/qcs8300/gpdsp0.mbn";
+ status = "okay";
+};
+
+&serdes0 {
+ phy-supply = <&vreg_l5a>;
+ status = "okay";
+};
+
+&tlmm {
+ ethernet0_default: ethernet0-default-state {
+ ethernet0_mdc: ethernet0-mdc-pins {
+ pins = "gpio5";
+ function = "emac0_mdc";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+
+ ethernet0_mdio: ethernet0-mdio-pins {
+ pins = "gpio6";
+ function = "emac0_mdio";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+};
+
+&uart7 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&vreg_l8a>;
+ vcc-max-microamp = <1100000>;
+ vccq-supply = <&vreg_l4c>;
+ vccq-max-microamp = <1200000>;
+ status = "okay";
+};
+
+&ufs_mem_phy {
+ vdda-phy-supply = <&vreg_l4a>;
+ vdda-pll-supply = <&vreg_l5a>;
+ status = "okay";
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l7a>;
+ vdda18-supply = <&vreg_l7c>;
+ vdda33-supply = <&vreg_l9a>;
+
+ status = "okay";
+};
+
+&usb_qmpphy {
+ vdda-phy-supply = <&vreg_l7a>;
+ vdda-pll-supply = <&vreg_l5a>;
+
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8300.dtsi b/arch/arm64/boot/dts/qcom/qcs8300.dtsi
new file mode 100644
index 000000000000..4a057f7c0d9f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs8300.dtsi
@@ -0,0 +1,3548 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,qcs8300-gcc.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
+#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
+#include <dt-bindings/clock/qcom,sa8775p-videocc.h>
+#include <dt-bindings/firmware/qcom,scm.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
+#include <dt-bindings/interconnect/qcom,qcs8300-rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clocks {
+ xo_board_clk: xo-board-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a78c";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1946>;
+ dynamic-power-coefficient = <472>;
+
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a78c";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&l2_1>;
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1946>;
+ dynamic-power-coefficient = <472>;
+
+ l2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a78c";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&l2_2>;
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1946>;
+ dynamic-power-coefficient = <507>;
+
+ l2_2: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a78c";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&l2_3>;
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1946>;
+ dynamic-power-coefficient = <507>;
+
+ l2_3: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu4: cpu@10000 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x10000>;
+ enable-method = "psci";
+ next-level-cache = <&l2_4>;
+ power-domains = <&cpu_pd4>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+
+ l2_4: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_1>;
+ };
+ };
+
+ cpu5: cpu@10100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x10100>;
+ enable-method = "psci";
+ next-level-cache = <&l2_5>;
+ power-domains = <&cpu_pd5>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+
+ l2_5: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_1>;
+ };
+ };
+
+ cpu6: cpu@10200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x10200>;
+ enable-method = "psci";
+ next-level-cache = <&l2_6>;
+ power-domains = <&cpu_pd6>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+
+ l2_6: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_1>;
+ };
+ };
+
+ cpu7: cpu@10300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x10300>;
+ enable-method = "psci";
+ next-level-cache = <&l2_7>;
+ power-domains = <&cpu_pd7>;
+ power-domain-names = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
+
+ l2_7: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_1>;
+ };
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu4>;
+ };
+
+ core1 {
+ cpu = <&cpu5>;
+ };
+
+ core2 {
+ cpu = <&cpu6>;
+ };
+
+ core3 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ l3_0: l3-cache-0 {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ };
+
+ l3_1: l3-cache-1 {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ little_cpu_sleep_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <449>;
+ exit-latency-us = <801>;
+ min-residency-us = <1574>;
+ local-timer-stop;
+ };
+
+ little_cpu_sleep_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <602>;
+ exit-latency-us = <961>;
+ min-residency-us = <4288>;
+ local-timer-stop;
+ };
+
+ big_cpu_sleep_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ big_cpu_sleep_1: cpu-sleep-1-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "gold-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <1061>;
+ min-residency-us = <4488>;
+ local-timer-stop;
+ };
+ };
+
+ domain-idle-states {
+ silver_cluster_sleep: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <2552>;
+ exit-latency-us = <2848>;
+ min-residency-us = <5908>;
+ };
+
+ gold_cluster_sleep: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <2752>;
+ exit-latency-us = <3048>;
+ min-residency-us = <6118>;
+ };
+
+ system_sleep: domain-sleep {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x42000144>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9987>;
+ };
+ };
+ };
+
+ dummy_eud: dummy-sink {
+ compatible = "arm,coresight-dummy-sink";
+
+ in-ports {
+ port {
+ eud_in: endpoint {
+ remote-endpoint = <&swao_rep_out1>;
+ };
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm-qcs8300", "qcom,scm";
+ qcom,dload-mode = <&tcsr 0x13000>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
+ clk_virt: interconnect-0 {
+ compatible = "qcom,qcs8300-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect-1 {
+ compatible = "qcom,qcs8300-mc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu-a78 {
+ compatible = "arm,cortex-a78-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cpu_pd0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd0>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cpu_pd1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd0>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cpu_pd2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd0>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cpu_pd3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd0>;
+ domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ };
+
+ cpu_pd4: power-domain-cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd1>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd5: power-domain-cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd1>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd6: power-domain-cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd1>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cpu_pd7: power-domain-cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd1>;
+ domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ };
+
+ cluster_pd0: power-domain-cluster0 {
+ #power-domain-cells = <0>;
+ power-domains = <&system_pd>;
+ domain-idle-states = <&gold_cluster_sleep>;
+ };
+
+ cluster_pd1: power-domain-cluster1 {
+ #power-domain-cells = <0>;
+ power-domains = <&system_pd>;
+ domain-idle-states = <&silver_cluster_sleep>;
+ };
+
+ system_pd: power-domain-system {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&system_sleep>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ aop_image_mem: aop-image-region@90800000 {
+ reg = <0x0 0x90800000 0x0 0x60000>;
+ no-map;
+ };
+
+ aop_cmd_db_mem: aop-cmd-db-region@90860000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x90860000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: smem@90900000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x90900000 0x0 0x200000>;
+ no-map;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ lpass_machine_learning_mem: lpass-machine-learning-region@93b00000 {
+ reg = <0x0 0x93b00000 0x0 0xf00000>;
+ no-map;
+ };
+
+ adsp_rpc_remote_heap_mem: adsp-rpc-remote-heap-region@94a00000 {
+ reg = <0x0 0x94a00000 0x0 0x800000>;
+ no-map;
+ };
+
+ camera_mem: camera-region@95200000 {
+ reg = <0x0 0x95200000 0x0 0x500000>;
+ no-map;
+ };
+
+ adsp_mem: adsp-region@95c00000 {
+ no-map;
+ reg = <0x0 0x95c00000 0x0 0x1e00000>;
+ };
+
+ q6_adsp_dtb_mem: q6-adsp-dtb-region@97a00000 {
+ reg = <0x0 0x97a00000 0x0 0x80000>;
+ no-map;
+ };
+
+ q6_gpdsp_dtb_mem: q6-gpdsp-dtb-region@97a80000 {
+ reg = <0x0 0x97a80000 0x0 0x80000>;
+ no-map;
+ };
+
+ gpdsp_mem: gpdsp-region@97b00000 {
+ reg = <0x0 0x97b00000 0x0 0x1e00000>;
+ no-map;
+ };
+
+ q6_cdsp_dtb_mem: q6-cdsp-dtb-region@99900000 {
+ reg = <0x0 0x99900000 0x0 0x80000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp-region@99980000 {
+ reg = <0x0 0x99980000 0x0 0x1e00000>;
+ no-map;
+ };
+
+ gpu_microcode_mem: gpu-microcode-region@9b780000 {
+ reg = <0x0 0x9b780000 0x0 0x2000>;
+ no-map;
+ };
+
+ cvp_mem: cvp-region@9b782000 {
+ reg = <0x0 0x9b782000 0x0 0x700000>;
+ no-map;
+ };
+
+ video_mem: video-region@9be82000 {
+ reg = <0x0 0x9be82000 0x0 0x700000>;
+ no-map;
+ };
+ };
+
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <443>, <429>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ smp2p_adsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ smp2p_adsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+ smp2p-cdsp {
+ compatible = "qcom,smp2p";
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <94>, <432>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ smp2p_cdsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ smp2p_cdsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+ smp2p-gpdsp {
+ compatible = "qcom,smp2p";
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,smem = <617>, <616>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <17>;
+
+ smp2p_gpdsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ smp2p_gpdsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ ranges = <0 0 0 0 0x10 0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,qcs8300-gcc";
+ reg = <0x0 0x00100000 0x0 0xc7018>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>;
+ };
+
+ ipcc: mailbox@408000 {
+ compatible = "qcom,qcs8300-ipcc", "qcom,ipcc";
+ reg = <0x0 0x408000 0x0 0x1000>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #mbox-cells = <2>;
+ };
+
+ qfprom: efuse@784000 {
+ compatible = "qcom,qcs8300-qfprom", "qcom,qfprom";
+ reg = <0x0 0x00784000 0x0 0x1200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ qupv3_id_0: geniqup@9c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x9c0000 0x0 0x2000>;
+ ranges;
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ status = "disabled";
+
+ uart7: serial@99c000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0x0 0x0099c000 0x0 0x4000>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ clock-names = "se";
+ pinctrl-0 = <&qup_uart7_default>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+ status = "disabled";
+ };
+ };
+
+ rng: rng@10d2000 {
+ compatible = "qcom,qcs8300-trng", "qcom,trng";
+ reg = <0x0 0x010d2000 0x0 0x1000>;
+ };
+
+ config_noc: interconnect@14c0000 {
+ compatible = "qcom,qcs8300-config-noc";
+ reg = <0x0 0x014c0000 0x0 0x13080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1680000 {
+ compatible = "qcom,qcs8300-system-noc";
+ reg = <0x0 0x01680000 0x0 0x15080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16c0000 {
+ compatible = "qcom,qcs8300-aggre1-noc";
+ reg = <0x0 0x016c0000 0x0 0x17080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre2_noc: interconnect@1700000 {
+ compatible = "qcom,qcs8300-aggre2-noc";
+ reg = <0x0 0x01700000 0x0 0x1a080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ pcie_anoc: interconnect@1760000 {
+ compatible = "qcom,qcs8300-pcie-anoc";
+ reg = <0x0 0x01760000 0x0 0xc080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ gpdsp_anoc: interconnect@1780000 {
+ compatible = "qcom,qcs8300-gpdsp-anoc";
+ reg = <0x0 0x01780000 0x0 0xd080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@17a0000 {
+ compatible = "qcom,qcs8300-mmss-noc";
+ reg = <0x0 0x017a0000 0x0 0x40000>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ ufs_mem_hc: ufs@1d84000 {
+ compatible = "qcom,qcs8300-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
+ reg = <0x0 0x01d84000 0x0 0x3000>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufs_mem_phy>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ #reset-cells = <1>;
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "rst";
+
+ power-domains = <&gcc GCC_UFS_PHY_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ iommus = <&apps_smmu 0x100 0x0>;
+ dma-coherent;
+
+ interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "ufs-ddr",
+ "cpu-ufs";
+
+ clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ clock-names = "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ freq-table-hz = <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+ qcom,ice = <&ice>;
+ status = "disabled";
+ };
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,qcs8300-qmp-ufs-phy", "qcom,sa8775p-qmp-ufs-phy";
+ reg = <0x0 0x01d87000 0x0 0xe10>;
+ /*
+ * Yes, GCC_EDP_REF_CLKREF_EN is correct in qref. It
+ * enables the CXO clock to eDP *and* UFS PHY.
+ */
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
+ <&gcc GCC_EDP_REF_CLKREF_EN>;
+ clock-names = "ref",
+ "ref_aux",
+ "qref";
+ power-domains = <&gcc GCC_UFS_PHY_GDSC>;
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ cryptobam: dma-controller@1dc4000 {
+ compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ reg = <0x0 0x01dc4000 0x0 0x28000>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ num-channels = <20>;
+ qcom,num-ees = <4>;
+ iommus = <&apps_smmu 0x480 0x00>,
+ <&apps_smmu 0x481 0x00>;
+ };
+
+ crypto: crypto@1dfa000 {
+ compatible = "qcom,qcs8300-qce", "qcom,qce";
+ reg = <0x0 0x01dfa000 0x0 0x6000>;
+ dmas = <&cryptobam 4>, <&cryptobam 5>;
+ dma-names = "rx", "tx";
+ iommus = <&apps_smmu 0x480 0x00>,
+ <&apps_smmu 0x481 0x00>;
+ interconnects = <&aggre2_noc MASTER_CRYPTO_CORE0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "memory";
+ };
+
+ ice: crypto@1d88000 {
+ compatible = "qcom,qcs8300-inline-crypto-engine",
+ "qcom,inline-crypto-engine";
+ reg = <0x0 0x01d88000 0x0 0x18000>;
+ clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x0 0x01f40000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+ tcsr: syscon@1fc0000 {
+ compatible = "qcom,qcs8300-tcsr", "syscon";
+ reg = <0x0 0x1fc0000 0x0 0x30000>;
+ };
+
+ remoteproc_adsp: remoteproc@3000000 {
+ compatible = "qcom,qcs8300-adsp-pas", "qcom,sa8775p-adsp-pas";
+ reg = <0x0 0x3000000 0x0 0x00100>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx",
+ "lmx";
+
+ memory-region = <&adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ memory-region = <&adsp_rpc_remote_heap_mem>;
+ qcom,vmids = <QCOM_SCM_VMID_LPASS
+ QCOM_SCM_VMID_ADSP_HEAP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x2003 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x2004 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x2005 0x0>;
+ dma-coherent;
+ };
+ };
+ };
+ };
+
+ lpass_ag_noc: interconnect@3c40000 {
+ compatible = "qcom,qcs8300-lpass-ag-noc";
+ reg = <0x0 0x03c40000 0x0 0x17200>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ stm@4002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0x0 0x04002000 0x0 0x1000>,
+ <0x0 0x16280000 0x0 0x180000>;
+ reg-names = "stm-base",
+ "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ tpda@4004000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04004000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ qdss_tpda_in1: endpoint {
+ remote-endpoint = <&qdss_tpdm1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ qdss_tpda_out: endpoint {
+ remote-endpoint = <&funnel0_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@400f000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x0400f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ qdss_tpdm1_out: endpoint {
+ remote-endpoint = <&qdss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@4041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04041000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ funnel0_in6: endpoint {
+ remote-endpoint = <&qdss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel0_in7: endpoint {
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel0_out: endpoint {
+ remote-endpoint = <&qdss_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+
+ funnel1_in4: endpoint {
+ remote-endpoint = <&apss_funnel1_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ funnel1_in5: endpoint {
+ remote-endpoint = <&dlct0_funnel_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ funnel1_in6: endpoint {
+ remote-endpoint = <&dlmm_funnel_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel1_in7: endpoint {
+ remote-endpoint = <&dlst_ch_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel1_out: endpoint {
+ remote-endpoint = <&qdss_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@4045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ qdss_funnel_in0: endpoint {
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ qdss_funnel_in1: endpoint {
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ qdss_funnel_out: endpoint {
+ remote-endpoint = <&aoss_funnel_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@4841000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04841000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ prng_tpdm_out: endpoint {
+ remote-endpoint = <&dlct0_tpda_in19>;
+ };
+ };
+ };
+ };
+
+ tpdm@4850000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04850000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ pimem_tpdm_out: endpoint {
+ remote-endpoint = <&dlct0_tpda_in25>;
+ };
+ };
+ };
+ };
+
+ tpdm@4860000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04860000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlst_ch_tpdm0_out: endpoint {
+ remote-endpoint = <&dlst_ch_tpda_in8>;
+ };
+ };
+ };
+ };
+
+ tpda@4864000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04864000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@8 {
+ reg = <8>;
+
+ dlst_ch_tpda_in8: endpoint {
+ remote-endpoint = <&dlst_ch_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_ch_tpda_out: endpoint {
+ remote-endpoint = <&dlst_ch_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4865000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04865000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dlst_ch_funnel_in0: endpoint {
+ remote-endpoint = <&dlst_ch_tpda_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ dlst_ch_funnel_in4: endpoint {
+ remote-endpoint = <&dlst_funnel_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ dlst_ch_funnel_in6: endpoint {
+ remote-endpoint = <&gdsp_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_ch_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@4980000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04980000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ turing2_tpdm_out: endpoint {
+ remote-endpoint = <&turing2_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4983000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04983000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ turing2_funnel_in0: endpoint {
+ remote-endpoint = <&turing2_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ turing2_funnel_out0: endpoint {
+ remote-endpoint = <&gdsp_tpda_in5>;
+ };
+ };
+ };
+ };
+
+ tpdm@4ac0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04ac0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlmm_tpdm0_out: endpoint {
+ remote-endpoint = <&dlmm_tpda_in27>;
+ };
+ };
+ };
+ };
+
+ tpda@4ac4000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04ac4000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1b {
+ reg = <27>;
+
+ dlmm_tpda_in27: endpoint {
+ remote-endpoint = <&dlmm_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlmm_tpda_out: endpoint {
+ remote-endpoint = <&dlmm_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4ac5000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04ac5000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ dlmm_funnel_in0: endpoint {
+ remote-endpoint = <&dlmm_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlmm_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@4ad0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04ad0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlct0_tpdm0_out: endpoint {
+ remote-endpoint = <&dlct0_tpda_in26>;
+ };
+ };
+ };
+ };
+
+ tpda@4ad3000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04ad3000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@13 {
+ reg = <19>;
+
+ dlct0_tpda_in19: endpoint {
+ remote-endpoint = <&prng_tpdm_out>;
+ };
+ };
+
+ port@19 {
+ reg = <25>;
+
+ dlct0_tpda_in25: endpoint {
+ remote-endpoint = <&pimem_tpdm_out>;
+ };
+ };
+
+ port@1a {
+ reg = <26>;
+
+ dlct0_tpda_in26: endpoint {
+ remote-endpoint = <&dlct0_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct0_tpda_out: endpoint {
+ remote-endpoint = <&dlct0_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4ad4000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04ad4000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dlct0_funnel_in0: endpoint {
+ remote-endpoint = <&dlct0_tpda_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ dlct0_funnel_in4: endpoint {
+ remote-endpoint = <&ddr_funnel5_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct0_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in5>;
+ };
+ };
+ };
+ };
+
+ funnel@4b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ aoss_funnel_in6: endpoint {
+ remote-endpoint = <&aoss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ aoss_funnel_in7: endpoint {
+ remote-endpoint = <&qdss_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ aoss_funnel_out: endpoint {
+ remote-endpoint = <&etf0_in>;
+ };
+ };
+ };
+ };
+
+ tmc_etf: tmc@4b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x04b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ etf0_in: endpoint {
+ remote-endpoint = <&aoss_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ etf0_out: endpoint {
+ remote-endpoint = <&swao_rep_in>;
+ };
+ };
+ };
+ };
+
+ replicator@4b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x04b06000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ swao_rep_in: endpoint {
+ remote-endpoint = <&etf0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ swao_rep_out1: endpoint {
+ remote-endpoint = <&eud_in>;
+ };
+ };
+ };
+ };
+
+ tpda@4b08000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04b08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ aoss_tpda_in0: endpoint {
+ remote-endpoint = <&aoss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ aoss_tpda_in1: endpoint {
+ remote-endpoint = <&aoss_tpdm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ aoss_tpda_in2: endpoint {
+ remote-endpoint = <&aoss_tpdm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ aoss_tpda_in3: endpoint {
+ remote-endpoint = <&aoss_tpdm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ aoss_tpda_in4: endpoint {
+ remote-endpoint = <&aoss_tpdm4_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ aoss_tpda_out: endpoint {
+ remote-endpoint = <&aoss_funnel_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b09000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b09000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm0_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0a000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b0a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm1_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0b000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b0b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm2_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b0c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm3_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@4b0d000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b0d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm4_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ cti@4b13000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x04b13000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@4b80000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04b80000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ turing0_tpdm0_out: endpoint {
+ remote-endpoint = <&turing0_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpda@4b86000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04b86000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ turing0_tpda_in0: endpoint {
+ remote-endpoint = <&turing0_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ turing0_tpda_out: endpoint {
+ remote-endpoint = <&turing0_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4b87000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04b87000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ turing0_funnel_in0: endpoint {
+ remote-endpoint = <&turing0_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ turing0_funnel_out: endpoint {
+ remote-endpoint = <&gdsp_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ cti@4b8b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x04b8b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@4c40000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04c40000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ gdsp_tpdm0_out: endpoint {
+ remote-endpoint = <&gdsp_tpda_in8>;
+ };
+ };
+ };
+ };
+
+ tpda@4c44000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04c44000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@5 {
+ reg = <5>;
+
+ gdsp_tpda_in5: endpoint {
+ remote-endpoint = <&turing2_funnel_out0>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+
+ gdsp_tpda_in8: endpoint {
+ remote-endpoint = <&gdsp_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ gdsp_tpda_out: endpoint {
+ remote-endpoint = <&gdsp_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4c45000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04c45000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ gdsp_funnel_in0: endpoint {
+ remote-endpoint = <&gdsp_tpda_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ gdsp_funnel_in4: endpoint {
+ remote-endpoint = <&turing0_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ gdsp_funnel_out: endpoint {
+ remote-endpoint = <&dlst_ch_funnel_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@4c50000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04c50000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlst_tpdm0_out: endpoint {
+ remote-endpoint = <&dlst_tpda_in8>;
+ };
+ };
+ };
+ };
+
+ tpda@4c54000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04c54000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@8 {
+ reg = <8>;
+
+ dlst_tpda_in8: endpoint {
+ remote-endpoint = <&dlst_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_tpda_out: endpoint {
+ remote-endpoint = <&dlst_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4c55000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04c55000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ dlst_funnel_in0: endpoint {
+ remote-endpoint = <&dlst_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_funnel_out: endpoint {
+ remote-endpoint = <&dlst_ch_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@4e00000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04e00000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ ddr_tpdm3_out: endpoint {
+ remote-endpoint = <&ddr_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpda@4e03000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x04e03000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ddr_tpda_in0: endpoint {
+ remote-endpoint = <&ddr_funnel0_out0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ddr_tpda_in1: endpoint {
+ remote-endpoint = <&ddr_funnel1_out0>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ ddr_tpda_in4: endpoint {
+ remote-endpoint = <&ddr_tpdm3_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_tpda_out: endpoint {
+ remote-endpoint = <&ddr_funnel5_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4e04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04e04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ ddr_funnel5_in0: endpoint {
+ remote-endpoint = <&ddr_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_funnel5_out: endpoint {
+ remote-endpoint = <&dlct0_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@4e10000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04e10000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ ddr_tpdm0_out: endpoint {
+ remote-endpoint = <&ddr_funnel0_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4e12000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04e12000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ ddr_funnel0_in0: endpoint {
+ remote-endpoint = <&ddr_tpdm0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_funnel0_out0: endpoint {
+ remote-endpoint = <&ddr_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@4e20000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x04e20000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ ddr_tpdm1_out: endpoint {
+ remote-endpoint = <&ddr_funnel1_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@4e22000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x04e22000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ ddr_funnel1_in0: endpoint {
+ remote-endpoint = <&ddr_tpdm1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_funnel1_out0: endpoint {
+ remote-endpoint = <&ddr_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ etm@6040000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06040000 0x0 0x1000>;
+ cpu = <&cpu0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in0>;
+ };
+ };
+ };
+ };
+
+ etm@6140000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06140000 0x0 0x1000>;
+ cpu = <&cpu1>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in1>;
+ };
+ };
+ };
+ };
+
+ etm@6240000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06240000 0x0 0x1000>;
+ cpu = <&cpu2>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in2>;
+ };
+ };
+ };
+ };
+
+ etm@6340000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06340000 0x0 0x1000>;
+ cpu = <&cpu3>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in3>;
+ };
+ };
+ };
+ };
+
+ etm@6440000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06440000 0x0 0x1000>;
+ cpu = <&cpu4>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm4_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in4>;
+ };
+ };
+ };
+ };
+
+ etm@6540000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06540000 0x0 0x1000>;
+ cpu = <&cpu5>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm5_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in5>;
+ };
+ };
+ };
+ };
+
+ etm@6640000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06640000 0x0 0x1000>;
+ cpu = <&cpu6>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm6_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in6>;
+ };
+ };
+ };
+ };
+
+ etm@6740000 {
+ compatible = "arm,primecell";
+ reg = <0x0 0x06740000 0x0 0x1000>;
+ cpu = <&cpu7>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm7_out: endpoint {
+ remote-endpoint = <&apss_funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@6800000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06800000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ apss_funnel0_in0: endpoint {
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ apss_funnel0_in1: endpoint {
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ apss_funnel0_in2: endpoint {
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ apss_funnel0_in3: endpoint {
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ apss_funnel0_in4: endpoint {
+ remote-endpoint = <&etm4_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ apss_funnel0_in5: endpoint {
+ remote-endpoint = <&etm5_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ apss_funnel0_in6: endpoint {
+ remote-endpoint = <&etm6_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ apss_funnel0_in7: endpoint {
+ remote-endpoint = <&etm7_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ apss_funnel0_out: endpoint {
+ remote-endpoint = <&apss_funnel1_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@6810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x06810000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ apss_funnel1_in0: endpoint {
+ remote-endpoint = <&apss_funnel0_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ apss_funnel1_in3: endpoint {
+ remote-endpoint = <&apss_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ apss_funnel1_out: endpoint {
+ remote-endpoint = <&funnel1_in4>;
+ };
+ };
+ };
+ };
+
+ cti@682b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x0682b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@6860000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06860000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm3_out: endpoint {
+ remote-endpoint = <&apss_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@6861000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x06861000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm4_out: endpoint {
+ remote-endpoint = <&apss_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpda@6863000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x06863000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ apss_tpda_in0: endpoint {
+ remote-endpoint = <&apss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ apss_tpda_in1: endpoint {
+ remote-endpoint = <&apss_tpdm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ apss_tpda_in2: endpoint {
+ remote-endpoint = <&apss_tpdm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ apss_tpda_in3: endpoint {
+ remote-endpoint = <&apss_tpdm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ apss_tpda_in4: endpoint {
+ remote-endpoint = <&apss_tpdm4_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ apss_tpda_out: endpoint {
+ remote-endpoint = <&apss_funnel1_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@68a0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x068a0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm1_out: endpoint {
+ remote-endpoint = <&apss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@68b0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x068b0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm0_out: endpoint {
+ remote-endpoint = <&apss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@68c0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x068c0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ apss_tpdm2_out: endpoint {
+ remote-endpoint = <&apss_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ cti@68e0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x068e0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@68f0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x068f0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@6900000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x06900000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ usb_1_hsphy: phy@8904000 {
+ compatible = "qcom,qcs8300-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0x0 0x08904000 0x0 0x400>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_USB2_PHY_PRIM_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_2_hsphy: phy@8906000 {
+ compatible = "qcom,qcs8300-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0x0 0x08906000 0x0 0x400>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_USB2_PHY_SEC_BCR>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb_qmpphy: phy@8907000 {
+ compatible = "qcom,qcs8300-qmp-usb3-uni-phy";
+ reg = <0x0 0x08907000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&gcc GCC_USB_CLKREF_EN>,
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux",
+ "ref",
+ "com_aux",
+ "pipe";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3PHY_PHY_PRIM_BCR>;
+ reset-names = "phy", "phy_phy";
+
+ power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "usb3_prim_phy_pipe_clk_src";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ serdes0: phy@8909000 {
+ compatible = "qcom,qcs8300-dwmac-sgmii-phy", "qcom,sa8775p-dwmac-sgmii-phy";
+ reg = <0x0 0x08909000 0x0 0x00000e10>;
+ clocks = <&gcc GCC_SGMI_CLKREF_EN>;
+ clock-names = "sgmi_ref";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,qcs8300-gpucc";
+ reg = <0x0 0x03d90000 0x0 0xa000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ pmu@9091000 {
+ compatible = "qcom,qcs8300-llcc-bwmon", "qcom,sc7280-llcc-bwmon";
+ reg = <0x0 0x9091000 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 620 IRQ_TYPE_LEVEL_HIGH>;
+
+ interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&llcc_bwmon_opp_table>;
+
+ llcc_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <762000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <1720000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <2086000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <2601000>;
+ };
+
+ opp-4 {
+ opp-peak-kBps = <2929000>;
+ };
+
+ opp-5 {
+ opp-peak-kBps = <5931000>;
+ };
+
+ opp-6 {
+ opp-peak-kBps = <6515000>;
+ };
+
+ opp-7 {
+ opp-peak-kBps = <7984000>;
+ };
+
+ opp-8 {
+ opp-peak-kBps = <10437000>;
+ };
+
+ opp-9 {
+ opp-peak-kBps = <12195000>;
+ };
+ };
+ };
+
+ pmu@90b5400 {
+ compatible = "qcom,qcs8300-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0x0 0x90b5400 0x0 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+
+ cpu_bwmon_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-0 {
+ opp-peak-kBps = <9155000>;
+ };
+
+ opp-1 {
+ opp-peak-kBps = <12298000>;
+ };
+
+ opp-2 {
+ opp-peak-kBps = <14236000>;
+ };
+
+ opp-3 {
+ opp-peak-kBps = <16265000>;
+ };
+ };
+ };
+
+ pmu@90b6400 {
+ compatible = "qcom,qcs8300-cpu-bwmon", "qcom,sdm845-bwmon";
+ reg = <0x0 0x90b6400 0x0 0x600>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>;
+
+ operating-points-v2 = <&cpu_bwmon_opp_table>;
+ };
+
+ dc_noc: interconnect@90e0000 {
+ compatible = "qcom,qcs8300-dc-noc";
+ reg = <0x0 0x090e0000 0x0 0x5080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ gem_noc: interconnect@9100000 {
+ compatible = "qcom,qcs8300-gem-noc";
+ reg = <0x0 0x9100000 0x0 0xf7080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ llcc: system-cache-controller@9200000 {
+ compatible = "qcom,qcs8300-llcc";
+ reg = <0x0 0x09200000 0x0 0x80000>,
+ <0x0 0x09300000 0x0 0x80000>,
+ <0x0 0x09400000 0x0 0x80000>,
+ <0x0 0x09500000 0x0 0x80000>,
+ <0x0 0x09a00000 0x0 0x80000>;
+ reg-names = "llcc0_base",
+ "llcc1_base",
+ "llcc2_base",
+ "llcc3_base",
+ "llcc_broadcast_base";
+ interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usb_1: usb@a6f8800 {
+ compatible = "qcom,qcs8300-dwc3", "qcom,dwc3";
+ reg = <0x0 0x0a6f8800 0x0 0x400>;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+ "ss_phy_irq";
+
+ power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre1_noc MASTER_USB3_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_USB3_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
+ wakeup-source;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ usb_1_dwc3: usb@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x0a600000 0x0 0xe000>;
+ interrupts = <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x80 0x0>;
+ phys = <&usb_1_hsphy>, <&usb_qmpphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ };
+ };
+
+ usb_2: usb@a4f8800 {
+ compatible = "qcom,qcs8300-dwc3", "qcom,dwc3";
+ reg = <0x0 0x0a4f8800 0x0 0x400>;
+
+ clocks = <&gcc GCC_CFG_NOC_USB2_PRIM_AXI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB2_PRIM_AXI_CLK>,
+ <&gcc GCC_USB20_SLEEP_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi";
+
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB20_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ interrupts-extended = <&intc GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq";
+
+ power-domains = <&gcc GCC_USB20_PRIM_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB20_PRIM_BCR>;
+
+ interconnects = <&aggre1_noc MASTER_USB2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_USB2 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
+ qcom,select-utmi-as-pipe-clk;
+ wakeup-source;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ usb_2_dwc3: usb@a400000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x0a400000 0x0 0xe000>;
+
+ interrupts = <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x20 0x0>;
+
+ phys = <&usb_2_hsphy>;
+ phy-names = "usb2-phy";
+ maximum-speed = "high-speed";
+
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ };
+ };
+
+ videocc: clock-controller@abf0000 {
+ compatible = "qcom,qcs8300-videocc";
+ reg = <0x0 0x0abf0000 0x0 0x10000>;
+ clocks = <&gcc GCC_VIDEO_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ camcc: clock-controller@ade0000 {
+ compatible = "qcom,qcs8300-camcc";
+ reg = <0x0 0x0ade0000 0x0 0x20000>;
+ clocks = <&gcc GCC_CAMERA_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sa8775p-dispcc0";
+ reg = <0x0 0x0af00000 0x0 0x20000>;
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>,
+ <0>, <0>, <0>, <0>,
+ <0>, <0>, <0>, <0>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,qcs8300-pdc", "qcom,pdc";
+ reg = <0x0 0xb220000 0x0 0x30000>,
+ <0x0 0x17c000f0 0x0 0x64>;
+ interrupt-parent = <&intc>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ qcom,pdc-ranges = <0 480 40>,
+ <40 140 14>,
+ <54 263 1>,
+ <55 306 4>,
+ <59 312 3>,
+ <62 374 2>,
+ <64 434 2>,
+ <66 438 2>,
+ <70 520 1>,
+ <73 523 1>,
+ <118 568 6>,
+ <124 609 3>,
+ <159 638 1>,
+ <160 720 3>,
+ <169 728 30>,
+ <199 416 2>,
+ <201 449 1>,
+ <202 89 1>,
+ <203 451 1>,
+ <204 462 1>,
+ <205 264 1>,
+ <206 579 1>,
+ <207 653 1>,
+ <208 656 1>,
+ <209 659 1>,
+ <210 122 1>,
+ <211 699 1>,
+ <212 705 1>,
+ <213 450 1>,
+ <214 643 2>,
+ <216 646 5>,
+ <221 390 5>,
+ <226 700 2>,
+ <228 440 1>,
+ <229 663 1>,
+ <230 524 2>,
+ <232 612 3>,
+ <235 723 5>;
+ };
+
+ aoss_qmp: power-management@c300000 {
+ compatible = "qcom,qcs8300-aoss-qmp", "qcom,aoss-qmp";
+ reg = <0x0 0x0c300000 0x0 0x400>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ #clock-cells = <0>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,qcs8300-tlmm";
+ reg = <0x0 0x0f100000 0x0 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 134>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ wakeup-parent = <&pdc>;
+
+ qup_uart7_default: qup-uart7-state {
+ /* TX, RX */
+ pins = "gpio43", "gpio44";
+ function = "qup0_se7";
+ };
+ };
+
+ sram: sram@146d8000 {
+ compatible = "qcom,qcs8300-imem", "syscon", "simple-mfd";
+ reg = <0x0 0x146d8000 0x0 0x1000>;
+ ranges = <0x0 0x0 0x146d8000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pil-reloc@94c {
+ compatible = "qcom,pil-reloc-info";
+ reg = <0x94c 0xc8>;
+ };
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,qcs8300-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+
+ reg = <0x0 0x15000000 0x0 0x100000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
+ dma-coherent;
+
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 706 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 689 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 708 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 709 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 710 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 711 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 712 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 713 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 714 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 715 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 912 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 911 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 910 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 909 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 908 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 907 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 906 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 905 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 904 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 903 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 902 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 901 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 900 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 899 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 898 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 896 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 895 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x17a00000 0x0 0x10000>,
+ <0x0 0x17a60000 0x0 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ };
+
+ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-qcs8300", "qcom,kpss-wdt";
+ reg = <0x0 0x17c10000 0x0 0x1000>;
+ clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x17c20000 0x0 0x1000>;
+ ranges = <0x0 0x0 0x0 0x20000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ frame@17c21000 {
+ reg = <0x17c21000 0x1000>,
+ <0x17c22000 0x1000>;
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ frame@17c23000 {
+ reg = <0x17c23000 0x1000>;
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c25000 {
+ reg = <0x17c25000 0x1000>;
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c27000 {
+ reg = <0x17c27000 0x1000>;
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c29000 {
+ reg = <0x17c29000 0x1000>;
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c2b000 {
+ reg = <0x17c2b000 0x1000>;
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ frame@17c2d000 {
+ reg = <0x17c2d000 0x1000>;
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@18200000 {
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x18200000 0x0 0x10000>,
+ <0x0 0x18210000 0x0 0x10000>,
+ <0x0 0x18220000 0x0 0x10000>;
+ reg-names = "drv-0",
+ "drv-1",
+ "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+
+ power-domains = <&system_pd>;
+ label = "apps_rsc";
+
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 0>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sa8775p-rpmh-clk";
+ #clock-cells = <1>;
+ clocks = <&xo_board_clk>;
+ clock-names = "xo";
+ };
+
+ rpmhpd: power-controller {
+ compatible = "qcom,qcs8300-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp-0 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp-1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp-2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp-3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp-4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp-5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp-6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp-7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp-8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp-9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+ };
+
+ remoteproc_gpdsp: remoteproc@20c00000 {
+ compatible = "qcom,qcs8300-gpdsp-pas", "qcom,sa8775p-gpdsp0-pas";
+ reg = <0x0 0x20c00000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 768 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_gpdsp_in 0 0>,
+ <&smp2p_gpdsp_in 1 0>,
+ <&smp2p_gpdsp_in 2 0>,
+ <&smp2p_gpdsp_in 3 0>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>;
+ power-domain-names = "cx",
+ "mxc";
+
+ interconnects = <&gpdsp_anoc MASTER_DSP0 QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_CLK_CTL QCOM_ICC_TAG_ALWAYS>;
+
+ memory-region = <&gpdsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_gpdsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_GPDSP0
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "gpdsp";
+ qcom,remote-pid = <17>;
+ };
+ };
+
+ ethernet0: ethernet@23040000 {
+ compatible = "qcom,qcs8300-ethqos", "qcom,sa8775p-ethqos";
+ reg = <0x0 0x23040000 0x0 0x00010000>,
+ <0x0 0x23056000 0x0 0x00000100>;
+ reg-names = "stmmaceth", "rgmii";
+
+ interrupts = <GIC_SPI 946 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 783 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "sfty";
+
+ clocks = <&gcc GCC_EMAC0_AXI_CLK>,
+ <&gcc GCC_EMAC0_SLV_AHB_CLK>,
+ <&gcc GCC_EMAC0_PTP_CLK>,
+ <&gcc GCC_EMAC0_PHY_AUX_CLK>;
+ clock-names = "stmmaceth",
+ "pclk",
+ "ptp_ref",
+ "phyaux";
+ power-domains = <&gcc GCC_EMAC0_GDSC>;
+
+ phys = <&serdes0>;
+ phy-names = "serdes";
+
+ iommus = <&apps_smmu 0x120 0xf>;
+ dma-coherent;
+
+ snps,tso;
+ snps,pbl = <32>;
+ rx-fifo-depth = <16384>;
+ tx-fifo-depth = <20480>;
+
+ status = "disabled";
+ };
+
+ nspa_noc: interconnect@260c0000 {
+ compatible = "qcom,qcs8300-nspa-noc";
+ reg = <0x0 0x260c0000 0x0 0x16080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ remoteproc_cdsp: remoteproc@26300000 {
+ compatible = "qcom,qcs8300-cdsp-pas", "qcom,sa8775p-cdsp0-pas";
+ reg = <0x0 0x26300000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_NSP0>;
+
+ power-domain-names = "cx",
+ "mxc",
+ "nsp";
+
+ interconnects = <&nspa_noc MASTER_CDSP_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ memory-region = <&cdsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_cdsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "cdsp";
+ qcom,remote-pid = <5>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x19c1 0x0440>,
+ <&apps_smmu 0x1961 0x0400>;
+ dma-coherent;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x19c2 0x0440>,
+ <&apps_smmu 0x1962 0x0400>;
+ dma-coherent;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x19c3 0x0440>,
+ <&apps_smmu 0x1963 0x0400>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x19c4 0x0440>,
+ <&apps_smmu 0x1964 0x0400>;
+ dma-coherent;
+ };
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
index f6960e2d466a..e6ac529e6b72 100644
--- a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
@@ -367,7 +367,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&ufs_mem_hc {
diff --git a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
index e65305f8136c..d125fc77ae14 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
@@ -22,20 +22,6 @@
stdout-path = "serial0:115200n8";
};
- clocks {
- xo_board: xo-board-clk {
- compatible = "fixed-clock";
- clock-frequency = <19200000>;
- #clock-cells = <0>;
- };
-
- sleep_clk: sleep-clk {
- compatible = "fixed-clock";
- clock-frequency = <32000>;
- #clock-cells = <0>;
- };
- };
-
ppvar_sys: ppvar-sys-regulator {
compatible = "regulator-fixed";
regulator-name = "ppvar_sys";
@@ -239,6 +225,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&qup_i2c1_data_clk {
drive-strength = <2>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
index 47c0dd31aaf2..f973aa8f7477 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
@@ -21,6 +21,20 @@
chosen: chosen { };
+ clocks {
+ xo_board: xo-board-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <19200000>;
+ #clock-cells = <0>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32764>;
+ #clock-cells = <0>;
+ };
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
@@ -1009,6 +1023,8 @@
iommus = <&apps_smmu 0xc0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>,
<&usb_1_qmpphy>;
phy-names = "usb2-phy",
diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
index a9540e92d3e6..52db18847803 100644
--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
@@ -6,6 +6,8 @@
/dts-v1/;
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include <dt-bindings/usb/pd.h>
#include "sm4250.dtsi"
#include "pm6125.dtsi"
@@ -103,6 +105,55 @@
};
};
+ sound {
+ compatible = "qcom,qrb4210-rb2-sndcard";
+ pinctrl-0 = <&lpi_i2s2_active>;
+ pinctrl-names = "default";
+ model = "Qualcomm-RB2-WSA8815-Speakers-DMIC0";
+ audio-routing = "MM_DL1", "MultiMedia1 Playback",
+ "MM_DL2", "MultiMedia2 Playback";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI Playback";
+
+ cpu {
+ sound-dai = <&q6afedai SECONDARY_MI2S_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&lt9611_codec 0>;
+ };
+ };
+ };
+
vreg_hdmi_out_1p2: regulator-hdmi-out-1p2 {
compatible = "regulator-fixed";
regulator-name = "VREG_HDMI_OUT_1P2";
@@ -318,6 +369,14 @@
status = "okay";
};
+/* SECONDARY I2S uses 1 I2S SD Line for audio on LT9611UXC HDMI Bridge */
+&q6afedai {
+ dai@20 {
+ reg = <SECONDARY_MI2S_RX>;
+ qcom,sd-lines = <0>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -545,7 +604,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index 52eef88e882c..7afa5acac3fc 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -964,6 +964,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qru1000-idp.dts b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
index 1c781d9e24cf..439f5c327dc4 100644
--- a/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+++ b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
@@ -22,20 +22,6 @@
stdout-path = "serial0:115200n8";
};
- clocks {
- xo_board: xo-board-clk {
- compatible = "fixed-clock";
- clock-frequency = <19200000>;
- #clock-cells = <0>;
- };
-
- sleep_clk: sleep-clk {
- compatible = "fixed-clock";
- clock-frequency = <32000>;
- #clock-cells = <0>;
- };
- };
-
ppvar_sys: ppvar-sys-regulator {
compatible = "regulator-fixed";
regulator-name = "ppvar_sys";
@@ -239,6 +225,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&qup_i2c1_data_clk {
drive-strength = <2>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
index 3fc62e123689..175f8b1e3b2d 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
@@ -104,6 +104,30 @@
};
};
};
+
+ dp0-connector {
+ compatible = "dp-connector";
+ label = "eDP0";
+ type = "full-size";
+
+ port {
+ dp0_connector_in: endpoint {
+ remote-endpoint = <&mdss0_dp0_out>;
+ };
+ };
+ };
+
+ dp1-connector {
+ compatible = "dp-connector";
+ label = "eDP1";
+ type = "full-size";
+
+ port {
+ dp1_connector_in: endpoint {
+ remote-endpoint = <&mdss0_dp1_out>;
+ };
+ };
+ };
};
&apps_rsc {
@@ -498,6 +522,50 @@
status = "okay";
};
+&mdss0 {
+ status = "okay";
+};
+
+&mdss0_dp0 {
+ pinctrl-0 = <&dp0_hot_plug_det>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&mdss0_dp0_out {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+ remote-endpoint = <&dp0_connector_in>;
+};
+
+&mdss0_dp0_phy {
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l4a>;
+
+ status = "okay";
+};
+
+&mdss0_dp1 {
+ pinctrl-0 = <&dp1_hot_plug_det>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&mdss0_dp1_out {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+ remote-endpoint = <&dp1_connector_in>;
+};
+
+&mdss0_dp1_phy {
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l4a>;
+
+ status = "okay";
+};
+
&pmm8654au_0_gpios {
gpio-line-names = "DS_EN",
"POFF_COMPLETE",
@@ -608,7 +676,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&spi16 {
@@ -618,6 +686,18 @@
};
&tlmm {
+ dp0_hot_plug_det: dp0-hot-plug-det-state {
+ pins = "gpio101";
+ function = "edp0_hot";
+ bias-disable;
+ };
+
+ dp1_hot_plug_det: dp1-hot-plug-det-state {
+ pins = "gpio102";
+ function = "edp1_hot";
+ bias-disable;
+ };
+
ethernet0_default: ethernet0-default-state {
ethernet0_mdc: ethernet0-mdc-pins {
pins = "gpio8";
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 9da62d7c4d27..3394ae2d1300 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
#include <dt-bindings/clock/qcom,sa8775p-gcc.h>
#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
#include <dt-bindings/dma/qcom-gpi.h>
@@ -44,6 +45,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x0>;
enable-method = "psci";
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_0>;
capacity-dmips-mhz = <1024>;
@@ -66,6 +69,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x100>;
enable-method = "psci";
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_1>;
capacity-dmips-mhz = <1024>;
@@ -83,6 +88,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x200>;
enable-method = "psci";
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_2>;
capacity-dmips-mhz = <1024>;
@@ -100,6 +107,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x300>;
enable-method = "psci";
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 0>;
next-level-cache = <&l2_3>;
capacity-dmips-mhz = <1024>;
@@ -117,6 +126,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x10000>;
enable-method = "psci";
+ power-domains = <&cpu_pd4>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&l2_4>;
capacity-dmips-mhz = <1024>;
@@ -140,6 +151,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x10100>;
enable-method = "psci";
+ power-domains = <&cpu_pd5>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&l2_5>;
capacity-dmips-mhz = <1024>;
@@ -157,6 +170,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x10200>;
enable-method = "psci";
+ power-domains = <&cpu_pd6>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&l2_6>;
capacity-dmips-mhz = <1024>;
@@ -174,6 +189,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x10300>;
enable-method = "psci";
+ power-domains = <&cpu_pd7>;
+ power-domain-names = "psci";
qcom,freq-domain = <&cpufreq_hw 1>;
next-level-cache = <&l2_7>;
capacity-dmips-mhz = <1024>;
@@ -854,8 +871,8 @@
#mbox-cells = <2>;
};
- gpi_dma2: qcom,gpi-dma@800000 {
- compatible = "qcom,sm6350-gpi-dma";
+ gpi_dma2: dma-controller@800000 {
+ compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0x0 0x00800000 0x0 0x60000>;
#dma-cells = <3>;
interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
@@ -1345,8 +1362,8 @@
};
- gpi_dma0: qcom,gpi-dma@900000 {
- compatible = "qcom,sm6350-gpi-dma";
+ gpi_dma0: dma-controller@900000 {
+ compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0x0 0x00900000 0x0 0x60000>;
#dma-cells = <3>;
interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
@@ -1770,8 +1787,8 @@
};
};
- gpi_dma1: qcom,gpi-dma@a00000 {
- compatible = "qcom,sm6350-gpi-dma";
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0x0 0x00a00000 0x0 0x60000>;
#dma-cells = <3>;
interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
@@ -2225,8 +2242,8 @@
};
};
- gpi_dma3: qcom,gpi-dma@b00000 {
- compatible = "qcom,sm6350-gpi-dma";
+ gpi_dma3: dma-controller@b00000 {
+ compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
reg = <0x0 0x00b00000 0x0 0x58000>;
#dma-cells = <3>;
interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
@@ -3412,6 +3429,8 @@
iommus = <&apps_smmu 0x080 0x0>;
phys = <&usb_0_hsphy>, <&usb_0_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
};
};
@@ -3501,6 +3520,8 @@
iommus = <&apps_smmu 0x0a0 0x0>;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
};
};
@@ -3564,6 +3585,8 @@
iommus = <&apps_smmu 0x020 0x0>;
phys = <&usb_2_hsphy>;
phy-names = "usb2-phy";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
};
};
@@ -3760,6 +3783,353 @@
interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
};
+ videocc: clock-controller@abf0000 {
+ compatible = "qcom,sa8775p-videocc";
+ reg = <0x0 0x0abf0000 0x0 0x10000>;
+ clocks = <&gcc GCC_VIDEO_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ camcc: clock-controller@ade0000 {
+ compatible = "qcom,sa8775p-camcc";
+ reg = <0x0 0x0ade0000 0x0 0x20000>;
+ clocks = <&gcc GCC_CAMERA_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ mdss0: display-subsystem@ae00000 {
+ compatible = "qcom,sa8775p-mdss";
+ reg = <0x0 0x0ae00000 0x0 0x1000>;
+ reg-names = "mdss";
+
+ /* same path used twice */
+ interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_MDP1 QCOM_ICC_TAG_ACTIVE_ONLY
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "mdp0-mem",
+ "mdp1-mem",
+ "cpu-cfg";
+
+ resets = <&dispcc0 MDSS_DISP_CC_MDSS_CORE_BCR>;
+
+ power-domains = <&dispcc0 MDSS_DISP_CC_MDSS_CORE_GDSC>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_MDP_CLK>;
+
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x1000 0x402>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ mdss0_mdp: display-controller@ae01000 {
+ compatible = "qcom,sa8775p-dpu";
+ reg = <0x0 0x0ae01000 0x0 0x8f000>,
+ <0x0 0x0aeb0000 0x0 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_MDP_LUT_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ assigned-clocks = <&dispcc0 MDSS_DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdss0_mdp_opp_table>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+
+ interrupt-parent = <&mdss0>;
+ interrupts = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dpu_intf0_out: endpoint {
+ remote-endpoint = <&mdss0_dp0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ dpu_intf4_out: endpoint {
+ remote-endpoint = <&mdss0_dp1_in>;
+ };
+ };
+ };
+
+ mdss0_mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-375000000 {
+ opp-hz = /bits/ 64 <375000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+
+ opp-575000000 {
+ opp-hz = /bits/ 64 <575000000>;
+ required-opps = <&rpmhpd_opp_turbo>;
+ };
+
+ opp-650000000 {
+ opp-hz = /bits/ 64 <650000000>;
+ required-opps = <&rpmhpd_opp_turbo_l1>;
+ };
+ };
+ };
+
+ mdss0_dp0_phy: phy@aec2a00 {
+ compatible = "qcom,sa8775p-edp-phy";
+
+ reg = <0x0 0x0aec2a00 0x0 0x200>,
+ <0x0 0x0aec2200 0x0 0xd0>,
+ <0x0 0x0aec2600 0x0 0xd0>,
+ <0x0 0x0aec2000 0x0 0x1c8>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "aux",
+ "cfg_ahb";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ mdss0_dp1_phy: phy@aec5a00 {
+ compatible = "qcom,sa8775p-edp-phy";
+
+ reg = <0x0 0x0aec5a00 0x0 0x200>,
+ <0x0 0x0aec5200 0x0 0xd0>,
+ <0x0 0x0aec5600 0x0 0xd0>,
+ <0x0 0x0aec5000 0x0 0x1c8>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "aux",
+ "cfg_ahb";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ mdss0_dp0: displayport-controller@af54000 {
+ compatible = "qcom,sa8775p-dp";
+
+ reg = <0x0 0x0af54000 0x0 0x104>,
+ <0x0 0x0af54200 0x0 0x0c0>,
+ <0x0 0x0af55000 0x0 0x770>,
+ <0x0 0x0af56000 0x0 0x09c>,
+ <0x0 0x0af57000 0x0 0x09c>;
+
+ interrupt-parent = <&mdss0>;
+ interrupts = <12>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+ assigned-clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC>;
+ assigned-clock-parents = <&mdss0_dp0_phy 0>, <&mdss0_dp0_phy 1>;
+ phys = <&mdss0_dp0_phy>;
+ phy-names = "dp";
+
+ operating-points-v2 = <&dp_opp_table>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+
+ #sound-dai-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss0_dp0_in: endpoint {
+ remote-endpoint = <&dpu_intf0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss0_dp0_out: endpoint { };
+ };
+ };
+
+ dp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-160000000 {
+ opp-hz = /bits/ 64 <160000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ mdss0_dp1: displayport-controller@af5c000 {
+ compatible = "qcom,sa8775p-dp";
+
+ reg = <0x0 0x0af5c000 0x0 0x104>,
+ <0x0 0x0af5c200 0x0 0x0c0>,
+ <0x0 0x0af5d000 0x0 0x770>,
+ <0x0 0x0af5e000 0x0 0x09c>,
+ <0x0 0x0af5f000 0x0 0x09c>;
+
+ interrupt-parent = <&mdss0>;
+ interrupts = <13>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK>;
+ clock-names = "core_iface",
+ "core_aux",
+ "ctrl_link",
+ "ctrl_link_iface",
+ "stream_pixel";
+ assigned-clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC>;
+ assigned-clock-parents = <&mdss0_dp1_phy 0>, <&mdss0_dp1_phy 1>;
+ phys = <&mdss0_dp1_phy>;
+ phy-names = "dp";
+
+ operating-points-v2 = <&dp1_opp_table>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+
+ #sound-dai-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mdss0_dp1_in: endpoint {
+ remote-endpoint = <&dpu_intf4_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mdss0_dp1_out: endpoint { };
+ };
+ };
+
+ dp1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-160000000 {
+ opp-hz = /bits/ 64 <160000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-270000000 {
+ opp-hz = /bits/ 64 <270000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-810000000 {
+ opp-hz = /bits/ 64 <810000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+ };
+
+ dispcc0: clock-controller@af00000 {
+ compatible = "qcom,sa8775p-dispcc0";
+ reg = <0x0 0x0af00000 0x0 0x20000>;
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>,
+ <&mdss0_dp0_phy 0>, <&mdss0_dp0_phy 1>,
+ <&mdss0_dp1_phy 0>, <&mdss0_dp1_phy 1>,
+ <0>, <0>, <0>, <0>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sa8775p-pdc", "qcom,pdc";
reg = <0x0 0x0b220000 0x0 0x30000>,
@@ -4382,6 +4752,22 @@
};
};
+ dispcc1: clock-controller@22100000 {
+ compatible = "qcom,sa8775p-dispcc1";
+ reg = <0x0 0x22100000 0x0 0x20000>;
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>,
+ <0>, <0>, <0>, <0>,
+ <0>, <0>, <0>, <0>;
+ power-domains = <&rpmhpd SA8775P_MMCX>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ status = "disabled";
+ };
+
ethernet1: ethernet@23000000 {
compatible = "qcom,sa8775p-ethqos";
reg = <0x0 0x23000000 0x0 0x10000>,
diff --git a/arch/arm64/boot/dts/qcom/sar2130p-qar2130p.dts b/arch/arm64/boot/dts/qcom/sar2130p-qar2130p.dts
new file mode 100644
index 000000000000..74778a5b19ba
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sar2130p-qar2130p.dts
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sar2130p.dtsi"
+#include "pm8150.dtsi"
+
+/ {
+ model = "Qualcomm Snapdragon AR2 Gen1 Smart Viewer Development Kit";
+ compatible = "qcom,qar2130p", "qcom,sar2130p";
+ chassis-type = "embedded";
+
+ aliases {
+ serial0 = &uart11;
+ serial1 = &uart7;
+ i2c0 = &i2c8;
+ i2c1 = &i2c10;
+ mmc1 = &sdhc_1;
+ spi0 = &spi0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+ };
+
+ /* pm3003a on I2C0, should not be controlled */
+ vreg_ext_1p3: regulator-ext-1p3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_ext_1p3";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+
+ /* EBI rail, used as LDO input, can not be part of PMIC config */
+ vreg_s10a_0p89: regulator-s10a-0p89 {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_s10a_0p89";
+ regulator-min-microvolt = <890000>;
+ regulator-max-microvolt = <890000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+
+ thermal-zones {
+ sar2130p-thermal {
+ thermal-sensors = <&pm8150_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <100000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ wifi-thermal {
+ thermal-sensors = <&pm8150_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <52000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ thermal-sensors = <&pm8150_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <50000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ wcn7850-pmu {
+ compatible = "qcom,wcn7850-pmu";
+
+ pinctrl-0 = <&wlan_en_state>, <&bt_en_state>;
+ pinctrl-names = "default";
+
+ wlan-enable-gpios = <&tlmm 45 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 46 GPIO_ACTIVE_HIGH>;
+
+ vdd-supply = <&vreg_s4a_0p95>;
+ vddio-supply = <&vreg_l15a_1p8>;
+ vddaon-supply = <&vreg_s4a_0p95>;
+ vdddig-supply = <&vreg_s4a_0p95>;
+ vddrfa1p2-supply = <&vreg_s4a_0p95>;
+ vddrfa1p8-supply = <&vreg_s5a_1p88>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-l1-l8-l11-supply = <&vreg_s4a_0p95>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_ext_1p3>;
+ vdd-l6-l9-supply = <&vreg_s10a_0p89>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p88>;
+
+ vreg_s4a_0p95: smps6 {
+ regulator-name = "vreg_s4a_0p95";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1170000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5a_1p88: smps5 {
+ regulator-name = "vreg_s5a_1p88";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2040000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1a_0p91: ldo1 {
+ regulator-name = "vreg_l1a_0p91";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-name = "vreg_l2a_3p1";
+ regulator-min-microvolt = <3080000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_1p2: ldo3 {
+ regulator-name = "vreg_l3a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* ldo4 1.26 - system ? */
+
+ vreg_l5a_1p13: ldo5 {
+ regulator-name = "vreg_l5a_1p13";
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1170000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_0p6: ldo6 {
+ regulator-name = "vreg_l6a_0p6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <650000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p8: ldo7 {
+ regulator-name = "vreg_l7a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8a_0p88: ldo8 {
+ regulator-name = "vreg_l8a_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* ldo9 - LCX */
+
+ vreg_l10a_2p95: ldo10 {
+ regulator-name = "vreg_l10a_2p95";
+ regulator-min-microvolt = <2952000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* ldo11 - LMX */
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* no ldo13 */
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-name = "vreg_l14a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_1p8: ldo15 {
+ regulator-name = "vreg_l15a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* no ldo16 - system */
+
+ vreg_l17a_3p26: ldo17 {
+ regulator-name = "vreg_l17a_3p26";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3544000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18a_1p2: ldo18 {
+ regulator-name = "vreg_l18a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpi_dma0 {
+ status = "okay";
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&gpu_zap_shader {
+ firmware-name = "qcom/sar2130p/a620_zap.mbn";
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ ptn3222: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+
+ reset-gpios = <&tlmm 99 GPIO_ACTIVE_LOW>;
+
+ vdd3v3-supply = <&vreg_l2a_3p1>;
+ vdd1v8-supply = <&vreg_l15a_1p8>;
+
+ #phy-cells = <0>;
+ };
+};
+
+&i2c10 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+};
+
+&pcie0 {
+ perst-gpios = <&tlmm 55 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 57 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&pcie0_default_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ };
+};
+
+&pcie0_phy {
+ vdda-phy-supply = <&vreg_l8a_0p88>;
+ vdda-pll-supply = <&vreg_l3a_1p2>;
+
+ status = "okay";
+};
+
+&pm8150_adc {
+ channel@4c {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ label = "xo_therm";
+ };
+
+ channel@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "skin_therm";
+ };
+
+ channel@4e {
+ /* msm-5.10 uses ADC5_AMUX_THM2 / 0x0e, although there is a pullup */
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,hw-settle-time = <200>;
+ qcom,pre-scaling = <1 1>;
+ label = "wifi_therm";
+ };
+};
+
+&pm8150_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150_adc ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ wifi-therm@2 {
+ reg = <2>;
+ /* msm-5.10 uses ADC5_AMUX_THM2, although there is a pullup */
+ io-channels = <&pm8150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sar2130p/adsp.mbn";
+
+ status = "okay";
+};
+
+&sdhc_1 {
+ vmmc-supply = <&vreg_l10a_2p95>;
+ vqmmc-supply = <&vreg_l7a_1p8>;
+
+ status = "okay";
+};
+
+&tlmm {
+ bt_en_state: bt-enable-state {
+ pins = "gpio46";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ pcie0_default_state: pcie0-default-state {
+ perst-pins {
+ pins = "gpio55";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq-pins {
+ pins = "gpio56";
+ function = "pcie0_clkreqn";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-pins {
+ pins = "gpio57";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_default_state: pcie1-default-state {
+ perst-pins {
+ pins = "gpio58";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq-pins {
+ pins = "gpio59";
+ function = "pcie1_clkreqn";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-pins {
+ pins = "gpio60";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ wlan_en_state: wlan-enable-state {
+ pins = "gpio45";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+};
+
+&uart7 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn7850-bt";
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+
+ max-speed = <3200000>;
+ };
+};
+
+&uart11 {
+ status = "okay";
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_hsphy {
+ vdd-supply = <&vreg_l8a_0p88>;
+ vdda12-supply = <&vreg_l3a_1p2>;
+
+ phys = <&ptn3222>;
+
+ status = "okay";
+};
+
+&usb_dp_qmpphy {
+ vdda-phy-supply = <&vreg_l3a_1p2>;
+ vdda-pll-supply = <&vreg_l1a_0p91>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sar2130p.dtsi b/arch/arm64/boot/dts/qcom/sar2130p.dtsi
new file mode 100644
index 000000000000..dd832e6816be
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sar2130p.dtsi
@@ -0,0 +1,3123 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sar2130p-gcc.h>
+#include <dt-bindings/clock/qcom,sar2130p-gpucc.h>
+#include <dt-bindings/clock/qcom,sm8550-tcsr.h>
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
+#include <dt-bindings/interconnect/qcom,sar2130p-rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
+#include <dt-bindings/soc/qcom,gpr.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ clocks = <&cpufreq_hw 0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
+ #cooling-cells = <2>;
+
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ };
+ };
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ clocks = <&cpufreq_hw 0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_100>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
+ #cooling-cells = <2>;
+
+ l2_100: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ clocks = <&cpufreq_hw 0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_200>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
+ #cooling-cells = <2>;
+
+ l2_200: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ clocks = <&cpufreq_hw 0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_300>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
+ #cooling-cells = <2>;
+
+ l2_300: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ cpu_sleep_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-power-collapse";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ cpu_sleep_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <915>;
+ min-residency-us = <4001>;
+ local-timer-stop;
+ };
+ };
+
+ domain-idle-states {
+ cluster_sleep_0: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <2752>;
+ exit-latency-us = <3048>;
+ min-residency-us = <6118>;
+ };
+
+ cluster_sleep_1: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41002344>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <4562>;
+ min-residency-us = <8467>;
+ };
+
+ cluster_sleep_2: cluster-sleep-2 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x4100c344>;
+ entry-latency-us = <3638>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9862>;
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm-sar2130p", "qcom,scm";
+ qcom,dload-mode = <&tcsr_mutex 0x13000>;
+ interconnects = <&system_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ };
+ };
+
+ clk_virt: interconnect-0 {
+ compatible = "qcom,sar2130p-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect-1 {
+ compatible = "qcom,sar2130p-mc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cpu_pd0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>, <&cpu_sleep_1>;
+ };
+
+ cpu_pd1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>, <&cpu_sleep_1>;
+ };
+
+ cpu_pd2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>, <&cpu_sleep_1>;
+ };
+
+ cpu_pd3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cpu_sleep_0>, <&cpu_sleep_1>;
+ };
+
+ cluster_pd: power-domain-cpu-cluster0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&cluster_sleep_0>, <&cluster_sleep_1>, <&cluster_sleep_2>;
+ };
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ hyp_mem: hyp@80000000 {
+ reg = <0x0 0x80000000 0x0 0x600000>;
+ no-map;
+ };
+
+ xbl_dt_log_mem: xbl-dt-log@80600000 {
+ reg = <0x0 0x80600000 0x0 0x40000>;
+ no-map;
+ };
+
+ xbl_ramdump_mem: xbl-ramdump@80640000 {
+ reg = <0x0 0x80640000 0x0 0x1c0000>;
+ no-map;
+ };
+
+ aop_image_mem: aop-image@80800000 {
+ reg = <0x0 0x80800000 0x0 0x60000>;
+ no-map;
+ };
+
+ aop_cmd_db_mem: aop-cmd-db@80860000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x80860000 0x0 0x20000>;
+ no-map;
+ };
+
+ aop_config_mem: aop-config@80880000 {
+ reg = <0x0 0x80880000 0x0 0x20000>;
+ no-map;
+ };
+
+ tme_crash_dump_mem: tme-crash-dump@808a0000 {
+ reg = <0x0 0x808a0000 0x0 0x40000>;
+ no-map;
+ };
+
+ tme_log_mem: tme-log@808e0000 {
+ reg = <0x0 0x808e0000 0x0 0x4000>;
+ no-map;
+ };
+
+ uefi_log_mem: uefi-log@808e4000 {
+ reg = <0x0 0x808e4000 0x0 0x10000>;
+ no-map;
+ };
+
+ secdata_apss_mem: secdata-apss@808ff000 {
+ reg = <0x0 0x808ff000 0x0 0x1000>;
+ no-map;
+ };
+
+ smem: smem@80900000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x80900000 0x0 0x200000>;
+ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+
+ cpucp_fw_mem: cpucp-fw@80b00000 {
+ reg = <0x0 0x80b00000 0x0 0x100000>;
+ no-map;
+ };
+
+ helios_ram_dump_mem: helios-ram-dump@80c00000 {
+ reg = <0x0 0x80c00000 0x0 0xe00000>;
+ no-map;
+ };
+
+ camera_mem: camera@84e00000 {
+ reg = <0x0 0x84e00000 0x0 0x800000>;
+ no-map;
+ };
+
+ video_mem: video@86f00000 {
+ reg = <0x0 0x86f00000 0x0 0x500000>;
+ no-map;
+ };
+
+ adsp_mem: adsp@87600000 {
+ reg = <0x0 0x87600000 0x0 0x1e00000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp@89400000 {
+ reg = <0x0 0x89400000 0x0 0xf00000>;
+ no-map;
+ };
+
+ ipa_fw_mem: ipa-fw@8a300000 {
+ reg = <0x0 0x8a300000 0x0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: ipa-gsi@8a310000 {
+ reg = <0x0 0x8a310000 0x0 0xa000>;
+ no-map;
+ };
+
+ gpu_micro_code_mem: gpu-micro-code@8a31a000 {
+ reg = <0x0 0x8a31a000 0x0 0x2000>;
+ no-map;
+ };
+
+ cvp_mem: cvp@8a400000 {
+ reg = <0x0 0x8a400000 0x0 0x700000>;
+ no-map;
+ };
+
+ xbl_sc_mem: xbl-sc@a6e00000 {
+ no-map;
+ reg = <0x0 0xa6e00000 0x0 0x40000>;
+ };
+
+ global_sync_mem: global-sync@a6f00000 {
+ no-map;
+ reg = <0x0 0xa6f00000 0x0 0x100000>;
+ };
+
+ tz_stat_mem: tz-stat@e8800000 {
+ no-map;
+ reg = <0x0 0xe8800000 0x0 0x100000>;
+ };
+
+ tags_mem: tags@e8900000 {
+ no-map;
+ reg = <0x0 0xe8900000 0x0 0x500000>;
+ };
+
+ qtee_mem: qtee@e8e00000 {
+ no-map;
+ reg = <0x0 0xe8e00000 0x0 0x500000>;
+ };
+
+ trusted_apps_mem: trusted-apps@e9300000 {
+ no-map;
+ reg = <0x0 0xe9300000 0x0 0xc00000>;
+ };
+ };
+
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ smp2p_adsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_adsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <94>, <432>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ smp2p_cdsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_cdsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,sar2130p-gcc";
+ reg = <0x0 0x00100000 0x0 0x1f4200>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>,
+ <&pcie0_phy>,
+ <&pcie1_phy>,
+ <&usb_dp_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
+ };
+
+ sdhc_1: mmc@7c4000 {
+ compatible = "qcom,sar2130p-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x0 0x007c4000 0x0 0x1000>,
+ <0x0 0x007c5000 0x0 0x1000>;
+ reg-names = "hc", "cqhci";
+
+ iommus = <&apps_smmu 0x160 0x0>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "core", "xo";
+ interconnects = <&system_noc MASTER_SDCC_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_SDCC_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "sdhc-ddr","cpu-sdhc";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc1_opp_table>;
+
+ pinctrl-0 = <&sdc1_default>;
+ pinctrl-1 = <&sdc1_sleep>;
+ pinctrl-names = "default", "sleep";
+
+ bus-width = <8>;
+ non-removable;
+ supports-cqe;
+
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+
+ status = "disabled";
+
+ sdhc1_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 200000>;
+ opp-avg-kBps = <104000 0>;
+ };
+
+ opp-384000000 {
+ opp-hz = /bits/ 64 <384000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <2500000 1000000>;
+ opp-avg-kBps = <400000 0>;
+ };
+ };
+ };
+
+ gpi_dma0: dma-controller@900000 {
+ compatible = "qcom,sar2130p-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0x00900000 0x0 0x60000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <3>;
+ dma-channels = <12>;
+ dma-channel-mask = <0x7e>;
+ iommus = <&apps_smmu 0x76 0x0>;
+
+ status = "disabled";
+ };
+
+ qupv3_id_0: geniqup@9c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x009c0000 0x0 0x2000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x63 0x0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ i2c0: i2c@980000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00980000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-0 = <&qup_i2c0_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi0: spi@980000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00980000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs0>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@984000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00984000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-0 = <&qup_i2c1_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi1: spi@984000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00984000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi1_data_clk>, <&qup_spi1_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@988000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00988000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-0 = <&qup_i2c2_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi2: spi@988000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00988000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c3: i2c@98c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x0098c000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-0 = <&qup_i2c3_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi3: spi@98c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x0098c000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi3_data_clk>, <&qup_spi3_cs0>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c4: i2c@990000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00990000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-0 = <&qup_i2c4_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi4: spi@990000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00990000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi4_data_clk>, <&qup_spi4_cs0>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c5: i2c@994000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00994000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-0 = <&qup_i2c5_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma0 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi5: spi@994000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00994000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi5_data_clk>, <&qup_spi5_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma0 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma0 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,sar2130p-gpi-dma", "qcom,sm6350-gpi-dma";
+ #dma-cells = <3>;
+ reg = <0x0 0x00a00000 0x0 0x60000>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <12>;
+ dma-channel-mask = <0x7e>;
+ iommus = <&apps_smmu 0x16 0x0>;
+
+ status = "disabled";
+ };
+
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x00ac0000 0x0 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x3 0x0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ i2c6: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a80000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-0 = <&qup_i2c6_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi6: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a80000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c7: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a84000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-0 = <&qup_i2c7_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi7: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a84000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi7_data_clk>, <&qup_spi7_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ uart7: serial@a84000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x00a84000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-0 = <&qup_uart7_default>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config";
+
+ status = "disabled";
+ };
+
+ i2c8: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a88000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-0 = <&qup_i2c8_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi8: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a88000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi8_data_clk>, <&qup_spi8_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c9: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a8c000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-0 = <&qup_i2c9_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi9: spi@a8c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a8c000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi9_data_clk>, <&qup_spi9_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c10: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a90000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-0 = <&qup_i2c10_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi10: spi@a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a90000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi10_data_clk>, <&qup_spi10_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c11: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a94000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-0 = <&qup_i2c11_data_clk>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx", "rx";
+
+ status = "disabled";
+ };
+
+ spi11: spi@a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a94000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&qup_spi11_data_clk>, <&qup_spi11_cs>;
+ pinctrl-names = "default";
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_A2NOC_SNOC QCOM_ICC_TAG_ALWAYS
+ &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+ <&system_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core", "qup-config", "qup-memory";
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ uart11: serial@a94000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0x0 0x00a94000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-0 = <&qup_uart11_default>;
+ pinctrl-names = "default";
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ status = "disabled";
+ };
+ };
+
+ config_noc: interconnect@1500000 {
+ compatible = "qcom,sar2130p-config-noc";
+ reg = <0x0 0x01500000 0x0 0x10>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1680000 {
+ compatible = "qcom,sar2130p-system-noc";
+ reg = <0x0 0x01680000 0x0 0x29080>;
+ clocks = <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ pcie_noc: interconnect@16c0000 {
+ compatible = "qcom,sar2130p-pcie-anoc";
+ reg = <0x0 0x016c0000 0x0 0xa080>;
+ clocks = <&gcc GCC_AGGRE_NOC_PCIE_1_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_PCIE_ANOC_AHB_CLK>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sar2130p-mmss-noc";
+ reg = <0x0 0x01740000 0x0 0x1f100>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ pcie0: pcie@1c00000 {
+ device_type = "pci";
+ compatible = "qcom,sar2130p-pcie", "qcom,pcie-sm8550";
+ reg = <0x0 0x01c00000 0x0 0x3000>,
+ <0x0 0x60000000 0x0 0xf1d>,
+ <0x0 0x60000f20 0x0 0xa8>,
+ <0x0 0x60001000 0x0 0x1000>,
+ <0x0 0x60100000 0x0 0x100000>,
+ <0x0 0x01c0c000 0x0 0x1000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "config", "mhi";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ bus-range = <0x00 0xff>;
+
+ dma-coherent;
+
+ linux,pci-domain = <0>;
+ num-lanes = <2>;
+
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 0 0 150 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 0 0 151 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 0 0 152 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_1_AXI_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ddrss_sf_tbu",
+ "noc_aggr";
+
+ interconnects = <&pcie_noc MASTER_PCIE_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_PCIE_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ iommu-map = <0x0 &apps_smmu 0x1c00 0x1>,
+ <0x100 &apps_smmu 0x1c01 0x1>;
+
+ resets = <&gcc GCC_PCIE_0_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_0_GDSC>;
+
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+
+ pcieport0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie0_phy: phy@1c06000 {
+ compatible = "qcom,sar2130p-qmp-gen3x2-pcie-phy";
+ reg = <0x0 0x01c06000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&tcsr TCSR_PCIE_0_CLKREF_EN>,
+ <&gcc GCC_PCIE_0_PHY_RCHNG_CLK>,
+ <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "rchng",
+ "pipe";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE_0_PHY_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_0_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "pcie0_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ pcie1: pcie@1c08000 {
+ device_type = "pci";
+ compatible = "qcom,sar2130p-pcie", "qcom,pcie-sm8550";
+ reg = <0x0 0x01c08000 0x0 0x3000>,
+ <0x0 0x40000000 0x0 0xf1d>,
+ <0x0 0x40000f20 0x0 0xa8>,
+ <0x0 0x40001000 0x0 0x1000>,
+ <0x0 0x40100000 0x0 0x100000>,
+ <0x0 0x01c0b000 0x0 0x1000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "config", "mhi";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ bus-range = <0x00 0xff>;
+
+ dma-coherent;
+
+ linux,pci-domain = <1>;
+ num-lanes = <2>;
+
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 0 0 435 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 0 0 438 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 0 0 439 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_1_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_PCIE_ANOC_AHB_CLK>,
+ <&gcc GCC_QMIP_PCIE_AHB_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ddrss_sf_tbu",
+ "noc_aggr",
+ "cnoc_sf_axi",
+ "qmip_pcie_ahb";
+
+ assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ interconnects = <&pcie_noc MASTER_PCIE_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_PCIE_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+ iommu-map = <0x0 &apps_smmu 0x1e00 0x1>,
+ <0x100 &apps_smmu 0x1e01 0x1>;
+
+ resets = <&gcc GCC_PCIE_1_BCR>,
+ <&gcc GCC_PCIE_1_LINK_DOWN_BCR>;
+ reset-names = "pci", "link_down";
+
+ power-domains = <&gcc PCIE_1_GDSC>;
+
+ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
+
+ pcie1_phy: phy@1c0e000 {
+ compatible = "qcom,sar2130p-qmp-gen3x2-pcie-phy";
+ reg = <0x0 0x01c0e000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&tcsr TCSR_PCIE_1_CLKREF_EN>,
+ <&gcc GCC_PCIE_1_PHY_RCHNG_CLK>,
+ <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "rchng",
+ "pipe";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE_1_PHY_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_1_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "pcie1_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x0 0x01f40000 0x0 0x20000>;
+
+ #hwlock-cells = <1>;
+ };
+
+ tcsr: clock-controller@1fc0000 {
+ compatible = "qcom,sar2130p-tcsr", "syscon";
+ reg = <0x0 0x01fc0000 0x0 0x30000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ remoteproc_adsp: remoteproc@3000000 {
+ compatible = "qcom,sar2130p-adsp-pas";
+ reg = <0x0 0x03000000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ memory-region = <&adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1801 0x0>;
+ };
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1803 0x0>;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1804 0x0>;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1805 0x0>;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x1806 0x0>;
+ };
+ };
+ };
+ };
+
+ gpu: gpu@3d00000 {
+ compatible = "qcom,adreno-621.0", "qcom,adreno";
+ reg = <0x0 0x03d00000 0x0 0x40000>,
+ <0x0 0x03d9e000 0x0 0x2000>,
+ <0x0 0x03d61000 0x0 0x800>;
+ reg-names = "kgsl_3d0_reg_memory",
+ "cx_mem",
+ "cx_dbgc";
+
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&adreno_smmu 0 0x401>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+
+ nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cell-names = "speed_bin";
+ #cooling-cells = <2>;
+
+ status = "disabled";
+
+ gpu_zap_shader: zap-shader {
+ memory-region = <&gpu_micro_code_mem>;
+ };
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-843000000 {
+ opp-hz = /bits/ 64 <843000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ opp-supported-hw = <0x1>;
+ };
+
+ opp-780000000 {
+ opp-hz = /bits/ 64 <780000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ opp-supported-hw = <0x1>;
+ };
+
+ opp-644000000 {
+ opp-hz = /bits/ 64 <644000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-supported-hw = <0x3>;
+ };
+
+ opp-570000000 {
+ opp-hz = /bits/ 64 <570000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-supported-hw = <0x3>;
+ };
+
+ opp-450000000 {
+ opp-hz = /bits/ 64 <450000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-supported-hw = <0x3>;
+ };
+
+ opp-320000000 {
+ opp-hz = /bits/ 64 <320000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-supported-hw = <0x3>;
+ };
+
+ opp-235000000 {
+ opp-hz = /bits/ 64 <235000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ opp-supported-hw = <0x3>;
+ };
+ };
+ };
+
+ gmu: gmu@3d6a000 {
+ compatible = "qcom,adreno-gmu-621.0", "qcom,adreno-gmu";
+ reg = <0x0 0x03d6a000 0x0 0x35000>,
+ <0x0 0x03de0000 0x0 0x10000>,
+ <0x0 0x0b290000 0x0 0x10000>;
+ reg-names = "gmu", "rscc", "gmu_pdc";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ clocks = <&gpucc GPU_CC_AHB_CLK>,
+ <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gpucc GPU_CC_HUB_CX_INT_CLK>;
+ clock-names = "ahb",
+ "gmu",
+ "cxo",
+ "axi",
+ "memnoc",
+ "hub";
+
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx",
+ "gx";
+
+ iommus = <&adreno_smmu 5 0x400>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-220000000 {
+ opp-hz = /bits/ 64 <220000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ opp-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+ };
+ };
+
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,sar2130p-gpucc";
+ reg = <0x0 0x03d90000 0x0 0xa000>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ adreno_smmu: iommu@3da0000 {
+ compatible = "qcom,sar2130p-smmu-500", "qcom,adreno-smmu",
+ "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x03da0000 0x0 0x10000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gpucc GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&gpucc GPU_CC_AHB_CLK>;
+ clock-names = "hlos",
+ "bus",
+ "iface",
+ "ahb";
+ power-domains = <&gpucc GPU_CX_GDSC>;
+ dma-coherent;
+ };
+
+ usb_1_hsphy: phy@88e3000 {
+ compatible = "qcom,sar2130p-snps-eusb2-phy",
+ "qcom,sm8550-snps-eusb2-phy";
+ reg = <0x0 0x088e3000 0x0 0x154>;
+ #phy-cells = <0>;
+
+ clocks = <&tcsr TCSR_USB2_CLKREF_EN>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+ status = "disabled";
+ };
+
+ usb_dp_qmpphy: phy@88e8000 {
+ compatible = "qcom,sar2130p-qmp-usb3-dp-phy";
+ reg = <0x0 0x088e8000 0x0 0x3000>;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+ power-domains = <&gcc USB3_PHY_GDSC>;
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+ #clock-cells = <1>;
+ #phy-cells = <1>;
+
+ orientation-switch;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_dp_qmpphy_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_dp_qmpphy_usb_ss_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usb_dp_qmpphy_dp_in: endpoint {
+ };
+ };
+ };
+ };
+
+ usb_1: usb@a6f8800 {
+ compatible = "qcom,sar2130p-dwc3", "qcom,dwc3";
+ reg = <0x0 0x0a6f8800 0x0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&tcsr TCSR_USB3_CLKREF_EN>;
+ clock-names = "cfg_noc",
+ "core",
+ "iface",
+ "sleep",
+ "mock_utmi",
+ "xo";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+ "ss_phy_irq";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+ interconnects = <&system_noc MASTER_USB3_0 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_USB3_0 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
+ status = "disabled";
+
+ usb_1_dwc3: usb@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x0a600000 0x0 0xcd00>;
+ interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x20 0x0>;
+ phys = <&usb_1_hsphy>,
+ <&usb_dp_qmpphy QMP_USB43DP_USB3_PHY>;
+ phy-names = "usb2-phy", "usb3-phy";
+
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,is-utmi-l1-suspend;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ snps,parkmode-disable-ss-quirk;
+
+ tx-fifo-resize;
+ dma-coherent;
+ usb-role-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usb_1_dwc3_hs: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usb_1_dwc3_ss: endpoint {
+ remote-endpoint = <&usb_dp_qmpphy_usb_ss_in>;
+ };
+ };
+ };
+ };
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sar2130p-pdc", "qcom,pdc";
+ reg = <0x0 0x0b220000 0x0 0x30000>, <0x0 0x174000f0 0x0 0x64>;
+ qcom,pdc-ranges = <0 480 94>,
+ <94 609 31>,
+ <125 63 1>,
+ <126 716 12>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ aoss_qmp: power-management@c300000 {
+ compatible = "qcom,sar2130p-aoss-qmp", "qcom,aoss-qmp";
+ reg = <0x0 0x0c300000 0x0 0x400>;
+ interrupt-parent = <&ipcc>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ #clock-cells = <0>;
+ };
+
+ tsens0: thermal-sensor@c263000 {
+ compatible = "qcom,sar2130p-tsens", "qcom,tsens-v2";
+ reg = <0x0 0x0c263000 0x0 0x1000>, /* TM */
+ <0x0 0x0c222000 0x0 0x1000>; /* SROT */
+ #qcom,sensors = <16>;
+ interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow", "critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ sram@c3f0000 {
+ compatible = "qcom,rpmh-stats";
+ reg = <0x0 0x0c3f0000 0x0 0x400>;
+ };
+
+ arbiter@c400000 {
+ compatible = "qcom,sar2130p-spmi-pmic-arb",
+ "qcom,x1e80100-spmi-pmic-arb";
+ reg = <0x0 0x0c400000 0x0 0x3000>,
+ <0x0 0x0c500000 0x0 0x400000>,
+ <0x0 0x0c440000 0x0 0x80000>;
+ reg-names = "core", "chnls", "obsrvr";
+
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ spmi_bus: spmi@c42d000 {
+ reg = <0x0 0x0c42d000 0x0 0x4000>,
+ <0x0 0x0c4c0000 0x0 0x10000>;
+ reg-names = "cnfg", "intr";
+
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
+
+ ipcc: mailbox@ed18000 {
+ compatible = "qcom,sar2130p-ipcc", "qcom,ipcc";
+ reg = <0x0 0x0ed18000 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ #mbox-cells = <2>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sar2130p-tlmm";
+ reg = <0x0 0x0f100000 0x0 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 156>;
+ wakeup-parent = <&pdc>;
+
+ qup_i2c0_data_clk: qup-i2c0-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c1_data_clk: qup-i2c1-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio2", "gpio3";
+ function = "qup1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c2_data_clk: qup-i2c2-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio22", "gpio23";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c3_data_clk: qup-i2c3-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio16", "gpio17";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c4_data_clk: qup-i2c4-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio20", "gpio21";
+ function = "qup4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c5_data_clk: qup-i2c5-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio95", "gpio96";
+ function = "qup5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c6_data_clk: qup-i2c6-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio91", "gpio92";
+ function = "qup6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c7_data_clk: qup-i2c7-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio8", "gpio9";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c8_data_clk: qup-i2c8-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio8", "gpio9";
+ function = "qup8";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c9_data_clk: qup-i2c9-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio109", "gpio110";
+ function = "qup9";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c10_data_clk: qup-i2c10-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio4", "gpio5";
+ function = "qup10";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c11_data_clk: qup-i2c11-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio28", "gpio30";
+ function = "qup11";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_spi0_cs0: qup-spi0-cs0-state {
+ pins = "gpio3";
+ function = "qup0";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi0_cs1: qup-spi0-cs1-state {
+ pins = "gpio93";
+ function = "qup0";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi0_data_clk: qup-spi0-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio0", "gpio1", "gpio2";
+ function = "qup0";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi1_cs: qup-spi1-cs-state {
+ pins = "gpio62";
+ function = "qup1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi1_data_clk: qup-spi1-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio2", "gpio3", "gpio61";
+ function = "qup1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi2_cs: qup-spi2-cs-state {
+ pins = "gpio13";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi2_data_clk: qup-spi2-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio22", "gpio23", "gpio12";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi3_cs0: qup-spi3-cs0-state {
+ pins = "gpio19";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi3_cs1: qup-spi3-cs1-state {
+ pins = "gpio41";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi3_data_clk: qup-spi3-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio16", "gpio17", "gpio18";
+ function = "qup3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi4_cs0: qup-spi4-cs0-state {
+ pins = "gpio23";
+ function = "qup4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi4_cs1: qup-spi4-cs1-state {
+ pins = "gpio94";
+ function = "qup4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi4_data_clk: qup-spi4-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio20", "gpio21", "gpio22";
+ function = "qup4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi5_cs: qup-spi5-cs-state {
+ pins = "gpio98";
+ function = "qup5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi5_data_clk: qup-spi5-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio95", "gpio96", "gpio97";
+ function = "qup5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi6_cs: qup-spi6-cs-state {
+ pins = "gpio63";
+ function = "qup6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi6_data_clk: qup-spi6-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio91", "gpio92", "gpio64";
+ function = "qup6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi7_cs: qup-spi7-cs-state {
+ pins = "gpio27";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi7_data_clk: qup-spi7-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio24", "gpio25", "gpio26";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi8_cs: qup-spi8-cs-state {
+ pins = "gpio11";
+ function = "qup8";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi8_data_clk: qup-spi8-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio8", "gpio9", "gpio10";
+ function = "qup8";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi9_cs: qup-spi9-cs-state {
+ pins = "gpio35";
+ function = "qup9";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi9_data_clk: qup-spi9-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio109", "gpio110", "gpio34";
+ function = "qup9";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi10_cs: qup-spi10-cs-state {
+ pins = "gpio7";
+ function = "qup10";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi10_data_clk: qup-spi10-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio4", "gpio5", "gpio6";
+ function = "qup10";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi11_cs: qup-spi11-cs-state {
+ pins = "gpio15";
+ function = "qup11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_spi11_data_clk: qup-spi11-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio28", "gpio30", "gpio14";
+ function = "qup11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_uart7_default: qup-uart7-default-state {
+ cts-pins {
+ pins = "gpio24";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rts-pins {
+ pins = "gpio25";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ rx-pins {
+ pins = "gpio27";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ tx-pins {
+ pins = "gpio26";
+ function = "qup7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ qup_uart11_default: qup-uart11-default-state {
+ pins = "gpio14", "gpio15";
+ function = "qup11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc1_default: sdc1-default-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_sleep: sdc1-sleep-state {
+ clk-pins {
+ pins = "sdc1_clk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc1_cmd";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc1_data";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ rclk-pins {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sar2130p-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x15000000 0x0 0x100000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+ };
+
+ intc: interrupt-controller@17200000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ reg = <0x0 0x17200000 0x0 0x10000>,
+ <0x0 0x17260000 0x0 0x100000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic_its: msi-controller@17240000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x17240000 0x0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+ };
+
+ apps_rsc: rsc@17a00000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x17a00000 0x0 0x10000>,
+ <0x0 0x17a10000 0x0 0x10000>,
+ <0x0 0x17a20000 0x0 0x10000>;
+ reg-names = "drv-0", "drv-1", "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 3>, <SLEEP_TCS 2>,
+ <WAKE_TCS 2>, <CONTROL_TCS 0>;
+ power-domains = <&cluster_pd>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sar2130p-rpmh-clk";
+ #clock-cells = <1>;
+ clock-names = "xo";
+ clocks = <&xo_board>;
+ };
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sar2130p-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs_d1: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ };
+
+ rpmhpd_opp_low_svs: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_turbo: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+ };
+
+ cpufreq_hw: cpufreq@17d91000 {
+ compatible = "qcom,sar2130p-cpufreq-epss", "qcom,cpufreq-epss";
+ reg = <0x0 0x17d91000 0x0 0x1000>;
+ reg-names = "freq-domain0";
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dcvsh-irq-0";
+ #freq-domain-cells = <1>;
+ #clock-cells = <1>;
+ };
+
+ gem_noc: interconnect@19100000 {
+ compatible = "qcom,sar2130p-gem-noc";
+ reg = <0x0 0x19100000 0x0 0xa2080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ /*
+ * Bootloader expects just a cache-controller node instead of
+ * the typical system-cache-controller
+ */
+ llcc: cache-controller@19200000 {
+ compatible = "qcom,sar2130p-llcc";
+ reg = <0x0 0x19200000 0x0 0x80000>,
+ <0x0 0x19300000 0x0 0x80000>,
+ <0x0 0x19a00000 0x0 0x80000>,
+ <0x0 0x19c00000 0x0 0x80000>,
+ <0x0 0x19af0000 0x0 0x80000>,
+ <0x0 0x19cf0000 0x0 0x80000>;
+ reg-names = "llcc0_base",
+ "llcc1_base",
+ "llcc_broadcast_base",
+ "llcc_broadcast_and_base",
+ "llcc_scratchpad_broadcast_base",
+ "llcc_scratchpad_broadcast_and_base";
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ qfprom: qfprom@221c8000 {
+ compatible = "qcom,sar2130p-qfprom", "qcom,qfprom";
+ reg = <0x0 0x221c8000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ read-only;
+
+ gpu_speed_bin: gpu-speed-bin@119 {
+ reg = <0x119 0x2>;
+ bits = <5 8>;
+ };
+ };
+
+ nsp_noc: interconnect@320c0000 {
+ compatible = "qcom,sar2130p-nsp-noc";
+ reg = <0x0 0x320c0000 0x0 0x10>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ lpass_ag_noc: interconnect@3c40000 {
+ compatible = "qcom,sar2130p-lpass-ag-noc";
+ reg = <0x0 0x3c40000 0x0 0x10>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ thermal-zones {
+ aoss0-thermal {
+ thermal-sensors = <&tsens0 0>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ aoss0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu0-thermal {
+ thermal-sensors = <&tsens0 1>;
+
+ trips {
+ cpu0_alert0: trip-point0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu0_alert1: trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+
+ cpu0-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu0_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-thermal {
+ thermal-sensors = <&tsens0 2>;
+
+ trips {
+ cpu1_alert0: trip-point0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu1_alert1: trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+
+ cpu1-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-thermal {
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu2_alert0: trip-point0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu2_alert1: trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+
+ cpu2-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-thermal {
+ thermal-sensors = <&tsens0 4>;
+
+ trips {
+ cpu3_alert0: trip-point0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+
+ cpu3_alert1: trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+
+ cpu3-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpuss0-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu0_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ gpu0_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <115000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss1-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu1_alert0>;
+ cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ gpu1_alert0: trip-point0 {
+ temperature = <85000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ trip-point1 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+
+ trip-point2 {
+ temperature = <115000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ nspss0-thermal {
+ thermal-sensors = <&tsens0 7>;
+
+ trips {
+ trip-point0 {
+ temperature = <95000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ nspss0-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ nspss1-thermal {
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ trip-point0 {
+ temperature = <95000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ nspss1-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ nspss2-thermal {
+ thermal-sensors = <&tsens0 9>;
+
+ trips {
+ trip-point0 {
+ temperature = <95000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ trip-point1 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ nspss2-critical {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ video-thermal {
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ video-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ ddr-thermal {
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ ddr-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ camera0-thermal {
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ camera0-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ camera1-thermal {
+ thermal-sensors = <&tsens0 13>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ camera1-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdmss-thermal {
+ thermal-sensors = <&tsens0 14>;
+
+ trips {
+ trip-point0 {
+ temperature = <115000>;
+ hysteresis = <5000>;
+ type = "hot";
+ };
+
+ mdmss-critical {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index ac8d4589e3fb..f7300ffbb451 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -12,11 +12,11 @@
/ {
thermal-zones {
- 5v-choke-thermal {
+ choke-5v-thermal {
thermal-sensors = <&pm6150_adc_tm 1>;
trips {
- 5v-choke-crit {
+ choke-5v-crit {
temperature = <125000>;
hysteresis = <1000>;
type = "critical";
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
index 00229b1515e6..ff8996b4de4e 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
@@ -78,6 +78,7 @@
pinctrl-names = "default";
pinctrl-0 = <&lcd_rst>;
avdd-supply = <&ppvar_lcd>;
+ avee-supply = <&ppvar_lcd>;
pp1800-supply = <&v1p8_disp>;
pp3300-supply = <&pp3300_dx_edp>;
backlight = <&backlight>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 76fe314d2ad5..87c432c12a24 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -580,55 +580,55 @@
compatible = "arm,psci-1.0";
method = "smc";
- cpu_pd0: cpu0 {
+ cpu_pd0: power-domain-cpu0 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd1: cpu1 {
+ cpu_pd1: power-domain-cpu1 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd2: cpu2 {
+ cpu_pd2: power-domain-cpu2 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd3: cpu3 {
+ cpu_pd3: power-domain-cpu3 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd4: cpu4 {
+ cpu_pd4: power-domain-cpu4 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd5: cpu5 {
+ cpu_pd5: power-domain-cpu5 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
};
- cpu_pd6: cpu6 {
+ cpu_pd6: power-domain-cpu6 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
};
- cpu_pd7: cpu7 {
+ cpu_pd7: power-domain-cpu7 {
#power-domain-cells = <0>;
power-domains = <&cluster_pd>;
domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
};
- cluster_pd: cpu-cluster0 {
+ cluster_pd: power-domain-cluster {
#power-domain-cells = <0>;
domain-idle-states = <&cluster_sleep_pc
&cluster_sleep_cx_ret
@@ -3064,6 +3064,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 55db1c83ef55..0f2caf36910b 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -83,7 +83,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
};
@@ -3715,6 +3715,8 @@
iommus = <&apps_smmu 0xa0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_2_hsphy>;
phy-names = "usb2-phy";
maximum-speed = "high-speed";
@@ -4244,6 +4246,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
index 62de4774c556..21c2d25a2945 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts
@@ -681,10 +681,6 @@
status = "okay";
};
-&xo_board_clk {
- clock-frequency = <38400000>;
-};
-
/* PINCTRL */
&pmc8180c_gpios {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
index 79b4d293ea1e..7a4bd6955470 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
@@ -773,10 +773,6 @@
status = "okay";
};
-&xo_board_clk {
- clock-frequency = <38400000>;
-};
-
/* PINCTRL */
&pmc8180c_gpios {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 745a7d0b8381..28693a3bfc7f 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -2762,6 +2762,8 @@
iommus = <&apps_smmu 0x60 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_mp_hsphy0>,
<&usb_mp_qmpphy0>,
<&usb_mp_hsphy1>,
@@ -2825,6 +2827,8 @@
iommus = <&apps_smmu 0x140 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_prim_hsphy>, <&usb_prim_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
@@ -2902,6 +2906,8 @@
iommus = <&apps_smmu 0x160 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_sec_hsphy>, <&usb_sec_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts b/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts
new file mode 100644
index 000000000000..09b95f89ee58
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ *
+ * Copyright (c) 2024, Tianyu Gao <gty0622@gmail.com>
+ * Copyright (c) 2024, Xuecong Chen <chenxuecong2009@outlook.com>
+ *
+ * Copyright (c) 2024, Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "sc8280xp.dtsi"
+#include "sc8280xp-pmics.dtsi"
+
+/ {
+ chassis-type = "tablet";
+ model = "Matebook E Go";
+ compatible = "huawei,gaokun3", "qcom,sc8280xp";
+
+ aliases {
+ i2c4 = &i2c4;
+ serial1 = &uart2;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer0: framebuffer@c6200000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0xc6200000 0x0 0x02400000>;
+ width = <1600>;
+ height = <2560>;
+ stride = <(1600 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9380-codec";
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ reset-gpios = <&tlmm 106 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_s10b>;
+ vdd-rxtx-supply = <&vreg_s10b>;
+ vdd-io-supply = <&vreg_s10b>;
+ vdd-mic-bias-supply = <&vreg_bob>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&mode_pin_active>, <&vol_up_n>;
+ pinctrl-names = "default";
+
+ key-vol-up {
+ label = "Volume Up";
+ gpios = <&pmc8280_1_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+
+ switch-mode {
+ label = "Tablet Mode Switch";
+ gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_TABLET_MODE>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+ };
+
+ vreg_misc_3p3: regulator-misc-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3B";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pmc8280_1_gpios 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&misc_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3_SSD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 135 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VPH_VCC3R9";
+ regulator-min-microvolt = <3900000>;
+ regulator-max-microvolt = <3900000>;
+
+ regulator-always-on;
+ };
+
+ vreg_wlan: regulator-wlan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC_WLAN_3R9";
+ regulator-min-microvolt = <3900000>;
+ regulator-max-microvolt = <3900000>;
+
+ gpio = <&pmr735a_gpios 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&hastings_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ gpu_mem: gpu-mem@8bf00000 {
+ reg = <0 0x8bf00000 0 0x2000>;
+ no-map;
+ };
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ thermal-zones {
+ skin-temp-thermal {
+ polling-delay-passive = <250>;
+
+ thermal-sensors = <&pmk8280_adc_tm 5>;
+
+ trips {
+ skin_temp_alert0: trip-point0 {
+ temperature = <55000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ skin_temp_alert1: trip-point1 {
+ temperature = <58000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ skin-temp-crit {
+ temperature = <73000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&skin_temp_alert0>;
+ cooling-device = <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&skin_temp_alert1>;
+ cooling-device = <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ wcn6855-pmu {
+ compatible = "qcom,wcn6855-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_default>, <&wlan_en>;
+
+ wlan-enable-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 133 GPIO_ACTIVE_HIGH>;
+ swctrl-gpios = <&tlmm 132 GPIO_ACTIVE_HIGH>;
+
+ vddio-supply = <&vreg_s10b>;
+ vddaon-supply = <&vreg_s12b>;
+ vddpmu-supply = <&vreg_s12b>;
+ vddpmumx-supply = <&vreg_s12b>;
+ vddpmucx-supply = <&vreg_s12b>;
+ vddrfa0p95-supply = <&vreg_s12b>;
+ vddrfa1p3-supply = <&vreg_s11b>;
+ vddrfa1p9-supply = <&vreg_s1c>;
+ vddpcie1p3-supply = <&vreg_s11b>;
+ vddpcie1p9-supply = <&vreg_s1c>;
+
+ regulators {
+ vreg_pmu_rfa_cmn_0p8: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn_0p8";
+ };
+
+ vreg_pmu_aon_0p8: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p8";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p8: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p8";
+ };
+
+ vreg_pmu_btcmx_0p8: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p8";
+ };
+
+ vreg_pmu_pcie_1p8: ldo5 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo6 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_rfa_0p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo8 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo9 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-l1-l4-supply = <&vreg_s12b>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_s11b>;
+ vdd-l6-l9-l10-supply = <&vreg_s12b>;
+ vdd-l8-supply = <&vreg_s12b>;
+
+ vreg_s10b: smps10 {
+ regulator-name = "vreg_s10b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_s11b: smps11 {
+ regulator-name = "vreg_s11b";
+ regulator-min-microvolt = <1272000>;
+ regulator-max-microvolt = <1272000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s12b: smps12 {
+ regulator-name = "vreg_s12b";
+ regulator-min-microvolt = <984000>;
+ regulator-max-microvolt = <984000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b: ldo2 {
+ regulator-name = "vreg_l2b";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <1904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3b: ldo3 {
+ regulator-name = "vreg_l3b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b: ldo4 {
+ regulator-name = "vreg_l4b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b: ldo5 {
+ regulator-name = "vreg_l5b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b: ldo6 {
+ regulator-name = "vreg_l6b";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b: ldo7 {
+ regulator-name = "vreg_l7b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ };
+
+ vreg_l9b: ldo9 {
+ regulator-name = "vreg_l9b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-bob-supply = <&vreg_vph_pwr>;
+ vdd-l1-l12-supply = <&vreg_s1c>;
+ vdd-l2-l8-supply = <&vreg_s1c>;
+ vdd-l3-l4-l5-l7-l13-supply = <&vreg_bob>;
+ vdd-l6-l9-l11-supply = <&vreg_bob>;
+ vdd-l10-supply = <&vreg_s11b>;
+
+ vreg_s1c: smps1 {
+ regulator-name = "vreg_s1c";
+ regulator-min-microvolt = <1880000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c: ldo2 {
+ regulator-name = "vreg_l2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12c: ldo12 {
+ regulator-name = "vreg_l12c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13c: ldo13 {
+ regulator-name = "vreg_l13c";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-always-on;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-l4-supply = <&vreg_s11b>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_s11b>;
+ vdd-l6-l9-l10-supply = <&vreg_s12b>;
+ vdd-l8-supply = <&vreg_s12b>;
+
+ vreg_l2d: ldo2 {
+ regulator-name = "vreg_l2d";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d: ldo3 {
+ regulator-name = "vreg_l3d";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4d: ldo4 {
+ regulator-name = "vreg_l4d";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6d: ldo6 {
+ regulator-name = "vreg_l6d";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7d: ldo7 {
+ regulator-name = "vreg_l7d";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8d: ldo8 {
+ regulator-name = "vreg_l8d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9d: ldo9 {
+ regulator-name = "vreg_l9d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10d: ldo10 {
+ regulator-name = "vreg_l10d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&dispcc0 {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sc8280xp/HUAWEI/gaokun3/qcdxkmsuc8280.mbn";
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+
+ pinctrl-0 = <&i2c4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ touchscreen@4f {
+ compatible = "hid-over-i2c";
+ reg = <0x4f>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_s10b>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&mdss0 {
+ status = "okay";
+};
+
+&mdss0_dp0 {
+ status = "okay";
+};
+
+&mdss0_dp0_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&usb_0_qmpphy_dp_in>;
+};
+
+&mdss0_dp1 {
+ status = "okay";
+};
+
+&mdss0_dp1_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
+};
+
+&pcie2a {
+ perst-gpios = <&tlmm 143 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 145 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie2a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie2a_phy {
+ vdda-phy-supply = <&vreg_l6d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ max-link-speed = <2>;
+
+ perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wlan>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1103";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+ };
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l6d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&pmk8280_adc_tm {
+ status = "okay";
+
+ sys-therm@0 {
+ reg = <0>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@1 {
+ reg = <1>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@2 {
+ reg = <2>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@3 {
+ reg = <3>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@4 {
+ reg = <4>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@5 {
+ reg = <5>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@6 {
+ reg = <6>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@7 {
+ reg = <7>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+};
+
+&pmk8280_pon_pwrkey {
+ status = "okay";
+};
+
+&pmk8280_pon_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&pmk8280_rtc {
+ nvmem-cells = <&rtc_offset>;
+ nvmem-cell-names = "offset";
+
+ status = "okay";
+};
+
+&pmk8280_sdam_6 {
+ status = "okay";
+
+ rtc_offset: rtc-offset@bc {
+ reg = <0xbc 0x4>;
+ };
+};
+
+&pmk8280_vadc {
+ channel@144 {
+ reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm1";
+ };
+
+ channel@145 {
+ reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm2";
+ };
+
+ channel@146 {
+ reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm3";
+ };
+
+ channel@147 {
+ reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm4";
+ };
+
+ channel@344 {
+ reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm5";
+ };
+
+ channel@345 {
+ reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm6";
+ };
+
+ channel@346 {
+ reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm7";
+ };
+
+ channel@347 {
+ reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm8";
+ };
+};
+
+&qup0 {
+ status = "okay";
+};
+
+&qup1 {
+ status = "okay";
+};
+
+&qup2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sc8280xp/HUAWEI/gaokun3/qcadsp8280.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_nsp0 {
+ firmware-name = "qcom/sc8280xp/HUAWEI/gaokun3/qccdsp8280.mbn";
+
+ status = "okay";
+};
+
+&rxmacro {
+ status = "okay";
+};
+
+&sound {
+ compatible = "qcom,sc8280xp-sndcard";
+ model = "SC8280XP-HUAWEI-MATEBOOKEGO";
+ audio-routing = "SpkrLeft IN", "WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS1",
+ "VA DMIC1", "MIC BIAS1",
+ "VA DMIC2", "MIC BIAS3",
+ "VA DMIC0", "VA MIC BIAS1",
+ "VA DMIC1", "VA MIC BIAS1",
+ "VA DMIC2", "VA MIC BIAS3",
+ "TX SWR_ADC1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 0>, <&txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+
+ codec {
+ sound-dai = <&vamacro 0>;
+ };
+ };
+};
+
+&swr0 {
+ status = "okay";
+
+ left_spkr: wsa8830-left@0,1 {
+ compatible = "sdw10217020200";
+ reg = <0 1>;
+ pinctrl-0 = <&spkr_1_sd_n_default>;
+ pinctrl-names = "default";
+ powerdown-gpios = <&tlmm 178 GPIO_ACTIVE_LOW>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ vdd-supply = <&vreg_s10b>;
+ };
+
+ right_spkr: wsa8830-right@0,2 {
+ compatible = "sdw10217020200";
+ reg = <0 2>;
+ pinctrl-0 = <&spkr_2_sd_n_default>;
+ pinctrl-names = "default";
+ powerdown-gpios = <&tlmm 179 GPIO_ACTIVE_LOW>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ vdd-supply = <&vreg_s10b>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <1 1 2 3>;
+ };
+};
+
+&txmacro {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn6855-bt";
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p8>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+
+ max-speed = <3200000>;
+ };
+};
+
+&usb_0 {
+ status = "okay";
+};
+
+&usb_0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_0_hsphy {
+ vdda-pll-supply = <&vreg_l9d>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_0_qmpphy {
+ vdda-phy-supply = <&vreg_l9d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_0_qmpphy_dp_in {
+ remote-endpoint = <&mdss0_dp0_out>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l4b>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l13c>;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l4b>;
+ vdda-pll-supply = <&vreg_l3b>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy_dp_in {
+ remote-endpoint = <&mdss0_dp1_out>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_hsphy0 {
+ vdda-pll-supply = <&vreg_l1b>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy1 {
+ vdda-pll-supply = <&vreg_l8d>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy2 {
+ vdda-pll-supply = <&vreg_l10d>;
+ vdda18-supply = <&vreg_l8c>;
+ vdda33-supply = <&vreg_l2d>;
+
+ status = "okay";
+};
+
+&usb_2_hsphy3 {
+ vdda-pll-supply = <&vreg_l10d>;
+ vdda18-supply = <&vreg_l8c>;
+ vdda33-supply = <&vreg_l2d>;
+
+ status = "okay";
+};
+
+&usb_2_qmpphy0 {
+ vdda-phy-supply = <&vreg_l1b>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&usb_2_qmpphy1 {
+ vdda-phy-supply = <&vreg_l8d>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&vamacro {
+ pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_s10b>;
+
+ qcom,dmic-sample-rate = <4800000>;
+
+ status = "okay";
+};
+
+&wsamacro {
+ status = "okay";
+};
+
+&xo_board_clk {
+ clock-frequency = <38400000>;
+};
+
+/* PINCTRL */
+
+&lpass_tlmm {
+ status = "okay";
+};
+
+&pmc8280_1_gpios {
+ misc_3p3_reg_en: misc-3p3-reg-en-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+
+ vol_up_n: vol-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ power-source = <1>;
+ input-enable;
+ bias-pull-up;
+ };
+};
+
+&pmr735a_gpios {
+ hastings_reg_en: hastings-reg-en-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+};
+
+&tlmm {
+	gpio-reserved-ranges = <70 2>, <74 6>, <83 4>, <125 2>, <128 2>, <154 4>;
+
+ bt_default: bt-default-state {
+ hstp-bt-en-pins {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ hstp-sw-ctrl-pins {
+ pins = "gpio132";
+ function = "gpio";
+ bias-pull-down;
+ };
+ };
+
+ i2c4_default: i2c4-default-state {
+ pins = "gpio171", "gpio172";
+ function = "qup4";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ mode_pin_active: mode-pin-state {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio135";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie2a_default: pcie2a-default-state {
+ clkreq-n-pins {
+ pins = "gpio142";
+ function = "pcie2a_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio143";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio145";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio140";
+ function = "pcie4_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio141";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio139";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ spkr_1_sd_n_default: spkr-1-sd-n-default-state {
+		sd-n-pins {
+ pins = "gpio178";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spkr_2_sd_n_default: spkr-2-sd-n-default-state {
+		sd-n-pins {
+ pins = "gpio179";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ ts0_default: ts0-default-state {
+ int-n-pins {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ reset-n-pins {
+ pins = "gpio99";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ uart2_default: uart2-default-state {
+ cts-pins {
+ pins = "gpio121";
+ function = "qup2";
+ bias-bus-hold;
+ };
+
+ rts-pins {
+ pins = "gpio122";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio124";
+ function = "qup2";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio123";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ wcd_default: wcd-default-state {
+ reset-pins {
+ pins = "gpio106";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ wlan_en: wlan-en-state {
+ pins = "gpio134";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts
new file mode 100644
index 000000000000..fa9d94105052
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-microsoft-blackrock.dts
@@ -0,0 +1,1325 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ * Copyright (c) 2023, Merck Hung <merckhung@gmail.com>
+ * Copyright (c) 2023, 2024 Jens Glathe <jens.glathe@oldschoolsolutions.biz>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "sc8280xp.dtsi"
+#include "sc8280xp-pmics.dtsi"
+
+/ {
+ model = "Windows Dev Kit 2023";
+ compatible = "microsoft,blackrock", "qcom,sc8280xp";
+ chassis-type = "desktop";
+
+ aliases {
+ i2c4 = &i2c4;
+ i2c21 = &i2c21;
+ serial1 = &uart2;
+ };
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9380-codec";
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ reset-gpios = <&tlmm 106 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_s10b>;
+ vdd-rxtx-supply = <&vreg_s10b>;
+ vdd-io-supply = <&vreg_s10b>;
+ vdd-mic-bias-supply = <&vreg_bob>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ dp3_connector: connector {
+ compatible = "dp-connector";
+ label = "DP-3";
+ type = "mini";
+
+ dp-pwr-supply = <&vreg_misc_3p3>;
+
+ port {
+			dp3_connector_in: endpoint {
+ remote-endpoint = <&mdss0_dp2_phy_out>;
+ };
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,sc8280xp-pmic-glink", "qcom,pmic-glink";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 166 GPIO_ACTIVE_HIGH>,
+ <&tlmm 49 GPIO_ACTIVE_HIGH>;
+
+ /* Left-side rear port */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "source";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_con0_hs: endpoint {
+ remote-endpoint = <&usb_0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_con0_ss: endpoint {
+ remote-endpoint = <&usb_0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_con0_sbu: endpoint {
+ remote-endpoint = <&usb0_sbu_mux>;
+ };
+ };
+ };
+ };
+
+ /* Left-side front port */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "source";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_con1_hs: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_con1_ss: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_con1_sbu: endpoint {
+ remote-endpoint = <&usb1_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ vreg_misc_3p3: regulator-misc-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3B";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pmc8280_1_gpios 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&misc_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3_SSD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 135 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VPH_VCC3R9";
+ regulator-min-microvolt = <3900000>;
+ regulator-max-microvolt = <3900000>;
+
+ regulator-always-on;
+ };
+
+ vreg_wlan: regulator-wlan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC_WLAN_3R9";
+ regulator-min-microvolt = <3900000>;
+ regulator-max-microvolt = <3900000>;
+
+ gpio = <&pmr735a_gpios 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&hastings_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_wwan: regulator-wwan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC3B_WAN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pmc8280_2_gpios 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wwan_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ gpu_mem: gpu-mem@8bf00000 {
+ reg = <0 0x8bf00000 0 0x2000>;
+ no-map;
+ };
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ usb0-sbu-mux {
+ compatible = "pericom,pi3usb102", "gpio-sbu-mux";
+
+ enable-gpios = <&tlmm 101 GPIO_ACTIVE_LOW>;
+ select-gpios = <&tlmm 164 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb0_sbu_default>;
+ pinctrl-names = "default";
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ usb0_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_con0_sbu>;
+ };
+ };
+ };
+
+ usb1-sbu-mux {
+ compatible = "pericom,pi3usb102", "gpio-sbu-mux";
+
+ enable-gpios = <&tlmm 48 GPIO_ACTIVE_LOW>;
+ select-gpios = <&tlmm 47 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb1_sbu_default>;
+ pinctrl-names = "default";
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ usb1_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_con1_sbu>;
+ };
+ };
+ };
+
+ wcn6855-pmu {
+ compatible = "qcom,wcn6855-pmu";
+
+ pinctrl-0 = <&bt_default>, <&wlan_en>;
+ pinctrl-names = "default";
+
+ wlan-enable-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 133 GPIO_ACTIVE_HIGH>;
+ swctrl-gpios = <&tlmm 132 GPIO_ACTIVE_HIGH>;
+
+ vddio-supply = <&vreg_s10b>;
+ vddaon-supply = <&vreg_s12b>;
+ vddpmu-supply = <&vreg_s12b>;
+ vddpmumx-supply = <&vreg_s12b>;
+ vddpmucx-supply = <&vreg_s12b>;
+ vddrfa0p95-supply = <&vreg_s12b>;
+ vddrfa1p3-supply = <&vreg_s11b>;
+ vddrfa1p9-supply = <&vreg_s1c>;
+ vddpcie1p3-supply = <&vreg_s11b>;
+ vddpcie1p9-supply = <&vreg_s1c>;
+
+ regulators {
+ vreg_pmu_rfa_cmn_0p8: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn_0p8";
+ };
+
+ vreg_pmu_aon_0p8: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p8";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p8: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p8";
+ };
+
+ vreg_pmu_btcmx_0p8: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p8";
+ };
+
+ vreg_pmu_pcie_1p8: ldo5 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo6 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_rfa_0p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo8 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo9 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-s10-supply = <&vreg_vph_pwr>;
+ vdd-s11-supply = <&vreg_vph_pwr>;
+ vdd-s12-supply = <&vreg_vph_pwr>;
+ vdd-l1-l4-supply = <&vreg_s12b>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_s11b>;
+ vdd-l6-l9-l10-supply = <&vreg_s12b>;
+ vdd-l8-supply = <&vreg_s12b>;
+
+ vreg_s10b: smps10 {
+ regulator-name = "vreg_s10b";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_s11b: smps11 {
+ regulator-name = "vreg_s11b";
+ regulator-min-microvolt = <1272000>;
+ regulator-max-microvolt = <1272000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s12b: smps12 {
+ regulator-name = "vreg_s12b";
+ regulator-min-microvolt = <984000>;
+ regulator-max-microvolt = <984000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b: ldo1 {
+ regulator-name = "vreg_l1b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3b: ldo3 {
+ regulator-name = "vreg_l3b";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ };
+
+ vreg_l4b: ldo4 {
+ regulator-name = "vreg_l4b";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b: ldo6 {
+ regulator-name = "vreg_l6b";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-bob-supply = <&vreg_vph_pwr>;
+ vdd-l1-l12-supply = <&vreg_s1c>;
+ vdd-l2-l8-supply = <&vreg_s1c>;
+ vdd-l3-l4-l5-l7-l13-supply = <&vreg_bob>;
+ vdd-l6-l9-l11-supply = <&vreg_bob>;
+ vdd-l10-supply = <&vreg_s11b>;
+
+ vreg_s1c: smps1 {
+ regulator-name = "vreg_s1c";
+ regulator-min-microvolt = <1880000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l1c: ldo1 {
+ regulator-name = "vreg_l1c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c: ldo6 {
+ regulator-name = "vreg_l6c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c: ldo7 {
+ regulator-name = "vreg_l7c";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c: ldo8 {
+ regulator-name = "vreg_l8c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c: ldo9 {
+ regulator-name = "vreg_l9c";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12c: ldo12 {
+ regulator-name = "vreg_l12c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13c: ldo13 {
+ regulator-name = "vreg_l13c";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-always-on;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-l4-supply = <&vreg_s11b>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_s11b>;
+ vdd-l6-l9-l10-supply = <&vreg_s12b>;
+ vdd-l8-supply = <&vreg_s12b>;
+
+ vreg_l2d: ldo2 {
+ regulator-name = "vreg_l2d";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d: ldo3 {
+ regulator-name = "vreg_l3d";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4d: ldo4 {
+ regulator-name = "vreg_l4d";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6d: ldo6 {
+ regulator-name = "vreg_l6d";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7d: ldo7 {
+ regulator-name = "vreg_l7d";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8d: ldo8 {
+ regulator-name = "vreg_l8d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9d: ldo9 {
+ regulator-name = "vreg_l9d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10d: ldo10 {
+ regulator-name = "vreg_l10d";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&dispcc0 {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sc8280xp/microsoft/blackrock/qcdxkmsuc8280.mbn";
+ };
+};
+
+&mdss0 {
+ status = "okay";
+};
+
+&mdss0_dp0 {
+ status = "okay";
+};
+
+&mdss0_dp0_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&usb_0_qmpphy_dp_in>;
+};
+
+&mdss0_dp1 {
+ status = "okay";
+};
+
+&mdss0_dp1_out {
+ data-lanes = <0 1>;
+ remote-endpoint = <&usb_1_qmpphy_dp_in>;
+};
+
+&mdss0_dp2 {
+ compatible = "qcom,sc8280xp-dp";
+
+ data-lanes = <0 1 2 3>;
+
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+ mdss0_dp2_phy_out: endpoint {
+				remote-endpoint = <&dp3_connector_in>;
+ };
+ };
+ };
+};
+
+&mdss0_dp2_phy {
+ compatible = "qcom,sc8280xp-dp-phy";
+
+ vdda-phy-supply = <&vreg_l3b>;
+ vdda-pll-supply = <&vreg_l6b>;
+
+ status = "okay";
+};
+
+&pcie2a {
+ perst-gpios = <&tlmm 143 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 145 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie2a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie2a_phy {
+ vdda-phy-supply = <&vreg_l4d>;
+ vdda-pll-supply = <&vreg_l6d>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ max-link-speed = <2>;
+
+ perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wlan>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1103";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+
+ qcom,ath11k-calibration-variant = "MS_Volterra";
+ };
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l4d>;
+ vdda-pll-supply = <&vreg_l6d>;
+
+ status = "okay";
+};
+
+&pmc8280c_lpg {
+ status = "okay";
+};
+
+&pmk8280_adc_tm {
+ status = "okay";
+
+ sys-therm@0 {
+ reg = <0>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@1 {
+ reg = <1>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@2 {
+ reg = <2>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@3 {
+ reg = <3>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@4 {
+ reg = <4>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@5 {
+ reg = <5>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@6 {
+ reg = <6>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+
+ sys-therm@7 {
+ reg = <7>;
+ io-channels = <&pmk8280_vadc PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
+ qcom,hw-settle-time-us = <200>;
+ qcom,avg-samples = <2>;
+ qcom,ratiometric;
+ };
+};
+
+&pmk8280_pon_pwrkey {
+ status = "okay";
+};
+
+&pmk8280_pon_resin {
+ status = "okay";
+};
+
+&pmk8280_rtc {
+ nvmem-cells = <&rtc_offset>;
+ nvmem-cell-names = "offset";
+
+ status = "okay";
+};
+
+&pmk8280_sdam_6 {
+ status = "okay";
+
+ rtc_offset: rtc-offset@bc {
+ reg = <0xbc 0x4>;
+ };
+};
+
+&pmk8280_vadc {
+ channel@144 {
+ reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm1";
+ };
+
+ channel@145 {
+ reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm2";
+ };
+
+ channel@146 {
+ reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm3";
+ };
+
+ channel@147 {
+ reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm4";
+ };
+
+ channel@344 {
+ reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm5";
+ };
+
+ channel@345 {
+ reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm6";
+ };
+
+ channel@346 {
+ reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm7";
+ };
+
+ channel@347 {
+ reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
+ qcom,hw-settle-time = <200>;
+ qcom,ratiometric;
+ label = "sys_therm8";
+ };
+};
+
+&qup0 {
+ status = "okay";
+};
+
+&qup1 {
+ status = "okay";
+};
+
+&qup2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/sc8280xp/microsoft/blackrock/qcadsp8280.mbn";
+
+ status = "okay";
+};
+
+&remoteproc_nsp0 {
+ firmware-name = "qcom/sc8280xp/microsoft/blackrock/qccdsp8280.mbn";
+
+ status = "okay";
+};
+
+&rxmacro {
+ status = "okay";
+};
+
+&sound {
+ compatible = "qcom,sc8280xp-sndcard";
+ model = "microsoft/blackrock";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 0>, <&txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&swr0 0>, <&wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+
+ codec {
+ sound-dai = <&vamacro 0>;
+ };
+ };
+};
+
+&swr0 {
+ status = "okay";
+};
+
+&swr1 {
+ status = "okay";
+
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <1 1 2 3>;
+ };
+};
+
+&txmacro {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn6855-bt";
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p8>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+
+ max-speed = <3200000>;
+ };
+};
+
+&usb_0 {
+ status = "okay";
+};
+
+&usb_0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con0_hs>;
+};
+
+&usb_0_hsphy {
+ vdda-pll-supply = <&vreg_l9d>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_0_qmpphy {
+ vdda-phy-supply = <&vreg_l4d>;
+ vdda-pll-supply = <&vreg_l9d>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_0_qmpphy_dp_in {
+ remote-endpoint = <&mdss0_dp0_out>;
+};
+
+&usb_0_qmpphy_out {
+ remote-endpoint = <&pmic_glink_con0_ss>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_con1_hs>;
+};
+
+&usb_1_hsphy {
+ vdda-pll-supply = <&vreg_l4b>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l13c>;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy {
+ vdda-phy-supply = <&vreg_l3b>;
+ vdda-pll-supply = <&vreg_l4b>;
+
+ orientation-switch;
+
+ status = "okay";
+};
+
+&usb_1_qmpphy_dp_in {
+ remote-endpoint = <&mdss0_dp1_out>;
+};
+
+&usb_1_qmpphy_out {
+ remote-endpoint = <&pmic_glink_con1_ss>;
+};
+
+&usb_2 {
+ pinctrl-0 = <&usb2_en_state>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ phys = <&usb_2_hsphy0>, <&usb_2_qmpphy0>;
+ phy-names = "usb2-0", "usb3-0";
+};
+
+&usb_2_hsphy0 {
+ vdda-pll-supply = <&vreg_l1b>;
+ vdda18-supply = <&vreg_l1c>;
+ vdda33-supply = <&vreg_l7d>;
+
+ status = "okay";
+};
+
+&usb_2_qmpphy0 {
+ vdda-phy-supply = <&vreg_l1b>;
+ vdda-pll-supply = <&vreg_l4d>;
+
+ status = "okay";
+};
+
+&vamacro {
+ pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_s10b>;
+
+ qcom,dmic-sample-rate = <4800000>;
+
+ status = "okay";
+};
+
+&wsamacro {
+ status = "okay";
+};
+
+&xo_board_clk {
+ clock-frequency = <38400000>;
+};
+
+/* PINCTRL */
+
+&lpass_tlmm {
+ status = "okay";
+};
+
+&pmc8280_1_gpios {
+ misc_3p3_reg_en: misc-3p3-reg-en-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio8";
+ function = "normal";
+ };
+
+ edp_bl_reg_en: edp-bl-reg-en-state {
+ pins = "gpio9";
+ function = "normal";
+ };
+};
+
+&pmc8280_2_gpios {
+ wwan_sw_en: wwan-sw-en-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+};
+
+&pmc8280c_gpios {
+ edp_bl_pwm: edp-bl-pwm-state {
+ pins = "gpio8";
+ function = "func1";
+ };
+};
+
+&pmr735a_gpios {
+ hastings_reg_en: hastings-reg-en-state {
+ pins = "gpio1";
+ function = "normal";
+ };
+};
+
+&tlmm {
+ bt_default: bt-default-state {
+ hstp-bt-en-pins {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ hstp-sw-ctrl-pins {
+ pins = "gpio132";
+ function = "gpio";
+ bias-pull-down;
+ };
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio135";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie2a_default: pcie2a-default-state {
+ clkreq-n-pins {
+ pins = "gpio142";
+ function = "pcie2a_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio143";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio145";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie3a_default: pcie3a-default-state {
+ clkreq-n-pins {
+ pins = "gpio150";
+ function = "pcie3a_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio151";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio140";
+ function = "pcie4_clkreq";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio141";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio139";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ uart2_default: uart2-default-state {
+ cts-pins {
+ pins = "gpio121";
+ function = "qup2";
+ bias-bus-hold;
+ };
+
+ rts-pins {
+ pins = "gpio122";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio124";
+ function = "qup2";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio123";
+ function = "qup2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ usb0_sbu_default: usb0-sbu-state {
+ oe-n-pins {
+ pins = "gpio101";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <16>;
+ output-high;
+ };
+
+ sel-pins {
+ pins = "gpio164";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+
+ usb1_sbu_default: usb1-sbu-state {
+ oe-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <16>;
+ output-high;
+ };
+
+ sel-pins {
+ pins = "gpio47";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+
+ usb2_en_state: usb2-en-state {
+ /* TS3USB221A USB2.0 mux select */
+ pins = "gpio24";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ wcd_default: wcd-default-state {
+ reset-pins {
+ pins = "gpio106";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ wlan_en: wlan-en-state {
+ pins = "gpio134";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index ef06d1ac084d..01501acb1790 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -2743,7 +2743,7 @@
remoteproc_adsp: remoteproc@3000000 {
compatible = "qcom,sc8280xp-adsp-pas";
- reg = <0 0x03000000 0 0x100>;
+ reg = <0 0x03000000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -3536,6 +3536,8 @@
"usb2-2",
"usb2-3";
dr_mode = "host";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
};
};
@@ -3593,6 +3595,8 @@
iommus = <&apps_smmu 0x820 0x0>;
phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
ports {
#address-cells = <1>;
@@ -3670,6 +3674,8 @@
iommus = <&apps_smmu 0x860 0x0>;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
ports {
#address-cells = <1>;
@@ -3900,26 +3906,26 @@
"vfe3",
"csid3";
- interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 758 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 759 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 760 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 761 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 762 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 764 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csid1_lite",
"vfe_lite1",
"csiphy3",
@@ -5254,7 +5260,7 @@
remoteproc_nsp0: remoteproc@1b300000 {
compatible = "qcom,sc8280xp-nsp0-pas";
- reg = <0 0x1b300000 0 0x100>;
+ reg = <0 0x1b300000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -5385,7 +5391,7 @@
remoteproc_nsp1: remoteproc@21300000 {
compatible = "qcom,sc8280xp-nsp1-pas";
- reg = <0 0x21300000 0 0x100>;
+ reg = <0 0x21300000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts b/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts
index c509bbfe5d3e..735a21df8cc9 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-lenovo-tbx605f.dts
@@ -46,6 +46,18 @@
};
};
+ backlight: gpio-backlight {
+ compatible = "gpio-backlight";
+
+ gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>;
+
+ default-on;
+
+		pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&backlight_enable_active>;
+ pinctrl-1 = <&backlight_enable_sleep>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
key-volume-up {
@@ -63,6 +75,49 @@
};
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_dsi0 {
+ vdda-supply = <&pm8953_s3>;
+ vddio-supply = <&pm8953_l6>;
+
+ status = "okay";
+
+ panel@0 {
+ compatible = "boe,tv101wum-ll2";
+ reg = <0>;
+
+ vsp-supply = <&lab>;
+ vsn-supply = <&ibb>;
+ reset-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
+
+ backlight = <&backlight>;
+
+		pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&panel_reset_active>;
+ pinctrl-1 = <&panel_reset_sleep>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
+ };
+};
+
+&mdss_dsi0_out {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&mdss_dsi0_phy {
+ vcca-supply = <&pm8953_l3>;
+
+ status = "okay";
+};
+
&hsusb_phy {
vdd-supply = <&pm8953_l3>;
vdda-pll-supply = <&pm8953_l7>;
@@ -90,6 +145,18 @@
};
};
+&ibb {
+ regulator-min-microvolt = <4600000>;
+ regulator-max-microvolt = <6000000>;
+ qcom,discharge-resistor-kohms = <32>;
+};
+
+&lab {
+ regulator-min-microvolt = <4600000>;
+ regulator-max-microvolt = <6000000>;
+ qcom,soft-start-us = <800>;
+};
+
&pm8953_resin {
linux,code = <KEY_VOLUMEDOWN>;
status = "okay";
@@ -237,6 +304,36 @@
&tlmm {
gpio-reserved-ranges = <0 4>, <135 4>;
+ backlight_enable_active: backlight-enable-active-state {
+ pins = "gpio16";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+
+ backlight_enable_sleep: backlight-enable-sleep-state {
+ pins = "gpio16";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ panel_reset_active: panel-reset-active-state {
+ pins = "gpio61";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+
+ panel_reset_sleep: panel-reset-sleep-state {
+ pins = "gpio61";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
ts_int_active: ts-int-active-state {
pins = "gpio65";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index 19420cfdadf1..a2c079bac1a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -1300,6 +1300,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&qusb2phy0>, <&usb3_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
@@ -1505,6 +1507,8 @@
interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
/* This is the HS-only host */
maximum-speed = "high-speed";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
index 176b0119fe6d..74b5d9c68eb6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
@@ -10,6 +10,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include "sdm670.dtsi"
@@ -49,20 +50,6 @@
};
};
- clocks {
- sleep_clk: sleep-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32764>;
- };
-
- xo_board: xo-board {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <38400000>;
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
autorepeat;
@@ -407,6 +394,15 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sdm670/sargo/a615_zap.mbn";
+ };
+};
+
&i2c9 {
clock-frequency = <100000>;
status = "okay";
@@ -482,6 +478,19 @@
status = "okay";
};
+&pm660l_flash {
+ status = "okay";
+
+ led-0 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <1>, <2>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <1500000>;
+ flash-max-timeout-us = <1280000>;
+ };
+};
+
&pm660l_gpios {
vol_up_pin: vol-up-state {
pins = "gpio7";
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index c93dd06c0b7d..279e62ec5433 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/gpio/gpio.h>
@@ -28,6 +29,20 @@
chosen { };
+ clocks {
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ };
+
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
@@ -617,6 +632,11 @@
#address-cells = <1>;
#size-cells = <1>;
+			gpu_speed_bin: gpu-speed-bin@1a2 {
+ reg = <0x1a2 0x2>;
+ bits = <5 8>;
+ };
+
qusb2_hstx_trim: hstx-trim@1eb {
reg = <0x1eb 0x1>;
bits = <1 4>;
@@ -1299,6 +1319,180 @@
};
};
+ gpu: gpu@5000000 {
+ compatible = "qcom,adreno-615.0", "qcom,adreno";
+
+ reg = <0 0x05000000 0 0x40000>, <0 0x0509e000 0 0x10>;
+ reg-names = "kgsl_3d0_reg_memory", "cx_mem";
+
+ /*
+ * Look ma, no clocks! The GPU clocks and power are
+ * controlled entirely by the GMU
+ */
+
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&adreno_smmu 0>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+
+ interconnects = <&mem_noc MASTER_GRAPHICS_3D 0 &mem_noc SLAVE_EBI_CH0 0>;
+ interconnect-names = "gfx-mem";
+
+ nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cell-names = "speed_bin";
+
+ status = "disabled";
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-780000000 {
+ opp-hz = /bits/ 64 <780000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0x8>;
+ };
+
+ opp-750000000 {
+ opp-hz = /bits/ 64 <750000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0x8>;
+ };
+
+ opp-700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0x4>;
+ };
+
+ opp-650000000 {
+ opp-hz = /bits/ 64 <650000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0xc>;
+ };
+
+ opp-565000000 {
+ opp-hz = /bits/ 64 <565000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0xc>;
+ };
+
+ opp-504000000 {
+ opp-hz = /bits/ 64 <504000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0x2>;
+ };
+
+ opp-430000000 {
+ opp-hz = /bits/ 64 <430000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0xf>;
+ };
+
+ opp-355000000 {
+ opp-hz = /bits/ 64 <355000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <6220000>;
+ opp-supported-hw = <0xf>;
+ };
+
+ opp-267000000 {
+ opp-hz = /bits/ 64 <267000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <4068000>;
+ opp-supported-hw = <0xf>;
+ };
+
+ opp-180000000 {
+ opp-hz = /bits/ 64 <180000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ opp-peak-kBps = <1804000>;
+ opp-supported-hw = <0xf>;
+ };
+ };
+ };
+
+ adreno_smmu: iommu@5040000 {
+ compatible = "qcom,sdm670-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
+ reg = <0 0x05040000 0 0x10000>;
+ #iommu-cells = <1>;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_CFG_AHB_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&gpucc GPU_CX_GDSC>;
+ };
+
+ gmu: gmu@506a000 {
+ compatible = "qcom,adreno-gmu-615.0", "qcom,adreno-gmu";
+
+ reg = <0 0x0506a000 0 0x30000>,
+ <0 0x0b280000 0 0x10000>,
+ <0 0x0b480000 0 0x10000>;
+ reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
+ clock-names = "gmu", "cxo", "axi", "memnoc";
+
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx", "gx";
+
+ iommus = <&adreno_smmu 5>;
+
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+ };
+ };
+
+ gpucc: clock-controller@5090000 {
+ compatible = "qcom,sdm845-gpucc";
+ reg = <0 0x05090000 0 0x9000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
+ };
+
usb_1_hsphy: phy@88e2000 {
compatible = "qcom,sdm670-qusb2-phy", "qcom,qusb2-v2-phy";
reg = <0 0x088e2000 0 0x400>;
@@ -1400,6 +1594,16 @@
#interrupt-cells = <4>;
};
+ camcc: clock-controller@ad00000 {
+ compatible = "qcom,sdm670-camcc", "qcom,sdm845-camcc";
+ reg = <0 0x0ad00000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "bi_tcxo";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
mdss: display-subsystem@ae00000 {
compatible = "qcom,sdm670-mdss";
reg = <0 0x0ae00000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
index 0a87df806caf..59970082da45 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
@@ -79,45 +79,3 @@
};
};
};
-
-&cci_i2c1 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- camera@60 {
- compatible = "ovti,ov7251";
-
- /* I2C address as per ov7251.txt linux documentation */
- reg = <0x60>;
-
- /* CAM3_RST_N */
- enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&cam3_default>;
-
- clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
- clock-names = "xclk";
- clock-frequency = <24000000>;
-
- /*
- * The &vreg_s4a_1p8 trace always powered on.
- *
- * The 2.8V vdda-supply regulator is enabled when the
- * vreg_s4a_1p8 trace is pulled high.
- * It too is represented by a fixed regulator.
- *
- * No 1.2V vddd-supply regulator is used.
- */
- vdddo-supply = <&vreg_lvs1a_1p8>;
- vdda-supply = <&cam3_avdd_2v8>;
-
- status = "disabled";
-
- port {
- ov7251_ep: endpoint {
- data-lanes = <0 1>;
-/* remote-endpoint = <&csiphy3_ep>; */
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
index 486ce175e6bc..ddb82ecb0a92 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
@@ -452,7 +452,6 @@
irq-gpio = <&tlmm 125 GPIO_TRANSITORY>;
touchscreen-size-x = <1080>;
touchscreen-size-y = <2160>;
- focaltech,max-touch-number = <5>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1ed794638a7c..e0ce804bb1a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -4139,6 +4139,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
@@ -4215,6 +4217,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
@@ -4326,16 +4330,16 @@
"vfe1",
"vfe_lite";
- interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csid0",
"csid1",
"csid2",
diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
index 5f7e59ecf1ca..b0a8a0fe5f39 100644
--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
@@ -34,7 +34,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
};
@@ -893,7 +893,7 @@
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sdx75-mpss-pas";
- reg = <0 0x04080000 0 0x4040>;
+ reg = <0 0x04080000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1037,6 +1037,8 @@
iommus = <&apps_smmu 0x80 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_hsphy>,
<&usb_qmpphy>;
phy-names = "usb2-phy",
diff --git a/arch/arm64/boot/dts/qcom/sm4250.dtsi b/arch/arm64/boot/dts/qcom/sm4250.dtsi
index a0ed61925e12..cd8c8e59976e 100644
--- a/arch/arm64/boot/dts/qcom/sm4250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm4250.dtsi
@@ -36,3 +36,42 @@
&cpu7 {
compatible = "qcom,kryo240";
};
+
+&lpass_tlmm {
+ compatible = "qcom,sm4250-lpass-lpi-pinctrl";
+ gpio-ranges = <&lpass_tlmm 0 0 27>;
+
+ lpi_i2s2_active: lpi-i2s2-active-state {
+ sck-pins {
+ pins = "gpio10";
+ function = "i2s2_clk";
+ bias-disable;
+ drive-strength = <8>;
+ output-high;
+ };
+
+ ws-pins {
+ pins = "gpio11";
+ function = "i2s2_ws";
+ bias-disable;
+ drive-strength = <8>;
+ output-high;
+ };
+
+ data-pins {
+ pins = "gpio12";
+ function = "i2s2_data";
+ bias-disable;
+ drive-strength = <8>;
+ output-high;
+ };
+
+ ext-mclk1-pins {
+ pins = "gpio18";
+ function = "ext_mclk1_a";
+ bias-disable;
+ drive-strength = <16>;
+ output-high;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
index a0de5fe16faa..27453771aa68 100644
--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
@@ -29,7 +29,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index 9b23534c456b..94c081bf7a89 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -14,6 +14,9 @@
#include <dt-bindings/interconnect/qcom,sm6115.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,apr.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -808,6 +811,20 @@
};
};
+ lpass_tlmm: pinctrl@a7c0000 {
+ compatible = "qcom,sm6115-lpass-lpi-pinctrl";
+ reg = <0x0 0x0a7c0000 0x0 0x20000>,
+ <0x0 0x0a950000 0x0 0x10000>;
+
+ clocks = <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "audio";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lpass_tlmm 0 0 19>;
+
+ };
+
gcc: clock-controller@1400000 {
compatible = "qcom,gcc-sm6115";
reg = <0x0 0x01400000 0x0 0x1f0000>;
@@ -2027,7 +2044,7 @@
remoteproc_mpss: remoteproc@6080000 {
compatible = "qcom,sm6115-mpss-pas";
- reg = <0x0 0x06080000 0x0 0x100>;
+ reg = <0x0 0x06080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
<&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2670,9 +2687,9 @@
};
};
- remoteproc_adsp: remoteproc@ab00000 {
+ remoteproc_adsp: remoteproc@a400000 {
compatible = "qcom,sm6115-adsp-pas";
- reg = <0x0 0x0ab00000 0x0 0x100>;
+ reg = <0x0 0x0a400000 0x0 0x4040>;
interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2701,6 +2718,76 @@
qcom,remote-pid = <2>;
mboxes = <&apcs_glb 8>;
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,domain = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ service@3 {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+ };
+
+ q6afe: service@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6afecc: clock-controller {
+ compatible = "qcom,q6afe-clocks";
+ #clock-cells = <2>;
+ };
+ };
+
+ q6asm: service@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&apps_smmu 0x1c1 0x0>;
+
+ dai@0 {
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+
+ dai@1 {
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+
+ dai@2 {
+ reg = <MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+ };
+
+ q6adm: service@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
fastrpc {
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
@@ -2744,7 +2831,7 @@
remoteproc_cdsp: remoteproc@b300000 {
compatible = "qcom,sm6115-cdsp-pas";
- reg = <0x0 0x0b300000 0x0 0x100000>;
+ reg = <0x0 0x0b300000 0x0 0x4040>;
interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index 17d528d63934..350d807a622f 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -28,7 +28,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
clock-output-names = "sleep_clk";
};
};
@@ -1209,6 +1209,8 @@
phy-names = "usb2-phy";
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
maximum-speed = "high-speed";
dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 8d697280249f..00ad1d09a195 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -936,7 +936,7 @@
power-domains = <&rpmhpd SM6350_CX>;
operating-points-v2 = <&qup_opp_table>;
interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
- <&aggre1_noc MASTER_QUP_0 0 &clk_virt SLAVE_EBI_CH0 0>;
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_QUP_0 0>;
interconnect-names = "qup-core", "qup-config";
status = "disabled";
};
@@ -1283,7 +1283,7 @@
adsp: remoteproc@3000000 {
compatible = "qcom,sm6350-adsp-pas";
- reg = <0 0x03000000 0 0x100>;
+ reg = <0x0 0x03000000 0x0 0x10000>;
interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1503,7 +1503,7 @@
mpss: remoteproc@4080000 {
compatible = "qcom,sm6350-mpss-pas";
- reg = <0x0 0x04080000 0x0 0x4040>;
+ reg = <0x0 0x04080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
<&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1924,6 +1924,8 @@
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,parkmode-disable-ss-quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
usb-role-switch;
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index e0b1c54e98c0..0faa3a40ff82 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -29,7 +29,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
};
@@ -1516,9 +1516,9 @@
#power-domain-cells = <1>;
};
- remoteproc_mss: remoteproc@6000000 {
+ remoteproc_mss: remoteproc@6080000 {
compatible = "qcom,sm6375-mpss-pas";
- reg = <0 0x06000000 0 0x4040>;
+ reg = <0x0 0x06080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1559,7 +1559,7 @@
remoteproc_adsp: remoteproc@a400000 {
compatible = "qcom,sm6375-adsp-pas";
- reg = <0 0x0a400000 0 0x100>;
+ reg = <0 0x0a400000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -1595,9 +1595,9 @@
};
};
- remoteproc_cdsp: remoteproc@b000000 {
+ remoteproc_cdsp: remoteproc@b300000 {
compatible = "qcom,sm6375-cdsp-pas";
- reg = <0x0 0x0b000000 0x0 0x100000>;
+ reg = <0x0 0x0b300000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
index 2ee2561b57b1..52b16a4fdc43 100644
--- a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+++ b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
@@ -32,7 +32,7 @@
chassis-type = "handset";
/* required for bootloader to select correct board */
- qcom,msm-id = <434 0x10000>, <459 0x10000>;
+ qcom,msm-id = <459 0x10000>;
qcom,board-id = <8 32>;
aliases {
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index bac08f00b303..6ea883b1edfa 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -578,6 +578,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
index b039773c4465..9a3d0ac6c423 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
@@ -376,8 +376,8 @@
pinctrl-0 = <&da7280_intr_default>;
dlg,actuator-type = "LRA";
- dlg,dlg,const-op-mode = <1>;
- dlg,dlg,periodic-op-mode = <1>;
+ dlg,const-op-mode = <1>;
+ dlg,periodic-op-mode = <1>;
dlg,nom-microvolt = <2000000>;
dlg,abs-max-microvolt = <2000000>;
dlg,imax-microamp = <129000>;
@@ -430,6 +430,11 @@
/* MAX34417 @ 0x1e */
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
index 256a1ba94945..2e1c7afe0aa7 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
@@ -358,6 +358,11 @@
status = "okay";
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
index ae0ca48b89a5..70fd6455518b 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
@@ -601,6 +601,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index cedae8d03a51..4dbda54b47a5 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -3658,6 +3658,8 @@
iommus = <&apps_smmu 0x140 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
@@ -3735,6 +3737,8 @@
iommus = <&apps_smmu 0x160 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-hdk.dts b/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
index 1bbb71e1a4fc..f5c193c6c5f9 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
@@ -373,6 +373,11 @@
status = "okay";
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index 21b2ca1def83..7f592bd30248 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -627,6 +627,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&qupv3_id_0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
index f6870d3f2886..d8289b2698f3 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
@@ -591,6 +591,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
index 3596dd328c31..813b009b7bd6 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-common.dtsi
@@ -30,6 +30,10 @@
qcom,msm-id = <QCOM_ID_SM8250 0x20001>; /* SM8250 v2.1 */
qcom,board-id = <0x10008 0>;
+ aliases {
+ serial0 = &uart6;
+ };
+
chosen {
#address-cells = <2>;
#size-cells = <2>;
@@ -97,6 +101,67 @@
};
};
+ qca6390-pmu {
+ compatible = "qcom,qca6390-pmu";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_state>, <&wlan_en_state>;
+
+ vddaon-supply = <&vreg_s6a_0p95>;
+ vddpmu-supply = <&vreg_s6a_0p95>;
+ vddrfa0p95-supply = <&vreg_s6a_0p95>;
+ vddrfa1p3-supply = <&vreg_s8c_1p35>;
+ vddrfa1p9-supply = <&vreg_s5a_1p9>;
+ vddpcie1p3-supply = <&vreg_s8c_1p35>;
+ vddpcie1p9-supply = <&vreg_s5a_1p9>;
+ vddio-supply = <&vreg_s4a_1p8>;
+
+ wlan-enable-gpios = <&tlmm 20 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+
+ regulators {
+ vreg_pmu_rfa_cmn: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn";
+ };
+
+ vreg_pmu_aon_0p59: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p59";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p85: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p85";
+ };
+
+ vreg_pmu_btcmx_0p85: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p85";
+ };
+
+ vreg_pmu_rfa_0p8: ldo5 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo6 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo7 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+
+ vreg_pmu_pcie_0p9: ldo8 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_pcie_1p8: ldo9 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+ };
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
@@ -619,6 +684,25 @@
status = "okay";
};
+&pcieport0 {
+ wifi@0 {
+ compatible = "pci17cb,1101";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+
+ qcom,ath11k-calibration-variant = "Xiaomi_Pad_5Pro";
+ };
+};
+
&pm8150_gpios {
vol_up_n: vol-up-n-state {
pins = "gpio6";
@@ -673,6 +757,11 @@
status = "okay";
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
@@ -701,6 +790,37 @@
&tlmm {
gpio-reserved-ranges = <40 4>;
+
+ bt_en_state: bt-default-state {
+ pins = "gpio21";
+ function = "gpio";
+ drive-strength = <16>;
+ output-low;
+ bias-pull-up;
+ };
+
+ wlan_en_state: wlan-default-state {
+ pins = "gpio20";
+ function = "gpio";
+ drive-strength = <16>;
+ output-low;
+ bias-pull-up;
+ };
+};
+
+&uart6 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,qca6390-bt";
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+ vddaon-supply = <&vreg_pmu_aon_0p59>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p85>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p7-supply = <&vreg_pmu_rfa_1p7>;
+ };
};
&usb_1 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-pipa.dts b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-pipa.dts
index 86e1f7fd1c20..668078ea4f04 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-pipa.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-pipa.dts
@@ -554,6 +554,11 @@
};
};
+&pon {
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+};
+
&pon_pwrkey {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 48318ed1ce98..c2937b4d9f18 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -84,7 +84,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32768>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
};
@@ -4207,6 +4207,8 @@
iommus = <&apps_smmu 0x0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
@@ -4294,6 +4296,8 @@
iommus = <&apps_smmu 0x20 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
@@ -4481,20 +4485,20 @@
"vfe_lite0",
"vfe_lite1";
- interrupts = <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csiphy0",
"csiphy1",
"csiphy2",
diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
index 796cbb58ef6e..f9de0e49fa24 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
@@ -925,3 +925,10 @@
};
};
};
+
+&ipa {
+ qcom,gsi-loader = "self";
+ memory-region = <&pil_ipa_fw_mem>;
+ status = "okay";
+ firmware-name = "qcom/sm8350/ipa_fws.mbn";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 877905dfd861..69da30f35baa 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -42,7 +42,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
};
@@ -1876,6 +1876,142 @@
reg = <0x0 0x1fc0000 0x0 0x30000>;
};
+ adsp: remoteproc@3000000 {
+ compatible = "qcom,sm8350-adsp-pas";
+ reg = <0x0 0x03000000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,domain = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ service@3 {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ };
+
+ q6afe: service@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6afecc: clock-controller {
+ compatible = "qcom,q6afe-clocks";
+ #clock-cells = <2>;
+ };
+ };
+
+ q6asm: service@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&apps_smmu 0x1801 0x0>;
+
+ dai@0 {
+ reg = <0>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ };
+ };
+ };
+
+ q6adm: service@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1803 0x0>;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1804 0x0>;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1805 0x0>;
+ };
+ };
+ };
+ };
+
lpass_tlmm: pinctrl@33c0000 {
compatible = "qcom,sm8350-lpass-lpi-pinctrl";
reg = <0 0x033c0000 0 0x20000>,
@@ -2078,7 +2214,7 @@
mpss: remoteproc@4080000 {
compatible = "qcom,sm8350-mpss-pas";
- reg = <0x0 0x04080000 0x0 0x4040>;
+ reg = <0x0 0x04080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2360,6 +2496,115 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ cdsp: remoteproc@a300000 {
+ compatible = "qcom,sm8350-cdsp-pas";
+ reg = <0x0 0x0a300000 0x0 0x10000>;
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_CX>,
+ <&rpmhpd RPMHPD_MXC>;
+ power-domain-names = "cx", "mxc";
+
+ interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&pil_cdsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_cdsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "cdsp";
+ qcom,remote-pid = <5>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ iommus = <&apps_smmu 0x2161 0x0400>,
+ <&apps_smmu 0x1181 0x0420>;
+ };
+
+ compute-cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ iommus = <&apps_smmu 0x2162 0x0400>,
+ <&apps_smmu 0x1182 0x0420>;
+ };
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x2163 0x0400>,
+ <&apps_smmu 0x1183 0x0420>;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x2164 0x0400>,
+ <&apps_smmu 0x1184 0x0420>;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x2165 0x0400>,
+ <&apps_smmu 0x1185 0x0420>;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x2166 0x0400>,
+ <&apps_smmu 0x1186 0x0420>;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x2167 0x0400>,
+ <&apps_smmu 0x1187 0x0420>;
+ };
+
+ compute-cb@8 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <8>;
+ iommus = <&apps_smmu 0x2168 0x0400>,
+ <&apps_smmu 0x1188 0x0420>;
+ };
+
+ /* note: secure cb9 in downstream */
+ };
+ };
+ };
+
usb_1: usb@a6f8800 {
compatible = "qcom,sm8350-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
@@ -2409,6 +2654,8 @@
iommus = <&apps_smmu 0x0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
@@ -2485,6 +2732,8 @@
iommus = <&apps_smmu 0x20 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
@@ -3285,142 +3534,6 @@
dma-coherent;
};
- adsp: remoteproc@17300000 {
- compatible = "qcom,sm8350-adsp-pas";
- reg = <0 0x17300000 0 0x100>;
-
- interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog", "fatal", "ready",
- "handover", "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- power-domains = <&rpmhpd RPMHPD_LCX>,
- <&rpmhpd RPMHPD_LMX>;
- power-domain-names = "lcx", "lmx";
-
- memory-region = <&pil_adsp_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_adsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
- mboxes = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- label = "lpass";
- qcom,remote-pid = <2>;
-
- apr {
- compatible = "qcom,apr-v2";
- qcom,glink-channels = "apr_audio_svc";
- qcom,domain = <APR_DOMAIN_ADSP>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- service@3 {
- reg = <APR_SVC_ADSP_CORE>;
- compatible = "qcom,q6core";
- qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
- };
-
- q6afe: service@4 {
- compatible = "qcom,q6afe";
- reg = <APR_SVC_AFE>;
- qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
-
- q6afedai: dais {
- compatible = "qcom,q6afe-dais";
- #address-cells = <1>;
- #size-cells = <0>;
- #sound-dai-cells = <1>;
- };
-
- q6afecc: clock-controller {
- compatible = "qcom,q6afe-clocks";
- #clock-cells = <2>;
- };
- };
-
- q6asm: service@7 {
- compatible = "qcom,q6asm";
- reg = <APR_SVC_ASM>;
- qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
-
- q6asmdai: dais {
- compatible = "qcom,q6asm-dais";
- #address-cells = <1>;
- #size-cells = <0>;
- #sound-dai-cells = <1>;
- iommus = <&apps_smmu 0x1801 0x0>;
-
- dai@0 {
- reg = <0>;
- };
-
- dai@1 {
- reg = <1>;
- };
-
- dai@2 {
- reg = <2>;
- };
- };
- };
-
- q6adm: service@8 {
- compatible = "qcom,q6adm";
- reg = <APR_SVC_ADM>;
- qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
-
- q6routing: routing {
- compatible = "qcom,q6adm-routing";
- #sound-dai-cells = <0>;
- };
- };
- };
-
- fastrpc {
- compatible = "qcom,fastrpc";
- qcom,glink-channels = "fastrpcglink-apps-dsp";
- label = "adsp";
- qcom,non-secure-domain;
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
- iommus = <&apps_smmu 0x1803 0x0>;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
- iommus = <&apps_smmu 0x1804 0x0>;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
- iommus = <&apps_smmu 0x1805 0x0>;
- };
- };
- };
- };
-
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
@@ -3589,115 +3702,6 @@
#freq-domain-cells = <1>;
#clock-cells = <1>;
};
-
- cdsp: remoteproc@98900000 {
- compatible = "qcom,sm8350-cdsp-pas";
- reg = <0 0x98900000 0 0x1400000>;
-
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog", "fatal", "ready",
- "handover", "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- power-domains = <&rpmhpd RPMHPD_CX>,
- <&rpmhpd RPMHPD_MXC>;
- power-domain-names = "cx", "mxc";
-
- interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
-
- memory-region = <&pil_cdsp_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_cdsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
- mboxes = <&ipcc IPCC_CLIENT_CDSP
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- label = "cdsp";
- qcom,remote-pid = <5>;
-
- fastrpc {
- compatible = "qcom,fastrpc";
- qcom,glink-channels = "fastrpcglink-apps-dsp";
- label = "cdsp";
- qcom,non-secure-domain;
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@1 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <1>;
- iommus = <&apps_smmu 0x2161 0x0400>,
- <&apps_smmu 0x1181 0x0420>;
- };
-
- compute-cb@2 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <2>;
- iommus = <&apps_smmu 0x2162 0x0400>,
- <&apps_smmu 0x1182 0x0420>;
- };
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
- iommus = <&apps_smmu 0x2163 0x0400>,
- <&apps_smmu 0x1183 0x0420>;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
- iommus = <&apps_smmu 0x2164 0x0400>,
- <&apps_smmu 0x1184 0x0420>;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
- iommus = <&apps_smmu 0x2165 0x0400>,
- <&apps_smmu 0x1185 0x0420>;
- };
-
- compute-cb@6 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <6>;
- iommus = <&apps_smmu 0x2166 0x0400>,
- <&apps_smmu 0x1186 0x0420>;
- };
-
- compute-cb@7 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <7>;
- iommus = <&apps_smmu 0x2167 0x0400>,
- <&apps_smmu 0x1187 0x0420>;
- };
-
- compute-cb@8 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <8>;
- iommus = <&apps_smmu 0x2168 0x0400>,
- <&apps_smmu 0x1188 0x0420>;
- };
-
- /* note: secure cb9 in downstream */
- };
- };
- };
};
thermal_zones: thermal-zones {
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 53147aa6f7e4..9c809fc5fa45 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -43,7 +43,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
};
@@ -287,6 +287,192 @@
};
};
+ ete-0 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu0>;
+
+ out-ports {
+ port {
+ ete0_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete0>;
+ };
+ };
+ };
+ };
+
+ ete-1 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu1>;
+
+ out-ports {
+ port {
+ ete1_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete1>;
+ };
+ };
+ };
+ };
+
+ ete-2 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu2>;
+
+ out-ports {
+ port {
+ ete2_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete2>;
+ };
+ };
+ };
+ };
+
+ ete-3 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu3>;
+
+ out-ports {
+ port {
+ ete3_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete3>;
+ };
+ };
+ };
+ };
+
+ ete-4 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu4>;
+
+ out-ports {
+ port {
+ ete4_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete4>;
+ };
+ };
+ };
+ };
+
+ ete-5 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu5>;
+
+ out-ports {
+ port {
+ ete5_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete5>;
+ };
+ };
+ };
+ };
+
+ ete-6 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu6>;
+
+ out-ports {
+ port {
+ ete6_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete6>;
+ };
+ };
+ };
+ };
+
+ ete-7 {
+ compatible = "arm,embedded-trace-extension";
+ cpu = <&cpu7>;
+
+ out-ports {
+ port {
+ ete7_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete7>;
+ };
+ };
+ };
+ };
+
+ funnel-ete {
+ compatible = "arm,coresight-static-funnel";
+
+ out-ports {
+ port {
+ funnel_ete_out_funnel_apss: endpoint {
+ remote-endpoint =
+ <&funnel_apss_in_funnel_ete>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_ete_in_ete0: endpoint {
+ remote-endpoint =
+ <&ete0_out_funnel_ete>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel_ete_in_ete1: endpoint {
+ remote-endpoint =
+ <&ete1_out_funnel_ete>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ funnel_ete_in_ete2: endpoint {
+ remote-endpoint =
+ <&ete2_out_funnel_ete>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ funnel_ete_in_ete3: endpoint {
+ remote-endpoint =
+ <&ete3_out_funnel_ete>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ funnel_ete_in_ete4: endpoint {
+ remote-endpoint =
+ <&ete4_out_funnel_ete>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ funnel_ete_in_ete5: endpoint {
+ remote-endpoint =
+ <&ete5_out_funnel_ete>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ funnel_ete_in_ete6: endpoint {
+ remote-endpoint =
+ <&ete6_out_funnel_ete>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ funnel_ete_in_ete7: endpoint {
+ remote-endpoint =
+ <&ete7_out_funnel_ete>;
+ };
+ };
+ };
+ };
+
firmware {
scm: scm {
compatible = "qcom,scm-sm8450", "qcom,scm";
@@ -2496,6 +2682,112 @@
};
};
+ remoteproc_adsp: remoteproc@3000000 {
+ compatible = "qcom,sm8450-adsp-pas";
+ reg = <0x0 0x03000000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ memory-region = <&adsp_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1801 0x0>;
+ };
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1803 0x0>;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1804 0x0>;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1805 0x0>;
+ };
+ };
+ };
+ };
+
wsa2macro: codec@31e0000 {
compatible = "qcom,sm8450-lpass-wsa-macro";
reg = <0 0x031e0000 0 0x1000>;
@@ -2692,115 +2984,9 @@
status = "disabled";
};
- remoteproc_adsp: remoteproc@30000000 {
- compatible = "qcom,sm8450-adsp-pas";
- reg = <0 0x30000000 0 0x100>;
-
- interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog", "fatal", "ready",
- "handover", "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- power-domains = <&rpmhpd RPMHPD_LCX>,
- <&rpmhpd RPMHPD_LMX>;
- power-domain-names = "lcx", "lmx";
-
- memory-region = <&adsp_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_adsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- remoteproc_adsp_glink: glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
- mboxes = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- label = "lpass";
- qcom,remote-pid = <2>;
-
- gpr {
- compatible = "qcom,gpr";
- qcom,glink-channels = "adsp_apps";
- qcom,domain = <GPR_DOMAIN_ID_ADSP>;
- qcom,intents = <512 20>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- q6apm: service@1 {
- compatible = "qcom,q6apm";
- reg = <GPR_APM_MODULE_IID>;
- #sound-dai-cells = <0>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6apmdai: dais {
- compatible = "qcom,q6apm-dais";
- iommus = <&apps_smmu 0x1801 0x0>;
- };
-
- q6apmbedai: bedais {
- compatible = "qcom,q6apm-lpass-dais";
- #sound-dai-cells = <1>;
- };
- };
-
- q6prm: service@2 {
- compatible = "qcom,q6prm";
- reg = <GPR_PRM_MODULE_IID>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6prmcc: clock-controller {
- compatible = "qcom,q6prm-lpass-clocks";
- #clock-cells = <2>;
- };
- };
- };
-
- fastrpc {
- compatible = "qcom,fastrpc";
- qcom,glink-channels = "fastrpcglink-apps-dsp";
- label = "adsp";
- qcom,non-secure-domain;
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
- iommus = <&apps_smmu 0x1803 0x0>;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
- iommus = <&apps_smmu 0x1804 0x0>;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
- iommus = <&apps_smmu 0x1805 0x0>;
- };
- };
- };
- };
-
remoteproc_cdsp: remoteproc@32300000 {
compatible = "qcom,sm8450-cdsp-pas";
- reg = <0 0x32300000 0 0x1400000>;
+ reg = <0 0x32300000 0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2907,7 +3093,7 @@
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sm8450-mpss-pas";
- reg = <0x0 0x04080000 0x0 0x4040>;
+ reg = <0x0 0x04080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -4144,6 +4330,546 @@
};
};
+ stm@10002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0x0 0x10002000 0x0 0x1000>,
+ <0x0 0x16280000 0x0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_stm>;
+ };
+ };
+ };
+ };
+
+ funnel@10041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10041000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+ funnel_in0_in_stm: endpoint {
+ remote-endpoint =
+ <&stm_out_funnel_in0>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_in0_out_funnel_qdss: endpoint {
+ remote-endpoint =
+ <&funnel_qdss_in_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x10042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ funnel_in1_in_funnel_apss: endpoint {
+ remote-endpoint =
+ <&funnel_apss_out_funnel_in1>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ funnel_in1_in_funnel_dl_center: endpoint {
+ remote-endpoint =
+ <&funnel_dl_center_out_funnel_in1>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_in1_out_funnel_qdss: endpoint {
+ remote-endpoint =
+ <&funnel_qdss_in_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@10045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_qdss_in_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_out_funnel_qdss>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel_qdss_in_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_out_funnel_qdss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_qdss_out_funnel_aoss: endpoint {
+ remote-endpoint =
+ <&funnel_aoss_in_funnel_qdss>;
+ };
+ };
+ };
+ };
+
+ replicator@10046000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x10046000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator_qdss_in_replicator_swao: endpoint {
+ remote-endpoint =
+ <&replicator_swao_out_replicator_qdss>;
+ };
+ };
+ };
+
+ out-ports {
+
+ port {
+ replicator_qdss_out_replicator_etr: endpoint {
+ remote-endpoint =
+ <&replicator_etr_in_replicator_qdss>;
+ };
+ };
+ };
+ };
+
+ tmc_etr: tmc@10048000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x10048000 0x0 0x1000>;
+
+ iommus = <&apps_smmu 0x0600 0>;
+ arm,buffer-size = <0x10000>;
+
+ arm,scatter-gather;
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmc_etr_in_replicator_etr: endpoint {
+ remote-endpoint =
+ <&replicator_etr_out_tmc_etr>;
+ };
+ };
+ };
+ };
+
+ replicator@1004e000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x1004e000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator_etr_in_replicator_qdss: endpoint {
+ remote-endpoint =
+ <&replicator_qdss_out_replicator_etr>;
+ };
+ };
+ };
+
+ out-ports {
+
+ port {
+
+ replicator_etr_out_tmc_etr: endpoint {
+ remote-endpoint =
+ <&tmc_etr_in_replicator_etr>;
+ };
+ };
+ };
+ };
+
+ funnel@10b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x10b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+ funnel_aoss_in_tpda_aoss: endpoint {
+ remote-endpoint =
+ <&tpda_aoss_out_funnel_aoss>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ funnel_aoss_in_funnel_qdss: endpoint {
+ remote-endpoint =
+ <&funnel_qdss_out_funnel_aoss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_aoss_out_tmc_etf: endpoint {
+ remote-endpoint =
+ <&tmc_etf_in_funnel_aoss>;
+ };
+ };
+ };
+ };
+
+ tmc@10b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x10b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmc_etf_in_funnel_aoss: endpoint {
+ remote-endpoint =
+ <&funnel_aoss_out_tmc_etf>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tmc_etf_out_replicator_swao: endpoint {
+ remote-endpoint =
+ <&replicator_swao_in_tmc_etf>;
+ };
+ };
+ };
+ };
+
+ replicator@10b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x10b06000 0x0 0x1000>;
+
+ qcom,replicator-loses-context;
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ replicator_swao_in_tmc_etf: endpoint {
+ remote-endpoint =
+ <&tmc_etf_out_replicator_swao>;
+ };
+ };
+ };
+
+ out-ports {
+
+ port {
+ replicator_swao_out_replicator_qdss: endpoint {
+ remote-endpoint =
+ <&replicator_qdss_in_replicator_swao>;
+ };
+ };
+ };
+ };
+
+ tpda@10b08000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+
+ reg = <0x0 0x10b08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tpda_aoss_in_tpdm_swao_prio_0: endpoint {
+ remote-endpoint =
+ <&tpdm_swao_prio_0_out_tpda_aoss>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ tpda_aoss_in_tpdm_swao: endpoint {
+ remote-endpoint =
+ <&tpdm_swao_out_tpda_aoss>;
+ };
+ };
+ };
+
+ out-ports {
+
+ port {
+ tpda_aoss_out_funnel_aoss: endpoint {
+ remote-endpoint =
+ <&funnel_aoss_in_tpda_aoss>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b09000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b09000 0x0 0x1000>;
+
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ tpdm_swao_prio_0_out_tpda_aoss: endpoint {
+ remote-endpoint =
+ <&tpda_aoss_in_tpdm_swao_prio_0>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b0d000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b0d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ tpdm_swao_out_tpda_aoss: endpoint {
+ remote-endpoint =
+ <&tpda_aoss_in_tpdm_swao>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c28000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c28000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ tpdm_dlct_out_tpda_dl_center_26: endpoint {
+ remote-endpoint =
+ <&tpda_dl_center_26_in_tpdm_dlct>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c29000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c29000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ tpdm_ipcc_out_tpda_dl_center_27: endpoint {
+ remote-endpoint =
+ <&tpda_dl_center_27_in_tpdm_ipcc>;
+ };
+ };
+ };
+ };
+
+ cti@10c2a000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x10c2a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@10c2b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x10c2b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpda@10c2e000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10c2e000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1a {
+ reg = <26>;
+ tpda_dl_center_26_in_tpdm_dlct: endpoint {
+ remote-endpoint =
+ <&tpdm_dlct_out_tpda_dl_center_26>;
+ };
+ };
+
+ port@1b {
+ reg = <27>;
+ tpda_dl_center_27_in_tpdm_ipcc: endpoint {
+ remote-endpoint =
+ <&tpdm_ipcc_out_tpda_dl_center_27>;
+ };
+ };
+ };
+
+ out-ports {
+
+ port {
+ tpda_dl_center_out_funnel_dl_center: endpoint {
+ remote-endpoint =
+ <&funnel_dl_center_in_tpda_dl_center>;
+ };
+ };
+ };
+ };
+
+ funnel@10c2f000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10c2f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+
+ port {
+ funnel_dl_center_in_tpda_dl_center: endpoint {
+ remote-endpoint =
+ <&tpda_dl_center_out_funnel_dl_center>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_dl_center_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_funnel_dl_center>;
+ };
+ };
+ };
+ };
+
+ funnel@13810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x13810000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+
+ port {
+ funnel_apss_in_funnel_ete: endpoint {
+ remote-endpoint =
+ <&funnel_ete_out_funnel_apss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_apss_out_funnel_in1: endpoint {
+ remote-endpoint =
+ <&funnel_in1_in_funnel_apss>;
+ };
+ };
+ };
+ };
+
+ cti@138e0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x138e0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@138f0000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x138f0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ cti@13900000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x13900000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
sram@146aa000 {
compatible = "qcom,sm8450-imem", "syscon", "simple-mfd";
reg = <0 0x146aa000 0 0x1000>;
@@ -4672,6 +5398,8 @@
iommus = <&apps_smmu 0x0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
index 01c921602605..29bc1ddfc7b2 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
@@ -1172,7 +1172,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&swr0 {
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index ab447fc252f7..5648ab60ba4c 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -825,7 +825,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&swr0 {
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 6052dd922ec5..3a6cb2791304 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -1005,7 +1005,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&swr0 {
diff --git a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
index 3c5d8d26704f..e8383faac576 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
@@ -565,7 +565,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
index 85d487ef80a0..d90dc7b37c4a 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
@@ -722,7 +722,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index e7774d32fb6d..eac8de4005d8 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/firmware/qcom,scm.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom-rpmpd.h>
@@ -1734,7 +1735,8 @@
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -1742,7 +1744,8 @@
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
@@ -1850,7 +1853,8 @@
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -1858,7 +1862,8 @@
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
@@ -2114,6 +2119,10 @@
qcom,gmu = <&gmu>;
#cooling-cells = <2>;
+ interconnects = <&gem_noc MASTER_GFX3D QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "gfx-mem";
+
status = "disabled";
zap-shader {
@@ -2127,41 +2136,49 @@
opp-680000000 {
opp-hz = /bits/ 64 <680000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <16500000>;
};
opp-615000000 {
opp-hz = /bits/ 64 <615000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L0>;
+ opp-peak-kBps = <12449218>;
};
opp-550000000 {
opp-hz = /bits/ 64 <550000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <10687500>;
};
opp-475000000 {
opp-hz = /bits/ 64 <475000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L1>;
+ opp-peak-kBps = <6074218>;
};
opp-401000000 {
opp-hz = /bits/ 64 <401000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <6074218>;
};
opp-348000000 {
opp-hz = /bits/ 64 <348000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D0>;
+ opp-peak-kBps = <6074218>;
};
opp-295000000 {
opp-hz = /bits/ 64 <295000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ opp-peak-kBps = <6074218>;
};
opp-220000000 {
opp-hz = /bits/ 64 <220000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D2>;
+ opp-peak-kBps = <2136718>;
};
};
};
@@ -2314,7 +2331,7 @@
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sm8550-mpss-pas";
- reg = <0x0 0x04080000 0x0 0x4040>;
+ reg = <0x0 0x04080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2354,6 +2371,137 @@
};
};
+ remoteproc_adsp: remoteproc@6800000 {
+ compatible = "qcom,sm8550-adsp-pas";
+ reg = <0x0 0x06800000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+
+ memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1003 0x80>,
+ <&apps_smmu 0x1063 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1004 0x80>,
+ <&apps_smmu 0x1064 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1005 0x80>,
+ <&apps_smmu 0x1065 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x1006 0x80>,
+ <&apps_smmu 0x1066 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x1007 0x80>,
+ <&apps_smmu 0x1067 0x0>;
+ dma-coherent;
+ };
+ };
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1001 0x80>,
+ <&apps_smmu 0x1061 0x0>;
+ };
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+ };
+ };
+
lpass_wsa2macro: codec@6aa0000 {
compatible = "qcom,sm8550-lpass-wsa-macro";
reg = <0 0x06aa0000 0 0x1000>;
@@ -2872,9 +3020,8 @@
power-domains = <&dispcc MDSS_GDSC>;
- interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
- <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>;
+ interconnect-names = "mdp0-mem";
iommus = <&apps_smmu 0x1c00 0x2>;
@@ -4576,137 +4723,6 @@
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
- remoteproc_adsp: remoteproc@30000000 {
- compatible = "qcom,sm8550-adsp-pas";
- reg = <0x0 0x30000000 0x0 0x100>;
-
- interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog", "fatal", "ready",
- "handover", "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- power-domains = <&rpmhpd RPMHPD_LCX>,
- <&rpmhpd RPMHPD_LMX>;
- power-domain-names = "lcx", "lmx";
-
- interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
-
- memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_adsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- remoteproc_adsp_glink: glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
- mboxes = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- label = "lpass";
- qcom,remote-pid = <2>;
-
- fastrpc {
- compatible = "qcom,fastrpc";
- qcom,glink-channels = "fastrpcglink-apps-dsp";
- label = "adsp";
- qcom,non-secure-domain;
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
- iommus = <&apps_smmu 0x1003 0x80>,
- <&apps_smmu 0x1063 0x0>;
- dma-coherent;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
- iommus = <&apps_smmu 0x1004 0x80>,
- <&apps_smmu 0x1064 0x0>;
- dma-coherent;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
- iommus = <&apps_smmu 0x1005 0x80>,
- <&apps_smmu 0x1065 0x0>;
- dma-coherent;
- };
-
- compute-cb@6 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <6>;
- iommus = <&apps_smmu 0x1006 0x80>,
- <&apps_smmu 0x1066 0x0>;
- dma-coherent;
- };
-
- compute-cb@7 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <7>;
- iommus = <&apps_smmu 0x1007 0x80>,
- <&apps_smmu 0x1067 0x0>;
- dma-coherent;
- };
- };
-
- gpr {
- compatible = "qcom,gpr";
- qcom,glink-channels = "adsp_apps";
- qcom,domain = <GPR_DOMAIN_ID_ADSP>;
- qcom,intents = <512 20>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- q6apm: service@1 {
- compatible = "qcom,q6apm";
- reg = <GPR_APM_MODULE_IID>;
- #sound-dai-cells = <0>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6apmdai: dais {
- compatible = "qcom,q6apm-dais";
- iommus = <&apps_smmu 0x1001 0x80>,
- <&apps_smmu 0x1061 0x0>;
- };
-
- q6apmbedai: bedais {
- compatible = "qcom,q6apm-lpass-dais";
- #sound-dai-cells = <1>;
- };
- };
-
- q6prm: service@2 {
- compatible = "qcom,q6prm";
- reg = <GPR_PRM_MODULE_IID>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6prmcc: clock-controller {
- compatible = "qcom,q6prm-lpass-clocks";
- #clock-cells = <2>;
- };
- };
- };
- };
- };
-
nsp_noc: interconnect@320c0000 {
compatible = "qcom,sm8550-nsp-noc";
reg = <0 0x320c0000 0 0xe080>;
@@ -4716,7 +4732,7 @@
remoteproc_cdsp: remoteproc@32300000 {
compatible = "qcom,sm8550-cdsp-pas";
- reg = <0x0 0x32300000 0x0 0x1400000>;
+ reg = <0x0 0x32300000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
index f00bdff4280a..d0912735b54e 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
@@ -1113,7 +1113,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&swr0 {
diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
index 0db2cb03f252..76ef43c10f77 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
@@ -730,7 +730,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&swr0 {
diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
index c5e8c3c2df91..71033fba21b5 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
@@ -1041,7 +1041,7 @@
};
&sleep_clk {
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
};
&spi4 {
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index 25e47505adcb..86684cb9a932 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -365,6 +365,40 @@
};
};
+ ete0 {
+ compatible = "arm,embedded-trace-extension";
+
+ cpu = <&cpu0>;
+
+ out-ports {
+ port {
+ ete0_out_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_in_ete0>;
+ };
+ };
+ };
+ };
+
+ funnel-ete {
+ compatible = "arm,coresight-static-funnel";
+
+ in-ports {
+ port {
+ funnel_ete_in_ete0: endpoint {
+ remote-endpoint = <&ete0_out_funnel_ete>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_ete_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_funnel_ete>;
+ };
+ };
+ };
+ };
+
firmware {
scm: scm {
compatible = "qcom,scm-sm8650", "qcom,scm";
@@ -2233,7 +2267,8 @@
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -2241,7 +2276,8 @@
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
clocks = <&gcc GCC_PCIE_0_AUX_CLK>,
<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
@@ -2365,7 +2401,8 @@
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0",
"msi1",
"msi2",
@@ -2373,7 +2410,8 @@
"msi4",
"msi5",
"msi6",
- "msi7";
+ "msi7",
+ "global";
clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
<&gcc GCC_PCIE_1_CFG_AHB_CLK>,
@@ -2636,6 +2674,10 @@
qcom,gmu = <&gmu>;
#cooling-cells = <2>;
+ interconnects = <&gem_noc MASTER_GFX3D QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "gfx-mem";
+
status = "disabled";
zap-shader {
@@ -2649,56 +2691,67 @@
opp-231000000 {
opp-hz = /bits/ 64 <231000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D2>;
+ opp-peak-kBps = <2136718>;
};
opp-310000000 {
opp-hz = /bits/ 64 <310000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ opp-peak-kBps = <2136718>;
};
opp-366000000 {
opp-hz = /bits/ 64 <366000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D0>;
+ opp-peak-kBps = <6074218>;
};
opp-422000000 {
opp-hz = /bits/ 64 <422000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ opp-peak-kBps = <8171875>;
};
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L1>;
+ opp-peak-kBps = <8171875>;
};
opp-578000000 {
opp-hz = /bits/ 64 <578000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ opp-peak-kBps = <8171875>;
};
opp-629000000 {
opp-hz = /bits/ 64 <629000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L0>;
+ opp-peak-kBps = <10687500>;
};
opp-680000000 {
opp-hz = /bits/ 64 <680000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ opp-peak-kBps = <12449218>;
};
opp-720000000 {
opp-hz = /bits/ 64 <720000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ opp-peak-kBps = <12449218>;
};
opp-770000000 {
opp-hz = /bits/ 64 <770000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ opp-peak-kBps = <12449218>;
};
opp-834000000 {
opp-hz = /bits/ 64 <834000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ opp-peak-kBps = <14398437>;
};
};
};
@@ -2853,7 +2906,7 @@
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sm8650-mpss-pas";
- reg = <0 0x04080000 0 0x4040>;
+ reg = <0x0 0x04080000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -2904,6 +2957,154 @@
};
};
+ remoteproc_adsp: remoteproc@6800000 {
+ compatible = "qcom,sm8650-adsp-pas";
+ reg = <0x0 0x06800000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx",
+ "lmx";
+
+ memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ remoteproc_adsp_glink: glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ qcom,remote-pid = <2>;
+
+ label = "lpass";
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+
+ label = "adsp";
+
+ qcom,non-secure-domain;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+
+ iommus = <&apps_smmu 0x1003 0x80>,
+ <&apps_smmu 0x1043 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+
+ iommus = <&apps_smmu 0x1004 0x80>,
+ <&apps_smmu 0x1044 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+
+ iommus = <&apps_smmu 0x1005 0x80>,
+ <&apps_smmu 0x1045 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+
+ iommus = <&apps_smmu 0x1006 0x80>,
+ <&apps_smmu 0x1046 0x20>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+
+ iommus = <&apps_smmu 0x1007 0x40>,
+ <&apps_smmu 0x1067 0x0>,
+ <&apps_smmu 0x1087 0x0>;
+ dma-coherent;
+ };
+ };
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1001 0x80>,
+ <&apps_smmu 0x1061 0x0>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+ };
+ };
+
lpass_wsa2macro: codec@6aa0000 {
compatible = "qcom,sm8650-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
reg = <0 0x06aa0000 0 0x1000>;
@@ -3455,11 +3656,8 @@
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
- &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
- <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
- interconnect-names = "mdp0-mem",
- "mdp1-mem";
+ interconnect-names = "mdp0-mem";
power-domains = <&dispcc MDSS_GDSC>;
@@ -4854,6 +5052,138 @@
};
};
+ funnel@10042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x10042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+
+ funnel_in1_in_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_out_funnel_in1>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_in1_out_funnel_qdss: endpoint {
+ remote-endpoint = <&funnel_qdss_in_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@10045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x10045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ funnel_qdss_in_funnel_in1: endpoint {
+ remote-endpoint = <&funnel_in1_out_funnel_qdss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_qdss_out_funnel_aoss: endpoint {
+ remote-endpoint = <&funnel_aoss_in_funnel_qdss>;
+ };
+ };
+ };
+ };
+
+ funnel@10b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x10b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+
+ funnel_aoss_in_funnel_qdss: endpoint {
+ remote-endpoint = <&funnel_qdss_out_funnel_aoss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_aoss_out_tmc_etf: endpoint {
+ remote-endpoint = <&tmc_etf_in_funnel_aoss>;
+ };
+ };
+ };
+ };
+
+ tmc@10b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+
+ reg = <0x0 0x10b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmc_etf_in_funnel_aoss: endpoint {
+ remote-endpoint = <&funnel_aoss_out_tmc_etf>;
+ };
+ };
+ };
+ };
+
+ funnel@13810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+
+ reg = <0x0 0x13810000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ funnel_apss_in_funnel_ete: endpoint {
+ remote-endpoint = <&funnel_ete_out_funnel_apss>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel_apss_out_funnel_in1: endpoint {
+ remote-endpoint = <&funnel_in1_in_funnel_apss>;
+ };
+ };
+ };
+ };
+
apps_smmu: iommu@15000000 {
compatible = "qcom,sm8650-smmu-500", "qcom,smmu-500", "arm,mmu-500";
reg = <0 0x15000000 0 0x100000>;
@@ -5322,154 +5652,6 @@
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
- remoteproc_adsp: remoteproc@30000000 {
- compatible = "qcom,sm8650-adsp-pas";
- reg = <0 0x30000000 0 0x100>;
-
- interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog",
- "fatal",
- "ready",
- "handover",
- "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
-
- power-domains = <&rpmhpd RPMHPD_LCX>,
- <&rpmhpd RPMHPD_LMX>;
- power-domain-names = "lcx",
- "lmx";
-
- memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_adsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- remoteproc_adsp_glink: glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
-
- mboxes = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- qcom,remote-pid = <2>;
-
- label = "lpass";
-
- fastrpc {
- compatible = "qcom,fastrpc";
-
- qcom,glink-channels = "fastrpcglink-apps-dsp";
-
- label = "adsp";
-
- qcom,non-secure-domain;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
-
- iommus = <&apps_smmu 0x1003 0x80>,
- <&apps_smmu 0x1043 0x20>;
- dma-coherent;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
-
- iommus = <&apps_smmu 0x1004 0x80>,
- <&apps_smmu 0x1044 0x20>;
- dma-coherent;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
-
- iommus = <&apps_smmu 0x1005 0x80>,
- <&apps_smmu 0x1045 0x20>;
- dma-coherent;
- };
-
- compute-cb@6 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <6>;
-
- iommus = <&apps_smmu 0x1006 0x80>,
- <&apps_smmu 0x1046 0x20>;
- dma-coherent;
- };
-
- compute-cb@7 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <7>;
-
- iommus = <&apps_smmu 0x1007 0x40>,
- <&apps_smmu 0x1067 0x0>,
- <&apps_smmu 0x1087 0x0>;
- dma-coherent;
- };
- };
-
- gpr {
- compatible = "qcom,gpr";
- qcom,glink-channels = "adsp_apps";
- qcom,domain = <GPR_DOMAIN_ID_ADSP>;
- qcom,intents = <512 20>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- q6apm: service@1 {
- compatible = "qcom,q6apm";
- reg = <GPR_APM_MODULE_IID>;
- #sound-dai-cells = <0>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6apmbedai: bedais {
- compatible = "qcom,q6apm-lpass-dais";
- #sound-dai-cells = <1>;
- };
-
- q6apmdai: dais {
- compatible = "qcom,q6apm-dais";
- iommus = <&apps_smmu 0x1001 0x80>,
- <&apps_smmu 0x1061 0x0>;
- };
- };
-
- q6prm: service@2 {
- compatible = "qcom,q6prm";
- reg = <GPR_PRM_MODULE_IID>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6prmcc: clock-controller {
- compatible = "qcom,q6prm-lpass-clocks";
- #clock-cells = <2>;
- };
- };
- };
- };
- };
-
nsp_noc: interconnect@320c0000 {
compatible = "qcom,sm8650-nsp-noc";
reg = <0 0x320c0000 0 0xf080>;
@@ -5481,7 +5663,7 @@
remoteproc_cdsp: remoteproc@32300000 {
compatible = "qcom,sm8650-cdsp-pas";
- reg = <0 0x32300000 0 0x1400000>;
+ reg = <0x0 0x32300000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
@@ -5622,7 +5804,7 @@
/* note: secure cb9 in downstream */
- compute-cb@10 {
+ compute-cb@12 {
compatible = "qcom,fastrpc-compute-cb";
reg = <12>;
@@ -5632,7 +5814,7 @@
dma-coherent;
};
- compute-cb@11 {
+ compute-cb@13 {
compatible = "qcom,fastrpc-compute-cb";
reg = <13>;
@@ -5642,7 +5824,7 @@
dma-coherent;
};
- compute-cb@12 {
+ compute-cb@14 {
compatible = "qcom,fastrpc-compute-cb";
reg = <14>;
diff --git a/arch/arm64/boot/dts/qcom/sm8750-mtp.dts b/arch/arm64/boot/dts/qcom/sm8750-mtp.dts
new file mode 100644
index 000000000000..9e3aacad7bda
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8750-mtp.dts
@@ -0,0 +1,794 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8750.dtsi"
+#include "pm8010.dtsi"
+#include "pm8550.dtsi"
+#define PM8550VE_SID 8
+#include "pm8550ve.dtsi"
+#include "pmd8028.dtsi"
+#include "pmih0108.dtsi"
+#include "pmk8550.dtsi"
+#include "pmr735d_a.dtsi"
+#include "sm8750-pmics.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8750 MTP";
+ compatible = "qcom,sm8750-mtp", "qcom,sm8750";
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ clock-frequency = <76800000>;
+ #clock-cells = <0>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ bi_tcxo_div2: bi-tcxo-div2-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+
+ bi_tcxo_ao_div2: bi-tcxo-ao-div2-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK_A>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&volume_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s3g_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l3-supply = <&vreg_s7i_1p2>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob1>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l11-supply = <&vreg_s7i_1p2>;
+ vdd-l12-supply = <&vreg_s3g_1p8>;
+ vdd-l15-supply = <&vreg_s3g_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ qcom,pmic-id = "b";
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3048000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p1: ldo5 {
+ regulator-name = "vreg_l5b_3p1";
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3148000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_1p8: ldo7 {
+ regulator-name = "vreg_l7b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+
+ };
+
+ vreg_l8b_1p8: ldo8 {
+ regulator-name = "vreg_l8b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b_1p0: ldo11 {
+ regulator-name = "vreg_l11b_1p0";
+ regulator-min-microvolt = <1064000>;
+ regulator-max-microvolt = <1292000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p8: ldo12 {
+ regulator-name = "vreg_l12b_1p8";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p2: ldo14 {
+ regulator-name = "vreg_l14b_3p2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p8: ldo16 {
+ regulator-name = "vreg_l16b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s7i_1p2>;
+ vdd-l2-supply = <&vreg_s1d_0p97>;
+ vdd-l3-supply = <&vreg_s1d_0p97>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "d";
+
+ vreg_s1d_0p97: smps1 {
+ regulator-name = "vreg_s1d_0p97";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3d_1p2: smps3 {
+ regulator-name = "vreg_s3d_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4d_0p85: smps4 {
+ regulator-name = "vreg_s4d_0p85";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1036000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1d_1p2: ldo1 {
+ regulator-name = "vreg_l1d_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p88: ldo2 {
+ regulator-name = "vreg_l2d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_0p88: ldo3 {
+ regulator-name = "vreg_l3d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-supply = <&vreg_s3g_1p8>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "f";
+
+ vreg_s5f_0p5: smps5 {
+ regulator-name = "vreg_s5f_0p5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_0p88: ldo1 {
+ regulator-name = "vreg_l1f_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p2: ldo2 {
+ regulator-name = "vreg_l2f_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p8: ldo3 {
+ regulator-name = "vreg_l3f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s3g_1p8>;
+ vdd-l3-supply = <&vreg_s7i_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "g";
+
+ vreg_s1g_0p5: smps1 {
+ regulator-name = "vreg_s1g_0p5";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3g_1p8: smps3 {
+ regulator-name = "vreg_s3g_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4g_0p75: smps4 {
+ regulator-name = "vreg_s4g_0p75";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1g_0p91: ldo1 {
+ regulator-name = "vreg_l1g_0p91";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <936000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2g_1p8: ldo2 {
+ regulator-name = "vreg_l2g_1p8";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1860000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3g_1p2: ldo3 {
+ regulator-name = "vreg_l3g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s7i_1p2>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-supply = <&vreg_s1d_0p97>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "i";
+
+ vreg_s7i_1p2: smps7 {
+ regulator-name = "vreg_s7i_1p2";
+ regulator-min-microvolt = <1224000>;
+ regulator-max-microvolt = <1340000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s8i_0p9: smps8 {
+ regulator-name = "vreg_s8i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <972000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p2: ldo1 {
+ regulator-name = "vreg_l1i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p88: ldo3 {
+ regulator-name = "vreg_l3i_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "j";
+
+ vreg_s2j_1p1: smps2 {
+ regulator-name = "vreg_s2j_1p1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3j_1p1: smps3 {
+ regulator-name = "vreg_s3j_1p1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p91: ldo1 {
+ regulator-name = "vreg_l1j_0p91";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "m";
+
+ vdd-l1-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-l4-supply = <&vreg_bob2>;
+ vdd-l5-supply = <&vreg_s3g_1p8>;
+ vdd-l6-supply = <&vreg_bob1>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1m_1p1: ldo1 {
+ regulator-name = "vreg_l1m_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2m_1p056: ldo2 {
+ regulator-name = "vreg_l2m_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3m_2p8: ldo3 {
+ regulator-name = "vreg_l3m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4m_2p8: ldo4 {
+ regulator-name = "vreg_l4m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5m_1p8: ldo5 {
+ regulator-name = "vreg_l5m_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6m_2p8: ldo6 {
+ regulator-name = "vreg_l6m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7m_2p96: ldo7 {
+ regulator-name = "vreg_l7m_2p96";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "n";
+
+ vdd-l1-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-l4-supply = <&vreg_s3g_1p8>;
+ vdd-l5-supply = <&vreg_bob2>;
+ vdd-l6-supply = <&vreg_bob2>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1n_1p1: ldo1 {
+ regulator-name = "vreg_l1n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2n_1p1: ldo2 {
+ regulator-name = "vreg_l2n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3n_1p8: ldo3 {
+ regulator-name = "vreg_l3n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4n_1p8: ldo4 {
+ regulator-name = "vreg_l4n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5n_2p8: ldo5 {
+ regulator-name = "vreg_l5n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6n_2p8: ldo6 {
+ regulator-name = "vreg_l6n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7n_3p3: ldo7 {
+ regulator-name = "vreg_l7n_3p3";
+ regulator-min-microvolt = <3304000>;
+ regulator-max-microvolt = <3304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&pm8550_flash {
+ status = "okay";
+
+ led-0 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_YELLOW>;
+ led-sources = <1>, <4>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <2000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <0>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <2>, <3>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <2000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <1>;
+ };
+};
+
+&pm8550_gpios {
+ volume_up_n: volume-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-pull-up;
+ input-enable;
+ power-source = <1>;
+ };
+};
+
+&pm8550_pwm {
+ status = "okay";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&pmih0108_eusb2_repeater {
+ status = "okay";
+
+ vdd18-supply = <&vreg_l15b_1p8>;
+ vdd3-supply = <&vreg_l5b_3p1>;
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&tlmm {
+ /* reserved for secure world */
+ gpio-reserved-ranges = <36 4>, <74 1>;
+};
+
+&uart7 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8750-pmics.dtsi b/arch/arm64/boot/dts/qcom/sm8750-pmics.dtsi
new file mode 100644
index 000000000000..6eb8d78937c3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8750-pmics.dtsi
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/ {
+ thermal-zones {
+ pm8550ve-d-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_d_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550ve-f-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_f_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550ve-g-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550ve_g_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+
+ pm8550vs-j-thermal {
+ polling-delay-passive = <100>;
+
+ thermal-sensors = <&pm8550vs_j_temp_alarm>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ /* PM8550VE */
+ pm8550ve_d: pmic@3 {
+ compatible = "qcom,pm8550ve", "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_d_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x3 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_d_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_d_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pm8550ve_f: pmic@5 {
+ compatible = "qcom,pm8550ve", "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_f_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x5 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_f_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_f_gpios 0 0 6>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pm8550ve_g: pmic@6 {
+ compatible = "qcom,pm8550ve", "qcom,spmi-pmic";
+ reg = <0x6 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550ve_g_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x6 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550ve_g_gpios: gpio@8800 {
+ compatible = "qcom,pm8550ve-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550ve_g_gpios 0 0 8>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ /* PM8550VS */
+ pm8550vs_j: pmic@9 {
+ compatible = "qcom,pm8550vs", "qcom,spmi-pmic";
+ reg = <0x9 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8550vs_j_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x9 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm8550vs_j_gpios: gpio@8800 {
+ compatible = "qcom,pm8550vs-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm8550vs_j_gpios 0 0 6>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8750-qrd.dts b/arch/arm64/boot/dts/qcom/sm8750-qrd.dts
new file mode 100644
index 000000000000..f77efab0aef9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8750-qrd.dts
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8750.dtsi"
+#include "pm8010.dtsi"
+#include "pm8550.dtsi"
+#define PM8550VE_SID 8
+#include "pm8550ve.dtsi"
+#include "pmd8028.dtsi"
+#include "pmih0108.dtsi"
+#include "pmk8550.dtsi"
+#include "pmr735d_a.dtsi"
+#include "sm8750-pmics.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8750 QRD";
+ compatible = "qcom,sm8750-qrd", "qcom,sm8750";
+ chassis-type = "handset";
+
+ aliases {
+ serial0 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ clock-frequency = <76800000>;
+ #clock-cells = <0>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ bi_tcxo_div2: bi-tcxo-div2-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+
+ bi_tcxo_ao_div2: bi-tcxo-ao-div2-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK_A>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&volume_up_n>;
+ pinctrl-names = "default";
+
+ key-volume-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8550_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s3g_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l3-supply = <&vreg_s7i_1p2>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob1>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l11-supply = <&vreg_s7i_1p2>;
+ vdd-l12-supply = <&vreg_s3g_1p8>;
+ vdd-l15-supply = <&vreg_s3g_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ qcom,pmic-id = "b";
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3048000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p1: ldo5 {
+ regulator-name = "vreg_l5b_3p1";
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3148000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_1p8: ldo7 {
+ regulator-name = "vreg_l7b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_1p8: ldo8 {
+ regulator-name = "vreg_l8b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11b_1p0: ldo11 {
+ regulator-name = "vreg_l11b_1p0";
+ regulator-min-microvolt = <1064000>;
+ regulator-max-microvolt = <1292000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p8: ldo12 {
+ regulator-name = "vreg_l12b_1p8";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p2: ldo14 {
+ regulator-name = "vreg_l14b_3p2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p8: ldo16 {
+ regulator-name = "vreg_l16b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s7i_1p2>;
+ vdd-l2-supply = <&vreg_s1d_0p97>;
+ vdd-l3-supply = <&vreg_s1d_0p97>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "d";
+
+ vreg_s1d_0p97: smps1 {
+ regulator-name = "vreg_s1d_0p97";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3d_1p2: smps3 {
+ regulator-name = "vreg_s3d_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4d_0p85: smps4 {
+ regulator-name = "vreg_s4d_0p85";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1036000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1d_1p2: ldo1 {
+ regulator-name = "vreg_l1d_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p88: ldo2 {
+ regulator-name = "vreg_l2d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_0p88: ldo3 {
+ regulator-name = "vreg_l3d_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-supply = <&vreg_s3g_1p8>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "f";
+
+ vreg_s5f_0p5: smps5 {
+ regulator-name = "vreg_s5f_0p5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_0p88: ldo1 {
+ regulator-name = "vreg_l1f_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p2: ldo2 {
+ regulator-name = "vreg_l2f_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p8: ldo3 {
+ regulator-name = "vreg_l3f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ };
+
+ regulators-3 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s3g_1p8>;
+ vdd-l3-supply = <&vreg_s7i_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "g";
+
+ vreg_s1g_0p5: smps1 {
+ regulator-name = "vreg_s1g_0p5";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <700000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3g_1p8: smps3 {
+ regulator-name = "vreg_s3g_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s4g_0p75: smps4 {
+ regulator-name = "vreg_s4g_0p75";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1g_0p91: ldo1 {
+ regulator-name = "vreg_l1g_0p91";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <936000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2g_1p8: ldo2 {
+ regulator-name = "vreg_l2g_1p8";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1860000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3g_1p2: ldo3 {
+ regulator-name = "vreg_l3g_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s7i_1p2>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-supply = <&vreg_s1d_0p97>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "i";
+
+ vreg_s7i_1p2: smps7 {
+ regulator-name = "vreg_s7i_1p2";
+ regulator-min-microvolt = <1224000>;
+ regulator-max-microvolt = <1340000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s8i_0p9: smps8 {
+ regulator-name = "vreg_s8i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <972000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p2: ldo1 {
+ regulator-name = "vreg_l1i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p88: ldo3 {
+ regulator-name = "vreg_l3i_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-5 {
+ compatible = "qcom,pm8550vs-rpmh-regulators";
+
+ vdd-l1-supply = <&vreg_s1d_0p97>;
+ vdd-l2-supply = <&vreg_s7i_1p2>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+
+ qcom,pmic-id = "j";
+
+ vreg_s2j_1p1: smps2 {
+ regulator-name = "vreg_s2j_1p1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s3j_1p1: smps3 {
+ regulator-name = "vreg_s3j_1p1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p91: ldo1 {
+ regulator-name = "vreg_l1j_0p91";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "m";
+
+ vdd-l1-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-l4-supply = <&vreg_s3g_1p8>;
+ vdd-l5-supply = <&vreg_s3g_1p8>;
+ vdd-l6-supply = <&vreg_bob1>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1m_1p1: ldo1 {
+ regulator-name = "vreg_l1m_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2m_1p056: ldo2 {
+ regulator-name = "vreg_l2m_1p056";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3m_2p8: ldo3 {
+ regulator-name = "vreg_l3m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4m_2p8: ldo4 {
+ regulator-name = "vreg_l4m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5m_1p8: ldo5 {
+ regulator-name = "vreg_l5m_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6m_2p8: ldo6 {
+ regulator-name = "vreg_l6m_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7m_2p96: ldo7 {
+ regulator-name = "vreg_l7m_2p96";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8010-rpmh-regulators";
+ qcom,pmic-id = "n";
+
+ vdd-l1-l2-supply = <&vreg_s7i_1p2>;
+ vdd-l3-l4-supply = <&vreg_s7i_1p2>;
+ vdd-l5-supply = <&vreg_bob2>;
+ vdd-l6-supply = <&vreg_bob2>;
+ vdd-l7-supply = <&vreg_bob1>;
+
+ vreg_l1n_1p1: ldo1 {
+ regulator-name = "vreg_l1n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2n_1p1: ldo2 {
+ regulator-name = "vreg_l2n_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ regulator-allowed-modes = <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3n_1p8: ldo3 {
+ regulator-name = "vreg_l3n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4n_1p8: ldo4 {
+ regulator-name = "vreg_l4n_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5n_2p8: ldo5 {
+ regulator-name = "vreg_l5n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6n_2p8: ldo6 {
+ regulator-name = "vreg_l6n_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7n_3p3: ldo7 {
+ regulator-name = "vreg_l7n_3p3";
+ regulator-min-microvolt = <3304000>;
+ regulator-max-microvolt = <3304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&pm8550_flash {
+ status = "okay";
+
+ led-0 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_YELLOW>;
+ led-sources = <1>, <4>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <2000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <0>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-sources = <2>, <3>;
+ led-max-microamp = <500000>;
+ flash-max-microamp = <2000000>;
+ flash-max-timeout-us = <1280000>;
+ function-enumerator = <1>;
+ };
+};
+
+&pm8550_gpios {
+ volume_up_n: volume-up-n-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-pull-up;
+ input-enable;
+ power-source = <1>;
+ };
+};
+
+&pm8550_pwm {
+ status = "okay";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ linux,code = <KEY_VOLUMEDOWN>;
+
+ status = "okay";
+};
+
+&pmih0108_eusb2_repeater {
+ status = "okay";
+
+ vdd18-supply = <&vreg_l15b_1p8>;
+ vdd3-supply = <&vreg_l5b_3p1>;
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&tlmm {
+ /* reserved for secure world */
+ gpio-reserved-ranges = <36 4>, <74 1>;
+};
+
+&uart7 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8750.dtsi b/arch/arm64/boot/dts/qcom/sm8750.dtsi
new file mode 100644
index 000000000000..3bbd7d18598e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8750.dtsi
@@ -0,0 +1,2907 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,sm8750-gcc.h>
+#include <dt-bindings/clock/qcom,sm8750-tcsr.h>
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
+#include <dt-bindings/interconnect/qcom,sm8750-rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom,rpmhpd.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd0>;
+ power-domain-names = "psci";
+
+ l2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ };
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd1>;
+ power-domain-names = "psci";
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd2>;
+ power-domain-names = "psci";
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd3>;
+ power-domain-names = "psci";
+ };
+
+ cpu4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd4>;
+ power-domain-names = "psci";
+ };
+
+ cpu5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&l2_0>;
+ power-domains = <&cpu_pd5>;
+ power-domain-names = "psci";
+ };
+
+ cpu6: cpu@10000 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x10000>;
+ enable-method = "psci";
+ next-level-cache = <&l2_1>;
+ power-domains = <&cpu_pd6>;
+ power-domain-names = "psci";
+
+ l2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ };
+ };
+
+ cpu7: cpu@10100 {
+ device_type = "cpu";
+ compatible = "qcom,oryon";
+ reg = <0x0 0x10100>;
+ enable-method = "psci";
+ next-level-cache = <&l2_1>;
+ power-domains = <&cpu_pd7>;
+ power-domain-names = "psci";
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+
+ core5 {
+ cpu = <&cpu5>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu6>;
+ };
+
+ core1 {
+ cpu = <&cpu7>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ cluster0_c4: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "ret";
+ arm,psci-suspend-param = <0x00000004>;
+ entry-latency-us = <93>;
+ exit-latency-us = <129>;
+ min-residency-us = <560>;
+ };
+
+ cluster1_c4: cpu-sleep-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "ret";
+ arm,psci-suspend-param = <0x00000004>;
+ entry-latency-us = <172>;
+ exit-latency-us = <130>;
+ min-residency-us = <686>;
+ };
+
+ };
+
+ domain-idle-states {
+ cluster_cl5: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x01000054>;
+ entry-latency-us = <2150>;
+ exit-latency-us = <1983>;
+ min-residency-us = <9144>;
+ };
+
+ domain_ss3: domain-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x0200c354>;
+ entry-latency-us = <2800>;
+ exit-latency-us = <4400>;
+ min-residency-us = <10150>;
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm-sm8750", "qcom,scm";
+ interconnects = <&aggre2_noc MASTER_CRYPTO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ };
+ };
+
+ clk_virt: interconnect-0 {
+ compatible = "qcom,sm8750-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect-1 {
+ compatible = "qcom,sm8750-mc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ memory@a0000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0xa0000000 0x0 0x0>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cpu_pd0: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd1: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd2: power-domain-cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd3: power-domain-cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd4: power-domain-cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd5: power-domain-cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster0_c4>;
+ };
+
+ cpu_pd6: power-domain-cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster1_c4>;
+ };
+
+ cpu_pd7: power-domain-cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ domain-idle-states = <&cluster1_c4>;
+ };
+
+ cluster_pd: power-domain-cluster {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&cluster_cl5>;
+ power-domains = <&system_pd>;
+ };
+
+ system_pd: power-domain-system {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&domain_ss3>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gunyah_hyp_mem: gunyah-hyp@80000000 {
+ reg = <0x0 0x80000000 0x0 0xe00000>;
+ no-map;
+ };
+
+ cpusys_vm_mem: cpusys-vm-mem@80e00000 {
+ reg = <0x0 0x80e00000 0x0 0x40000>;
+ no-map;
+ };
+
+ cpucp_mem: cpucp@81200000 {
+ reg = <0x0 0x81200000 0x0 0x200000>;
+ no-map;
+ };
+
+ xbl_dtlog_mem: xbl-dtlog@81a00000 {
+ reg = <0x0 0x81a00000 0x0 0x40000>;
+ no-map;
+ };
+
+ aop_image_mem: aop-image@81c00000 {
+ reg = <0x0 0x81c00000 0x0 0x60000>;
+ no-map;
+ };
+
+ aop_cmd_db_mem: aop-cmd-db@81c60000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x81c60000 0x0 0x20000>;
+ no-map;
+ };
+
+ /* Merged aop_config, tme_crash_dump, tme_log and uefi_log regions */
+ aop_tme_uefi_merged_mem: aop-tme-uefi-merged@81c80000 {
+ reg = <0x0 0x81c80000 0x0 0x74000>;
+ no-map;
+ };
+
+ /* Secdata region can be reused by apps */
+
+ smem_mem: smem@81d00000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x81d00000 0x0 0x200000>;
+ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+
+ pdp_ns_shared_mem: pdp-ns-shared@81f00000 {
+ reg = <0x0 0x81f00000 0x0 0x100000>;
+ no-map;
+ };
+
+ cpucp_scandump_mem: cpucp-scandump@82000000 {
+ reg = <0x0 0x82000000 0x0 0x380000>;
+ no-map;
+ };
+
+ adsp_mhi_mem: adsp-mhi@82380000 {
+ reg = <0x0 0x82380000 0x0 0x20000>;
+ no-map;
+ };
+
+ soccp_sdi_mem: soccp-sdi@823a0000 {
+ reg = <0x0 0x823a0000 0x0 0x40000>;
+ no-map;
+ };
+
+ pmic_minii_dump_mem: pmic-minii-dump@823e0000 {
+ reg = <0x0 0x823e0000 0x0 0x80000>;
+ no-map;
+ };
+
+ pvmfw_mem: pvmfw@824a0000 {
+ reg = <0x0 0x824a0000 0x0 0x100000>;
+ no-map;
+ };
+
+ global_sync_mem: global-sync@82600000 {
+ reg = <0x0 0x82600000 0x0 0x100000>;
+ no-map;
+ };
+
+ tz_stat_mem: tz-stat@82700000 {
+ reg = <0x0 0x82700000 0x0 0x100000>;
+ no-map;
+ };
+
+ qdss_mem: qdss@82800000 {
+ reg = <0x0 0x82800000 0x0 0x2000000>;
+ no-map;
+ };
+
+ dsm_partition_1_mem: dsm-partition-1@84a00000 {
+ reg = <0x0 0x84a00000 0x0 0x4900000>;
+ no-map;
+ };
+
+ dsm_partition_2_mem: dsm-partition-2@89300000 {
+ reg = <0x0 0x89300000 0x0 0xa80000>;
+ no-map;
+ };
+
+ mpss_mem: mpss@8ba00000 {
+ reg = <0x0 0x8ba00000 0x0 0xf600000>;
+ no-map;
+ };
+
+ q6_mpss_dtb_mem: q6-mpss-dtb@9b000000 {
+ reg = <0x0 0x9b000000 0x0 0x80000>;
+ no-map;
+ };
+
+ ipa_fw_mem: ipa-fw@9b080000 {
+ reg = <0x0 0x9b080000 0x0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: ipa-gsi@9b090000 {
+ reg = <0x0 0x9b090000 0x0 0xa000>;
+ no-map;
+ };
+
+ gpu_micro_code_mem: gpu-micro-code@9b09a000 {
+ reg = <0x0 0x9b09a000 0x0 0x2000>;
+ no-map;
+ };
+
+ spss_region_mem: spss@9b0a0000 {
+ reg = <0x0 0x9b0a0000 0x0 0x1e0000>;
+ no-map;
+ };
+
+ /* First part of the "SPU secure shared memory" region */
+ spu_tz_shared_mem: spu-tz-shared@9b280000 {
+ reg = <0x0 0x9b280000 0x0 0x40000>;
+ no-map;
+ };
+
+ /* Second part of the "SPU secure shared memory" region */
+ spu_modem_shared_mem: spu-modem-shared@9b2c0000 {
+ reg = <0x0 0x9b2c0000 0x0 0x40000>;
+ no-map;
+ };
+
+ camera_mem: camera@9b300000 {
+ reg = <0x0 0x9b300000 0x0 0x800000>;
+ no-map;
+ };
+
+ camera_2_mem: camera-2@9bb00000 {
+ reg = <0x0 0x9bb00000 0x0 0x800000>;
+ no-map;
+ };
+
+ video_mem: video@9c300000 {
+ reg = <0x0 0x9c300000 0x0 0x800000>;
+ no-map;
+ };
+
+ cvp_mem: cvp@9cb00000 {
+ reg = <0x0 0x9cb00000 0x0 0x700000>;
+ no-map;
+ };
+
+ cdsp_mem: cdsp@9d200000 {
+ reg = <0x0 0x9d200000 0x0 0x1900000>;
+ no-map;
+ };
+
+ q6_cdsp_dtb_mem: q6-cdsp-dtb@9eb00000 {
+ reg = <0x0 0x9eb00000 0x0 0x80000>;
+ no-map;
+ };
+
+ soccp_mem: soccp@9ec00000 {
+ reg = <0x0 0x9ec00000 0x0 0x180000>;
+ no-map;
+ };
+
+ q6_adsp_dtb_mem: q6-adsp-dtb@9ed80000 {
+ reg = <0x0 0x9ed80000 0x0 0x80000>;
+ no-map;
+ };
+
+ adspslpi_mem: adspslpi@9ee00000 {
+ reg = <0x0 0x9ee00000 0x0 0x3a80000>;
+ no-map;
+ };
+
+ xbl_ramdump_mem: xbl-ramdump@b8000000 {
+ reg = <0x0 0xb8000000 0x0 0x1c0000>;
+ no-map;
+ };
+
+ hwfence_shbuf: hwfence-shbuf@d4e23000 {
+ reg = <0x0 0xd4e23000 0x0 0x2dd000>;
+ no-map;
+ };
+
+ /* Merged tz_reserved, xbl_sc, and qtee regions */
+ tz_merged_mem: tz-merged@d8000000 {
+ reg = <0x0 0xd8000000 0x0 0x600000>;
+ no-map;
+ };
+
+ trust_ui_vm_mem: trust-ui-vm@f3800000 {
+ reg = <0x0 0xf3800000 0x0 0x4400000>;
+ no-map;
+ };
+
+ oem_vm_mem: oem-vm@f7c00000 {
+ reg = <0x0 0xf7c00000 0x0 0x4c00000>;
+ no-map;
+ };
+
+ llcc_lpi_mem: llcc-lpi@ff800000 {
+ reg = <0x0 0xff800000 0x0 0x800000>;
+ no-map;
+ };
+ };
+
+ soc: soc@0 {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-ranges = <0 0 0 0 0x10 0>;
+ ranges = <0 0 0 0 0x10 0>;
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,sm8750-gcc";
+ reg = <0x0 0x00100000 0x0 0x1f4200>;
+
+ clocks = <&bi_tcxo_div2>,
+ <0>,
+ <&sleep_clk>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ gpi_dma2: dma-controller@800000 {
+ compatible = "qcom,sm8750-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0x00800000 0x0 0x60000>;
+
+ interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 590 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 593 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 596 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 599 IRQ_TYPE_LEVEL_HIGH>;
+
+ dma-channels = <12>;
+ dma-channel-mask = <0x1e>;
+ #dma-cells = <3>;
+
+ iommus = <&apps_smmu 0x436 0x0>;
+
+ status = "disabled";
+ };
+
+ qupv3_2: geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x008c0000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+
+ iommus = <&apps_smmu 0x423 0x0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ i2c8: i2c@880000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00880000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c8_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi8: spi@880000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00880000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi8_data_clk>, <&qup_spi8_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c9: i2c@884000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00884000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c9_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi9: spi@884000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00884000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi9_data_clk>, <&qup_spi9_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c10: i2c@888000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00888000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c10_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi10: spi@888000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00888000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi10_data_clk>, <&qup_spi10_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c11: i2c@88c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x0088c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c11_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi11: spi@88c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x0088c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi11_data_clk>, <&qup_spi11_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c12: i2c@890000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00890000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c12_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi12: spi@890000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00890000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi12_data_clk>, <&qup_spi12_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c13: i2c@894000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00894000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c13_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi13: spi@894000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00894000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi13_data_clk>, <&qup_spi13_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ uart14: serial@898000 {
+ compatible = "qcom,geni-uart";
+ reg = <0x0 0x00898000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&qup_uart14_default>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+ };
+
+ i2c15: i2c@89c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x0089c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S7_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 7 QCOM_GPI_I2C>,
+ <&gpi_dma2 1 7 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c15_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi15: spi@89c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x0089c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP2_S7_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre2_noc MASTER_QUP_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma2 0 7 QCOM_GPI_SPI>,
+ <&gpi_dma2 1 7 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi15_data_clk>, <&qup_spi15_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ i2c_master_hub_0: geniqup@9c0000 {
+ compatible = "qcom,geni-se-i2c-master-hub";
+ reg = <0x0 0x009c0000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S_AHB_CLK>;
+ clock-names = "s-ahb";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ i2c_hub_0: i2c@980000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00980000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S0_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c0_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_1: i2c@984000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00984000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S1_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c1_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_2: i2c@988000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00988000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S2_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c2_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_3: i2c@98c000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x0098c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S3_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c3_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_4: i2c@990000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00990000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S4_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c4_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_5: i2c@994000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00994000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S5_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c5_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_6: i2c@998000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x00998000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S6_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c6_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_7: i2c@99c000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x0099c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S7_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c7_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_8: i2c@9a0000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x009a0000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S8_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c8_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c_hub_9: i2c@9a4000 {
+ compatible = "qcom,geni-i2c-master-hub";
+ reg = <0x0 0x009a4000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_I2C_S9_CLK>,
+ <&gcc GCC_QUPV3_I2C_CORE_CLK>;
+ clock-names = "se",
+ "core";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_I2C QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&hub_i2c9_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,sm8750-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0x0 0x00a00000 0x0 0x60000>;
+
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+
+ dma-channels = <12>;
+ dma-channel-mask = <0x1e>;
+ #dma-cells = <3>;
+
+ iommus = <&apps_smmu 0xb6 0x0>;
+
+ status = "disabled";
+ };
+
+ qupv3_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x00ac0000 0x0 0x2000>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ clock-names = "m-ahb",
+ "s-ahb";
+
+ iommus = <&apps_smmu 0xa3 0x0>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ i2c0: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a80000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 0 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c0_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi0: spi@a80000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a80000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 0 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a84000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 1 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c1_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi1: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a84000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 1 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi1_data_clk>, <&qup_spi1_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a88000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 2 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c2_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi2: spi@a88000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a88000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 2 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi2_data_clk>, <&qup_spi2_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c3: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a8c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 3 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c3_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi3: spi@a8c000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a8c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 3 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi3_data_clk>, <&qup_spi3_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c4: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a90000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 4 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c4_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi4: spi@a90000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a90000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 4 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi4_data_clk>, <&qup_spi4_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c5: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a94000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 5 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c5_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi5: spi@a94000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a94000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 5 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi5_data_clk>, <&qup_spi5_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c6: i2c@a98000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0x0 0x00a98000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>,
+ <&gpi_dma1 1 6 QCOM_GPI_I2C>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_i2c6_data_clk>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ spi6: spi@a98000 {
+ compatible = "qcom,geni-spi";
+ reg = <0x0 0x00a98000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ <&aggre1_noc MASTER_QUP_1 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
+
+ dmas = <&gpi_dma1 0 6 QCOM_GPI_SPI>,
+ <&gpi_dma1 1 6 QCOM_GPI_SPI>;
+ dma-names = "tx",
+ "rx";
+
+ pinctrl-0 = <&qup_spi6_data_clk>, <&qup_spi6_cs>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ uart7: serial@a9c000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0x0 0x00a9c000 0x0 0x4000>;
+
+ interrupts = <GIC_SPI 579 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ pinctrl-0 = <&qup_uart7_default>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+ };
+ };
+
+ cnoc_main: interconnect@1500000 {
+ compatible = "qcom,sm8750-cnoc-main";
+ reg = <0x0 0x01500000 0x0 0x16080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ config_noc: interconnect@1600000 {
+ compatible = "qcom,sm8750-config-noc";
+ reg = <0x0 0x01600000 0x0 0x6200>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ system_noc: interconnect@1680000 {
+ compatible = "qcom,sm8750-system-noc";
+ reg = <0x0 0x01680000 0x0 0x1d080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ pcie_noc: interconnect@16c0000 {
+ compatible = "qcom,sm8750-pcie-anoc";
+ reg = <0x0 0x016c0000 0x0 0x11400>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ clocks = <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sm8750-aggre1-noc";
+ reg = <0x0 0x016e0000 0x0 0x16400>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ clocks = <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+ };
+
+ aggre2_noc: interconnect@1700000 {
+ compatible = "qcom,sm8750-aggre2-noc";
+ reg = <0x0 0x01700000 0x0 0x1f400>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ };
+
+ mmss_noc: interconnect@1780000 {
+ compatible = "qcom,sm8750-mmss-noc";
+ reg = <0x0 0x01780000 0x0 0x5b800>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x0 0x01f40000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+ lpass_ag_noc: interconnect@7e40000 {
+ compatible = "qcom,sm8750-lpass-ag-noc";
+ reg = <0x0 0x07e40000 0x0 0xe080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ lpass_lpiaon_noc: interconnect@7400000 {
+ compatible = "qcom,sm8750-lpass-lpiaon-noc";
+ reg = <0x0 0x07400000 0x0 0x19080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ lpass_lpicx_noc: interconnect@7420000 {
+ compatible = "qcom,sm8750-lpass-lpicx-noc";
+ reg = <0x0 0x07420000 0x0 0x44080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sm8750-pdc", "qcom,pdc";
+ reg = <0x0 0x0b220000 0x0 0x10000>, <0x0 0x164400f0 0x0 0x64>;
+
+ qcom,pdc-ranges = <0 745 51>, <51 527 47>,
+ <98 609 32>, <130 717 12>,
+ <142 251 5>, <147 796 16>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ spmi_bus: spmi@c400000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0 0x0c400000 0x0 0x3000>,
+ <0x0 0x0c500000 0x0 0x400000>,
+ <0x0 0x0c440000 0x0 0x80000>,
+ <0x0 0x0c4c0000 0x0 0x10000>,
+ <0x0 0x0c42d000 0x0 0x4000>;
+ reg-names = "core",
+ "chnls",
+ "obsrvr",
+ "intr",
+ "cnfg";
+
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "periph_irq";
+
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ qcom,bus-id = <0>;
+
+ interrupt-controller;
+ #interrupt-cells = <4>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sm8750-tlmm";
+ reg = <0x0 0x0f100000 0x0 0x102000>;
+
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-ranges = <&tlmm 0 0 216>;
+ wakeup-parent = <&pdc>;
+
+ hub_i2c0_data_clk: hub-i2c0-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio64", "gpio65";
+ function = "i2chub0_se0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c1_data_clk: hub-i2c1-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio66", "gpio67";
+ function = "i2chub0_se1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c2_data_clk: hub-i2c2-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio68", "gpio69";
+ function = "i2chub0_se2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c3_data_clk: hub-i2c3-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio70", "gpio71";
+ function = "i2chub0_se3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c4_data_clk: hub-i2c4-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio72", "gpio73";
+ function = "i2chub0_se4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c5_data_clk: hub-i2c5-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio74", "gpio75";
+ function = "i2chub0_se5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c6_data_clk: hub-i2c6-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio76", "gpio77";
+ function = "i2chub0_se6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c7_data_clk: hub-i2c7-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio82", "gpio83";
+ function = "i2chub0_se7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c8_data_clk: hub-i2c8-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio206", "gpio207";
+ function = "i2chub0_se8";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ hub_i2c9_data_clk: hub-i2c9-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio80", "gpio81";
+ function = "i2chub0_se9";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ pcie0_default_state: pcie0-default-state {
+ perst-pins {
+ pins = "gpio102";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq-pins {
+ pins = "gpio103";
+ function = "pcie0_clk_req_n";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-pins {
+ pins = "gpio104";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ qup_i2c0_data_clk: qup-i2c0-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio32", "gpio33";
+ function = "qup1_se0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c1_data_clk: qup-i2c1-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio36", "gpio37";
+ function = "qup1_se1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c2_data_clk: qup-i2c2-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio40", "gpio41";
+ function = "qup1_se2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c3_data_clk: qup-i2c3-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio44", "gpio45";
+ function = "qup1_se3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c4_data_clk: qup-i2c4-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio48", "gpio49";
+ function = "qup1_se4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c5_data_clk: qup-i2c5-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio52", "gpio53";
+ function = "qup1_se5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c6_data_clk: qup-i2c6-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio56", "gpio57";
+ function = "qup1_se6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c8_data_clk: qup-i2c8-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio0", "gpio1";
+ function = "qup2_se0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c9_data_clk: qup-i2c9-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio4", "gpio5";
+ function = "qup2_se1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c10_data_clk: qup-i2c10-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio8", "gpio9";
+ function = "qup2_se2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c11_data_clk: qup-i2c11-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio12", "gpio13";
+ function = "qup2_se3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c12_data_clk: qup-i2c12-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio16", "gpio17";
+ function = "qup2_se4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c13_data_clk: qup-i2c13-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio20", "gpio21";
+ function = "qup2_se5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_i2c15_data_clk: qup-i2c15-data-clk-state {
+ /* SDA, SCL */
+ pins = "gpio28", "gpio29";
+ function = "qup2_se7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_spi0_cs: qup-spi0-cs-state {
+ pins = "gpio35";
+ function = "qup1_se0";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi0_data_clk: qup-spi0-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio32", "gpio33", "gpio34";
+ function = "qup1_se0";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi1_cs: qup-spi1-cs-state {
+ pins = "gpio39";
+ function = "qup1_se1";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi1_data_clk: qup-spi1-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio36", "gpio37", "gpio38";
+ function = "qup1_se1";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi2_cs: qup-spi2-cs-state {
+ pins = "gpio43";
+ function = "qup1_se2";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi2_data_clk: qup-spi2-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio40", "gpio41", "gpio42";
+ function = "qup1_se2";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi3_cs: qup-spi3-cs-state {
+ pins = "gpio47";
+ function = "qup1_se3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi3_data_clk: qup-spi3-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio44", "gpio45", "gpio46";
+ function = "qup1_se3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi4_cs: qup-spi4-cs-state {
+ pins = "gpio51";
+ function = "qup1_se4";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi4_data_clk: qup-spi4-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio48", "gpio49", "gpio50";
+ function = "qup1_se4";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi5_cs: qup-spi5-cs-state {
+ pins = "gpio55";
+ function = "qup1_se5";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi5_data_clk: qup-spi5-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio52", "gpio53", "gpio54";
+ function = "qup1_se5";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi6_cs: qup-spi6-cs-state {
+ pins = "gpio59";
+ function = "qup1_se6";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi6_data_clk: qup-spi6-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio56", "gpio57", "gpio58";
+ function = "qup1_se6";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi8_cs: qup-spi8-cs-state {
+ pins = "gpio3";
+ function = "qup2_se0";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi8_data_clk: qup-spi8-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio0", "gpio1", "gpio2";
+ function = "qup2_se0";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi9_cs: qup-spi9-cs-state {
+ pins = "gpio7";
+ function = "qup2_se1";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi9_data_clk: qup-spi9-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio4", "gpio5", "gpio6";
+ function = "qup2_se1";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi10_cs: qup-spi10-cs-state {
+ pins = "gpio11";
+ function = "qup2_se2";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi10_data_clk: qup-spi10-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio8", "gpio9", "gpio10";
+ function = "qup2_se2";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi11_cs: qup-spi11-cs-state {
+ pins = "gpio15";
+ function = "qup2_se3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi11_data_clk: qup-spi11-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio12", "gpio13", "gpio14";
+ function = "qup2_se3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi12_cs: qup-spi12-cs-state {
+ pins = "gpio19";
+ function = "qup2_se4";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi12_data_clk: qup-spi12-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio16", "gpio17", "gpio18";
+ function = "qup2_se4";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi13_cs: qup-spi13-cs-state {
+ pins = "gpio23";
+ function = "qup2_se5";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ qup_spi13_data_clk: qup-spi13-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio20", "gpio21", "gpio22";
+ function = "qup2_se5";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi15_cs: qup-spi15-cs-state {
+ pins = "gpio31";
+ function = "qup2_se7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_spi15_data_clk: qup-spi15-data-clk-state {
+ /* MISO, MOSI, CLK */
+ pins = "gpio28", "gpio29", "gpio30";
+ function = "qup2_se7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ qup_uart7_default: qup-uart7-default-state {
+ /* TX, RX */
+ pins = "gpio62", "gpio63";
+ function = "qup1_se7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ qup_uart14_default: qup-uart14-default-state {
+ /* TX, RX */
+ pins = "gpio26", "gpio27";
+ function = "qup2_se6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ qup_uart14_cts_rts: qup-uart14-cts-rts-state {
+ /* CTS, RTS */
+ pins = "gpio24", "gpio25";
+ function = "qup2_se6";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ sdc2_sleep: sdc2-sleep-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ sdc2_default: sdc2-default-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ tcsrcc: clock-controller@f204008 {
+ compatible = "qcom,sm8750-tcsr", "syscon";
+ reg = <0x0 0x0f204008 0x0 0x3004>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sm8750-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ reg = <0x0 0x15000000 0x0 0x100000>;
+
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 493 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 497 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 498 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>;
+
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+
+ dma-coherent;
+ };
+
+ intc: interrupt-controller@16000000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x16000000 0x0 0x10000>,
+ <0x0 0x16080000 0x0 0x200000>;
+
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <3>;
+ interrupt-controller;
+
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x40000>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic_its: msi-controller@16040000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x16040000 0x0 0x20000>;
+
+ msi-controller;
+ #msi-cells = <1>;
+ };
+ };
+
+ apps_rsc: rsc@16500000 {
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x16500000 0x0 0x10000>,
+ <0x0 0x16510000 0x0 0x10000>,
+ <0x0 0x16520000 0x0 0x10000>;
+ reg-names = "drv-0",
+ "drv-1",
+ "drv-2";
+
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 3>, <SLEEP_TCS 2>,
+ <WAKE_TCS 2>, <CONTROL_TCS 0>;
+
+ label = "apps_rsc";
+
+ power-domains = <&system_pd>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sm8750-rpmh-clk";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ #clock-cells = <1>;
+ };
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sm8750-rpmhpd";
+
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ #power-domain-cells = <1>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp-16 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp-48 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs_d3: opp-50 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D3>;
+ };
+
+ rpmhpd_opp_low_svs_d2: opp-52 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D2>;
+ };
+
+ rpmhpd_opp_low_svs_d1: opp-56 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D1>;
+ };
+
+ rpmhpd_opp_low_svs_d0: opp-60 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_D0>;
+ };
+
+ rpmhpd_opp_low_svs: opp-64 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_low_svs_l1: opp-80 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS_L1>;
+ };
+
+ rpmhpd_opp_svs: opp-128 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l0: opp-144 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L0>;
+ };
+
+ rpmhpd_opp_svs_l1: opp-192 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_svs_l2: opp-224 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ };
+
+ rpmhpd_opp_nom: opp-256 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp-320 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp-336 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp-384 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp-416 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+
+ rpmhpd_opp_turbo_l2: opp-432 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L2>;
+ };
+
+ rpmhpd_opp_turbo_l3: opp-448 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L3>;
+ };
+
+ rpmhpd_opp_turbo_l4: opp-452 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L4>;
+ };
+
+ rpmhpd_opp_super_turbo_no_cpr: opp-480 {
+ opp-level =
+ <RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR>;
+ };
+ };
+ };
+ };
+
+ timer@16800000 {
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x16800000 0x0 0x1000>;
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0 0x20000000>;
+
+ frame@16801000 {
+ reg = <0x0 0x16801000 0x1000>,
+ <0x0 0x16802000 0x1000>;
+
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <0>;
+ };
+
+ frame@16803000 {
+ reg = <0x0 0x16803000 0x1000>;
+
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <1>;
+
+ status = "disabled";
+ };
+
+ frame@16805000 {
+ reg = <0x0 0x16805000 0x1000>;
+
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <2>;
+
+ status = "disabled";
+ };
+
+ frame@16807000 {
+ reg = <0x0 0x16807000 0x1000>;
+
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <3>;
+
+ status = "disabled";
+ };
+
+ frame@16809000 {
+ reg = <0x0 0x16809000 0x1000>;
+
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <4>;
+
+ status = "disabled";
+ };
+
+ frame@1680b000 {
+ reg = <0x0 0x1680b000 0x1000>;
+
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <5>;
+
+ status = "disabled";
+ };
+
+ frame@1680d000 {
+ reg = <0x0 0x1680d000 0x1000>;
+
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+
+ frame-number = <6>;
+
+ status = "disabled";
+ };
+ };
+
+ gem_noc: interconnect@24100000 {
+ compatible = "qcom,sm8750-gem-noc";
+ reg = <0x0 0x24100000 0x0 0x14b080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+
+ nsp_noc: interconnect@320c0000 {
+ compatible = "qcom,sm8750-nsp-noc";
+ reg = <0x0 0x320c0000 0x0 0x13080>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ #interconnect-cells = <2>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts b/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts
new file mode 100644
index 000000000000..5e3970b26e2f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e001de-devkit.dts
@@ -0,0 +1,1371 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. X1E001DE Snapdragon Devkit for Windows";
+ compatible = "qcom,x1e001de-devkit", "qcom,x1e001de", "qcom,x1e80100";
+
+ aliases {
+ serial0 = &uart21;
+ };
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_default>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>,
+ <&tlmm 125 GPIO_ACTIVE_HIGH>;
+
+ /* Back panel port closer to the RJ45 connector */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* Back panel port closer to the audio jack */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "host";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* Front panel port */
+ connector@2 {
+ compatible = "usb-c-connector";
+ reg = <2>;
+ power-role = "dual";
+ data-role = "host";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss2_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss2_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss2_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss2_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss2_con_sbu_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E001DE-DEVKIT";
+ audio-routing = "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nvme_reg_en>;
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_pwr_1p15_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p15: regulator-rtmr2-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 189 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr2_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_1p8: regulator-rtmr2-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 126 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr2_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr2_3p3: regulator-rtmr2-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR2_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 187 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr2_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_wwan: regulator-wwan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDX_VPH_PWR";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wwan_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vph_pwr>;
+ vdd-bob2-supply = <&vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p0: ldo5 {
+ regulator-name = "vreg_l5b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_2p8: ldo7 {
+ regulator-name = "vreg_l7b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p9: ldo16 {
+ regulator-name = "vreg_l16b_2p9";
+ regulator-min-microvolt = <2912000>;
+ regulator-max-microvolt = <2912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_1p0: ldo1 {
+ regulator-name = "vreg_l1f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p0: ldo2 {
+ regulator-name = "vreg_l2f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p0: ldo3 {
+ regulator-name = "vreg_l3f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+
+ vreg_s1i_0p9: smps1 {
+ regulator-name = "vreg_s1i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2i_1p0: smps2 {
+ regulator-name = "vreg_s2i_1p0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/Thundercomm/DEVKIT/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK5>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr2_1p15>;
+ vdd33-supply = <&vreg_rtmr2_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr2_3p3>;
+ vddar-supply = <&vreg_rtmr2_1p15>;
+ vddat-supply = <&vreg_rtmr2_1p15>;
+ vddio-supply = <&vreg_rtmr2_1p8>;
+
+ reset-gpios = <&tlmm 185 GPIO_ACTIVE_HIGH>;
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss2_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss2_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss2_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss2_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_HIGH>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_HIGH>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp2 {
+ status = "okay";
+};
+
+&mdss_dp2_out {
+ data-lanes = <0 1>;
+};
+
+&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie5 {
+ perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_wwan>;
+
+ pinctrl-0 = <&pcie5_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie5_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie6a_default>;
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pm8550_gpios {
+ usb0_3p3_reg_en: usb0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ };
+};
+
+&pmc8380_5_gpios {
+ usb0_pwr_1p15_en: usb0-pwr-1p15-en-state {
+ pins = "gpio8";
+ function = "normal";
+ };
+};
+
+&pm8550ve_9_gpios {
+ usb0_1p8_reg_en: usb0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/Thundercomm/DEVKIT/qcadsp8380.mbn",
+ "qcom/x1e80100/Thundercomm/DEVKIT/adsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/Thundercomm/DEVKIT/qccdsp8380.mbn",
+ "qcom/x1e80100/Thundercomm/DEVKIT/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 71 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&sdc2_default &sdc2_card_det_n>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_card_det_n>;
+ pinctrl-names = "default", "sleep";
+ vmmc-supply = <&vreg_l9b_2p9>;
+ vqmmc-supply = <&vreg_l6b_1p8>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ status = "okay";
+};
+
+&smb2360_0 {
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+};
+
+&smb2360_1 {
+ status = "okay";
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+};
+
+&smb2360_2 {
+ status = "okay";
+};
+
+&smb2360_2_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l8b_3p0>;
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <44 4>; /* SPI (TPM) */
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie5_default: pcie5-default-state {
+ clkreq-n-pins {
+ pins = "gpio150";
+ function = "pcie5_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio149";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio151";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ rtmr1_1p15_reg_en: rtmr1-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_1p8_reg_en: rtmr1-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_3p3_reg_en: rtmr1-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_1p15_reg_en: rtmr2-1p15-reg-en-state {
+ pins = "gpio189";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_1p8_reg_en: rtmr2-1p8-reg-en-state {
+ pins = "gpio126";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr2_3p3_reg_en: rtmr2-3p3-reg-en-state {
+ pins = "gpio187";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdc2_card_det_n: sdc2-card-det-state {
+ pins = "gpio71";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wwan_sw_en: wwan-sw-en-state {
+ pins = "gpio221";
+ function = "gpio";
+ drive-strength = <4>;
+ bias-disable;
+ };
+};
+
+&uart21 {
+ compatible = "qcom,geni-debug-uart";
+ status = "okay";
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&retimer_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&retimer_ss1_ss_in>;
+};
+
+&usb_1_ss2_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_2_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss2_qmpphy {
+ vdda-phy-supply = <&vreg_l2j_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss2 {
+ status = "okay";
+};
+
+&usb_1_ss2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss2_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss2_hs_in>;
+};
+
+&usb_1_ss2_qmpphy_out {
+ remote-endpoint = <&retimer_ss2_ss_in>;
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
index 66513fc8e67a..b2c2347f54fa 100644
--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
@@ -19,6 +19,32 @@
compatible = "lenovo,thinkpad-t14s", "qcom,x1e78100", "qcom,x1e80100";
chassis-type = "laptop";
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
@@ -153,6 +179,85 @@
regulator-always-on;
regulator-boot-on;
};
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-LENOVO-Thinkpad-T14s";
+ audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS1",
+ "VA DMIC1", "MIC BIAS1",
+ "VA DMIC0", "VA MIC BIAS1",
+ "VA DMIC1", "VA MIC BIAS1",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
};
&apps_rsc {
@@ -185,6 +290,13 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
vreg_l2b_3p0: ldo2 {
regulator-name = "vreg_l2b_3p0";
regulator-min-microvolt = <3072000>;
@@ -495,6 +607,54 @@
};
};
+&i2c5 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ eusb5_repeater: redriver@43 {
+ compatible = "nxp,ptn3222";
+ reg = <0x43>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 7 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb5_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb6_repeater: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb6_reset_n>;
+ pinctrl-names = "default";
+ };
+};
+
&i2c8 {
clock-frequency = <400000>;
@@ -515,6 +675,24 @@
/* TODO: second-sourced touchscreen @ 0x41 */
};
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ pinctrl-0 = <&dmic01_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_l1b_1p8>;
+ qcom,dmic-sample-rate = <4800000>;
+};
+
&mdss {
status = "okay";
};
@@ -635,22 +813,107 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
};
+&swr0 {
+ status = "okay";
+
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ /* WSA8845, Left Speaker */
+ left_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right Speaker */
+ right_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <34 2>, /* Unused */
<44 4>, /* SPI (TPM) */
<72 2>, /* Secure EC I2C connection (?) */
<238 1>; /* UFS Reset */
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb5_reset_n: eusb5-reset-n-state {
+ pins = "gpio7";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb6_reset_n: eusb6-reset-n-state {
+ pins = "gpio184";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
tpad_default: tpad-default-state {
pins = "gpio3";
function = "gpio";
@@ -763,7 +1026,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p8>;
status = "okay";
@@ -795,7 +1058,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -816,3 +1079,56 @@
&usb_1_ss1_qmpphy_out {
remote-endpoint = <&pmic_glink_ss1_ss_in>;
};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb5_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb6_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
index 8515c254e158..53781f9b13af 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
@@ -7,7 +7,9 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include "x1e80100.dtsi"
#include "x1e80100-pmics.dtsi"
@@ -17,6 +19,20 @@
compatible = "asus,vivobook-s15", "qcom,x1e80100";
chassis-type = "laptop";
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
pmic-glink {
compatible = "qcom,x1e80100-pmic-glink",
"qcom,sm8550-pmic-glink",
@@ -328,6 +344,14 @@
};
};
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/ASUSTeK/vivobook-s15/qcdxkmsuc8380.mbn";
+ };
+};
+
&i2c0 {
clock-frequency = <400000>;
status = "okay";
@@ -399,9 +423,13 @@
aux-bus {
panel {
- compatible = "edp-panel";
+ compatible = "samsung,atna56ac03", "samsung,atna33xc20";
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
power-supply = <&vreg_edp_3p3>;
+ pinctrl-0 = <&edp_bl_en>;
+ pinctrl-names = "default";
+
port {
edp_panel_in: endpoint {
remote-endpoint = <&mdss_dp3_out>;
@@ -467,6 +495,18 @@
status = "okay";
};
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>; /* 1.8 V */
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_MED>;
+ bias-pull-down;
+ input-disable;
+ output-enable;
+ };
+};
+
&qupv3_0 {
status = "okay";
};
@@ -493,11 +533,19 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
@@ -515,6 +563,12 @@
bias-disable;
};
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
kybd_default: kybd-default-state {
pins = "gpio67";
function = "gpio";
@@ -591,7 +645,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p8>;
status = "okay";
@@ -623,7 +677,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index d51a9bdcf67f..ff5b3472fafd 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -932,11 +932,19 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
@@ -1187,7 +1195,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p8>;
status = "okay";
@@ -1219,7 +1227,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -1251,7 +1259,7 @@
};
&usb_1_ss2_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
index 05624226faf9..86e87f03b0ec 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
@@ -89,7 +89,15 @@
reg = <1>;
pmic_glink_ss0_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
};
};
};
@@ -118,7 +126,15 @@
reg = <1>;
pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
};
};
};
@@ -166,6 +182,102 @@
regulator-boot-on;
};
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
vreg_vph_pwr: regulator-vph-pwr {
compatible = "regulator-fixed";
@@ -492,9 +604,60 @@
&i2c3 {
clock-frequency = <400000>;
- status = "disabled";
- /* PS8830 Retimer @0x8 */
- /* Unknown device @0x9 */
+ status = "okay";
+
+ /* Right-side USB Type-C port */
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
};
&i2c5 {
@@ -505,9 +668,61 @@
&i2c7 {
clock-frequency = <400000>;
- status = "disabled";
- /* PS8830 Retimer @0x8 */
- /* Unknown device @0x9 */
+ status = "okay";
+
+ /* Left-side USB Type-C port */
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+
+ };
+ };
};
&i2c8 {
@@ -634,6 +849,36 @@
status = "okay";
};
+&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+
+ rtmr0_3p3_reg_en: rtmr0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
+&pmc8380_5_gpios {
+ rtmr0_1p15_reg_en: rtmr0-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
+&pm8550ve_9_gpios {
+ rtmr0_1p8_reg_en: rtmr0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
&qupv3_0 {
status = "okay";
};
@@ -660,11 +905,19 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
@@ -762,6 +1015,34 @@
};
};
+ rtmr1_1p15_reg_en: rtmr1-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_1p8_reg_en: rtmr1-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_3p3_reg_en: rtmr1-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
tpad_default: tpad-default-state {
disable-pins {
pins = "gpio38";
@@ -820,7 +1101,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p9>;
status = "okay";
@@ -839,7 +1120,7 @@
};
&usb_1_ss0_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ remote-endpoint = <&retimer_ss0_ss_in>;
};
&usb_1_ss1_hsphy {
@@ -852,7 +1133,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -871,5 +1152,5 @@
};
&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ remote-endpoint = <&retimer_ss1_ss_in>;
};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts b/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts
new file mode 100644
index 000000000000..cd860a246c45
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/x1e80100-hp-omnibook-x14.dts
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Xilin Wu <wuxilin123@gmail.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "x1e80100.dtsi"
+#include "x1e80100-pmics.dtsi"
+
+/ {
+ model = "HP Omnibook X 14";
+ compatible = "hp,omnibook-x14", "qcom,x1e80100";
+ chassis-type = "laptop";
+
+ aliases {
+ serial0 = &uart21;
+ serial1 = &uart14;
+ };
+
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_default>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ vdd-buck-supply = <&vreg_l15b_1p8>;
+ vdd-rxtx-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l15b_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ #sound-dai-cells = <1>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pmk8550_pwm 0 5000000>;
+
+ brightness-levels = <0 2048 4096 8192 16384 65535>;
+ num-interpolated-steps = <20>;
+ default-brightness-level = <80>;
+
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vreg_edp_bl>;
+
+ pinctrl-0 = <&edp_bl_en>, <&edp_bl_pwm>;
+ pinctrl-names = "default";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
+ pmic-glink {
+ compatible = "qcom,x1e80100-pmic-glink",
+ "qcom,sm8550-pmic-glink",
+ "qcom,pmic-glink";
+ orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>,
+ <&tlmm 123 GPIO_ACTIVE_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Left-side port, closer to the screen */
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss0_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss0_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
+ };
+ };
+ };
+ };
+
+ /* Left-side port, farther from the screen */
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ pmic_glink_ss1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ pmic_glink_ss1_ss_in: endpoint {
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
+ };
+ };
+ };
+ };
+ };
+
+ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ size = <0x0 0x8000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-HP-OMNIBOOK-X14";
+ audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS3",
+ "VA DMIC1", "MIC BIAS3",
+ "VA DMIC2", "MIC BIAS1",
+ "VA DMIC3", "MIC BIAS1",
+ "VA DMIC0", "VA MIC BIAS3",
+ "VA DMIC1", "VA MIC BIAS3",
+ "VA DMIC2", "VA MIC BIAS1",
+ "VA DMIC3", "VA MIC BIAS1",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
+
+ vreg_edp_3p3: regulator-edp-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_EDP_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&edp_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_edp_bl: regulator-edp-bl {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VBL9";
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+
+ gpio = <&pmc8380_3_gpios 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_bl_reg_en>;
+
+ regulator-boot-on;
+ };
+
+ vreg_misc_3p3: regulator-misc-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_MISC_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550ve_8_gpios 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&misc_3p3_reg_en>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vreg_nvme: regulator-nvme {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_NVME_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&nvme_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&usb1_pwr_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_vph_pwr: regulator-vph-pwr {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vreg_vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vreg_wcn_3p3: regulator-wcn-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 214 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&wcn_sw_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ /*
+ * TODO: These two regulators are actually part of the removable M.2
+ * card and not the CRD mainboard. Need to describe this differently.
+ * Functionally it works correctly, because all we need to do is to
+ * turn on the actual 3.3V supply above.
+ */
+ vreg_wcn_0p95: regulator-wcn-0p95 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_0P95";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <950000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ vreg_wcn_1p9: regulator-wcn-1p9 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_WCN_1P9";
+ regulator-min-microvolt = <1900000>;
+ regulator-max-microvolt = <1900000>;
+
+ vin-supply = <&vreg_wcn_3p3>;
+ };
+
+ wcn6855-pmu {
+ compatible = "qcom,wcn6855-pmu";
+
+ vddaon-supply = <&vreg_wcn_0p95>;
+ vddio-supply = <&vreg_wcn_1p9>;
+ vddpcie1p3-supply = <&vreg_wcn_1p9>;
+ vddpcie1p9-supply = <&vreg_wcn_1p9>;
+ vddpmu-supply = <&vreg_wcn_0p95>;
+ vddpmumx-supply = <&vreg_wcn_0p95>;
+ vddpmucx-supply = <&vreg_wcn_0p95>;
+ vddrfa0p95-supply = <&vreg_wcn_0p95>;
+ vddrfa1p3-supply = <&vreg_wcn_1p9>;
+ vddrfa1p9-supply = <&vreg_wcn_1p9>;
+
+ wlan-enable-gpios = <&tlmm 117 GPIO_ACTIVE_HIGH>;
+ bt-enable-gpios = <&tlmm 116 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&wcn_wlan_bt_en>;
+ pinctrl-names = "default";
+
+ regulators {
+ vreg_pmu_rfa_cmn_0p8: ldo0 {
+ regulator-name = "vreg_pmu_rfa_cmn_0p8";
+ };
+
+ vreg_pmu_aon_0p8: ldo1 {
+ regulator-name = "vreg_pmu_aon_0p8";
+ };
+
+ vreg_pmu_wlcx_0p8: ldo2 {
+ regulator-name = "vreg_pmu_wlcx_0p8";
+ };
+
+ vreg_pmu_wlmx_0p8: ldo3 {
+ regulator-name = "vreg_pmu_wlmx_0p8";
+ };
+
+ vreg_pmu_btcmx_0p8: ldo4 {
+ regulator-name = "vreg_pmu_btcmx_0p8";
+ };
+
+ vreg_pmu_pcie_1p8: ldo5 {
+ regulator-name = "vreg_pmu_pcie_1p8";
+ };
+
+ vreg_pmu_pcie_0p9: ldo6 {
+ regulator-name = "vreg_pmu_pcie_0p9";
+ };
+
+ vreg_pmu_rfa_0p8: ldo7 {
+ regulator-name = "vreg_pmu_rfa_0p8";
+ };
+
+ vreg_pmu_rfa_1p2: ldo8 {
+ regulator-name = "vreg_pmu_rfa_1p2";
+ };
+
+ vreg_pmu_rfa_1p7: ldo9 {
+ regulator-name = "vreg_pmu_rfa_1p7";
+ };
+ };
+ };
+};
+
+&apps_rsc {
+ regulators-0 {
+ compatible = "qcom,pm8550-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob1-supply = <&vreg_vph_pwr>;
+ vdd-bob2-supply = <&vreg_vph_pwr>;
+ vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>;
+ vdd-l2-l13-l14-supply = <&vreg_bob1>;
+ vdd-l5-l16-supply = <&vreg_bob1>;
+ vdd-l6-l7-supply = <&vreg_bob2>;
+ vdd-l8-l9-supply = <&vreg_bob1>;
+ vdd-l12-supply = <&vreg_s5j_1p2>;
+ vdd-l15-supply = <&vreg_s4c_1p8>;
+ vdd-l17-supply = <&vreg_bob2>;
+
+ vreg_bob1: bob1 {
+ regulator-name = "vreg_bob1";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob2: bob2 {
+ regulator-name = "vreg_bob2";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_1p8: ldo1 {
+ regulator-name = "vreg_l1b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-name = "vreg_l2b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4b_1p8: ldo4 {
+ regulator-name = "vreg_l4b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_3p0: ldo5 {
+ regulator-name = "vreg_l5b_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p8: ldo6 {
+ regulator-name = "vreg_l6b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_2p8: ldo7 {
+ regulator-name = "vreg_l7b_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8b_3p0: ldo8 {
+ regulator-name = "vreg_l8b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_2p9: ldo9 {
+ regulator-name = "vreg_l9b_2p9";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10b_1p8: ldo10 {
+ regulator-name = "vreg_l10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12b_1p2: ldo12 {
+ regulator-name = "vreg_l12b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13b_3p0: ldo13 {
+ regulator-name = "vreg_l13b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14b_3p0: ldo14 {
+ regulator-name = "vreg_l14b_3p0";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15b_1p8: ldo15 {
+ regulator-name = "vreg_l15b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16b_2p9: ldo16 {
+ regulator-name = "vreg_l16b_2p9";
+ regulator-min-microvolt = <2912000>;
+ regulator-max-microvolt = <2912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17b_2p5: ldo17 {
+ regulator-name = "vreg_l17b_2p5";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-1 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s4-supply = <&vreg_vph_pwr>;
+
+ vreg_s4c_1p8: smps4 {
+ regulator-name = "vreg_s4c_1p8";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p2: ldo1 {
+ regulator-name = "vreg_l1c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_0p8: ldo2 {
+ regulator-name = "vreg_l2c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p8: ldo3 {
+ regulator-name = "vreg_l3c_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-2 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "d";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s4c_1p8>;
+ vdd-s1-supply = <&vreg_vph_pwr>;
+
+ vreg_l1d_0p8: ldo1 {
+ regulator-name = "vreg_l1d_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2d_0p9: ldo2 {
+ regulator-name = "vreg_l2d_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3d_1p8: ldo3 {
+ regulator-name = "vreg_l3d_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-3 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-l2-supply = <&vreg_s1f_0p7>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+
+ vreg_l2e_0p8: ldo2 {
+ regulator-name = "vreg_l2e_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3e_1p2: ldo3 {
+ regulator-name = "vreg_l3e_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-4 {
+ compatible = "qcom,pmc8380-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-l1-supply = <&vreg_s5j_1p2>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s5j_1p2>;
+ vdd-s1-supply = <&vreg_vph_pwr>;
+
+ vreg_s1f_0p7: smps1 {
+ regulator-name = "vreg_s1f_0p7";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1f_1p0: ldo1 {
+ regulator-name = "vreg_l1f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_1p0: ldo2 {
+ regulator-name = "vreg_l2f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_1p0: ldo3 {
+ regulator-name = "vreg_l3f_1p0";
+ regulator-min-microvolt = <1024000>;
+ regulator-max-microvolt = <1024000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-6 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "i";
+
+ vdd-l1-supply = <&vreg_s4c_1p8>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s1-supply = <&vreg_vph_pwr>;
+ vdd-s2-supply = <&vreg_vph_pwr>;
+
+ vreg_s1i_0p9: smps1 {
+ regulator-name = "vreg_s1i_0p9";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2i_1p0: smps2 {
+ regulator-name = "vreg_s2i_1p0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1i_1p8: ldo1 {
+ regulator-name = "vreg_l1i_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2i_1p2: ldo2 {
+ regulator-name = "vreg_l2i_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ regulators-7 {
+ compatible = "qcom,pm8550ve-rpmh-regulators";
+ qcom,pmic-id = "j";
+
+ vdd-l1-supply = <&vreg_s1f_0p7>;
+ vdd-l2-supply = <&vreg_s5j_1p2>;
+ vdd-l3-supply = <&vreg_s1f_0p7>;
+ vdd-s5-supply = <&vreg_vph_pwr>;
+
+ vreg_s5j_1p2: smps5 {
+ regulator-name = "vreg_s5j_1p2";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1j_0p8: ldo1 {
+ regulator-name = "vreg_l1j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2j_1p2: ldo2 {
+ regulator-name = "vreg_l2j_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3j_0p8: ldo3 {
+ regulator-name = "vreg_l3j_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/hp/omnibook-x14/qcdxkmsuc8380.mbn";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ keyboard@3a {
+ compatible = "hid-over-i2c";
+ reg = <0x3a>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ pinctrl-0 = <&kybd_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+
+ touchpad@15 {
+ compatible = "hid-over-i2c";
+ reg = <0x15>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l12b_1p2>;
+
+ pinctrl-0 = <&tpad_default>;
+ pinctrl-names = "default";
+
+ wakeup-source;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* type-c PS8830 Retimer #2 0x8 */
+ /* is active on Windows */
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x08>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* is active on Windows */
+};
+
+&i2c5 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+
+ };
+};
+
+&i2c7 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&rtmr1_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+ retimer-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+
+ };
+ };
+};
+
+&i2c8 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+
+ hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>;
+
+ vdd-supply = <&vreg_misc_3p3>;
+ vddl-supply = <&vreg_l15b_1p8>;
+
+ pinctrl-0 = <&ts0_default>;
+ pinctrl-names = "default";
+ };
+};
+
+&i2c9 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ /* is active on Windows */
+};
+
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ pinctrl-0 = <&dmic01_default>, <&dmic23_default>;
+ pinctrl-names = "default";
+
+ vdd-micb-supply = <&vreg_l1b_1p8>;
+ qcom,dmic-sample-rate = <4800000>;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp3 {
+ compatible = "qcom,x1e80100-dp";
+ /delete-property/ #sound-dai-cells;
+
+ status = "okay";
+
+ aux-bus {
+ panel {
+ compatible = "edp-panel";
+ power-supply = <&vreg_edp_3p3>;
+
+ backlight = <&backlight>;
+
+ port {
+ edp_panel_in: endpoint {
+ remote-endpoint = <&mdss_dp3_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mdss_dp3_out: endpoint {
+ data-lanes = <0 1 2 3>;
+ link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+};
+
+&mdss_dp3_phy {
+ vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie4_phy {
+ vdda-phy-supply = <&vreg_l3i_0p8>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ status = "okay";
+};
+
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+ vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ };
+};
+
+&pcie6a {
+ perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+
+ vddpe-3v3-supply = <&vreg_nvme>;
+
+ pinctrl-0 = <&pcie6a_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&pcie6a_phy {
+ vdda-phy-supply = <&vreg_l1d_0p8>;
+ vdda-pll-supply = <&vreg_l2j_1p2>;
+
+ status = "okay";
+};
+
+&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+
+ usb0_3p3_reg_en: usb0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pm8550ve_8_gpios {
+ misc_3p3_reg_en: misc-3p3-reg-en-state {
+ pins = "gpio6";
+ function = "normal";
+ bias-disable;
+ drive-push-pull;
+ input-disable;
+ output-enable;
+ power-source = <1>; /* 1.8 V */
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+};
+
+&pm8550ve_9_gpios {
+ usb0_1p8_reg_en: usb0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ input-disable;
+ output-enable;
+ };
+
+ edp_bl_reg_en: edp-bl-reg-en-state {
+ pins = "gpio10";
+ function = "normal";
+ };
+
+};
+
+&pmk8550_gpios {
+ edp_bl_pwm: edp-bl-pwm-state {
+ pins = "gpio5";
+ function = "func3";
+ };
+};
+
+&pmk8550_pwm {
+ status = "okay";
+};
+
+&pmc8380_5_gpios {
+ usb0_pwr_1p15_reg_en: usb0-pwr-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ bias-disable;
+ input-disable;
+ output-enable;
+ };
+};
+
+&qupv3_0 {
+ status = "okay";
+};
+
+&qupv3_1 {
+ status = "okay";
+};
+
+&qupv3_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ firmware-name = "qcom/x1e80100/hp/omnibook-x14/qcadsp8380.mbn",
+ "qcom/x1e80100/hp/omnibook-x14/adsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&remoteproc_cdsp {
+ firmware-name = "qcom/x1e80100/hp/omnibook-x14/qccdsp8380.mbn",
+ "qcom/x1e80100/hp/omnibook-x14/cdsp_dtbs.elf";
+
+ status = "okay";
+};
+
+&smb2360_0_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l2b_3p0>;
+
+ status = "okay";
+};
+
+&smb2360_1_eusb2_repeater {
+ vdd18-supply = <&vreg_l3d_1p8>;
+ vdd3-supply = <&vreg_l14b_3p0>;
+
+ status = "okay";
+};
+
+&swr0 {
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* WSA8845, Left Speaker */
+ left_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right Speaker */
+ right_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b_1p8>;
+ vdd-io-supply = <&vreg_l12b_1p2>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <34 2>, /* Unused */
+ <44 4>, /* SPI (TPM) */
+ <72 2>, /* Secure EC I2C connection (?) */
+ <238 1>; /* UFS Reset */
+
+ bt_en_default: bt-en-sleep {
+ pins = "gpio116";
+ function = "gpio";
+ output-low;
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio70";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ hall_int_n_default: hall-int-n-state {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
+ kybd_default: kybd-default-state {
+ pins = "gpio67";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ nvme_reg_en: nvme-reg-en-state {
+ pins = "gpio18";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
+ clkreq-n-pins {
+ pins = "gpio153";
+ function = "pcie6a_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio152";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ rtmr1_default: rtmr1-reset-n-active-state {
+ pins = "gpio176";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tpad_default: tpad-default-state {
+ pins = "gpio3";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ ts0_default: ts0-default-state {
+ int-n-pins {
+ pins = "gpio51";
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ reset-n-pins {
+ pins = "gpio48";
+ function = "gpio";
+ output-high;
+ drive-strength = <16>;
+ };
+ };
+
+ usb1_pwr_1p15_reg_en: usb1-pwr-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_1p8_reg_en: usb1-pwr-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb1_pwr_3p3_reg_en: usb1-pwr-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ wcn_sw_en: wcn-sw-en-state {
+ pins = "gpio214";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcn_wlan_bt_en: wcn-wlan-bt-en-state {
+ pins = "gpio116", "gpio117";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&uart14 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn6855-bt";
+ max-speed = <3200000>;
+
+ vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
+ vddaon-supply = <&vreg_pmu_aon_0p8>;
+ vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+ vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
+ vddbtcmx-supply = <&vreg_pmu_btcmx_0p8>;
+ vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+ vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+ vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
+ };
+};
+
+&usb_1_ss0_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_0_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss0_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l1j_0p8>;
+
+ status = "okay";
+};
+
+&usb_1_ss0 {
+ status = "okay";
+};
+
+&usb_1_ss0_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss0_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss0_hs_in>;
+};
+
+&usb_1_ss0_qmpphy_out {
+ remote-endpoint = <&retimer_ss0_ss_in>;
+};
+
+&usb_1_ss1_hsphy {
+ vdd-supply = <&vreg_l3j_0p8>;
+ vdda12-supply = <&vreg_l2j_1p2>;
+
+ phys = <&smb2360_1_eusb2_repeater>;
+
+ status = "okay";
+};
+
+&usb_1_ss1_qmpphy {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l2d_0p9>;
+
+ status = "okay";
+};
+
+&usb_1_ss1 {
+ status = "okay";
+};
+
+&usb_1_ss1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_ss1_dwc3_hs {
+ remote-endpoint = <&pmic_glink_ss1_hs_in>;
+};
+
+&usb_1_ss1_qmpphy_out {
+ remote-endpoint = <&retimer_ss1_ss_in>;
+};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_dwc3 {
+ phys = <&usb_mp_hsphy0>, <&usb_mp_qmpphy0>;
+ phy-names = "usb2-0", "usb3-0";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
index ca5a808f2c7d..a3d53f2ba2c3 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "x1e80100.dtsi"
@@ -23,6 +24,21 @@
stdout-path = "serial0:115200n8";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&hall_int_n_default>;
+ pinctrl-names = "default";
+
+ switch-lid {
+ gpios = <&tlmm 92 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ };
+ };
+
pmic-glink {
compatible = "qcom,x1e80100-pmic-glink",
"qcom,sm8550-pmic-glink",
@@ -717,11 +733,19 @@
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
@@ -811,6 +835,28 @@
bias-disable;
};
+ hall_int_n_default: hall-int-n-state {
+ lid-n-pins {
+ pins = "gpio92";
+ function = "gpio";
+ bias-disable;
+ };
+
+ /*
+ * Pins 71 and 92 seem to be bridged together (pin 71 and 92 show the same
+ * events). By default, pin 71 is set as output-high, which blocks any
+ * event on pin 92. Output-disable on pin 71 is necessary to get events on
+ * pin 92.
+ * The purpose of pin 71 is not yet known; lid-pull is a supposition.
+ */
+ lid-pull-n-pins {
+ pins = "gpio71";
+ function = "gpio";
+ bias-pull-up;
+ output-disable;
+ };
+ };
+
kybd_default: kybd-default-state {
pins = "gpio67";
function = "gpio";
@@ -908,7 +954,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p8>;
status = "okay";
@@ -940,7 +986,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -972,7 +1018,7 @@
};
&usb_1_ss2_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
index 6835fdeef3ae..5867953c7356 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
@@ -22,6 +22,33 @@
i2c7 = &i2c7;
};
+ wcd938x: audio-codec {
+ compatible = "qcom,wcd9385-codec";
+
+ reset-gpios = <&tlmm 191 GPIO_ACTIVE_LOW>;
+
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+
+
+ vdd-buck-supply = <&vreg_l15b>;
+ vdd-rxtx-supply = <&vreg_l15b>;
+ vdd-io-supply = <&vreg_l15b>;
+ vdd-mic-bias-supply = <&vreg_bob1>;
+
+ pinctrl-0 = <&wcd_default>;
+ pinctrl-names = "default";
+
+ #sound-dai-cells = <1>;
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pmk8550_pwm 0 5000000>;
@@ -97,7 +124,15 @@
reg = <1>;
pmic_glink_ss0_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ remote-endpoint = <&retimer_ss0_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss0_con_sbu_out>;
};
};
};
@@ -126,7 +161,15 @@
reg = <1>;
pmic_glink_ss1_ss_in: endpoint {
- remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ remote-endpoint = <&retimer_ss1_ss_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_con_sbu_in: endpoint {
+ remote-endpoint = <&retimer_ss1_con_sbu_out>;
};
};
};
@@ -158,6 +201,109 @@
regulator-boot-on;
};
+ vreg_rtmr0_1p15: regulator-rtmr0-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P15";
+
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&pmc8380_5_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_1p8: regulator-rtmr0-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_1P8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8550ve_9_gpios 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr0_3p3: regulator-rtmr0-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR0_3P3";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&pm8550_gpios 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr0_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p15: regulator-rtmr1-1p15 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P15";
+
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+
+ gpio = <&tlmm 188 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p15_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_1p8: regulator-rtmr1-1p8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_1P8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 175 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_1p8_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+ vreg_rtmr1_3p3: regulator-rtmr1-3p3 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VREG_RTMR1_3P3";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&tlmm 186 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-0 = <&rtmr1_3p3_reg_en>;
+ pinctrl-names = "default";
+
+ regulator-boot-on;
+ };
+
+
vreg_nvme: regulator-nvme {
compatible = "regulator-fixed";
@@ -184,6 +330,86 @@
regulator-always-on;
regulator-boot-on;
};
+
+ sound {
+ compatible = "qcom,x1e80100-sndcard";
+ model = "X1E80100-Romulus";
+ audio-routing = "SpkrLeft IN", "WSA WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC2", "MIC BIAS2",
+ "VA DMIC0", "MIC BIAS3",
+ "VA DMIC1", "MIC BIAS3",
+ "VA DMIC0", "VA MIC BIAS3",
+ "VA DMIC1", "VA MIC BIAS3",
+ "TX SWR_INPUT1", "ADC2_OUTPUT";
+
+ va-dai-link {
+ link-name = "VA Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
+ };
+
+ codec {
+ sound-dai = <&lpass_vamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+
+ cpu {
+ sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai RX_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&lpass_rxmacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+
+ cpu {
+ sound-dai = <&q6apmbedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>,
+ <&swr0 0>, <&lpass_wsamacro 0>;
+ };
+
+ platform {
+ sound-dai = <&q6apm>;
+ };
+ };
+ };
};
&apps_rsc {
@@ -558,7 +784,59 @@
status = "okay";
- /* PS8830 USB retimer @8 */
+ /* Left-side rear port */
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ reset-gpios = <&pm8550_gpios 10 GPIO_ACTIVE_LOW>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK3>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr0_1p15>;
+ vdd33-supply = <&vreg_rtmr0_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr0_3p3>;
+ vddar-supply = <&vreg_rtmr0_1p15>;
+ vddat-supply = <&vreg_rtmr0_1p15>;
+ vddio-supply = <&vreg_rtmr0_1p8>;
+
+ pinctrl-0 = <&rtmr0_default>;
+ pinctrl-names = "default";
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss0_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss0_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss0_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_con_sbu_in>;
+ };
+ };
+ };
+ };
+
};
&i2c4 {
@@ -592,7 +870,74 @@
status = "okay";
- /* PS8830 USB retimer @8 */
+ /* Left-side front port */
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ reset-gpios = <&tlmm 176 GPIO_ACTIVE_LOW>;
+
+ clocks = <&rpmhcc RPMH_RF_CLK4>;
+ clock-names = "xo";
+
+ vdd-supply = <&vreg_rtmr1_1p15>;
+ vdd33-supply = <&vreg_rtmr1_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr1_3p3>;
+ vddar-supply = <&vreg_rtmr1_1p15>;
+ vddat-supply = <&vreg_rtmr1_1p15>;
+ vddio-supply = <&vreg_rtmr1_1p8>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ retimer_ss1_ss_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ retimer_ss1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_ss1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ retimer_ss1_con_sbu_out: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_con_sbu_in>;
+ };
+ };
+ };
+ };
+};
+
+&lpass_tlmm {
+ spkr_01_sd_n_active: spkr-01-sd-n-active-state {
+ pins = "gpio12";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+};
+
+&lpass_vamacro {
+ qcom,dmic-sample-rate = <4800000>;
+
+ vdd-micb-supply = <&vreg_l1b>;
+
+ pinctrl-0 = <&dmic01_default>;
+ pinctrl-names = "default";
};
&mdss {
@@ -641,6 +986,25 @@
status = "okay";
};
+&pcie3 {
+ perst-gpios = <&tlmm 143 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 145 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&pcie3_default>;
+ pinctrl-names = "default";
+
+ /* The RTS5261 chip on the other side only does Gen1x1 anyway */
+ max-link-speed = <1>;
+ status = "okay";
+};
+
+&pcie3_phy {
+ vdda-phy-supply = <&vreg_l3c>;
+ vdda-pll-supply = <&vreg_l3e>;
+
+ status = "okay";
+};
+
&pcie4 {
status = "okay";
};
@@ -671,6 +1035,28 @@
status = "okay";
};
+&pm8550_gpios {
+ rtmr0_default: rtmr0-reset-n-active-state {
+ pins = "gpio10";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+
+ rtmr0_3p3_reg_en: rtmr0-3p3-reg-en-state {
+ pins = "gpio11";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
+&pm8550ve_9_gpios {
+ rtmr0_1p8_reg_en: rtmr0-1p8-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
&pmc8380_3_gpios {
edp_bl_en: edp-bl-en-state {
pins = "gpio4";
@@ -681,6 +1067,14 @@
};
};
+&pmc8380_5_gpios {
+ rtmr0_1p15_reg_en: rtmr0-1p15-reg-en-state {
+ pins = "gpio8";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ };
+};
+
&pmk8550_pwm {
status = "okay";
};
@@ -706,23 +1100,31 @@
&remoteproc_adsp {
firmware-name = "qcom/x1e80100/microsoft/Romulus/qcadsp8380.mbn",
- "qcom/x1e80100/microsoft/Romulus/adsp_dtb.mbn";
+ "qcom/x1e80100/microsoft/Romulus/adsp_dtbs.elf";
status = "okay";
};
&remoteproc_cdsp {
firmware-name = "qcom/x1e80100/microsoft/Romulus/qccdsp8380.mbn",
- "qcom/x1e80100/microsoft/Romulus/cdsp_dtb.mbn";
+ "qcom/x1e80100/microsoft/Romulus/cdsp_dtbs.elf";
status = "okay";
};
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d>;
vdd3-supply = <&vreg_l2b>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d>;
vdd3-supply = <&vreg_l14b>;
@@ -737,6 +1139,59 @@
vdd3-supply = <&vreg_l8b>;
};
+&swr0 {
+ pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ /* WSA8845, Left speaker */
+ left_spkr: speaker@0,0 {
+ compatible = "sdw20217020400";
+ reg = <0 0>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ vdd-1p8-supply = <&vreg_l15b>;
+ vdd-io-supply = <&vreg_l12b>;
+ qcom,port-mapping = <1 2 3 7 10 13>;
+ };
+
+ /* WSA8845, Right speaker */
+ right_spkr: speaker@0,1 {
+ compatible = "sdw20217020400";
+ reg = <0 1>;
+ reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ vdd-1p8-supply = <&vreg_l15b>;
+ vdd-io-supply = <&vreg_l12b>;
+ qcom,port-mapping = <4 5 6 7 11 13>;
+ };
+};
+
+&swr1 {
+ status = "okay";
+
+ /* WCD9385 RX */
+ wcd_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ status = "okay";
+
+ /* WCD9385 TX */
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <44 4>, /* SPI (TPM) */
<238 1>; /* UFS Reset */
@@ -767,6 +1222,29 @@
bias-disable;
};
+ pcie3_default: pcie3-default-state {
+ perst-n-pins {
+ pins = "gpio143";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ clkreq-n-pins {
+ pins = "gpio144";
+ function = "pcie3_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake-n-pins {
+ pins = "gpio145";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
pcie6a_default: pcie6a-default-state {
perst-n-pins {
pins = "gpio152";
@@ -790,6 +1268,35 @@
};
};
+ rtmr1_1p8_reg_en: rtmr1-1p8-reg-en-state {
+ pins = "gpio175";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_3p3_reg_en: rtmr1-3p3-reg-en-state {
+ pins = "gpio186";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rtmr1_1p15_reg_en: rtmr1-1p15-reg-en-state {
+ pins = "gpio188";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcd_default: wcd-reset-n-active-state {
+ pins = "gpio191";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
cam_indicator_en: cam-indicator-en-state {
pins = "gpio225";
function = "gpio";
@@ -823,7 +1330,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e>;
+ vdda-phy-supply = <&vreg_l2j>;
vdda-pll-supply = <&vreg_l1j>;
status = "okay";
@@ -842,7 +1349,7 @@
};
&usb_1_ss0_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss0_ss_in>;
+ remote-endpoint = <&retimer_ss0_ss_in>;
};
&usb_1_ss1_hsphy {
@@ -855,7 +1362,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e>;
+ vdda-phy-supply = <&vreg_l2j>;
vdda-pll-supply = <&vreg_l2d>;
status = "okay";
@@ -874,7 +1381,7 @@
};
&usb_1_ss1_qmpphy_out {
- remote-endpoint = <&pmic_glink_ss1_ss_in>;
+ remote-endpoint = <&retimer_ss1_ss_in>;
};
/* MP0 goes to the Surface Connector, MP1 goes to the USB-A port */
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
index 5b54ee79f048..d7a2a2b8fc6c 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi
@@ -491,6 +491,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ status = "disabled";
+
smb2360_0_eusb2_repeater: phy@fd00 {
compatible = "qcom,smb2360-eusb2-repeater";
reg = <0xfd00>;
@@ -504,6 +506,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ status = "disabled";
+
smb2360_1_eusb2_repeater: phy@fd00 {
compatible = "qcom,smb2360-eusb2-repeater";
reg = <0xfd00>;
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index 5ef030c60abe..ec594628304a 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -84,6 +84,14 @@
remote-endpoint = <&usb_1_ss0_qmpphy_out>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss0_sbu: endpoint {
+ remote-endpoint = <&usb_1_ss0_sbu_mux>;
+ };
+ };
};
};
@@ -112,6 +120,14 @@
remote-endpoint = <&usb_1_ss1_qmpphy_out>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss1_sbu: endpoint {
+ remote-endpoint = <&usb_1_ss1_sbu_mux>;
+ };
+ };
};
};
@@ -140,6 +156,14 @@
remote-endpoint = <&usb_1_ss2_qmpphy_out>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ pmic_glink_ss2_sbu: endpoint {
+ remote-endpoint = <&usb_1_ss2_sbu_mux>;
+ };
+ };
};
};
};
@@ -256,6 +280,63 @@
regulator-boot-on;
};
+
+ usb-1-ss0-sbu-mux {
+ compatible = "onnn,fsusb42", "gpio-sbu-mux";
+
+ enable-gpios = <&tlmm 168 GPIO_ACTIVE_LOW>;
+ select-gpios = <&tlmm 167 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb_1_ss0_sbu_default>;
+ pinctrl-names = "default";
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ usb_1_ss0_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_ss0_sbu>;
+ };
+ };
+ };
+
+ usb-1-ss1-sbu-mux {
+ compatible = "onnn,fsusb42", "gpio-sbu-mux";
+
+ enable-gpios = <&tlmm 179 GPIO_ACTIVE_LOW>;
+ select-gpios = <&tlmm 178 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb_1_ss1_sbu_default>;
+ pinctrl-names = "default";
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ usb_1_ss1_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_ss1_sbu>;
+ };
+ };
+ };
+
+ usb-1-ss2-sbu-mux {
+ compatible = "onnn,fsusb42", "gpio-sbu-mux";
+
+ enable-gpios = <&tlmm 171 GPIO_ACTIVE_LOW>;
+ select-gpios = <&tlmm 170 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb_1_ss2_sbu_default>;
+ pinctrl-names = "default";
+
+ mode-switch;
+ orientation-switch;
+
+ port {
+ usb_1_ss2_sbu_mux: endpoint {
+ remote-endpoint = <&pmic_glink_ss2_sbu>;
+ };
+ };
+ };
};
&apps_rsc {
@@ -616,6 +697,40 @@
};
};
+&i2c5 {
+ clock-frequency = <400000>;
+
+ status = "okay";
+
+ eusb3_repeater: redriver@47 {
+ compatible = "nxp,ptn3222";
+ reg = <0x47>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb3_reset_n>;
+ pinctrl-names = "default";
+ };
+
+ eusb6_repeater: redriver@4f {
+ compatible = "nxp,ptn3222";
+ reg = <0x4f>;
+ #phy-cells = <0>;
+
+ vdd3v3-supply = <&vreg_l13b_3p0>;
+ vdd1v8-supply = <&vreg_l4b_1p8>;
+
+ reset-gpios = <&tlmm 184 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&eusb6_reset_n>;
+ pinctrl-names = "default";
+ };
+};
+
&lpass_tlmm {
spkr_01_sd_n_active: spkr-01-sd-n-active-state {
pins = "gpio12";
@@ -630,6 +745,30 @@
status = "okay";
};
+&mdss_dp0 {
+ status = "okay";
+};
+
+&mdss_dp0_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp1 {
+ status = "okay";
+};
+
+&mdss_dp1_out {
+ data-lanes = <0 1>;
+};
+
+&mdss_dp2 {
+ status = "okay";
+};
+
+&mdss_dp2_out {
+ data-lanes = <0 1>;
+};
+
&mdss_dp3 {
compatible = "qcom,x1e80100-dp";
/delete-property/ #sound-dai-cells;
@@ -731,11 +870,32 @@
status = "okay";
};
+&sdhc_2 {
+ cd-gpios = <&tlmm 71 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&sdc2_default &sdc2_card_det_n>;
+ pinctrl-1 = <&sdc2_sleep &sdc2_card_det_n>;
+ pinctrl-names = "default", "sleep";
+ vmmc-supply = <&vreg_l9b_2p9>;
+ vqmmc-supply = <&vreg_l6b_1p8>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ status = "okay";
+};
+
+&smb2360_0 {
+ status = "okay";
+};
+
&smb2360_0_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l2b_3p0>;
};
+&smb2360_1 {
+ status = "okay";
+};
+
&smb2360_1_eusb2_repeater {
vdd18-supply = <&vreg_l3d_1p8>;
vdd3-supply = <&vreg_l14b_3p0>;
@@ -819,6 +979,22 @@
bias-disable;
};
+ eusb3_reset_n: eusb3-reset-n-state {
+ pins = "gpio6";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ eusb6_reset_n: eusb6-reset-n-state {
+ pins = "gpio184";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
nvme_reg_en: nvme-reg-en-state {
pins = "gpio18";
function = "gpio";
@@ -872,6 +1048,86 @@
};
};
+ sdc2_card_det_n: sdc2-card-det-state {
+ pins = "gpio71";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ usb_1_ss0_sbu_default: usb-1-ss0-sbu-state {
+ mode-pins {
+ pins = "gpio166";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ output-high;
+ };
+
+ oe-n-pins {
+ pins = "gpio168";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ sel-pins {
+ pins = "gpio167";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+
+ usb_1_ss1_sbu_default: usb-1-ss1-sbu-state {
+ mode-pins {
+ pins = "gpio177";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ output-high;
+ };
+
+ oe-n-pins {
+ pins = "gpio179";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ sel-pins {
+ pins = "gpio178";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+
+ usb_1_ss2_sbu_default: usb-1-ss2-sbu-state {
+ mode-pins {
+ pins = "gpio169";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ output-high;
+ };
+
+ oe-n-pins {
+ pins = "gpio171";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ sel-pins {
+ pins = "gpio170";
+ function = "gpio";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+
wcd_default: wcd-reset-n-active-state {
pins = "gpio191";
function = "gpio";
@@ -896,7 +1152,7 @@
};
&usb_1_ss0_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l1j_0p8>;
status = "okay";
@@ -928,7 +1184,7 @@
};
&usb_1_ss1_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -960,7 +1216,7 @@
};
&usb_1_ss2_qmpphy {
- vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-phy-supply = <&vreg_l2j_1p2>;
vdda-pll-supply = <&vreg_l2d_0p9>;
status = "okay";
@@ -981,3 +1237,39 @@
&usb_1_ss2_qmpphy_out {
remote-endpoint = <&pmic_glink_ss2_ss_in>;
};
+
+&usb_mp {
+ status = "okay";
+};
+
+&usb_mp_hsphy0 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb6_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_hsphy1 {
+ vdd-supply = <&vreg_l2e_0p8>;
+ vdda12-supply = <&vreg_l3e_1p2>;
+
+ phys = <&eusb3_repeater>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy0 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
+
+&usb_mp_qmpphy1 {
+ vdda-phy-supply = <&vreg_l3e_1p2>;
+ vdda-pll-supply = <&vreg_l3c_0p8>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 7e4f46ad8edd..4936fa5b98ff 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -38,7 +38,7 @@
sleep_clk: sleep-clk {
compatible = "fixed-clock";
- clock-frequency = <32000>;
+ clock-frequency = <32764>;
#clock-cells = <0>;
};
@@ -303,6 +303,18 @@
};
};
+ dummy-sink {
+ compatible = "arm,coresight-dummy-sink";
+
+ in-ports {
+ port {
+ eud_in: endpoint {
+ remote-endpoint = <&swao_rep_out1>;
+ };
+ };
+ };
+ };
+
firmware {
scm: scm {
compatible = "qcom,scm-x1e80100", "qcom,scm";
@@ -677,6 +689,34 @@
};
};
+ qup_opp_table_100mhz: opp-table-qup100mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
+ qup_opp_table_120mhz: opp-table-qup120mhz {
+ compatible = "operating-points-v2";
+
+ opp-75000000 {
+ opp-hz = /bits/ 64 <75000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+ };
+
smp2p-adsp {
compatible = "qcom,smp2p";
@@ -743,7 +783,7 @@
clocks = <&bi_tcxo_div2>,
<&sleep_clk>,
- <0>,
+ <&pcie3_phy>,
<&pcie4_phy>,
<&pcie5_phy>,
<&pcie6a_phy>,
@@ -831,6 +871,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 0 QCOM_GPI_I2C>,
<&gpi_dma2 1 0 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -864,6 +907,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 0 QCOM_GPI_SPI>,
<&gpi_dma2 1 0 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -897,6 +943,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 1 QCOM_GPI_I2C>,
<&gpi_dma2 1 1 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -930,6 +979,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma2 0 1 QCOM_GPI_SPI>,
<&gpi_dma2 1 1 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -963,6 +1015,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 2 QCOM_GPI_I2C>,
<&gpi_dma2 1 2 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -996,6 +1051,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 2 QCOM_GPI_SPI>,
<&gpi_dma2 1 2 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1029,6 +1087,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 3 QCOM_GPI_I2C>,
<&gpi_dma2 1 3 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1062,6 +1123,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 3 QCOM_GPI_SPI>,
<&gpi_dma2 1 3 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1095,6 +1159,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 4 QCOM_GPI_I2C>,
<&gpi_dma2 1 4 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1128,6 +1195,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 4 QCOM_GPI_SPI>,
<&gpi_dma2 1 4 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1161,6 +1231,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 5 QCOM_GPI_I2C>,
<&gpi_dma2 1 5 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1194,6 +1267,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 5 QCOM_GPI_SPI>,
<&gpi_dma2 1 5 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1224,6 +1300,9 @@
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
pinctrl-0 = <&qup_uart21_default>;
pinctrl-names = "default";
@@ -1249,6 +1328,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 6 QCOM_GPI_I2C>,
<&gpi_dma2 1 6 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1282,6 +1364,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 6 QCOM_GPI_SPI>,
<&gpi_dma2 1 6 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1315,6 +1400,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma2 0 7 QCOM_GPI_I2C>,
<&gpi_dma2 1 7 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1348,6 +1436,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma2 0 7 QCOM_GPI_SPI>,
<&gpi_dma2 1 7 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1425,6 +1516,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 0 QCOM_GPI_I2C>,
<&gpi_dma1 1 0 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1458,6 +1552,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 0 QCOM_GPI_SPI>,
<&gpi_dma1 1 0 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1491,6 +1588,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 1 QCOM_GPI_I2C>,
<&gpi_dma1 1 1 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1524,6 +1624,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma1 0 1 QCOM_GPI_SPI>,
<&gpi_dma1 1 1 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1557,6 +1660,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 2 QCOM_GPI_I2C>,
<&gpi_dma1 1 2 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1590,6 +1696,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 2 QCOM_GPI_SPI>,
<&gpi_dma1 1 2 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1623,6 +1732,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 3 QCOM_GPI_I2C>,
<&gpi_dma1 1 3 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1656,6 +1768,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 3 QCOM_GPI_SPI>,
<&gpi_dma1 1 3 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1689,6 +1804,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 4 QCOM_GPI_I2C>,
<&gpi_dma1 1 4 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1722,6 +1840,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 4 QCOM_GPI_SPI>,
<&gpi_dma1 1 4 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1755,6 +1876,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 5 QCOM_GPI_I2C>,
<&gpi_dma1 1 5 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1788,6 +1912,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 5 QCOM_GPI_SPI>,
<&gpi_dma1 1 5 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1821,6 +1948,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 6 QCOM_GPI_I2C>,
<&gpi_dma1 1 6 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1854,6 +1984,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 6 QCOM_GPI_SPI>,
<&gpi_dma1 1 6 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1868,6 +2001,31 @@
status = "disabled";
};
+ uart14: serial@a98000 {
+ compatible = "qcom,geni-uart";
+ reg = <0 0x00a98000 0 0x4000>;
+
+ interrupts = <GIC_SPI 806 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
+ clock-names = "se";
+
+ interconnects = <&clk_virt MASTER_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
+
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
+ pinctrl-0 = <&qup_uart14_default>;
+ pinctrl-names = "default";
+
+ status = "disabled";
+ };
+
i2c15: i2c@a9c000 {
compatible = "qcom,geni-i2c";
reg = <0 0x00a9c000 0 0x4000>;
@@ -1887,6 +2045,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma1 0 7 QCOM_GPI_I2C>,
<&gpi_dma1 1 7 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -1920,6 +2081,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma1 0 7 QCOM_GPI_SPI>,
<&gpi_dma1 1 7 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -1996,6 +2160,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 0 QCOM_GPI_I2C>,
<&gpi_dma0 1 0 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2029,6 +2196,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma0 0 0 QCOM_GPI_SPI>,
<&gpi_dma0 1 0 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2062,6 +2232,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 1 QCOM_GPI_I2C>,
<&gpi_dma0 1 1 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2095,6 +2268,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_120mhz>;
+
dmas = <&gpi_dma0 0 1 QCOM_GPI_SPI>,
<&gpi_dma0 1 1 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2128,6 +2304,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 2 QCOM_GPI_I2C>,
<&gpi_dma0 1 2 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2158,6 +2337,9 @@
interconnect-names = "qup-core",
"qup-config";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
pinctrl-0 = <&qup_uart2_default>;
pinctrl-names = "default";
@@ -2183,6 +2365,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 2 QCOM_GPI_SPI>,
<&gpi_dma0 1 2 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2216,6 +2401,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 3 QCOM_GPI_I2C>,
<&gpi_dma0 1 3 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2249,6 +2437,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 3 QCOM_GPI_SPI>,
<&gpi_dma0 1 3 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2282,6 +2473,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 4 QCOM_GPI_I2C>,
<&gpi_dma0 1 4 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2315,6 +2509,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 4 QCOM_GPI_SPI>,
<&gpi_dma0 1 4 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2348,6 +2545,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 5 QCOM_GPI_I2C>,
<&gpi_dma0 1 5 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2381,6 +2581,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 5 QCOM_GPI_SPI>,
<&gpi_dma0 1 5 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2414,6 +2617,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 6 QCOM_GPI_I2C>,
<&gpi_dma0 1 6 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2447,6 +2653,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 6 QCOM_GPI_SPI>,
<&gpi_dma0 1 6 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2480,6 +2689,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+
dmas = <&gpi_dma0 0 7 QCOM_GPI_I2C>,
<&gpi_dma0 1 7 QCOM_GPI_I2C>;
dma-names = "tx",
@@ -2513,6 +2725,9 @@
"qup-config",
"qup-memory";
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&qup_opp_table_100mhz>;
+
dmas = <&gpi_dma0 0 7 QCOM_GPI_SPI>,
<&gpi_dma0 1 7 QCOM_GPI_SPI>;
dma-names = "tx",
@@ -2906,6 +3121,208 @@
#interconnect-cells = <2>;
};
+ pcie3: pcie@1bd0000 {
+ device_type = "pci";
+ compatible = "qcom,pcie-x1e80100";
+ reg = <0x0 0x01bd0000 0x0 0x3000>,
+ <0x0 0x78000000 0x0 0xf1d>,
+ <0x0 0x78000f40 0x0 0xa8>,
+ <0x0 0x78001000 0x0 0x1000>,
+ <0x0 0x78100000 0x0 0x100000>,
+ <0x0 0x01bd3000 0x0 0x1000>;
+ reg-names = "parf",
+ "dbi",
+ "elbi",
+ "atu",
+ "config",
+ "mhi";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x78200000 0x0 0x100000>,
+ <0x02000000 0x0 0x78300000 0x0 0x78300000 0x0 0x3d00000>,
+ <0x03000000 0x7 0x40000000 0x7 0x40000000 0x0 0x40000000>;
+ bus-range = <0x00 0xff>;
+
+ dma-coherent;
+
+ linux,pci-domain = <3>;
+ num-lanes = <8>;
+
+ interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 769 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 671 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0",
+ "msi1",
+ "msi2",
+ "msi3",
+ "msi4",
+ "msi5",
+ "msi6",
+ "msi7",
+ "global";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_PCIE_3_AUX_CLK>,
+ <&gcc GCC_PCIE_3_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_3_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_3_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_3_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK>,
+ <&gcc GCC_CNOC_PCIE_NORTH_SF_AXI_CLK>;
+ clock-names = "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "noc_aggr",
+ "cnoc_sf_axi";
+
+ assigned-clocks = <&gcc GCC_PCIE_3_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ interconnects = <&pcie_north_anoc MASTER_PCIE_3 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &cnoc_main SLAVE_PCIE_3 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "pcie-mem",
+ "cpu-pcie";
+
+ resets = <&gcc GCC_PCIE_3_BCR>,
+ <&gcc GCC_PCIE_3_LINK_DOWN_BCR>;
+ reset-names = "pci",
+ "link_down";
+
+ power-domains = <&gcc GCC_PCIE_3_GDSC>;
+
+ phys = <&pcie3_phy>;
+ phy-names = "pciephy";
+
+ operating-points-v2 = <&pcie3_opp_table>;
+
+ status = "disabled";
+
+ pcie3_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /* GEN 1 x1 */
+ opp-2500000 {
+ opp-hz = /bits/ 64 <2500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <250000 1>;
+ };
+
+ /* GEN 1 x2 and GEN 2 x1 */
+ opp-5000000 {
+ opp-hz = /bits/ 64 <5000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <500000 1>;
+ };
+
+ /* GEN 1 x4 and GEN 2 x2 */
+ opp-10000000 {
+ opp-hz = /bits/ 64 <10000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <1000000 1>;
+ };
+
+ /* GEN 1 x8 and GEN 2 x4 */
+ opp-20000000 {
+ opp-hz = /bits/ 64 <20000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <2000000 1>;
+ };
+
+ /* GEN 2 x8 */
+ opp-40000000 {
+ opp-hz = /bits/ 64 <40000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ opp-peak-kBps = <4000000 1>;
+ };
+
+ /* GEN 3 x1 */
+ opp-8000000 {
+ opp-hz = /bits/ 64 <8000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ opp-peak-kBps = <984500 1>;
+ };
+
+ /* GEN 3 x2 and GEN 4 x1 */
+ opp-16000000 {
+ opp-hz = /bits/ 64 <16000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ opp-peak-kBps = <1969000 1>;
+ };
+
+ /* GEN 3 x4 and GEN 4 x2 */
+ opp-32000000 {
+ opp-hz = /bits/ 64 <32000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ opp-peak-kBps = <3938000 1>;
+ };
+
+ /* GEN 3 x8 and GEN 4 x4 */
+ opp-64000000 {
+ opp-hz = /bits/ 64 <64000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ opp-peak-kBps = <7876000 1>;
+ };
+
+ /* GEN 4 x8 */
+ opp-128000000 {
+ opp-hz = /bits/ 64 <128000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ opp-peak-kBps = <15753000 1>;
+ };
+ };
+ };
+
+ pcie3_phy: phy@1be0000 {
+ compatible = "qcom,x1e80100-qmp-gen4x8-pcie-phy";
+ reg = <0 0x01be0000 0 0x10000>;
+
+ clocks = <&gcc GCC_PCIE_3_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_3_CFG_AHB_CLK>,
+ <&tcsr TCSR_PCIE_8L_CLKREF_EN>,
+ <&gcc GCC_PCIE_3_PHY_RCHNG_CLK>,
+ <&gcc GCC_PCIE_3_PIPE_CLK>,
+ <&gcc GCC_PCIE_3_PIPEDIV2_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "rchng",
+ "pipe",
+ "pipediv2";
+
+ resets = <&gcc GCC_PCIE_3_PHY_BCR>,
+ <&gcc GCC_PCIE_3_NOCSR_COM_PHY_BCR>;
+ reset-names = "phy",
+ "phy_nocsr";
+
+ assigned-clocks = <&gcc GCC_PCIE_3_PHY_RCHNG_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc GCC_PCIE_3_PHY_GDSC>;
+
+ #clock-cells = <0>;
+ clock-output-names = "pcie3_pipe_clk";
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
pcie6a: pci@1bf8000 {
device_type = "pci";
compatible = "qcom,pcie-x1e80100";
@@ -3518,6 +3935,143 @@
#interconnect-cells = <2>;
};
+ remoteproc_adsp: remoteproc@6800000 {
+ compatible = "qcom,x1e80100-adsp-pas";
+ reg = <0x0 0x06800000 0x0 0x10000>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd RPMHPD_LCX>,
+ <&rpmhpd RPMHPD_LMX>;
+ power-domain-names = "lcx",
+ "lmx";
+
+ interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+
+ memory-region = <&adspslpi_mem>,
+ <&q6_adsp_dtb_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
+ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compute-cb@3 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <3>;
+ iommus = <&apps_smmu 0x1003 0x80>,
+ <&apps_smmu 0x1063 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@4 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <4>;
+ iommus = <&apps_smmu 0x1004 0x80>,
+ <&apps_smmu 0x1064 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@5 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <5>;
+ iommus = <&apps_smmu 0x1005 0x80>,
+ <&apps_smmu 0x1065 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@6 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <6>;
+ iommus = <&apps_smmu 0x1006 0x80>,
+ <&apps_smmu 0x1066 0x0>;
+ dma-coherent;
+ };
+
+ compute-cb@7 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <7>;
+ iommus = <&apps_smmu 0x1007 0x80>,
+ <&apps_smmu 0x1067 0x0>;
+ dma-coherent;
+ };
+ };
+
+ gpr {
+ compatible = "qcom,gpr";
+ qcom,glink-channels = "adsp_apps";
+ qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+ qcom,intents = <512 20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6apm: service@1 {
+ compatible = "qcom,q6apm";
+ reg = <GPR_APM_MODULE_IID>;
+ #sound-dai-cells = <0>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6apmbedai: bedais {
+ compatible = "qcom,q6apm-lpass-dais";
+ #sound-dai-cells = <1>;
+ };
+
+ q6apmdai: dais {
+ compatible = "qcom,q6apm-dais";
+ iommus = <&apps_smmu 0x1001 0x80>,
+ <&apps_smmu 0x1061 0x0>;
+ };
+ };
+
+ q6prm: service@2 {
+ compatible = "qcom,q6prm";
+ reg = <GPR_PRM_MODULE_IID>;
+ qcom,protection-domain = "avs/audio",
+ "msm/adsp/audio_pd";
+
+ q6prmcc: clock-controller {
+ compatible = "qcom,q6prm-lpass-clocks";
+ #clock-cells = <2>;
+ };
+ };
+ };
+ };
+ };
+
lpass_wsa2macro: codec@6aa0000 {
compatible = "qcom,x1e80100-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
reg = <0 0x06aa0000 0 0x1000>;
@@ -3892,6 +4446,112 @@
#interconnect-cells = <2>;
};
+ sdhc_2: mmc@8804000 {
+ compatible = "qcom,x1e80100-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x08804000 0 0x1000>;
+
+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "core", "xo";
+ iommus = <&apps_smmu 0x520 0>;
+ qcom,dll-config = <0x0007642c>;
+ qcom,ddr-config = <0x80040868>;
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc2_opp_table>;
+
+ interconnects = <&aggre2_noc MASTER_SDCC_2 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_2 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "sdhc-ddr", "cpu-sdhc";
+ bus-width = <4>;
+ dma-coherent;
+
+ status = "disabled";
+
+ sdhc2_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-19200000 {
+ opp-hz = /bits/ 64 <19200000>;
+ required-opps = <&rpmhpd_opp_min_svs>;
+ };
+
+ opp-50000000 {
+ opp-hz = /bits/ 64 <50000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-202000000 {
+ opp-hz = /bits/ 64 <202000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+ sdhc_4: mmc@8844000 {
+ compatible = "qcom,x1e80100-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x08844000 0 0x1000>;
+
+ interrupts = <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC4_AHB_CLK>,
+ <&gcc GCC_SDCC4_APPS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "core", "xo";
+ iommus = <&apps_smmu 0x160 0>;
+ qcom,dll-config = <0x0007642c>;
+ qcom,ddr-config = <0x80040868>;
+ power-domains = <&rpmhpd RPMHPD_CX>;
+ operating-points-v2 = <&sdhc4_opp_table>;
+
+ interconnects = <&aggre2_noc MASTER_SDCC_4 QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_SDCC_4 QCOM_ICC_TAG_ACTIVE_ONLY>;
+ interconnect-names = "sdhc-ddr", "cpu-sdhc";
+ bus-width = <4>;
+ dma-coherent;
+
+ status = "disabled";
+
+ sdhc4_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-19200000 {
+ opp-hz = /bits/ 64 <19200000>;
+ required-opps = <&rpmhpd_opp_min_svs>;
+ };
+
+ opp-50000000 {
+ opp-hz = /bits/ 64 <50000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-202000000 {
+ opp-hz = /bits/ 64 <202000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
usb_2_hsphy: phy@88e0000 {
compatible = "qcom,x1e80100-snps-eusb2-phy",
"qcom,sm8550-snps-eusb2-phy";
@@ -4063,6 +4723,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dma-coherent;
@@ -4118,7 +4780,7 @@
<&gcc GCC_USB20_MASTER_CLK>;
assigned-clock-rates = <19200000>, <200000000>;
- interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 50 IRQ_TYPE_EDGE_BOTH>,
<&pdc 49 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "pwr_event",
@@ -4144,11 +4806,13 @@
usb_2_dwc3: usb@a200000 {
compatible = "snps,dwc3";
reg = <0 0x0a200000 0 0xcd00>;
- interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x14e0 0x0>;
phys = <&usb_2_hsphy>;
phy-names = "usb2-phy";
maximum-speed = "high-speed";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
ports {
#address-cells = <1>;
@@ -4245,6 +4909,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dma-coherent;
};
@@ -4316,6 +4982,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dma-coherent;
@@ -4414,6 +5082,8 @@
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
snps,usb3_lpm_capable;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
dma-coherent;
@@ -5629,6 +6299,34 @@
};
};
+ qup_uart14_default: qup-uart14-default-state {
+ cts-pins {
+ pins = "gpio56";
+ function = "qup1_se6";
+ bias-bus-hold;
+ };
+
+ rts-pins {
+ pins = "gpio57";
+ function = "qup1_se6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tx-pins {
+ pins = "gpio58";
+ function = "qup1_se6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx-pins {
+ pins = "gpio59";
+ function = "qup1_se6";
+ bias-pull-up;
+ };
+ };
+
qup_uart21_default: qup-uart21-default-state {
tx-pins {
pins = "gpio86";
@@ -5644,6 +6342,1487 @@
bias-disable;
};
};
+
+ sdc2_default: sdc2-default-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ sdc2_sleep: sdc2-sleep-state {
+ clk-pins {
+ pins = "sdc2_clk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cmd-pins {
+ pins = "sdc2_cmd";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ data-pins {
+ pins = "sdc2_data";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ stm@10002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0x0 0x10002000 0x0 0x1000>,
+ <0x0 0x16280000 0x0 0x180000>;
+ reg-names = "stm-base",
+ "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@10003000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10003000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ dcc_tpdm_out: endpoint {
+ remote-endpoint = <&qdss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpda@10004000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10004000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ qdss_tpda_in0: endpoint {
+ remote-endpoint = <&dcc_tpdm_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ qdss_tpda_in1: endpoint {
+ remote-endpoint = <&qdss_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ qdss_tpda_out: endpoint {
+ remote-endpoint = <&funnel0_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@1000f000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x1000f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ qdss_tpdm_out: endpoint {
+ remote-endpoint = <&qdss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@10041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10041000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ funnel0_in6: endpoint {
+ remote-endpoint = <&qdss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ funnel0_in7: endpoint {
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel0_out: endpoint {
+ remote-endpoint = <&qdss_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10042000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+
+ funnel1_in2: endpoint {
+ remote-endpoint = <&tmess_funnel_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ funnel1_in5: endpoint {
+ remote-endpoint = <&dlst_funnel_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ funnel1_in6: endpoint {
+ remote-endpoint = <&dlct1_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ funnel1_out: endpoint {
+ remote-endpoint = <&qdss_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@10045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10045000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ qdss_funnel_in0: endpoint {
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ qdss_funnel_in1: endpoint {
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ qdss_funnel_out: endpoint {
+ remote-endpoint = <&aoss_funnel_in7>;
+ };
+ };
+ };
+ };
+
+ tpdm@10800000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10800000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ mxa_tpdm_out: endpoint {
+ remote-endpoint = <&dlct2_tpda_in15>;
+ };
+ };
+ };
+ };
+
+ tpdm@1082c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x1082c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ gcc_tpdm_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in21>;
+ };
+ };
+ };
+ };
+
+ tpdm@10841000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10841000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ prng_tpdm_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in19>;
+ };
+ };
+ };
+ };
+
+ tpdm@10844000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10844000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ lpass_cx_tpdm_out: endpoint {
+ remote-endpoint = <&lpass_cx_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10846000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10846000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ lpass_cx_funnel_in0: endpoint {
+ remote-endpoint = <&lpass_cx_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ lpass_cx_funnel_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ cti@1098b000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x0 0x1098b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ };
+
+ tpdm@109d0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x109d0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ qm_tpdm_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in20>;
+ };
+ };
+ };
+ };
+
+ tpdm@10ac0000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10ac0000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ dlst_tpdm0_out: endpoint {
+ remote-endpoint = <&dlst_tpda_in8>;
+ };
+ };
+ };
+ };
+
+ tpdm@10ac1000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10ac1000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlst_tpdm1_out: endpoint {
+ remote-endpoint = <&dlst_tpda_in9>;
+ };
+ };
+ };
+ };
+
+ tpda@10ac4000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10ac4000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@8 {
+ reg = <8>;
+
+ dlst_tpda_in8: endpoint {
+ remote-endpoint = <&dlst_tpdm0_out>;
+ };
+ };
+
+ port@9 {
+ reg = <9>;
+
+ dlst_tpda_in9: endpoint {
+ remote-endpoint = <&dlst_tpdm1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_tpda_out: endpoint {
+ remote-endpoint = <&dlst_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10ac5000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10ac5000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ dlst_funnel_in0: endpoint {
+ remote-endpoint = <&dlst_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlst_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in5>;
+ };
+ };
+ };
+ };
+
+ funnel@10b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10b04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@3 {
+ reg = <3>;
+
+ aoss_funnel_in3: endpoint {
+ remote-endpoint = <&ddr_lpi_funnel_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ aoss_funnel_in6: endpoint {
+ remote-endpoint = <&aoss_tpda_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ aoss_funnel_in7: endpoint {
+ remote-endpoint = <&qdss_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ aoss_funnel_out: endpoint {
+ remote-endpoint = <&etf0_in>;
+ };
+ };
+ };
+ };
+
+ etf0: tmc@10b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x0 0x10b05000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ etf0_in: endpoint {
+ remote-endpoint = <&aoss_funnel_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ etf0_out: endpoint {
+ remote-endpoint = <&swao_rep_in>;
+ };
+ };
+ };
+ };
+
+ replicator@10b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0x0 0x10b06000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ swao_rep_in: endpoint {
+ remote-endpoint = <&etf0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ swao_rep_out1: endpoint {
+ remote-endpoint = <&eud_in>;
+ };
+ };
+ };
+ };
+
+ tpda@10b08000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10b08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ aoss_tpda_in0: endpoint {
+ remote-endpoint = <&aoss_tpdm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ aoss_tpda_in1: endpoint {
+ remote-endpoint = <&aoss_tpdm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ aoss_tpda_in2: endpoint {
+ remote-endpoint = <&aoss_tpdm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ aoss_tpda_in3: endpoint {
+ remote-endpoint = <&aoss_tpdm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ aoss_tpda_in4: endpoint {
+ remote-endpoint = <&aoss_tpdm4_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ aoss_tpda_out: endpoint {
+ remote-endpoint = <&aoss_funnel_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b09000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b09000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm0_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b0a000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b0a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm1_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b0b000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b0b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm2_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b0c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b0c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm3_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b0d000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b0d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ aoss_tpdm4_out: endpoint {
+ remote-endpoint = <&aoss_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@10b20000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10b20000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ lpicc_tpdm_out: endpoint {
+ remote-endpoint = <&ddr_lpi_tpda_in>;
+ };
+ };
+ };
+ };
+
+ tpda@10b23000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10b23000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+
+ in-ports {
+ port {
+ ddr_lpi_tpda_in: endpoint {
+ remote-endpoint = <&lpicc_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_lpi_tpda_out: endpoint {
+ remote-endpoint = <&ddr_lpi_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10b24000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10b24000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+
+ in-ports {
+ port {
+ ddr_lpi_funnel_in0: endpoint {
+ remote-endpoint = <&ddr_lpi_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_lpi_funnel_out: endpoint {
+ remote-endpoint = <&aoss_funnel_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c08000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ mm_tpdm_out: endpoint {
+ remote-endpoint = <&mm_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ funnel@10c0b000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10c0b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+
+ mm_funnel_in4: endpoint {
+ remote-endpoint = <&mm_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ mm_funnel_out: endpoint {
+ remote-endpoint = <&dlct2_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c28000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c28000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlct1_tpdm_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in26>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c29000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c29000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ ipcc_tpdm_out: endpoint {
+ remote-endpoint = <&dlct1_tpda_in27>;
+ };
+ };
+ };
+ };
+
+ tpda@10c2b000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10c2b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+
+ dlct1_tpda_in4: endpoint {
+ remote-endpoint = <&lpass_cx_funnel_out>;
+ };
+ };
+
+ port@13 {
+ reg = <19>;
+
+ dlct1_tpda_in19: endpoint {
+ remote-endpoint = <&prng_tpdm_out>;
+ };
+ };
+
+ port@14 {
+ reg = <20>;
+
+ dlct1_tpda_in20: endpoint {
+ remote-endpoint = <&qm_tpdm_out>;
+ };
+ };
+
+ port@15 {
+ reg = <21>;
+
+ dlct1_tpda_in21: endpoint {
+ remote-endpoint = <&gcc_tpdm_out>;
+ };
+ };
+
+ port@1a {
+ reg = <26>;
+
+ dlct1_tpda_in26: endpoint {
+ remote-endpoint = <&dlct1_tpdm_out>;
+ };
+ };
+
+ port@1b {
+ reg = <27>;
+
+ dlct1_tpda_in27: endpoint {
+ remote-endpoint = <&ipcc_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct1_tpda_out: endpoint {
+ remote-endpoint = <&dlct1_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10c2c000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10c2c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dlct1_funnel_in0: endpoint {
+ remote-endpoint = <&dlct1_tpda_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ dlct1_funnel_in4: endpoint {
+ remote-endpoint = <&dlct2_funnel_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ dlct1_funnel_in5: endpoint {
+ remote-endpoint = <&ddr_funnel0_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct1_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c38000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c38000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlct2_tpdm0_out: endpoint {
+ remote-endpoint = <&dlct2_tpda_in16>;
+ };
+ };
+ };
+ };
+
+ tpdm@10c39000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10c39000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ dlct2_tpdm1_out: endpoint {
+ remote-endpoint = <&dlct2_tpda_in17>;
+ };
+ };
+ };
+ };
+
+ tpda@10c3c000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10c3c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+
+ dlct2_tpda_in4: endpoint {
+ remote-endpoint = <&mm_funnel_out>;
+ };
+ };
+
+ port@f {
+ reg = <15>;
+
+ dlct2_tpda_in15: endpoint {
+ remote-endpoint = <&mxa_tpdm_out>;
+ };
+ };
+
+ port@10 {
+ reg = <16>;
+
+ dlct2_tpda_in16: endpoint {
+ remote-endpoint = <&dlct2_tpdm0_out>;
+ };
+ };
+
+ port@11 {
+ reg = <17>;
+
+ dlct2_tpda_in17: endpoint {
+ remote-endpoint = <&dlct2_tpdm1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct2_tpda_out: endpoint {
+ remote-endpoint = <&dlct2_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10c3d000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10c3d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ dlct2_funnel_in0: endpoint {
+ remote-endpoint = <&dlct2_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ dlct2_funnel_out: endpoint {
+ remote-endpoint = <&dlct1_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@10cc1000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10cc1000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <64>;
+ qcom,cmb-msrs-num = <32>;
+ qcom,dsb-element-bits = <32>;
+ qcom,dsb-msrs-num = <32>;
+ status = "disabled";
+
+ out-ports {
+ port {
+ tmess_tpdm1_out: endpoint {
+ remote-endpoint = <&tmess_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ tpda@10cc4000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10cc4000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@2 {
+ reg = <2>;
+
+ tmess_tpda_in2: endpoint {
+ remote-endpoint = <&tmess_tpdm1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tmess_tpda_out: endpoint {
+ remote-endpoint = <&tmess_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10cc5000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10cc5000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ tmess_funnel_in0: endpoint {
+ remote-endpoint = <&tmess_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ tmess_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in2>;
+ };
+ };
+ };
+ };
+
+ funnel@10d04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10d04000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@6 {
+ reg = <6>;
+
+ ddr_funnel0_in6: endpoint {
+ remote-endpoint = <&ddr_funnel1_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_funnel0_out: endpoint {
+ remote-endpoint = <&dlct1_funnel_in5>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d08000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d08000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc0_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in0>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d09000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d09000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc1_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in1>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0a000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0a000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc2_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in2>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0b000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0b000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc3_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in3>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0c000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0c000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc4_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in4>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0d000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0d000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc5_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in5>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0e000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0e000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc6_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in6>;
+ };
+ };
+ };
+ };
+
+ tpdm@10d0f000 {
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
+ reg = <0x0 0x10d0f000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ qcom,cmb-element-bits = <32>;
+ qcom,cmb-msrs-num = <32>;
+
+ out-ports {
+ port {
+ llcc7_tpdm_out: endpoint {
+ remote-endpoint = <&llcc_tpda_in7>;
+ };
+ };
+ };
+ };
+
+ tpda@10d12000 {
+ compatible = "qcom,coresight-tpda", "arm,primecell";
+ reg = <0x0 0x10d12000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ llcc_tpda_in0: endpoint {
+ remote-endpoint = <&llcc0_tpdm_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ llcc_tpda_in1: endpoint {
+ remote-endpoint = <&llcc1_tpdm_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ llcc_tpda_in2: endpoint {
+ remote-endpoint = <&llcc2_tpdm_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ llcc_tpda_in3: endpoint {
+ remote-endpoint = <&llcc3_tpdm_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ llcc_tpda_in4: endpoint {
+ remote-endpoint = <&llcc4_tpdm_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+
+ llcc_tpda_in5: endpoint {
+ remote-endpoint = <&llcc5_tpdm_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+
+ llcc_tpda_in6: endpoint {
+ remote-endpoint = <&llcc6_tpdm_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+
+ llcc_tpda_in7: endpoint {
+ remote-endpoint = <&llcc7_tpdm_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ llcc_tpda_out: endpoint {
+ remote-endpoint = <&ddr_funnel1_in0>;
+ };
+ };
+ };
+ };
+
+ funnel@10d13000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0x0 0x10d13000 0x0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ in-ports {
+ port {
+ ddr_funnel1_in0: endpoint {
+ remote-endpoint = <&llcc_tpda_out>;
+ };
+ };
+ };
+
+ out-ports {
+ port {
+ ddr_funnel1_out: endpoint {
+ remote-endpoint = <&ddr_funnel0_in6>;
+ };
+ };
+ };
};
apps_smmu: iommu@15000000 {
@@ -6111,146 +8290,9 @@
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
};
- remoteproc_adsp: remoteproc@30000000 {
- compatible = "qcom,x1e80100-adsp-pas";
- reg = <0 0x30000000 0 0x100>;
-
- interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
- <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog",
- "fatal",
- "ready",
- "handover",
- "stop-ack";
-
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
-
- power-domains = <&rpmhpd RPMHPD_LCX>,
- <&rpmhpd RPMHPD_LMX>;
- power-domain-names = "lcx",
- "lmx";
-
- interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
- &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
-
- memory-region = <&adspslpi_mem>,
- <&q6_adsp_dtb_mem>;
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&smp2p_adsp_out 0>;
- qcom,smem-state-names = "stop";
-
- status = "disabled";
-
- glink-edge {
- interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP
- IRQ_TYPE_EDGE_RISING>;
- mboxes = <&ipcc IPCC_CLIENT_LPASS
- IPCC_MPROC_SIGNAL_GLINK_QMP>;
-
- label = "lpass";
- qcom,remote-pid = <2>;
-
- fastrpc {
- compatible = "qcom,fastrpc";
- qcom,glink-channels = "fastrpcglink-apps-dsp";
- label = "adsp";
- qcom,non-secure-domain;
- #address-cells = <1>;
- #size-cells = <0>;
-
- compute-cb@3 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <3>;
- iommus = <&apps_smmu 0x1003 0x80>,
- <&apps_smmu 0x1063 0x0>;
- dma-coherent;
- };
-
- compute-cb@4 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <4>;
- iommus = <&apps_smmu 0x1004 0x80>,
- <&apps_smmu 0x1064 0x0>;
- dma-coherent;
- };
-
- compute-cb@5 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <5>;
- iommus = <&apps_smmu 0x1005 0x80>,
- <&apps_smmu 0x1065 0x0>;
- dma-coherent;
- };
-
- compute-cb@6 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <6>;
- iommus = <&apps_smmu 0x1006 0x80>,
- <&apps_smmu 0x1066 0x0>;
- dma-coherent;
- };
-
- compute-cb@7 {
- compatible = "qcom,fastrpc-compute-cb";
- reg = <7>;
- iommus = <&apps_smmu 0x1007 0x80>,
- <&apps_smmu 0x1067 0x0>;
- dma-coherent;
- };
- };
-
- gpr {
- compatible = "qcom,gpr";
- qcom,glink-channels = "adsp_apps";
- qcom,domain = <GPR_DOMAIN_ID_ADSP>;
- qcom,intents = <512 20>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- q6apm: service@1 {
- compatible = "qcom,q6apm";
- reg = <GPR_APM_MODULE_IID>;
- #sound-dai-cells = <0>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6apmbedai: bedais {
- compatible = "qcom,q6apm-lpass-dais";
- #sound-dai-cells = <1>;
- };
-
- q6apmdai: dais {
- compatible = "qcom,q6apm-dais";
- iommus = <&apps_smmu 0x1001 0x80>,
- <&apps_smmu 0x1061 0x0>;
- };
- };
-
- q6prm: service@2 {
- compatible = "qcom,q6prm";
- reg = <GPR_PRM_MODULE_IID>;
- qcom,protection-domain = "avs/audio",
- "msm/adsp/audio_pd";
-
- q6prmcc: clock-controller {
- compatible = "qcom,q6prm-lpass-clocks";
- #clock-cells = <2>;
- };
- };
- };
- };
- };
-
remoteproc_cdsp: remoteproc@32300000 {
compatible = "qcom,x1e80100-cdsp-pas";
- reg = <0 0x32300000 0 0x1400000>;
+ reg = <0x0 0x32300000 0x0 0x10000>;
interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 97228a3cb99c..928635f2e76b 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -86,11 +86,17 @@ dtb-$(CONFIG_ARCH_R8A779F0) += r8a779f4-s4sk.dtb
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g0-white-hawk.dtb
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g0-white-hawk-cpu.dtb
-dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g0-white-hawk-ard-audio-da7212.dtbo
-r8a779g0-white-hawk-ard-audio-da7212-dtbs := r8a779g0-white-hawk.dtb r8a779g0-white-hawk-ard-audio-da7212.dtbo
+dtb-$(CONFIG_ARCH_R8A779G0) += white-hawk-ard-audio-da7212.dtbo
+r8a779g0-white-hawk-ard-audio-da7212-dtbs := r8a779g0-white-hawk.dtb white-hawk-ard-audio-da7212.dtbo
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g0-white-hawk-ard-audio-da7212.dtb
dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g2-white-hawk-single.dtb
+r8a779g2-white-hawk-single-ard-audio-da7212-dtbs := r8a779g2-white-hawk-single.dtb white-hawk-ard-audio-da7212.dtbo
+dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g2-white-hawk-single-ard-audio-da7212.dtb
+
+dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-white-hawk-single.dtb
+r8a779g3-white-hawk-single-ard-audio-da7212-dtbs := r8a779g3-white-hawk-single.dtb white-hawk-ard-audio-da7212.dtbo
+dtb-$(CONFIG_ARCH_R8A779G0) += r8a779g3-white-hawk-single-ard-audio-da7212.dtb
dtb-$(CONFIG_ARCH_R8A779H0) += r8a779h0-gray-hawk-single.dtb
@@ -140,6 +146,8 @@ dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc.dtb
dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb
+dtb-$(CONFIG_ARCH_R9A09G047) += r9a09g047e57-smarc.dtb
+
dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h44-rzv2h-evk.dtb
dtb-$(CONFIG_ARCH_RCAR_GEN3) += draak-ebisu-panel-aa104xd12.dtbo
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi
index e11bf9ace776..2a8537e13873 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi
@@ -5,6 +5,121 @@
* Copyright (C) 2021 Glider bv
*/
+/ {
+ aliases {
+ ethernet1 = &avb1;
+ ethernet2 = &avb2;
+ ethernet3 = &avb3;
+ ethernet4 = &avb4;
+ ethernet5 = &avb5;
+ };
+};
+
+&avb1 {
+ pinctrl-0 = <&avb1_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb1_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio5 15 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb1_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <7>;
+ interrupts-extended = <&gpio5 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb2 {
+ pinctrl-0 = <&avb2_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb2_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb2_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <7>;
+ interrupts-extended = <&gpio6 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb3 {
+ pinctrl-0 = <&avb3_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb3_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb3_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <7>;
+ interrupts-extended = <&gpio7 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb4 {
+ pinctrl-0 = <&avb4_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb4_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio8 15 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb4_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <7>;
+ interrupts-extended = <&gpio8 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb5 {
+ pinctrl-0 = <&avb5_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb5_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio9 15 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb5_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <7>;
+ interrupts-extended = <&gpio9 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
&i2c0 {
eeprom@53 {
compatible = "rohm,br24g01", "atmel,24c01";
@@ -13,3 +128,130 @@
pagesize = <8>;
};
};
+
+&pfc {
+ avb1_pins: avb1 {
+ mux {
+ groups = "avb1_link", "avb1_mdio", "avb1_rgmii",
+ "avb1_txcrefclk";
+ function = "avb1";
+ };
+
+ link {
+ groups = "avb1_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb1_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb1_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
+ avb2_pins: avb2 {
+ mux {
+ groups = "avb2_link", "avb2_mdio", "avb2_rgmii",
+ "avb2_txcrefclk";
+ function = "avb2";
+ };
+
+ link {
+ groups = "avb2_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb2_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb2_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
+ avb3_pins: avb3 {
+ mux {
+ groups = "avb3_link", "avb3_mdio", "avb3_rgmii",
+ "avb3_txcrefclk";
+ function = "avb3";
+ };
+
+ link {
+ groups = "avb3_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb3_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb3_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
+ avb4_pins: avb4 {
+ mux {
+ groups = "avb4_link", "avb4_mdio", "avb4_rgmii",
+ "avb4_txcrefclk";
+ function = "avb4";
+ };
+
+ link {
+ groups = "avb4_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb4_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb4_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
+ avb5_pins: avb5 {
+ mux {
+ groups = "avb5_link", "avb5_mdio", "avb5_rgmii",
+ "avb5_txcrefclk";
+ function = "avb5";
+ };
+
+ link {
+ groups = "avb5_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb5_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb5_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index 7156b1a542e8..fe6d97859e4a 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -765,8 +765,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_ds1 1>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -814,8 +812,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_ds1 2>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -863,8 +859,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_ds1 3>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -912,8 +906,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_ds1 4>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -961,8 +953,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_ds1 11>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 61c6b8022ffd..104f740d20d3 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -2453,6 +2453,46 @@
};
};
+ fcpvx0: fcp@fedb0000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1100>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP0>;
+ resets = <&cpg 1100>;
+ iommus = <&ipmmu_vi1 24>;
+ };
+
+ fcpvx1: fcp@fedb8000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb8000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1101>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP1>;
+ resets = <&cpg 1101>;
+ iommus = <&ipmmu_vi1 25>;
+ };
+
+ vspx0: vsp@fedd0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd0000 0 0x8000>;
+ interrupts = <GIC_SPI 556 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1028>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP0>;
+ resets = <&cpg 1028>;
+
+ renesas,fcp = <&fcpvx0>;
+ };
+
+ vspx1: vsp@fedd8000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd8000 0 0x8000>;
+ interrupts = <GIC_SPI 557 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1029>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP1>;
+ resets = <&cpg 1029>;
+
+ renesas,fcp = <&fcpvx1>;
+ };
+
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
index 0062362b0ba0..48befde38937 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts
@@ -7,70 +7,10 @@
/dts-v1/;
#include "r8a779g2.dtsi"
-#include "white-hawk-cpu-common.dtsi"
-#include "white-hawk-common.dtsi"
+#include "white-hawk-single.dtsi"
/ {
model = "Renesas White Hawk Single board based on r8a779g2";
compatible = "renesas,white-hawk-single", "renesas,r8a779g2",
"renesas,r8a779g0";
};
-
-&hscif0 {
- uart-has-rtscts;
-};
-
-&hscif0_pins {
- groups = "hscif0_data", "hscif0_ctrl";
- function = "hscif0";
-};
-
-&pfc {
- tsn0_pins: tsn0 {
- mux {
- groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii",
- "tsn0_txcrefclk";
- function = "tsn0";
- };
-
- link {
- groups = "tsn0_link";
- bias-disable;
- };
-
- mdio {
- groups = "tsn0_mdio";
- drive-strength = <24>;
- bias-disable;
- };
-
- rgmii {
- groups = "tsn0_rgmii";
- drive-strength = <24>;
- bias-disable;
- };
- };
-};
-
-&tsn0 {
- pinctrl-0 = <&tsn0_pins>;
- pinctrl-names = "default";
- phy-mode = "rgmii";
- phy-handle = <&phy3>;
- status = "okay";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
- reset-post-delay-us = <4000>;
-
- phy3: ethernet-phy@0 {
- compatible = "ethernet-phy-id002b.0980",
- "ethernet-phy-ieee802.3-c22";
- reg = <0>;
- interrupts-extended = <&gpio4 3 IRQ_TYPE_LEVEL_LOW>;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g3-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g3-white-hawk-single.dts
new file mode 100644
index 000000000000..cd466d858854
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779g3-white-hawk-single.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the R-Car V4H ES3.0 White Hawk Single board
+ *
+ * Copyright (C) 2024 Glider bv
+ */
+
+/dts-v1/;
+#include "r8a779g3.dtsi"
+#include "white-hawk-single.dtsi"
+
+/ {
+ model = "Renesas White Hawk Single board based on r8a779g3";
+ compatible = "renesas,white-hawk-single", "renesas,r8a779g3",
+ "renesas,r8a779g0";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g3.dtsi b/arch/arm64/boot/dts/renesas/r8a779g3.dtsi
new file mode 100644
index 000000000000..0295858a0260
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779g3.dtsi
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the R-Car V4H (R8A779G3) SoC
+ *
+ * Copyright (C) 2024 Glider bv
+ */
+
+#include "r8a779g0.dtsi"
+
+/ {
+ compatible = "renesas,r8a779g3", "renesas,r8a779g0";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
index 58eabcc7e0e0..18fd52f55de5 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
@@ -30,6 +30,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/media/video-interfaces.h>
#include "r8a779h0.dtsi"
@@ -59,6 +60,12 @@
stdout-path = "serial0:921600n8";
};
+ sn65dsi86_refclk: clk-x6 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
+
keys {
compatible = "gpio-keys";
@@ -132,22 +139,43 @@
#clock-cells = <0>;
};
+ mini-dp-con {
+ compatible = "dp-connector";
+ label = "CN5";
+ type = "mini";
+
+ port {
+ mini_dp_con_in: endpoint {
+ remote-endpoint = <&sn65dsi86_out0>;
+ };
+ };
+ };
+
+ reg_1p2v: regulator-1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
reg_1p8v: regulator-1p8v {
- compatible = "regulator-fixed";
- regulator-name = "fixed-1.8V";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
};
reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- regulator-name = "fixed-3.3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- regulator-always-on;
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
};
sound_mux: sound-mux {
@@ -205,6 +233,65 @@
};
};
+&dsi0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ dsi0_out: endpoint {
+ remote-endpoint = <&sn65dsi86_in0>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+ };
+};
+
+&du {
+ status = "okay";
+};
+
+&csi40 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ csi40_in: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&max96724_out0>;
+ };
+ };
+ };
+};
+
+&csi41 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ csi41_in: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&max96724_out1>;
+ };
+ };
+ };
+};
+
&extal_clk {
clock-frequency = <16666666>;
};
@@ -255,6 +342,20 @@
#interrupt-cells = <2>;
};
+ io_expander_b: gpio@21 {
+ compatible = "onnn,pca9654";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ io_expander_c: gpio@22 {
+ compatible = "onnn,pca9654";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
eeprom@50 {
compatible = "rohm,br24g01", "atmel,24c01";
label = "cpu-board";
@@ -284,6 +385,97 @@
};
};
+&i2c1 {
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ bridge@2c {
+ pinctrl-0 = <&irq0_pins>;
+ pinctrl-names = "default";
+
+ compatible = "ti,sn65dsi86";
+ reg = <0x2c>;
+
+ clocks = <&sn65dsi86_refclk>;
+ clock-names = "refclk";
+
+ interrupts-extended = <&intc_ex 0 IRQ_TYPE_LEVEL_HIGH>;
+
+ enable-gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+
+ vccio-supply = <&reg_1p8v>;
+ vpll-supply = <&reg_1p8v>;
+ vcca-supply = <&reg_1p2v>;
+ vcc-supply = <&reg_1p2v>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ sn65dsi86_in0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ sn65dsi86_out0: endpoint {
+ remote-endpoint = <&mini_dp_con_in>;
+ };
+ };
+ };
+ };
+
+ gmsl0: gmsl-deserializer@4e {
+ compatible = "maxim,max96724";
+ reg = <0x4e>;
+ enable-gpios = <&io_expander_b 0 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ max96724_out0: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csi40_in>;
+ };
+ };
+ };
+ };
+
+ gmsl1: gmsl-deserializer@4f {
+ compatible = "maxim,max96724";
+ reg = <0x4f>;
+ enable-gpios = <&io_expander_c 0 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ max96724_out1: endpoint {
+ bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csi41_in>;
+ };
+ };
+ };
+ };
+};
+
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
@@ -307,6 +499,14 @@
};
};
+&isp0 {
+ status = "okay";
+};
+
+&isp1 {
+ status = "okay";
+};
+
&mmc0 {
pinctrl-0 = <&mmc_pins>;
pinctrl-1 = <&mmc_pins>;
@@ -388,11 +588,21 @@
function = "i2c0";
};
+ i2c1_pins: i2c1 {
+ groups = "i2c1";
+ function = "i2c1";
+ };
+
i2c3_pins: i2c3 {
groups = "i2c3";
function = "i2c3";
};
+ irq0_pins: irq0 {
+ groups = "intc_ex_irq0_a";
+ function = "intc_ex";
+ };
+
keys_pins: keys {
pins = "GP_5_0", "GP_5_1", "GP_5_2";
bias-pull-up;
@@ -494,3 +704,67 @@
&scif_clk2 {
clock-frequency = <24000000>;
};
+
+&vin00 {
+ status = "okay";
+};
+
+&vin01 {
+ status = "okay";
+};
+
+&vin02 {
+ status = "okay";
+};
+
+&vin03 {
+ status = "okay";
+};
+
+&vin04 {
+ status = "okay";
+};
+
+&vin05 {
+ status = "okay";
+};
+
+&vin06 {
+ status = "okay";
+};
+
+&vin07 {
+ status = "okay";
+};
+
+&vin08 {
+ status = "okay";
+};
+
+&vin09 {
+ status = "okay";
+};
+
+&vin10 {
+ status = "okay";
+};
+
+&vin11 {
+ status = "okay";
+};
+
+&vin12 {
+ status = "okay";
+};
+
+&vin13 {
+ status = "okay";
+};
+
+&vin14 {
+ status = "okay";
+};
+
+&vin15 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index facfff4b9cdc..d0c01c0fdda2 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -1900,6 +1900,50 @@
};
};
+ fcpvd0: fcp@fea10000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfea10000 0 0x200>;
+ clocks = <&cpg CPG_MOD 508>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 508>;
+ };
+
+ vspd0: vsp@fea20000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea20000 0 0x8000>;
+ interrupts = <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 830>;
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 830>;
+ renesas,fcp = <&fcpvd0>;
+ };
+
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a779h0";
+ reg = <0 0xfeb00000 0 0x40000>;
+ interrupts = <GIC_SPI 523 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 411>;
+ clock-names = "du.0";
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 411>;
+ reset-names = "du.0";
+ renesas,vsps = <&vspd0 0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_dsi0: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
isp0: isp@fed00000 {
compatible = "renesas,r8a779h0-isp",
"renesas,rcar-gen4-isp";
@@ -2068,6 +2112,35 @@
};
};
+ dsi0: dsi-encoder@fed80000 {
+ compatible = "renesas,r8a779h0-dsi-csi2-tx";
+ reg = <0 0xfed80000 0 0x10000>;
+ clocks = <&cpg CPG_MOD 415>,
+ <&cpg CPG_CORE R8A779H0_CLK_DSIEXT>,
+ <&cpg CPG_CORE R8A779H0_CLK_DSIREF>;
+ clock-names = "fck", "dsi", "pll";
+ power-domains = <&sysc R8A779H0_PD_C4>;
+ resets = <&cpg 415>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&du_out_dsi0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
index be8a0a768c65..a9b98db9ef95 100644
--- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
@@ -14,6 +14,20 @@
#address-cells = <2>;
#size-cells = <2>;
+ audio_clk1: audio1-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by boards that provide it. */
+ clock-frequency = <0>;
+ };
+
+ audio_clk2: audio2-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by boards that provide it. */
+ clock-frequency = <0>;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -73,6 +87,96 @@
status = "disabled";
};
+ scif1: serial@1004bc00 {
+ compatible = "renesas,scif-r9a08g045", "renesas,scif-r9a07g044";
+ reg = <0 0x1004bc00 0 0x400>;
+ interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A08G045_SCIF1_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_SCIF1_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
+ scif2: serial@1004c000 {
+ compatible = "renesas,scif-r9a08g045", "renesas,scif-r9a07g044";
+ reg = <0 0x1004c000 0 0x400>;
+ interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A08G045_SCIF2_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_SCIF2_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
+ scif3: serial@1004c400 {
+ compatible = "renesas,scif-r9a08g045", "renesas,scif-r9a07g044";
+ reg = <0 0x1004c400 0 0x400>;
+ interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A08G045_SCIF3_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_SCIF3_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
+ scif4: serial@1004c800 {
+ compatible = "renesas,scif-r9a08g045", "renesas,scif-r9a07g044";
+ reg = <0 0x1004c800 0 0x400>;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A08G045_SCIF4_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_SCIF4_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
+ scif5: serial@1004e000 {
+ compatible = "renesas,scif-r9a08g045", "renesas,scif-r9a07g044";
+ reg = <0 0x1004e000 0 0x400>;
+ interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A08G045_SCIF5_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A08G045_SCIF5_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
rtc: rtc@1004ec00 {
compatible = "renesas,r9a08g045-rtca3", "renesas,rz-rtca3";
reg = <0 0x1004ec00 0 0x400>;
@@ -87,6 +191,59 @@
status = "disabled";
};
+ adc: adc@10058000 {
+ compatible = "renesas,r9a08g045-adc";
+ reg = <0 0x10058000 0 0x1000>;
+ interrupts = <GIC_SPI 312 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD R9A08G045_ADC_ADCLK>,
+ <&cpg CPG_MOD R9A08G045_ADC_PCLK>;
+ clock-names = "adclk", "pclk";
+ resets = <&cpg R9A08G045_ADC_PRESETN>,
+ <&cpg R9A08G045_ADC_ADRST_N>;
+ reset-names = "presetn", "adrst-n";
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ status = "disabled";
+
+ channel@0 {
+ reg = <0>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ };
+
+ channel@2 {
+ reg = <2>;
+ };
+
+ channel@3 {
+ reg = <3>;
+ };
+
+ channel@4 {
+ reg = <4>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ };
+
+ channel@8 {
+ reg = <8>;
+ };
+ };
+
vbattb: clock-controller@1005c000 {
compatible = "renesas,r9a08g045-vbattb";
reg = <0 0x1005c000 0 0x1000>;
@@ -187,6 +344,86 @@
status = "disabled";
};
+ ssi0: ssi@100a8000 {
+ compatible = "renesas,r9a08g045-ssi",
+ "renesas,rz-ssi";
+ reg = <0 0x100a8000 0 0x400>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 242 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
+ clocks = <&cpg CPG_MOD R9A08G045_SSI0_PCLK2>,
+ <&cpg CPG_MOD R9A08G045_SSI0_PCLK_SFR>,
+ <&audio_clk1>, <&audio_clk2>;
+ clock-names = "ssi", "ssi_sfr", "audio_clk1", "audio_clk2";
+ resets = <&cpg R9A08G045_SSI0_RST_M2_REG>;
+ dmas = <&dmac 0x2665>, <&dmac 0x2666>;
+ dma-names = "tx", "rx";
+ power-domains = <&cpg>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ ssi1: ssi@100a8400 {
+ compatible = "renesas,r9a08g045-ssi",
+ "renesas,rz-ssi";
+ reg = <0 0x100a8400 0 0x400>;
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
+ clocks = <&cpg CPG_MOD R9A08G045_SSI1_PCLK2>,
+ <&cpg CPG_MOD R9A08G045_SSI1_PCLK_SFR>,
+ <&audio_clk1>, <&audio_clk2>;
+ clock-names = "ssi", "ssi_sfr", "audio_clk1", "audio_clk2";
+ resets = <&cpg R9A08G045_SSI1_RST_M2_REG>;
+ dmas = <&dmac 0x2669>, <&dmac 0x266a>;
+ dma-names = "tx", "rx";
+ power-domains = <&cpg>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ ssi2: ssi@100a8800 {
+ compatible = "renesas,r9a08g045-ssi",
+ "renesas,rz-ssi";
+ reg = <0 0x100a8800 0 0x400>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
+ clocks = <&cpg CPG_MOD R9A08G045_SSI2_PCLK2>,
+ <&cpg CPG_MOD R9A08G045_SSI2_PCLK_SFR>,
+ <&audio_clk1>, <&audio_clk2>;
+ clock-names = "ssi", "ssi_sfr", "audio_clk1", "audio_clk2";
+ resets = <&cpg R9A08G045_SSI2_RST_M2_REG>;
+ dmas = <&dmac 0x266d>, <&dmac 0x266e>;
+ dma-names = "tx", "rx";
+ power-domains = <&cpg>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@100a8c00 {
+ compatible = "renesas,r9a08g045-ssi",
+ "renesas,rz-ssi";
+ reg = <0 0x100a8c00 0 0x400>;
+ interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
+ clocks = <&cpg CPG_MOD R9A08G045_SSI3_PCLK2>,
+ <&cpg CPG_MOD R9A08G045_SSI3_PCLK_SFR>,
+ <&audio_clk1>, <&audio_clk2>;
+ clock-names = "ssi", "ssi_sfr", "audio_clk1", "audio_clk2";
+ resets = <&cpg R9A08G045_SSI3_RST_M2_REG>;
+ dmas = <&dmac 0x2671>, <&dmac 0x2672>;
+ dma-names = "tx", "rx";
+ power-domains = <&cpg>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
cpg: clock-controller@11010000 {
compatible = "renesas,r9a08g045-cpg";
reg = <0 0x11010000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047.dtsi b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
new file mode 100644
index 000000000000..200e9ea89193
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G3E SoC
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "renesas,r9a09g047";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ audio_extal_clk: audio-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ /*
+ * The default cluster table is based on the assumption that the PLLCA55 clock
+ * frequency is set to 1.7GHz. The PLLCA55 clock frequency can be set to
+ * 1.7/1.6/1.5/1.1 GHz based on the BOOTPLLCA_0/1 pins (and additionally can be
+ * clocked to 1.8GHz as well). The table below should be overridden in the board
+ * DTS based on the PLLCA55 clock frequency.
+ */
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+
+ opp-1700000000 {
+ opp-hz = /bits/ 64 <1700000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-850000000 {
+ opp-hz = /bits/ 64 <850000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-425000000 {
+ opp-hz = /bits/ 64 <425000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-212500000 {
+ opp-hz = /bits/ 64 <212500000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ reg = <0>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G047_CA55_0_CORECLK0>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G047_CA55_0_CORECLK1>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu2: cpu@200 {
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G047_CA55_0_CORECLK2>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu3: cpu@300 {
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ clocks = <&cpg CPG_CORE R9A09G047_CA55_0_CORECLK3>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ L3_CA55: cache-controller-0 {
+ compatible = "cache";
+ cache-unified;
+ cache-size = <0x100000>;
+ cache-level = <3>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ qextal_clk: qextal-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ rtxin_clk: rtxin-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ pinctrl: pinctrl@10410000 {
+ compatible = "renesas,r9a09g047-pinctrl";
+ reg = <0 0x10410000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G047_IOTOP_0_SHCLK>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 232>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ power-domains = <&cpg>;
+ resets = <&cpg 0xa5>, <&cpg 0xa6>;
+ };
+
+ cpg: clock-controller@10420000 {
+ compatible = "renesas,r9a09g047-cpg";
+ reg = <0 0x10420000 0 0x10000>;
+ clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>;
+ clock-names = "audio_extal", "rtxin", "qextal";
+ #clock-cells = <2>;
+ #reset-cells = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ scif0: serial@11c01400 {
+ compatible = "renesas,scif-r9a09g047", "renesas,scif-r9a09g057";
+ reg = <0 0x11c01400 0 0x400>;
+ interrupts = <GIC_SPI 529 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 530 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 531 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 536 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 537 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eri", "rxi", "txi", "bri", "dri",
+ "tei", "tei-dri", "rxi-edge", "txi-edge";
+ clocks = <&cpg CPG_MOD 0x8f>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg 0x95>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@14400400 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14400400 0 0x400>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 507 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 506 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x94>;
+ resets = <&cpg 0x98>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@14400800 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14400800 0 0x400>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 509 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 508 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x95>;
+ resets = <&cpg 0x99>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@14400c00 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14400c00 0 0x400>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 511 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 510 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x96>;
+ resets = <&cpg 0x9a>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@14401000 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14401000 0 0x400>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 513 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 512 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x97>;
+ resets = <&cpg 0x9b>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@14401400 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14401400 0 0x400>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 515 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 514 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x98>;
+ resets = <&cpg 0x9c>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@14401800 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14401800 0 0x400>;
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 517 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 516 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x99>;
+ resets = <&cpg 0x9d>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@14401c00 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14401c00 0 0x400>;
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 519 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 518 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x9a>;
+ resets = <&cpg 0x9e>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@14402000 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x14402000 0 0x400>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 521 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 520 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x9b>;
+ resets = <&cpg 0x9f>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@11c01000 {
+ compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
+ reg = <0 0x11c01000 0 0x400>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 523 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 522 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ "naki", "ali", "tmoi";
+ clocks = <&cpg CPG_MOD 0x93>;
+ resets = <&cpg 0xa0>;
+ power-domains = <&cpg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@14900000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x14900000 0 0x20000>,
+ <0x0 0x14940000 0 0x80000>;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt";
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047e37.dtsi b/arch/arm64/boot/dts/renesas/r9a09g047e37.dtsi
new file mode 100644
index 000000000000..e50d9159e832
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g047e37.dtsi
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G3E R9A09G047E37 SoC specific parts
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r9a09g047.dtsi"
+
+/ {
+ compatible = "renesas,r9a09g047e37", "renesas,r9a09g047";
+
+ cpus {
+ /delete-node/ cpu@200;
+ /delete-node/ cpu@300;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
new file mode 100644
index 000000000000..c063d47e2952
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G3E SMARC EVK board
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h>
+#include "r9a09g047e57.dtsi"
+#include "rzg3e-smarc-som.dtsi"
+#include "renesas-smarc2.dtsi"
+
+/ {
+ model = "Renesas SMARC EVK version 2 based on r9a09g047e57";
+ compatible = "renesas,smarc2-evk", "renesas,rzg3e-smarcm",
+ "renesas,r9a09g047e57", "renesas,r9a09g047";
+};
+
+&pinctrl {
+ scif_pins: scif {
+ pins = "SCIF_TXD", "SCIF_RXD";
+ renesas,output-impedance = <1>;
+ };
+};
+
+&scif0 {
+ pinctrl-0 = <&scif_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047e57.dtsi b/arch/arm64/boot/dts/renesas/r9a09g047e57.dtsi
new file mode 100644
index 000000000000..98a5faebd47a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g047e57.dtsi
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G3E R9A09G047E57 SoC specific parts
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r9a09g047.dtsi"
+
+/ {
+ compatible = "renesas,r9a09g047e57", "renesas,r9a09g047";
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
index 4703da8e9cff..0b705c987b6c 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
+++ b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h>
#include <dt-bindings/gpio/gpio.h>
#include "r9a09g057.dtsi"
@@ -56,7 +56,7 @@
vqmmc_sdhi1: regulator-vccq-sdhi1 {
compatible = "regulator-gpio";
regulator-name = "SDHI1 VccQ";
- gpios = <&pinctrl RZG2L_GPIO(10, 2) GPIO_ACTIVE_HIGH>;
+ gpios = <&pinctrl RZV2H_GPIO(A, 2) GPIO_ACTIVE_HIGH>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
gpios-states = <0>;
@@ -158,38 +158,38 @@
&pinctrl {
i2c0_pins: i2c0 {
- pinmux = <RZG2L_PORT_PINMUX(3, 0, 1)>, /* I2C0_SDA */
- <RZG2L_PORT_PINMUX(3, 1, 1)>; /* I2C0_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(3, 0, 1)>, /* I2C0_SDA */
+ <RZV2H_PORT_PINMUX(3, 1, 1)>; /* I2C0_SCL */
};
i2c1_pins: i2c1 {
- pinmux = <RZG2L_PORT_PINMUX(3, 2, 1)>, /* I2C1_SDA */
- <RZG2L_PORT_PINMUX(3, 3, 1)>; /* I2C1_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(3, 2, 1)>, /* I2C1_SDA */
+ <RZV2H_PORT_PINMUX(3, 3, 1)>; /* I2C1_SCL */
};
i2c2_pins: i2c2 {
- pinmux = <RZG2L_PORT_PINMUX(2, 0, 4)>, /* I2C2_SDA */
- <RZG2L_PORT_PINMUX(2, 1, 4)>; /* I2C2_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(2, 0, 4)>, /* I2C2_SDA */
+ <RZV2H_PORT_PINMUX(2, 1, 4)>; /* I2C2_SCL */
};
i2c3_pins: i2c3 {
- pinmux = <RZG2L_PORT_PINMUX(3, 6, 1)>, /* I2C3_SDA */
- <RZG2L_PORT_PINMUX(3, 7, 1)>; /* I2C3_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(3, 6, 1)>, /* I2C3_SDA */
+ <RZV2H_PORT_PINMUX(3, 7, 1)>; /* I2C3_SCL */
};
i2c6_pins: i2c6 {
- pinmux = <RZG2L_PORT_PINMUX(4, 4, 1)>, /* I2C6_SDA */
- <RZG2L_PORT_PINMUX(4, 5, 1)>; /* I2C6_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(4, 4, 1)>, /* I2C6_SDA */
+ <RZV2H_PORT_PINMUX(4, 5, 1)>; /* I2C6_SCL */
};
i2c7_pins: i2c7 {
- pinmux = <RZG2L_PORT_PINMUX(4, 6, 1)>, /* I2C7_SDA */
- <RZG2L_PORT_PINMUX(4, 7, 1)>; /* I2C7_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(4, 6, 1)>, /* I2C7_SDA */
+ <RZV2H_PORT_PINMUX(4, 7, 1)>; /* I2C7_SCL */
};
i2c8_pins: i2c8 {
- pinmux = <RZG2L_PORT_PINMUX(0, 6, 1)>, /* I2C8_SDA */
- <RZG2L_PORT_PINMUX(0, 7, 1)>; /* I2C8_SCL */
+ pinmux = <RZV2H_PORT_PINMUX(0, 6, 1)>, /* I2C8_SDA */
+ <RZV2H_PORT_PINMUX(0, 7, 1)>; /* I2C8_SCL */
};
scif_pins: scif {
@@ -199,7 +199,7 @@
sd1-pwr-en-hog {
gpio-hog;
- gpios = <RZG2L_GPIO(10, 3) GPIO_ACTIVE_HIGH>;
+ gpios = <RZV2H_GPIO(A, 3) GPIO_ACTIVE_HIGH>;
output-high;
line-name = "sd1_pwr_en";
};
@@ -219,7 +219,7 @@
};
sd1_cd {
- pinmux = <RZG2L_PORT_PINMUX(9, 4, 14)>; /* SD1_CD */
+ pinmux = <RZV2H_PORT_PINMUX(9, 4, 14)>; /* SD1_CD */
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi b/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi
new file mode 100644
index 000000000000..e378d55e6e9b
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/renesas-smarc2.dtsi
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ SMARC Carrier-II Board.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/ {
+ model = "Renesas RZ SMARC Carrier-II Board";
+ compatible = "renesas,smarc2-evk";
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial3:115200n8";
+ };
+
+ aliases {
+ serial3 = &scif0;
+ };
+};
+
+&scif0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
new file mode 100644
index 000000000000..6b583ae2ac52
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the R9A09G047E57 SMARC SoM board.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+/ {
+ compatible = "renesas,rzg3e-smarcm", "renesas,r9a09g047e57", "renesas,r9a09g047";
+
+ memory@48000000 {
+ device_type = "memory";
+ /* First 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0xf8000000>;
+ };
+};
+
+&audio_extal_clk {
+ clock-frequency = <48000000>;
+};
+
+&qextal_clk {
+ clock-frequency = <24000000>;
+};
+
+&rtxin_clk {
+ clock-frequency = <32768>;
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
index 2ed01d391554..ef12c1c462a7 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
@@ -43,11 +43,6 @@
#endif
};
- chosen {
- bootargs = "ignore_loglevel";
- stdout-path = "serial0:115200n8";
- };
-
memory@48000000 {
device_type = "memory";
/* First 128MB is reserved for secure area. */
@@ -63,7 +58,6 @@
enable-active-high;
};
-#if SW_CONFIG2 == SW_ON
vccq_sdhi0: regulator1 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -73,8 +67,8 @@
gpios-states = <1>;
states = <3300000 1>, <1800000 0>;
};
-#else
- reg_1p8v: regulator1 {
+
+ reg_1p8v: regulator2 {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
@@ -82,9 +76,17 @@
regulator-boot-on;
regulator-always-on;
};
-#endif
- vcc_sdhi2: regulator2 {
+ reg_3p3v: regulator3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_sdhi2: regulator4 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
regulator-min-microvolt = <3300000>;
@@ -92,6 +94,16 @@
gpios = <&pinctrl RZG2L_GPIO(8, 1) GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ x3_clk: x3-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+};
+
+&adc {
+ status = "okay";
};
#if SW_CONFIG3 == SW_ON
@@ -152,6 +164,30 @@
&i2c1 {
status = "okay";
+
+ versa3: clock-generator@68 {
+ compatible = "renesas,5l35023";
+ reg = <0x68>;
+ clocks = <&x3_clk>;
+ #clock-cells = <1>;
+ assigned-clocks = <&versa3 0>,
+ <&versa3 1>,
+ <&versa3 2>,
+ <&versa3 3>,
+ <&versa3 4>,
+ <&versa3 5>;
+ assigned-clock-rates = <24000000>,
+ <12288000>,
+ <11289600>,
+ <25000000>,
+ <100000000>,
+ <100000000>;
+ renesas,settings = [
+ 80 00 11 19 4c 42 dc 2f 06 7d 20 1a 5f 1e f2 27
+ 00 40 00 00 00 00 00 00 06 0c 19 02 3f f0 90 86
+ a0 80 30 30 9c
+ ];
+ };
};
#if SW_CONFIG2 == SW_ON
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
index 4509151344c4..81b4ffd1417d 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
@@ -12,10 +12,15 @@
/ {
aliases {
i2c0 = &i2c0;
- serial0 = &scif0;
+ serial3 = &scif0;
mmc1 = &sdhi1;
};
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial3:115200n8";
+ };
+
keys {
compatible = "gpio-keys";
@@ -44,6 +49,23 @@
};
};
+ snd_rzg3s: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&cpu_dai>;
+ simple-audio-card,frame-master = <&cpu_dai>;
+ simple-audio-card,mclk-fs = <256>;
+
+ cpu_dai: simple-audio-card,cpu {
+ sound-dai = <&ssi3>;
+ };
+
+ codec_dai: simple-audio-card,codec {
+ sound-dai = <&da7212>;
+ clocks = <&versa3 1>;
+ };
+ };
+
vcc_sdhi1: regulator-vcc-sdhi1 {
compatible = "regulator-fixed";
regulator-name = "SDHI1 Vcc";
@@ -64,13 +86,56 @@
};
};
+&audio_clk2 {
+ clock-frequency = <12288000>;
+};
+
&i2c0 {
status = "okay";
clock-frequency = <1000000>;
+
+ da7212: codec@1a {
+ compatible = "dlg,da7212";
+ reg = <0x1a>;
+
+ clocks = <&versa3 1>;
+ clock-names = "mclk";
+
+ #sound-dai-cells = <0>;
+
+ dlg,micbias1-lvl = <2500>;
+ dlg,micbias2-lvl = <2500>;
+ dlg,dmic-data-sel = "lrise_rfall";
+ dlg,dmic-samplephase = "between_clkedge";
+ dlg,dmic-clkrate = <3000000>;
+
+ VDDA-supply = <&reg_1p8v>;
+ VDDSP-supply = <&reg_3p3v>;
+ VDDMIC-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_1p8v>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ power-monitor@44 {
+ compatible = "renesas,isl28022";
+ reg = <0x44>;
+ shunt-resistor-micro-ohms = <8000>;
+ renesas,average-samples = <32>;
+ };
};
&pinctrl {
+ audio_clock_pins: audio-clock {
+ pins = "AUDIO_CLK1", "AUDIO_CLK2";
+ input-enable;
+ };
+
key-1-gpio-hog {
gpio-hog;
gpios = <RZG2L_GPIO(18, 0) GPIO_ACTIVE_LOW>;
@@ -128,6 +193,13 @@
pinmux = <RZG2L_PORT_PINMUX(0, 2, 1)>; /* SD1_CD */
};
};
+
+ ssi3_pins: ssi3 {
+ pinmux = <RZG2L_PORT_PINMUX(18, 2, 8)>, /* BCK */
+ <RZG2L_PORT_PINMUX(18, 3, 8)>, /* RCK */
+ <RZG2L_PORT_PINMUX(18, 4, 8)>, /* TXD */
+ <RZG2L_PORT_PINMUX(18, 5, 8)>; /* RXD */
+ };
};
&scif0 {
@@ -148,3 +220,12 @@
max-frequency = <125000000>;
status = "okay";
};
+
+&ssi3 {
+ clocks = <&cpg CPG_MOD R9A08G045_SSI3_PCLK2>,
+ <&cpg CPG_MOD R9A08G045_SSI3_PCLK_SFR>,
+ <&versa3 2>, <&audio_clk2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ssi3_pins>, <&audio_clock_pins>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index 5c211ed83049..2a157d1efb3d 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -4,8 +4,24 @@
*
* Copyright (C) 2017 Renesas Electronics Corp.
* Copyright (C) 2017 Cogent Embedded, Inc.
+ *
+ * Sample Audio settings:
+ *
+ * > amixer set "DVC Out" 1%
+ * > amixer set "DVC In" 20%
+ *
+ * // if you use xxxx-mix+split.dtsi
+ * > amixer -D hw:1 set "pcm3168a DAC1" 50%
+ * > amixer -D hw:1 set "pcm3168a DAC2" 50%
+ * > amixer -D hw:1 set "pcm3168a DAC3" 50%
+ * > amixer -D hw:1 set "pcm3168a DAC4" 50%
+ *
+ * // else
+ * > amixer -D hw:1 set "DAC1" 50%
+ * > amixer -D hw:1 set "DAC2" 50%
+ * > amixer -D hw:1 set "DAC3" 50%
+ * > amixer -D hw:1 set "DAC4" 50%
*/
-
/ {
aliases {
serial1 = &hscif0;
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index cb11abba7bef..0c58d816c375 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -4,6 +4,11 @@
*
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * Sample Audio settings:
+ *
+ * > amixer set "DVC Out" 1%
+ * > amixer set "DVC In" 20%
*/
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso b/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso
index e6cf304c77ee..c27b9b3d4e5f 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-ard-audio-da7212.dtso
+++ b/arch/arm64/boot/dts/renesas/white-hawk-ard-audio-da7212.dtso
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Device Tree Source for the White Hawk board with ARD-AUDIO-DA7212 Board
+ * Device Tree Source for White Hawk (Single) board with ARD-AUDIO-DA7212 board
*
* You can find and buy "ARD-AUDIO-DA7212" at Digi-Key
*
@@ -27,12 +27,12 @@
* +----------------------------+
* |Breakout board |
* | | +---------------+
- * |CN34 (I2C CN) | |J1 |
+ * |CN(30)34 (I2C CN) | |J1 |
* | I2C0_SCL pin3 |<----->| pin20 SCL |
* | I2C0_SDA pin5 |<----->| pin18 SDA |
* | | +---------------+
* | | +-----------------------+
- * |CN4 (Power) | |J7 |
+ * |CN(300)4 (Power) | |J7 |
* | 3v3 (v) pin9 |<----->| pin4 / pin8 3.3v |
* | GND (v) pin3 / pin4 |<----->| pin12 / pin14 GND |
* +----------------------------+ +-----------------------+
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
index 3006b0a64f41..9017c4475a7c 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
@@ -21,6 +21,7 @@
bus-type = <MEDIA_BUS_TYPE_CSI2_CPHY>;
clock-lanes = <0>;
data-lanes = <1 2 3>;
+ line-orders = <0 3 0>;
remote-endpoint = <&max96712_out0>;
};
};
@@ -41,6 +42,7 @@
bus-type = <MEDIA_BUS_TYPE_CSI2_CPHY>;
clock-lanes = <0>;
data-lanes = <1 2 3>;
+ line-orders = <0 3 0>;
remote-endpoint = <&max96712_out1>;
};
};
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
new file mode 100644
index 000000000000..20e8232f2f32
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/white-hawk-single.dtsi
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the White Hawk Single board
+ *
+ * Copyright (C) 2023-2024 Glider bv
+ */
+
+#include "white-hawk-cpu-common.dtsi"
+#include "white-hawk-common.dtsi"
+
+/ {
+ model = "Renesas White Hawk Single board";
+ compatible = "renesas,white-hawk-single";
+};
+
+&hscif0 {
+ uart-has-rtscts;
+};
+
+&hscif0_pins {
+ groups = "hscif0_data", "hscif0_ctrl";
+ function = "hscif0";
+};
+
+&pfc {
+ tsn0_pins: tsn0 {
+ mux {
+ groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii",
+ "tsn0_txcrefclk";
+ function = "tsn0";
+ };
+
+ link {
+ groups = "tsn0_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "tsn0_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "tsn0_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+};
+
+&tsn0 {
+ pinctrl-0 = <&tsn0_pins>;
+ pinctrl-names = "default";
+ phy-mode = "rgmii";
+ phy-handle = <&phy3>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ phy3: ethernet-phy@0 {
+ compatible = "ethernet-phy-id002b.0980",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ interrupts-extended = <&gpio4 3 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 86cc418a2255..def1222c1907 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -111,6 +111,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-box-demo.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lckfb-tspi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lubancat-1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-nanopi-r3s.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-bigtreetech-cb2-manta.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-bigtreetech-pi2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-bpi-r2-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-fastrhino-r66s.dtb
@@ -129,6 +131,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-display-vz.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-armsom-sige5.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3582-radxa-e52c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-w3.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-evb.dtb
@@ -137,11 +141,14 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-firefly-itx-3588j.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-h96-max-v58.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6-lts.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-max.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5-itx.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
index bd6419a5c20a..8311af4c8689 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
@@ -74,6 +74,23 @@
vin-supply = <&vcc5v0_sys>;
};
+ /*
+ * HW revisions prior to v1.2 must pull GPIO4_D6 low to access sdmmc.
+ * This is modeled as an always-on, active-low fixed regulator.
+ */
+ vcc_sd: regulator-3v3-vcc-sd {
+ compatible = "regulator-fixed";
+ gpios = <&gpio4 RK_PD6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_2030>;
+ regulator-name = "vcc_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_io>;
+ };
+
vcc5v0_sys: regulator-5v0-vcc-sys {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_sys";
@@ -181,6 +198,12 @@
};
};
+ sdmmc {
+ sdmmc_2030: sdmmc-2030 {
+ rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
wifi {
wifi_reg_on: wifi-reg-on {
rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -233,7 +256,7 @@
cap-mmc-highspeed;
cap-sd-highspeed;
disable-wp;
- vmmc-supply = <&vcc_io>;
+ vmmc-supply = <&vcc_sd>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
index 8dfeaf1f8eb0..f7c4578865c5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
@@ -110,7 +110,6 @@
phy-supply = <&vcc_io>;
pinctrl-names = "default";
pinctrl-0 = <&rgmiim1_pins>;
- snps,aal;
snps,pbl = <0x4>;
tx_delay = <0x26>;
rx_delay = <0x11>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2.dtsi b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2.dtsi
index 1715d311e1f2..691d17022afb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2.dtsi
@@ -142,7 +142,6 @@
phy-supply = <&vcc_io_33>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";
- snps,aal;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
index 82021ffb0a49..4f193704e5dc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
@@ -113,7 +113,6 @@
phy-supply = <&vcc_io>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";
- snps,aal;
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index 425de197ddb8..6310b58de77f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -153,9 +153,6 @@
phy-supply = <&vcc_io>;
pinctrl-names = "default";
pinctrl-0 = <&rgmiim1_pins>;
- snps,aal;
- snps,rxpbl = <0x4>;
- snps,txpbl = <0x4>;
tx_delay = <0x26>;
rx_delay = <0x11>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 745d3e996418..e550b6eeeff3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -144,7 +144,6 @@
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmiim1_pins>;
- snps,force_thresh_dma_mode;
snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index d12e661dfd99..995b30a7aae0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -182,7 +182,7 @@
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
tx_delay = <0x10>;
- rx_delay = <0x10>;
+ rx_delay = <0x23>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2-manta.dts b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2-manta.dts
new file mode 100644
index 000000000000..97415d099d88
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2-manta.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-bigtreetech-cb2.dtsi"
+
+/ {
+ model = "BigTreeTech CB2";
+ compatible = "bigtreetech,cb2-manta", "bigtreetech,cb2", "rockchip,rk3566";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
new file mode 100644
index 000000000000..a48351471764
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include <dt-bindings/leds/common.h>
+#include "rk3566.dtsi"
+
+/ {
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ ext_cam_clk: clock-25000000-cam {
+ compatible = "fixed-clock";
+ clock-frequency = <25000000>;
+ clock-output-names = "ext_cam_clk";
+ #clock-cells = <0>;
+ };
+
+ can_mcp2515_osc: clock-8000000-mcp2515 {
+ compatible = "fixed-clock";
+ clock-frequency = <8000000>;
+ #clock-cells = <0>;
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blue_led>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&heartbeat_led>;
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <0 50 100 150 200 255>;
+ pwms = <&pwm7 0 50000 0>;
+ };
+
+ rk809-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "Analog RK809";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&rk809>;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk809 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_LOW>;
+ };
+
+ vbus: regulator-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_pcie: regulator-vcc3v3-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie";
+ enable-active-high;
+ gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_drv>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc3v3_sys: regulator-vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vbus>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host3";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_otg: regulator-vcc5v0-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_otg_en>;
+ regulator-name = "vcc5v0_otg3";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vbus>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vbus>;
+ };
+
+ vcc5v0_usb2b: regulator-vcc5v0-usb2b {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb2b_en>;
+ regulator-name = "vcc5v0_usb2b";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb2t: regulator-vcc5v0-usb2t {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb2t_en>;
+ regulator-name = "vcc5v0_usb2t";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_5v: regulator-vcc-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc_sd: regulator-vcc-sd {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "vcc_sd";
+ vin-supply = <&vcc3v3_sys>;
+ };
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ assigned-clock-rates = <0>, <125000000>;
+ clock_in_out = "input";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m0_miim
+ &gmac1m0_tx_bus2
+ &gmac1m0_rx_bus2
+ &gmac1m0_rgmii_clk
+ &gmac1m0_clkinout
+ &gmac1m0_rgmii_bus>;
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy0: phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reset-delay-us = <20000>;
+ reset-gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <100000>;
+ reg = <0x0>;
+ };
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda0v9_image>;
+ avdd-1v8-supply = <&vcca1v8_image>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1390000>;
+ regulator-initial-mode = <1>;
+ regulator-ramp-delay = <2300>;
+ regulator-boot-on;
+ regulator-always-on;
+ vin-supply = <&vcc5v0_sys>;
+ fcs,suspend-voltage-selector = <1>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
+ assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
+ #clock-cells = <1>;
+ clock-names = "mclk";
+ clocks = <&cru I2S1_MCLKOUT_TX>;
+ pinctrl-names = "default", "pmic-sleep",
+ "pmic-power-off", "pmic-reset";
+ pinctrl-0 = <&pmic_int>, <&i2s1m0_mclk>;
+ #sound-dai-cells = <0>;
+ system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_logic";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_gpu";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vcc_ddr";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-initial-mode = <0x2>;
+ regulator-name = "vdd_npu";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdda0v9_image";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdda_0v9";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdda0v9_pmu";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vccio_acodec";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3_pmu";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca_1v8";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pmu";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_image";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_3v3";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc3v3_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+
+ codec {
+ rockchip,mic-in-differential;
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-0 = <&i2c2m1_xfer>;
+};
+
+&i2c3 {
+ status = "okay";
+
+ tft_tp: touchscreen@48 {
+ compatible = "ti,tsc2007";
+ reg = <0x48>;
+ status = "okay";
+ ti,x-plate-ohms = <660>;
+ ti,rt-thr = <3000>;
+ ti,fuzzx = <32>;
+ ti,fuzzy = <16>;
+ };
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
+ rockchip,trcm-sync-tx-only;
+ status = "okay";
+};
+
+&spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1m1_cs0 &spi1m1_pins>;
+
+ can_mcp2515: can@0 {
+ compatible = "microchip,mcp2515";
+ reg = <0x00>;
+ clocks = <&can_mcp2515_osc>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcp2515_int_pin>;
+ spi-max-frequency = <10000000>;
+ vdd-supply = <&vcc3v3_sys>;
+ xceiver-supply = <&vcc3v3_sys>;
+ };
+};
+
+&spi3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3m1_cs0 &spi3m1_pins>;
+};
+
+&pcie2x1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_reset_h>;
+ reset-gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ bt {
+ bt_enable: bt-enable-h {
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bt_host_wake: bt-host-wake-l {
+ rockchip,pins = <2 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ bt_wake: bt-wake-l {
+ rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_drv: pcie-drv {
+ rockchip,pins = <4 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_reset_h: pcie-reset-h {
+ rockchip,pins = <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_host_wake: wifi-host-wake-l {
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_otg_en: vcc5v0-otg-en {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_usb2t_en: vcc5v0-usb2t-en {
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_usb2b_en: vcc5v0-usb2b-en {
+ rockchip,pins = <4 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ work-led {
+ heartbeat_led: led-heartbeat {
+ rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ blue_led: led-blue {
+ rockchip,pins = <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ mcp2515 {
+ mcp2515_int_pin: mcp2515-int-pin {
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vcc_3v3>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0m1_pins>;
+};
+
+&pwm12 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm12m1_pins>;
+};
+
+&pwm13 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm13m1_pins>;
+};
+
+&pwm14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m1_pins>;
+};
+
+&pwm15 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm15m1_pins>;
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ status = "okay";
+};
+
+&sdmmc0 {
+ max-frequency = <150000000>;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdmmc1 {
+ /* WiFi & BT combo module AMPAK AP6256 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ keep-power-in-suspend;
+ max-frequency = <150000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+ rockchip,default-sample-phase = <90>;
+ status = "okay";
+
+ sdio-wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake>;
+ brcm,drive-strength = <10>;
+ };
+};
+
+&sfc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&uart1 {
+ dma-names = "tx","rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ clocks = <&rk809 1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake &bt_wake &bt_enable>;
+ vbat-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcca1v8_pmu>;
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5m1_xfer>;
+};
+
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7m2_xfer>;
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_host {
+ phy-supply = <&vcc5v0_usb2t>;
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb2b>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ extcon = <&usb2phy0>;
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-pi2.dts b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-pi2.dts
new file mode 100644
index 000000000000..7cd444caa18b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-pi2.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include "rk3566-bigtreetech-cb2.dtsi"
+
+/ {
+ model = "BigTreeTech Pi 2";
+ compatible = "bigtreetech,pi2", "rockchip,rk3566";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
index c491dc4d4947..b1f185a58902 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts
@@ -206,12 +206,6 @@
};
};
-&i2c2 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2m0_xfer>;
-};
-
&i2c3 {
pinctrl-names = "default";
pinctrl-0 = <&i2c3m0_xfer>;
@@ -357,6 +351,19 @@
status = "okay";
};
+&spi0 {
+ /* use hardware chipselect on cs0 (cs1 unconnected) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0m0_pins>, <&spi0m0_cs0>;
+ status = "okay";
+
+ fram@0 {
+ compatible = "fujitsu,mb85rs128ty";
+ reg = <0>;
+ spi-max-frequency = <33000000>;
+ };
+};
+
&tsadc {
rockchip,hw-tshut-mode = <1>;
rockchip,hw-tshut-polarity = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
index e601d9271ba8..7bd32d230ad2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts
@@ -50,6 +50,7 @@
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_DISK;
gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_LOW>;
+ label = "hdd1:green:disk";
linux,default-trigger = "disk-activity";
pinctrl-names = "default";
pinctrl-0 = <&hdd1_led_pin>;
@@ -59,6 +60,7 @@
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_DISK;
gpios = <&gpio1 RK_PD6 GPIO_ACTIVE_LOW>;
+ label = "hdd2:green:disk";
linux,default-trigger = "disk-activity";
pinctrl-names = "default";
pinctrl-0 = <&hdd2_led_pin>;
@@ -68,6 +70,7 @@
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_DISK;
gpios = <&gpio1 RK_PD7 GPIO_ACTIVE_LOW>;
+ label = "hdd3:green:disk";
linux,default-trigger = "disk-activity";
pinctrl-names = "default";
pinctrl-0 = <&hdd3_led_pin>;
@@ -77,6 +80,7 @@
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_DISK;
gpios = <&gpio2 RK_PA0 GPIO_ACTIVE_LOW>;
+ label = "hdd4:green:disk";
linux,default-trigger = "disk-activity";
pinctrl-names = "default";
pinctrl-0 = <&hdd4_led_pin>;
@@ -483,6 +487,54 @@
};
};
+/*
+ * The MCU can provide the system temperature too, but only by polling,
+ * and it also cannot set trip points. So attach to the cpu thermal-zone
+ * instead to control the fan.
+ */
+&cpu_thermal {
+ trips {
+ case_fan0: case-fan0 {
+ hysteresis = <2000>;
+ temperature = <35000>;
+ type = "active";
+ };
+
+ case_fan1: case-fan1 {
+ hysteresis = <2000>;
+ temperature = <45000>;
+ type = "active";
+ };
+
+ case_fan2: case-fan2 {
+ hysteresis = <2000>;
+ temperature = <65000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ /*
+ * Always provide some air movement, since the small case
+ * is full of hard drives.
+ */
+ map1 {
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ trip = <&case_fan0>;
+ };
+
+ map2 {
+ cooling-device = <&fan 2 3>;
+ trip = <&case_fan1>;
+ };
+
+ map3 {
+ cooling-device = <&fan 4 THERMAL_NO_LIMIT>;
+ trip = <&case_fan2>;
+ };
+ };
+};
+
&pcie30phy {
data-lanes = <1 2>;
status = "okay";
@@ -582,6 +634,15 @@
*/
&uart0 {
status = "okay";
+
+ mcu {
+ compatible = "qnap,ts433-mcu";
+
+ fan: fan-0 {
+ #cooling-cells = <2>;
+ cooling-levels = <0 64 89 128 166 204 221 238>;
+ };
+ };
};
/*
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
index e8243c908542..bb33fabae16e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
@@ -53,7 +53,7 @@
pdm_codec: pdm-codec {
compatible = "dmic-codec";
- num-channels = <1>;
+ num-channels = <2>;
#sound-dai-cells = <0>;
};
@@ -167,6 +167,10 @@
};
};
+&hdmi_sound {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
@@ -414,6 +418,10 @@
pinctrl-0 = <&i2c4m1_xfer>;
};
+&i2s0_8ch {
+ status = "okay";
+};
+
&pdm {
pinctrl-0 = <&pdmm0_clk
&pdmm0_sdi0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
new file mode 100644
index 000000000000..782ca000a644
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3576.dtsi"
+
+/ {
+ model = "Rockchip RK3576 EVB V10 Board";
+ compatible = "rockchip,rk3576-evb1-v10", "rockchip,rk3576";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ adc_keys: adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-back {
+ label = "back";
+ linux,code = <KEY_BACK>;
+ press-threshold-microvolt = <1235000>;
+ };
+
+ button-menu {
+ label = "menu";
+ linux,code = <KEY_MENU>;
+ press-threshold-microvolt = <890000>;
+ };
+
+ button-vol-down {
+ label = "volume down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <417000>;
+ };
+
+ button-vol-up {
+ label = "volume up";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <17000>;
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+
+ work_led: led-0 {
+ gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vbus5v0_typec: regulator-vbus5v0-typec {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus5v0_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio0 RK_PD1 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_device>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg0_pwren>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc1v2_ufs_vccq_s0: regulator-vcc1v2-ufs-vccq-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v2_ufs_vccq_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc1v8_ufs_vccq2_s0: regulator-vcc1v8-ufs-vccq2-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8_ufs_vccq2_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc3v3_lcd_n: regulator-vcc3v3-lcd0-n {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lcd0_n";
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc_3v3_s0>;
+ };
+
+ vcc3v3_pcie0: regulator-vcc3v3-pcie0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc3v3_rtc_s5: regulator-vcc3v3-rtc-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_rtc_s5";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc5v0_device: regulator-vcc5v0-device {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_device";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_device>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren>;
+ };
+
+ vcc_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_1v8_s0: regulator-vcc-1v8-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc_2v0_pldo_s3: regulator-vcc-2v0-pldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_2v0_pldo_s3";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_3v3_s0: regulator-vcc-3v3-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_ufs_s0: regulator-vcc-ufs-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_ufs_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_sys>;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&combphy1_psu {
+ status = "okay";
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&rgmii_phy0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0m0_miim
+ &eth0m0_tx_bus2
+ &eth0m0_rx_bus2
+ &eth0m0_rgmii_clk
+ &eth0m0_rgmii_bus
+ &ethm0_clk0_25m_out>;
+ snps,reset-gpio = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 20000 100000>;
+ tx_delay = <0x21>;
+ status = "okay";
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-rxid";
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth1m0_miim
+ &eth1m0_tx_bus2
+ &eth1m0_rx_bus2
+ &eth1m0_rgmii_clk
+ &eth1m0_rgmii_bus
+ &ethm0_clk1_25m_out>;
+ snps,reset-gpio = <&gpio3 RK_PA3 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 20000 100000>;
+ tx_delay = <0x20>;
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ rk806: pmic@23 {
+ compatible = "rockchip,rk806";
+ reg = <0x23>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_sys>;
+ vcc9-supply = <&vcc_sys>;
+ vcc10-supply = <&vcc_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs1_slp: dvs1-slp-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs1_pwrdn: dvs1-pwrdn-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs1_rst: dvs1-rst-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs2_slp: dvs2-slp-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs2_pwrdn: dvs2-pwrdn-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs2_rst: dvs2-rst-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs2_dvs: dvs2-dvs-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs2_gpio: dvs2-gpio-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun5";
+ };
+
+ rk806_dvs3_slp: dvs3-slp-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs3_pwrdn: dvs3-pwrdn-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs3_rst: dvs3-rst-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs3_dvs: dvs3-dvs-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs3_gpio: dvs3-gpio-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun5";
+ };
+
+ regulators {
+ vdd_cpu_big_s0: dcdc-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_npu_s0: dcdc-reg2 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_npu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_gpu_s0: dcdc-reg5 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <800000>;
+ regulator-name = "vdd_logic_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo2_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdda_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v75_hdmi_s0: nldo-reg3 {
+ regulator-boot-on;
+ regulator-min-microvolt = <837500>;
+ regulator-max-microvolt = <837500>;
+ regulator-name = "vdda0v75_hdmi_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdda_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&mdio0 {
+ rgmii_phy0: phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ clocks = <&cru REFCLKO25M_GMAC0_OUT>;
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ clocks = <&cru REFCLKO25M_GMAC1_OUT>;
+ };
+};
+
+&pinctrl {
+ usb {
+ usb_host_pwren: usb-host-pwren {
+ rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_otg0_pwren: usb-otg0-pwren {
+ rockchip,pins = <0 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbc0_int: usbc0-int {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8_s0>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vbus5v0_typec>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&usbdp_phy {
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
+
+&usb_drd0_dwc3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_drd1_dwc3 {
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576.dtsi b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
index 436232ffe4d1..4dde954043ef 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
@@ -445,6 +445,58 @@
#size-cells = <2>;
ranges;
+ usb_drd0_dwc3: usb@23000000 {
+ compatible = "rockchip,rk3576-dwc3", "snps,dwc3";
+ reg = <0x0 0x23000000 0x0 0x400000>;
+ clocks = <&cru CLK_REF_USB3OTG0>,
+ <&cru CLK_SUSPEND_USB3OTG0>,
+ <&cru ACLK_USB3OTG0>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ interrupts = <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_USB>;
+ resets = <&cru SRST_A_USB3OTG0>;
+ dr_mode = "otg";
+ phys = <&u2phy0_otg>, <&usbdp_phy PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,parkmode-disable-hs-quirk;
+ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+ usb_drd1_dwc3: usb@23400000 {
+ compatible = "rockchip,rk3576-dwc3", "snps,dwc3";
+ reg = <0x0 0x23400000 0x0 0x400000>;
+ clocks = <&cru CLK_REF_USB3OTG1>,
+ <&cru CLK_SUSPEND_USB3OTG1>,
+ <&cru ACLK_USB3OTG1>;
+ clock-names = "ref_clk", "suspend_clk", "bus_clk";
+ interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_PHP>;
+ resets = <&cru SRST_A_USB3OTG1>;
+ dr_mode = "otg";
+ phys = <&u2phy1_otg>, <&combphy1_psu PHY_TYPE_USB3>;
+ phy-names = "usb2-phy", "usb3-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,dis_rxdet_inp3_quirk;
+ snps,parkmode-disable-hs-quirk;
+ snps,parkmode-disable-ss-quirk;
+ dma-coherent;
+ status = "disabled";
+ };
+
sys_grf: syscon@2600a000 {
compatible = "rockchip,rk3576-sys-grf", "syscon";
reg = <0x0 0x2600a000 0x0 0x2000>;
@@ -515,6 +567,65 @@
reg = <0x0 0x2602c000 0x0 0x2000>;
};
+ usb2phy_grf: syscon@2602e000 {
+ compatible = "rockchip,rk3576-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0x2602e000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy0: usb2-phy@0 {
+ compatible = "rockchip,rk3576-usb2phy";
+ reg = <0x0 0x10>;
+ resets = <&cru SRST_OTGPHY_0>, <&cru SRST_P_USBPHY_GRF_0>;
+ reset-names = "phy", "apb";
+ clocks = <&cru CLK_PHY_REF_SRC>,
+ <&cru ACLK_MMU2>,
+ <&cru ACLK_SLV_MMU2>;
+ clock-names = "phyclk", "aclk", "aclk_slv";
+ clock-output-names = "usb480m_phy0";
+ #clock-cells = <0>;
+ status = "disabled";
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "otg-bvalid", "otg-id", "linestate";
+ status = "disabled";
+ };
+ };
+
+ u2phy1: usb2-phy@2000 {
+ compatible = "rockchip,rk3576-usb2phy";
+ reg = <0x2000 0x10>;
+ resets = <&cru SRST_OTGPHY_1>, <&cru SRST_P_USBPHY_GRF_1>;
+ reset-names = "phy", "apb";
+ clocks = <&cru CLK_PHY_REF_SRC>,
+ <&cru ACLK_MMU1>,
+ <&cru ACLK_SLV_MMU1>;
+ clock-names = "phyclk", "aclk", "aclk_slv";
+ clock-output-names = "usb480m_phy1";
+ #clock-cells = <0>;
+ status = "disabled";
+
+ u2phy1_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "otg-bvalid", "otg-id", "linestate";
+ status = "disabled";
+ };
+ };
+ };
+
+ vo1_grf: syscon@26036000 {
+ compatible = "rockchip,rk3576-vo1-grf", "syscon";
+ reg = <0x0 0x26036000 0x0 0x100>;
+ clocks = <&cru PCLK_VO1_ROOT>;
+ };
+
sdgmac_grf: syscon@26038000 {
compatible = "rockchip,rk3576-sdgmac-grf", "syscon";
reg = <0x0 0x26038000 0x0 0x1000>;
@@ -1587,6 +1698,64 @@
status = "disabled";
};
+ combphy0_ps: phy@2b050000 {
+ compatible = "rockchip,rk3576-naneng-combphy";
+ reg = <0x0 0x2b050000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_REF_PCIE0_PHY>,
+ <&cru PCLK_PCIE2_COMBOPHY0>,
+ <&cru PCLK_PCIE0>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_REF_PCIE0_PHY>;
+ assigned-clock-rates = <100000000>;
+ resets = <&cru SRST_PCIE0_PIPE_PHY>,
+ <&cru SRST_P_PCIE2_COMBOPHY0>;
+ reset-names = "phy", "apb";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,pipe-phy-grf = <&pipe_phy0_grf>;
+ status = "disabled";
+ };
+
+ combphy1_psu: phy@2b060000 {
+ compatible = "rockchip,rk3576-naneng-combphy";
+ reg = <0x0 0x2b060000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_REF_PCIE1_PHY>,
+ <&cru PCLK_PCIE2_COMBOPHY1>,
+ <&cru PCLK_PCIE1>;
+ clock-names = "ref", "apb", "pipe";
+ assigned-clocks = <&cru CLK_REF_PCIE1_PHY>;
+ assigned-clock-rates = <100000000>;
+ resets = <&cru SRST_PCIE1_PIPE_PHY>,
+ <&cru SRST_P_PCIE2_COMBOPHY1>;
+ reset-names = "phy", "apb";
+ rockchip,pipe-grf = <&php_grf>;
+ rockchip,pipe-phy-grf = <&pipe_phy1_grf>;
+ status = "disabled";
+ };
+
+ usbdp_phy: phy@2b010000 {
+ compatible = "rockchip,rk3576-usbdp-phy";
+ reg = <0x0 0x2b010000 0x0 0x10000>;
+ #phy-cells = <1>;
+ clocks = <&cru CLK_PHY_REF_SRC>,
+ <&cru CLK_USBDP_COMBO_PHY_IMMORTAL>,
+ <&cru PCLK_USBDPPHY>,
+ <&u2phy0>;
+ clock-names = "refclk", "immortal", "pclk", "utmi";
+ resets = <&cru SRST_USBDP_COMBO_PHY_INIT>,
+ <&cru SRST_USBDP_COMBO_PHY_CMN>,
+ <&cru SRST_USBDP_COMBO_PHY_LANE>,
+ <&cru SRST_USBDP_COMBO_PHY_PCS>,
+ <&cru SRST_P_USBDPPHY>;
+ reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
+ rockchip,u2phy-grf = <&usb2phy_grf>;
+ rockchip,usb-grf = <&usb_grf>;
+ rockchip,usbdpphy-grf = <&usbdpphy_grf>;
+ rockchip,vo-grf = <&vo1_grf>;
+ status = "disabled";
+ };
+
sram: sram@3ff88000 {
compatible = "mmio-sram";
reg = <0x0 0x3ff88000 0x0 0x78000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3582-radxa-e52c.dts b/arch/arm64/boot/dts/rockchip/rk3582-radxa-e52c.dts
new file mode 100644
index 000000000000..e04f21d8c831
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3582-radxa-e52c.dts
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Radxa Computer (Shenzhen) Co., Ltd.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3588s.dtsi"
+
+/ {
+ model = "Radxa E52C";
+ compatible = "radxa,e52c", "rockchip,rk3582", "rockchip,rk3588s";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <18000>;
+ poll-interval = <100>;
+
+ button-0 {
+ label = "Maskrom";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <0>;
+ };
+ };
+
+ keys-1 {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&btn_0>;
+
+ button-1 {
+ label = "User";
+ gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ wakeup-source;
+ };
+ };
+
+ leds-0 {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_0>;
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio3 RK_PC4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ leds-1 {
+ compatible = "pwm-leds";
+
+ led-1 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_LAN;
+ linux,default-trigger = "netdev";
+ pwms = <&pwm14 0 1000000 PWM_POLARITY_INVERTED>;
+ max-brightness = <255>;
+ };
+
+ led-2 {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_WAN;
+ linux,default-trigger = "netdev";
+ pwms = <&pwm11 0 1000000 PWM_POLARITY_INVERTED>;
+ max-brightness = <255>;
+ };
+ };
+
+ vcc_1v1_nldo_s3: regulator-1v1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc_sysin>;
+ };
+
+ vcc_3v3_pmu: regulator-3v3-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_3v3_s0: regulator-3v3-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcca: regulator-4v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <4000000>;
+ vin-supply = <&vcc_sysin>;
+ };
+
+ vcc5v0_usb_otg0: regulator-5v0-0 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg_pwren_h>;
+ regulator-name = "vcc5v0_usb_otg0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sysin>;
+ };
+
+ vcc_5v0: regulator-5v0-1 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_5v0_pwren_h>;
+ regulator-name = "vcc_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sysin>;
+ };
+
+ vcc_sysin: regulator-5v0-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sysin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+/*
+ * In the Rockchip RK3582 SoC, some CPU cores end up disabled
+ * and unused because they're marked in the efuses as defective.
+ * The disabling in the DT is performed by the boot loader.
+ */
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ eeprom@50 {
+ compatible = "belling,bl24c16a", "atmel,24c16";
+ reg = <0x50>;
+ pagesize = <16>;
+ vcc-supply = <&vcc_3v3_pmu>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ vdd_npu_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_npu_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5m2_xfer>;
+ status = "okay";
+
+ rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "rtcic_32kout";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int_l>;
+ wakeup-source;
+ };
+};
+
+&pcie2x1l1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20x1_1_perstn_m1>;
+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_s3>;
+ status = "okay";
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie20x1_2_perstn_m0>;
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_s3>;
+ status = "okay";
+};
+
+&pinctrl {
+ keys {
+ btn_0: button-0 {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led_0: led-0 {
+ rockchip,pins = <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie20x1_1_perstn_m1: pcie-1 {
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie20x1_2_perstn_m0: pcie-2 {
+ rockchip,pins = <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ regulators {
+ vcc_5v0_pwren_h: regulator-5v0-1 {
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rtc {
+ rtc_int_l: rtc-0 {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_otg_pwren_h: regulator-5v0-0 {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm11 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm11m1_pins>;
+ status = "okay";
+};
+
+&pwm14 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm14m1_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ no-sd;
+ no-sdio;
+ non-removable;
+ vmmc-supply = <&vcc_3v3_s0>;
+ vqmmc-supply = <&vcc_1v8_s3>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ no-sdio;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&spi2 {
+ status = "okay";
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc_sysin>;
+ vcc2-supply = <&vcc_sysin>;
+ vcc3-supply = <&vcc_sysin>;
+ vcc4-supply = <&vcc_sysin>;
+ vcc5-supply = <&vcc_sysin>;
+ vcc6-supply = <&vcc_sysin>;
+ vcc7-supply = <&vcc_sysin>;
+ vcc8-supply = <&vcc_sysin>;
+ vcc9-supply = <&vcc_sysin>;
+ vcc10-supply = <&vcc_sysin>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc_sysin>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcca>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: dcdc-reg1 {
+ regulator-name = "vdd_gpu_s0";
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg2 {
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg3 {
+ regulator-name = "vdd_logic_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: dcdc-reg4 {
+ regulator-name = "vdd_vdenc_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-name = "vdd_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-name = "vdd2_ddr_s3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-name = "vcc_2v0_pldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-name = "vcc_3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-name = "vddq_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-name = "vcc_1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg1 {
+ regulator-name = "vcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca_1v8_s0: pldo-reg2 {
+ regulator-name = "vcca_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdda_1v2_s0: pldo-reg3 {
+ regulator-name = "vdda_1v2_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_3v3_s0: pldo-reg4 {
+ regulator-name = "vcca_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-name = "vccio_sd_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-name = "pldo6_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-name = "vdd_0v75_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_ddr_pll_s0: nldo-reg2 {
+ regulator-name = "vdda_ddr_pll_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdda_0v75_s0: nldo-reg3 {
+ regulator-name = "vdda_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_0v85_s0: nldo-reg4 {
+ regulator-name = "vdda_0v85_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-name = "vdd_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg0>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index a337f3fb8377..8cfa30837ce7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -555,7 +555,6 @@
<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
- status = "disabled";
};
mmu600_php: iommu@fcb00000 {
@@ -1686,6 +1685,7 @@
linux,pci-domain = <3>;
max-link-speed = <2>;
msi-map = <0x3000 &its0 0x3000 0x1000>;
+ iommu-map = <0x3000 &mmu600_pcie 0x3000 0x1000>;
num-lanes = <1>;
phys = <&combphy2_psu PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
@@ -1737,6 +1737,7 @@
linux,pci-domain = <4>;
max-link-speed = <2>;
msi-map = <0x4000 &its0 0x4000 0x1000>;
+ iommu-map = <0x4000 &mmu600_pcie 0x4000 0x1000>;
num-lanes = <1>;
phys = <&combphy0_ps PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
index 05ae9bdcfbbd..7125790bbed2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
@@ -10,6 +10,15 @@
stdout-path = "serial2:1500000n8";
};
+ /* Unnamed gated oscillator: 100 MHz, 3.3 V, 3225 */
+ pcie30_port0_refclk: pcie30_port1_refclk: pcie-oscillator {
+ compatible = "gated-fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "pcie30_refclk";
+ vdd-supply = <&vcc3v3_pi6c_05>;
+ };
+
vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3_pcie2x1l0";
@@ -19,26 +28,26 @@
vin-supply = <&vcc_3v3_s3>;
};
- vcc3v3_pcie3x2: regulator-vcc3v3-pcie3x2 {
+ vcc3v3_bkey: regulator-vcc3v3-bkey {
compatible = "regulator-fixed";
enable-active-high;
gpios = <&gpio2 RK_PC4 GPIO_ACTIVE_HIGH>; /* PCIE_4G_PWEN */
pinctrl-names = "default";
- pinctrl-0 = <&pcie3x2_vcc3v3_en>;
- regulator-name = "vcc3v3_pcie3x2";
+ pinctrl-0 = <&pcie_4g_pwen>;
+ regulator-name = "vcc3v3_bkey";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
startup-delay-us = <5000>;
vin-supply = <&vcc5v0_sys>;
};
- vcc3v3_pcie3x4: regulator-vcc3v3-pcie3x4 {
+ vcc3v3_pcie30: vcc3v3_pi6c_05: regulator-vcc3v3-pi6c-05 {
compatible = "regulator-fixed";
enable-active-high;
gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>; /* PCIE30x4_PWREN_H */
pinctrl-names = "default";
- pinctrl-0 = <&pcie3x4_vcc3v3_en>;
- regulator-name = "vcc3v3_pcie3x4";
+ pinctrl-0 = <&pcie30x4_pwren_h>;
+ regulator-name = "vcc3v3_pcie30";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
startup-delay-us = <5000>;
@@ -98,24 +107,52 @@
};
&pcie30phy {
+ data-lanes = <1 1 2 2>;
+ /* separate clock lines from the clock generator to phy and devices */
+ rockchip,rx-common-refclk-mode = <0 0 0 0>;
status = "okay";
};
-/* B-Key and E-Key */
+/* M-Key */
&pcie3x2 {
+ /*
+ * The board has a "pcie_refclk" oscillator that needs enabling,
+ * so add it to the list of clocks.
+ */
+ clocks = <&cru ACLK_PCIE_2L_MSTR>, <&cru ACLK_PCIE_2L_SLV>,
+ <&cru ACLK_PCIE_2L_DBI>, <&cru PCLK_PCIE_2L>,
+ <&cru CLK_PCIE_AUX1>, <&cru CLK_PCIE2L_PIPE>,
+ <&pcie30_port1_refclk>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe",
+ "ref";
+ num-lanes = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pcie3x2_rst>;
- reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTn_M1_L */
- vpcie3v3-supply = <&vcc3v3_pcie3x2>;
+ pinctrl-0 = <&pcie30x2_perstn_m1_l>;
+ reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* PCIE30X2_PERSTn_M1_L */
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
status = "okay";
};
-/* M-Key */
+/* B-Key and E-Key */
&pcie3x4 {
+ /*
+ * The board has a "pcie_refclk" oscillator that needs enabling,
+ * so add it to the list of clocks.
+ */
+ clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
+ <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
+ <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>,
+ <&pcie30_port0_refclk>;
+ clock-names = "aclk_mst", "aclk_slv",
+ "aclk_dbi", "pclk",
+ "aux", "pipe",
+ "ref";
pinctrl-names = "default";
- pinctrl-0 = <&pcie3x4_rst>;
- reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* PCIE30X2_PERSTn_M1_L */
- vpcie3v3-supply = <&vcc3v3_pcie3x4>;
+ pinctrl-0 = <&pcie30x4_perstn_m1_l>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTn_M1_L */
+ vpcie3v3-supply = <&vcc3v3_bkey>;
status = "okay";
};
@@ -127,20 +164,20 @@
};
pcie3 {
- pcie3x2_rst: pcie3x2-rst {
- rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ pcie30x2_perstn_m1_l: pcie30x2-perstn-m1-l {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
- pcie3x2_vcc3v3_en: pcie3x2-vcc3v3-en {
- rockchip,pins = <2 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ pcie_4g_pwen: pcie-4g-pwen {
+ rockchip,pins = <2 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
};
- pcie3x4_rst: pcie3x4-rst {
- rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ pcie30x4_perstn_m1_l: pcie30x4-perstn-m1-l {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
- pcie3x4_vcc3v3_en: pcie3x4-vcc3v3-en {
- rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ pcie30x4_pwren_h: pcie30x4-pwren-h {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index d6e464cdc536..ba49f0bbaac6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -206,6 +206,28 @@
pinctrl-0 = <&vcc3v3_pcie30_en>;
};
+ vcc3v3_pciewl_vbat: regulator-vcc3v3-pciewl-vbat {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "wlan-vbat";
+ vin-supply = <&vcc_3v3_s0>;
+ };
+
+ vcc3v3_wlan: regulator-vcc3v3-wlan {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_pwren>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "wlan-en";
+ vin-supply = <&vcc3v3_pciewl_vbat>;
+ };
+
vcc5v0_host: regulator-vcc5v0-host {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_host";
@@ -249,12 +271,26 @@
regulator-max-microvolt = <5000000>;
vin-supply = <&vcc5v0_usbdcin>;
};
+
+ vccio_wl: regulator-vccio-wl {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "wlan-vddio";
+ vin-supply = <&vcc_1v8_s0>;
+ };
};
&combphy0_ps {
status = "okay";
};
+&combphy1_ps {
+ status = "okay";
+};
+
&combphy2_psu {
status = "okay";
};
@@ -440,6 +476,30 @@
};
};
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>, <&pcie2_0_wake>, <&pcie2_0_clkreq>, <&wifi_host_wake_irq>;
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_wlan>;
+ status = "okay";
+
+ pcie@0,0 {
+ reg = <0x200000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ device_type = "pci";
+ bus-range = <0x20 0x2f>;
+
+ wifi: wifi@0,0 {
+ compatible = "pci14e4,449d";
+ reg = <0x210000 0 0 0 0>;
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ };
+ };
+};
+
&pcie2x1l1 {
reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
@@ -494,6 +554,18 @@
};
pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_0_wake: pcie2-0-wake {
+ rockchip,pins = <4 RK_PA4 4 &pcfg_pull_none>;
+ };
+
+ pcie2_0_clkreq: pcie2-0-clkreq {
+ rockchip,pins = <4 RK_PA3 4 &pcfg_pull_none>;
+ };
+
pcie2_1_rst: pcie2-1-rst {
rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
@@ -524,6 +596,16 @@
rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
+
+ wlan {
+ wifi_host_wake_irq: wifi-host-wake-irq {
+ rockchip,pins = <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ wifi_pwren: wifi-pwren {
+ rockchip,pins = <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
};
&pwm2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
index 0ce0934ec6b7..4a950907ea6f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
@@ -162,6 +162,7 @@
linux,pci-domain = <0>;
max-link-speed = <3>;
msi-map = <0x0000 &its1 0x0000 0x1000>;
+ iommu-map = <0x0000 &mmu600_pcie 0x0000 0x1000>;
num-lanes = <4>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";
@@ -212,6 +213,7 @@
interrupt-names = "sys", "pmc", "msg", "legacy", "err",
"dma0", "dma1", "dma2", "dma3";
max-link-speed = <3>;
+ iommus = <&mmu600_pcie 0x0000>;
num-lanes = <4>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";
@@ -248,6 +250,7 @@
linux,pci-domain = <1>;
max-link-speed = <3>;
msi-map = <0x1000 &its1 0x1000 0x1000>;
+ iommu-map = <0x1000 &mmu600_pcie 0x1000 0x1000>;
num-lanes = <2>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";
@@ -297,6 +300,7 @@
linux,pci-domain = <2>;
max-link-speed = <2>;
msi-map = <0x2000 &its0 0x2000 0x1000>;
+ iommu-map = <0x2000 &mmu600_pcie 0x2000 0x1000>;
num-lanes = <1>;
phys = <&combphy1_ps PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi
new file mode 100644
index 000000000000..42c523b553c9
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+#include "rk3588.dtsi"
+
+/ {
+ compatible = "firefly,core-3588j", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ };
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m2_xfer>;
+ status = "okay";
+
+ vdd_npu_s0: vdd_npu_mem_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_npu_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ avdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "avdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ avdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "avdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+/* rk3588 preferred debug out */
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts b/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts
new file mode 100644
index 000000000000..2be5251d3e3b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "dt-bindings/usb/pd.h"
+
+#include "rk3588-firefly-core-3588j.dtsi"
+
+/ {
+ model = "Firefly ITX-3588J";
+ compatible = "firefly,itx-3588j", "firefly,core-3588j", "rockchip,rk3588";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ /*
+ * there are also "Reset" and "Mask ROM" buttons, but the needed
+ * settings are unknown at this time
+ */
+ adc-keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <2000>;
+ };
+ };
+
+ analog-sound {
+ compatible = "simple-audio-card";
+ pinctrl-0 = <&hp_detect>;
+ pinctrl-names = "default";
+ simple-audio-card,aux-devs = <&amp_headphones>, <&amp_speaker>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,hp-det-gpios = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>;
+ simple-audio-card,mclk-fs = <384>;
+ simple-audio-card,name = "rockchip_es8323";
+ simple-audio-card,pin-switches = "Headphones", "Speaker";
+ simple-audio-card,routing =
+ "Speaker Amplifier INL", "LOUT2",
+ "Speaker Amplifier INR", "ROUT2",
+ "Speaker", "Speaker Amplifier OUTL",
+ "Speaker", "Speaker Amplifier OUTR",
+ "Headphones Amplifier INL", "LOUT1",
+ "Headphones Amplifier INR", "ROUT1",
+ "Headphones", "Headphones Amplifier OUTL",
+ "Headphones", "Headphones Amplifier OUTR",
+ "LINPUT1", "Microphone Jack",
+ "RINPUT1", "Microphone Jack",
+ "LINPUT2", "Onboard Microphone",
+ "RINPUT2", "Onboard Microphone";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Microphone", "Onboard Microphone",
+ "Headphone", "Headphones",
+ "Speaker", "Speaker";
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&es8323>;
+ system-clock-frequency = <12288000>;
+ };
+ };
+
+ /*
+ * this does not seem to be a proper "amplifier", but just a way
+ * to control the GPIO pins that switch the given sound output
+ * device on or off
+ */
+ amp_headphones: headphones-audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&headphone_amplifier_en>;
+ sound-name-prefix = "Headphones Amplifier";
+ };
+
+ amp_speaker: speaker-audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio3 RK_PB2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&speaker_amplifier_en>;
+ sound-name-prefix = "Speaker Amplifier";
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <0 120 150 180 210 240 255>;
+ fan-supply = <&vcc12v_dcin>;
+ pwms = <&pwm15 0 50000 1>;
+ };
+
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ /*
+ * There is also a Power LED control at GPIO1 RK_PB3, but for
+ * some reason it doesn't seem to work right
+ */
+
+ user_led: led-1 {
+ gpios = <&pca9555 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ };
+ };
+
+ pcie30_avdd0v75: regulator-pcie30-avdd0v75 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "pcie30_avdd0v75";
+ vin-supply = <&avdd_0v75_s0>;
+ };
+
+ vbus5v0_typec_pwr_en: regulator-vbus5v0-typec-pwr-en {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 12 GPIO_ACTIVE_HIGH>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vbus5v0_typec_pwr_en";
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-name = "vcc12v_dcin";
+ };
+
+ vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_vcc3v3_en>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3_pcie30";
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 5 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_host";
+ vin-supply = <&vcc5v0_usb>;
+ };
+
+ vcc5v0_host3: regulator-vcc5v0-host3 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 7 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc5v0_host3";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_sys";
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_usb";
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vcc_1v1_nldo_s3";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_fan_pwr_en: regulator-vcc-fan-pwr-en {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 11 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_fan_pwr_en";
+ };
+
+ vcc_hub_reset: regulator-vcc-hub-reset {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 4 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_hub_reset";
+ };
+
+ vcc_hub3_reset: regulator-vcc-hub3-reset {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 6 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-name = "vcc_hub3_reset";
+ };
+
+ vcc_sata_pwr_en: regulator-vcc-sata-pwr-en {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&pca9555 10 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_sata_pwr_en";
+ };
+};
+
+&avcc_1v8_s0 {
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-rxid";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus>;
+ tx_delay = <0x45>;
+ rx_delay = <0x4a>;
+ status = "okay";
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-rxid";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1_miim
+ &gmac1_tx_bus2
+ &gmac1_rx_bus2
+ &gmac1_rgmii_clk
+ &gmac1_rgmii_bus>;
+ tx_delay = <0x42>;
+ rx_delay = <0x4f>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdptxphy_hdmi0 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ /*
+ * in the Firefly BSP source this was confusingly called an
+ * "ES8388" - it actually seems to be an ES8323 and the drivers
+ * for that work best
+ */
+ es8323: audio-codec@11 {
+ compatible = "everest,es8323";
+ reg = <0x11>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6m0_xfer>;
+ status = "okay";
+
+ pca9555: gpio@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vbus5v0_typec_pwr_en>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos =
+ <PDO_FIXED(5000, 1000, PDO_FIXED_USB_COMM)>;
+ source-pdos =
+ <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdp_phy0_orientation_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&dwc3_0_role_switch>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ dp_altmode_mux: endpoint {
+ remote-endpoint = <&usbdp_phy0_dp_altmode_mux>;
+ };
+ };
+ };
+ };
+ };
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ wakeup-source;
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+};
+
+&mdio0 {
+ rgmii_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ };
+};
+
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_rst>;
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ dp {
+ dp1_hpd: dp1-hpd {
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-leds {
+ sys_led_pin: sys-led-pin {
+ rockchip,pins =
+ <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie3 {
+ pcie3_rst: pcie3-rst {
+ rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie3_vcc3v3_en: pcie3-vcc3v3-en {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ headphone_amplifier_en: headphone-amplifier-en {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ speaker_amplifier_en: speaker-amplifier-en {
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usbc0_int: usbc0-int {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm15 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm15m2_pins>;
+ status = "okay";
+};
+
+&sata0 {
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&sata2 {
+ status = "okay";
+};
+
+/* UART / RS-232 / RS-485 */
+&uart0 {
+ pinctrl-0 = <&uart0m2_xfer>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1m1_xfer>;
+ status = "okay";
+};
+
+/* usb enable */
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ usb-role-switch;
+ dr_mode = "otg";
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dwc3_0_role_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host2_xhci {
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_orientation_switch: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+
+ usbdp_phy0_dp_altmode_mux: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_altmode_mux>;
+ };
+ };
+};
+
+&usbdp_phy1 {
+ rockchip,dp-lane-mux = <2 3>;
+ status = "okay";
+};
+
+&vcc_1v8_s0 {
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+};
+
+/* keep the fan supply up during deep sleep */
+&vdd_log_s0 {
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+};
+
+/* display generator */
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts b/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts
new file mode 100644
index 000000000000..4791b77f3571
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3588.dtsi"
+
+/ {
+ model = "H96 Max V58 TV Box";
+ compatible = "haochuangyi,h96-max-v58", "rockchip,rk3588";
+
+ aliases {
+ ethernet0 = &gmac1;
+ mmc0 = &sdhci;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-function {
+ label = "Reset";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <1750>;
+ };
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_receiver_pin>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ led {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc_1v1_nldo_s3: regulator-1v1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ pcie_3v3: regulator-3v3-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&pcie2_0_pow>;
+ pinctrl-names = "default";
+ regulator-name = "pcie_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ wl_en_3v3: regulator-3v3-wlen {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&wl_en>;
+ pinctrl-names = "default";
+ /*
+ * Needs to be brought up before the PCIe driver is probed,
+ * otherwise detecting the WLAN module requires rescanning
+ * the bus, and even then it fails half of the time during
+ * firmware load
+ */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "wl_en_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_host: regulator-5v0-host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vcc5v0_host_en>;
+ pinctrl-names = "default";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_otg: regulator-5v0-otg {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&vcc5v0_otg_en>;
+ pinctrl-names = "default";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&combphy1_ps {
+ status = "okay";
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdptxphy_hdmi0 {
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+};
+
+&gmac1 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&gmac1_miim
+ &gmac1_tx_bus2
+ &gmac1_rx_bus2
+ &gmac1_rgmii_clk
+ &gmac1_rgmii_bus>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@1 {
+ /* RTL8211F */
+ compatible = "ethernet-phy-id001c.c916";
+ reg = <0x1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtl8211f_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie2x1l0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ pcie@0,0 {
+ reg = <0x200000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ device_type = "pci";
+ bus-range = <0x20 0x2f>;
+
+ wifi: wifi@0,0 {
+ compatible = "pci14e4,449d";
+ reg = <0x210000 0 0 0 0>;
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ };
+ };
+};
+
+&pinctrl {
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ ir-receiver {
+ ir_receiver_pin: ir-receiver-pin {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ led_pins: led-pins {
+ rockchip,pins = <3 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie2_0_pow: pcie2-0-pow {
+ rockchip,pins = <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rtl8211f {
+ rtl8211f_rst: rtl8211f-rst {
+ rockchip,pins = <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ };
+
+ usb {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_otg_en: vcc5v0-otg-en {
+ rockchip,pins = <4 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifibt {
+ wl_en: wl-en {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ wl_wake_host: wl-wake-host {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ bt_en: bt-en {
+ rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ bt_wake: bt-wake {
+ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ bt_wake_host: bt-wake-host {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-name = "vdd_gpu_s0";
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-name = "vdd_log_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-name = "vdd_vdenc_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-name = "vdd_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-name = "vdd2_ddr_s3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-name = "vdd_2v0_pldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-name = "vcc_3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-name = "vddq_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-name = "vcc_1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-name = "avcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-name = "vcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-name = "avdd_1v2_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-name = "vcc_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-name = "vccio_sd_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-name = "pldo6_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-name = "vdd_0v75_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-name = "vdd_ddr_pll_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-name = "avdd_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-name = "vdd_0v85_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-name = "vdd_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&uart9 {
+ pinctrl-0 = <&uart9m0_xfer &uart9m0_ctsn &uart9m0_rtsn>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-0 = <&bt_en>, <&bt_wake_host>, <&bt_wake>;
+ pinctrl-names = "default";
+ shutdown-gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
new file mode 100644
index 000000000000..87090cb98020
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3588-orangepi-5.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 5 Max";
+ compatible = "xunlong,orangepi-5-max", "rockchip,rk3588";
+
+ vcc5v0_usb30_otg: vcc5v0-usb30-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ /* USB_OTG_PWREN */
+ gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg_pwren>;
+ regulator-name = "vcc5v0_usb30_otg";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&headphone_amp {
+ /* PHONE_CTL */
+ enable-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+};
+
+&analog_sound {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ simple-audio-card,aux-devs = <&headphone_amp>;
+ simple-audio-card,hp-det-gpios = <&gpio3 RK_PD2 GPIO_ACTIVE_HIGH>;
+
+ simple-audio-card,routing =
+ "Headphones", "LOUT1",
+ "Headphones", "ROUT1",
+ "LINPUT1", "Microphone Jack",
+ "RINPUT1", "Microphone Jack",
+ "LINPUT2", "Onboard Microphone",
+ "RINPUT2", "Onboard Microphone";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Microphone", "Onboard Microphone",
+ "Headphone", "Headphones";
+};
+
+&fan {
+ /* FAN_CTL_H */
+ pwms = <&pwm9 0 50000 0>;
+};
+
+&hym8563 {
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PC4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int_l>;
+};
+
+&led_blue_pwm {
+ /* PWM_LED1 */
+ pwms = <&pwm4 0 25000 0>;
+ status = "okay";
+};
+
+&led_green_pwm {
+ /* PWM_LED2 */
+ pwms = <&pwm5 0 25000 0>;
+};
+
+/* phy2 */
+&pcie2x1l1 {
+ reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie_eth>;
+ status = "okay";
+};
+
+&pinctrl {
+ hym8563 {
+ rtc_int_l: hym8563-int {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sound {
+ hp_detect: hp-detect {
+ rockchip,pins = <3 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_host_pwren: usb-host-pwren {
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm4m0_pins>;
+ status = "okay";
+};
+
+&pwm5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm5m1_pins>;
+ status = "okay";
+};
+
+&pwm9 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm9m2_pins>;
+ status = "okay";
+};
+
+&sfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspim2_pins>;
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_usb30_otg>;
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_usb20>;
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+};
+
+/* pcie eth. not a real regulator. 33VAUX */
+&vcc3v3_pcie_eth {
+ /* Ethernet_power_en */
+ gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_LOW>;
+};
+
+/*
+ * Represents the vcc5v0_usb20 and vcc5v0_usb30 in the schematic;
+ * both regulators share the same enable gpio.
+ */
+&vcc5v0_usb20 {
+ /* USB_HOST_PWREN */
+ gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts
new file mode 100644
index 000000000000..ce44549babf4
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3588-orangepi-5-compact.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 5 Max";
+ compatible = "xunlong,orangepi-5-max", "rockchip,rk3588";
+
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdptxphy_hdmi0 {
+ status = "okay";
+};
+
+&pinctrl {
+
+ usb {
+ usb_otg_pwren: usb-otg-pwren {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 9f5a38b290bf..255e33c5dbdc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -6,86 +6,15 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/leds/common.h>
-#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/soc/rockchip,vop2.h>
#include <dt-bindings/usb/pd.h>
-#include "rk3588.dtsi"
+#include "rk3588-orangepi-5.dtsi"
/ {
model = "Xunlong Orange Pi 5 Plus";
compatible = "xunlong,orangepi-5-plus", "rockchip,rk3588";
- aliases {
- mmc0 = &sdhci;
- mmc1 = &sdmmc;
- };
-
- chosen {
- stdout-path = "serial2:1500000n8";
- };
-
- adc-keys-0 {
- compatible = "adc-keys";
- io-channels = <&saradc 0>;
- io-channel-names = "buttons";
- keyup-threshold-microvolt = <1800000>;
- poll-interval = <100>;
-
- button-maskrom {
- label = "Mask Rom";
- linux,code = <KEY_SETUP>;
- press-threshold-microvolt = <2000>;
- };
- };
-
- adc-keys-1 {
- compatible = "adc-keys";
- io-channels = <&saradc 1>;
- io-channel-names = "buttons";
- keyup-threshold-microvolt = <1800000>;
- poll-interval = <100>;
-
- button-recovery {
- label = "Recovery";
- linux,code = <KEY_VENDOR>;
- press-threshold-microvolt = <2000>;
- };
- };
-
- speaker_amp: speaker-audio-amplifier {
- compatible = "simple-audio-amplifier";
- enable-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
- sound-name-prefix = "Speaker Amp";
- };
-
- headphone_amp: headphones-audio-amplifier {
- compatible = "simple-audio-amplifier";
- enable-gpios = <&gpio3 RK_PA7 GPIO_ACTIVE_HIGH>;
- sound-name-prefix = "Headphones Amp";
- };
-
- ir-receiver {
- compatible = "gpio-ir-receiver";
- gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&ir_receiver_pin>;
- };
-
- gpio-leds {
- compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&blue_led_pin>;
-
- led {
- color = <LED_COLOR_ID_BLUE>;
- function = LED_FUNCTION_INDICATOR;
- function-enumerator = <1>;
- gpios = <&gpio3 RK_PA6 GPIO_ACTIVE_HIGH>;
- };
- };
-
hdmi0-con {
compatible = "hdmi-connector";
type = "a";
@@ -97,24 +26,11 @@
};
};
- fan: pwm-fan {
- compatible = "pwm-fan";
- cooling-levels = <0 70 75 80 100>;
- fan-supply = <&vcc5v0_sys>;
- pwms = <&pwm3 0 50000 0>;
- #cooling-cells = <2>;
- };
-
- pwm-leds {
- compatible = "pwm-leds";
-
- led {
- color = <LED_COLOR_ID_GREEN>;
- function = LED_FUNCTION_INDICATOR;
- function-enumerator = <2>;
- max-brightness = <255>;
- pwms = <&pwm2 0 25000 0>;
- };
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_receiver_pin>;
};
rfkill {
@@ -124,160 +40,73 @@
shutdown-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
};
- sound {
- compatible = "simple-audio-card";
- pinctrl-names = "default";
- pinctrl-0 = <&hp_detect>;
- simple-audio-card,name = "Analog";
- simple-audio-card,aux-devs = <&speaker_amp>, <&headphone_amp>;
- simple-audio-card,format = "i2s";
- simple-audio-card,mclk-fs = <256>;
- simple-audio-card,hp-det-gpios = <&gpio1 RK_PD3 GPIO_ACTIVE_LOW>;
- simple-audio-card,bitclock-master = <&daicpu>;
- simple-audio-card,frame-master = <&daicpu>;
- /*TODO: SARADC_IN3 is used as MIC detection / key input */
-
- simple-audio-card,widgets =
- "Microphone", "Onboard Microphone",
- "Microphone", "Microphone Jack",
- "Speaker", "Speaker",
- "Headphone", "Headphones";
-
- simple-audio-card,routing =
- "Headphones", "LOUT1",
- "Headphones", "ROUT1",
- "Speaker", "LOUT2",
- "Speaker", "ROUT2",
-
- "Headphones", "Headphones Amp OUTL",
- "Headphones", "Headphones Amp OUTR",
- "Headphones Amp INL", "LOUT1",
- "Headphones Amp INR", "ROUT1",
-
- "Speaker", "Speaker Amp OUTL",
- "Speaker", "Speaker Amp OUTR",
- "Speaker Amp INL", "LOUT2",
- "Speaker Amp INR", "ROUT2",
-
- /* single ended signal to LINPUT1 */
- "LINPUT1", "Microphone Jack",
- "RINPUT1", "Microphone Jack",
- /* differential signal */
- "LINPUT2", "Onboard Microphone",
- "RINPUT2", "Onboard Microphone";
-
- daicpu: simple-audio-card,cpu {
- sound-dai = <&i2s0_8ch>;
- system-clock-frequency = <12288000>;
- };
-
- daicodec: simple-audio-card,codec {
- sound-dai = <&es8388>;
- system-clock-frequency = <12288000>;
- };
- };
-
- vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
- compatible = "regulator-fixed";
- enable-active-high;
- gpios = <&gpio2 RK_PB6 GPIO_ACTIVE_HIGH>;
- regulator-name = "vcc3v3_pcie30";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <5000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_pcie_eth: regulator-vcc3v3-pcie-eth {
- compatible = "regulator-fixed";
- gpios = <&gpio3 RK_PB4 GPIO_ACTIVE_LOW>;
- regulator-name = "vcc3v3_pcie_eth";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <50000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc3v3_wf: regulator-vcc3v3-wf {
- compatible = "regulator-fixed";
- enable-active-high;
- gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
- regulator-name = "vcc3v3_wf";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- startup-delay-us = <50000>;
- vin-supply = <&vcc5v0_sys>;
- };
-
- vcc5v0_sys: regulator-vcc5v0-sys {
- compatible = "regulator-fixed";
- regulator-name = "vcc5v0_sys";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- vcc5v0_usb20: regulator-vcc5v0-usb20 {
+ vbus5v0_typec: regulator-vbus-typec {
compatible = "regulator-fixed";
enable-active-high;
- gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
- pinctrl-0 = <&vcc5v0_usb20_en>;
- regulator-name = "vcc5v0_usb20";
+ pinctrl-0 = <&typec5v_pwren>;
+ regulator-name = "vbus5v0_typec";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&vcc5v0_sys>;
};
};
-&combphy0_ps {
- status = "okay";
-};
-
-&combphy1_ps {
+&speaker_amp {
+ enable-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
status = "okay";
};
-&combphy2_psu {
- status = "okay";
+&headphone_amp {
+ enable-gpios = <&gpio3 RK_PA7 GPIO_ACTIVE_HIGH>;
};
-&cpu_b0 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b1 {
- cpu-supply = <&vdd_cpu_big0_s0>;
-};
-
-&cpu_b2 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&cpu_b3 {
- cpu-supply = <&vdd_cpu_big1_s0>;
-};
-
-&cpu_l0 {
- cpu-supply = <&vdd_cpu_lit_s0>;
+&analog_sound {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_detect>;
+ simple-audio-card,aux-devs = <&speaker_amp>, <&headphone_amp>;
+ simple-audio-card,hp-det-gpios = <&gpio1 RK_PD3 GPIO_ACTIVE_LOW>;
+ simple-audio-card,widgets =
+ "Microphone", "Onboard Microphone",
+ "Microphone", "Microphone Jack",
+ "Speaker", "Speaker",
+ "Headphone", "Headphones";
+
+ simple-audio-card,routing =
+ "Headphones", "LOUT1",
+ "Headphones", "ROUT1",
+ "Speaker", "LOUT2",
+ "Speaker", "ROUT2",
+
+ "Headphones", "Headphones Amp OUTL",
+ "Headphones", "Headphones Amp OUTR",
+ "Headphones Amp INL", "LOUT1",
+ "Headphones Amp INR", "ROUT1",
+
+ "Speaker", "Speaker Amp OUTL",
+ "Speaker", "Speaker Amp OUTR",
+ "Speaker Amp INL", "LOUT2",
+ "Speaker Amp INR", "ROUT2",
+
+ /* single ended signal to LINPUT1 */
+ "LINPUT1", "Microphone Jack",
+ "RINPUT1", "Microphone Jack",
+ /* differential signal */
+ "LINPUT2", "Onboard Microphone",
+ "RINPUT2", "Onboard Microphone";
};
-&cpu_l1 {
- cpu-supply = <&vdd_cpu_lit_s0>;
+&combphy0_ps {
+ status = "okay";
};
-&cpu_l2 {
- cpu-supply = <&vdd_cpu_lit_s0>;
+&combphy1_ps {
+ status = "okay";
};
-&cpu_l3 {
- cpu-supply = <&vdd_cpu_lit_s0>;
-};
-
-&gpu {
- mali-supply = <&vdd_gpu_s0>;
- status = "okay";
+&fan {
+ pwms = <&pwm3 0 50000 0>;
};
&hdmi0 {
@@ -300,128 +129,73 @@
status = "okay";
};
-&i2c0 {
+&hym8563 {
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&i2c0m2_xfer>;
- status = "okay";
-
- vdd_cpu_big0_s0: regulator@42 {
- compatible = "rockchip,rk8602";
- reg = <0x42>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big0_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc5v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_big1_s0: regulator@43 {
- compatible = "rockchip,rk8603", "rockchip,rk8602";
- reg = <0x43>;
- fcs,suspend-voltage-selector = <1>;
- regulator-name = "vdd_cpu_big1_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <1050000>;
- regulator-ramp-delay = <2300>;
- vin-supply = <&vcc5v0_sys>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
+ pinctrl-0 = <&hym8563_int>;
};
&i2c6 {
- clock-frequency = <400000>;
- status = "okay";
-
- hym8563: rtc@51 {
- compatible = "haoyu,hym8563";
- reg = <0x51>;
+ usbc0: usb-typec@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
interrupt-parent = <&gpio0>;
- interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
- #clock-cells = <0>;
- clock-output-names = "hym8563";
+ interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&hym8563_int>;
- wakeup-source;
- };
-};
-
-&i2c7 {
- status = "okay";
-
- /* PLDO2 vcca 1.8V, BUCK8 gated by PLDO2 being enabled */
- es8388: audio-codec@11 {
- compatible = "everest,es8388";
- reg = <0x11>;
- clocks = <&cru I2S0_8CH_MCLKOUT>;
- AVDD-supply = <&vcc_1v8_s0>;
- DVDD-supply = <&vcc_1v8_s0>;
- HPVDD-supply = <&vcc_3v3_s0>;
- PVDD-supply = <&vcc_3v3_s0>;
- assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
- assigned-clock-rates = <12288000>;
- #sound-dai-cells = <0>;
- };
-};
-
-&i2s0_8ch {
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_lrck
- &i2s0_mclk
- &i2s0_sclk
- &i2s0_sdi0
- &i2s0_sdo0>;
- status = "okay";
-};
+ pinctrl-0 = <&usbc0_int>;
+ vbus-supply = <&vbus5v0_typec>;
+ status = "okay";
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ power-role = "dual";
+ op-sink-microwatt = <10>;
+ source-pdos = <PDO_FIXED(5000, 1400, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 10, PDO_FIXED_USB_COMM)>;
+ try-power-role = "source";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_hs: endpoint {
+ remote-endpoint = <&usb_host0_xhci_drd_sw>;
+ };
+ };
-&i2s2_2ch {
- pinctrl-names = "default";
- pinctrl-0 = <&i2s2m0_lrck
- &i2s2m0_sclk
- &i2s2m0_sdi
- &i2s2m0_sdo>;
- status = "okay";
-};
+ port@1 {
+ reg = <1>;
-&package_thermal {
- polling-delay = <1000>;
+ usbc0_ss: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_ss>;
+ };
+ };
- cooling-maps {
- map0 {
- trip = <&package_fan0>;
- cooling-device = <&fan THERMAL_NO_LIMIT 1>;
- };
+ port@2 {
+ reg = <2>;
- map1 {
- trip = <&package_fan1>;
- cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ usbc0_sbu: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_sbu>;
+ };
+ };
+ };
};
};
+};
- trips {
- package_fan0: package-fan0 {
- temperature = <55000>;
- hysteresis = <2000>;
- type = "active";
- };
+&led_blue_gpio {
+ gpios = <&gpio3 RK_PA6 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
- package_fan1: package-fan1 {
- temperature = <65000>;
- hysteresis = <2000>;
- type = "active";
- };
- };
+&led_green_pwm {
+ pwms = <&pwm2 0 25000 0>;
};
/* phy1 - M.KEY socket */
@@ -445,16 +219,6 @@
status = "okay";
};
-&pcie30phy {
- status = "okay";
-};
-
-&pcie3x4 {
- reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc3v3_pcie30>;
- status = "okay";
-};
-
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
@@ -485,6 +249,16 @@
rockchip,pins = <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ usb-typec {
+ usbc0_int: usbc0-int {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ typec5v_pwren: typec5v-pwren {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&pwm2 {
@@ -498,434 +272,68 @@
status = "okay";
};
-&saradc {
- vref-supply = <&vcc_1v8_s0>;
- status = "okay";
-};
-
-&sdhci {
- bus-width = <8>;
- no-sdio;
- no-sd;
- non-removable;
- max-frequency = <200000000>;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
- status = "okay";
-};
-
-&sdmmc {
- bus-width = <4>;
- cap-sd-highspeed;
- cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
- disable-wp;
- max-frequency = <150000000>;
- no-sdio;
- no-mmc;
- sd-uhs-sdr104;
- vmmc-supply = <&vcc_3v3_s3>;
- vqmmc-supply = <&vccio_sd_s0>;
+&recovery_button {
status = "okay";
};
&sfc {
pinctrl-names = "default";
pinctrl-0 = <&fspim1_pins>;
- status = "okay";
+};
- spi_flash: flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0x0>;
- spi-max-frequency = <100000000>;
- spi-rx-bus-width = <4>;
- spi-tx-bus-width = <1>;
- };
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_sys>;
};
-&spi2 {
- assigned-clocks = <&cru CLK_SPI2>;
- assigned-clock-rates = <200000000>;
- num-cs = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+&uart9 {
+ pinctrl-0 = <&uart9m0_xfer>;
status = "okay";
+};
- pmic@0 {
- compatible = "rockchip,rk806";
- reg = <0x0>;
- interrupt-parent = <&gpio0>;
- interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
- <&rk806_dvs2_null>, <&rk806_dvs3_null>;
- spi-max-frequency = <1000000>;
- system-power-controller;
-
- vcc1-supply = <&vcc5v0_sys>;
- vcc2-supply = <&vcc5v0_sys>;
- vcc3-supply = <&vcc5v0_sys>;
- vcc4-supply = <&vcc5v0_sys>;
- vcc5-supply = <&vcc5v0_sys>;
- vcc6-supply = <&vcc5v0_sys>;
- vcc7-supply = <&vcc5v0_sys>;
- vcc8-supply = <&vcc5v0_sys>;
- vcc9-supply = <&vcc5v0_sys>;
- vcc10-supply = <&vcc5v0_sys>;
- vcc11-supply = <&vcc_2v0_pldo_s3>;
- vcc12-supply = <&vcc5v0_sys>;
- vcc13-supply = <&vdd2_ddr_s3>;
- vcc14-supply = <&vdd2_ddr_s3>;
- vcca-supply = <&vcc5v0_sys>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl1";
- function = "pin_fun0";
- };
+&usbdp_phy0 {
+ mode-switch;
+ orientation-switch;
+ sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>;
+ sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>;
- rk806_dvs2_null: dvs2-null-pins {
- pins = "gpio_pwrctrl2";
- function = "pin_fun0";
- };
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
- rk806_dvs3_null: dvs3-null-pins {
- pins = "gpio_pwrctrl3";
- function = "pin_fun0";
+ usbdp_phy0_typec_ss: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_ss>;
};
- regulators {
- vdd_gpu_s0: dcdc-reg1 {
- regulator-name = "vdd_gpu_s0";
- regulator-boot-on;
- regulator-enable-ramp-delay = <400>;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_cpu_lit_s0: dcdc-reg2 {
- regulator-name = "vdd_cpu_lit_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <950000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_log_s0: dcdc-reg3 {
- regulator-name = "vdd_log_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <825000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_vdenc_s0: dcdc-reg4 {
- regulator-name = "vdd_vdenc_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <825000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_ddr_s0: dcdc-reg5 {
- regulator-name = "vdd_ddr_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <675000>;
- regulator-max-microvolt = <900000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- vdd2_ddr_s3: dcdc-reg6 {
- regulator-name = "vdd2_ddr_s3";
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- };
- };
-
- vcc_2v0_pldo_s3: dcdc-reg7 {
- regulator-name = "vdd_2v0_pldo_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <2000000>;
- regulator-max-microvolt = <2000000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <2000000>;
- };
- };
-
- vcc_3v3_s3: dcdc-reg8 {
- regulator-name = "vcc_3v3_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
- };
- };
-
- vddq_ddr_s0: dcdc-reg9 {
- regulator-name = "vddq_ddr_s0";
- regulator-always-on;
- regulator-boot-on;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_1v8_s3: dcdc-reg10 {
- regulator-name = "vcc_1v8_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avcc_1v8_s0: pldo-reg1 {
- regulator-name = "avcc_1v8_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- /* shorted to avcc_1v8_s0 on the board */
- vcc_1v8_s0: pldo-reg2 {
- regulator-name = "vcc_1v8_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- avdd_1v2_s0: pldo-reg3 {
- regulator-name = "avdd_1v2_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vcc_3v3_s0: pldo-reg4 {
- regulator-name = "vcc_3v3_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vccio_sd_s0: pldo-reg5 {
- regulator-name = "vccio_sd_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-ramp-delay = <12500>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- pldo6_s3: pldo-reg6 {
- regulator-name = "pldo6_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
- };
- };
-
- vdd_0v75_s3: nldo-reg1 {
- regulator-name = "vdd_0v75_s3";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
-
- regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <750000>;
- };
- };
-
- vdd_ddr_pll_s0: nldo-reg2 {
- regulator-name = "vdd_ddr_pll_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- regulator-suspend-microvolt = <850000>;
- };
- };
-
- avdd_0v75_s0: nldo-reg3 {
- regulator-name = "avdd_0v75_s0";
- regulator-always-on;
- regulator-boot-on;
- /*
- * The schematic mentions that actual setting
- * should be 0.8375V. RK3588 datasheet specifies
- * maximum as 0.825V. So we set datasheet max
- * here.
- */
- regulator-min-microvolt = <825000>;
- regulator-max-microvolt = <825000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v85_s0: nldo-reg4 {
- regulator-name = "vdd_0v85_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <850000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
-
- vdd_0v75_s0: nldo-reg5 {
- regulator-name = "vdd_0v75_s0";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <750000>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
- };
+ usbdp_phy0_typec_sbu: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc0_sbu>;
};
};
};
-&tsadc {
- status = "okay";
-};
+&usb_host0_xhci {
+ usb-role-switch;
-&u2phy2 {
- status = "okay";
-};
-
-&u2phy3 {
- status = "okay";
-};
-
-&u2phy2_host {
- phy-supply = <&vcc5v0_usb20>;
- status = "okay";
-};
-
-&u2phy3_host {
- phy-supply = <&vcc5v0_usb20>;
- status = "okay";
-};
-
-&uart2 {
- pinctrl-0 = <&uart2m0_xfer>;
- status = "okay";
-};
-
-&uart9 {
- pinctrl-0 = <&uart9m0_xfer>;
- status = "okay";
-};
-
-&usb_host0_ehci {
- status = "okay";
-};
-
-&usb_host0_ohci {
- status = "okay";
-};
-
-&usb_host1_ehci {
- status = "okay";
+ port {
+ usb_host0_xhci_drd_sw: endpoint {
+ remote-endpoint = <&usbc0_hs>;
+ };
+ };
};
-&usb_host1_ohci {
- status = "okay";
+&vcc3v3_pcie_eth {
+ gpios = <&gpio3 RK_PB4 GPIO_ACTIVE_LOW>;
};
-&vop_mmu {
+&vcc3v3_wf {
status = "okay";
};
-&vop {
- status = "okay";
+&vcc5v0_usb20 {
+ gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb20_en>;
};
&vp0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi
new file mode 100644
index 000000000000..a98e804a0949
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023 Ondřej Jirman <megi@xff.cz>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3588.dtsi"
+
+/ {
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ adc-keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "Mask Rom";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <2000>;
+ };
+ };
+
+ recovery_button: adc-keys-1 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+ status = "disabled";
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <2000>;
+ };
+ };
+
+ speaker_amp: speaker-audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "Speaker Amp";
+ status = "disabled";
+ };
+
+ headphone_amp: headphones-audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "Headphones Amp";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led_blue_gpio: led {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <1>;
+ status = "disabled";
+ };
+ };
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <0 70 75 80 100>;
+ fan-supply = <&vcc5v0_sys>;
+ #cooling-cells = <2>;
+ };
+
+ pwm-leds {
+ compatible = "pwm-leds";
+
+ led_blue_pwm: led-1 {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ linux,default-trigger = "heartbeat";
+ max-brightness = <255>;
+ status = "disabled";
+ };
+
+ led_green_pwm: led-2 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ max-brightness = <255>;
+ };
+ };
+
+ rfkill {
+ compatible = "rfkill-gpio";
+ label = "rfkill-pcie-wlan";
+ radio-type = "wlan";
+ shutdown-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ };
+
+ analog_sound: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Analog";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,bitclock-master = <&daicpu>;
+ simple-audio-card,frame-master = <&daicpu>;
+ /*TODO: SARADC_IN3 is used as MIC detection / key input */
+
+ daicpu: simple-audio-card,cpu {
+ sound-dai = <&i2s0_8ch>;
+ system-clock-frequency = <12288000>;
+ };
+
+ daicodec: simple-audio-card,codec {
+ sound-dai = <&es8388>;
+ system-clock-frequency = <12288000>;
+ };
+ };
+
+ vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PB6 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc3v3_pcie30";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie_eth: regulator-vcc3v3-pcie-eth {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie_eth";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc3v3_wf: regulator-vcc3v3-wf {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>;
+ regulator-name = "vcc3v3_wf";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <50000>;
+ vin-supply = <&vcc5v0_sys>;
+ status = "disabled";
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb20: regulator-vcc5v0-usb20 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ regulator-name = "vcc5v0_usb20";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&combphy2_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ wakeup-source;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ /* PLDO2 vcca 1.8V, BUCK8 gated by PLDO2 being enabled */
+ es8388: audio-codec@11 {
+ compatible = "everest,es8388";
+ reg = <0x11>;
+ clocks = <&cru I2S0_8CH_MCLKOUT>;
+ AVDD-supply = <&vcc_3v3_s0>;
+ DVDD-supply = <&vcc_1v8_s0>;
+ HPVDD-supply = <&vcc_3v3_s0>;
+ PVDD-supply = <&vcc_1v8_s0>;
+ assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ assigned-clock-rates = <12288000>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2s0_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_lrck
+ &i2s0_mclk
+ &i2s0_sclk
+ &i2s0_sdi0
+ &i2s0_sdo0>;
+ status = "okay";
+};
+
+&i2s2_2ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2m0_lrck
+ &i2s2m0_sclk
+ &i2s2m0_sdi
+ &i2s2m0_sdo>;
+ status = "okay";
+};
+
+&package_thermal {
+ polling-delay = <1000>;
+
+ cooling-maps {
+ map0 {
+ trip = <&package_fan0>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map1 {
+ trip = <&package_fan1>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ package_fan0: package-fan0 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ max-frequency = <200000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&sfc {
+ status = "okay";
+
+ spi_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <100000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ };
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vdd2_ddr_s3>;
+ vcc14-supply = <&vdd2_ddr_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: dcdc-reg1 {
+ regulator-name = "vdd_gpu_s0";
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg2 {
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-name = "vdd_log_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <825000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: dcdc-reg4 {
+ regulator-name = "vdd_vdenc_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <825000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-name = "vdd_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-name = "vdd2_ddr_s3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-name = "vdd_2v0_pldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-name = "vcc_3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-name = "vddq_ddr_s0";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-name = "vcc_1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-name = "avcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ /* shorted to avcc_1v8_s0 on the board */
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-name = "vcc_1v8_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-name = "avdd_1v2_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-name = "vcc_3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-name = "vccio_sd_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-name = "pldo6_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-name = "vdd_0v75_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-name = "vdd_ddr_pll_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-name = "avdd_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ /*
+ * The schematic mentions that actual setting
+ * should be 0.8375V. RK3588 datasheet specifies
+ * maximum as 0.825V. So we set datasheet max
+ * here.
+ */
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <825000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-name = "vdd_0v85_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-name = "vdd_0v75_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_usb20>;
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_usb20>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
index c9749cb50076..d2eddea1840f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
@@ -775,6 +775,15 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg0>;
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -797,6 +806,15 @@
status = "okay";
};
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
+
&vop {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
index a251c4343548..de219570bbc9 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
@@ -61,7 +61,7 @@
&i2s_port4
&spdif_port0
&comp_spdif_port0>;
- hp-det-gpio = <&gpio UNIPHIER_GPIO_IRQ(0) GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio UNIPHIER_GPIO_IRQ(0) GPIO_ACTIVE_LOW>;
};
spdif-out {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
index 79f6db2455c1..20e5fb724fae 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -61,7 +61,7 @@
&i2s_port4
&spdif_port0
&comp_spdif_port0>;
- hp-det-gpio = <&gpio UNIPHIER_GPIO_IRQ(0) GPIO_ACTIVE_LOW>;
+ hp-det-gpios = <&gpio UNIPHIER_GPIO_IRQ(0) GPIO_ACTIVE_LOW>;
};
spdif-out {
diff --git a/arch/arm64/boot/dts/sprd/sc2731.dtsi b/arch/arm64/boot/dts/sprd/sc2731.dtsi
index 12136e68dada..458685b82462 100644
--- a/arch/arm64/boot/dts/sprd/sc2731.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc2731.dtsi
@@ -94,17 +94,17 @@
nvmem-cells = <&adc_big_scale>, <&adc_small_scale>;
};
- fuel-gauge@a00 {
+ pmic_fgu: fuel-gauge@a00 {
compatible = "sprd,sc2731-fgu";
reg = <0xa00>;
- bat-detect-gpio = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
+ battery-detect-gpios = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
io-channels = <&pmic_adc 3>, <&pmic_adc 6>;
io-channel-names = "bat-temp", "charge-vol";
- monitored-battery = <&bat>;
nvmem-cell-names = "fgu_calib";
nvmem-cells = <&fgu_calib>;
interrupt-parent = <&sc2731_pmic>;
interrupts = <4>;
+ status = "disabled";
};
vibrator@ec8 {
diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
index e5a2857721e2..e97000e560e7 100644
--- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
@@ -163,18 +163,18 @@
ap_clk: clock-controller@21500000 {
compatible = "sprd,sc9863a-ap-clk";
reg = <0 0x21500000 0 0x1000>;
- clocks = <&ext_32k>, <&ext_26m>;
- clock-names = "ext-32k", "ext-26m";
+ clocks = <&ext_26m>, <&ext_32k>;
+ clock-names = "ext-26m", "ext-32k";
#clock-cells = <1>;
};
aon_clk: clock-controller@402d0000 {
compatible = "sprd,sc9863a-aon-clk";
reg = <0 0x402d0000 0 0x1000>;
- clocks = <&ext_26m>, <&rco_100m>,
- <&ext_32k>, <&ext_4m>;
- clock-names = "ext-26m", "rco-100m",
- "ext-32k", "ext-4m";
+ clocks = <&ext_26m>, <&ext_32k>,
+ <&ext_4m>, <&rco_100m>;
+ clock-names = "ext-26m", "ext-32k",
+ "ext-4m", "rco-100m";
#clock-cells = <1>;
};
@@ -288,7 +288,7 @@
};
};
- in-port {
+ in-ports {
port {
etf_little_in: endpoint {
remote-endpoint =
diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
index 095b24a31313..b1fa817ece1e 100644
--- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
+++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
@@ -71,8 +71,8 @@
compatible = "simple-battery";
charge-full-design-microamp-hours = <1900000>;
charge-term-current-microamp = <120000>;
- constant_charge_voltage_max_microvolt = <4350000>;
- internal-resistance-micro-ohms = <250000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+ factory-internal-resistance-micro-ohms = <250000>;
ocv-capacity-celsius = <20>;
ocv-capacity-table-0 = <4185000 100>, <4113000 95>, <4066000 90>,
<4022000 85>, <3983000 80>, <3949000 75>,
@@ -84,6 +84,11 @@
};
};
+&pmic_fgu {
+ monitored-battery = <&bat>;
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi
index 6fe12e3bd7dd..f3c6cdfd7008 100644
--- a/arch/arm64/boot/dts/st/stm32mp251.dtsi
+++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/st,stm32mp25-rcc.h>
#include <dt-bindings/regulator/st,stm32mp25-regulator.h>
+#include <dt-bindings/phy/phy.h>
/ {
#address-cells = <2>;
@@ -237,6 +238,21 @@
#access-controller-cells = <1>;
ranges;
+ i2s2: audio-controller@400b0000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x400b0000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI2>, <&rcc CK_KER_SPI2>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI2_R>;
+ dmas = <&hpdma 51 0x43 0x12>,
+ <&hpdma 52 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 23>;
+ status = "disabled";
+ };
+
spi2: spi@400b0000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -252,6 +268,21 @@
status = "disabled";
};
+ i2s3: audio-controller@400c0000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x400c0000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI3>, <&rcc CK_KER_SPI3>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI3_R>;
+ dmas = <&hpdma 53 0x43 0x12>,
+ <&hpdma 54 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 24>;
+ status = "disabled";
+ };
+
spi3: spi@400c0000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -267,6 +298,20 @@
status = "disabled";
};
+ spdifrx: audio-controller@400d0000 {
+ compatible = "st,stm32h7-spdifrx";
+ #sound-dai-cells = <0>;
+ reg = <0x400d0000 0x400>;
+ clocks = <&rcc CK_KER_SPDIFRX>;
+ clock-names = "kclk";
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 71 0x43 0x212>,
+ <&hpdma 72 0x43 0x212>;
+ dma-names = "rx", "rx-ctrl";
+ access-controllers = <&rifsc 30>;
+ status = "disabled";
+ };
+
usart2: serial@400e0000 {
compatible = "st,stm32h7-uart";
reg = <0x400e0000 0x400>;
@@ -439,6 +484,21 @@
status = "disabled";
};
+ i2s1: audio-controller@40230000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x40230000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI1>, <&rcc CK_KER_SPI1>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI1_R>;
+ dmas = <&hpdma 49 0x43 0x12>,
+ <&hpdma 50 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 22>;
+ status = "disabled";
+ };
+
spi1: spi@40230000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -484,6 +544,108 @@
status = "disabled";
};
+ sai1: sai@40290000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x40290000 0x4>, <0x4029a3f0 0x10>;
+ ranges = <0 0x40290000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI1>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI1_R>;
+ access-controllers = <&rifsc 49>;
+ status = "disabled";
+
+ sai1a: audio-controller@40290004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI1>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 73 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai1b: audio-controller@40290024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI1>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 74 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ sai2: sai@402a0000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x402a0000 0x4>, <0x402aa3f0 0x10>;
+ ranges = <0 0x402a0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI2>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI2_R>;
+ access-controllers = <&rifsc 50>;
+ status = "disabled";
+
+ sai2a: audio-controller@402a0004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI2>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 75 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai2b: audio-controller@402a0024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI2>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 76 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ sai3: sai@402b0000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x402b0000 0x4>, <0x402ba3f0 0x10>;
+ ranges = <0 0x402b0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI3>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI3_R>;
+ access-controllers = <&rifsc 51>;
+ status = "disabled";
+
+ sai3a: audio-controller@402b0004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI3>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 77 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai3b: audio-controller@402b0024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI3>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 78 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
uart9: serial@402c0000 {
compatible = "st,stm32h7-uart";
reg = <0x402c0000 0x400>;
@@ -508,6 +670,40 @@
status = "disabled";
};
+ sai4: sai@40340000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x40340000 0x4>, <0x4034a3f0 0x10>;
+ ranges = <0 0x40340000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI4>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI4_R>;
+ access-controllers = <&rifsc 52>;
+ status = "disabled";
+
+ sai4a: audio-controller@40340004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI4>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 79 0x63 0x21>;
+ status = "disabled";
+ };
+
+ sai4b: audio-controller@40340024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI4>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 80 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
spi6: spi@40350000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -603,6 +799,44 @@
status = "disabled";
};
+ csi: csi@48020000 {
+ compatible = "st,stm32mp25-csi";
+ reg = <0x48020000 0x2000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc CSI_R>;
+ clocks = <&rcc CK_KER_CSI>, <&rcc CK_KER_CSITXESC>,
+ <&rcc CK_KER_CSIPHY>;
+ clock-names = "pclk", "txesc", "csi2phy";
+ access-controllers = <&rifsc 86>;
+ status = "disabled";
+ };
+
+ dcmipp: dcmipp@48030000 {
+ compatible = "st,stm32mp25-dcmipp";
+ reg = <0x48030000 0x1000>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc DCMIPP_R>;
+ clocks = <&rcc CK_BUS_DCMIPP>, <&rcc CK_KER_CSI>;
+ clock-names = "kclk", "mclk";
+ access-controllers = <&rifsc 87>;
+ status = "disabled";
+ };
+
+ combophy: phy@480c0000 {
+ compatible = "st,stm32mp25-combophy";
+ reg = <0x480c0000 0x1000>;
+ #phy-cells = <1>;
+ clocks = <&rcc CK_BUS_USB3PCIEPHY>, <&rcc CK_KER_USB3PCIEPHY>;
+ clock-names = "apb", "ker";
+ resets = <&rcc USB3PCIEPHY_R>;
+ reset-names = "phy";
+ access-controllers = <&rifsc 67>;
+ power-domains = <&CLUSTER_PD>;
+ wakeup-source;
+ interrupts-extended = <&exti1 45 IRQ_TYPE_EDGE_FALLING>;
+ status = "disabled";
+ };
+
sdmmc1: mmc@48220000 {
compatible = "st,stm32mp25-sdmmc2", "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00353180>;
diff --git a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
index 6f393b082789..1b88485a62a1 100644
--- a/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
+++ b/arch/arm64/boot/dts/st/stm32mp257f-ev1.dts
@@ -27,6 +27,44 @@
stdout-path = "serial0:115200n8";
};
+ clocks {
+ clk_ext_camera: clk-ext-camera {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+
+ pad_clk: pad-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <100000000>;
+ };
+ };
+
+ imx335_2v9: regulator-2v9 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx335-avdd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-always-on;
+ };
+
+ imx335_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx335-ovdd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ imx335_1v2: regulator-1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "imx335-dvdd";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x1 0x0>;
@@ -50,6 +88,46 @@
status = "okay";
};
+&combophy {
+ clocks = <&rcc CK_BUS_USB3PCIEPHY>, <&rcc CK_KER_USB3PCIEPHY>, <&pad_clk>;
+ clock-names = "apb", "ker", "pad";
+ status = "okay";
+};
+
+&csi {
+ vdd-supply = <&scmi_vddcore>;
+ vdda18-supply = <&scmi_v1v8>;
+ status = "okay";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ csi_sink: endpoint {
+ remote-endpoint = <&imx335_ep>;
+ data-lanes = <1 2>;
+ bus-type = <4>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ csi_source: endpoint {
+ remote-endpoint = <&dcmipp_0>;
+ };
+ };
+ };
+};
+
+&dcmipp {
+ status = "okay";
+ port {
+ dcmipp_0: endpoint {
+ remote-endpoint = <&csi_source>;
+ bus-type = <4>;
+ };
+ };
+};
+
&ethernet2 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth2_rgmii_pins_a>;
@@ -81,6 +159,25 @@
i2c-scl-falling-time-ns = <13>;
clock-frequency = <400000>;
status = "okay";
+
+ imx335: camera@1a {
+ compatible = "sony,imx335";
+ reg = <0x1a>;
+ clocks = <&clk_ext_camera>;
+ avdd-supply = <&imx335_2v9>;
+ ovdd-supply = <&imx335_1v8>;
+ dvdd-supply = <&imx335_1v2>;
+ reset-gpios = <&gpioi 7 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
+
+ port {
+ imx335_ep: endpoint {
+ remote-endpoint = <&csi_sink>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <594000000>;
+ };
+ };
+ };
};
&i2c8 {
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index f71360f14f23..8a4bdf87e2d4 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -42,10 +42,6 @@ dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-csi2-imx219.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
# Boards with AM64x SoC
-k3-am642-hummingboard-t-pcie-dtbs := \
- k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-pcie.dtbo
-k3-am642-hummingboard-t-usb3-dtbs := \
- k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-usb3.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac-mii.dtbo
@@ -107,11 +103,13 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-common-proc-board-infotainment.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm-gesi-exp-board.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm-pcie0-ep.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-j721e-evm-pcie1-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk-csi2-dual-imx219.dtbo
# Boards with J721s2 SoC
dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board-pcie1-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-gesi-exp-board.dtbo
k3-j721s2-evm-dtbs := k3-j721s2-common-proc-board.dtb k3-j721s2-evm-gesi-exp-board.dtbo
@@ -124,6 +122,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm.dtb
# Boards with J784s4 SoC
dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am69-sk-pcie0-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-pcie0-pcie1-ep.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-j784s4-evm-quad-port-eth-exp1.dtbo
@@ -192,14 +191,20 @@ k3-am642-tqma64xxl-mbax4xxl-wlan-dtbs := \
k3-am642-tqma64xxl-mbax4xxl.dtb k3-am64-tqma64xxl-mbax4xxl-wlan.dtbo
k3-am68-sk-base-board-csi2-dual-imx219-dtbs := k3-am68-sk-base-board.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
+k3-am68-sk-base-board-pcie1-ep-dtbs := k3-am68-sk-base-board.dtb \
+ k3-am68-sk-base-board-pcie1-ep.dtbo
k3-am69-sk-csi2-dual-imx219-dtbs := k3-am69-sk.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
+k3-am69-sk-pcie0-ep-dtbs := k3-am69-sk.dtb \
+ k3-am69-sk-pcie0-ep.dtbo
k3-j7200-evm-pcie1-ep-dtbs := k3-j7200-common-proc-board.dtb \
k3-j7200-evm-pcie1-ep.dtbo
k3-j721e-common-proc-board-infotainment-dtbs := k3-j721e-common-proc-board.dtb \
k3-j721e-common-proc-board-infotainment.dtbo
k3-j721e-evm-pcie0-ep-dtbs := k3-j721e-common-proc-board.dtb \
k3-j721e-evm-pcie0-ep.dtbo
+k3-j721e-evm-pcie1-ep-dtbs := k3-j721e-common-proc-board.dtb \
+ k3-j721e-evm-pcie1-ep.dtbo
k3-j721e-sk-csi2-dual-imx219-dtbs := k3-j721e-sk.dtb \
k3-j721e-sk-csi2-dual-imx219.dtbo
k3-j721s2-evm-pcie1-ep-dtbs := k3-j721s2-common-proc-board.dtb \
@@ -229,10 +234,13 @@ dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
k3-am642-tqma64xxl-mbax4xxl-sdcard.dtb \
k3-am642-tqma64xxl-mbax4xxl-wlan.dtb \
k3-am68-sk-base-board-csi2-dual-imx219.dtb \
+ k3-am68-sk-base-board-pcie1-ep.dtb \
k3-am69-sk-csi2-dual-imx219.dtb \
- k3-j7200-evm-pcie1-ep.dtbo \
+ k3-am69-sk-pcie0-ep.dtb \
+ k3-j7200-evm-pcie1-ep.dtb \
k3-j721e-common-proc-board-infotainment.dtb \
k3-j721e-evm-pcie0-ep.dtb \
+ k3-j721e-evm-pcie1-ep.dtb \
k3-j721e-sk-csi2-dual-imx219.dtb \
k3-j721s2-evm-pcie1-ep.dtb \
k3-j784s4-evm-pcie0-pcie1-ep.dtb \
@@ -255,6 +263,7 @@ DTC_FLAGS_k3-am68-sk-base-board += -@
DTC_FLAGS_k3-am69-sk += -@
DTC_FLAGS_k3-j7200-common-proc-board += -@
DTC_FLAGS_k3-j721e-common-proc-board += -@
+DTC_FLAGS_k3-j721e-evm-pcie0-ep += -@
DTC_FLAGS_k3-j721e-sk += -@
DTC_FLAGS_k3-j721s2-common-proc-board += -@
DTC_FLAGS_k3-j784s4-evm += -@
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index 7cd727d10a5f..7d355aa73ea2 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -23,7 +23,6 @@
interrupt-controller;
reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
<0x00 0x01880000 0x00 0xc0000>, /* GICR */
- <0x00 0x01880000 0x00 0xc0000>, /* GICR */
<0x01 0x00000000 0x00 0x2000>, /* GICC */
<0x01 0x00010000 0x00 0x1000>, /* GICH */
<0x01 0x00020000 0x00 0x2000>; /* GICV */
diff --git a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
index 5952874fe429..2ef4cbaec789 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
@@ -95,6 +95,16 @@
regulator-boot-on;
};
+ vddshv_3v3: regulator-vddshv-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDSHV0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_3v3>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -313,6 +323,7 @@
compatible = "atmel,24c32";
pagesize = <32>;
reg = <0x50>;
+ vcc-supply = <&vddshv_3v3>;
};
i2c_som_rtc: rtc@52 {
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index ee96f4f6deb0..75c80290b12a 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -610,7 +610,7 @@
reg = <1>;
reset-gpios = <&main_gpio1 5 GPIO_ACTIVE_LOW>;
reset-assert-us = <25>;
- reset-deassert-us = <60000>; /* T2 */
+ reset-deassert-us = <35>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am625-sk.dts b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
index ae81ebb39d02..2fbfa3719345 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-sk.dts
@@ -219,13 +219,6 @@
};
};
-&mailbox0_cluster0 {
- mbox_m4_0: mbox-m4-0 {
- ti,mbox-rx = <0 0 0>;
- ti,mbox-tx = <1 0 0>;
- };
-};
-
&fss {
bootph-all;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index a93e2cd7b8c7..a1daba7b1fad 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -18,7 +18,6 @@
compatible = "arm,gic-v3";
reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
<0x00 0x01880000 0x00 0xc0000>, /* GICR */
- <0x00 0x01880000 0x00 0xc0000>, /* GICR */
<0x01 0x00000000 0x00 0x2000>, /* GICC */
<0x01 0x00010000 0x00 0x1000>, /* GICH */
<0x01 0x00020000 0x00 0x2000>; /* GICV */
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
index 0b1dd5390cd3..b2c8f5351743 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-wakeup.dtsi
@@ -2,9 +2,11 @@
/*
* Device Tree Source for AM62A SoC Family Wakeup Domain peripherals
*
- * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2022-2025 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <dt-bindings/bus/ti-sysc.h>
+
&cbass_wakeup {
wkup_conf: bus@43000000 {
compatible = "simple-bus";
@@ -38,14 +40,34 @@
};
};
- wkup_uart0: serial@2b300000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x2b300000 0x00 0x100>;
- interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ target-module@2b300050 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0 0x2b300050 0 0x4>,
+ <0 0x2b300054 0 0x4>,
+ <0 0x2b300058 0 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ ti,no-reset-on-init;
power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 114 0>;
- clock-names = "fclk";
- status = "disabled";
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x2b300000 0x100000>;
+
+ wkup_uart0: serial@0 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0 0x100>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
};
wkup_i2c0: i2c@2b200000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
index 41e1c24b1144..6e3beb5c2e01 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi
@@ -651,6 +651,7 @@
interrupt-names = "host", "peripheral";
maximum-speed = "high-speed";
dr_mode = "otg";
+ bootph-all;
snps,usb2-gadget-lpm-disable;
snps,usb2-lpm-disable;
};
@@ -768,6 +769,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <16>;
+ status = "disabled";
};
mailbox0_cluster1: mailbox@29010000 {
@@ -777,6 +779,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <16>;
+ status = "disabled";
};
mailbox0_cluster2: mailbox@29020000 {
@@ -786,6 +789,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <16>;
+ status = "disabled";
};
mailbox0_cluster3: mailbox@29030000 {
@@ -795,6 +799,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <16>;
+ status = "disabled";
};
ecap0: pwm@23100000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index 7f3dc39e12bc..ad71d2f27f53 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -621,6 +621,8 @@
};
&mailbox0_cluster0 {
+ status = "okay";
+
mbox_r5_0: mbox-r5-0 {
ti,mbox-rx = <0 0 0>;
ti,mbox-tx = <1 0 0>;
@@ -628,6 +630,8 @@
};
&mailbox0_cluster1 {
+ status = "okay";
+
mbox_mcu_r5_0: mbox-mcu-r5-0 {
ti,mbox-rx = <0 0 0>;
ti,mbox-tx = <1 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
index d364c247833f..922cad14c9f8 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
@@ -112,6 +112,25 @@
regulator-boot-on;
};
+ vcc_3v3_hdmi: regulator-vcc-3v3-hdmi {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3_HDMI";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_sw>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_1v2_hdmi: regulator-vcc-1v2-hdmi {
+ compatible = "regulator-fixed";
+ regulator-name = "HDMI_CVCC";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vcc_3v3_mmc: regulator-vcc-3v3-mmc {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3_MMC";
@@ -367,6 +386,9 @@
pinctrl-names = "default";
pinctrl-0 = <&hdmi_int_pins_default>;
+ iovcc-supply = <&vcc_3v3_hdmi>;
+ cvcc12-supply = <&vcc_1v2_hdmi>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -376,6 +398,7 @@
sii9022_in: endpoint {
remote-endpoint = <&dpi1_out>;
+ bus-width = <16>;
};
};
@@ -393,6 +416,7 @@
compatible = "atmel,24c02";
pagesize = <16>;
reg = <0x51>;
+ vcc-supply = <&vcc_3v3_mmc>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
index 6957b3e44c82..2f129e8cd5b9 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
@@ -256,7 +256,7 @@
main_usb1_pins_default: main-usb1-default-pins {
pinctrl-single,pins = <
- AM62X_IOPAD(0x0258, PIN_OUTPUT, 0) /* (F18/E16) USB1_DRVVBUS */
+ AM62X_IOPAD(0x0258, PIN_OUTPUT | PIN_DS_PULLUD_ENABLE | PIN_DS_PULL_UP, 0) /* (F18/E16) USB1_DRVVBUS */
>;
};
@@ -315,6 +315,10 @@
};
};
+&cpsw_mac_syscon {
+ bootph-all;
+};
+
&wkup_uart0 {
/* WKUP UART0 is used by DM firmware */
bootph-pre-ram;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index c66289a4362b..324eb44c258d 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -1227,6 +1227,15 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x00 0x30000000 0x80000>;
+ clocks = <&k3_clks 81 0>, /* icssg0_core_clk */
+ <&k3_clks 81 3>, /* icssg0_iep_clk */
+ <&k3_clks 81 16>, /* icssg0_rgmii_mhz_250_clk */
+ <&k3_clks 81 17>, /* icssg0_rgmii_mhz_50_clk */
+ <&k3_clks 81 18>, /* icssg0_rgmii_mhz_5_clk */
+ <&k3_clks 81 19>, /* icssg0_uart_clk */
+ <&k3_clks 81 20>; /* icssg0_iclk */
+ assigned-clocks = <&k3_clks 81 0>;
+ assigned-clock-parents = <&k3_clks 81 2>;
icssg0_mem: memories@0 {
reg = <0x0 0x2000>,
@@ -1252,7 +1261,7 @@
clocks = <&k3_clks 81 0>, /* icssg0_core_clk */
<&k3_clks 81 20>; /* icssg0_iclk */
assigned-clocks = <&icssg0_coreclk_mux>;
- assigned-clock-parents = <&k3_clks 81 20>;
+ assigned-clock-parents = <&k3_clks 81 0>;
};
icssg0_iepclk_mux: iepclk-mux@30 {
@@ -1397,6 +1406,15 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x00 0x30080000 0x80000>;
+ clocks = <&k3_clks 82 0>, /* icssg1_core_clk */
+ <&k3_clks 82 3>, /* icssg1_iep_clk */
+ <&k3_clks 82 16>, /* icssg1_rgmii_mhz_250_clk */
+ <&k3_clks 82 17>, /* icssg1_rgmii_mhz_50_clk */
+ <&k3_clks 82 18>, /* icssg1_rgmii_mhz_5_clk */
+ <&k3_clks 82 19>, /* icssg1_uart_clk */
+ <&k3_clks 82 20>; /* icssg1_iclk */
+ assigned-clocks = <&k3_clks 82 0>;
+ assigned-clock-parents = <&k3_clks 82 2>;
icssg1_mem: memories@0 {
reg = <0x0 0x2000>,
@@ -1422,7 +1440,7 @@
clocks = <&k3_clks 82 0>, /* icssg1_core_clk */
<&k3_clks 82 20>; /* icssg1_iclk */
assigned-clocks = <&icssg1_coreclk_mux>;
- assigned-clock-parents = <&k3_clks 82 20>;
+ assigned-clock-parents = <&k3_clks 82 0>;
};
icssg1_iepclk_mux: iepclk-mux@30 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
index bd9a5caf20da..023b2a6aaa56 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
@@ -2,17 +2,19 @@
/*
* Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
*
- * Overlay for SolidRun AM642 HummingBoard-T to enable PCI-E.
+ * DTS for SolidRun AM642 HummingBoard-T,
+ * running on Cortex A53, with PCI-E.
+ *
*/
-/dts-v1/;
-/plugin/;
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/phy/phy.h>
+#include "k3-am642-hummingboard-t.dts"
#include "k3-serdes.h"
+/ {
+ model = "SolidRun AM642 HummingBoard-T with PCI-E";
+};
+
&pcie0_rc {
pinctrl-names = "default";
pinctrl-0 = <&pcie0_default_pins>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
index ffcc3bd3c7bc..ee9bd618f370 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso
+++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
@@ -2,16 +2,19 @@
/*
* Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
*
- * Overlay for SolidRun AM642 HummingBoard-T to enable USB-3.1.
+ * DTS for SolidRun AM642 HummingBoard-T,
+ * running on Cortex A53, with USB-3.1 Gen 1.
+ *
*/
-/dts-v1/;
-/plugin/;
-
-#include <dt-bindings/phy/phy.h>
+#include "k3-am642-hummingboard-t.dts"
#include "k3-serdes.h"
+/ {
+ model = "SolidRun AM642 HummingBoard-T with USB-3.1 Gen 1";
+};
+
&serdes0 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
index e06a3b178b34..8f64d6272b1b 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
@@ -201,8 +201,6 @@
reset-gpios = <&main_gpio0 44 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
- ti,rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
- ti,tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
};
@@ -230,8 +228,6 @@
reset-gpios = <&main_gpio1 47 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
- ti,rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
- ti,tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
};
@@ -242,8 +238,6 @@
reset-gpios = <&main_gpio1 51 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
- ti,rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
- ti,tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
index 44dfbdf89277..9be6bba28c26 100644
--- a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
+++ b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
@@ -50,11 +50,71 @@
no-map;
};
+ wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
wkup_r5fss0_core0_memory_region: r5f-memory@a0100000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
+
+ mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_dma_memory_region: main-r5fss-dma-memory-region@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: main-r5fss-memory-region@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c7x_0_dma_memory_region: c7x-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_0_memory_region: c7x-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ c7x_1_dma_memory_region: c7x-dma-memory@a4000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4000000 0x00 0x100000>;
+ no-map;
+ };
+
+ c7x_1_memory_region: c7x-memory@a4100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa4100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a5000000 {
+ reg = <0x00 0xa5000000 0x00 0x1c00000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
vsys_5v0: regulator-1 {
@@ -391,3 +451,101 @@
ti,fails-without-test-cd;
status = "okay";
};
+
+&mailbox0_cluster0 {
+ status = "okay";
+
+ mbox_wkup_r5_0: mbox-wkup-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster1 {
+ status = "okay";
+
+ mbox_mcu_r5_0: mbox-mcu-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster2 {
+ status = "okay";
+
+ mbox_c7x_0: mbox-c7x-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+};
+
+&mailbox0_cluster3 {
+ status = "okay";
+
+ mbox_main_r5_0: mbox-main-r5-0 {
+ ti,mbox-rx = <0 0 0>;
+ ti,mbox-tx = <1 0 0>;
+ };
+
+ mbox_c7x_1: mbox-c7x-1 {
+ ti,mbox-rx = <2 0 0>;
+ ti,mbox-tx = <3 0 0>;
+ };
+};
+
+/* Timers are used by Remoteproc firmware */
+&main_timer0 {
+ status = "reserved";
+};
+
+&main_timer1 {
+ status = "reserved";
+};
+
+&main_timer2 {
+ status = "reserved";
+};
+
+&wkup_r5fss0 {
+ status = "okay";
+};
+
+&wkup_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_wkup_r5_0>;
+ memory-region = <&wkup_r5fss0_core0_dma_memory_region>,
+ <&wkup_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0 {
+ status = "okay";
+};
+
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster1 &mbox_mcu_r5_0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0 {
+ status = "okay";
+};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster3 &mbox_main_r5_0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&c7x_0 {
+ mboxes = <&mailbox0_cluster2 &mbox_c7x_0>;
+ memory-region = <&c7x_0_dma_memory_region>,
+ <&c7x_0_memory_region>;
+ status = "okay";
+};
+
+&c7x_1 {
+ mboxes = <&mailbox0_cluster3 &mbox_c7x_1>;
+ memory-region = <&c7x_1_dma_memory_region>,
+ <&c7x_1_memory_region>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso
new file mode 100644
index 000000000000..455736e378cc
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board-pcie1-ep.dtso
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT Overlay for enabling PCIE1 instance in Endpoint Configuration with the
+ * AM68-SK board.
+ *
+ * AM68-SK Board Product Link: https://www.ti.com/tool/SK-AM68
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+#include "k3-pinctrl.h"
+
+/*
+ * Since Root Complex and Endpoint modes are mutually exclusive,
+ * disable Root Complex mode.
+ */
+&pcie1_rc {
+ status = "disabled";
+};
+
+&cbass_main {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic500>;
+
+ pcie1_ep: pcie-ep@2910000 {
+ compatible = "ti,j7200-pcie-ep", "ti,j721e-pcie-ep";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ power-domains = <&k3_pds 276 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 276 41>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>;
+ dma-coherent;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ ti,syscon-pcie-ctrl = <&scm_conf 0x074>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk-pcie0-ep.dtso b/arch/arm64/boot/dts/ti/k3-am69-sk-pcie0-ep.dtso
new file mode 100644
index 000000000000..9a5bcf282a9e
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk-pcie0-ep.dtso
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT Overlay for enabling the PCIE0 instance of PCIe in Endpoint Configuration
+ * on AM69-SK.
+ *
+ * AM69-SK Product Link: https://www.ti.com/tool/SK-AM69
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+#include "k3-pinctrl.h"
+
+/*
+ * Since Root Complex and Endpoint modes are mutually exclusive,
+ * disable Root Complex mode.
+ */
+&pcie0_rc {
+ status = "disabled";
+};
+
+&cbass_main {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic500>;
+
+ pcie0_ep: pcie-ep@2900000 {
+ compatible = "ti,j784s4-pcie-ep";
+ reg = <0x00 0x02900000 0x00 0x1000>,
+ <0x00 0x02907000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x10000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 332 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 332 0>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>;
+ dma-coherent;
+ phys = <&serdes1_pcie_link>;
+ phy-names = "pcie-phy";
+ ti,syscon-pcie-ctrl = <&pcie0_ctrl 0x0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index 1e36965a1403..b85227052f97 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -484,6 +484,12 @@
>;
};
+ main_usbss0_pins_default: main-usbss0-default-pins {
+ pinctrl-single,pins = <
+ J784S4_IOPAD(0x0ec, PIN_OUTPUT, 6) /* (AN37) TIMER_IO1.USB0_DRVVBUS */
+ >;
+ };
+
};
&wkup_pmx0 {
@@ -755,6 +761,7 @@
regulator-max-microvolt = <1100000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka3: buck3 {
@@ -763,6 +770,7 @@
regulator-max-microvolt = <850000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka4: buck4 {
@@ -771,6 +779,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka5: buck5 {
@@ -779,6 +788,7 @@
regulator-max-microvolt = <850000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa1: ldo1 {
@@ -787,6 +797,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa2: ldo2 {
@@ -795,6 +806,7 @@
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa3: ldo3 {
@@ -803,6 +815,7 @@
regulator-max-microvolt = <800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa4: ldo4 {
@@ -811,6 +824,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
};
};
@@ -1299,6 +1313,14 @@
cdns,phy-type = <PHY_TYPE_PCIE>;
resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>, <&serdes_wiz0 3>;
};
+
+ serdes0_usb_link: phy@3 {
+ reg = <3>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_USB3>;
+ resets = <&serdes_wiz0 4>;
+ };
};
&serdes_wiz1 {
@@ -1339,3 +1361,22 @@
phy-names = "pcie-phy";
num-lanes = <1>;
};
+
+&usb_serdes_mux {
+ idle-states = <0>; /* USB0 to SERDES0 */
+};
+
+&usbss0 {
+ status = "okay";
+ pinctrl-0 = <&main_usbss0_pins_default>;
+ pinctrl-names = "default";
+ ti,vbus-divider;
+};
+
+&usb0 {
+ status = "okay";
+ dr_mode = "otg";
+ maximum-speed = "super-speed";
+ phys = <&serdes0_usb_link>;
+ phy-names = "cdns3,usb3-phy";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
index db43e7e10b76..f684ce6ad9ad 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
@@ -409,6 +409,10 @@
<J7200_SERDES0_LANE2_QSGMII_LANE1>, <J7200_SERDES0_LANE3_IP4_UNUSED>;
};
+&mcu_spi1 {
+ mux-controls = <&spi1_linkdis 0>;
+};
+
&usb_serdes_mux {
idle-states = <1>; /* USB0 to SERDES lane 3 */
bootph-all;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 6a8453865874..56ab144fea07 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -184,6 +184,13 @@
reg = <0x4040 0x4>;
#phy-cells = <1>;
};
+
+ spi1_linkdis: mux-controller@4060 {
+ compatible = "reg-mux";
+ reg = <0x4060 0x4>;
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x0 0x1>;
+ };
};
wkup_conf: bus@43000000 {
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso
new file mode 100644
index 000000000000..a8cccdcf3e3b
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j721e-evm-pcie1-ep.dtso
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/**
+ * DT Overlay for enabling PCIE1 instance in Endpoint Configuration with the
+ * J7 common processor board.
+ *
+ * J7 Common Processor Board Product Link: https://www.ti.com/tool/J721EXCPXEVM
+ *
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+#include "k3-pinctrl.h"
+
+/*
+ * Since Root Complex and Endpoint modes are mutually exclusive,
+ * disable Root Complex mode.
+ */
+&pcie1_rc {
+ status = "disabled";
+};
+
+&cbass_main {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic500>;
+
+ pcie1_ep: pcie-ep@2910000 {
+ compatible = "ti,j721e-pcie-ep";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 240 1>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>;
+ dma-coherent;
+ phys = <&serdes1_pcie_link>;
+ phy-names = "pcie-phy";
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index a00f4a7d20d9..d184e9c1a0a5 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -359,6 +359,13 @@
J722S_IOPAD(0x00a0, PIN_OUTPUT, 1) /* (N24) GPMC0_WPn.AUDIO_EXT_REFCLK1 */
>;
};
+
+ pmic_irq_pins_default: pmic-irq-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x030, PIN_INPUT, 7) /* (K23) GPIO0_12 */
+ >;
+ };
+
};
&cpsw3g {
@@ -406,6 +413,13 @@
&mcu_pmx0 {
+ mcu_i2c0_pins_default: mcu-i2c0-default-pins {
+ pinctrl-single,pins = <
+ J722S_MCU_IOPAD(0x048, PIN_INPUT, 0) /* (E11) MCU_I2C0_SDA */
+ J722S_MCU_IOPAD(0x044, PIN_INPUT, 0) /* (B13) MCU_I2C0_SCL */
+ >;
+ };
+
mcu_mcan0_pins_default: mcu-mcan0-default-pins {
pinctrl-single,pins = <
J722S_MCU_IOPAD(0x038, PIN_INPUT, 0) /* (D8) MCU_MCAN0_RX */
@@ -459,6 +473,87 @@
clock-frequency = <400000>;
status = "okay";
bootph-all;
+
+ tps65224: pmic@48 {
+ compatible = "ti,tps65224-q1";
+ reg = <0x48>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq_pins_default>;
+ interrupt-parent = <&main_gpio0>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ ti,primary-pmic;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ buck12-supply = <&vsys_io_3v3>;
+ buck3-supply = <&vsys_io_3v3>;
+ buck4-supply = <&vsys_io_3v3>;
+
+ ldo1-supply = <&vsys_io_3v3>;
+ ldo2-supply = <&vsys_io_3v3>;
+ ldo3-supply = <&vsys_io_3v3>;
+
+ regulators {
+
+ buck1: buck1 {
+ regulator-name = "vcc1v8_io_buck1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ bootph-all;
+ };
+
+ buck2: buck2 {
+ regulator-name = "vcc1v1_ddr_buck2";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck3: buck3 {
+ regulator-name = "vcc0v85_ram_buck3";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4: buck4 {
+ regulator-name = "vcc0v75_ioret_buck4";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: ldo1 {
+ regulator-name = "vdda1v8_pll_ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2: ldo2 {
+ regulator-name = "dvdd3v3_ldo2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: ldo3 {
+ regulator-name = "vdd1v85_phy_ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
};
&k3_clks {
@@ -812,3 +907,10 @@
&mcu_gpio0 {
status = "okay";
};
+
+&mcu_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_i2c0_pins_default>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
index b2e2b9f507a9..2664f74a9c7a 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
@@ -635,6 +635,7 @@
regulator-max-microvolt = <1100000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka3: buck3 {
@@ -643,6 +644,7 @@
regulator-max-microvolt = <850000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka4: buck4 {
@@ -651,6 +653,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
bucka5: buck5 {
@@ -659,6 +662,7 @@
regulator-max-microvolt = <850000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa1: ldo1 {
@@ -667,6 +671,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa2: ldo2 {
@@ -675,6 +680,7 @@
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa3: ldo3 {
@@ -683,6 +689,7 @@
regulator-max-microvolt = <800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
ldoa4: ldo4 {
@@ -691,6 +698,7 @@
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
index 7721852c1f68..83bbf94b58d1 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
@@ -224,7 +224,7 @@
};
main_pmx0: pinctrl@11c000 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x11c000 0x00 0x120>;
#pinctrl-cells = <1>;
@@ -234,7 +234,7 @@
/* TIMERIO pad input CTRLMMR_TIMER*_CTRL registers */
main_timerio_input: pinctrl@104200 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
reg = <0x00 0x104200 0x00 0x50>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
@@ -243,7 +243,7 @@
/* TIMERIO pad output CTCTRLMMR_TIMERIO*_CTRL registers */
main_timerio_output: pinctrl@104280 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
reg = <0x00 0x104280 0x00 0x20>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
@@ -2040,7 +2040,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 376 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 376 1>;
+ clocks = <&k3_clks 376 0>;
status = "disabled";
};
@@ -2051,7 +2051,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 377 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 377 1>;
+ clocks = <&k3_clks 377 0>;
status = "disabled";
};
@@ -2062,7 +2062,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 378 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 378 1>;
+ clocks = <&k3_clks 378 0>;
status = "disabled";
};
@@ -2073,7 +2073,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 379 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 379 1>;
+ clocks = <&k3_clks 379 0>;
status = "disabled";
};
@@ -2084,7 +2084,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 380 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 380 1>;
+ clocks = <&k3_clks 380 0>;
status = "disabled";
};
@@ -2095,7 +2095,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 381 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 381 1>;
+ clocks = <&k3_clks 381 0>;
status = "disabled";
};
@@ -2106,7 +2106,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 382 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 382 1>;
+ clocks = <&k3_clks 382 0>;
status = "disabled";
};
@@ -2117,7 +2117,7 @@
#address-cells = <1>;
#size-cells = <0>;
power-domains = <&k3_pds 383 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 383 1>;
+ clocks = <&k3_clks 383 0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi
index 9638130caece..52e2965a3bf5 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-mcu-wakeup-common.dtsi
@@ -76,7 +76,7 @@
};
wkup_pmx0: pinctrl@4301c000 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x4301c000 0x00 0x034>;
#pinctrl-cells = <1>;
@@ -85,7 +85,7 @@
};
wkup_pmx1: pinctrl@4301c038 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x4301c038 0x00 0x02c>;
#pinctrl-cells = <1>;
@@ -94,7 +94,7 @@
};
wkup_pmx2: pinctrl@4301c068 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x4301c068 0x00 0x120>;
#pinctrl-cells = <1>;
@@ -103,7 +103,7 @@
};
wkup_pmx3: pinctrl@4301c190 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x4301c190 0x00 0x004>;
#pinctrl-cells = <1>;
@@ -125,7 +125,7 @@
/* MCU_TIMERIO pad input CTRLMMR_MCU_TIMER*_CTRL registers */
mcu_timerio_input: pinctrl@40f04200 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
reg = <0x00 0x40f04200 0x00 0x28>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
@@ -136,7 +136,7 @@
/* MCU_TIMERIO pad output CTRLMMR_MCU_TIMERIO*_CTRL registers */
mcu_timerio_output: pinctrl@40f04280 {
- compatible = "pinctrl-single";
+ compatible = "ti,j7200-padconf", "pinctrl-single";
reg = <0x00 0x40f04280 0x00 0x28>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h
index 22b8d73cfd32..cac7cccc1112 100644
--- a/arch/arm64/boot/dts/ti/k3-pinctrl.h
+++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h
@@ -12,6 +12,12 @@
#define PULLTYPESEL_SHIFT (17)
#define RXACTIVE_SHIFT (18)
#define DEBOUNCE_SHIFT (11)
+#define FORCE_DS_EN_SHIFT (15)
+#define DS_EN_SHIFT (24)
+#define DS_OUT_DIS_SHIFT (25)
+#define DS_OUT_VAL_SHIFT (26)
+#define DS_PULLUD_EN_SHIFT (27)
+#define DS_PULLTYPE_SEL_SHIFT (28)
#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
#define PULL_ENABLE (0 << PULLUDEN_SHIFT)
@@ -38,6 +44,19 @@
#define PIN_DEBOUNCE_CONF5 (5 << DEBOUNCE_SHIFT)
#define PIN_DEBOUNCE_CONF6 (6 << DEBOUNCE_SHIFT)
+#define PIN_DS_FORCE_DISABLE (0 << FORCE_DS_EN_SHIFT)
+#define PIN_DS_FORCE_ENABLE (1 << FORCE_DS_EN_SHIFT)
+#define PIN_DS_IO_OVERRIDE_DISABLE (0 << DS_EN_SHIFT)
+#define PIN_DS_IO_OVERRIDE_ENABLE (1 << DS_EN_SHIFT)
+#define PIN_DS_OUT_ENABLE (0 << DS_OUT_DIS_SHIFT)
+#define PIN_DS_OUT_DISABLE (1 << DS_OUT_DIS_SHIFT)
+#define PIN_DS_OUT_VALUE_ZERO (0 << DS_OUT_VAL_SHIFT)
+#define PIN_DS_OUT_VALUE_ONE (1 << DS_OUT_VAL_SHIFT)
+#define PIN_DS_PULLUD_ENABLE (0 << DS_PULLUD_EN_SHIFT)
+#define PIN_DS_PULLUD_DISABLE (1 << DS_PULLUD_EN_SHIFT)
+#define PIN_DS_PULL_DOWN (0 << DS_PULLTYPE_SEL_SHIFT)
+#define PIN_DS_PULL_UP (1 << DS_PULLTYPE_SEL_SHIFT)
+
/* Default mux configuration for gpio-ranges to use with pinctrl */
#define PIN_GPIO_RANGE_IOPAD (PIN_INPUT | 7)
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index c62831e61586..cb7da4415599 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -44,6 +44,7 @@ CONFIG_ARCH_BCM_IPROC=y
CONFIG_ARCH_BCMBCA=y
CONFIG_ARCH_BRCMSTB=y
CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BLAIZE=y
CONFIG_ARCH_EXYNOS=y
CONFIG_ARCH_SPARX5=y
CONFIG_ARCH_K3=y
@@ -200,6 +201,7 @@ CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=m
+CONFIG_RFKILL_GPIO=m
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_NFC=m
@@ -330,6 +332,7 @@ CONFIG_VIRTIO_NET=y
CONFIG_MHI_NET=m
CONFIG_NET_DSA_BCM_SF2=m
CONFIG_NET_DSA_MSCC_FELIX=m
+CONFIG_ENA_ETHERNET=m
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
CONFIG_ATL1C=m
@@ -358,6 +361,8 @@ CONFIG_IGBVF=y
CONFIG_MVNETA=y
CONFIG_MVPP2=y
CONFIG_SKY2=y
+CONFIG_NET_VENDOR_MEDIATEK=y
+CONFIG_NET_MEDIATEK_STAR_EMAC=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
@@ -373,6 +378,7 @@ CONFIG_SMSC911X=y
CONFIG_SNI_AVE=y
CONFIG_SNI_NETSEC=y
CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_MEDIATEK=m
CONFIG_DWMAC_TEGRA=m
CONFIG_TI_K3_AM65_CPSW_NUSS=y
CONFIG_TI_ICSSG_PRUETH=m
@@ -516,6 +522,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_MUX_PINCTRL=m
CONFIG_I2C_BCM2835=m
CONFIG_I2C_CADENCE=m
CONFIG_I2C_DESIGNWARE_CORE=y
@@ -607,6 +614,8 @@ CONFIG_PINCTRL_MSM8996=y
CONFIG_PINCTRL_MSM8998=y
CONFIG_PINCTRL_QCM2290=y
CONFIG_PINCTRL_QCS404=y
+CONFIG_PINCTRL_QCS615=y
+CONFIG_PINCTRL_QCS8300=y
CONFIG_PINCTRL_QDF2XXX=y
CONFIG_PINCTRL_QDU1000=y
CONFIG_PINCTRL_SA8775P=y
@@ -629,6 +638,7 @@ CONFIG_PINCTRL_SM8350=y
CONFIG_PINCTRL_SM8450=y
CONFIG_PINCTRL_SM8550=y
CONFIG_PINCTRL_SM8650=y
+CONFIG_PINCTRL_SM8750=y
CONFIG_PINCTRL_X1E80100=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_LPASS_LPI=m
@@ -737,6 +747,7 @@ CONFIG_MESON_WATCHDOG=m
CONFIG_ARM_SMC_WATCHDOG=y
CONFIG_RENESAS_WDT=y
CONFIG_RENESAS_RZG2LWDT=y
+CONFIG_RENESAS_RZV2HWDT=y
CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_PM8916_WATCHDOG=m
CONFIG_BCM2835_WDT=y
@@ -872,6 +883,7 @@ CONFIG_ROCKCHIP_VOP2=y
CONFIG_ROCKCHIP_ANALOGIX_DP=y
CONFIG_ROCKCHIP_CDN_DP=y
CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_HDMI_QP=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_ROCKCHIP_LVDS=y
@@ -900,6 +912,7 @@ CONFIG_DRM_PANEL_SITRONIX_ST7703=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_PANEL_VISIONOX_VTDR6130=m
CONFIG_DRM_FSL_LDB=m
+CONFIG_DRM_ITE_IT6263=m
CONFIG_DRM_LONTIUM_LT8912B=m
CONFIG_DRM_LONTIUM_LT9611=m
CONFIG_DRM_LONTIUM_LT9611UXC=m
@@ -965,6 +978,8 @@ CONFIG_SND_SOC_IMX_AUDMIX=m
CONFIG_SND_SOC_MT8183=m
CONFIG_SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A=m
CONFIG_SND_SOC_MT8183_DA7219_MAX98357A=m
+CONFIG_SND_SOC_MT8188=m
+CONFIG_SND_SOC_MT8188_MT6359=m
CONFIG_SND_SOC_MT8192=m
CONFIG_SND_SOC_MT8192_MT6359_RT1015_RT5682=m
CONFIG_SND_SOC_MT8195=m
@@ -993,6 +1008,7 @@ CONFIG_SND_SOC_RZ=m
CONFIG_SND_SOC_SOF_TOPLEVEL=y
CONFIG_SND_SOC_SOF_OF=y
CONFIG_SND_SOC_SOF_MTK_TOPLEVEL=y
+CONFIG_SND_SOC_SOF_MT8186=m
CONFIG_SND_SOC_SOF_MT8195=m
CONFIG_SND_SUN8I_CODEC=m
CONFIG_SND_SUN8I_CODEC_ANALOG=m
@@ -1120,6 +1136,7 @@ CONFIG_USB_MASS_STORAGE=m
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_TCPCI_MAXIM=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_QCOM_PMIC=m
CONFIG_TYPEC_UCSI=m
@@ -1310,6 +1327,7 @@ CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_QCOM_CLK_RPMH=y
CONFIG_IPQ_APSS_6018=y
CONFIG_IPQ_APSS_5018=y
+CONFIG_IPQ_CMN_PLL=m
CONFIG_IPQ_GCC_5018=y
CONFIG_IPQ_GCC_5332=y
CONFIG_IPQ_GCC_6018=y
@@ -1325,11 +1343,15 @@ CONFIG_MSM_MMCC_8998=m
CONFIG_QCM_GCC_2290=y
CONFIG_QCM_DISPCC_2290=m
CONFIG_QCS_GCC_404=y
+CONFIG_QCS_GCC_615=y
+CONFIG_QCS_GCC_8300=y
CONFIG_SC_CAMCC_7280=m
+CONFIG_SA_CAMCC_8775P=m
CONFIG_QDU_GCC_1000=y
CONFIG_SC_CAMCC_8280XP=m
CONFIG_SC_DISPCC_7280=m
CONFIG_SC_DISPCC_8280XP=m
+CONFIG_SA_DISPCC_8775P=m
CONFIG_SA_GCC_8775P=y
CONFIG_SA_GPUCC_8775P=m
CONFIG_SC_GCC_7180=y
@@ -1352,13 +1374,14 @@ CONFIG_SM_DISPCC_6115=m
CONFIG_SM_DISPCC_8250=y
CONFIG_SM_DISPCC_8450=m
CONFIG_SM_DISPCC_8550=m
-CONFIG_SM_DISPCC_8650=m
+CONFIG_SM_DISPCC_8750=m
CONFIG_SM_GCC_4450=y
CONFIG_SM_GCC_6115=y
CONFIG_SM_GCC_8350=y
CONFIG_SM_GCC_8450=y
CONFIG_SM_GCC_8550=y
CONFIG_SM_GCC_8650=y
+CONFIG_SM_GCC_8750=y
CONFIG_SM_GPUCC_6115=m
CONFIG_SM_GPUCC_8150=y
CONFIG_SM_GPUCC_8250=y
@@ -1368,6 +1391,8 @@ CONFIG_SM_GPUCC_8550=m
CONFIG_SM_GPUCC_8650=m
CONFIG_SM_TCSRCC_8550=y
CONFIG_SM_TCSRCC_8650=y
+CONFIG_SM_TCSRCC_8750=m
+CONFIG_SA_VIDEOCC_8775P=m
CONFIG_SM_VIDEOCC_8250=y
CONFIG_QCOM_HFPLL=y
CONFIG_CLK_GFM_LPASS_SM8250=m
@@ -1400,6 +1425,7 @@ CONFIG_QCOM_Q6V5_PAS=m
CONFIG_QCOM_SYSMON=m
CONFIG_QCOM_WCNSS_PIL=m
CONFIG_TI_K3_DSP_REMOTEPROC=m
+CONFIG_TI_K3_M4_REMOTEPROC=m
CONFIG_TI_K3_R5_REMOTEPROC=m
CONFIG_RPMSG_CHAR=m
CONFIG_RPMSG_CTRL=m
@@ -1456,6 +1482,7 @@ CONFIG_ARCH_R9A07G044=y
CONFIG_ARCH_R9A07G054=y
CONFIG_ARCH_R9A08G045=y
CONFIG_ARCH_R9A09G011=y
+CONFIG_ARCH_R9A09G047=y
CONFIG_ARCH_R9A09G057=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_ARCH_TEGRA_132_SOC=y
@@ -1632,6 +1659,8 @@ CONFIG_INTERCONNECT_QCOM_MSM8996=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
CONFIG_INTERCONNECT_QCOM_QCM2290=y
CONFIG_INTERCONNECT_QCOM_QCS404=m
+CONFIG_INTERCONNECT_QCOM_QCS615=y
+CONFIG_INTERCONNECT_QCOM_QCS8300=y
CONFIG_INTERCONNECT_QCOM_QDU1000=y
CONFIG_INTERCONNECT_QCOM_SA8775P=y
CONFIG_INTERCONNECT_QCOM_SC7180=y
@@ -1647,6 +1676,7 @@ CONFIG_INTERCONNECT_QCOM_SM8350=y
CONFIG_INTERCONNECT_QCOM_SM8450=y
CONFIG_INTERCONNECT_QCOM_SM8550=y
CONFIG_INTERCONNECT_QCOM_SM8650=y
+CONFIG_INTERCONNECT_QCOM_SM8750=y
CONFIG_INTERCONNECT_QCOM_X1E80100=y
CONFIG_COUNTER=m
CONFIG_RZ_MTU3_CNT=m
@@ -1698,7 +1728,6 @@ CONFIG_CRYPTO_SM3_ARM64_CE=m
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_BS=m
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
-CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
CONFIG_CRYPTO_DEV_SUN8I_CE=m
CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index e7d9bd8e4709..5636ab83f22a 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -312,15 +312,5 @@ config CRYPTO_SM4_ARM64_CE_GCM
- PMULL (Polynomial Multiply Long) instructions
- NEON (Advanced SIMD) extensions
-config CRYPTO_CRCT10DIF_ARM64_CE
- tristate "CRCT10DIF (PMULL)"
- depends on KERNEL_MODE_NEON && CRC_T10DIF
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- Architecture: arm64 using
- - PMULL (Polynomial Multiply Long) instructions
-
endmenu
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index fbe64dce66e0..e7139c4768ce 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -44,9 +44,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
obj-$(CONFIG_CRYPTO_POLYVAL_ARM64_CE) += polyval-ce.o
polyval-ce-y := polyval-ce-glue.o polyval-ce-core.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
-crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
-
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
deleted file mode 100644
index 08bcbd884395..000000000000
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/cpufeature.h>
-#include <linux/crc-t10dif.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-
-asmlinkage void crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len,
- u8 out[16]);
-asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
-
-static int crct10dif_init(struct shash_desc *desc)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
- return 0;
-}
-
-static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crcp = shash_desc_ctx(desc);
- u16 crc = *crcp;
- u8 buf[16];
-
- if (length > CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- crc_t10dif_pmull_p8(crc, data, length, buf);
- kernel_neon_end();
-
- crc = 0;
- data = buf;
- length = sizeof(buf);
- }
-
- *crcp = crc_t10dif_generic(crc, data, length);
- return 0;
-}
-
-static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull_p64(*crc, data, length);
- kernel_neon_end();
- } else {
- *crc = crc_t10dif_generic(*crc, data, length);
- }
-
- return 0;
-}
-
-static int crct10dif_final(struct shash_desc *desc, u8 *out)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *(u16 *)out = *crc;
- return 0;
-}
-
-static struct shash_alg crc_t10dif_alg[] = {{
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = crct10dif_init,
- .update = crct10dif_update_pmull_p8,
- .final = crct10dif_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
-
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-arm64-neon",
- .base.cra_priority = 150,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = crct10dif_init,
- .update = crct10dif_update_pmull_p64,
- .final = crct10dif_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
-
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-arm64-ce",
- .base.cra_priority = 200,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}};
-
-static int __init crc_t10dif_mod_init(void)
-{
- if (cpu_have_named_feature(PMULL))
- return crypto_register_shashes(crc_t10dif_alg,
- ARRAY_SIZE(crc_t10dif_alg));
- else
- /* only register the first array element */
- return crypto_register_shash(crc_t10dif_alg);
-}
-
-static void __exit crc_t10dif_mod_exit(void)
-{
- if (cpu_have_named_feature(PMULL))
- crypto_unregister_shashes(crc_t10dif_alg,
- ARRAY_SIZE(crc_t10dif_alg));
- else
- crypto_unregister_shash(crc_t10dif_alg);
-}
-
-module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
-module_exit(crc_t10dif_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_DESCRIPTION("CRC-T10DIF using arm64 NEON and Crypto Extensions");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");
diff --git a/arch/arm64/hyperv/hv_core.c b/arch/arm64/hyperv/hv_core.c
index f1ebc025e1df..69004f619c57 100644
--- a/arch/arm64/hyperv/hv_core.c
+++ b/arch/arm64/hyperv/hv_core.c
@@ -11,11 +11,10 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/hyperv.h>
#include <linux/arm-smccc.h>
#include <linux/module.h>
#include <asm-generic/bug.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
/*
diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c
index b1a4de4eee29..fc49949b7df6 100644
--- a/arch/arm64/hyperv/mshyperv.c
+++ b/arch/arm64/hyperv/mshyperv.c
@@ -49,12 +49,12 @@ static int __init hyperv_init(void)
hv_set_vpreg(HV_REGISTER_GUEST_OS_ID, guest_id);
/* Get the features and hints from Hyper-V */
- hv_get_vpreg_128(HV_REGISTER_FEATURES, &result);
+ hv_get_vpreg_128(HV_REGISTER_PRIVILEGES_AND_FEATURES_INFO, &result);
ms_hyperv.features = result.as32.a;
ms_hyperv.priv_high = result.as32.b;
ms_hyperv.misc_features = result.as32.c;
- hv_get_vpreg_128(HV_REGISTER_ENLIGHTENMENTS, &result);
+ hv_get_vpreg_128(HV_REGISTER_FEATURES_INFO, &result);
ms_hyperv.hints = result.as32.a;
pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 488f8e751349..6f3f4142e214 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -122,6 +122,7 @@
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
+#define QCOM_CPU_PART_ORYON_X1 0x001
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
@@ -198,6 +199,7 @@
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
diff --git a/arch/arm64/include/asm/hyperv-tlfs.h b/arch/arm64/include/asm/hyperv-tlfs.h
deleted file mode 100644
index bc30aadedfe9..000000000000
--- a/arch/arm64/include/asm/hyperv-tlfs.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from the Hyper-V Hypervisor Top-Level
- * Functional Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- *
- * Copyright (C) 2021, Microsoft, Inc.
- *
- * Author : Michael Kelley <mikelley@microsoft.com>
- */
-
-#ifndef _ASM_HYPERV_TLFS_H
-#define _ASM_HYPERV_TLFS_H
-
-#include <linux/types.h>
-
-/*
- * All data structures defined in the TLFS that are shared between Hyper-V
- * and a guest VM use Little Endian byte ordering. This matches the default
- * byte ordering of Linux running on ARM64, so no special handling is required.
- */
-
-/*
- * Group C Features. See the asm-generic version of hyperv-tlfs.h
- * for a description of Feature Groups.
- */
-
-/* Crash MSRs available */
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8)
-
-/* STIMER direct mode is available */
-#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
-
-/*
- * To support arch-generic code calling hv_set/get_register:
- * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
- * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
- */
-#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
-#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
-#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
-#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
-#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
-#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
-
-#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
-#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
-#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
-
-#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
-#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
-#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
-#define HV_MSR_SIMP (HV_REGISTER_SIMP)
-#define HV_MSR_EOM (HV_REGISTER_EOM)
-
-#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
-#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
-
-union hv_msi_entry {
- u64 as_uint64[2];
- struct {
- u64 address;
- u32 data;
- u32 reserved;
- } __packed;
-};
-
-#include <asm-generic/hyperv-tlfs.h>
-
-#endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 43e365fbff0b..8d94a6c0ed5c 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -300,7 +300,7 @@
#define CPTR_EL2_TSM (1 << 12)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
-#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0))
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
GENMASK(29, 21) | \
GENMASK(19, 14) | \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ca2590344313..bec227f9500a 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -53,8 +53,7 @@
enum __kvm_host_smccc_func {
/* Hypercalls available only prior to pKVM finalisation */
/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
- __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
- __KVM_HOST_SMCCC_FUNC___pkvm_init,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
@@ -65,6 +64,12 @@ enum __kvm_host_smccc_func {
/* Hypercalls available after pKVM finalisation */
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
@@ -79,6 +84,9 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
+ __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -247,8 +255,6 @@ extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);
-extern u64 __kvm_get_mdcr_el2(void);
-
#define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \
" .align 3\n" \
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4f1d99725f6b..47f2cf408eed 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -184,29 +184,30 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}
-static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
- (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
+ (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H));
}
-static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
+static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
- return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
+ return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
}
-static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
- return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
-}
+ bool e2h, tge;
+ u64 hcr;
-static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
-{
- return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
-}
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ e2h = (hcr & HCR_E2H);
+ tge = (hcr & HCR_TGE);
-static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
-{
/*
* We are in a hypervisor context if the vcpu mode is EL2 or
* E2H and TGE bits are set. The latter means we are in the user space
@@ -215,14 +216,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
* Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
* rest of the KVM code, and will result in a misbehaving guest.
*/
- return vcpu_is_el2_ctxt(ctxt) ||
- (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
- __vcpu_el2_tge_is_set(ctxt);
-}
-
-static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
-{
- return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
+ return vcpu_is_el2(vcpu) || (e2h && tge) || tge;
}
static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
@@ -619,7 +613,8 @@ static __always_inline void kvm_write_cptr_el2(u64 val)
write_sysreg(val, cptr_el2);
}
-static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
+/* Resets the value of cptr_el2 when returning to the host. */
+static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
{
u64 val;
@@ -630,29 +625,28 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
} else if (has_hvhe()) {
val = CPACR_EL1_FPEN;
- if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+ if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
- if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
+ if (kvm_has_sve(kvm) && guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME))
- val &= ~CPTR_EL2_TSM;
+ if (!cpus_have_final_cap(ARM64_SME))
+ val |= CPTR_EL2_TSM;
}
- return val;
-}
-
-static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
-{
- u64 val = kvm_get_reset_cptr_el2(vcpu);
-
kvm_write_cptr_el2(val);
}
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
+#else
+#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm)
+#endif
+
/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.
@@ -697,9 +691,4 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}
-
-static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
-{
- vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
-}
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e18e9244d17a..7cfa024de4e3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -85,6 +85,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_hyp_memcache {
phys_addr_t head;
unsigned long nr_pages;
+ struct pkvm_mapping *mapping; /* only used from EL1 */
};
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
@@ -331,6 +332,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
+ /* SVE exposed to guest */
+#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -490,7 +493,6 @@ enum vcpu_sysreg {
VBAR_EL2, /* Vector Base Address Register (EL2) */
RVBAR_EL2, /* Reset Vector Base Address Register */
CONTEXTIDR_EL2, /* Context ID Register (EL2) */
- CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
CNTHP_CTL_EL2,
CNTHP_CVAL_EL2,
@@ -501,6 +503,7 @@ enum vcpu_sysreg {
MARKER(__SANITISED_REG_START__),
TCR2_EL2, /* Extended Translation Control Register (EL2) */
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
+ CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
/* Any VNCR-capable reg goes after this point */
MARKER(__VNCR_START__),
@@ -610,6 +613,14 @@ struct cpu_sve_state {
* field.
*/
struct kvm_host_data {
+#define KVM_HOST_DATA_FLAG_HAS_SPE 0
+#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
+#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2
+#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3
+#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
+#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
+ unsigned long flags;
+
struct kvm_cpu_context host_ctxt;
/*
@@ -642,7 +653,7 @@ struct kvm_host_data {
* host_debug_state contains the host registers which are
* saved and restored during world switches.
*/
- struct {
+ struct {
/* {Break,watch}point registers */
struct kvm_guest_debug_arch regs;
/* Statistical profiling extension */
@@ -652,6 +663,16 @@ struct kvm_host_data {
/* Values of trap registers for the host before guest entry. */
u64 mdcr_el2;
} host_debug_state;
+
+ /* Guest trace filter value */
+ u64 trfcr_while_in_guest;
+
+ /* Number of programmable event counters (PMCR_EL0.N) for this CPU */
+ unsigned int nr_event_counters;
+
+ /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
+ unsigned int debug_brps;
+ unsigned int debug_wrps;
};
struct kvm_host_psci_config {
@@ -708,7 +729,6 @@ struct kvm_vcpu_arch {
u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2;
- u64 cptr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
@@ -739,31 +759,22 @@ struct kvm_vcpu_arch {
*
* external_debug_state contains the debug values we want to debug the
* guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
- *
- * debug_ptr points to the set of debug registers that should be loaded
- * onto the hardware when running the guest.
*/
- struct kvm_guest_debug_arch *debug_ptr;
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
+ u64 external_mdscr_el1;
+
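+ /* Tracks who currently owns the vCPU's debug register state. */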
+ enum {
+ VCPU_DEBUG_FREE,
+ VCPU_DEBUG_HOST_OWNED,
+ VCPU_DEBUG_GUEST_OWNED,
+ } debug_owner;
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
struct kvm_pmu pmu;
- /*
- * Guest registers we preserve during guest debugging.
- *
- * These shadow registers are updated by the kvm_handle_sys_reg
- * trap handler if the guest accesses or updates them while we
- * are using guest debug.
- */
- struct {
- u32 mdscr_el1;
- bool pstate_ss;
- } guest_debug_preserved;
-
/* vcpu power state */
struct kvm_mp_state mp_state;
spinlock_t mp_state_lock;
@@ -771,6 +782,9 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
+ /* Pages to top-up the pKVM/EL2 guest pool */
+ struct kvm_hyp_memcache pkvm_memcache;
+
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
@@ -863,14 +877,10 @@ struct kvm_vcpu_arch {
#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
-/* SVE exposed to guest */
-#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
-/* PTRAUTH exposed to guest */
-#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
-/* KVM_ARM_VCPU_INIT completed */
-#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -906,29 +916,21 @@ struct kvm_vcpu_arch {
#define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
-/* Guest debug is live */
-#define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4))
-/* Save SPE context if active */
-#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
-/* Save TRBE context if active */
-#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
-
-/* SVE enabled for host EL0 */
-#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
-/* SME enabled for EL0 */
-#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
+
/* Physical CPU not in supported_cpus */
-#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
+#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
-#define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
+#define IN_WFIT __vcpu_single_flag(sflags, BIT(1))
/* vcpu system registers loaded on physical CPU */
-#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
-/* Software step state is Active-pending */
-#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))
+#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2))
+/* Software step state is Active-pending for external debug */
+#define HOST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3))
+/* Software step state is Active pending for guest debug */
+#define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4))
/* PMUSERENR for the guest EL0 is on physical CPU */
-#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6))
+#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
-#define IN_WFI __vcpu_single_flag(sflags, BIT(7))
+#define IN_WFI __vcpu_single_flag(sflags, BIT(6))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -959,14 +961,21 @@ struct kvm_vcpu_arch {
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
-#define vcpu_has_sve(vcpu) (system_supports_sve() && \
- vcpu_get_flag(vcpu, GUEST_HAS_SVE))
+#define kvm_has_sve(kvm) (system_supports_sve() && \
+ test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))
+
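+/*
+ * At EL2 (nVHE hypervisor) the vcpu->kvm pointer is a kernel VA and must
+ * be translated with kern_hyp_va() before it can be dereferenced.
+ */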
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm))
+#else
+#define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
- vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \
+ vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
#else
#define vcpu_has_ptrauth(vcpu) false
#endif
@@ -1307,6 +1316,13 @@ DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
&this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif
+#define host_data_test_flag(flag) \
+ (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
+#define host_data_set_flag(flag) \
+ set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
+#define host_data_clear_flag(flag) \
+ clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
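+/*
+ * e.g. host_data_set_flag(HAS_TRBE) sets KVM_HOST_DATA_FLAG_HAS_TRBE in
+ * this CPU's kvm_host_data::flags.
+ */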
+
/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
@@ -1332,15 +1348,22 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-void kvm_arm_init_debug(void);
-void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+void kvm_init_host_debug_data(void);
+void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
+void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
+void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);
#define kvm_vcpu_os_lock_enabled(vcpu) \
(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
+#define kvm_debug_regs_in_use(vcpu) \
+ ((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
+#define kvm_host_owns_debug_regs(vcpu) \
+ ((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
+#define kvm_guest_owns_debug_regs(vcpu) \
+ ((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)
+
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
@@ -1367,14 +1390,13 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
return (!has_vhe() && attr->exclude_host);
}
-/* Flags for host debug state */
-void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
-
#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
+void kvm_enable_trbe(void);
+void kvm_disable_trbe(void);
+void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1382,6 +1404,9 @@ static inline bool kvm_set_pmuserenr(u64 val)
{
return false;
}
+static inline void kvm_enable_trbe(void) {}
+static inline void kvm_disable_trbe(void) {}
+static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {}
#endif
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
@@ -1422,6 +1447,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
return test_bit(feature, ka->vcpu_features);
}
+#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 66d93e320ec8..b98ac6aa631f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+extern u32 __hyp_va_bits;
+
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
@@ -353,6 +355,22 @@ static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
return &kvm->arch.mmu != mmu;
}
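+/*
+ * Protected VMs serialise stage-2 fault handling with the write lock;
+ * everything else can handle faults under the read lock, relying on the
+ * page-table code's support for concurrent (shared) walks.
+ */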
+static inline void kvm_fault_lock(struct kvm *kvm)
+{
+ if (is_protected_kvm_enabled())
+ write_lock(&kvm->mmu_lock);
+ else
+ read_lock(&kvm->mmu_lock);
+}
+
+static inline void kvm_fault_unlock(struct kvm *kvm)
+{
+ if (is_protected_kvm_enabled())
+ write_unlock(&kvm->mmu_lock);
+ else
+ read_unlock(&kvm->mmu_lock);
+}
+
#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
#else
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 6cd08198bf19..56c4bcd35e2e 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -64,6 +64,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
}
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
+extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
@@ -186,7 +187,7 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
return true;
}
-int kvm_init_nv_sysregs(struct kvm *kvm);
+int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index aab04097b505..6b9d274052c7 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -412,15 +412,20 @@ static inline bool kvm_pgtable_walk_lock_held(void)
* be used instead of block mappings.
*/
struct kvm_pgtable {
- u32 ia_bits;
- s8 start_level;
- kvm_pteref_t pgd;
- struct kvm_pgtable_mm_ops *mm_ops;
-
- /* Stage-2 only */
- struct kvm_s2_mmu *mmu;
- enum kvm_pgtable_stage2_flags flags;
- kvm_pgtable_force_pte_cb_t force_pte_cb;
+ union {
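+ /*
+ * pKVM guests track their stage-2 mappings in an rb-tree of
+ * struct pkvm_mapping rather than through a pgd owned by the host.
+ */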
+ struct rb_root pkvm_mappings;
+ struct {
+ u32 ia_bits;
+ s8 start_level;
+ kvm_pteref_t pgd;
+ struct kvm_pgtable_mm_ops *mm_ops;
+
+ /* Stage-2 only */
+ enum kvm_pgtable_stage2_flags flags;
+ kvm_pgtable_force_pte_cb_t force_pte_cb;
+ };
+ };
+ struct kvm_s2_mmu *mmu;
};
/**
@@ -526,8 +531,11 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb);
-#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
- __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
+static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
+{
+ return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
+}
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
@@ -669,13 +677,15 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
* kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
+ * @flags: Flags to control the page-table walk (e.g. a shared walk)
*
* The offset of @addr within a page is ignored.
*
* If there is a valid, leaf page-table entry used to translate @addr, then
* set the access flag in that entry.
*/
-void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
+void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
@@ -705,6 +715,7 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
* @prot: Additional permissions to grant for the mapping.
+ * @flags: Flags to control the page-table walk (e.g. a shared walk)
*
* The offset of @addr within a page is ignored.
*
@@ -717,7 +728,8 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
- enum kvm_pgtable_prot prot);
+ enum kvm_pgtable_prot prot,
+ enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index cd56acd9a842..eb65f12e81d9 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -20,6 +20,31 @@ int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
+/*
+ * This functions as an allow-list of protected VM capabilities.
+ * Features not explicitly allowed by this function are denied.
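+ * Used by kvm_vm_ioctl_enable_cap() and kvm_vm_ioctl_check_extension().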
+ */
+static inline bool kvm_pvm_ext_allowed(long ext)
+{
+ switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ case KVM_CAP_ARM_PSCI:
+ case KVM_CAP_ARM_PSCI_0_2:
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ case KVM_CAP_MAX_VCPU_ID:
+ case KVM_CAP_MSI_DEVID:
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ case KVM_CAP_ARM_PMU_V3:
+ case KVM_CAP_ARM_SVE:
+ case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+ case KVM_CAP_ARM_PTRAUTH_GENERIC:
+ return true;
+ default:
+ return false;
+ }
+}
+
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
@@ -137,4 +162,30 @@ static inline size_t pkvm_host_sve_state_size(void)
SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
+struct pkvm_mapping {
+ struct rb_node node;
+ u64 gfn;
+ u64 pfn;
+};
+
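+/*
+ * Host-side counterparts of the kvm_pgtable_stage2_* API for protected
+ * guests, where the stage-2 page-tables are owned by the hypervisor at EL2.
+ */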
+int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops);
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
+int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
+ enum kvm_pgtable_prot prot, void *mc,
+ enum kvm_pgtable_walk_flags flags);
+int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
+int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
+int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
+bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
+int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
+ enum kvm_pgtable_walk_flags flags);
+void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags);
+int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct kvm_mmu_memory_cache *mc);
+void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
+kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
+ enum kvm_pgtable_prot prot, void *mc,
+ bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 8b9f33cf561b..717829df294e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -145,13 +145,16 @@
#define OVERFLOW_STACK_SIZE SZ_4K
+#define NVHE_STACK_SHIFT PAGE_SHIFT
+#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT)
+
/*
* With the minimum frame size of [x29, x30], exactly half the combined
* sizes of the hyp and overflow stacks is the maximum size needed to
* save the unwound stacktrace, plus an additional entry to delimit the
* end.
*/
-#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
/*
* Alignment of kernel segments (e.g. .text, .data).
diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
index a975e1a689dd..2e2f83bafcfb 100644
--- a/arch/arm64/include/asm/mshyperv.h
+++ b/arch/arm64/include/asm/mshyperv.h
@@ -6,9 +6,8 @@
* the ARM64 architecture. See include/asm-generic/mshyperv.h for
* definitions that are architecture independent.
*
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
*
* Copyright (C) 2021, Microsoft, Inc.
*
@@ -20,7 +19,7 @@
#include <linux/types.h>
#include <linux/arm-smccc.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
/*
* Declare calls to get and set Hyper-V VP register values on ARM64, which
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e75422864d1b..1b4509d3382c 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -85,24 +85,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp)
__pgd_populate(pgdp, __pa(p4dp), pgdval);
}
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
-}
-
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (!pgtable_l5_enabled())
- return;
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
#else
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot)
{
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 44759281d0d4..171f9edef49f 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b8303a83c0bf..05ea5223d2d5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -283,8 +283,6 @@
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
-#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
-
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
@@ -477,6 +475,7 @@
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
+#define SYS_CNTVCT_EL0 sys_reg(3, 3, 14, 0, 2)
#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
@@ -484,14 +483,17 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+#define SYS_CNTV_TVAL_EL0 sys_reg(3, 3, 14, 3, 0)
#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
+#define SYS_AARCH32_CNTVCT sys_reg(0, 1, 0, 14, 0)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
+#define SYS_AARCH32_CNTVCTSS sys_reg(0, 9, 0, 14, 0)
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
@@ -519,7 +521,6 @@
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
-#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
@@ -983,15 +984,6 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
-#define TRFCR_ELx_TS_SHIFT 5
-#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_EL2_CX BIT(3)
-#define TRFCR_ELx_ExTRE BIT(1)
-#define TRFCR_ELx_E0TRE BIT(0)
-
/* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */
#define ICH_MISR_EOI (1 << 0)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a947c6e784ed..8d762607285c 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -9,12 +9,7 @@
#define __ASM_TLB_H
#include <linux/pagemap.h>
-#include <linux/swap.h>
-static inline void __tlb_remove_table(void *_table)
-{
- free_page_and_swap_cache((struct page *)_table);
-}
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);
@@ -82,7 +77,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
@@ -92,7 +86,6 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
{
struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pagetable_pmd_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
@@ -106,7 +99,19 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
if (!pgtable_l4_enabled())
return;
- pagetable_pud_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 4
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4dp,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(p4dp);
+
+ if (!pgtable_l5_enabled())
+ return;
+
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 66736ff04011..568bf858f319 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a78f247029ae..7ce555862895 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -787,5 +787,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
+ .desc = "Broken CNTVOFF_EL2",
+ .capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
+ ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
+ MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
+ {}
+ })),
+ },
+ {
}
};
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8c4c1a2186cc..2b601d88762d 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -562,7 +562,7 @@ static int vec_proc_do_default_vl(const struct ctl_table *table, int write,
return 0;
}
-static struct ctl_table sve_default_vl_table[] = {
+static const struct ctl_table sve_default_vl_table[] = {
{
.procname = "sve_default_vector_length",
.mode = 0644,
@@ -585,7 +585,7 @@ static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
-static struct ctl_table sme_default_vl_table[] = {
+static const struct ctl_table sme_default_vl_table[] = {
{
.procname = "sme_default_vector_length",
.mode = 0644,
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 8f5422ed1b75..ef3a69cc398e 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -105,6 +105,9 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
+/* Static key which is set if CNTVOFF_EL2 is unusable */
+KVM_NVHE_ALIAS(broken_cntvoff_key);
+
/* EL2 exception handling */
KVM_NVHE_ALIAS(__start___kvm_ex_table);
KVM_NVHE_ALIAS(__stop___kvm_ex_table);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 2968a33bb3bc..42faebb7b712 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -859,7 +859,7 @@ long get_tagged_addr_ctrl(struct task_struct *task)
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static struct ctl_table tagged_addr_sysctl_table[] = {
+static const struct ctl_table tagged_addr_sysctl_table[] = {
{
.procname = "tagged_addr_disabled",
.mode = 0644,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4f613e8e0745..85104587f849 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -223,9 +223,7 @@ static void __init request_standard_resources(void)
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
- standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
- if (!standard_resources)
- panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
+ standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES);
for_each_mem_region(region) {
res = &standard_resources[i++];
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 1215df590418..231c0cd9c7b4 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -30,6 +30,7 @@ static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+DEFINE_STATIC_KEY_FALSE(broken_cntvoff_key);
static const u8 default_ppi[] = {
[TIMER_PTIMER] = 30,
@@ -101,21 +102,6 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
}
}
-static u64 timer_get_offset(struct arch_timer_context *ctxt)
-{
- u64 offset = 0;
-
- if (!ctxt)
- return 0;
-
- if (ctxt->offset.vm_offset)
- offset += *ctxt->offset.vm_offset;
- if (ctxt->offset.vcpu_offset)
- offset += *ctxt->offset.vcpu_offset;
-
- return offset;
-}
-
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
@@ -441,11 +427,30 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
+static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
+{
+ /*
+ * Paper over NV2 brokenness by publishing the interrupt status
+ * bit. This still results in a poor quality of emulation (guest
+ * writes will have no effect until the next exit).
+ *
+ * But hey, it's fast, right?
+ */
+ if (is_hyp_ctxt(ctx->vcpu) &&
+ (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
+ unsigned long val = timer_get_ctl(ctx);
+ __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
+ timer_set_ctl(ctx, val);
+ }
+}
+
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
struct arch_timer_context *timer_ctx)
{
int ret;
+ kvm_timer_update_status(timer_ctx, new_level);
+
timer_ctx->irq.level = new_level;
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
timer_ctx->irq.level);
@@ -466,10 +471,10 @@ static void timer_emulate(struct arch_timer_context *ctx)
trace_kvm_timer_emulate(ctx, should_fire);
- if (should_fire != ctx->irq.level) {
+ if (should_fire != ctx->irq.level)
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
- return;
- }
+
+ kvm_timer_update_status(ctx, should_fire);
/*
* If the timer can fire now, we don't need to have a soft timer
@@ -513,7 +518,12 @@ static void timer_save_state(struct arch_timer_context *ctx)
case TIMER_VTIMER:
case TIMER_HVTIMER:
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
- timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
+ cval = read_sysreg_el0(SYS_CNTV_CVAL);
+
+ if (has_broken_cntvoff())
+ cval -= timer_get_offset(ctx);
+
+ timer_set_cval(ctx, cval);
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTV_CTL);
@@ -618,8 +628,15 @@ static void timer_restore_state(struct arch_timer_context *ctx)
case TIMER_VTIMER:
case TIMER_HVTIMER:
- set_cntvoff(timer_get_offset(ctx));
- write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
+ cval = timer_get_cval(ctx);
+ offset = timer_get_offset(ctx);
+ if (has_broken_cntvoff()) {
+ set_cntvoff(0);
+ cval += offset;
+ } else {
+ set_cntvoff(offset);
+ }
+ write_sysreg_el0(cval, SYS_CNTV_CVAL);
isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
break;
@@ -742,27 +759,12 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
timer_irq(map->direct_ptimer),
&arch_timer_irq_ops);
WARN_ON_ONCE(ret);
-
- /*
- * The virtual offset behaviour is "interesting", as it
- * always applies when HCR_EL2.E2H==0, but only when
- * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
- * track E2H when putting the HV timer in "direct" mode.
- */
- if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
- struct arch_timer_offset *offs = &map->direct_vtimer->offset;
-
- if (vcpu_el2_e2h_is_set(vcpu))
- offs->vcpu_offset = NULL;
- else
- offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
- }
}
}
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
{
- bool tpt, tpc;
+ bool tvt, tpt, tvc, tpc, tvt02, tpt02;
u64 clr, set;
/*
@@ -777,7 +779,29 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
* within this function, reality kicks in and we start adding
* traps based on emulation requirements.
*/
- tpt = tpc = false;
+ tvt = tpt = tvc = tpc = false;
+ tvt02 = tpt02 = false;
+
+ /*
+ * NV2 badly breaks the timer semantics by redirecting accesses to
+ * the EL1 timer state to memory, so let's call ECV to the rescue if
+ * available: we trap all CNT{P,V}_{CTL,CVAL,TVAL}_EL0 accesses.
+ *
+ * The treatment varies slightly depending on whether we run a nVHE
+ * or VHE guest: nVHE will use the _EL0 registers directly, while
+ * VHE will use the _EL02 accessors. This translates into different
+ * trap bits.
+ *
+ * None of the trapping is required when running in non-HYP context,
+ * unless required by the L1 hypervisor settings once we advertise
+ * ECV+NV in the guest, or unless we need trapping for other reasons.
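+ * (tvt/tvc translate into CNTHCTL_EL1TVT/EL1TVCT, and tvt02/tpt02 into
+ * CNTHCTL_EL1NVVCT/EL1NVPCT, applied via assign_clear_set_bit() below.)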
+ */
+ if (cpus_have_final_cap(ARM64_HAS_ECV) && is_hyp_ctxt(vcpu)) {
+ if (vcpu_el2_e2h_is_set(vcpu))
+ tvt02 = tpt02 = true;
+ else
+ tvt = tpt = true;
+ }
/*
* We have two possibilities to deal with a physical offset:
@@ -793,9 +817,20 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
tpt = tpc = true;
/*
+ * For the poor sods that could not correctly subtract one value
+ * from another, trap the full virtual timer and counter.
+ */
+ if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer))
+ tvt = tvc = true;
+
+ /*
* Apply the enable bits that the guest hypervisor has requested for
* its own guest. We can only add traps that wouldn't have been set
* above.
+ * Implementation choices: we do not support NV when E2H=0 in the
+ * guest, and we don't support configurations where E2H is writable
+ * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
+ * not both). This simplifies the handling of the EL1NV* bits.
*/
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
@@ -806,6 +841,9 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
+
+ tpt02 |= (val & CNTHCTL_EL1NVPCT);
+ tvt02 |= (val & CNTHCTL_EL1NVVCT);
}
/*
@@ -817,6 +855,10 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
+ assign_clear_set_bit(tvt, CNTHCTL_EL1TVT, clr, set);
+ assign_clear_set_bit(tvc, CNTHCTL_EL1TVCT, clr, set);
+ assign_clear_set_bit(tvt02, CNTHCTL_EL1NVVCT, clr, set);
+ assign_clear_set_bit(tpt02, CNTHCTL_EL1NVPCT, clr, set);
/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
sysreg_clear_set(cnthctl_el2, clr, set);
@@ -905,6 +947,44 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
kvm_timer_blocking(vcpu);
}
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
+{
+ /*
+ * When NV2 is on, guest hypervisors have their EL1 timer register
+ * accesses redirected to the VNCR page. Any guest action taken on
+ * the timer is postponed until the next exit, leading to a very
+ * poor quality of emulation.
+ *
+ * This is an unmitigated disaster, only papered over by FEAT_ECV,
+ * which allows trapping of the timer registers even with NV2.
+ * Even so, this is still worse than FEAT_NV on its own. Meh.
+ */
+ if (!cpus_have_final_cap(ARM64_HAS_ECV)) {
+ /*
+ * For a VHE guest hypervisor, the EL2 state is directly
+ * stored in the host EL1 timers, while the emulated EL1
+ * state is stored in the VNCR page. The latter could have
+ * been updated behind our back, and we must reset the
+ * emulation of the timers.
+ *
+ * A non-VHE guest hypervisor doesn't have any direct access
+ * to its timers: the EL2 registers trap despite being
+ * notionally direct (we use the EL1 HW, as for VHE), while
+ * the EL1 registers access memory.
+ *
+ * In both cases, process the emulated timers on each guest
+ * exit. Boo.
+ */
+ struct timer_map map;
+ get_timer_map(vcpu, &map);
+
+ soft_timer_cancel(&map.emul_vtimer->hrtimer);
+ soft_timer_cancel(&map.emul_ptimer->hrtimer);
+ timer_emulate(map.emul_vtimer);
+ timer_emulate(map.emul_ptimer);
+ }
+}
+
/*
* With a userspace irqchip we have to check if the guest de-asserted the
* timer and if so, unmask the timer irq signal on the host interrupt
@@ -1363,6 +1443,37 @@ static int kvm_irq_init(struct arch_timer_kvm_info *info)
return 0;
}
+static void kvm_timer_handle_errata(void)
+{
+ u64 mmfr0, mmfr1, mmfr4;
+
+ /*
+ * CNTVOFF_EL2 is broken on some implementations. For those, we trap
+ * all virtual timer/counter accesses, requiring FEAT_ECV.
+ *
+ * However, a hypervisor supporting nesting is likely to mitigate the
+ * erratum at L0, and not require other levels to mitigate it (which
+ * would otherwise be a terrible performance sink due to trap
+ * amplification).
+ *
+ * Given that the affected HW implements both FEAT_VHE and FEAT_E2H0,
+ * and that NV is likely not to (because of limitations of the
+ * architecture), only enable the workaround when FEAT_VHE and
+ * FEAT_E2H0 are both detected. Time will tell if this actually holds.
+ */
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ mmfr4 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR4_EL1);
+ if (SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1) &&
+ !SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4) &&
+ SYS_FIELD_GET(ID_AA64MMFR0_EL1, ECV, mmfr0) &&
+ (has_vhe() || has_hvhe()) &&
+ cpus_have_final_cap(ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF)) {
+ static_branch_enable(&broken_cntvoff_key);
+ kvm_info("Broken CNTVOFF_EL2, trapping virtual timer\n");
+ }
+}
+
int __init kvm_timer_hyp_init(bool has_gic)
{
struct arch_timer_kvm_info *info;
@@ -1431,6 +1542,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
goto out_free_vtimer_irq;
}
+ kvm_timer_handle_errata();
return 0;
out_free_ptimer_irq:
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7b2735ad32e9..071a7d75be68 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -80,31 +80,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
-/*
- * This functions as an allow-list of protected VM capabilities.
- * Features not explicitly allowed by this function are denied.
- */
-static bool pkvm_ext_allowed(struct kvm *kvm, long ext)
-{
- switch (ext) {
- case KVM_CAP_IRQCHIP:
- case KVM_CAP_ARM_PSCI:
- case KVM_CAP_ARM_PSCI_0_2:
- case KVM_CAP_NR_VCPUS:
- case KVM_CAP_MAX_VCPUS:
- case KVM_CAP_MAX_VCPU_ID:
- case KVM_CAP_MSI_DEVID:
- case KVM_CAP_ARM_VM_IPA_SIZE:
- case KVM_CAP_ARM_PMU_V3:
- case KVM_CAP_ARM_SVE:
- case KVM_CAP_ARM_PTRAUTH_ADDRESS:
- case KVM_CAP_ARM_PTRAUTH_GENERIC:
- return true;
- default:
- return false;
- }
-}
-
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -113,7 +88,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
if (cap->flags)
return -EINVAL;
- if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap))
+ if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap))
return -EINVAL;
switch (cap->cap) {
@@ -311,7 +286,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
- if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext))
+ if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
return 0;
switch (ext) {
@@ -476,8 +451,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_pmu_vcpu_init(vcpu);
- kvm_arm_reset_debug_ptr(vcpu);
-
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
@@ -502,7 +475,10 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ if (!is_protected_kvm_enabled())
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ else
+ free_hyp_memcache(&vcpu->arch.pkvm_memcache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
@@ -574,6 +550,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct kvm_s2_mmu *mmu;
int *last_ran;
+ if (is_protected_kvm_enabled())
+ goto nommu;
+
if (vcpu_has_nv(vcpu))
kvm_vcpu_load_hw_mmu(vcpu);
@@ -594,10 +573,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*last_ran = vcpu->vcpu_idx;
}
+nommu:
vcpu->cpu = cpu;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
+ kvm_vcpu_load_debug(vcpu);
if (has_vhe())
kvm_vcpu_load_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@@ -617,7 +598,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_set_pauth_traps(vcpu);
- kvm_arch_vcpu_load_debug_state_flags(vcpu);
+ if (is_protected_kvm_enabled()) {
+ kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+ vcpu->kvm->arch.pkvm.handle,
+ vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+ kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+ &vcpu->arch.vgic_cpu.vgic_v3);
+ }
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
@@ -625,7 +612,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
- kvm_arch_vcpu_put_debug_state_flags(vcpu);
+ if (is_protected_kvm_enabled()) {
+ kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+ &vcpu->arch.vgic_cpu.vgic_v3);
+ kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+ }
+
+ kvm_vcpu_put_debug(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_vhe(vcpu);
@@ -808,8 +801,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
kvm_init_mpidr_data(kvm);
- kvm_arm_vcpu_init_debug(vcpu);
-
if (likely(irqchip_in_kernel(kvm))) {
/*
* Map the VGIC hardware resources before running a vcpu the
@@ -1187,7 +1178,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
continue;
}
- kvm_arm_setup_debug(vcpu);
kvm_arch_vcpu_ctxflush_fp(vcpu);
/**************************************************************
@@ -1204,8 +1194,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* Back from guest
*************************************************************/
- kvm_arm_clear_debug(vcpu);
-
/*
* We must sync the PMU state before the vgic state so
* that the vgic can properly sample the updated state of the
@@ -1228,6 +1216,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm_timer_sync_user(vcpu);
+ if (is_hyp_ctxt(vcpu))
+ kvm_timer_sync_nested(vcpu);
+
kvm_arch_vcpu_ctxsync_fp(vcpu);
/*
@@ -1571,7 +1562,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
/*
* Handle the "start in power-off" case.
@@ -2109,6 +2099,7 @@ static void cpu_set_hyp_vector(void)
static void cpu_hyp_init_context(void)
{
kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
+ kvm_init_host_debug_data();
if (!is_kernel_in_hyp_mode())
cpu_init_hyp_mode();
@@ -2117,7 +2108,6 @@ static void cpu_hyp_init_context(void)
static void cpu_hyp_init_features(void)
{
cpu_set_hyp_vector();
- kvm_arm_init_debug();
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
@@ -2300,6 +2290,19 @@ static int __init init_subsystems(void)
break;
case -ENODEV:
case -ENXIO:
+ /*
+ * No VGIC? No pKVM for you.
+ *
+ * Protected mode assumes that VGICv3 is present, so no point
+ * in trying to hobble along if vgic initialization fails.
+ */
+ if (is_protected_kvm_enabled())
+ goto out;
+
+ /*
+ * Otherwise, userspace could choose to implement a GIC for its
+ * guest on non-cooperative hardware.
+ */
vgic_present = false;
err = 0;
break;
@@ -2339,7 +2342,7 @@ static void __init teardown_hyp_mode(void)
free_hyp_pgds();
for_each_possible_cpu(cpu) {
- free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
if (free_sve) {
@@ -2410,6 +2413,13 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
kvm_nvhe_sym(__icache_flags) = __icache_flags;
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+
+ /*
+ * Flush entire BSS since part of its data containing init symbols is read
+ * while the MMU is off.
+ */
+ kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
+ kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
}
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
@@ -2527,15 +2537,15 @@ static int __init init_hyp_mode(void)
* Allocate stack pages for Hypervisor-mode
*/
for_each_possible_cpu(cpu) {
- unsigned long stack_page;
+ unsigned long stack_base;
- stack_page = __get_free_page(GFP_KERNEL);
- if (!stack_page) {
+ stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+ if (!stack_base) {
err = -ENOMEM;
goto out_err;
}
- per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+ per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
}
/*
@@ -2604,9 +2614,9 @@ static int __init init_hyp_mode(void)
*/
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
- char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+ char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
- err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+ err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2618,7 +2628,7 @@ static int __init init_hyp_mode(void)
* __hyp_pa() won't do the right thing there, since the stack
* has been mapped in the flexible private VA space.
*/
- params->stack_pa = __pa(stack_page);
+ params->stack_pa = __pa(stack_base);
}
for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index ce8886122ed3..0e4c805e7e89 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -3,7 +3,8 @@
* Debug and Guest Debug support
*
* Copyright (C) 2015 - Linaro Ltd
- * Author: Alex Bennée <alex.bennee@linaro.org>
+ * Authors: Alex Bennée <alex.bennee@linaro.org>
+ * Oliver Upton <oliver.upton@linux.dev>
*/
#include <linux/kvm_host.h>
@@ -14,72 +15,6 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
-#include "trace.h"
-
-/* These are the bits of MDSCR_EL1 we may manipulate */
-#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \
- DBG_MDSCR_KDE | \
- DBG_MDSCR_MDE)
-
-static DEFINE_PER_CPU(u64, mdcr_el2);
-
-/*
- * save/restore_guest_debug_regs
- *
- * For some debug operations we need to tweak some guest registers. As
- * a result we need to save the state of those registers before we
- * make those modifications.
- *
- * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
- * after we have restored the preserved value to the main context.
- *
- * When single-step is enabled by userspace, we tweak PSTATE.SS on every
- * guest entry. Preserve PSTATE.SS so we can restore the original value
- * for the vcpu after the single-step is disabled.
- */
-static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
-{
- u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
-
- vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
-
- trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
- vcpu->arch.guest_debug_preserved.mdscr_el1);
-
- vcpu->arch.guest_debug_preserved.pstate_ss =
- (*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
-}
-
-static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
-{
- u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
-
- vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
-
- trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
- vcpu_read_sys_reg(vcpu, MDSCR_EL1));
-
- if (vcpu->arch.guest_debug_preserved.pstate_ss)
- *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
- else
- *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
-}
-
-/**
- * kvm_arm_init_debug - grab what we need for debug
- *
- * Currently the sole task of this function is to retrieve the initial
- * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
- * presumably been set-up by some knowledgeable bootcode.
- *
- * It is called once per-cpu during CPU hyp initialisation.
- */
-
-void kvm_arm_init_debug(void)
-{
- __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
-}
-
/**
* kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
*
@@ -95,11 +30,14 @@ void kvm_arm_init_debug(void)
*/
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
+ preempt_disable();
+
/*
* This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
* to disable guest access to the profiling and trace buffers
*/
- vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 = FIELD_PREP(MDCR_EL2_HPMN,
+ *host_data_ptr(nr_event_counters));
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
MDCR_EL2_TTRF |
@@ -113,233 +51,215 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
/*
- * Trap debug register access when one of the following is true:
- * - Userspace is using the hardware to debug the guest
- * (KVM_GUESTDBG_USE_HW is set).
- * - The guest is not using debug (DEBUG_DIRTY clear).
- * - The guest has enabled the OS Lock (debug exceptions are blocked).
+ * Trap debug registers if the guest doesn't have ownership of them.
*/
- if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
- !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
- kvm_vcpu_os_lock_enabled(vcpu))
+ if (!kvm_guest_owns_debug_regs(vcpu))
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
- trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
-}
+ /* Write MDCR_EL2 directly if we're already at EL2 */
+ if (has_vhe())
+ write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
-/**
- * kvm_arm_vcpu_init_debug - setup vcpu debug traps
- *
- * @vcpu: the vcpu pointer
- *
- * Set vcpu initial mdcr_el2 value.
- */
-void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- kvm_arm_setup_mdcr_el2(vcpu);
preempt_enable();
}
-/**
- * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
- * @vcpu: the vcpu pointer
- */
-
-void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+void kvm_init_host_debug_data(void)
{
- vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ if (cpuid_feature_extract_signed_field(dfr0, ID_AA64DFR0_EL1_PMUVer_SHIFT) > 0)
+ *host_data_ptr(nr_event_counters) = FIELD_GET(ARMV8_PMU_PMCR_N,
+ read_sysreg(pmcr_el0));
+
+ *host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
+ *host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);
+
+ if (has_vhe())
+ return;
+
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
+ !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
+ host_data_set_flag(HAS_SPE);
+
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
+ /* Force disable trace in protected mode in case of no TRBE */
+ if (is_protected_kvm_enabled())
+ host_data_set_flag(EL1_TRACING_CONFIGURED);
+
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
+ !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
+ host_data_set_flag(HAS_TRBE);
+ }
}
-/**
- * kvm_arm_setup_debug - set up debug related stuff
+/*
+ * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
+ * has taken over MDSCR_EL1.
*
- * @vcpu: the vcpu pointer
+ * - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
*
- * This is called before each entry into the hypervisor to setup any
- * debug related registers.
+ * - Userspace is using the breakpoint/watchpoint registers to debug the
+ * guest, and MDSCR_EL1.MDE is forced to 1.
*
- * Additionally, KVM only traps guest accesses to the debug registers if
- * the guest is not actively using them (see the DEBUG_DIRTY
- * flag on vcpu->arch.iflags). Since the guest must not interfere
- * with the hardware state when debugging the guest, we must ensure that
- * trapping is enabled whenever we are debugging the guest using the
- * debug registers.
+ * - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
+ * masking all debug exceptions affected by the OS Lock.
*/
-
-void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
- unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
+ /*
+ * Use the guest's MDSCR_EL1 as a starting point, since there are
+ * several other features controlled by MDSCR_EL1 that are not relevant
+ * to the host.
+ *
+ * Clear the bits that KVM may use which also satisfies emulation of
+ * the OS Lock as MDSCR_EL1.MDE is cleared.
+ */
+ u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~(MDSCR_EL1_SS |
+ MDSCR_EL1_MDE |
+ MDSCR_EL1_KDE);
- trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ mdscr |= MDSCR_EL1_SS;
- kvm_arm_setup_mdcr_el2(vcpu);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
+ mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;
- /* Check if we need to use the debug registers. */
+ vcpu->arch.external_mdscr_el1 = mdscr;
+}
+
+void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
+{
+ u64 mdscr;
+
+ /* Must be called before kvm_vcpu_load_vhe() */
+ KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);
+
+ /*
+ * Determine which of the possible debug states we're in:
+ *
+ * - VCPU_DEBUG_HOST_OWNED: KVM has taken ownership of the guest's
+ * breakpoint/watchpoint registers, or needs to use MDSCR_EL1 to do
+ * software step or emulate the effects of the OS Lock being enabled.
+ *
+ * - VCPU_DEBUG_GUEST_OWNED: The guest has debug exceptions enabled, and
+ * the breakpoint/watchpoint registers need to be loaded eagerly.
+ *
+ * - VCPU_DEBUG_FREE: Neither of the above apply, no breakpoint/watchpoint
+ * context needs to be loaded on the CPU.
+ */
if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
- /* Save guest debug state */
- save_guest_debug_regs(vcpu);
+ vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
+ setup_external_mdscr(vcpu);
/*
- * Single Step (ARM ARM D2.12.3 The software step state
- * machine)
- *
- * If we are doing Single Step we need to manipulate
- * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
- * step has occurred the hypervisor will trap the
- * debug exception and we return to userspace.
- *
- * If the guest attempts to single step its userspace
- * we would have to deal with a trapped exception
- * while in the guest kernel. Because this would be
- * hard to unwind we suppress the guest's ability to
- * do so by masking MDSCR_EL.SS.
- *
- * This confuses guest debuggers which use
- * single-step behind the scenes but everything
- * returns to normal once the host is no longer
- * debugging the system.
+ * Steal the guest's single-step state machine if userspace wants to
+ * single-step the guest.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- /*
- * If the software step state at the last guest exit
- * was Active-pending, we don't set DBG_SPSR_SS so
- * that the state is maintained (to not run another
- * single-step until the pending Software Step
- * exception is taken).
- */
- if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
+ if (*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+ vcpu_clear_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
+ else
+ vcpu_set_flag(vcpu, GUEST_SS_ACTIVE_PENDING);
+
+ if (!vcpu_get_flag(vcpu, HOST_SS_ACTIVE_PENDING))
*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
else
*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
-
- mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
- mdscr |= DBG_MDSCR_SS;
- vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
- } else {
- mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
- mdscr &= ~DBG_MDSCR_SS;
- vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
}
+ } else {
+ mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
- trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
-
- /*
- * HW Breakpoints and watchpoints
- *
- * We simply switch the debug_ptr to point to our new
- * external_debug_state which has been populated by the
- * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
- * the registers are updated on the world switch.
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
- /* Enable breakpoints/watchpoints */
- mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
- mdscr |= DBG_MDSCR_MDE;
- vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
-
- vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
-
- trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
- &vcpu->arch.debug_ptr->dbg_bcr[0],
- &vcpu->arch.debug_ptr->dbg_bvr[0]);
-
- trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
- &vcpu->arch.debug_ptr->dbg_wcr[0],
- &vcpu->arch.debug_ptr->dbg_wvr[0]);
-
- /*
- * The OS Lock blocks debug exceptions in all ELs when it is
- * enabled. If the guest has enabled the OS Lock, constrain its
- * effects to the guest. Emulate the behavior by clearing
- * MDSCR_EL1.MDE. In so doing, we ensure that host debug
- * exceptions are unaffected by guest configuration of the OS
- * Lock.
- */
- } else if (kvm_vcpu_os_lock_enabled(vcpu)) {
- mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
- mdscr &= ~DBG_MDSCR_MDE;
- vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
- }
+ if (mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
+ vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
+ else
+ vcpu->arch.debug_owner = VCPU_DEBUG_FREE;
}
- BUG_ON(!vcpu->guest_debug &&
- vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
-
- /* If KDE or MDE are set, perform a full save/restore cycle. */
- if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
-
- /* Write mdcr_el2 changes since vcpu_load on VHE systems */
- if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
- write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
-
- trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
+ kvm_arm_setup_mdcr_el2(vcpu);
}
-void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
{
- trace_kvm_arm_clear_debug(vcpu->guest_debug);
+ if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ return;
/*
- * Restore the guest's debug registers if we were using them.
+ * Save the host's software step state and restore the guest's before
+ * potentially returning to userspace.
*/
- if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
- /*
- * Mark the vcpu as ACTIVE_PENDING
- * until Software Step exception is taken.
- */
- vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
- }
-
- restore_guest_debug_regs(vcpu);
+ if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
+ vcpu_set_flag(vcpu, HOST_SS_ACTIVE_PENDING);
+ else
+ vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);
- /*
- * If we were using HW debug we need to restore the
- * debug_ptr to the guest debug state.
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
- kvm_arm_reset_debug_ptr(vcpu);
+ if (vcpu_get_flag(vcpu, GUEST_SS_ACTIVE_PENDING))
+ *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+ else
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+}
- trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
- &vcpu->arch.debug_ptr->dbg_bcr[0],
- &vcpu->arch.debug_ptr->dbg_bvr[0]);
+/*
+ * Updates ownership of the debug registers after a trapped guest access to a
+ * breakpoint/watchpoint register. Host ownership of the debug registers is of
+ * strictly higher priority, and it is the responsibility of the VMM to emulate
+ * guest debug exceptions in this configuration.
+ */
+void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
+{
+ if (kvm_host_owns_debug_regs(vcpu))
+ return;
- trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
- &vcpu->arch.debug_ptr->dbg_wcr[0],
- &vcpu->arch.debug_ptr->dbg_wvr[0]);
- }
- }
+ vcpu->arch.debug_owner = VCPU_DEBUG_GUEST_OWNED;
+ kvm_arm_setup_mdcr_el2(vcpu);
}
-void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
+void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
- u64 dfr0;
+ if (val & OSLAR_EL1_OSLK)
+ __vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+ else
+ __vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
- /* For VHE, there is nothing to do */
- if (has_vhe())
+ preempt_disable();
+ kvm_arch_vcpu_put(vcpu);
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+}
+
+void kvm_enable_trbe(void)
+{
+ if (has_vhe() || is_protected_kvm_enabled() ||
+ WARN_ON_ONCE(preemptible()))
return;
- dfr0 = read_sysreg(id_aa64dfr0_el1);
- /*
- * If SPE is present on this CPU and is available at current EL,
- * we may need to check if the host state needs to be saved.
- */
- if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
- !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
- vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
+ host_data_set_flag(TRBE_ENABLED);
+}
+EXPORT_SYMBOL_GPL(kvm_enable_trbe);
+
+void kvm_disable_trbe(void)
+{
+ if (has_vhe() || is_protected_kvm_enabled() ||
+ WARN_ON_ONCE(preemptible()))
+ return;
- /* Check if we have TRBE implemented and available at the host */
- if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
- !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
- vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
+ host_data_clear_flag(TRBE_ENABLED);
}
+EXPORT_SYMBOL_GPL(kvm_disable_trbe);
-void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
+void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
{
- vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
- vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
+ if (is_protected_kvm_enabled() || WARN_ON_ONCE(preemptible()))
+ return;
+
+ if (has_vhe()) {
+ write_sysreg_s(trfcr_while_in_guest, SYS_TRFCR_EL12);
+ return;
+ }
+
+ *host_data_ptr(trfcr_while_in_guest) = trfcr_while_in_guest;
+ if (read_sysreg_s(SYS_TRFCR_EL1) != trfcr_while_in_guest)
+ host_data_set_flag(EL1_TRACING_CONFIGURED);
+ else
+ host_data_clear_flag(EL1_TRACING_CONFIGURED);
}
+EXPORT_SYMBOL_GPL(kvm_tracing_set_el1_configuration);
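The new debug_owner tracking replaces the old DEBUG_DIRTY/debug_ptr handling with a single owner decided at vcpu_load time. A hedged sketch of that decision, mirroring kvm_vcpu_load_debug() above (the helper name is hypothetical; the owner constants are declared elsewhere in the series):

/* Sketch only: how a vCPU ends up in one of the three debug states. */
static int classify_debug_owner_sketch(bool host_debugging, bool os_lock,
                                       u64 guest_mdscr)
{
        if (host_debugging || os_lock)
                return VCPU_DEBUG_HOST_OWNED;   /* external_debug_state + external_mdscr_el1 */
        if (guest_mdscr & (MDSCR_EL1_KDE | MDSCR_EL1_MDE))
                return VCPU_DEBUG_GUEST_OWNED;  /* load guest bps/wps eagerly */
        return VCPU_DEBUG_FREE;                 /* nothing to context-switch */
}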
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index f1b7287e1f3c..607d37bab70b 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -89,6 +89,9 @@ enum cgt_group_id {
CGT_HCRX_EnFPM,
CGT_HCRX_TCR2En,
+ CGT_CNTHCTL_EL1TVT,
+ CGT_CNTHCTL_EL1TVCT,
+
CGT_ICH_HCR_TC,
CGT_ICH_HCR_TALL0,
CGT_ICH_HCR_TALL1,
@@ -124,6 +127,8 @@ enum cgt_group_id {
__COMPLEX_CONDITIONS__,
CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
CGT_CNTHCTL_EL1PTEN,
+ CGT_CNTHCTL_EL1NVPCT,
+ CGT_CNTHCTL_EL1NVVCT,
CGT_CPTR_TTA,
CGT_MDCR_HPMN,
@@ -393,6 +398,18 @@ static const struct trap_bits coarse_trap_bits[] = {
.mask = HCRX_EL2_TCR2En,
.behaviour = BEHAVE_FORWARD_RW,
},
+ [CGT_CNTHCTL_EL1TVT] = {
+ .index = CNTHCTL_EL2,
+ .value = CNTHCTL_EL1TVT,
+ .mask = CNTHCTL_EL1TVT,
+ .behaviour = BEHAVE_FORWARD_RW,
+ },
+ [CGT_CNTHCTL_EL1TVCT] = {
+ .index = CNTHCTL_EL2,
+ .value = CNTHCTL_EL1TVCT,
+ .mask = CNTHCTL_EL1TVCT,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
[CGT_ICH_HCR_TC] = {
.index = ICH_HCR_EL2,
.value = ICH_HCR_TC,
@@ -487,6 +504,32 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
return BEHAVE_FORWARD_RW;
}
+static bool is_nested_nv2_guest(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ val = __vcpu_sys_reg(vcpu, HCR_EL2);
+ return ((val & (HCR_E2H | HCR_TGE | HCR_NV2 | HCR_NV1 | HCR_NV)) == (HCR_E2H | HCR_NV2 | HCR_NV));
+}
+
+static enum trap_behaviour check_cnthctl_el1nvpct(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_nv2_guest(vcpu) ||
+ !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVPCT))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_RW;
+}
+
+static enum trap_behaviour check_cnthctl_el1nvvct(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_nv2_guest(vcpu) ||
+ !(__vcpu_sys_reg(vcpu, CNTHCTL_EL2) & CNTHCTL_EL1NVVCT))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_RW;
+}
+
static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
{
u64 val = __vcpu_sys_reg(vcpu, CPTR_EL2);
@@ -534,6 +577,8 @@ static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
static const complex_condition_check ccc[] = {
CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+ CCC(CGT_CNTHCTL_EL1NVPCT, check_cnthctl_el1nvpct),
+ CCC(CGT_CNTHCTL_EL1NVVCT, check_cnthctl_el1nvvct),
CCC(CGT_CPTR_TTA, check_cptr_tta),
CCC(CGT_MDCR_HPMN, check_mdcr_hpmn),
};
@@ -850,11 +895,15 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SYS_CNTHP_CVAL_EL2, CGT_HCR_NV),
SR_RANGE_TRAP(SYS_CNTHV_TVAL_EL2,
SYS_CNTHV_CVAL_EL2, CGT_HCR_NV),
- /* All _EL02, _EL12 registers */
+ /* All _EL02, _EL12 registers up to CNTKCTL_EL12 */
SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0),
sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV),
SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0),
- sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV),
+ sys_reg(3, 5, 14, 1, 0), CGT_HCR_NV),
+ SR_TRAP(SYS_CNTP_CTL_EL02, CGT_CNTHCTL_EL1NVPCT),
+ SR_TRAP(SYS_CNTP_CVAL_EL02, CGT_CNTHCTL_EL1NVPCT),
+ SR_TRAP(SYS_CNTV_CTL_EL02, CGT_CNTHCTL_EL1NVVCT),
+ SR_TRAP(SYS_CNTV_CVAL_EL02, CGT_CNTHCTL_EL1NVVCT),
SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV),
SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV),
SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV),
@@ -1184,6 +1233,11 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
+ SR_TRAP(SYS_CNTV_TVAL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTV_CVAL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTV_CTL_EL0, CGT_CNTHCTL_EL1TVT),
+ SR_TRAP(SYS_CNTVCT_EL0, CGT_CNTHCTL_EL1TVCT),
+ SR_TRAP(SYS_CNTVCTSS_EL0, CGT_CNTHCTL_EL1TVCT),
SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM),
/*
* IMPDEF choice:
@@ -2345,14 +2399,14 @@ inject:
return true;
}
-static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+static bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg, u64 control_bit)
{
bool control_bit_set;
if (!vcpu_has_nv(vcpu))
return false;
- control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+ control_bit_set = __vcpu_sys_reg(vcpu, reg) & control_bit;
if (!is_hyp_ctxt(vcpu) && control_bit_set) {
kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
return true;
@@ -2360,9 +2414,24 @@ static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
return false;
}
+static bool forward_hcr_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ return __forward_traps(vcpu, HCR_EL2, control_bit);
+}
+
bool forward_smc_trap(struct kvm_vcpu *vcpu)
{
- return forward_traps(vcpu, HCR_TSC);
+ return forward_hcr_traps(vcpu, HCR_TSC);
+}
+
+static bool forward_mdcr_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ return __forward_traps(vcpu, MDCR_EL2, control_bit);
+}
+
+bool forward_debug_exception(struct kvm_vcpu *vcpu)
+{
+ return forward_mdcr_traps(vcpu, MDCR_EL2_TDE);
}
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
@@ -2406,7 +2475,7 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
* Forward this trap to the virtual EL2 if the virtual
* HCR_EL2.NV bit is set and this is coming from !EL2.
*/
- if (forward_traps(vcpu, HCR_NV))
+ if (forward_hcr_traps(vcpu, HCR_NV))
return;
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
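The forward_traps() rework above lets any vEL2 control register gate trap forwarding, not only HCR_EL2. A hypothetical further user, following the same pattern as forward_hcr_traps()/forward_mdcr_traps() (the function name below is illustrative only):

/* Sketch: the same wrapper shape works for any vEL2 register bit. */
static bool forward_cnthctl_traps_sketch(struct kvm_vcpu *vcpu, u64 control_bit)
{
        return __forward_traps(vcpu, CNTHCTL_EL2, control_bit);
}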
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 98718bd65bf1..4d3d1a2eb157 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -65,14 +65,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
- vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
+ host_data_clear_flag(HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
- vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
+ host_data_set_flag(HOST_SVE_ENABLED);
if (system_supports_sme()) {
- vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
+ host_data_clear_flag(HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
- vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+ host_data_set_flag(HOST_SME_ENABLED);
/*
* If PSTATE.SM is enabled then save any pending FP
@@ -168,7 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
- if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
+ if (host_data_test_flag(HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN);
else
sysreg_clear_set(CPACR_EL1,
@@ -227,7 +227,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
- if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
+ if (host_data_test_flag(HOST_SVE_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
else
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 12dad841f2a5..2196979a24a3 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -917,31 +917,24 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- int ret = 0;
-
trace_kvm_set_guest_debug(vcpu, dbg->control);
- if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
- ret = -EINVAL;
- goto out;
- }
-
- if (dbg->control & KVM_GUESTDBG_ENABLE) {
- vcpu->guest_debug = dbg->control;
-
- /* Hardware assisted Break and Watch points */
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
- vcpu->arch.external_debug_state = dbg->arch;
- }
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
- } else {
- /* If not enabled clear all flags */
+ if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
vcpu->guest_debug = 0;
- vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+ vcpu_clear_flag(vcpu, HOST_SS_ACTIVE_PENDING);
+ return 0;
}
-out:
- return ret;
+ vcpu->guest_debug = dbg->control;
+
+ /* Hardware assisted Break and Watch points */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
+ vcpu->arch.external_debug_state = dbg->arch;
+
+ return 0;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index d7c2990e7c9e..512d152233ff 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -183,6 +183,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
u64 esr = kvm_vcpu_get_esr(vcpu);
+ if (!vcpu->guest_debug && forward_debug_exception(vcpu))
+ return 1;
+
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.hsr = lower_32_bits(esr);
run->debug.arch.hsr_high = upper_32_bits(esr);
@@ -193,7 +196,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
run->debug.arch.far = vcpu->arch.fault.far_el2;
break;
case ESR_ELx_EC_SOFTSTP_LOW:
- vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
break;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index d00093699aaf..502a5b73ee70 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -88,15 +88,26 @@
default: write_debug(ptr[0], reg, 0); \
}
+static struct kvm_guest_debug_arch *__vcpu_debug_regs(struct kvm_vcpu *vcpu)
+{
+ switch (vcpu->arch.debug_owner) {
+ case VCPU_DEBUG_FREE:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case VCPU_DEBUG_GUEST_OWNED:
+ return &vcpu->arch.vcpu_debug_state;
+ case VCPU_DEBUG_HOST_OWNED:
+ return &vcpu->arch.external_debug_state;
+ }
+
+ return NULL;
+}
+
static void __debug_save_state(struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt)
{
- u64 aa64dfr0;
- int brps, wrps;
-
- aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
- brps = (aa64dfr0 >> 12) & 0xf;
- wrps = (aa64dfr0 >> 20) & 0xf;
+ int brps = *host_data_ptr(debug_brps);
+ int wrps = *host_data_ptr(debug_wrps);
save_debug(dbg->dbg_bcr, dbgbcr, brps);
save_debug(dbg->dbg_bvr, dbgbvr, brps);
@@ -109,13 +120,8 @@ static void __debug_save_state(struct kvm_guest_debug_arch *dbg,
static void __debug_restore_state(struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt)
{
- u64 aa64dfr0;
- int brps, wrps;
-
- aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
-
- brps = (aa64dfr0 >> 12) & 0xf;
- wrps = (aa64dfr0 >> 20) & 0xf;
+ int brps = *host_data_ptr(debug_brps);
+ int wrps = *host_data_ptr(debug_wrps);
restore_debug(dbg->dbg_bcr, dbgbcr, brps);
restore_debug(dbg->dbg_bvr, dbgbvr, brps);
@@ -132,13 +138,13 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
+ if (!kvm_debug_regs_in_use(vcpu))
return;
host_ctxt = host_data_ptr(host_ctxt);
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = host_data_ptr(host_debug_state.regs);
- guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
+ guest_dbg = __vcpu_debug_regs(vcpu);
__debug_save_state(host_dbg, host_ctxt);
__debug_restore_state(guest_dbg, guest_ctxt);
@@ -151,18 +157,16 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
+ if (!kvm_debug_regs_in_use(vcpu))
return;
host_ctxt = host_data_ptr(host_ctxt);
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = host_data_ptr(host_debug_state.regs);
- guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
+ guest_dbg = __vcpu_debug_regs(vcpu);
__debug_save_state(guest_dbg, guest_ctxt);
__debug_restore_state(host_dbg, host_ctxt);
-
- vcpu_clear_flag(vcpu, DEBUG_DIRTY);
}
#endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index abfa6ad92e91..f838a45665f2 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -501,7 +501,12 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
-static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
+static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
+{
+ return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
+}
+
+static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctxt;
u32 sysreg;
@@ -511,18 +516,19 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
* We only get here for 64bit guests, 32bit guests will hit
* the long and winding road all the way to the standard
* handling. Yes, it sucks to be irrelevant.
+ *
+ * Also, we only deal with non-hypervisor context here (either
+ * an EL1 guest, or a non-HYP context of an EL2 guest).
*/
+ if (is_hyp_ctxt(vcpu))
+ return false;
+
sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
switch (sysreg) {
case SYS_CNTPCT_EL0:
case SYS_CNTPCTSS_EL0:
if (vcpu_has_nv(vcpu)) {
- if (is_hyp_ctxt(vcpu)) {
- ctxt = vcpu_hptimer(vcpu);
- break;
- }
-
/* Check for guest hypervisor trapping */
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
if (!vcpu_el2_e2h_is_set(vcpu))
@@ -534,16 +540,23 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
ctxt = vcpu_ptimer(vcpu);
break;
+ case SYS_CNTVCT_EL0:
+ case SYS_CNTVCTSS_EL0:
+ if (vcpu_has_nv(vcpu)) {
+ /* Check for guest hypervisor trapping */
+ val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+ if (val & CNTHCTL_EL1TVCT)
+ return false;
+ }
+
+ ctxt = vcpu_vtimer(vcpu);
+ break;
default:
return false;
}
- val = arch_timer_read_cntpct_el0();
-
- if (ctxt->offset.vm_offset)
- val -= *kern_hyp_va(ctxt->offset.vm_offset);
- if (ctxt->offset.vcpu_offset)
- val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
+ val = compute_counter_value(ctxt);
vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
__kvm_skip_instr(vcpu);
@@ -588,7 +601,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
- if (kvm_hyp_handle_cntpct(vcpu))
+ if (kvm_handle_cntxct(vcpu))
return true;
return false;
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index a651c43ad679..76ff095c6b6e 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -18,9 +18,34 @@
static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+ if (!vcpu)
+ vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+ return vcpu;
+}
+
+static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
+{
+ return host_data_ptr(host_ctxt) != ctxt;
+}
+
+static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
+ if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
+ return &vcpu->arch.external_mdscr_el1;
+
+ return &ctxt_sys_reg(ctxt, MDSCR_EL1);
+}
+
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
- ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
+ *ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
// POR_EL0 can affect uaccess, so must be saved/restored early.
if (ctxt_has_s1poe(ctxt))
@@ -33,16 +58,6 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
-static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
-{
- struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
-
- if (!vcpu)
- vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
-
- return vcpu;
-}
-
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
@@ -139,7 +154,7 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
- write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
+ write_sysreg(*ctxt_mdscr_el1(ctxt), mdscr_el1);
// POR_EL0 can affect uaccess, so must be saved/restored early.
if (ctxt_has_s1poe(ctxt))
@@ -283,7 +298,7 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
- if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
+ if (has_vhe() || kvm_debug_regs_in_use(vcpu))
__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}
@@ -300,7 +315,7 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);
- if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
+ if (has_vhe() || kvm_debug_regs_in_use(vcpu))
write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
deleted file mode 100644
index f957890c7e38..000000000000
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2021 Google LLC
- * Author: Fuad Tabba <tabba@google.com>
- */
-
-#ifndef __ARM64_KVM_FIXED_CONFIG_H__
-#define __ARM64_KVM_FIXED_CONFIG_H__
-
-#include <asm/sysreg.h>
-
-/*
- * This file contains definitions for features to be allowed or restricted for
- * guest virtual machines, depending on the mode KVM is running in and on the
- * type of guest that is running.
- *
- * The ALLOW masks represent a bitmask of feature fields that are allowed
- * without any restrictions as long as they are supported by the system.
- *
- * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for
- * features that are restricted to support at most the specified feature.
- *
- * If a feature field is not present in either, than it is not supported.
- *
- * The approach taken for protected VMs is to allow features that are:
- * - Needed by common Linux distributions (e.g., floating point)
- * - Trivial to support, e.g., supporting the feature does not introduce or
- * require tracking of additional state in KVM
- * - Cannot be trapped or prevent the guest from using anyway
- */
-
-/*
- * Allow for protected VMs:
- * - Floating-point and Advanced SIMD
- * - Data Independent Timing
- * - Spectre/Meltdown Mitigation
- */
-#define PVM_ID_AA64PFR0_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
- )
-
-/*
- * Restrict to the following *unsigned* features for protected VMs:
- * - AArch64 guests only (no support for AArch32 guests):
- * AArch32 adds complexity in trap handling, emulation, condition codes,
- * etc...
- * - RAS (v1)
- * Supported by KVM
- */
-#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
- SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \
- SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \
- SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \
- SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \
- SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \
- )
-
-/*
- * Allow for protected VMs:
- * - Branch Target Identification
- * - Speculative Store Bypassing
- */
-#define PVM_ID_AA64PFR1_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
- ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
- )
-
-#define PVM_ID_AA64PFR2_ALLOW 0ULL
-
-/*
- * Allow for protected VMs:
- * - Mixed-endian
- * - Distinction between Secure and Non-secure Memory
- * - Mixed-endian at EL0 only
- * - Non-context synchronizing exception entry and exit
- */
-#define PVM_ID_AA64MMFR0_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
- )
-
-/*
- * Restrict to the following *unsigned* features for protected VMs:
- * - 40-bit IPA
- * - 16-bit ASID
- */
-#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
- )
-
-/*
- * Allow for protected VMs:
- * - Hardware translation table updates to Access flag and Dirty state
- * - Number of VMID bits from CPU
- * - Hierarchical Permission Disables
- * - Privileged Access Never
- * - SError interrupt exceptions from speculative reads
- * - Enhanced Translation Synchronization
- * - Control for cache maintenance permission
- */
-#define PVM_ID_AA64MMFR1_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_CMOW) \
- )
-
-/*
- * Allow for protected VMs:
- * - Common not Private translations
- * - User Access Override
- * - IESB bit in the SCTLR_ELx registers
- * - Unaligned single-copy atomicity and atomic functions
- * - ESR_ELx.EC value on an exception by read access to feature ID space
- * - TTL field in address operations.
- * - Break-before-make sequences when changing translation block size
- * - E0PDx mechanism
- */
-#define PVM_ID_AA64MMFR2_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
- ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
- )
-
-#define PVM_ID_AA64MMFR3_ALLOW (0ULL)
-
-/*
- * No support for Scalable Vectors for protected VMs:
- * Requires additional support from KVM, e.g., context-switching and
- * trapping at EL2
- */
-#define PVM_ID_AA64ZFR0_ALLOW (0ULL)
-
-/*
- * No support for debug, including breakpoints, and watchpoints for protected
- * VMs:
- * The Arm architecture mandates support for at least the Armv8 debug
- * architecture, which would include at least 2 hardware breakpoints and
- * watchpoints. Providing that support to protected guests adds
- * considerable state and complexity. Therefore, the reserved value of 0 is
- * used for debug-related fields.
- */
-#define PVM_ID_AA64DFR0_ALLOW (0ULL)
-#define PVM_ID_AA64DFR1_ALLOW (0ULL)
-
-/*
- * No support for implementation defined features.
- */
-#define PVM_ID_AA64AFR0_ALLOW (0ULL)
-#define PVM_ID_AA64AFR1_ALLOW (0ULL)
-
-/*
- * No restrictions on instructions implemented in AArch64.
- */
-#define PVM_ID_AA64ISAR0_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
- )
-
-/* Restrict pointer authentication to the basic version. */
-#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
- )
-
-#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
- )
-
-#define PVM_ID_AA64ISAR1_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
- )
-
-#define PVM_ID_AA64ISAR2_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A)| \
- ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
- )
-
-u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
-bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
-bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
-int kvm_check_pvm_sysreg_table(void);
-
-#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index 97c527ef53c2..3766333bace9 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -7,7 +7,7 @@
#include <nvhe/memory.h>
#include <nvhe/spinlock.h>
-#define HYP_NO_ORDER USHRT_MAX
+#define HYP_NO_ORDER ((u8)(~0))
struct hyp_pool {
/*
@@ -19,11 +19,11 @@ struct hyp_pool {
struct list_head free_area[NR_PAGE_ORDERS];
phys_addr_t range_start;
phys_addr_t range_end;
- unsigned short max_order;
+ u8 max_order;
};
/* Allocation */
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void *hyp_alloc_pages(struct hyp_pool *pool, u8 order);
void hyp_split_page(struct hyp_page *page);
void hyp_get_page(struct hyp_pool *pool, void *addr);
void hyp_put_page(struct hyp_pool *pool, void *addr);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 0972faccc2af..978f38c386ee 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -11,40 +11,10 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
+#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>
-/*
- * SW bits 0-1 are reserved to track the memory ownership state of each page:
- * 00: The page is owned exclusively by the page-table owner.
- * 01: The page is owned by the page-table owner, but is shared
- * with another entity.
- * 10: The page is shared with, but not owned by the page-table owner.
- * 11: Reserved for future use (lending).
- */
-enum pkvm_page_state {
- PKVM_PAGE_OWNED = 0ULL,
- PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0,
- PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1,
- __PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 |
- KVM_PGTABLE_PROT_SW1,
-
- /* Meta-states which aren't encoded directly in the PTE's SW bits */
- PKVM_NOPAGE,
-};
-
-#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
-static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
- enum pkvm_page_state state)
-{
- return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
-}
-
-static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
-{
- return prot & PKVM_PAGE_STATE_PROT_MASK;
-}
-
struct host_mmu {
struct kvm_arch arch;
struct kvm_pgtable pgt;
@@ -69,6 +39,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+ enum kvm_pgtable_prot prot);
+int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
+int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm);
+int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index ab205c4d6774..34233d586060 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -7,9 +7,47 @@
#include <linux/types.h>
+/*
+ * Bits 0-1 are reserved to track the memory ownership state of each page:
+ * 00: The page is owned exclusively by the page-table owner.
+ * 01: The page is owned by the page-table owner, but is shared
+ * with another entity.
+ * 10: The page is shared with, but not owned by the page-table owner.
+ * 11: Reserved for future use (lending).
+ */
+enum pkvm_page_state {
+ PKVM_PAGE_OWNED = 0ULL,
+ PKVM_PAGE_SHARED_OWNED = BIT(0),
+ PKVM_PAGE_SHARED_BORROWED = BIT(1),
+ __PKVM_PAGE_RESERVED = BIT(0) | BIT(1),
+
+ /* Meta-states which aren't encoded directly in the PTE's SW bits */
+ PKVM_NOPAGE = BIT(2),
+};
+#define PKVM_PAGE_META_STATES_MASK (~__PKVM_PAGE_RESERVED)
+
+#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
+static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
+ enum pkvm_page_state state)
+{
+ prot &= ~PKVM_PAGE_STATE_PROT_MASK;
+ prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
+ return prot;
+}
+
+static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
+{
+ return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
+}
+
struct hyp_page {
- unsigned short refcount;
- unsigned short order;
+ u16 refcount;
+ u8 order;
+
+ /* Host (non-meta) state. Guarded by the host stage-2 lock. */
+ enum pkvm_page_state host_state : 8;
+
+ u32 host_share_guest_count;
};
extern u64 __hyp_vmemmap;
@@ -29,7 +67,13 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT))
-#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)])
+
+static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
+{
+ BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
+ return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
+}
+
#define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt))
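With the page state now packed via FIELD_PREP()/FIELD_GET(), meta-states such as PKVM_NOPAGE can live outside the two PTE software bits. A small round-trip sketch, assuming KVM_PGTABLE_PROT_R/W carry their usual meaning from kvm_pgtable.h:

/* Sketch only: encode a state into a prot value and read it back. */
static void page_state_roundtrip_sketch(void)
{
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;

        prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
        WARN_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);
}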
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 24a9a8330d19..e42bf68c8848 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu {
/* Backpointer to the host's (untrusted) vCPU instance. */
struct kvm_vcpu *host_vcpu;
+
+ /*
+ * If this hyp vCPU is loaded, then this is a backpointer to the
+ * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
+ */
+ struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};
/*
@@ -47,6 +53,8 @@ struct pkvm_hyp_vm {
struct pkvm_hyp_vcpu *vcpus[];
};
+extern hyp_spinlock_t vm_table_lock;
+
static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
@@ -58,6 +66,11 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
return vcpu_is_protected(&hyp_vcpu->vcpu);
}
+static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
+{
+ return kvm_vm_is_protected(&hyp_vm->kvm);
+}
+
void pkvm_hyp_vm_table_init(void *tbl);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
@@ -69,5 +82,15 @@ int __pkvm_teardown_vm(pkvm_handle_t handle);
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
+
+struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
+struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
+void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
+
+bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
+bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
+int kvm_check_pvm_sysreg_table(void);
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 53efda0235cf..2f4a4f5036bb 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -51,42 +51,55 @@ static void __debug_restore_spe(u64 pmscr_el1)
write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
-static void __debug_save_trace(u64 *trfcr_el1)
+static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
{
- *trfcr_el1 = 0;
+ *saved_trfcr = read_sysreg_el1(SYS_TRFCR);
+ write_sysreg_el1(new_trfcr, SYS_TRFCR);
+}
- /* Check if the TRBE is enabled */
- if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
- return;
- /*
- * Prohibit trace generation while we are in guest.
- * Since access to TRFCR_EL1 is trapped, the guest can't
- * modify the filtering set by the host.
- */
- *trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
- write_sysreg_el1(0, SYS_TRFCR);
- isb();
- /* Drain the trace buffer to memory */
- tsb_csync();
+static bool __trace_needs_drain(void)
+{
+ if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE))
+ return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E;
+
+ return host_data_test_flag(TRBE_ENABLED);
}
-static void __debug_restore_trace(u64 trfcr_el1)
+static bool __trace_needs_switch(void)
{
- if (!trfcr_el1)
- return;
+ return host_data_test_flag(TRBE_ENABLED) ||
+ host_data_test_flag(EL1_TRACING_CONFIGURED);
+}
- /* Restore trace filter controls */
- write_sysreg_el1(trfcr_el1, SYS_TRFCR);
+static void __trace_switch_to_guest(void)
+{
+ /* Unsupported with TRBE so disable */
+ if (host_data_test_flag(TRBE_ENABLED))
+ *host_data_ptr(trfcr_while_in_guest) = 0;
+
+ __trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
+ *host_data_ptr(trfcr_while_in_guest));
+
+ if (__trace_needs_drain()) {
+ isb();
+ tsb_csync();
+ }
+}
+
+static void __trace_switch_to_host(void)
+{
+ __trace_do_switch(host_data_ptr(trfcr_while_in_guest),
+ *host_data_ptr(host_debug_state.trfcr_el1));
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
- if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
+ if (host_data_test_flag(HAS_SPE))
__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
- /* Disable and flush Self-Hosted Trace generation */
- if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
- __debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1));
+
+ if (__trace_needs_switch())
+ __trace_switch_to_guest();
}
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -96,18 +109,13 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
- if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
+ if (host_data_test_flag(HAS_SPE))
__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
- if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
- __debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1));
+ if (__trace_needs_switch())
+ __trace_switch_to_host();
}
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
__debug_switch_to_host_common(vcpu);
}
-
-u64 __kvm_get_mdcr_el2(void)
-{
- return read_sysreg(mdcr_el2);
-}
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 3d610fc51f4d..58f0cb2298cc 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
/*
* Test whether the SP has overflowed, without corrupting a GPR.
- * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+ * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
* of SP should always be 1.
*/
add sp, sp, x0 // sp' = sp + x0
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
- tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+ tbz x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
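The same overflow test written out in C, purely for illustration: a valid hyp SP sits in the top half of a naturally aligned stack-plus-guard region, so bit NVHE_STACK_SHIFT is set; dropping into the guard page below the stack clears it.

/* Sketch, not part of the patch. */
static inline bool hyp_sp_overflowed_sketch(unsigned long sp)
{
        return !(sp & BIT(NVHE_STACK_SHIFT));
}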
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 6c90ef6736d6..6e12c070832f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -91,11 +91,34 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
+static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;
+
+ if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
+ hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
+ else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
+ hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
+}
+
+static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
+ host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
+ else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
+ host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
+}
+
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
fpsimd_sve_flush();
+ flush_debug_state(hyp_vcpu);
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
@@ -103,8 +126,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
/* Limit guest vector length to the maximum supported by the host. */
hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
- hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
-
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
@@ -112,8 +133,6 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
- hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
-
hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
@@ -127,6 +146,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
unsigned int i;
fpsimd_sve_sync(&hyp_vcpu->vcpu);
+ sync_debug_state(hyp_vcpu);
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
@@ -141,16 +161,46 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}
+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+ DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+
+ if (!is_protected_kvm_enabled())
+ return;
+
+ hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
+ if (!hyp_vcpu)
+ return;
+
+ if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
+ /* Propagate WFx trapping flags */
+ hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
+ hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+ }
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+
+ if (!is_protected_kvm_enabled())
+ return;
+
+ hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+ if (hyp_vcpu)
+ pkvm_put_hyp_vcpu(hyp_vcpu);
+}
+
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
int ret;
- host_vcpu = kern_hyp_va(host_vcpu);
-
if (unlikely(is_protected_kvm_enabled())) {
- struct pkvm_hyp_vcpu *hyp_vcpu;
- struct kvm *host_kvm;
+ struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
/*
* KVM (and pKVM) doesn't support SME guests for now, and
@@ -163,9 +213,6 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
goto out;
}
- host_kvm = kern_hyp_va(host_vcpu->kvm);
- hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
- host_vcpu->vcpu_idx);
if (!hyp_vcpu) {
ret = -EINVAL;
goto out;
@@ -176,12 +223,141 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
sync_hyp_vcpu(hyp_vcpu);
- pkvm_put_hyp_vcpu(hyp_vcpu);
} else {
/* The host is fully trusted, run its vCPU directly. */
- ret = __kvm_vcpu_run(host_vcpu);
+ ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
}
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
+ host_vcpu->arch.pkvm_memcache.nr_pages,
+ &host_vcpu->arch.pkvm_memcache);
+}
+
+static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, pfn, host_ctxt, 1);
+ DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+ if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+ goto out;
+
+ ret = pkvm_refill_memcache(hyp_vcpu);
+ if (ret)
+ goto out;
+
+ ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(u64, gfn, host_ctxt, 2);
+ struct pkvm_hyp_vm *hyp_vm;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vm = get_np_pkvm_hyp_vm(handle);
+ if (!hyp_vm)
+ goto out;
+
+ ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
+ put_pkvm_hyp_vm(hyp_vm);
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static void handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, gfn, host_ctxt, 1);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 2);
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+ if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+ goto out;
+
+ ret = __pkvm_host_relax_perms_guest(gfn, hyp_vcpu, prot);
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(u64, gfn, host_ctxt, 2);
+ struct pkvm_hyp_vm *hyp_vm;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vm = get_np_pkvm_hyp_vm(handle);
+ if (!hyp_vm)
+ goto out;
+
+ ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm);
+ put_pkvm_hyp_vm(hyp_vm);
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(u64, gfn, host_ctxt, 2);
+ DECLARE_REG(bool, mkold, host_ctxt, 3);
+ struct pkvm_hyp_vm *hyp_vm;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vm = get_np_pkvm_hyp_vm(handle);
+ if (!hyp_vm)
+ goto out;
+
+ ret = __pkvm_host_test_clear_young_guest(gfn, mkold, hyp_vm);
+ put_pkvm_hyp_vm(hyp_vm);
+out:
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
+static void handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, gfn, host_ctxt, 1);
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ int ret = -EINVAL;
+
+ if (!is_protected_kvm_enabled())
+ goto out;
+
+ hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+ if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+ goto out;
+ ret = __pkvm_host_mkyoung_guest(gfn, hyp_vcpu);
out:
cpu_reg(host_ctxt, 1) = ret;
}
@@ -233,6 +409,22 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}
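+/* Flush the stage-2 TLB entries of a non-protected guest identified by @handle. */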
+static void handle___pkvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ struct pkvm_hyp_vm *hyp_vm;
+
+ if (!is_protected_kvm_enabled())
+ return;
+
+ hyp_vm = get_np_pkvm_hyp_vm(handle);
+ if (!hyp_vm)
+ return;
+
+ __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
+ put_pkvm_hyp_vm(hyp_vm);
+}
+
static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -264,11 +456,6 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
__vgic_v3_init_lrs();
}
-static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
-{
- cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
-}
-
static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
@@ -384,7 +571,6 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
static const hcall_t host_hcall[] = {
/* ___kvm_hyp_init */
- HANDLE_FUNC(__kvm_get_mdcr_el2),
HANDLE_FUNC(__pkvm_init),
HANDLE_FUNC(__pkvm_create_private_mapping),
HANDLE_FUNC(__pkvm_cpu_set_vector),
@@ -395,6 +581,12 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_host_share_hyp),
HANDLE_FUNC(__pkvm_host_unshare_hyp),
+ HANDLE_FUNC(__pkvm_host_share_guest),
+ HANDLE_FUNC(__pkvm_host_unshare_guest),
+ HANDLE_FUNC(__pkvm_host_relax_perms_guest),
+ HANDLE_FUNC(__pkvm_host_wrprotect_guest),
+ HANDLE_FUNC(__pkvm_host_test_clear_young_guest),
+ HANDLE_FUNC(__pkvm_host_mkyoung_guest),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
@@ -409,6 +601,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_init_vm),
HANDLE_FUNC(__pkvm_init_vcpu),
HANDLE_FUNC(__pkvm_teardown_vm),
+ HANDLE_FUNC(__pkvm_vcpu_load),
+ HANDLE_FUNC(__pkvm_vcpu_put),
+ HANDLE_FUNC(__pkvm_tlb_flush_vmid),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index e75374d682f4..7ad7b133b81a 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -201,8 +201,8 @@ static void *guest_s2_zalloc_page(void *mc)
memset(addr, 0, PAGE_SIZE);
p = hyp_virt_to_page(addr);
- memset(p, 0, sizeof(*p));
p->refcount = 1;
+ p->order = 0;
return addr;
}
@@ -268,6 +268,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
+ struct hyp_page *page;
void *addr;
/* Dump all pgtable pages in the hyp_pool */
@@ -279,7 +280,9 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
/* Drain the hyp_pool into the memcache */
addr = hyp_alloc_pages(&vm->pool, 0);
while (addr) {
- memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+ page = hyp_virt_to_page(addr);
+ page->refcount = 0;
+ page->order = 0;
push_hyp_memcache(mc, addr, hyp_virt_to_phys);
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
addr = hyp_alloc_pages(&vm->pool, 0);
@@ -382,19 +385,28 @@ bool addr_is_memory(phys_addr_t phys)
return !!find_mem_range(phys, &range);
}
-static bool addr_is_allowed_memory(phys_addr_t phys)
+static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
+{
+ return range->start <= addr && addr < range->end;
+}
+
+static int check_range_allowed_memory(u64 start, u64 end)
{
struct memblock_region *reg;
struct kvm_mem_range range;
- reg = find_mem_range(phys, &range);
+ /*
+ * Callers can't check the state of a range that overlaps memory and
+ * MMIO regions, so ensure [start, end) is in the same kvm_mem_range.
+ */
+ reg = find_mem_range(start, &range);
+ if (!is_in_mem_range(end - 1, &range))
+ return -EINVAL;
- return reg && !(reg->flags & MEMBLOCK_NOMAP);
-}
+ if (!reg || reg->flags & MEMBLOCK_NOMAP)
+ return -EPERM;
-static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
-{
- return range->start <= addr && addr < range->end;
+ return 0;
}
static bool range_is_memory(u64 start, u64 end)
@@ -454,8 +466,10 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
if (kvm_pte_valid(pte))
return -EAGAIN;
- if (pte)
+ if (pte) {
+ WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE);
return -EPERM;
+ }
do {
u64 granule = kvm_granule_size(level);
@@ -477,10 +491,33 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}
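+/* Record the host ownership state of each page in the range in the hyp vmemmap. */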
+static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
+{
+ phys_addr_t end = addr + size;
+
+ for (; addr < end; addr += PAGE_SIZE)
+ hyp_phys_to_page(addr)->host_state = state;
+}
+
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
- return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
- addr, size, &host_s2_pool, owner_id);
+ int ret;
+
+ if (!addr_is_memory(addr))
+ return -EPERM;
+
+ ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
+ addr, size, &host_s2_pool, owner_id);
+ if (ret)
+ return ret;
+
+ /* Don't forget to update the vmemmap tracking for the host */
+ if (owner_id == PKVM_ID_HOST)
+ __host_update_page_state(addr, size, PKVM_PAGE_OWNED);
+ else
+ __host_update_page_state(addr, size, PKVM_NOPAGE);
+
+ return 0;
}
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
@@ -546,39 +583,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
BUG_ON(ret && ret != -EAGAIN);
}
-struct pkvm_mem_transition {
- u64 nr_pages;
-
- struct {
- enum pkvm_component_id id;
- /* Address in the initiator's address space */
- u64 addr;
-
- union {
- struct {
- /* Address in the completer's address space */
- u64 completer_addr;
- } host;
- struct {
- u64 completer_addr;
- } hyp;
- };
- } initiator;
-
- struct {
- enum pkvm_component_id id;
- } completer;
-};
-
-struct pkvm_mem_share {
- const struct pkvm_mem_transition tx;
- const enum kvm_pgtable_prot completer_prot;
-};
-
-struct pkvm_mem_donation {
- const struct pkvm_mem_transition tx;
-};
-
struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
@@ -604,115 +608,38 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
-{
- if (!addr_is_allowed_memory(addr))
- return PKVM_NOPAGE;
-
- if (!kvm_pte_valid(pte) && pte)
- return PKVM_NOPAGE;
-
- return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
-}
-
static int __host_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- struct check_walk_data d = {
- .desired = state,
- .get_page_state = host_get_page_state,
- };
+ u64 end = addr + size;
+ int ret;
+
+ ret = check_range_allowed_memory(addr, end);
+ if (ret)
+ return ret;
hyp_assert_lock_held(&host_mmu.lock);
- return check_page_state_range(&host_mmu.pgt, addr, size, &d);
+ for (; addr < end; addr += PAGE_SIZE) {
+ if (hyp_phys_to_page(addr)->host_state != state)
+ return -EPERM;
+ }
+
+ return 0;
}
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
-
- return host_stage2_idmap_locked(addr, size, prot);
-}
-
-static int host_request_owned_transition(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u64 addr = tx->initiator.addr;
-
- *completer_addr = tx->initiator.host.completer_addr;
- return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
-}
-
-static int host_request_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u64 addr = tx->initiator.addr;
-
- *completer_addr = tx->initiator.host.completer_addr;
- return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
-}
-
-static int host_initiate_share(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u64 addr = tx->initiator.addr;
-
- *completer_addr = tx->initiator.host.completer_addr;
- return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
-}
-
-static int host_initiate_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u64 addr = tx->initiator.addr;
-
- *completer_addr = tx->initiator.host.completer_addr;
- return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
-}
-
-static int host_initiate_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
-{
- u8 owner_id = tx->completer.id;
- u64 size = tx->nr_pages * PAGE_SIZE;
-
- *completer_addr = tx->initiator.host.completer_addr;
- return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
-}
-
-static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
-{
- return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
- tx->initiator.id != PKVM_ID_HYP);
-}
-
-static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
- enum pkvm_page_state state)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
-
- if (__host_ack_skip_pgtable_check(tx))
- return 0;
-
- return __host_check_page_state_range(addr, size, state);
-}
+ if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) {
+ int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
-static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
- return __host_ack_transition(addr, tx, PKVM_NOPAGE);
-}
+ if (ret)
+ return ret;
+ }
-static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u8 host_id = tx->completer.id;
+ __host_update_page_state(addr, size, state);
- return host_stage2_set_owner_locked(addr, size, host_id);
+ return 0;
}
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
@@ -735,573 +662,418 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
-static int hyp_request_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
- u64 size = tx->nr_pages * PAGE_SIZE;
- u64 addr = tx->initiator.addr;
+ if (!kvm_pte_valid(pte))
+ return PKVM_NOPAGE;
- *completer_addr = tx->initiator.hyp.completer_addr;
- return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
+ return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}
-static int hyp_initiate_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+ u64 size, enum pkvm_page_state state)
{
- u64 size = tx->nr_pages * PAGE_SIZE;
- int ret;
+ struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ struct check_walk_data d = {
+ .desired = state,
+ .get_page_state = guest_get_page_state,
+ };
- *completer_addr = tx->initiator.hyp.completer_addr;
- ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
- return (ret != size) ? -EFAULT : 0;
+ hyp_assert_lock_held(&vm->lock);
+ return check_page_state_range(&vm->pgt, addr, size, &d);
}
-static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
+int __pkvm_host_share_hyp(u64 pfn)
{
- return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
- tx->initiator.id != PKVM_ID_HOST);
-}
+ u64 phys = hyp_pfn_to_phys(pfn);
+ void *virt = __hyp_va(phys);
+ enum kvm_pgtable_prot prot;
+ u64 size = PAGE_SIZE;
+ int ret;
-static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
- enum kvm_pgtable_prot perms)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
+ host_lock_component();
+ hyp_lock_component();
- if (perms != PAGE_HYP)
- return -EPERM;
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ if (ret)
+ goto unlock;
+ if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+ ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
+ }
- if (__hyp_ack_skip_pgtable_check(tx))
- return 0;
+ prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
- return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
}
-static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+int __pkvm_host_unshare_hyp(u64 pfn)
{
- u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 virt = (u64)__hyp_va(phys);
+ u64 size = PAGE_SIZE;
+ int ret;
- if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
- return -EBUSY;
+ host_lock_component();
+ hyp_lock_component();
- return __hyp_check_page_state_range(addr, size,
- PKVM_PAGE_SHARED_BORROWED);
-}
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
+ if (ret)
+ goto unlock;
+ ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+ if (ret)
+ goto unlock;
+ if (hyp_page_count((void *)virt)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
-static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
+ WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+ WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
- if (__hyp_ack_skip_pgtable_check(tx))
- return 0;
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
- return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+ return ret;
}
-static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
- enum kvm_pgtable_prot perms)
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
- void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 size = PAGE_SIZE * nr_pages;
+ void *virt = __hyp_va(phys);
enum kvm_pgtable_prot prot;
+ int ret;
- prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
- return pkvm_create_mappings_locked(start, end, prot);
-}
+ host_lock_component();
+ hyp_lock_component();
-static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
-{
- u64 size = tx->nr_pages * PAGE_SIZE;
- int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ if (ret)
+ goto unlock;
+ if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+ ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
+ }
- return (ret != size) ? -EFAULT : 0;
-}
+ prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
-static int hyp_complete_donation(u64 addr,
- const struct pkvm_mem_transition *tx)
-{
- void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
- enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
- return pkvm_create_mappings_locked(start, end, prot);
+ return ret;
}
-static int check_share(struct pkvm_mem_share *share)
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
- const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 size = PAGE_SIZE * nr_pages;
+ u64 virt = (u64)__hyp_va(phys);
int ret;
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_request_owned_transition(&completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
+ host_lock_component();
+ hyp_lock_component();
+ ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
if (ret)
- return ret;
-
- switch (tx->completer.id) {
- case PKVM_ID_HYP:
- ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
- break;
- case PKVM_ID_FFA:
- /*
- * We only check the host; the secure side will check the other
- * end when we forward the FFA call.
- */
- ret = 0;
- break;
- default:
- ret = -EINVAL;
+ goto unlock;
+ if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
+ ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
}
+ WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+ WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
+
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
+
return ret;
}
-static int __do_share(struct pkvm_mem_share *share)
+int hyp_pin_shared_mem(void *from, void *to)
{
- const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
+ u64 size = end - start;
int ret;
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_initiate_share(&completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
+ host_lock_component();
+ hyp_lock_component();
+ ret = __host_check_page_state_range(__hyp_pa(start), size,
+ PKVM_PAGE_SHARED_OWNED);
if (ret)
- return ret;
-
- switch (tx->completer.id) {
- case PKVM_ID_HYP:
- ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
- break;
- case PKVM_ID_FFA:
- /*
- * We're not responsible for any secure page-tables, so there's
- * nothing to do here.
- */
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
+ goto unlock;
- return ret;
-}
+ ret = __hyp_check_page_state_range(start, size,
+ PKVM_PAGE_SHARED_BORROWED);
+ if (ret)
+ goto unlock;
-/*
- * do_share():
- *
- * The page owner grants access to another component with a given set
- * of permissions.
- *
- * Initiator: OWNED => SHARED_OWNED
- * Completer: NOPAGE => SHARED_BORROWED
- */
-static int do_share(struct pkvm_mem_share *share)
-{
- int ret;
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_inc(hyp_virt_to_page(cur));
- ret = check_share(share);
- if (ret)
- return ret;
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
- return WARN_ON(__do_share(share));
+ return ret;
}
-static int check_unshare(struct pkvm_mem_share *share)
+void hyp_unpin_shared_mem(void *from, void *to)
{
- const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
- int ret;
-
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_request_unshare(&completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
- if (ret)
- return ret;
+ host_lock_component();
+ hyp_lock_component();
- switch (tx->completer.id) {
- case PKVM_ID_HYP:
- ret = hyp_ack_unshare(completer_addr, tx);
- break;
- case PKVM_ID_FFA:
- /* See check_share() */
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_dec(hyp_virt_to_page(cur));
- return ret;
+ hyp_unlock_component();
+ host_unlock_component();
}
-static int __do_unshare(struct pkvm_mem_share *share)
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
- const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 size = PAGE_SIZE * nr_pages;
int ret;
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_initiate_unshare(&completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
-
- switch (tx->completer.id) {
- case PKVM_ID_HYP:
- ret = hyp_complete_unshare(completer_addr, tx);
- break;
- case PKVM_ID_FFA:
- /* See __do_share() */
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
+ host_lock_component();
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ if (!ret)
+ ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
+ host_unlock_component();
return ret;
}
-/*
- * do_unshare():
- *
- * The page owner revokes access from another component for a range of
- * pages which were previously shared using do_share().
- *
- * Initiator: SHARED_OWNED => OWNED
- * Completer: SHARED_BORROWED => NOPAGE
- */
-static int do_unshare(struct pkvm_mem_share *share)
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 size = PAGE_SIZE * nr_pages;
int ret;
- ret = check_unshare(share);
- if (ret)
- return ret;
+ host_lock_component();
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
+ if (!ret)
+ ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ host_unlock_component();
- return WARN_ON(__do_unshare(share));
+ return ret;
}
-static int check_donation(struct pkvm_mem_donation *donation)
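+/*
+ * Map the host page at @pfn into the guest stage-2 at @gfn. The page must be
+ * owned by the host, or already shared with (only) non-protected guests.
+ */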
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+ enum kvm_pgtable_prot prot)
{
- const struct pkvm_mem_transition *tx = &donation->tx;
- u64 completer_addr;
+ struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ u64 phys = hyp_pfn_to_phys(pfn);
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ struct hyp_page *page;
int ret;
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_request_owned_transition(&completer_addr, tx);
- break;
- case PKVM_ID_HYP:
- ret = hyp_request_donation(&completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
+ if (prot & ~KVM_PGTABLE_PROT_RWX)
+ return -EINVAL;
+ ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
if (ret)
return ret;
- switch (tx->completer.id) {
- case PKVM_ID_HOST:
- ret = host_ack_donation(completer_addr, tx);
- break;
- case PKVM_ID_HYP:
- ret = hyp_ack_donation(completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
+ host_lock_component();
+ guest_lock_component(vm);
-static int __do_donate(struct pkvm_mem_donation *donation)
-{
- const struct pkvm_mem_transition *tx = &donation->tx;
- u64 completer_addr;
- int ret;
+ ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+ if (ret)
+ goto unlock;
- switch (tx->initiator.id) {
- case PKVM_ID_HOST:
- ret = host_initiate_donation(&completer_addr, tx);
- break;
- case PKVM_ID_HYP:
- ret = hyp_initiate_donation(&completer_addr, tx);
+ page = hyp_phys_to_page(phys);
+ switch (page->host_state) {
+ case PKVM_PAGE_OWNED:
+ WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
break;
+ case PKVM_PAGE_SHARED_OWNED:
+ if (page->host_share_guest_count)
+ break;
+ /* Only host to np-guest multi-sharing is tolerated */
+ WARN_ON(1);
+ fallthrough;
default:
- ret = -EINVAL;
+ ret = -EPERM;
+ goto unlock;
}
- if (ret)
- return ret;
+ WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+ pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
+ &vcpu->vcpu.arch.pkvm_memcache, 0));
+ page->host_share_guest_count++;
- switch (tx->completer.id) {
- case PKVM_ID_HOST:
- ret = host_complete_donation(completer_addr, tx);
- break;
- case PKVM_ID_HYP:
- ret = hyp_complete_donation(completer_addr, tx);
- break;
- default:
- ret = -EINVAL;
- }
+unlock:
+ guest_unlock_component(vm);
+ host_unlock_component();
return ret;
}
-/*
- * do_donate():
- *
- * The page owner transfers ownership to another component, losing access
- * as a consequence.
- *
- * Initiator: OWNED => NOPAGE
- * Completer: NOPAGE => OWNED
- */
-static int do_donate(struct pkvm_mem_donation *donation)
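+/*
+ * Check that the guest stage-2 leaf mapping at @ipa covers a single page that
+ * the host has shared with this guest, and return its physical address.
+ */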
+static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
{
+ enum pkvm_page_state state;
+ struct hyp_page *page;
+ kvm_pte_t pte;
+ u64 phys;
+ s8 level;
int ret;
- ret = check_donation(donation);
+ ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
if (ret)
return ret;
+ if (level != KVM_PGTABLE_LAST_LEVEL)
+ return -E2BIG;
+ if (!kvm_pte_valid(pte))
+ return -ENOENT;
- return WARN_ON(__do_donate(donation));
-}
-
-int __pkvm_host_share_hyp(u64 pfn)
-{
- int ret;
- u64 host_addr = hyp_pfn_to_phys(pfn);
- u64 hyp_addr = (u64)__hyp_va(host_addr);
- struct pkvm_mem_share share = {
- .tx = {
- .nr_pages = 1,
- .initiator = {
- .id = PKVM_ID_HOST,
- .addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
- },
- .completer = {
- .id = PKVM_ID_HYP,
- },
- },
- .completer_prot = PAGE_HYP,
- };
+ state = guest_get_page_state(pte, ipa);
+ if (state != PKVM_PAGE_SHARED_BORROWED)
+ return -EPERM;
- host_lock_component();
- hyp_lock_component();
+ phys = kvm_pte_to_phys(pte);
+ ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+ if (WARN_ON(ret))
+ return ret;
- ret = do_share(&share);
+ page = hyp_phys_to_page(phys);
+ if (page->host_state != PKVM_PAGE_SHARED_OWNED)
+ return -EPERM;
+ if (WARN_ON(!page->host_share_guest_count))
+ return -EINVAL;
- hyp_unlock_component();
- host_unlock_component();
+ *__phys = phys;
- return ret;
+ return 0;
}
-int __pkvm_host_unshare_hyp(u64 pfn)
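+/*
+ * Tear down the guest stage-2 mapping at @gfn and, once no guest shares the
+ * page any more, return it to the host as exclusively owned.
+ */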
+int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
{
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ struct hyp_page *page;
+ u64 phys;
int ret;
- u64 host_addr = hyp_pfn_to_phys(pfn);
- u64 hyp_addr = (u64)__hyp_va(host_addr);
- struct pkvm_mem_share share = {
- .tx = {
- .nr_pages = 1,
- .initiator = {
- .id = PKVM_ID_HOST,
- .addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
- },
- .completer = {
- .id = PKVM_ID_HYP,
- },
- },
- .completer_prot = PAGE_HYP,
- };
host_lock_component();
- hyp_lock_component();
+ guest_lock_component(vm);
+
+ ret = __check_host_shared_guest(vm, &phys, ipa);
+ if (ret)
+ goto unlock;
+
+ ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+ if (ret)
+ goto unlock;
- ret = do_unshare(&share);
+ page = hyp_phys_to_page(phys);
+ page->host_share_guest_count--;
+ if (!page->host_share_guest_count)
+ WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
- hyp_unlock_component();
+unlock:
+ guest_unlock_component(vm);
host_unlock_component();
return ret;
}
-int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
+int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
+ struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 phys;
int ret;
- u64 host_addr = hyp_pfn_to_phys(pfn);
- u64 hyp_addr = (u64)__hyp_va(host_addr);
- struct pkvm_mem_donation donation = {
- .tx = {
- .nr_pages = nr_pages,
- .initiator = {
- .id = PKVM_ID_HOST,
- .addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
- },
- .completer = {
- .id = PKVM_ID_HYP,
- },
- },
- };
+
+ if (prot & ~KVM_PGTABLE_PROT_RWX)
+ return -EINVAL;
host_lock_component();
- hyp_lock_component();
+ guest_lock_component(vm);
- ret = do_donate(&donation);
+ ret = __check_host_shared_guest(vm, &phys, ipa);
+ if (!ret)
+ ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
- hyp_unlock_component();
+ guest_unlock_component(vm);
host_unlock_component();
return ret;
}
-int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
+int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
{
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 phys;
int ret;
- u64 host_addr = hyp_pfn_to_phys(pfn);
- u64 hyp_addr = (u64)__hyp_va(host_addr);
- struct pkvm_mem_donation donation = {
- .tx = {
- .nr_pages = nr_pages,
- .initiator = {
- .id = PKVM_ID_HYP,
- .addr = hyp_addr,
- .hyp = {
- .completer_addr = host_addr,
- },
- },
- .completer = {
- .id = PKVM_ID_HOST,
- },
- },
- };
host_lock_component();
- hyp_lock_component();
+ guest_lock_component(vm);
- ret = do_donate(&donation);
+ ret = __check_host_shared_guest(vm, &phys, ipa);
+ if (!ret)
+ ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
- hyp_unlock_component();
+ guest_unlock_component(vm);
host_unlock_component();
return ret;
}
-int hyp_pin_shared_mem(void *from, void *to)
+int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm)
{
- u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
- u64 end = PAGE_ALIGN((u64)to);
- u64 size = end - start;
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 phys;
int ret;
host_lock_component();
- hyp_lock_component();
-
- ret = __host_check_page_state_range(__hyp_pa(start), size,
- PKVM_PAGE_SHARED_OWNED);
- if (ret)
- goto unlock;
-
- ret = __hyp_check_page_state_range(start, size,
- PKVM_PAGE_SHARED_BORROWED);
- if (ret)
- goto unlock;
+ guest_lock_component(vm);
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_inc(hyp_virt_to_page(cur));
+ ret = __check_host_shared_guest(vm, &phys, ipa);
+ if (!ret)
+ ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
-unlock:
- hyp_unlock_component();
+ guest_unlock_component(vm);
host_unlock_component();
return ret;
}
-void hyp_unpin_shared_mem(void *from, void *to)
-{
- u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
- u64 end = PAGE_ALIGN((u64)to);
-
- host_lock_component();
- hyp_lock_component();
-
- for (cur = start; cur < end; cur += PAGE_SIZE)
- hyp_page_ref_dec(hyp_virt_to_page(cur));
-
- hyp_unlock_component();
- host_unlock_component();
-}
-
-int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
+int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
+ struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ u64 ipa = hyp_pfn_to_phys(gfn);
+ u64 phys;
int ret;
- struct pkvm_mem_share share = {
- .tx = {
- .nr_pages = nr_pages,
- .initiator = {
- .id = PKVM_ID_HOST,
- .addr = hyp_pfn_to_phys(pfn),
- },
- .completer = {
- .id = PKVM_ID_FFA,
- },
- },
- };
host_lock_component();
- ret = do_share(&share);
- host_unlock_component();
+ guest_lock_component(vm);
- return ret;
-}
+ ret = __check_host_shared_guest(vm, &phys, ipa);
+ if (!ret)
+ kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
-int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
-{
- int ret;
- struct pkvm_mem_share share = {
- .tx = {
- .nr_pages = nr_pages,
- .initiator = {
- .id = PKVM_ID_HOST,
- .addr = hyp_pfn_to_phys(pfn),
- },
- .completer = {
- .id = PKVM_ID_FFA,
- },
- },
- };
-
- host_lock_component();
- ret = do_unshare(&share);
+ guest_unlock_component(vm);
host_unlock_component();
return ret;
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 8850b591d775..f41c7440b34b 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
prev_base = __io_map_base;
/*
- * Efficient stack verification using the PAGE_SHIFT bit implies
+ * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
* an alignment of our allocation on the order of the size.
*/
- size = PAGE_SIZE * 2;
+ size = NVHE_STACK_SIZE * 2;
addr = ALIGN(__io_map_base, size);
ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
* at the higher address and leave the lower guard page
* unbacked.
*
- * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
* and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
*/
- ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
- PAGE_SIZE, phys, PAGE_HYP);
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
+ NVHE_STACK_SIZE, phys, PAGE_HYP);
if (ret)
__io_map_base = prev_base;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index e691290d3765..a1eb27a1a747 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -32,7 +32,7 @@ u64 __hyp_vmemmap;
*/
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned short order)
+ u8 order)
{
phys_addr_t addr = hyp_page_to_phys(p);
@@ -51,7 +51,7 @@ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned short order)
+ u8 order)
{
struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
@@ -94,7 +94,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
struct hyp_page *p)
{
phys_addr_t phys = hyp_page_to_phys(p);
- unsigned short order = p->order;
+ u8 order = p->order;
struct hyp_page *buddy;
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
@@ -129,7 +129,7 @@ insert:
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
struct hyp_page *p,
- unsigned short order)
+ u8 order)
{
struct hyp_page *buddy;
@@ -183,7 +183,7 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
void hyp_split_page(struct hyp_page *p)
{
- unsigned short order = p->order;
+ u8 order = p->order;
unsigned int i;
p->order = 0;
@@ -195,10 +195,10 @@ void hyp_split_page(struct hyp_page *p)
}
}
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
+void *hyp_alloc_pages(struct hyp_pool *pool, u8 order)
{
- unsigned short i = order;
struct hyp_page *p;
+ u8 i = order;
hyp_spin_lock(&pool->lock);
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 73e319891327..3927fe52a3dd 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -9,7 +9,6 @@
#include <asm/kvm_emulate.h>
-#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
@@ -24,232 +23,160 @@ unsigned int kvm_arm_vmid_bits;
unsigned int kvm_host_sve_max_vl;
/*
- * Set trap register values based on features in ID_AA64PFR0.
+ * The currently loaded hyp vCPU for each physical CPU. Used only when
+ * protected KVM is enabled, but for both protected and non-protected VMs.
*/
-static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
-{
- const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
- u64 hcr_set = HCR_RW;
- u64 hcr_clear = 0;
- u64 cptr_set = 0;
- u64 cptr_clear = 0;
-
- /* Protected KVM does not support AArch32 guests. */
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP);
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP);
+static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
- /*
- * Linux guests assume support for floating-point and Advanced SIMD. Do
- * not change the trapping behavior for these from the KVM default.
- */
- BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
- PVM_ID_AA64PFR0_ALLOW));
- BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
- PVM_ID_AA64PFR0_ALLOW));
+static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (has_hvhe())
- hcr_set |= HCR_E2H;
+ vcpu->arch.hcr_el2 |= HCR_E2H;
- /* Trap RAS unless all current versions are supported */
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
- ID_AA64PFR0_EL1_RAS_V1P1) {
- hcr_set |= HCR_TERR | HCR_TEA;
- hcr_clear |= HCR_FIEN;
+ if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+ /* route synchronous external abort exceptions to EL2 */
+ vcpu->arch.hcr_el2 |= HCR_TEA;
+ /* trap error record accesses */
+ vcpu->arch.hcr_el2 |= HCR_TERR;
}
- /* Trap AMU */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
- hcr_clear |= HCR_AMVOFFEN;
- cptr_set |= CPTR_EL2_TAM;
- }
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+ vcpu->arch.hcr_el2 |= HCR_FWB;
- /* Trap SVE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
- if (has_hvhe())
- cptr_clear |= CPACR_EL1_ZEN;
- else
- cptr_set |= CPTR_EL2_TZ;
- }
+ if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+ !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+ vcpu->arch.hcr_el2 |= HCR_TID4;
+ else
+ vcpu->arch.hcr_el2 |= HCR_TID2;
- vcpu->arch.hcr_el2 |= hcr_set;
- vcpu->arch.hcr_el2 &= ~hcr_clear;
- vcpu->arch.cptr_el2 |= cptr_set;
- vcpu->arch.cptr_el2 &= ~cptr_clear;
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+
+ if (kvm_has_mte(vcpu->kvm))
+ vcpu->arch.hcr_el2 |= HCR_ATA;
}
-/*
- * Set trap register values based on features in ID_AA64PFR1.
- */
-static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
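+/* Set HCR_EL2 traps for features that are not exposed to the protected VM. */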
+static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
- const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
- u64 hcr_set = 0;
- u64 hcr_clear = 0;
+ struct kvm *kvm = vcpu->kvm;
+ u64 val = vcpu->arch.hcr_el2;
- /* Memory Tagging: Trap and Treat as Untagged if not supported. */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
- hcr_set |= HCR_TID5;
- hcr_clear |= HCR_DCT | HCR_ATA;
+ /* No support for AArch32. */
+ val |= HCR_RW;
+
+ /*
+ * Always trap:
+ * - Feature id registers: to control features exposed to guests
+ * - Implementation-defined features
+ */
+ val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;
+
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
+ val |= HCR_TERR | HCR_TEA;
+ val &= ~(HCR_FIEN);
}
- vcpu->arch.hcr_el2 |= hcr_set;
- vcpu->arch.hcr_el2 &= ~hcr_clear;
-}
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
+ val &= ~(HCR_AMVOFFEN);
-/*
- * Set trap register values based on features in ID_AA64DFR0.
- */
-static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
-{
- const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
- u64 mdcr_set = 0;
- u64 mdcr_clear = 0;
- u64 cptr_set = 0;
-
- /* Trap/constrain PMU */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
- mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
- mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
- MDCR_EL2_HPMN_MASK;
+ if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
+ val |= HCR_TID5;
+ val &= ~(HCR_DCT | HCR_ATA);
}
- /* Trap Debug */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
- mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+ val |= HCR_TLOR;
- /* Trap OS Double Lock */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
- mdcr_set |= MDCR_EL2_TDOSA;
+ vcpu->arch.hcr_el2 = val;
+}
+
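+/*
+ * Set MDCR_EL2 traps for the debug, PMU, SPE, and trace features that are not
+ * exposed to the protected VM.
+ */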
+static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 val = vcpu->arch.mdcr_el2;
- /* Trap SPE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
- mdcr_set |= MDCR_EL2_TPMS;
- mdcr_clear |= MDCR_EL2_E2PB_MASK;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
+ val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
+ val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
}
- /* Trap Trace Filter */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
- mdcr_set |= MDCR_EL2_TTRF;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
+ val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;
- /* Trap Trace */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
- if (has_hvhe())
- cptr_set |= CPACR_EL1_TTA;
- else
- cptr_set |= CPTR_EL2_TTA;
- }
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
+ val |= MDCR_EL2_TDOSA;
- /* Trap External Trace */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
- mdcr_clear |= MDCR_EL2_E2TB_MASK;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
+ val |= MDCR_EL2_TPMS;
+ val &= ~MDCR_EL2_E2PB_MASK;
+ }
- vcpu->arch.mdcr_el2 |= mdcr_set;
- vcpu->arch.mdcr_el2 &= ~mdcr_clear;
- vcpu->arch.cptr_el2 |= cptr_set;
-}
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
+ val |= MDCR_EL2_TTRF;
-/*
- * Set trap register values based on features in ID_AA64MMFR0.
- */
-static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
-{
- const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
- u64 mdcr_set = 0;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
+ val |= MDCR_EL2_E2TB_MASK;
/* Trap Debug Communications Channel registers */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
- mdcr_set |= MDCR_EL2_TDCC;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
+ val |= MDCR_EL2_TDCC;
- vcpu->arch.mdcr_el2 |= mdcr_set;
+ vcpu->arch.mdcr_el2 = val;
}
/*
- * Set trap register values based on features in ID_AA64MMFR1.
+ * Check that cpu features that are neither trapped nor supported are not
+ * enabled for protected VMs.
*/
-static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
+static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
- const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
- u64 hcr_set = 0;
-
- /* Trap LOR */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
- hcr_set |= HCR_TLOR;
+ struct kvm *kvm = vcpu->kvm;
- vcpu->arch.hcr_el2 |= hcr_set;
-}
-
-/*
- * Set baseline trap register values.
- */
-static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
-{
- const u64 hcr_trap_feat_regs = HCR_TID3;
- const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;
+ /* Protected KVM does not support AArch32 guests. */
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
+ kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
+ return -EINVAL;
/*
- * Always trap:
- * - Feature id registers: to control features exposed to guests
- * - Implementation-defined features
+ * Linux guests assume support for floating-point and Advanced SIMD. Do
+ * not change the trapping behavior for these from the KVM default.
*/
- vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;
-
- /* Clear res0 and set res1 bits to trap potential new features. */
- vcpu->arch.hcr_el2 &= ~(HCR_RES0);
- vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
- if (!has_hvhe()) {
- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
- }
-}
-
-static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
-
- if (has_hvhe())
- vcpu->arch.hcr_el2 |= HCR_E2H;
-
- if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
- /* route synchronous external abort exceptions to EL2 */
- vcpu->arch.hcr_el2 |= HCR_TEA;
- /* trap error record accesses */
- vcpu->arch.hcr_el2 |= HCR_TERR;
- }
-
- if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- vcpu->arch.hcr_el2 |= HCR_FWB;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
+ !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
+ return -EINVAL;
- if (cpus_have_final_cap(ARM64_HAS_EVT) &&
- !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
- vcpu->arch.hcr_el2 |= HCR_TID4;
- else
- vcpu->arch.hcr_el2 |= HCR_TID2;
+ /* No SME support in KVM right now. Check so we catch it if that ever changes. */
+ if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
+ return -EINVAL;
- if (vcpu_has_ptrauth(vcpu))
- vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+ return 0;
}
/*
* Initialize trap register values in protected mode.
*/
-static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+ struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+ int ret;
+
vcpu->arch.mdcr_el2 = 0;
pkvm_vcpu_reset_hcr(vcpu);
- if ((!vcpu_is_protected(vcpu)))
- return;
+ if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu)))
+ return 0;
+
+ ret = pkvm_check_pvm_cpu_features(vcpu);
+ if (ret)
+ return ret;
+
+ pvm_init_traps_hcr(vcpu);
+ pvm_init_traps_mdcr(vcpu);
- pvm_init_trap_regs(vcpu);
- pvm_init_traps_aa64pfr0(vcpu);
- pvm_init_traps_aa64pfr1(vcpu);
- pvm_init_traps_aa64dfr0(vcpu);
- pvm_init_traps_aa64mmfr0(vcpu);
- pvm_init_traps_aa64mmfr1(vcpu);
+ return 0;
}
/*
@@ -270,10 +197,10 @@ static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
/*
* Spinlock for protecting state related to the VM table. Protects writes
- * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
- * 'last_hyp_vcpu_lookup'.
+ * to 'vm_table', 'nr_table_entries', and other per-VM state during initialization.
+ * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
*/
-static DEFINE_HYP_SPINLOCK(vm_table_lock);
+DEFINE_HYP_SPINLOCK(vm_table_lock);
/*
* The table of VM entries for protected VMs in hyp.
@@ -306,15 +233,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
struct pkvm_hyp_vm *hyp_vm;
+ /* Cannot load a new vcpu without putting the old one first. */
+ if (__this_cpu_read(loaded_hyp_vcpu))
+ return NULL;
+
hyp_spin_lock(&vm_table_lock);
hyp_vm = get_vm_by_handle(handle);
if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
goto unlock;
hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+
+ /* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+ if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
+ hyp_vcpu = NULL;
+ goto unlock;
+ }
+
+ hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
hyp_spin_unlock(&vm_table_lock);
+
+ if (hyp_vcpu)
+ __this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
return hyp_vcpu;
}
@@ -323,17 +265,63 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
hyp_spin_lock(&vm_table_lock);
+ hyp_vcpu->loaded_hyp_vcpu = NULL;
+ __this_cpu_write(loaded_hyp_vcpu, NULL);
+ hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
+ hyp_spin_unlock(&vm_table_lock);
+}
+
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
+{
+ return __this_cpu_read(loaded_hyp_vcpu);
+}
+
+struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
+{
+ struct pkvm_hyp_vm *hyp_vm;
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_vm = get_vm_by_handle(handle);
+ if (hyp_vm)
+ hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
+ hyp_spin_unlock(&vm_table_lock);
+
+ return hyp_vm;
+}
+
+void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
+{
+ hyp_spin_lock(&vm_table_lock);
hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
hyp_spin_unlock(&vm_table_lock);
}
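+/* Get a reference on a VM by handle, but only if it is non-protected. */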
+struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
+{
+ struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);
+
+ if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
+ put_pkvm_hyp_vm(hyp_vm);
+ hyp_vm = NULL;
+ }
+
+ return hyp_vm;
+}
+
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
struct kvm *kvm = &hyp_vm->kvm;
+ unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
+ if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
+ set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
+
/* No restrictions for non-protected VMs. */
if (!kvm_vm_is_protected(kvm)) {
+ hyp_vm->kvm.arch.flags = host_arch_flags;
+
bitmap_copy(kvm->arch.vcpu_features,
host_kvm->arch.vcpu_features,
KVM_VCPU_MAX_FEATURES);
@@ -342,50 +330,26 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
- /*
- * For protected VMs, always allow:
- * - CPU starting in poweroff state
- * - PSCI v0.2
- */
- set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
- /*
- * Check if remaining features are allowed:
- * - Performance Monitoring
- * - Scalable Vectors
- * - Pointer Authentication
- */
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
+ if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
- set_bit(KVM_ARM_VCPU_SVE, allowed_features);
-
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
- FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+ if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS))
set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
- FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
+ if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
+ if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
+ set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+ kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
+ }
+
bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
allowed_features, KVM_VCPU_MAX_FEATURES);
}
-static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
-{
- struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
-
- if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
- vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
- kvm_vcpu_enable_ptrauth(vcpu);
- } else {
- vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
- }
-}
-
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
if (host_vcpu)
@@ -408,6 +372,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
hyp_vm->kvm.created_vcpus = nr_vcpus;
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+ hyp_vm->kvm.arch.flags = 0;
pkvm_init_features_from_host(hyp_vm, host_kvm);
}
@@ -415,10 +380,8 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
{
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
- if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
- vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+ if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
- }
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -446,9 +409,14 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+ kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+
+ ret = pkvm_vcpu_init_traps(hyp_vcpu);
+ if (ret)
+ goto done;
+
pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
- pkvm_vcpu_init_ptrauth(hyp_vcpu);
- pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
done:
if (ret)
unpin_host_vcpu(host_vcpu);
@@ -693,8 +661,6 @@ unlock:
return ret;
}
- hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
return 0;
}
@@ -746,6 +712,14 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
/* Push the metadata pages to the teardown memcache */
for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+ struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
+
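+ /* Return the vCPU's memcache pages through the teardown memcache. */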
+ while (vcpu_mc->nr_pages) {
+ void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
+
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ unmap_donated_memory_noclear(addr, PAGE_SIZE);
+ }
teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
}
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index cbdd18cd3f98..d62bcb5634a2 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -12,7 +12,6 @@
#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
-#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
@@ -180,7 +179,6 @@ static void hpool_put_page(void *addr)
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
- enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
phys_addr_t phys;
@@ -203,16 +201,16 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
case PKVM_PAGE_OWNED:
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
- prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
+ hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED;
break;
case PKVM_PAGE_SHARED_BORROWED:
- prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
+ hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED;
break;
default:
return -EINVAL;
}
- return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
+ return 0;
}
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index ed6b58b19cfa..5b6eeab1a774 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
- stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+ stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
stacktrace_info->fp = fp;
stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
{
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
unsigned long high = params->stack_hyp_va;
- unsigned long low = high - PAGE_SIZE;
+ unsigned long low = high - NVHE_STACK_SIZE;
return (struct stack_info) {
.low = low,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 0f6b01b3da5c..6c846d033d24 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -26,7 +26,6 @@
#include <asm/debug-monitors.h>
#include <asm/processor.h>
-#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
/* Non-VHE specific context */
@@ -36,33 +35,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
-static void __activate_traps(struct kvm_vcpu *vcpu)
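+/*
+ * Compute the CPTR_EL2 (or CPACR_EL1 when HVHE) trap bits from the vCPU's
+ * features and the current FP/SVE register ownership.
+ */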
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
- ___activate_traps(vcpu, vcpu->arch.hcr_el2);
- __activate_traps_common(vcpu);
+ if (has_hvhe()) {
+ val |= CPACR_EL1_TTA;
- val = vcpu->arch.cptr_el2;
- val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
- val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
- if (cpus_have_final_cap(ARM64_SME)) {
- if (has_hvhe())
- val &= ~CPACR_EL1_SMEN;
- else
- val |= CPTR_EL2_TSM;
- }
+ if (guest_owns_fp_regs()) {
+ val |= CPACR_EL1_FPEN;
+ if (vcpu_has_sve(vcpu))
+ val |= CPACR_EL1_ZEN;
+ }
+ } else {
+ val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
- if (!guest_owns_fp_regs()) {
- if (has_hvhe())
- val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
- else
- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+ /*
+ * Always trap SME since it's not supported in KVM.
+ * TSM is RES1 if SME isn't implemented.
+ */
+ val |= CPTR_EL2_TSM;
- __activate_traps_fpsimd32(vcpu);
+ if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+ val |= CPTR_EL2_TZ;
+
+ if (!guest_owns_fp_regs())
+ val |= CPTR_EL2_TFP;
}
+ if (!guest_owns_fp_regs())
+ __activate_traps_fpsimd32(vcpu);
+
kvm_write_cptr_el2(val);
+}
+
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+ ___activate_traps(vcpu, vcpu->arch.hcr_el2);
+ __activate_traps_common(vcpu);
+ __activate_cptr_traps(vcpu);
+
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 2860548d4250..1ddd9ed3cbb3 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -11,7 +11,7 @@
#include <hyp/adjust_pc.h>
-#include <nvhe/fixed_config.h>
+#include <nvhe/pkvm.h>
#include "../../sys_regs.h"
@@ -28,222 +28,255 @@ u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;
u64 id_aa64smfr0_el1_sys_val;
-/*
- * Inject an unknown/undefined exception to an AArch64 guest while most of its
- * sysregs are live.
- */
-static void inject_undef64(struct kvm_vcpu *vcpu)
-{
- u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
-
- *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
- *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
-
- kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
-
- __kvm_adjust_pc(vcpu);
-
- write_sysreg_el1(esr, SYS_ESR);
- write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
- write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
- write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
-}
-
-/*
- * Returns the restricted features values of the feature register based on the
- * limitations in restrict_fields.
- * A feature id field value of 0b0000 does not impose any restrictions.
- * Note: Use only for unsigned feature field values.
- */
-static u64 get_restricted_features_unsigned(u64 sys_reg_val,
- u64 restrict_fields)
-{
- u64 value = 0UL;
- u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+struct pvm_ftr_bits {
+ bool sign;
+ u8 shift;
+ u8 width;
+ u8 max_val;
+ bool (*vm_supported)(const struct kvm *kvm);
+};
- /*
- * According to the Arm Architecture Reference Manual, feature fields
- * use increasing values to indicate increases in functionality.
- * Iterate over the restricted feature fields and calculate the minimum
- * unsigned value between the one supported by the system, and what the
- * value is being restricted to.
- */
- while (sys_reg_val && restrict_fields) {
- value |= min(sys_reg_val & mask, restrict_fields & mask);
- sys_reg_val &= ~mask;
- restrict_fields &= ~mask;
- mask <<= ARM64_FEATURE_FIELD_BITS;
+#define __MAX_FEAT_FUNC(id, fld, max, func, sgn) \
+ { \
+ .sign = sgn, \
+ .shift = id##_##fld##_SHIFT, \
+ .width = id##_##fld##_WIDTH, \
+ .max_val = id##_##fld##_##max, \
+ .vm_supported = func, \
}
- return value;
-}
-
-/*
- * Functions that return the value of feature id registers for protected VMs
- * based on allowed features, system features, and KVM support.
- */
+#define MAX_FEAT_FUNC(id, fld, max, func) \
+ __MAX_FEAT_FUNC(id, fld, max, func, id##_##fld##_SIGNED)
-static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
-{
- u64 set_mask = 0;
- u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
+#define MAX_FEAT(id, fld, max) \
+ MAX_FEAT_FUNC(id, fld, max, NULL)
- set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+#define MAX_FEAT_ENUM(id, fld, max) \
+ __MAX_FEAT_FUNC(id, fld, max, NULL, false)
- return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
-}
+#define FEAT_END { .width = 0, }
-static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
+static bool vm_has_ptrauth(const struct kvm *kvm)
{
- const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
- u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
-
- if (!kvm_has_mte(kvm))
- allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
- return id_aa64pfr1_el1_sys_val & allow_mask;
-}
-
-static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
-{
- /*
- * No support for Scalable Vectors, therefore, hyp has no sanitized
- * copy of the feature id register.
- */
- BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
- return 0;
-}
-
-static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
-{
- /*
- * No support for debug, including breakpoints, and watchpoints,
- * therefore, pKVM has no sanitized copy of the feature id register.
- */
- BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
- return 0;
-}
-
-static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
-{
- /*
- * No support for debug, therefore, hyp has no sanitized copy of the
- * feature id register.
- */
- BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
- return 0;
-}
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ return false;
-static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
-{
- /*
- * No support for implementation defined features, therefore, hyp has no
- * sanitized copy of the feature id register.
- */
- BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
- return 0;
+ return (cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||
+ cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&
+ kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC);
}
-static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
+static bool vm_has_sve(const struct kvm *kvm)
{
- /*
- * No support for implementation defined features, therefore, hyp has no
- * sanitized copy of the feature id register.
- */
- BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
- return 0;
+ return system_supports_sve() && kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_SVE);
}
-static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
-{
- return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
-}
+/*
+ * Definitions for features to be allowed or restricted for protected guests.
+ *
+ * Each field in the masks represents the highest supported value for the
+ * feature. If a feature field is not present, it is not supported. Moreover,
+ * these are used to generate the guest's view of the feature registers.
+ *
+ * The approach for protected VMs is to at least support features that are:
+ * - Needed by common Linux distributions (e.g., floating point)
+ * - Trivial to support, e.g., supporting the feature does not introduce or
+ * require tracking of additional state in KVM
+ * - Cannot be trapped or prevent the guest from using anyway
+ */
-static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
-{
- u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
+static const struct pvm_ftr_bits pvmid_aa64pfr0[] = {
+ MAX_FEAT(ID_AA64PFR0_EL1, EL0, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, EL1, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, EL2, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, EL3, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, FP, FP16),
+ MAX_FEAT(ID_AA64PFR0_EL1, AdvSIMD, FP16),
+ MAX_FEAT(ID_AA64PFR0_EL1, GIC, IMP),
+ MAX_FEAT_FUNC(ID_AA64PFR0_EL1, SVE, IMP, vm_has_sve),
+ MAX_FEAT(ID_AA64PFR0_EL1, RAS, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, DIT, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, CSV2, IMP),
+ MAX_FEAT(ID_AA64PFR0_EL1, CSV3, IMP),
+ FEAT_END
+};
- if (!vcpu_has_ptrauth(vcpu))
- allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
- ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+static const struct pvm_ftr_bits pvmid_aa64pfr1[] = {
+ MAX_FEAT(ID_AA64PFR1_EL1, BT, IMP),
+ MAX_FEAT(ID_AA64PFR1_EL1, SSBS, SSBS2),
+ MAX_FEAT_ENUM(ID_AA64PFR1_EL1, MTE_frac, NI),
+ FEAT_END
+};
- return id_aa64isar1_el1_sys_val & allow_mask;
-}
+static const struct pvm_ftr_bits pvmid_aa64mmfr0[] = {
+ MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, PARANGE, 40),
+ MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, ASIDBITS, 16),
+ MAX_FEAT(ID_AA64MMFR0_EL1, BIGEND, IMP),
+ MAX_FEAT(ID_AA64MMFR0_EL1, SNSMEM, IMP),
+ MAX_FEAT(ID_AA64MMFR0_EL1, BIGENDEL0, IMP),
+ MAX_FEAT(ID_AA64MMFR0_EL1, EXS, IMP),
+ FEAT_END
+};
-static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
-{
- u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
+static const struct pvm_ftr_bits pvmid_aa64mmfr1[] = {
+ MAX_FEAT(ID_AA64MMFR1_EL1, HAFDBS, DBM),
+ MAX_FEAT_ENUM(ID_AA64MMFR1_EL1, VMIDBits, 16),
+ MAX_FEAT(ID_AA64MMFR1_EL1, HPDS, HPDS2),
+ MAX_FEAT(ID_AA64MMFR1_EL1, PAN, PAN3),
+ MAX_FEAT(ID_AA64MMFR1_EL1, SpecSEI, IMP),
+ MAX_FEAT(ID_AA64MMFR1_EL1, ETS, IMP),
+ MAX_FEAT(ID_AA64MMFR1_EL1, CMOW, IMP),
+ FEAT_END
+};
- if (!vcpu_has_ptrauth(vcpu))
- allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
- ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+static const struct pvm_ftr_bits pvmid_aa64mmfr2[] = {
+ MAX_FEAT(ID_AA64MMFR2_EL1, CnP, IMP),
+ MAX_FEAT(ID_AA64MMFR2_EL1, UAO, IMP),
+ MAX_FEAT(ID_AA64MMFR2_EL1, IESB, IMP),
+ MAX_FEAT(ID_AA64MMFR2_EL1, AT, IMP),
+ MAX_FEAT_ENUM(ID_AA64MMFR2_EL1, IDS, 0x18),
+ MAX_FEAT(ID_AA64MMFR2_EL1, TTL, IMP),
+ MAX_FEAT(ID_AA64MMFR2_EL1, BBM, 2),
+ MAX_FEAT(ID_AA64MMFR2_EL1, E0PD, IMP),
+ FEAT_END
+};
- return id_aa64isar2_el1_sys_val & allow_mask;
-}
+static const struct pvm_ftr_bits pvmid_aa64isar1[] = {
+ MAX_FEAT(ID_AA64ISAR1_EL1, DPB, DPB2),
+ MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, APA, PAuth, vm_has_ptrauth),
+ MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, API, PAuth, vm_has_ptrauth),
+ MAX_FEAT(ID_AA64ISAR1_EL1, JSCVT, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, FCMA, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, LRCPC, LRCPC3),
+ MAX_FEAT(ID_AA64ISAR1_EL1, GPA, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, GPI, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, FRINTTS, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, SB, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX),
+ MAX_FEAT(ID_AA64ISAR1_EL1, BF16, EBF16),
+ MAX_FEAT(ID_AA64ISAR1_EL1, DGH, IMP),
+ MAX_FEAT(ID_AA64ISAR1_EL1, I8MM, IMP),
+ FEAT_END
+};
-static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
-{
- u64 set_mask;
+static const struct pvm_ftr_bits pvmid_aa64isar2[] = {
+ MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, GPA3, IMP, vm_has_ptrauth),
+ MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, APA3, PAuth, vm_has_ptrauth),
+ MAX_FEAT(ID_AA64ISAR2_EL1, ATS1A, IMP),
+ FEAT_END
+};
- set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
- PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);
+/*
+ * None of the features in ID_AA64DFR0_EL1 or ID_AA64MMFR4_EL1 are supported.
+ * However, both have Not-Implemented values that are non-zero. Define them
+ * so they can be used when getting the value of these registers.
+ */
+#define ID_AA64DFR0_EL1_NONZERO_NI \
+( \
+ SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, MTPMU, NI) \
+)
- return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
-}
+#define ID_AA64MMFR4_EL1_NONZERO_NI \
+ SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI)
-static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
+/*
+ * Returns the value of the feature registers based on the system register
+ * value, the vcpu support for the relevant features, and the additional
+ * restrictions for protected VMs.
+ */
+static u64 get_restricted_features(const struct kvm_vcpu *vcpu,
+ u64 sys_reg_val,
+ const struct pvm_ftr_bits restrictions[])
{
- return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
-}
+ u64 val = 0UL;
+ int i;
+
+ for (i = 0; restrictions[i].width != 0; i++) {
+ bool (*vm_supported)(const struct kvm *) = restrictions[i].vm_supported;
+ bool sign = restrictions[i].sign;
+ int shift = restrictions[i].shift;
+ int width = restrictions[i].width;
+ u64 min_signed = (1UL << width) - 1UL;
+ u64 sign_bit = 1UL << (width - 1);
+ u64 mask = GENMASK_ULL(width + shift - 1, shift);
+ u64 sys_val = (sys_reg_val & mask) >> shift;
+ u64 pvm_max = restrictions[i].max_val;
+
+ if (vm_supported && !vm_supported(vcpu->kvm))
+ val |= (sign ? min_signed : 0) << shift;
+ else if (sign && (sys_val >= sign_bit || pvm_max >= sign_bit))
+ val |= max(sys_val, pvm_max) << shift;
+ else
+ val |= min(sys_val, pvm_max) << shift;
+ }
-static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
-{
- return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
+ return val;
}
-/* Read a sanitized cpufeature ID register by its encoding */
-u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
switch (id) {
case SYS_ID_AA64PFR0_EL1:
- return get_pvm_id_aa64pfr0(vcpu);
+ return get_restricted_features(vcpu, id_aa64pfr0_el1_sys_val, pvmid_aa64pfr0);
case SYS_ID_AA64PFR1_EL1:
- return get_pvm_id_aa64pfr1(vcpu);
- case SYS_ID_AA64ZFR0_EL1:
- return get_pvm_id_aa64zfr0(vcpu);
- case SYS_ID_AA64DFR0_EL1:
- return get_pvm_id_aa64dfr0(vcpu);
- case SYS_ID_AA64DFR1_EL1:
- return get_pvm_id_aa64dfr1(vcpu);
- case SYS_ID_AA64AFR0_EL1:
- return get_pvm_id_aa64afr0(vcpu);
- case SYS_ID_AA64AFR1_EL1:
- return get_pvm_id_aa64afr1(vcpu);
+ return get_restricted_features(vcpu, id_aa64pfr1_el1_sys_val, pvmid_aa64pfr1);
case SYS_ID_AA64ISAR0_EL1:
- return get_pvm_id_aa64isar0(vcpu);
+ return id_aa64isar0_el1_sys_val;
case SYS_ID_AA64ISAR1_EL1:
- return get_pvm_id_aa64isar1(vcpu);
+ return get_restricted_features(vcpu, id_aa64isar1_el1_sys_val, pvmid_aa64isar1);
case SYS_ID_AA64ISAR2_EL1:
- return get_pvm_id_aa64isar2(vcpu);
+ return get_restricted_features(vcpu, id_aa64isar2_el1_sys_val, pvmid_aa64isar2);
case SYS_ID_AA64MMFR0_EL1:
- return get_pvm_id_aa64mmfr0(vcpu);
+ return get_restricted_features(vcpu, id_aa64mmfr0_el1_sys_val, pvmid_aa64mmfr0);
case SYS_ID_AA64MMFR1_EL1:
- return get_pvm_id_aa64mmfr1(vcpu);
+ return get_restricted_features(vcpu, id_aa64mmfr1_el1_sys_val, pvmid_aa64mmfr1);
case SYS_ID_AA64MMFR2_EL1:
- return get_pvm_id_aa64mmfr2(vcpu);
+ return get_restricted_features(vcpu, id_aa64mmfr2_el1_sys_val, pvmid_aa64mmfr2);
+ case SYS_ID_AA64DFR0_EL1:
+ return ID_AA64DFR0_EL1_NONZERO_NI;
+ case SYS_ID_AA64MMFR4_EL1:
+ return ID_AA64MMFR4_EL1_NONZERO_NI;
default:
/* Unhandled ID register, RAZ */
return 0;
}
}
+/*
+ * Inject an unknown/undefined exception to an AArch64 guest while most of its
+ * sysregs are live.
+ */
+static void inject_undef64(struct kvm_vcpu *vcpu)
+{
+ u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+
+ *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
+
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
+
+ __kvm_adjust_pc(vcpu);
+
+ write_sysreg_el1(esr, SYS_ESR);
+ write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
+ write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+}
+
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r)
{
- return pvm_read_id_reg(vcpu, reg_to_encoding(r));
+ struct kvm *kvm = vcpu->kvm;
+ u32 reg = reg_to_encoding(r);
+
+ if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
+ return 0;
+
+ if (reg >= sys_reg(3, 0, 0, 1, 0) && reg <= sys_reg(3, 0, 0, 7, 7))
+ return kvm->arch.id_regs[IDREG_IDX(reg)];
+
+ return 0;
}
/* Handler to RAZ/WI sysregs */
@@ -271,13 +304,6 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
return false;
}
- /*
- * No support for AArch32 guests, therefore, pKVM has no sanitized copy
- * of AArch32 feature id registers.
- */
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP);
-
return pvm_access_raz_wi(vcpu, p, r);
}
@@ -449,6 +475,30 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
};
/*
+ * Initializes feature registers for protected VMs.
+ */
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_arch *ka = &kvm->arch;
+ u32 r;
+
+ hyp_assert_lock_held(&vm_table_lock);
+
+ if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+ return;
+
+ /*
+ * Initialize only AArch64 id registers since AArch32 isn't supported
+ * for protected VMs.
+ */
+ for (r = sys_reg(3, 0, 0, 4, 0); r <= sys_reg(3, 0, 0, 7, 7); r += sys_reg(0, 0, 0, 0, 1))
+ ka->id_regs[IDREG_IDX(r)] = pvm_calc_id_reg(vcpu, r);
+
+ set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+}
+
+/*
* Checks that the sysreg table is unique and in-order.
*
* Returns 0 if the table is consistent, or 1 otherwise.
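As an aside, the per-field clamping that get_restricted_features() performs above can be reproduced in a few lines of standalone C; the field position, width and ceiling below are purely illustrative and not taken from any real ID register:

```c
#include <stdint.h>
#include <stdio.h>

/* Clamp one ID-register field to a ceiling; layout values are illustrative. */
static uint64_t clamp_field(uint64_t reg, unsigned int shift, unsigned int width,
                            uint64_t max_val, int is_signed)
{
    uint64_t mask = ((1ULL << width) - 1) << shift;
    uint64_t sys  = (reg & mask) >> shift;
    uint64_t sbit = 1ULL << (width - 1);

    /* Signed fields: a "negative" encoding means not implemented, keep the larger. */
    if (is_signed && (sys >= sbit || max_val >= sbit))
        return (sys > max_val ? sys : max_val) << shift;

    return (sys < max_val ? sys : max_val) << shift;
}

int main(void)
{
    /* Host reports 0x2 in an unsigned field at bits [7:4]; with a ceiling of
     * 0x1 the guest-visible field collapses to 0x1 (printed in place as 0x10). */
    uint64_t host = 0x2ULL << 4;

    printf("guest view: %#llx\n",
           (unsigned long long)clamp_field(host, 4, 4, 0x1, 0));
    return 0;
}
```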
diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
index 3aaab20ae5b4..ff176f4ce7de 100644
--- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
@@ -22,15 +22,16 @@ void __kvm_timer_set_cntvoff(u64 cntvoff)
*/
void __timer_disable_traps(struct kvm_vcpu *vcpu)
{
- u64 val, shift = 0;
+ u64 set, clr, shift = 0;
if (has_hvhe())
shift = 10;
/* Allow physical timer/counter access for the host */
- val = read_sysreg(cnthctl_el2);
- val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift;
- write_sysreg(val, cnthctl_el2);
+ set = (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift;
+ clr = CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT;
+
+ sysreg_clear_set(cnthctl_el2, clr, set);
}
/*
@@ -58,5 +59,12 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu)
set <<= 10;
}
+ /*
+ * Trap the virtual counter/timer if we have a broken cntvoff
+ * implementation.
+ */
+ if (has_broken_cntvoff())
+ set |= CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT;
+
sysreg_clear_set(cnthctl_el2, clr, set);
}
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index d2b6fa051d6b..df5cc74a7dd0 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1232,14 +1232,13 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
NULL, NULL, 0);
}
-void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
+void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags)
{
int ret;
ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
- NULL, NULL,
- KVM_PGTABLE_WALK_HANDLE_FAULT |
- KVM_PGTABLE_WALK_SHARED);
+ NULL, NULL, flags);
if (!ret)
dsb(ishst);
}
@@ -1295,7 +1294,7 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
}
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
- enum kvm_pgtable_prot prot)
+ enum kvm_pgtable_prot prot, enum kvm_pgtable_walk_flags flags)
{
int ret;
s8 level;
@@ -1313,9 +1312,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
if (prot & KVM_PGTABLE_PROT_X)
clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
- KVM_PGTABLE_WALK_HANDLE_FAULT |
- KVM_PGTABLE_WALK_SHARED);
+ ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags);
if (!ret || ret == -EAGAIN)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
return ret;
diff --git a/arch/arm64/kvm/hyp/vhe/debug-sr.c b/arch/arm64/kvm/hyp/vhe/debug-sr.c
index 289689b2682d..0100339b09e0 100644
--- a/arch/arm64/kvm/hyp/vhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/debug-sr.c
@@ -19,8 +19,3 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
__debug_switch_to_host_common(vcpu);
}
-
-u64 __kvm_get_mdcr_el2(void)
-{
- return read_sysreg(mdcr_el2);
-}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 59d992455793..b5b9dbaf1fdd 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -256,6 +256,110 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;
}
+static u64 compute_emulated_cntx_ctl_el0(struct kvm_vcpu *vcpu,
+ enum vcpu_sysreg reg)
+{
+ unsigned long ctl;
+ u64 cval, cnt;
+ bool stat;
+
+ switch (reg) {
+ case CNTP_CTL_EL0:
+ cval = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ ctl = __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+ cnt = compute_counter_value(vcpu_ptimer(vcpu));
+ break;
+ case CNTV_CTL_EL0:
+ cval = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+ ctl = __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+ cnt = compute_counter_value(vcpu_vtimer(vcpu));
+ break;
+ default:
+ BUG();
+ }
+
+ stat = cval <= cnt;
+ __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &ctl, stat);
+
+ return ctl;
+}
+
+static bool kvm_hyp_handle_timer(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ u64 esr, val;
+
+ /*
+ * Having FEAT_ECV allows for a better quality of timer emulation.
+ * However, this comes at a huge cost in terms of traps. Try and
+ * satisfy the reads from the guest's hypervisor context without
+ * returning to the kernel if we can.
+ */
+ if (!is_hyp_ctxt(vcpu))
+ return false;
+
+ esr = kvm_vcpu_get_esr(vcpu);
+ if ((esr & ESR_ELx_SYS64_ISS_DIR_MASK) != ESR_ELx_SYS64_ISS_DIR_READ)
+ return false;
+
+ switch (esr_sys64_to_sysreg(esr)) {
+ case SYS_CNTP_CTL_EL02:
+ val = compute_emulated_cntx_ctl_el0(vcpu, CNTP_CTL_EL0);
+ break;
+ case SYS_CNTP_CTL_EL0:
+ if (vcpu_el2_e2h_is_set(vcpu))
+ val = read_sysreg_el0(SYS_CNTP_CTL);
+ else
+ val = compute_emulated_cntx_ctl_el0(vcpu, CNTP_CTL_EL0);
+ break;
+ case SYS_CNTP_CVAL_EL02:
+ val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ break;
+ case SYS_CNTP_CVAL_EL0:
+ if (vcpu_el2_e2h_is_set(vcpu)) {
+ val = read_sysreg_el0(SYS_CNTP_CVAL);
+
+ if (!has_cntpoff())
+ val -= timer_get_offset(vcpu_hptimer(vcpu));
+ } else {
+ val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ }
+ break;
+ case SYS_CNTPCT_EL0:
+ case SYS_CNTPCTSS_EL0:
+ val = compute_counter_value(vcpu_hptimer(vcpu));
+ break;
+ case SYS_CNTV_CTL_EL02:
+ val = compute_emulated_cntx_ctl_el0(vcpu, CNTV_CTL_EL0);
+ break;
+ case SYS_CNTV_CTL_EL0:
+ if (vcpu_el2_e2h_is_set(vcpu))
+ val = read_sysreg_el0(SYS_CNTV_CTL);
+ else
+ val = compute_emulated_cntx_ctl_el0(vcpu, CNTV_CTL_EL0);
+ break;
+ case SYS_CNTV_CVAL_EL02:
+ val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+ break;
+ case SYS_CNTV_CVAL_EL0:
+ if (vcpu_el2_e2h_is_set(vcpu))
+ val = read_sysreg_el0(SYS_CNTV_CVAL);
+ else
+ val = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+ break;
+ case SYS_CNTVCT_EL0:
+ case SYS_CNTVCTSS_EL0:
+ val = compute_counter_value(vcpu_hvtimer(vcpu));
+ break;
+ default:
+ return false;
+ }
+
+ vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
+ __kvm_skip_instr(vcpu);
+
+ return true;
+}
+
static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
@@ -409,6 +513,9 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
if (kvm_hyp_handle_tlbi_el2(vcpu, exit_code))
return true;
+ if (kvm_hyp_handle_timer(vcpu, exit_code))
+ return true;
+
if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
return true;
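The emulated ISTATUS bit computed by compute_emulated_cntx_ctl_el0() above boils down to comparing the compare value with the current counter; a standalone sketch of just that condition (none of this is the kernel helper itself):

```c
#include <stdint.h>
#include <stdio.h>

#define TIMER_CTL_ENABLE  (1u << 0)
#define TIMER_CTL_IMASK   (1u << 1)
#define TIMER_CTL_ISTATUS (1u << 2)

/* Recompute the read-only ISTATUS bit of a CNT*_CTL image from CVAL and the counter. */
static uint32_t emulate_ctl(uint32_t ctl, uint64_t cval, uint64_t cnt)
{
    if (cval <= cnt)
        ctl |= TIMER_CTL_ISTATUS;
    else
        ctl &= ~TIMER_CTL_ISTATUS;
    return ctl;
}

int main(void)
{
    uint32_t ctl = TIMER_CTL_ENABLE;

    printf("fired: %#x\n", emulate_ctl(ctl, 100, 150)); /* ISTATUS set   */
    printf("armed: %#x\n", emulate_ctl(ctl, 200, 150)); /* ISTATUS clear */
    return 0;
}
```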
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 5f78a39053a7..90b018e06f2c 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -216,7 +216,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
__sysreg32_restore_state(vcpu);
__sysreg_restore_user_state(guest_ctxt);
- if (unlikely(__is_hyp_ctxt(guest_ctxt))) {
+ if (unlikely(is_hyp_ctxt(vcpu))) {
__sysreg_restore_vel2_state(vcpu);
} else {
if (vcpu_has_nv(vcpu)) {
@@ -260,7 +260,7 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
host_ctxt = host_data_ptr(host_ctxt);
- if (unlikely(__is_hyp_ctxt(guest_ctxt)))
+ if (unlikely(is_hyp_ctxt(vcpu)))
__sysreg_save_vel2_state(vcpu);
else
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c9d46ad57e52..1f55b0c7b11d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -15,6 +15,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -29,8 +30,12 @@ static unsigned long __ro_after_init hyp_idmap_start;
static unsigned long __ro_after_init hyp_idmap_end;
static phys_addr_t __ro_after_init hyp_idmap_vector;
+u32 __ro_after_init __hyp_va_bits;
+
static unsigned long __ro_after_init io_map_base;
+#define KVM_PGT_FN(fn) (!is_protected_kvm_enabled() ? fn : p ## fn)
+
static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
phys_addr_t size)
{
@@ -147,7 +152,7 @@ static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
return -EINVAL;
next = __stage2_range_addr_end(addr, end, chunk_size);
- ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_split)(pgt, addr, next - addr, cache);
if (ret)
break;
} while (addr = next, addr != end);
@@ -168,15 +173,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
*/
int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
+ if (is_protected_kvm_enabled())
+ kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle);
+ else
+ kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
return 0;
}
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
gfn_t gfn, u64 nr_pages)
{
- kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
- gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ u64 size = nr_pages << PAGE_SHIFT;
+ u64 addr = gfn << PAGE_SHIFT;
+
+ if (is_protected_kvm_enabled())
+ kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle);
+ else
+ kvm_tlb_flush_vmid_range(&kvm->arch.mmu, addr, size);
return 0;
}
@@ -225,7 +238,7 @@ static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
void *pgtable = page_to_virt(page);
s8 level = page_private(page);
- kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
+ KVM_PGT_FN(kvm_pgtable_stage2_free_unlinked)(&kvm_s2_mm_ops, pgtable, level);
}
static void stage2_free_unlinked_table(void *addr, s8 level)
@@ -324,7 +337,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
lockdep_assert_held_write(&kvm->mmu_lock);
WARN_ON(size & ~PAGE_MASK);
- WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
+ WARN_ON(stage2_apply_range(mmu, start, end, KVM_PGT_FN(kvm_pgtable_stage2_unmap),
may_block));
}
@@ -336,7 +349,7 @@ void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
- stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_flush);
+ stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_flush));
}
static void stage2_flush_memslot(struct kvm *kvm,
@@ -704,10 +717,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
mutex_lock(&kvm_hyp_pgd_mutex);
/*
- * Efficient stack verification using the PAGE_SHIFT bit implies
+ * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
* an alignment of our allocation on the order of the size.
*/
- size = PAGE_SIZE * 2;
+ size = NVHE_STACK_SIZE * 2;
base = ALIGN_DOWN(io_map_base - size, size);
ret = __hyp_alloc_private_va_range(base);
@@ -724,12 +737,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
* at the higher address and leave the lower guard page
* unbacked.
*
- * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
* and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
*/
- ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
- PAGE_HYP);
+ ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+ phys_addr, PAGE_HYP);
if (ret)
kvm_err("Cannot map hyp stack\n");
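A rough standalone illustration of the overflow check this layout enables, assuming a 64KiB NVHE_STACK_SIZE (the real size is configuration dependent) and the 2*NVHE_STACK_SIZE alignment established above:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVHE_STACK_SHIFT 16                       /* assumed: 64KiB stacks */
#define NVHE_STACK_SIZE  (1UL << NVHE_STACK_SHIFT)

/*
 * With the stack mapped in the upper half of a 2*NVHE_STACK_SIZE aligned
 * region and the lower half left unbacked, any SP inside the stack has the
 * NVHE_STACK_SHIFT bit set, while an SP that overflowed into the guard
 * page has it clear.
 */
static bool sp_in_guard_page(uint64_t sp)
{
    return !(sp & NVHE_STACK_SIZE);
}

int main(void)
{
    uint64_t base = 0x40000000UL;                 /* 2*NVHE_STACK_SIZE aligned */

    printf("valid stack addr: %d\n", sp_in_guard_page(base + NVHE_STACK_SIZE + 0x100));
    printf("overflowed addr:  %d\n", sp_in_guard_page(base + 0x100));
    return 0;
}
```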
@@ -942,10 +955,14 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
return -ENOMEM;
mmu->arch = &kvm->arch;
- err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
+ err = KVM_PGT_FN(kvm_pgtable_stage2_init)(pgt, mmu, &kvm_s2_mm_ops);
if (err)
goto out_free_pgtable;
+ mmu->pgt = pgt;
+ if (is_protected_kvm_enabled())
+ return 0;
+
mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
if (!mmu->last_vcpu_ran) {
err = -ENOMEM;
@@ -959,7 +976,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
mmu->split_page_cache.gfp_zero = __GFP_ZERO;
- mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
if (kvm_is_nested_s2_mmu(kvm, mmu))
@@ -968,7 +984,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
return 0;
out_destroy_pgtable:
- kvm_pgtable_stage2_destroy(pgt);
+ KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
out_free_pgtable:
kfree(pgt);
return err;
@@ -1065,7 +1081,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
write_unlock(&kvm->mmu_lock);
if (pgt) {
- kvm_pgtable_stage2_destroy(pgt);
+ KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
kfree(pgt);
}
}
@@ -1082,9 +1098,11 @@ static void *hyp_mc_alloc_fn(void *unused)
void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
- if (is_protected_kvm_enabled())
- __free_hyp_memcache(mc, hyp_mc_free_fn,
- kvm_host_va, NULL);
+ if (!is_protected_kvm_enabled())
+ return;
+
+ kfree(mc->mapping);
+ __free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
}
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
@@ -1092,6 +1110,12 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
if (!is_protected_kvm_enabled())
return 0;
+ if (!mc->mapping) {
+ mc->mapping = kzalloc(sizeof(struct pkvm_mapping), GFP_KERNEL_ACCOUNT);
+ if (!mc->mapping)
+ return -ENOMEM;
+ }
+
return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
kvm_host_pa, NULL);
}
@@ -1130,8 +1154,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
break;
write_lock(&kvm->mmu_lock);
- ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
- &cache, 0);
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, addr, PAGE_SIZE,
+ pa, prot, &cache, 0);
write_unlock(&kvm->mmu_lock);
if (ret)
break;
@@ -1151,7 +1175,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
*/
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
- stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
+ stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_wrprotect));
}
/**
@@ -1442,9 +1466,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long mmu_seq;
phys_addr_t ipa = fault_ipa;
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
short vma_shift;
+ void *memcache;
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
@@ -1452,6 +1476,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
struct page *page;
+ enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
if (fault_is_perm)
fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
@@ -1471,8 +1496,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* and a write fault needs to collapse a block entry into a table.
*/
if (!fault_is_perm || (logging_active && write_fault)) {
- ret = kvm_mmu_topup_memory_cache(memcache,
- kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
+ int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
+
+ if (!is_protected_kvm_enabled()) {
+ memcache = &vcpu->arch.mmu_page_cache;
+ ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
+ } else {
+ memcache = &vcpu->arch.pkvm_memcache;
+ ret = topup_hyp_memcache(memcache, min_pages);
+ }
if (ret)
return ret;
}
@@ -1493,7 +1525,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* logging_active is guaranteed to never be true for VM_PFNMAP
* memslots.
*/
- if (logging_active) {
+ if (logging_active || is_protected_kvm_enabled()) {
force_pte = true;
vma_shift = PAGE_SHIFT;
} else {
@@ -1633,7 +1665,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
prot |= kvm_encode_nested_level(nested);
}
- read_lock(&kvm->mmu_lock);
+ kvm_fault_lock(kvm);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
@@ -1695,18 +1727,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* PTE, which will be preserved.
*/
prot &= ~KVM_NV_GUEST_MAP_SZ;
- ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault_ipa, prot, flags);
} else {
- ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
- memcache,
- KVM_PGTABLE_WALK_HANDLE_FAULT |
- KVM_PGTABLE_WALK_SHARED);
+ memcache, flags);
}
out_unlock:
kvm_release_faultin_page(kvm, page, !!ret, writable);
- read_unlock(&kvm->mmu_lock);
+ kvm_fault_unlock(kvm);
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret)
@@ -1718,13 +1748,14 @@ out_unlock:
/* Resolve the access fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
+ enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
struct kvm_s2_mmu *mmu;
trace_kvm_access_fault(fault_ipa);
read_lock(&vcpu->kvm->mmu_lock);
mmu = vcpu->arch.hw_mmu;
- kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
+ KVM_PGT_FN(kvm_pgtable_stage2_mkyoung)(mmu->pgt, fault_ipa, flags);
read_unlock(&vcpu->kvm->mmu_lock);
}
@@ -1764,7 +1795,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
}
/* Falls between the IPA range and the PARange? */
- if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+ if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
if (is_iabt)
@@ -1930,7 +1961,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
if (!kvm->arch.mmu.pgt)
return false;
- return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+ return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT,
size, true);
/*
@@ -1946,7 +1977,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
if (!kvm->arch.mmu.pgt)
return false;
- return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+ return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT,
size, false);
}
@@ -2056,6 +2087,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
goto out_destroy_pgtable;
io_map_base = hyp_idmap_start;
+ __hyp_va_bits = *hyp_va_bits;
return 0;
out_destroy_pgtable:
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 9b36218b48de..0c9387d2f507 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -67,26 +67,27 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
if (!tmp)
return -ENOMEM;
+ swap(kvm->arch.nested_mmus, tmp);
+
/*
* If we went through a reallocation, adjust the MMU back-pointers in
* the previously initialised kvm_pgtable structures.
*/
if (kvm->arch.nested_mmus != tmp)
for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
- tmp[i].pgt->mmu = &tmp[i];
+ kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
- ret = init_nested_s2_mmu(kvm, &tmp[i]);
+ ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
if (ret) {
for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
- kvm_free_stage2_pgd(&tmp[i]);
+ kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
return ret;
}
kvm->arch.nested_mmus_size = num_mmus;
- kvm->arch.nested_mmus = tmp;
return 0;
}
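The swap()-before-fixup ordering above matters because the krealloc()'d array may have moved while the separately allocated page tables still point at the old elements; a minimal userspace sketch of the same back-pointer repair (the structure names are illustrative, not the kernel's):

```c
#include <stdio.h>
#include <stdlib.h>

struct s2_mmu;
struct pgtable { struct s2_mmu *mmu; };
struct s2_mmu  { struct pgtable *pgt; };

/*
 * Grow an array of MMUs whose page tables carry a back-pointer to their
 * owning MMU.  After realloc the array may have moved, so every existing
 * back-pointer has to be re-established against the new storage before
 * anything else dereferences it.
 */
static struct s2_mmu *grow_mmus(struct s2_mmu *mmus, size_t old_n, size_t new_n)
{
    struct s2_mmu *tmp = realloc(mmus, new_n * sizeof(*tmp));

    if (!tmp)
        return NULL;

    for (size_t i = 0; i < old_n; i++)
        tmp[i].pgt->mmu = &tmp[i];          /* fix stale back-pointers */

    return tmp;
}

int main(void)
{
    struct s2_mmu *mmus = calloc(1, sizeof(*mmus));
    struct pgtable *pgt = calloc(1, sizeof(*pgt));

    mmus[0].pgt = pgt;
    pgt->mmu = &mmus[0];

    mmus = grow_mmus(mmus, 1, 8);
    printf("back-pointer consistent: %d\n", pgt->mmu == &mmus[0]);

    free(pgt);
    free(mmus);
    return 0;
}
```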
@@ -830,8 +831,10 @@ static void limit_nv_id_regs(struct kvm *kvm)
NV_FTR(PFR0, RAS) |
NV_FTR(PFR0, EL3) |
NV_FTR(PFR0, EL2) |
- NV_FTR(PFR0, EL1));
- /* 64bit EL1/EL2/EL3 only */
+ NV_FTR(PFR0, EL1) |
+ NV_FTR(PFR0, EL0));
+ /* 64bit only at any EL */
+ val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
@@ -963,14 +966,15 @@ static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0,
kvm->arch.sysreg_masks->mask[i].res1 = res1;
}
-int kvm_init_nv_sysregs(struct kvm *kvm)
+int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
u64 res0, res1;
lockdep_assert_held(&kvm->arch.config_lock);
if (kvm->arch.sysreg_masks)
- return 0;
+ goto out;
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
GFP_KERNEL_ACCOUNT);
@@ -1021,8 +1025,8 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
res0 |= HCR_NV2;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
- if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
- __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
res0 |= (HCR_API | HCR_APK);
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
res0 |= BIT(39);
@@ -1078,8 +1082,8 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
/* HFG[RW]TR_EL2 */
res0 = res1 = 0;
- if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
- __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+ if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+ kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
HFGxTR_EL2_APIBKey);
@@ -1271,6 +1275,25 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
res0 |= MDCR_EL2_EnSTEPOP;
set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
+ /* CNTHCTL_EL2 */
+ res0 = GENMASK(63, 20);
+ res1 = 0;
+ if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
+ res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
+ res0 |= CNTHCTL_ECV;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
+ res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
+ CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
+ }
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
+ res0 |= GENMASK(11, 8);
+ set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1);
+
+out:
+ for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
+ (void)__vcpu_sys_reg(vcpu, sr);
+
return 0;
}
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 85117ea8f351..930b677eb9b0 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sort.h>
@@ -268,3 +269,203 @@ static int __init finalize_pkvm(void)
return ret;
}
device_initcall_sync(finalize_pkvm);
+
+static int cmp_mappings(struct rb_node *node, const struct rb_node *parent)
+{
+ struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node);
+ struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node);
+
+ if (a->gfn < b->gfn)
+ return -1;
+ if (a->gfn > b->gfn)
+ return 1;
+ return 0;
+}
+
+static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn)
+{
+ struct rb_node *node = root->rb_node, *prev = NULL;
+ struct pkvm_mapping *mapping;
+
+ while (node) {
+ mapping = rb_entry(node, struct pkvm_mapping, node);
+ if (mapping->gfn == gfn)
+ return node;
+ prev = node;
+ node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right;
+ }
+
+ return prev;
+}
+
+/*
+ * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing
+ * of __map inline.
+ */
+#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map) \
+ for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings, \
+ ((__start) >> PAGE_SHIFT)); \
+ __tmp && ({ \
+ __map = rb_entry(__tmp, struct pkvm_mapping, node); \
+ __tmp = rb_next(__tmp); \
+ true; \
+ }); \
+ ) \
+ if (__map->gfn < ((__start) >> PAGE_SHIFT)) \
+ continue; \
+ else if (__map->gfn >= ((__end) >> PAGE_SHIFT)) \
+ break; \
+ else
+
+int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
+{
+ pgt->pkvm_mappings = RB_ROOT;
+ pgt->mmu = mmu;
+
+ return 0;
+}
+
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ pkvm_handle_t handle = kvm->arch.pkvm.handle;
+ struct pkvm_mapping *mapping;
+ struct rb_node *node;
+
+ if (!handle)
+ return;
+
+ node = rb_first(&pgt->pkvm_mappings);
+ while (node) {
+ mapping = rb_entry(node, struct pkvm_mapping, node);
+ kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
+ node = rb_next(node);
+ rb_erase(&mapping->node, &pgt->pkvm_mappings);
+ kfree(mapping);
+ }
+}
+
+int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ u64 phys, enum kvm_pgtable_prot prot,
+ void *mc, enum kvm_pgtable_walk_flags flags)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ struct pkvm_mapping *mapping = NULL;
+ struct kvm_hyp_memcache *cache = mc;
+ u64 gfn = addr >> PAGE_SHIFT;
+ u64 pfn = phys >> PAGE_SHIFT;
+ int ret;
+
+ if (size != PAGE_SIZE)
+ return -EINVAL;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
+ if (ret) {
+ /* Is the gfn already mapped due to a racing vCPU? */
+ if (ret == -EPERM)
+ return -EAGAIN;
+ }
+
+ swap(mapping, cache->mapping);
+ mapping->gfn = gfn;
+ mapping->pfn = pfn;
+ WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings));
+
+ return ret;
+}
+
+int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ pkvm_handle_t handle = kvm->arch.pkvm.handle;
+ struct pkvm_mapping *mapping;
+ int ret = 0;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
+ ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
+ if (WARN_ON(ret))
+ break;
+ rb_erase(&mapping->node, &pgt->pkvm_mappings);
+ kfree(mapping);
+ }
+
+ return ret;
+}
+
+int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ pkvm_handle_t handle = kvm->arch.pkvm.handle;
+ struct pkvm_mapping *mapping;
+ int ret = 0;
+
+ lockdep_assert_held(&kvm->mmu_lock);
+ for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
+ ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn);
+ if (WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
+
+int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ struct pkvm_mapping *mapping;
+
+ lockdep_assert_held(&kvm->mmu_lock);
+ for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
+ __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+
+ return 0;
+}
+
+bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold)
+{
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+ pkvm_handle_t handle = kvm->arch.pkvm.handle;
+ struct pkvm_mapping *mapping;
+ bool young = false;
+
+ lockdep_assert_held(&kvm->mmu_lock);
+ for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
+ young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
+ mkold);
+
+ return young;
+}
+
+int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
+ enum kvm_pgtable_walk_flags flags)
+{
+ return kvm_call_hyp_nvhe(__pkvm_host_relax_perms_guest, addr >> PAGE_SHIFT, prot);
+}
+
+void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags)
+{
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_mkyoung_guest, addr >> PAGE_SHIFT));
+}
+
+void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
+{
+ WARN_ON_ONCE(1);
+}
+
+kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
+ enum kvm_pgtable_prot prot, void *mc, bool force_pte)
+{
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct kvm_mmu_memory_cache *mc)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
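Functionally, the gfn-keyed rb-tree lookup above amounts to a lower-bound search followed by an in-order walk of the range; the same idea over a sorted array, as a standalone sketch rather than the rb-tree code itself:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t gfn, pfn; };

/* Index of the first mapping with gfn >= start (classic lower bound). */
static size_t first_at_or_above(const struct mapping *m, size_t n, uint64_t start)
{
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (m[mid].gfn < start)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

int main(void)
{
    const struct mapping maps[] = {
        { .gfn = 0x10, .pfn = 0x100 },
        { .gfn = 0x11, .pfn = 0x101 },
        { .gfn = 0x40, .pfn = 0x140 },
    };
    uint64_t start = 0x11, end = 0x41;

    /* Walk every mapping whose gfn falls in [start, end). */
    for (size_t i = first_at_or_above(maps, 3, start);
         i < 3 && maps[i].gfn < end; i++)
        printf("gfn %#llx -> pfn %#llx\n",
               (unsigned long long)maps[i].gfn,
               (unsigned long long)maps[i].pfn);
    return 0;
}
```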
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 470524b31951..803e11b0dc8f 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -85,7 +85,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
* KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
* kvm_arm_vcpu_finalize(), which freezes the configuration.
*/
- vcpu_set_flag(vcpu, GUEST_HAS_SVE);
+ set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags);
}
/*
@@ -211,10 +211,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_vcpu_reset_sve(vcpu);
}
- if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
- vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
- kvm_vcpu_enable_ptrauth(vcpu);
-
if (vcpu_el1_is_32bit(vcpu))
pstate = VCPU_RESET_PSTATE_SVC;
else if (vcpu_has_nv(vcpu))
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index 3ace5b75813b..af5eec681127 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -19,6 +19,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
#include <asm/stacktrace/nvhe.h>
static struct stack_info stackinfo_get_overflow(void)
@@ -50,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->stack_base;
- unsigned long high = low + PAGE_SIZE;
+ unsigned long high = low + NVHE_STACK_SIZE;
return (struct stack_info) {
.low = low,
@@ -60,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
static struct stack_info stackinfo_get_hyp_kern_va(void)
{
- unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
- unsigned long high = low + PAGE_SIZE;
+ unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+ unsigned long high = low + NVHE_STACK_SIZE;
return (struct stack_info) {
.low = low,
@@ -145,7 +146,7 @@ static void unwind(struct unwind_state *state,
*/
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
- unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+ unsigned long va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0);
unsigned long hyp_offset = (unsigned long)arg;
/* Mask tags and convert to kern addr */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e4749ecbcd79..82430c1e1dd0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -570,17 +570,10 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 oslsr;
-
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
- /* Forward the OSLK bit to OSLSR */
- oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
- if (p->regval & OSLAR_EL1_OSLK)
- oslsr |= OSLSR_EL1_OSLK;
-
- __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
+ kvm_debug_handle_oslar(vcpu, p->regval);
return true;
}
@@ -621,43 +614,13 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
}
}
-/*
- * We want to avoid world-switching all the DBG registers all the
- * time:
- *
- * - If we've touched any debug register, it is likely that we're
- * going to touch more of them. It then makes sense to disable the
- * traps and start doing the save/restore dance
- * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
- * then mandatory to save/restore the registers, as the guest
- * depends on them.
- *
- * For this, we use a DIRTY bit, indicating the guest has modified the
- * debug registers, used as follow:
- *
- * On guest entry:
- * - If the dirty bit is set (because we're coming back from trapping),
- * disable the traps, save host registers, restore guest registers.
- * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
- * set the dirty bit, disable the traps, save host registers,
- * restore guest registers.
- * - Otherwise, enable the traps
- *
- * On guest exit:
- * - If the dirty bit is set, save guest registers, restore host
- * registers and clear the dirty bit. This ensure that the host can
- * now use the debug registers.
- */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
access_rw(vcpu, p, r);
- if (p->is_write)
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
-
- trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
+ kvm_debug_set_guest_ownership(vcpu);
return true;
}
@@ -666,9 +629,6 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
*
* A 32 bit write to a debug register leave top bits alone
* A 32 bit read from a debug register only returns the bottom bits
- *
- * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
- * switches between host and guest values in future.
*/
static void reg_to_dbg(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
@@ -683,8 +643,6 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu,
val &= ~mask;
val |= (p->regval & (mask >> shift)) << shift;
*dbg_reg = val;
-
- vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
@@ -698,152 +656,79 @@ static void dbg_to_reg(struct kvm_vcpu *vcpu,
p->regval = (*dbg_reg & mask) >> shift;
}
-static bool trap_bvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
-
- return true;
-}
-
-static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
+static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
- return 0;
-}
+ struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
-static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
-{
- *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
- return 0;
+ switch (rd->Op2) {
+ case 0b100:
+ return &dbg->dbg_bvr[rd->CRm];
+ case 0b101:
+ return &dbg->dbg_bcr[rd->CRm];
+ case 0b110:
+ return &dbg->dbg_wvr[rd->CRm];
+ case 0b111:
+ return &dbg->dbg_wcr[rd->CRm];
+ default:
+ KVM_BUG_ON(1, vcpu->kvm);
+ return NULL;
+ }
}
-static u64 reset_bvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
+static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
- return rd->val;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static bool trap_bcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+ if (!reg)
+ return false;
if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
+ reg_to_dbg(vcpu, p, rd, reg);
else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+ dbg_to_reg(vcpu, p, rd, reg);
+ kvm_debug_set_guest_ownership(vcpu);
return true;
}
-static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
-{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
- return 0;
-}
-
-static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
+static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 val)
{
- *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
- return 0;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static u64 reset_bcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
- return rd->val;
-}
-
-static bool trap_wvr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
-
- trace_trap_reg(__func__, rd->CRm, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
-
- return true;
-}
-
-static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
-{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
- return 0;
-}
+ if (!reg)
+ return -EINVAL;
-static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
-{
- *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+ *reg = val;
return 0;
}
-static u64 reset_wvr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
- return rd->val;
-}
-
-static bool trap_wcr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
+static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 *val)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
-
- if (p->is_write)
- reg_to_dbg(vcpu, p, rd, dbg_reg);
- else
- dbg_to_reg(vcpu, p, rd, dbg_reg);
+ u64 *reg = demux_wb_reg(vcpu, rd);
- trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
-
- return true;
-}
+ if (!reg)
+ return -EINVAL;
-static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 val)
-{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
+ *val = *reg;
return 0;
}
-static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- u64 *val)
+static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
- *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
- return 0;
-}
+ u64 *reg = demux_wb_reg(vcpu, rd);
-static u64 reset_wcr(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ /*
+ * Bail early if we couldn't find storage for the register; the
+ * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
+ * being run.
+ */
+ if (!reg)
+ return 0;
+
+ *reg = rd->val;
return rd->val;
}
@@ -1350,13 +1235,17 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
- trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
- trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
- trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }, \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
- trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+ trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
+ get_dbg_wb_reg, set_dbg_wb_reg }
#define PMU_SYS_REG(name) \
SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
@@ -1410,26 +1299,146 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
switch (reg) {
case SYS_CNTP_TVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTV_TVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
case SYS_AARCH32_CNTP_TVAL:
+ case SYS_CNTP_TVAL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_TVAL;
break;
+
+ case SYS_CNTV_TVAL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTHP_TVAL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
+ case SYS_CNTHV_TVAL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+
case SYS_CNTP_CTL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTV_CTL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
case SYS_AARCH32_CNTP_CTL:
+ case SYS_CNTP_CTL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CTL;
break;
+
+ case SYS_CNTV_CTL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTHP_CTL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
+ case SYS_CNTHV_CTL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+
case SYS_CNTP_CVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTV_CVAL_EL0:
+ if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
case SYS_AARCH32_CNTP_CVAL:
+ case SYS_CNTP_CVAL_EL02:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CVAL;
break;
+
+ case SYS_CNTV_CVAL_EL02:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTHP_CVAL_EL2:
+ tmr = TIMER_HPTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
+ case SYS_CNTHV_CVAL_EL2:
+ tmr = TIMER_HVTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+
case SYS_CNTPCT_EL0:
case SYS_CNTPCTSS_EL0:
+ if (is_hyp_ctxt(vcpu))
+ tmr = TIMER_HPTIMER;
+ else
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
case SYS_AARCH32_CNTPCT:
+ case SYS_AARCH32_CNTPCTSS:
tmr = TIMER_PTIMER;
treg = TIMER_REG_CNT;
break;
+
+ case SYS_CNTVCT_EL0:
+ case SYS_CNTVCTSS_EL0:
+ if (is_hyp_ctxt(vcpu))
+ tmr = TIMER_HVTIMER;
+ else
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
+ case SYS_AARCH32_CNTVCT:
+ case SYS_AARCH32_CNTVCTSS:
+ tmr = TIMER_VTIMER;
+ treg = TIMER_REG_CNT;
+ break;
+
default:
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
return undef_access(vcpu, p, r);
@@ -1443,6 +1452,16 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
+static bool access_hv_timer(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ return undef_access(vcpu, p, r);
+
+ return access_arch_timer(vcpu, p, r);
+}
+
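The E2H-dependent redirection above is the crux of the new timer handling: an EL0 timer access made from a vEL2 guest with HCR_EL2.E2H set is backed by the EL2 timer rather than the EL1 one. A minimal standalone sketch of that selection, with plain booleans standing in for is_hyp_ctxt()/vcpu_el2_e2h_is_set() (not the KVM helpers themselves):

/* Sketch of the CNTP_* redirection above: hyp context + E2H selects the
 * EL2 physical timer, anything else the EL1 physical timer. */
#include <stdio.h>

enum timer { TIMER_PTIMER, TIMER_HPTIMER };

static enum timer pick_ptimer(int hyp_ctxt, int e2h)
{
        return (hyp_ctxt && e2h) ? TIMER_HPTIMER : TIMER_PTIMER;
}

int main(void)
{
        printf("guest EL1:  %d\n", pick_ptimer(0, 1));  /* TIMER_PTIMER  */
        printf("vEL2 + E2H: %d\n", pick_ptimer(1, 1));  /* TIMER_HPTIMER */
        return 0;
}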
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
s64 new, s64 cur)
{
@@ -1599,7 +1618,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
- if (!cpus_have_final_cap(ARM64_HAS_WFXT))
+ if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
+ has_broken_cntvoff())
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
break;
case SYS_ID_AA64ISAR3_EL1:
@@ -1807,6 +1827,9 @@ static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
/* Hide SPE from guests */
val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
+ /* Hide BRBE from guests */
+ val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
+
return val;
}
@@ -2924,11 +2947,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
AMU_AMEVTYPER1_EL0(15),
{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
+
/* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0),
PMU_PMEVCNTR_EL0(1),
@@ -3080,9 +3109,24 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
+ { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
+ EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
+ EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
+
+ { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
+ EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
+ EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
+ { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
+
+ { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
+ { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
+
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
@@ -3594,18 +3638,20 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
* None of the other registers share their location, so treat them as
* if they were 64bit.
*/
-#define DBG_BCR_BVR_WCR_WVR(n) \
- /* DBGBVRn */ \
- { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
- /* DBGBCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
- /* DBGWVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
- /* DBGWCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
-
-#define DBGBXVR(n) \
- { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
+ trap_dbg_wb_reg, NULL, n }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
+
+#define DBGBXVR(n) \
+ { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
+ trap_dbg_wb_reg, NULL, n }
/*
* Trapped cp14 registers. We generally ignore most of the external
@@ -3902,9 +3948,11 @@ static const struct sys_reg_desc cp15_64_regs[] = {
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
+ { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
+ { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
@@ -4419,6 +4467,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
reset_vcpu_ftr_id_reg(vcpu, r);
else
r->reset(vcpu, r);
+
+ if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
+ (void)__vcpu_sys_reg(vcpu, r->reg);
}
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
@@ -4995,6 +5046,14 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
HAFGRTR_EL2_RES1);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) {
+ kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA |
+ HDFGRTR_EL2_nBRBCTL |
+ HDFGRTR_EL2_nBRBIDR);
+ kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ |
+ HFGITR_EL2_nBRBIALL);
+ }
+
set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
mutex_unlock(&kvm->arch.config_lock);
@@ -5022,7 +5081,7 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
}
if (vcpu_has_nv(vcpu)) {
- int ret = kvm_init_nv_sysregs(kvm);
+ int ret = kvm_init_nv_sysregs(vcpu);
if (ret)
return ret;
}
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 064a58c19f48..f85415db7713 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -46,38 +46,6 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
-TRACE_EVENT(kvm_arm_setup_debug,
- TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
- TP_ARGS(vcpu, guest_debug),
-
- TP_STRUCT__entry(
- __field(struct kvm_vcpu *, vcpu)
- __field(__u32, guest_debug)
- ),
-
- TP_fast_assign(
- __entry->vcpu = vcpu;
- __entry->guest_debug = guest_debug;
- ),
-
- TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
-);
-
-TRACE_EVENT(kvm_arm_clear_debug,
- TP_PROTO(__u32 guest_debug),
- TP_ARGS(guest_debug),
-
- TP_STRUCT__entry(
- __field(__u32, guest_debug)
- ),
-
- TP_fast_assign(
- __entry->guest_debug = guest_debug;
- ),
-
- TP_printk("flags: 0x%08x", __entry->guest_debug)
-);
-
/*
* The dreg32 name is a leftover from a distant past. This will really
* output a 64bit value...
@@ -99,49 +67,6 @@ TRACE_EVENT(kvm_arm_set_dreg32,
TP_printk("%s: 0x%llx", __entry->name, __entry->value)
);
-TRACE_DEFINE_SIZEOF(__u64);
-
-TRACE_EVENT(kvm_arm_set_regset,
- TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
- TP_ARGS(type, len, control, value),
- TP_STRUCT__entry(
- __field(const char *, name)
- __field(int, len)
- __array(u64, ctrls, 16)
- __array(u64, values, 16)
- ),
- TP_fast_assign(
- __entry->name = type;
- __entry->len = len;
- memcpy(__entry->ctrls, control, len << 3);
- memcpy(__entry->values, value, len << 3);
- ),
- TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
- __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
- __print_array(__entry->values, __entry->len, sizeof(__u64)))
-);
-
-TRACE_EVENT(trap_reg,
- TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
- TP_ARGS(fn, reg, is_write, write_value),
-
- TP_STRUCT__entry(
- __field(const char *, fn)
- __field(int, reg)
- __field(bool, is_write)
- __field(u64, write_value)
- ),
-
- TP_fast_assign(
- __entry->fn = fn;
- __entry->reg = reg;
- __entry->is_write = is_write;
- __entry->write_value = write_value;
- ),
-
- TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
-);
-
TRACE_EVENT(kvm_handle_sys_reg,
TP_PROTO(unsigned long hsr),
TP_ARGS(hsr),
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index f267bc2486a1..d7233ab982d0 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -6,6 +6,7 @@
#include <linux/kstrtox.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/string_choices.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
@@ -663,9 +664,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
- kvm_info("GICv4%s support %sabled\n",
+ kvm_info("GICv4%s support %s\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
- gicv4_enable ? "en" : "dis");
+ str_enabled_disabled(gicv4_enable));
}
kvm_vgic_global_state.vcpu_base = 0;
@@ -734,7 +735,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
+ if (likely(!is_protected_kvm_enabled()))
+ kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
if (has_vhe())
__vgic_v3_activate_traps(cpu_if);
@@ -746,7 +748,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+ if (likely(!is_protected_kvm_enabled()))
+ kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
WARN_ON(vgic_v4_put(vcpu));
if (has_vhe())
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 8e882f479d98..4d49dff721a8 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -13,7 +13,11 @@ endif
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
-obj-$(CONFIG_CRC32) += crc32.o crc32-glue.o
+obj-$(CONFIG_CRC32_ARCH) += crc32-arm64.o
+crc32-arm64-y := crc32.o crc32-glue.o
+
+obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm64.o
+crc-t10dif-arm64-y := crc-t10dif-glue.o crc-t10dif-core.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/lib/crc-t10dif-core.S
index 87dd6d46224d..87dd6d46224d 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/lib/crc-t10dif-core.S
diff --git a/arch/arm64/lib/crc-t10dif-glue.c b/arch/arm64/lib/crc-t10dif-glue.c
new file mode 100644
index 000000000000..dab7e3796232
--- /dev/null
+++ b/arch/arm64/lib/crc-t10dif-glue.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/simd.h>
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+static DEFINE_STATIC_KEY_FALSE(have_asimd);
+static DEFINE_STATIC_KEY_FALSE(have_pmull);
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
+
+asmlinkage void crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len,
+ u8 out[16]);
+asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
+
+u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
+{
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
+ if (static_branch_likely(&have_pmull)) {
+ if (crypto_simd_usable()) {
+ kernel_neon_begin();
+ crc = crc_t10dif_pmull_p64(crc, data, length);
+ kernel_neon_end();
+ return crc;
+ }
+ } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
+ static_branch_likely(&have_asimd) &&
+ crypto_simd_usable()) {
+ u8 buf[16];
+
+ kernel_neon_begin();
+ crc_t10dif_pmull_p8(crc, data, length, buf);
+ kernel_neon_end();
+
+ crc = 0;
+ data = buf;
+ length = sizeof(buf);
+ }
+ }
+ return crc_t10dif_generic(crc, data, length);
+}
+EXPORT_SYMBOL(crc_t10dif_arch);
+
+static int __init crc_t10dif_arm64_init(void)
+{
+ if (cpu_have_named_feature(ASIMD)) {
+ static_branch_enable(&have_asimd);
+ if (cpu_have_named_feature(PMULL))
+ static_branch_enable(&have_pmull);
+ }
+ return 0;
+}
+arch_initcall(crc_t10dif_arm64_init);
+
+static void __exit crc_t10dif_arm64_exit(void)
+{
+}
+module_exit(crc_t10dif_arm64_exit);
+
+bool crc_t10dif_is_optimized(void)
+{
+ return static_key_enabled(&have_asimd);
+}
+EXPORT_SYMBOL(crc_t10dif_is_optimized);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("CRC-T10DIF using arm64 NEON and Crypto Extensions");
+MODULE_LICENSE("GPL v2");
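The glue code above follows a common dispatch shape: short buffers always take the generic table-driven CRC, and the PMULL path is only used when the buffer is long enough and NEON may be used in the current context. A self-contained sketch of that shape, with stub functions standing in for crc_t10dif_pmull_p64() and crc_t10dif_generic() (the stubs are illustrative, not real CRC code):

#include <stddef.h>
#include <stdio.h>

#define CHUNK 16U

static unsigned fast_crc(unsigned crc, const unsigned char *p, size_t len)
{ return crc ^ 0x1; }   /* stand-in for the PMULL implementation */

static unsigned generic_crc(unsigned crc, const unsigned char *p, size_t len)
{ return crc ^ 0x2; }   /* stand-in for crc_t10dif_generic() */

static unsigned crc_dispatch(unsigned crc, const unsigned char *p, size_t len,
                             int have_pmull, int simd_usable)
{
        /* long buffer and usable acceleration: fast path, else generic */
        if (len >= CHUNK && have_pmull && simd_usable)
                return fast_crc(crc, p, len);
        return generic_crc(crc, p, len);
}

int main(void)
{
        unsigned char buf[32] = { 0 };

        printf("%x\n", crc_dispatch(0, buf, sizeof(buf), 1, 1)); /* fast    */
        printf("%x\n", crc_dispatch(0, buf, 4, 1, 1));           /* generic */
        return 0;
}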
diff --git a/arch/arm64/lib/crc32-glue.c b/arch/arm64/lib/crc32-glue.c
index 295ae3e6b997..15c4c9db573e 100644
--- a/arch/arm64/lib/crc32-glue.c
+++ b/arch/arm64/lib/crc32-glue.c
@@ -2,6 +2,7 @@
#include <linux/crc32.h>
#include <linux/linkage.h>
+#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
@@ -21,7 +22,7 @@ asmlinkage u32 crc32_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32c_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32_be_arm64_4way(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_le_base(crc, p, len);
@@ -40,11 +41,12 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
return crc32_le_arm64(crc, p, len);
}
+EXPORT_SYMBOL(crc32_le_arch);
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
- return __crc32c_le_base(crc, p, len);
+ return crc32c_le_base(crc, p, len);
if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
kernel_neon_begin();
@@ -60,8 +62,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
return crc32c_le_arm64(crc, p, len);
}
+EXPORT_SYMBOL(crc32c_le_arch);
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_be_base(crc, p, len);
@@ -80,3 +83,17 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
return crc32_be_arm64(crc, p, len);
}
+EXPORT_SYMBOL(crc32_be_arch);
+
+u32 crc32_optimizations(void)
+{
+ if (alternative_has_cap_likely(ARM64_HAS_CRC32))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("arm64-optimized CRC32 functions");
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 0c501cabc238..8160cff35089 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -33,7 +33,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
gfp_t gfp = GFP_PGTABLE_USER;
if (pgdir_is_page_size())
- return (pgd_t *)__get_free_page(gfp);
+ return __pgd_alloc(mm, 0);
else
return kmem_cache_alloc(pgd_cache, gfp);
}
@@ -41,7 +41,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
if (pgdir_is_page_size())
- free_page((unsigned long)pgd);
+ __pgd_free(mm, pgd);
else
kmem_cache_free(pgd_cache, pgd);
}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 66708b95493a..8446848edddb 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -267,6 +267,19 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
+static inline void emit_a64_add_i(const bool is64, const int dst, const int src,
+ const int tmp, const s32 imm, struct jit_ctx *ctx)
+{
+ if (is_addsub_imm(imm)) {
+ emit(A64_ADD_I(is64, dst, src, imm), ctx);
+ } else if (is_addsub_imm(-imm)) {
+ emit(A64_SUB_I(is64, dst, src, -imm), ctx);
+ } else {
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_ADD(is64, dst, src, tmp), ctx);
+ }
+}
+
/*
* There are 3 types of AArch64 LDR/STR (immediate) instruction:
* Post-index, Pre-index, Unsigned offset.
@@ -648,16 +661,13 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
const s16 off = insn->off;
u8 reg = dst;
- if (off || arena) {
- if (off) {
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- reg = tmp;
- }
- if (arena) {
- emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
- reg = tmp;
- }
+ if (off) {
+ emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+ reg = tmp;
+ }
+ if (arena) {
+ emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+ reg = tmp;
}
switch (insn->imm) {
@@ -723,7 +733,7 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
const s32 imm = insn->imm;
const s16 off = insn->off;
const bool isdw = BPF_SIZE(code) == BPF_DW;
- u8 reg;
+ u8 reg = dst;
s32 jmp_offset;
if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
@@ -732,11 +742,8 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
return -EINVAL;
}
- if (!off) {
- reg = dst;
- } else {
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ if (off) {
+ emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
reg = tmp;
}
@@ -1146,14 +1153,7 @@ emit_bswap_uxt:
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
- if (is_addsub_imm(imm)) {
- emit(A64_ADD_I(is64, dst, dst, imm), ctx);
- } else if (is_addsub_imm(-imm)) {
- emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
- } else {
- emit_a64_mov_i(is64, tmp, imm, ctx);
- emit(A64_ADD(is64, dst, dst, tmp), ctx);
- }
+ emit_a64_add_i(is64, dst, dst, tmp, imm, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
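The new emit_a64_add_i() helper centralises a decision the JIT previously open-coded in several places: an AArch64 ADD/SUB (immediate) can only encode a 12-bit value, optionally shifted left by 12 bits, so the helper tries ADD imm, then SUB -imm, and only then materialises the constant in a scratch register. A standalone sketch of that decision, with is_addsub_imm() taken from the hunk above and the emit calls replaced by prints:

#include <stdio.h>
#include <stdint.h>

static int is_addsub_imm(uint32_t imm)
{
        /* either a bare imm12, or imm12 shifted left by 12 */
        return !(imm & ~0xfffu) || !(imm & ~0xfff000u);
}

static void add_imm(int32_t imm)
{
        if (is_addsub_imm(imm))
                printf("ADD dst, src, #%d\n", imm);
        else if (is_addsub_imm(-imm))
                printf("SUB dst, src, #%d\n", -imm);
        else
                printf("MOV tmp, #%d; ADD dst, src, tmp\n", imm);
}

int main(void)
{
        add_imm(42);        /* fits imm12                 */
        add_imm(-42);       /* negated value fits imm12   */
        add_imm(0x123456);  /* needs the scratch register */
        return 0;
}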
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index eb17f59e543c..1e65f2fb45bd 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -105,6 +105,7 @@ WORKAROUND_CLEAN_CACHE
WORKAROUND_DEVICE_LOAD_ACQUIRE
WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_QCOM_FALKOR_E1003
+WORKAROUND_QCOM_ORYON_CNTVOFF
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
WORKAROUND_SPECULATIVE_SSBS
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 94ca9cdb0b16..762ee084b37c 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2064,6 +2064,18 @@ Field 17:16 ZEN
Res0 15:0
EndSysreg
+Sysreg TRFCR_EL1 3 0 1 2 1
+Res0 63:7
+UnsignedEnum 6:5 TS
+ 0b0001 VIRTUAL
+ 0b0010 GUEST_PHYSICAL
+ 0b0011 PHYSICAL
+EndEnum
+Res0 4:2
+Field 1 ExTRE
+Field 0 E0TRE
+EndSysreg
+
Sysreg SMPRI_EL1 3 0 1 2 4
Res0 63:4
Field 3:0 PRIORITY
@@ -2613,6 +2625,22 @@ Field 1 ICIALLU
Field 0 ICIALLUIS
EndSysreg
+Sysreg TRFCR_EL2 3 4 1 2 1
+Res0 63:7
+UnsignedEnum 6:5 TS
+ 0b0000 USE_TRFCR_EL1_TS
+ 0b0001 VIRTUAL
+ 0b0010 GUEST_PHYSICAL
+ 0b0011 PHYSICAL
+EndEnum
+Res0 4
+Field 3 CX
+Res0 2
+Field 1 E2TRE
+Field 0 E0HTRE
+EndSysreg
+
+
Sysreg HDFGRTR_EL2 3 4 3 1 4
Field 63 PMBIDR_EL1
Field 62 nPMSNEVFR_EL1
@@ -3023,6 +3051,10 @@ Sysreg ZCR_EL12 3 5 1 2 0
Mapping ZCR_EL1
EndSysreg
+Sysreg TRFCR_EL12 3 5 1 2 1
+Mapping TRFCR_EL1
+EndSysreg
+
Sysreg SMCR_EL12 3 5 1 2 6
Mapping SMCR_EL1
EndSysreg
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index 9c84c9012e53..bf8400c28b5a 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *ret;
pgd_t *init;
- ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ ret = __pgd_alloc(mm, 0);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long *)ret);
@@ -63,7 +63,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
} while (0)
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..9c58fb81f7fd 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -56,7 +56,7 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __oldval = 0; \
+ __typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0; \
\
asm volatile( \
"1: %0 = memw_locked(%1);\n" \
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 55988625e6fb..1ee5f5f157ca 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ pgd = __pgd_alloc(mm, 0);
/*
* There may be better ways to do this, but to ensure
@@ -89,7 +89,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor((page_ptdesc(pte))); \
+ pagetable_dtor((page_ptdesc(pte))); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
diff --git a/arch/hexagon/include/asm/setup.h b/arch/hexagon/include/asm/setup.h
new file mode 100644
index 000000000000..9f2749cd4052
--- /dev/null
+++ b/arch/hexagon/include/asm/setup.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_HEXAGON_SETUP_H
+#define _ASM_HEXAGON_SETUP_H
+
+#include <linux/init.h>
+#include <uapi/asm/setup.h>
+
+extern char external_cmdline_buffer;
+
+void __init setup_arch_memory(void);
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/setup.h b/arch/hexagon/include/uapi/asm/setup.h
index 8ce9428b1583..598f74f671f6 100644
--- a/arch/hexagon/include/uapi/asm/setup.h
+++ b/arch/hexagon/include/uapi/asm/setup.h
@@ -17,19 +17,9 @@
* 02110-1301, USA.
*/
-#ifndef _ASM_SETUP_H
-#define _ASM_SETUP_H
-
-#ifdef __KERNEL__
-#include <linux/init.h>
-#else
-#define __init
-#endif
+#ifndef _UAPI_ASM_HEXAGON_SETUP_H
+#define _UAPI_ASM_HEXAGON_SETUP_H
#include <asm-generic/setup.h>
-extern char external_cmdline_buffer;
-
-void __init setup_arch_memory(void);
-
#endif
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index f0f207e2a694..6f851e1cd4ee 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -170,8 +170,7 @@ static void __init time_init_deferred(void)
ce_dev->cpumask = cpu_all_mask;
- if (!resource)
- resource = rtos_timer_device.resource;
+ resource = rtos_timer_device.resource;
/* ioremap here means this has to run later, after paging init */
rtos_timer = ioremap(resource->start, resource_size(resource));
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 75e062722d28..e732aa01c2ff 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -135,7 +135,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
}
/* Attempt to continue past exception. */
- if (0 == newfp) {
+ if (!newfp) {
struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
+ 8);
@@ -195,8 +195,10 @@ int die(const char *str, struct pt_regs *regs, long err)
printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
- NOTIFY_STOP)
+ NOTIFY_STOP) {
+ spin_unlock_irq(&die.lock);
return 1;
+ }
print_modules();
show_regs(regs);
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
index bfa21465d83a..beb8499dd8ed 100644
--- a/arch/loongarch/Kbuild
+++ b/arch/loongarch/Kbuild
@@ -4,7 +4,6 @@ obj-y += net/
obj-y += vdso/
obj-$(CONFIG_KVM) += kvm/
-obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
# for cleaning
subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 28f0221e22fb..2b8bd27a852f 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -15,6 +15,7 @@ config LOONGARCH
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_CRC32
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_FAST_MULTIPLIER
@@ -248,7 +249,7 @@ config MACH_LOONGSON64
def_bool 64BIT
config FIX_EARLYCON_MEM
- def_bool y
+ def_bool !ARCH_IOREMAP
config PGTABLE_2LEVEL
bool
@@ -399,6 +400,7 @@ endchoice
config BUILTIN_DTB
bool "Enable built-in dtb in kernel"
depends on OF
+ select GENERIC_BUILTIN_DTB
help
Some existing systems do not provide a canonical device tree to
the kernel at boot time. Let's provide a device tree table in the
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
index 747d0c3f6389..15d5e14fe418 100644
--- a/arch/loongarch/boot/dts/Makefile
+++ b/arch/loongarch/boot/dts/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 4dffc90192f7..73c77500ac46 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -113,7 +113,10 @@ CONFIG_ZBUD=y
CONFIG_ZSMALLOC=m
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
+CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set
+# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
@@ -1029,7 +1032,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
@@ -1040,7 +1042,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_DMA_CMA=y
CONFIG_DMA_NUMA_CMA=y
diff --git a/arch/loongarch/crypto/Kconfig b/arch/loongarch/crypto/Kconfig
index 200a6e8b43b1..a0270b3e5b30 100644
--- a/arch/loongarch/crypto/Kconfig
+++ b/arch/loongarch/crypto/Kconfig
@@ -2,13 +2,4 @@
menu "Accelerated Cryptographic Algorithms for CPU (loongarch)"
-config CRYPTO_CRC32_LOONGARCH
- tristate "CRC32c and CRC32"
- select CRC32
- select CRYPTO_HASH
- help
- CRC32c and CRC32 CRC algorithms
-
- Architecture: LoongArch with CRC32 instructions
-
endmenu
diff --git a/arch/loongarch/crypto/Makefile b/arch/loongarch/crypto/Makefile
index d22613d27ce9..ba83755dde2b 100644
--- a/arch/loongarch/crypto/Makefile
+++ b/arch/loongarch/crypto/Makefile
@@ -2,5 +2,3 @@
#
# Makefile for LoongArch crypto files..
#
-
-obj-$(CONFIG_CRYPTO_CRC32_LOONGARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
deleted file mode 100644
index b7d9782827f5..000000000000
--- a/arch/loongarch/crypto/crc32-loongarch.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * crc32.c - CRC32 and CRC32C using LoongArch crc* instructions
- *
- * Module based on mips/crypto/crc32-mips.c
- *
- * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
- * Copyright (C) 2018 MIPS Tech, LLC
- * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
- */
-
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-
-#include <asm/cpu-features.h>
-#include <linux/unaligned.h>
-
-#define _CRC32(crc, value, size, type) \
-do { \
- __asm__ __volatile__( \
- #type ".w." #size ".w" " %0, %1, %0\n\t"\
- : "+r" (crc) \
- : "r" (value) \
- : "memory"); \
-} while (0)
-
-#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
-#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
-
-static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32(crc, value, b);
- }
-
- return crc;
-}
-
-static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- while (len >= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32C(crc, value, d);
- p += sizeof(u64);
- len -= sizeof(u64);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32C(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32C(crc, value, b);
- }
-
- return crc;
-}
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-struct chksum_ctx {
- u32 key;
-};
-
-struct chksum_desc_ctx {
- u32 crc;
-};
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = mctx->key;
-
- return 0;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set the seed.
- */
-static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(mctx->key))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksumc_update(struct shash_desc *desc, const u8 *data, unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32c_loongarch_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(ctx->crc, out);
- return 0;
-}
-
-static int chksumc_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(~ctx->crc, out);
- return 0;
-}
-
-static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(crc32_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(~crc32c_loongarch_hw(crc, data, len), out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksumc_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksumc_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksum_finup(mctx->key, data, length, out);
-}
-
-static int chksumc_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksumc_finup(mctx->key, data, length, out);
-}
-
-static int chksum_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- return 0;
-}
-
-static int chksumc_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = ~0;
- return 0;
-}
-
-static struct shash_alg crc32_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksum_cra_init,
- }
-};
-
-static struct shash_alg crc32c_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksumc_update,
- .final = chksumc_final,
- .finup = chksumc_finup,
- .digest = chksumc_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-loongarch",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksumc_cra_init,
- }
-};
-
-static int __init crc32_mod_init(void)
-{
- int err;
-
- if (!cpu_has(CPU_FEATURE_CRC32))
- return 0;
-
- err = crypto_register_shash(&crc32_alg);
- if (err)
- return err;
-
- err = crypto_register_shash(&crc32c_alg);
- if (err)
- return err;
-
- return 0;
-}
-
-static void __exit crc32_mod_exit(void)
-{
- if (!cpu_has(CPU_FEATURE_CRC32))
- return;
-
- crypto_unregister_shash(&crc32_alg);
- crypto_unregister_shash(&crc32c_alg);
-}
-
-module_init(crc32_mod_init);
-module_exit(crc32_mod_exit);
-
-MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
-MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
-MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
index 900589cb159d..35e0a230a484 100644
--- a/arch/loongarch/include/asm/cpu-info.h
+++ b/arch/loongarch/include/asm/cpu-info.h
@@ -57,6 +57,7 @@ struct cpuinfo_loongarch {
int global_id; /* physical global thread number */
int vabits; /* Virtual Address size in bits */
int pabits; /* Physical Address size in bits */
+ int timerbits; /* Width of arch timer in bits */
unsigned int ksave_mask; /* Usable KSave mask. */
unsigned int watch_dreg_count; /* Number data breakpoints */
unsigned int watch_ireg_count; /* Number instruction breakpoints */
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index d78330916bd1..13b2462f3d8c 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -38,8 +38,8 @@ struct arch_hw_breakpoint {
* Limits.
* Changing these will require modifications to the register accessors.
*/
-#define LOONGARCH_MAX_BRP 8
-#define LOONGARCH_MAX_WRP 8
+#define LOONGARCH_MAX_BRP 14
+#define LOONGARCH_MAX_WRP 14
/* Virtual debug register bases. */
#define CSR_CFG_ADDR 0
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 7b8367c39da8..590982cd986e 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -162,6 +162,7 @@ enum emulation_result {
#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
BIT(KVM_FEATURE_STEAL_TIME) | \
+ BIT(KVM_FEATURE_USER_HCALL) | \
BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index c4e84227280d..3e4b397f423f 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -13,6 +13,7 @@
#define KVM_HCALL_CODE_SERVICE 0
#define KVM_HCALL_CODE_SWDBG 1
+#define KVM_HCALL_CODE_USER_SERVICE 2
#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI 1
@@ -20,6 +21,8 @@
#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
+#define KVM_HCALL_USER_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
+
/*
* LoongArch hypercall return code
*/
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index d7e8f7d50ee0..2c349f961bfb 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -43,6 +43,7 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_emu_idle(struct kvm_vcpu *vcpu);
int kvm_pending_timer(struct kvm_vcpu *vcpu);
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 64ad277e096e..52651aa0e583 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -108,6 +108,12 @@
#define CPUCFG3_SPW_HG_HF BIT(11)
#define CPUCFG3_RVA BIT(12)
#define CPUCFG3_RVAMAX GENMASK(16, 13)
+#define CPUCFG3_ALDORDER_CAP BIT(18) /* All address load ordered, capability */
+#define CPUCFG3_ASTORDER_CAP BIT(19) /* All address store ordered, capability */
+#define CPUCFG3_ALDORDER_STA BIT(20) /* All address load ordered, status */
+#define CPUCFG3_ASTORDER_STA BIT(21) /* All address store ordered, status */
+#define CPUCFG3_SLDORDER_CAP BIT(22) /* Same address load ordered, capability */
+#define CPUCFG3_SLDORDER_STA BIT(23) /* Same address load ordered, status */
#define LOONGARCH_CPUCFG4 0x4
#define CPUCFG4_CCFREQ GENMASK(31, 0)
@@ -466,7 +472,6 @@
#define LOONGARCH_CSR_TCFG 0x41 /* Timer config */
#define CSR_TCFG_VAL_SHIFT 2
-#define CSR_TCFG_VAL_WIDTH 48
#define CSR_TCFG_VAL (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
#define CSR_TCFG_PERIOD_SHIFT 1
#define CSR_TCFG_PERIOD (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
@@ -566,6 +571,15 @@
/* Implement dependent */
#define LOONGARCH_CSR_IMPCTL1 0x80 /* Loongson config1 */
+#define CSR_LDSTORDER_SHIFT 28
+#define CSR_LDSTORDER_WIDTH 3
+#define CSR_LDSTORDER_MASK (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT)
+#define CSR_LDSTORDER_NLD_NST (_ULCAST_(0x0) << CSR_LDSTORDER_SHIFT) /* 000 = No Load No Store */
+#define CSR_LDSTORDER_ALD_NST (_ULCAST_(0x1) << CSR_LDSTORDER_SHIFT) /* 001 = All Load No Store */
+#define CSR_LDSTORDER_SLD_NST (_ULCAST_(0x3) << CSR_LDSTORDER_SHIFT) /* 011 = Same Load No Store */
+#define CSR_LDSTORDER_NLD_AST (_ULCAST_(0x4) << CSR_LDSTORDER_SHIFT) /* 100 = No Load All Store */
+#define CSR_LDSTORDER_ALD_AST (_ULCAST_(0x5) << CSR_LDSTORDER_SHIFT) /* 101 = All Load All Store */
+#define CSR_LDSTORDER_SLD_AST (_ULCAST_(0x7) << CSR_LDSTORDER_SHIFT) /* 111 = Same Load All Store */
#define CSR_MISPEC_SHIFT 20
#define CSR_MISPEC_WIDTH 8
#define CSR_MISPEC (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
@@ -959,6 +973,36 @@
#define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */
#define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */
+#define LOONGARCH_CSR_DB8ADDR 0x350 /* data breakpoint 8 address */
+#define LOONGARCH_CSR_DB8MASK 0x351 /* data breakpoint 8 mask */
+#define LOONGARCH_CSR_DB8CTRL 0x352 /* data breakpoint 8 control */
+#define LOONGARCH_CSR_DB8ASID 0x353 /* data breakpoint 8 asid */
+
+#define LOONGARCH_CSR_DB9ADDR 0x358 /* data breakpoint 9 address */
+#define LOONGARCH_CSR_DB9MASK 0x359 /* data breakpoint 9 mask */
+#define LOONGARCH_CSR_DB9CTRL 0x35a /* data breakpoint 9 control */
+#define LOONGARCH_CSR_DB9ASID 0x35b /* data breakpoint 9 asid */
+
+#define LOONGARCH_CSR_DB10ADDR 0x360 /* data breakpoint 10 address */
+#define LOONGARCH_CSR_DB10MASK 0x361 /* data breakpoint 10 mask */
+#define LOONGARCH_CSR_DB10CTRL 0x362 /* data breakpoint 10 control */
+#define LOONGARCH_CSR_DB10ASID 0x363 /* data breakpoint 10 asid */
+
+#define LOONGARCH_CSR_DB11ADDR 0x368 /* data breakpoint 11 address */
+#define LOONGARCH_CSR_DB11MASK 0x369 /* data breakpoint 11 mask */
+#define LOONGARCH_CSR_DB11CTRL 0x36a /* data breakpoint 11 control */
+#define LOONGARCH_CSR_DB11ASID 0x36b /* data breakpoint 11 asid */
+
+#define LOONGARCH_CSR_DB12ADDR 0x370 /* data breakpoint 12 address */
+#define LOONGARCH_CSR_DB12MASK 0x371 /* data breakpoint 12 mask */
+#define LOONGARCH_CSR_DB12CTRL 0x372 /* data breakpoint 12 control */
+#define LOONGARCH_CSR_DB12ASID 0x373 /* data breakpoint 12 asid */
+
+#define LOONGARCH_CSR_DB13ADDR 0x378 /* data breakpoint 13 address */
+#define LOONGARCH_CSR_DB13MASK 0x379 /* data breakpoint 13 mask */
+#define LOONGARCH_CSR_DB13CTRL 0x37a /* data breakpoint 13 control */
+#define LOONGARCH_CSR_DB13ASID 0x37b /* data breakpoint 13 asid */
+
#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
@@ -1002,6 +1046,36 @@
#define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */
#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+#define LOONGARCH_CSR_IB8ADDR 0x3d0 /* inst breakpoint 8 address */
+#define LOONGARCH_CSR_IB8MASK 0x3d1 /* inst breakpoint 8 mask */
+#define LOONGARCH_CSR_IB8CTRL 0x3d2 /* inst breakpoint 8 control */
+#define LOONGARCH_CSR_IB8ASID 0x3d3 /* inst breakpoint 8 asid */
+
+#define LOONGARCH_CSR_IB9ADDR 0x3d8 /* inst breakpoint 9 address */
+#define LOONGARCH_CSR_IB9MASK 0x3d9 /* inst breakpoint 9 mask */
+#define LOONGARCH_CSR_IB9CTRL 0x3da /* inst breakpoint 9 control */
+#define LOONGARCH_CSR_IB9ASID 0x3db /* inst breakpoint 9 asid */
+
+#define LOONGARCH_CSR_IB10ADDR 0x3e0 /* inst breakpoint 10 address */
+#define LOONGARCH_CSR_IB10MASK 0x3e1 /* inst breakpoint 10 mask */
+#define LOONGARCH_CSR_IB10CTRL 0x3e2 /* inst breakpoint 10 control */
+#define LOONGARCH_CSR_IB10ASID 0x3e3 /* inst breakpoint 10 asid */
+
+#define LOONGARCH_CSR_IB11ADDR 0x3e8 /* inst breakpoint 11 address */
+#define LOONGARCH_CSR_IB11MASK 0x3e9 /* inst breakpoint 11 mask */
+#define LOONGARCH_CSR_IB11CTRL 0x3ea /* inst breakpoint 11 control */
+#define LOONGARCH_CSR_IB11ASID 0x3eb /* inst breakpoint 11 asid */
+
+#define LOONGARCH_CSR_IB12ADDR 0x3f0 /* inst breakpoint 12 address */
+#define LOONGARCH_CSR_IB12MASK 0x3f1 /* inst breakpoint 12 mask */
+#define LOONGARCH_CSR_IB12CTRL 0x3f2 /* inst breakpoint 12 control */
+#define LOONGARCH_CSR_IB12ASID 0x3f3 /* inst breakpoint 12 asid */
+
+#define LOONGARCH_CSR_IB13ADDR 0x3f8 /* inst breakpoint 13 address */
+#define LOONGARCH_CSR_IB13MASK 0x3f9 /* inst breakpoint 13 mask */
+#define LOONGARCH_CSR_IB13CTRL 0x3fa /* inst breakpoint 13 control */
+#define LOONGARCH_CSR_IB13ASID 0x3fb /* inst breakpoint 13 asid */
+
#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
#define LOONGARCH_CSR_DERA 0x501 /* debug era */
#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index a7b9c9e73593..7211dff8c969 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -57,7 +57,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 82cd3a9f094b..45bfc65a0c9f 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -96,6 +96,13 @@
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)
+#define pgprot_nx pgprot_nx
+
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) | _PAGE_NO_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
index b0604aa9b4bb..76d802ef01ce 100644
--- a/arch/loongarch/include/uapi/asm/kvm_para.h
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -17,5 +17,6 @@
#define KVM_FEATURE_STEAL_TIME 2
/* BIT 24 - 31 are features configurable by user space vmm */
#define KVM_FEATURE_VIRT_EXTIOI 24
+#define KVM_FEATURE_USER_HCALL 25
#endif /* _UAPI_ASM_KVM_PARA_H */
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index ac915f841650..aafb3cd9e943 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -72,6 +72,16 @@ struct user_watch_state {
} dbg_regs[8];
};
+struct user_watch_state_v2 {
+ uint64_t dbg_info;
+ struct {
+ uint64_t addr;
+ uint64_t mask;
+ uint32_t ctrl;
+ uint32_t pad;
+ } dbg_regs[14];
+};
+
#define PTRACE_SYSEMU 0x1f
#define PTRACE_SYSEMU_SINGLESTEP 0x20
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 9497968ee158..4853e8b04c6f 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -10,7 +10,7 @@ extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
traps.o irq.o idle.o process.o dma.o mem.o reset.o switch.o \
elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
- alternative.o unwind.o
+ alternative.o kdebugfs.o unwind.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_EFI) += efi.o
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
index c7988f757281..8e231b0d2cd6 100644
--- a/arch/loongarch/kernel/cacheinfo.c
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -51,6 +51,12 @@ static void cache_cpumap_setup(unsigned int cpu)
continue;
sib_leaf = sib_cpu_ci->info_list + index;
+ /* SMT cores share all caches */
+ if (cpus_are_siblings(i, cpu)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ /* Node's cores share shared caches */
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index cbce099037b2..fedaa67cde41 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -190,6 +190,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
set_cpu_asid_mask(c, asid_mask);
config = read_csr_prcfg1();
+ c->timerbits = (config & CSR_CONF1_TMRBITS) >> CSR_CONF1_TMRBITS_SHIFT;
c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index a6e4b605bfa8..c35f9bf38033 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type)
READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ READ_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 13, REG, T, VAL);
#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
@@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type)
WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
- WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
+ WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);
static u64 read_wb_reg(int reg, int n, int t)
{
diff --git a/arch/loongarch/kernel/kdebugfs.c b/arch/loongarch/kernel/kdebugfs.c
new file mode 100644
index 000000000000..80cf64772399
--- /dev/null
+++ b/arch/loongarch/kernel/kdebugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/kstrtox.h>
+#include <asm/loongarch.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int sfb_state, tso_state;
+
+static void set_sfb_state(void *info)
+{
+ int val = *(int *)info << CSR_STFILL_SHIFT;
+
+ csr_xchg32(val, CSR_STFILL, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t sfb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[32];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %x\nCurrent State: %x\n", sfb_state, state);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t sfb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1:
+ on_each_cpu(set_sfb_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations sfb_fops = {
+ .read = sfb_read,
+ .write = sfb_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+#define LDSTORDER_NLD_NST 0x0 /* 000 = No Load No Store */
+#define LDSTORDER_ALD_NST 0x1 /* 001 = All Load No Store */
+#define LDSTORDER_SLD_NST 0x3 /* 011 = Same Load No Store */
+#define LDSTORDER_NLD_AST 0x4 /* 100 = No Load All Store */
+#define LDSTORDER_ALD_AST 0x5 /* 101 = All Load All Store */
+#define LDSTORDER_SLD_AST 0x7 /* 111 = Same Load All Store */
+
+static char *tso_hints[] = {
+ "No Load No Store",
+ "All Load No Store",
+ "Invalid Config",
+ "Same Load No Store",
+ "No Load All Store",
+ "All Load All Store",
+ "Invalid Config",
+ "Same Load All Store"
+};
+
+static void set_tso_state(void *info)
+{
+ int val = *(int *)info << CSR_LDSTORDER_SHIFT;
+
+ csr_xchg32(val, CSR_LDSTORDER_MASK, LOONGARCH_CSR_IMPCTL1);
+}
+
+static ssize_t tso_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ int s, state;
+ char str[240];
+
+ state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+
+ s = snprintf(str, sizeof(str), "Boot State: %d (%s)\n"
+ "Current State: %d (%s)\n\n"
+ "Available States:\n"
+ "0 (%s)\t" "1 (%s)\t" "3 (%s)\n"
+ "4 (%s)\t" "5 (%s)\t" "7 (%s)\n",
+ tso_state, tso_hints[tso_state], state, tso_hints[state],
+ tso_hints[0], tso_hints[1], tso_hints[3], tso_hints[4], tso_hints[5], tso_hints[7]);
+
+ if (*ppos >= s)
+ return 0;
+
+ s -= *ppos;
+ s = min_t(u32, s, count);
+
+ if (copy_to_user(buf, &str[*ppos], s))
+ return -EFAULT;
+
+ *ppos += s;
+
+ return s;
+}
+
+static ssize_t tso_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ int state;
+
+ if (kstrtoint_from_user(buf, count, 10, &state))
+ return -EFAULT;
+
+ switch (state) {
+ case 0: case 1: case 3:
+ case 4: case 5: case 7:
+ on_each_cpu(set_tso_state, &state, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations tso_fops = {
+ .read = tso_read,
+ .write = tso_write,
+ .open = simple_open,
+ .llseek = default_llseek
+};
+
+static int __init arch_kdebugfs_init(void)
+{
+ unsigned int config = read_cpucfg(LOONGARCH_CPUCFG3);
+
+ arch_debugfs_dir = debugfs_create_dir("loongarch", NULL);
+
+ if (config & CPUCFG3_SFB) {
+ debugfs_create_file("sfb_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &sfb_state, &sfb_fops);
+ sfb_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_STFILL) >> CSR_STFILL_SHIFT;
+ }
+
+ if (config & (CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP)) {
+ debugfs_create_file("tso_state", S_IRUGO | S_IWUSR,
+ arch_debugfs_dir, &tso_state, &tso_fops);
+ tso_state = (csr_read32(LOONGARCH_CSR_IMPCTL1) & CSR_LDSTORDER_MASK) >> CSR_LDSTORDER_SHIFT;
+ }
+
+ return 0;
+}
+postcore_initcall(arch_kdebugfs_init);
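The new files are meant to be driven from user space. A small illustrative program, assuming debugfs is mounted at the conventional /sys/kernel/debug (the path mirrors the file created above; the usage itself is hypothetical, not part of the patch):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/loongarch/sfb_state";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("1", f);          /* enable the store fill buffer on all CPUs */
        fclose(f);
        return 0;
}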
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index 19dc6eff45cc..5e2402cfcab0 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -720,7 +720,7 @@ static int hw_break_set(struct task_struct *target,
unsigned int note_type = regset->core_note_type;
/* Resource info */
- offset = offsetof(struct user_watch_state, dbg_regs);
+ offset = offsetof(struct user_watch_state_v2, dbg_regs);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
/* (address, mask, ctrl) registers */
@@ -920,7 +920,7 @@ static const struct user_regset loongarch64_regsets[] = {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
.core_note_type = NT_LOONGARCH_HW_BREAK,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
@@ -928,7 +928,7 @@ static const struct user_regset loongarch64_regsets[] = {
},
[REGSET_HW_WATCH] = {
.core_note_type = NT_LOONGARCH_HW_WATCH,
- .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .n = sizeof(struct user_watch_state_v2) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
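With the regsets sized for user_watch_state_v2, a debugger fetches the enlarged watchpoint state through the usual PTRACE_GETREGSET path. A hedged tracer-side sketch, assuming a LoongArch host whose headers already export user_watch_state_v2 and NT_LOONGARCH_HW_WATCH and a child that is already being traced and stopped:

#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

/* Fragment meant to be called from a tracer once the child has stopped. */
static int dump_watchpoints(pid_t pid)
{
        struct user_watch_state_v2 ws;
        struct iovec iov = { .iov_base = &ws, .iov_len = sizeof(ws) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov))
                return -1;

        /* iov.iov_len reports how many bytes the running kernel filled in */
        printf("dbg_info %#llx, %zu bytes of watch state\n",
               (unsigned long long)ws.dbg_info, iov.iov_len);
        return 0;
}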
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 56934fe58170..edcfdfcad7d2 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -431,7 +431,7 @@ static void __init resource_init(void)
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
- standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES);
for_each_mem_region(region) {
res = &standard_resources[i++];
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 31dd8199b245..9c23cb7e432f 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -12,7 +12,7 @@
/*
* task_struct *__switch_to(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, void *sched_ra, void *sched_cfa)
*/
.align 5
SYM_FUNC_START(__switch_to)
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index a07d7eff4dc5..e2d3bfeb6366 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -132,7 +132,7 @@ int constant_clockevent_init(void)
#else
unsigned long min_delta = 1000;
#endif
- unsigned long max_delta = (1UL << 48) - 1;
+ unsigned long max_delta = GENMASK_ULL(boot_cpu_data.timerbits, 0);
struct clock_event_device *cd;
static int irq = 0, timer_irq_installed = 0;
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index c57b4134f3e8..2ec3106c0da3 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -597,17 +597,24 @@ int is_valid_bugaddr(unsigned long addr)
static void bug_handler(struct pt_regs *regs)
{
+ if (user_mode(regs)) {
+ force_sig(SIGTRAP);
+ return;
+ }
+
switch (report_bug(regs->csr_era, regs)) {
case BUG_TRAP_TYPE_BUG:
- case BUG_TRAP_TYPE_NONE:
- die_if_kernel("Oops - BUG", regs);
- force_sig(SIGTRAP);
+ die("Oops - BUG", regs);
break;
case BUG_TRAP_TYPE_WARN:
/* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE;
break;
+
+ default:
+ if (!fixup_exception(regs))
+ die("Oops - BUG", regs);
}
}
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
index 3abf163dda05..487be604b96a 100644
--- a/arch/loongarch/kernel/unaligned.c
+++ b/arch/loongarch/kernel/unaligned.c
@@ -482,14 +482,10 @@ sigbus:
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
- struct dentry *d;
-
- d = debugfs_create_dir("loongarch", NULL);
-
debugfs_create_u32("unaligned_instructions_user",
- S_IRUGO, d, &unaligned_instructions_user);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_user);
debugfs_create_u32("unaligned_instructions_kernel",
- S_IRUGO, d, &unaligned_instructions_kernel);
+ S_IRUGO, arch_debugfs_dir, &unaligned_instructions_kernel);
return 0;
}
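
With arch_kdebugfs_init() creating the shared "loongarch" directory at postcore_initcall time, later code such as the unaligned-access counters above can attach files to arch_debugfs_dir instead of creating a second directory. A minimal sketch of that pattern, with made-up names standing in for the arch-specific symbols (only the debugfs helpers are the real API):

#include <linux/debugfs.h>
#include <linux/init.h>

static struct dentry *example_arch_dir;	/* stands in for arch_debugfs_dir */
static u32 example_unaligned_count;

static int __init example_dir_init(void)
{
	/* Create the shared directory once, early enough for later initcalls. */
	example_arch_dir = debugfs_create_dir("example-arch", NULL);
	return 0;
}
postcore_initcall(example_dir_init);

static int __init example_counter_init(void)
{
	/* Reuse the shared directory instead of creating another one. */
	debugfs_create_u32("unaligned_count", 0444, example_arch_dir,
			   &example_unaligned_count);
	return 0;
}
late_initcall(example_counter_init);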
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index a7893bd01e73..c1e8ec5b941b 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -709,6 +709,14 @@ static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
return kvm_handle_rdwr_fault(vcpu, true);
}
+int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ update_pc(&vcpu->arch);
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);
+
+ return 0;
+}
+
/**
* kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
* @vcpu: Virtual CPU context.
@@ -873,6 +881,28 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
vcpu->stat.hypercall_exits++;
kvm_handle_service(vcpu);
break;
+ case KVM_HCALL_USER_SERVICE:
+ if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
+ kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
+ break;
+ }
+
+ vcpu->stat.hypercall_exits++;
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
+ vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
+ vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
+ vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
+ vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
+ vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
+ vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
+ vcpu->run->hypercall.flags = 0;
+ /*
+ * Set an invalid return value by default; the user-mode VMM is expected to modify it.
+ */
+ vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
+ ret = RESUME_HOST;
+ break;
case KVM_HCALL_SWDBG:
/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
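
For the KVM_HCALL_USER_SERVICE case above, the vCPU exits to user space with KVM_EXIT_HYPERCALL and the six guest argument registers copied into run->hypercall.args; the VMM fills run->hypercall.ret before re-entering, and kvm_complete_user_service() then advances the PC and writes that value back into A0. A rough sketch of the user-space side, with a stand-in service handler and all error handling elided:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical service handler provided by the VMM, not a kernel API. */
static __u64 handle_user_service(const __u64 args[6])
{
	return args[0];		/* e.g. echo back the first argument */
}

/* One iteration of a simplified vCPU run loop. */
static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_HYPERCALL) {
		/* The kernel leaves KVM_HCALL_INVALID_CODE in ret by default;
		 * overwrite it with the real result before the next KVM_RUN. */
		run->hypercall.ret = handle_user_service(run->hypercall.args);
	}
	return 0;
}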
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 396fed2665a5..bf9268bf26d5 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -245,6 +245,24 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+
+ /*
+ * LLBCTL is a guest CSR register separate from the host one. A general
+ * exception ERET instruction clears the host LLBCTL register in
+ * host mode, and clears the guest LLBCTL register in guest mode.
+ * ERET in the TLB refill exception does not clear the LLBCTL register.
+ *
+ * When the secondary MMU mapping is changed, the guest OS has no way
+ * to notice, even though the contents behind the mapping may have
+ * changed.
+ *
+ * So clear WCLLB in the guest LLBCTL register here whenever the
+ * mapping changes. Otherwise, if the MMU mapping changes while the
+ * guest is executing an LL/SC pair, LL loads from the old address and
+ * sets the LLBCTL flag, then SC checks the LLBCTL flag and stores to
+ * the new address successfully because LLBCTL_WCLLB is set, even if
+ * the memory at the new address was changed by other vCPUs.
+ */
+ set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
}
/* Restore GSTAT(0x50).vpid */
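
The comment above describes the standard LL/SC hazard: the link state must be broken whenever the underlying memory may have changed behind the guest's back, otherwise a store-conditional can succeed against stale data. Purely as an illustration of the guest-side pattern being protected, here is the shape of such a retry loop written with C11 atomics (typically lowered to an LL/SC pair on LoongArch); this is a generic sketch, not code from the patch:

#include <stdatomic.h>

/*
 * Compare-and-swap retry loop. Between the load and the conditional store
 * the hypervisor may change the secondary MMU mapping; clearing
 * LLBCTL.WCLLB (as the patch does) forces the store-conditional to fail
 * and the loop to retry, instead of silently committing stale data.
 */
static void atomic_add_relaxed_sketch(_Atomic int *counter, int delta)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(counter, &old, old + delta,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* old was refreshed by the failed exchange; retry */
}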
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index d18a4a270415..fb72095c8077 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -1732,9 +1732,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mmio_needed = 0;
}
- if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_HYPERCALL:
+ kvm_complete_user_service(vcpu, run);
+ break;
+ case KVM_EXIT_LOONGARCH_IOCSR:
if (!run->iocsr_io.is_write)
kvm_complete_iocsr_read(vcpu, run);
+ break;
}
if (!vcpu->wants_to_run)
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index ccea3bbd4353..fae77809048b 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_ARCH_SUPPORTS_INT128) += tishift.o
obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_CRC32_ARCH) += crc32-loongarch.o
diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c
new file mode 100644
index 000000000000..8af8113ecd9d
--- /dev/null
+++ b/arch/loongarch/lib/crc32-loongarch.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CRC32 and CRC32C using LoongArch crc* instructions
+ *
+ * Module based on mips/crypto/crc32-mips.c
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ * Copyright (C) 2018 MIPS Tech, LLC
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <asm/cpu-features.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+#define _CRC32(crc, value, size, type) \
+do { \
+ __asm__ __volatile__( \
+ #type ".w." #size ".w" " %0, %1, %0\n\t"\
+ : "+r" (crc) \
+ : "r" (value) \
+ : "memory"); \
+} while (0)
+
+#define CRC32(crc, value, size) _CRC32(crc, value, size, crc)
+#define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc)
+
+static DEFINE_STATIC_KEY_FALSE(have_crc32);
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32_le_base(crc, p, len);
+
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32(crc, value, b);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32c_le_base(crc, p, len);
+
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32C(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32C(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32C(crc, value, b);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_loongarch_init(void)
+{
+ if (cpu_has_crc32)
+ static_branch_enable(&have_crc32);
+ return 0;
+}
+arch_initcall(crc32_loongarch_init);
+
+static void __exit crc32_loongarch_exit(void)
+{
+}
+module_exit(crc32_loongarch_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_crc32))
+ return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Min Zhou <zhoumin@loongson.cn>");
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using LoongArch crc* instructions");
+MODULE_LICENSE("GPL v2");
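
A note on the length handling in crc32_le_arch()/crc32c_le_arch() above: after the 8-byte loop the residue is at most 7 bytes, so testing len against sizeof(u32), sizeof(u16) and sizeof(u8) (i.e. bits 2, 1 and 0 of the residue) consumes exactly the remaining tail. A small user-space sketch of that decomposition, with the CRC step replaced by a byte counter just to show the control flow:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Count how many bytes the tail decomposition consumes for a given length. */
static size_t consumed(size_t len)
{
	size_t n = 0;

	while (len >= sizeof(uint64_t)) {	/* 8-byte chunks */
		n += sizeof(uint64_t);
		len -= sizeof(uint64_t);
	}
	if (len & sizeof(uint32_t))		/* residue bit 2 -> one 4-byte step */
		n += sizeof(uint32_t);
	if (len & sizeof(uint16_t))		/* residue bit 1 -> one 2-byte step */
		n += sizeof(uint16_t);
	if (len & sizeof(uint8_t))		/* residue bit 0 -> one 1-byte step */
		n += sizeof(uint8_t);
	return n;
}

int main(void)
{
	for (size_t len = 0; len < 64; len++)
		assert(consumed(len) == len);
	return 0;
}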
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 188b52bbb254..ca5aa5f46a9f 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -174,9 +174,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pmd_t *pmd;
if (p4d_none(p4dp_get(p4d))) {
- pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pud)
- panic("%s: Failed to allocate memory\n", __func__);
+ pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init(pud);
@@ -185,9 +183,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pud = pud_offset(p4d, addr);
if (pud_none(pudp_get(pud))) {
- pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pmd)
- panic("%s: Failed to allocate memory\n", __func__);
+ pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init(pmd);
@@ -198,10 +194,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
if (!pmd_present(pmdp_get(pmd))) {
pte_t *pte;
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate memory\n", __func__);
-
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
kernel_pte_init(pte);
}
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 3fa69b23ff84..22a94bb3e6e8 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -23,11 +23,10 @@ EXPORT_SYMBOL(tlb_virt_to_page);
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *init, *ret = NULL;
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ pgd_t *init, *ret;
- if (ptdesc) {
- ret = (pgd_t *)ptdesc_address(ptdesc);
+ ret = __pgd_alloc(mm, 0);
+ if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
index 0909729dc2e1..5bbdb9fd76e5 100644
--- a/arch/loongarch/power/platform.c
+++ b/arch/loongarch/power/platform.c
@@ -17,7 +17,7 @@ void enable_gpe_wakeup(void)
if (acpi_gbl_reduced_hardware)
return;
- acpi_enable_all_wakeup_gpes();
+ acpi_hw_enable_all_wakeup_gpes();
}
void enable_pci_wakeup(void)
diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S
index 9fe28d5a0270..df0865df26fa 100644
--- a/arch/loongarch/power/suspend_asm.S
+++ b/arch/loongarch/power/suspend_asm.S
@@ -30,9 +30,6 @@
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
-
- la.pcrel t0, acpi_saved_sp
- st.d sp, t0, 0
.endm
.macro SETUP_WAKEUP
@@ -51,6 +48,7 @@
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
+ addi.d sp, sp, PT_SIZE
.endm
.text
@@ -59,6 +57,10 @@
/* Sleep/wakeup code for Loongson-3 */
SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
+
+ la.pcrel t0, acpi_saved_sp
+ st.d sp, t0, 0
+
bl __flush_cache_all
/* Pass RA and SP to BIOS */
@@ -82,7 +84,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
+
SETUP_WAKEUP
- addi.d sp, sp, PT_SIZE
jr ra
SYM_FUNC_END(loongarch_suspend_enter)
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 405e9d5c832c..7a25cfc7ac07 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -33,14 +33,14 @@ DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.3", 27, MCF_BUSCLK);
-DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.0", 28, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.1", 29, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.2", 30, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.3", 31, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfpit.0", 32, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfpit.1", 33, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfpit.2", 34, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfpit.3", 35, MCF_BUSCLK);
DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
DEFINE_CLK(0, "mcfadc.0", 38, MCF_CLK);
DEFINE_CLK(0, "mcfdac.0", 39, MCF_CLK);
@@ -167,8 +167,8 @@ static struct clk * const disable_clks[] __initconst = {
&__clk_0_14, /* i2c.1 */
&__clk_0_22, /* i2c.0 */
&__clk_0_23, /* dspi.0 */
- &__clk_0_28, /* tmr.1 */
- &__clk_0_29, /* tmr.2 */
+ &__clk_0_28, /* tmr.0 */
+ &__clk_0_29, /* tmr.1 */
&__clk_0_30, /* tmr.2 */
&__clk_0_31, /* tmr.3 */
&__clk_0_32, /* pit.0 */
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index c705247e7b5b..dbf2ea561c85 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -579,7 +579,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -589,7 +588,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -606,7 +604,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 6d62b9187a58..b0fd199cc0a4 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -536,7 +536,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -546,7 +545,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -563,7 +561,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index c3c644df852d..bb5b2d3b6c10 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -556,7 +556,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -566,7 +565,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -583,7 +581,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 20261f819691..8315a13bab73 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -555,7 +553,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ce4fe93a0f70..350370657e5f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -538,7 +538,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -548,7 +547,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -565,7 +563,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 040ae75f47c3..f942b4755702 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -555,7 +555,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -565,7 +564,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -582,7 +580,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 20d877cb4e30..b1eaad02efab 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -642,7 +642,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -652,7 +651,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -669,7 +667,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 5e1c8d0d3da5..6309a4442bb3 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -555,7 +553,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 5d1409e6a137..3feb0731f814 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -529,7 +529,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -539,7 +538,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -556,7 +554,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index e4c30e2b9bbb..ea04b1b0da7d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -545,7 +545,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -555,7 +554,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -572,7 +570,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 980843a9ea1e..f52d9af92153 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -553,7 +551,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38681cc6b598..f348447824da 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
@@ -553,7 +551,6 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_PRIME_NUMBERS=m
-CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
CONFIG_GLOB_SELFTEST=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 302c5bf67179..4c648b51e7fd 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -37,7 +37,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
@@ -61,7 +61,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
@@ -73,7 +73,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pagetable_free(virt_to_ptdesc(pgd));
+ pagetable_dtor_free(virt_to_ptdesc(pgd));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -84,6 +84,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (!ptdesc)
return NULL;
+ pagetable_pgd_ctor(ptdesc);
new_pgd = ptdesc_address(ptdesc);
memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 74a817d9387f..5abe7da8ac5a 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -9,9 +9,9 @@ extern void mmu_page_ctor(void *page);
extern void mmu_page_dtor(void *page);
enum m68k_table_types {
- TABLE_PGD = 0,
- TABLE_PMD = 0, /* same size as PGD */
- TABLE_PTE = 1,
+ TABLE_PGD,
+ TABLE_PMD,
+ TABLE_PTE,
};
extern void init_pointer_table(void *table, int type);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 4a137eecb6fe..f1ae4ed890db 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -19,7 +19,7 @@ extern const char bad_pmd_string[];
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
@@ -43,7 +43,7 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd;
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ new_pgd = __pgd_alloc(mm, 0);
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
return new_pgd;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1b47bec15832..8b11d0d545aa 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -68,10 +68,7 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 9a6fa342e872..19a75029036c 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -42,20 +42,14 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
int i;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pg_dir = swapper_pg_dir;
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
- if (!next_pgtable)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, PAGE_SIZE);
+ next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE);
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c1761d309fc6..73651e093c4d 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -97,17 +97,19 @@ void mmu_page_dtor(void *page)
typedef struct list_head ptable_desc;
-static struct list_head ptable_list[2] = {
+static struct list_head ptable_list[3] = {
LIST_HEAD_INIT(ptable_list[0]),
LIST_HEAD_INIT(ptable_list[1]),
+ LIST_HEAD_INIT(ptable_list[2]),
};
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
-static const int ptable_shift[2] = {
- 7+2, /* PGD, PMD */
+static const int ptable_shift[3] = {
+ 7+2, /* PGD */
+ 7+2, /* PMD */
6+2, /* PTE */
};
@@ -156,12 +158,20 @@ void *get_pointer_table(int type)
if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
return NULL;
- if (type == TABLE_PTE) {
+ switch (type) {
+ case TABLE_PTE:
/*
* m68k doesn't have SPLIT_PTE_PTLOCKS for not having
* SMP.
*/
pagetable_pte_ctor(virt_to_ptdesc(page));
+ break;
+ case TABLE_PMD:
+ pagetable_pmd_ctor(virt_to_ptdesc(page));
+ break;
+ case TABLE_PGD:
+ pagetable_pgd_ctor(virt_to_ptdesc(page));
+ break;
}
mmu_page_ctor(page);
@@ -200,8 +210,7 @@ int free_pointer_table(void *table, int type)
/* all tables in page are free, free page */
list_del(dp);
mmu_page_dtor((void *)page);
- if (type == TABLE_PTE)
- pagetable_pte_dtor(virt_to_ptdesc((void *)page));
+ pagetable_dtor(virt_to_ptdesc((void *)page));
free_page (page);
return 1;
} else if (ptable_list[type].next != dp) {
@@ -491,10 +500,7 @@ void __init paging_init(void)
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
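
With TABLE_PGD, TABLE_PMD and TABLE_PTE now distinct indices, get_pointer_table() can pick a per-type constructor while free_pointer_table() funnels every type through the common pagetable_dtor(). A compact user-space sketch of that dispatch shape, using stand-in constructors in place of the kernel's pagetable_*_ctor() helpers:

#include <stdio.h>

enum table_type { TABLE_PGD, TABLE_PMD, TABLE_PTE, NR_TABLE_TYPES };

/* Stand-ins for pagetable_pgd_ctor()/pagetable_pmd_ctor()/pagetable_pte_ctor(). */
static void pgd_ctor(void *p) { printf("pgd ctor %p\n", p); }
static void pmd_ctor(void *p) { printf("pmd ctor %p\n", p); }
static void pte_ctor(void *p) { printf("pte ctor %p\n", p); }

static void (* const table_ctor[NR_TABLE_TYPES])(void *) = {
	[TABLE_PGD] = pgd_ctor,
	[TABLE_PMD] = pmd_ctor,
	[TABLE_PTE] = pte_ctor,
};

/* Mirrors ptable_shift[]: PGD/PMD tables are 512 bytes, PTE tables 256. */
static const int table_shift[NR_TABLE_TYPES] = { 7 + 2, 7 + 2, 6 + 2 };

int main(void)
{
	static char page[4096];

	table_ctor[TABLE_PTE](page);
	printf("table size: %d bytes\n", 1 << table_shift[TABLE_PTE]);
	return 0;
}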
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 494739c1783e..1ecf6bdd08bf 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -44,10 +44,7 @@ void __init paging_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
unsigned long size;
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -57,10 +54,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
- if (!next_pgtable)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, PAGE_SIZE);
+ next_pgtable = (unsigned long)memblock_alloc_or_panic(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 6ebf52740ad7..225fc735e466 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -252,12 +252,8 @@ void __init dvma_init(void)
list_add(&(hole->list), &hole_list);
- iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
+ iommu_use = memblock_alloc_or_panic(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
SMP_CACHE_BYTES);
- if (!iommu_use)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
-
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
sun3_dvma_init();
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 6c33b05f730f..084a8a0dc239 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -21,12 +21,7 @@
extern void __bad_pte(pmd_t *pmd);
-static inline pgd_t *get_pgd(void)
-{
- return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
-}
-
-#define pgd_alloc(mm) get_pgd()
+#define pgd_alloc(mm) __pgd_alloc(mm, 0)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 467b10f4361a..1924f2d83932 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -29,10 +29,12 @@ config MIPS
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_LD_ORPHAN_WARN
select BUILDTIME_TABLE_SORT
+ select BUILTIN_DTB_ALL if BUILTIN_DTB
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
select CPU_PM if CPU_IDLE || SUSPEND
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_BUILTIN_DTB if BUILTIN_DTB
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_GETTIMEOFDAY
@@ -1084,7 +1086,6 @@ config CSRC_IOASIC
config CSRC_R4K
select CLOCKSOURCE_WATCHDOG if CPU_FREQ
- select HAVE_UNSTABLE_SCHED_CLOCK if SMP && 64BIT
bool
config CSRC_SB1250
@@ -1995,11 +1996,11 @@ config CPU_MIPSR5
config CPU_MIPSR6
bool
default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+ select ARCH_HAS_CRC32
select CPU_HAS_RIXI
select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
select HAVE_ARCH_BITREVERSE
select MIPS_ASID_BITS_VARIABLE
- select MIPS_CRC_SUPPORT
select MIPS_SPRAM
config TARGET_ISA_REV
@@ -2475,9 +2476,6 @@ config MIPS_ASID_BITS
config MIPS_ASID_BITS_VARIABLE
bool
-config MIPS_CRC_SUPPORT
- bool
-
# R4600 erratum. Due to the lack of errata information the exact
# technical details aren't known. I've experimentally found that disabling
# interrupts during indexed I-cache flushes seems to be sufficient to deal
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 5785a3d5ccfb..be8cb44a89fd 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -423,9 +423,6 @@ endif
CLEAN_FILES += vmlinux.32 vmlinux.64
-# device-trees
-core-y += arch/mips/boot/dts/
-
archprepare:
ifdef CONFIG_MIPS32_N32
@$(kecho) ' Checking missing-syscalls for N32'
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index e2476b12bb0c..ff468439a8c4 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -16,5 +16,3 @@ subdir-$(CONFIG_ATH79) += qca
subdir-$(CONFIG_RALINK) += ralink
subdir-$(CONFIG_MACH_REALTEK_RTL) += realtek
subdir-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += xilfpga
-
-obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index d85f446cc0ce..1798209697c6 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -33,5 +33,3 @@ dtb-$(CONFIG_DT_NONE) += \
bcm97420c.dtb \
bcm97425svmb.dtb \
bcm97435svmb.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile b/arch/mips/boot/dts/cavium-octeon/Makefile
index 17aef35f311b..48085bca666c 100644
--- a/arch/mips/boot/dts/cavium-octeon/Makefile
+++ b/arch/mips/boot/dts/cavium-octeon/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile
index 54aa0c4e6091..6e674f1a3aa3 100644
--- a/arch/mips/boot/dts/ingenic/Makefile
+++ b/arch/mips/boot/dts/ingenic/Makefile
@@ -5,5 +5,3 @@ dtb-$(CONFIG_JZ4770_GCW0) += gcw0.dtb
dtb-$(CONFIG_JZ4780_CI20) += ci20.dtb
dtb-$(CONFIG_X1000_CU1000_NEO) += cu1000-neo.dtb
dtb-$(CONFIG_X1830_CU1830_NEO) += cu1830-neo.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
index ae6e3e21ebeb..d8531b4653c0 100644
--- a/arch/mips/boot/dts/lantiq/Makefile
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_DT_EASY50712) += danube_easy50712.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile
index 5c6433e441ee..5e3ab984d70f 100644
--- a/arch/mips/boot/dts/loongson/Makefile
+++ b/arch/mips/boot/dts/loongson/Makefile
@@ -5,5 +5,3 @@ dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64g_4core_ls7a.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64v_4core_virtio.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mobileye/eyeq5.dtsi b/arch/mips/boot/dts/mobileye/eyeq5.dtsi
index 5d73e8320b8e..a84e6e720619 100644
--- a/arch/mips/boot/dts/mobileye/eyeq5.dtsi
+++ b/arch/mips/boot/dts/mobileye/eyeq5.dtsi
@@ -49,6 +49,28 @@
mhm_reserved_0: the-mhm-reserved-0@0 {
reg = <0x8 0x00000000 0x0 0x0000800>;
};
+
+ nvram@461fe00 {
+ compatible = "mobileye,eyeq5-bootloader-config", "nvmem-rmem";
+ reg = <0x0 0x0461fe00 0x0 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ no-map;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_mac: mac@7c {
+ reg = <0x7c 0x6>;
+ };
+
+ eth1_mac: mac@82 {
+ reg = <0x82 0x6>;
+ };
+ };
+ };
};
aliases {
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
index eeb6b7aae83b..566dbec3c7fb 100644
--- a/arch/mips/boot/dts/mscc/Makefile
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -8,6 +8,3 @@ dtb-$(CONFIG_SOC_VCOREIII) += \
ocelot_pcb123.dtb \
serval_pcb105.dtb \
serval_pcb106.dtb
-
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
index b5f7426998b1..c1c7b27296dd 100644
--- a/arch/mips/boot/dts/mti/Makefile
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_MIPS_MALTA) += malta.dtb
dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile
index fb57f36324db..4069cda2370c 100644
--- a/arch/mips/boot/dts/pic32/Makefile
+++ b/arch/mips/boot/dts/pic32/Makefile
@@ -3,5 +3,3 @@ dtb-$(CONFIG_DTB_PIC32_MZDA_SK) += pic32mzda_sk.dtb
dtb-$(CONFIG_DTB_PIC32_NONE) += \
pic32mzda_sk.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile
index d27d7e8c700f..dc002152d843 100644
--- a/arch/mips/boot/dts/ralink/Makefile
+++ b/arch/mips/boot/dts/ralink/Makefile
@@ -10,5 +10,3 @@ dtb-$(CONFIG_SOC_MT7621) += \
mt7621-gnubee-gb-pc1.dtb \
mt7621-gnubee-gb-pc2.dtb \
mt7621-tplink-hc220-g5-v1.dtb
-
-obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index e463a9acae03..f7c4b3529a2c 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -222,7 +222,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 92a1d0aea38c..da51b9731db0 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -177,10 +177,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index db214fcebcbe..424e3f011fc2 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 15b769e96d5b..cfc8bf791792 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
diff --git a/arch/mips/configs/eyeq5_defconfig b/arch/mips/configs/eyeq5_defconfig
index ae9a09b16e40..ff7af5dc6d9d 100644
--- a/arch/mips/configs/eyeq5_defconfig
+++ b/arch/mips/configs/eyeq5_defconfig
@@ -99,7 +99,6 @@ CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
-CONFIG_CRYPTO_CRC32_MIPS=y
CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/mips/configs/eyeq6_defconfig b/arch/mips/configs/eyeq6_defconfig
index 6597d5e88b33..0afbb45a78e8 100644
--- a/arch/mips/configs/eyeq6_defconfig
+++ b/arch/mips/configs/eyeq6_defconfig
@@ -102,7 +102,6 @@ CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
-CONFIG_CRYPTO_CRC32_MIPS=y
CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/mips/configs/generic/32r6.config b/arch/mips/configs/generic/32r6.config
index 1a5d5ea4ab2b..ca606e71f4d0 100644
--- a/arch/mips/configs/generic/32r6.config
+++ b/arch/mips/configs/generic/32r6.config
@@ -1,4 +1,2 @@
CONFIG_CPU_MIPS32_R6=y
CONFIG_HIGHMEM=y
-
-CONFIG_CRYPTO_CRC32_MIPS=y
diff --git a/arch/mips/configs/generic/64r6.config b/arch/mips/configs/generic/64r6.config
index 63b4e95f303d..23a300914957 100644
--- a/arch/mips/configs/generic/64r6.config
+++ b/arch/mips/configs/generic/64r6.config
@@ -4,5 +4,4 @@ CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_CPU_HAS_MSA=y
-CONFIG_CRYPTO_CRC32_MIPS=y
CONFIG_VIRTUALIZATION=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 4714074c8bd7..b08a199767d1 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -305,7 +305,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA512=m
diff --git a/arch/mips/configs/ip30_defconfig b/arch/mips/configs/ip30_defconfig
index 178d61645cea..270181a7320a 100644
--- a/arch/mips/configs/ip30_defconfig
+++ b/arch/mips/configs/ip30_defconfig
@@ -176,7 +176,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig
index 9003a5c1e879..7decd40c4e20 100644
--- a/arch/mips/crypto/Kconfig
+++ b/arch/mips/crypto/Kconfig
@@ -2,15 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (mips)"
-config CRYPTO_CRC32_MIPS
- tristate "CRC32c and CRC32"
- depends on MIPS_CRC_SUPPORT
- select CRYPTO_HASH
- help
- CRC32c and CRC32 CRC algorithms
-
- Architecture: mips
-
config CRYPTO_POLY1305_MIPS
tristate "Hash functions: Poly1305"
depends on MIPS
diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
index 5e4105cccf9f..fddc88281412 100644
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -3,8 +3,6 @@
# Makefile for MIPS crypto files..
#
-obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
-
obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
chacha-mips-y := chacha-core.o chacha-glue.o
AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
deleted file mode 100644
index 90eacf00cfc3..000000000000
--- a/arch/mips/crypto/crc32-mips.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * crc32-mips.c - CRC32 and CRC32C using optional MIPSr6 instructions
- *
- * Module based on arm64/crypto/crc32-arm.c
- *
- * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
- * Copyright (C) 2018 MIPS Tech, LLC
- */
-
-#include <linux/cpufeature.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <asm/mipsregs.h>
-#include <linux/unaligned.h>
-
-#include <crypto/internal/hash.h>
-
-enum crc_op_size {
- b, h, w, d,
-};
-
-enum crc_type {
- crc32,
- crc32c,
-};
-
-#ifndef TOOLCHAIN_SUPPORTS_CRC
-#define _ASM_SET_CRC(OP, SZ, TYPE) \
-_ASM_MACRO_3R(OP, rt, rs, rt2, \
- ".ifnc \\rt, \\rt2\n\t" \
- ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
- ".endif\n\t" \
- _ASM_INSN_IF_MIPS(0x7c00000f | (__rt << 16) | (__rs << 21) | \
- ((SZ) << 6) | ((TYPE) << 8)) \
- _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
- ((SZ) << 14) | ((TYPE) << 3)))
-#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
-#else /* !TOOLCHAIN_SUPPORTS_CRC */
-#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
-#define _ASM_UNSET_CRC(op, SZ, TYPE)
-#endif
-
-#define __CRC32(crc, value, op, SZ, TYPE) \
-do { \
- __asm__ __volatile__( \
- ".set push\n\t" \
- _ASM_SET_CRC(op, SZ, TYPE) \
- #op " %0, %1, %0\n\t" \
- _ASM_UNSET_CRC(op, SZ, TYPE) \
- ".set pop" \
- : "+r" (crc) \
- : "r" (value)); \
-} while (0)
-
-#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0)
-#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0)
-#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0)
-#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0)
-#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1)
-#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1)
-#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1)
-#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1)
-
-#define _CRC32(crc, value, size, op) \
- _CRC32_##op##size(crc, value)
-
-#define CRC32(crc, value, size) \
- _CRC32(crc, value, size, crc32)
-
-#define CRC32C(crc, value, size) \
- _CRC32(crc, value, size, crc32c)
-
-static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- if (IS_ENABLED(CONFIG_64BIT)) {
- for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32(crc, value, d);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
- } else {
- for (; len >= sizeof(u32); len -= sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32(crc, value, w);
- p += sizeof(u32);
- }
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32(crc, value, b);
- }
-
- return crc;
-}
-
-static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
-{
- u32 crc = crc_;
-
- if (IS_ENABLED(CONFIG_64BIT)) {
- for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
- u64 value = get_unaligned_le64(p);
-
- CRC32C(crc, value, d);
- }
-
- if (len & sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
- } else {
- for (; len >= sizeof(u32); len -= sizeof(u32)) {
- u32 value = get_unaligned_le32(p);
-
- CRC32C(crc, value, w);
- p += sizeof(u32);
- }
- }
-
- if (len & sizeof(u16)) {
- u16 value = get_unaligned_le16(p);
-
- CRC32C(crc, value, h);
- p += sizeof(u16);
- }
-
- if (len & sizeof(u8)) {
- u8 value = *p++;
-
- CRC32C(crc, value, b);
- }
- return crc;
-}
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-struct chksum_ctx {
- u32 key;
-};
-
-struct chksum_desc_ctx {
- u32 crc;
-};
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = mctx->key;
-
- return 0;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(mctx->key))
- return -EINVAL;
- mctx->key = get_unaligned_le32(key);
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32_mips_le_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksumc_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc32c_mips_le_hw(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(ctx->crc, out);
- return 0;
-}
-
-static int chksumc_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- put_unaligned_le32(~ctx->crc, out);
- return 0;
-}
-
-static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(crc32_mips_le_hw(crc, data, len), out);
- return 0;
-}
-
-static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
-{
- put_unaligned_le32(~crc32c_mips_le_hw(crc, data, len), out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksumc_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksumc_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksum_finup(mctx->key, data, length, out);
-}
-
-static int chksumc_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- return __chksumc_finup(mctx->key, data, length, out);
-}
-
-static int chksum_cra_init(struct crypto_tfm *tfm)
-{
- struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = ~0;
- return 0;
-}
-
-static struct shash_alg crc32_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-mips-hw",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksum_cra_init,
- }
-};
-
-static struct shash_alg crc32c_alg = {
- .digestsize = CHKSUM_DIGEST_SIZE,
- .setkey = chksum_setkey,
- .init = chksum_init,
- .update = chksumc_update,
- .final = chksumc_final,
- .finup = chksumc_finup,
- .digest = chksumc_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-mips-hw",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct chksum_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = chksum_cra_init,
- }
-};
-
-static int __init crc32_mod_init(void)
-{
- int err;
-
- err = crypto_register_shash(&crc32_alg);
-
- if (err)
- return err;
-
- err = crypto_register_shash(&crc32c_alg);
-
- if (err) {
- crypto_unregister_shash(&crc32_alg);
- return err;
- }
-
- return 0;
-}
-
-static void __exit crc32_mod_exit(void)
-{
- crypto_unregister_shash(&crc32_alg);
- crypto_unregister_shash(&crc32c_alg);
-}
-
-MODULE_AUTHOR("Marcin Nowakowski <marcin.nowakowski@mips.com");
-MODULE_DESCRIPTION("CRC32 and CRC32C using optional MIPS instructions");
-MODULE_LICENSE("GPL v2");
-
-module_cpu_feature_match(MIPS_CRC32, crc32_mod_init);
-module_exit(crc32_mod_exit);
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
index 9218b3ae3383..3a11ce85762b 100644
--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -128,10 +128,10 @@ struct irq_source_routing_table {
} __packed;
struct interface_info {
- u16 vers; /* version of the specificition */
- u16 size;
- u8 flag;
- char description[64];
+ u16 vers; /* version of the specification */
+ u16 size; /* size of this interface */
+ u8 flag; /* used or unused */
+ char description[64]; /* description for each change */
} __packed;
#define MAX_RESOURCE_NUMBER 128
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 3c6ddc0c2c7a..c025558754d5 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -2039,8 +2039,8 @@ do { \
#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
-#define read_c0_ecc() __read_32bit_c0_register($26, 0)
-#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
+#define read_c0_errctl() __read_32bit_c0_register($26, 0)
+#define write_c0_errctl(val) __write_32bit_c0_register($26, 0, val)
#define read_c0_derraddr0() __read_ulong_c0_register($26, 1)
#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index f4440edcd8fe..26c7a6ede983 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -15,7 +15,6 @@
#define __HAVE_ARCH_PMD_ALLOC_ONE
#define __HAVE_ARCH_PUD_ALLOC_ONE
-#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -49,14 +48,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern void pgd_init(void *addr);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- pagetable_free(virt_to_ptdesc(pgd));
-}
-
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index d39a2963b451..2a14dc4ee57e 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -103,7 +103,7 @@ void sb1480_clockevent_init(void)
BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
- sprintf(name, "bcm1480-counter-%d", cpu);
+ sprintf(name, "bcm1480-counter-%u", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 8c401e42301c..f39e85fd58fa 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
unsigned long sp, ip, tmp;
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index e3ff6179c99f..d99ed58b7043 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -60,6 +60,7 @@
.endm
__HEAD
+
#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 37676a44fefb..2ef610650a9e 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -122,9 +122,8 @@ void mips_mt_set_cpuoptions(void)
unsigned long ectlval;
unsigned long itcblkgrn;
- /* ErrCtl register is known as "ecc" to Linux */
- ectlval = read_c0_ecc();
- write_c0_ecc(ectlval | (0x1 << 26));
+ ectlval = read_c0_errctl();
+ write_c0_errctl(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
@@ -145,7 +144,7 @@ void mips_mt_set_cpuoptions(void)
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
- write_c0_ecc(ectlval);
+ write_c0_errctl(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 12a1a4ffb602..fbfe0771317e 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -704,10 +704,7 @@ static void __init resource_init(void)
for_each_mem_range(i, &start, &end) {
struct resource *res;
- res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
+ res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);
res->start = start;
/*
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 71c7e5e27567..dd31e3fffd24 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -26,10 +26,6 @@
#define ERRCTL_SPRAM (1 << 28)
-/* errctl access */
-#define read_c0_errctl(x) read_c0_ecc(x)
-#define write_c0_errctl(x) write_c0_ecc(x)
-
/*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index dc29bd9656b0..39e248d0ed59 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -38,6 +38,7 @@
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <linux/string_choices.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -1705,10 +1706,10 @@ static inline __init void parity_protection_init(void)
l2parity &= l1parity;
/* Probe L1 ECC support */
- cp0_ectl = read_c0_ecc();
- write_c0_ecc(cp0_ectl | ERRCTL_PE);
+ cp0_ectl = read_c0_errctl();
+ write_c0_errctl(cp0_ectl | ERRCTL_PE);
back_to_back_c0_hazard();
- cp0_ectl = read_c0_ecc();
+ cp0_ectl = read_c0_errctl();
/* Probe L2 ECC support */
gcr_ectl = read_gcr_err_control();
@@ -1727,9 +1728,9 @@ static inline __init void parity_protection_init(void)
cp0_ectl |= ERRCTL_PE;
else
cp0_ectl &= ~ERRCTL_PE;
- write_c0_ecc(cp0_ectl);
+ write_c0_errctl(cp0_ectl);
back_to_back_c0_hazard();
- WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
+ WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity);
/* Configure L2 ECC checking */
if (l2parity)
@@ -1741,8 +1742,8 @@ static inline __init void parity_protection_init(void)
gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
WARN_ON(!!gcr_ectl != l2parity);
- pr_info("Cache parity protection %sabled\n",
- l1parity ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(l1parity));
return;
}
@@ -1761,18 +1762,18 @@ static inline __init void parity_protection_init(void)
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
- errctl = read_c0_ecc();
+ errctl = read_c0_errctl();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
- write_c0_ecc(errctl | ERRCTL_PE);
+ write_c0_errctl(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
- l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+ l1parity_present = (read_c0_errctl() & ERRCTL_PE);
/* probe L2 parity support */
- write_c0_ecc(errctl|ERRCTL_L2P);
+ write_c0_errctl(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
- l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+ l2parity_present = (read_c0_errctl() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
@@ -1791,20 +1792,20 @@ static inline __init void parity_protection_init(void)
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
- write_c0_ecc(errctl);
+ write_c0_errctl(errctl);
back_to_back_c0_hazard();
- errctl = read_c0_ecc();
+ errctl = read_c0_errctl();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
- printk(KERN_INFO "Cache parity protection %sabled\n",
- (errctl & ERRCTL_PE) ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(errctl & ERRCTL_PE));
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
- printk(KERN_INFO "L2 cache parity protection %sabled\n",
- (errctl & ERRCTL_L2P) ? "en" : "dis");
+ pr_info("L2 cache parity protection %s\n",
+ str_enabled_disabled(errctl & ERRCTL_L2P));
}
}
break;
@@ -1812,11 +1813,11 @@ static inline __init void parity_protection_init(void)
case CPU_5KC:
case CPU_5KE:
case CPU_LOONGSON32:
- write_c0_ecc(0x80000000);
+ write_c0_errctl(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
- printk(KERN_INFO "Cache parity protection %sabled\n",
- (read_c0_ecc() & 0x80000000) ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(read_c0_errctl() & 0x80000000));
break;
case CPU_20KC:
case CPU_25KF:
@@ -1887,8 +1888,8 @@ asmlinkage void do_ftlb(void)
if ((cpu_has_mips_r2_r6) &&
(((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
- pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
- read_c0_ecc());
+ pr_err("FTLB error exception, cp0_errctl=0x%08x:\n",
+ read_c0_errctl());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
pr_err("c0_cacheerr == %08x\n", reg_val);
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 4c8e3c0aa210..75c9d3618f58 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -11,6 +11,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -97,11 +98,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return -EINTR;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ unsigned long unused;
+
/* Map delay slot emulation page */
- base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
- VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- 0, NULL);
+ base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused,
+ NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 5d5b993cbc2b..9c024e6d5e54 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -14,5 +14,7 @@ lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
+obj-$(CONFIG_CRC32_ARCH) += crc32-mips.o
+
# libgcc-style stuff needed in the kernel
obj-y += bswapsi.o bswapdi.o multi3.o
diff --git a/arch/mips/lib/crc32-mips.c b/arch/mips/lib/crc32-mips.c
new file mode 100644
index 000000000000..083e5d693a16
--- /dev/null
+++ b/arch/mips/lib/crc32-mips.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * crc32-mips.c - CRC32 and CRC32C using optional MIPSr6 instructions
+ *
+ * Module based on arm64/crypto/crc32-arm64.c
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ * Copyright (C) 2018 MIPS Tech, LLC
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/mipsregs.h>
+#include <linux/unaligned.h>
+
+enum crc_op_size {
+ b, h, w, d,
+};
+
+enum crc_type {
+ crc32,
+ crc32c,
+};
+
+#ifndef TOOLCHAIN_SUPPORTS_CRC
+#define _ASM_SET_CRC(OP, SZ, TYPE) \
+_ASM_MACRO_3R(OP, rt, rs, rt2, \
+ ".ifnc \\rt, \\rt2\n\t" \
+ ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
+ ".endif\n\t" \
+ _ASM_INSN_IF_MIPS(0x7c00000f | (__rt << 16) | (__rs << 21) | \
+ ((SZ) << 6) | ((TYPE) << 8)) \
+ _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
+ ((SZ) << 14) | ((TYPE) << 3)))
+#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
+#else /* !TOOLCHAIN_SUPPORTS_CRC */
+#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
+#define _ASM_UNSET_CRC(op, SZ, TYPE)
+#endif
+
+#define __CRC32(crc, value, op, SZ, TYPE) \
+do { \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ _ASM_SET_CRC(op, SZ, TYPE) \
+ #op " %0, %1, %0\n\t" \
+ _ASM_UNSET_CRC(op, SZ, TYPE) \
+ ".set pop" \
+ : "+r" (crc) \
+ : "r" (value)); \
+} while (0)
+
+#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0)
+#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0)
+#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0)
+#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0)
+#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1)
+#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1)
+#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1)
+#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1)
+
+#define _CRC32(crc, value, size, op) \
+ _CRC32_##op##size(crc, value)
+
+#define CRC32(crc, value, size) \
+ _CRC32(crc, value, size, crc32)
+
+#define CRC32C(crc, value, size) \
+ _CRC32(crc, value, size, crc32c)
+
+static DEFINE_STATIC_KEY_FALSE(have_crc32);
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32_le_base(crc, p, len);
+
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32(crc, value, d);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
+ } else {
+ for (; len >= sizeof(u32); len -= sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ }
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32(crc, value, b);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (!static_branch_likely(&have_crc32))
+ return crc32c_le_base(crc, p, len);
+
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32C(crc, value, d);
+ }
+
+ if (len & sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
+ } else {
+ for (; len >= sizeof(u32); len -= sizeof(u32)) {
+ u32 value = get_unaligned_le32(p);
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+ }
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32C(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32C(crc, value, b);
+ }
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_mips_init(void)
+{
+ if (cpu_have_feature(cpu_feature(MIPS_CRC32)))
+ static_branch_enable(&have_crc32);
+ return 0;
+}
+arch_initcall(crc32_mips_init);
+
+static void __exit crc32_mips_exit(void)
+{
+}
+module_exit(crc32_mips_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_crc32))
+ return CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Marcin Nowakowski <marcin.nowakowski@mips.com");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional MIPS instructions");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/mips/loongson64/boardinfo.c b/arch/mips/loongson64/boardinfo.c
index 280989c5a137..8bb275c93ac0 100644
--- a/arch/mips/loongson64/boardinfo.c
+++ b/arch/mips/loongson64/boardinfo.c
@@ -21,13 +21,11 @@ static ssize_t boardinfo_show(struct kobject *kobj,
"BIOS Info\n"
"Vendor\t\t\t: %s\n"
"Version\t\t\t: %s\n"
- "ROM Size\t\t: %d KB\n"
"Release Date\t\t: %s\n",
strsep(&tmp_board_manufacturer, "-"),
eboard->name,
strsep(&tmp_bios_vendor, "-"),
einter->description,
- einter->size,
especial->special_name);
}
static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
index 09ff05269861..be8d2ad10750 100644
--- a/arch/mips/loongson64/env.c
+++ b/arch/mips/loongson64/env.c
@@ -17,6 +17,7 @@
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/pci_ids.h>
+#include <linux/string_choices.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <boot_param.h>
@@ -162,7 +163,7 @@ void __init prom_lefi_init_env(void)
dma_default_coherent = !eirq_source->dma_noncoherent;
}
- pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
+ pr_info("Firmware: Coherent DMA: %s\n", str_on_off(dma_default_coherent));
loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 265bc57819df..c89e70df43d8 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1660,7 +1660,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
}
- case 0x3:
+ case 0x7:
if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 1506e458040d..10835414819f 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -10,12 +10,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *init, *ret = NULL;
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM,
- PGD_TABLE_ORDER);
+ pgd_t *init, *ret;
- if (ptdesc) {
- ret = ptdesc_address(ptdesc);
+ ret = __pgd_alloc(mm, PGD_TABLE_ORDER);
+ if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index ec2567f8efd8..66898fd182dc 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -29,6 +29,14 @@ static LIST_HEAD(controllers);
static int pci_initialized;
+unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ if (address > IO_SPACE_LIMIT)
+ return (unsigned long)-1;
+
+ return (unsigned long) address;
+}
+
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index ce6bb8e74271..12a536b7bfbd 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -30,7 +30,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 7c76e8a7447a..6470ed378782 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <asm/cpuinfo.h>
+#include <asm/pgalloc.h>
/* pteaddr:
* ptbase | vpn* | zero
@@ -54,7 +55,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ ret = __pgd_alloc(mm, 0);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild
index b0b0f2b03f87..70bdb24ff204 100644
--- a/arch/openrisc/Kbuild
+++ b/arch/openrisc/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += lib/ kernel/ mm/
-obj-y += boot/dts/
# for cleaning
subdir- += boot
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 3279ef457c57..b38fee299bc4 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -10,6 +10,7 @@ config OPENRISC
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select GENERIC_BUILTIN_DTB
select COMMON_CLK
select OF
select OF_EARLY_FLATTREE
@@ -26,6 +27,8 @@ config OPENRISC
select HAVE_PCI
select HAVE_UID16
select HAVE_PAGE_SIZE_8KB
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_SMP_IDLE_THREAD
@@ -92,7 +95,7 @@ config DCACHE_WRITETHROUGH
If unsure say N here
-config OPENRISC_BUILTIN_DTB
+config BUILTIN_DTB_NAME
string "Builtin DTB"
default ""
diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile
index 13db5a2aab52..3a66e0ef3985 100644
--- a/arch/openrisc/boot/dts/Makefile
+++ b/arch/openrisc/boot/dts/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += $(addsuffix .dtb.o, $(CONFIG_OPENRISC_BUILTIN_DTB))
+dtb-y += $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME))
#DTC_FLAGS ?= -p 1024
diff --git a/arch/openrisc/configs/or1klitex_defconfig b/arch/openrisc/configs/or1klitex_defconfig
index 466f31a091be..3e849d25838a 100644
--- a/arch/openrisc/configs/or1klitex_defconfig
+++ b/arch/openrisc/configs/or1klitex_defconfig
@@ -7,7 +7,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SGETMASK_SYSCALL=y
CONFIG_EXPERT=y
-CONFIG_OPENRISC_BUILTIN_DTB="or1klitex"
+CONFIG_BUILTIN_DTB_NAME="or1klitex"
CONFIG_HZ_100=y
CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y
CONFIG_NET=y
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
index 0116e465238f..59fe33cefba2 100644
--- a/arch/openrisc/configs/or1ksim_defconfig
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -14,7 +14,7 @@ CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
CONFIG_MODULES=y
# CONFIG_BLOCK is not set
-CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
+CONFIG_BUILTIN_DTB_NAME="or1ksim"
CONFIG_HZ_100=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig
index b990cb6c9309..6008e824d31c 100644
--- a/arch/openrisc/configs/simple_smp_defconfig
+++ b/arch/openrisc/configs/simple_smp_defconfig
@@ -20,7 +20,7 @@ CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
CONFIG_MODULES=y
# CONFIG_BLOCK is not set
-CONFIG_OPENRISC_BUILTIN_DTB="simple_smp"
+CONFIG_BUILTIN_DTB_NAME="simple_smp"
CONFIG_SMP=y
CONFIG_HZ_100=y
CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index c6a73772a546..3372f4e6ab4b 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -41,15 +41,13 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd_t *ret = __pgd_alloc(mm, 0);
- if (ret) {
- memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ if (ret)
memcpy(ret + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
return ret;
}
@@ -68,7 +66,7 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
index 1da3e66292e2..e5a282b67075 100644
--- a/arch/openrisc/include/asm/ptrace.h
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -17,6 +17,7 @@
#include <asm/spr_defs.h>
#include <uapi/asm/ptrace.h>
+#include <linux/compiler.h>
/*
* Make kernel PTrace/register structures opaque to userspace... userspace can
@@ -42,6 +43,36 @@ struct pt_regs {
/* Named registers */
long sr; /* Stored in place of r0 */
long sp; /* r1 */
+ long gpr2;
+ long gpr3;
+ long gpr4;
+ long gpr5;
+ long gpr6;
+ long gpr7;
+ long gpr8;
+ long gpr9;
+ long gpr10;
+ long gpr11;
+ long gpr12;
+ long gpr13;
+ long gpr14;
+ long gpr15;
+ long gpr16;
+ long gpr17;
+ long gpr18;
+ long gpr19;
+ long gpr20;
+ long gpr21;
+ long gpr22;
+ long gpr23;
+ long gpr24;
+ long gpr25;
+ long gpr26;
+ long gpr27;
+ long gpr28;
+ long gpr29;
+ long gpr30;
+ long gpr31;
};
struct {
/* Old style */
@@ -66,16 +97,56 @@ struct pt_regs {
/* TODO: Rename this to REDZONE because that's what it is */
#define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */
-#define instruction_pointer(regs) ((regs)->pc)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_gpr11)
+
+/* Helpers for working with the instruction pointer */
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs->pc;
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->pc = val;
+}
+
#define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0)
#define user_stack_pointer(regs) ((unsigned long)(regs)->sp)
#define profile_pc(regs) instruction_pointer(regs)
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs->sp;
+}
+
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->gpr[11];
}
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register stored @offset bytes
+ * into @regs; @offset is the register's offset within struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index ce6f2b08a35e..c7e90b09645e 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -714,6 +714,10 @@ _syscall_check_trace_leave:
* interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */
_syscall_check_work:
+#ifdef CONFIG_DEBUG_RSEQ
+ l.jal rseq_syscall
+ l.ori r3,r1,0
+#endif
/* Here we need to disable interrupts */
DISABLE_INTERRUPTS(r27,r29)
TRACE_IRQS_OFF
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index 5091b18eab4c..8430570d0620 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -160,6 +160,102 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
* in exit.c or in signal.c.
*/
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(gpr2),
+ REG_OFFSET_NAME(gpr3),
+ REG_OFFSET_NAME(gpr4),
+ REG_OFFSET_NAME(gpr5),
+ REG_OFFSET_NAME(gpr6),
+ REG_OFFSET_NAME(gpr7),
+ REG_OFFSET_NAME(gpr8),
+ REG_OFFSET_NAME(gpr9),
+ REG_OFFSET_NAME(gpr10),
+ REG_OFFSET_NAME(gpr11),
+ REG_OFFSET_NAME(gpr12),
+ REG_OFFSET_NAME(gpr13),
+ REG_OFFSET_NAME(gpr14),
+ REG_OFFSET_NAME(gpr15),
+ REG_OFFSET_NAME(gpr16),
+ REG_OFFSET_NAME(gpr17),
+ REG_OFFSET_NAME(gpr18),
+ REG_OFFSET_NAME(gpr19),
+ REG_OFFSET_NAME(gpr20),
+ REG_OFFSET_NAME(gpr21),
+ REG_OFFSET_NAME(gpr22),
+ REG_OFFSET_NAME(gpr23),
+ REG_OFFSET_NAME(gpr24),
+ REG_OFFSET_NAME(gpr25),
+ REG_OFFSET_NAME(gpr26),
+ REG_OFFSET_NAME(gpr27),
+ REG_OFFSET_NAME(gpr28),
+ REG_OFFSET_NAME(gpr29),
+ REG_OFFSET_NAME(gpr30),
+ REG_OFFSET_NAME(gpr31),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(orig_gpr11),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
/*
* Called by kernel/ptrace.c when detaching..
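Together with the ptrace.h additions, these give openrisc the regs_query_register_offset()/regs_get_register()/regs_get_kernel_stack_nth() API expected by HAVE_REGS_AND_STACK_ACCESS_API users such as kprobes. A hypothetical consumer (illustration only, not part of the patch):

	/* Read gpr3 by name and peek at the top of the kernel stack. */
	static unsigned long example_read_gpr3(struct pt_regs *regs)
	{
		int off = regs_query_register_offset("gpr3");

		if (off < 0)
			return 0;

		pr_debug("sp[0] = %lx\n", regs_get_kernel_stack_nth(regs, 0));
		return regs_get_register(regs, off);
	}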
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index c7ab42e2cb7a..f70a13ee0593 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -244,6 +244,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
int ret;
+ rseq_signal_deliver(ksig, regs);
+
ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index f59ea4c10b0f..8e63e86251ca 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -38,10 +38,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
if (likely(mem_init_done)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
} else {
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
return pte;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index aa6a3cad275d..fcc5973f7519 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -60,8 +60,8 @@ config PARISC
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ # select HAVE_ARCH_JUMP_LABEL
+ # select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KFENCE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index a63190af2f05..3143cf29ce27 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -135,12 +135,8 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
#define pci_iounmap pci_iounmap
-void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
-void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
-#define memset_io memset_io
#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
/* Port-space IO */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index e3e142b1c5c5..2ca74a56415c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -11,27 +11,12 @@
#include <asm/cache.h>
#define __HAVE_ARCH_PMD_ALLOC_ONE
-#define __HAVE_ARCH_PMD_FREE
-#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
/* Allocate the top level pgd (page directory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd;
-
- pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
- if (unlikely(pgd == NULL))
- return NULL;
-
- memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);
-
- return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
+ return __pgd_alloc(mm, PGD_TABLE_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3
@@ -46,17 +31,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd;
-
- pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER);
- if (likely(pmd))
- memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER);
- return pmd;
-}
+ struct ptdesc *ptdesc;
+ gfp_t gfp = GFP_PGTABLE_USER;
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- free_pages((unsigned long)pmd, PMD_TABLE_ORDER);
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER);
+ if (!ptdesc)
+ return NULL;
+ if (!pagetable_pmd_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
+ }
+ return ptdesc_address(ptdesc);
}
#endif
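The mips, nios2, openrisc and parisc hunks above all convert pgd_alloc() to the generic __pgd_alloc(mm, order) helper. Judging from the open-coded versions being deleted, the helper presumably allocates a zeroed table of the requested order through the ptdesc API, roughly along these lines (a sketch, not the asm-generic implementation):

	/* Sketch of what __pgd_alloc(mm, order) is expected to do. */
	static inline pgd_t *__pgd_alloc_sketch(struct mm_struct *mm, unsigned int order)
	{
		gfp_t gfp = (mm == &init_mm) ? GFP_PGTABLE_KERNEL : GFP_PGTABLE_USER;
		struct ptdesc *ptdesc = pagetable_alloc(gfp & ~__GFP_HIGHMEM, order);

		/* GFP_PGTABLE_* includes __GFP_ZERO, so the table comes back zeroed. */
		return ptdesc ? ptdesc_address(ptdesc) : NULL;
	}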
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c1587aa35beb..1c366b0d3134 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -43,9 +43,7 @@ EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
-EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
-EXPORT_SYMBOL(memset_io);
extern void $$divI(void);
extern void $$divU(void);
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 2b36d25ada6e..288f8b85978f 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -33,7 +33,7 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name)
obj-y += vdso32_wrapper.o
-extra-y += vdso32.lds
+targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index bd87bd6a6659..bc5d9553f311 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -32,7 +32,7 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name)
obj-y += vdso64_wrapper.o
-extra-y += vdso64.lds
+targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c
index 7c00496b47d4..7461366a65c9 100644
--- a/arch/parisc/lib/io.c
+++ b/arch/parisc/lib/io.c
@@ -12,32 +12,6 @@
#include <linux/module.h>
#include <asm/io.h>
-/* Copies a block of memory to a device in an efficient manner.
- * Assumes the device can cope with 32-bit transfers. If it can't,
- * don't use this function.
- */
-void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
-{
- if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
- goto bytecopy;
- while ((unsigned long)dst & 3) {
- writeb(*(char *)src, dst++);
- src++;
- count--;
- }
- while (count > 3) {
- __raw_writel(*(u32 *)src, dst);
- src += 4;
- dst += 4;
- count -= 4;
- }
- bytecopy:
- while (count--) {
- writeb(*(char *)src, dst++);
- src++;
- }
-}
-
/*
** Copies a block of memory from a device in an efficient manner.
** Assumes the device can cope with 32-bit transfers. If it can't,
@@ -99,27 +73,6 @@ void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
}
}
-/* Sets a block of memory on a device to a given value.
- * Assumes the device can cope with 32-bit transfers. If it can't,
- * don't use this function.
- */
-void memset_io(volatile void __iomem *addr, unsigned char val, int count)
-{
- u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
- while ((unsigned long)addr & 3) {
- writeb(val, addr++);
- count--;
- }
- while (count > 3) {
- __raw_writel(val32, addr);
- addr += 4;
- count -= 4;
- }
- while (count--) {
- writeb(val, addr++);
- }
-}
-
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 96970fa75e4a..61c0a2477072 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -377,10 +377,8 @@ static void __ref map_pages(unsigned long start_vaddr,
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
PAGE_SIZE << PMD_TABLE_ORDER);
- if (!pmd)
- panic("pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
}
#endif
@@ -388,9 +386,7 @@ static void __ref map_pages(unsigned long start_vaddr,
pmd = pmd_offset(pud, vaddr);
for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
if (pmd_none(*pmd)) {
- pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pg_table)
- panic("page table allocation failed\n");
+ pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(NULL, pmd, pg_table);
}
@@ -648,9 +644,7 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("zero page allocation failed.\n");
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
@@ -687,19 +681,15 @@ static void __init fixmap_init(void)
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
PAGE_SIZE << PMD_TABLE_ORDER);
- if (!pmd)
- panic("fixmap: pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
}
#endif
pmd = pmd_offset(pud, addr);
do {
- pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("fixmap: pte allocation failed.\n");
+ pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 34b965fa45ac..424f188e62d9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -127,6 +127,8 @@ config PPC
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_COPY_MC if PPC64
+ select ARCH_HAS_CRC32 if PPC64 && ALTIVEC
+ select ARCH_HAS_CRC_T10DIF if PPC64 && ALTIVEC
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index ee84ade7a033..6b6d7467fecf 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -320,8 +320,6 @@ CONFIG_XMON=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_CRC32C_VPMSUM=m
-CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1_PPC=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index f39c0d000c43..465eb96c755e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -389,9 +389,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRYPTO_CRC32C_VPMSUM=m
-CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m
-CONFIG_CRYPTO_VPMSUM_TESTER=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_AES_GCM_P10=m
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 951a43726461..5b315e9756b3 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -13,39 +13,6 @@ config CRYPTO_CURVE25519_PPC64
Architecture: PowerPC64
- Little-endian
-config CRYPTO_CRC32C_VPMSUM
- tristate "CRC32c"
- depends on PPC64 && ALTIVEC
- select CRYPTO_HASH
- select CRC32
- help
- CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
-
- Architecture: powerpc64 using
- - AltiVec extensions
-
- Enable on POWER8 and newer processors for improved performance.
-
-config CRYPTO_CRCT10DIF_VPMSUM
- tristate "CRC32T10DIF"
- depends on PPC64 && ALTIVEC && CRC_T10DIF
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- Architecture: powerpc64 using
- - AltiVec extensions
-
- Enable on POWER8 and newer processors for improved performance.
-
-config CRYPTO_VPMSUM_TESTER
- tristate "CRC32c and CRC32T10DIF hardware acceleration tester"
- depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
- help
- Stress test for CRC32c and CRCT10DIF algorithms implemented with
- powerpc64 AltiVec extensions (POWER8 vpmsum instructions).
- Unless you are testing these algorithms, you don't need this.
-
config CRYPTO_MD5_PPC
tristate "Digests: MD5"
depends on PPC
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 59808592f0a1..9b38f4a7bc15 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -10,9 +10,6 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
-obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
-obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
@@ -24,8 +21,6 @@ md5-ppc-y := md5-asm.o md5-glue.o
sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
-crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
-crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
index f37b3d13fc53..679f52794baf 100644
--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -214,7 +214,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
- struct scatter_walk assoc_sg_walk;
struct skcipher_walk walk;
u8 *assocmem = NULL;
u8 *assoc;
@@ -234,8 +233,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
- scatterwalk_start(&assoc_sg_walk, req->src);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
} else {
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -253,10 +251,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
vsx_end();
- if (!assocmem)
- scatterwalk_unmap(assoc);
- else
- kfree(assocmem);
+ kfree(assocmem);
if (enc)
ret = skcipher_walk_aead_encrypt(&walk, req, false);
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
deleted file mode 100644
index c61a874a3a5c..000000000000
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CRC vpmsum tester
- * Copyright 2017 Daniel Axtens, IBM Corporation.
- */
-
-#include <linux/crc-t10dif.h>
-#include <linux/crc32.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/cpufeature.h>
-#include <asm/switch_to.h>
-
-static unsigned long iterations = 10000;
-
-#define MAX_CRC_LENGTH 65535
-
-
-static int __init crc_test_init(void)
-{
- u16 crc16 = 0, verify16 = 0;
- __le32 verify32le = 0;
- unsigned char *data;
- u32 verify32 = 0;
- unsigned long i;
- __le32 crc32;
- int ret;
-
- struct crypto_shash *crct10dif_tfm;
- struct crypto_shash *crc32c_tfm;
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
-
- if (IS_ERR(crct10dif_tfm)) {
- pr_err("Error allocating crc-t10dif\n");
- goto free_buf;
- }
-
- crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0);
-
- if (IS_ERR(crc32c_tfm)) {
- pr_err("Error allocating crc32c\n");
- goto free_16;
- }
-
- do {
- SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm);
- SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm);
-
- crct10dif_shash->tfm = crct10dif_tfm;
- ret = crypto_shash_init(crct10dif_shash);
-
- if (ret) {
- pr_err("Error initing crc-t10dif\n");
- goto free_32;
- }
-
-
- crc32c_shash->tfm = crc32c_tfm;
- ret = crypto_shash_init(crc32c_shash);
-
- if (ret) {
- pr_err("Error initing crc32c\n");
- goto free_32;
- }
-
- pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
- for (i=0; i<iterations; i++) {
- size_t offset = get_random_u32_below(16);
- size_t len = get_random_u32_below(MAX_CRC_LENGTH);
-
- if (len <= offset)
- continue;
- get_random_bytes(data, len);
- len -= offset;
-
- crypto_shash_update(crct10dif_shash, data+offset, len);
- crypto_shash_final(crct10dif_shash, (u8 *)(&crc16));
- verify16 = crc_t10dif_generic(verify16, data+offset, len);
-
-
- if (crc16 != verify16) {
- pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len %lu)\n",
- crc16, verify16, len);
- break;
- }
-
- crypto_shash_update(crc32c_shash, data+offset, len);
- crypto_shash_final(crc32c_shash, (u8 *)(&crc32));
- verify32 = le32_to_cpu(verify32le);
- verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len));
- if (crc32 != verify32le) {
- pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n",
- crc32, verify32, len);
- break;
- }
- cond_resched();
- }
- pr_info("crc-vpmsum_test done, completed %lu iterations\n", i);
- } while (0);
-
-free_32:
- crypto_free_shash(crc32c_tfm);
-
-free_16:
- crypto_free_shash(crct10dif_tfm);
-
-free_buf:
- kfree(data);
-
- return 0;
-}
-
-static void __exit crc_test_exit(void) {}
-
-module_init(crc_test_init);
-module_exit(crc_test_exit);
-module_param(iterations, long, 0400);
-
-MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
-MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester");
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
deleted file mode 100644
index 63760b7dbb76..000000000000
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/crc32.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/cpufeature.h>
-#include <asm/simd.h>
-#include <asm/switch_to.h>
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-#define VMX_ALIGN 16
-#define VMX_ALIGN_MASK (VMX_ALIGN-1)
-
-#define VECTOR_BREAKPOINT 512
-
-u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len);
-
-static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
-{
- unsigned int prealign;
- unsigned int tail;
-
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
- return __crc32c_le(crc, p, len);
-
- if ((unsigned long)p & VMX_ALIGN_MASK) {
- prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
- crc = __crc32c_le(crc, p, prealign);
- len -= prealign;
- p += prealign;
- }
-
- if (len & ~VMX_ALIGN_MASK) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
- disable_kernel_altivec();
- pagefault_enable();
- preempt_enable();
- }
-
- tail = len & VMX_ALIGN_MASK;
- if (tail) {
- p += len & ~VMX_ALIGN_MASK;
- crc = __crc32c_le(crc, p, tail);
- }
-
- return crc;
-}
-
-static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
-
- return 0;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32c_vpmsum_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = *mctx;
-
- return 0;
-}
-
-static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = crc32c_vpmsum(*crcp, data, len);
-
- return 0;
-}
-
-static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len));
-
- return 0;
-}
-
-static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *(__le32 *)out = ~cpu_to_le32p(crcp);
-
- return 0;
-}
-
-static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-
-static struct shash_alg alg = {
- .setkey = crc32c_vpmsum_setkey,
- .init = crc32c_vpmsum_init,
- .update = crc32c_vpmsum_update,
- .final = crc32c_vpmsum_final,
- .finup = crc32c_vpmsum_finup,
- .digest = crc32c_vpmsum_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-vpmsum",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = crc32c_vpmsum_cra_init,
- }
-};
-
-static int __init crc32c_vpmsum_mod_init(void)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc32c_vpmsum_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
-module_exit(crc32c_vpmsum_mod_fini);
-
-MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
-MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crc32c");
-MODULE_ALIAS_CRYPTO("crc32c-vpmsum");
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 1ca7d4c4b90d..2058e8d3e013 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -37,6 +37,7 @@ extern void tlb_flush(struct mmu_gather *tlb);
*/
#define tlb_needs_table_invalidate() radix_enabled()
+#define __HAVE_ARCH_TLB_REMOVE_TABLE
/* Get the generic bits... */
#include <asm-generic/tlb.h>
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 1bee15c013e7..3af6c06af02f 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -1087,12 +1087,10 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
- if (!dt_cpu_features)
- panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
- __func__,
- sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
- PAGE_SIZE);
+ dt_cpu_features =
+ memblock_alloc_or_panic(
+ sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+ PAGE_SIZE);
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 30b56c67fa61..e527cd3ef128 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -97,7 +97,7 @@ void power4_idle(void)
/*
* Register the sysctl to set/clear powersave_nap.
*/
-static struct ctl_table powersave_nap_ctl_table[] = {
+static const struct ctl_table powersave_nap_ctl_table[] = {
{
.procname = "powersave-nap",
.data = &powersave_nap,
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 45dac7b46aa3..34a5aec4908f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -369,6 +369,24 @@ static void dedotify_versions(struct modversion_info *vers,
}
}
+/* Same as normal versions, remove a leading dot if present. */
+static void dedotify_ext_version_names(char *str_seq, unsigned long size)
+{
+ unsigned long out = 0;
+ unsigned long in;
+ char last = '\0';
+
+ for (in = 0; in < size; in++) {
+ /* Skip one leading dot */
+ if (last == '\0' && str_seq[in] == '.')
+ in++;
+ last = str_seq[in];
+ str_seq[out++] = last;
+ }
+ /* Zero the trailing portion of the names table for robustness */
+ memset(&str_seq[out], 0, size - out);
+}
+
/*
* Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
* seem to be defined (value set later).
@@ -438,10 +456,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
me->arch.toc_section = i;
if (sechdrs[i].sh_addralign < 8)
sechdrs[i].sh_addralign = 8;
- }
- else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
+ } else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
+ else if (strcmp(secstrings + sechdrs[i].sh_name, "__version_ext_names") == 0)
+ dedotify_ext_version_names((void *)hdr + sechdrs[i].sh_offset,
+ sechdrs[i].sh_size);
if (sechdrs[i].sh_type == SHT_SYMTAB)
dedotify((void *)hdr + sechdrs[i].sh_offset,
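The effect of dedotify_ext_version_names() on the nul-separated name table is easiest to see on a synthetic buffer (illustration only, not part of the patch):

	/* Each name loses one leading dot; the freed-up tail is zeroed. */
	static void example_dedotify_ext_version_names(void)
	{
		char names[] = ".memcpy\0.strlen\0";

		dedotify_ext_version_names(names, sizeof(names));
		/* names now holds "memcpy\0strlen\0" followed by zero padding */
	}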
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index ce0c8623e563..f8a3bd8cfae4 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -213,11 +213,8 @@ pci_create_OF_bus_map(void)
struct property* of_prop;
struct device_node *dn;
- of_prop = memblock_alloc(sizeof(struct property) + 256,
+ of_prop = memblock_alloc_or_panic(sizeof(struct property) + 256,
SMP_CACHE_BYTES);
- if (!of_prop)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct property) + 256);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f7d7a93f07fc..a08b0ede4e64 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -458,11 +458,8 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
+ cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
- if (!cpu_to_phys_id)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 75dbf3e0d9c4..5a1bf501fbe1 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -140,13 +140,7 @@ arch_initcall(ppc_init);
static void *__init alloc_stack(void)
{
- void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
-
- if (!ptr)
- panic("cannot allocate %d bytes for stack at %pS\n",
- THREAD_SIZE, (void *)_RET_IP_);
-
- return ptr;
+ return memblock_alloc_or_panic(THREAD_SIZE, THREAD_ALIGN);
}
void __init irqstack_early_init(void)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 25429905ae90..86bff159c51e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* states are synchronized from L0 to L1. L1 needs to inform L0 about
* MER=1 only when there are pending external interrupts.
* In the above if check, MER bit is set if there are pending
- * external interrupts. Hence, explicity mask off MER bit
+ * external interrupts. Hence, explicitly mask off MER bit
* here as otherwise it may generate spurious interrupts in L2 KVM
* causing an endless loop, which results in L2 guest getting hung.
*/
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index f14ecab674a3..dd8a4b52a0cc 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -78,4 +78,10 @@ CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
# Enable <altivec.h>
CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
+obj-$(CONFIG_CRC32_ARCH) += crc32-powerpc.o
+crc32-powerpc-y := crc32-glue.o crc32c-vpmsum_asm.o
+
+obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-powerpc.o
+crc-t10dif-powerpc-y := crc-t10dif-glue.o crct10dif-vpmsum_asm.o
+
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/lib/crc-t10dif-glue.c
index 1dc8b6915178..730850dbc51d 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
+++ b/arch/powerpc/lib/crc-t10dif-glue.c
@@ -7,7 +7,6 @@
*/
#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -22,15 +21,18 @@
#define VECTOR_BREAKPOINT 64
+static DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
+
u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);
-static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
+u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
{
unsigned int prealign;
unsigned int tail;
u32 crc = crci;
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
+ !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
@@ -60,67 +62,28 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
return crc & 0xffff;
}
+EXPORT_SYMBOL(crc_t10dif_arch);
-static int crct10dif_vpmsum_init(struct shash_desc *desc)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
- return 0;
-}
-
-static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *crc = crct10dif_vpmsum(*crc, data, length);
-
- return 0;
-}
-
-
-static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out)
+static int __init crc_t10dif_powerpc_init(void)
{
- u16 *crcp = shash_desc_ctx(desc);
-
- *(u16 *)out = *crcp;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
+ static_branch_enable(&have_vec_crypto);
return 0;
}
+arch_initcall(crc_t10dif_powerpc_init);
-static struct shash_alg alg = {
- .init = crct10dif_vpmsum_init,
- .update = crct10dif_vpmsum_update,
- .final = crct10dif_vpmsum_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .base = {
- .cra_name = "crct10dif",
- .cra_driver_name = "crct10dif-vpmsum",
- .cra_priority = 200,
- .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crct10dif_vpmsum_mod_init(void)
+static void __exit crc_t10dif_powerpc_exit(void)
{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return crypto_register_shash(&alg);
}
+module_exit(crc_t10dif_powerpc_exit);
-static void __exit crct10dif_vpmsum_mod_fini(void)
+bool crc_t10dif_is_optimized(void)
{
- crypto_unregister_shash(&alg);
+ return static_key_enabled(&have_vec_crypto);
}
-
-module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init);
-module_exit(crct10dif_vpmsum_mod_fini);
+EXPORT_SYMBOL(crc_t10dif_is_optimized);
MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-vpmsum");
diff --git a/arch/powerpc/lib/crc32-glue.c b/arch/powerpc/lib/crc32-glue.c
new file mode 100644
index 000000000000..79cc954f499f
--- /dev/null
+++ b/arch/powerpc/lib/crc32-glue.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/crc32.h>
+#include <crypto/internal/simd.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+#define VMX_ALIGN 16
+#define VMX_ALIGN_MASK (VMX_ALIGN-1)
+
+#define VECTOR_BREAKPOINT 512
+
+static DEFINE_STATIC_KEY_FALSE(have_vec_crypto);
+
+u32 __crc32c_vpmsum(u32 crc, const u8 *p, size_t len);
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_le_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ unsigned int prealign;
+ unsigned int tail;
+
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
+ !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+ return crc32c_le_base(crc, p, len);
+
+ if ((unsigned long)p & VMX_ALIGN_MASK) {
+ prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
+ crc = crc32c_le_base(crc, p, prealign);
+ len -= prealign;
+ p += prealign;
+ }
+
+ if (len & ~VMX_ALIGN_MASK) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+ disable_kernel_altivec();
+ pagefault_enable();
+ preempt_enable();
+ }
+
+ tail = len & VMX_ALIGN_MASK;
+ if (tail) {
+ p += len & ~VMX_ALIGN_MASK;
+ crc = crc32c_le_base(crc, p, tail);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_powerpc_init(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
+ static_branch_enable(&have_vec_crypto);
+ return 0;
+}
+arch_initcall(crc32_powerpc_init);
+
+static void __exit crc32_powerpc_exit(void)
+{
+}
+module_exit(crc32_powerpc_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_vec_crypto))
+ return CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
+MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/lib/crc32-vpmsum_core.S
index b0f87f595b26..b0f87f595b26 100644
--- a/arch/powerpc/crypto/crc32-vpmsum_core.S
+++ b/arch/powerpc/lib/crc32-vpmsum_core.S
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/lib/crc32c-vpmsum_asm.S
index bf442004ea1f..bf442004ea1f 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_asm.S
+++ b/arch/powerpc/lib/crc32c-vpmsum_asm.S
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S b/arch/powerpc/lib/crct10dif-vpmsum_asm.S
index f0b93a0fe168..f0b93a0fe168 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S
+++ b/arch/powerpc/lib/crct10dif-vpmsum_asm.S
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 6978344edcb4..be9c4106e22f 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -377,10 +377,7 @@ void __init MMU_init_hw(void)
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
- Hash = memblock_alloc(Hash_size, Hash_size);
- if (!Hash)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, Hash_size, Hash_size);
+ Hash = memblock_alloc_or_panic(Hash_size, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 1715b07c630c..4e1e45420bd4 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -253,7 +253,7 @@ static void pmd_frag_destroy(void *pmd_frag)
count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 374542528080..ce64abea9e3e 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -330,11 +330,7 @@ void __init mmu_partition_table_init(void)
unsigned long ptcr;
/* Initialize the Partition Table with no entries */
- partition_tb = memblock_alloc(patb_size, patb_size);
- if (!partition_tb)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, patb_size, patb_size);
-
+ partition_tb = memblock_alloc_or_panic(patb_size, patb_size);
ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
set_ptcr_when_no_uv(ptcr);
powernv_set_nmmu_ptcr(ptcr);
@@ -477,7 +473,7 @@ void pmd_fragment_free(unsigned long *pmd)
BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
index 43c03b84ff32..60c78aac0f63 100644
--- a/arch/powerpc/mm/kasan/init_book3e_64.c
+++ b/arch/powerpc/mm/kasan/init_book3e_64.c
@@ -40,19 +40,19 @@ static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgpr
pgdp = pgd_offset_k(ea);
p4dp = p4d_offset(pgdp, ea);
if (kasan_pud_table(*p4dp)) {
- pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+ pudp = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE);
p4d_populate(&init_mm, p4dp, pudp);
}
pudp = pud_offset(p4dp, ea);
if (kasan_pmd_table(*pudp)) {
- pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
+ pmdp = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE);
pud_populate(&init_mm, pudp, pmdp);
}
pmdp = pmd_offset(pudp, ea);
if (kasan_pte_table(*pmdp)) {
- ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
+ ptep = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
@@ -74,7 +74,7 @@ static void __init kasan_init_phys_region(void *start, void *end)
k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
- va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE);
for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
}
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
index 3fb5ce4f48f4..7d959544c077 100644
--- a/arch/powerpc/mm/kasan/init_book3s_64.c
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -32,7 +32,7 @@ static void __init kasan_init_phys_region(void *start, void *end)
k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
- va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE);
for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
}
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index 0b181da40ddb..a1a4e697251a 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -385,21 +385,11 @@ void __init mmu_context_init(void)
/*
* Allocate the maps used by context management
*/
- context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!context_map)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
- context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
+ context_map = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ context_mm = memblock_alloc_or_panic(sizeof(void *) * (LAST_CONTEXT + 1),
SMP_CACHE_BYTES);
- if (!context_mm)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(void *) * (LAST_CONTEXT + 1));
if (IS_ENABLED(CONFIG_SMP)) {
- stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!stale_map[boot_cpuid])
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
-
+ stale_map[boot_cpuid] = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index e89f64a0f24a..713268ccb1a0 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag)
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
@@ -111,7 +111,7 @@ static void pte_free_now(struct rcu_head *head)
struct ptdesc *ptdesc;
ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 787b22206386..15276068f657 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -50,13 +50,8 @@ notrace void __init early_ioremap_init(void)
void __init *early_alloc_pgtable(unsigned long size)
{
- void *ptr = memblock_alloc(size, size);
+ return memblock_alloc_or_panic(size, size);
- if (!ptr)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, size);
-
- return ptr;
}
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index fe2e0249cbc2..a112d26185a0 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -514,10 +514,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
- nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
- if (!nvram_image)
- panic("%s: Failed to allocate %u bytes\n", __func__,
- NVRAM_SIZE);
+ nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 877720c64515..4ac9808e55a4 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -88,26 +88,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
}
}
-static void memtrace_clear_range(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- unsigned long pfn;
-
- /* As HIGHMEM does not apply, use clear_page() directly. */
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
- cond_resched();
- clear_page(__va(PFN_PHYS(pfn)));
- }
- /*
- * Before we go ahead and use this range as cache inhibited range
- * flush the cache.
- */
- flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
- (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
- FLUSH_CHUNK_SIZE);
-}
-
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
const unsigned long nr_pages = PHYS_PFN(size);
@@ -119,17 +99,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
* by alloc_contig_pages().
*/
page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
- __GFP_NOWARN, nid, NULL);
+ __GFP_NOWARN | __GFP_ZERO, nid, NULL);
if (!page)
return 0;
start_pfn = page_to_pfn(page);
/*
- * Clear the range while we still have a linear mapping.
- *
- * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
+ * Before we go ahead and use this range as a cache-inhibited range,
+ * flush the cache.
*/
- memtrace_clear_range(start_pfn, nr_pages);
+ flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
+ (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
+ FLUSH_CHUNK_SIZE);
/*
* Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 5d0f35bb917e..9ec265fcaff4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
/*
* Allocate a buffer to hold the MC recoverable ranges.
*/
- mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
- if (!mc_recoverable_range)
- panic("%s: Failed to allocate %u bytes align=0x%lx\n",
- __func__, size, __alignof__(u64));
+ mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64));
for (i = 0; i < mc_recoverable_range_len; i++) {
mc_recoverable_range[i].start_addr =
@@ -818,7 +815,7 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name,
sysfs_bin_attr_init(attr);
attr->attr.name = name;
attr->attr.mode = 0400;
- attr->read = sysfs_bin_attr_simple_read;
+ attr->read_new = sysfs_bin_attr_simple_read;
attr->private = __va(vals[0]);
attr->size = vals[1];
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 5144f11359f7..150c09b58ae8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p)
if (!p->size)
return;
- p->address = memblock_alloc(p->size, p->align);
- if (!p->address)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, p->size, p->align);
+ p->address = memblock_alloc_or_panic(p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1893f66371fa..b12ef382fec7 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
switch(rets[0]) {
case 0:
- result = EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE;
+ result = EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED;
break;
case 1:
result = EEH_STATE_RESET_ACTIVE |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 29f1a0cc59cd..ae6f7a235d8b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -2208,6 +2208,9 @@ static long spapr_tce_unset_window(struct iommu_table_group *table_group, int nu
const char *win_name;
int ret = -ENODEV;
+ if (!tbl) /* The table was never created OR window was never opened */
+ return 0;
+
mutex_lock(&dma_win_init_mutex);
if ((num == 0) && is_default_window_table(table_group, tbl))
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 1798f0f14d58..62bd8e2d5d4c 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -53,7 +53,7 @@ struct update_props_workarea {
static unsigned int nmi_wd_lpm_factor = 200;
#ifdef CONFIG_SYSCTL
-static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
+static const struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
{
.procname = "nmi_wd_lpm_factor",
.data = &nmi_wd_lpm_factor,
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f84ac9fbe203..f7c9271bda58 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p)
/* Jiffies offset for which the health data is assumed to be same */
cache_timeout = p->lasthealth_jiffies +
- msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+ secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL);
/* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
if (time_after(jiffies, cache_timeout))
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 1aa0cb097c9c..7b9a5ea9cad9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -75,7 +75,7 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
cascade_virq = msi_data->cascade_array[srs]->virq;
- seq_printf(p, " fsl-msi-%d", cascade_virq);
+ seq_printf(p, "fsl-msi-%d", cascade_virq);
}
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 0b6e37f3ffb8..456a4f64ae0a 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -124,10 +124,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
if (bmp->bitmap_from_slab)
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
- bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!bmp->bitmap)
- panic("%s: Failed to allocate %u bytes\n", __func__,
- size);
+ bmp->bitmap = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c736e349f222..7612c52e9b1e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -24,6 +24,7 @@ config RISCV
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CRC32 if RISCV_ISA_ZBC
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 2acc7d876e1f..e318119d570d 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -119,4 +119,15 @@ config ERRATA_THEAD_PMU
If you don't know what to do here, say "Y".
+config ERRATA_THEAD_GHOSTWRITE
+ bool "Apply T-Head Ghostwrite errata"
+ depends on ERRATA_THEAD && RISCV_ISA_XTHEADVECTOR
+ default y
+ help
+ The T-Head C9xx cores have a vulnerability in the xtheadvector
+ instruction set. When this erratum is enabled, the CPUs will be probed
+ to determine whether they are vulnerable and, if so, xtheadvector will be
+ disabled.
+
+ If you don't know what to do here, say "Y".
+
endmenu # "CPU errata selection"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index f51bb24bc84c..1916cf7ba450 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -24,6 +24,11 @@ config ARCH_SOPHGO
help
This enables support for Sophgo SoC platform hardware.
+config ARCH_SPACEMIT
+ bool "SpacemiT SoCs"
+ help
+ This enables support for SpacemiT SoC platform hardware.
+
config ARCH_STARFIVE
def_bool SOC_STARFIVE
diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor
index 6f1cdd32ed29..b096548fe0ff 100644
--- a/arch/riscv/Kconfig.vendor
+++ b/arch/riscv/Kconfig.vendor
@@ -16,4 +16,30 @@ config RISCV_ISA_VENDOR_EXT_ANDES
If you don't know what to do here, say Y.
endmenu
+menu "T-Head"
+config RISCV_ISA_VENDOR_EXT_THEAD
+ bool "T-Head vendor extension support"
+ select RISCV_ISA_VENDOR_EXT
+ default y
+ help
+ Say N here to disable detection of and support for all T-Head vendor
+ extensions. Without this option enabled, T-Head vendor extensions will
+ not be detected at boot and their presence will not be reported to userspace.
+
+ If you don't know what to do here, say Y.
+
+config RISCV_ISA_XTHEADVECTOR
+ bool "xtheadvector extension support"
+ depends on RISCV_ISA_VENDOR_EXT_THEAD
+ depends on RISCV_ISA_V
+ depends on FPU
+ default y
+ help
+ Say N here if you want to disable all xtheadvector-related procedures
+ in the kernel. This will disable vector support for any T-Head board that
+ implements xtheadvector rather than the standard vector extension.
+
+ If you don't know what to do here, say Y.
+endmenu
+
endmenu
diff --git a/arch/riscv/Makefile.postlink b/arch/riscv/Makefile.postlink
index 829b9abc91f6..6b0580949b6a 100644
--- a/arch/riscv/Makefile.postlink
+++ b/arch/riscv/Makefile.postlink
@@ -10,6 +10,7 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
quiet_cmd_relocs_check = CHKREL $@
cmd_relocs_check = \
@@ -19,11 +20,6 @@ ifdef CONFIG_RELOCATABLE
quiet_cmd_cp_vmlinux_relocs = CPREL vmlinux.relocs
cmd_cp_vmlinux_relocs = cp vmlinux vmlinux.relocs
-quiet_cmd_relocs_strip = STRIPREL $@
-cmd_relocs_strip = $(OBJCOPY) --remove-section='.rel.*' \
- --remove-section='.rel__*' \
- --remove-section='.rela.*' \
- --remove-section='.rela__*' $@
endif
# `@true` prevents complaint when there is nothing to be done
@@ -33,7 +29,7 @@ vmlinux: FORCE
ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
$(call if_changed,cp_vmlinux_relocs)
- $(call if_changed,relocs_strip)
+ $(call if_changed,strip_relocs)
endif
clean:
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index fdae05bbf556..bff887d38abe 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -5,6 +5,7 @@ subdir-y += microchip
subdir-y += renesas
subdir-y += sifive
subdir-y += sophgo
+subdir-y += spacemit
subdir-y += starfive
subdir-y += thead
diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
index 64c3c2e6cbe0..6367112e614a 100644
--- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
@@ -27,7 +27,8 @@
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
- "zifencei", "zihpm";
+ "zifencei", "zihpm", "xtheadvector";
+ thead,vlenb = <128>;
#cooling-cells = <2>;
cpu0_intc: interrupt-controller {
diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile
new file mode 100644
index 000000000000..ac617319a574
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_SPACEMIT) += k1-bananapi-f3.dtb
diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
new file mode 100644
index 000000000000..1d617b40a2d5
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
+ */
+
+#include "k1.dtsi"
+#include "k1-pinctrl.dtsi"
+
+/ {
+ model = "Banana Pi BPI-F3";
+ compatible = "bananapi,bpi-f3", "spacemit,k1";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_2_cfg>;
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
new file mode 100644
index 000000000000..a8eac5517f85
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (c) 2024 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+#define K1_PADCONF(pin, func) (((pin) << 16) | (func))
+
+&pinctrl {
+ uart0_2_cfg: uart0-2-cfg {
+ uart0-2-pins {
+ pinmux = <K1_PADCONF(68, 2)>,
+ <K1_PADCONF(69, 2)>;
+
+ bias-pull-up = <0>;
+ drive-strength = <32>;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/spacemit/k1.dtsi b/arch/riscv/boot/dts/spacemit/k1.dtsi
new file mode 100644
index 000000000000..c670ebf8fa12
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1.dtsi
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
+ */
+
+/dts-v1/;
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "SpacemiT K1";
+ compatible = "spacemit,k1";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <24000000>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu_0>;
+ };
+ core1 {
+ cpu = <&cpu_1>;
+ };
+ core2 {
+ cpu = <&cpu_2>;
+ };
+ core3 {
+ cpu = <&cpu_3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu_4>;
+ };
+ core1 {
+ cpu = <&cpu_5>;
+ };
+ core2 {
+ cpu = <&cpu_6>;
+ };
+ core3 {
+ cpu = <&cpu_7>;
+ };
+ };
+ };
+
+ cpu_0: cpu@0 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <0>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_1: cpu@1 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <1>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu1_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_2: cpu@2 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <2>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu2_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_3: cpu@3 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <3>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu3_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_4: cpu@4 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <4>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu4_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_5: cpu@5 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <5>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu5_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_6: cpu@6 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <6>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu6_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_7: cpu@7 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <7>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu7_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cluster0_l2_cache: l2-cache0 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <524288>;
+ cache-sets = <512>;
+ cache-unified;
+ };
+
+ cluster1_l2_cache: l2-cache1 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <524288>;
+ cache-sets = <512>;
+ cache-unified;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&plic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-noncoherent;
+ ranges;
+
+ uart0: serial@d4017000 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017000 0x0 0x100>;
+ interrupts = <42>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart2: serial@d4017100 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017100 0x0 0x100>;
+ interrupts = <44>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@d4017200 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017200 0x0 0x100>;
+ interrupts = <45>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart4: serial@d4017300 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017300 0x0 0x100>;
+ interrupts = <46>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart5: serial@d4017400 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017400 0x0 0x100>;
+ interrupts = <47>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart6: serial@d4017500 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017500 0x0 0x100>;
+ interrupts = <48>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart7: serial@d4017600 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017600 0x0 0x100>;
+ interrupts = <49>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart8: serial@d4017700 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017700 0x0 0x100>;
+ interrupts = <50>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart9: serial@d4017800 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017800 0x0 0x100>;
+ interrupts = <51>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ pinctrl: pinctrl@d401e000 {
+ compatible = "spacemit,k1-pinctrl";
+ reg = <0x0 0xd401e000 0x0 0x400>;
+ };
+
+ plic: interrupt-controller@e0000000 {
+ compatible = "spacemit,k1-plic", "sifive,plic-1.0.0";
+ reg = <0x0 0xe0000000 0x0 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>,
+ <&cpu2_intc 11>, <&cpu2_intc 9>,
+ <&cpu3_intc 11>, <&cpu3_intc 9>,
+ <&cpu4_intc 11>, <&cpu4_intc 9>,
+ <&cpu5_intc 11>, <&cpu5_intc 9>,
+ <&cpu6_intc 11>, <&cpu6_intc 9>,
+ <&cpu7_intc 11>, <&cpu7_intc 9>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ riscv,ndev = <159>;
+ };
+
+ clint: timer@e4000000 {
+ compatible = "spacemit,k1-clint", "sifive,clint0";
+ reg = <0x0 0xe4000000 0x0 0x10000>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>,
+ <&cpu1_intc 3>, <&cpu1_intc 7>,
+ <&cpu2_intc 3>, <&cpu2_intc 7>,
+ <&cpu3_intc 3>, <&cpu3_intc 7>,
+ <&cpu4_intc 3>, <&cpu4_intc 7>,
+ <&cpu5_intc 3>, <&cpu5_intc 7>,
+ <&cpu6_intc 3>, <&cpu6_intc 7>,
+ <&cpu7_intc 3>, <&cpu7_intc 7>;
+ };
+
+ sec_uart1: serial@f0612000 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xf0612000 0x0 0x100>;
+ interrupts = <43>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "reserved"; /* for TEE usage */
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
index 0d248b671d4b..3bd62ab78523 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
@@ -53,7 +53,23 @@
status = "okay";
};
+&sysgpio {
+ usb0_pins: usb0-0 {
+ vbus-pins {
+ pinmux = <GPIOMUX(25, GPOUT_SYS_USB_DRIVE_VBUS,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+};
+
&usb0 {
- dr_mode = "peripheral";
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
status = "okay";
};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
index fe4a490ecc61..b764d4d92fd9 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
@@ -80,7 +80,23 @@
status = "okay";
};
+&sysgpio {
+ usb0_pins: usb0-0 {
+ vbus-pins {
+ pinmux = <GPIOMUX(25, GPOUT_SYS_USB_DRIVE_VBUS,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+};
+
&usb0 {
- dr_mode = "peripheral";
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
status = "okay";
};
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index acfe030e803a..527336417765 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -599,6 +599,22 @@
status = "disabled";
};
+ mbox_910t: mailbox@ffffc38000 {
+ compatible = "thead,th1520-mbox";
+ reg = <0xff 0xffc38000 0x0 0x6000>,
+ <0xff 0xffc40000 0x0 0x6000>,
+ <0xff 0xffc4c000 0x0 0x2000>,
+ <0xff 0xffc54000 0x0 0x2000>;
+ reg-names = "local", "remote-icu0", "remote-icu1", "remote-icu2";
+ clocks = <&clk CLK_MBOX0>, <&clk CLK_MBOX1>, <&clk CLK_MBOX2>,
+ <&clk CLK_MBOX3>;
+ clock-names = "clk-local", "clk-remote-icu0", "clk-remote-icu1",
+ "clk-remote-icu2";
+ interrupt-parent = <&plic>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
gpio@fffff41000 {
compatible = "snps,dw-apb-gpio";
reg = <0xff 0xfff41000 0x0 0x1000>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b4a37345703e..0f7dcbe3c45b 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -10,7 +10,6 @@ CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -30,6 +29,7 @@ CONFIG_ARCH_MICROCHIP=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_SIFIVE=y
CONFIG_ARCH_SOPHGO=y
+CONFIG_ARCH_SPACEMIT=y
CONFIG_SOC_STARFIVE=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_THEAD=y
@@ -167,6 +167,7 @@ CONFIG_PINCTRL_SOPHGO_CV1800B=y
CONFIG_PINCTRL_SOPHGO_CV1812H=y
CONFIG_PINCTRL_SOPHGO_SG2000=y
CONFIG_PINCTRL_SOPHGO_SG2002=y
+CONFIG_PINCTRL_TH1520=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_SIFIVE=y
CONFIG_POWER_RESET_GPIO_RESTART=y
@@ -242,6 +243,7 @@ CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=y
+CONFIG_DWMAC_THEAD=m
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index e24770a77932..0b942183f708 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -10,6 +10,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/dma-noncoherent.h>
@@ -142,6 +143,31 @@ static bool errata_probe_pmu(unsigned int stage,
return true;
}
+static bool errata_probe_ghostwrite(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
+{
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_GHOSTWRITE))
+ return false;
+
+ /*
+ * target-c9xx cores report arch_id and impid as 0
+ *
+ * While ghostwrite may not affect all c9xx cores that implement
+ * xtheadvector, there is no further granularity than c9xx. Assume
+ * vulnerable for this entire class of processors when xtheadvector is
+ * enabled.
+ */
+ if (arch_id != 0 || impid != 0)
+ return false;
+
+ if (stage != RISCV_ALTERNATIVES_EARLY_BOOT)
+ return false;
+
+ ghostwrite_set_vulnerable();
+
+ return true;
+}
+
static u32 thead_errata_probe(unsigned int stage,
unsigned long archid, unsigned long impid)
{
@@ -155,6 +181,8 @@ static u32 thead_errata_probe(unsigned int stage,
if (errata_probe_pmu(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+ errata_probe_ghostwrite(stage, archid, impid);
+
return cpu_req_errata;
}
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index fae152ea0508..c6bd3d8354a9 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -228,7 +228,7 @@ legacy:
*
* This operation may be reordered on other architectures than x86.
*/
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
*
* This operation can be reordered on other architectures other than x86.
*/
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
arch_clear_bit_unlock(nr, addr);
}
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
diff --git a/arch/riscv/include/asm/bugs.h b/arch/riscv/include/asm/bugs.h
new file mode 100644
index 000000000000..17ca0a947730
--- /dev/null
+++ b/arch/riscv/include/asm/bugs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for riscv vulnerabilities.
+ *
+ * Copyright (C) 2024 Rivos Inc.
+ */
+
+#ifndef __ASM_BUGS_H
+#define __ASM_BUGS_H
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ UNAFFECTED,
+ MITIGATED,
+ VULNERABLE,
+};
+
+void ghostwrite_set_vulnerable(void);
+bool ghostwrite_enable_mitigation(void);
+enum mitigation_state ghostwrite_get_state(void);
+
+#endif /* __ASM_BUGS_H */
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 4bd054c54c21..569140d6e639 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -34,6 +34,8 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
+extern u32 thead_vlenb_of;
+
void __init riscv_user_isa_enable(void);
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 37bdea65bbd8..6fed42e37705 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -30,6 +30,12 @@
#define SR_VS_CLEAN _AC(0x00000400, UL)
#define SR_VS_DIRTY _AC(0x00000600, UL)
+#define SR_VS_THEAD _AC(0x01800000, UL) /* xtheadvector Status */
+#define SR_VS_OFF_THEAD _AC(0x00000000, UL)
+#define SR_VS_INITIAL_THEAD _AC(0x00800000, UL)
+#define SR_VS_CLEAN_THEAD _AC(0x01000000, UL)
+#define SR_VS_DIRTY_THEAD _AC(0x01800000, UL)
+
#define SR_XS _AC(0x00018000, UL) /* Extension Status */
#define SR_XS_OFF _AC(0x00000000, UL)
#define SR_XS_INITIAL _AC(0x00008000, UL)
@@ -315,6 +321,15 @@
#define CSR_STIMECMP 0x14D
#define CSR_STIMECMPH 0x15D
+/* xtheadvector symbolic CSR names */
+#define CSR_VXSAT 0x9
+#define CSR_VXRM 0xa
+
+/* xtheadvector CSR masks */
+#define CSR_VXRM_MASK 3
+#define CSR_VXRM_SHIFT 1
+#define CSR_VXSAT_MASK 1
+
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 7c8a71a526a3..6e426ed7919a 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -25,7 +25,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_GHOSTWRITE 2
+#define ERRATA_THEAD_NUMBER 3
#endif
#ifdef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index fc8130f995c1..72be100afa23 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -85,7 +85,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__enable_user_access();
__asm__ __volatile__ (
- "1: lr.w.aqrl %[v],%[u] \n"
+ "1: lr.w %[v],%[u] \n"
" bne %[v],%z[ov],3f \n"
"2: sc.w.aqrl %[t],%z[nv],%[u] \n"
" bnez %[t],1b \n"
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 1ce1df6d0ff3..dd624523981c 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _ASM_HWPROBE_H
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 10
+#define RISCV_HWPROBE_MAX_KEY 11
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
@@ -21,6 +21,7 @@ static inline bool hwprobe_key_is_bitmask(__s64 key)
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
case RISCV_HWPROBE_KEY_IMA_EXT_0:
case RISCV_HWPROBE_KEY_CPUPERF_0:
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
return true;
}
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 35eab6e0f4ae..cc33e35cd628 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -87,6 +87,11 @@ struct kvm_vcpu_stat {
u64 csr_exit_kernel;
u64 signal_exits;
u64 exits;
+ u64 instr_illegal_exits;
+ u64 load_misaligned_exits;
+ u64 store_misaligned_exits;
+ u64 load_access_exits;
+ u64 store_access_exits;
};
struct kvm_arch_memory_slot {
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index b96705258cf9..4ed6203cdd30 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -85,6 +85,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index f52264304f77..3e2aebea6312 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -12,16 +12,25 @@
#include <asm/tlb.h>
#ifdef CONFIG_MMU
-#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
- if (riscv_use_sbi_for_rfence())
+ if (riscv_use_sbi_for_rfence()) {
tlb_remove_ptdesc(tlb, pt);
- else
+ } else {
+ pagetable_dtor(pt);
tlb_remove_page_ptdesc(tlb, pt);
+ }
}
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -88,15 +97,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
}
}
-#define pud_alloc_one pud_alloc_one
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l4_enabled)
- return __pud_alloc_one(mm, addr);
-
- return NULL;
-}
-
#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
@@ -107,39 +107,8 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
- if (pgtable_l4_enabled) {
- struct ptdesc *ptdesc = virt_to_ptdesc(pud);
-
- pagetable_pud_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
- }
-}
-
-#define p4d_alloc_one p4d_alloc_one
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l5_enabled) {
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
- }
-
- return NULL;
-}
-
-static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
-#define p4d_free p4d_free
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (pgtable_l5_enabled)
- __p4d_free(mm, p4d);
+ if (pgtable_l4_enabled)
+ riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
@@ -161,9 +130,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd = __pgd_alloc(mm, 0);
if (likely(pgd != NULL)) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */
sync_kernel_mappings(pgd);
}
@@ -175,10 +143,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
-
- pagetable_pmd_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -186,10 +151,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- struct ptdesc *ptdesc = page_ptdesc(pte);
-
- pagetable_pte_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 94e33216b2d9..0e71eb82f920 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -117,7 +117,7 @@ do { \
__set_prev_cpu(__prev->thread); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
- if (has_vector()) \
+ if (has_vector() || has_xtheadvector()) \
__switch_to_vector(__prev, __next); \
if (switch_to_should_flush_icache(__next)) \
local_flush_icache_all(); \
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1f6c38420d8e..50b63b5c15bd 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,24 +10,6 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
-#ifdef CONFIG_MMU
-#include <linux/swap.h>
-
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
-{
- free_page_and_swap_cache(table);
-}
-
-#endif /* CONFIG_MMU */
-
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c7c023afbacd..e8a83f55be2b 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -18,6 +18,27 @@
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>
+#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
+
+#define __riscv_v_vstate_or(_val, TYPE) ({ \
+ typeof(_val) _res = _val; \
+ if (has_xtheadvector()) \
+ _res = (_res & ~SR_VS_THEAD) | SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = (_res & ~SR_VS) | SR_VS_##TYPE; \
+ _res; \
+})
+
+#define __riscv_v_vstate_check(_val, TYPE) ({ \
+ bool _res; \
+ if (has_xtheadvector()) \
+ _res = ((_val) & SR_VS_THEAD) == SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = ((_val) & SR_VS) == SR_VS_##TYPE; \
+ _res; \
+})
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
@@ -41,39 +62,62 @@ static __always_inline bool has_vector(void)
return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}
+static __always_inline bool has_xtheadvector_no_alternatives(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_isa_vendor_extension_available(THEAD_VENDOR_ID, XTHEADVECTOR);
+ else
+ return false;
+}
+
+static __always_inline bool has_xtheadvector(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_has_vendor_extension_unlikely(THEAD_VENDOR_ID,
+ RISCV_ISA_VENDOR_EXT_XTHEADVECTOR);
+ else
+ return false;
+}
+
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
+ regs->status = __riscv_v_vstate_or(regs->status, CLEAN);
}
static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+ regs->status = __riscv_v_vstate_or(regs->status, DIRTY);
}
static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
+ regs->status = __riscv_v_vstate_or(regs->status, OFF);
}
static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
+ regs->status = __riscv_v_vstate_or(regs->status, INITIAL);
}
static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
- return (regs->status & SR_VS) != 0;
+ return !__riscv_v_vstate_check(regs->status, OFF);
}
static __always_inline void riscv_v_enable(void)
{
- csr_set(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_set(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_set(CSR_SSTATUS, SR_VS);
}
static __always_inline void riscv_v_disable(void)
{
- csr_clear(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_clear(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_clear(CSR_SSTATUS, SR_VS);
}
static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
@@ -82,10 +126,36 @@ static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
"csrr %0, " __stringify(CSR_VSTART) "\n\t"
"csrr %1, " __stringify(CSR_VTYPE) "\n\t"
"csrr %2, " __stringify(CSR_VL) "\n\t"
- "csrr %3, " __stringify(CSR_VCSR) "\n\t"
- "csrr %4, " __stringify(CSR_VLENB) "\n\t"
: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
- "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
+ "=r" (dest->vcsr) : :);
+
+ if (has_xtheadvector()) {
+ unsigned long status;
+
+ /*
+ * CSR_VCSR is defined as
+ * [2:1] - vxrm[1:0]
+ * [0] - vxsat
+ * The earlier vector spec implemented by T-Head uses separate
+ * registers for the same bit-elements, so just combine those
+ * into the existing output field.
+ *
+ * Additionally, T-Head cores need FS to be enabled when accessing
+ * the VXRM and VXSAT CSRs, otherwise the accesses result in illegal
+ * instruction exceptions, even though the cores do not implement the
+ * VXRM and VXSAT fields in the FCSR CSR that vector-0.7.1 specifies.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+ dest->vcsr = csr_read(CSR_VXSAT) | csr_read(CSR_VXRM) << CSR_VXRM_SHIFT;
+
+ dest->vlenb = riscv_v_vsize / 32;
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ dest->vcsr = csr_read(CSR_VCSR);
+ dest->vlenb = csr_read(CSR_VLENB);
+ }
}
static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
@@ -96,9 +166,25 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src
"vsetvl x0, %2, %1\n\t"
".option pop\n\t"
"csrw " __stringify(CSR_VSTART) ", %0\n\t"
- "csrw " __stringify(CSR_VCSR) ", %3\n\t"
- : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
- "r" (src->vcsr) :);
+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl));
+
+ if (has_xtheadvector()) {
+ unsigned long status = csr_read(CSR_SSTATUS);
+
+ /*
+ * Similar to __vstate_csr_save above, restore values for the
+ * separate VXRM and VXSAT CSRs from the vcsr variable.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+
+ csr_write(CSR_VXRM, (src->vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK);
+ csr_write(CSR_VXSAT, src->vcsr & CSR_VXSAT_MASK);
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ csr_write(CSR_VCSR, src->vcsr);
+ }
}
static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
@@ -108,19 +194,33 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
riscv_v_enable();
__vstate_csr_save(save_to);
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vse8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
riscv_v_disable();
}
@@ -130,19 +230,33 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
unsigned long vl;
riscv_v_enable();
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vle8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vle8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
__vstate_csr_restore(restore_from);
riscv_v_disable();
}
@@ -152,33 +266,41 @@ static inline void __riscv_v_vstate_discard(void)
unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
riscv_v_enable();
+ if (has_xtheadvector())
+ asm volatile (THEAD_VSETVLI_T4X0E8M8D1 : : : "t4");
+ else
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ ".option pop\n\t": "=&r" (vl));
+
asm volatile (
".option push\n\t"
".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
"vmv.v.i v0, -1\n\t"
"vmv.v.i v8, -1\n\t"
"vmv.v.i v16, -1\n\t"
"vmv.v.i v24, -1\n\t"
"vsetvl %0, x0, %1\n\t"
".option pop\n\t"
- : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ : "=&r" (vl) : "r" (vtype_inval));
+
riscv_v_disable();
}
static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_OFF)
- return;
-
- __riscv_v_vstate_discard();
- __riscv_v_vstate_dirty(regs);
+ if (riscv_v_vstate_query(regs)) {
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+ }
}
static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_DIRTY) {
+ if (__riscv_v_vstate_check(regs->status, DIRTY)) {
__riscv_v_vstate_save(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -187,7 +309,7 @@ static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
__riscv_v_vstate_restore(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -196,7 +318,7 @@ static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_set_restore(struct task_struct *task,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
riscv_v_vstate_on(regs);
}
@@ -270,6 +392,8 @@ struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
+static __always_inline bool has_xtheadvector_no_alternatives(void) { return false; }
+static __always_inline bool has_xtheadvector(void) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
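Note on the __vstate_csr_save()/__vstate_csr_restore() hunks above: on the T-Head path the separate VXRM and VXSAT CSRs are folded into the single vcsr field of struct __riscv_v_ext_state, using the bit layout quoted in the comment (bits [2:1] = vxrm, bit [0] = vxsat). A minimal sketch of that packing, assuming the CSR_VXRM_SHIFT/CSR_VXRM_MASK/CSR_VXSAT_MASK constants are 1, 3 and 1 as the layout suggests:

	/* Illustrative helpers only; the kernel uses the CSR_VXRM and CSR_VXSAT shift/mask macros. */
	static unsigned long pack_vcsr(unsigned long vxrm, unsigned long vxsat)
	{
		return (vxsat & 0x1UL) | ((vxrm & 0x3UL) << 1);
	}

	static void unpack_vcsr(unsigned long vcsr, unsigned long *vxrm, unsigned long *vxsat)
	{
		*vxrm  = (vcsr >> 1) & 0x3UL;
		*vxsat = vcsr & 0x1UL;
	}

__vstate_csr_save() performs the equivalent of pack_vcsr() on the values read from CSR_VXRM/CSR_VXSAT, and __vstate_csr_restore() the equivalent of unpack_vcsr() before writing them back.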
diff --git a/arch/riscv/include/asm/vendor_extensions/thead.h b/arch/riscv/include/asm/vendor_extensions/thead.h
new file mode 100644
index 000000000000..e85c75b3b340
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+
+#include <asm/vendor_extensions.h>
+
+#include <linux/types.h>
+
+/*
+ * Extension keys must be strictly less than RISCV_ISA_VENDOR_EXT_MAX.
+ */
+#define RISCV_ISA_VENDOR_EXT_XTHEADVECTOR 0
+
+extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead;
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void disable_xtheadvector(void);
+#else
+static inline void disable_xtheadvector(void) { }
+#endif
+
+/* Extension specific helpers */
+
+/*
+ * Vector 0.7.1, as used for example on T-Head Xuantie cores, uses an older
+ * encoding for vsetvli (ta, ma vs. d1), so provide an instruction for
+ * vsetvli t4, x0, e8, m8, d1
+ */
+#define THEAD_VSETVLI_T4X0E8M8D1 ".long 0x00307ed7\n\t"
+
+/*
+ * While, in theory, the vector-0.7.1 vsb.v and vlb.v result in the same
+ * encoding as the standard vse8.v and vle8.v, compilers seem to optimize
+ * the call, resulting in a different encoding that uses a value for
+ * the "mop" field which is not part of vector-0.7.1.
+ * So encode specific variants for vstate_save and _restore.
+ */
+#define THEAD_VSB_V_V0T0 ".long 0x02028027\n\t"
+#define THEAD_VSB_V_V8T0 ".long 0x02028427\n\t"
+#define THEAD_VSB_V_V16T0 ".long 0x02028827\n\t"
+#define THEAD_VSB_V_V24T0 ".long 0x02028c27\n\t"
+#define THEAD_VLB_V_V0T0 ".long 0x012028007\n\t"
+#define THEAD_VLB_V_V8T0 ".long 0x012028407\n\t"
+#define THEAD_VLB_V_V16T0 ".long 0x012028807\n\t"
+#define THEAD_VLB_V_V24T0 ".long 0x012028c07\n\t"
+
+#endif
diff --git a/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
new file mode 100644
index 000000000000..65a9c5612466
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+
+#include <linux/cpumask.h>
+
+#include <uapi/asm/hwprobe.h>
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus);
+#else
+static inline void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ pair->value = 0;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
new file mode 100644
index 000000000000..6b9293e984a9
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Rivos, Inc
+ */
+
+#ifndef _ASM_RISCV_SYS_HWPROBE_H
+#define _ASM_RISCV_SYS_HWPROBE_H
+
+#include <asm/cpufeature.h>
+
+#define VENDOR_EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_VENDOR_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ } while (false)
+
+/*
+ * Loop through and record extensions that 1) anyone has, and 2) anyone
+ * doesn't have.
+ *
+ * _extension_checks is an arbitrary C block to set the values of pair->value
+ * and missing. It should be filled with VENDOR_EXT_KEY expressions.
+ */
+#define VENDOR_EXTENSION_SUPPORTED(pair, cpus, per_hart_vendor_bitmap, _extension_checks) \
+ do { \
+ int cpu; \
+ u64 missing = 0; \
+ for_each_cpu(cpu, (cpus)) { \
+ struct riscv_isavendorinfo *isainfo = &(per_hart_vendor_bitmap)[cpu]; \
+ _extension_checks \
+ } \
+ (pair)->value &= ~missing; \
+ } while (false) \
+
+#endif /* _ASM_RISCV_SYS_HWPROBE_H */
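For reference, the two macros above expand (approximately) to the loop below for the T-Head user added further down in this series (thead_hwprobe.c); only the function name is made up here, everything else follows the macro bodies:

	/* Hand-expanded sketch of VENDOR_EXTENSION_SUPPORTED() + VENDOR_EXT_KEY(XTHEADVECTOR). */
	static void thead_0_expanded(struct riscv_hwprobe *pair, const struct cpumask *cpus)
	{
		int cpu;
		u64 missing = 0;

		for_each_cpu(cpu, cpus) {
			struct riscv_isavendorinfo *isainfo =
				&riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap[cpu];

			if (__riscv_isa_extension_available(isainfo->isa,
							    RISCV_ISA_VENDOR_EXT_XTHEADVECTOR))
				pair->value |= RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR;
			else
				missing |= RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR;
		}

		/* A bit is only reported when every requested CPU has the extension. */
		pair->value &= ~missing;
	}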
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 2f2bb0c84f9a..a5150cdf34d8 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -6,6 +6,7 @@
#define ASM_VENDOR_LIST_H
#define ANDES_VENDOR_ID 0x31e
+#define MICROCHIP_VENDOR_ID 0x029
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 3af142b99f77..c3c1cc951cb9 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _UAPI_ASM_HWPROBE_H
@@ -94,6 +94,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
+#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 3482c9a73d1b..f06bc5efcd79 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -179,6 +179,9 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SSNPM,
KVM_RISCV_ISA_EXT_SVADE,
KVM_RISCV_ISA_EXT_SVADU,
+ KVM_RISCV_ISA_EXT_SVVPTC,
+ KVM_RISCV_ISA_EXT_ZABHA,
+ KVM_RISCV_ISA_EXT_ZICCRSE,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -198,6 +201,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
+ KVM_RISCV_SBI_EXT_SUSP,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -211,9 +215,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24
diff --git a/arch/riscv/include/uapi/asm/vendor/thead.h b/arch/riscv/include/uapi/asm/vendor/thead.h
new file mode 100644
index 000000000000..43790ebe5faf
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/vendor/thead.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR (1 << 0)
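A hedged userspace sketch of how this new key/bit pair could be queried; the raw syscall form is shown, and the argument order (pairs, pair_count, cpusetsize, cpus, flags) and the __NR_riscv_hwprobe number are assumed from the existing hwprobe ABI rather than defined by this patch:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>       /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */
	#include <asm/vendor/thead.h>  /* RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR */

	int main(void)
	{
		struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 };

		/* cpusetsize == 0 / cpus == NULL means "all online CPUs". */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0)
			return 1;

		if (pair.key == -1)
			puts("kernel does not support this key");
		else if (pair.value & RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR)
			puts("xtheadvector present on all harts");
		else
			puts("xtheadvector not present");
		return 0;
	}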
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 063d1faf5a53..8d186bfced45 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -123,3 +123,5 @@ obj-$(CONFIG_COMPAT) += compat_vdso/
obj-$(CONFIG_64BIT) += pi/
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
+
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += bugs.o
diff --git a/arch/riscv/kernel/bugs.c b/arch/riscv/kernel/bugs.c
new file mode 100644
index 000000000000..3655fe7d678c
--- /dev/null
+++ b/arch/riscv/kernel/bugs.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Rivos Inc.
+ */
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/sprintf.h>
+
+#include <asm/bugs.h>
+#include <asm/vendor_extensions/thead.h>
+
+static enum mitigation_state ghostwrite_state;
+
+void ghostwrite_set_vulnerable(void)
+{
+ ghostwrite_state = VULNERABLE;
+}
+
+/*
+ * Vendor extension alternatives will use the value set at the time of boot
+ * alternative patching, thus this must be called before boot alternatives are
+ * patched (and after extension probing) to be effective.
+ *
+ * Returns true if mitigated, false otherwise.
+ */
+bool ghostwrite_enable_mitigation(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR) &&
+ ghostwrite_state == VULNERABLE && !cpu_mitigations_off()) {
+ disable_xtheadvector();
+ ghostwrite_state = MITIGATED;
+ return true;
+ }
+
+ return false;
+}
+
+enum mitigation_state ghostwrite_get_state(void)
+{
+ return ghostwrite_state;
+}
+
+ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR)) {
+ switch (ghostwrite_state) {
+ case UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case MITIGATED:
+ return sprintf(buf, "Mitigation: xtheadvector disabled\n");
+ case VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+ } else {
+ return sprintf(buf, "Not affected\n");
+ }
+}
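The mitigation state chosen here surfaces through the generic CPU vulnerabilities interface; a small userspace sketch, assuming the attribute ends up under the usual /sys/devices/system/cpu/vulnerabilities/ path with the name "ghostwrite" (the registration itself is not part of this hunk):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/ghostwrite", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);  /* "Not affected", "Vulnerable" or "Mitigation: xtheadvector disabled" */
		fclose(f);
		return 0;
	}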
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index c0916ed318c2..c6ba750536c3 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
@@ -26,6 +27,7 @@
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
@@ -39,6 +41,8 @@ static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];
+u32 thead_vlenb_of;
+
/**
* riscv_isa_extension_base() - Get base extension word
*
@@ -791,9 +795,50 @@ static void __init riscv_fill_vendor_ext_list(int cpu)
}
}
+static int has_thead_homogeneous_vlenb(void)
+{
+ int cpu;
+ u32 prev_vlenb = 0;
+ u32 vlenb;
+
+ /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node) {
+ pr_warn("Unable to find cpu node\n");
+ return -ENOENT;
+ }
+
+ if (of_property_read_u32(cpu_node, "thead,vlenb", &vlenb)) {
+ of_node_put(cpu_node);
+
+ if (prev_vlenb)
+ return -ENOENT;
+ continue;
+ }
+
+ if (prev_vlenb && vlenb != prev_vlenb) {
+ of_node_put(cpu_node);
+ return -ENOENT;
+ }
+
+ prev_vlenb = vlenb;
+ of_node_put(cpu_node);
+ }
+
+ thead_vlenb_of = vlenb;
+ return 0;
+}
+
static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
unsigned int cpu;
+ bool mitigated;
for_each_possible_cpu(cpu) {
unsigned long this_hwcap = 0;
@@ -844,6 +889,17 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
riscv_fill_vendor_ext_list(cpu);
}
+ /*
+ * Execute ghostwrite mitigation immediately after detecting extensions
+ * to disable xtheadvector if necessary.
+ */
+ mitigated = ghostwrite_enable_mitigation();
+
+ if (!mitigated && has_xtheadvector_no_alternatives() && has_thead_homogeneous_vlenb() < 0) {
+ pr_warn("Unsupported heterogeneous vlenb detected, vector extension disabled.\n");
+ disable_xtheadvector();
+ }
+
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
return -ENOENT;
@@ -896,7 +952,8 @@ void __init riscv_fill_hwcap(void)
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
}
- if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) {
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X) ||
+ has_xtheadvector_no_alternatives()) {
/*
* This cannot fail when called on the boot hart
*/
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index 6afe80c7f03a..99972a48e86b 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -143,7 +143,7 @@ static int riscv_v_start_kernel_context(bool *is_nested)
/* Transfer the ownership of V from user to kernel, then save */
riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
- if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
+ if (__riscv_v_vstate_check(task_pt_regs(current)->status, DIRTY)) {
uvstate = &current->thread.vstate;
__riscv_v_vstate_save(uvstate, uvstate->datap);
}
@@ -160,7 +160,7 @@ asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
return;
depth = riscv_v_ctx_get_depth();
- if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
+ if (depth == 0 && __riscv_v_vstate_check(regs->status, DIRTY))
riscv_preempt_v_set_dirty();
riscv_v_ctx_depth_inc();
@@ -208,7 +208,7 @@ void kernel_vector_begin(void)
{
bool nested = false;
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!(has_vector() || has_xtheadvector())))
return;
BUG_ON(!may_use_simd());
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(kernel_vector_begin);
*/
void kernel_vector_end(void)
{
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!(has_vector() || has_xtheadvector())))
return;
riscv_v_disable();
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 58b6482c2bf6..7c244de77180 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -190,7 +190,7 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_free(tsk);
}
@@ -240,7 +240,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.s[0] = 0;
}
p->thread.riscv_v_flags = 0;
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_alloc(p);
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
@@ -364,7 +364,7 @@ static bool try_to_set_pmm(unsigned long value)
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static struct ctl_table tagged_addr_sysctl_table[] = {
+static const struct ctl_table tagged_addr_sysctl_table[] = {
{
.procname = "tagged_addr_disabled",
.mode = 0644,
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 45010e71df86..f1793630fc51 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -147,9 +147,7 @@ static void __init init_resources(void)
res_idx = num_resources - 1;
mem_res_sz = num_resources * sizeof(*mem_res);
- mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
- if (!mem_res)
- panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
+ mem_res = memblock_alloc_or_panic(mem_res_sz, SMP_CACHE_BYTES);
/*
* Start by adding the reserved regions, if they overlap
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index dcd282419456..94e905eea1de 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -189,7 +189,7 @@ static long restore_sigcontext(struct pt_regs *regs,
return 0;
case RISCV_V_MAGIC:
- if (!has_vector() || !riscv_v_vstate_query(regs) ||
+ if (!(has_vector() || has_xtheadvector()) || !riscv_v_vstate_query(regs) ||
size != riscv_v_sc_size)
return -EINVAL;
@@ -211,7 +211,7 @@ static size_t get_rt_frame_size(bool cal_all)
frame_size = sizeof(*frame);
- if (has_vector()) {
+ if (has_vector() || has_xtheadvector()) {
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
@@ -284,7 +284,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
if (has_fpu())
err |= save_fp_state(regs, &sc->sc_fpregs);
/* Save the vector state. */
- if (has_vector() && riscv_v_vstate_query(regs))
+ if ((has_vector() || has_xtheadvector()) && riscv_v_vstate_query(regs))
err |= save_v_state(regs, (void __user **)&sc_ext_ptr);
/* Write zero to fp-reserved space and check it on restore_sigcontext */
err |= __put_user(0, &sc->sc_extdesc.reserved);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index c180a647a30e..d58b5e751286 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -43,6 +43,7 @@ enum ipi_message_type {
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
+EXPORT_SYMBOL_GPL(__cpuid_to_hartid_map);
void __init smp_setup_processor_id(void)
{
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index cb93adfffc48..bcd3b816306c 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -15,6 +15,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
+#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>
@@ -286,6 +287,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = riscv_timebase;
break;
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
+ hwprobe_isa_vendor_ext_thead_0(pair, cpus);
+ break;
+
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 821818886fab..184f780c932d 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -33,7 +33,17 @@ int riscv_v_setup_vsize(void)
{
unsigned long this_vsize;
- /* There are 32 vector registers with vlenb length. */
+ /*
+ * There are 32 vector registers with vlenb length.
+ *
+ * If the thead,vlenb property was provided by the firmware, use that
+ * instead of probing the CSRs.
+ */
+ if (thead_vlenb_of) {
+ riscv_v_vsize = thead_vlenb_of * 32;
+ return 0;
+ }
+
riscv_v_enable();
this_vsize = csr_read(CSR_VLENB) * 32;
riscv_v_disable();
@@ -53,7 +63,7 @@ int riscv_v_setup_vsize(void)
void __init riscv_v_setup_ctx_cache(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
@@ -173,7 +183,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
u32 __user *epc = (u32 __user *)regs->epc;
u32 insn = (u32)regs->badaddr;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return false;
/* Do not handle if V is not supported, or disabled */
@@ -216,7 +226,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
next = riscv_v_ctrl_get_next(tsk);
@@ -238,7 +248,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
long riscv_v_vstate_ctrl_get_current(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
@@ -249,7 +259,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK)
@@ -287,7 +297,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg)
#ifdef CONFIG_SYSCTL
-static struct ctl_table riscv_v_default_vstate_table[] = {
+static const struct ctl_table riscv_v_default_vstate_table[] = {
{
.procname = "riscv_v_default_allow",
.data = &riscv_v_implicit_uacc,
@@ -299,7 +309,7 @@ static struct ctl_table riscv_v_default_vstate_table[] = {
static int __init riscv_v_sysctl_init(void)
{
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
if (!register_sysctl("abi", riscv_v_default_vstate_table))
return -EINVAL;
return 0;
@@ -309,7 +319,7 @@ static int __init riscv_v_sysctl_init(void)
static int __init riscv_v_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
-static int riscv_v_init(void)
+static int __init riscv_v_init(void)
{
return riscv_v_sysctl_init();
}
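A quick check of the arithmetic used in riscv_v_setup_vsize() and __vstate_csr_save(): riscv_v_vsize covers 32 vector registers of vlenb bytes each, so the two conversions are inverses. The concrete numbers below are only an example:

	static void vlenb_example(void)
	{
		unsigned long vlenb = 16;               /* e.g. thead,vlenb = <16> bytes, i.e. VLEN = 128 bits */
		unsigned long vsize = vlenb * 32;       /* riscv_v_vsize: 32 registers -> 512 bytes */
		unsigned long vlenb_back = vsize / 32;  /* what __vstate_csr_save() recovers: 16 */

		(void)vlenb_back;
	}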
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
index a8126d118341..a31ff84740eb 100644
--- a/arch/riscv/kernel/vendor_extensions.c
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -6,6 +6,7 @@
#include <asm/vendorid_list.h>
#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/andes.h>
+#include <asm/vendor_extensions/thead.h>
#include <linux/array_size.h>
#include <linux/types.h>
@@ -14,6 +15,9 @@ struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = {
#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
&riscv_isa_vendor_ext_list_andes,
#endif
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+ &riscv_isa_vendor_ext_list_thead,
+#endif
};
const size_t riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list);
@@ -41,6 +45,12 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig
cpu_bmap = riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap;
break;
#endif
+ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+ case THEAD_VENDOR_ID:
+ bmap = &riscv_isa_vendor_ext_list_thead.all_harts_isa_bitmap;
+ cpu_bmap = riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap;
+ break;
+ #endif
default:
return false;
}
diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile
index 6a61aed944f1..866414c81a9f 100644
--- a/arch/riscv/kernel/vendor_extensions/Makefile
+++ b/arch/riscv/kernel/vendor_extensions/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o
+obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_THEAD) += thead.o
+obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_THEAD) += thead_hwprobe.o
diff --git a/arch/riscv/kernel/vendor_extensions/thead.c b/arch/riscv/kernel/vendor_extensions/thead.c
new file mode 100644
index 000000000000..519dbf70710a
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/thead.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/cpufeature.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
+
+#include <linux/array_size.h>
+#include <linux/cpumask.h>
+#include <linux/types.h>
+
+/* All T-Head vendor extensions supported in Linux */
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_thead[] = {
+ __RISCV_ISA_EXT_DATA(xtheadvector, RISCV_ISA_VENDOR_EXT_XTHEADVECTOR),
+};
+
+struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead = {
+ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_thead),
+ .ext_data = riscv_isa_vendor_ext_thead,
+};
+
+void disable_xtheadvector(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ clear_bit(RISCV_ISA_VENDOR_EXT_XTHEADVECTOR, riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap[cpu].isa);
+
+ clear_bit(RISCV_ISA_VENDOR_EXT_XTHEADVECTOR, riscv_isa_vendor_ext_list_thead.all_harts_isa_bitmap.isa);
+}
diff --git a/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c b/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c
new file mode 100644
index 000000000000..2eba34011786
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/vendor_extensions/thead.h>
+#include <asm/vendor_extensions/thead_hwprobe.h>
+#include <asm/vendor_extensions/vendor_hwprobe.h>
+
+#include <linux/cpumask.h>
+#include <linux/types.h>
+
+#include <uapi/asm/hwprobe.h>
+#include <uapi/asm/vendor/thead.h>
+
+void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus)
+{
+ VENDOR_EXTENSION_SUPPORTED(pair, cpus,
+ riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap, {
+ VENDOR_EXT_KEY(XTHEADVECTOR);
+ });
+}
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 0fb1840c3e0a..4e0bba91d284 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -30,6 +30,7 @@ kvm-y += vcpu_sbi_hsm.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o
kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_sbi_sta.o
+kvm-y += vcpu_sbi_system.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_timer.o
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e048dcc6e65e..60d684c76c58 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -34,7 +34,12 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, csr_exit_user),
STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, exits)
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
+ STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, load_access_exits),
+ STATS_DESC_COUNTER(VCPU, store_access_exits),
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index fa98e5c024b2..6e0c18412795 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -165,6 +165,17 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
vcpu->arch.guest_context.sstatus |= SR_SPP;
}
+static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
+{
+ int ret = -EFAULT;
+
+ if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
+ kvm_riscv_vcpu_trap_redirect(vcpu, trap);
+ ret = 1;
+ }
+ return ret;
+}
+
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
@@ -183,14 +194,32 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (trap->scause) {
case EXC_INST_ILLEGAL:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
+ vcpu->stat.instr_illegal_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_LOAD_MISALIGNED:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
+ vcpu->stat.load_misaligned_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_STORE_MISALIGNED:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
+ vcpu->stat.store_misaligned_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_LOAD_ACCESS:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
+ vcpu->stat.load_access_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_STORE_ACCESS:
- if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
- kvm_riscv_vcpu_trap_redirect(vcpu, trap);
- ret = 1;
- }
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
+ vcpu->stat.store_access_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
+ case EXC_INST_ACCESS:
+ ret = vcpu_redirect(vcpu, trap);
break;
case EXC_VIRTUAL_INST_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 753f66c8b70a..f6d27b59c641 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -46,6 +46,8 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
+ KVM_ISA_EXT_ARR(SVVPTC),
+ KVM_ISA_EXT_ARR(ZABHA),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
@@ -65,6 +67,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
+ KVM_ISA_EXT_ARR(ZICCRSE),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
@@ -145,6 +148,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
+ case KVM_RISCV_ISA_EXT_SVVPTC:
+ case KVM_RISCV_ISA_EXT_ZABHA:
case KVM_RISCV_ISA_EXT_ZACAS:
case KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_RISCV_ISA_EXT_ZBA:
@@ -162,6 +167,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZFA:
case KVM_RISCV_ISA_EXT_ZFH:
case KVM_RISCV_ISA_EXT_ZFHMIN:
+ case KVM_RISCV_ISA_EXT_ZICCRSE:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICOND:
case KVM_RISCV_ISA_EXT_ZICSR:
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 6e704ed86a83..d1c83a77735e 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -71,6 +71,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_ptr = &vcpu_sbi_ext_dbcn,
},
{
+ .ext_idx = KVM_RISCV_SBI_EXT_SUSP,
+ .ext_ptr = &vcpu_sbi_ext_susp,
+ },
+ {
.ext_idx = KVM_RISCV_SBI_EXT_STA,
.ext_ptr = &vcpu_sbi_ext_sta,
},
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
new file mode 100644
index 000000000000..5d55e08791fa
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/sbi.h>
+
+static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm_cpu_context *reset_cntx;
+ unsigned long funcid = cp->a6;
+ unsigned long hva, i;
+ struct kvm_vcpu *tmp;
+
+ switch (funcid) {
+ case SBI_EXT_SUSP_SYSTEM_SUSPEND:
+ if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+ return 0;
+ }
+
+ if (!(cp->sstatus & SR_SPP)) {
+ retdata->err_val = SBI_ERR_FAILURE;
+ return 0;
+ }
+
+ hva = kvm_vcpu_gfn_to_hva_prot(vcpu, cp->a1 >> PAGE_SHIFT, NULL);
+ if (kvm_is_error_hva(hva)) {
+ retdata->err_val = SBI_ERR_INVALID_ADDRESS;
+ return 0;
+ }
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (tmp == vcpu)
+ continue;
+ if (!kvm_riscv_vcpu_stopped(tmp)) {
+ retdata->err_val = SBI_ERR_DENIED;
+ return 0;
+ }
+ }
+
+ spin_lock(&vcpu->arch.reset_cntx_lock);
+ reset_cntx = &vcpu->arch.guest_reset_context;
+ reset_cntx->sepc = cp->a1;
+ reset_cntx->a0 = vcpu->vcpu_id;
+ reset_cntx->a1 = cp->a2;
+ spin_unlock(&vcpu->arch.reset_cntx_lock);
+
+ kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+
+ /* userspace provides the suspend implementation */
+ kvm_riscv_vcpu_sbi_forward(vcpu, run);
+ retdata->uexit = true;
+ break;
+ default:
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ break;
+ }
+
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp = {
+ .extid_start = SBI_EXT_SUSP,
+ .extid_end = SBI_EXT_SUSP,
+ .default_disabled = true,
+ .handler = kvm_sbi_ext_susp_handler,
+};
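For context, a rough guest-side sketch of the call this handler services; the EID ("SUSP" = 0x53555350), FID 0 and sleep type 0 follow the SBI system-suspend extension and are assumptions here, not values introduced by this patch. On success the calling hart resumes execution at resume_addr once userspace has completed the suspend/resume cycle; the ecall returns normally only on failure:

	#define SBI_EXT_SUSP_EID                    0x53555350  /* "SUSP" */
	#define SBI_EXT_SUSP_SYSTEM_SUSPEND_FID     0
	#define SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM  0

	static long sbi_system_suspend(unsigned long resume_addr, unsigned long opaque)
	{
		register unsigned long a0 asm("a0") = SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM;
		register unsigned long a1 asm("a1") = resume_addr;
		register unsigned long a2 asm("a2") = opaque;
		register unsigned long a6 asm("a6") = SBI_EXT_SUSP_SYSTEM_SUSPEND_FID;
		register unsigned long a7 asm("a7") = SBI_EXT_SUSP_EID;

		asm volatile("ecall"
			     : "+r" (a0), "+r" (a1)
			     : "r" (a2), "r" (a6), "r" (a7)
			     : "memory");

		return a0;  /* SBI error code, e.g. SBI_ERR_INVALID_PARAM from the handler above */
	}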
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 8eec6b69a875..79368a895fee 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -15,8 +15,7 @@ endif
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
-lib-$(CONFIG_RISCV_ISA_ZBC) += crc32.o
-
+obj-$(CONFIG_CRC32_ARCH) += crc32-riscv.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32-riscv.c
index d7dc599af3ef..53d56ab422c7 100644
--- a/arch/riscv/lib/crc32.c
+++ b/arch/riscv/lib/crc32-riscv.c
@@ -14,6 +14,7 @@
#include <linux/crc32poly.h>
#include <linux/crc32.h>
#include <linux/byteorder/generic.h>
+#include <linux/module.h>
/*
* Refer to https://www.corsix.org/content/barrett-reduction-polynomials for
@@ -217,17 +218,19 @@ legacy:
return crc_fb(crc, p, len);
}
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE,
crc32_le_base);
}
+EXPORT_SYMBOL(crc32_le_arch);
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
- CRC32C_POLY_QT_LE, __crc32c_le_base);
+ CRC32C_POLY_QT_LE, crc32c_le_base);
}
+EXPORT_SYMBOL(crc32c_le_arch);
static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
size_t len)
@@ -253,7 +256,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
return crc;
}
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
size_t offset, head_len, tail_len;
unsigned long const *p_ul;
@@ -292,3 +295,17 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
legacy:
return crc32_be_base(crc, p, len);
}
+EXPORT_SYMBOL(crc32_be_arch);
+
+u32 crc32_optimizations(void)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Accelerated CRC32 implementation with Zbc extension");
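Callers are unaffected by the rename: the generic crc32_le()/crc32_be() helpers from <linux/crc32.h> dispatch to these *_arch implementations when CONFIG_CRC32_ARCH is selected. A minimal in-kernel usage sketch (the seed and helper name are arbitrary):

	#include <linux/crc32.h>
	#include <linux/types.h>

	/* Little-endian CRC32 over a buffer; picks up crc32_le_arch() (Zbc) when available. */
	static u32 checksum_blob(const u8 *buf, size_t len)
	{
		return crc32_le(~0U, buf, len) ^ ~0U;
	}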
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index a9f2b4af8f3f..0194324a0c50 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -22,6 +22,57 @@
#include "../kernel/head.h"
+static void show_pte(unsigned long addr)
+{
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ struct mm_struct *mm = current->mm;
+
+ if (!mm)
+ mm = &init_mm;
+
+ pr_alert("Current %s pgtable: %luK pagesize, %d-bit VAs, pgdp=0x%016llx\n",
+ current->comm, PAGE_SIZE / SZ_1K, VA_BITS,
+ mm == &init_mm ? (u64)__pa_symbol(mm->pgd) : virt_to_phys(mm->pgd));
+
+ pgdp = pgd_offset(mm, addr);
+ pgd = pgdp_get(pgdp);
+ pr_alert("[%016lx] pgd=%016lx", addr, pgd_val(pgd));
+ if (pgd_none(pgd) || pgd_bad(pgd) || pgd_leaf(pgd))
+ goto out;
+
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = p4dp_get(p4dp);
+ pr_cont(", p4d=%016lx", p4d_val(p4d));
+ if (p4d_none(p4d) || p4d_bad(p4d) || p4d_leaf(p4d))
+ goto out;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ pr_cont(", pud=%016lx", pud_val(pud));
+ if (pud_none(pud) || pud_bad(pud) || pud_leaf(pud))
+ goto out;
+
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get(pmdp);
+ pr_cont(", pmd=%016lx", pmd_val(pmd));
+ if (pmd_none(pmd) || pmd_bad(pmd) || pmd_leaf(pmd))
+ goto out;
+
+ ptep = pte_offset_map(pmdp, addr);
+ if (!ptep)
+ goto out;
+
+ pte = ptep_get(ptep);
+ pr_cont(", pte=%016lx", pte_val(pte));
+ pte_unmap(ptep);
+out:
+ pr_cont("\n");
+}
+
static void die_kernel_fault(const char *msg, unsigned long addr,
struct pt_regs *regs)
{
@@ -31,6 +82,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
addr);
bust_spinlocks(0);
+ show_pte(addr);
die(regs, "Oops");
make_task_dead(SIGKILL);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 8d167e09f1fe..15b2eda4c364 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -268,8 +268,12 @@ static void __init setup_bootmem(void)
*/
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
- memblock_cap_memory_range(phys_ram_base,
- max_mapped_addr - phys_ram_base);
+ if (memblock_end_of_DRAM() > max_mapped_addr) {
+ memblock_cap_memory_range(phys_ram_base,
+ max_mapped_addr - phys_ram_base);
+ pr_warn("Physical memory overflows the linear mapping size: region above %pa removed",
+ &max_mapped_addr);
+ }
}
/*
@@ -1573,7 +1577,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
return;
}
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
@@ -1595,7 +1599,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemm
}
if (!is_vmemmap)
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index c301c8d291d2..41c635d6aca4 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -32,7 +32,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
pte_t *ptep, *p;
if (pmd_none(pmdp_get(pmd))) {
- p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -54,7 +54,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
unsigned long next;
if (pud_none(pudp_get(pud))) {
- p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -85,7 +85,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
unsigned long next;
if (p4d_none(p4dp_get(p4d))) {
- p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -116,7 +116,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
unsigned long next;
if (pgd_none(pgdp_get(pgd))) {
- p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -385,7 +385,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
next = pud_addr_end(vaddr, end);
if (pud_none(pudp_get(pud_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -405,7 +405,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
next = p4d_addr_end(vaddr, end);
if (p4d_none(p4dp_get(p4d_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -424,7 +424,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
next = pgd_addr_end(vaddr, end);
if (pgd_none(pgdp_get(pgd_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5cbbb7daed44..9c9ec08d78c7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -52,13 +52,19 @@ config KASAN_SHADOW_OFFSET
depends on KASAN
default 0x1C000000000000
-config GCC_ASM_FLAG_OUTPUT_BROKEN
+config CC_ASM_FLAG_OUTPUT_BROKEN
def_bool CC_IS_GCC && GCC_VERSION < 140200
help
GCC versions before 14.2.0 may die with an internal
compiler error in some configurations if flag output
operands are used within inline assemblies.
+config CC_HAS_ASM_AOR_FORMAT_FLAGS
+ def_bool !(CC_IS_CLANG && CLANG_VERSION < 190100)
+ help
+ Clang versions before 19.1.0 do not support A,
+ O, and R inline assembly format flags.
+
config S390
def_bool y
#
@@ -72,6 +78,8 @@ config S390
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
+ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_CRC32
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -243,6 +251,7 @@ config S390
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PCI
select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PROC_VMCORE_DEVICE_RAM if PROC_VMCORE
select NEED_SG_DMA_LENGTH if PCI
select OLD_SIGACTION
select OLD_SIGSUSPEND3
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 3f25498dac65..5fae311203c2 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -22,7 +22,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
endif
-KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack
+KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
diff --git a/arch/s390/Makefile.postlink b/arch/s390/Makefile.postlink
index df82f5410769..1ae5478cd6ac 100644
--- a/arch/s390/Makefile.postlink
+++ b/arch/s390/Makefile.postlink
@@ -11,6 +11,7 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
CMD_RELOCS=arch/s390/tools/relocs
OUT_RELOCS = arch/s390/boot
@@ -19,11 +20,6 @@ quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S
mkdir -p $(OUT_RELOCS); \
$(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S
-quiet_cmd_strip_relocs = RSTRIP $@
- cmd_strip_relocs = \
- $(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \
- --remove-section='.rela.*' --remove-section='.rela__*' $@
-
vmlinux: FORCE
$(call cmd,relocs)
$(call cmd,strip_relocs)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 91a30e017d65..dd7ba7587dd5 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -52,7 +52,7 @@ static int appldata_interval_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
static struct ctl_table_header *appldata_sysctl_header;
-static struct ctl_table appldata_table[] = {
+static const struct ctl_table appldata_table[] = {
{
.procname = "timer",
.mode = S_IRUGO | S_IWUSR,
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index 11e0c3d5dbc8..79afb5fa7f1f 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -46,7 +46,7 @@ void print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
- boot_printk("%s\n", als_str);
+ boot_emerg("%s\n", als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@@ -54,7 +54,7 @@ void print_missing_facilities(void)
first = 0;
}
}
- boot_printk("%s\n", als_str);
+ boot_emerg("%s\n", als_str);
}
static void facility_mismatch(void)
@@ -62,10 +62,10 @@ static void facility_mismatch(void)
struct cpuid id;
get_cpu_id(&id);
- boot_printk("The Linux kernel requires more recent processor hardware\n");
- boot_printk("Detected machine-type number: %4x\n", id.machine);
+ boot_emerg("The Linux kernel requires more recent processor hardware\n");
+ boot_emerg("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities();
- boot_printk("See Principles of Operations for facility bits\n");
+ boot_emerg("See Principles of Operations for facility bits\n");
disabled_wait();
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 56244fe78182..69f261566a64 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__
+#include <linux/printk.h>
#include <asm/physmem_info.h>
struct machine_info {
@@ -47,13 +48,16 @@ void physmem_set_usable_limit(unsigned long limit);
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
void physmem_free(enum reserved_range_type type);
/* for continuous/multiple allocations per type */
-unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
- unsigned long align);
+unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
+ unsigned long align);
+unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
+ unsigned long align, bool die_on_oom);
/* for single allocations, 1 per type */
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
unsigned long align, unsigned long min, unsigned long max,
bool die_on_oom);
unsigned long get_physmem_alloc_pos(void);
+void dump_physmem_reserved(void);
bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
unsigned long *intersection_start);
bool is_ipl_block_dump(void);
@@ -69,12 +73,28 @@ void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
-void __printf(1, 2) boot_printk(const char *fmt, ...);
+int __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
+void boot_rb_dump(void);
+
+#ifndef boot_fmt
+#define boot_fmt(fmt) fmt
+#endif
+
+#define boot_emerg(fmt, ...) boot_printk(KERN_EMERG boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_alert(fmt, ...) boot_printk(KERN_ALERT boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_crit(fmt, ...) boot_printk(KERN_CRIT boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_err(fmt, ...) boot_printk(KERN_ERR boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_warn(fmt, ...) boot_printk(KERN_WARNING boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_notice(fmt, ...) boot_printk(KERN_NOTICE boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__)
extern struct machine_info machine;
+extern int boot_console_loglevel;
+extern bool boot_ignore_loglevel;
/* Symbols defined by linker scripts */
extern const char kernel_version[];
diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
index f478e8e9cbda..03500b9d9fb9 100644
--- a/arch/s390/boot/decompressor.c
+++ b/arch/s390/boot/decompressor.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <asm/boot_data.h>
#include <asm/page.h>
#include "decompressor.h"
#include "boot.h"
@@ -63,6 +64,15 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unzstd.c"
#endif
+static void decompress_error(char *m)
+{
+ if (bootdebug)
+ boot_rb_dump();
+ boot_emerg("Decompression error: %s\n", m);
+ boot_emerg(" -- System halted\n");
+ disabled_wait();
+}
+
unsigned long mem_safe_offset(void)
{
return ALIGN(free_mem_end_ptr, PAGE_SIZE);
@@ -71,5 +81,5 @@ unsigned long mem_safe_offset(void)
void deploy_kernel(void *output)
{
__decompress(_compressed_start, _compressed_end - _compressed_start,
- NULL, NULL, output, vmlinux.image_size, NULL, error);
+ NULL, NULL, output, vmlinux.image_size, NULL, decompress_error);
}
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 557462e62cd7..d3731f2983b7 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -215,7 +215,7 @@ static void check_cleared_facilities(void)
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
- boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
+ boot_emerg("The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
}
@@ -313,5 +313,23 @@ void parse_boot_command_line(void)
#endif
if (!strcmp(param, "relocate_lowcore") && test_facility(193))
relocate_lowcore = 1;
+ if (!strcmp(param, "earlyprintk"))
+ boot_earlyprintk = true;
+ if (!strcmp(param, "debug"))
+ boot_console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ if (!strcmp(param, "bootdebug")) {
+ bootdebug = true;
+ if (val)
+ strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1);
+ }
+ if (!strcmp(param, "quiet"))
+ boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ if (!strcmp(param, "ignore_loglevel"))
+ boot_ignore_loglevel = true;
+ if (!strcmp(param, "loglevel")) {
+ boot_console_loglevel = simple_strtoull(val, NULL, 10);
+ if (boot_console_loglevel < CONSOLE_LOGLEVEL_MIN)
+ boot_console_loglevel = CONSOLE_LOGLEVEL_MIN;
+ }
}
}
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
index d00898852a88..f73cd757a5f7 100644
--- a/arch/s390/boot/ipl_report.c
+++ b/arch/s390/boot/ipl_report.c
@@ -30,7 +30,6 @@ static unsigned long get_cert_comp_list_size(void)
{
struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp;
- size_t size;
/*
* Find the length for the IPL report boot data
@@ -155,7 +154,7 @@ void save_ipl_cert_comp_list(void)
return;
size = get_cert_comp_list_size();
- early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+ early_ipl_comp_list_addr = physmem_alloc_or_die(RR_CERT_COMP_LIST, size, sizeof(int));
ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
copy_components_bootdata();
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index f864d2bff775..941f4c9e27cc 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -32,7 +32,7 @@ struct prng_parm {
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
- boot_printk("KASLR disabled: CPU has no PRNG\n");
+ boot_warn("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
@@ -168,7 +168,7 @@ static unsigned long iterate_valid_positions(unsigned long size, unsigned long a
* cannot have chains.
*
* On the other hand, "dynamic" or "repetitive" allocations are done via
- * physmem_alloc_top_down(). These allocations are tightly packed together
+ * physmem_alloc_or_die(). These allocations are tightly packed together
* top down from the end of online memory. physmem_alloc_pos represents
* current position where those allocations start.
*
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index 5abe59fb3bc0..633f11600aab 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -17,13 +17,14 @@ void print_stacktrace(unsigned long sp)
(unsigned long)_stack_end };
bool first = true;
- boot_printk("Call Trace:\n");
+ boot_emerg("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp;
- boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
- " sp:%016lx [<%016lx>] %pS\n",
- sp, sf->gprs[8], (void *)sf->gprs[8]);
+ if (first)
+ boot_emerg("(sp:%016lx [<%016lx>] %pS)\n", sp, sf->gprs[8], (void *)sf->gprs[8]);
+ else
+ boot_emerg(" sp:%016lx [<%016lx>] %pS\n", sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp)
break;
sp = sf->back_chain;
@@ -36,30 +37,30 @@ void print_pgm_check_info(void)
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
- boot_printk("Linux version %s\n", kernel_version);
+ if (bootdebug)
+ boot_rb_dump();
+ boot_emerg("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- boot_printk("Kernel command line: %s\n", early_command_line);
- boot_printk("Kernel fault: interruption code %04x ilc:%x\n",
- get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
+ boot_emerg("Kernel command line: %s\n", early_command_line);
+ boot_emerg("Kernel fault: interruption code %04x ilc:%d\n",
+ get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
if (kaslr_enabled()) {
- boot_printk("Kernel random base: %lx\n", __kaslr_offset);
- boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
+ boot_emerg("Kernel random base: %lx\n", __kaslr_offset);
+ boot_emerg("Kernel random base phys: %lx\n", __kaslr_offset_phys);
}
- boot_printk("PSW : %016lx %016lx (%pS)\n",
- get_lowcore()->psw_save_area.mask,
- get_lowcore()->psw_save_area.addr,
- (void *)get_lowcore()->psw_save_area.addr);
- boot_printk(
- " R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
- psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
- psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
- psw->eaba);
- boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
- boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
- boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
- boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
+ boot_emerg("PSW : %016lx %016lx (%pS)\n",
+ get_lowcore()->psw_save_area.mask,
+ get_lowcore()->psw_save_area.addr,
+ (void *)get_lowcore()->psw_save_area.addr);
+ boot_emerg(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
+ psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
+ psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba);
+ boot_emerg("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
+ boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
+ boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
+ boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]);
- boot_printk("Last Breaking-Event-Address:\n");
- boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
- (void *)get_lowcore()->pgm_last_break);
+ boot_emerg("Last Breaking-Event-Address:\n");
+ boot_emerg(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
+ (void *)get_lowcore()->pgm_last_break);
}
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
index 7617aa2d2f7e..aa096ef68e8c 100644
--- a/arch/s390/boot/physmem_info.c
+++ b/arch/s390/boot/physmem_info.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt) "physmem: " fmt
#include <linux/processor.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -28,7 +29,7 @@ static struct physmem_range *__get_physmem_range_ptr(u32 n)
return &physmem_info.online[n];
if (unlikely(!physmem_info.online_extended)) {
physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
- RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
+ RR_MEM_DETECT_EXT, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
physmem_alloc_pos, true);
}
return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
@@ -207,11 +208,16 @@ unsigned long detect_max_physmem_end(void)
max_physmem_end = search_mem_end();
physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
}
+ boot_debug("Max physical memory: 0x%016lx (info source: %s)\n", max_physmem_end,
+ get_physmem_info_source());
return max_physmem_end;
}
void detect_physmem_online_ranges(unsigned long max_physmem_end)
{
+ unsigned long start, end;
+ int i;
+
if (!sclp_early_read_storage_info()) {
physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
@@ -226,12 +232,16 @@ void detect_physmem_online_ranges(unsigned long max_physmem_end)
} else if (max_physmem_end) {
add_physmem_online_range(0, max_physmem_end);
}
+ boot_debug("Online memory ranges (info source: %s):\n", get_physmem_info_source());
+ for_each_physmem_online_range(i, &start, &end)
+ boot_debug(" online [%d]: 0x%016lx-0x%016lx\n", i, start, end);
}
void physmem_set_usable_limit(unsigned long limit)
{
physmem_info.usable = limit;
physmem_alloc_pos = limit;
+ boot_debug("Usable memory limit: 0x%016lx\n", limit);
}
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
@@ -241,38 +251,47 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
enum reserved_range_type t;
int i;
- boot_printk("Linux version %s\n", kernel_version);
+ boot_emerg("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- boot_printk("Kernel command line: %s\n", early_command_line);
- boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
- size, align, min, max);
- boot_printk("Reserved memory ranges:\n");
+ boot_emerg("Kernel command line: %s\n", early_command_line);
+ boot_emerg("Out of memory allocating %lu bytes 0x%lx aligned in range %lx:%lx\n",
+ size, align, min, max);
+ boot_emerg("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
- boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+ boot_emerg("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start;
}
- boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
- get_physmem_info_source(), physmem_info.info_source);
+ boot_emerg("Usable online memory ranges (info source: %s [%d]):\n",
+ get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) {
- boot_printk("%016lx %016lx\n", start, end);
+ boot_emerg("%016lx %016lx\n", start, end);
total_mem += end - start;
}
- boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
- total_mem, total_reserved_mem,
- total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+ boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
+ total_mem, total_reserved_mem,
+ total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address());
- boot_printk("\n\n -- System halted\n");
+ boot_emerg(" -- System halted\n");
disabled_wait();
}
-void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
+static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
physmem_info.reserved[type].start = addr;
physmem_info.reserved[type].end = addr + size;
}
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
+{
+ _physmem_reserve(type, addr, size);
+ boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Reserve:", addr, addr + size,
+ get_rr_type_name(type));
+}
+
void physmem_free(enum reserved_range_type type)
{
+ boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Free:", physmem_info.reserved[type].start,
+ physmem_info.reserved[type].end, get_rr_type_name(type));
physmem_info.reserved[type].start = 0;
physmem_info.reserved[type].end = 0;
}
@@ -339,41 +358,73 @@ unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long s
max = min(max, physmem_alloc_pos);
addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
if (addr)
- physmem_reserve(type, addr, size);
+ _physmem_reserve(type, addr, size);
+ boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Alloc range:", addr, addr + size,
+ get_rr_type_name(type));
return addr;
}
-unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
- unsigned long align)
+unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
+ unsigned long align, bool die_on_oom)
{
struct reserved_range *range = &physmem_info.reserved[type];
- struct reserved_range *new_range;
+ struct reserved_range *new_range = NULL;
unsigned int ranges_left;
unsigned long addr;
addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
- &ranges_left, true);
+ &ranges_left, die_on_oom);
+ if (!addr)
+ return 0;
/* if not a consecutive allocation of the same type or first allocation */
if (range->start != addr + size) {
if (range->end) {
- physmem_alloc_pos = __physmem_alloc_range(
- sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
- physmem_alloc_ranges, &ranges_left, true);
- new_range = (struct reserved_range *)physmem_alloc_pos;
+ addr = __physmem_alloc_range(sizeof(struct reserved_range), 0, 0,
+ physmem_alloc_pos, physmem_alloc_ranges,
+ &ranges_left, true);
+ new_range = (struct reserved_range *)addr;
+ addr = __physmem_alloc_range(size, align, 0, addr, ranges_left,
+ &ranges_left, die_on_oom);
+ if (!addr)
+ return 0;
*new_range = *range;
range->chain = new_range;
- addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
- ranges_left, &ranges_left, true);
}
range->end = addr + size;
}
+ if (type != RR_VMEM) {
+ boot_debug("%-14s 0x%016lx-0x%016lx %-20s align 0x%lx split %d\n", "Alloc topdown:",
+ addr, addr + size, get_rr_type_name(type), align, !!new_range);
+ }
range->start = addr;
physmem_alloc_pos = addr;
physmem_alloc_ranges = ranges_left;
return addr;
}
+unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
+ unsigned long align)
+{
+ return physmem_alloc(type, size, align, true);
+}
+
unsigned long get_physmem_alloc_pos(void)
{
return physmem_alloc_pos;
}
+
+void dump_physmem_reserved(void)
+{
+ struct reserved_range *range;
+ enum reserved_range_type t;
+ unsigned long start, end;
+
+ boot_debug("Reserved memory ranges:\n");
+ for_each_physmem_reserved_range(t, range, &start, &end) {
+ if (end) {
+ boot_debug("%-14s 0x%016lx-0x%016lx @%012lx chain %012lx\n",
+ get_rr_type_name(t), start, end, (unsigned long)range,
+ (unsigned long)range->chain);
+ }
+ }
+}
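[Editor's note] The refactoring above splits the old physmem_alloc_top_down() into physmem_alloc(), which returns 0 on failure when die_on_oom is false, and physmem_alloc_or_die(), which keeps the fatal behaviour. The following is a hypothetical caller sketch (alloc_block_with_fallback is not part of the patch) showing how the two entry points are meant to be combined, mirroring the KASAN shadow path in vmem.c further below; it uses only symbols defined by this patch and is not meant to compile outside the boot code.

/*
 * Hypothetical caller: try a large aligned block first and tolerate failure,
 * then fall back to a single page and die only if even that is impossible.
 */
static unsigned long alloc_block_with_fallback(unsigned long size)
{
	unsigned long addr;

	addr = physmem_alloc(RR_VMEM, size, size, false);	/* may return 0 */
	if (addr)
		return addr;
	return physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);	/* never fails */
}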
diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c
index 35f18f2b936e..b4c66fa667d5 100644
--- a/arch/s390/boot/printk.c
+++ b/arch/s390/boot/printk.c
@@ -5,21 +5,111 @@
#include <linux/ctype.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
+#include <asm/sections.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "boot.h"
+int boot_console_loglevel = CONFIG_CONSOLE_LOGLEVEL_DEFAULT;
+bool boot_ignore_loglevel;
+char __bootdata(boot_rb)[PAGE_SIZE * 2];
+bool __bootdata(boot_earlyprintk);
+size_t __bootdata(boot_rb_off);
+char __bootdata(bootdebug_filter)[128];
+bool __bootdata(bootdebug);
+
+static void boot_rb_add(const char *str, size_t len)
+{
+ /* leave a double '\0' at the end */

+ size_t avail = sizeof(boot_rb) - boot_rb_off - 1;
+
+ /* store strings separated by '\0' */
+ if (len + 1 > avail)
+ boot_rb_off = 0;
+ strcpy(boot_rb + boot_rb_off, str);
+ boot_rb_off += len + 1;
+}
+
+static void print_rb_entry(const char *str)
+{
+ sclp_early_printk(printk_skip_level(str));
+}
+
+static bool debug_messages_printed(void)
+{
+ return boot_earlyprintk && (boot_ignore_loglevel || boot_console_loglevel > LOGLEVEL_DEBUG);
+}
+
+void boot_rb_dump(void)
+{
+ if (debug_messages_printed())
+ return;
+ sclp_early_printk("Boot messages ring buffer:\n");
+ boot_rb_foreach(print_rb_entry);
+}
+
const char hex_asc[] = "0123456789abcdef";
static char *as_hex(char *dst, unsigned long val, int pad)
{
- char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
+ char *p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
- for (*p-- = 0; p >= dst; val >>= 4)
+ for (*p-- = '\0'; p >= dst; val >>= 4)
*p-- = hex_asc[val & 0x0f];
- return end;
+ return dst;
+}
+
+#define MAX_NUMLEN 21
+static char *as_dec(char *buf, unsigned long val, bool is_signed)
+{
+ bool negative = false;
+ char *p = buf + MAX_NUMLEN;
+
+ if (is_signed && (long)val < 0) {
+ val = (val == LONG_MIN ? LONG_MIN : -(long)val);
+ negative = true;
+ }
+
+ *--p = '\0';
+ do {
+ *--p = '0' + (val % 10);
+ val /= 10;
+ } while (val);
+
+ if (negative)
+ *--p = '-';
+ return p;
+}
+
+static ssize_t strpad(char *dst, size_t dst_size, const char *src,
+ int _pad, bool zero_pad, bool decimal)
+{
+ ssize_t len = strlen(src), pad = _pad;
+ char *p = dst;
+
+ if (max(len, abs(pad)) >= dst_size)
+ return -E2BIG;
+
+ if (pad > len) {
+ if (decimal && zero_pad && *src == '-') {
+ *p++ = '-';
+ src++;
+ len--;
+ pad--;
+ }
+ memset(p, zero_pad ? '0' : ' ', pad - len);
+ p += pad - len;
+ }
+ memcpy(p, src, len);
+ p += len;
+ if (pad < 0 && -pad > len) {
+ memset(p, ' ', -pad - len);
+ p += -pad - len;
+ }
+ *p = '\0';
+ return p - dst;
}
static char *symstart(char *p)
@@ -58,35 +148,94 @@ static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned sh
return NULL;
}
-static noinline char *strsym(void *ip)
+#define MAX_SYMLEN 64
+static noinline char *strsym(char *buf, void *ip)
{
- static char buf[64];
unsigned short off;
unsigned short len;
char *p;
p = findsym((unsigned long)ip, &off, &len);
if (p) {
- strncpy(buf, p, sizeof(buf));
+ strncpy(buf, p, MAX_SYMLEN);
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
- p = buf + strnlen(buf, sizeof(buf) - 15);
+ p = buf + strnlen(buf, MAX_SYMLEN - 15);
strcpy(p, "+0x");
- p = as_hex(p + 3, off, 0);
- strcpy(p, "/0x");
- as_hex(p + 3, len, 0);
+ as_hex(p + 3, off, 0);
+ strcat(p, "/0x");
+ as_hex(p + strlen(p), len, 0);
} else {
as_hex(buf, (unsigned long)ip, 16);
}
return buf;
}
-void boot_printk(const char *fmt, ...)
+static inline int printk_loglevel(const char *buf)
+{
+ if (buf[0] == KERN_SOH_ASCII && buf[1]) {
+ switch (buf[1]) {
+ case '0' ... '7':
+ return buf[1] - '0';
+ }
+ }
+ return MESSAGE_LOGLEVEL_DEFAULT;
+}
+
+static void boot_console_earlyprintk(const char *buf)
+{
+ int level = printk_loglevel(buf);
+
+ /* always print emergency messages */
+ if (level > LOGLEVEL_EMERG && !boot_earlyprintk)
+ return;
+ buf = printk_skip_level(buf);
+ /* print debug messages only when bootdebug is enabled */
+ if (level == LOGLEVEL_DEBUG && (!bootdebug || !bootdebug_filter_match(skip_timestamp(buf))))
+ return;
+ if (boot_ignore_loglevel || level < boot_console_loglevel)
+ sclp_early_printk(buf);
+}
+
+static char *add_timestamp(char *buf)
+{
+#ifdef CONFIG_PRINTK_TIME
+ union tod_clock *boot_clock = (union tod_clock *)&get_lowcore()->boot_clock;
+ unsigned long ns = tod_to_ns(get_tod_clock() - boot_clock->tod);
+ char ts[MAX_NUMLEN];
+
+ *buf++ = '[';
+ buf += strpad(buf, MAX_NUMLEN, as_dec(ts, ns / NSEC_PER_SEC, 0), 5, 0, 0);
+ *buf++ = '.';
+ buf += strpad(buf, MAX_NUMLEN, as_dec(ts, (ns % NSEC_PER_SEC) / NSEC_PER_USEC, 0), 6, 1, 0);
+ *buf++ = ']';
+ *buf++ = ' ';
+#endif
+ return buf;
+}
+
+#define va_arg_len_type(args, lenmod, typemod) \
+ ((lenmod == 'l') ? va_arg(args, typemod long) : \
+ (lenmod == 'h') ? (typemod short)va_arg(args, typemod int) : \
+ (lenmod == 'H') ? (typemod char)va_arg(args, typemod int) : \
+ (lenmod == 'z') ? va_arg(args, typemod long) : \
+ va_arg(args, typemod int))
+
+int boot_printk(const char *fmt, ...)
{
char buf[1024] = { 0 };
char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
- unsigned long pad;
- char *p = buf;
+ bool zero_pad, decimal;
+ char *strval, *p = buf;
+ char valbuf[MAX(MAX_SYMLEN, MAX_NUMLEN)];
va_list args;
+ char lenmod;
+ ssize_t len;
+ int pad;
+
+ *p++ = KERN_SOH_ASCII;
+ *p++ = printk_get_level(fmt) ?: '0' + MESSAGE_LOGLEVEL_DEFAULT;
+ p = add_timestamp(p);
+ fmt = printk_skip_level(fmt);
va_start(args, fmt);
for (; p < end && *fmt; fmt++) {
@@ -94,31 +243,56 @@ void boot_printk(const char *fmt, ...)
*p++ = *fmt;
continue;
}
- pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
+ if (*++fmt == '%') {
+ *p++ = '%';
+ continue;
+ }
+ zero_pad = (*fmt == '0');
+ pad = simple_strtol(fmt, (char **)&fmt, 10);
+ lenmod = (*fmt == 'h' || *fmt == 'l' || *fmt == 'z') ? *fmt++ : 0;
+ if (lenmod == 'h' && *fmt == 'h') {
+ lenmod = 'H';
+ fmt++;
+ }
+ decimal = false;
switch (*fmt) {
case 's':
- p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
+ if (lenmod)
+ goto out;
+ strval = va_arg(args, char *);
+ zero_pad = false;
break;
case 'p':
- if (*++fmt != 'S')
+ if (*++fmt != 'S' || lenmod)
goto out;
- p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
+ strval = strsym(valbuf, va_arg(args, void *));
+ zero_pad = false;
break;
- case 'l':
- if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned long), pad);
+ case 'd':
+ case 'i':
+ strval = as_dec(valbuf, va_arg_len_type(args, lenmod, signed), 1);
+ decimal = true;
+ break;
+ case 'u':
+ strval = as_dec(valbuf, va_arg_len_type(args, lenmod, unsigned), 0);
break;
case 'x':
- if (end - p <= max(sizeof(int) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned int), pad);
+ strval = as_hex(valbuf, va_arg_len_type(args, lenmod, unsigned), 0);
break;
default:
goto out;
}
+ len = strpad(p, end - p, strval, pad, zero_pad, decimal);
+ if (len == -E2BIG)
+ break;
+ p += len;
}
out:
va_end(args);
- sclp_early_printk(buf);
+ len = strlen(buf);
+ if (len) {
+ boot_rb_add(buf, len);
+ boot_console_earlyprintk(buf);
+ }
+ return len;
}
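[Editor's note] boot_rb_add() above stores messages back to back as '\0'-separated strings and simply restarts at offset 0 when the remaining space is too small, overwriting the oldest entries. Below is a small stand-alone model of that buffer and of the dump order used by boot_rb_foreach() (declared in boot_data.h later in this patch): entries after the current write position first, then from the start of the buffer. The names rb, rb_off and rb_add, the 128-byte size and the sample messages are the editor's, for illustration only.

#include <stdio.h>
#include <string.h>

static char rb[128];		/* stands in for boot_rb[PAGE_SIZE * 2] */
static size_t rb_off;

static void rb_add(const char *str)
{
	size_t len = strlen(str);
	size_t avail = sizeof(rb) - rb_off - 1;	/* keep a terminating '\0' */

	if (len + 1 > avail)
		rb_off = 0;			/* wrap and overwrite old entries */
	strcpy(rb + rb_off, str);
	rb_off += len + 1;
}

int main(void)
{
	size_t off;

	rb_add("physmem: Usable memory limit: 0x0000000080000000");
	rb_add("vmem: IDENTITY 0x0 -> 0x1000");
	/* entries after the write position first (skipping the partial oldest one)... */
	for (off = rb_off + strlen(rb + rb_off) + 1;
	     off < sizeof(rb) && rb[off]; off += strlen(rb + off) + 1)
		puts(rb + off);
	/* ...then from the start of the buffer up to the write position */
	for (off = 0; off < rb_off && rb[off]; off += strlen(rb + off) + 1)
		puts(rb + off);
	return 0;
}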
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index e6b06692ddc8..885bd1dd2c82 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
@@ -42,7 +43,8 @@ struct machine_info machine;
void error(char *x)
{
- boot_printk("\n\n%s\n\n -- System halted", x);
+ boot_emerg("%s\n", x);
+ boot_emerg(" -- System halted\n");
disabled_wait();
}
@@ -143,7 +145,7 @@ static void rescue_initrd(unsigned long min, unsigned long max)
return;
old_addr = addr;
physmem_free(RR_INITRD);
- addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+ addr = physmem_alloc_or_die(RR_INITRD, size, 0);
memmove((void *)addr, (void *)old_addr, size);
}
@@ -222,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
if (oldmem_data.start) {
__kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size);
+ boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) {
__kaslr_enabled = 0;
- if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
+ if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
ident_map_size = min(ident_map_size, hsa_size);
+ boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
+ }
}
#endif
+ boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
}
#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@@ -267,6 +273,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
+ boot_debug("vmem size estimated: 0x%016lx\n", vsize);
if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
(vsize > _REGION2_SIZE && kaslr_enabled())) {
asce_limit = _REGION1_SIZE;
@@ -290,8 +297,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
* otherwise asce_limit and rte_size would have been adjusted.
*/
vmax = adjust_to_uv_max(asce_limit);
+ boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
+ boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
/* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START);
#endif
@@ -305,19 +314,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
pos = 0;
kernel_end = vmax - pos * THREAD_SIZE;
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+ boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
+ boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
+ kernel_start + kernel_size);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
- boot_printk("The kernel base address is forced to %lx\n", kernel_start);
+ boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
+ kernel_start + kernel_size);
} else {
kernel_start = __NO_KASLR_START_KERNEL;
+ boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
+ kernel_start + kernel_size);
}
__kaslr_offset = kernel_start;
+ boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
if (IS_ENABLED(CONFIG_KMSAN))
VMALLOC_END -= MODULES_LEN * 2;
+ boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@@ -329,10 +346,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
VMALLOC_END -= vmalloc_size * 2;
}
VMALLOC_START = VMALLOC_END - vmalloc_size;
+ boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+ boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
+ __memcpy_real_area + MEMCPY_REAL_SIZE);
__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
sizeof(struct lowcore));
+ boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
+ __abs_lowcore + ABS_LOWCORE_MAP_SIZE);
/* split remaining virtual space between 1:1 mapping & vmemmap array */
pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@@ -352,8 +374,11 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
max_mappable = min(max_mappable, vmemmap_start);
- if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
- __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
+ __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+#endif
+ boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
+ __identity_base + ident_map_size);
return asce_limit;
}
@@ -412,6 +437,10 @@ void startup_kernel(void)
psw_t psw;
setup_lpp();
+ store_ipl_parmblock();
+ uv_query_info();
+ setup_boot_command_line();
+ parse_boot_command_line();
/*
* Non-randomized kernel physical start address must be _SEGMENT_SIZE
@@ -431,12 +460,8 @@ void startup_kernel(void)
oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size;
- store_ipl_parmblock();
read_ipl_report();
- uv_query_info();
sclp_early_read_info();
- setup_boot_command_line();
- parse_boot_command_line();
detect_facilities();
cmma_init();
sanitize_prot_virt_host();
@@ -526,6 +551,7 @@ void startup_kernel(void)
__kaslr_offset, __kaslr_offset_phys);
kaslr_adjust_got(__kaslr_offset);
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
+ dump_physmem_reserved();
copy_bootdata();
__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
(struct alt_instr *)_vmlinux_info.alt_instructions_end,
@@ -542,5 +568,6 @@ void startup_kernel(void)
*/
psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS;
+ boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
__load_psw(psw);
}
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 881a1ece422f..cfca94a8eac4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt) "vmem: " fmt
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
@@ -13,6 +14,7 @@
#include "decompressor.h"
#include "boot.h"
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
struct ctlreg __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS
@@ -31,12 +33,42 @@ enum populate_mode {
POPULATE_IDENTITY,
POPULATE_KERNEL,
#ifdef CONFIG_KASAN
+ /* KASAN modes should be last and grouped together, see is_kasan_populate_mode() */
POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW,
POPULATE_KASAN_SHALLOW
#endif
};
+#define POPULATE_MODE_NAME(t) case POPULATE_ ## t: return #t
+static inline const char *get_populate_mode_name(enum populate_mode t)
+{
+ switch (t) {
+ POPULATE_MODE_NAME(NONE);
+ POPULATE_MODE_NAME(DIRECT);
+ POPULATE_MODE_NAME(LOWCORE);
+ POPULATE_MODE_NAME(ABS_LOWCORE);
+ POPULATE_MODE_NAME(IDENTITY);
+ POPULATE_MODE_NAME(KERNEL);
+#ifdef CONFIG_KASAN
+ POPULATE_MODE_NAME(KASAN_MAP_SHADOW);
+ POPULATE_MODE_NAME(KASAN_ZERO_SHADOW);
+ POPULATE_MODE_NAME(KASAN_SHALLOW);
+#endif
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static bool is_kasan_populate_mode(enum populate_mode mode)
+{
+#ifdef CONFIG_KASAN
+ return mode >= POPULATE_KASAN_MAP_SHADOW;
+#else
+ return false;
+#endif
+}
+
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
#ifdef CONFIG_KASAN
@@ -52,9 +84,12 @@ static pte_t pte_z;
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
- start = PAGE_ALIGN_DOWN(__sha(start));
- end = PAGE_ALIGN(__sha(end));
- pgtable_populate(start, end, mode);
+ unsigned long sha_start = PAGE_ALIGN_DOWN(__sha(start));
+ unsigned long sha_end = PAGE_ALIGN(__sha(end));
+
+ boot_debug("%-17s 0x%016lx-0x%016lx >> 0x%016lx-0x%016lx\n", get_populate_mode_name(mode),
+ start, end, sha_start, sha_end);
+ pgtable_populate(sha_start, sha_end, mode);
}
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
@@ -200,7 +235,7 @@ static void *boot_crst_alloc(unsigned long val)
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
unsigned long *table;
- table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
+ table = (unsigned long *)physmem_alloc_or_die(RR_VMEM, size, size);
crst_table_init(table, val);
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
@@ -216,7 +251,7 @@ static pte_t *boot_pte_alloc(void)
* during POPULATE_KASAN_MAP_SHADOW when EDAT is off
*/
if (!pte_leftover) {
- pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+ pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
__arch_set_page_dat(pte, 1);
} else {
@@ -228,11 +263,12 @@ static pte_t *boot_pte_alloc(void)
return pte;
}
-static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
+static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size,
+ enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
- return -1;
+ return INVALID_PHYS_ADDR;
case POPULATE_DIRECT:
return addr;
case POPULATE_LOWCORE:
@@ -245,38 +281,64 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return __identity_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
- addr = physmem_alloc_top_down(RR_VMEM, size, size);
- memset((void *)addr, 0, size);
- return addr;
+ /* Allow large page allocations to fail; this will fall back to 1MB/4KB pages */
+ addr = physmem_alloc(RR_VMEM, size, size, size == PAGE_SIZE);
+ if (addr) {
+ memset((void *)addr, 0, size);
+ return addr;
+ }
+ return INVALID_PHYS_ADDR;
#endif
default:
- return -1;
+ return INVALID_PHYS_ADDR;
}
}
-static bool large_allowed(enum populate_mode mode)
+static bool large_page_mapping_allowed(enum populate_mode mode)
{
- return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
+ switch (mode) {
+ case POPULATE_DIRECT:
+ case POPULATE_IDENTITY:
+ case POPULATE_KERNEL:
+#ifdef CONFIG_KASAN
+ case POPULATE_KASAN_MAP_SHADOW:
+#endif
+ return true;
+ default:
+ return false;
+ }
}
-static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
- enum populate_mode mode)
+static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end,
+ enum populate_mode mode)
{
- unsigned long size = end - addr;
+ unsigned long pa, size = end - addr;
+
+ if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
+ !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
+ return INVALID_PHYS_ADDR;
+
+ pa = resolve_pa_may_alloc(addr, size, mode);
+ if (!IS_ALIGNED(pa, PUD_SIZE))
+ return INVALID_PHYS_ADDR;
- return machine.has_edat2 && large_allowed(mode) &&
- IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
- IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
+ return pa;
}
-static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
- enum populate_mode mode)
+static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end,
+ enum populate_mode mode)
{
- unsigned long size = end - addr;
+ unsigned long pa, size = end - addr;
- return machine.has_edat1 && large_allowed(mode) &&
- IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
- IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
+ if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
+ !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
+ return INVALID_PHYS_ADDR;
+
+ pa = resolve_pa_may_alloc(addr, size, mode);
+ if (!IS_ALIGNED(pa, PMD_SIZE))
+ return INVALID_PHYS_ADDR;
+
+ return pa;
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -290,7 +352,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
if (pte_none(*pte)) {
if (kasan_pte_populate_zero_shadow(pte, mode))
continue;
- entry = __pte(_pa(addr, PAGE_SIZE, mode));
+ entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL);
set_pte(pte, entry);
pages++;
@@ -303,7 +365,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next, pages = 0;
+ unsigned long pa, next, pages = 0;
pmd_t *pmd, entry;
pte_t *pte;
@@ -313,8 +375,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
- if (can_large_pmd(pmd, addr, next, mode)) {
- entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
+ pa = try_get_large_pmd_pa(pmd, addr, next, mode);
+ if (pa != INVALID_PHYS_ADDR) {
+ entry = __pmd(pa);
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
set_pmd(pmd, entry);
pages++;
@@ -334,7 +397,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
- unsigned long next, pages = 0;
+ unsigned long pa, next, pages = 0;
pud_t *pud, entry;
pmd_t *pmd;
@@ -344,8 +407,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
- if (can_large_pud(pud, addr, next, mode)) {
- entry = __pud(_pa(addr, _REGION3_SIZE, mode));
+ pa = try_get_large_pud_pa(pud, addr, next, mode);
+ if (pa != INVALID_PHYS_ADDR) {
+ entry = __pud(pa);
entry = set_pud_bit(entry, REGION3_KERNEL);
set_pud(pud, entry);
pages++;
@@ -388,6 +452,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
pgd_t *pgd;
p4d_t *p4d;
+ if (!is_kasan_populate_mode(mode)) {
+ boot_debug("%-17s 0x%016lx-0x%016lx -> 0x%016lx-0x%016lx\n",
+ get_populate_mode_name(mode), addr, end,
+ resolve_pa_may_alloc(addr, 0, mode),
+ resolve_pa_may_alloc(end - 1, 0, mode) + 1);
+ }
+
pgd = pgd_offset(&init_mm, addr);
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
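[Editor's note] try_get_large_pud_pa()/try_get_large_pmd_pa() above return INVALID_PHYS_ADDR unless the EDAT facility is available, the populate mode allows large mappings, and both the virtual range and the resolved physical address are aligned to the mapping size; the callers then fall back to the next smaller mapping. A stand-alone model of the segment-level (1 MB) check with made-up addresses; can_map_segment and SEG_SIZE are the editor's names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEG_SIZE ((uint64_t)1 << 20)	/* s390 segment (PMD) size: 1 MB */

static bool can_map_segment(bool has_edat1, uint64_t addr, uint64_t end, uint64_t pa)
{
	uint64_t size = end - addr;

	/* facility present, range covers a full segment, both addresses aligned */
	return has_edat1 && !(addr & (SEG_SIZE - 1)) && size >= SEG_SIZE &&
	       !(pa & (SEG_SIZE - 1));
}

int main(void)
{
	printf("%d\n", can_map_segment(true, 0x100000, 0x300000, 0x100000)); /* 1 */
	printf("%d\n", can_map_segment(true, 0x100800, 0x300000, 0x100000)); /* 0: unaligned */
	return 0;
}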
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index d8d227ab82de..d6beec5292a0 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -770,7 +770,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@@ -782,7 +781,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
@@ -795,7 +793,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -818,7 +815,6 @@ CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
-CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 6c2f2bb4fbf8..8cfbfb10bba8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -756,7 +756,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
-CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@@ -768,7 +767,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
-CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
@@ -782,7 +780,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index d3eb3a233693..b760232537f1 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -2,18 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (s390)"
-config CRYPTO_CRC32_S390
- tristate "CRC32c and CRC32"
- depends on S390
- select CRYPTO_HASH
- select CRC32
- help
- CRC32c and CRC32 CRC algorithms
-
- Architecture: s390
-
- It is available with IBM z13 or later.
-
config CRYPTO_SHA512_S390
tristate "Hash functions: SHA-384 and SHA-512"
depends on S390
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index a0cb96937c3d..14dafadbcbed 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -14,9 +14,7 @@ obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
-obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
obj-y += arch_random.o
-crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
chacha_s390-y := chacha-glue.o chacha-s390.o
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
deleted file mode 100644
index 89a10337e6ea..000000000000
--- a/arch/s390/crypto/crc32-vx.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Crypto-API module for CRC-32 algorithms implemented with the
- * z/Architecture Vector Extension Facility.
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-#define KMSG_COMPONENT "crc32-vx"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <linux/crc32.h>
-#include <crypto/internal/hash.h>
-#include <asm/fpu.h>
-#include "crc32-vx.h"
-
-#define CRC32_BLOCK_SIZE 1
-#define CRC32_DIGEST_SIZE 4
-
-#define VX_MIN_LEN 64
-#define VX_ALIGNMENT 16L
-#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
-
-struct crc_ctx {
- u32 key;
-};
-
-struct crc_desc_ctx {
- u32 crc;
-};
-
-/*
- * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
- *
- * Creates a function to perform a particular CRC-32 computation. Depending
- * on the message buffer, the hardware-accelerated or software implementation
- * is used. Note that the message buffer is aligned to improve fetch
- * operations of VECTOR LOAD MULTIPLE instructions.
- *
- */
-#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw) \
- static u32 __pure ___fname(u32 crc, \
- unsigned char const *data, size_t datalen) \
- { \
- unsigned long prealign, aligned, remaining; \
- DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \
- \
- if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
- return ___crc32_sw(crc, data, datalen); \
- \
- if ((unsigned long)data & VX_ALIGN_MASK) { \
- prealign = VX_ALIGNMENT - \
- ((unsigned long)data & VX_ALIGN_MASK); \
- datalen -= prealign; \
- crc = ___crc32_sw(crc, data, prealign); \
- data = (void *)((unsigned long)data + prealign); \
- } \
- \
- aligned = datalen & ~VX_ALIGN_MASK; \
- remaining = datalen & VX_ALIGN_MASK; \
- \
- kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
- crc = ___crc32_vx(crc, data, aligned); \
- kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
- \
- if (remaining) \
- crc = ___crc32_sw(crc, data + aligned, remaining); \
- \
- return crc; \
- }
-
-DEFINE_CRC32_VX(crc32_le_vx, crc32_le_vgfm_16, crc32_le)
-DEFINE_CRC32_VX(crc32_be_vx, crc32_be_vgfm_16, crc32_be)
-DEFINE_CRC32_VX(crc32c_le_vx, crc32c_le_vgfm_16, __crc32c_le)
-
-
-static int crc32_vx_cra_init_zero(struct crypto_tfm *tfm)
-{
- struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- return 0;
-}
-
-static int crc32_vx_cra_init_invert(struct crypto_tfm *tfm)
-{
- struct crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = ~0;
- return 0;
-}
-
-static int crc32_vx_init(struct shash_desc *desc)
-{
- struct crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = mctx->key;
- return 0;
-}
-
-static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
- unsigned int newkeylen)
-{
- struct crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (newkeylen != sizeof(mctx->key))
- return -EINVAL;
- mctx->key = le32_to_cpu(*(__le32 *)newkey);
- return 0;
-}
-
-static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
- unsigned int newkeylen)
-{
- struct crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (newkeylen != sizeof(mctx->key))
- return -EINVAL;
- mctx->key = be32_to_cpu(*(__be32 *)newkey);
- return 0;
-}
-
-static int crc32le_vx_final(struct shash_desc *desc, u8 *out)
-{
- struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__le32 *)out = cpu_to_le32p(&ctx->crc);
- return 0;
-}
-
-static int crc32be_vx_final(struct shash_desc *desc, u8 *out)
-{
- struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__be32 *)out = cpu_to_be32p(&ctx->crc);
- return 0;
-}
-
-static int crc32c_vx_final(struct shash_desc *desc, u8 *out)
-{
- struct crc_desc_ctx *ctx = shash_desc_ctx(desc);
-
- /*
- * Perform a final XOR with 0xFFFFFFFF to be in sync
- * with the generic crc32c shash implementation.
- */
- *(__le32 *)out = ~cpu_to_le32p(&ctx->crc);
- return 0;
-}
-
-static int __crc32le_vx_finup(u32 *crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__le32 *)out = cpu_to_le32(crc32_le_vx(*crc, data, len));
- return 0;
-}
-
-static int __crc32be_vx_finup(u32 *crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__be32 *)out = cpu_to_be32(crc32_be_vx(*crc, data, len));
- return 0;
-}
-
-static int __crc32c_vx_finup(u32 *crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- /*
- * Perform a final XOR with 0xFFFFFFFF to be in sync
- * with the generic crc32c shash implementation.
- */
- *(__le32 *)out = ~cpu_to_le32(crc32c_le_vx(*crc, data, len));
- return 0;
-}
-
-
-#define CRC32_VX_FINUP(alg, func) \
- static int alg ## _vx_finup(struct shash_desc *desc, const u8 *data, \
- unsigned int datalen, u8 *out) \
- { \
- return __ ## alg ## _vx_finup(shash_desc_ctx(desc), \
- data, datalen, out); \
- }
-
-CRC32_VX_FINUP(crc32le, crc32_le_vx)
-CRC32_VX_FINUP(crc32be, crc32_be_vx)
-CRC32_VX_FINUP(crc32c, crc32c_le_vx)
-
-#define CRC32_VX_DIGEST(alg, func) \
- static int alg ## _vx_digest(struct shash_desc *desc, const u8 *data, \
- unsigned int len, u8 *out) \
- { \
- return __ ## alg ## _vx_finup(crypto_shash_ctx(desc->tfm), \
- data, len, out); \
- }
-
-CRC32_VX_DIGEST(crc32le, crc32_le_vx)
-CRC32_VX_DIGEST(crc32be, crc32_be_vx)
-CRC32_VX_DIGEST(crc32c, crc32c_le_vx)
-
-#define CRC32_VX_UPDATE(alg, func) \
- static int alg ## _vx_update(struct shash_desc *desc, const u8 *data, \
- unsigned int datalen) \
- { \
- struct crc_desc_ctx *ctx = shash_desc_ctx(desc); \
- ctx->crc = func(ctx->crc, data, datalen); \
- return 0; \
- }
-
-CRC32_VX_UPDATE(crc32le, crc32_le_vx)
-CRC32_VX_UPDATE(crc32be, crc32_be_vx)
-CRC32_VX_UPDATE(crc32c, crc32c_le_vx)
-
-
-static struct shash_alg crc32_vx_algs[] = {
- /* CRC-32 LE */
- {
- .init = crc32_vx_init,
- .setkey = crc32_vx_setkey,
- .update = crc32le_vx_update,
- .final = crc32le_vx_final,
- .finup = crc32le_vx_finup,
- .digest = crc32le_vx_digest,
- .descsize = sizeof(struct crc_desc_ctx),
- .digestsize = CRC32_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-vx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CRC32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crc32_vx_cra_init_zero,
- },
- },
- /* CRC-32 BE */
- {
- .init = crc32_vx_init,
- .setkey = crc32be_vx_setkey,
- .update = crc32be_vx_update,
- .final = crc32be_vx_final,
- .finup = crc32be_vx_finup,
- .digest = crc32be_vx_digest,
- .descsize = sizeof(struct crc_desc_ctx),
- .digestsize = CRC32_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32be",
- .cra_driver_name = "crc32be-vx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CRC32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crc32_vx_cra_init_zero,
- },
- },
- /* CRC-32C LE */
- {
- .init = crc32_vx_init,
- .setkey = crc32_vx_setkey,
- .update = crc32c_vx_update,
- .final = crc32c_vx_final,
- .finup = crc32c_vx_finup,
- .digest = crc32c_vx_digest,
- .descsize = sizeof(struct crc_desc_ctx),
- .digestsize = CRC32_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-vx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CRC32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crc32_vx_cra_init_invert,
- },
- },
-};
-
-
-static int __init crc_vx_mod_init(void)
-{
- return crypto_register_shashes(crc32_vx_algs,
- ARRAY_SIZE(crc32_vx_algs));
-}
-
-static void __exit crc_vx_mod_exit(void)
-{
- crypto_unregister_shashes(crc32_vx_algs, ARRAY_SIZE(crc32_vx_algs));
-}
-
-module_cpu_feature_match(S390_CPU_FEATURE_VXRS, crc_vx_mod_init);
-module_exit(crc_vx_mod_exit);
-
-MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("CRC-32 algorithms using z/Architecture Vector Extension Facility");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("crc32");
-MODULE_ALIAS_CRYPTO("crc32-vx");
-MODULE_ALIAS_CRYPTO("crc32c");
-MODULE_ALIAS_CRYPTO("crc32c-vx");
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index 4a6b0a8b6412..2e829c16fd8a 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -9,11 +9,11 @@
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
-#define EX_TYPE_UA_STORE 3
-#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_FAULT 3
#define EX_TYPE_UA_LOAD_REG 5
#define EX_TYPE_UA_LOAD_REGPAIR 6
#define EX_TYPE_ZEROPAD 7
+#define EX_TYPE_FPC 8
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -69,11 +69,8 @@
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
-#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
-
-#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+#define EX_TABLE_UA_FAULT(_fault, _target, _regerr) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_FAULT, _regerr, _regerr, 0)
#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
@@ -84,4 +81,7 @@
#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+#define EX_TABLE_FPC(_fault, _target) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
+
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/asm.h b/arch/s390/include/asm/asm.h
index ec011b94af2a..e9062b01e2a2 100644
--- a/arch/s390/include/asm/asm.h
+++ b/arch/s390/include/asm/asm.h
@@ -28,7 +28,7 @@
* [var] also contains the program mask. CC_TRANSFORM() moves the condition
* code to the two least significant bits and sets all other bits to zero.
*/
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_GCC_ASM_FLAG_OUTPUT_BROKEN))
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_ASM_FLAG_OUTPUT_BROKEN))
#define __HAVE_ASM_FLAG_OUTPUTS__
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15aa64e3020e..d5125296ade2 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -60,7 +60,7 @@ static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsig
asm volatile(
" tm %[addr],%[mask]\n"
: "=@cc" (cc)
- : [addr] "R" (*addr), [mask] "I" (mask)
+ : [addr] "Q" (*addr), [mask] "I" (mask)
);
return cc == 3;
}
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
index f7eed27b3220..f55f8227058e 100644
--- a/arch/s390/include/asm/boot_data.h
+++ b/arch/s390/include/asm/boot_data.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_BOOT_DATA_H
+#include <linux/string.h>
#include <asm/setup.h>
#include <asm/ipl.h>
@@ -15,4 +16,54 @@ extern unsigned long ipl_cert_list_size;
extern unsigned long early_ipl_comp_list_addr;
extern unsigned long early_ipl_comp_list_size;
+extern char boot_rb[PAGE_SIZE * 2];
+extern bool boot_earlyprintk;
+extern size_t boot_rb_off;
+extern char bootdebug_filter[128];
+extern bool bootdebug;
+
+#define boot_rb_foreach(cb) \
+ do { \
+ size_t off = boot_rb_off + strlen(boot_rb + boot_rb_off) + 1; \
+ size_t len; \
+ for (; off < sizeof(boot_rb) && (len = strlen(boot_rb + off)); off += len + 1) \
+ cb(boot_rb + off); \
+ for (off = 0; off < boot_rb_off && (len = strlen(boot_rb + off)); off += len + 1) \
+ cb(boot_rb + off); \
+ } while (0)
+
+/*
+ * bootdebug_filter is a comma-separated list of strings,
+ * where each string can be a prefix of the message.
+ */
+static inline bool bootdebug_filter_match(const char *buf)
+{
+ char *p = bootdebug_filter, *s;
+ char *end;
+
+ if (!*p)
+ return true;
+
+ end = p + strlen(p);
+ while (p < end) {
+ p = skip_spaces(p);
+ s = memscan(p, ',', end - p);
+ if (!strncmp(p, buf, s - p))
+ return true;
+ p = s + 1;
+ }
+ return false;
+}
+
+static inline const char *skip_timestamp(const char *buf)
+{
+#ifdef CONFIG_PRINTK_TIME
+ const char *p = memchr(buf, ']', strlen(buf));
+
+ if (p && p[1] == ' ')
+ return p + 2;
+#endif
+ return buf;
+}
+
#endif /* _ASM_S390_BOOT_DATA_H */
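[Editor's note] bootdebug_filter_match() above treats the filter as a comma-separated list of message prefixes, with an empty filter matching everything. A stand-alone model for illustration: filter_match is the editor's name, memchr stands in for the kernel's memscan, and the skip_spaces() handling is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool filter_match(const char *filter, const char *msg)
{
	const char *p = filter, *end = filter + strlen(filter);

	if (!*p)
		return true;		/* empty filter matches everything */
	while (p < end) {
		const char *s = memchr(p, ',', end - p);

		if (!s)
			s = end;
		if (!strncmp(p, msg, s - p))	/* prefix match against one entry */
			return true;
		p = s + 1;
	}
	return false;
}

int main(void)
{
	printf("%d\n", filter_match("physmem,vmem", "vmem: identity map"));  /* 1 */
	printf("%d\n", filter_match("physmem,vmem", "startup: vmax"));       /* 0 */
	return 0;
}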
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index de510c9f6efa..f668bffd6dd3 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -100,19 +100,12 @@ static __always_inline void fpu_lfpc(unsigned int *fpc)
*/
static inline void fpu_lfpc_safe(unsigned int *fpc)
{
- u32 tmp;
-
instrument_read(fpc, sizeof(*fpc));
asm_inline volatile(
- "0: lfpc %[fpc]\n"
- "1: nopr %%r7\n"
- ".pushsection .fixup, \"ax\"\n"
- "2: lghi %[tmp],0\n"
- " sfpc %[tmp]\n"
- " jg 1b\n"
- ".popsection\n"
- EX_TABLE(1b, 2b)
- : [tmp] "=d" (tmp)
+ " lfpc %[fpc]\n"
+ "0: nopr %%r7\n"
+ EX_TABLE_FPC(0b, 0b)
+ :
: [fpc] "Q" (*fpc)
: "memory");
}
@@ -183,7 +176,19 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
: "memory");
}
-#ifdef CONFIG_CC_IS_CLANG
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vl(u8 v1, const void *vxr)
+{
+ instrument_read(vxr, sizeof(__vector128));
+ asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(__vector128 *)vxr),
+ [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
@@ -197,19 +202,7 @@ static __always_inline void fpu_vl(u8 v1, const void *vxr)
: "memory", "1");
}
-#else /* CONFIG_CC_IS_CLANG */
-
-static __always_inline void fpu_vl(u8 v1, const void *vxr)
-{
- instrument_read(vxr, sizeof(__vector128));
- asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
- :
- : [vxr] "Q" (*(__vector128 *)vxr),
- [v1] "I" (v1)
- : "memory");
-}
-
-#endif /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
{
@@ -238,7 +231,7 @@ static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
return val;
}
-#ifdef CONFIG_CC_IS_CLANG
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
@@ -246,17 +239,15 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
size = min(index + 1, sizeof(__vector128));
instrument_read(vxr, size);
- asm volatile(
- " la 1,%[vxr]\n"
- " VLL %[v1],%[index],0,1\n"
- :
- : [vxr] "R" (*(u8 *)vxr),
- [index] "d" (index),
- [v1] "I" (v1)
- : "memory", "1");
+ asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory");
}
-#else /* CONFIG_CC_IS_CLANG */
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
@@ -264,17 +255,19 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
size = min(index + 1, sizeof(__vector128));
instrument_read(vxr, size);
- asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
- :
- : [vxr] "Q" (*(u8 *)vxr),
- [index] "d" (index),
- [v1] "I" (v1)
- : "memory");
+ asm volatile(
+ " la 1,%[vxr]\n"
+ " VLL %[v1],%[index],0,1\n"
+ :
+ : [vxr] "R" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory", "1");
}
-#endif /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-#ifdef CONFIG_CC_IS_CLANG
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
#define fpu_vlm(_v1, _v3, _vxrs) \
({ \
@@ -284,17 +277,15 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
- asm volatile( \
- " la 1,%[vxrs]\n" \
- " VLM %[v1],%[v3],0,1\n" \
- : \
- : [vxrs] "R" (*_v), \
- [v1] "I" (_v1), [v3] "I" (_v3) \
- : "memory", "1"); \
+ asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : \
+ : [vxrs] "Q" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
(_v3) - (_v1) + 1; \
})
-#else /* CONFIG_CC_IS_CLANG */
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define fpu_vlm(_v1, _v3, _vxrs) \
({ \
@@ -304,15 +295,17 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
- asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
- : \
- : [vxrs] "Q" (*_v), \
- [v1] "I" (_v1), [v3] "I" (_v3) \
- : "memory"); \
+ asm volatile( \
+ " la 1,%[vxrs]\n" \
+ " VLM %[v1],%[v3],0,1\n" \
+ : \
+ : [vxrs] "R" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
(_v3) - (_v1) + 1; \
})
-#endif /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vlr(u8 v1, u8 v2)
{
@@ -362,7 +355,18 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
: "memory");
}
-#ifdef CONFIG_CC_IS_CLANG
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vst(u8 v1, const void *vxr)
+{
+ instrument_write(vxr, sizeof(__vector128));
+ asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
+ : [vxr] "=Q" (*(__vector128 *)vxr)
+ : [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
@@ -375,20 +379,23 @@ static __always_inline void fpu_vst(u8 v1, const void *vxr)
: "memory", "1");
}
-#else /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-static __always_inline void fpu_vst(u8 v1, const void *vxr)
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
- instrument_write(vxr, sizeof(__vector128));
- asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
- : [vxr] "=Q" (*(__vector128 *)vxr)
- : [v1] "I" (v1)
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_write(vxr, size);
+ asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ : [vxr] "=Q" (*(u8 *)vxr)
+ : [index] "d" (index), [v1] "I" (v1)
: "memory");
}
-#endif /* CONFIG_CC_IS_CLANG */
-
-#ifdef CONFIG_CC_IS_CLANG
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
@@ -404,23 +411,9 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
: "memory", "1");
}
-#else /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
-{
- unsigned int size;
-
- size = min(index + 1, sizeof(__vector128));
- instrument_write(vxr, size);
- asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
- : [vxr] "=Q" (*(u8 *)vxr)
- : [index] "d" (index), [v1] "I" (v1)
- : "memory");
-}
-
-#endif /* CONFIG_CC_IS_CLANG */
-
-#ifdef CONFIG_CC_IS_CLANG
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
#define fpu_vstm(_v1, _v3, _vxrs) \
({ \
@@ -430,16 +423,14 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
- asm volatile( \
- " la 1,%[vxrs]\n" \
- " VSTM %[v1],%[v3],0,1\n" \
- : [vxrs] "=R" (*_v) \
- : [v1] "I" (_v1), [v3] "I" (_v3) \
- : "memory", "1"); \
+ asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : [vxrs] "=Q" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
(_v3) - (_v1) + 1; \
})
-#else /* CONFIG_CC_IS_CLANG */
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define fpu_vstm(_v1, _v3, _vxrs) \
({ \
@@ -449,14 +440,16 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
- asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
- : [vxrs] "=Q" (*_v) \
- : [v1] "I" (_v1), [v3] "I" (_v3) \
- : "memory"); \
+ asm volatile( \
+ " la 1,%[vxrs]\n" \
+ " VSTM %[v1],%[v3],0,1\n" \
+ : [vxrs] "=R" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
(_v3) - (_v1) + 1; \
})
-#endif /* CONFIG_CC_IS_CLANG */
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vupllf(u8 v1, u8 v2)
{
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index a3b73a4f626e..185331e91f83 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -51,6 +51,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
+#define ftrace_get_symaddr(fentry_ip) ((unsigned long)(fentry_ip))
#include <linux/ftrace_regs.h>
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 752a2310f0d6..f5781794356b 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -2,80 +2,95 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
+#include <linux/instrumented.h>
#include <linux/uaccess.h>
#include <linux/futex.h>
#include <asm/asm-extable.h>
#include <asm/mmu_context.h>
#include <asm/errno.h>
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile( \
- " sacf 256\n" \
- "0: l %1,0(%6)\n" \
- "1:"insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4: sacf 768\n" \
- EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
- EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc");
+#define FUTEX_OP_FUNC(name, insn) \
+static uaccess_kmsan_or_inline int \
+__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \
+{ \
+ int rc, new; \
+ \
+ instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \
+ asm_inline volatile( \
+ " sacf 256\n" \
+ "0: l %[old],%[uaddr]\n" \
+ "1:"insn \
+ "2: cs %[old],%[new],%[uaddr]\n" \
+ "3: jl 1b\n" \
+ " lhi %[rc],0\n" \
+ "4: sacf 768\n" \
+ EX_TABLE_UA_FAULT(0b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(2b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(3b, 4b, %[rc]) \
+ : [rc] "=d" (rc), [old] "=&d" (*old), \
+ [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \
+ : [oparg] "d" (oparg) \
+ : "cc"); \
+ if (!rc) \
+ instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \
+ return rc; \
+}
+
+FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n")
+FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n")
+FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n")
+FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n")
+FUTEX_OP_FUNC(xor, "lr %[new],%[old]\n xr %[new],%[oparg]\n")
-static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
- u32 __user *uaddr)
+static inline
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int oldval = 0, newval, ret;
+ int old, rc;
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_set(oparg, &old, uaddr);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_add(oparg, &old, uaddr);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_or(oparg, &old, uaddr);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, ~oparg);
+ rc = __futex_atomic_and(~oparg, &old, uaddr);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_xor(oparg, &old, uaddr);
break;
default:
- ret = -ENOSYS;
+ rc = -ENOSYS;
}
-
- if (!ret)
- *oval = oldval;
-
- return ret;
+ if (!rc)
+ *oval = old;
+ return rc;
}
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static uaccess_kmsan_or_inline
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
- int ret;
+ int rc;
- asm volatile(
- " sacf 256\n"
- "0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
- "2: sacf 768\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ instrument_copy_from_user_before(uval, uaddr, sizeof(*uval));
+ asm_inline volatile(
+ " sacf 256\n"
+ "0: cs %[old],%[new],%[uaddr]\n"
+ "1: lhi %[rc],0\n"
+ "2: sacf 768\n"
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc])
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc])
+ : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr)
+ : [new] "d" (newval)
: "cc", "memory");
*uval = oldval;
- return ret;
+ instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0);
+ return rc;
}
#endif /* _ASM_S390_FUTEX_H */
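The rewritten futex helpers above generate one inline function per operation instead of expanding a positional-operand asm macro at every call site. Below is a rough C sketch of the semantics each generated __futex_atomic_<op>() implements (load, compute, compare-and-swap retry loop); the helper name and the __atomic builtin are illustrative only, while the real code runs in the secondary address space (sacf 256/768) and reports faults through the EX_TABLE_UA_FAULT entries.

/* Sketch only: models the CAS retry loop of the generated helpers. */
static int futex_atomic_op_sketch(int oparg, int *old, unsigned int *uaddr,
				  unsigned int (*op)(unsigned int, int))
{
	unsigned int cur = *uaddr;		/* "0: l %[old],%[uaddr]" */
	unsigned int new;

	do {
		*old = cur;
		new = op(cur, oparg);		/* "1:" per-op insn, e.g. "ar" */
	} while (!__atomic_compare_exchange_n(uaddr, &cur, new, false,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return 0;				/* faulting accesses yield -EFAULT instead */
}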
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 13f51a6a5bb1..4e73ef46d4b2 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -23,7 +23,6 @@
/**
* struct gmap_struct - guest address space
* @list: list head for the mm->context gmap list
- * @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
* @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries
@@ -35,7 +34,6 @@
* @guest_handle: protected virtual machine handle for the ultravisor
* @host_to_rmap: radix tree with gmap_rmap lists
* @children: list of shadow gmap structures
- * @pt_list: list of all page tables used in the shadow guest address space
* @shadow_lock: spinlock to protect the shadow gmap list
* @parent: pointer to the parent gmap for shadow guest address spaces
* @orig_asce: ASCE for which the shadow page table has been created
@@ -45,7 +43,6 @@
*/
struct gmap {
struct list_head list;
- struct list_head crst_list;
struct mm_struct *mm;
struct radix_tree_root guest_to_host;
struct radix_tree_root host_to_guest;
@@ -61,7 +58,6 @@ struct gmap {
/* Additional data for shadow guest address spaces */
struct radix_tree_root host_to_rmap;
struct list_head children;
- struct list_head pt_list;
spinlock_t shadow_lock;
struct gmap *parent;
unsigned long orig_asce;
@@ -106,23 +102,21 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);
+void gmap_free(struct gmap *gmap);
+struct gmap *gmap_alloc(unsigned long limit);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
-struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
- int edat_level);
-int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+void gmap_unshadow(struct gmap *sg);
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
int fake);
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
@@ -131,24 +125,22 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
int fake);
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
int fake);
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
- unsigned long *pgt, int *dat_protection, int *fake);
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
void gmap_register_pte_notifier(struct gmap_notifier *);
void gmap_unregister_pte_notifier(struct gmap_notifier *);
-int gmap_mprotect_notify(struct gmap *, unsigned long start,
- unsigned long len, int prot);
+int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
int s390_disable_cow_sharing(void);
-void s390_unlist_old_asce(struct gmap *gmap);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool interruptible);
+int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
+unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
/**
* s390_uv_destroy_range - Destroy a range of pages in the given mm.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 97c7c8127543..9a367866cab0 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -30,6 +30,8 @@
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
+#define KVM_INTERNAL_MEM_SLOTS 1
+
/*
* These seem to be used for allocating ->chip in the routing table, which we
* don't use. 1 is as small as we can get to reduce the needed memory. If we
@@ -931,12 +933,14 @@ struct sie_page2 {
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
};
+struct vsie_page;
+
struct kvm_s390_vsie {
struct mutex mutex;
struct radix_tree_root addr_to_page;
int page_count;
int next;
- struct page *pages[KVM_MAX_VCPUS];
+ struct vsie_page *pages[KVM_MAX_VCPUS];
};
struct kvm_s390_gisa_iam {
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 4f43cdd9835b..1ff145f7b52b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -184,7 +184,11 @@ extern struct vm_layout vm_layout;
#define __kaslr_offset vm_layout.kaslr_offset
#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
#define __identity_base vm_layout.identity_base
+#else
+#define __identity_base 0UL
+#endif
#define ident_map_size vm_layout.identity_size
static inline unsigned long kaslr_offset(void)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 7b84ef6dc4b6..b19b6ed2ab53 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -53,29 +53,42 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm);
- if (table)
- crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ if (!table)
+ return NULL;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pagetable_p4d_ctor(virt_to_ptdesc(table));
+
return (p4d_t *) table;
}
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
- if (!mm_p4d_folded(mm))
- crst_table_free(mm, (unsigned long *) p4d);
+ if (mm_p4d_folded(mm))
+ return;
+
+ pagetable_dtor(virt_to_ptdesc(p4d));
+ crst_table_free(mm, (unsigned long *) p4d);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm);
- if (table)
- crst_table_init(table, _REGION3_ENTRY_EMPTY);
+
+ if (!table)
+ return NULL;
+ crst_table_init(table, _REGION3_ENTRY_EMPTY);
+ pagetable_pud_ctor(virt_to_ptdesc(table));
+
return (pud_t *) table;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- if (!mm_pud_folded(mm))
- crst_table_free(mm, (unsigned long *) pud);
+ if (mm_pud_folded(mm))
+ return;
+
+ pagetable_dtor(virt_to_ptdesc(pud));
+ crst_table_free(mm, (unsigned long *) pud);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
@@ -96,7 +109,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
if (mm_pmd_folded(mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
@@ -117,11 +130,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t *) crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc(mm);
+
+ if (!table)
+ return NULL;
+ pagetable_pgd_ctor(virt_to_ptdesc(table));
+
+ return (pgd_t *) table;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
+ pagetable_dtor(virt_to_ptdesc(pgd));
crst_table_free(mm, (unsigned long *) pgd);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a3b51056a177..3ca5af4cfe43 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -420,9 +420,10 @@ void setup_protection_map(void);
#define PGSTE_HC_BIT 0x0020000000000000UL
#define PGSTE_GR_BIT 0x0004000000000000UL
#define PGSTE_GC_BIT 0x0002000000000000UL
-#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
+#define PGSTE_ST2_MASK 0x0000ffff00000000UL
+#define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */
+#define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */
+#define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
@@ -2007,4 +2008,18 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define pmd_pgtable(pmd) \
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
+{
+ unsigned long *pgstes, res;
+
+ pgstes = pgt + _PAGE_ENTRIES;
+
+ res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
+ res |= pgstes[1] & PGSTE_ST2_MASK;
+ res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
+ res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
+
+ return res;
+}
+
#endif /* _S390_PAGE_H */
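gmap_pgste_get_pgt_addr() above reassembles one 64-bit value from the 16-bit ST2 field (PGSTE_ST2_MASK, bits 32-47) of the first four PGSTEs, most significant chunk first. A minimal sketch of the inverse operation, assuming a hypothetical setter (the real one lives in the gmap code and is not part of this hunk):

/* Sketch only: scatter a 64-bit value into the ST2 field of four PGSTEs. */
static inline void gmap_pgste_set_pgt_addr_sketch(unsigned long *pgt, unsigned long val)
{
	unsigned long *pgstes = pgt + _PAGE_ENTRIES;

	pgstes[0] |= (val >> 16) & PGSTE_ST2_MASK;	/* bits 63..48 of val */
	pgstes[1] |= val & PGSTE_ST2_MASK;		/* bits 47..32 */
	pgstes[2] |= (val << 16) & PGSTE_ST2_MASK;	/* bits 31..16 */
	pgstes[3] |= (val << 32) & PGSTE_ST2_MASK;	/* bits 15..0 */
}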
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index 51b68a43e195..7ef3bbec98b0 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -26,7 +26,7 @@ enum reserved_range_type {
RR_AMODE31,
RR_IPLREPORT,
RR_CERT_COMP_LIST,
- RR_MEM_DETECT_EXTENDED,
+ RR_MEM_DETECT_EXT,
RR_VMEM,
RR_MAX
};
@@ -128,7 +128,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
RR_TYPE_NAME(AMODE31);
RR_TYPE_NAME(IPLREPORT);
RR_TYPE_NAME(CERT_COMP_LIST);
- RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(MEM_DETECT_EXT);
RR_TYPE_NAME(VMEM);
default:
return "UNKNOWN";
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 4da3b2956285..18f37dff03c9 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -172,6 +172,7 @@ void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
void sclp_emergency_printk(const char *s);
+int sclp_init(void);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
int _sclp_get_core_info(struct sclp_core_info *info);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index ea150ea83e57..72655fd2d867 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,7 +22,6 @@
* Pages used for the page tables is a different story. FIXME: more
*/
-void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, bool delay_rmap, int page_size);
@@ -87,7 +86,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb->cleared_pmds = 1;
if (mm_alloc_pgste(tlb->mm))
gmap_unlink(tlb->mm, (unsigned long *)pte, address);
- tlb_remove_ptdesc(tlb, pte);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte));
}
/*
@@ -102,12 +101,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
{
if (mm_pmd_folded(tlb->mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_puds = 1;
- tlb_remove_ptdesc(tlb, pmd);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
/*
@@ -125,7 +123,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb_remove_ptdesc(tlb, p4d);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
/*
@@ -144,7 +142,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
- tlb_remove_ptdesc(tlb, pud);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
#endif /* _S390_TLB_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a81f897a81ce..f5920163ee97 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -22,16 +22,117 @@
void debug_user_asce(int exit);
-unsigned long __must_check
-raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+union oac {
+ unsigned int val;
+ struct {
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac1;
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac2;
+ };
+};
-unsigned long __must_check
-raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+static __always_inline __must_check unsigned long
+raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
+{
+ unsigned long rem;
+ union oac spec = {
+ .oac2.key = key,
+ .oac2.as = PSW_BITS_AS_SECONDARY,
+ .oac2.k = 1,
+ .oac2.a = 1,
+ };
-#ifndef CONFIG_KASAN
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-#endif
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
+ " slgr %[rem],%[from]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: lghi %[size],0\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
+ : "cc", "memory", "0");
+ return size;
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return raw_copy_from_user_key(to, from, n, 0);
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
+{
+ unsigned long rem;
+ union oac spec = {
+ .oac1.key = key,
+ .oac1.as = PSW_BITS_AS_SECONDARY,
+ .oac1.k = 1,
+ .oac1.a = 1,
+ };
+
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
+ " slgr %[rem],%[to]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: lghi %[size],0\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
+ : "cc", "memory", "0");
+ return size;
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return raw_copy_to_user_key(to, from, n, 0);
+}
unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
@@ -55,63 +156,71 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
return n;
}
-union oac {
- unsigned int val;
- struct {
- struct {
- unsigned short key : 4;
- unsigned short : 4;
- unsigned short as : 2;
- unsigned short : 4;
- unsigned short k : 1;
- unsigned short a : 1;
- } oac1;
- struct {
- unsigned short key : 4;
- unsigned short : 4;
- unsigned short as : 2;
- unsigned short : 4;
- unsigned short k : 1;
- unsigned short a : 1;
- } oac2;
- };
-};
-
int __noreturn __put_user_bad(void);
#ifdef CONFIG_KMSAN
-#define get_put_user_noinstr_attributes \
- noinline __maybe_unused __no_sanitize_memory
+#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
-#define get_put_user_noinstr_attributes __always_inline
+#define uaccess_kmsan_or_inline __always_inline
#endif
-#define DEFINE_PUT_USER(type) \
-static get_put_user_noinstr_attributes int \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__put_user_##type##_noinstr(unsigned type __user *to, \
+ unsigned type *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ return -EFAULT; \
+}
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
__put_user_##type##_noinstr(unsigned type __user *to, \
unsigned type *from, \
unsigned long size) \
{ \
- union oac __oac_spec = { \
- .oac1.as = PSW_BITS_AS_SECONDARY, \
- .oac1.a = 1, \
- }; \
int rc; \
\
asm volatile( \
- " lr 0,%[spec]\n" \
- "0: mvcos %[_to],%[_from],%[_size]\n" \
- "1: xr %[rc],%[rc]\n" \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
"2:\n" \
- EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
- EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
- : [rc] "=&d" (rc), [_to] "+Q" (*(to)) \
- : [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (__oac_spec.val) \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
: "cc", "0"); \
return rc; \
-} \
- \
+}
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+DEFINE_PUT_USER_NOINSTR(char);
+DEFINE_PUT_USER_NOINSTR(short);
+DEFINE_PUT_USER_NOINSTR(int);
+DEFINE_PUT_USER_NOINSTR(long);
+
+#define DEFINE_PUT_USER(type) \
static __always_inline int \
__put_user_##type(unsigned type __user *to, unsigned type *from, \
unsigned long size) \
@@ -128,69 +237,111 @@ DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long);
-static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
-{
- int rc;
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __prc; \
+ \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __prc = __put_user_char((unsigned char __user *)(ptr), \
+ (unsigned char *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 2: \
+ __prc = __put_user_short((unsigned short __user *)(ptr),\
+ (unsigned short *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 4: \
+ __prc = __put_user_int((unsigned int __user *)(ptr), \
+ (unsigned int *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ case 8: \
+ __prc = __put_user_long((unsigned long __user *)(ptr), \
+ (unsigned long *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
+ default: \
+ __prc = __put_user_bad(); \
+ break; \
+ } \
+ __builtin_expect(__prc, 0); \
+})
- switch (size) {
- case 1:
- rc = __put_user_char((unsigned char __user *)ptr,
- (unsigned char *)x,
- size);
- break;
- case 2:
- rc = __put_user_short((unsigned short __user *)ptr,
- (unsigned short *)x,
- size);
- break;
- case 4:
- rc = __put_user_int((unsigned int __user *)ptr,
- (unsigned int *)x,
- size);
- break;
- case 8:
- rc = __put_user_long((unsigned long __user *)ptr,
- (unsigned long *)x,
- size);
- break;
- default:
- __put_user_bad();
- break;
- }
- return rc;
-}
+#define put_user(x, ptr) \
+({ \
+ might_fault(); \
+ __put_user(x, ptr); \
+})
int __noreturn __get_user_bad(void);
-#define DEFINE_GET_USER(type) \
-static get_put_user_noinstr_attributes int \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
__get_user_##type##_noinstr(unsigned type *to, \
- unsigned type __user *from, \
+ const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ *to = 0; \
+ return -EFAULT; \
+}
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__get_user_##type##_noinstr(unsigned type *to, \
+ const unsigned type __user *from, \
unsigned long size) \
{ \
- union oac __oac_spec = { \
- .oac2.as = PSW_BITS_AS_SECONDARY, \
- .oac2.a = 1, \
- }; \
int rc; \
\
asm volatile( \
- " lr 0,%[spec]\n" \
- "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \
- "1: xr %[rc],%[rc]\n" \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
"2:\n" \
- EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \
- EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \
- : [rc] "=&d" (rc), "=Q" (*(to)) \
- : [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (__oac_spec.val), [_to] "a" (to), \
- [_ksize] "K" (size) \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
: "cc", "0"); \
+ if (likely(!rc)) \
+ return 0; \
+ *to = 0; \
return rc; \
-} \
- \
+}
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+DEFINE_GET_USER_NOINSTR(char);
+DEFINE_GET_USER_NOINSTR(short);
+DEFINE_GET_USER_NOINSTR(int);
+DEFINE_GET_USER_NOINSTR(long);
+
+#define DEFINE_GET_USER(type) \
static __always_inline int \
-__get_user_##type(unsigned type *to, unsigned type __user *from, \
+__get_user_##type(unsigned type *to, const unsigned type __user *from, \
unsigned long size) \
{ \
int rc; \
@@ -205,107 +356,50 @@ DEFINE_GET_USER(short);
DEFINE_GET_USER(int);
DEFINE_GET_USER(long);
-static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
-{
- int rc;
-
- switch (size) {
- case 1:
- rc = __get_user_char((unsigned char *)x,
- (unsigned char __user *)ptr,
- size);
- break;
- case 2:
- rc = __get_user_short((unsigned short *)x,
- (unsigned short __user *)ptr,
- size);
- break;
- case 4:
- rc = __get_user_int((unsigned int *)x,
- (unsigned int __user *)ptr,
- size);
- break;
- case 8:
- rc = __get_user_long((unsigned long *)x,
- (unsigned long __user *)ptr,
- size);
- break;
- default:
- __get_user_bad();
- break;
- }
- return rc;
-}
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- */
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __x = (x); \
- int __pu_err = -EFAULT; \
- \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- case 8: \
- __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
- break; \
- default: \
- __put_user_bad(); \
- break; \
- } \
- __builtin_expect(__pu_err, 0); \
-})
-
-#define put_user(x, ptr) \
-({ \
- might_fault(); \
- __put_user(x, ptr); \
-})
-
#define __get_user(x, ptr) \
({ \
- int __gu_err = -EFAULT; \
+ const __user void *____guptr = (ptr); \
+ int __grc; \
\
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
+ const unsigned char __user *__guptr = ____guptr; \
unsigned char __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 2: { \
+ const unsigned short __user *__guptr = ____guptr; \
unsigned short __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 4: { \
+ const unsigned int __user *__guptr = ____guptr; \
unsigned int __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 8: { \
+ const unsigned long __user *__guptr = ____guptr; \
unsigned long __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_long(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
default: \
- __get_user_bad(); \
+ __grc = __get_user_bad(); \
break; \
} \
- __builtin_expect(__gu_err, 0); \
+ __builtin_expect(__grc, 0); \
})
#define get_user(x, ptr) \
@@ -341,109 +435,71 @@ static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
return __s390_kernel_write(dst, src, size);
}
-int __noreturn __put_kernel_bad(void);
+void __noreturn __mvc_kernel_nofault_bad(void);
-#define __put_kernel_asm(val, to, insn) \
-({ \
- int __rc; \
- \
- asm volatile( \
- "0: " insn " %[_val],%[_to]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
- EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
- : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
- : [_val] "d" (val) \
- : "cc"); \
- __rc; \
-})
+#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \
- unsigned long __x = (unsigned long)(*((type *)(src))); \
- int __pk_err; \
- \
switch (sizeof(type)) { \
case 1: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
- break; \
case 2: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
- break; \
case 4: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \
- break; \
case 8: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
+ asm goto( \
+ "0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[err_label]) \
+ EX_TABLE(1b, %l[err_label]) \
+ : [_dst] "=Q" (*(type *)dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_len] "I" (sizeof(type)) \
+ : \
+ : err_label); \
break; \
default: \
- __pk_err = __put_kernel_bad(); \
+ __mvc_kernel_nofault_bad(); \
break; \
} \
- if (unlikely(__pk_err)) \
- goto err_label; \
} while (0)
-int __noreturn __get_kernel_bad(void);
-
-#define __get_kernel_asm(val, from, insn) \
-({ \
- int __rc; \
- \
- asm volatile( \
- "0: " insn " %[_val],%[_from]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \
- EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \
- : [rc] "=d" (__rc), [_val] "=d" (val) \
- : [_from] "Q" (*(from)) \
- : "cc"); \
- __rc; \
-})
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \
- int __gk_err; \
+ type *(__dst) = (type *)(dst); \
+ int __rc; \
\
switch (sizeof(type)) { \
- case 1: { \
- unsigned char __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 2: { \
- unsigned short __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 4: { \
- unsigned int __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 8: { \
- unsigned long __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
- *((type *)(dst)) = (type)__x; \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ asm_inline volatile( \
+ "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \
+ "1: lhi %[_rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \
+ : [_rc] "=d" (__rc), \
+ "=m" (*__dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_dst] "a" (__dst), \
+ [_len] "I" (sizeof(type))); \
+ if (__rc) \
+ goto err_label; \
break; \
- }; \
default: \
- __gk_err = __get_kernel_bad(); \
+ __mvc_kernel_nofault_bad(); \
break; \
} \
- if (unlikely(__gk_err)) \
- goto err_label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define __get_kernel_nofault __mvc_kernel_nofault
+#define __put_kernel_nofault __mvc_kernel_nofault
+
void __cmpxchg_user_key_called_with_bad_pointer(void);
#define CMPXCHG_USER_KEY_MAX_LOOPS 128
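The inlined raw_copy_{from,to}_user_key() routines above copy in 4 KiB steps via MVCOS and, on a fault, retry with only the bytes up to the next page boundary so that a partially accessible range still copies as much as possible. The following is a rough C model of that fault path; memcpy_may_fault() is a made-up stand-in for one MVCOS chunk (the real code uses the EX_TABLE entries), and the return value follows the raw_copy_*_user() convention of bytes not copied.

/* Sketch only: models the page-boundary retry in raw_copy_from_user_key(). */
static unsigned long copy_from_user_model(void *to, const void *from, unsigned long size)
{
	unsigned long rem, chunk;

	while (size) {
		chunk = size > 4096 ? 4096 : size;
		if (!memcpy_may_fault(to, from, chunk)) {	/* 0 means the access faulted */
			rem = -(unsigned long)from & 4095;	/* bytes up to next source page */
			if (size <= rem || !memcpy_may_fault(to, from, rem))
				return size;			/* nothing more is copyable */
			return size - rem;			/* copied up to the page boundary */
		}
		size -= chunk;
		from += chunk;
		to += chunk;
	}
	return 0;
}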
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index dc332609f2c3..b11f5b6d0bd1 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -628,12 +628,12 @@ static inline int is_prot_virt_host(void)
}
int uv_pin_shared(unsigned long paddr);
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_folio(struct folio *folio);
int uv_destroy_pte(pte_t pte);
int uv_convert_from_secure_pte(pte_t pte);
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
+int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb);
+int uv_convert_from_secure(unsigned long paddr);
+int uv_convert_from_secure_folio(struct folio *folio);
void setup_uv(void);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index cd0c93a8fb8b..276cb4c1e11b 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -63,9 +63,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
struct save_area *sa;
- sa = memblock_alloc(sizeof(*sa), 8);
- if (!sa)
- return NULL;
+ sa = memblock_alloc_or_panic(sizeof(*sa), 8);
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
@@ -508,6 +506,19 @@ static int get_mem_chunk_cnt(void)
return cnt;
}
+static void fill_ptload(Elf64_Phdr *phdr, unsigned long paddr,
+ unsigned long vaddr, unsigned long size)
+{
+ phdr->p_type = PT_LOAD;
+ phdr->p_vaddr = vaddr;
+ phdr->p_offset = paddr;
+ phdr->p_paddr = paddr;
+ phdr->p_filesz = size;
+ phdr->p_memsz = size;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ phdr->p_align = PAGE_SIZE;
+}
+
/*
* Initialize ELF loads (new kernel)
*/
@@ -520,14 +531,8 @@ static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
if (os_info_has_vm)
old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
- phdr->p_type = PT_LOAD;
- phdr->p_vaddr = old_identity_base + start;
- phdr->p_offset = start;
- phdr->p_paddr = start;
- phdr->p_filesz = end - start;
- phdr->p_memsz = end - start;
- phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_align = PAGE_SIZE;
+ fill_ptload(phdr, start, old_identity_base + start,
+ end - start);
phdr++;
}
}
@@ -537,6 +542,22 @@ static bool os_info_has_vm(void)
return os_info_old_value(OS_INFO_KASLR_OFFSET);
}
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+/*
+ * Fill PT_LOAD for a physical memory range owned by a device and detected by
+ * its device driver.
+ */
+void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
+ unsigned long long paddr, unsigned long long size)
+{
+ unsigned long old_identity_base = 0;
+
+ if (os_info_has_vm())
+ old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+ fill_ptload(phdr, paddr, old_identity_base + paddr, size);
+}
+#endif
+
/*
* Prepare PT_LOAD type program header for kernel image region
*/
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ba6b7329a10e..ce038e9205f7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1122,7 +1122,7 @@ static int s390dbf_procactive(const struct ctl_table *table, int write,
return 0;
}
-static struct ctl_table s390dbf_table[] = {
+static const struct ctl_table s390dbf_table[] = {
{
.procname = "debug_stoppable",
.data = &debug_stoppable,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 62f8f5a750a3..2fa25164df7d 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -50,6 +50,7 @@ decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
+decompressor_handled_param(bootdebug);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@@ -58,7 +59,7 @@ static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
- sclp_early_printk("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized\n");
#endif
}
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index c0b2c97efefb..63ba6306632e 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -266,18 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
- int bit;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- bit = ftrace_test_recursion_trylock(ip, *parent);
- if (bit < 0)
- return;
if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
*parent = (unsigned long)&return_to_handler;
- ftrace_test_recursion_unlock(bit);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
index 2a99a216ab62..7857a7e8e56c 100644
--- a/arch/s390/kernel/hiperdispatch.c
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -292,7 +292,7 @@ static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write,
return 0;
}
-static struct ctl_table hiperdispatch_ctl_table[] = {
+static const struct ctl_table hiperdispatch_ctl_table[] = {
{
.procname = "hiperdispatch",
.mode = 0644,
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6652e54cf3db..6d1ffca5f798 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -166,7 +166,7 @@ static struct timer_list lgr_timer;
*/
static void lgr_timer_set(void)
{
- mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC));
+ mod_timer(&lgr_timer, jiffies + secs_to_jiffies(LGR_TIMER_INTERVAL_SECS));
}
/*
diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c
index ddc1448ea2e1..2fc40f97c0ad 100644
--- a/arch/s390/kernel/numa.c
+++ b/arch/s390/kernel/numa.c
@@ -21,12 +21,8 @@ void __init numa_setup(void)
nodes_clear(node_possible_map);
node_set(0, node_possible_map);
node_set_online(0);
- for (nid = 0; nid < MAX_NUMNODES; nid++) {
- NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
- if (!NODE_DATA(nid))
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(pg_data_t), 8);
- }
+ for (nid = 0; nid < MAX_NUMNODES; nid++)
+ NODE_DATA(nid) = memblock_alloc_or_panic(sizeof(pg_data_t), 8);
NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT;
NODE_DATA(0)->node_id = 0;
}
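Several hunks in this series replace the open-coded memblock_alloc()-plus-panic() pattern with memblock_alloc_or_panic(). The helper itself is defined in the memblock code and not shown here; the conversions assume behaviour roughly like this sketch:

/* Sketch only: the pattern being factored out by memblock_alloc_or_panic(). */
static inline void *memblock_alloc_or_panic_sketch(phys_addr_t size, phys_addr_t align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("Failed to allocate %zu bytes (align=0x%lx)\n",
		      (size_t)size, (unsigned long)align);
	return ptr;
}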
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0ce550faf073..d78bcfe707b5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -157,6 +157,12 @@ u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
struct oldmem_data __bootdata_preserved(oldmem_data);
+char __bootdata(boot_rb)[PAGE_SIZE * 2];
+bool __bootdata(boot_earlyprintk);
+size_t __bootdata(boot_rb_off);
+char __bootdata(bootdebug_filter)[128];
+bool __bootdata(bootdebug);
+
unsigned long __bootdata_preserved(VMALLOC_START);
EXPORT_SYMBOL(VMALLOC_START);
@@ -376,11 +382,7 @@ static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
- stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!stack) {
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- }
+ stack = (unsigned long)memblock_alloc_or_panic(THREAD_SIZE, THREAD_SIZE);
return stack;
}
@@ -504,10 +506,7 @@ static void __init setup_resources(void)
bss_resource.end = __pa_symbol(__bss_stop) - 1;
for_each_mem_range(i, &start, &end) {
- res = memblock_alloc(sizeof(*res), 8);
- if (!res)
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(*res), 8);
+ res = memblock_alloc_or_panic(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -526,10 +525,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = memblock_alloc(sizeof(*sub_res), 8);
- if (!sub_res)
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(*sub_res), 8);
+ sub_res = memblock_alloc_or_panic(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -696,7 +692,7 @@ static void __init reserve_physmem_info(void)
{
unsigned long addr, size;
- if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
memblock_reserve(addr, size);
}
@@ -704,7 +700,7 @@ static void __init free_physmem_info(void)
{
unsigned long addr, size;
- if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
memblock_phys_free(addr, size);
}
@@ -734,7 +730,7 @@ static void __init reserve_lowcore(void)
void *lowcore_end = lowcore_start + sizeof(struct lowcore);
void *start, *end;
- if ((void *)__identity_base < lowcore_end) {
+ if (absolute_pointer(__identity_base) < lowcore_end) {
start = max(lowcore_start, (void *)__identity_base);
end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
memblock_reserve(__pa(start), __pa(end));
@@ -816,9 +812,7 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
- vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!vmms)
- panic("Failed to allocate memory for sysinfo structure\n");
+ vmms = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free(vmms, PAGE_SIZE);
@@ -878,6 +872,23 @@ static void __init log_component_list(void)
}
/*
+ * Print avoiding interpretation of % in buf and taking bootdebug option
+ * into consideration.
+ */
+static void __init print_rb_entry(const char *buf)
+{
+ char fmt[] = KERN_SOH "0boot: %s";
+ int level = printk_get_level(buf);
+
+ buf = skip_timestamp(printk_skip_level(buf));
+ if (level == KERN_DEBUG[1] && (!bootdebug || !bootdebug_filter_match(buf)))
+ return;
+
+ fmt[1] = level;
+ printk(fmt, buf);
+}
+
+/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
@@ -896,6 +907,9 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");
+ /* Print decompressor messages if not already printed */
+ if (!boot_earlyprintk)
+ boot_rb_foreach(print_rb_entry);
if (have_relocated_lowcore())
pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
@@ -999,3 +1013,8 @@ void __init setup_arch(char **cmdline_p)
/* Add system specific data to the random pool */
setup_randomness();
}
+
+void __init arch_cpu_finalize_init(void)
+{
+ sclp_init();
+}
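print_rb_entry() above forwards a stored boot message at its original log level by patching the level character into a stack copy of the format string. Outside of that function, the same trick looks roughly like the sketch below; the function name is made up and the level character is only an example.

/* Sketch only: patch a printk level character into a copied format string. */
static void print_at_level_sketch(char level, const char *msg)
{
	char fmt[] = KERN_SOH "0boot: %s";	/* fmt[1] is a placeholder for the level */

	fmt[1] = level;				/* e.g. '6' for KERN_INFO */
	printk(fmt, msg);
}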
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 822d8e6f8717..7b08399b0846 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -611,9 +611,7 @@ void __init smp_save_dump_ipl_cpu(void)
if (!dump_available())
return;
sa = save_area_alloc(true);
- regs = memblock_alloc(512, 8);
- if (!sa || !regs)
- panic("could not allocate memory for boot CPU save area\n");
+ regs = memblock_alloc_or_panic(512, 8);
copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
save_area_add_regs(sa, regs);
memblock_free(regs, 512);
@@ -646,8 +644,6 @@ void __init smp_save_dump_secondary_cpus(void)
SIGP_CC_NOT_OPERATIONAL)
continue;
sa = save_area_alloc(false);
- if (!sa)
- panic("could not allocate memory for save area\n");
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
save_area_add_regs(sa, page);
if (cpu_has_vx()) {
@@ -792,10 +788,7 @@ void __init smp_detect_cpus(void)
u16 address;
/* Get CPU information */
- info = memblock_alloc(sizeof(*info), 8);
- if (!info)
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(*info), 8);
+ info = memblock_alloc_or_panic(sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 34a65c141ea0..e9f47c3a6197 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -662,12 +662,12 @@ static void stp_check_leap(void)
if (ret < 0)
pr_err("failed to set leap second flags\n");
/* arm Timer to clear leap second flags */
- mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
+ mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400));
} else {
/* The day the leap second is scheduled for hasn't been reached. Retry
* in one hour.
*/
- mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
+ mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600));
}
}
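The timer conversions in this series (lgr.c, time.c, topology.c) swap msecs_to_jiffies(N * MSEC_PER_SEC) for secs_to_jiffies(N). For the constant arguments used here this is assumed to reduce to the sketch below, avoiding the detour through milliseconds:

/*
 * Sketch only: the converted call sites assume secs_to_jiffies(s) behaves
 * like s * HZ, i.e. the same result as msecs_to_jiffies(s * MSEC_PER_SEC).
 */
#define secs_to_jiffies_sketch(s)	((unsigned long)(s) * HZ)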
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 39f5ed21f31a..211cc8382e4a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -371,7 +371,7 @@ static void set_topology_timer(void)
if (atomic_add_unless(&topology_poll, -1, 0))
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
else
- mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
+ mod_timer(&topology_timer, jiffies + secs_to_jiffies(60));
}
void topology_expect_change(void)
@@ -548,10 +548,7 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = memblock_alloc(sizeof(*mask->next), 8);
- if (!mask->next)
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(*mask->next), 8);
+ mask->next = memblock_alloc_or_panic(sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -579,10 +576,7 @@ void __init topology_init_early(void)
}
if (!MACHINE_HAS_TOPOLOGY)
goto out;
- tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!tl_info)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ tl_info = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
cpu_management = detect_polarization(info->tle);
@@ -673,7 +667,7 @@ static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
return set_polarization(polarization);
}
-static struct ctl_table topology_ctl_table[] = {
+static const struct ctl_table topology_ctl_table[] = {
{
.procname = "topology",
.mode = 0644,
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 6f9654a191ad..9f05df2da2f7 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -19,19 +19,6 @@
#include <asm/sections.h>
#include <asm/uv.h>
-#if !IS_ENABLED(CONFIG_KVM)
-unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
-{
- return 0;
-}
-
-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
- unsigned int fault_flags)
-{
- return 0;
-}
-#endif
-
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);
@@ -159,6 +146,7 @@ int uv_destroy_folio(struct folio *folio)
folio_put(folio);
return rc;
}
+EXPORT_SYMBOL(uv_destroy_folio);
/*
* The present PTE still indirectly holds a folio reference through the mapping.
@@ -175,7 +163,7 @@ int uv_destroy_pte(pte_t pte)
*
* @paddr: Absolute host address of page to be exported
*/
-static int uv_convert_from_secure(unsigned long paddr)
+int uv_convert_from_secure(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
@@ -187,11 +175,12 @@ static int uv_convert_from_secure(unsigned long paddr)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL_GPL(uv_convert_from_secure);
/*
* The caller must already hold a reference to the folio.
*/
-static int uv_convert_from_secure_folio(struct folio *folio)
+int uv_convert_from_secure_folio(struct folio *folio)
{
int rc;
@@ -206,6 +195,7 @@ static int uv_convert_from_secure_folio(struct folio *folio)
folio_put(folio);
return rc;
}
+EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);
/*
* The present PTE still indirectly holds a folio reference through the mapping.
@@ -237,13 +227,33 @@ static int expected_folio_refs(struct folio *folio)
return res;
}
-static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
+/**
+ * make_folio_secure() - make a folio secure
+ * @folio: the folio to make secure
+ * @uvcb: the uvcb that describes the UVC to be used
+ *
+ * The folio @folio will be made secure if possible, @uvcb will be passed
+ * as-is to the UVC.
+ *
+ * Return: 0 on success;
+ * -EBUSY if the folio is in writeback or has too many references;
+ * -E2BIG if the folio is large;
+ * -EAGAIN if the UVC needs to be attempted again;
+ * -ENXIO if the address is not mapped;
+ * -EINVAL if the UVC failed for other reasons.
+ *
+ * Context: The caller must hold exactly one extra reference on the folio
+ * (it's the same logic as split_folio())
+ */
+int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
int expected, cc = 0;
+ if (folio_test_large(folio))
+ return -E2BIG;
if (folio_test_writeback(folio))
- return -EAGAIN;
- expected = expected_folio_refs(folio);
+ return -EBUSY;
+ expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
set_bit(PG_arch_1, &folio->flags);
@@ -267,251 +277,7 @@ static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
return -EAGAIN;
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
-
-/**
- * should_export_before_import - Determine whether an export is needed
- * before an import-like operation
- * @uvcb: the Ultravisor control block of the UVC to be performed
- * @mm: the mm of the process
- *
- * Returns whether an export is needed before every import-like operation.
- * This is needed for shared pages, which don't trigger a secure storage
- * exception when accessed from a different guest.
- *
- * Although considered as one, the Unpin Page UVC is not an actual import,
- * so it is not affected.
- *
- * No export is needed also when there is only one protected VM, because the
- * page cannot belong to the wrong VM in that case (there is no "other VM"
- * it can belong to).
- *
- * Return: true if an export is needed before every import, otherwise false.
- */
-static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
-{
- /*
- * The misc feature indicates, among other things, that importing a
- * shared page from a different protected VM will automatically also
- * transfer its ownership.
- */
- if (uv_has_feature(BIT_UV_FEAT_MISC))
- return false;
- if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
- return false;
- return atomic_read(&mm->context.protected_count) > 1;
-}
-
-/*
- * Drain LRU caches: the local one on first invocation and the ones of all
- * CPUs on successive invocations. Returns "true" on the first invocation.
- */
-static bool drain_lru(bool *drain_lru_called)
-{
- /*
- * If we have tried a local drain and the folio refcount
- * still does not match our expected safe value, try with a
- * system wide drain. This is needed if the pagevecs holding
- * the page are on a different CPU.
- */
- if (*drain_lru_called) {
- lru_add_drain_all();
- /* We give up here, don't retry immediately. */
- return false;
- }
- /*
- * We are here if the folio refcount does not match the
- * expected safe value. The main culprits are usually
- * pagevecs. With lru_add_drain() we drain the pagevecs
- * on the local CPU so that hopefully the refcount will
- * reach the expected safe value.
- */
- lru_add_drain();
- *drain_lru_called = true;
- /* The caller should try again immediately */
- return true;
-}
-
-/*
- * Requests the Ultravisor to make a page accessible to a guest.
- * If it's brought in the first time, it will be cleared. If
- * it has been exported before, it will be decrypted and integrity
- * checked.
- */
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
-{
- struct vm_area_struct *vma;
- bool drain_lru_called = false;
- spinlock_t *ptelock;
- unsigned long uaddr;
- struct folio *folio;
- pte_t *ptep;
- int rc;
-
-again:
- rc = -EFAULT;
- mmap_read_lock(gmap->mm);
-
- uaddr = __gmap_translate(gmap, gaddr);
- if (IS_ERR_VALUE(uaddr))
- goto out;
- vma = vma_lookup(gmap->mm, uaddr);
- if (!vma)
- goto out;
- /*
- * Secure pages cannot be huge and userspace should not combine both.
- * In case userspace does it anyway this will result in an -EFAULT for
- * the unpack. The guest is thus never reaching secure mode. If
- * userspace is playing dirty tricky with mapping huge pages later
- * on this will result in a segmentation fault.
- */
- if (is_vm_hugetlb_page(vma))
- goto out;
-
- rc = -ENXIO;
- ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
- if (!ptep)
- goto out;
- if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
- folio = page_folio(pte_page(*ptep));
- rc = -EAGAIN;
- if (folio_test_large(folio)) {
- rc = -E2BIG;
- } else if (folio_trylock(folio)) {
- if (should_export_before_import(uvcb, gmap->mm))
- uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
- rc = make_folio_secure(folio, uvcb);
- folio_unlock(folio);
- }
-
- /*
- * Once we drop the PTL, the folio may get unmapped and
- * freed immediately. We need a temporary reference.
- */
- if (rc == -EAGAIN || rc == -E2BIG)
- folio_get(folio);
- }
- pte_unmap_unlock(ptep, ptelock);
-out:
- mmap_read_unlock(gmap->mm);
-
- switch (rc) {
- case -E2BIG:
- folio_lock(folio);
- rc = split_folio(folio);
- folio_unlock(folio);
- folio_put(folio);
-
- switch (rc) {
- case 0:
- /* Splitting succeeded, try again immediately. */
- goto again;
- case -EAGAIN:
- /* Additional folio references. */
- if (drain_lru(&drain_lru_called))
- goto again;
- return -EAGAIN;
- case -EBUSY:
- /* Unexpected race. */
- return -EAGAIN;
- }
- WARN_ON_ONCE(1);
- return -ENXIO;
- case -EAGAIN:
- /*
- * If we are here because the UVC returned busy or partial
- * completion, this is just a useless check, but it is safe.
- */
- folio_wait_writeback(folio);
- folio_put(folio);
- return -EAGAIN;
- case -EBUSY:
- /* Additional folio references. */
- if (drain_lru(&drain_lru_called))
- goto again;
- return -EAGAIN;
- case -ENXIO:
- if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
- return -EFAULT;
- return -EAGAIN;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_make_secure);
-
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
-{
- struct uv_cb_cts uvcb = {
- .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
- .header.len = sizeof(uvcb),
- .guest_handle = gmap->guest_handle,
- .gaddr = gaddr,
- };
-
- return gmap_make_secure(gmap, gaddr, &uvcb);
-}
-EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
-
-/**
- * gmap_destroy_page - Destroy a guest page.
- * @gmap: the gmap of the guest
- * @gaddr: the guest address to destroy
- *
- * An attempt will be made to destroy the given guest page. If the attempt
- * fails, an attempt is made to export the page. If both attempts fail, an
- * appropriate error is returned.
- */
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
-{
- struct vm_area_struct *vma;
- struct folio_walk fw;
- unsigned long uaddr;
- struct folio *folio;
- int rc;
-
- rc = -EFAULT;
- mmap_read_lock(gmap->mm);
-
- uaddr = __gmap_translate(gmap, gaddr);
- if (IS_ERR_VALUE(uaddr))
- goto out;
- vma = vma_lookup(gmap->mm, uaddr);
- if (!vma)
- goto out;
- /*
- * Huge pages should not be able to become secure
- */
- if (is_vm_hugetlb_page(vma))
- goto out;
-
- rc = 0;
- folio = folio_walk_start(&fw, vma, uaddr, 0);
- if (!folio)
- goto out;
- /*
- * See gmap_make_secure(): large folios cannot be secure. Small
- * folio implies FW_LEVEL_PTE.
- */
- if (folio_test_large(folio) || !pte_write(fw.pte))
- goto out_walk_end;
- rc = uv_destroy_folio(folio);
- /*
- * Fault handlers can race; it is possible that two CPUs will fault
- * on the same secure page. One CPU can destroy the page, reboot,
- * re-enter secure mode and import it, while the second CPU was
- * stuck at the beginning of the handler. At some point the second
- * CPU will be able to progress, and it will not be able to destroy
- * the page. In that case we do not want to terminate the process,
- * we instead try to export the page.
- */
- if (rc)
- rc = uv_convert_from_secure_folio(folio);
-out_walk_end:
- folio_walk_end(&fw, vma);
-out:
- mmap_read_unlock(gmap->mm);
- return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_destroy_page);
+EXPORT_SYMBOL_GPL(make_folio_secure);
/*
* To be called with the folio locked or with an extra reference! This will
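make_folio_secure() is now exported with an explicit contract: small folio only, and the caller holds exactly one extra folio reference. A minimal caller sketch following the locking pattern of the removed gmap_make_secure(); the wrapper name is illustrative, and the real caller moves into the KVM gmap code, which is not part of this file.

/* Sketch only: call make_folio_secure() with the documented precautions. */
static int make_secure_sketch(struct folio *folio, struct uv_cb_header *uvcb)
{
	int rc;

	/* the caller is assumed to hold exactly one extra folio reference here */
	if (!folio_trylock(folio))
		return -EAGAIN;
	rc = make_folio_secure(folio, uvcb);	/* -E2BIG, -EBUSY, -EAGAIN, -ENXIO or -EINVAL on failure */
	folio_unlock(folio);
	return rc;
}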
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 377b9aaf8c92..ff1ddba96352 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,7 +52,6 @@ SECTIONS
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
*(.text.*_indirect_*)
- *(.fixup)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 02217fb4ae10..f0ffe874adc2 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -8,7 +8,7 @@ include $(srctree)/virt/kvm/Makefile.kvm
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
+kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap.o gmap-vsie.o
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9816b0060fbe..f6fded15633a 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -16,6 +16,7 @@
#include <asm/gmap.h>
#include <asm/dat-bits.h>
#include "kvm-s390.h"
+#include "gmap.h"
#include "gaccess.h"
/*
@@ -1393,6 +1394,44 @@ shadow_pgt:
}
/**
+ * shadow_pgt_lookup() - find a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: the address in the shadow guest address space
+ * @pgt: parent gmap address of the page table to get shadowed
+ * @dat_protection: if the pgtable is marked as protected by dat
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ *
+ * Returns 0 if the shadow page table was found and -EAGAIN if the page
+ * table was not found.
+ *
+ * Called with sg->mm->mmap_lock in read.
+ */
+static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
+ int *dat_protection, int *fake)
+{
+ unsigned long pt_index;
+ unsigned long *table;
+ struct page *page;
+ int rc;
+
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
+ if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
+ /* Shadow page tables are full pages (pte+pgste) */
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ pt_index = gmap_pgste_get_pgt_addr(page_to_virt(page));
+ *pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
+ *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
+ *fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);
+ rc = 0;
+ } else {
+ rc = -EAGAIN;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+}
+
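As a rough sketch of the encoding consumed here (illustrative only, not part of the patch; pgt_origin and is_fake are hypothetical names, GMAP_SHADOW_FAKE_TABLE is the 1ULL marker defined in the new arch/s390/kvm/gmap.h added below), the shadowed page-table origin and the "fake" marker share a single value, with bit 0 carrying the marker:

	/* encode: a table origin is always aligned, so bit 0 is free */
	pt_index = pgt_origin | (is_fake ? GMAP_SHADOW_FAKE_TABLE : 0);

	/* decode, as shadow_pgt_lookup() does above */
	pgt  = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
	fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);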
+/**
* kvm_s390_shadow_fault - handle fault on a shadow page table
* @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure
@@ -1415,6 +1454,9 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
int dat_protection, fake;
int rc;
+ if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
+ return -EFAULT;
+
mmap_read_lock(sg->mm);
/*
* We don't want any guest-2 tables to change - so the parent
@@ -1423,7 +1465,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
*/
ipte_lock(vcpu->kvm);
- rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
+ rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
if (rc)
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
&fake);
diff --git a/arch/s390/kvm/gmap-vsie.c b/arch/s390/kvm/gmap-vsie.c
new file mode 100644
index 000000000000..a6d1dbb04c97
--- /dev/null
+++ b/arch/s390/kvm/gmap-vsie.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Guest memory management for KVM/s390 nested VMs.
+ *
+ * Copyright IBM Corp. 2008, 2020, 2024
+ *
+ * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+#include <linux/pagemap.h>
+#include <linux/mman.h>
+
+#include <asm/lowcore.h>
+#include <asm/gmap.h>
+#include <asm/uv.h>
+
+#include "kvm-s390.h"
+#include "gmap.h"
+
+/**
+ * gmap_find_shadow - find a specific asce in the list of shadow tables
+ * @parent: pointer to the parent gmap
+ * @asce: ASCE for which the shadow table is created
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * Returns the pointer to a gmap if a shadow table with the given asce is
+ * already available, ERR_PTR(-EAGAIN) if another one is just being created,
+ * otherwise NULL
+ *
+ * Context: Called with parent->shadow_lock held
+ */
+static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce, int edat_level)
+{
+ struct gmap *sg;
+
+ lockdep_assert_held(&parent->shadow_lock);
+ list_for_each_entry(sg, &parent->children, list) {
+ if (!gmap_shadow_valid(sg, asce, edat_level))
+ continue;
+ if (!sg->initialized)
+ return ERR_PTR(-EAGAIN);
+ refcount_inc(&sg->ref_count);
+ return sg;
+ }
+ return NULL;
+}
+
+/**
+ * gmap_shadow - create/find a shadow guest address space
+ * @parent: pointer to the parent gmap
+ * @asce: ASCE for which the shadow table is created
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * The pages of the top level page table referred to by the asce parameter
+ * will be set to read-only and marked in the PGSTEs of the kvm process.
+ * The shadow table will be removed automatically on any change to the
+ * PTE mapping for the source table.
+ *
+ * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
+ * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
+ * parent gmap table could not be protected.
+ */
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level)
+{
+ struct gmap *sg, *new;
+ unsigned long limit;
+ int rc;
+
+ if (KVM_BUG_ON(parent->mm->context.allow_gmap_hpage_1m, (struct kvm *)parent->private) ||
+ KVM_BUG_ON(gmap_is_shadow(parent), (struct kvm *)parent->private))
+ return ERR_PTR(-EFAULT);
+ spin_lock(&parent->shadow_lock);
+ sg = gmap_find_shadow(parent, asce, edat_level);
+ spin_unlock(&parent->shadow_lock);
+ if (sg)
+ return sg;
+ /* Create a new shadow gmap */
+ limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
+ if (asce & _ASCE_REAL_SPACE)
+ limit = -1UL;
+ new = gmap_alloc(limit);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+ new->mm = parent->mm;
+ new->parent = gmap_get(parent);
+ new->private = parent->private;
+ new->orig_asce = asce;
+ new->edat_level = edat_level;
+ new->initialized = false;
+ spin_lock(&parent->shadow_lock);
+ /* Recheck if another CPU created the same shadow */
+ sg = gmap_find_shadow(parent, asce, edat_level);
+ if (sg) {
+ spin_unlock(&parent->shadow_lock);
+ gmap_free(new);
+ return sg;
+ }
+ if (asce & _ASCE_REAL_SPACE) {
+ /* only allow one real-space gmap shadow */
+ list_for_each_entry(sg, &parent->children, list) {
+ if (sg->orig_asce & _ASCE_REAL_SPACE) {
+ spin_lock(&sg->guest_table_lock);
+ gmap_unshadow(sg);
+ spin_unlock(&sg->guest_table_lock);
+ list_del(&sg->list);
+ gmap_put(sg);
+ break;
+ }
+ }
+ }
+ refcount_set(&new->ref_count, 2);
+ list_add(&new->list, &parent->children);
+ if (asce & _ASCE_REAL_SPACE) {
+ /* nothing to protect, return right away */
+ new->initialized = true;
+ spin_unlock(&parent->shadow_lock);
+ return new;
+ }
+ spin_unlock(&parent->shadow_lock);
+ /* protect after insertion, so it will get properly invalidated */
+ mmap_read_lock(parent->mm);
+ rc = __kvm_s390_mprotect_many(parent, asce & _ASCE_ORIGIN,
+ ((asce & _ASCE_TABLE_LENGTH) + 1),
+ PROT_READ, GMAP_NOTIFY_SHADOW);
+ mmap_read_unlock(parent->mm);
+ spin_lock(&parent->shadow_lock);
+ new->initialized = true;
+ if (rc) {
+ list_del(&new->list);
+ gmap_free(new);
+ new = ERR_PTR(rc);
+ }
+ spin_unlock(&parent->shadow_lock);
+ return new;
+}
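A minimal caller sketch for gmap_shadow() above (illustration only, not part of the patch; parent, asce and edat_level come from the caller, and retrying -EAGAIN inline is a simplification, the real vSIE code backs out and retries on the next intercept):

	struct gmap *sg;

	sg = gmap_shadow(parent, asce, edat_level);
	if (sg == ERR_PTR(-EAGAIN))
		return -EAGAIN;		/* another CPU is still initializing it */
	if (IS_ERR(sg))
		return PTR_ERR(sg);	/* -ENOMEM or -EFAULT */
	/* ... use the shadow ... */
	gmap_put(sg);			/* drop the reference gmap_shadow() took */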
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
new file mode 100644
index 000000000000..02adf151d4de
--- /dev/null
+++ b/arch/s390/kvm/gmap.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Guest memory management for KVM/s390
+ *
+ * Copyright IBM Corp. 2008, 2020, 2024
+ *
+ * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+#include <linux/pagemap.h>
+
+#include <asm/lowcore.h>
+#include <asm/gmap.h>
+#include <asm/uv.h>
+
+#include "gmap.h"
+
+/**
+ * should_export_before_import - Determine whether an export is needed
+ * before an import-like operation
+ * @uvcb: the Ultravisor control block of the UVC to be performed
+ * @mm: the mm of the process
+ *
+ * Returns whether an export is needed before every import-like operation.
+ * This is needed for shared pages, which don't trigger a secure storage
+ * exception when accessed from a different guest.
+ *
+ * Although it is considered one, the Unpin Page UVC is not an actual import,
+ * so it is not affected.
+ *
+ * An export is also not needed when there is only one protected VM, because
+ * the page cannot belong to the wrong VM in that case (there is no "other
+ * VM" it can belong to).
+ *
+ * Return: true if an export is needed before every import, otherwise false.
+ */
+static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
+{
+ /*
+ * The misc feature indicates, among other things, that importing a
+ * shared page from a different protected VM will automatically also
+ * transfer its ownership.
+ */
+ if (uv_has_feature(BIT_UV_FEAT_MISC))
+ return false;
+ if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
+ return false;
+ return atomic_read(&mm->context.protected_count) > 1;
+}
+
+static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
+{
+ struct folio *folio = page_folio(page);
+ int rc;
+
+ /*
+ * Secure pages cannot be huge and userspace should not combine both.
+ * If userspace does it anyway, the unpack will fail with -EFAULT and
+ * the guest will never reach secure mode.
+ * If userspace plays dirty tricks and decides to map huge pages at a
+ * later point in time, it will receive a segmentation fault or
+ * KVM_RUN will return -EFAULT.
+ */
+ if (folio_test_hugetlb(folio))
+ return -EFAULT;
+ if (folio_test_large(folio)) {
+ mmap_read_unlock(gmap->mm);
+ rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
+ mmap_read_lock(gmap->mm);
+ if (rc)
+ return rc;
+ folio = page_folio(page);
+ }
+
+ if (!folio_trylock(folio))
+ return -EAGAIN;
+ if (should_export_before_import(uvcb, gmap->mm))
+ uv_convert_from_secure(folio_to_phys(folio));
+ rc = make_folio_secure(folio, uvcb);
+ folio_unlock(folio);
+
+ /*
+ * In theory a race is possible and the folio might have become
+ * large again before the folio_trylock() above. In that case, no
+ * action is performed and -EAGAIN is returned; the callers will
+ * have to try again later.
+ * In most cases this implies running the VM again, getting the same
+ * exception again, and making another attempt in this function.
+ * This is expected to happen extremely rarely.
+ */
+ if (rc == -E2BIG)
+ return -EAGAIN;
+ /* The folio has too many references, try to shake some off */
+ if (rc == -EBUSY) {
+ mmap_read_unlock(gmap->mm);
+ kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
+ mmap_read_lock(gmap->mm);
+ return -EAGAIN;
+ }
+
+ return rc;
+}
+
+/**
+ * gmap_make_secure() - make one guest page secure
+ * @gmap: the guest gmap
+ * @gaddr: the guest address that needs to be made secure
+ * @uvcb: the UVCB specifying which operation needs to be performed
+ *
+ * Context: needs to be called with kvm->srcu held.
+ * Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
+ */
+int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+{
+ struct kvm *kvm = gmap->private;
+ struct page *page;
+ int rc = 0;
+
+ lockdep_assert_held(&kvm->srcu);
+
+ page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
+ mmap_read_lock(gmap->mm);
+ if (page)
+ rc = __gmap_make_secure(gmap, page, uvcb);
+ kvm_release_page_clean(page);
+ mmap_read_unlock(gmap->mm);
+
+ return rc;
+}
+
+int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
+{
+ struct uv_cb_cts uvcb = {
+ .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
+ .header.len = sizeof(uvcb),
+ .guest_handle = gmap->guest_handle,
+ .gaddr = gaddr,
+ };
+
+ return gmap_make_secure(gmap, gaddr, &uvcb);
+}
+
+/**
+ * __gmap_destroy_page() - Destroy a guest page.
+ * @gmap: the gmap of the guest
+ * @page: the page to destroy
+ *
+ * An attempt will be made to destroy the given guest page. If the attempt
+ * fails, an attempt is made to export the page. If both attempts fail, an
+ * appropriate error is returned.
+ *
+ * Context: must be called holding the mm lock for gmap->mm
+ */
+static int __gmap_destroy_page(struct gmap *gmap, struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ int rc;
+
+ /*
+ * See gmap_make_secure(): large folios cannot be secure. Small
+ * folio implies FW_LEVEL_PTE.
+ */
+ if (folio_test_large(folio))
+ return -EFAULT;
+
+ rc = uv_destroy_folio(folio);
+ /*
+ * Fault handlers can race; it is possible that two CPUs will fault
+ * on the same secure page. One CPU can destroy the page, reboot,
+ * re-enter secure mode and import it, while the second CPU was
+ * stuck at the beginning of the handler. At some point the second
+ * CPU will be able to progress, and it will not be able to destroy
+ * the page. In that case we do not want to terminate the process,
+ * we instead try to export the page.
+ */
+ if (rc)
+ rc = uv_convert_from_secure_folio(folio);
+
+ return rc;
+}
+
+/**
+ * gmap_destroy_page() - Destroy a guest page.
+ * @gmap: the gmap of the guest
+ * @gaddr: the guest address to destroy
+ *
+ * An attempt will be made to destroy the given guest page. If the attempt
+ * fails, an attempt is made to export the page. If both attempts fail, an
+ * appropriate error is returned.
+ *
+ * Context: may sleep.
+ */
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
+{
+ struct page *page;
+ int rc = 0;
+
+ mmap_read_lock(gmap->mm);
+ page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr));
+ if (page)
+ rc = __gmap_destroy_page(gmap, page);
+ kvm_release_page_clean(page);
+ mmap_read_unlock(gmap->mm);
+ return rc;
+}
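A sketch of a typical call site for gmap_convert_to_secure() from this new file (illustration only, not part of the patch; gaddr is a hypothetical guest address). gmap_make_secure() asserts that kvm->srcu is held, and -EAGAIN is transient, so the usual reaction is to re-enter the guest and let it fault again:

	int rc;

	guard(srcu)(&kvm->srcu);
	rc = gmap_convert_to_secure(kvm->arch.gmap, gaddr);
	if (rc == -EAGAIN)
		rc = 0;		/* re-enter the guest; it will fault and retry */
	return rc;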
diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h
new file mode 100644
index 000000000000..c8f031c9ea5f
--- /dev/null
+++ b/arch/s390/kvm/gmap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KVM guest address space mapping code
+ *
+ * Copyright IBM Corp. 2007, 2016, 2025
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Claudio Imbrenda <imbrenda@linux.ibm.com>
+ */
+
+#ifndef ARCH_KVM_S390_GMAP_H
+#define ARCH_KVM_S390_GMAP_H
+
+#define GMAP_SHADOW_FAKE_TABLE 1ULL
+
+int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
+int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);
+
+/**
+ * gmap_shadow_valid - check if a shadow guest address space matches the
+ * given properties and is still valid
+ * @sg: pointer to the shadow guest address space structure
+ * @asce: ASCE for which the shadow table is requested
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * Returns 1 if the gmap shadow is still valid and matches the given
+ * properties, the caller can continue using it. Returns 0 otherwise, the
+ * caller has to request a new shadow gmap in this case.
+ *
+ */
+static inline int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
+{
+ if (sg->removed)
+ return 0;
+ return sg->orig_asce == asce && sg->edat_level == edat_level;
+}
+
+#endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5bbaadf75dc6..610dd44a948b 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -21,6 +21,7 @@
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
+#include "gmap.h"
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
@@ -367,7 +368,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
reg2, &srcaddr, GACC_FETCH, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- rc = gmap_fault(vcpu->arch.gmap, srcaddr, 0);
+ rc = kvm_s390_handle_dat_fault(vcpu, srcaddr, 0);
if (rc != 0)
return rc;
@@ -376,7 +377,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
reg1, &dstaddr, GACC_STORE, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- rc = gmap_fault(vcpu->arch.gmap, dstaddr, FAULT_FLAG_WRITE);
+ rc = kvm_s390_handle_dat_fault(vcpu, dstaddr, FOLL_WRITE);
if (rc != 0)
return rc;
@@ -549,7 +550,7 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
* If the unpin did not succeed, the guest will exit again for the UVC
* and we will retry the unpin.
*/
- if (rc == -EINVAL)
+ if (rc == -EINVAL || rc == -ENXIO)
return 0;
/*
* If we got -EAGAIN here, we simply return it. It will eventually
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index d4f031e086fc..07ff0e10cb7f 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2893,7 +2893,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
- u64 uaddr;
+ u64 uaddr_s, uaddr_i;
+ int idx;
switch (ue->type) {
/* we store the userspace addresses instead of the guest addresses */
@@ -2901,14 +2902,16 @@ int kvm_set_routing_entry(struct kvm *kvm,
if (kvm_is_ucontrol(kvm))
return -EINVAL;
e->set = set_adapter_int;
- uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
- if (uaddr == -EFAULT)
- return -EFAULT;
- e->adapter.summary_addr = uaddr;
- uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
- if (uaddr == -EFAULT)
+
+ idx = srcu_read_lock(&kvm->srcu);
+ uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
+ uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (kvm_is_error_hva(uaddr_s) || kvm_is_error_hva(uaddr_i))
return -EFAULT;
- e->adapter.ind_addr = uaddr;
+ e->adapter.summary_addr = uaddr_s;
+ e->adapter.ind_addr = uaddr_i;
e->adapter.summary_offset = ue->u.adapter.summary_offset;
e->adapter.ind_offset = ue->u.adapter.ind_offset;
e->adapter.adapter_id = ue->u.adapter.adapter_id;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d8080c27d45b..ebecb96bacce 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -50,6 +50,7 @@
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"
+#include "gmap.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -3428,8 +3429,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
VM_EVENT(kvm, 3, "vm created with type %lu", type);
if (type & KVM_VM_S390_UCONTROL) {
+ struct kvm_userspace_memory_region2 fake_memslot = {
+ .slot = KVM_S390_UCONTROL_MEMSLOT,
+ .guest_phys_addr = 0,
+ .userspace_addr = 0,
+ .memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
+ .flags = 0,
+ };
+
kvm->arch.gmap = NULL;
kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
+ /* one flat fake memslot covering the whole address space */
+ mutex_lock(&kvm->slots_lock);
+ KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
+ mutex_unlock(&kvm->slots_lock);
} else {
if (sclp.hamax == U64_MAX)
kvm->arch.mem_limit = TASK_SIZE_MAX;
@@ -4498,6 +4511,75 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}
+static int __kvm_s390_fixup_fault_sync(struct gmap *gmap, gpa_t gaddr, unsigned int flags)
+{
+ struct kvm *kvm = gmap->private;
+ gfn_t gfn = gpa_to_gfn(gaddr);
+ bool unlocked;
+ hva_t vmaddr;
+ gpa_t tmp;
+ int rc;
+
+ if (kvm_is_ucontrol(kvm)) {
+ tmp = __gmap_translate(gmap, gaddr);
+ gfn = gpa_to_gfn(tmp);
+ }
+
+ vmaddr = gfn_to_hva(kvm, gfn);
+ rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
+ if (!rc)
+ rc = __gmap_link(gmap, gaddr, vmaddr);
+ return rc;
+}
+
+/**
+ * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
+ * @gmap: the gmap of the guest
+ * @gpa: the starting guest address
+ * @npages: how many pages to protect
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: pgste notification bits to set
+ *
+ * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
+ *
+ * Context: kvm->srcu and gmap->mm need to be held in read mode
+ */
+int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
+ unsigned long bits)
+{
+ unsigned int fault_flag = (prot & PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+ gpa_t end = gpa + npages * PAGE_SIZE;
+ int rc;
+
+ for (; gpa < end; gpa = ALIGN(gpa + 1, rc)) {
+ rc = gmap_protect_one(gmap, gpa, prot, bits);
+ if (rc == -EAGAIN) {
+ __kvm_s390_fixup_fault_sync(gmap, gpa, fault_flag);
+ rc = gmap_protect_one(gmap, gpa, prot, bits);
+ }
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
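To make the loop stride above concrete: gmap_protect_one() returns the size of the mapping it just protected (PAGE_SIZE or HPAGE_SIZE), so rc doubles as the alignment for the next iteration. Illustrative arithmetic, not part of the patch:

	ALIGN(0x1000 + 1, PAGE_SIZE);	/* == 0x2000: advance to the next 4 KiB page       */
	ALIGN(0x1000 + 1, HPAGE_SIZE);	/* == 0x100000: skip the rest of the 1 MiB segment */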
+static int kvm_s390_mprotect_notify_prefix(struct kvm_vcpu *vcpu)
+{
+ gpa_t gaddr = kvm_s390_get_prefix(vcpu);
+ int idx, rc;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ mmap_read_lock(vcpu->arch.gmap->mm);
+
+ rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT);
+
+ mmap_read_unlock(vcpu->arch.gmap->mm);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ return rc;
+}
+
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
@@ -4513,9 +4595,8 @@ retry:
*/
if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
int rc;
- rc = gmap_mprotect_notify(vcpu->arch.gmap,
- kvm_s390_get_prefix(vcpu),
- PAGE_SIZE * 2, PROT_WRITE);
+
+ rc = kvm_s390_mprotect_notify_prefix(vcpu);
if (rc) {
kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
return rc;
@@ -4766,11 +4847,111 @@ static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
+static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
+{
+ KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
+ "Unexpected program interrupt 0x%x, TEID 0x%016lx",
+ current->thread.gmap_int_code, current->thread.gmap_teid.val);
+}
+
+/*
+ * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
+ * @vcpu: the vCPU whose gmap is to be fixed up
+ * @gfn: the guest frame number used for memslots (including fake memslots)
+ * @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
+ * @flags: FOLL_* flags
+ *
+ * Return: 0 on success, < 0 in case of error.
+ * Context: The mm lock must not be held before calling. May sleep.
+ */
+int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
+{
+ struct kvm_memory_slot *slot;
+ unsigned int fault_flags;
+ bool writable, unlocked;
+ unsigned long vmaddr;
+ struct page *page;
+ kvm_pfn_t pfn;
+ int rc;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+ return vcpu_post_run_addressing_exception(vcpu);
+
+ fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
+ if (vcpu->arch.gmap->pfault_enabled)
+ flags |= FOLL_NOWAIT;
+ vmaddr = __gfn_to_hva_memslot(slot, gfn);
+
+try_again:
+ pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);
+
+ /* Access outside memory, inject addressing exception */
+ if (is_noslot_pfn(pfn))
+ return vcpu_post_run_addressing_exception(vcpu);
+ /* Signal pending: try again */
+ if (pfn == KVM_PFN_ERR_SIGPENDING)
+ return -EAGAIN;
+
+ /* Needs I/O, try to setup async pfault (only possible with FOLL_NOWAIT) */
+ if (pfn == KVM_PFN_ERR_NEEDS_IO) {
+ trace_kvm_s390_major_guest_pfault(vcpu);
+ if (kvm_arch_setup_async_pf(vcpu))
+ return 0;
+ vcpu->stat.pfault_sync++;
+ /* Could not setup async pfault, try again synchronously */
+ flags &= ~FOLL_NOWAIT;
+ goto try_again;
+ }
+ /* Any other error */
+ if (is_error_pfn(pfn))
+ return -EFAULT;
+
+ /* Success */
+ mmap_read_lock(vcpu->arch.gmap->mm);
+ /* Mark the userspace PTEs as young and/or dirty, to avoid page fault loops */
+ rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked);
+ if (!rc)
+ rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr);
+ scoped_guard(spinlock, &vcpu->kvm->mmu_lock) {
+ kvm_release_faultin_page(vcpu->kvm, page, false, writable);
+ }
+ mmap_read_unlock(vcpu->arch.gmap->mm);
+ return rc;
+}
+
+static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
+{
+ unsigned long gaddr_tmp;
+ gfn_t gfn;
+
+ gfn = gpa_to_gfn(gaddr);
+ if (kvm_is_ucontrol(vcpu->kvm)) {
+ /*
+ * This translates the per-vCPU guest address into a
+ * fake guest address, which can then be used with the
+ * fake memslots that identity-map userspace.
+ * This allows ucontrol VMs to use the normal fault
+ * resolution path, like normal VMs.
+ */
+ mmap_read_lock(vcpu->arch.gmap->mm);
+ gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr);
+ mmap_read_unlock(vcpu->arch.gmap->mm);
+ if (gaddr_tmp == -EFAULT) {
+ vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
+ vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
+ vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
+ return -EREMOTE;
+ }
+ gfn = gpa_to_gfn(gaddr_tmp);
+ }
+ return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
+}
+
static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
{
unsigned int flags = 0;
unsigned long gaddr;
- int rc = 0;
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
if (kvm_s390_cur_gmap_fault_is_write())
@@ -4781,9 +4962,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
vcpu->stat.exit_null++;
break;
case PGM_NON_SECURE_STORAGE_ACCESS:
- KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
- "Unexpected program interrupt 0x%x, TEID 0x%016lx",
- current->thread.gmap_int_code, current->thread.gmap_teid.val);
+ kvm_s390_assert_primary_as(vcpu);
/*
* This is normal operation; a page belonging to a protected
* guest has not been imported yet. Try to import the page into
@@ -4794,9 +4973,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
break;
case PGM_SECURE_STORAGE_ACCESS:
case PGM_SECURE_STORAGE_VIOLATION:
- KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
- "Unexpected program interrupt 0x%x, TEID 0x%016lx",
- current->thread.gmap_int_code, current->thread.gmap_teid.val);
+ kvm_s390_assert_primary_as(vcpu);
/*
* This can happen after a reboot with asynchronous teardown;
* the new guest (normal or protected) will run on top of the
@@ -4825,40 +5002,15 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
case PGM_REGION_FIRST_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
- KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
- "Unexpected program interrupt 0x%x, TEID 0x%016lx",
- current->thread.gmap_int_code, current->thread.gmap_teid.val);
- if (vcpu->arch.gmap->pfault_enabled) {
- rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
- if (rc == -EFAULT)
- return vcpu_post_run_addressing_exception(vcpu);
- if (rc == -EAGAIN) {
- trace_kvm_s390_major_guest_pfault(vcpu);
- if (kvm_arch_setup_async_pf(vcpu))
- return 0;
- vcpu->stat.pfault_sync++;
- } else {
- return rc;
- }
- }
- rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
- if (rc == -EFAULT) {
- if (kvm_is_ucontrol(vcpu->kvm)) {
- vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
- vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
- vcpu->run->s390_ucontrol.pgm_code = 0x10;
- return -EREMOTE;
- }
- return vcpu_post_run_addressing_exception(vcpu);
- }
- break;
+ kvm_s390_assert_primary_as(vcpu);
+ return vcpu_dat_fault_handler(vcpu, gaddr, flags);
default:
KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
current->thread.gmap_int_code, current->thread.gmap_teid.val);
send_sig(SIGSEGV, current, 0);
break;
}
- return rc;
+ return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@ -5737,7 +5889,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
#endif
case KVM_S390_VCPU_FAULT: {
- r = gmap_fault(vcpu->arch.gmap, arg, 0);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = vcpu_dat_fault_handler(vcpu, arg, 0);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_ENABLE_CAP:
@@ -5853,7 +6007,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
{
gpa_t size;
- if (kvm_is_ucontrol(kvm))
+ if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
return -EINVAL;
/* When we are protected, we should not change the memory slots */
@@ -5905,6 +6059,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
int rc = 0;
+ if (kvm_is_ucontrol(kvm))
+ return;
+
switch (change) {
case KVM_MR_DELETE:
rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 597d7a71deeb..8d3bbb2dd8d2 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -20,6 +20,8 @@
#include <asm/processor.h>
#include <asm/sclp.h>
+#define KVM_S390_UCONTROL_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
+
static inline void kvm_s390_fpu_store(struct kvm_run *run)
{
fpu_stfpc(&run->s.regs.fpc);
@@ -279,6 +281,15 @@ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
return gd;
}
+static inline hva_t gpa_to_hva(struct kvm *kvm, gpa_t gpa)
+{
+ hva_t hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ if (!kvm_is_error_hva(hva))
+ hva |= offset_in_page(gpa);
+ return hva;
+}
+
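For illustration (made-up addresses, not part of the patch, and assuming the gfn is backed by a memslot): gpa_to_hva() keeps the page offset of the guest physical address, which gfn_to_hva() alone would drop:

	/* gpa 0x12345 -> hva of gfn 0x12, with page offset 0x345 re-applied */
	hva_t hva = gpa_to_hva(kvm, 0x12345);	/* == gfn_to_hva(kvm, 0x12) | 0x345 */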
/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
@@ -408,6 +419,14 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
+int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);
+int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
+ unsigned long bits);
+
+static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
+{
+ return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
+}
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 75e81ba26d04..22c012aa5206 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -17,6 +17,7 @@
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
+#include "gmap.h"
bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
@@ -638,10 +639,28 @@ static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
.tweak[1] = offset,
};
int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
+ unsigned long vmaddr;
+ bool unlocked;
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
+ if (ret == -ENXIO) {
+ mmap_read_lock(kvm->mm);
+ vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
+ if (kvm_is_error_hva(vmaddr)) {
+ ret = -EFAULT;
+ } else {
+ ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
+ if (!ret)
+ ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
+ }
+ mmap_read_unlock(kvm->mm);
+ if (!ret)
+ return -EAGAIN;
+ return ret;
+ }
+
if (ret && ret != -EAGAIN)
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
uvcb.gaddr, *rc, *rrc);
@@ -660,6 +679,8 @@ int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
addr, size);
+ guard(srcu)(&kvm->srcu);
+
while (offset < size) {
ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
if (ret == -EAGAIN) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a687695d8f68..a78df3a4f353 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -13,6 +13,7 @@
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/io.h>
+#include <linux/mman.h>
#include <asm/gmap.h>
#include <asm/mmu_context.h>
@@ -22,6 +23,11 @@
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"
+#include "gmap.h"
+
+enum vsie_page_flags {
+ VSIE_PAGE_IN_USE = 0,
+};
struct vsie_page {
struct kvm_s390_sie_block scb_s; /* 0x0000 */
@@ -46,7 +52,18 @@ struct vsie_page {
gpa_t gvrd_gpa; /* 0x0240 */
gpa_t riccbd_gpa; /* 0x0248 */
gpa_t sdnx_gpa; /* 0x0250 */
- __u8 reserved[0x0700 - 0x0258]; /* 0x0258 */
+ /*
+ * guest address of the original SCB. Remains set for free vsie
+ * pages, so we can properly look them up in our addr_to_page
+ * radix tree.
+ */
+ gpa_t scb_gpa; /* 0x0258 */
+ /*
+ * Flags: must be set/cleared atomically after the vsie page can be
+ * looked up by other CPUs.
+ */
+ unsigned long flags; /* 0x0260 */
+ __u8 reserved[0x0700 - 0x0268]; /* 0x0268 */
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
};
@@ -584,7 +601,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
struct kvm *kvm = gmap->private;
struct vsie_page *cur;
unsigned long prefix;
- struct page *page;
int i;
if (!gmap_is_shadow(gmap))
@@ -594,10 +610,9 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
* therefore we can safely reference them all the time.
*/
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
- page = READ_ONCE(kvm->arch.vsie.pages[i]);
- if (!page)
+ cur = READ_ONCE(kvm->arch.vsie.pages[i]);
+ if (!cur)
continue;
- cur = page_to_virt(page);
if (READ_ONCE(cur->gmap) != gmap)
continue;
prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
@@ -1345,6 +1360,20 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return rc;
}
+/* Try getting a given vsie page, returning "true" on success. */
+static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
+{
+ if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
+ return false;
+ return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
+/* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
+static void put_vsie_page(struct vsie_page *vsie_page)
+{
+ clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
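A sketch of how the IN_USE bit is meant to be used (illustration only, not part of the patch; get_vsie_page() below shows the real call sites):

	if (!try_get_vsie_page(vsie_page))
		return NULL;			/* owned by another CPU right now */
	/* ... exclusive use of vsie_page ... */
	put_vsie_page(vsie_page);		/* make it reusable again */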
/*
* Get or create a vsie page for a scb address.
*
@@ -1355,16 +1384,21 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
struct vsie_page *vsie_page;
- struct page *page;
int nr_vcpus;
rcu_read_lock();
- page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+ vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
rcu_read_unlock();
- if (page) {
- if (page_ref_inc_return(page) == 2)
- return page_to_virt(page);
- page_ref_dec(page);
+ if (vsie_page) {
+ if (try_get_vsie_page(vsie_page)) {
+ if (vsie_page->scb_gpa == addr)
+ return vsie_page;
+ /*
+ * We raced with someone reusing + putting this vsie
+ * page before we grabbed it.
+ */
+ put_vsie_page(vsie_page);
+ }
}
/*
@@ -1375,36 +1409,40 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
mutex_lock(&kvm->arch.vsie.mutex);
if (kvm->arch.vsie.page_count < nr_vcpus) {
- page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
- if (!page) {
+ vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
+ if (!vsie_page) {
mutex_unlock(&kvm->arch.vsie.mutex);
return ERR_PTR(-ENOMEM);
}
- page_ref_inc(page);
- kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
+ __set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+ kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
kvm->arch.vsie.page_count++;
} else {
/* reuse an existing entry that belongs to nobody */
while (true) {
- page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
- if (page_ref_inc_return(page) == 2)
+ vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
+ if (try_get_vsie_page(vsie_page))
break;
- page_ref_dec(page);
kvm->arch.vsie.next++;
kvm->arch.vsie.next %= nr_vcpus;
}
- radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+ if (vsie_page->scb_gpa != ULONG_MAX)
+ radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+ vsie_page->scb_gpa >> 9);
}
- page->index = addr;
- /* double use of the same address */
- if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
- page_ref_dec(page);
+ /* Mark it as invalid until it resides in the tree. */
+ vsie_page->scb_gpa = ULONG_MAX;
+
+ /* Double use of the same address or allocation failure. */
+ if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9,
+ vsie_page)) {
+ put_vsie_page(vsie_page);
mutex_unlock(&kvm->arch.vsie.mutex);
return NULL;
}
+ vsie_page->scb_gpa = addr;
mutex_unlock(&kvm->arch.vsie.mutex);
- vsie_page = page_to_virt(page);
memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
release_gmap_shadow(vsie_page);
vsie_page->fault_addr = 0;
@@ -1412,14 +1450,6 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
return vsie_page;
}
-/* put a vsie page acquired via get_vsie_page */
-static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
-{
- struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
-
- page_ref_dec(page);
-}
-
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
struct vsie_page *vsie_page;
@@ -1470,7 +1500,7 @@ out_unshadow:
out_unpin_scb:
unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
- put_vsie_page(vcpu->kvm, vsie_page);
+ put_vsie_page(vsie_page);
return rc < 0 ? rc : 0;
}
@@ -1486,18 +1516,18 @@ void kvm_s390_vsie_init(struct kvm *kvm)
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
struct vsie_page *vsie_page;
- struct page *page;
int i;
mutex_lock(&kvm->arch.vsie.mutex);
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
- page = kvm->arch.vsie.pages[i];
+ vsie_page = kvm->arch.vsie.pages[i];
kvm->arch.vsie.pages[i] = NULL;
- vsie_page = page_to_virt(page);
release_gmap_shadow(vsie_page);
/* free the radix tree entry */
- radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
- __free_page(page);
+ if (vsie_page->scb_gpa != ULONG_MAX)
+ radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+ vsie_page->scb_gpa >> 9);
+ free_page((unsigned long)vsie_page);
}
kvm->arch.vsie.page_count = 0;
mutex_unlock(&kvm->arch.vsie.mutex);
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index f43f897d3fc0..14bbfe50033c 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -24,3 +24,6 @@ obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
+
+obj-$(CONFIG_CRC32_ARCH) += crc32-s390.o
+crc32-s390-y := crc32-glue.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/lib/crc32-glue.c b/arch/s390/lib/crc32-glue.c
new file mode 100644
index 000000000000..137080e61f90
--- /dev/null
+++ b/arch/s390/lib/crc32-glue.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CRC-32 implemented with the z/Architecture Vector Extension Facility.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "crc32-vx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
+
+#define VX_MIN_LEN 64
+#define VX_ALIGNMENT 16L
+#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
+
+static DEFINE_STATIC_KEY_FALSE(have_vxrs);
+
+/*
+ * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
+ *
+ * Creates a function to perform a particular CRC-32 computation. Depending
+ * on the message buffer, the hardware-accelerated or software implementation
+ * is used. Note that the message buffer is aligned to improve fetch
+ * operations of VECTOR LOAD MULTIPLE instructions.
+ */
+#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw) \
+ u32 ___fname(u32 crc, const u8 *data, size_t datalen) \
+ { \
+ unsigned long prealign, aligned, remaining; \
+ DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \
+ \
+ if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || \
+ !static_branch_likely(&have_vxrs)) \
+ return ___crc32_sw(crc, data, datalen); \
+ \
+ if ((unsigned long)data & VX_ALIGN_MASK) { \
+ prealign = VX_ALIGNMENT - \
+ ((unsigned long)data & VX_ALIGN_MASK); \
+ datalen -= prealign; \
+ crc = ___crc32_sw(crc, data, prealign); \
+ data = (void *)((unsigned long)data + prealign); \
+ } \
+ \
+ aligned = datalen & ~VX_ALIGN_MASK; \
+ remaining = datalen & VX_ALIGN_MASK; \
+ \
+ kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
+ crc = ___crc32_vx(crc, data, aligned); \
+ kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
+ \
+ if (remaining) \
+ crc = ___crc32_sw(crc, data + aligned, remaining); \
+ \
+ return crc; \
+ } \
+ EXPORT_SYMBOL(___fname);
+
+DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base)
+DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base)
+DEFINE_CRC32_VX(crc32c_le_arch, crc32c_le_vgfm_16, crc32c_le_base)
+
+static int __init crc32_s390_init(void)
+{
+ if (cpu_have_feature(S390_CPU_FEATURE_VXRS))
+ static_branch_enable(&have_vxrs);
+ return 0;
+}
+arch_initcall(crc32_s390_init);
+
+static void __exit crc32_s390_exit(void)
+{
+}
+module_exit(crc32_s390_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_vxrs))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("CRC-32 algorithms using z/Architecture Vector Extension Facility");
+MODULE_LICENSE("GPL");
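A worked example of the split DEFINE_CRC32_VX() performs (illustrative numbers, not part of the patch): for a buffer starting 9 bytes into a 16-byte line with datalen = 1000, the vector path is taken (1000 >= VX_MIN_LEN + VX_ALIGN_MASK) and the bytes are divided as:

	prealign  = 16 - 9;		/* 7 bytes, software fallback         */
	datalen  -= prealign;		/* 993 bytes left                     */
	aligned   = 993 & ~15UL;	/* 992 bytes, vector routine          */
	remaining = 993 & 15UL;		/* 1 trailing byte, software fallback */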
diff --git a/arch/s390/crypto/crc32-vx.h b/arch/s390/lib/crc32-vx.h
index 652c96e1a822..652c96e1a822 100644
--- a/arch/s390/crypto/crc32-vx.h
+++ b/arch/s390/lib/crc32-vx.h
diff --git a/arch/s390/crypto/crc32be-vx.c b/arch/s390/lib/crc32be-vx.c
index fed7c9c70d05..fed7c9c70d05 100644
--- a/arch/s390/crypto/crc32be-vx.c
+++ b/arch/s390/lib/crc32be-vx.c
diff --git a/arch/s390/crypto/crc32le-vx.c b/arch/s390/lib/crc32le-vx.c
index 2f629f394df7..2f629f394df7 100644
--- a/arch/s390/crypto/crc32le-vx.c
+++ b/arch/s390/lib/crc32le-vx.c
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index c7c269d5c491..f977b7c37efc 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -31,51 +31,6 @@ void debug_user_asce(int exit)
}
#endif /*CONFIG_DEBUG_ENTRY */
-static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
- unsigned long size, unsigned long key)
-{
- unsigned long rem;
- union oac spec = {
- .oac2.key = key,
- .oac2.as = PSW_BITS_AS_SECONDARY,
- .oac2.k = 1,
- .oac2.a = 1,
- };
-
- asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[from],%[val]\n"
- " slgr %[to],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
- " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
- " slgr %[rem],%[from]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: slgr %[size],%[size]\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
-}
-
-unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- return raw_copy_from_user_key(to, from, n, 0);
-}
-EXPORT_SYMBOL(raw_copy_from_user);
-
unsigned long _copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key)
{
@@ -93,51 +48,6 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
}
EXPORT_SYMBOL(_copy_from_user_key);
-static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
- unsigned long size, unsigned long key)
-{
- unsigned long rem;
- union oac spec = {
- .oac1.key = key,
- .oac1.as = PSW_BITS_AS_SECONDARY,
- .oac1.k = 1,
- .oac1.a = 1,
- };
-
- asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[to],%[val]\n"
- " slgr %[from],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
- " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
- " slgr %[rem],%[to]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: slgr %[size],%[size]\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
-}
-
-unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- return raw_copy_to_user_key(to, from, n, 0);
-}
-EXPORT_SYMBOL(raw_copy_to_user);
-
unsigned long _copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key)
{
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index d01724a715d0..39f44b6256e0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -204,7 +204,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
- mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC));
+ mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds));
}
static void cmm_timer_fn(struct timer_list *unused)
@@ -332,7 +332,7 @@ static int cmm_timeout_handler(const struct ctl_table *ctl, int write,
return 0;
}
-static struct ctl_table cmm_table[] = {
+static const struct ctl_table cmm_table[] = {
{
.procname = "cmm_pages",
.mode = 0644,
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index 0a0738a473af..a046be1715cf 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -7,6 +7,7 @@
#include <linux/panic.h>
#include <asm/asm-extable.h>
#include <asm/extable.h>
+#include <asm/fpu.h>
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
@@ -26,7 +27,7 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_r
return true;
}
-static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
+static bool ex_handler_ua_fault(const struct exception_table_entry *ex, struct pt_regs *regs)
{
unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
@@ -35,18 +36,6 @@ static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct p
return true;
}
-static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
-{
- unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
- unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
- size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
-
- regs->gprs[reg_err] = -EFAULT;
- memset((void *)regs->gprs[reg_addr], 0, len);
- regs->psw.addr = extable_fixup(ex);
- return true;
-}
-
static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
bool pair, struct pt_regs *regs)
{
@@ -77,6 +66,13 @@ static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt
return true;
}
+static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+ fpu_sfpc(0);
+ regs->psw.addr = extable_fixup(ex);
+ return true;
+}
+
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -89,16 +85,16 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
- case EX_TYPE_UA_STORE:
- return ex_handler_ua_store(ex, regs);
- case EX_TYPE_UA_LOAD_MEM:
- return ex_handler_ua_load_mem(ex, regs);
+ case EX_TYPE_UA_FAULT:
+ return ex_handler_ua_fault(ex, regs);
case EX_TYPE_UA_LOAD_REG:
return ex_handler_ua_load_reg(ex, false, regs);
case EX_TYPE_UA_LOAD_REGPAIR:
return ex_handler_ua_load_reg(ex, true, regs);
case EX_TYPE_ZEROPAD:
return ex_handler_zeropad(ex, regs);
+ case EX_TYPE_FPC:
+ return ex_handler_fpc(ex, regs);
}
panic("invalid exception table entry");
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 16b8a36c56de..94d927785800 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -24,6 +24,16 @@
#include <asm/page.h>
#include <asm/tlb.h>
+/*
+ * The address is saved in a radix tree directly; NULL would be ambiguous,
+ * since 0 is a valid address, and NULL is returned when nothing was found.
+ * The lower bits are ignored by all users of the macro, so it can be used
+ * to distinguish a valid address 0 from a NULL.
+ */
+#define VALID_GADDR_FLAG 1
+#define IS_GADDR_VALID(gaddr) ((gaddr) & VALID_GADDR_FLAG)
+#define MAKE_VALID_GADDR(gaddr) (((gaddr) & HPAGE_MASK) | VALID_GADDR_FLAG)
+
#define GMAP_SHADOW_FAKE_TABLE 1ULL
static struct page *gmap_alloc_crst(void)
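A worked example of the VALID_GADDR_FLAG encoding introduced above (illustrative, not part of the patch): guest address 0 must stay distinguishable from an empty radix-tree slot:

	void *slot = (void *)MAKE_VALID_GADDR(0x0);	/* == 0x1, not NULL       */

	IS_GADDR_VALID((unsigned long)slot);		/* true: an entry exists  */
	(unsigned long)slot & HPAGE_MASK;		/* 0x0: the guest address */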
@@ -43,7 +53,7 @@ static struct page *gmap_alloc_crst(void)
*
* Returns a guest address space structure.
*/
-static struct gmap *gmap_alloc(unsigned long limit)
+struct gmap *gmap_alloc(unsigned long limit)
{
struct gmap *gmap;
struct page *page;
@@ -70,9 +80,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
if (!gmap)
goto out;
- INIT_LIST_HEAD(&gmap->crst_list);
INIT_LIST_HEAD(&gmap->children);
- INIT_LIST_HEAD(&gmap->pt_list);
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
@@ -82,8 +90,6 @@ static struct gmap *gmap_alloc(unsigned long limit)
page = gmap_alloc_crst();
if (!page)
goto out_free;
- page->index = 0;
- list_add(&page->lru, &gmap->crst_list);
table = page_to_virt(page);
crst_table_init(table, etype);
gmap->table = table;
@@ -97,6 +103,7 @@ out_free:
out:
return NULL;
}
+EXPORT_SYMBOL_GPL(gmap_alloc);
/**
* gmap_create - create a guest address space
@@ -185,32 +192,46 @@ static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
} while (nr > 0);
}
+static void gmap_free_crst(unsigned long *table, bool free_ptes)
+{
+ bool is_segment = (table[0] & _SEGMENT_ENTRY_TYPE_MASK) == 0;
+ int i;
+
+ if (is_segment) {
+ if (!free_ptes)
+ goto out;
+ for (i = 0; i < _CRST_ENTRIES; i++)
+ if (!(table[i] & _SEGMENT_ENTRY_INVALID))
+ page_table_free_pgste(page_ptdesc(phys_to_page(table[i])));
+ } else {
+ for (i = 0; i < _CRST_ENTRIES; i++)
+ if (!(table[i] & _REGION_ENTRY_INVALID))
+ gmap_free_crst(__va(table[i] & PAGE_MASK), free_ptes);
+ }
+
+out:
+ free_pages((unsigned long)table, CRST_ALLOC_ORDER);
+}
+
/**
* gmap_free - free a guest address space
* @gmap: pointer to the guest address space structure
*
* No locks required. There are no references to this gmap anymore.
*/
-static void gmap_free(struct gmap *gmap)
+void gmap_free(struct gmap *gmap)
{
- struct page *page, *next;
-
/* Flush tlb of all gmaps (if not already done for shadows) */
if (!(gmap_is_shadow(gmap) && gmap->removed))
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
- list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
- __free_pages(page, CRST_ALLOC_ORDER);
+ gmap_free_crst(gmap->table, gmap_is_shadow(gmap));
+
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
- struct ptdesc *ptdesc, *n;
-
- /* Free all page tables. */
- list_for_each_entry_safe(ptdesc, n, &gmap->pt_list, pt_list)
- page_table_free_pgste(ptdesc);
gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
@@ -218,6 +239,7 @@ static void gmap_free(struct gmap *gmap)
kfree(gmap);
}
+EXPORT_SYMBOL_GPL(gmap_free);
/**
* gmap_get - increase reference counter for guest address space
@@ -298,10 +320,8 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
crst_table_init(new, init);
spin_lock(&gmap->guest_table_lock);
if (*table & _REGION_ENTRY_INVALID) {
- list_add(&page->lru, &gmap->crst_list);
*table = __pa(new) | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
- page->index = gaddr;
page = NULL;
}
spin_unlock(&gmap->guest_table_lock);
@@ -310,21 +330,23 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
return 0;
}
-/**
- * __gmap_segment_gaddr - find virtual address from segment pointer
- * @entry: pointer to a segment table entry in the guest address space
- *
- * Returns the virtual address in the guest address space for the segment
- */
-static unsigned long __gmap_segment_gaddr(unsigned long *entry)
+static unsigned long host_to_guest_lookup(struct gmap *gmap, unsigned long vmaddr)
{
- struct page *page;
- unsigned long offset;
+ return (unsigned long)radix_tree_lookup(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
- offset = (unsigned long) entry / sizeof(unsigned long);
- offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_pgtable_page((pmd_t *) entry);
- return page->index + offset;
+static unsigned long host_to_guest_delete(struct gmap *gmap, unsigned long vmaddr)
+{
+ return (unsigned long)radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
+
+static pmd_t *host_to_guest_pmd_delete(struct gmap *gmap, unsigned long vmaddr,
+ unsigned long *gaddr)
+{
+ *gaddr = host_to_guest_delete(gmap, vmaddr);
+ if (IS_GADDR_VALID(*gaddr))
+ return (pmd_t *)gmap_table_walk(gmap, *gaddr, 1);
+ return NULL;
}
/**
@@ -336,16 +358,19 @@ static unsigned long __gmap_segment_gaddr(unsigned long *entry)
*/
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
- unsigned long *entry;
+ unsigned long gaddr;
int flush = 0;
+ pmd_t *pmdp;
BUG_ON(gmap_is_shadow(gmap));
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
- if (entry) {
- flush = (*entry != _SEGMENT_ENTRY_EMPTY);
- *entry = _SEGMENT_ENTRY_EMPTY;
+
+ pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+ if (pmdp) {
+ flush = (pmd_val(*pmdp) != _SEGMENT_ENTRY_EMPTY);
+ *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
}
+
spin_unlock(&gmap->guest_table_lock);
return flush;
}
@@ -464,26 +489,6 @@ unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
- * gmap_translate - translate a guest address to a user space address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- *
- * Returns user space address which corresponds to the guest address or
- * -EFAULT if no such mapping exists.
- * This function does not establish potentially missing page table entries.
- */
-unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
-{
- unsigned long rc;
-
- mmap_read_lock(gmap->mm);
- rc = __gmap_translate(gmap, gaddr);
- mmap_read_unlock(gmap->mm);
- return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_translate);
-
-/**
* gmap_unlink - disconnect a page table from the gmap shadow tables
* @mm: pointer to the parent mm_struct
* @table: pointer to the host page table
@@ -582,7 +587,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
spin_lock(&gmap->guest_table_lock);
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT, table);
+ vmaddr >> PMD_SHIFT,
+ (void *)MAKE_VALID_GADDR(gaddr));
if (!rc) {
if (pmd_leaf(*pmd)) {
*table = (pmd_val(*pmd) &
@@ -605,130 +611,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
radix_tree_preload_end();
return rc;
}
-
-/**
- * fixup_user_fault_nowait - manually resolve a user page fault without waiting
- * @mm: mm_struct of target mm
- * @address: user address
- * @fault_flags:flags to pass down to handle_mm_fault()
- * @unlocked: did we unlock the mmap_lock while retrying
- *
- * This function behaves similarly to fixup_user_fault(), but it guarantees
- * that the fault will be resolved without waiting. The function might drop
- * and re-acquire the mm lock, in which case @unlocked will be set to true.
- *
- * The guarantee is that the fault is handled without waiting, but the
- * function itself might sleep, due to the lock.
- *
- * Context: Needs to be called with mm->mmap_lock held in read mode, and will
- * return with the lock held in read mode; @unlocked will indicate whether
- * the lock has been dropped and re-acquired. This is the same behaviour as
- * fixup_user_fault().
- *
- * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
- * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
- * memory.
- */
-static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
- unsigned int fault_flags, bool *unlocked)
-{
- struct vm_area_struct *vma;
- unsigned int test_flags;
- vm_fault_t fault;
- int rc;
-
- fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
- test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
-
- vma = find_vma(mm, address);
- if (unlikely(!vma || address < vma->vm_start))
- return -EFAULT;
- if (unlikely(!(vma->vm_flags & test_flags)))
- return -EFAULT;
-
- fault = handle_mm_fault(vma, address, fault_flags, NULL);
- /* the mm lock has been dropped, take it again */
- if (fault & VM_FAULT_COMPLETED) {
- *unlocked = true;
- mmap_read_lock(mm);
- return 0;
- }
- /* the mm lock has not been dropped */
- if (fault & VM_FAULT_ERROR) {
- rc = vm_fault_to_errno(fault, 0);
- BUG_ON(!rc);
- return rc;
- }
- /* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
- if (fault & VM_FAULT_RETRY)
- return -EAGAIN;
- /* nothing needed to be done and the mm lock has not been dropped */
- return 0;
-}
-
-/**
- * __gmap_fault - resolve a fault on a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @fault_flags: flags to pass down to handle_mm_fault()
- *
- * Context: Needs to be called with mm->mmap_lock held in read mode. Might
- * drop and re-acquire the lock. Will always return with the lock held.
- */
-static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
-{
- unsigned long vmaddr;
- bool unlocked;
- int rc = 0;
-
-retry:
- unlocked = false;
-
- vmaddr = __gmap_translate(gmap, gaddr);
- if (IS_ERR_VALUE(vmaddr))
- return vmaddr;
-
- if (fault_flags & FAULT_FLAG_RETRY_NOWAIT)
- rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
- else
- rc = fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked);
- if (rc)
- return rc;
- /*
- * In the case that fixup_user_fault unlocked the mmap_lock during
- * fault-in, redo __gmap_translate() to avoid racing with a
- * map/unmap_segment.
- * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
- * and __gmap_link() must all be called atomically in one go; if the
- * lock had been dropped in between, a retry is needed.
- */
- if (unlocked)
- goto retry;
-
- return __gmap_link(gmap, gaddr, vmaddr);
-}
-
-/**
- * gmap_fault - resolve a fault on a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @fault_flags: flags to pass down to handle_mm_fault()
- *
- * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
- * vm address is already mapped to a different guest segment, and -EAGAIN if
- * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
- * immediately.
- */
-int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
-{
- int rc;
-
- mmap_read_lock(gmap->mm);
- rc = __gmap_fault(gmap, gaddr, fault_flags);
- mmap_read_unlock(gmap->mm);
- return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_fault);
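
The removed gmap_fault() documented 0, -ENOMEM, -EFAULT and, with FAULT_FLAG_RETRY_NOWAIT, -EAGAIN as its return values. An illustrative caller (a hypothetical wrapper, not taken from the tree) shows how those codes were meant to be consumed:

/* Hypothetical caller of the interface removed above. */
static int resolve_guest_page(struct gmap *gmap, unsigned long gaddr, bool write)
{
	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
	int rc;

	/* try without waiting first; -EAGAIN means a sleeping retry is needed */
	rc = gmap_fault(gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
	if (rc == -EAGAIN)
		rc = gmap_fault(gmap, gaddr, flags);
	return rc;	/* 0 on success, -ENOMEM or -EFAULT otherwise */
}
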
+EXPORT_SYMBOL(__gmap_link);
/*
* this function is assumed to be called with mmap_lock held
@@ -853,8 +736,7 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
*
* Note: Can also be called for shadow gmaps.
*/
-static inline unsigned long *gmap_table_walk(struct gmap *gmap,
- unsigned long gaddr, int level)
+unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level)
{
const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
unsigned long *table = gmap->table;
@@ -905,6 +787,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
}
return table;
}
+EXPORT_SYMBOL(gmap_table_walk);
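
With gmap_table_walk() now exported, code outside gmap.c can locate a DAT table entry for a guest address. A minimal sketch (hypothetical helper name), assuming the caller holds guest_table_lock as the in-file users do:

/* Sketch: fetch the segment-table entry (level 1) for a guest address. */
static unsigned long peek_segment_entry(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long *table, entry = 0;

	spin_lock(&gmap->guest_table_lock);
	table = gmap_table_walk(gmap, gaddr, 1);
	if (table)
		entry = *table;
	spin_unlock(&gmap->guest_table_lock);
	return entry;	/* 0 if no segment-table entry exists */
}
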
/**
* gmap_pte_op_walk - walk the gmap page table, get the page table lock
@@ -1101,86 +984,40 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
* @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
* @bits: pgste notification bits to set
*
- * Returns 0 if successfully protected, -ENOMEM if out of memory and
- * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
+ * Returns:
+ * PAGE_SIZE if a small page was successfully protected;
+ * HPAGE_SIZE if a large page was successfully protected;
+ * -ENOMEM if out of memory;
+ * -EFAULT if gaddr is invalid (or mapping for shadows is missing);
+ * -EAGAIN if the guest mapping is missing and should be fixed by the caller.
*
- * Called with sg->mm->mmap_lock in read.
+ * Context: Called with sg->mm->mmap_lock in read.
*/
-static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
- unsigned long len, int prot, unsigned long bits)
+int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits)
{
- unsigned long vmaddr, dist;
pmd_t *pmdp;
- int rc;
+ int rc = 0;
BUG_ON(gmap_is_shadow(gmap));
- while (len) {
- rc = -EAGAIN;
- pmdp = gmap_pmd_op_walk(gmap, gaddr);
- if (pmdp) {
- if (!pmd_leaf(*pmdp)) {
- rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
- bits);
- if (!rc) {
- len -= PAGE_SIZE;
- gaddr += PAGE_SIZE;
- }
- } else {
- rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
- bits);
- if (!rc) {
- dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
- len = len < dist ? 0 : len - dist;
- gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
- }
- }
- gmap_pmd_op_end(gmap, pmdp);
- }
- if (rc) {
- if (rc == -EINVAL)
- return rc;
- /* -EAGAIN, fixup of userspace mm and gmap */
- vmaddr = __gmap_translate(gmap, gaddr);
- if (IS_ERR_VALUE(vmaddr))
- return vmaddr;
- rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
- if (rc)
- return rc;
- }
- }
- return 0;
-}
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (!pmdp)
+ return -EAGAIN;
-/**
- * gmap_mprotect_notify - change access rights for a range of ptes and
- * call the notifier if any pte changes again
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: virtual address in the guest address space
- * @len: size of area
- * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
- *
- * Returns 0 if for each page in the given range a gmap mapping exists,
- * the new access rights could be set and the notifier could be armed.
- * If the gmap mapping is missing for one or more pages -EFAULT is
- * returned. If no memory could be allocated -ENOMEM is returned.
- * This function establishes missing page table entries.
- */
-int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
- unsigned long len, int prot)
-{
- int rc;
+ if (!pmd_leaf(*pmdp)) {
+ rc = gmap_protect_pte(gmap, gaddr, pmdp, prot, bits);
+ if (!rc)
+ rc = PAGE_SIZE;
+ } else {
+ rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot, bits);
+ if (!rc)
+ rc = HPAGE_SIZE;
+ }
+ gmap_pmd_op_end(gmap, pmdp);
- if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
- return -EINVAL;
- if (!MACHINE_HAS_ESOP && prot == PROT_READ)
- return -EINVAL;
- mmap_read_lock(gmap->mm);
- rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
- mmap_read_unlock(gmap->mm);
return rc;
}
-EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
+EXPORT_SYMBOL_GPL(gmap_protect_one);
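
gmap_protect_one() pushes the range loop to its callers and reports how much it protected. A sketch of that caller-side loop, mirroring the body of the removed gmap_protect_range() (hypothetical helper; gmap_pte_op_fixup() is static in gmap.c, so code like this would live in the same file):

/* Sketch: protect a page-aligned range using the new single-step helper. */
static int protect_range_sketch(struct gmap *gmap, unsigned long gaddr,
				unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	int rc;

	while (len) {
		rc = gmap_protect_one(gmap, gaddr, prot, bits);
		if (rc == -EAGAIN) {
			/* no guest mapping yet: fault it in and retry */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		if (rc < 0)
			return rc;
		/* rc is PAGE_SIZE or HPAGE_SIZE: step to the next boundary */
		dist = rc - (gaddr & (rc - 1));
		gaddr += dist;
		len = len < dist ? 0 : len - dist;
	}
	return 0;
}
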
/**
* gmap_read_table - get an unsigned long value from a guest page table using
@@ -1414,7 +1251,6 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
/* Free page table */
ptdesc = page_ptdesc(phys_to_page(pgt));
- list_del(&ptdesc->pt_list);
page_table_free_pgste(ptdesc);
}
@@ -1442,7 +1278,6 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
/* Free page table */
ptdesc = page_ptdesc(phys_to_page(pgt));
- list_del(&ptdesc->pt_list);
page_table_free_pgste(ptdesc);
}
}
@@ -1472,7 +1307,6 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
/* Free segment table */
page = phys_to_page(sgt);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1500,7 +1334,6 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
/* Free segment table */
page = phys_to_page(sgt);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1530,7 +1363,6 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
/* Free region 3 table */
page = phys_to_page(r3t);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1558,7 +1390,6 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
/* Free region 3 table */
page = phys_to_page(r3t);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1588,7 +1419,6 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
__gmap_unshadow_r2t(sg, raddr, __va(r2t));
/* Free region 2 table */
page = phys_to_page(r2t);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1620,7 +1450,6 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
r1t[i] = _REGION1_ENTRY_EMPTY;
/* Free region 2 table */
page = phys_to_page(r2t);
- list_del(&page->lru);
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1631,7 +1460,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
*
* Called with sg->guest_table_lock
*/
-static void gmap_unshadow(struct gmap *sg)
+void gmap_unshadow(struct gmap *sg)
{
unsigned long *table;
@@ -1657,143 +1486,7 @@ static void gmap_unshadow(struct gmap *sg)
break;
}
}
-
-/**
- * gmap_find_shadow - find a specific asce in the list of shadow tables
- * @parent: pointer to the parent gmap
- * @asce: ASCE for which the shadow table is created
- * @edat_level: edat level to be used for the shadow translation
- *
- * Returns the pointer to a gmap if a shadow table with the given asce is
- * already available, ERR_PTR(-EAGAIN) if another one is just being created,
- * otherwise NULL
- */
-static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
- int edat_level)
-{
- struct gmap *sg;
-
- list_for_each_entry(sg, &parent->children, list) {
- if (sg->orig_asce != asce || sg->edat_level != edat_level ||
- sg->removed)
- continue;
- if (!sg->initialized)
- return ERR_PTR(-EAGAIN);
- refcount_inc(&sg->ref_count);
- return sg;
- }
- return NULL;
-}
-
-/**
- * gmap_shadow_valid - check if a shadow guest address space matches the
- * given properties and is still valid
- * @sg: pointer to the shadow guest address space structure
- * @asce: ASCE for which the shadow table is requested
- * @edat_level: edat level to be used for the shadow translation
- *
- * Returns 1 if the gmap shadow is still valid and matches the given
- * properties, the caller can continue using it. Returns 0 otherwise, the
- * caller has to request a new shadow gmap in this case.
- *
- */
-int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
-{
- if (sg->removed)
- return 0;
- return sg->orig_asce == asce && sg->edat_level == edat_level;
-}
-EXPORT_SYMBOL_GPL(gmap_shadow_valid);
-
-/**
- * gmap_shadow - create/find a shadow guest address space
- * @parent: pointer to the parent gmap
- * @asce: ASCE for which the shadow table is created
- * @edat_level: edat level to be used for the shadow translation
- *
- * The pages of the top level page table referred by the asce parameter
- * will be set to read-only and marked in the PGSTEs of the kvm process.
- * The shadow table will be removed automatically on any change to the
- * PTE mapping for the source table.
- *
- * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
- * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
- * parent gmap table could not be protected.
- */
-struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
- int edat_level)
-{
- struct gmap *sg, *new;
- unsigned long limit;
- int rc;
-
- BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
- BUG_ON(gmap_is_shadow(parent));
- spin_lock(&parent->shadow_lock);
- sg = gmap_find_shadow(parent, asce, edat_level);
- spin_unlock(&parent->shadow_lock);
- if (sg)
- return sg;
- /* Create a new shadow gmap */
- limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
- if (asce & _ASCE_REAL_SPACE)
- limit = -1UL;
- new = gmap_alloc(limit);
- if (!new)
- return ERR_PTR(-ENOMEM);
- new->mm = parent->mm;
- new->parent = gmap_get(parent);
- new->private = parent->private;
- new->orig_asce = asce;
- new->edat_level = edat_level;
- new->initialized = false;
- spin_lock(&parent->shadow_lock);
- /* Recheck if another CPU created the same shadow */
- sg = gmap_find_shadow(parent, asce, edat_level);
- if (sg) {
- spin_unlock(&parent->shadow_lock);
- gmap_free(new);
- return sg;
- }
- if (asce & _ASCE_REAL_SPACE) {
- /* only allow one real-space gmap shadow */
- list_for_each_entry(sg, &parent->children, list) {
- if (sg->orig_asce & _ASCE_REAL_SPACE) {
- spin_lock(&sg->guest_table_lock);
- gmap_unshadow(sg);
- spin_unlock(&sg->guest_table_lock);
- list_del(&sg->list);
- gmap_put(sg);
- break;
- }
- }
- }
- refcount_set(&new->ref_count, 2);
- list_add(&new->list, &parent->children);
- if (asce & _ASCE_REAL_SPACE) {
- /* nothing to protect, return right away */
- new->initialized = true;
- spin_unlock(&parent->shadow_lock);
- return new;
- }
- spin_unlock(&parent->shadow_lock);
- /* protect after insertion, so it will get properly invalidated */
- mmap_read_lock(parent->mm);
- rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
- ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
- PROT_READ, GMAP_NOTIFY_SHADOW);
- mmap_read_unlock(parent->mm);
- spin_lock(&parent->shadow_lock);
- new->initialized = true;
- if (rc) {
- list_del(&new->list);
- gmap_free(new);
- new = ERR_PTR(rc);
- }
- spin_unlock(&parent->shadow_lock);
- return new;
-}
-EXPORT_SYMBOL_GPL(gmap_shadow);
+EXPORT_SYMBOL(gmap_unshadow);
/**
* gmap_shadow_r2t - create an empty shadow region 2 table
@@ -1827,9 +1520,6 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
- page->index = r2t & _REGION_ENTRY_ORIGIN;
- if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
s_r2t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1851,7 +1541,6 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= (r2t & _REGION_ENTRY_PROTECT);
- list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
@@ -1911,9 +1600,6 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
- page->index = r3t & _REGION_ENTRY_ORIGIN;
- if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
s_r3t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1935,7 +1621,6 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= (r3t & _REGION_ENTRY_PROTECT);
- list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
@@ -1995,9 +1680,6 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
- page->index = sgt & _REGION_ENTRY_ORIGIN;
- if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
s_sgt = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -2019,7 +1701,6 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
if (sg->edat_level >= 1)
*table |= sgt & _REGION_ENTRY_PROTECT;
- list_add(&page->lru, &sg->crst_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_REGION_ENTRY_INVALID;
@@ -2052,45 +1733,22 @@ out_free:
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
-/**
- * gmap_shadow_pgt_lookup - find a shadow page table
- * @sg: pointer to the shadow guest address space structure
- * @saddr: the address in the shadow guest address space
- * @pgt: parent gmap address of the page table to get shadowed
- * @dat_protection: if the pgtable is marked as protected by dat
- * @fake: pgt references contiguous guest memory block, not a pgtable
- *
- * Returns 0 if the shadow page table was found and -EAGAIN if the page
- * table was not found.
- *
- * Called with sg->mm->mmap_lock in read.
- */
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
- unsigned long *pgt, int *dat_protection,
- int *fake)
+static void gmap_pgste_set_pgt_addr(struct ptdesc *ptdesc, unsigned long pgt_addr)
{
- unsigned long *table;
- struct page *page;
- int rc;
+ unsigned long *pgstes = page_to_virt(ptdesc_page(ptdesc));
- BUG_ON(!gmap_is_shadow(sg));
- spin_lock(&sg->guest_table_lock);
- table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
- if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
- /* Shadow page tables are full pages (pte+pgste) */
- page = pfn_to_page(*table >> PAGE_SHIFT);
- *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
- *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
- *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
- rc = 0;
- } else {
- rc = -EAGAIN;
- }
- spin_unlock(&sg->guest_table_lock);
- return rc;
+ pgstes += _PAGE_ENTRIES;
+
+ pgstes[0] &= ~PGSTE_ST2_MASK;
+ pgstes[1] &= ~PGSTE_ST2_MASK;
+ pgstes[2] &= ~PGSTE_ST2_MASK;
+ pgstes[3] &= ~PGSTE_ST2_MASK;
+ pgstes[0] |= (pgt_addr >> 16) & PGSTE_ST2_MASK;
+ pgstes[1] |= pgt_addr & PGSTE_ST2_MASK;
+ pgstes[2] |= (pgt_addr << 16) & PGSTE_ST2_MASK;
+ pgstes[3] |= (pgt_addr << 32) & PGSTE_ST2_MASK;
}
-EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
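
gmap_pgste_set_pgt_addr() spreads the 64-bit origin of the parent page table across the 16-bit ST2 software field of the first four PGSTEs, replacing the page->index bookkeeping that the removed lookup relied on. The matching decoder is added on the KVM side of this series; a sketch of the inverse, assuming PGSTE_ST2_MASK selects the same 16-bit field in every PGSTE (function name hypothetical), is:

/* Sketch: reassemble the parent page-table origin stored by the setter above. */
static unsigned long gmap_pgste_get_pgt_addr(struct ptdesc *ptdesc)
{
	unsigned long *pgstes = page_to_virt(ptdesc_page(ptdesc));
	unsigned long res;

	pgstes += _PAGE_ENTRIES;

	res  = (pgstes[0] & PGSTE_ST2_MASK) << 16;	/* undoes the >> 16 */
	res |=  pgstes[1] & PGSTE_ST2_MASK;
	res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;	/* undoes the << 16 */
	res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;	/* undoes the << 32 */
	return res;
}
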
/**
* gmap_shadow_pgt - instantiate a shadow page table
@@ -2119,9 +1777,10 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
ptdesc = page_table_alloc_pgste(sg->mm);
if (!ptdesc)
return -ENOMEM;
- ptdesc->pt_index = pgt & _SEGMENT_ENTRY_ORIGIN;
+ origin = pgt & _SEGMENT_ENTRY_ORIGIN;
if (fake)
- ptdesc->pt_index |= GMAP_SHADOW_FAKE_TABLE;
+ origin |= GMAP_SHADOW_FAKE_TABLE;
+ gmap_pgste_set_pgt_addr(ptdesc, origin);
s_pgt = page_to_phys(ptdesc_page(ptdesc));
/* Install shadow page table */
spin_lock(&sg->guest_table_lock);
@@ -2140,7 +1799,6 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
/* mark as invalid as long as the parent table is not protected */
*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
(pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
- list_add(&ptdesc->pt_list, &sg->pt_list);
if (fake) {
/* nothing to protect for fake tables */
*table &= ~_SEGMENT_ENTRY_INVALID;
@@ -2318,7 +1976,6 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
pte_t *pte, unsigned long bits)
{
unsigned long offset, gaddr = 0;
- unsigned long *table;
struct gmap *gmap, *sg, *next;
offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
@@ -2326,12 +1983,9 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- table = radix_tree_lookup(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
- if (table)
- gaddr = __gmap_segment_gaddr(table) + offset;
+ gaddr = host_to_guest_lookup(gmap, vmaddr) + offset;
spin_unlock(&gmap->guest_table_lock);
- if (!table)
+ if (!IS_GADDR_VALID(gaddr))
continue;
if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
@@ -2391,10 +2045,8 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
+ pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
if (pmdp) {
- gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
pmdp_notify_gmap(gmap, pmdp, gaddr);
WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
_SEGMENT_ENTRY_GMAP_UC |
@@ -2438,28 +2090,25 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
*/
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
- unsigned long *entry, gaddr;
+ unsigned long gaddr;
struct gmap *gmap;
pmd_t *pmdp;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
- if (entry) {
- pmdp = (pmd_t *)entry;
- gaddr = __gmap_segment_gaddr(entry);
+ pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+ if (pmdp) {
pmdp_notify_gmap(gmap, pmdp, gaddr);
- WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
- _SEGMENT_ENTRY_GMAP_UC |
- _SEGMENT_ENTRY));
+ WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC |
+ _SEGMENT_ENTRY));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
gmap->asce, IDTE_LOCAL);
else if (MACHINE_HAS_IDTE)
__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
- *entry = _SEGMENT_ENTRY_EMPTY;
+ *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
}
spin_unlock(&gmap->guest_table_lock);
}
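
host_to_guest_pmd_delete() replaces the open-coded radix_tree_delete() plus __gmap_segment_gaddr() pattern in the hunks above. Its body is not part of this excerpt; a plausible shape, under the assumption that the radix tree now stores the encoded guest address (see the earlier sketch) and the segment entry is re-derived through gmap_table_walk(), might be:

/* Sketch of the helper used above; the real implementation may differ. */
static pmd_t *host_to_guest_pmd_delete(struct gmap *gmap, unsigned long vmaddr,
				       unsigned long *gaddr)
{
	unsigned long res;

	res = (unsigned long)radix_tree_delete(&gmap->host_to_guest,
					       vmaddr >> PMD_SHIFT);
	if (!IS_GADDR_VALID(res))
		return NULL;
	*gaddr = res & HPAGE_MASK;	/* strip the validity marker */
	return (pmd_t *)gmap_table_walk(gmap, *gaddr, 1);
}
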
@@ -2474,22 +2123,19 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
*/
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
- unsigned long *entry, gaddr;
+ unsigned long gaddr;
struct gmap *gmap;
pmd_t *pmdp;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
- if (entry) {
- pmdp = (pmd_t *)entry;
- gaddr = __gmap_segment_gaddr(entry);
+ pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+ if (pmdp) {
pmdp_notify_gmap(gmap, pmdp, gaddr);
- WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
- _SEGMENT_ENTRY_GMAP_UC |
- _SEGMENT_ENTRY));
+ WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC |
+ _SEGMENT_ENTRY));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
gmap->asce, IDTE_GLOBAL);
@@ -2497,7 +2143,7 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
- *entry = _SEGMENT_ENTRY_EMPTY;
+ *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
}
spin_unlock(&gmap->guest_table_lock);
}
@@ -2943,49 +2589,6 @@ int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
/**
- * s390_unlist_old_asce - Remove the topmost level of page tables from the
- * list of page tables of the gmap.
- * @gmap: the gmap whose table is to be removed
- *
- * On s390x, KVM keeps a list of all pages containing the page tables of the
- * gmap (the CRST list). This list is used at tear down time to free all
- * pages that are now not needed anymore.
- *
- * This function removes the topmost page of the tree (the one pointed to by
- * the ASCE) from the CRST list.
- *
- * This means that it will not be freed when the VM is torn down, and needs
- * to be handled separately by the caller, unless a leak is actually
- * intended. Notice that this function will only remove the page from the
- * list, the page will still be used as a top level page table (and ASCE).
- */
-void s390_unlist_old_asce(struct gmap *gmap)
-{
- struct page *old;
-
- old = virt_to_page(gmap->table);
- spin_lock(&gmap->guest_table_lock);
- list_del(&old->lru);
- /*
- * Sometimes the topmost page might need to be "removed" multiple
- * times, for example if the VM is rebooted into secure mode several
- * times concurrently, or if s390_replace_asce fails after calling
- * s390_remove_old_asce and is attempted again later. In that case
- * the old asce has been removed from the list, and therefore it
- * will not be freed when the VM terminates, but the ASCE is still
- * in use and still pointed to.
- * A subsequent call to replace_asce will follow the pointer and try
- * to remove the same page from the list again.
- * Therefore it's necessary that the page of the ASCE has valid
- * pointers, so list_del can work (and do nothing) without
- * dereferencing stale or invalid pointers.
- */
- INIT_LIST_HEAD(&old->lru);
- spin_unlock(&gmap->guest_table_lock);
-}
-EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
-
-/**
* s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
* @gmap: the gmap whose ASCE needs to be replaced
*
@@ -3004,8 +2607,6 @@ int s390_replace_asce(struct gmap *gmap)
struct page *page;
void *table;
- s390_unlist_old_asce(gmap);
-
/* Replacing segment type ASCEs would cause serious issues */
if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
return -EINVAL;
@@ -3013,19 +2614,9 @@ int s390_replace_asce(struct gmap *gmap)
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
- page->index = 0;
table = page_to_virt(page);
memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
- /*
- * The caller has to deal with the old ASCE, but here we make sure
- * the new one is properly added to the CRST list, so that
- * it will be freed when the VM is torn down.
- */
- spin_lock(&gmap->guest_table_lock);
- list_add(&page->lru, &gmap->crst_list);
- spin_unlock(&gmap->guest_table_lock);
-
/* Set new table origin while preserving existing ASCE control bits */
asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
WRITE_ONCE(gmap->asce, asce);
@@ -3035,3 +2626,31 @@ int s390_replace_asce(struct gmap *gmap)
return 0;
}
EXPORT_SYMBOL_GPL(s390_replace_asce);
+
+/**
+ * kvm_s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split
+ * @mm: the mm containing the folio to work on
+ * @folio: the folio
+ * @split: whether to split a large folio
+ *
+ * Context: Must be called while holding an extra reference to the folio;
+ * the mm lock should not be held.
+ */
+int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
+{
+ int rc;
+
+ lockdep_assert_not_held(&mm->mmap_lock);
+ folio_wait_writeback(folio);
+ lru_add_drain_all();
+ if (split) {
+ folio_lock(folio);
+ rc = split_folio(folio);
+ folio_unlock(folio);
+
+ if (rc != -EBUSY)
+ return rc;
+ }
+ return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_wiggle_split_folio);
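
kvm_s390_wiggle_split_folio() returns -EAGAIN when the folio still needs another pass; the intended use is a retry of the surrounding operation rather than a tight loop. An illustrative caller (hypothetical function name), following the Context note about the extra reference and the mmap_lock:

/* Sketch: drain references on a folio that blocked a conversion, then retry. */
static int shake_loose_folio(struct mm_struct *mm, struct folio *folio)
{
	int rc;

	folio_get(folio);	/* the helper expects an extra reference */
	rc = kvm_s390_wiggle_split_folio(mm, folio, folio_test_large(folio));
	folio_put(folio);
	return rc;		/* -EAGAIN: caller retries the whole operation */
}
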
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 58696a0c4e4a..30387a6e98ff 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -21,7 +21,7 @@
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
-static struct ctl_table page_table_sysctl[] = {
+static const struct ctl_table page_table_sysctl[] = {
{
.procname = "allocate_pgste",
.data = &page_table_allocate_pgste,
@@ -88,12 +88,14 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
if (unlikely(!p4d))
goto err_p4d;
crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
+ pagetable_p4d_ctor(virt_to_ptdesc(p4d));
}
if (end > _REGION1_SIZE) {
pgd = crst_table_alloc(mm);
if (unlikely(!pgd))
goto err_pgd;
crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
+ pagetable_pgd_ctor(virt_to_ptdesc(pgd));
}
spin_lock_bh(&mm->page_table_lock);
@@ -130,6 +132,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
return 0;
err_pgd:
+ pagetable_dtor(virt_to_ptdesc(p4d));
crst_table_free(mm, p4d);
err_p4d:
return -ENOMEM;
@@ -173,37 +176,16 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
}
table = ptdesc_to_virt(ptdesc);
__arch_set_page_dat(table, 1);
- /* pt_list is used by gmap only */
- INIT_LIST_HEAD(&ptdesc->pt_list);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
return table;
}
-static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
-{
- pagetable_pte_dtor(ptdesc);
- pagetable_free(ptdesc);
-}
-
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
struct ptdesc *ptdesc = virt_to_ptdesc(table);
- pagetable_pte_dtor_free(ptdesc);
-}
-
-void __tlb_remove_table(void *table)
-{
- struct ptdesc *ptdesc = virt_to_ptdesc(table);
- struct page *page = ptdesc_page(ptdesc);
-
- if (compound_order(page) == CRST_ALLOC_ORDER) {
- /* pmd, pud, or p4d */
- pagetable_free(ptdesc);
- return;
- }
- pagetable_pte_dtor_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -211,7 +193,7 @@ static void pte_free_now(struct rcu_head *head)
{
struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
- pagetable_pte_dtor_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7c684c54e721..8ead999e340b 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -662,7 +662,7 @@ void __init vmem_map_init(void)
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
if (debug_pagealloc_enabled())
- __set_memory_4k(__va(0), __va(0) + ident_map_size);
+ __set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index d5ace00d10f0..857afbc4828f 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
{
return !s390_pci_no_rid && zdev->rid_available &&
- zpci_is_device_configured(zdev) &&
!zdev->vfn;
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index 24eccaa29337..bdcf2a3b6c41 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -13,7 +13,7 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
+KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index a1bc02b29c81..7d76c417f83f 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -201,6 +201,17 @@ static int cmp_long_insn(const void *a, const void *b)
return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
}
+static void print_insn_name(const char *name)
+{
+ size_t i, len;
+
+ len = strlen(name);
+ printf("{");
+ for (i = 0; i < len; i++)
+ printf(" \'%c\',", name[i]);
+ printf(" }");
+}
+
static void print_long_insn(struct gen_opcode *desc)
{
struct insn *insn;
@@ -223,7 +234,9 @@ static void print_long_insn(struct gen_opcode *desc)
insn = &desc->insn[i];
if (insn->name_len < 6)
continue;
- printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
+ printf("\t[LONG_INSN_%s] = ", insn->upper);
+ print_insn_name(insn->name);
+ printf(", \\\n");
}
printf("}\n\n");
}
@@ -236,11 +249,13 @@ static void print_opcode(struct insn *insn, int nr)
if (insn->type->byte != 0)
opcode += 2;
printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
- if (insn->name_len < 6)
- printf(".name = \"%s\" ", insn->name);
- else
- printf(".offset = LONG_INSN_%s ", insn->upper);
- printf("}, \\\n");
+ if (insn->name_len < 6) {
+ printf(".name = ");
+ print_insn_name(insn->name);
+ } else {
+ printf(".offset = LONG_INSN_%s", insn->upper);
+ }
+ printf(" }, \\\n");
}
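
The net effect on the generated tables is that instruction names are emitted as brace-enclosed character arrays instead of string literals, so the table fields can be fixed-size char arrays without a terminating NUL. An illustrative before/after of one generated line (index, opcode and format values are made up):

/*
 * before:  [  12] = { .opfrag = 0x1a, .format = INSTR_RR_RR, .name = "ar" }, \
 * after:   [  12] = { .opfrag = 0x1a, .format = INSTR_RR_RR, .name = { 'a', 'r', } }, \
 */
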
static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
diff --git a/arch/sh/Kbuild b/arch/sh/Kbuild
index 056efec72c2a..0da6c6d6821a 100644
--- a/arch/sh/Kbuild
+++ b/arch/sh/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/ boards/
obj-$(CONFIG_SH_FPU_EMU) += math-emu/
-obj-$(CONFIG_USE_BUILTIN_DTB) += boot/dts/
obj-$(CONFIG_HD6446X_SERIES) += cchips/hd6446x/
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 04ff5fb9242e..89185af7bcc9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -648,10 +648,11 @@ endmenu
menu "Boot options"
-config USE_BUILTIN_DTB
+config BUILTIN_DTB
bool "Use builtin DTB"
default n
depends on SH_DEVICE_TREE
+ select GENERIC_BUILTIN_DTB
help
Link a device tree blob for particular hardware into the kernel,
suppressing use of the DTB pointer provided by the bootloader.
@@ -659,10 +660,10 @@ config USE_BUILTIN_DTB
not capable of providing a DTB to the kernel, or for experimental
hardware without stable device tree bindings.
-config BUILTIN_DTB_SOURCE
+config BUILTIN_DTB_NAME
string "Source file for builtin DTB"
default ""
- depends on USE_BUILTIN_DTB
+ depends on BUILTIN_DTB
help
Base name (without suffix, relative to arch/sh/boot/dts) for the
DTS file that will be used to produce the DTB linked into the kernel.
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 109bec4dad94..1af93be61b1f 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -80,8 +80,8 @@ config SH_7724_SOLUTION_ENGINE
select SOLUTION_ENGINE
depends on CPU_SUBTYPE_SH7724
select GPIOLIB
- select SND_SOC_AK4642 if SND_SIMPLE_CARD
select REGULATOR_FIXED_VOLTAGE if REGULATOR
+ imply SND_SOC_AK4642 if SND_SIMPLE_CARD
help
Select 7724 SolutionEngine if configuring for a Hitachi SH7724
evaluation board.
@@ -259,8 +259,8 @@ config SH_ECOVEC
bool "EcoVec"
depends on CPU_SUBTYPE_SH7724
select GPIOLIB
- select SND_SOC_DA7210 if SND_SIMPLE_CARD
select REGULATOR_FIXED_VOLTAGE if REGULATOR
+ imply SND_SOC_DA7210 if SND_SIMPLE_CARD
help
Renesas "R0P7724LC0011/21RL (EcoVec)" support.
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index 4a6dec9714a9..d109978a5eb9 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_USE_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 5d8577ab1591..96d938fdf224 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 4e6835de54cf..9022d8af9d68 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -43,9 +43,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
- seq_printf(p, "%*s: ", prec, "NMI");
+ seq_printf(p, "%*s:", prec, "NMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j));
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat.__nmi_count, j), 10);
seq_printf(p, " Non-maskable interrupts\n");
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f2b6f16a46b8..039a51291002 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -249,7 +249,7 @@ void __ref sh_fdt_init(phys_addr_t dt_phys)
/* Avoid calling an __init function on secondary cpus. */
if (done) return;
-#ifdef CONFIG_USE_BUILTIN_DTB
+#ifdef CONFIG_BUILTIN_DTB
dt_virt = __dtb_start;
#else
dt_virt = phys_to_virt(dt_phys);
@@ -323,7 +323,7 @@ void __init setup_arch(char **cmdline_p)
sh_early_platform_driver_probe("earlyprintk", 1, 1);
#ifdef CONFIG_OF_EARLY_FLATTREE
-#ifdef CONFIG_USE_BUILTIN_DTB
+#ifdef CONFIG_BUILTIN_DTB
unflatten_and_copy_device_tree();
#else
unflatten_device_tree();
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 2a88b0c9e70f..289a2fecebef 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -137,10 +137,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
if (pud_none(*pud)) {
pmd_t *pmd;
- pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pmd)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
BUG_ON(pmd != pmd_offset(pud, 0));
}
@@ -153,10 +150,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
if (pmd_none(*pmd)) {
pte_t *pte;
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index dcfdb7f1dae9..0f88123925a4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -110,6 +110,7 @@ config SPARC64
select HAVE_SETUP_PER_CPU_AREA
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ select ARCH_HAS_CRC32
config ARCH_PROC_KCORE_TEXT
def_bool y
diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig
index cfe5102b1c68..e858597de89d 100644
--- a/arch/sparc/crypto/Kconfig
+++ b/arch/sparc/crypto/Kconfig
@@ -16,16 +16,6 @@ config CRYPTO_DES_SPARC64
Architecture: sparc64
-config CRYPTO_CRC32C_SPARC64
- tristate "CRC32c"
- depends on SPARC64
- select CRYPTO_HASH
- select CRC32
- help
- CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
-
- Architecture: sparc64
-
config CRYPTO_MD5_SPARC64
tristate "Digests: MD5"
depends on SPARC64
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index d257186c27d1..a2d7fca40cb4 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -12,8 +12,6 @@ obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
-obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
-
sha1-sparc64-y := sha1_asm.o sha1_glue.o
sha256-sparc64-y := sha256_asm.o sha256_glue.o
sha512-sparc64-y := sha512_asm.o sha512_glue.o
@@ -22,5 +20,3 @@ md5-sparc64-y := md5_asm.o md5_glue.o
aes-sparc64-y := aes_asm.o aes_glue.o
des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o
-
-crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
deleted file mode 100644
index 913b9a09e885..000000000000
--- a/arch/sparc/crypto/crc32c_glue.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Glue code for CRC32C optimized for sparc64 crypto opcodes.
- *
- * This is based largely upon arch/x86/crypto/crc32c-intel.c
- *
- * Copyright (C) 2008 Intel Corporation
- * Authors: Austin Zhang <austin_zhang@linux.intel.com>
- * Kent Liu <kent.liu@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/crc32.h>
-
-#include <crypto/internal/hash.h>
-
-#include <asm/pstate.h>
-#include <asm/elf.h>
-#include <linux/unaligned.h>
-
-#include "opcodes.h"
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = get_unaligned_le32(key);
- return 0;
-}
-
-static int crc32c_sparc64_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = *mctx;
-
- return 0;
-}
-
-extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len);
-
-static u32 crc32c_compute(u32 crc, const u8 *data, unsigned int len)
-{
- unsigned int n = -(uintptr_t)data & 7;
-
- if (n) {
- /* Data isn't 8-byte aligned. Align it. */
- n = min(n, len);
- crc = __crc32c_le(crc, data, n);
- data += n;
- len -= n;
- }
- n = len & ~7U;
- if (n) {
- crc32c_sparc64(&crc, (const u64 *)data, n);
- data += n;
- len -= n;
- }
- if (len)
- crc = __crc32c_le(crc, data, len);
- return crc;
-}
-
-static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = crc32c_compute(*crcp, data, len);
- return 0;
-}
-
-static int __crc32c_sparc64_finup(const u32 *crcp, const u8 *data,
- unsigned int len, u8 *out)
-{
- put_unaligned_le32(~crc32c_compute(*crcp, data, len), out);
- return 0;
-}
-
-static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- put_unaligned_le32(~*crcp, out);
- return 0;
-}
-
-static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-
-static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
-
- return 0;
-}
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-static struct shash_alg alg = {
- .setkey = crc32c_sparc64_setkey,
- .init = crc32c_sparc64_init,
- .update = crc32c_sparc64_update,
- .final = crc32c_sparc64_final,
- .finup = crc32c_sparc64_finup,
- .digest = crc32c_sparc64_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = crc32c_sparc64_cra_init,
- }
-};
-
-static bool __init sparc64_has_crc32c_opcode(void)
-{
- unsigned long cfr;
-
- if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
- return false;
-
- __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
- if (!(cfr & CFR_CRC32C))
- return false;
-
- return true;
-}
-
-static int __init crc32c_sparc64_mod_init(void)
-{
- if (sparc64_has_crc32c_opcode()) {
- pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
- return crypto_register_shash(&alg);
- }
- pr_info("sparc64 crc32c opcode not available.\n");
- return -ENODEV;
-}
-
-static void __exit crc32c_sparc64_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc32c_sparc64_mod_init);
-module_exit(crc32c_sparc64_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
-
-MODULE_ALIAS_CRYPTO("crc32c");
-
-#include "crop_devid.c"
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index 3037187482db..1a6e694418e3 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -33,6 +33,7 @@ void flush_tlb_pending(void);
#define tlb_needs_table_invalidate() (false)
#endif
+#define __HAVE_ARCH_TLB_REMOVE_TABLE
#include <asm-generic/tlb.h>
#endif /* _SPARC64_TLB_H */
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 8605dd710f3c..5210991429d5 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -199,18 +199,18 @@ int arch_show_interrupts(struct seq_file *p, int prec)
int j;
#ifdef CONFIG_SMP
- seq_printf(p, "RES: ");
+ seq_printf(p, "RES:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_resched_count, 10);
seq_printf(p, " IPI rescheduling interrupts\n");
- seq_printf(p, "CAL: ");
+ seq_printf(p, "CAL:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_call_count, 10);
seq_printf(p, " IPI function call interrupts\n");
#endif
- seq_printf(p, "NMI: ");
+ seq_printf(p, "NMI:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).counter);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).counter, 10);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 01ee800efde3..ded463c82abd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
+#include <linux/string_choices.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -145,9 +146,7 @@ static int hv_irq_version;
*/
static bool sun4v_cookie_only_virqs(void)
{
- if (hv_irq_version >= 3)
- return true;
- return false;
+ return hv_irq_version >= 3;
}
static void __init irq_init_hv(void)
@@ -170,7 +169,7 @@ static void __init irq_init_hv(void)
pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
hv_irq_version,
- sun4v_cookie_only_virqs() ? "enabled" : "disabled");
+ str_enabled_disabled(sun4v_cookie_only_virqs()));
}
/* This function is for the timer interrupt.*/
@@ -304,9 +303,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
- seq_printf(p, "NMI: ");
+ seq_printf(p, "NMI:");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+ seq_put_decimal_ull_width(p, " ", cpu_data(j).__nmi_count, 10);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 50a0927a84a6..ddac216a2aff 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -932,7 +932,7 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
const struct pci_slot_names {
u32 slot_mask;
- char names[0];
+ char names[];
} *prop;
const char *sp;
int len, i;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 5eeec9ad6845..2576f4f31309 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -361,7 +361,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
int i, saw_mem, saw_io;
int num_pbm_ranges;
- /* Corresponding generic code in of_pci_get_host_bridge_resources() */
+ /* Corresponds to generic devm_of_pci_get_host_bridge_resources() */
saw_mem = saw_io = 0;
pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i);
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 3df960c137f7..a67dd67f10c8 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -28,9 +28,7 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!ret)
- panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+ ret = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
prom_early_allocated += size;
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 07933d75ac81..1a1a9d6b8f2e 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -419,13 +419,13 @@ struct vio_remove_node_data {
u64 node;
};
-static int vio_md_node_match(struct device *dev, void *arg)
+static int vio_md_node_match(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vio_remove_node_data *node_data;
+ const struct vio_remove_node_data *node_data;
u64 node;
- node_data = (struct vio_remove_node_data *)arg;
+ node_data = (const struct vio_remove_node_data *)arg;
node = vio_vdev_node(node_data->hp, vdev);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index ee5091dd67ed..5724d0f356eb 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -53,3 +53,5 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
+obj-$(CONFIG_CRC32_ARCH) += crc32-sparc.o
+crc32-sparc-y := crc32_glue.o crc32c_asm.o
diff --git a/arch/sparc/lib/crc32_glue.c b/arch/sparc/lib/crc32_glue.c
new file mode 100644
index 000000000000..41076d2b1fd2
--- /dev/null
+++ b/arch/sparc/lib/crc32_glue.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Glue code for CRC32C optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon arch/x86/crypto/crc32c-intel.c
+ *
+ * Copyright (C) 2008 Intel Corporation
+ * Authors: Austin Zhang <austin_zhang@linux.intel.com>
+ * Kent Liu <kent.liu@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+static DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode);
+
+u32 crc32_le_arch(u32 crc, const u8 *data, size_t len)
+{
+ return crc32_le_base(crc, data, len);
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+void crc32c_sparc64(u32 *crcp, const u64 *data, size_t len);
+
+u32 crc32c_le_arch(u32 crc, const u8 *data, size_t len)
+{
+ size_t n = -(uintptr_t)data & 7;
+
+ if (!static_branch_likely(&have_crc32c_opcode))
+ return crc32c_le_base(crc, data, len);
+
+ if (n) {
+ /* Data isn't 8-byte aligned. Align it. */
+ n = min(n, len);
+ crc = crc32c_le_base(crc, data, n);
+ data += n;
+ len -= n;
+ }
+ n = len & ~7U;
+ if (n) {
+ crc32c_sparc64(&crc, (const u64 *)data, n);
+ data += n;
+ len -= n;
+ }
+ if (len)
+ crc = crc32c_le_base(crc, data, len);
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *data, size_t len)
+{
+ return crc32_be_base(crc, data, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_sparc_init(void)
+{
+ unsigned long cfr;
+
+ if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+ return 0;
+
+ __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+ if (!(cfr & CFR_CRC32C))
+ return 0;
+
+ static_branch_enable(&have_crc32c_opcode);
+ pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
+ return 0;
+}
+arch_initcall(crc32_sparc_init);
+
+static void __exit crc32_sparc_exit(void)
+{
+}
+module_exit(crc32_sparc_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_crc32c_opcode))
+ return CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
diff --git a/arch/sparc/crypto/crc32c_asm.S b/arch/sparc/lib/crc32c_asm.S
index b8659a479242..ee454fa6aed6 100644
--- a/arch/sparc/crypto/crc32c_asm.S
+++ b/arch/sparc/lib/crc32c_asm.S
@@ -3,7 +3,7 @@
#include <asm/visasm.h>
#include <asm/asi.h>
-#include "opcodes.h"
+#include "../crypto/opcodes.h"
ENTRY(crc32c_sparc64)
/* %o0=crc32p, %o1=data_ptr, %o2=len */
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 21f8cbbd0581..05882bca5b73 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2915,7 +2915,7 @@ static void __pte_free(pgtable_t pte)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 9df51a62333d..dd32711022f5 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -277,19 +277,13 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+ srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size,
SRMMU_NOCACHE_ALIGN_MAX);
- if (!srmmu_nocache_pool)
- panic("%s: Failed to allocate %lu bytes align=0x%x\n",
- __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
SMP_CACHE_BYTES);
- if (!srmmu_nocache_bitmap)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- BITS_TO_LONGS(bitmap_bits) * sizeof(long));
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -372,7 +366,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptep)
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
if (page_ref_dec_return(page) == 1)
- pagetable_pte_dtor(page_ptdesc(page));
+ pagetable_dtor(page_ptdesc(page));
spin_unlock(&mm->page_table_lock);
srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
@@ -452,9 +446,7 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!ctx_list_pool)
- panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+ ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 75d04fb4994a..d5a9c5aabaec 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -636,10 +636,7 @@ static int __init eth_setup(char *str)
return 1;
}
- new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
- if (!new)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*new));
+ new = memblock_alloc_or_panic(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->index = n;
diff --git a/arch/um/drivers/rtc_kern.c b/arch/um/drivers/rtc_kern.c
index 134a58f93c85..9158c936c128 100644
--- a/arch/um/drivers/rtc_kern.c
+++ b/arch/um/drivers/rtc_kern.c
@@ -51,6 +51,7 @@ static int uml_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
+ struct timespec64 ts;
unsigned long long secs;
if (!enable && !uml_rtc_alarm_enabled)
@@ -58,7 +59,8 @@ static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
uml_rtc_alarm_enabled = enable;
- secs = uml_rtc_alarm_time - ktime_get_real_seconds();
+ read_persistent_clock64(&ts);
+ secs = uml_rtc_alarm_time - ts.tv_sec;
if (time_travel_mode == TT_MODE_OFF) {
if (!enable) {
@@ -73,7 +75,8 @@ static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
if (enable)
time_travel_add_event_rel(&uml_rtc_alarm_event,
- secs * NSEC_PER_SEC);
+ secs * NSEC_PER_SEC -
+ ts.tv_nsec);
}
return 0;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 64c09db392c1..85b129e2b70b 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1694,10 +1694,7 @@ static int __init vector_setup(char *str)
str, error);
return 1;
}
- new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
- if (!new)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*new));
+ new = memblock_alloc_or_panic(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h
deleted file mode 100644
index 2efac5827188..000000000000
--- a/arch/um/include/asm/fixmap.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_FIXMAP_H
-#define __UM_FIXMAP_H
-
-#include <asm/processor.h>
-#include <asm/archparam.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanism,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-
-#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#include <asm-generic/fixmap.h>
-
-#endif
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 04fb4e6969a4..f0af23c3aeb2 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -27,7 +27,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *);
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
@@ -35,7 +35,7 @@ do { \
#define __pmd_free_tlb(tlb, pmd, address) \
do { \
- pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
+ pagetable_dtor(virt_to_ptdesc(pmd)); \
tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
} while (0)
@@ -43,7 +43,7 @@ do { \
#define __pud_free_tlb(tlb, pud, address) \
do { \
- pagetable_pud_dtor(virt_to_ptdesc(pud)); \
+ pagetable_dtor(virt_to_ptdesc(pud)); \
tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
} while (0)
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 0bd60afcc37d..5601ca98e8a6 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -8,7 +8,8 @@
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
-#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <linux/mm_types.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_NEEDSYNC 0x002
@@ -48,11 +49,9 @@ extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
-#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#define VMALLOC_END (TASK_SIZE-2*PAGE_SIZE)
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
-#define MODULES_LEN (MODULES_VADDR - MODULES_END)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
diff --git a/arch/um/kernel/load_file.c b/arch/um/kernel/load_file.c
index 5cecd0e291fb..cb9d178ab7d8 100644
--- a/arch/um/kernel/load_file.c
+++ b/arch/um/kernel/load_file.c
@@ -48,9 +48,7 @@ void *uml_load_file(const char *filename, unsigned long long *size)
return NULL;
}
- area = memblock_alloc(*size, SMP_CACHE_BYTES);
- if (!area)
- panic("%s: Failed to allocate %llu bytes\n", __func__, *size);
+ area = memblock_alloc_or_panic(*size, SMP_CACHE_BYTES);
if (__uml_load_file(filename, area, *size)) {
memblock_free(area, *size);
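The conversions above (and the SVSM CA allocation later in this series) replace the open-coded allocate-then-panic sequence with a single memblock_alloc_or_panic() call. As a rough sketch of what the callers now rely on -- the real helper is defined in include/linux/memblock.h and may differ in detail -- the conversion is equivalent to:

/* Sketch only: approximates memblock_alloc_or_panic() for the callers
 * converted in this series; not the in-tree definition. */
static inline void *memblock_alloc_or_panic_sketch(phys_addr_t size,
						   phys_addr_t align)
{
	void *ptr = memblock_alloc(size, align);	/* existing memblock API */

	if (!ptr)
		panic("%s: Failed to allocate %llu bytes\n", __func__,
		      (unsigned long long)size);
	return ptr;
}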
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 53248ed04771..befed230aac2 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
-#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
@@ -74,6 +73,7 @@ void __init mem_init(void)
kmalloc_ok = 1;
}
+#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
/*
* Create a page table and place a pointer to it in a middle page
* directory entry.
@@ -152,7 +152,6 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
static void __init fixaddr_user_init( void)
{
-#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
long size = FIXADDR_USER_END - FIXADDR_USER_START;
pte_t *pte;
phys_t p;
@@ -174,13 +173,12 @@ static void __init fixaddr_user_init( void)
pte = virt_to_kpte(vaddr);
pte_set_val(*pte, p, PAGE_READONLY);
}
-#endif
}
+#endif
void __init paging_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- unsigned long vaddr;
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
@@ -191,14 +189,9 @@ void __init paging_init(void)
max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
- /*
- * Fixed mappings, only the page table structure has to be
- * created - mappings will be set by set_fixmap():
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
-
+#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
fixaddr_user_init();
+#endif
}
/*
@@ -214,14 +207,13 @@ void free_initmem(void)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd_t *pgd = __pgd_alloc(mm, 0);
- if (pgd) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ if (pgd)
memcpy(pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
+
return pgd;
}
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 30bdc0a87dc8..e5a2d4d897e0 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,14 +213,6 @@ int __uml_cant_sleep(void) {
/* Is in_interrupt() really needed? */
}
-int user_context(unsigned long sp)
-{
- unsigned long stack;
-
- stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
- return stack != (unsigned long) current_thread_info();
-}
-
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
void do_uml_exitcalls(void)
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 8037a967225d..79ea97d4797e 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -264,7 +264,7 @@ EXPORT_SYMBOL(end_iomem);
#define MIN_VMALLOC (32 * 1024 * 1024)
-static void parse_host_cpu_flags(char *line)
+static void __init parse_host_cpu_flags(char *line)
{
int i;
for (i = 0; i < 32*NCAPINTS; i++) {
@@ -272,7 +272,8 @@ static void parse_host_cpu_flags(char *line)
set_cpu_cap(&boot_cpu_data, i);
}
}
-static void parse_cache_line(char *line)
+
+static void __init parse_cache_line(char *line)
{
long res;
char *to_parse = strstr(line, ":");
@@ -288,7 +289,7 @@ static void parse_cache_line(char *line)
}
}
-static unsigned long get_top_address(char **envp)
+static unsigned long __init get_top_address(char **envp)
{
unsigned long top_addr = (unsigned long) &top_addr;
int i;
@@ -376,9 +377,8 @@ int __init linux_main(int argc, char **argv, char **envp)
iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;
-
- if (physmem_size + iomem_size > max_physmem) {
- physmem_size = max_physmem - iomem_size;
+ if (physmem_size > max_physmem) {
+ physmem_size = max_physmem;
os_info("Physical memory size shrunk to %llu bytes\n",
physmem_size);
}
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 0afcdeb8995b..3c63ce19e3bf 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -19,13 +19,11 @@
#include <um_malloc.h>
#include "internal.h"
-#define PGD_BOUND (4 * 1024 * 1024)
#define STACKSIZE (8 * 1024 * 1024)
-#define THREAD_NAME_LEN (256)
long elf_aux_hwcap;
-static void set_stklim(void)
+static void __init set_stklim(void)
{
struct rlimit lim;
@@ -48,7 +46,7 @@ static void last_ditch_exit(int sig)
exit(1);
}
-static void install_fatal_handler(int sig)
+static void __init install_fatal_handler(int sig)
{
struct sigaction action;
@@ -73,7 +71,7 @@ static void install_fatal_handler(int sig)
#define UML_LIB_PATH ":" OS_LIB_PATH "/uml"
-static void setup_env_path(void)
+static void __init setup_env_path(void)
{
char *new_path = NULL;
char *old_path = NULL;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 27c21c9b6a70..87198d957e2f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -76,6 +76,8 @@ config X86
select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CPU_PASID if IOMMU_SVA
+ select ARCH_HAS_CRC32
+ select ARCH_HAS_CRC_T10DIF if X86_64
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
@@ -323,6 +325,7 @@ config X86
select FUNCTION_ALIGNMENT_4B
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+ select ARCH_SUPPORTS_PT_RECLAIM if X86_64
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/Makefile.postlink b/arch/x86/Makefile.postlink
index fef2e977cc7d..8b8a68162c94 100644
--- a/arch/x86/Makefile.postlink
+++ b/arch/x86/Makefile.postlink
@@ -11,6 +11,7 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
CMD_RELOCS = arch/x86/tools/relocs
OUT_RELOCS = arch/x86/boot/compressed
@@ -20,11 +21,6 @@ quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/$@.relocs
$(CMD_RELOCS) $@ > $(OUT_RELOCS)/$@.relocs; \
$(CMD_RELOCS) --abs-relocs $@
-quiet_cmd_strip_relocs = RSTRIP $@
- cmd_strip_relocs = \
- $(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \
- --remove-section='.rela.*' --remove-section='.rela__*' $@
-
# `@true` prevents complaint when there is nothing to be done
vmlinux: FORCE
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f2051644de94..606c74f27459 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -25,6 +25,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
# avoid errors with '-march=i386', and future flags may depend on the target to
# be valid.
KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
+KBUILD_CFLAGS += -std=gnu11
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index 4e375e7305ac..dcb06dc8b5ae 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -2,6 +2,10 @@
obj-y += core.o
+# jump tables are emitted using absolute references in non-PIC code
+# so they cannot be used in the early SEV startup code
+CFLAGS_core.o += -fno-jump-tables
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
@@ -13,3 +17,6 @@ KCOV_INSTRUMENT_core.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
KCSAN_SANITIZE := n
+
+# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
+UBSAN_SANITIZE := n
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 65d676c0f7bc..82492efc5d94 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -9,6 +9,8 @@
#define pr_fmt(fmt) "SEV: " fmt
+#define DISABLE_BRANCH_PROFILING
+
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
@@ -787,15 +789,10 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
val = sev_es_rd_ghcb_msr();
- if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
- "Wrong PSC response code: 0x%x\n",
- (unsigned int)GHCB_RESP_CODE(val)))
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
goto e_term;
- if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
- "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
- op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
- paddr, GHCB_MSR_PSC_RESP_VAL(val)))
+ if (GHCB_MSR_PSC_RESP_VAL(val))
goto e_term;
/* Page validation must be performed after changing to private */
@@ -831,7 +828,7 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}
-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
/*
@@ -1633,9 +1630,7 @@ static void __init alloc_runtime_data(int cpu)
struct svsm_ca *caa;
/* Allocate the SVSM CA page if an SVSM is present */
- caa = memblock_alloc(sizeof(*caa), PAGE_SIZE);
- if (!caa)
- panic("Can't allocate SVSM CA page\n");
+ caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE);
per_cpu(svsm_caa, cpu) = caa;
per_cpu(svsm_caa_pa, cpu) = __pa(caa);
@@ -2423,7 +2418,7 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
call.rcx = pa;
ret = svsm_perform_call_protocol(&call);
if (ret)
- panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n", ret, call.rax_out);
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa;
RIP_REL_REF(boot_svsm_caa_pa) = pa;
diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/coco/sev/shared.c
index 4386f37bd31d..2e4122f8aa6b 100644
--- a/arch/x86/coco/sev/shared.c
+++ b/arch/x86/coco/sev/shared.c
@@ -498,7 +498,7 @@ static const struct snp_cpuid_table *snp_cpuid_get_table(void)
*
* Return: XSAVE area size on success, 0 otherwise.
*/
-static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
+static u32 __head snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
u64 xfeatures_found = 0;
@@ -576,8 +576,9 @@ static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpui
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}
-static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- struct cpuid_leaf *leaf)
+static int __head
+snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ struct cpuid_leaf *leaf)
{
struct cpuid_leaf leaf_hv = *leaf;
@@ -1253,7 +1254,7 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svs
__pval_terminate(pfn, action, page_size, ret, svsm_ret);
}
-static void svsm_pval_4k_page(unsigned long paddr, bool validate)
+static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
{
struct svsm_pvalidate_call *pc;
struct svsm_call call = {};
@@ -1285,12 +1286,13 @@ static void svsm_pval_4k_page(unsigned long paddr, bool validate)
ret = svsm_perform_call_protocol(&call);
if (ret)
- svsm_pval_terminate(pc, ret, call.rax_out);
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
native_local_irq_restore(flags);
}
-static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
+static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
+ bool validate)
{
int ret;
@@ -1303,7 +1305,7 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
if (ret)
- __pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
}
diff --git a/arch/x86/coco/tdx/Makefile b/arch/x86/coco/tdx/Makefile
index 2c7dcbf1458b..b3c47d3700e2 100644
--- a/arch/x86/coco/tdx/Makefile
+++ b/arch/x86/coco/tdx/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += tdx.o tdx-shared.o tdcall.o
+obj-y += debug.o tdcall.o tdx.o tdx-shared.o
diff --git a/arch/x86/coco/tdx/debug.c b/arch/x86/coco/tdx/debug.c
new file mode 100644
index 000000000000..cef847c8bb67
--- /dev/null
+++ b/arch/x86/coco/tdx/debug.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#undef pr_fmt
+#define pr_fmt(fmt) "tdx: " fmt
+
+#include <linux/array_size.h>
+#include <linux/printk.h>
+#include <asm/tdx.h>
+
+#define DEF_TDX_ATTR_NAME(_name) [TDX_ATTR_##_name##_BIT] = __stringify(_name)
+
+static __initdata const char *tdx_attributes[] = {
+ DEF_TDX_ATTR_NAME(DEBUG),
+ DEF_TDX_ATTR_NAME(HGS_PLUS_PROF),
+ DEF_TDX_ATTR_NAME(PERF_PROF),
+ DEF_TDX_ATTR_NAME(PMT_PROF),
+ DEF_TDX_ATTR_NAME(ICSSD),
+ DEF_TDX_ATTR_NAME(LASS),
+ DEF_TDX_ATTR_NAME(SEPT_VE_DISABLE),
+ DEF_TDX_ATTR_NAME(MIGRTABLE),
+ DEF_TDX_ATTR_NAME(PKS),
+ DEF_TDX_ATTR_NAME(KL),
+ DEF_TDX_ATTR_NAME(TPA),
+ DEF_TDX_ATTR_NAME(PERFMON),
+};
+
+#define DEF_TD_CTLS_NAME(_name) [TD_CTLS_##_name##_BIT] = __stringify(_name)
+
+static __initdata const char *tdcs_td_ctls[] = {
+ DEF_TD_CTLS_NAME(PENDING_VE_DISABLE),
+ DEF_TD_CTLS_NAME(ENUM_TOPOLOGY),
+ DEF_TD_CTLS_NAME(VIRT_CPUID2),
+ DEF_TD_CTLS_NAME(REDUCE_VE),
+ DEF_TD_CTLS_NAME(LOCK),
+};
+
+void __init tdx_dump_attributes(u64 td_attr)
+{
+ pr_info("Attributes:");
+
+ for (int i = 0; i < ARRAY_SIZE(tdx_attributes); i++) {
+ if (!tdx_attributes[i])
+ continue;
+ if (td_attr & BIT(i))
+ pr_cont(" %s", tdx_attributes[i]);
+ td_attr &= ~BIT(i);
+ }
+
+ if (td_attr)
+ pr_cont(" unknown:%#llx", td_attr);
+ pr_cont("\n");
+
+}
+
+void __init tdx_dump_td_ctls(u64 td_ctls)
+{
+ pr_info("TD_CTLS:");
+
+ for (int i = 0; i < ARRAY_SIZE(tdcs_td_ctls); i++) {
+ if (!tdcs_td_ctls[i])
+ continue;
+ if (td_ctls & BIT(i))
+ pr_cont(" %s", tdcs_td_ctls[i]);
+ td_ctls &= ~BIT(i);
+ }
+ if (td_ctls)
+ pr_cont(" unknown:%#llx", td_ctls);
+ pr_cont("\n");
+}
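The dump helpers above decode a bitmask into names via __stringify() and designated initializers: DEF_TDX_ATTR_NAME(DEBUG) expands to [TDX_ATTR_DEBUG_BIT] = "DEBUG". A stand-alone user-space sketch of the same decoding loop is below; only two bit names are shown, taken from the old ATTR_DEBUG (bit 0) and ATTR_SEPT_VE_DISABLE (bit 28) definitions removed in the tdx.c hunk just after this file, so it is illustrative rather than the full TDX attribute set.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const char *attr_names[] = {
	[0]  = "DEBUG",			/* was ATTR_DEBUG, BIT(0) */
	[28] = "SEPT_VE_DISABLE",	/* was ATTR_SEPT_VE_DISABLE, BIT(28) */
};

static void dump_attributes(uint64_t attr)
{
	printf("Attributes:");
	for (size_t i = 0; i < sizeof(attr_names) / sizeof(attr_names[0]); i++) {
		if (!attr_names[i])
			continue;		/* unnamed bits stay set, reported below */
		if (attr & (1ULL << i))
			printf(" %s", attr_names[i]);
		attr &= ~(1ULL << i);		/* named bits are consumed either way */
	}
	if (attr)
		printf(" unknown:%#llx", (unsigned long long)attr);
	printf("\n");
}

int main(void)
{
	/* SEPT_VE_DISABLE set, plus one bit that has no name */
	dump_attributes((1ULL << 28) | (1ULL << 40));
	return 0;
}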
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 0d9b090b4880..32809a06dab4 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -32,9 +32,6 @@
#define VE_GET_PORT_NUM(e) ((e) >> 16)
#define VE_IS_IO_STRING(e) ((e) & BIT(4))
-#define ATTR_DEBUG BIT(0)
-#define ATTR_SEPT_VE_DISABLE BIT(28)
-
/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a) ((a) >> 32)
#define TDCALL_INVALID_OPERAND 0xc0000100
@@ -200,14 +197,14 @@ static void __noreturn tdx_panic(const char *msg)
*
* TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
* controls if the guest will receive such #VE with TD attribute
- * ATTR_SEPT_VE_DISABLE.
+ * TDX_ATTR_SEPT_VE_DISABLE.
*
* Newer TDX modules allow the guest to control if it wants to receive SEPT
* violation #VEs.
*
* Check if the feature is available and disable SEPT #VE if possible.
*
- * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
+ * If the TD is allowed to disable/enable SEPT #VEs, the TDX_ATTR_SEPT_VE_DISABLE
* attribute is no longer reliable. It reflects the initial state of the
* control for the TD, but it will not be updated if someone (e.g. bootloader)
* changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
@@ -216,14 +213,14 @@ static void __noreturn tdx_panic(const char *msg)
static void disable_sept_ve(u64 td_attr)
{
const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
- bool debug = td_attr & ATTR_DEBUG;
+ bool debug = td_attr & TDX_ATTR_DEBUG;
u64 config, controls;
/* Is this TD allowed to disable SEPT #VE */
tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
/* No SEPT #VE controls for the guest: check the attribute */
- if (td_attr & ATTR_SEPT_VE_DISABLE)
+ if (td_attr & TDX_ATTR_SEPT_VE_DISABLE)
return;
/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
@@ -274,6 +271,20 @@ static void enable_cpu_topology_enumeration(void)
tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY);
}
+static void reduce_unnecessary_ve(void)
+{
+ u64 err = tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_REDUCE_VE, TD_CTLS_REDUCE_VE);
+
+ if (err == TDX_SUCCESS)
+ return;
+
+ /*
+ * Enabling REDUCE_VE includes ENUM_TOPOLOGY. Only try to
+ * enable ENUM_TOPOLOGY if REDUCE_VE was not successful.
+ */
+ enable_cpu_topology_enumeration();
+}
+
static void tdx_setup(u64 *cc_mask)
{
struct tdx_module_args args = {};
@@ -305,7 +316,8 @@ static void tdx_setup(u64 *cc_mask)
tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);
disable_sept_ve(td_attr);
- enable_cpu_topology_enumeration();
+
+ reduce_unnecessary_ve();
}
/*
@@ -1025,6 +1037,20 @@ static void tdx_kexec_finish(void)
}
}
+static __init void tdx_announce(void)
+{
+ struct tdx_module_args args = {};
+ u64 controls;
+
+ pr_info("Guest detected\n");
+
+ tdcall(TDG_VP_INFO, &args);
+ tdx_dump_attributes(args.rdx);
+
+ tdg_vm_rd(TDCS_TD_CTLS, &controls);
+ tdx_dump_td_ctls(controls);
+}
+
void __init tdx_early_init(void)
{
u64 cc_mask;
@@ -1094,5 +1120,5 @@ void __init tdx_early_init(void)
*/
x86_cpuinit.parallel_bringup = false;
- pr_info("Guest detected\n");
+ tdx_announce();
}
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 3d2e38ba5240..4757bf922075 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -492,36 +492,4 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL
Architecture: x86_64 using:
- CLMUL-NI (carry-less multiplication new instructions)
-config CRYPTO_CRC32C_INTEL
- tristate "CRC32c (SSE4.2/PCLMULQDQ)"
- depends on X86
- select CRYPTO_HASH
- help
- CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
-
- Architecture: x86 (32-bit and 64-bit) using:
- - SSE4.2 (Streaming SIMD Extensions 4.2) CRC32 instruction
- - PCLMULQDQ (carry-less multiplication)
-
-config CRYPTO_CRC32_PCLMUL
- tristate "CRC32 (PCLMULQDQ)"
- depends on X86
- select CRYPTO_HASH
- select CRC32
- help
- CRC32 CRC algorithm (IEEE 802.3)
-
- Architecture: x86 (32-bit and 64-bit) using:
- - PCLMULQDQ (carry-less multiplication)
-
-config CRYPTO_CRCT10DIF_PCLMUL
- tristate "CRCT10DIF (PCLMULQDQ)"
- depends on X86 && 64BIT && CRC_T10DIF
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- Architecture: x86_64 using:
- - PCLMULQDQ (carry-less multiplication)
-
endmenu
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 53b4a277809e..07b00bfca64b 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -75,16 +75,6 @@ ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o
polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o
-obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
-crc32c-intel-y := crc32c-intel_glue.o
-crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
-
-obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
-crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
-
-obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
-crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
-
obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
targets += poly1305-x86_64-cryptogams.S
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index c19d8e3d96a3..01fa568dc5fc 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -240,7 +240,6 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
.cra_priority = 400,
.cra_name = "__aegis128",
diff --git a/arch/x86/crypto/aes-gcm-avx10-x86_64.S b/arch/x86/crypto/aes-gcm-avx10-x86_64.S
index 97e0ee515fc5..02ee11083d4f 100644
--- a/arch/x86/crypto/aes-gcm-avx10-x86_64.S
+++ b/arch/x86/crypto/aes-gcm-avx10-x86_64.S
@@ -88,7 +88,7 @@
// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
- .octa 0x000102030405060708090a0b0c0d0e0f
+ .octa 0x000102030405060708090a0b0c0d0e0f
// This is the GHASH reducing polynomial without its constant term, i.e.
// x^128 + x^7 + x^2 + x, represented using the backwards mapping
@@ -384,8 +384,8 @@
vpshufd $0xd3, H_CUR_XMM, %xmm0
vpsrad $31, %xmm0, %xmm0
vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
- vpand .Lgfpoly_and_internal_carrybit(%rip), %xmm0, %xmm0
- vpxor %xmm0, H_CUR_XMM, H_CUR_XMM
+ // H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
+ vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM
// Load the gfpoly constant.
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY
@@ -562,6 +562,32 @@
vpxord RNDKEY0, V3, V3
.endm
+// Do the last AES round for four vectors of counter blocks V0-V3, XOR source
+// data with the resulting keystream, and write the result to DST and
+// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
+.macro _aesenclast_and_xor_4x
+ // XOR the source data with the last round key, saving the result in
+ // GHASHDATA[0-3]. This reduces latency by taking advantage of the
+ // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
+ vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0
+ vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1
+ vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2
+ vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3
+
+ // Do the last AES round. This handles the XOR with the source data
+ // too, as per the optimization described above.
+ vaesenclast GHASHDATA0, V0, GHASHDATA0
+ vaesenclast GHASHDATA1, V1, GHASHDATA1
+ vaesenclast GHASHDATA2, V2, GHASHDATA2
+ vaesenclast GHASHDATA3, V3, GHASHDATA3
+
+ // Store the en/decrypted data to DST.
+ vmovdqu8 GHASHDATA0, 0*VL(DST)
+ vmovdqu8 GHASHDATA1, 1*VL(DST)
+ vmovdqu8 GHASHDATA2, 2*VL(DST)
+ vmovdqu8 GHASHDATA3, 3*VL(DST)
+.endm
+
// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
// const u32 le_ctr[4], u8 ghash_acc[16],
// const u8 *src, u8 *dst, int datalen);
@@ -640,7 +666,7 @@
// LE_CTR contains the next set of little-endian counter blocks.
.set LE_CTR, V12
- // RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-5] contain cached AES round keys,
+ // RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
.set RNDKEY0, V13
@@ -650,15 +676,10 @@
.set RNDKEY_M7, V17
.set RNDKEY_M6, V18
.set RNDKEY_M5, V19
-
- // RNDKEYLAST[0-3] temporarily store the last AES round key XOR'd with
- // the corresponding block of source data. This is useful because
- // vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), and key ^ b can
- // be computed in parallel with the AES rounds.
- .set RNDKEYLAST0, V20
- .set RNDKEYLAST1, V21
- .set RNDKEYLAST2, V22
- .set RNDKEYLAST3, V23
+ .set RNDKEY_M4, V20
+ .set RNDKEY_M3, V21
+ .set RNDKEY_M2, V22
+ .set RNDKEY_M1, V23
// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
// cannot coincide with anything used for AES encryption, since for
@@ -713,7 +734,7 @@
// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
// loop and also ensures that at least one write always occurs to
// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
- sub $4*VL, DATALEN
+ add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32
jl .Lcrypt_loop_4x_done\@
// Load powers of the hash key.
@@ -748,26 +769,15 @@
add $16, %rax
cmp %rax, RNDKEYLAST_PTR
jne 1b
- vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
- vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
- vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
- vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
- vaesenclast RNDKEYLAST0, V0, GHASHDATA0
- vaesenclast RNDKEYLAST1, V1, GHASHDATA1
- vaesenclast RNDKEYLAST2, V2, GHASHDATA2
- vaesenclast RNDKEYLAST3, V3, GHASHDATA3
- vmovdqu8 GHASHDATA0, 0*VL(DST)
- vmovdqu8 GHASHDATA1, 1*VL(DST)
- vmovdqu8 GHASHDATA2, 2*VL(DST)
- vmovdqu8 GHASHDATA3, 3*VL(DST)
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, DATALEN
+ _aesenclast_and_xor_4x
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, DATALEN
jl .Lghash_last_ciphertext_4x\@
.endif
// Cache as many additional AES round keys as possible.
-.irp i, 9,8,7,6,5
+.irp i, 9,8,7,6,5,4,3,2,1
vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr
@@ -799,50 +809,17 @@
_vaesenc_4x RNDKEY
128:
- // XOR the source data with the last round key, saving the result in
- // RNDKEYLAST[0-3]. This reduces latency by taking advantage of the
- // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
-.if \enc
- vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
- vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
- vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
- vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
-.else
- vpxord GHASHDATA0, RNDKEYLAST, RNDKEYLAST0
- vpxord GHASHDATA1, RNDKEYLAST, RNDKEYLAST1
- vpxord GHASHDATA2, RNDKEYLAST, RNDKEYLAST2
- vpxord GHASHDATA3, RNDKEYLAST, RNDKEYLAST3
-.endif
-
// Finish the AES encryption of the counter blocks in V0-V3, interleaved
// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
-.irp i, 9,8,7,6,5
+.irp i, 9,8,7,6,5,4,3,2,1
+ _ghash_step_4x (9 - \i)
_vaesenc_4x RNDKEY_M\i
- _ghash_step_4x (9 - \i)
-.endr
-.irp i, 4,3,2,1
- vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY
- _vaesenc_4x RNDKEY
- _ghash_step_4x (9 - \i)
.endr
_ghash_step_4x 9
-
- // Do the last AES round. This handles the XOR with the source data
- // too, as per the optimization described above.
- vaesenclast RNDKEYLAST0, V0, GHASHDATA0
- vaesenclast RNDKEYLAST1, V1, GHASHDATA1
- vaesenclast RNDKEYLAST2, V2, GHASHDATA2
- vaesenclast RNDKEYLAST3, V3, GHASHDATA3
-
- // Store the en/decrypted data to DST.
- vmovdqu8 GHASHDATA0, 0*VL(DST)
- vmovdqu8 GHASHDATA1, 1*VL(DST)
- vmovdqu8 GHASHDATA2, 2*VL(DST)
- vmovdqu8 GHASHDATA3, 3*VL(DST)
-
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, DATALEN
+ _aesenclast_and_xor_4x
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, DATALEN
jge .Lcrypt_loop_4x\@
.if \enc
@@ -856,7 +833,7 @@
.Lcrypt_loop_4x_done\@:
// Undo the extra subtraction by 4*VL and check whether data remains.
- add $4*VL, DATALEN
+ sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32
jz .Ldone\@
// The data length isn't a multiple of 4*VL. Process the remaining data
@@ -940,7 +917,7 @@
// GHASH. However, any such blocks are all-zeroes, and the values that
// they're multiplied with are also all-zeroes. Therefore they just add
// 0 * 0 = 0 to the final GHASH result, which makes no difference.
- vmovdqu8 (POWERS_PTR), H_POW1
+ vmovdqu8 (POWERS_PTR), H_POW1
.if \enc
vmovdqu8 V0, V1{%k1}{z}
.endif
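Both the GCM rework above and the XTS rework below fold the final data XOR into the last round key, relying on the identity vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a); it holds because the last AES round ends in AddRoundKey, which is an XOR (and likewise for vaesdeclast). A quick user-space check of that identity with the AES-NI intrinsics, purely illustrative and not part of the patch (build with gcc -maes):

#include <wmmintrin.h>	/* _mm_aesenclast_si128 */
#include <emmintrin.h>	/* _mm_set_epi32, _mm_xor_si128 */
#include <assert.h>
#include <string.h>

int main(void)
{
	__m128i a   = _mm_set_epi32(0x01234567, 0x09abcdef, 0x0f1e2d3c, 0x4b5a6978);
	__m128i key = _mm_set_epi32(0x0eadbeef, 0x0afebabe, 0x12345678, 0x1abcdef0);
	__m128i b   = _mm_set_epi32(0x11111111, 0x22222222, 0x33333333, 0x44444444);

	/* vaesenclast(key, a) ^ b */
	__m128i lhs = _mm_xor_si128(_mm_aesenclast_si128(a, key), b);
	/* vaesenclast(key ^ b, a) */
	__m128i rhs = _mm_aesenclast_si128(a, _mm_xor_si128(key, b));

	assert(memcmp(&lhs, &rhs, sizeof(lhs)) == 0);
	return 0;
}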
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
index 48f97b79f7a9..8a3e23fbcf85 100644
--- a/arch/x86/crypto/aes-xts-avx-x86_64.S
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -80,22 +80,6 @@
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
.text
-// Function parameters
-.set KEY, %rdi // Initially points to crypto_aes_ctx, then is
- // advanced to point to 7th-from-last round key
-.set SRC, %rsi // Pointer to next source data
-.set DST, %rdx // Pointer to next destination data
-.set LEN, %ecx // Remaining length in bytes
-.set LEN8, %cl
-.set LEN64, %rcx
-.set TWEAK, %r8 // Pointer to next tweak
-
-// %rax holds the AES key length in bytes.
-.set KEYLEN, %eax
-.set KEYLEN64, %rax
-
-// %r9-r11 are available as temporaries.
-
.macro _define_Vi i
.if VL == 16
.set V\i, %xmm\i
@@ -112,41 +96,31 @@
// Define register aliases V0-V15, or V0-V31 if all 32 SIMD registers
// are available, that map to the xmm, ymm, or zmm registers according
// to the selected Vector Length (VL).
- _define_Vi 0
- _define_Vi 1
- _define_Vi 2
- _define_Vi 3
- _define_Vi 4
- _define_Vi 5
- _define_Vi 6
- _define_Vi 7
- _define_Vi 8
- _define_Vi 9
- _define_Vi 10
- _define_Vi 11
- _define_Vi 12
- _define_Vi 13
- _define_Vi 14
- _define_Vi 15
+.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ _define_Vi \i
+.endr
.if USE_AVX10
- _define_Vi 16
- _define_Vi 17
- _define_Vi 18
- _define_Vi 19
- _define_Vi 20
- _define_Vi 21
- _define_Vi 22
- _define_Vi 23
- _define_Vi 24
- _define_Vi 25
- _define_Vi 26
- _define_Vi 27
- _define_Vi 28
- _define_Vi 29
- _define_Vi 30
- _define_Vi 31
+.irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ _define_Vi \i
+.endr
.endif
+ // Function parameters
+ .set KEY, %rdi // Initially points to crypto_aes_ctx, then is
+ // advanced to point to 7th-from-last round key
+ .set SRC, %rsi // Pointer to next source data
+ .set DST, %rdx // Pointer to next destination data
+ .set LEN, %ecx // Remaining length in bytes
+ .set LEN8, %cl
+ .set LEN64, %rcx
+ .set TWEAK, %r8 // Pointer to next tweak
+
+ // %rax holds the AES key length in bytes.
+ .set KEYLEN, %eax
+ .set KEYLEN64, %rax
+
+ // %r9-r11 are available as temporaries.
+
// V0-V3 hold the data blocks during the main loop, or temporary values
// otherwise. V4-V5 hold temporary values.
@@ -214,6 +188,7 @@
.endm
// Move a vector between memory and a register.
+// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -234,11 +209,12 @@
.endm
// XOR two vectors together.
+// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
-.if USE_AVX10
- vpxord \src1, \src2, \dst
-.else
+.if VL < 64
vpxor \src1, \src2, \dst
+.else
+ vpxord \src1, \src2, \dst
.endif
.endm
@@ -259,8 +235,12 @@
vpshufd $0x13, \src, \tmp
vpaddq \src, \src, \dst
vpsrad $31, \tmp, \tmp
+.if USE_AVX10
+ vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst
+.else
vpand GF_POLY_XMM, \tmp, \tmp
vpxor \tmp, \dst, \dst
+.endif
.endm
// Given the XTS tweak(s) in the vector \src, compute the next vector of
@@ -369,9 +349,14 @@
// Do one step in computing the next set of tweaks using the VPCLMULQDQ method
// (the same method _next_tweakvec uses for VL > 16). This means multiplying
-// each tweak by x^(4*VL/16) independently. Since 4*VL/16 is a multiple of 8
-// when VL > 16 (which it is here), the needed shift amounts are byte-aligned,
-// which allows the use of vpsrldq and vpslldq to do 128-bit wide shifts.
+// each tweak by x^(4*VL/16) independently.
+//
+// Since 4*VL/16 is a multiple of 8 when VL > 16 (which it is here), the needed
+// shift amounts are byte-aligned, which allows the use of vpsrldq and vpslldq
+// to do 128-bit wide shifts. The 128-bit left shift (vpslldq) saves
+// instructions directly. The 128-bit right shift (vpsrldq) performs better
+// than a 64-bit right shift on Intel CPUs in the context where it is used here,
+// because it runs on a different execution port from the AES instructions.
.macro _tweak_step_pclmul i
.if \i == 0
vpsrldq $(128 - 4*VL/16) / 8, TWEAK0, NEXT_TWEAK0
@@ -406,7 +391,7 @@
// \i that include at least 0 through 19, then 1000 which signals the last step.
//
// This is used to interleave the computation of the next set of tweaks with the
-// AES en/decryptions, which increases performance in some cases.
+// AES en/decryptions, which increases performance in some cases. Clobbers V5.
.macro _tweak_step i
.if VL == 16
_tweak_step_mulx \i
@@ -443,9 +428,10 @@
// the last round needs different instructions.
//
// An alternative approach would be to roll up all the round loops. We
- // don't do that because it isn't compatible with caching the round keys
- // in registers which we do when possible (see below), and also because
- // it seems unwise to rely *too* heavily on the CPU's branch predictor.
+ // don't do that because (a) it isn't compatible with caching the round
+ // keys in registers which we do when possible (see below), (b) we
+ // interleave the AES rounds with the XTS tweak computation, and (c) it
+ // seems unwise to rely *too* heavily on the CPU's branch predictor.
lea OFFS-16(KEY, KEYLEN64, 4), KEY
// If all 32 SIMD registers are available, cache all the round keys.
@@ -472,90 +458,94 @@
.endif
.endm
-// Do a single round of AES encryption (if \enc==1) or decryption (if \enc==0)
-// on the block(s) in \data using the round key(s) in \key. The register length
-// determines the number of AES blocks en/decrypted.
-.macro _vaes enc, last, key, data
+// Do a single non-last round of AES encryption (if \enc==1) or decryption (if
+// \enc==0) on the block(s) in \data using the round key(s) in \key. The
+// register length determines the number of AES blocks en/decrypted.
+.macro _vaes enc, key, data
.if \enc
-.if \last
- vaesenclast \key, \data, \data
-.else
vaesenc \key, \data, \data
-.endif
-.else
-.if \last
- vaesdeclast \key, \data, \data
.else
vaesdec \key, \data, \data
.endif
+.endm
+
+// Same as _vaes, but does the last round.
+.macro _vaeslast enc, key, data
+.if \enc
+ vaesenclast \key, \data, \data
+.else
+ vaesdeclast \key, \data, \data
.endif
.endm
-// Do a single round of AES en/decryption on the block(s) in \data, using the
-// same key for all block(s). The round key is loaded from the appropriate
-// register or memory location for round \i. May clobber V4.
-.macro _vaes_1x enc, last, i, xmm_suffix, data
+// Do a single non-last round of AES en/decryption on the block(s) in \data,
+// using the same key for all block(s). The round key is loaded from the
+// appropriate register or memory location for round \i. May clobber \tmp.
+.macro _vaes_1x enc, i, xmm_suffix, data, tmp
.if USE_AVX10
- _vaes \enc, \last, KEY\i\xmm_suffix, \data
+ _vaes \enc, KEY\i\xmm_suffix, \data
.else
.ifnb \xmm_suffix
- _vaes \enc, \last, (\i-7)*16(KEY), \data
+ _vaes \enc, (\i-7)*16(KEY), \data
.else
- _vbroadcast128 (\i-7)*16(KEY), V4
- _vaes \enc, \last, V4, \data
+ _vbroadcast128 (\i-7)*16(KEY), \tmp
+ _vaes \enc, \tmp, \data
.endif
.endif
.endm
-// Do a single round of AES en/decryption on the blocks in registers V0-V3,
-// using the same key for all blocks. The round key is loaded from the
+// Do a single non-last round of AES en/decryption on the blocks in registers
+// V0-V3, using the same key for all blocks. The round key is loaded from the
// appropriate register or memory location for round \i. In addition, does two
-// steps of the computation of the next set of tweaks. May clobber V4.
-.macro _vaes_4x enc, last, i
+// steps of the computation of the next set of tweaks. May clobber V4 and V5.
+.macro _vaes_4x enc, i
.if USE_AVX10
_tweak_step (2*(\i-5))
- _vaes \enc, \last, KEY\i, V0
- _vaes \enc, \last, KEY\i, V1
+ _vaes \enc, KEY\i, V0
+ _vaes \enc, KEY\i, V1
_tweak_step (2*(\i-5) + 1)
- _vaes \enc, \last, KEY\i, V2
- _vaes \enc, \last, KEY\i, V3
+ _vaes \enc, KEY\i, V2
+ _vaes \enc, KEY\i, V3
.else
_vbroadcast128 (\i-7)*16(KEY), V4
_tweak_step (2*(\i-5))
- _vaes \enc, \last, V4, V0
- _vaes \enc, \last, V4, V1
+ _vaes \enc, V4, V0
+ _vaes \enc, V4, V1
_tweak_step (2*(\i-5) + 1)
- _vaes \enc, \last, V4, V2
- _vaes \enc, \last, V4, V3
+ _vaes \enc, V4, V2
+ _vaes \enc, V4, V3
.endif
.endm
// Do tweaked AES en/decryption (i.e., XOR with \tweak, then AES en/decrypt,
// then XOR with \tweak again) of the block(s) in \data. To process a single
// block, use xmm registers and set \xmm_suffix=_XMM. To process a vector of
-// length VL, use V* registers and leave \xmm_suffix empty. May clobber V4.
-.macro _aes_crypt enc, xmm_suffix, tweak, data
+// length VL, use V* registers and leave \xmm_suffix empty. Clobbers \tmp.
+.macro _aes_crypt enc, xmm_suffix, tweak, data, tmp
_xor3 KEY0\xmm_suffix, \tweak, \data
cmp $24, KEYLEN
jl .Laes128\@
je .Laes192\@
- _vaes_1x \enc, 0, 1, \xmm_suffix, \data
- _vaes_1x \enc, 0, 2, \xmm_suffix, \data
+ _vaes_1x \enc, 1, \xmm_suffix, \data, tmp=\tmp
+ _vaes_1x \enc, 2, \xmm_suffix, \data, tmp=\tmp
.Laes192\@:
- _vaes_1x \enc, 0, 3, \xmm_suffix, \data
- _vaes_1x \enc, 0, 4, \xmm_suffix, \data
+ _vaes_1x \enc, 3, \xmm_suffix, \data, tmp=\tmp
+ _vaes_1x \enc, 4, \xmm_suffix, \data, tmp=\tmp
.Laes128\@:
- _vaes_1x \enc, 0, 5, \xmm_suffix, \data
- _vaes_1x \enc, 0, 6, \xmm_suffix, \data
- _vaes_1x \enc, 0, 7, \xmm_suffix, \data
- _vaes_1x \enc, 0, 8, \xmm_suffix, \data
- _vaes_1x \enc, 0, 9, \xmm_suffix, \data
- _vaes_1x \enc, 0, 10, \xmm_suffix, \data
- _vaes_1x \enc, 0, 11, \xmm_suffix, \data
- _vaes_1x \enc, 0, 12, \xmm_suffix, \data
- _vaes_1x \enc, 0, 13, \xmm_suffix, \data
- _vaes_1x \enc, 1, 14, \xmm_suffix, \data
- _vpxor \tweak, \data, \data
+.irp i, 5,6,7,8,9,10,11,12,13
+ _vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp
+.endr
+.if USE_AVX10
+ vpxord KEY14\xmm_suffix, \tweak, \tmp
+.else
+.ifnb \xmm_suffix
+ vpxor 7*16(KEY), \tweak, \tmp
+.else
+ _vbroadcast128 7*16(KEY), \tmp
+ vpxor \tweak, \tmp, \tmp
+.endif
+.endif
+ _vaeslast \enc, \tmp, \data
.endm
.macro _aes_xts_crypt enc
@@ -581,7 +571,7 @@
// Compute the first set of tweaks TWEAK[0-3].
_compute_first_set_of_tweaks
- sub $4*VL, LEN
+ add $-4*VL, LEN // shorter than 'sub 4*VL' when VL=32
jl .Lhandle_remainder\@
.Lmain_loop\@:
@@ -589,10 +579,10 @@
// XOR each source block with its tweak and the zero-th round key.
.if USE_AVX10
- vmovdqu8 0*VL(SRC), V0
- vmovdqu8 1*VL(SRC), V1
- vmovdqu8 2*VL(SRC), V2
- vmovdqu8 3*VL(SRC), V3
+ _vmovdqu 0*VL(SRC), V0
+ _vmovdqu 1*VL(SRC), V1
+ _vmovdqu 2*VL(SRC), V2
+ _vmovdqu 3*VL(SRC), V3
vpternlogd $0x96, TWEAK0, KEY0, V0
vpternlogd $0x96, TWEAK1, KEY0, V1
vpternlogd $0x96, TWEAK2, KEY0, V2
@@ -612,28 +602,43 @@
je .Laes192\@
// Do all the AES rounds on the data blocks, interleaved with
// the computation of the next set of tweaks.
- _vaes_4x \enc, 0, 1
- _vaes_4x \enc, 0, 2
+ _vaes_4x \enc, 1
+ _vaes_4x \enc, 2
.Laes192\@:
- _vaes_4x \enc, 0, 3
- _vaes_4x \enc, 0, 4
+ _vaes_4x \enc, 3
+ _vaes_4x \enc, 4
.Laes128\@:
- _vaes_4x \enc, 0, 5
- _vaes_4x \enc, 0, 6
- _vaes_4x \enc, 0, 7
- _vaes_4x \enc, 0, 8
- _vaes_4x \enc, 0, 9
- _vaes_4x \enc, 0, 10
- _vaes_4x \enc, 0, 11
- _vaes_4x \enc, 0, 12
- _vaes_4x \enc, 0, 13
- _vaes_4x \enc, 1, 14
-
- // XOR in the tweaks again.
- _vpxor TWEAK0, V0, V0
- _vpxor TWEAK1, V1, V1
- _vpxor TWEAK2, V2, V2
- _vpxor TWEAK3, V3, V3
+.irp i, 5,6,7,8,9,10,11,12,13
+ _vaes_4x \enc, \i
+.endr
+ // Do the last AES round, then XOR the results with the tweaks again.
+ // Reduce latency by doing the XOR before the vaesenclast, utilizing the
+ // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a)
+ // (and likewise for vaesdeclast).
+.if USE_AVX10
+ _tweak_step 18
+ _tweak_step 19
+ vpxord TWEAK0, KEY14, V4
+ vpxord TWEAK1, KEY14, V5
+ _vaeslast \enc, V4, V0
+ _vaeslast \enc, V5, V1
+ vpxord TWEAK2, KEY14, V4
+ vpxord TWEAK3, KEY14, V5
+ _vaeslast \enc, V4, V2
+ _vaeslast \enc, V5, V3
+.else
+ _vbroadcast128 7*16(KEY), V4
+ _tweak_step 18 // uses V5
+ _tweak_step 19 // uses V5
+ vpxor TWEAK0, V4, V5
+ _vaeslast \enc, V5, V0
+ vpxor TWEAK1, V4, V5
+ _vaeslast \enc, V5, V1
+ vpxor TWEAK2, V4, V5
+ vpxor TWEAK3, V4, V4
+ _vaeslast \enc, V5, V2
+ _vaeslast \enc, V4, V3
+.endif
// Store the destination blocks.
_vmovdqu V0, 0*VL(DST)
@@ -644,9 +649,9 @@
// Finish computing the next set of tweaks.
_tweak_step 1000
- add $4*VL, SRC
- add $4*VL, DST
- sub $4*VL, LEN
+ sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
+ sub $-4*VL, DST
+ add $-4*VL, LEN
jge .Lmain_loop\@
// Check for the uncommon case where the data length isn't a multiple of
@@ -670,7 +675,7 @@
jl .Lvec_at_a_time_done\@
.Lvec_at_a_time\@:
_vmovdqu (SRC), V0
- _aes_crypt \enc, , TWEAK0, V0
+ _aes_crypt \enc, , TWEAK0, V0, tmp=V1
_vmovdqu V0, (DST)
_next_tweakvec TWEAK0, V0, V1, TWEAK0
add $VL, SRC
@@ -687,7 +692,7 @@
jl .Lblock_at_a_time_done\@
.Lblock_at_a_time\@:
vmovdqu (SRC), %xmm0
- _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
_next_tweak TWEAK0_XMM, %xmm0, TWEAK0_XMM
add $16, SRC
@@ -715,7 +720,7 @@
// Do it now by advancing the tweak and decrypting the last full block.
_next_tweak TWEAK0_XMM, %xmm0, TWEAK1_XMM
vmovdqu (SRC), %xmm0
- _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
.endif
.if USE_AVX10
@@ -758,47 +763,49 @@
vpblendvb %xmm3, %xmm0, %xmm1, %xmm0
.endif
// En/decrypt again and store the last full block.
- _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
+ _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
jmp .Ldone\@
.endm
// void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
// u8 iv[AES_BLOCK_SIZE]);
+//
+// Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes
+// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10.
SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
- vmovdqu (%rsi), %xmm0
- vpxor (%rdi), %xmm0, %xmm0
- movl 480(%rdi), %eax // AES key length
- lea -16(%rdi, %rax, 4), %rdi
- cmp $24, %eax
+ .set TWEAK_KEY, %rdi
+ .set IV, %rsi
+ .set KEYLEN, %eax
+ .set KEYLEN64, %rax
+
+ vmovdqu (IV), %xmm0
+ vpxor (TWEAK_KEY), %xmm0, %xmm0
+ movl 480(TWEAK_KEY), KEYLEN
+ lea -16(TWEAK_KEY, KEYLEN64, 4), TWEAK_KEY
+ cmp $24, KEYLEN
jl .Lencrypt_iv_aes128
je .Lencrypt_iv_aes192
- vaesenc -6*16(%rdi), %xmm0, %xmm0
- vaesenc -5*16(%rdi), %xmm0, %xmm0
+ vaesenc -6*16(TWEAK_KEY), %xmm0, %xmm0
+ vaesenc -5*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes192:
- vaesenc -4*16(%rdi), %xmm0, %xmm0
- vaesenc -3*16(%rdi), %xmm0, %xmm0
+ vaesenc -4*16(TWEAK_KEY), %xmm0, %xmm0
+ vaesenc -3*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes128:
- vaesenc -2*16(%rdi), %xmm0, %xmm0
- vaesenc -1*16(%rdi), %xmm0, %xmm0
- vaesenc 0*16(%rdi), %xmm0, %xmm0
- vaesenc 1*16(%rdi), %xmm0, %xmm0
- vaesenc 2*16(%rdi), %xmm0, %xmm0
- vaesenc 3*16(%rdi), %xmm0, %xmm0
- vaesenc 4*16(%rdi), %xmm0, %xmm0
- vaesenc 5*16(%rdi), %xmm0, %xmm0
- vaesenc 6*16(%rdi), %xmm0, %xmm0
- vaesenclast 7*16(%rdi), %xmm0, %xmm0
- vmovdqu %xmm0, (%rsi)
+.irp i, -2,-1,0,1,2,3,4,5,6
+ vaesenc \i*16(TWEAK_KEY), %xmm0, %xmm0
+.endr
+ vaesenclast 7*16(TWEAK_KEY), %xmm0, %xmm0
+ vmovdqu %xmm0, (IV)
RET
SYM_FUNC_END(aes_xts_encrypt_iv)
// Below are the actual AES-XTS encryption and decryption functions,
// instantiated from the above macro. They all have the following prototype:
//
-// void (*xts_asm_func)(const struct crypto_aes_ctx *key,
-// const u8 *src, u8 *dst, unsigned int len,
-// u8 tweak[AES_BLOCK_SIZE]);
+// void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
+// const u8 *src, u8 *dst, int len,
+// u8 tweak[AES_BLOCK_SIZE]);
//
// |key| is the data key. |tweak| contains the next tweak; the encryption of
// the original IV with the tweak key was already done. This function supports
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index fbf43482e1f5..11e95fc62636 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -505,7 +505,7 @@ static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key,
typedef void (*xts_encrypt_iv_func)(const struct crypto_aes_ctx *tweak_key,
u8 iv[AES_BLOCK_SIZE]);
typedef void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE]);
/* This handles cases where the source and/or destination span pages. */
@@ -624,14 +624,14 @@ static void aesni_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
}
static void aesni_xts_encrypt(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_enc(key, dst, src, len, tweak);
}
static void aesni_xts_decrypt(const struct crypto_aes_ctx *key,
- const u8 *src, u8 *dst, unsigned int len,
+ const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_dec(key, dst, src, len, tweak);
@@ -790,10 +790,10 @@ asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
\
asmlinkage void \
aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
- u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+ u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
asmlinkage void \
aes_xts_decrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
- u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
+ u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
\
static int xts_encrypt_##suffix(struct skcipher_request *req) \
{ \
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 552f2df0643f..26c5f2ee5d10 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -94,7 +94,6 @@ static struct crypto_alg bf_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index f110708c8038..3bd37d664121 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1313,7 +1313,6 @@ static struct crypto_alg camellia_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
deleted file mode 100644
index 9f5e342b9845..000000000000
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/crc32.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/cpufeatures.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-#define PCLMUL_MIN_LEN 64L /* minimum size of buffer
- * for crc32_pclmul_le_16 */
-#define SCALE_F 16L /* size of xmm register */
-#define SCALE_F_MASK (SCALE_F - 1)
-
-u32 crc32_pclmul_le_16(unsigned char const *buffer, size_t len, u32 crc32);
-
-static u32 __attribute__((pure))
- crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
-{
- unsigned int iquotient;
- unsigned int iremainder;
- unsigned int prealign;
-
- if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
- return crc32_le(crc, p, len);
-
- if ((long)p & SCALE_F_MASK) {
- /* align p to 16 byte */
- prealign = SCALE_F - ((long)p & SCALE_F_MASK);
-
- crc = crc32_le(crc, p, prealign);
- len -= prealign;
- p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
- ~SCALE_F_MASK);
- }
- iquotient = len & (~SCALE_F_MASK);
- iremainder = len & SCALE_F_MASK;
-
- kernel_fpu_begin();
- crc = crc32_pclmul_le_16(p, iquotient, crc);
- kernel_fpu_end();
-
- if (iremainder)
- crc = crc32_le(crc, p + iquotient, iremainder);
-
- return crc;
-}
-
-static int crc32_pclmul_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 0;
-
- return 0;
-}
-
-static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32_pclmul_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = *mctx;
-
- return 0;
-}
-
-static int crc32_pclmul_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = crc32_pclmul_le(*crcp, data, len);
- return 0;
-}
-
-/* No final XOR 0xFFFFFFFF, like crc32_le */
-static int __crc32_pclmul_finup(u32 *crcp, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__le32 *)out = cpu_to_le32(crc32_pclmul_le(*crcp, data, len));
- return 0;
-}
-
-static int crc32_pclmul_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32_pclmul_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32_pclmul_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *(__le32 *)out = cpu_to_le32p(crcp);
- return 0;
-}
-
-static int crc32_pclmul_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32_pclmul_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-
-static struct shash_alg alg = {
- .setkey = crc32_pclmul_setkey,
- .init = crc32_pclmul_init,
- .update = crc32_pclmul_update,
- .final = crc32_pclmul_final,
- .finup = crc32_pclmul_finup,
- .digest = crc32_pclmul_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "crc32-pclmul",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = crc32_pclmul_cra_init,
- }
-};
-
-static const struct x86_cpu_id crc32pclmul_cpu_id[] = {
- X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, crc32pclmul_cpu_id);
-
-
-static int __init crc32_pclmul_mod_init(void)
-{
-
- if (!x86_match_cpu(crc32pclmul_cpu_id)) {
- pr_info("PCLMULQDQ-NI instructions are not detected.\n");
- return -ENODEV;
- }
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc32_pclmul_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc32_pclmul_mod_init);
-module_exit(crc32_pclmul_mod_fini);
-
-MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
-MODULE_DESCRIPTION("CRC32 algorithm (IEEE 802.3) accelerated with PCLMULQDQ");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("crc32");
-MODULE_ALIAS_CRYPTO("crc32-pclmul");
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
deleted file mode 100644
index 52c5d47ef5a1..000000000000
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Using hardware provided CRC32 instruction to accelerate the CRC32 disposal.
- * CRC32C polynomial:0x1EDC6F41(BE)/0x82F63B78(LE)
- * CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
- * http://www.intel.com/products/processor/manuals/
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 2A: Instruction Set Reference, A-M
- *
- * Copyright (C) 2008 Intel Corporation
- * Authors: Austin Zhang <austin_zhang@linux.intel.com>
- * Kent Liu <kent.liu@intel.com>
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/cpufeatures.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-#define SCALE_F sizeof(unsigned long)
-
-#ifdef CONFIG_X86_64
-#define CRC32_INST "crc32q %1, %q0"
-#else
-#define CRC32_INST "crc32l %1, %0"
-#endif
-
-#ifdef CONFIG_X86_64
-/*
- * use carryless multiply version of crc32c when buffer
- * size is >= 512 to account
- * for fpu state save/restore overhead.
- */
-#define CRC32C_PCL_BREAKEVEN 512
-
-asmlinkage unsigned int crc_pcl(const u8 *buffer, unsigned int len,
- unsigned int crc_init);
-#endif /* CONFIG_X86_64 */
-
-static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
-{
- while (length--) {
- asm("crc32b %1, %0"
- : "+r" (crc) : "rm" (*data));
- data++;
- }
-
- return crc;
-}
-
-static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
-{
- unsigned int iquotient = len / SCALE_F;
- unsigned int iremainder = len % SCALE_F;
- unsigned long *ptmp = (unsigned long *)p;
-
- while (iquotient--) {
- asm(CRC32_INST
- : "+r" (crc) : "rm" (*ptmp));
- ptmp++;
- }
-
- if (iremainder)
- crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp,
- iremainder);
-
- return crc;
-}
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32c_intel_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = *mctx;
-
- return 0;
-}
-
-static int crc32c_intel_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *crcp = crc32c_intel_le_hw(*crcp, data, len);
- return 0;
-}
-
-static int __crc32c_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__le32 *)out = ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
- return 0;
-}
-
-static int crc32c_intel_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_intel_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32c_intel_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- *(__le32 *)out = ~cpu_to_le32p(crcp);
- return 0;
-}
-
-static int crc32c_intel_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-
-static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
-
- return 0;
-}
-
-#ifdef CONFIG_X86_64
-static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *crcp = shash_desc_ctx(desc);
-
- /*
- * use faster PCL version if datasize is large enough to
- * overcome kernel fpu state save/restore overhead
- */
- if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
- kernel_fpu_begin();
- *crcp = crc_pcl(data, len, *crcp);
- kernel_fpu_end();
- } else
- *crcp = crc32c_intel_le_hw(*crcp, data, len);
- return 0;
-}
-
-static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
- u8 *out)
-{
- if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
- kernel_fpu_begin();
- *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
- kernel_fpu_end();
- } else
- *(__le32 *)out =
- ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
- return 0;
-}
-
-static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-#endif /* CONFIG_X86_64 */
-
-static struct shash_alg alg = {
- .setkey = crc32c_intel_setkey,
- .init = crc32c_intel_init,
- .update = crc32c_intel_update,
- .final = crc32c_intel_final,
- .finup = crc32c_intel_finup,
- .digest = crc32c_intel_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "crc32c-intel",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = crc32c_intel_cra_init,
- }
-};
-
-static const struct x86_cpu_id crc32c_cpu_id[] = {
- X86_MATCH_FEATURE(X86_FEATURE_XMM4_2, NULL),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);
-
-static int __init crc32c_intel_mod_init(void)
-{
- if (!x86_match_cpu(crc32c_cpu_id))
- return -ENODEV;
-#ifdef CONFIG_X86_64
- if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
- alg.update = crc32c_pcl_intel_update;
- alg.finup = crc32c_pcl_intel_finup;
- alg.digest = crc32c_pcl_intel_digest;
- }
-#endif
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc32c_intel_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc32c_intel_mod_init);
-module_exit(crc32c_intel_mod_fini);
-
-MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>");
-MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("crc32c");
-MODULE_ALIAS_CRYPTO("crc32c-intel");
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 71291d5af9f4..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
- *
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <asm/cpufeatures.h>
-#include <asm/cpu_device_id.h>
-#include <asm/simd.h>
-
-asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- if (length >= 16 && crypto_simd_usable()) {
- kernel_fpu_begin();
- ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
- kernel_fpu_end();
- } else
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
-{
- if (len >= 16 && crypto_simd_usable()) {
- kernel_fpu_begin();
- *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
- kernel_fpu_end();
- } else
- *(__u16 *)out = crc_t10dif_generic(crc, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crct10dif",
- .cra_driver_name = "crct10dif-pclmul",
- .cra_priority = 200,
- .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static const struct x86_cpu_id crct10dif_cpu_id[] = {
- X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
-
-static int __init crct10dif_intel_mod_init(void)
-{
- if (!x86_match_cpu(crct10dif_cpu_id))
- return -ENODEV;
-
- return crypto_register_shash(&alg);
-}
-
-static void __exit crct10dif_intel_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crct10dif_intel_mod_init);
-module_exit(crct10dif_intel_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index abb8b1fe123b..e88439d3828e 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -291,7 +291,6 @@ static struct crypto_alg des3_ede_cipher = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0614beece279..4c67184dc573 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -68,7 +68,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
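(Aside.) The two hunks above simply drop explicit ".cra_alignmask = 0" lines. With designated initializers, members that are not named are zero-initialized anyway, so the removal is a no-op; a tiny standalone check of that C rule, with made-up struct and field names:

#include <assert.h>
#include <stdio.h>

struct alg_desc {			/* invented stand-in, not the crypto API */
	const char *name;
	unsigned int blocksize;
	unsigned int alignmask;
};

static const struct alg_desc alg = {
	.name		= "demo",
	.blocksize	= 16,
	/* .alignmask omitted: unnamed members are zero-initialized */
};

int main(void)
{
	assert(alg.alignmask == 0);
	printf("%s: blocksize=%u alignmask=%u\n",
	       alg.name, alg.blocksize, alg.alignmask);
	return 0;
}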
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 76e4e74f35b5..f6d2d8aba643 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -57,7 +57,7 @@ __setup_param("vdso=", vdso_setup, vdso32_setup, 0);
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
-static struct ctl_table abi_table2[] = {
+static const struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 0569f579338b..f022d5f64fb6 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
-#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <asm/hypervisor.h>
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 95eada2994e1..173005e6a95d 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -19,7 +19,7 @@
#include <asm/sev.h>
#include <asm/ibt.h>
#include <asm/hypervisor.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
#include <asm/set_memory.h>
@@ -27,7 +27,6 @@
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
@@ -416,24 +415,24 @@ static void __init hv_get_partition_id(void)
static u8 __init get_vtl(void)
{
u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
- struct hv_get_vp_registers_input *input;
- struct hv_get_vp_registers_output *output;
+ struct hv_input_get_vp_registers *input;
+ struct hv_output_get_vp_registers *output;
unsigned long flags;
u64 ret;
local_irq_save(flags);
input = *this_cpu_ptr(hyperv_pcpu_input_arg);
- output = (struct hv_get_vp_registers_output *)input;
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
- memset(input, 0, struct_size(input, element, 1));
- input->header.partitionid = HV_PARTITION_ID_SELF;
- input->header.vpindex = HV_VP_INDEX_SELF;
- input->header.inputvtl = 0;
- input->element[0].name0 = HV_X64_REGISTER_VSM_VP_STATUS;
+ memset(input, 0, struct_size(input, names, 1));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = HV_VP_INDEX_SELF;
+ input->input_vtl.as_uint8 = 0;
+ input->names[0] = HV_REGISTER_VSM_VP_STATUS;
ret = hv_do_hypercall(control, input, output);
if (hv_result_success(ret)) {
- ret = output->as64.low & HV_X64_VTL_MASK;
+ ret = output->values[0].reg8 & HV_X64_VTL_MASK;
} else {
pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
BUG();
@@ -473,7 +472,7 @@ void __init hyperv_init(void)
if (hv_isolation_type_tdx())
hv_vp_assist_page = NULL;
else
- hv_vp_assist_page = kcalloc(num_possible_cpus(),
+ hv_vp_assist_page = kcalloc(nr_cpu_ids,
sizeof(*hv_vp_assist_page),
GFP_KERNEL);
if (!hv_vp_assist_page) {
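(Aside.) The get_vtl() hunk above sizes its hypercall input with struct_size(input, names, 1), i.e. the fixed header plus one element of the trailing flexible array. A rough userspace analog of that sizing, using an invented stand-in struct; the kernel macro additionally checks for arithmetic overflow, which this sketch does not:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vp_reg_input {			/* hypothetical stand-in, not the Hyper-V ABI */
	uint64_t partition_id;
	uint32_t vp_index;
	uint8_t  input_vtl;
	uint8_t  rsvd[3];
	uint32_t names[];		/* flexible array member */
};

/* Rough equivalent of the kernel's struct_size(ptr, member, n). */
#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
	size_t sz = STRUCT_SIZE(struct vp_reg_input, names, 1);
	struct vp_reg_input *in = calloc(1, sz);

	if (!in)
		return 1;
	in->names[0] = 0x000D0003;	/* e.g. the VSM VP status register ID */
	printf("allocation size = %zu bytes\n", sz);
	free(in);
	return 0;
}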
diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c
index 3fa1f2ee7b0d..ac4c834d4435 100644
--- a/arch/x86/hyperv/hv_proc.c
+++ b/arch/x86/hyperv/hv_proc.c
@@ -3,7 +3,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
-#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
@@ -177,7 +176,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
input->partition_id = partition_id;
input->vp_index = vp_index;
input->flags = flags;
- input->subnode_type = HvSubnodeAny;
+ input->subnode_type = HV_SUBNODE_ANY;
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
local_irq_restore(irq_flags);
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 04775346369c..4e1b1e3b5658 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -189,7 +189,7 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
input->partition_id = HV_PARTITION_ID_SELF;
input->apic_ids[0] = apic_id;
- output = (u32 *)input;
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
status = hv_do_hypercall(control, input, output);
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 90aabe1fd3b6..dd68d9ad9b22 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -7,7 +7,6 @@
*/
#include <linux/bitfield.h>
-#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 1cc113200ff5..cc8c3bd0e7c2 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -1,6 +1,5 @@
#define pr_fmt(fmt) "Hyper-V: " fmt
-#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index 9dc259fa322e..1083dc8646f9 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -11,7 +11,7 @@
#include <linux/types.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 521aad70e41b..f227a70ac91f 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -250,6 +250,9 @@ static inline u32 efi64_convert_status(efi_status_t status)
#define __efi64_argmap_allocate_pool(type, size, buffer) \
((type), (size), efi64_zero_upper(buffer))
+#define __efi64_argmap_locate_handle_buffer(type, proto, key, num, buf) \
+ ((type), (proto), (key), efi64_zero_upper(num), efi64_zero_upper(buf))
+
#define __efi64_argmap_create_event(type, tpl, f, c, event) \
((type), (tpl), (f), (c), efi64_zero_upper(event))
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index ab9f3dd87c80..ab0c78855ecb 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -84,7 +84,6 @@ extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
-extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
extern int hpet_register_irq_handler(rtc_irq_handler handler);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
deleted file mode 100644
index 3787d26810c1..000000000000
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
- * Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- */
-
-#ifndef _ASM_X86_HYPERV_TLFS_H
-#define _ASM_X86_HYPERV_TLFS_H
-
-#include <linux/types.h>
-#include <asm/page.h>
-/*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
- */
-#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
-#define HYPERV_CPUID_INTERFACE 0x40000001
-#define HYPERV_CPUID_VERSION 0x40000002
-#define HYPERV_CPUID_FEATURES 0x40000003
-#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
-#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
-#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
-#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
-#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C
-
-#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
-#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
-
-#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
-/* Support for the extended IOAPIC RTE format */
-#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
-
-#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
-#define HYPERV_CPUID_MIN 0x40000005
-#define HYPERV_CPUID_MAX 0x4000ffff
-
-/*
- * Group D Features. The bit assignments are custom to each architecture.
- * On x86/x64 these are HYPERV_CPUID_FEATURES.EDX bits.
- */
-/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
-#define HV_X64_MWAIT_AVAILABLE BIT(0)
-/* Guest debugging support is available */
-#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
-/* Performance Monitor support is available*/
-#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
-/* Support for physical CPU dynamic partitioning events is available*/
-#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
-/*
- * Support for passing hypercall input parameter block via XMM
- * registers is available
- */
-#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
-/* Support for a virtual guest idle state is available */
-#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
-/* Frequency MSRs available */
-#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
-/* Crash MSR available */
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
-/* Support for debug MSRs available */
-#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
-/* Support for extended gva ranges for flush hypercalls available */
-#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14)
-/*
- * Support for returning hypercall output block via XMM
- * registers is available
- */
-#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
-/* stimer Direct Mode is available */
-#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
-
-/*
- * Implementation recommendations. Indicates which behaviors the hypervisor
- * recommends the OS implement for optimal performance.
- * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits.
- */
-/*
- * Recommend using hypercall for address space switches rather
- * than MOV to CR3 instruction
- */
-#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
-/* Recommend using hypercall for local TLB flushes rather
- * than INVLPG or MOV to CR3 instructions */
-#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
-/*
- * Recommend using hypercall for remote TLB flushes rather
- * than inter-processor interrupts
- */
-#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
-/*
- * Recommend using MSRs for accessing APIC registers
- * EOI, ICR and TPR rather than their memory-mapped counterparts
- */
-#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
-/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
-#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
-/*
- * Recommend using relaxed timing for this partition. If used,
- * the VM should disable any watchdog timeouts that rely on the
- * timely delivery of external interrupts
- */
-#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
-
-/*
- * Recommend not using Auto End-Of-Interrupt feature
- */
-#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
-
-/*
- * Recommend using cluster IPI hypercalls.
- */
-#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
-
-/* Recommend using the newer ExProcessorMasks interface */
-#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
-
-/* Indicates that the hypervisor is nested within a Hyper-V partition. */
-#define HV_X64_HYPERV_NESTED BIT(12)
-
-/* Recommend using enlightened VMCS */
-#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
-
-/* Use hypercalls for MMIO config space access */
-#define HV_X64_USE_MMIO_HYPERCALLS BIT(21)
-
-/*
- * CPU management features identification.
- * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
- */
-#define HV_X64_START_LOGICAL_PROCESSOR BIT(0)
-#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1)
-#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2)
-#define HV_X64_RESERVED_IDENTITY_BIT BIT(31)
-
-/*
- * Virtual processor will never share a physical core with another virtual
- * processor, except for virtual processors that are reported as sibling SMT
- * threads.
- */
-#define HV_X64_NO_NONARCH_CORESHARING BIT(18)
-
-/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
-#define HV_X64_NESTED_DIRECT_FLUSH BIT(17)
-#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
-#define HV_X64_NESTED_MSR_BITMAP BIT(19)
-
-/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */
-#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0)
-
-/*
- * This is specific to AMD and specifies that enlightened TLB flush is
- * supported. If guest opts in to this feature, ASID invalidations only
- * flushes gva -> hpa mapping entries. To flush the TLB entries derived
- * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace
- * or HvFlushGuestPhysicalAddressList).
- */
-#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
-
-/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
-#define HV_PARAVISOR_PRESENT BIT(0)
-
-/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */
-#define HV_ISOLATION_TYPE GENMASK(3, 0)
-#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5)
-#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6)
-
-enum hv_isolation_type {
- HV_ISOLATION_TYPE_NONE = 0,
- HV_ISOLATION_TYPE_VBS = 1,
- HV_ISOLATION_TYPE_SNP = 2,
- HV_ISOLATION_TYPE_TDX = 3
-};
-
-/* Hyper-V specific model specific registers (MSRs) */
-
-/* MSR used to identify the guest OS. */
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-
-/* MSR used to setup pages used to communicate with the hypervisor. */
-#define HV_X64_MSR_HYPERCALL 0x40000001
-
-/* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX 0x40000002
-
-/* MSR used to reset the guest OS. */
-#define HV_X64_MSR_RESET 0x40000003
-
-/* MSR used to provide vcpu runtime in 100ns units */
-#define HV_X64_MSR_VP_RUNTIME 0x40000010
-
-/* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
-
-/* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
-
-/* MSR used to retrieve the TSC frequency */
-#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
-
-/* MSR used to retrieve the local APIC timer frequency */
-#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
-
-/* Define the virtual APIC registers */
-#define HV_X64_MSR_EOI 0x40000070
-#define HV_X64_MSR_ICR 0x40000071
-#define HV_X64_MSR_TPR 0x40000072
-#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
-
-/* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL 0x40000080
-#define HV_X64_MSR_SVERSION 0x40000081
-#define HV_X64_MSR_SIEFP 0x40000082
-#define HV_X64_MSR_SIMP 0x40000083
-#define HV_X64_MSR_EOM 0x40000084
-#define HV_X64_MSR_SINT0 0x40000090
-#define HV_X64_MSR_SINT1 0x40000091
-#define HV_X64_MSR_SINT2 0x40000092
-#define HV_X64_MSR_SINT3 0x40000093
-#define HV_X64_MSR_SINT4 0x40000094
-#define HV_X64_MSR_SINT5 0x40000095
-#define HV_X64_MSR_SINT6 0x40000096
-#define HV_X64_MSR_SINT7 0x40000097
-#define HV_X64_MSR_SINT8 0x40000098
-#define HV_X64_MSR_SINT9 0x40000099
-#define HV_X64_MSR_SINT10 0x4000009A
-#define HV_X64_MSR_SINT11 0x4000009B
-#define HV_X64_MSR_SINT12 0x4000009C
-#define HV_X64_MSR_SINT13 0x4000009D
-#define HV_X64_MSR_SINT14 0x4000009E
-#define HV_X64_MSR_SINT15 0x4000009F
-
-/*
- * Define synthetic interrupt controller model specific registers for
- * nested hypervisor.
- */
-#define HV_X64_MSR_NESTED_SCONTROL 0x40001080
-#define HV_X64_MSR_NESTED_SVERSION 0x40001081
-#define HV_X64_MSR_NESTED_SIEFP 0x40001082
-#define HV_X64_MSR_NESTED_SIMP 0x40001083
-#define HV_X64_MSR_NESTED_EOM 0x40001084
-#define HV_X64_MSR_NESTED_SINT0 0x40001090
-
-/*
- * Synthetic Timer MSRs. Four timers per vcpu.
- */
-#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
-#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
-#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
-#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
-#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
-#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
-#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
-#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
-
-/* Hyper-V guest idle MSR */
-#define HV_X64_MSR_GUEST_IDLE 0x400000F0
-
-/* Hyper-V guest crash notification MSR's */
-#define HV_X64_MSR_CRASH_P0 0x40000100
-#define HV_X64_MSR_CRASH_P1 0x40000101
-#define HV_X64_MSR_CRASH_P2 0x40000102
-#define HV_X64_MSR_CRASH_P3 0x40000103
-#define HV_X64_MSR_CRASH_P4 0x40000104
-#define HV_X64_MSR_CRASH_CTL 0x40000105
-
-/* TSC emulation after migration */
-#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
-#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
-#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
-
-/* TSC invariant control */
-#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
-
-/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
-#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0)
-
-/*
- * To support arch-generic code calling hv_set/get_register:
- * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
- * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
- */
-#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
-#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1)
-#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2)
-#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3)
-#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4)
-#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL)
-
-#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX)
-#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT)
-#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC)
-
-#define HV_MSR_SINT0 (HV_X64_MSR_SINT0)
-#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION)
-#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL)
-#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP)
-#define HV_MSR_SIMP (HV_X64_MSR_SIMP)
-#define HV_MSR_EOM (HV_X64_MSR_EOM)
-
-#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL)
-#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION)
-#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP)
-#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP)
-#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM)
-#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0)
-
-#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG)
-#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT)
-
-/*
- * Registers are only accessible via HVCALL_GET_VP_REGISTERS hvcall and
- * there is not associated MSR address.
- */
-#define HV_X64_REGISTER_VSM_VP_STATUS 0x000D0003
-#define HV_X64_VTL_MASK GENMASK(3, 0)
-
-/* Hyper-V memory host visibility */
-enum hv_mem_host_visibility {
- VMBUS_PAGE_NOT_VISIBLE = 0,
- VMBUS_PAGE_VISIBLE_READ_ONLY = 1,
- VMBUS_PAGE_VISIBLE_READ_WRITE = 3
-};
-
-/* HvCallModifySparseGpaPageHostVisibility hypercall */
-#define HV_MAX_MODIFY_GPA_REP_COUNT ((PAGE_SIZE / sizeof(u64)) - 2)
-struct hv_gpa_range_for_visibility {
- u64 partition_id;
- u32 host_visibility:2;
- u32 reserved0:30;
- u32 reserved1;
- u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
-} __packed;
-
-/*
- * Declare the MSR used to setup pages used to communicate with the hypervisor.
- */
-union hv_x64_msr_hypercall_contents {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:11;
- u64 guest_physical_address:52;
- } __packed;
-};
-
-union hv_vp_assist_msr_contents {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:11;
- u64 pfn:52;
- } __packed;
-};
-
-struct hv_reenlightenment_control {
- __u64 vector:8;
- __u64 reserved1:8;
- __u64 enabled:1;
- __u64 reserved2:15;
- __u64 target_vp:32;
-} __packed;
-
-struct hv_tsc_emulation_control {
- __u64 enabled:1;
- __u64 reserved:63;
-} __packed;
-
-struct hv_tsc_emulation_status {
- __u64 inprogress:1;
- __u64 reserved:63;
-} __packed;
-
-#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
-
-#define HV_X64_MSR_CRASH_PARAMS \
- (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
-
-#define HV_IPI_LOW_VECTOR 0x10
-#define HV_IPI_HIGH_VECTOR 0xff
-
-#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
-#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
-
-/* Hyper-V Enlightened VMCS version mask in nested features CPUID */
-#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff
-
-#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
-#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
-
-/* Number of XMM registers used in hypercall input/output */
-#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
-
-struct hv_nested_enlightenments_control {
- struct {
- __u32 directhypercall:1;
- __u32 reserved:31;
- } features;
- struct {
- __u32 inter_partition_comm:1;
- __u32 reserved:31;
- } hypercallControls;
-} __packed;
-
-/* Define virtual processor assist page structure. */
-struct hv_vp_assist_page {
- __u32 apic_assist;
- __u32 reserved1;
- __u32 vtl_entry_reason;
- __u32 vtl_reserved;
- __u64 vtl_ret_x64rax;
- __u64 vtl_ret_x64rcx;
- struct hv_nested_enlightenments_control nested_control;
- __u8 enlighten_vmentry;
- __u8 reserved2[7];
- __u64 current_nested_vmcs;
- __u8 synthetic_time_unhalted_timer_expired;
- __u8 reserved3[7];
- __u8 virtualization_fault_information[40];
- __u8 reserved4[8];
- __u8 intercept_message[256];
- __u8 vtl_ret_actions[256];
-} __packed;
-
-struct hv_enlightened_vmcs {
- u32 revision_id;
- u32 abort;
-
- u16 host_es_selector;
- u16 host_cs_selector;
- u16 host_ss_selector;
- u16 host_ds_selector;
- u16 host_fs_selector;
- u16 host_gs_selector;
- u16 host_tr_selector;
-
- u16 padding16_1;
-
- u64 host_ia32_pat;
- u64 host_ia32_efer;
-
- u64 host_cr0;
- u64 host_cr3;
- u64 host_cr4;
-
- u64 host_ia32_sysenter_esp;
- u64 host_ia32_sysenter_eip;
- u64 host_rip;
- u32 host_ia32_sysenter_cs;
-
- u32 pin_based_vm_exec_control;
- u32 vm_exit_controls;
- u32 secondary_vm_exec_control;
-
- u64 io_bitmap_a;
- u64 io_bitmap_b;
- u64 msr_bitmap;
-
- u16 guest_es_selector;
- u16 guest_cs_selector;
- u16 guest_ss_selector;
- u16 guest_ds_selector;
- u16 guest_fs_selector;
- u16 guest_gs_selector;
- u16 guest_ldtr_selector;
- u16 guest_tr_selector;
-
- u32 guest_es_limit;
- u32 guest_cs_limit;
- u32 guest_ss_limit;
- u32 guest_ds_limit;
- u32 guest_fs_limit;
- u32 guest_gs_limit;
- u32 guest_ldtr_limit;
- u32 guest_tr_limit;
- u32 guest_gdtr_limit;
- u32 guest_idtr_limit;
-
- u32 guest_es_ar_bytes;
- u32 guest_cs_ar_bytes;
- u32 guest_ss_ar_bytes;
- u32 guest_ds_ar_bytes;
- u32 guest_fs_ar_bytes;
- u32 guest_gs_ar_bytes;
- u32 guest_ldtr_ar_bytes;
- u32 guest_tr_ar_bytes;
-
- u64 guest_es_base;
- u64 guest_cs_base;
- u64 guest_ss_base;
- u64 guest_ds_base;
- u64 guest_fs_base;
- u64 guest_gs_base;
- u64 guest_ldtr_base;
- u64 guest_tr_base;
- u64 guest_gdtr_base;
- u64 guest_idtr_base;
-
- u64 padding64_1[3];
-
- u64 vm_exit_msr_store_addr;
- u64 vm_exit_msr_load_addr;
- u64 vm_entry_msr_load_addr;
-
- u64 cr3_target_value0;
- u64 cr3_target_value1;
- u64 cr3_target_value2;
- u64 cr3_target_value3;
-
- u32 page_fault_error_code_mask;
- u32 page_fault_error_code_match;
-
- u32 cr3_target_count;
- u32 vm_exit_msr_store_count;
- u32 vm_exit_msr_load_count;
- u32 vm_entry_msr_load_count;
-
- u64 tsc_offset;
- u64 virtual_apic_page_addr;
- u64 vmcs_link_pointer;
-
- u64 guest_ia32_debugctl;
- u64 guest_ia32_pat;
- u64 guest_ia32_efer;
-
- u64 guest_pdptr0;
- u64 guest_pdptr1;
- u64 guest_pdptr2;
- u64 guest_pdptr3;
-
- u64 guest_pending_dbg_exceptions;
- u64 guest_sysenter_esp;
- u64 guest_sysenter_eip;
-
- u32 guest_activity_state;
- u32 guest_sysenter_cs;
-
- u64 cr0_guest_host_mask;
- u64 cr4_guest_host_mask;
- u64 cr0_read_shadow;
- u64 cr4_read_shadow;
- u64 guest_cr0;
- u64 guest_cr3;
- u64 guest_cr4;
- u64 guest_dr7;
-
- u64 host_fs_base;
- u64 host_gs_base;
- u64 host_tr_base;
- u64 host_gdtr_base;
- u64 host_idtr_base;
- u64 host_rsp;
-
- u64 ept_pointer;
-
- u16 virtual_processor_id;
- u16 padding16_2[3];
-
- u64 padding64_2[5];
- u64 guest_physical_address;
-
- u32 vm_instruction_error;
- u32 vm_exit_reason;
- u32 vm_exit_intr_info;
- u32 vm_exit_intr_error_code;
- u32 idt_vectoring_info_field;
- u32 idt_vectoring_error_code;
- u32 vm_exit_instruction_len;
- u32 vmx_instruction_info;
-
- u64 exit_qualification;
- u64 exit_io_instruction_ecx;
- u64 exit_io_instruction_esi;
- u64 exit_io_instruction_edi;
- u64 exit_io_instruction_eip;
-
- u64 guest_linear_address;
- u64 guest_rsp;
- u64 guest_rflags;
-
- u32 guest_interruptibility_info;
- u32 cpu_based_vm_exec_control;
- u32 exception_bitmap;
- u32 vm_entry_controls;
- u32 vm_entry_intr_info_field;
- u32 vm_entry_exception_error_code;
- u32 vm_entry_instruction_len;
- u32 tpr_threshold;
-
- u64 guest_rip;
-
- u32 hv_clean_fields;
- u32 padding32_1;
- u32 hv_synthetic_controls;
- struct {
- u32 nested_flush_hypercall:1;
- u32 msr_bitmap:1;
- u32 reserved:30;
- } __packed hv_enlightenments_control;
- u32 hv_vp_id;
- u32 padding32_2;
- u64 hv_vm_id;
- u64 partition_assist_page;
- u64 padding64_4[4];
- u64 guest_bndcfgs;
- u64 guest_ia32_perf_global_ctrl;
- u64 guest_ia32_s_cet;
- u64 guest_ssp;
- u64 guest_ia32_int_ssp_table_addr;
- u64 guest_ia32_lbr_ctl;
- u64 padding64_5[2];
- u64 xss_exit_bitmap;
- u64 encls_exiting_bitmap;
- u64 host_ia32_perf_global_ctrl;
- u64 tsc_multiplier;
- u64 host_ia32_s_cet;
- u64 host_ssp;
- u64 host_ia32_int_ssp_table_addr;
- u64 padding64_6;
-} __packed;
-
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14)
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
-
-#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
-
-/*
- * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
- * pairing it with architecturally impossible exit reasons. Bit 28 is set only
- * on SMI exits to a SMI transfer monitor (STM) and if and only if a MTF VM-Exit
- * is pending. I.e. it will never be set by hardware for non-SMI exits (there
- * are only three), nor will it ever be set unless the VMM is an STM.
- */
-#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
-
-/*
- * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose
- * SVM enlightenments to guests.
- */
-struct hv_vmcb_enlightenments {
- struct __packed hv_enlightenments_control {
- u32 nested_flush_hypercall:1;
- u32 msr_bitmap:1;
- u32 enlightened_npt_tlb: 1;
- u32 reserved:29;
- } __packed hv_enlightenments_control;
- u32 hv_vp_id;
- u64 hv_vm_id;
- u64 partition_assist_page;
- u64 reserved;
-} __packed;
-
-/*
- * Hyper-V uses the software reserved clean bit in VMCB.
- */
-#define HV_VMCB_NESTED_ENLIGHTENMENTS 31
-
-/* Synthetic VM-Exit */
-#define HV_SVM_EXITCODE_ENL 0xf0000000
-#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
-
-struct hv_partition_assist_pg {
- u32 tlb_lock_count;
-};
-
-enum hv_interrupt_type {
- HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
- HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
- HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
- HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
- HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
- HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
- HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
- HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
- HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
- HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
- HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
-};
-
-union hv_msi_address_register {
- u32 as_uint32;
- struct {
- u32 reserved1:2;
- u32 destination_mode:1;
- u32 redirection_hint:1;
- u32 reserved2:8;
- u32 destination_id:8;
- u32 msi_base:12;
- };
-} __packed;
-
-union hv_msi_data_register {
- u32 as_uint32;
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 reserved1:3;
- u32 level_assert:1;
- u32 trigger_mode:1;
- u32 reserved2:16;
- };
-} __packed;
-
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
- u64 as_uint64;
- struct {
- union hv_msi_address_register address;
- union hv_msi_data_register data;
- } __packed;
-};
-
-struct hv_x64_segment_register {
- u64 base;
- u32 limit;
- u16 selector;
- union {
- struct {
- u16 segment_type : 4;
- u16 non_system_segment : 1;
- u16 descriptor_privilege_level : 2;
- u16 present : 1;
- u16 reserved : 4;
- u16 available : 1;
- u16 _long : 1;
- u16 _default : 1;
- u16 granularity : 1;
- } __packed;
- u16 attributes;
- };
-} __packed;
-
-struct hv_x64_table_register {
- u16 pad[3];
- u16 limit;
- u64 base;
-} __packed;
-
-struct hv_init_vp_context {
- u64 rip;
- u64 rsp;
- u64 rflags;
-
- struct hv_x64_segment_register cs;
- struct hv_x64_segment_register ds;
- struct hv_x64_segment_register es;
- struct hv_x64_segment_register fs;
- struct hv_x64_segment_register gs;
- struct hv_x64_segment_register ss;
- struct hv_x64_segment_register tr;
- struct hv_x64_segment_register ldtr;
-
- struct hv_x64_table_register idtr;
- struct hv_x64_table_register gdtr;
-
- u64 efer;
- u64 cr0;
- u64 cr3;
- u64 cr4;
- u64 msr_cr_pat;
-} __packed;
-
-union hv_input_vtl {
- u8 as_uint8;
- struct {
- u8 target_vtl: 4;
- u8 use_target_vtl: 1;
- u8 reserved_z: 3;
- };
-} __packed;
-
-struct hv_enable_vp_vtl {
- u64 partition_id;
- u32 vp_index;
- union hv_input_vtl target_vtl;
- u8 mbz0;
- u16 mbz1;
- struct hv_init_vp_context vp_context;
-} __packed;
-
-struct hv_get_vp_from_apic_id_in {
- u64 partition_id;
- union hv_input_vtl target_vtl;
- u8 res[7];
- u32 apic_ids[];
-} __packed;
-
-#include <asm-generic/hyperv-tlfs.h>
-
-#endif
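(Aside.) The deleted header defined the Hyper-V CPUID leaves starting at 0x40000000; they are only meaningful once CPUID.1:ECX bit 31 (hypervisor present) is set. A hedged userspace probe of the vendor leaf, purely illustrative and unrelated to how the kernel consumes these definitions (x86, GCC/Clang cpuid.h):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13] = { 0 };

	__cpuid(1, eax, ebx, ecx, edx);
	if (!(ecx & (1u << 31))) {		/* hypervisor-present bit */
		puts("no hypervisor reported");
		return 0;
	}

	__cpuid(0x40000000, eax, ebx, ecx, edx);	/* vendor string + max leaf */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &ecx, 4);
	memcpy(vendor + 8, &edx, 4);
	printf("hypervisor: \"%s\", max leaf 0x%x\n", vendor, eax);
	return 0;
}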
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 14d72727d7ee..0e82ebc5d1e1 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_INIT_H
#define _ASM_X86_INIT_H
-#define __head __section(".head.text")
+#define __head __section(".head.text") __no_sanitize_undefined
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
diff --git a/arch/x86/include/asm/intel_punit_ipc.h b/arch/x86/include/asm/intel_punit_ipc.h
index ce16da719596..1f9b5d225912 100644
--- a/arch/x86/include/asm/intel_punit_ipc.h
+++ b/arch/x86/include/asm/intel_punit_ipc.h
@@ -80,17 +80,10 @@ typedef enum {
#if IS_ENABLED(CONFIG_INTEL_PUNIT_IPC)
-int intel_punit_ipc_simple_command(int cmd, int para1, int para2);
int intel_punit_ipc_command(u32 cmd, u32 para1, u32 para2, u32 *in, u32 *out);
#else
-static inline int intel_punit_ipc_simple_command(int cmd,
- int para1, int para2)
-{
- return -ENODEV;
-}
-
static inline int intel_punit_ipc_command(u32 cmd, u32 para1, u32 para2,
u32 *in, u32 *out)
{
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index ae5482a2f0ca..8ad187462b68 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -8,14 +8,9 @@
# define PA_PGD 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
-#else
-# define PA_CONTROL_PAGE 0
-# define VA_CONTROL_PAGE 1
-# define PA_TABLE_PAGE 2
-# define PA_SWAP_PAGE 3
-# define PAGES_NR 4
#endif
+# define KEXEC_CONTROL_PAGE_SIZE 4096
# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
#ifndef __ASSEMBLY__
@@ -43,7 +38,6 @@ struct kimage;
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-# define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386
@@ -58,11 +52,12 @@ struct kimage;
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT (MAXMEM-1)
-/* Allocate one page for the pdp and the second for the code */
-# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
-
/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
+
+extern unsigned long kexec_va_control_page;
+extern unsigned long kexec_pa_table_page;
+extern unsigned long kexec_pa_swap_page;
#endif
/*
@@ -116,21 +111,21 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
#ifdef CONFIG_X86_32
-asmlinkage unsigned long
-relocate_kernel(unsigned long indirection_page,
- unsigned long control_page,
- unsigned long start_address,
- unsigned int has_pae,
- unsigned int preserve_context);
+typedef asmlinkage unsigned long
+relocate_kernel_fn(unsigned long indirection_page,
+ unsigned long control_page,
+ unsigned long start_address,
+ unsigned int has_pae,
+ unsigned int preserve_context);
#else
-unsigned long
-relocate_kernel(unsigned long indirection_page,
- unsigned long page_list,
- unsigned long start_address,
- unsigned int preserve_context,
- unsigned int host_mem_enc_active);
+typedef unsigned long
+relocate_kernel_fn(unsigned long indirection_page,
+ unsigned long pa_control_page,
+ unsigned long start_address,
+ unsigned int preserve_context,
+ unsigned int host_mem_enc_active);
#endif
-
+extern relocate_kernel_fn relocate_kernel;
#define ARCH_HAS_KIMAGE_ARCH
#ifdef CONFIG_X86_32
@@ -145,6 +140,19 @@ struct kimage_arch {
};
#else
struct kimage_arch {
+ /*
+ * This is a kimage control page, as it must not overlap with either
+ * source or destination address ranges.
+ */
+ pgd_t *pgd;
+ /*
+ * The virtual mapping of the control code page itself is used only
+ * during the transition, while the current kernel's pages are all
+ * in place. Thus the intermediate page table pages used to map it
+ * are not control pages, but instead just normal pages obtained
+ * with get_zeroed_page(). And have to be tracked (below) so that
+ * they can be freed.
+ */
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
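(Aside.) The kexec.h hunk above converts the relocate_kernel prototypes into a function typedef, relocate_kernel_fn, and then declares the symbol through it. A function typedef may declare a function (or form a pointer type) but cannot define one; a small demonstration with invented names:

#include <stdio.h>

/* A function type: an identifier declared with it is a function, not a pointer. */
typedef unsigned long demo_reloc_fn(unsigned long indirection_page,
				    unsigned long control_page);

/* Declaration via the typedef ... */
static demo_reloc_fn demo_relocate;

/* ... but the definition must still spell out the full prototype. */
static unsigned long demo_relocate(unsigned long indirection_page,
				   unsigned long control_page)
{
	return indirection_page ^ control_page;
}

int main(void)
{
	demo_reloc_fn *fp = demo_relocate;	/* also usable as a pointer type */

	printf("%lu\n", fp(6, 3));
	return 0;
}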
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 5aff7222e40f..c35550581da0 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -83,7 +83,6 @@ KVM_X86_OP(enable_nmi_window)
KVM_X86_OP(enable_irq_window)
KVM_X86_OP_OPTIONAL(update_cr8_intercept)
KVM_X86_OP(refresh_apicv_exec_ctrl)
-KVM_X86_OP_OPTIONAL(hwapic_irr_update)
KVM_X86_OP_OPTIONAL(hwapic_isr_update)
KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
@@ -94,12 +93,17 @@ KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
+KVM_X86_OP_OPTIONAL(link_external_spt)
+KVM_X86_OP_OPTIONAL(set_external_spte)
+KVM_X86_OP_OPTIONAL(free_external_spt)
+KVM_X86_OP_OPTIONAL(remove_external_spte)
KVM_X86_OP(has_wbinvd_exit)
KVM_X86_OP(get_l2_tsc_offset)
KVM_X86_OP(get_l2_tsc_multiplier)
KVM_X86_OP(write_tsc_offset)
KVM_X86_OP(write_tsc_multiplier)
KVM_X86_OP(get_exit_info)
+KVM_X86_OP(get_entry_info)
KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e159e44a6a1b..b15cde0a9b5c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -24,9 +24,9 @@
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
-#include <linux/hyperv.h>
#include <linux/kfifo.h>
#include <linux/sched/vhost_task.h>
+#include <linux/call_once.h>
#include <asm/apic.h>
#include <asm/pvclock-abi.h>
@@ -36,8 +36,8 @@
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
-#include <asm/hyperv-tlfs.h>
#include <asm/reboot.h>
+#include <hyperv/hvhdk.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -313,10 +313,11 @@ struct kvm_kernel_irq_routing_entry;
* the number of unique SPs that can theoretically be created is 2^n, where n
* is the number of bits that are used to compute the role.
*
- * But, even though there are 19 bits in the mask below, not all combinations
+ * But, even though there are 20 bits in the mask below, not all combinations
* of modes and flags are possible:
*
- * - invalid shadow pages are not accounted, so the bits are effectively 18
+ * - invalid shadow pages are not accounted, mirror pages are not shadowed,
+ * so the bits are effectively 18.
*
* - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
* execonly and ad_disabled are only used for nested EPT which has
@@ -349,7 +350,8 @@ union kvm_mmu_page_role {
unsigned ad_disabled:1;
unsigned guest_mode:1;
unsigned passthrough:1;
- unsigned :5;
+ unsigned is_mirror:1;
+ unsigned :4;
/*
* This is left at the top of the word so that
@@ -457,6 +459,7 @@ struct kvm_mmu {
int (*sync_spte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, int i);
struct kvm_mmu_root_info root;
+ hpa_t mirror_root_hpa;
union kvm_cpu_role cpu_role;
union kvm_mmu_page_role root_role;
@@ -739,6 +742,23 @@ struct kvm_queued_exception {
bool has_payload;
};
+/*
+ * Hardware-defined CPUID leafs that are either scattered by the kernel or are
+ * unknown to the kernel, but need to be directly used by KVM. Note, these
+ * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
+ */
+enum kvm_only_cpuid_leafs {
+ CPUID_12_EAX = NCAPINTS,
+ CPUID_7_1_EDX,
+ CPUID_8000_0007_EDX,
+ CPUID_8000_0022_EAX,
+ CPUID_7_2_EDX,
+ CPUID_24_0_EBX,
+ NR_KVM_CPU_CAPS,
+
+ NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -813,6 +833,11 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_shadow_page_cache;
struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
+ /*
+ * This cache is to allocate external page table. E.g. private EPT used
+ * by the TDX module.
+ */
+ struct kvm_mmu_memory_cache mmu_external_spt_cache;
/*
* QEMU userspace and the guest each have their own FPU state.
@@ -854,27 +879,23 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
- struct kvm_hypervisor_cpuid kvm_cpuid;
bool is_amd_compatible;
/*
- * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
- * when "struct kvm_vcpu_arch" is no longer defined in an
- * arch/x86/include/asm header. The max is mostly arbitrary, i.e.
- * can be increased as necessary.
- */
-#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG
-
- /*
- * Track whether or not the guest is allowed to use features that are
- * governed by KVM, where "governed" means KVM needs to manage state
- * and/or explicitly enable the feature in hardware. Typically, but
- * not always, governed features can be used by the guest if and only
- * if both KVM and userspace want to expose the feature to the guest.
+ * cpu_caps holds the effective guest capabilities, i.e. the features
+ * the vCPU is allowed to use. Typically, but not always, features can
+ * be used by the guest if and only if both KVM and userspace want to
+ * expose the feature to the guest.
+ *
+ * A common exception is for virtualization holes, i.e. when KVM can't
+ * prevent the guest from using a feature, in which case the vCPU "has"
+ * the feature regardless of what KVM or userspace desires.
+ *
+ * Note, features that don't require KVM involvement in any way are
+ * NOT enforced/sanitized by KVM, i.e. are taken verbatim from the
+ * guest CPUID provided by userspace.
*/
- struct {
- DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES);
- } governed_features;
+ u32 cpu_caps[NR_KVM_CPU_CAPS];
u64 reserved_gpa_bits;
int maxphyaddr;
@@ -1445,6 +1466,7 @@ struct kvm_arch {
struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
struct vhost_task *nx_huge_page_recovery_thread;
u64 nx_huge_page_last;
+ struct once nx_once;
#ifdef CONFIG_X86_64
/* The number of TDP MMU pages across all roots. */
@@ -1536,6 +1558,8 @@ struct kvm_arch {
*/
#define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
struct kvm_mmu_memory_cache split_desc_cache;
+
+ gfn_t gfn_direct_bits;
};
struct kvm_vm_stat {
@@ -1734,8 +1758,7 @@ struct kvm_x86_ops {
const unsigned long required_apicv_inhibits;
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
- void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
- void (*hwapic_isr_update)(int isr);
+ void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
@@ -1749,6 +1772,21 @@ struct kvm_x86_ops {
void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int root_level);
+ /* Update external mapping with page table link. */
+ int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
+ void *external_spt);
+ /* Update the external page table from spte getting set. */
+ int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
+ kvm_pfn_t pfn_for_gfn);
+
+ /* Update external page tables for page table about to be freed. */
+ int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
+ void *external_spt);
+
+ /* Update external page table from spte getting removed, and flush TLB. */
+ int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
+ kvm_pfn_t pfn_for_gfn);
+
bool (*has_wbinvd_exit)(void);
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
@@ -1757,12 +1795,15 @@ struct kvm_x86_ops {
void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
/*
- * Retrieve somewhat arbitrary exit information. Intended to
+ * Retrieve somewhat arbitrary exit/entry information. Intended to
* be used only from within tracepoints or error paths.
*/
void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2,
- u32 *exit_int_info, u32 *exit_int_info_err_code);
+ u32 *intr_info, u32 *error_code);
+
+ void (*get_entry_info)(struct kvm_vcpu *vcpu,
+ u32 *intr_info, u32 *error_code);
int (*check_intercept)(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
@@ -2019,8 +2060,8 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
* VMware backdoor emulation handles select instructions
* and reinjects the #GP for all other cases.
*
- * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
- * case the CR2/GPA value pass on the stack is valid.
+ * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
+ * the CR2/GPA value pass on the stack is valid.
*
* EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
* state and inject single-step #DBs after skipping
@@ -2055,6 +2096,11 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
#define EMULTYPE_WRITE_PF_TO_SP (1 << 8)
+static inline bool kvm_can_emulate_event_vectoring(int emul_type)
+{
+ return !(emul_type & EMULTYPE_PF);
+}
+
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len);
@@ -2062,6 +2108,8 @@ void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
u64 *data, u8 ndata);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
+void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
+
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
@@ -2181,12 +2229,6 @@ static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}
-unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
- unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- int op_64_bit, int cpl);
-int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
-
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
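(Aside.) The kvm_host.h changes above replace the governed_features bitmap with u32 cpu_caps[NR_KVM_CPU_CAPS], an array of 32-bit feature words indexed by word and bit in the same spirit as the kernel's X86_FEATURE_* encoding. A standalone sketch of that word*32+bit encoding, with hypothetical feature names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NWORDS		4
#define FEAT(word, bit)	((word) * 32 + (bit))

/* Hypothetical features, encoded like X86_FEATURE_* (word*32 + bit). */
#define FEAT_SSE4_2	FEAT(0, 20)
#define FEAT_XSAVE	FEAT(1, 26)

static inline void cap_set(uint32_t *caps, unsigned int f)
{
	caps[f / 32] |= 1u << (f % 32);
}

static inline int cap_has(const uint32_t *caps, unsigned int f)
{
	return (caps[f / 32] >> (f % 32)) & 1;
}

int main(void)
{
	uint32_t caps[NWORDS] = { 0 };

	cap_set(caps, FEAT_XSAVE);
	assert(!cap_has(caps, FEAT_SSE4_2));
	assert(cap_has(caps, FEAT_XSAVE));
	printf("word1 = 0x%08x\n", caps[1]);	/* bit 26 set -> 0x04000000 */
	return 0;
}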
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index ce4677b8b735..3b496cdcb74b 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -37,6 +37,8 @@ typedef struct {
*/
atomic64_t tlb_gen;
+ unsigned long next_trim_cpumask;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct rw_semaphore ldt_usr_sem;
struct ldt_struct *ldt;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2886cb668d7f..795fdd53bd0a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
atomic64_set(&mm->context.tlb_gen, 0);
+ mm->context.next_trim_cpumask = jiffies + HZ;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5f0bc6a6d025..f91ab1e75f9f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -6,10 +6,9 @@
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
-#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
-#include <asm/mshyperv.h>
+#include <hyperv/hvhdk.h>
/*
* Hyper-V always provides a single IO-APIC at this MMIO address.
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index dcd836b59beb..dd4841231bb9 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -147,24 +147,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4
set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- gfp_t gfp = GFP_KERNEL_ACCOUNT;
-
- if (mm == &init_mm)
- gfp &= ~__GFP_ACCOUNT;
- return (p4d_t *)get_zeroed_page(gfp);
-}
-
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (!pgtable_l5_enabled())
- return;
-
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 3fa87e5e11ab..30e8ee7006f9 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -5,6 +5,7 @@
#include <asm-generic/sections.h>
#include <asm/extable.h>
+extern char __relocate_kernel_start[], __relocate_kernel_end[];
extern char __brk_base[], __brk_limit[];
extern char __end_rodata_aligned[];
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 0667b2a88614..85f4fde3515c 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -49,7 +49,7 @@ extern unsigned long saved_video_mode;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
-extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
+extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
extern void startup_64_setup_gdt_idt(void);
extern void early_setup_idt(void);
extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 6ef92432a5ce..dcbccdb280f9 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -207,6 +207,7 @@ struct snp_psc_desc {
#define GHCB_TERM_SVSM_VMPL0 8 /* SVSM is present but has set VMPL to 0 */
#define GHCB_TERM_SVSM_CAA 9 /* SVSM is present but CAA is not page aligned */
#define GHCB_TERM_SECURE_TSC 10 /* Secure TSC initialization failed */
+#define GHCB_TERM_SVSM_CA_REMAP_FAIL 11 /* SVSM is present but CA could not be remapped */
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
index 89f7fcade8ae..fcbbef484a78 100644
--- a/arch/x86/include/asm/shared/tdx.h
+++ b/arch/x86/include/asm/shared/tdx.h
@@ -19,6 +19,32 @@
#define TDG_VM_RD 7
#define TDG_VM_WR 8
+/* TDX attributes */
+#define TDX_ATTR_DEBUG_BIT 0
+#define TDX_ATTR_DEBUG BIT_ULL(TDX_ATTR_DEBUG_BIT)
+#define TDX_ATTR_HGS_PLUS_PROF_BIT 4
+#define TDX_ATTR_HGS_PLUS_PROF BIT_ULL(TDX_ATTR_HGS_PLUS_PROF_BIT)
+#define TDX_ATTR_PERF_PROF_BIT 5
+#define TDX_ATTR_PERF_PROF BIT_ULL(TDX_ATTR_PERF_PROF_BIT)
+#define TDX_ATTR_PMT_PROF_BIT 6
+#define TDX_ATTR_PMT_PROF BIT_ULL(TDX_ATTR_PMT_PROF_BIT)
+#define TDX_ATTR_ICSSD_BIT 16
+#define TDX_ATTR_ICSSD BIT_ULL(TDX_ATTR_ICSSD_BIT)
+#define TDX_ATTR_LASS_BIT 27
+#define TDX_ATTR_LASS BIT_ULL(TDX_ATTR_LASS_BIT)
+#define TDX_ATTR_SEPT_VE_DISABLE_BIT 28
+#define TDX_ATTR_SEPT_VE_DISABLE BIT_ULL(TDX_ATTR_SEPT_VE_DISABLE_BIT)
+#define TDX_ATTR_MIGRTABLE_BIT 29
+#define TDX_ATTR_MIGRTABLE BIT_ULL(TDX_ATTR_MIGRTABLE_BIT)
+#define TDX_ATTR_PKS_BIT 30
+#define TDX_ATTR_PKS BIT_ULL(TDX_ATTR_PKS_BIT)
+#define TDX_ATTR_KL_BIT 31
+#define TDX_ATTR_KL BIT_ULL(TDX_ATTR_KL_BIT)
+#define TDX_ATTR_TPA_BIT 62
+#define TDX_ATTR_TPA BIT_ULL(TDX_ATTR_TPA_BIT)
+#define TDX_ATTR_PERFMON_BIT 63
+#define TDX_ATTR_PERFMON BIT_ULL(TDX_ATTR_PERFMON_BIT)
+
/* TDX TD-Scope Metadata. To be used by TDG.VM.WR and TDG.VM.RD */
#define TDCS_CONFIG_FLAGS 0x1110000300000016
#define TDCS_TD_CTLS 0x1110000300000017
@@ -29,8 +55,16 @@
#define TDCS_CONFIG_FLEXIBLE_PENDING_VE BIT_ULL(1)
/* TDCS_TD_CTLS bits */
-#define TD_CTLS_PENDING_VE_DISABLE BIT_ULL(0)
-#define TD_CTLS_ENUM_TOPOLOGY BIT_ULL(1)
+#define TD_CTLS_PENDING_VE_DISABLE_BIT 0
+#define TD_CTLS_PENDING_VE_DISABLE BIT_ULL(TD_CTLS_PENDING_VE_DISABLE_BIT)
+#define TD_CTLS_ENUM_TOPOLOGY_BIT 1
+#define TD_CTLS_ENUM_TOPOLOGY BIT_ULL(TD_CTLS_ENUM_TOPOLOGY_BIT)
+#define TD_CTLS_VIRT_CPUID2_BIT 2
+#define TD_CTLS_VIRT_CPUID2 BIT_ULL(TD_CTLS_VIRT_CPUID2_BIT)
+#define TD_CTLS_REDUCE_VE_BIT 3
+#define TD_CTLS_REDUCE_VE BIT_ULL(TD_CTLS_REDUCE_VE_BIT)
+#define TD_CTLS_LOCK_BIT 63
+#define TD_CTLS_LOCK BIT_ULL(TD_CTLS_LOCK_BIT)
/* TDX hypercall Leaf IDs */
#define TDVMCALL_MAP_GPA 0x10001
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 92e18798f197..e2fac21471f5 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -5,7 +5,7 @@
#include <uapi/asm/svm.h>
#include <uapi/asm/kvm.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
/*
* 32-bit intercept words in the VMCB Control Area, starting
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index eba178996d84..b4b16dafd55e 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -66,6 +66,9 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
u64 tdx_hcall_get_quote(u8 *buf, size_t size);
+void __init tdx_dump_attributes(u64 td_attr);
+void __init tdx_dump_td_ctls(u64 td_ctls);
+
#else
static inline void tdx_early_init(void) { };
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4d3c9d00d6b6..77f52bc1578a 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -20,22 +20,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
}
-/*
- * While x86 architecture in general requires an IPI to perform TLB
- * shootdown, enablement code for several hypervisors overrides
- * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
- * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
-{
- free_page_and_swap_cache(table);
-}
-
static inline void invlpg(unsigned long addr)
{
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
+
#endif /* _ASM_X86_TLB_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 69e79fff41b8..02fc2aa06e9e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -222,6 +222,7 @@ struct flush_tlb_info {
unsigned int initiating_cpu;
u8 stride_shift;
u8 freed_tables;
+ u8 trim_cpumask;
};
void flush_tlb_local(void);
diff --git a/arch/x86/include/uapi/asm/amd_hsmp.h b/arch/x86/include/uapi/asm/amd_hsmp.h
index 4a7cace06204..92d8f256d096 100644
--- a/arch/x86/include/uapi/asm/amd_hsmp.h
+++ b/arch/x86/include/uapi/asm/amd_hsmp.h
@@ -50,6 +50,12 @@ enum hsmp_message_ids {
HSMP_GET_METRIC_TABLE_VER, /* 23h Get metrics table version */
HSMP_GET_METRIC_TABLE, /* 24h Get metrics table */
HSMP_GET_METRIC_TABLE_DRAM_ADDR,/* 25h Get metrics table dram address */
+ HSMP_SET_XGMI_PSTATE_RANGE, /* 26h Set xGMI P-state range */
+ HSMP_CPU_RAIL_ISO_FREQ_POLICY, /* 27h Get/Set Cpu Iso frequency policy */
+ HSMP_DFC_ENABLE_CTRL, /* 28h Enable/Disable DF C-state */
+ HSMP_GET_RAPL_UNITS = 0x30, /* 30h Get scaling factor for energy */
+ HSMP_GET_RAPL_CORE_COUNTER, /* 31h Get core energy counter value */
+ HSMP_GET_RAPL_PACKAGE_COUNTER, /* 32h Get package energy counter value */
HSMP_MSG_ID_MAX,
};
@@ -65,6 +71,7 @@ enum hsmp_msg_type {
HSMP_RSVD = -1,
HSMP_SET = 0,
HSMP_GET = 1,
+ HSMP_SET_GET = 2,
};
enum hsmp_proto_versions {
@@ -72,7 +79,8 @@ enum hsmp_proto_versions {
HSMP_PROTO_VER3,
HSMP_PROTO_VER4,
HSMP_PROTO_VER5,
- HSMP_PROTO_VER6
+ HSMP_PROTO_VER6,
+ HSMP_PROTO_VER7
};
struct hsmp_msg_desc {
@@ -300,7 +308,7 @@ static const struct hsmp_msg_desc hsmp_msg_desc_table[]
* HSMP_SET_POWER_MODE, num_args = 1, response_sz = 0
* input: args[0] = power efficiency mode[2:0]
*/
- {1, 0, HSMP_SET},
+ {1, 1, HSMP_SET_GET},
/*
* HSMP_SET_PSTATE_MAX_MIN, num_args = 1, response_sz = 0
@@ -325,6 +333,58 @@ static const struct hsmp_msg_desc hsmp_msg_desc_table[]
* output: args[1] = upper 32 bits of the address
*/
{0, 2, HSMP_GET},
+
+ /*
+ * HSMP_SET_XGMI_PSTATE_RANGE, num_args = 1, response_sz = 0
+ * input: args[0] = min xGMI p-state[15:8] + max xGMI p-state[7:0]
+ */
+ {1, 0, HSMP_SET},
+
+ /*
+ * HSMP_CPU_RAIL_ISO_FREQ_POLICY, num_args = 1, response_sz = 1
+ * input: args[0] = set/get policy[31] +
+ * disable/enable independent control[0]
+ * output: args[0] = current policy[0]
+ */
+ {1, 1, HSMP_SET_GET},
+
+ /*
+ * HSMP_DFC_ENABLE_CTRL, num_args = 1, response_sz = 1
+ * input: args[0] = set/get policy[31] + enable/disable DFC[0]
+ * output: args[0] = current policy[0]
+ */
+ {1, 1, HSMP_SET_GET},
+
+ /* RESERVED(0x29-0x2f) */
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+ {0, 0, HSMP_RSVD},
+
+ /*
+ * HSMP_GET_RAPL_UNITS, response_sz = 1
+ * output: args[0] = tu value[19:16] + esu value[12:8]
+ */
+ {0, 1, HSMP_GET},
+
+ /*
+ * HSMP_GET_RAPL_CORE_COUNTER, num_args = 1, response_sz = 2
+ * input: args[0] = apic id[15:0]
+ * output: args[0] = lower 32 bits of energy
+ * output: args[1] = upper 32 bits of energy
+ */
+ {1, 2, HSMP_GET},
+
+ /*
+ * HSMP_GET_RAPL_PACKAGE_COUNTER, num_args = 0, response_sz = 2
+ * output: args[0] = lower 32 bits of energy
+ * output: args[1] = upper 32 bits of energy
+ */
+ {0, 2, HSMP_GET},
+
};
/* Metrics table (supported only with proto version 6) */
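
The new RAPL messages return raw counter values that only become meaningful once scaled by the energy unit reported by HSMP_GET_RAPL_UNITS (esu in bits [12:8] of args[0], tu in bits [19:16]). A small user-space style sketch of the conversion; treating esu as a 2^-esu joules-per-count factor follows the usual RAPL convention and is an assumption here, as is the helper name:

#include <stdint.h>

static double hsmp_rapl_to_joules(uint32_t units, uint32_t lo, uint32_t hi)
{
	unsigned int esu = (units >> 8) & 0x1f;		/* energy scaling unit, bits [12:8] */
	uint64_t raw = ((uint64_t)hi << 32) | lo;	/* args[0]/args[1] of the counter message */

	return (double)raw / (double)(1ULL << esu);
}
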
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 88585c1de416..9e75da97bce0 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -925,5 +925,6 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
+#define KVM_X86_TDX_VM 5
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3a44a9dc3fb7..dae6a73be40e 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -227,6 +227,28 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
}
static int __init
+acpi_check_lapic(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_local_apic *processor = NULL;
+
+ processor = (struct acpi_madt_local_apic *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ /* Ignore invalid ID */
+ if (processor->id == 0xff)
+ return 0;
+
+ /* Ignore processors that cannot be onlined */
+ if (!acpi_is_processor_usable(processor->lapic_flags))
+ return 0;
+
+ has_lapic_cpus = true;
+ return 0;
+}
+
+static int __init
acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic *processor = NULL;
@@ -257,7 +279,6 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
- has_lapic_cpus = true;
return 0;
}
@@ -911,11 +932,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* the resource tree during the lateinit timeframe.
*/
#define HPET_RESOURCE_NAME_SIZE 9
- hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
+ hpet_res = memblock_alloc_or_panic(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
SMP_CACHE_BYTES);
- if (!hpet_res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
@@ -1029,6 +1047,8 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
static int __init acpi_parse_madt_lapic_entries(void)
{
int count, x2count = 0;
+ struct acpi_subtable_proc madt_proc[2];
+ int ret;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
@@ -1037,10 +1057,27 @@ static int __init acpi_parse_madt_lapic_entries(void)
acpi_parse_sapic, MAX_LOCAL_APIC);
if (!count) {
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
- acpi_parse_lapic, MAX_LOCAL_APIC);
- x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
- acpi_parse_x2apic, MAX_LOCAL_APIC);
+ /* Check if there are valid LAPIC entries */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_check_lapic, MAX_LOCAL_APIC);
+
+ /*
+ * Enumerate the APIC IDs in the order that they appear in the
+ * MADT, no matter whether a LAPIC or an x2APIC entry is used.
+ */
+ memset(madt_proc, 0, sizeof(madt_proc));
+ madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
+ madt_proc[0].handler = acpi_parse_lapic;
+ madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
+ madt_proc[1].handler = acpi_parse_x2apic;
+ ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
+ if (ret < 0) {
+ pr_err("Error parsing LAPIC/X2APIC entries\n");
+ return ret;
+ }
+ count = madt_proc[0].count;
+ x2count = madt_proc[1].count;
}
if (!count && !x2count) {
pr_err("No LAPIC entries present\n");
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 243843e44e89..c71b575bf229 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1854,11 +1854,18 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
return temp_state;
}
+__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init unsigned long poking_addr;
+
static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
lockdep_assert_irqs_disabled();
+
switch_mm_irqs_off(NULL, prev_state.mm, current);
+ /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+
/*
* Restore the breakpoints if they were disabled before the temporary mm
* was loaded.
@@ -1867,9 +1874,6 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
hw_breakpoint_restore();
}
-__ro_after_init struct mm_struct *poking_mm;
-__ro_after_init unsigned long poking_addr;
-
static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 17af7ced2c73..eebc360ed1bb 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2503,9 +2503,7 @@ static struct resource * __init ioapic_setup_resources(void)
n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
n *= nr_ioapics;
- mem = memblock_alloc(n, SMP_CACHE_BYTES);
- if (!mem)
- panic("%s: Failed to allocate %lu bytes\n", __func__, n);
+ mem = memblock_alloc_or_panic(n, SMP_CACHE_BYTES);
res = (void *)mem;
mem += sizeof(struct resource) * nr_ioapics;
@@ -2564,11 +2562,8 @@ void __init io_apic_init_mappings(void)
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
- ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
+ ioapic_phys = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE,
PAGE_SIZE);
- if (!ioapic_phys)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
io_apic_set_fixmap(idx, ioapic_phys);
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index f17d16607882..8418a892d195 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -139,9 +139,15 @@ static bool skip_addr(void *dest)
return true;
#endif
#ifdef CONFIG_KEXEC_CORE
+# ifdef CONFIG_X86_64
+ if (dest >= (void *)__relocate_kernel_start &&
+ dest < (void *)__relocate_kernel_end)
+ return true;
+# else
if (dest >= (void *)relocate_kernel &&
dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
return true;
+# endif
#endif
return false;
}
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 704e9241b964..6cba85c79d42 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -49,7 +49,7 @@ static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);
#ifdef CONFIG_PROC_SYSCTL
-static struct ctl_table sld_sysctls[] = {
+static const struct ctl_table sld_sysctls[] = {
{
.procname = "split_lock_mitigate",
.data = &sysctl_sld_mitigate,
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index dc12fe5ef3ca..f285757618fc 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -19,7 +19,7 @@
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
#include <asm/idtentry.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 4893d30ce438..82b96ed9890a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1146,11 +1146,8 @@ void __init e820__reserve_resources(void)
struct resource *res;
u64 end;
- res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
+ res = memblock_alloc_or_panic(sizeof(*res) * e820_table->nr_entries,
SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*res) * e820_table->nr_entries);
e820_res = res;
for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 4b9d4557fc94..22c9ba305ac1 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -91,9 +91,11 @@ static inline bool check_la57_support(void)
return true;
}
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
+static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+ pmdval_t *pmd,
+ unsigned long p2v_offset)
{
- unsigned long vaddr, vaddr_end;
+ unsigned long paddr, paddr_end;
int i;
/* Encrypt the kernel and related (if SME is active) */
@@ -106,10 +108,10 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
* attribute.
*/
if (sme_get_me_mask()) {
- vaddr = (unsigned long)__start_bss_decrypted;
- vaddr_end = (unsigned long)__end_bss_decrypted;
+ paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted);
+ paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted);
- for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+ for (; paddr < paddr_end; paddr += PMD_SIZE) {
/*
* On SNP, transition the page to shared in the RMP table so that
* it is consistent with the page table attribute change.
@@ -118,11 +120,11 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
* mapping (kernel .text). PVALIDATE, by way of
* early_snp_set_memory_shared(), requires a valid virtual
* address but the kernel is currently running off of the identity
- * mapping so use __pa() to get a *currently* valid virtual address.
+ * mapping so use the PA to get a *currently* valid virtual address.
*/
- early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);
+ early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
- i = pmd_index(vaddr);
+ i = pmd_index(paddr - p2v_offset);
pmd[i] -= sme_get_me_mask();
}
}
@@ -138,12 +140,15 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
* doesn't have to generate PC-relative relocations when accessing globals from
* that function. Clang actually does not generate them, which leads to
* boot-time crashes. To work around this problem, every global pointer must
- * be accessed using RIP_REL_REF().
+ * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined
+ * by subtracting p2v_offset from the RIP-relative address.
*/
-unsigned long __head __startup_64(unsigned long physaddr,
+unsigned long __head __startup_64(unsigned long p2v_offset,
struct boot_params *bp)
{
pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
+ unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text);
+ unsigned long va_text, va_end;
unsigned long pgtable_flags;
unsigned long load_delta;
pgdval_t *pgd;
@@ -163,13 +168,16 @@ unsigned long __head __startup_64(unsigned long physaddr,
* Compute the delta between the address I am compiled to run at
* and the address I am actually running at.
*/
- load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+ load_delta = __START_KERNEL_map + p2v_offset;
RIP_REL_REF(phys_base) = load_delta;
/* Is the address not 2M aligned? */
if (load_delta & ~PMD_MASK)
for (;;);
+ va_text = physaddr - p2v_offset;
+ va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset;
+
/* Include the SME encryption mask in the fixup value */
load_delta += sme_get_me_mask();
@@ -178,7 +186,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
pgd = &RIP_REL_REF(early_top_pgt)->pgd;
pgd[pgd_index(__START_KERNEL_map)] += load_delta;
- if (la57) {
+ if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) {
p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
@@ -230,7 +238,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
pmd_entry += sme_get_me_mask();
pmd_entry += physaddr;
- for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+ for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
int idx = i + (physaddr >> PMD_SHIFT);
pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
@@ -255,11 +263,11 @@ unsigned long __head __startup_64(unsigned long physaddr,
pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;
/* invalidate pages before the kernel image */
- for (i = 0; i < pmd_index((unsigned long)_text); i++)
+ for (i = 0; i < pmd_index(va_text); i++)
pmd[i] &= ~_PAGE_PRESENT;
/* fixup pages that are part of the kernel image */
- for (; i <= pmd_index((unsigned long)_end); i++)
+ for (; i <= pmd_index(va_end); i++)
if (pmd[i] & _PAGE_PRESENT)
pmd[i] += load_delta;
@@ -267,7 +275,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
for (; i < PTRS_PER_PMD; i++)
pmd[i] &= ~_PAGE_PRESENT;
- return sme_postprocess_startup(bp, pmd);
+ return sme_postprocess_startup(bp, pmd, p2v_offset);
}
/* Wipe all early page tables except for the kernel symbol map */
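
With __startup_64() now taking a physical-to-virtual offset instead of the load address, every address translation in the function reduces to a single subtraction. The relations used above, written out (plain arithmetic restated from the patch, with an illustrative helper name):

/*
 * head_64.S passes:   p2v_offset = pa(common_startup_64) - va(common_startup_64)
 *
 * so for any symbol in the image:   va(sym) = pa(sym) - p2v_offset
 * and the page-table fixup delta:   load_delta = pa(_text) - (va(_text) - __START_KERNEL_map)
 *                                              = __START_KERNEL_map + p2v_offset
 */
static inline unsigned long image_pa_to_va(unsigned long pa, unsigned long p2v_offset)
{
	return pa - p2v_offset;
}
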
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 56163e2124cf..31345e0ba006 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -95,12 +95,18 @@ SYM_CODE_START_NOALIGN(startup_64)
call verify_cpu
/*
+ * Derive the kernel's physical-to-virtual offset from the physical and
+ * virtual addresses of common_startup_64().
+ */
+ leaq common_startup_64(%rip), %rdi
+ subq .Lcommon_startup_64(%rip), %rdi
+
+ /*
* Perform pagetable fixups. Additionally, if SME is active, encrypt
* the kernel and retrieve the modifier (SME encryption mask if SME
* is active) to be added to the initial pgdir entry that will be
* programmed into CR3.
*/
- leaq _text(%rip), %rdi
movq %r15, %rsi
call __startup_64
@@ -128,11 +134,11 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Branch to the common startup code at its kernel virtual address */
ANNOTATE_RETPOLINE_SAFE
- jmp *0f(%rip)
+ jmp *.Lcommon_startup_64(%rip)
SYM_CODE_END(startup_64)
__INITRODATA
-0: .quad common_startup_64
+SYM_DATA_LOCAL(.Lcommon_startup_64, .quad common_startup_64)
.text
SYM_CODE_START(secondary_startup_64)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9182303a50b0..7f4b2966e15c 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1382,12 +1382,6 @@ int hpet_set_periodic_freq(unsigned long freq)
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
-int hpet_rtc_dropped_irq(void)
-{
- return is_hpet_enabled();
-}
-EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
-
static void hpet_rtc_timer_reinit(void)
{
unsigned int delta;
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
index 257892fcefa7..b68d4be9464e 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -28,19 +28,19 @@ static ssize_t version_show(struct kobject *kobj,
static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version);
static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
memcpy(buf, (void *)&boot_params + off, count);
return count;
}
-static struct bin_attribute boot_params_data_attr = {
+static const struct bin_attribute boot_params_data_attr = {
.attr = {
.name = "data",
.mode = S_IRUGO,
},
- .read = boot_params_data_read,
+ .read_new = boot_params_data_read,
.size = sizeof(boot_params),
};
@@ -49,14 +49,14 @@ static struct attribute *boot_params_version_attrs[] = {
NULL,
};
-static struct bin_attribute *boot_params_data_attrs[] = {
+static const struct bin_attribute *const boot_params_data_attrs[] = {
&boot_params_data_attr,
NULL,
};
static const struct attribute_group boot_params_attr_group = {
.attrs = boot_params_version_attrs,
- .bin_attrs = boot_params_data_attrs,
+ .bin_attrs_new = boot_params_data_attrs,
};
static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr)
@@ -172,7 +172,7 @@ static ssize_t type_show(struct kobject *kobj,
static ssize_t setup_data_data_read(struct file *fp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf,
loff_t off, size_t count)
{
@@ -250,7 +250,7 @@ static struct bin_attribute data_attr __ro_after_init = {
.name = "data",
.mode = S_IRUGO,
},
- .read = setup_data_data_read,
+ .read_new = setup_data_data_read,
};
static struct attribute *setup_data_type_attrs[] = {
@@ -258,14 +258,14 @@ static struct attribute *setup_data_type_attrs[] = {
NULL,
};
-static struct bin_attribute *setup_data_data_attrs[] = {
+static const struct bin_attribute *const setup_data_data_attrs[] = {
&data_attr,
NULL,
};
static const struct attribute_group setup_data_attr_group = {
.attrs = setup_data_type_attrs,
- .bin_attrs = setup_data_data_attrs,
+ .bin_attrs_new = setup_data_data_attrs,
};
static int __init create_setup_data_node(struct kobject *parent,
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 1b373d79cedc..80265162aeff 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -160,15 +160,10 @@ void machine_kexec_cleanup(struct kimage *image)
*/
void machine_kexec(struct kimage *image)
{
+ relocate_kernel_fn *relocate_kernel_ptr;
unsigned long page_list[PAGES_NR];
void *control_page;
int save_ftrace_enabled;
- asmlinkage unsigned long
- (*relocate_kernel_ptr)(unsigned long indirection_page,
- unsigned long control_page,
- unsigned long start_address,
- unsigned int has_pae,
- unsigned int preserve_context);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
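
The open-coded function-pointer declaration is replaced by the relocate_kernel_fn type, which presumably lives in asm/kexec.h. Reconstructed from the declaration removed above, the 32-bit variant would look roughly like this (the 64-bit variant used later takes preserve_context and host_mem_enc_active instead of has_pae):

typedef asmlinkage unsigned long
relocate_kernel_fn(unsigned long indirection_page,
		   unsigned long control_page,
		   unsigned long start_address,
		   unsigned int has_pae,
		   unsigned int preserve_context);
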
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9c9ac606893e..a68f5a0a9f37 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -146,7 +146,8 @@ static void free_transition_pgtable(struct kimage *image)
image->arch.pte = NULL;
}
-static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+static int init_transition_pgtable(struct kimage *image, pgd_t *pgd,
+ unsigned long control_page)
{
pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
unsigned long vaddr, paddr;
@@ -156,8 +157,13 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
pmd_t *pmd;
pte_t *pte;
- vaddr = (unsigned long)relocate_kernel;
- paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
+ /*
+ * For the transition to the identity mapped page tables, the control
+ * code page also needs to be mapped at the virtual address it starts
+ * off running from.
+ */
+ vaddr = (unsigned long)__va(control_page);
+ paddr = control_page;
pgd += pgd_index(vaddr);
if (!pgd_present(*pgd)) {
p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
@@ -216,7 +222,7 @@ static void *alloc_pgt_page(void *data)
return p;
}
-static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+static int init_pgtable(struct kimage *image, unsigned long control_page)
{
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
@@ -225,12 +231,12 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
.kernpg_flag = _KERNPG_TABLE_NOENC,
};
unsigned long mstart, mend;
- pgd_t *level4p;
int result;
int i;
- level4p = (pgd_t *)__va(start_pgtable);
- clear_page(level4p);
+ image->arch.pgd = alloc_pgt_page(image);
+ if (!image->arch.pgd)
+ return -ENOMEM;
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
info.page_flag |= _PAGE_ENC;
@@ -244,8 +250,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
- result = kernel_ident_mapping_init(&info,
- level4p, mstart, mend);
+ result = kernel_ident_mapping_init(&info, image->arch.pgd,
+ mstart, mend);
if (result)
return result;
}
@@ -260,8 +266,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
mstart = image->segment[i].mem;
mend = mstart + image->segment[i].memsz;
- result = kernel_ident_mapping_init(&info,
- level4p, mstart, mend);
+ result = kernel_ident_mapping_init(&info, image->arch.pgd,
+ mstart, mend);
if (result)
return result;
@@ -271,15 +277,19 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
* Prepare EFI systab and ACPI tables for kexec kernel since they are
* not covered by pfn_mapped.
*/
- result = map_efi_systab(&info, level4p);
+ result = map_efi_systab(&info, image->arch.pgd);
if (result)
return result;
- result = map_acpi_tables(&info, level4p);
+ result = map_acpi_tables(&info, image->arch.pgd);
if (result)
return result;
- return init_transition_pgtable(image, level4p);
+ /*
+ * This must be last because the intermediate page table pages it
+ * allocates will not be control pages and may overlap the image.
+ */
+ return init_transition_pgtable(image, image->arch.pgd, control_page);
}
static void load_segments(void)
@@ -296,22 +306,35 @@ static void load_segments(void)
int machine_kexec_prepare(struct kimage *image)
{
- unsigned long start_pgtable;
+ void *control_page = page_address(image->control_code_page);
+ unsigned long reloc_start = (unsigned long)__relocate_kernel_start;
+ unsigned long reloc_end = (unsigned long)__relocate_kernel_end;
int result;
- /* Calculate the offsets */
- start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-
/* Setup the identity mapped 64bit page table */
- result = init_pgtable(image, start_pgtable);
+ result = init_pgtable(image, __pa(control_page));
if (result)
return result;
+ kexec_va_control_page = (unsigned long)control_page;
+ kexec_pa_table_page = (unsigned long)__pa(image->arch.pgd);
+
+ if (image->type == KEXEC_TYPE_DEFAULT)
+ kexec_pa_swap_page = page_to_pfn(image->swap_page) << PAGE_SHIFT;
+
+ __memcpy(control_page, __relocate_kernel_start, reloc_end - reloc_start);
+
+ set_memory_rox((unsigned long)control_page, 1);
return 0;
}
void machine_kexec_cleanup(struct kimage *image)
{
+ void *control_page = page_address(image->control_code_page);
+
+ set_memory_nx((unsigned long)control_page, 1);
+ set_memory_rw((unsigned long)control_page, 1);
+
free_transition_pgtable(image);
}
@@ -319,9 +342,10 @@ void machine_kexec_cleanup(struct kimage *image)
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
-void machine_kexec(struct kimage *image)
+void __nocfi machine_kexec(struct kimage *image)
{
- unsigned long page_list[PAGES_NR];
+ unsigned long reloc_start = (unsigned long)__relocate_kernel_start;
+ relocate_kernel_fn *relocate_kernel_ptr;
unsigned int host_mem_enc_active;
int save_ftrace_enabled;
void *control_page;
@@ -357,17 +381,13 @@ void machine_kexec(struct kimage *image)
#endif
}
- control_page = page_address(image->control_code_page) + PAGE_SIZE;
- __memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+ control_page = page_address(image->control_code_page);
- page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
- page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
- page_list[PA_TABLE_PAGE] =
- (unsigned long)__pa(page_address(image->control_code_page));
-
- if (image->type == KEXEC_TYPE_DEFAULT)
- page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
- << PAGE_SHIFT);
+ /*
+ * Allow for the possibility that relocate_kernel might not be at
+ * the very start of the page.
+ */
+ relocate_kernel_ptr = control_page + (unsigned long)relocate_kernel - reloc_start;
/*
* The segment registers are funny things, they have both a
@@ -388,11 +408,11 @@ void machine_kexec(struct kimage *image)
native_gdt_invalidate();
/* now call it */
- image->start = relocate_kernel((unsigned long)image->head,
- (unsigned long)page_list,
- image->start,
- image->preserve_context,
- host_mem_enc_active);
+ image->start = relocate_kernel_ptr((unsigned long)image->head,
+ virt_to_phys(control_page),
+ image->start,
+ image->preserve_context,
+ host_mem_enc_active);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -573,8 +593,7 @@ static void kexec_mark_crashkres(bool protect)
/* Don't touch the control code page used in crash_kexec().*/
control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
- /* Control code page is located in the 2nd page. */
- kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
+ kexec_mark_range(crashk_res.start, control - 1, protect);
control += KEXEC_CONTROL_PAGE_SIZE;
kexec_mark_range(control, crashk_res.end, protect);
}
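
Because the whole [__relocate_kernel_start, __relocate_kernel_end) blob is copied into the control page as one unit, any symbol inside it keeps its offset from the start of the blob, which is all the pointer arithmetic above relies on. The same idea expressed as a generic helper (illustrative only; machine_kexec() open-codes it for relocate_kernel):

static void *control_page_addr_of(void *control_page, void *sym)
{
	unsigned long offset = (unsigned long)sym -
			       (unsigned long)__relocate_kernel_start;

	return control_page + offset;
}
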
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 927e33e6843a..1ccaa3397a67 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -59,10 +59,20 @@ void __init native_pv_lock_init(void)
static_branch_enable(&virt_spin_lock_key);
}
+#ifndef CONFIG_PT_RECLAIM
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
- tlb_remove_page(tlb, table);
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
+#else
+static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ tlb_remove_table(tlb, table);
+}
+#endif
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 540443d699e3..b44d8863e57f 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -24,33 +24,30 @@
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
/*
- * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
- * ~ control_page + PAGE_SIZE are used as data storage and stack for
- * jumping back
+ * The .text..relocate_kernel and .data..relocate_kernel sections are copied
+ * into the control page, and the remainder of the page is used as the stack.
*/
-#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
+ .section .data..relocate_kernel,"a";
/* Minimal CPU state */
-#define RSP DATA(0x0)
-#define CR0 DATA(0x8)
-#define CR3 DATA(0x10)
-#define CR4 DATA(0x18)
-
-/* other data */
-#define CP_PA_TABLE_PAGE DATA(0x20)
-#define CP_PA_SWAP_PAGE DATA(0x28)
-#define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
-
- .text
- .align PAGE_SIZE
+SYM_DATA_LOCAL(saved_rsp, .quad 0)
+SYM_DATA_LOCAL(saved_cr0, .quad 0)
+SYM_DATA_LOCAL(saved_cr3, .quad 0)
+SYM_DATA_LOCAL(saved_cr4, .quad 0)
+ /* other data */
+SYM_DATA(kexec_va_control_page, .quad 0)
+SYM_DATA(kexec_pa_table_page, .quad 0)
+SYM_DATA(kexec_pa_swap_page, .quad 0)
+SYM_DATA_LOCAL(pa_backup_pages_map, .quad 0)
+
+ .section .text..relocate_kernel,"ax";
.code64
-SYM_CODE_START_NOALIGN(relocate_range)
SYM_CODE_START_NOALIGN(relocate_kernel)
UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR
/*
* %rdi indirection_page
- * %rsi page_list
+ * %rsi pa_control_page
* %rdx start address
* %rcx preserve_context
* %r8 host_mem_enc_active
@@ -65,60 +62,57 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
pushq %r15
pushf
- movq PTR(VA_CONTROL_PAGE)(%rsi), %r11
- movq %rsp, RSP(%r11)
- movq %cr0, %rax
- movq %rax, CR0(%r11)
- movq %cr3, %rax
- movq %rax, CR3(%r11)
- movq %cr4, %rax
- movq %rax, CR4(%r11)
-
- /* Save CR4. Required to enable the right paging mode later. */
- movq %rax, %r13
-
/* zero out flags, and disable interrupts */
pushq $0
popfq
- /* Save SME active flag */
- movq %r8, %r12
+ /* Switch to the identity mapped page tables */
+ movq %cr3, %rax
+ movq kexec_pa_table_page(%rip), %r9
+ movq %r9, %cr3
- /*
- * get physical address of control page now
- * this is impossible after page table switch
- */
- movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
+ /* Leave CR4 in %r13 to enable the right paging mode later. */
+ movq %cr4, %r13
- /* get physical address of page table now too */
- movq PTR(PA_TABLE_PAGE)(%rsi), %r9
+ /* Disable global pages immediately to ensure this mapping is RWX */
+ movq %r13, %r12
+ andq $~(X86_CR4_PGE), %r12
+ movq %r12, %cr4
- /* get physical address of swap page now */
- movq PTR(PA_SWAP_PAGE)(%rsi), %r10
+ /* Save %rsp and CRs. */
+ movq %r13, saved_cr4(%rip)
+ movq %rsp, saved_rsp(%rip)
+ movq %rax, saved_cr3(%rip)
+ movq %cr0, %rax
+ movq %rax, saved_cr0(%rip)
- /* save some information for jumping back */
- movq %r9, CP_PA_TABLE_PAGE(%r11)
- movq %r10, CP_PA_SWAP_PAGE(%r11)
- movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
+ /* save indirection list for jumping back */
+ movq %rdi, pa_backup_pages_map(%rip)
- /* Switch to the identity mapped page tables */
- movq %r9, %cr3
+ /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
+ movq %rcx, %r11
/* setup a new stack at the end of the physical control page */
- lea PAGE_SIZE(%r8), %rsp
+ lea PAGE_SIZE(%rsi), %rsp
/* jump to identity mapped page */
- addq $(identity_mapped - relocate_kernel), %r8
- pushq %r8
- ANNOTATE_UNRET_SAFE
- ret
- int3
+0: addq $identity_mapped - 0b, %rsi
+ subq $__relocate_kernel_start - 0b, %rsi
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rsi
SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
UNWIND_HINT_END_OF_STACK
- /* set return address to 0 if not preserving context */
- pushq $0
+ /*
+ * %rdi indirection page
+ * %rdx start address
+ * %r8 host_mem_enc_active
+ * %r9 page table page
+ * %r11 preserve_context
+ * %r13 original CR4 when relocate_kernel() was invoked
+ */
+
/* store the start address on the stack */
pushq %rdx
@@ -166,13 +160,11 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* entries that will conflict with the now unencrypted memory
* used by kexec. Flush the caches before copying the kernel.
*/
- testq %r12, %r12
+ testq %r8, %r8
jz .Lsme_off
wbinvd
.Lsme_off:
- /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
- movq %rcx, %r11
call swap_pages
/*
@@ -184,13 +176,14 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movq %cr3, %rax
movq %rax, %cr3
+ testq %r11, %r11 /* preserve_context */
+ jnz .Lrelocate
+
/*
* set all of the registers to known values
* leave %rsp alone
*/
- testq %r11, %r11
- jnz .Lrelocate
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
@@ -213,20 +206,34 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
.Lrelocate:
popq %rdx
+
+ /* Use the swap page for the callee's stack */
+ movq kexec_pa_swap_page(%rip), %r10
leaq PAGE_SIZE(%r10), %rsp
+
+ /* push the existing entry point onto the callee's stack */
+ pushq %rdx
+
ANNOTATE_RETPOLINE_SAFE
call *%rdx
/* get the re-entry point of the peer system */
- movq 0(%rsp), %rbp
- leaq relocate_kernel(%rip), %r8
- movq CP_PA_SWAP_PAGE(%r8), %r10
- movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
- movq CP_PA_TABLE_PAGE(%r8), %rax
+ popq %rbp
+ movq kexec_pa_swap_page(%rip), %r10
+ movq pa_backup_pages_map(%rip), %rdi
+ movq kexec_pa_table_page(%rip), %rax
movq %rax, %cr3
+
+ /* Find start (and end) of this physical mapping of control page */
+ leaq (%rip), %r8
+ ANNOTATE_NOENDBR
+ andq $PAGE_MASK, %r8
lea PAGE_SIZE(%r8), %rsp
+ movl $1, %r11d /* Ensure preserve_context flag is set */
call swap_pages
- movq $virtual_mapped, %rax
+ movq kexec_va_control_page(%rip), %rax
+0: addq $virtual_mapped - 0b, %rax
+ subq $__relocate_kernel_start - 0b, %rax
pushq %rax
ANNOTATE_UNRET_SAFE
ret
@@ -236,11 +243,11 @@ SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR // RET target, above
- movq RSP(%r8), %rsp
- movq CR4(%r8), %rax
+ movq saved_rsp(%rip), %rsp
+ movq saved_cr4(%rip), %rax
movq %rax, %cr4
- movq CR3(%r8), %rax
- movq CR0(%r8), %r8
+ movq saved_cr3(%rip), %rax
+ movq saved_cr0(%rip), %r8
movq %rax, %cr3
movq %r8, %cr0
@@ -250,6 +257,7 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
lgdt saved_context_gdt_desc(%rax)
#endif
+ /* relocate_kernel() returns the re-entry point for next time */
movq %rbp, %rax
popf
@@ -267,42 +275,49 @@ SYM_CODE_END(virtual_mapped)
/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
UNWIND_HINT_END_OF_STACK
+ /*
+ * %rdi indirection page
+ * %r11 preserve_context
+ */
movq %rdi, %rcx /* Put the indirection_page in %rcx */
xorl %edi, %edi
xorl %esi, %esi
- jmp 1f
+ jmp .Lstart /* Should start with an indirection record */
-0: /* top, read another word for the indirection page */
+.Lloop: /* top, read another word for the indirection page */
movq (%rbx), %rcx
addq $8, %rbx
-1:
+.Lstart:
testb $0x1, %cl /* is it a destination page? */
- jz 2f
+ jz .Lnotdest
movq %rcx, %rdi
andq $0xfffffffffffff000, %rdi
- jmp 0b
-2:
+ jmp .Lloop
+.Lnotdest:
testb $0x2, %cl /* is it an indirection page? */
- jz 2f
+ jz .Lnotind
movq %rcx, %rbx
andq $0xfffffffffffff000, %rbx
- jmp 0b
-2:
+ jmp .Lloop
+.Lnotind:
testb $0x4, %cl /* is it the done indicator? */
- jz 2f
- jmp 3f
-2:
+ jz .Lnotdone
+ jmp .Ldone
+.Lnotdone:
testb $0x8, %cl /* is it the source indicator? */
- jz 0b /* Ignore it otherwise */
+ jz .Lloop /* Ignore it otherwise */
movq %rcx, %rsi /* For every source page do a copy */
andq $0xfffffffffffff000, %rsi
movq %rdi, %rdx /* Save destination page to %rdx */
movq %rsi, %rax /* Save source page to %rax */
+ testq %r11, %r11 /* Only actually swap for ::preserve_context */
+ jz .Lnoswap
+
/* copy source page to swap page */
- movq %r10, %rdi
+ movq kexec_pa_swap_page(%rip), %rdi
movl $512, %ecx
rep ; movsq
@@ -314,17 +329,15 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
/* copy swap page to destination page */
movq %rdx, %rdi
- movq %r10, %rsi
+ movq kexec_pa_swap_page(%rip), %rsi
+.Lnoswap:
movl $512, %ecx
rep ; movsq
lea PAGE_SIZE(%rax), %rsi
- jmp 0b
-3:
+ jmp .Lloop
+.Ldone:
ANNOTATE_UNRET_SAFE
ret
int3
SYM_CODE_END(swap_pages)
-
- .skip KEXEC_CONTROL_CODE_MAX_SIZE - (. - relocate_kernel), 0xcc
-SYM_CODE_END(relocate_range);
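
swap_pages walks the kexec indirection list: the low bits of each entry encode its type (the tests for 0x1, 0x2, 0x4 and 0x8 above correspond to IND_DESTINATION, IND_INDIRECTION, IND_DONE and IND_SOURCE from include/linux/kexec.h) and the remaining bits hold a page-aligned physical address. The same walk in C, leaving out the preserve_context dance through the swap page (a sketch for illustration, not code from the patch):

static void walk_indirection(unsigned long head)
{
	unsigned long *entry = NULL;
	unsigned long val = head;	/* image->head is the first record */
	unsigned long dest = 0;

	for (;;) {
		unsigned long addr = val & PAGE_MASK;

		if (val & IND_DESTINATION)		/* 0x1: set the copy target */
			dest = addr;
		else if (val & IND_INDIRECTION)		/* 0x2: chain to a page of records */
			entry = (unsigned long *)addr;
		else if (val & IND_DONE)		/* 0x4: end of the list */
			break;
		else if (val & IND_SOURCE) {		/* 0x8: copy one source page */
			copy_page((void *)dest, (void *)addr);
			dest += PAGE_SIZE;
		}

		val = *entry++;
	}
}
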
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f1fea506e20f..cebee310e200 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -259,6 +259,7 @@ static void __init relocate_initrd(void)
u64 ramdisk_image = get_ramdisk_image();
u64 ramdisk_size = get_ramdisk_size();
u64 area_size = PAGE_ALIGN(ramdisk_size);
+ int ret = 0;
/* We need to move the initrd down into directly mapped mem */
u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
@@ -272,7 +273,9 @@ static void __init relocate_initrd(void)
printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
- copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
+ ret = copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
+ if (ret)
+ panic("Copy RAMDISK failed\n");
printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
" [mem %#010llx-%#010llx]\n",
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e9e803a4d44c..e6cc84143f3e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -246,9 +246,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
/* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */
if (v.flags & VM86_SCREEN_BITMAP) {
- char comm[TASK_COMM_LEN];
-
- pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current));
+ pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n",
+ current->comm);
return -EINVAL;
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6a17396c8174..0deb4887d6e9 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>
+#include <asm/kexec.h>
#undef i386 /* in case the preprocessor is a 32bit one */
@@ -95,7 +96,19 @@ const_pcpu_hot = pcpu_hot;
#define BSS_DECRYPTED
#endif
-
+#if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE)
+#define KEXEC_RELOCATE_KERNEL \
+ . = ALIGN(0x100); \
+ __relocate_kernel_start = .; \
+ *(.text..relocate_kernel); \
+ *(.data..relocate_kernel); \
+ __relocate_kernel_end = .;
+
+ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX_SIZE,
+ "relocate_kernel code too large!")
+#else
+#define KEXEC_RELOCATE_KERNEL
+#endif
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(6); /* RW_ */
@@ -121,19 +134,6 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
_stext = .;
- /* bootstrapping code */
- HEAD_TEXT
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- SOFTIRQENTRY_TEXT
-#ifdef CONFIG_MITIGATION_RETPOLINE
- *(.text..__x86.indirect_thunk)
- *(.text..__x86.return_thunk)
-#endif
- STATIC_CALL_TEXT
-
ALIGN_ENTRY_TEXT_BEGIN
*(.text..__x86.rethunk_untrain)
ENTRY_TEXT
@@ -147,10 +147,26 @@ SECTIONS
*(.text..__x86.rethunk_safe)
#endif
ALIGN_ENTRY_TEXT_END
+
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ SOFTIRQENTRY_TEXT
+#ifdef CONFIG_MITIGATION_RETPOLINE
+ *(.text..__x86.indirect_thunk)
+ *(.text..__x86.return_thunk)
+#endif
+ STATIC_CALL_TEXT
*(.gnu.warning)
} :text = 0xcccccccc
+ /* bootstrapping code */
+ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
+ HEAD_TEXT
+ } :text = 0xcccccccc
+
/* End of text section, which should occupy whole number of pages */
_etext = .;
. = ALIGN(PAGE_SIZE);
@@ -181,6 +197,7 @@ SECTIONS
DATA_DATA
CONSTRUCTORS
+ KEXEC_RELOCATE_KERNEL
/* rarely changed data like cpu maps */
READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f7e222953cab..8eb3a88707f2 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -82,15 +82,6 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
return ret;
}
-#define F feature_bit
-
-/* Scattered Flag - For features that are scattered by cpufeatures.h. */
-#define SF(name) \
-({ \
- BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
- (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \
-})
-
/*
* Magic value used by KVM when querying userspace-provided CPUID entries and
 * doesn't care about the CPUID index because the index of the function in
@@ -100,8 +91,8 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
*/
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
-static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
- struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
+static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu,
+ u32 function, u64 index)
{
struct kvm_cpuid_entry2 *e;
int i;
@@ -118,8 +109,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
*/
lockdep_assert_irqs_enabled();
- for (i = 0; i < nent; i++) {
- e = &entries[i];
+ for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
+ e = &vcpu->arch.cpuid_entries[i];
if (e->function != function)
continue;
@@ -151,9 +142,27 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
return NULL;
}
-static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
- struct kvm_cpuid_entry2 *entries,
- int nent)
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
+ u32 function, u32 index)
+{
+ return cpuid_entry2_find(vcpu, function, index);
+}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
+
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function)
+{
+ return cpuid_entry2_find(vcpu, function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+
+/*
+ * cpuid_entry2_find() and KVM_CPUID_INDEX_NOT_SIGNIFICANT should never be used
+ * directly outside of kvm_find_cpuid_entry() and kvm_find_cpuid_entry_index().
+ */
+#undef KVM_CPUID_INDEX_NOT_SIGNIFICANT
+
+static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
u64 xfeatures;
@@ -162,8 +171,7 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
* The existing code assumes virtual address is 48-bit or 57-bit in the
* canonical address checks; exit if it is ever changed.
*/
- best = cpuid_entry2_find(entries, nent, 0x80000008,
- KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008);
if (best) {
int vaddr_bits = (best->eax & 0xff00) >> 8;
@@ -175,7 +183,7 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
* Exposing dynamic xfeatures to the guest requires additional
* enabling in the FPU, e.g. to expand the guest XSAVE state size.
*/
- best = cpuid_entry2_find(entries, nent, 0xd, 0);
+ best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
if (!best)
return 0;
@@ -187,6 +195,8 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}
+static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu);
+
/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
int nent)
@@ -194,6 +204,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
struct kvm_cpuid_entry2 *orig;
int i;
+ /*
+ * Apply runtime CPUID updates to the incoming CPUID entries to avoid
+ * false positives due mismatches on KVM-owned feature flags.
+ *
+ * Note! @e2 and @nent track the _old_ CPUID entries!
+ */
+ kvm_update_cpuid_runtime(vcpu);
+ kvm_apply_cpuid_pv_features_quirk(vcpu);
+
if (nent != vcpu->arch.cpuid_nent)
return -EINVAL;
@@ -210,15 +229,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
return 0;
}
-static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
- int nent, const char *sig)
+static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
+ const char *sig)
{
struct kvm_hypervisor_cpuid cpuid = {};
struct kvm_cpuid_entry2 *entry;
u32 base;
for_each_possible_hypervisor_cpuid_base(base) {
- entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+ entry = kvm_find_cpuid_entry(vcpu, base);
if (entry) {
u32 signature[3];
@@ -238,118 +257,90 @@ static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_e
return cpuid;
}
-static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
- const char *sig)
+static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu)
{
- return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent, sig);
-}
-
-static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
- int nent, u32 kvm_cpuid_base)
-{
- return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
- KVM_CPUID_INDEX_NOT_SIGNIFICANT);
-}
-
-static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
-{
- u32 base = vcpu->arch.kvm_cpuid.base;
+ struct kvm_hypervisor_cpuid kvm_cpuid;
+ struct kvm_cpuid_entry2 *best;
- if (!base)
- return NULL;
+ kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
+ if (!kvm_cpuid.base)
+ return 0;
- return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent, base);
-}
+ best = kvm_find_cpuid_entry(vcpu, kvm_cpuid.base | KVM_CPUID_FEATURES);
+ if (!best)
+ return 0;
-void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
+ if (kvm_hlt_in_guest(vcpu->kvm))
+ best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
- /*
- * save the feature bitmap to avoid cpuid lookup for every PV
- * operation
- */
- if (best)
- vcpu->arch.pv_cpuid.features = best->eax;
+ return best->eax;
}
/*
* Calculate guest's supported XCR0 taking into account guest CPUID data and
* KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
*/
-static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
+static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
- best = cpuid_entry2_find(entries, nent, 0xd, 0);
+ best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
if (!best)
return 0;
return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}
-static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
- int nent)
+static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature,
+ bool has_feature)
+{
+ cpuid_entry_change(entry, x86_feature, has_feature);
+ guest_cpu_cap_change(vcpu, x86_feature, has_feature);
+}
+
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
- struct kvm_hypervisor_cpuid kvm_cpuid;
- best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+ best = kvm_find_cpuid_entry(vcpu, 1);
if (best) {
- /* Update OSXSAVE bit */
- if (boot_cpu_has(X86_FEATURE_XSAVE))
- cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
+ kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSXSAVE,
kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
- cpuid_entry_change(best, X86_FEATURE_APIC,
- vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
+ kvm_update_feature_runtime(vcpu, best, X86_FEATURE_APIC,
+ vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
+
+ if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
+ kvm_update_feature_runtime(vcpu, best, X86_FEATURE_MWAIT,
+ vcpu->arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_MWAIT);
}
- best = cpuid_entry2_find(entries, nent, 7, 0);
- if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
- cpuid_entry_change(best, X86_FEATURE_OSPKE,
- kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
+ best = kvm_find_cpuid_entry_index(vcpu, 7, 0);
+ if (best)
+ kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSPKE,
+ kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
+
- best = cpuid_entry2_find(entries, nent, 0xD, 0);
+ best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0);
if (best)
best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
- best = cpuid_entry2_find(entries, nent, 0xD, 1);
+ best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1);
if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
-
- kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
- if (kvm_cpuid.base) {
- best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
- if (kvm_hlt_in_guest(vcpu->kvm) && best)
- best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
- }
-
- if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
- best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
- if (best)
- cpuid_entry_change(best, X86_FEATURE_MWAIT,
- vcpu->arch.ia32_misc_enable_msr &
- MSR_IA32_MISC_ENABLE_MWAIT);
- }
-}
-
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
-{
- __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
-static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
+static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_HYPERV
struct kvm_cpuid_entry2 *entry;
- entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
- KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE);
return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
#else
return false;
@@ -368,15 +359,71 @@ static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
}
-static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+/*
+ * This isn't truly "unsafe", but except for the cpu_caps initialization code,
+ * all register lookups should use __cpuid_entry_get_reg(), which provides
+ * compile-time validation of the input.
+ */
+static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
+{
+ switch (reg) {
+ case CPUID_EAX:
+ return entry->eax;
+ case CPUID_EBX:
+ return entry->ebx;
+ case CPUID_ECX:
+ return entry->ecx;
+ case CPUID_EDX:
+ return entry->edx;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
+ bool include_partially_emulated);
+
+void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
+ struct kvm_cpuid_entry2 *entry;
bool allow_gbpages;
+ int i;
+
+ memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps));
+ BUILD_BUG_ON(ARRAY_SIZE(reverse_cpuid) != NR_KVM_CPU_CAPS);
- BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
- bitmap_zero(vcpu->arch.governed_features.enabled,
- KVM_MAX_NR_GOVERNED_FEATURES);
+ /*
+ * Reset guest capabilities to userspace's guest CPUID definition, i.e.
+ * honor userspace's definition for features that don't require KVM or
+ * hardware management/support (or that KVM simply doesn't care about).
+ */
+ for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
+ const struct cpuid_reg cpuid = reverse_cpuid[i];
+ struct kvm_cpuid_entry2 emulated;
+
+ if (!cpuid.function)
+ continue;
+
+ entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
+ if (!entry)
+ continue;
+
+ cpuid_func_emulated(&emulated, cpuid.function, true);
+
+ /*
+ * A vCPU has a feature if it's supported by KVM and is enabled
+ * in guest CPUID. Note, this includes features that are
+ * supported by KVM but aren't advertised to userspace!
+ */
+ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] |
+ cpuid_get_reg_unsafe(&emulated, cpuid.reg);
+ vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
+ }
+
+ kvm_update_cpuid_runtime(vcpu);
/*
* If TDP is enabled, let the guest use GBPAGES if they're supported in
@@ -390,9 +437,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* and can install smaller shadow pages if the host lacks 1GiB support.
*/
allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
- if (allow_gbpages)
- kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
+ guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES);
+ guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES, allow_gbpages);
best = kvm_find_cpuid_entry(vcpu, 1);
if (best && apic) {
@@ -404,21 +450,22 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- vcpu->arch.guest_supported_xcr0 =
- cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+ vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu);
- kvm_update_pv_runtime(vcpu);
+ vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);
vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
kvm_pmu_refresh(vcpu);
- vcpu->arch.cr4_guest_rsvd_bits =
- __cr4_reserved_bits(guest_cpuid_has, vcpu);
- kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent));
+#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
+ vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_) |
+ __cr4_reserved_bits(guest_cpu_cap_has, vcpu);
+#undef __kvm_cpu_cap_has
+
+ kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu));
/* Invoke the vendor callback only after the above state is updated. */
kvm_x86_call(vcpu_after_set_cpuid)(vcpu);
@@ -457,9 +504,25 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
int nent)
{
+ u32 vcpu_caps[NR_KVM_CPU_CAPS];
int r;
- __kvm_update_cpuid_runtime(vcpu, e2, nent);
+ /*
+ * Swap the existing (old) entries with the incoming (new) entries in
+ * order to massage the new entries, e.g. to account for dynamic bits
+ * that KVM controls, without clobbering the current guest CPUID, which
+ * KVM needs to preserve in order to unwind on failure.
+ *
+ * Similarly, save the vCPU's current cpu_caps so that the capabilities
+ * can be updated alongside the CPUID entries when performing runtime
+ * updates. Full initialization is done if and only if the vCPU hasn't
+ * run, i.e. only if userspace is potentially changing CPUID features.
+ */
+ swap(vcpu->arch.cpuid_entries, e2);
+ swap(vcpu->arch.cpuid_nent, nent);
+
+ memcpy(vcpu_caps, vcpu->arch.cpu_caps, sizeof(vcpu_caps));
+ BUILD_BUG_ON(sizeof(vcpu_caps) != sizeof(vcpu->arch.cpu_caps));
/*
* KVM does not correctly handle changing guest CPUID after KVM_RUN, as
@@ -475,35 +538,36 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
if (kvm_vcpu_has_run(vcpu)) {
r = kvm_cpuid_check_equal(vcpu, e2, nent);
if (r)
- return r;
-
- kvfree(e2);
- return 0;
+ goto err;
+ goto success;
}
#ifdef CONFIG_KVM_HYPERV
- if (kvm_cpuid_has_hyperv(e2, nent)) {
+ if (kvm_cpuid_has_hyperv(vcpu)) {
r = kvm_hv_vcpu_init(vcpu);
if (r)
- return r;
+ goto err;
}
#endif
- r = kvm_check_cpuid(vcpu, e2, nent);
+ r = kvm_check_cpuid(vcpu);
if (r)
- return r;
-
- kvfree(vcpu->arch.cpuid_entries);
- vcpu->arch.cpuid_entries = e2;
- vcpu->arch.cpuid_nent = nent;
+ goto err;
- vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
#ifdef CONFIG_KVM_XEN
vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
kvm_vcpu_after_set_cpuid(vcpu);
+success:
+ kvfree(e2);
return 0;
+
+err:
+ memcpy(vcpu->arch.cpu_caps, vcpu_caps, sizeof(vcpu_caps));
+ swap(vcpu->arch.cpuid_entries, e2);
+ swap(vcpu->arch.cpuid_nent, nent);
+ return r;
}
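The swap-then-unwind flow in kvm_set_cpuid() installs the new entries up front so the runtime-update and validation helpers can operate on vCPU state directly, while remaining reversible if validation fails. A standalone sketch of that pattern with hypothetical types (not KVM's structures; SWAP uses GNU C __typeof__ like the kernel's swap()):

#include <stdlib.h>
#include <string.h>

#define SWAP(a, b) do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

struct ctx {
        int *entries;
        int nent;
        unsigned int caps[4];   /* derived state, a la vcpu->arch.cpu_caps */
};

/* Hypothetical stand-in for kvm_check_cpuid(): may fail after the swap. */
static int validate(struct ctx *c)
{
        return c->nent > 0 ? 0 : -1;
}

static int set_entries(struct ctx *c, int *e, int nent)
{
        unsigned int saved_caps[4];
        int r;

        /* Install the new entries first so the helpers can operate on @c... */
        SWAP(c->entries, e);
        SWAP(c->nent, nent);
        /* ...and snapshot the derived state so it can be restored on error. */
        memcpy(saved_caps, c->caps, sizeof(saved_caps));

        r = validate(c);
        if (r)
                goto err;

        free(e);                        /* @e now holds the *old* entries */
        return 0;
err:
        memcpy(c->caps, saved_caps, sizeof(saved_caps));
        SWAP(c->entries, e);
        SWAP(c->nent, nent);
        return r;
}

int main(void)
{
        struct ctx c = { 0 };
        int *e = calloc(2, sizeof(*e));
        int r = e ? set_entries(&c, e, 2) : -1;

        free(c.entries);
        return r ? 1 : 0;
}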
/* when an old userspace process fills a new kernel module */
@@ -590,107 +654,294 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
return 0;
}
-/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
-static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
+static __always_inline u32 raw_cpuid_get(struct cpuid_reg cpuid)
{
- const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
struct kvm_cpuid_entry2 entry;
+ u32 base;
- reverse_cpuid_check(leaf);
+ /*
+ * KVM only supports features defined by Intel (0x0), AMD (0x80000000),
+ * and Centaur (0xc0000000). WARN if a feature for a new vendor base is
+ * defined, as this and other code would need to be updated.
+ */
+ base = cpuid.function & 0xffff0000;
+ if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000))
+ return 0;
+
+ if (cpuid_eax(base) < cpuid.function)
+ return 0;
cpuid_count(cpuid.function, cpuid.index,
&entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
- kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
+ return *__cpuid_entry_get_reg(&entry, cpuid.reg);
}
-static __always_inline
-void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
-{
- /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
- BUILD_BUG_ON(leaf < NCAPINTS);
+/*
+ * For kernel-defined leafs, mask KVM's supported feature set with the kernel's
+ * capabilities as well as raw CPUID. For KVM-defined leafs, consult only raw
+ * CPUID, as KVM is the one and only authority (in the kernel).
+ */
+#define kvm_cpu_cap_init(leaf, feature_initializers...) \
+do { \
+ const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); \
+ const u32 __maybe_unused kvm_cpu_cap_init_in_progress = leaf; \
+ const u32 *kernel_cpu_caps = boot_cpu_data.x86_capability; \
+ u32 kvm_cpu_cap_passthrough = 0; \
+ u32 kvm_cpu_cap_synthesized = 0; \
+ u32 kvm_cpu_cap_emulated = 0; \
+ u32 kvm_cpu_cap_features = 0; \
+ \
+ feature_initializers \
+ \
+ kvm_cpu_caps[leaf] = kvm_cpu_cap_features; \
+ \
+ if (leaf < NCAPINTS) \
+ kvm_cpu_caps[leaf] &= kernel_cpu_caps[leaf]; \
+ \
+ kvm_cpu_caps[leaf] |= kvm_cpu_cap_passthrough; \
+ kvm_cpu_caps[leaf] &= (raw_cpuid_get(cpuid) | \
+ kvm_cpu_cap_synthesized); \
+ kvm_cpu_caps[leaf] |= kvm_cpu_cap_emulated; \
+} while (0)
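Spelled out, the masking order in kvm_cpu_cap_init() is: start from the declared features, AND with the kernel's capability word (kernel-defined leafs only), OR in passthrough bits, AND with (raw CPUID | synthesized), then OR in emulated bits. A tiny sketch with made-up mask values, just to show the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int declared    = 0x0000001fu;  /* features listed via F() & co.        */
        unsigned int kernel      = 0x0000000fu;  /* boot_cpu_data word (kernel leaf)     */
        unsigned int raw         = 0x00000013u;  /* what hardware CPUID reports          */
        unsigned int passthrough = 0x00000020u;  /* forced in regardless of kernel use   */
        unsigned int synthesized = 0x00000008u;  /* kept even if absent from raw CPUID   */
        unsigned int emulated    = 0x00000040u;  /* provided purely in software          */

        unsigned int caps = declared;
        caps &= kernel;                 /* only for leafs the kernel tracks */
        caps |= passthrough;
        caps &= raw | synthesized;
        caps |= emulated;

        printf("resulting cap word: %#010x\n", caps);   /* 0x4b with these values */
        return 0;
}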
- kvm_cpu_caps[leaf] = mask;
+/*
+ * Assert that the feature bit being declared, e.g. via F(), is in the CPUID
+ * word that's being initialized. Exempt 0x8000_0001.EDX usage of 0x1.EDX
+ * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX.
+ */
+#define KVM_VALIDATE_CPU_CAP_USAGE(name) \
+do { \
+ u32 __leaf = __feature_leaf(X86_FEATURE_##name); \
+ \
+ BUILD_BUG_ON(__leaf != kvm_cpu_cap_init_in_progress); \
+} while (0)
+
+#define F(name) \
+({ \
+ KVM_VALIDATE_CPU_CAP_USAGE(name); \
+ kvm_cpu_cap_features |= feature_bit(name); \
+})
- __kvm_cpu_cap_mask(leaf);
-}
+/* Scattered Flag - For features that are scattered by cpufeatures.h. */
+#define SCATTERED_F(name) \
+({ \
+ BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
+ KVM_VALIDATE_CPU_CAP_USAGE(name); \
+ if (boot_cpu_has(X86_FEATURE_##name)) \
+ F(name); \
+})
-static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
-{
- /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
- BUILD_BUG_ON(leaf >= NCAPINTS);
+/* Features that KVM supports only on 64-bit kernels. */
+#define X86_64_F(name) \
+({ \
+ KVM_VALIDATE_CPU_CAP_USAGE(name); \
+ if (IS_ENABLED(CONFIG_X86_64)) \
+ F(name); \
+})
- kvm_cpu_caps[leaf] &= mask;
+/*
+ * Emulated Feature - For features that KVM emulates in software irrespective
+ * of host CPU/kernel support.
+ */
+#define EMULATED_F(name) \
+({ \
+ kvm_cpu_cap_emulated |= feature_bit(name); \
+ F(name); \
+})
- __kvm_cpu_cap_mask(leaf);
-}
+/*
+ * Synthesized Feature - For features that are synthesized into boot_cpu_data,
+ * i.e. may not be present in the raw CPUID, but can still be advertised to
+ * userspace. Primarily used for mitigation related feature flags.
+ */
+#define SYNTHESIZED_F(name) \
+({ \
+ kvm_cpu_cap_synthesized |= feature_bit(name); \
+ F(name); \
+})
+
+/*
+ * Passthrough Feature - For features that KVM supports based purely on raw
+ * hardware CPUID, i.e. that KVM virtualizes even if the host kernel doesn't
+ * use the feature. Simply force set the feature in KVM's capabilities; raw
+ * CPUID support is still factored in by kvm_cpu_cap_init().
+ */
+#define PASSTHROUGH_F(name) \
+({ \
+ kvm_cpu_cap_passthrough |= feature_bit(name); \
+ F(name); \
+})
+
+/*
+ * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of
+ * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001.
+ */
+#define ALIASED_1_EDX_F(name) \
+({ \
+ BUILD_BUG_ON(__feature_leaf(X86_FEATURE_##name) != CPUID_1_EDX); \
+ BUILD_BUG_ON(kvm_cpu_cap_init_in_progress != CPUID_8000_0001_EDX); \
+ kvm_cpu_cap_features |= feature_bit(name); \
+})
+
+/*
+ * Vendor Features - For features that KVM supports, but that are only set
+ * later by vendor code because they require additional vendor enabling.
+ */
+#define VENDOR_F(name) \
+({ \
+ KVM_VALIDATE_CPU_CAP_USAGE(name); \
+})
+
+/*
+ * Runtime Features - For features that KVM dynamically sets/clears at runtime,
+ * e.g. when CR4 changes, but which are never advertised to userspace.
+ */
+#define RUNTIME_F(name) \
+({ \
+ KVM_VALIDATE_CPU_CAP_USAGE(name); \
+})
+
+/*
+ * Undefine the MSR bit macro to avoid token concatenation issues when
+ * processing X86_FEATURE_SPEC_CTRL_SSBD.
+ */
+#undef SPEC_CTRL_SSBD
+
+/* DS is defined by ptrace-abi.h on 32-bit builds. */
+#undef DS
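How the feature classes feed those per-leaf accumulators can be seen with stripped-down stand-ins for the macros (C99 variadic macro, illustration only; the real CPUID plumbing is omitted):

#include <stdio.h>

static unsigned int raw_cpuid(void) { return 0x3u; }    /* pretend hardware word */

/* Simplified stand-ins for kvm_cpu_cap_init() and friends. */
#define CAP_INIT(word, ...) do {                                \
        unsigned int features = 0, emulated = 0;                \
        __VA_ARGS__;                                            \
        (word) = (features & raw_cpuid()) | emulated;           \
} while (0)

#define F(bit)          (features |= 1u << (bit))
#define EMULATED_F(bit) (emulated |= 1u << (bit), F(bit))
#define VENDOR_F(bit)   ((void)(bit))   /* validated only; set later by vendor code */

int main(void)
{
        unsigned int word;

        CAP_INIT(word,
                 F(0),          /* kept: "hardware" reports bit 0        */
                 F(2),          /* dropped: hardware lacks bit 2         */
                 EMULATED_F(3), /* kept even without hardware support    */
                 VENDOR_F(1));  /* left clear until vendor code opts in  */

        printf("%#x\n", word);  /* prints 0x9 */
        return 0;
}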
void kvm_set_cpu_caps(void)
{
-#ifdef CONFIG_X86_64
- unsigned int f_gbpages = F(GBPAGES);
- unsigned int f_lm = F(LM);
- unsigned int f_xfd = F(XFD);
-#else
- unsigned int f_gbpages = 0;
- unsigned int f_lm = 0;
- unsigned int f_xfd = 0;
-#endif
memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
sizeof(boot_cpu_data.x86_capability));
- memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
- sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
-
- kvm_cpu_cap_mask(CPUID_1_ECX,
+ kvm_cpu_cap_init(CPUID_1_ECX,
+ F(XMM3),
+ F(PCLMULQDQ),
+ VENDOR_F(DTES64),
/*
* NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
- * advertised to guests via CPUID!
+ * advertised to guests via CPUID! MWAIT is also technically a
+ * runtime flag thanks to IA32_MISC_ENABLE; mark it as such so
+ * that KVM is aware that it's a known, unadvertised flag.
*/
- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
- 0 /* DS-CPL, VMX, SMX, EST */ |
- 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
- F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
- F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
- F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
- 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
- F(F16C) | F(RDRAND)
+ RUNTIME_F(MWAIT),
+ /* DS-CPL */
+ VENDOR_F(VMX),
+ /* SMX, EST */
+ /* TM2 */
+ F(SSSE3),
+ /* CNXT-ID */
+ /* Reserved */
+ F(FMA),
+ F(CX16),
+ /* xTPR Update */
+ F(PDCM),
+ F(PCID),
+ /* Reserved, DCA */
+ F(XMM4_1),
+ F(XMM4_2),
+ EMULATED_F(X2APIC),
+ F(MOVBE),
+ F(POPCNT),
+ EMULATED_F(TSC_DEADLINE_TIMER),
+ F(AES),
+ F(XSAVE),
+ RUNTIME_F(OSXSAVE),
+ F(AVX),
+ F(F16C),
+ F(RDRAND),
+ EMULATED_F(HYPERVISOR),
);
- /* KVM emulates x2apic in software irrespective of host support. */
- kvm_cpu_cap_set(X86_FEATURE_X2APIC);
-
- kvm_cpu_cap_mask(CPUID_1_EDX,
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
- 0 /* Reserved, DS, ACPI */ | F(MMX) |
- F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
- 0 /* HTT, TM, Reserved, PBE */
+
+ kvm_cpu_cap_init(CPUID_1_EDX,
+ F(FPU),
+ F(VME),
+ F(DE),
+ F(PSE),
+ F(TSC),
+ F(MSR),
+ F(PAE),
+ F(MCE),
+ F(CX8),
+ F(APIC),
+ /* Reserved */
+ F(SEP),
+ F(MTRR),
+ F(PGE),
+ F(MCA),
+ F(CMOV),
+ F(PAT),
+ F(PSE36),
+ /* PSN */
+ F(CLFLUSH),
+ /* Reserved */
+ VENDOR_F(DS),
+ /* ACPI */
+ F(MMX),
+ F(FXSR),
+ F(XMM),
+ F(XMM2),
+ F(SELFSNOOP),
+ /* HTT, TM, Reserved, PBE */
);
- kvm_cpu_cap_mask(CPUID_7_0_EBX,
- F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
- F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
- F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
- F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
- F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
- F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
- F(AVX512VL));
-
- kvm_cpu_cap_mask(CPUID_7_ECX,
- F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
- F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
- F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
- F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
- F(SGX_LC) | F(BUS_LOCK_DETECT)
+ kvm_cpu_cap_init(CPUID_7_0_EBX,
+ F(FSGSBASE),
+ EMULATED_F(TSC_ADJUST),
+ F(SGX),
+ F(BMI1),
+ F(HLE),
+ F(AVX2),
+ F(FDP_EXCPTN_ONLY),
+ F(SMEP),
+ F(BMI2),
+ F(ERMS),
+ F(INVPCID),
+ F(RTM),
+ F(ZERO_FCS_FDS),
+ VENDOR_F(MPX),
+ F(AVX512F),
+ F(AVX512DQ),
+ F(RDSEED),
+ F(ADX),
+ F(SMAP),
+ F(AVX512IFMA),
+ F(CLFLUSHOPT),
+ F(CLWB),
+ VENDOR_F(INTEL_PT),
+ F(AVX512PF),
+ F(AVX512ER),
+ F(AVX512CD),
+ F(SHA_NI),
+ F(AVX512BW),
+ F(AVX512VL),
+ );
+
+ kvm_cpu_cap_init(CPUID_7_ECX,
+ F(AVX512VBMI),
+ PASSTHROUGH_F(LA57),
+ F(PKU),
+ RUNTIME_F(OSPKE),
+ F(RDPID),
+ F(AVX512_VPOPCNTDQ),
+ F(UMIP),
+ F(AVX512_VBMI2),
+ F(GFNI),
+ F(VAES),
+ F(VPCLMULQDQ),
+ F(AVX512_VNNI),
+ F(AVX512_BITALG),
+ F(CLDEMOTE),
+ F(MOVDIRI),
+ F(MOVDIR64B),
+ VENDOR_F(WAITPKG),
+ F(SGX_LC),
+ F(BUS_LOCK_DETECT),
);
- /* Set LA57 based on hardware capability. */
- if (cpuid_ecx(7) & F(LA57))
- kvm_cpu_cap_set(X86_FEATURE_LA57);
/*
* PKU not yet implemented for shadow paging and requires OSPKE
@@ -699,18 +950,25 @@ void kvm_set_cpu_caps(void)
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
kvm_cpu_cap_clear(X86_FEATURE_PKU);
- kvm_cpu_cap_mask(CPUID_7_EDX,
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
- F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
- F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
- F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
+ kvm_cpu_cap_init(CPUID_7_EDX,
+ F(AVX512_4VNNIW),
+ F(AVX512_4FMAPS),
+ F(SPEC_CTRL),
+ F(SPEC_CTRL_SSBD),
+ EMULATED_F(ARCH_CAPABILITIES),
+ F(INTEL_STIBP),
+ F(MD_CLEAR),
+ F(AVX512_VP2INTERSECT),
+ F(FSRM),
+ F(SERIALIZE),
+ F(TSXLDTRK),
+ F(AVX512_FP16),
+ F(AMX_TILE),
+ F(AMX_INT8),
+ F(AMX_BF16),
+ F(FLUSH_L1D),
);
- /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
- kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
- kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
-
if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) &&
boot_cpu_has(X86_FEATURE_AMD_IBPB) &&
boot_cpu_has(X86_FEATURE_AMD_IBRS))
@@ -720,65 +978,133 @@ void kvm_set_cpu_caps(void)
if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
- kvm_cpu_cap_mask(CPUID_7_1_EAX,
- F(SHA512) | F(SM3) | F(SM4) | F(AVX_VNNI) | F(AVX512_BF16) |
- F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | F(AMX_FP16) |
- F(AVX_IFMA) | F(LAM)
+ kvm_cpu_cap_init(CPUID_7_1_EAX,
+ F(SHA512),
+ F(SM3),
+ F(SM4),
+ F(AVX_VNNI),
+ F(AVX512_BF16),
+ F(CMPCCXADD),
+ F(FZRM),
+ F(FSRS),
+ F(FSRC),
+ F(AMX_FP16),
+ F(AVX_IFMA),
+ F(LAM),
);
- kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
- F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(AMX_COMPLEX) |
- F(AVX_VNNI_INT16) | F(PREFETCHITI) | F(AVX10)
+ kvm_cpu_cap_init(CPUID_7_1_EDX,
+ F(AVX_VNNI_INT8),
+ F(AVX_NE_CONVERT),
+ F(AMX_COMPLEX),
+ F(AVX_VNNI_INT16),
+ F(PREFETCHITI),
+ F(AVX10),
);
- kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
- F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
- F(BHI_CTRL) | F(MCDT_NO)
+ kvm_cpu_cap_init(CPUID_7_2_EDX,
+ F(INTEL_PSFD),
+ F(IPRED_CTRL),
+ F(RRSBA_CTRL),
+ F(DDPD_U),
+ F(BHI_CTRL),
+ F(MCDT_NO),
);
- kvm_cpu_cap_mask(CPUID_D_1_EAX,
- F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
+ kvm_cpu_cap_init(CPUID_D_1_EAX,
+ F(XSAVEOPT),
+ F(XSAVEC),
+ F(XGETBV1),
+ F(XSAVES),
+ X86_64_F(XFD),
);
- kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
- SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
+ kvm_cpu_cap_init(CPUID_12_EAX,
+ SCATTERED_F(SGX1),
+ SCATTERED_F(SGX2),
+ SCATTERED_F(SGX_EDECCSSA),
);
- kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX,
- F(AVX10_128) | F(AVX10_256) | F(AVX10_512)
+ kvm_cpu_cap_init(CPUID_24_0_EBX,
+ F(AVX10_128),
+ F(AVX10_256),
+ F(AVX10_512),
);
- kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
- F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
- F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
- F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
- 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
- F(TOPOEXT) | 0 /* PERFCTR_CORE */
+ kvm_cpu_cap_init(CPUID_8000_0001_ECX,
+ F(LAHF_LM),
+ F(CMP_LEGACY),
+ VENDOR_F(SVM),
+ /* ExtApicSpace */
+ F(CR8_LEGACY),
+ F(ABM),
+ F(SSE4A),
+ F(MISALIGNSSE),
+ F(3DNOWPREFETCH),
+ F(OSVW),
+ /* IBS */
+ F(XOP),
+ /* SKINIT, WDT, LWP */
+ F(FMA4),
+ F(TBM),
+ F(TOPOEXT),
+ VENDOR_F(PERFCTR_CORE),
);
- kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* Reserved */ |
- F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
- 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
+ kvm_cpu_cap_init(CPUID_8000_0001_EDX,
+ ALIASED_1_EDX_F(FPU),
+ ALIASED_1_EDX_F(VME),
+ ALIASED_1_EDX_F(DE),
+ ALIASED_1_EDX_F(PSE),
+ ALIASED_1_EDX_F(TSC),
+ ALIASED_1_EDX_F(MSR),
+ ALIASED_1_EDX_F(PAE),
+ ALIASED_1_EDX_F(MCE),
+ ALIASED_1_EDX_F(CX8),
+ ALIASED_1_EDX_F(APIC),
+ /* Reserved */
+ F(SYSCALL),
+ ALIASED_1_EDX_F(MTRR),
+ ALIASED_1_EDX_F(PGE),
+ ALIASED_1_EDX_F(MCA),
+ ALIASED_1_EDX_F(CMOV),
+ ALIASED_1_EDX_F(PAT),
+ ALIASED_1_EDX_F(PSE36),
+ /* Reserved */
+ F(NX),
+ /* Reserved */
+ F(MMXEXT),
+ ALIASED_1_EDX_F(MMX),
+ ALIASED_1_EDX_F(FXSR),
+ F(FXSR_OPT),
+ X86_64_F(GBPAGES),
+ F(RDTSCP),
+ /* Reserved */
+ X86_64_F(LM),
+ F(3DNOWEXT),
+ F(3DNOW),
);
if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
- kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
- SF(CONSTANT_TSC)
+ kvm_cpu_cap_init(CPUID_8000_0007_EDX,
+ SCATTERED_F(CONSTANT_TSC),
);
- kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
- F(CLZERO) | F(XSAVEERPTR) |
- F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
- F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
- F(AMD_PSFD) | F(AMD_IBPB_RET)
+ kvm_cpu_cap_init(CPUID_8000_0008_EBX,
+ F(CLZERO),
+ F(XSAVEERPTR),
+ F(WBNOINVD),
+ F(AMD_IBPB),
+ F(AMD_IBRS),
+ F(AMD_SSBD),
+ F(VIRT_SSBD),
+ F(AMD_SSB_NO),
+ F(AMD_STIBP),
+ F(AMD_STIBP_ALWAYS_ON),
+ F(AMD_PSFD),
+ F(AMD_IBPB_RET),
);
/*
@@ -808,50 +1134,73 @@ void kvm_set_cpu_caps(void)
!boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
- /*
- * Hide all SVM features by default, SVM will set the cap bits for
- * features it emulates and/or exposes for L1.
- */
- kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
-
- kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
- 0 /* SME */ | 0 /* SEV */ | 0 /* VM_PAGE_FLUSH */ | 0 /* SEV_ES */ |
- F(SME_COHERENT));
+ /* All SVM features require additional vendor module enabling. */
+ kvm_cpu_cap_init(CPUID_8000_000A_EDX,
+ VENDOR_F(NPT),
+ VENDOR_F(VMCBCLEAN),
+ VENDOR_F(FLUSHBYASID),
+ VENDOR_F(NRIPS),
+ VENDOR_F(TSCRATEMSR),
+ VENDOR_F(V_VMSAVE_VMLOAD),
+ VENDOR_F(LBRV),
+ VENDOR_F(PAUSEFILTER),
+ VENDOR_F(PFTHRESHOLD),
+ VENDOR_F(VGIF),
+ VENDOR_F(VNMI),
+ VENDOR_F(SVME_ADDR_CHK),
+ );
- kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
- F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
- F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
- F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO)
+ kvm_cpu_cap_init(CPUID_8000_001F_EAX,
+ VENDOR_F(SME),
+ VENDOR_F(SEV),
+ /* VM_PAGE_FLUSH */
+ VENDOR_F(SEV_ES),
+ F(SME_COHERENT),
);
- kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
- kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
- kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
+ kvm_cpu_cap_init(CPUID_8000_0021_EAX,
+ F(NO_NESTED_DATA_BP),
+ /*
+ * Synthesize "LFENCE is serializing" into the AMD-defined entry
+ * in KVM's supported CPUID, i.e. if the feature is reported as
+ * supported by the kernel. LFENCE_RDTSC was a Linux-defined
+ * synthetic feature long before AMD joined the bandwagon, e.g.
+ * LFENCE is serializing on most CPUs that support SSE2. On
+ * CPUs that don't support AMD's leaf, ANDing with the raw host
+ * CPUID would drop the flag (hence the synthesized override), and
+ * reporting support in AMD's leaf makes it easier for userspace
+ * to detect the feature.
+ */
+ SYNTHESIZED_F(LFENCE_RDTSC),
+ /* SmmPgCfgLock */
+ F(NULL_SEL_CLR_BASE),
+ F(AUTOIBRS),
+ EMULATED_F(NO_SMM_CTL_MSR),
+ /* PrefetchCtlMsr */
+ F(WRMSR_XX_BASE_NS),
+ SYNTHESIZED_F(SBPB),
+ SYNTHESIZED_F(IBPB_BRTYPE),
+ SYNTHESIZED_F(SRSO_NO),
+ F(SRSO_USER_KERNEL_NO),
+ );
- kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
- F(PERFMON_V2)
+ kvm_cpu_cap_init(CPUID_8000_0022_EAX,
+ F(PERFMON_V2),
);
- /*
- * Synthesize "LFENCE is serializing" into the AMD-defined entry in
- * KVM's supported CPUID if the feature is reported as supported by the
- * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
- * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
- * CPUs that support SSE2. On CPUs that don't support AMD's leaf,
- * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
- * the mask with the raw host CPUID, and reporting support in AMD's
- * leaf can make it easier for userspace to detect the feature.
- */
- if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
- kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
- kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
- kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
- F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
- F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
- F(PMM) | F(PMM_EN)
+ kvm_cpu_cap_init(CPUID_C000_0001_EDX,
+ F(XSTORE),
+ F(XSTORE_EN),
+ F(XCRYPT),
+ F(XCRYPT_EN),
+ F(ACE2),
+ F(ACE2_EN),
+ F(PHE),
+ F(PHE_EN),
+ F(PMM),
+ F(PMM_EN),
);
/*
@@ -871,6 +1220,16 @@ void kvm_set_cpu_caps(void)
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
+#undef F
+#undef SCATTERED_F
+#undef X86_64_F
+#undef EMULATED_F
+#undef SYNTHESIZED_F
+#undef PASSTHROUGH_F
+#undef ALIASED_1_EDX_F
+#undef VENDOR_F
+#undef RUNTIME_F
+
struct kvm_cpuid_array {
struct kvm_cpuid_entry2 *entries;
int maxnent;
@@ -928,14 +1287,11 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
return entry;
}
-static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
+static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
+ bool include_partially_emulated)
{
- struct kvm_cpuid_entry2 *entry;
-
- if (array->nent >= array->maxnent)
- return -E2BIG;
+ memset(entry, 0, sizeof(*entry));
- entry = &array->entries[array->nent];
entry->function = func;
entry->index = 0;
entry->flags = 0;
@@ -943,23 +1299,37 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
switch (func) {
case 0:
entry->eax = 7;
- ++array->nent;
- break;
+ return 1;
case 1:
- entry->ecx = F(MOVBE);
- ++array->nent;
- break;
+ entry->ecx = feature_bit(MOVBE);
+ /*
+ * KVM allows userspace to enumerate MONITOR+MWAIT support to
+ * the guest, but the MWAIT feature flag is never advertised
+ * to userspace because MONITOR+MWAIT aren't virtualized by
+ * hardware, can't be faithfully emulated in software (KVM
+ * emulates them as NOPs), and allowing the guest to execute
+ * them natively requires enabling a per-VM capability.
+ */
+ if (include_partially_emulated)
+ entry->ecx |= feature_bit(MWAIT);
+ return 1;
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
entry->eax = 0;
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
- entry->ecx = F(RDPID);
- ++array->nent;
- break;
+ entry->ecx = feature_bit(RDPID);
+ return 1;
default:
- break;
+ return 0;
}
+}
+
+static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
+{
+ if (array->nent >= array->maxnent)
+ return -E2BIG;
+ array->nent += cpuid_func_emulated(&array->entries[array->nent], func, false);
return 0;
}
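Splitting cpuid_func_emulated() out of the array-filling wrapper makes the "fill one entry, return how many were written" shape reusable, with the MWAIT bit added only for callers that ask for partially emulated features. A sketch of that shape with a hypothetical entry type (bit positions are illustrative only):

#include <stdio.h>
#include <string.h>

struct entry { unsigned int function, eax, ecx; };

/* Fill @e for @func; returns the number of entries written (0 or 1). */
static int fill_emulated(struct entry *e, unsigned int func, int partial)
{
        memset(e, 0, sizeof(*e));
        e->function = func;

        switch (func) {
        case 0:
                e->eax = 7;
                return 1;
        case 1:
                e->ecx = 1u << 22;              /* say, MOVBE */
                if (partial)
                        e->ecx |= 1u << 3;      /* say, MWAIT */
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        struct entry ents[8];
        int nent = 0, maxnent = 8;
        unsigned int funcs[] = { 0, 1, 2 };

        for (unsigned int i = 0; i < sizeof(funcs) / sizeof(funcs[0]); i++) {
                if (nent >= maxnent)
                        return 1;               /* -E2BIG in the kernel */
                nent += fill_emulated(&ents[nent], funcs[i], 0);
        }
        printf("emulated entries: %d\n", nent); /* 2 */
        return 0;
}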
@@ -1103,7 +1473,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out;
cpuid_entry_override(entry, CPUID_D_1_EAX);
- if (entry->eax & (F(XSAVES)|F(XSAVEC)))
+ if (entry->eax & (feature_bit(XSAVES) | feature_bit(XSAVEC)))
entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
true);
else {
@@ -1540,22 +1910,6 @@ out_free:
return r;
}
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
- u32 function, u32 index)
-{
- return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
- function, index);
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function)
-{
- return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
- function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-
/*
* Intel CPUID semantics treats any query for an out-of-range leaf as if the
* highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
@@ -1648,10 +2002,10 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
u64 data;
if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
(data & TSX_CTRL_CPUID_CLEAR))
- *ebx &= ~(F(RTM) | F(HLE));
+ *ebx &= ~(feature_bit(RTM) | feature_bit(HLE));
} else if (function == 0x80000007) {
if (kvm_hv_invtsc_suppressed(vcpu))
- *edx &= ~SF(CONSTANT_TSC);
+ *edx &= ~feature_bit(CONSTANT_TSC);
}
} else {
*eax = *ebx = *ecx = *edx = 0;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f16a7b2c2adc..67d80aa72d50 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -10,8 +10,8 @@
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);
+void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
-void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -67,41 +67,40 @@ static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
*reg = kvm_cpu_caps[leaf];
}
-static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
struct kvm_cpuid_entry2 *entry;
+ u32 *reg;
+
+ /*
+ * XSAVES is a special snowflake. Due to lack of a dedicated intercept
+ * on SVM, KVM must assume that XSAVES (and thus XRSTORS) is usable by
+ * the guest if the host supports XSAVES and *XSAVE* is exposed to the
+ * guest. Because the guest can execute XSAVES and XRSTORS, i.e. can
+ * indirectly consume XSS, KVM must ensure XSS is zeroed when running
+ * the guest, i.e. must set XSAVES in vCPU capabilities. But to reject
+ * direct XSS reads and writes (to minimize the virtualization hole and
+ * honor userspace's CPUID), KVM needs to check the raw guest CPUID,
+ * not KVM's view of guest capabilities.
+ *
+ * For all other features, guest capabilities are accurate. Expand
+ * this allowlist with extreme vigilance.
+ */
+ BUILD_BUG_ON(x86_feature != X86_FEATURE_XSAVES);
entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
if (!entry)
return NULL;
- return __cpuid_entry_get_reg(entry, cpuid.reg);
-}
-
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
-{
- u32 *reg;
-
- reg = guest_cpuid_get_register(vcpu, x86_feature);
+ reg = __cpuid_entry_get_reg(entry, cpuid.reg);
if (!reg)
return false;
return *reg & __feature_bit(x86_feature);
}
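The BUILD_BUG_ON() above turns guest_cpuid_has() into a compile-time allowlist with exactly one member (XSAVES); everything else must use guest_cpu_cap_has(). A generic sketch of such an allowlist using a constant-expression check (hypothetical feature values; the kernel relies on BUILD_BUG_ON in an always-inlined function instead):

#include <stdio.h>

#define FEATURE_XSAVES  5       /* made-up value */
#define FEATURE_OTHER   3

static int raw_lookup(int feature)
{
        return feature & 1;     /* stand-in for walking the raw CPUID entries */
}

/* Only FEATURE_XSAVES may bypass the cached capability words. */
#define GUEST_RAW_HAS(feature)                                          \
        ((void)sizeof(char[(feature) == FEATURE_XSAVES ? 1 : -1]),      \
         raw_lookup(feature))

int main(void)
{
        printf("%d\n", GUEST_RAW_HAS(FEATURE_XSAVES));
        /* GUEST_RAW_HAS(FEATURE_OTHER) would fail to compile. */
        return 0;
}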
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
-{
- u32 *reg;
-
- reg = guest_cpuid_get_register(vcpu, x86_feature);
- if (reg)
- *reg &= ~__feature_bit(x86_feature);
-}
-
static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
return vcpu->arch.is_amd_compatible;
@@ -150,21 +149,6 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
return x86_stepping(best->eax);
}
-static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
-{
- return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
- guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
- guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
- guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
-}
-
-static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
-{
- return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
- guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
- guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
-}
-
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
@@ -180,7 +164,6 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
unsigned int x86_leaf = __feature_leaf(x86_feature);
- reverse_cpuid_check(x86_leaf);
kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}
@@ -188,7 +171,6 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
unsigned int x86_leaf = __feature_leaf(x86_feature);
- reverse_cpuid_check(x86_leaf);
kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}
@@ -196,7 +178,6 @@ static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
unsigned int x86_leaf = __feature_leaf(x86_feature);
- reverse_cpuid_check(x86_leaf);
return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}
@@ -220,58 +201,61 @@ static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}
-enum kvm_governed_features {
-#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
-#include "governed_features.h"
- KVM_NR_GOVERNED_FEATURES
-};
-
-static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- switch (x86_feature) {
-#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
-#include "governed_features.h"
- default:
- return -1;
- }
-}
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
-static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
-{
- return kvm_governed_feature_index(x86_feature) >= 0;
+ vcpu->arch.cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}
-static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_clear(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
- __set_bit(kvm_governed_feature_index(x86_feature),
- vcpu->arch.governed_features.enabled);
+ vcpu->arch.cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}
-static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature,
+ bool guest_has_cap)
{
- if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
- kvm_governed_feature_set(vcpu, x86_feature);
+ if (guest_has_cap)
+ guest_cpu_cap_set(vcpu, x86_feature);
+ else
+ guest_cpu_cap_clear(vcpu, x86_feature);
}
-static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
+static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
- return test_bit(kvm_governed_feature_index(x86_feature),
- vcpu->arch.governed_features.enabled);
+ return vcpu->arch.cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}
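With these helpers, per-vCPU capabilities are plain bitmap words indexed by leaf, which is what lets the governed-features bookkeeping below be deleted. A minimal sketch of the same set/clear/change/query pattern on a flat array (made-up leaf/bit layout, not the kernel's reverse-CPUID tables):

#include <stdio.h>
#include <stdbool.h>

#define NR_CAP_WORDS    4
#define LEAF(f)         ((f) / 32)
#define BIT_OF(f)       (1u << ((f) % 32))

struct vcpu { unsigned int caps[NR_CAP_WORDS]; };

static void cap_set(struct vcpu *v, unsigned int f)   { v->caps[LEAF(f)] |=  BIT_OF(f); }
static void cap_clear(struct vcpu *v, unsigned int f) { v->caps[LEAF(f)] &= ~BIT_OF(f); }
static bool cap_has(struct vcpu *v, unsigned int f)   { return v->caps[LEAF(f)] & BIT_OF(f); }

static void cap_change(struct vcpu *v, unsigned int f, bool has)
{
        if (has)
                cap_set(v, f);
        else
                cap_clear(v, f);
}

int main(void)
{
        struct vcpu v = { 0 };
        unsigned int FEATURE_GBPAGES = 35;      /* made-up: word 1, bit 3 */

        /* e.g. mirror the "allow_gbpages" decision seen earlier in the patch */
        cap_change(&v, FEATURE_GBPAGES, true);
        printf("%d\n", cap_has(&v, FEATURE_GBPAGES));
        return 0;
}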
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
- if (guest_can_use(vcpu, X86_FEATURE_LAM))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}
+static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
+{
+ return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_STIBP) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_SSBD));
+}
+
+static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
+{
+ return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB));
+}
+
#endif
diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h
deleted file mode 100644
index ad463b1ed4e4..000000000000
--- a/arch/x86/kvm/governed_features.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(KVM_GOVERNED_FEATURE) || defined(KVM_GOVERNED_X86_FEATURE)
-BUILD_BUG()
-#endif
-
-#define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x)
-
-KVM_GOVERNED_X86_FEATURE(GBPAGES)
-KVM_GOVERNED_X86_FEATURE(XSAVES)
-KVM_GOVERNED_X86_FEATURE(VMX)
-KVM_GOVERNED_X86_FEATURE(NRIPS)
-KVM_GOVERNED_X86_FEATURE(TSCRATEMSR)
-KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD)
-KVM_GOVERNED_X86_FEATURE(LBRV)
-KVM_GOVERNED_X86_FEATURE(PAUSEFILTER)
-KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD)
-KVM_GOVERNED_X86_FEATURE(VGIF)
-KVM_GOVERNED_X86_FEATURE(VNMI)
-KVM_GOVERNED_X86_FEATURE(LAM)
-
-#undef KVM_GOVERNED_X86_FEATURE
-#undef KVM_GOVERNED_FEATURE
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4f0a94346d00..6a6dd5a84f22 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1352,7 +1352,7 @@ static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
return;
if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
- !guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVEC))
return;
pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 10495fffb890..73072585e164 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,8 @@ struct x86_instruction_info {
#define X86EMUL_CMPXCHG_FAILED 4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */
+/* Emulation during event vectoring is unhandleable. */
+#define X86EMUL_UNHANDLEABLE_VECTORING 7
/* x86-specific emulation flags */
#define X86EMUL_F_WRITE BIT(0)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3c83951c619e..a009c94c26c2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -598,7 +598,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
* version first and level-triggered interrupts never get EOIed in
* IOAPIC.
*/
- if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_X2APIC) &&
!ioapic_in_kernel(vcpu->kvm))
v |= APIC_LVR_DIRECTED_EOI;
kvm_lapic_set_reg(apic, APIC_LVR, v);
@@ -734,10 +734,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
if (unlikely(apic->apicv_active)) {
- /* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
- kvm_x86_call(hwapic_irr_update)(apic->vcpu,
- apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -763,7 +760,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* just set SVI.
*/
if (unlikely(apic->apicv_active))
- kvm_x86_call(hwapic_isr_update)(vec);
+ kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -808,7 +805,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(apic->apicv_active))
- kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
+ kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
else {
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
@@ -816,6 +813,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
}
}
+void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
+ return;
+
+ kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);
+
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
/* This may race with setting of irr in __apic_accept_irq() and
@@ -2357,7 +2365,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVTT:
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
- val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
+ val &= (apic_lvt_mask[LVT_TIMER] | apic->lapic_timer.timer_mode_mask);
kvm_lapic_set_reg(apic, APIC_LVTT, val);
apic_update_lvtt(apic);
break;
@@ -2634,7 +2642,7 @@ int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
return 0;
u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
- (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
+ (guest_cpu_cap_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
if ((value & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
return 1;
@@ -2805,8 +2813,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
apic_update_ppr(apic);
if (apic->apicv_active) {
kvm_x86_call(apicv_post_state_restore)(vcpu);
- kvm_x86_call(hwapic_irr_update)(vcpu, -1);
- kvm_x86_call(hwapic_isr_update)(-1);
+ kvm_x86_call(hwapic_isr_update)(vcpu, -1);
}
vcpu->arch.apic_arb_prio = 0;
@@ -3121,9 +3128,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_update_apicv(vcpu);
if (apic->apicv_active) {
kvm_x86_call(apicv_post_state_restore)(vcpu);
- kvm_x86_call(hwapic_irr_update)(vcpu,
- apic_find_highest_irr(apic));
- kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
+ kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
if (ioapic_in_kernel(vcpu->kvm))
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 24add38beaf0..1a8553ebdb42 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -118,6 +118,7 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e9322358678b..050a0e229a4d 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -104,6 +104,15 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
+ /*
+ * Checking root.hpa is sufficient even when KVM has a mirror root.
+ * We can have either:
+ * (1) mirror_root_hpa = INVALID_PAGE, root.hpa = INVALID_PAGE
+ * (2) mirror_root_hpa = root, root.hpa = INVALID_PAGE
+ * (3) mirror_root_hpa = root1, root.hpa = root2
+ * We don't ever have:
+ * mirror_root_hpa = INVALID_PAGE, root.hpa = root
+ */
if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
return 0;
@@ -126,7 +135,7 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
- if (!guest_can_use(vcpu, X86_FEATURE_LAM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
return 0;
return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
@@ -287,4 +296,26 @@ static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
return gpa;
return translate_nested_gpa(vcpu, gpa, access, exception);
}
+
+static inline bool kvm_has_mirrored_tdp(const struct kvm *kvm)
+{
+ return kvm->arch.vm_type == KVM_X86_TDX_VM;
+}
+
+static inline gfn_t kvm_gfn_direct_bits(const struct kvm *kvm)
+{
+ return kvm->arch.gfn_direct_bits;
+}
+
+static inline bool kvm_is_addr_direct(struct kvm *kvm, gpa_t gpa)
+{
+ gpa_t gpa_direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(kvm));
+
+ return !gpa_direct_bits || (gpa & gpa_direct_bits);
+}
+
+static inline bool kvm_is_gfn_alias(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn & kvm_gfn_direct_bits(kvm);
+}
#endif
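The direct-bits helpers treat the TDX shared bit as a per-VM GPA mask: a GPA is "direct" if the mask is zero or the bit is set, and GFNs handed to memslots must have the alias bit stripped. A sketch with a made-up shared-bit position (not the real TDX layout):

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long gpa_t;
typedef unsigned long long gfn_t;

#define PAGE_SHIFT      12
#define gpa_to_gfn(g)   ((g) >> PAGE_SHIFT)
#define gfn_to_gpa(g)   ((g) << PAGE_SHIFT)

struct vm { gfn_t gfn_direct_bits; };   /* 0 for normal VMs */

static bool is_addr_direct(struct vm *vm, gpa_t gpa)
{
        gpa_t direct = gfn_to_gpa(vm->gfn_direct_bits);

        return !direct || (gpa & direct);
}

int main(void)
{
        /* Assume a made-up GPA space with the shared-alias bit at GFN bit 34. */
        struct vm vm = { .gfn_direct_bits = 1ull << 34 };
        gpa_t shared = (1ull << 46) | 0x1000, priv = 0x1000;

        /* Strip the alias bit before using the GFN with memslots, as the
         * page-fault path does. */
        gfn_t gfn = gpa_to_gfn(shared) & ~vm.gfn_direct_bits;

        printf("shared direct=%d private direct=%d gfn=%#llx\n",
               is_addr_direct(&vm, shared), is_addr_direct(&vm, priv), gfn);
        return 0;
}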
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2401606db260..74c20dbb92da 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -599,6 +599,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
if (r)
return r;
+ if (kvm_has_mirrored_tdp(vcpu->kvm)) {
+ r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache,
+ PT64_ROOT_MAX_LEVEL);
+ if (r)
+ return r;
+ }
r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
PT64_ROOT_MAX_LEVEL);
if (r)
@@ -618,6 +624,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
@@ -3656,8 +3663,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
unsigned i;
int r;
- if (tdp_mmu_enabled)
- return kvm_tdp_mmu_alloc_root(vcpu);
+ if (tdp_mmu_enabled) {
+ if (kvm_has_mirrored_tdp(vcpu->kvm) &&
+ !VALID_PAGE(mmu->mirror_root_hpa))
+ kvm_tdp_mmu_alloc_root(vcpu, true);
+ kvm_tdp_mmu_alloc_root(vcpu, false);
+ return 0;
+ }
write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
@@ -4379,8 +4391,12 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault, unsigned int access)
{
struct kvm_memory_slot *slot = fault->slot;
+ struct kvm *kvm = vcpu->kvm;
int ret;
+ if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm))
+ return -EFAULT;
+
/*
* Note that the mmu_invalidate_seq also serves to detect a concurrent
* change in attributes. is_page_fault_stale() will detect an
@@ -4394,7 +4410,7 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
* Now that we have a snapshot of mmu_invalidate_seq we can check for a
* private vs. shared mismatch.
*/
- if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+ if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
return -EFAULT;
}
@@ -4456,7 +4472,7 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
* *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
* to detect retry guarantees the worst case latency for the vCPU.
*/
- if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
+ if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn))
return RET_PF_RETRY;
ret = __kvm_mmu_faultin_pfn(vcpu, fault);
@@ -4476,7 +4492,7 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
* overall cost of failing to detect the invalidation until after
* mmu_lock is acquired.
*/
- if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
+ if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) {
kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
return RET_PF_RETRY;
}
@@ -5022,7 +5038,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
__reset_rsvds_bits_mask(&context->guest_rsvd_check,
vcpu->arch.reserved_gpa_bits,
context->cpu_role.base.level, is_efer_nx(context),
- guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+ guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
guest_cpuid_is_amd_compatible(vcpu));
}
@@ -5099,7 +5115,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->root_role.level,
context->root_role.efer_nx,
- guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+ guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
is_pse, is_amd);
if (!shadow_me_mask)
@@ -6095,8 +6111,16 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
else if (r == RET_PF_SPURIOUS)
vcpu->stat.pf_spurious++;
+ /*
+ * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or
+ * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE.
+ * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to
+ * indicate continuing the page fault handling up to the final
+ * page table mapping phase.
+ */
+ WARN_ON_ONCE(r == RET_PF_CONTINUE);
if (r != RET_PF_EMULATE)
- return 1;
+ return r;
emulate:
return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
@@ -6272,6 +6296,7 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu->root.hpa = INVALID_PAGE;
mmu->root.pgd = 0;
+ mmu->mirror_root_hpa = INVALID_PAGE;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
@@ -6441,8 +6466,13 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
* write and in the same critical section as making the reload request,
* e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
*/
- if (tdp_mmu_enabled)
- kvm_tdp_mmu_invalidate_all_roots(kvm);
+ if (tdp_mmu_enabled) {
+ /*
+ * External page tables don't support fast zapping, therefore
+ * their mirrors must be invalidated separately by the caller.
+ */
+ kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS);
+ }
/*
* Notify all vcpus to reload its shadow page table and flush TLB.
@@ -6467,7 +6497,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
* lead to use-after-free.
*/
if (tdp_mmu_enabled)
- kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
}
void kvm_mmu_init_vm(struct kvm *kvm)
@@ -7090,6 +7120,19 @@ static void mmu_destroy_caches(void)
kmem_cache_destroy(mmu_page_header_cache);
}
+static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
+{
+ /*
+ * The NX recovery thread is spawned on-demand at the first KVM_RUN and
+ * may not be valid even though the VM is globally visible. Do nothing,
+ * as such a VM can't have any possible NX huge pages.
+ */
+ struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
+
+ if (nx_thread)
+ vhost_task_wake(nx_thread);
+}
+
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
{
if (nx_hugepage_mitigation_hard_disabled)
@@ -7150,7 +7193,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
kvm_mmu_zap_all_fast(kvm);
mutex_unlock(&kvm->slots_lock);
- vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+ kvm_wake_nx_recovery_thread(kvm);
}
mutex_unlock(&kvm_lock);
}
@@ -7220,6 +7263,12 @@ out:
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
+ if (tdp_mmu_enabled) {
+ read_lock(&vcpu->kvm->mmu_lock);
+ mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa,
+ NULL);
+ read_unlock(&vcpu->kvm->mmu_lock);
+ }
free_mmu_pages(&vcpu->arch.root_mmu);
free_mmu_pages(&vcpu->arch.guest_mmu);
mmu_free_memory_caches(vcpu);
@@ -7279,7 +7328,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
- vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+ kvm_wake_nx_recovery_thread(kvm);
mutex_unlock(&kvm_lock);
}
@@ -7411,20 +7460,34 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
return true;
}
+static void kvm_mmu_start_lpage_recovery(struct once *once)
+{
+ struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
+ struct kvm *kvm = container_of(ka, struct kvm, arch);
+ struct vhost_task *nx_thread;
+
+ kvm->arch.nx_huge_page_last = get_jiffies_64();
+ nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
+ kvm_nx_huge_page_recovery_worker_kill,
+ kvm, "kvm-nx-lpage-recovery");
+
+ if (!nx_thread)
+ return;
+
+ vhost_task_start(nx_thread);
+
+ /* Make the task visible only once it is fully started. */
+ WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+}
+
int kvm_mmu_post_init_vm(struct kvm *kvm)
{
if (nx_hugepage_mitigation_hard_disabled)
return 0;
- kvm->arch.nx_huge_page_last = get_jiffies_64();
- kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
- kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
- kvm, "kvm-nx-lpage-recovery");
-
+ call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
if (!kvm->arch.nx_huge_page_recovery_thread)
return -ENOMEM;
-
- vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
return 0;
}
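The recovery thread is now created lazily via call_once(), and the pointer is published only after vhost_task_start() so readers either observe NULL or a fully started task. The same publish-once pattern, sketched with pthread_once() and acquire/release atomics rather than the kernel's call_once()/READ_ONCE()/WRITE_ONCE() primitives:

#include <stdio.h>
#include <pthread.h>

struct worker { int started; };

static struct worker the_worker;
static struct worker *published;                /* NULL until fully started */
static pthread_once_t once = PTHREAD_ONCE_INIT;

static void start_worker(void)
{
        the_worker.started = 1;
        /* Publish only after the worker is fully initialized. */
        __atomic_store_n(&published, &the_worker, __ATOMIC_RELEASE);
}

static void wake_worker(void)
{
        struct worker *w = __atomic_load_n(&published, __ATOMIC_ACQUIRE);

        if (w)          /* may legitimately be NULL before first use */
                printf("waking worker (started=%d)\n", w->started);
}

int main(void)
{
        wake_worker();                          /* no-op: nothing spawned yet */
        pthread_once(&once, start_worker);      /* first "KVM_RUN" */
        wake_worker();
        return 0;
}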
@@ -7452,6 +7515,12 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;
+ /* Unmap the old attribute page. */
+ if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
+ range->attr_filter = KVM_FILTER_SHARED;
+ else
+ range->attr_filter = KVM_FILTER_PRIVATE;
+
return kvm_unmap_gfn_range(kvm, range);
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index b00abbe3f6cf..75f00598289d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -6,6 +6,8 @@
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
+#include "mmu.h"
+
#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
@@ -101,7 +103,22 @@ struct kvm_mmu_page {
int root_count;
refcount_t tdp_mmu_root_count;
};
- unsigned int unsync_children;
+ union {
+ /* These two members aren't used for TDP MMU */
+ struct {
+ unsigned int unsync_children;
+ /*
+ * Number of writes since the last time traversal
+ * visited this page.
+ */
+ atomic_t write_flooding_count;
+ };
+ /*
+ * Page table page of external PT.
+ * Passed to TDX module, not accessed by KVM.
+ */
+ void *external_spt;
+ };
union {
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
tdp_ptep_t ptep;
@@ -124,9 +141,6 @@ struct kvm_mmu_page {
int clear_spte_count;
#endif
- /* Number of writes since the last time traversal visited this page. */
- atomic_t write_flooding_count;
-
#ifdef CONFIG_X86_64
/* Used for freeing the page asynchronously if it is a TDP MMU page. */
struct rcu_head rcu_head;
@@ -145,6 +159,34 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
return kvm_mmu_role_as_id(sp->role);
}
+static inline bool is_mirror_sp(const struct kvm_mmu_page *sp)
+{
+ return sp->role.is_mirror;
+}
+
+static inline void kvm_mmu_alloc_external_spt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+ /*
+ * external_spt is allocated for the TDX module to hold private EPT
+ * mappings; the TDX module will initialize the page itself. Therefore,
+ * KVM does not need to initialize or access external_spt. KVM only
+ * interacts with sp->spt for private EPT operations.
+ */
+ sp->external_spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_external_spt_cache);
+}
+
+static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mmu_page *root)
+{
+ /*
+ * Since mirror SPs are used only for TDX, which maps private memory
+ * at its "natural" GFN, no mask needs to be applied to them - and, dually,
+ * we expect that the bits is only used for the shared PT.
+ */
+ if (is_mirror_sp(root))
+ return 0;
+ return kvm_gfn_direct_bits(kvm);
+}
+
static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
/*
@@ -229,7 +271,12 @@ struct kvm_page_fault {
*/
u8 goal_level;
- /* Shifted addr, or result of guest page table walk if addr is a gva. */
+ /*
+ * Shifted addr, or result of guest page table walk if addr is a gva. In
+ * the case of a VM where memslots can be mapped at multiple GPA aliases
+ * (i.e. TDX), the gfn field does not contain the bit that selects between
+ * the aliases (i.e. the shared bit for TDX).
+ */
gfn_t gfn;
/* The memslot containing gfn. May be NULL. */
@@ -268,9 +315,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
* tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
*
* Note, all values must be greater than or equal to zero so as not to encroach
- * on -errno return values. Somewhat arbitrarily use '0' for CONTINUE, which
- * will allow for efficient machine code when checking for CONTINUE, e.g.
- * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
+ * on -errno return values.
*/
enum {
RET_PF_CONTINUE = 0,
@@ -282,6 +327,14 @@ enum {
RET_PF_SPURIOUS,
};
+/*
+ * Define RET_PF_CONTINUE as 0 to allow for
+ * - efficient machine code when checking for CONTINUE, e.g.
+ * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero,
+ * - kvm_mmu_do_page_fault() to return other RET_PF_* as a positive value.
+ */
+static_assert(RET_PF_CONTINUE == 0);
+
static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
@@ -317,10 +370,19 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int r;
if (vcpu->arch.mmu->root_role.direct) {
- fault.gfn = fault.addr >> PAGE_SHIFT;
+ /*
+ * Things like memslots don't understand the concept of a shared
+ * bit. Strip it so that the GFN can be used like normal, and the
+ * fault.addr can be used when the shared bit is needed.
+ */
+ fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
}
+ /*
+ * With retpoline active, an indirect call is rather expensive,
+ * so do a direct call in the most common case.
+ */
if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
r = kvm_tdp_page_fault(vcpu, &fault);
else
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index af10bc0380a3..59746854c0af 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -276,6 +276,11 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
return spte_to_child_sp(root);
}
+static inline bool is_mirror_sptep(tdp_ptep_t sptep)
+{
+ return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
+}
+
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
{
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index 04c247bfe318..9e17bfa80901 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -12,7 +12,7 @@
static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
{
iter->sptep = iter->pt_path[iter->level - 1] +
- SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
+ SPTE_INDEX((iter->gfn | iter->gfn_bits) << PAGE_SHIFT, iter->level);
iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
}
@@ -37,15 +37,17 @@ void tdp_iter_restart(struct tdp_iter *iter)
* rooted at root_pt, starting with the walk to translate next_last_level_gfn.
*/
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
- int min_level, gfn_t next_last_level_gfn)
+ int min_level, gfn_t next_last_level_gfn, gfn_t gfn_bits)
{
if (WARN_ON_ONCE(!root || (root->role.level < 1) ||
- (root->role.level > PT64_ROOT_MAX_LEVEL))) {
+ (root->role.level > PT64_ROOT_MAX_LEVEL) ||
+ (gfn_bits && next_last_level_gfn >= gfn_bits))) {
iter->valid = false;
return;
}
iter->next_last_level_gfn = next_last_level_gfn;
+ iter->gfn_bits = gfn_bits;
iter->root_level = root->role.level;
iter->min_level = min_level;
iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt;
@@ -113,7 +115,7 @@ static bool try_step_side(struct tdp_iter *iter)
* Check if the iterator is already at the end of the current page
* table.
*/
- if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==
+ if (SPTE_INDEX((iter->gfn | iter->gfn_bits) << PAGE_SHIFT, iter->level) ==
(SPTE_ENT_PER_PAGE - 1))
return false;
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index 2880fd392e0c..047b78333653 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -93,8 +93,10 @@ struct tdp_iter {
tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL];
/* A pointer to the current SPTE */
tdp_ptep_t sptep;
- /* The lowest GFN mapped by the current SPTE */
+ /* The lowest GFN (mask bits excluded) mapped by the current SPTE */
gfn_t gfn;
+ /* Mask applied to convert the GFN to the mapping GPA */
+ gfn_t gfn_bits;
/* The level of the root page given to the iterator */
int root_level;
/* The lowest level the iterator should traverse to */
@@ -122,18 +124,23 @@ struct tdp_iter {
* Iterates over every SPTE mapping the GFN range [start, end) in a
* preorder traversal.
*/
-#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
- for (tdp_iter_start(&iter, root, min_level, start); \
- iter.valid && iter.gfn < end; \
+#define for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) \
+ for (tdp_iter_start(&iter, root, min_level, start, kvm_gfn_root_bits(kvm, root)); \
+ iter.valid && iter.gfn < end; \
tdp_iter_next(&iter))
-#define for_each_tdp_pte(iter, root, start, end) \
- for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
+#define for_each_tdp_pte_min_level_all(iter, root, min_level) \
+ for (tdp_iter_start(&iter, root, min_level, 0, 0); \
+ iter.valid && iter.gfn < tdp_mmu_max_gfn_exclusive(); \
+ tdp_iter_next(&iter))
+
+#define for_each_tdp_pte(iter, kvm, root, start, end) \
+ for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
tdp_ptep_t spte_to_child_pt(u64 pte, int level);
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
- int min_level, gfn_t next_last_level_gfn);
+ int min_level, gfn_t next_last_level_gfn, gfn_t gfn_bits);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
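The iterator's gfn stays stripped of the alias bit while gfn_bits is OR'd back in whenever a page-table index is computed, so range checks compare stripped GFNs but the walk still descends the correct half of the root. A small sketch of that index computation with made-up constants (same shape as SPTE_INDEX(), not the kernel's exact macro):

#include <stdio.h>

typedef unsigned long long gfn_t;

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9

/* Index of the SPTE covering @gaddr at @level. */
static unsigned int spte_index(unsigned long long gaddr, int level)
{
        int shift = PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS;

        return (gaddr >> shift) & ((1u << PT64_LEVEL_BITS) - 1);
}

int main(void)
{
        gfn_t gfn = 0x42;                       /* alias bit already stripped */
        gfn_t gfn_bits = 1ull << 34;            /* made-up shared-alias bit   */
        int level = 4;                          /* index within a 4-level root */

        /* Indexing uses (gfn | gfn_bits), so the walk lands in the shared
         * half of the table even though gfn itself stays stripped. */
        printf("level %d index without bits: %u\n", level,
               spte_index(gfn << PAGE_SHIFT, level));
        printf("level %d index with bits:    %u\n", level,
               spte_index((gfn | gfn_bits) << PAGE_SHIFT, level));
        return 0;
}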
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 2f15e0e33903..046b6ba31197 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -37,8 +37,8 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
* for zapping and thus puts the TDP MMU's reference to each root, i.e.
* ultimately frees all roots.
*/
- kvm_tdp_mmu_invalidate_all_roots(kvm);
- kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
@@ -53,6 +53,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
+ free_page((unsigned long)sp->external_spt);
free_page((unsigned long)sp->spt);
kmem_cache_free(mmu_page_header_cache, sp);
}
@@ -91,19 +92,33 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
+static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
+ enum kvm_tdp_mmu_root_types types)
+{
+ if (WARN_ON_ONCE(!(types & KVM_VALID_ROOTS)))
+ return false;
+
+ if (root->role.invalid && !(types & KVM_INVALID_ROOTS))
+ return false;
+
+ if (likely(!is_mirror_sp(root)))
+ return types & KVM_DIRECT_ROOTS;
+ return types & KVM_MIRROR_ROOTS;
+}
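tdp_mmu_root_match() reduces root filtering to a bitmask test. A sketch with invented flag values (the enum kvm_tdp_mmu_root_types definition itself isn't part of this hunk):

#include <stdio.h>
#include <stdbool.h>

enum root_types {                       /* invented values, for illustration */
        DIRECT_ROOTS    = 1 << 0,
        MIRROR_ROOTS    = 1 << 1,
        INVALID_ROOTS   = 1 << 2,
        VALID_ROOTS     = DIRECT_ROOTS | MIRROR_ROOTS,
        ALL_ROOTS       = VALID_ROOTS | INVALID_ROOTS,
};

struct root { bool invalid, is_mirror; };

static bool root_match(const struct root *r, enum root_types types)
{
        if (r->invalid && !(types & INVALID_ROOTS))
                return false;
        return types & (r->is_mirror ? MIRROR_ROOTS : DIRECT_ROOTS);
}

int main(void)
{
        struct root direct = { 0 };
        struct root stale_mirror = { .invalid = true, .is_mirror = true };

        printf("%d %d %d\n",
               root_match(&direct, VALID_ROOTS),        /* 1 */
               root_match(&stale_mirror, VALID_ROOTS),  /* 0 */
               root_match(&stale_mirror, ALL_ROOTS));   /* 1 */
        return 0;
}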
+
/*
* Returns the next root after @prev_root (or the first root if @prev_root is
- * NULL). A reference to the returned root is acquired, and the reference to
- * @prev_root is released (the caller obviously must hold a reference to
- * @prev_root if it's non-NULL).
+ * NULL) that matches @types. A reference to the returned root is
+ * acquired, and the reference to @prev_root is released (the caller obviously
+ * must hold a reference to @prev_root if it's non-NULL).
*
- * If @only_valid is true, invalid roots are skipped.
+ * Roots that don't match @types are skipped.
*
* Returns NULL if the end of tdp_mmu_roots was reached.
*/
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *prev_root,
- bool only_valid)
+ enum kvm_tdp_mmu_root_types types)
{
struct kvm_mmu_page *next_root;
@@ -124,7 +139,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
typeof(*next_root), link);
while (next_root) {
- if ((!only_valid || !next_root->role.invalid) &&
+ if (tdp_mmu_root_match(next_root, types) &&
kvm_tdp_mmu_get_root(next_root))
break;
@@ -149,20 +164,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* If shared is set, this function is operating under the MMU lock in read
* mode.
*/
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid) \
- for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _types); \
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
+ _root = tdp_mmu_next_root(_kvm, _root, _types)) \
if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \
} else
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
- __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
+ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_VALID_ROOTS)
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
- for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ALL_ROOTS); \
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
- _root = tdp_mmu_next_root(_kvm, _root, false))
+ _root = tdp_mmu_next_root(_kvm, _root, KVM_ALL_ROOTS))
/*
* Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
@@ -171,18 +186,15 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* Holding mmu_lock for write obviates the need for RCU protection as the list
* is guaranteed to be stable.
*/
-#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid) \
+#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \
- ((_only_valid) && (_root)->role.invalid))) { \
+ !tdp_mmu_root_match((_root), (_types)))) { \
} else
-#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
- __for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
-
#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
- __for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
+ __for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
@@ -223,7 +235,7 @@ static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}
-int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
+void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
union kvm_mmu_page_role role = mmu->root_role;
@@ -231,6 +243,9 @@ int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_page *root;
+ if (mirror)
+ role.is_mirror = true;
+
/*
* Check for an existing root before acquiring the pages lock to avoid
* unnecessary serialization if multiple vCPUs are loading a new root.
@@ -282,9 +297,12 @@ out_read_unlock:
* and actually consuming the root if it's invalidated after dropping
* mmu_lock, and the root can't be freed as this vCPU holds a reference.
*/
- mmu->root.hpa = __pa(root->spt);
- mmu->root.pgd = 0;
- return 0;
+ if (mirror) {
+ mmu->mirror_root_hpa = __pa(root->spt);
+ } else {
+ mmu->root.hpa = __pa(root->spt);
+ mmu->root.pgd = 0;
+ }
}
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
@@ -322,6 +340,29 @@ static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
+static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
+ int level)
+{
+ kvm_pfn_t old_pfn = spte_to_pfn(old_spte);
+ int ret;
+
+ /*
+ * External (TDX) SPTEs are limited to PG_LEVEL_4K, and external
+ * PTs are removed in a special order, involving free_external_spt().
+ * But remove_external_spte() will be called on non-leaf PTEs via
+ * __tdp_mmu_zap_root(), so avoid the error the former would return
+ * in this case.
+ */
+ if (!is_last_spte(old_spte, level))
+ return;
+
+ /* Zapping a leaf SPTE is allowed only when the write lock is held. */
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ /* Because the write lock is held, the operation should succeed. */
+ ret = static_call(kvm_x86_remove_external_spte)(kvm, gfn, level, old_pfn);
+ KVM_BUG_ON(ret, kvm);
+}
+
/**
* handle_removed_pt() - handle a page table removed from the TDP structure
*
@@ -417,11 +458,81 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
old_spte, FROZEN_SPTE, level, shared);
+
+ if (is_mirror_sp(sp)) {
+ KVM_BUG_ON(shared, kvm);
+ remove_external_spte(kvm, gfn, old_spte, level);
+ }
+ }
+
+ if (is_mirror_sp(sp) &&
+ WARN_ON(static_call(kvm_x86_free_external_spt)(kvm, base_gfn, sp->role.level,
+ sp->external_spt))) {
+ /*
+ * Failed to free the page table page of the mirror page table;
+ * there is nothing more to do here.
+ * Intentionally leak the page to prevent the kernel from
+ * accessing the encrypted page.
+ */
+ sp->external_spt = NULL;
}
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
+static void *get_external_spt(gfn_t gfn, u64 new_spte, int level)
+{
+ if (is_shadow_present_pte(new_spte) && !is_last_spte(new_spte, level)) {
+ struct kvm_mmu_page *sp = spte_to_child_sp(new_spte);
+
+ WARN_ON_ONCE(sp->role.level + 1 != level);
+ WARN_ON_ONCE(sp->gfn != gfn);
+ return sp->external_spt;
+ }
+
+ return NULL;
+}
+
+static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sptep,
+ gfn_t gfn, u64 old_spte,
+ u64 new_spte, int level)
+{
+ bool was_present = is_shadow_present_pte(old_spte);
+ bool is_present = is_shadow_present_pte(new_spte);
+ bool is_leaf = is_present && is_last_spte(new_spte, level);
+ kvm_pfn_t new_pfn = spte_to_pfn(new_spte);
+ int ret = 0;
+
+ KVM_BUG_ON(was_present, kvm);
+
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * We need to lock out other updates to the SPTE until the external
+ * page table has been modified. Use FROZEN_SPTE similar to
+ * the zapping case.
+ */
+ if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
+ return -EBUSY;
+
+ /*
+ * Use a different callback to set up either a middle-level
+ * external page table or a leaf entry.
+ */
+ if (is_leaf) {
+ ret = static_call(kvm_x86_set_external_spte)(kvm, gfn, level, new_pfn);
+ } else {
+ void *external_spt = get_external_spt(gfn, new_spte, level);
+
+ KVM_BUG_ON(!external_spt, kvm);
+ ret = static_call(kvm_x86_link_external_spt)(kvm, gfn, level, external_spt);
+ }
+ if (ret)
+ __kvm_tdp_mmu_write_spte(sptep, old_spte);
+ else
+ __kvm_tdp_mmu_write_spte(sptep, new_spte);
+ return ret;
+}
+
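set_external_spte_present() above follows a freeze, update-external, publish ordering: the SPTE is first replaced with FROZEN_SPTE via compare-and-exchange, the external (TDX) page table is updated, and only then is either the new value published or the old value restored. Below is a rough stand-alone sketch of that ordering using C11 atomics; the FROZEN sentinel, update_external() and the return codes are simplified placeholders rather than KVM interfaces.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FROZEN UINT64_MAX	/* stand-in sentinel, analogous to FROZEN_SPTE */

/* Placeholder for the external (e.g. TDX) page-table update. */
static int update_external(uint64_t new_val)
{
	(void)new_val;
	return 0;
}

/*
 * Freeze the slot so concurrent readers see neither the old nor the new
 * value while the external structure is updated, then publish the result,
 * or roll back to the old value if the external update fails.
 */
static int freeze_and_publish(_Atomic uint64_t *slot, uint64_t old_val,
			      uint64_t new_val)
{
	if (!atomic_compare_exchange_strong(slot, &old_val, FROZEN))
		return -1;			/* lost a race, caller may retry */

	if (update_external(new_val)) {
		atomic_store(slot, old_val);	/* restore on failure */
		return -2;
	}

	atomic_store(slot, new_val);		/* publish */
	return 0;
}

int main(void)
{
	_Atomic uint64_t spte = 0;
	int ret = freeze_and_publish(&spte, 0, 42);

	printf("ret=%d spte=%llu\n", ret, (unsigned long long)atomic_load(&spte));
	return 0;
}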
/**
* handle_changed_spte - handle bookkeeping associated with an SPTE change
* @kvm: kvm instance
@@ -522,11 +633,10 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}
-static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
+static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
+ struct tdp_iter *iter,
u64 new_spte)
{
- u64 *sptep = rcu_dereference(iter->sptep);
-
/*
* The caller is responsible for ensuring the old SPTE is not a FROZEN
* SPTE. KVM should never attempt to zap or manipulate a FROZEN SPTE,
@@ -535,15 +645,34 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
*/
WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
- /*
- * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
- * does not hold the mmu_lock. On failure, i.e. if a different logical
- * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
- * the current value, so the caller operates on fresh data, e.g. if it
- * retries tdp_mmu_set_spte_atomic()
- */
- if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
- return -EBUSY;
+ if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
+ int ret;
+
+ /*
+ * Users of atomic zapping don't operate on mirror roots,
+ * so don't handle it and bug the VM if it's seen.
+ */
+ if (KVM_BUG_ON(!is_shadow_present_pte(new_spte), kvm))
+ return -EBUSY;
+
+ ret = set_external_spte_present(kvm, iter->sptep, iter->gfn,
+ iter->old_spte, new_spte, iter->level);
+ if (ret)
+ return ret;
+ } else {
+ u64 *sptep = rcu_dereference(iter->sptep);
+
+ /*
+ * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs
+ * and does not hold the mmu_lock. On failure, i.e. if a
+ * different logical CPU modified the SPTE, try_cmpxchg64()
+ * updates iter->old_spte with the current value, so the caller
+ * operates on fresh data, e.g. if it retries
+ * tdp_mmu_set_spte_atomic()
+ */
+ if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
+ return -EBUSY;
+ }
return 0;
}
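For the non-mirror path, the retained comment leans on a property of try_cmpxchg64(): when the compare-and-exchange loses a race, the "expected" value is rewritten with the current contents, so a retry operates on fresh data. C11's atomic_compare_exchange_strong() behaves the same way; a tiny sketch with toy names, not KVM code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Set *slot to new_val only if it still holds *old_val.  On failure the
 * expected value is refreshed with the current contents, so the caller
 * can re-evaluate and retry without re-reading the slot separately.
 */
static int try_set(_Atomic uint64_t *slot, uint64_t *old_val, uint64_t new_val)
{
	return atomic_compare_exchange_strong(slot, old_val, new_val) ? 0 : -1;
}

int main(void)
{
	_Atomic uint64_t spte = 5;
	uint64_t old = 4;			/* stale snapshot */

	if (try_set(&spte, &old, 10))
		printf("lost race, old refreshed to %llu\n",
		       (unsigned long long)old);	/* prints 5 */

	if (!try_set(&spte, &old, 10))
		printf("retry succeeded, spte=%llu\n",
		       (unsigned long long)atomic_load(&spte));
	return 0;
}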
@@ -573,7 +702,7 @@ static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
- ret = __tdp_mmu_set_spte_atomic(iter, new_spte);
+ ret = __tdp_mmu_set_spte_atomic(kvm, iter, new_spte);
if (ret)
return ret;
@@ -613,6 +742,16 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
+
+ /*
+ * Users that do non-atomic setting of PTEs don't operate on mirror
+ * roots, so don't handle it and bug the VM if it's seen.
+ */
+ if (is_mirror_sptep(sptep)) {
+ KVM_BUG_ON(is_shadow_present_pte(new_spte), kvm);
+ remove_external_spte(kvm, gfn, old_spte, level);
+ }
+
return old_spte;
}
@@ -625,18 +764,18 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
iter->gfn, iter->level);
}
-#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
- for_each_tdp_pte(_iter, _root, _start, _end)
+#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
+ for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
-#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
- tdp_root_for_each_pte(_iter, _root, _start, _end) \
+#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end) \
+ tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
if (!is_shadow_present_pte(_iter.old_spte) || \
!is_last_spte(_iter.old_spte, _iter.level)) \
continue; \
else
-#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
- for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
+#define tdp_mmu_for_each_pte(_iter, _kvm, _root, _start, _end) \
+ for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
struct tdp_iter *iter)
@@ -705,10 +844,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
{
struct tdp_iter iter;
- gfn_t end = tdp_mmu_max_gfn_exclusive();
- gfn_t start = 0;
-
- for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
+ for_each_tdp_pte_min_level_all(iter, root, zap_level) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
continue;
@@ -812,7 +948,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
+ for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
if (can_yield &&
tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
flush = false;
@@ -863,19 +999,21 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
struct kvm_mmu_page *root;
/*
- * Zap all roots, including invalid roots, as all SPTEs must be dropped
- * before returning to the caller. Zap directly even if the root is
- * also being zapped by a worker. Walking zapped top-level SPTEs isn't
- * all that expensive and mmu_lock is already held, which means the
- * worker has yielded, i.e. flushing the work instead of zapping here
- * isn't guaranteed to be any faster.
+ * Zap all direct roots, including invalid direct roots, as all direct
+ * SPTEs must be dropped before returning to the caller. For TDX, mirror
+ * roots don't need handling in response to the mmu notifier (the caller).
+ *
+ * Zap directly even if the root is also being zapped by a concurrent
+ * "fast zap". Walking zapped top-level SPTEs isn't all that expensive
+ * and mmu_lock is already held, which means the other thread has yielded.
*
* A TLB flush is unnecessary, KVM zaps everything if and only if the VM
* is being destroyed or the userspace VMM has exited. In both cases,
* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
*/
lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_tdp_mmu_root_yield_safe(kvm, root)
+ __for_each_tdp_mmu_root_yield_safe(kvm, root, -1,
+ KVM_DIRECT_ROOTS | KVM_INVALID_ROOTS)
tdp_mmu_zap_root(kvm, root, false);
}
@@ -883,11 +1021,14 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
* Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
* zap" completes.
*/
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)
{
struct kvm_mmu_page *root;
- read_lock(&kvm->mmu_lock);
+ if (shared)
+ read_lock(&kvm->mmu_lock);
+ else
+ write_lock(&kvm->mmu_lock);
for_each_tdp_mmu_root_yield_safe(kvm, root) {
if (!root->tdp_mmu_scheduled_root_to_zap)
@@ -905,7 +1046,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
* that may be zapped, as such entries are associated with the
* ASID on both VMX and SVM.
*/
- tdp_mmu_zap_root(kvm, root, true);
+ tdp_mmu_zap_root(kvm, root, shared);
/*
* The reference needs to be put *after* zapping the root, as
@@ -915,7 +1056,10 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
kvm_tdp_mmu_put_root(kvm, root);
}
- read_unlock(&kvm->mmu_lock);
+ if (shared)
+ read_unlock(&kvm->mmu_lock);
+ else
+ write_unlock(&kvm->mmu_lock);
}
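kvm_tdp_mmu_zap_invalidated_roots() now takes a shared flag and acquires mmu_lock in read or write mode accordingly, so the teardown path can run under the exclusive lock it already implies. A toy user-space analogue of a routine parameterized on lock mode, using a pthread rwlock (names and structure are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Do the work under the lock, in whichever mode the caller selected. */
static void zap_under_lock(bool shared)
{
	if (shared)
		pthread_rwlock_rdlock(&lock);
	else
		pthread_rwlock_wrlock(&lock);

	printf("zapping with %s lock\n", shared ? "shared" : "exclusive");

	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	zap_under_lock(true);
	zap_under_lock(false);
	return 0;
}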
/*
@@ -928,11 +1072,19 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
* Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_alloc_root().
*/
-void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
+ enum kvm_tdp_mmu_root_types root_types)
{
struct kvm_mmu_page *root;
/*
+ * Invalidating invalid roots doesn't make sense; prevent developers from
+ * having to think about it.
+ */
+ if (WARN_ON_ONCE(root_types & KVM_INVALID_ROOTS))
+ root_types &= ~KVM_INVALID_ROOTS;
+
+ /*
* mmu_lock must be held for write to ensure that a root doesn't become
* invalid while there are active readers (invalidating a root while
* there are active readers may or may not be problematic in practice,
@@ -953,6 +1105,9 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
* or get/put references to roots.
*/
list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+ if (!tdp_mmu_root_match(root, root_types))
+ continue;
+
/*
* Note, invalid roots can outlive a memslot update! Invalid
* roots must be *zapped* before the memslot update completes,
@@ -1068,7 +1223,7 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
*/
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
- struct kvm_mmu *mmu = vcpu->arch.mmu;
+ struct kvm_mmu_page *root = tdp_mmu_get_root_for_fault(vcpu, fault);
struct kvm *kvm = vcpu->kvm;
struct tdp_iter iter;
struct kvm_mmu_page *sp;
@@ -1080,7 +1235,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
rcu_read_lock();
- tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
+ tdp_mmu_for_each_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
int r;
if (fault->nx_huge_page_workaround_enabled)
@@ -1107,13 +1262,18 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
*/
sp = tdp_mmu_alloc_sp(vcpu);
tdp_mmu_init_child_sp(sp, &iter);
+ if (is_mirror_sp(sp))
+ kvm_mmu_alloc_external_spt(vcpu, sp);
sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
- if (is_shadow_present_pte(iter.old_spte))
+ if (is_shadow_present_pte(iter.old_spte)) {
+ /* Don't support large pages for mirrored roots (TDX) */
+ KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
- else
+ } else {
r = tdp_mmu_link_sp(kvm, &iter, sp, true);
+ }
/*
* Force the guest to retry if installing an upper level SPTE
@@ -1148,12 +1308,16 @@ retry:
return ret;
}
+/* Used by mmu notifier via kvm_unmap_gfn_range() */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool flush)
{
+ enum kvm_tdp_mmu_root_types types;
struct kvm_mmu_page *root;
- __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+ types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter) | KVM_INVALID_ROOTS;
+
+ __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
range->may_block, flush);
@@ -1193,20 +1357,24 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
struct kvm_gfn_range *range,
bool test_only)
{
+ enum kvm_tdp_mmu_root_types types;
struct kvm_mmu_page *root;
struct tdp_iter iter;
bool ret = false;
+ types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter);
+
/*
* Don't support rescheduling, none of the MMU notifiers that funnel
* into this helper allow blocking; it'd be dead, wasteful code. Note,
* this helper must NOT be used to unmap GFNs, as it processes only
* valid roots!
*/
- for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
+ WARN_ON(types & ~KVM_VALID_ROOTS);
+ __for_each_tdp_mmu_root(kvm, root, range->slot->as_id, types) {
guard(rcu)();
- tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
+ tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
if (!is_accessed_spte(iter.old_spte))
continue;
@@ -1247,7 +1415,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
- for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+ for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
@@ -1366,7 +1534,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
* level above the target level (e.g. splitting a 1GB to 512 2MB pages,
* and then splitting each of those to 512 4KB pages).
*/
- for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
+ for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
continue;
@@ -1464,7 +1632,7 @@ static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();
- tdp_root_for_each_pte(iter, root, start, end) {
+ tdp_root_for_each_pte(iter, kvm, root, start, end) {
retry:
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
@@ -1512,7 +1680,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
+ tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
gfn + BITS_PER_LONG) {
if (!mask)
break;
@@ -1566,7 +1734,7 @@ static int tdp_mmu_make_huge_spte(struct kvm *kvm,
gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
struct tdp_iter iter;
- tdp_root_for_each_leaf_pte(iter, root, start, end) {
+ tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
/*
* Use the parent iterator when checking for forward progress so
* that KVM doesn't get stuck continuously trying to yield (i.e.
@@ -1600,7 +1768,7 @@ static void recover_huge_pages_range(struct kvm *kvm,
rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
+ for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
flush = false;
@@ -1681,7 +1849,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
+ for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
continue;
@@ -1729,14 +1897,14 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
+ struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
struct tdp_iter iter;
- struct kvm_mmu *mmu = vcpu->arch.mmu;
gfn_t gfn = addr >> PAGE_SHIFT;
int leaf = -1;
*root_level = vcpu->arch.mmu->root_role.level;
- tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+ tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
leaf = iter.level;
sptes[leaf] = iter.old_spte;
}
@@ -1758,11 +1926,12 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
u64 *spte)
{
+ /* Fast page faults are not supported for mirrored roots */
+ struct kvm_mmu_page *root = tdp_mmu_get_root(vcpu, KVM_DIRECT_ROOTS);
struct tdp_iter iter;
- struct kvm_mmu *mmu = vcpu->arch.mmu;
tdp_ptep_t sptep = NULL;
- tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+ tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
*spte = iter.old_spte;
sptep = iter.sptep;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index f03ca0dd13d9..52acf99d40a0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -10,7 +10,7 @@
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu);
+void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror);
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
@@ -19,11 +19,56 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
+enum kvm_tdp_mmu_root_types {
+ KVM_INVALID_ROOTS = BIT(0),
+ KVM_DIRECT_ROOTS = BIT(1),
+ KVM_MIRROR_ROOTS = BIT(2),
+ KVM_VALID_ROOTS = KVM_DIRECT_ROOTS | KVM_MIRROR_ROOTS,
+ KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
+};
+
+static inline enum kvm_tdp_mmu_root_types kvm_gfn_range_filter_to_root_types(struct kvm *kvm,
+ enum kvm_gfn_range_filter process)
+{
+ enum kvm_tdp_mmu_root_types ret = 0;
+
+ if (!kvm_has_mirrored_tdp(kvm))
+ return KVM_DIRECT_ROOTS;
+
+ if (process & KVM_FILTER_PRIVATE)
+ ret |= KVM_MIRROR_ROOTS;
+ if (process & KVM_FILTER_SHARED)
+ ret |= KVM_DIRECT_ROOTS;
+
+ WARN_ON_ONCE(!ret);
+
+ return ret;
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_get_root_for_fault(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
+{
+ if (unlikely(!kvm_is_addr_direct(vcpu->kvm, fault->addr)))
+ return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);
+
+ return root_to_sp(vcpu->arch.mmu->root.hpa);
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_get_root(struct kvm_vcpu *vcpu,
+ enum kvm_tdp_mmu_root_types type)
+{
+ if (unlikely(type == KVM_MIRROR_ROOTS))
+ return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);
+
+ return root_to_sp(vcpu->arch.mmu->root.hpa);
+}
+
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
-void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
+void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
+ enum kvm_tdp_mmu_root_types root_types);
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
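The kvm_tdp_mmu_root_types enum above composes root selection out of three bits, and kvm_gfn_range_filter_to_root_types() maps a private/shared attribute filter onto that mask. Here is a small stand-alone sketch of the same bitmask matching; the enum values and root layout are toy stand-ins mirroring the shape of tdp_mmu_root_match(), not the KVM definitions.

#include <stdbool.h>
#include <stdio.h>

enum root_types {
	INVALID_ROOTS = 1 << 0,
	DIRECT_ROOTS  = 1 << 1,
	MIRROR_ROOTS  = 1 << 2,
	VALID_ROOTS   = DIRECT_ROOTS | MIRROR_ROOTS,
	ALL_ROOTS     = VALID_ROOTS | INVALID_ROOTS,
};

struct root {
	bool invalid;
	bool mirror;
};

/*
 * Mirrors the shape of tdp_mmu_root_match(): skip invalid roots unless
 * explicitly requested, then select by direct vs. mirror flavor.
 */
static bool root_match(const struct root *r, enum root_types types)
{
	if (r->invalid && !(types & INVALID_ROOTS))
		return false;
	return r->mirror ? (types & MIRROR_ROOTS) : (types & DIRECT_ROOTS);
}

int main(void)
{
	struct root roots[] = {
		{ .invalid = false, .mirror = false },
		{ .invalid = true,  .mirror = false },
		{ .invalid = false, .mirror = true  },
	};

	for (int i = 0; i < 3; i++)
		printf("root %d: valid-only=%d all=%d\n", i,
		       root_match(&roots[i], VALID_ROOTS),
		       root_match(&roots[i], ALL_ROOTS));
	return 0;
}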
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 47a46283c866..75e9cfc689f8 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -797,7 +797,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
memset(pmu, 0, sizeof(*pmu));
kvm_pmu_call(init)(vcpu);
- kvm_pmu_refresh(vcpu);
}
/* Release perf_events for vPMCs that have been unused for a full time slice. */
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index e46220ece83c..fde0ae986003 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -7,23 +7,6 @@
#include <asm/cpufeatures.h>
/*
- * Hardware-defined CPUID leafs that are either scattered by the kernel or are
- * unknown to the kernel, but need to be directly used by KVM. Note, these
- * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
- */
-enum kvm_only_cpuid_leafs {
- CPUID_12_EAX = NCAPINTS,
- CPUID_7_1_EDX,
- CPUID_8000_0007_EDX,
- CPUID_8000_0022_EAX,
- CPUID_7_2_EDX,
- CPUID_24_0_EBX,
- NR_KVM_CPU_CAPS,
-
- NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
-};
-
-/*
* Define a KVM-only feature flag.
*
* For features that are scattered by cpufeatures.h, __feature_translate() also
@@ -145,7 +128,10 @@ static __always_inline u32 __feature_translate(int x86_feature)
static __always_inline u32 __feature_leaf(int x86_feature)
{
- return __feature_translate(x86_feature) / 32;
+ u32 x86_leaf = __feature_translate(x86_feature) / 32;
+
+ reverse_cpuid_check(x86_leaf);
+ return x86_leaf;
}
/*
@@ -168,7 +154,6 @@ static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_featu
{
unsigned int x86_leaf = __feature_leaf(x86_feature);
- reverse_cpuid_check(x86_leaf);
return reverse_cpuid[x86_leaf];
}
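Folding reverse_cpuid_check() into __feature_leaf() means every translation from a feature bit to a CPUID word is bounds-checked at the point the word index is computed, rather than only in x86_feature_cpuid(). A toy sketch of that pattern, with assert() standing in for the compile-time check and made-up word names:

#include <assert.h>
#include <stdio.h>

#define NWORDS 4		/* toy capability-table size */

static const char *word_names[NWORDS] = { "1.EDX", "1.ECX", "7.0.EBX", "7.1.EAX" };

/* A feature is encoded as word * 32 + bit, like the kernel's X86_FEATURE_*. */
static unsigned int feature_word(unsigned int feature)
{
	unsigned int word = feature / 32;

	assert(word < NWORDS);	/* the bounds check now lives here */
	return word;
}

int main(void)
{
	unsigned int feature = 2 * 32 + 9;	/* word 2, bit 9 */

	printf("feature %u lives in CPUID word %s, bit %u\n",
	       feature, word_names[feature_word(feature)], feature % 32);
	return 0;
}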
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 85241c0c7f56..e0ab7df27b66 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -283,7 +283,7 @@ void enter_smm(struct kvm_vcpu *vcpu)
memset(smram.bytes, 0, sizeof(smram.bytes));
#ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
enter_smm_save_state_64(vcpu, &smram.smram64);
else
#endif
@@ -353,7 +353,7 @@ void enter_smm(struct kvm_vcpu *vcpu)
kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
#ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
if (kvm_x86_call(set_efer)(vcpu, 0))
goto error;
#endif
@@ -586,7 +586,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
* supports long mode.
*/
#ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) {
struct kvm_segment cs_desc;
unsigned long cr4;
@@ -609,7 +609,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
#ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) {
unsigned long cr4, efer;
/* Clear CR4.PAE before clearing EFER.LME. */
@@ -634,7 +634,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
ret = rsm_load_state_64(ctxt, &smram.smram64);
else
#endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b708bdf7eaff..d77b094d9a4d 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -111,7 +111,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
- if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
+ if (!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
return true;
if (!nested_npt_enabled(svm))
@@ -594,7 +594,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
vmcb_mark_dirty(vmcb02, VMCB_DR);
}
- if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
/*
* Reserved bits of DEBUGCTL are ignored. Be consistent with
@@ -651,7 +651,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
* exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
*/
- if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_VGIF) &&
(svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
else
@@ -689,7 +689,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
- if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
nested_svm_update_tsc_ratio_msr(vcpu);
@@ -710,7 +710,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
* what a nrips=0 CPU would do (L1 is responsible for advancing RIP
* prior to injecting the event).
*/
- if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
vmcb02->control.next_rip = svm->nested.ctl.next_rip;
else if (boot_cpu_has(X86_FEATURE_NRIPS))
vmcb02->control.next_rip = vmcb12_rip;
@@ -720,7 +720,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
svm->soft_int_injected = true;
svm->soft_int_csbase = vmcb12_csbase;
svm->soft_int_old_rip = vmcb12_rip;
- if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
svm->soft_int_next_rip = svm->nested.ctl.next_rip;
else
svm->soft_int_next_rip = vmcb12_rip;
@@ -728,18 +728,18 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
LBR_CTL_ENABLE_MASK;
- if (guest_can_use(vcpu, X86_FEATURE_LBRV))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV))
vmcb02->control.virt_ext |=
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
if (!nested_vmcb_needs_vls_intercept(svm))
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
- if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_PAUSEFILTER))
pause_count12 = svm->nested.ctl.pause_filter_count;
else
pause_count12 = 0;
- if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_PFTHRESHOLD))
pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
else
pause_thresh12 = 0;
@@ -1026,7 +1026,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (vmcb12->control.exit_code != SVM_EXIT_ERR)
nested_save_pending_event_to_vmcb12(svm, vmcb12);
- if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
vmcb12->control.next_rip = vmcb02->control.next_rip;
vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
@@ -1065,7 +1065,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (!nested_exit_on_intr(svm))
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
- if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
svm_copy_lbrs(vmcb12, vmcb02);
svm_update_lbrv(vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 22d5a65b410c..288f7f2a46f2 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -46,7 +46,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
switch (msr) {
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE))
return NULL;
/*
* Each PMU counter has a pair of CTL and CTR MSRs. CTLn
@@ -109,7 +109,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
return pmu->version > 0;
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
- return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+ return guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -179,7 +179,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
union cpuid_0x80000022_ebx ebx;
pmu->version = 1;
- if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFMON_V2)) {
pmu->version = 2;
/*
* Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
@@ -189,7 +189,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
- } else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
+ } else if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
} else {
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index fe6cc763fd51..a2a794c32050 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3051,11 +3051,11 @@ out:
min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
- sev_es_supported ? "enabled" : "disabled",
+ str_enabled_disabled(sev_es_supported),
min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
if (boot_cpu_has(X86_FEATURE_SEV_SNP))
pr_info("SEV-SNP %s (ASIDs %u - %u)\n",
- sev_snp_supported ? "enabled" : "disabled",
+ str_enabled_disabled(sev_snp_supported),
min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
sev_enabled = sev_supported;
@@ -3627,13 +3627,20 @@ static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr)
return 1; /* resume guest */
}
- if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
+ if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
return 1; /* resume guest */
}
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ /*
+ * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
+ * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
+ * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
+ * vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
+ */
+ vcpu->run->hypercall.ret = 0;
vcpu->run->hypercall.args[0] = gpa;
vcpu->run->hypercall.args[1] = 1;
vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE)
@@ -3710,7 +3717,7 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
bool huge;
u64 gfn;
- if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
+ if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
return 1;
}
@@ -3797,6 +3804,13 @@ next_range:
case VMGEXIT_PSC_OP_SHARED:
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ /*
+ * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
+ * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
+ * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
+ * vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
+ */
+ vcpu->run->hypercall.ret = 0;
vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn);
vcpu->run->hypercall.args[1] = npages;
vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE
@@ -4435,8 +4449,8 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
- bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+ bool v_tsc_aux = guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID);
set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
}
@@ -4445,16 +4459,15 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
* the host/guest supports its use.
*
- * guest_can_use() checks a number of requirements on the host/guest to
- * ensure that MSR_IA32_XSS is available, but it might report true even
- * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
- * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
- * to further check that the guest CPUID actually supports
- * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
- * guests will still get intercepted and caught in the normal
- * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
+ * KVM treats the guest as being capable of using XSAVES even if XSAVES
+ * isn't enabled in guest CPUID as there is no intercept for XSAVES,
+ * i.e. the guest can use XSAVES/XRSTOR to read/write XSS if XSAVE is
+ * exposed to the guest and XSAVES is supported in hardware. Condition
+ * full XSS passthrough on the guest being able to use XSAVES *and*
+ * XSAVES being exposed to the guest so that KVM can at least honor
+ * guest CPUID for RDMSR and WRMSR.
*/
- if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
else
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 21dacd312779..7640a84e554a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -28,6 +28,7 @@
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>
+#include <linux/string_choices.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -284,8 +285,6 @@ u32 svm_msrpm_offset(u32 msr)
return MSR_INVALID;
}
-static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);
-
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
@@ -1049,7 +1048,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
- (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
if (enable_lbrv == current_enable_lbrv)
@@ -1187,14 +1186,14 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
*/
if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
if (!npt_enabled ||
- !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
+ !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID))
svm_set_intercept(svm, INTERCEPT_INVPCID);
else
svm_clr_intercept(svm, INTERCEPT_INVPCID);
}
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
- if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP))
svm_clr_intercept(svm, INTERCEPT_RDTSCP);
else
svm_set_intercept(svm, INTERCEPT_RDTSCP);
@@ -1921,9 +1920,6 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
unsigned long old_cr4 = vcpu->arch.cr4;
- if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
- svm_flush_tlb_current(vcpu);
-
vcpu->arch.cr4 = cr4;
if (!npt_enabled) {
cr4 |= X86_CR4_PAE;
@@ -2864,7 +2860,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_AMD64_TSC_RATIO:
if (!msr_info->host_initiated &&
- !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
return 1;
msr_info->data = svm->tsc_ratio_msr;
break;
@@ -2940,7 +2936,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
return 1;
msr_info->data = svm->virt_spec_ctrl;
@@ -3024,7 +3020,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
- if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
if (!msr->host_initiated)
return 1;
@@ -3046,7 +3042,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->tsc_ratio_msr = data;
- if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
is_guest_mode(vcpu))
nested_svm_update_tsc_ratio_msr(vcpu);
@@ -3091,7 +3087,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
return 1;
if (data & ~SPEC_CTRL_SSBD)
@@ -3263,7 +3259,7 @@ static int invpcid_interception(struct kvm_vcpu *vcpu)
unsigned long type;
gva_t gva;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -3533,6 +3529,21 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
*error_code = 0;
}
+static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info,
+ u32 *error_code)
+{
+ struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+ *intr_info = control->event_inj;
+
+ if ((*intr_info & SVM_EXITINTINFO_VALID) &&
+ (*intr_info & SVM_EXITINTINFO_VALID_ERR))
+ *error_code = control->event_inj_err;
+ else
+ *error_code = 0;
+
+}
+
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4392,27 +4403,17 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* XSS on VM-Enter/VM-Exit. Failure to do so would effectively give
* the guest read/write access to the host's XSS.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- boot_cpu_has(X86_FEATURE_XSAVES) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
-
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
+ guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES,
+ boot_cpu_has(X86_FEATURE_XSAVES) &&
+ guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE));
/*
* Intercept VMLOAD if the vCPU model is Intel in order to emulate that
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
* SVM on Intel is bonkers and extremely unlikely to work).
*/
- if (!guest_cpuid_is_intel_compatible(vcpu))
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
-
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
+ if (guest_cpuid_is_intel_compatible(vcpu))
+ guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
svm_recalc_instruction_intercepts(vcpu, svm);
@@ -4422,7 +4423,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
- !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+ !!guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
if (sev_guest(vcpu->kvm))
sev_vcpu_after_set_cpuid(svm);
@@ -4673,7 +4674,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
* responsible for ensuring nested SVM and SMIs are mutually exclusive.
*/
- if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
return 1;
smram->smram64.svm_guest_flag = 1;
@@ -4720,14 +4721,14 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
const struct kvm_smram_state_64 *smram64 = &smram->smram64;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
return 0;
/* Non-zero if SMI arrived while vCPU was in guest mode. */
if (!smram64->svm_guest_flag)
return 0;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
return 1;
if (!(smram64->efer & EFER_SVME))
@@ -4790,9 +4791,15 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
bool smep, smap, is_user;
u64 error_code;
+ /* Check that emulation is possible during event vectoring */
+ if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) &&
+ !kvm_can_emulate_event_vectoring(emul_type))
+ return X86EMUL_UNHANDLEABLE_VECTORING;
+
/* Emulation is always possible when KVM has access to all guest state. */
if (!sev_guest(vcpu->kvm))
return X86EMUL_CONTINUE;
@@ -4889,7 +4896,7 @@ static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* In addition, don't apply the erratum workaround if the #NPF occurred
* while translating guest page tables (see below).
*/
- error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
+ error_code = svm->vmcb->control.exit_info_1;
if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
goto resume_guest;
@@ -5077,6 +5084,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
.get_exit_info = svm_get_exit_info,
+ .get_entry_info = svm_get_entry_info,
.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
@@ -5328,7 +5336,7 @@ static __init int svm_hardware_setup(void)
/* Force VM NPT level equal to the host's paging level */
kvm_configure_mmu(npt_enabled, get_npt_level(),
get_npt_level(), PG_LEVEL_1G);
- pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
+ pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
/* Setup shadow_me_value and shadow_me_mask */
kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 43fa6a16eb19..9d7cdb8fbf87 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -358,39 +358,32 @@ static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
return &to_kvm_svm(kvm)->sev_info;
}
+#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
-#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return sev->active;
-#else
- return false;
-#endif
}
-
static __always_inline bool sev_es_guest(struct kvm *kvm)
{
-#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return sev->es_active && !WARN_ON_ONCE(!sev->active);
-#else
- return false;
-#endif
}
static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
-#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
!WARN_ON_ONCE(!sev_es_guest(kvm));
+}
#else
- return false;
+#define sev_guest(kvm) false
+#define sev_es_guest(kvm) false
+#define sev_snp_guest(kvm) false
#endif
-}
static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
@@ -502,7 +495,7 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
- return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
+ return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
(svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}
@@ -554,7 +547,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
- return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
+ return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
(svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}
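The sev_guest()/sev_es_guest()/sev_snp_guest() rework above moves the CONFIG_KVM_AMD_SEV=n case out of each function body and into constant-false macros, so the real helpers are only compiled when the feature is. A toy illustration of that config-gating pattern (CONFIG_TOY_SEV and the struct are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Toggle to mimic CONFIG_KVM_AMD_SEV being enabled or compiled out. */
#define CONFIG_TOY_SEV 1

struct vm { bool sev_active; };

#if CONFIG_TOY_SEV
/* The real helper only exists when the feature is compiled in... */
static inline bool toy_sev_guest(struct vm *vm)
{
	return vm->sev_active;
}
#else
/*
 * ...otherwise a constant-false macro lets the compiler drop dead code
 * without needing the fields the real helper would touch.
 */
#define toy_sev_guest(vm) false
#endif

int main(void)
{
	struct vm vm = { .sev_active = true };

	printf("sev guest: %d\n", toy_sev_guest(&vm));
	return 0;
}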
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index d3aeffd6ae75..0b844cb97978 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -22,15 +22,22 @@ TRACE_EVENT(kvm_entry,
__field( unsigned int, vcpu_id )
__field( unsigned long, rip )
__field( bool, immediate_exit )
+ __field( u32, intr_info )
+ __field( u32, error_code )
),
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->rip = kvm_rip_read(vcpu);
__entry->immediate_exit = force_immediate_exit;
+
+ kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
+ &__entry->error_code);
),
- TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
+ TP_printk("vcpu %u, rip 0x%lx intr_info 0x%08x error_code 0x%08x%s",
+ __entry->vcpu_id, __entry->rip,
+ __entry->intr_info, __entry->error_code,
__entry->immediate_exit ? "[immediate exit]" : "")
);
@@ -308,12 +315,14 @@ TRACE_EVENT(name, \
__field( u32, intr_info ) \
__field( u32, error_code ) \
__field( unsigned int, vcpu_id ) \
+ __field( u64, requests ) \
), \
\
TP_fast_assign( \
__entry->guest_rip = kvm_rip_read(vcpu); \
__entry->isa = isa; \
__entry->vcpu_id = vcpu->vcpu_id; \
+ __entry->requests = READ_ONCE(vcpu->requests); \
kvm_x86_call(get_exit_info)(vcpu, \
&__entry->exit_reason, \
&__entry->info1, \
@@ -323,11 +332,13 @@ TRACE_EVENT(name, \
), \
\
TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \
- "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \
+ "info2 0x%016llx intr_info 0x%08x error_code 0x%08x " \
+ "requests 0x%016llx", \
__entry->vcpu_id, \
kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
__entry->guest_rip, __entry->info1, __entry->info2, \
- __entry->intr_info, __entry->error_code) \
+ __entry->intr_info, __entry->error_code, \
+ __entry->requests) \
)
/*
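The kvm_entry tracepoint now records pending injection state via the new get_entry_info hook; in svm_get_entry_info() above, the error code is reported only when the event-injection field carries both its valid and valid-error-code bits. A toy sketch of that gating follows; the bit positions and names are invented for the example and do not match SVM's actual encoding.

#include <stdint.h>
#include <stdio.h>

#define EVT_VALID      (1u << 31)	/* toy "event valid" bit */
#define EVT_VALID_ERR  (1u << 11)	/* toy "error code valid" bit */

struct entry_info { uint32_t intr_info; uint32_t error_code; };

/*
 * Report the raw injection field, but only trust the error code when
 * both validity bits are set; otherwise report zero.
 */
static struct entry_info get_entry_info(uint32_t event_inj, uint32_t event_err)
{
	struct entry_info info = { .intr_info = event_inj, .error_code = 0 };

	if ((event_inj & EVT_VALID) && (event_inj & EVT_VALID_ERR))
		info.error_code = event_err;
	return info;
}

int main(void)
{
	struct entry_info i = get_entry_info(EVT_VALID | EVT_VALID_ERR | 14, 0x2);

	printf("intr_info=%#x error_code=%#x\n", i.intr_info, i.error_code);
	return 0;
}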
diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h
index a87407412615..11a339009781 100644
--- a/arch/x86/kvm/vmx/hyperv.h
+++ b/arch/x86/kvm/vmx/hyperv.h
@@ -42,7 +42,7 @@ static inline struct hv_enlightened_vmcs *nested_vmx_evmcs(struct vcpu_vmx *vmx)
return vmx->nested.hv_evmcs;
}
-static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
+static inline bool guest_cpu_cap_has_evmcs(struct kvm_vcpu *vcpu)
{
/*
* eVMCS is exposed to the guest if Hyper-V is enabled in CPUID and
diff --git a/arch/x86/kvm/vmx/hyperv_evmcs.h b/arch/x86/kvm/vmx/hyperv_evmcs.h
index a543fccfc574..6536290f4274 100644
--- a/arch/x86/kvm/vmx/hyperv_evmcs.h
+++ b/arch/x86/kvm/vmx/hyperv_evmcs.h
@@ -6,7 +6,7 @@
#ifndef __KVM_X86_VMX_HYPERV_EVMCS_H
#define __KVM_X86_VMX_HYPERV_EVMCS_H
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "capabilities.h"
#include "vmcs12.h"
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 92d35cc6cd15..2427f918e763 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -100,7 +100,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
- .hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_interrupt = vmx_deliver_interrupt,
@@ -111,6 +110,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.get_mt_mask = vmx_get_mt_mask,
.get_exit_info = vmx_get_exit_info,
+ .get_entry_info = vmx_get_entry_info,
.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
@@ -126,7 +126,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.check_intercept = vmx_check_intercept,
.handle_exit_irqoff = vmx_handle_exit_irqoff,
- .cpu_dirty_log_size = PML_ENTITY_NUM,
+ .cpu_dirty_log_size = PML_LOG_NR_ENTRIES,
.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
.nested_ops = &vmx_nested_ops,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index aa78b6f38dfe..8a7af02d466e 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -257,7 +257,7 @@ static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
* state. It is possible that the area will stay mapped as
* vmx->nested.hv_evmcs but this shouldn't be a problem.
*/
- if (!guest_cpuid_has_evmcs(vcpu) ||
+ if (!guest_cpu_cap_has_evmcs(vcpu) ||
!evmptr_is_valid(nested_get_evmptr(vcpu)))
return false;
@@ -2089,7 +2089,7 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
bool evmcs_gpa_changed = false;
u64 evmcs_gpa;
- if (likely(!guest_cpuid_has_evmcs(vcpu)))
+ if (likely(!guest_cpu_cap_has_evmcs(vcpu)))
return EVMPTRLD_DISABLED;
evmcs_gpa = nested_get_evmptr(vcpu);
@@ -2992,7 +2992,7 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
#ifdef CONFIG_KVM_HYPERV
- if (guest_cpuid_has_evmcs(vcpu))
+ if (guest_cpu_cap_has_evmcs(vcpu))
return nested_evmcs_check_controls(vmcs12);
#endif
@@ -3287,7 +3287,7 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
* L2 was running), map it here to make sure vmcs12 changes are
* properly reflected.
*/
- if (guest_cpuid_has_evmcs(vcpu) &&
+ if (guest_cpu_cap_has_evmcs(vcpu) &&
vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
enum nested_evmptrld_status evmptrld_status =
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
@@ -3442,7 +3442,7 @@ static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
if (!nested_cpu_has_pml(vmcs12))
return 0;
- if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ if (vmcs12->guest_pml_index >= PML_LOG_NR_ENTRIES) {
vmx->nested.pml_full = true;
return 1;
}
@@ -3481,14 +3481,6 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
return 1;
}
-static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
-{
- u8 rvi = vmx_get_rvi();
- u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
-
- return ((rvi & 0xf0) > (vppr & 0xf0));
-}
-
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
@@ -3508,7 +3500,6 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
enum vm_entry_failure_code entry_failure_code;
- bool evaluate_pending_interrupts;
union vmx_exit_reason exit_reason = {
.basic = EXIT_REASON_INVALID_STATE,
.failed_vmentry = 1,
@@ -3527,13 +3518,6 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
kvm_service_local_tlb_flush_requests(vcpu);
- evaluate_pending_interrupts = exec_controls_get(vmx) &
- (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
- if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
- evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
- if (!evaluate_pending_interrupts)
- evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu);
-
if (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
@@ -3616,9 +3600,13 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
* Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
* when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
* effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
- * unconditionally.
+ * unconditionally. Take care to pull data from vmcs01 as appropriate,
+ * e.g. when checking for interrupt windows, as vmcs02 is now loaded.
*/
- if (unlikely(evaluate_pending_interrupts))
+ if ((__exec_controls_get(&vmx->vmcs01) & (CPU_BASED_INTR_WINDOW_EXITING |
+ CPU_BASED_NMI_WINDOW_EXITING)) ||
+ kvm_apic_has_pending_init_or_sipi(vcpu) ||
+ kvm_apic_has_interrupt(vcpu))
kvm_make_request(KVM_REQ_EVENT, vcpu);
/*
@@ -3751,14 +3739,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (unlikely(status != NVMX_VMENTRY_SUCCESS))
goto vmentry_failed;
- /* Emulate processing of posted interrupts on VM-Enter. */
- if (nested_cpu_has_posted_intr(vmcs12) &&
- kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
- vmx->nested.pi_pending = true;
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
- }
-
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
@@ -4220,13 +4200,25 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
*/
bool block_nested_exceptions = vmx->nested.nested_run_pending;
/*
- * New events (not exceptions) are only recognized at instruction
+ * Events that don't require injection, i.e. that are virtualized by
+ * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need
+ * to regain control in order to deliver the event, and hardware will
+ * handle event ordering, e.g. with respect to injected exceptions.
+ *
+ * But, new events (not exceptions) are only recognized at instruction
* boundaries. If an event needs reinjection, then KVM is handling a
- * VM-Exit that occurred _during_ instruction execution; new events are
- * blocked until the instruction completes.
+ * VM-Exit that occurred _during_ instruction execution; new events,
+ * irrespective of whether or not they're injected, are blocked until
+ * the instruction completes.
+ */
+ bool block_non_injected_events = kvm_event_needs_reinjection(vcpu);
+ /*
+ * Injected events are blocked by nested VM-Enter, as KVM is responsible
+ * for managing priority between concurrent events, i.e. KVM needs to
+ * wait until after VM-Enter completes to deliver injected events.
*/
bool block_nested_events = block_nested_exceptions ||
- kvm_event_needs_reinjection(vcpu);
+ block_non_injected_events;
if (lapic_in_kernel(vcpu) &&
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -4338,18 +4330,26 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
int irq;
- if (block_nested_events)
- return -EBUSY;
- if (!nested_exit_on_intr(vcpu))
+ if (!nested_exit_on_intr(vcpu)) {
+ if (block_nested_events)
+ return -EBUSY;
+
goto no_vmexit;
+ }
if (!nested_exit_intr_ack_set(vcpu)) {
+ if (block_nested_events)
+ return -EBUSY;
+
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
return 0;
}
irq = kvm_cpu_get_extint(vcpu);
if (irq != -1) {
+ if (block_nested_events)
+ return -EBUSY;
+
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
return 0;
@@ -4368,11 +4368,22 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
* and enabling posted interrupts requires ACK-on-exit.
*/
if (irq == vmx->nested.posted_intr_nv) {
+ /*
+ * Nested posted interrupts are delivered via RVI, i.e.
+ * aren't injected by KVM, and so can be queued even if
+ * manual event injection is disallowed.
+ */
+ if (block_non_injected_events)
+ return -EBUSY;
+
vmx->nested.pi_pending = true;
kvm_apic_clear_irr(vcpu, irq);
goto no_vmexit;
}
+ if (block_nested_events)
+ return -EBUSY;
+
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
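Illustration (not part of the patch): the reshuffled -EBUSY checks above reduce to a small decision, shown here as a standalone sketch with invented names; the sub-cases that all end in a nested VM-Exit are collapsed into one. The point is that a nested posted interrupt, being delivered via RVI by hardware, only needs non-injected events to be unblocked, while every path that requires KVM to inject or to synthesize a VM-Exit still needs block_nested_events to be clear.

#include <stdbool.h>

enum irq_action { DELIVER_TO_L2, QUEUE_POSTED_IRQ, VMEXIT_TO_L1, RETRY_LATER };

/* Hypothetical helper mirroring the ordering of the checks above. */
static enum irq_action nested_irq_action(bool l1_intercepts_irqs, bool irq_is_posted,
                                         bool block_nested_events,
                                         bool block_non_injected_events)
{
        if (!l1_intercepts_irqs)        /* IRQ is injected into L2 by KVM */
                return block_nested_events ? RETRY_LATER : DELIVER_TO_L2;

        if (irq_is_posted)              /* delivered via RVI, never injected */
                return block_non_injected_events ? RETRY_LATER : QUEUE_POSTED_IRQ;

        return block_nested_events ? RETRY_LATER : VMEXIT_TO_L1;
}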
@@ -5015,7 +5026,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
* doesn't isolate different VMCSs, i.e. in this case, doesn't provide
* separate modes for L2 vs L1.
*/
- if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL))
indirect_branch_prediction_barrier();
/* Update any VMCS fields that might have changed while L2 ran */
@@ -5050,6 +5061,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}
+ if (vmx->nested.update_vmcs01_hwapic_isr) {
+ vmx->nested.update_vmcs01_hwapic_isr = false;
+ kvm_apic_update_hwapic_isr(vcpu);
+ }
+
if ((vm_exit_reason != -1) &&
(enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)))
vmx->nested.need_vmcs12_to_shadow_sync = true;
@@ -6279,7 +6295,7 @@ static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
{
u32 encls_leaf;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX) ||
!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
return false;
@@ -6617,7 +6633,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vmx = to_vmx(vcpu);
vmcs12 = get_vmcs12(vcpu);
- if (guest_can_use(vcpu, X86_FEATURE_VMX) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) &&
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
@@ -6758,7 +6774,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
return -EINVAL;
} else {
- if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
return -EINVAL;
if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
@@ -6792,7 +6808,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
- (!guest_can_use(vcpu, X86_FEATURE_VMX) ||
+ (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) ||
!vmx->nested.enlightened_vmcs_enabled))
return -EINVAL;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 9c9d4a336166..77012b2eca0e 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -110,7 +110,7 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
return 0;
return vcpu->arch.perf_capabilities;
@@ -160,7 +160,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
break;
case MSR_IA32_DS_AREA:
- ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
+ ret = guest_cpu_cap_has(vcpu, X86_FEATURE_DS);
break;
case MSR_PEBS_DATA_CFG:
perf_capabilities = vcpu_get_perf_capabilities(vcpu);
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index b352a3ba7354..9961e07cf071 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -122,7 +122,7 @@ static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
* likely than a bad userspace address.
*/
if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) &&
- guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) {
+ guest_cpu_cap_has(vcpu, X86_FEATURE_SGX2)) {
memset(&ex, 0, sizeof(ex));
ex.vector = PF_VECTOR;
ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
@@ -365,7 +365,7 @@ static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
return true;
if (leaf >= EAUG && leaf <= EMODT)
- return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);
+ return guest_cpu_cap_has(vcpu, X86_FEATURE_SGX2);
return false;
}
@@ -381,8 +381,8 @@ int handle_encls(struct kvm_vcpu *vcpu)
{
u32 leaf = (u32)kvm_rax_read(vcpu);
- if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
- !guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
+ if (!enable_sgx || !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX) ||
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX1)) {
kvm_queue_exception(vcpu, UD_VECTOR);
} else if (!encls_leaf_enabled_in_guest(vcpu, leaf) ||
!sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
@@ -479,15 +479,15 @@ void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (!cpu_has_vmx_encls_vmexit())
return;
- if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX) &&
sgx_enabled_in_guest_bios(vcpu)) {
- if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX1)) {
bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
if (sgx_intercept_encls_ecreate(vcpu))
bitmap |= (1 << ECREATE);
}
- if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX2))
bitmap &= ~GENMASK_ULL(EMODT, EAUG);
/*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 893366e53732..f72835e85b6d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1636,7 +1636,8 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
* result in a #GP unless the same write also clears TraceEn.
*/
if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
- ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
+ (data & RTIT_CTL_TRACEEN) &&
+ data != vmx->pt_desc.guest.ctl)
return 1;
/*
@@ -1705,6 +1706,12 @@ int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
kvm_queue_exception(vcpu, UD_VECTOR);
return X86EMUL_PROPAGATE_FAULT;
}
+
+ /* Check that emulation is possible during event vectoring */
+ if ((to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ !kvm_can_emulate_event_vectoring(emul_type))
+ return X86EMUL_UNHANDLEABLE_VECTORING;
+
return X86EMUL_CONTINUE;
}
@@ -1908,8 +1915,8 @@ static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
- guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
+ guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDPID));
/*
* hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
@@ -2062,7 +2069,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -2078,13 +2085,13 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
return 1;
msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
break;
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
- if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
return 1;
if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
&msr_info->data))
@@ -2097,7 +2104,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* sanity checking and refuse to boot. Filter all unsupported
* features out.
*/
- if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
+ if (!msr_info->host_initiated && guest_cpu_cap_has_evmcs(vcpu))
nested_evmcs_filter_control_msr(vcpu, msr_info->index,
&msr_info->data);
#endif
@@ -2167,7 +2174,7 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
u64 data)
{
#ifdef CONFIG_X86_64
- if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
return (u32)data;
#endif
return (unsigned long)data;
@@ -2178,7 +2185,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
u64 debugctl = 0;
if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
- (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
+ (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
@@ -2282,7 +2289,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX)))
return 1;
if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) ||
(data & MSR_IA32_BNDCFGS_RSVD))
@@ -2384,7 +2391,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* behavior, but it's close enough.
*/
if (!msr_info->host_initiated &&
- (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
+ (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC) ||
((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
!(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
return 1;
@@ -2394,7 +2401,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
if (!msr_info->host_initiated)
return 1; /* they are read-only */
- if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
return 1;
return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_RTIT_CTL:
@@ -2468,9 +2475,9 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((data & PERF_CAP_PEBS_MASK) !=
(kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
return 1;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DS))
return 1;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_DTES64))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DTES64))
return 1;
if (!cpuid_model_is_consistent(vcpu))
return 1;
@@ -4590,10 +4597,7 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
bool __enabled; \
\
if (cpu_has_vmx_##name()) { \
- if (kvm_is_governed_feature(X86_FEATURE_##feat_name)) \
- __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \
- else \
- __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \
+ __enabled = guest_cpu_cap_has(__vcpu, X86_FEATURE_##feat_name); \
vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
__enabled, exiting); \
} \
@@ -4669,8 +4673,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
*/
if (cpu_has_vmx_rdtscp()) {
bool rdpid_or_rdtscp_enabled =
- guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+ guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID);
vmx_adjust_secondary_exec_control(vmx, &exec_control,
SECONDARY_EXEC_ENABLE_RDTSCP,
@@ -4820,7 +4824,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
if (enable_pml) {
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
}
vmx_write_encls_bitmap(&vmx->vcpu, NULL);
@@ -5959,7 +5963,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
} operand;
int gpr_index;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -6049,7 +6053,7 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
/*
* When nested=0, all VMX instruction VM Exits filter here. The handlers
- * are overwritten by nested_vmx_setup() when nested=1.
+ * are overwritten by nested_vmx_hardware_setup() when nested=1.
*/
static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
{
@@ -6191,6 +6195,15 @@ void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
}
}
+void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
+{
+ *intr_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+ if (is_exception_with_error_code(*intr_info))
+ *error_code = vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
+ else
+ *error_code = 0;
+}
+
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
if (vmx->pml_pg) {
@@ -6202,32 +6215,40 @@ static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u16 pml_idx, pml_tail_index;
u64 *pml_buf;
- u16 pml_idx;
+ int i;
pml_idx = vmcs_read16(GUEST_PML_INDEX);
/* Do nothing if PML buffer is empty */
- if (pml_idx == (PML_ENTITY_NUM - 1))
+ if (pml_idx == PML_HEAD_INDEX)
return;
+ /*
+ * PML index always points to the next available PML buffer entity
+ * unless PML log has just overflowed.
+ */
+ pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
- /* PML index always points to next available PML buffer entity */
- if (pml_idx >= PML_ENTITY_NUM)
- pml_idx = 0;
- else
- pml_idx++;
-
+ /*
+ * The PML log is written backwards: the CPU writes entry 511 first,
+ * then entry 510, and so on.
+ *
+ * Read the entries in the same order they were written, to ensure that
+ * the dirty ring is filled in the same order the CPU wrote them.
+ */
pml_buf = page_address(vmx->pml_pg);
- for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
+
+ for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
u64 gpa;
- gpa = pml_buf[pml_idx];
+ gpa = pml_buf[i];
WARN_ON(gpa & (PAGE_SIZE - 1));
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
/* reset PML index */
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
}
static void vmx_dump_sel(char *name, uint32_t sel)
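Illustration (not part of the patch): a standalone user-space sketch of the new head/tail arithmetic above, using the PML_LOG_NR_ENTRIES/PML_HEAD_INDEX values introduced in vmx.h further down; the helper name and the example indices are invented.

#include <stdio.h>

#define PML_LOG_NR_ENTRIES 512
#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES - 1)   /* the CPU fills entry 511 first */

/* Mirrors the new index handling in vmx_flush_pml_buffer(). */
static void show_flush_range(unsigned int pml_idx)
{
        unsigned int tail;

        if (pml_idx == PML_HEAD_INDEX) {
                printf("pml_idx=%u: buffer empty, nothing to flush\n", pml_idx);
                return;
        }

        /* pml_idx points at the next free slot unless the log just overflowed. */
        tail = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;

        printf("pml_idx=%u: flush entries %u down to %u, in the order the CPU wrote them\n",
               pml_idx, PML_HEAD_INDEX, tail);
}

int main(void)
{
        show_flush_range(511);   /* empty */
        show_flush_range(509);   /* entries 511 and 510 are valid */
        show_flush_range(1024);  /* any out-of-range value means overflow: flush 511..0 */
        return 0;
}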
@@ -6543,33 +6564,15 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return 0;
}
- /*
- * Note:
- * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by
- * delivery event since it indicates guest is accessing MMIO.
- * The vm-exit can be triggered again after return to guest that
- * will cause infinite loop.
- */
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
exit_reason.basic != EXIT_REASON_PML_FULL &&
exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
- exit_reason.basic != EXIT_REASON_NOTIFY)) {
- int ndata = 3;
-
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
- vcpu->run->internal.data[0] = vectoring_info;
- vcpu->run->internal.data[1] = exit_reason.full;
- vcpu->run->internal.data[2] = vmx_get_exit_qual(vcpu);
- if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
- vcpu->run->internal.data[ndata++] =
- vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- }
- vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
- vcpu->run->internal.ndata = ndata;
+ exit_reason.basic != EXIT_REASON_NOTIFY &&
+ exit_reason.basic != EXIT_REASON_EPT_MISCONFIG)) {
+ kvm_prepare_event_vectoring_exit(vcpu, INVALID_GPA);
return 0;
}
@@ -6862,11 +6865,32 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
read_unlock(&vcpu->kvm->mmu_lock);
}
-void vmx_hwapic_isr_update(int max_isr)
+void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
u16 status;
u8 old;
+ /*
+ * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI
+ * is relevant if and only if Virtual Interrupt Delivery is
+ * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's
+ * vAPIC, not L1's vAPIC. KVM must update vmcs01 on the next nested
+ * VM-Exit, otherwise L1 will run with a stale SVI.
+ */
+ if (is_guest_mode(vcpu)) {
+ /*
+ * KVM is supposed to forward intercepted L2 EOIs to L1 if VID
+ * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC.
+ * Note, userspace can stuff state while L2 is active; assert
+ * that VID is disabled only when the vCPU is in KVM_RUN
+ * to avoid false positives if userspace is setting APIC state.
+ */
+ WARN_ON_ONCE(vcpu->wants_to_run &&
+ nested_cpu_has_vid(get_vmcs12(vcpu)));
+ to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
+ return;
+ }
+
if (max_isr == -1)
max_isr = 0;
@@ -6896,20 +6920,6 @@ static void vmx_set_rvi(int vector)
}
}
-void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
-{
- /*
- * When running L2, updating RVI is only relevant when
- * vmcs12 virtual-interrupt-delivery enabled.
- * However, it can be enabled only when L1 also
- * intercepts external-interrupts and in that case
- * we should not update vmcs02 RVI but instead intercept
- * interrupt. Therefore, do nothing when running L2.
- */
- if (!is_guest_mode(vcpu))
- vmx_set_rvi(max_irr);
-}
-
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7828,12 +7838,8 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
* set if and only if XSAVE is supported.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
-
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
- kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE))
+ guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
vmx_setup_uret_msrs(vmx);
@@ -7841,7 +7847,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmcs_set_secondary_exec_control(vmx,
vmx_secondary_exec_control(vmx));
- if (guest_can_use(vcpu, X86_FEATURE_VMX))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
vmx->msr_ia32_feature_control_valid_bits |=
FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
@@ -7850,25 +7856,25 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
- if (guest_can_use(vcpu, X86_FEATURE_VMX))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
nested_vmx_cr_fixed1_bits_update(vcpu);
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
- guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
+ guest_cpu_cap_has(vcpu, X86_FEATURE_INTEL_PT))
update_intel_pt_cfg(vcpu);
if (boot_cpu_has(X86_FEATURE_RTM)) {
struct vmx_uret_msr *msr;
msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
if (msr) {
- bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
+ bool enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_RTM);
vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
}
}
if (kvm_cpu_cap_has(X86_FEATURE_XFD))
vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
- !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD));
if (boot_cpu_has(X86_FEATURE_IBPB))
vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
@@ -7876,17 +7882,17 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
- !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
set_cr4_guest_host_mask(vmx);
vmx_write_encls_bitmap(vcpu, NULL);
- if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX))
vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
else
vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
- if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC))
vmx->msr_ia32_feature_control_valid_bits |=
FEAT_CTL_SGX_LC_ENABLED;
else
@@ -8597,7 +8603,7 @@ static void __vmx_exit(void)
vmx_cleanup_l1d_flush();
}
-static void vmx_exit(void)
+static void __exit vmx_exit(void)
{
kvm_exit();
__vmx_exit();
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 43f573f6ca46..8b111ce1087c 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -176,6 +176,7 @@ struct nested_vmx {
bool reload_vmcs01_apic_access_page;
bool update_vmcs01_cpu_dirty_logging;
bool update_vmcs01_apicv_status;
+ bool update_vmcs01_hwapic_isr;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to
@@ -330,7 +331,10 @@ struct vcpu_vmx {
bool ple_window_dirty;
/* Support for PML */
-#define PML_ENTITY_NUM 512
+#define PML_LOG_NR_ENTRIES 512
+ /* PML is written backwards: this is the first entry written by the CPU */
+#define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
+
struct page *pml_pg;
/* apic deadline value in host tsc */
diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h
index bba24ed99ee6..cdf8cbb69209 100644
--- a/arch/x86/kvm/vmx/vmx_onhyperv.h
+++ b/arch/x86/kvm/vmx/vmx_onhyperv.h
@@ -3,7 +3,7 @@
#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
#define __ARCH_X86_KVM_VMX_ONHYPERV_H__
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <linux/jump_label.h>
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index a55981c5216e..ce3295a67c04 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -47,8 +47,7 @@ bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
-void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
-void vmx_hwapic_isr_update(int max_isr);
+void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector);
@@ -104,8 +103,11 @@ void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+
void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
+void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code);
+
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c79a8cc57ba4..8e77e61d4fbd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -119,8 +119,6 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
-static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
-
#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
@@ -1179,7 +1177,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.xcr0 != kvm_host.xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
- if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
@@ -1188,7 +1186,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
vcpu->arch.pkru != vcpu->arch.host_pkru &&
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
- write_pkru(vcpu->arch.pkru);
+ wrpkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
@@ -1202,7 +1200,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
vcpu->arch.pkru = rdpkru();
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
- write_pkru(vcpu->arch.host_pkru);
+ wrpkru(vcpu->arch.host_pkru);
}
if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
@@ -1210,7 +1208,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.xcr0 != kvm_host.xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
- if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != kvm_host.xss)
wrmsrl(MSR_IA32_XSS, kvm_host.xss);
}
@@ -1283,18 +1281,6 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
-bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
-{
- if (cr4 & cr4_reserved_bits)
- return false;
-
- if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
-
static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
return __kvm_is_valid_cr4(vcpu, cr4) &&
@@ -1516,10 +1502,10 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
u64 fixed = DR6_FIXED_1;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_RTM))
fixed |= DR6_RTM;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
fixed |= DR6_BUS_LOCK;
return fixed;
}
@@ -1695,20 +1681,20 @@ static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
+ if (efer & EFER_AUTOIBRS && !guest_cpu_cap_has(vcpu, X86_FEATURE_AUTOIBRS))
return false;
- if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
+ if (efer & EFER_FFXSR && !guest_cpu_cap_has(vcpu, X86_FEATURE_FXSR_OPT))
return false;
- if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ if (efer & EFER_SVME && !guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
return false;
if (efer & (EFER_LME | EFER_LMA) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
return false;
- if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
+ if (efer & EFER_NX && !guest_cpu_cap_has(vcpu, X86_FEATURE_NX))
return false;
return true;
@@ -1850,8 +1836,8 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
return 1;
if (!host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
return 1;
/*
@@ -1908,8 +1894,8 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
return 1;
if (!host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
return 1;
break;
}
@@ -2095,7 +2081,7 @@ EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT))
return kvm_handle_invalid_op(vcpu);
pr_warn_once("%s instruction emulated as NOP!\n", insn);
@@ -3767,13 +3753,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_ARCH_CAPABILITIES:
if (!msr_info->host_initiated ||
- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
return KVM_MSR_RET_UNSUPPORTED;
vcpu->arch.arch_capabilities = data;
break;
case MSR_IA32_PERF_CAPABILITIES:
if (!msr_info->host_initiated ||
- !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
return KVM_MSR_RET_UNSUPPORTED;
if (data & ~kvm_caps.supported_perf_cap)
@@ -3797,11 +3783,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((!guest_has_pred_cmd_msr(vcpu)))
return 1;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB))
reserved_bits |= PRED_CMD_IBPB;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SBPB))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB))
reserved_bits |= PRED_CMD_SBPB;
}
@@ -3822,7 +3808,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
case MSR_IA32_FLUSH_CMD:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D))
return 1;
if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
@@ -3873,7 +3859,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
- if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
adjust_tsc_offset_guest(vcpu, adj);
@@ -3900,7 +3886,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
- if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
return 1;
vcpu->arch.ia32_misc_enable_msr = data;
kvm_update_cpuid_runtime(vcpu);
@@ -4077,12 +4063,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
kvm_pr_unimpl_wrmsr(vcpu, msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
return 1;
vcpu->arch.osvw.status = data;
break;
@@ -4101,7 +4087,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
#ifdef CONFIG_X86_64
case MSR_IA32_XFD:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
return 1;
if (data & ~kvm_guest_supported_xfd(vcpu))
@@ -4111,7 +4097,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_XFD_ERR:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
return 1;
if (data & ~kvm_guest_supported_xfd(vcpu))
@@ -4226,12 +4212,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.microcode_version;
break;
case MSR_IA32_ARCH_CAPABILITIES:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
return KVM_MSR_RET_UNSUPPORTED;
msr_info->data = vcpu->arch.arch_capabilities;
break;
case MSR_IA32_PERF_CAPABILITIES:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
return KVM_MSR_RET_UNSUPPORTED;
msr_info->data = vcpu->arch.perf_capabilities;
break;
@@ -4432,12 +4418,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0xbe702111;
break;
case MSR_AMD64_OSVW_ID_LENGTH:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
return 1;
msr_info->data = vcpu->arch.osvw.length;
break;
case MSR_AMD64_OSVW_STATUS:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW))
return 1;
msr_info->data = vcpu->arch.osvw.status;
break;
@@ -4456,14 +4442,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
#ifdef CONFIG_X86_64
case MSR_IA32_XFD:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
return 1;
msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
break;
case MSR_IA32_XFD_ERR:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD))
return 1;
msr_info->data = vcpu->arch.guest_fpu.xfd_err;
@@ -4545,6 +4531,20 @@ static inline bool kvm_can_mwait_in_guest(void)
boot_cpu_has(X86_FEATURE_ARAT);
}
+static u64 kvm_get_allowed_disable_exits(void)
+{
+ u64 r = KVM_X86_DISABLE_EXITS_PAUSE;
+
+ if (!mitigate_smt_rsb) {
+ r |= KVM_X86_DISABLE_EXITS_HLT |
+ KVM_X86_DISABLE_EXITS_CSTATE;
+
+ if (kvm_can_mwait_in_guest())
+ r |= KVM_X86_DISABLE_EXITS_MWAIT;
+ }
+ return r;
+}
+
#ifdef CONFIG_KVM_HYPERV
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 __user *cpuid_arg)
@@ -4687,15 +4687,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_CLOCK_VALID_FLAGS;
break;
case KVM_CAP_X86_DISABLE_EXITS:
- r = KVM_X86_DISABLE_EXITS_PAUSE;
-
- if (!mitigate_smt_rsb) {
- r |= KVM_X86_DISABLE_EXITS_HLT |
- KVM_X86_DISABLE_EXITS_CSTATE;
-
- if (kvm_can_mwait_in_guest())
- r |= KVM_X86_DISABLE_EXITS_MWAIT;
- }
+ r = kvm_get_allowed_disable_exits();
break;
case KVM_CAP_X86_SMM:
if (!IS_ENABLED(CONFIG_KVM_SMM))
@@ -5822,9 +5814,6 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
- if (vcpu->arch.pv_cpuid.enforce)
- kvm_update_pv_runtime(vcpu);
-
return 0;
default:
return -EINVAL;
@@ -6542,30 +6531,32 @@ split_irqchip_unlock:
break;
case KVM_CAP_X86_DISABLE_EXITS:
r = -EINVAL;
- if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
+ if (cap->args[0] & ~kvm_get_allowed_disable_exits())
break;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
- kvm->arch.pause_in_guest = true;
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus)
+ goto disable_exits_unlock;
#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
"KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
- if (!mitigate_smt_rsb) {
- if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
- (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
- pr_warn_once(SMT_RSB_MSG);
-
- if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
- kvm_can_mwait_in_guest())
- kvm->arch.mwait_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
- kvm->arch.hlt_in_guest = true;
- if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
- kvm->arch.cstate_in_guest = true;
- }
+ if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
+ cpu_smt_possible() &&
+ (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
+ pr_warn_once(SMT_RSB_MSG);
+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
+ kvm->arch.pause_in_guest = true;
+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
+ kvm->arch.mwait_in_guest = true;
+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
+ kvm->arch.hlt_in_guest = true;
+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+ kvm->arch.cstate_in_guest = true;
r = 0;
+disable_exits_unlock:
+ mutex_unlock(&kvm->lock);
break;
case KVM_CAP_MSR_PLATFORM_INFO:
kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
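Illustration (not part of the patch): with the created_vcpus check added above, KVM_CAP_X86_DISABLE_EXITS must now be enabled before the first vCPU is created or the ioctl fails with -EINVAL. A minimal user-space sketch, assuming vm_fd came from KVM_CREATE_VM and that the VMM only wants HLT and PAUSE exits disabled:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int disable_exits(int vm_fd)
{
        struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_DISABLE_EXITS };
        int allowed;

        /* The kernel now reports the allowed bits via kvm_get_allowed_disable_exits(). */
        allowed = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);
        if (allowed < 0)
                return -1;

        /* Request only what the kernel advertises. */
        cap.args[0] = allowed & (KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE);

        /* Must be called before any KVM_CREATE_VCPU on this VM. */
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}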
@@ -8511,17 +8502,17 @@ static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{
- return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
+ return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
}
static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
{
- return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
+ return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
}
static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
{
- return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
+ return guest_cpu_cap_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
}
static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
@@ -8813,6 +8804,28 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
+void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ u32 reason, intr_info, error_code;
+ struct kvm_run *run = vcpu->run;
+ u64 info1, info2;
+ int ndata = 0;
+
+ kvm_x86_call(get_exit_info)(vcpu, &reason, &info1, &info2,
+ &intr_info, &error_code);
+
+ run->internal.data[ndata++] = info2;
+ run->internal.data[ndata++] = reason;
+ run->internal.data[ndata++] = info1;
+ run->internal.data[ndata++] = gpa;
+ run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
+
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+ run->internal.ndata = ndata;
+}
+EXPORT_SYMBOL_GPL(kvm_prepare_event_vectoring_exit);
+
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
struct kvm *kvm = vcpu->kvm;
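Illustration (not part of the patch): a hypothetical VMM-side helper that decodes the payload laid out by kvm_prepare_event_vectoring_exit() above; the field order follows the function, and the GPA slot may hold a sentinel when no guest physical address is known, as in the VMX exit path.

#include <linux/kvm.h>
#include <stdio.h>

static void report_delivery_error(const struct kvm_run *run)
{
        if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
            run->internal.suberror != KVM_INTERNAL_ERROR_DELIVERY_EV)
                return;

        /* Order matches kvm_prepare_event_vectoring_exit(). */
        fprintf(stderr,
                "event delivery failed: info2=%#llx reason=%#llx info1=%#llx gpa=%#llx cpu=%llu\n",
                (unsigned long long)run->internal.data[0],
                (unsigned long long)run->internal.data[1],
                (unsigned long long)run->internal.data[2],
                (unsigned long long)run->internal.data[3],
                (unsigned long long)run->internal.data[4]);
}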
@@ -9085,6 +9098,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
return 1;
+ if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+ emulation_type))
+ return 1;
+
+ if (r == X86EMUL_UNHANDLEABLE_VECTORING) {
+ kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa);
+ return 0;
+ }
+
WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
return handle_emulation_failure(vcpu, emulation_type);
}
@@ -9773,10 +9795,6 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
kvm_caps.supported_xss = 0;
-#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
- cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
-#undef __kvm_cpu_cap_has
-
if (kvm_caps.has_tsc_control) {
/*
* Make sure the user can only configure tsc_khz values that
@@ -9979,17 +9997,19 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
if (!is_64_bit_hypercall(vcpu))
ret = (u32)ret;
kvm_rax_write(vcpu, ret);
- ++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
}
-unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
- unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- int op_64_bit, int cpl)
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ int op_64_bit, int cpl,
+ int (*complete_hypercall)(struct kvm_vcpu *))
{
unsigned long ret;
+ ++vcpu->stat.hypercalls;
+
trace_kvm_hypercall(nr, a0, a1, a2, a3);
if (!op_64_bit) {
@@ -10041,7 +10061,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
u64 gpa = a0, npages = a1, attrs = a2;
ret = -KVM_ENOSYS;
- if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
+ if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE))
break;
if (!PAGE_ALIGNED(gpa) || !npages ||
@@ -10052,6 +10072,13 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ /*
+ * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
+ * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
+ * it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
+ * vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
+ */
+ vcpu->run->hypercall.ret = 0;
vcpu->run->hypercall.args[0] = gpa;
vcpu->run->hypercall.args[1] = npages;
vcpu->run->hypercall.args[2] = attrs;
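Illustration (not part of the patch): the corresponding user-space side of the KVM_HC_MAP_GPA_RANGE exit built above; handle_map_gpa_range() is an invented name.

#include <linux/kvm.h>
#include <linux/kvm_para.h>   /* KVM_HC_MAP_GPA_RANGE */

static void handle_map_gpa_range(struct kvm_run *run)
{
        __u64 gpa, npages, attrs;

        if (run->exit_reason != KVM_EXIT_HYPERCALL ||
            run->hypercall.nr != KVM_HC_MAP_GPA_RANGE)
                return;

        gpa    = run->hypercall.args[0];
        npages = run->hypercall.args[1];
        attrs  = run->hypercall.args[2];

        /* ... convert the [gpa, gpa + npages * 4K) range per attrs here ... */
        (void)gpa; (void)npages; (void)attrs;

        /*
         * KVM now pre-clears hypercall.ret before exiting (see the comment
         * above), but a well-behaved VMM still sets it explicitly before
         * resuming with KVM_RUN.
         */
        run->hypercall.ret = 0;
}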
@@ -10060,8 +10087,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
- vcpu->arch.complete_userspace_io = complete_hypercall_exit;
- /* stat is incremented on completion. */
+ vcpu->arch.complete_userspace_io = complete_hypercall;
return 0;
}
default:
@@ -10070,41 +10096,23 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
}
out:
- ++vcpu->stat.hypercalls;
- return ret;
+ vcpu->run->hypercall.ret = ret;
+ return 1;
}
-EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
- unsigned long nr, a0, a1, a2, a3, ret;
- int op_64_bit;
- int cpl;
-
if (kvm_xen_hypercall_enabled(vcpu->kvm))
return kvm_xen_hypercall(vcpu);
if (kvm_hv_hypercall_enabled(vcpu))
return kvm_hv_hypercall(vcpu);
- nr = kvm_rax_read(vcpu);
- a0 = kvm_rbx_read(vcpu);
- a1 = kvm_rcx_read(vcpu);
- a2 = kvm_rdx_read(vcpu);
- a3 = kvm_rsi_read(vcpu);
- op_64_bit = is_64_bit_hypercall(vcpu);
- cpl = kvm_x86_call(get_cpl)(vcpu);
-
- ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
- if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
- /* MAP_GPA tosses the request to the user space. */
- return 0;
-
- if (!op_64_bit)
- ret = (u32)ret;
- kvm_rax_write(vcpu, ret);
-
- return kvm_skip_emulated_instruction(vcpu);
+ return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
+ is_64_bit_hypercall(vcpu),
+ kvm_x86_call(get_cpl)(vcpu),
+ complete_hypercall_exit);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
@@ -11463,6 +11471,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
int r;
+ r = kvm_mmu_post_init_vm(vcpu->kvm);
+ if (r)
+ return r;
+
vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
kvm_run->flags = 0;
@@ -12276,9 +12288,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
goto free_emulate_ctxt;
}
- vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
- vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
-
kvm_async_pf_hash_reset(vcpu);
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
@@ -12301,6 +12310,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_xen_init_vcpu(vcpu);
vcpu_load(vcpu);
+ kvm_vcpu_after_set_cpuid(vcpu);
kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
kvm_vcpu_reset(vcpu, false);
kvm_init_mmu(vcpu);
@@ -12731,6 +12741,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
"does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
}
+ once_init(&kvm->arch.nx_once);
return 0;
out_uninit_mmu:
@@ -12740,11 +12751,6 @@ out:
return ret;
}
-int kvm_arch_post_init_vm(struct kvm *kvm)
-{
- return kvm_mmu_post_init_vm(kvm);
-}
-
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
vcpu_load(vcpu);
@@ -12800,7 +12806,8 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *slot;
- /* Called with kvm->slots_lock held. */
+ lockdep_assert_held(&kvm->slots_lock);
+
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
return ERR_PTR_USR(-EINVAL);
@@ -12833,7 +12840,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
m.guest_phys_addr = gpa;
m.userspace_addr = hva;
m.memory_size = size;
- r = __kvm_set_memory_region(kvm, &m);
+ r = kvm_set_internal_memslot(kvm, &m);
if (r < 0)
return ERR_PTR_USR(r);
}
@@ -12934,7 +12941,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
/*
* Clear out the previous array pointers for the KVM_MR_MOVE case. The
- * old arrays will be freed by __kvm_set_memory_region() if installing
+ * old arrays will be freed by kvm_set_memory_region() if installing
* the new memslot is successful.
*/
memset(&slot->arch, 0, sizeof(slot->arch));
@@ -13027,6 +13034,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
return -EINVAL;
+ if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1))
+ return -EINVAL;
+
return kvm_alloc_memslot_metadata(kvm, new);
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index ec623d23d13d..91e50a513100 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -550,7 +550,6 @@ static inline void kvm_machine_check(void)
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
-bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
@@ -577,6 +576,11 @@ enum kvm_msr_access {
#define KVM_MSR_RET_UNSUPPORTED 2
#define KVM_MSR_RET_FILTERED 3
+static inline bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+ return !(cr4 & vcpu->arch.cr4_guest_rsvd_bits);
+}
+
#define __cr4_reserved_bits(__cpu_has, __c) \
({ \
u64 __reserved_bits = CR4_RESERVED_BITS; \
@@ -612,4 +616,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count,
int in);
+static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
+{
+ return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
+}
+
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ int op_64_bit, int cpl,
+ int (*complete_hypercall)(struct kvm_vcpu *));
+
+#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall) \
+({ \
+ int __ret; \
+ \
+ __ret = ____kvm_emulate_hypercall(_vcpu, \
+ kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu), \
+ kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu), \
+ kvm_##a3##_read(_vcpu), op_64_bit, cpl, \
+ complete_hypercall); \
+ \
+ if (__ret > 0) \
+ __ret = complete_hypercall(_vcpu); \
+ __ret; \
+})
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+
#endif
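For reference (not part of the patch), the __kvm_emulate_hypercall() invocation in kvm_emulate_hypercall() above expands to roughly the following; the real preprocessor output differs only cosmetically.

int ret = ____kvm_emulate_hypercall(vcpu,
                                    kvm_rax_read(vcpu), kvm_rbx_read(vcpu),
                                    kvm_rcx_read(vcpu), kvm_rdx_read(vcpu),
                                    kvm_rsi_read(vcpu),
                                    is_64_bit_hypercall(vcpu),
                                    kvm_x86_call(get_cpl)(vcpu),
                                    complete_hypercall_exit);
if (ret > 0)                               /* handled in-kernel */
        ret = complete_hypercall_exit(vcpu);  /* writes RAX, skips the instruction */
return ret;                                /* 0 means exit to userspace first */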
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 98583a9dbab3..8a59c61624c2 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -38,6 +38,13 @@ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o
+obj-$(CONFIG_CRC32_ARCH) += crc32-x86.o
+crc32-x86-y := crc32-glue.o crc32-pclmul.o
+crc32-x86-$(CONFIG_64BIT) += crc32c-3way.o
+
+obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-x86.o
+crc-t10dif-x86-y := crc-t10dif-glue.o crct10dif-pcl-asm_64.o
+
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o
diff --git a/arch/x86/lib/crc-t10dif-glue.c b/arch/x86/lib/crc-t10dif-glue.c
new file mode 100644
index 000000000000..13f07ddc9122
--- /dev/null
+++ b/arch/x86/lib/crc-t10dif-glue.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CRC-T10DIF using PCLMULQDQ instructions
+ *
+ * Copyright 2024 Google LLC
+ */
+
+#include <asm/cpufeatures.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <linux/crc-t10dif.h>
+#include <linux/module.h>
+
+static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+
+asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
+
+u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
+{
+ if (len >= 16 &&
+ static_key_enabled(&have_pclmulqdq) && crypto_simd_usable()) {
+ kernel_fpu_begin();
+ crc = crc_t10dif_pcl(crc, p, len);
+ kernel_fpu_end();
+ return crc;
+ }
+ return crc_t10dif_generic(crc, p, len);
+}
+EXPORT_SYMBOL(crc_t10dif_arch);
+
+static int __init crc_t10dif_x86_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ))
+ static_branch_enable(&have_pclmulqdq);
+ return 0;
+}
+arch_initcall(crc_t10dif_x86_init);
+
+static void __exit crc_t10dif_x86_exit(void)
+{
+}
+module_exit(crc_t10dif_x86_exit);
+
+bool crc_t10dif_is_optimized(void)
+{
+ return static_key_enabled(&have_pclmulqdq);
+}
+EXPORT_SYMBOL(crc_t10dif_is_optimized);
+
+MODULE_DESCRIPTION("CRC-T10DIF using PCLMULQDQ instructions");
+MODULE_LICENSE("GPL");
diff --git a/arch/x86/lib/crc32-glue.c b/arch/x86/lib/crc32-glue.c
new file mode 100644
index 000000000000..2dd18a886ded
--- /dev/null
+++ b/arch/x86/lib/crc32-glue.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * x86-optimized CRC32 functions
+ *
+ * Copyright (C) 2008 Intel Corporation
+ * Copyright 2012 Xyratex Technology Limited
+ * Copyright 2024 Google LLC
+ */
+
+#include <asm/cpufeatures.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <linux/crc32.h>
+#include <linux/linkage.h>
+#include <linux/module.h>
+
+/* minimum size of buffer for crc32_pclmul_le_16 */
+#define CRC32_PCLMUL_MIN_LEN 64
+
+static DEFINE_STATIC_KEY_FALSE(have_crc32);
+static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+
+u32 crc32_pclmul_le_16(u32 crc, const u8 *buffer, size_t len);
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (len >= CRC32_PCLMUL_MIN_LEN + 15 &&
+ static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+ size_t n = -(uintptr_t)p & 15;
+
+ /* align p to 16-byte boundary */
+ if (n) {
+ crc = crc32_le_base(crc, p, n);
+ p += n;
+ len -= n;
+ }
+ n = round_down(len, 16);
+ kernel_fpu_begin();
+ crc = crc32_pclmul_le_16(crc, p, n);
+ kernel_fpu_end();
+ p += n;
+ len -= n;
+ }
+ if (len)
+ crc = crc32_le_base(crc, p, len);
+ return crc;
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+#ifdef CONFIG_X86_64
+#define CRC32_INST "crc32q %1, %q0"
+#else
+#define CRC32_INST "crc32l %1, %0"
+#endif
+
+/*
+ * Use carryless multiply version of crc32c when buffer size is >= 512 to
+ * account for FPU state save/restore overhead.
+ */
+#define CRC32C_PCLMUL_BREAKEVEN 512
+
+asmlinkage u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len);
+
+u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ size_t num_longs;
+
+ if (!static_branch_likely(&have_crc32))
+ return crc32c_le_base(crc, p, len);
+
+ if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
+ static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+ kernel_fpu_begin();
+ crc = crc32c_x86_3way(crc, p, len);
+ kernel_fpu_end();
+ return crc;
+ }
+
+ for (num_longs = len / sizeof(unsigned long);
+ num_longs != 0; num_longs--, p += sizeof(unsigned long))
+ asm(CRC32_INST : "+r" (crc) : "rm" (*(unsigned long *)p));
+
+ for (len %= sizeof(unsigned long); len; len--, p++)
+ asm("crc32b %1, %0" : "+r" (crc) : "rm" (*p));
+
+ return crc;
+}
+EXPORT_SYMBOL(crc32c_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+static int __init crc32_x86_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_XMM4_2))
+ static_branch_enable(&have_crc32);
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ))
+ static_branch_enable(&have_pclmulqdq);
+ return 0;
+}
+arch_initcall(crc32_x86_init);
+
+static void __exit crc32_x86_exit(void)
+{
+}
+module_exit(crc32_x86_exit);
+
+u32 crc32_optimizations(void)
+{
+ u32 optimizations = 0;
+
+ if (static_key_enabled(&have_crc32))
+ optimizations |= CRC32C_OPTIMIZATION;
+ if (static_key_enabled(&have_pclmulqdq))
+ optimizations |= CRC32_LE_OPTIMIZATION;
+ return optimizations;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_DESCRIPTION("x86-optimized CRC32 functions");
+MODULE_LICENSE("GPL");
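Illustration (not part of the patch): the head/body/tail split performed by crc32_le_arch() above, as a standalone program with an invented example address. Only the 16-byte-aligned body is fed to crc32_pclmul_le_16(); the head and tail go to crc32_le_base().

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
        uintptr_t p = 0x1003;                      /* hypothetical misaligned buffer address */
        size_t len = 150;

        size_t head = -p & 15;                     /* 13 bytes until the next 16-byte boundary */
        size_t body = (len - head) & ~(size_t)15;  /* round_down(len - head, 16) = 128 bytes */
        size_t tail = len - head - body;           /* 9 trailing bytes */

        printf("head=%zu body=%zu tail=%zu\n", head, body, tail);
        return 0;
}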
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/lib/crc32-pclmul.S
index 5d31137e2c7d..f9637789cac1 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/lib/crc32-pclmul.S
@@ -58,13 +58,13 @@
#define CONSTANT %xmm0
#ifdef __x86_64__
-#define BUF %rdi
-#define LEN %rsi
-#define CRC %edx
+#define CRC %edi
+#define BUF %rsi
+#define LEN %rdx
#else
-#define BUF %eax
-#define LEN %edx
-#define CRC %ecx
+#define CRC %eax
+#define BUF %edx
+#define LEN %ecx
#endif
@@ -72,12 +72,11 @@
.text
/**
* Calculate crc32
- * BUF - buffer (16 bytes aligned)
- * LEN - sizeof buffer (16 bytes aligned), LEN should be grater than 63
* CRC - initial crc32
+ * BUF - buffer (16 bytes aligned)
+ * LEN - sizeof buffer (16 bytes aligned), LEN should be greater than 63
* return %eax crc32
- * uint crc32_pclmul_le_16(unsigned char const *buffer,
- * size_t len, uint crc32)
+ * u32 crc32_pclmul_le_16(u32 crc, const u8 *buffer, size_t len);
*/
SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/lib/crc32c-3way.S
index 752812bc4991..9b8770503bbc 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/lib/crc32c-3way.S
@@ -52,15 +52,16 @@
# regular CRC code that does not interleave the CRC instructions.
#define SMALL_SIZE 200
-# unsigned int crc_pcl(const u8 *buffer, unsigned int len, unsigned int crc_init);
+# u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len);
.text
-SYM_FUNC_START(crc_pcl)
-#define bufp %rdi
-#define bufp_d %edi
-#define len %esi
-#define crc_init %edx
-#define crc_init_q %rdx
+SYM_FUNC_START(crc32c_x86_3way)
+#define crc0 %edi
+#define crc0_q %rdi
+#define bufp %rsi
+#define bufp_d %esi
+#define len %rdx
+#define len_dw %edx
#define n_misaligned %ecx /* overlaps chunk_bytes! */
#define n_misaligned_q %rcx
#define chunk_bytes %ecx /* overlaps n_misaligned! */
@@ -85,9 +86,9 @@ SYM_FUNC_START(crc_pcl)
.Ldo_align:
movq (bufp), %rax
add n_misaligned_q, bufp
- sub n_misaligned, len
+ sub n_misaligned_q, len
.Lalign_loop:
- crc32b %al, crc_init # compute crc32 of 1-byte
+ crc32b %al, crc0 # compute crc32 of 1-byte
shr $8, %rax # get next byte
dec n_misaligned
jne .Lalign_loop
@@ -102,7 +103,7 @@ SYM_FUNC_START(crc_pcl)
.Lpartial_block:
# Compute floor(len / 24) to get num qwords to process from each lane.
- imul $2731, len, %eax # 2731 = ceil(2^16 / 24)
+ imul $2731, len_dw, %eax # 2731 = ceil(2^16 / 24)
shr $16, %eax
jmp .Lcrc_3lanes
@@ -125,16 +126,16 @@ SYM_FUNC_START(crc_pcl)
# Unroll the loop by a factor of 4 to reduce the overhead of the loop
# bookkeeping instructions, which can compete with crc32q for the ALUs.
.Lcrc_3lanes_4x_loop:
- crc32q (bufp), crc_init_q
+ crc32q (bufp), crc0_q
crc32q (bufp,chunk_bytes_q), crc1
crc32q (bufp,chunk_bytes_q,2), crc2
- crc32q 8(bufp), crc_init_q
+ crc32q 8(bufp), crc0_q
crc32q 8(bufp,chunk_bytes_q), crc1
crc32q 8(bufp,chunk_bytes_q,2), crc2
- crc32q 16(bufp), crc_init_q
+ crc32q 16(bufp), crc0_q
crc32q 16(bufp,chunk_bytes_q), crc1
crc32q 16(bufp,chunk_bytes_q,2), crc2
- crc32q 24(bufp), crc_init_q
+ crc32q 24(bufp), crc0_q
crc32q 24(bufp,chunk_bytes_q), crc1
crc32q 24(bufp,chunk_bytes_q,2), crc2
add $32, bufp
@@ -146,7 +147,7 @@ SYM_FUNC_START(crc_pcl)
jz .Lcrc_3lanes_last_qword
.Lcrc_3lanes_1x_loop:
- crc32q (bufp), crc_init_q
+ crc32q (bufp), crc0_q
crc32q (bufp,chunk_bytes_q), crc1
crc32q (bufp,chunk_bytes_q,2), crc2
add $8, bufp
@@ -154,7 +155,7 @@ SYM_FUNC_START(crc_pcl)
jnz .Lcrc_3lanes_1x_loop
.Lcrc_3lanes_last_qword:
- crc32q (bufp), crc_init_q
+ crc32q (bufp), crc0_q
crc32q (bufp,chunk_bytes_q), crc1
# SKIP crc32q (bufp,chunk_bytes_q,2), crc2 ; Don't do this one yet
@@ -165,9 +166,9 @@ SYM_FUNC_START(crc_pcl)
lea (K_table-8)(%rip), %rax # first entry is for idx 1
pmovzxdq (%rax,chunk_bytes_q), %xmm0 # 2 consts: K1:K2
lea (chunk_bytes,chunk_bytes,2), %eax # chunk_bytes * 3
- sub %eax, len # len -= chunk_bytes * 3
+ sub %rax, len # len -= chunk_bytes * 3
- movq crc_init_q, %xmm1 # CRC for block 1
+ movq crc0_q, %xmm1 # CRC for block 1
pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2
movq crc1, %xmm2 # CRC for block 2
@@ -176,8 +177,8 @@ SYM_FUNC_START(crc_pcl)
pxor %xmm2,%xmm1
movq %xmm1, %rax
xor (bufp,chunk_bytes_q,2), %rax
- mov crc2, crc_init_q
- crc32 %rax, crc_init_q
+ mov crc2, crc0_q
+ crc32 %rax, crc0_q
lea 8(bufp,chunk_bytes_q,2), bufp
################################################################
@@ -193,34 +194,34 @@ SYM_FUNC_START(crc_pcl)
## 6) Process any remainder without interleaving:
#######################################################################
.Lsmall:
- test len, len
+ test len_dw, len_dw
jz .Ldone
- mov len, %eax
+ mov len_dw, %eax
shr $3, %eax
jz .Ldo_dword
.Ldo_qwords:
- crc32q (bufp), crc_init_q
+ crc32q (bufp), crc0_q
add $8, bufp
dec %eax
jnz .Ldo_qwords
.Ldo_dword:
- test $4, len
+ test $4, len_dw
jz .Ldo_word
- crc32l (bufp), crc_init
+ crc32l (bufp), crc0
add $4, bufp
.Ldo_word:
- test $2, len
+ test $2, len_dw
jz .Ldo_byte
- crc32w (bufp), crc_init
+ crc32w (bufp), crc0
add $2, bufp
.Ldo_byte:
- test $1, len
+ test $1, len_dw
jz .Ldone
- crc32b (bufp), crc_init
+ crc32b (bufp), crc0
.Ldone:
- mov crc_init, %eax
+ mov crc0, %eax
RET
-SYM_FUNC_END(crc_pcl)
+SYM_FUNC_END(crc32c_x86_3way)
.section .rodata, "a", @progbits
################################################################
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/lib/crct10dif-pcl-asm_64.S
index 5286db5b8165..5286db5b8165 100644
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ b/arch/x86/lib/crct10dif-pcl-asm_64.S
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ac52255fab01..296d294142c8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -7,7 +7,6 @@
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/sched/task_stack.h> /* task_stack_*(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
-#include <linux/extable.h> /* search_exception_tables */
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 95bc50a8541c..ef4514d64c05 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -32,8 +32,6 @@
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
-#include <asm/hyperv-tlfs.h>
-#include <asm/mshyperv.h>
#include "../mm_internal.h"
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5745a354a241..1fef5ad32d5a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -19,12 +19,23 @@ EXPORT_SYMBOL(physical_mask);
#endif
#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PT_RECLAIM
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
- tlb_remove_page(tlb, table);
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
-#endif
+#else
+static inline
+void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ tlb_remove_table(tlb, table);
+}
+#endif /* !CONFIG_PT_RECLAIM */
+#endif /* !CONFIG_PARAVIRT */
gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
@@ -52,15 +63,13 @@ early_param("userpte", setup_userpte);
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
- pagetable_pte_dtor(page_ptdesc(pte));
paravirt_release_pte(page_to_pfn(pte));
- paravirt_tlb_remove_table(tlb, pte);
+ paravirt_tlb_remove_table(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
/*
* NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -69,25 +78,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- pagetable_pmd_dtor(ptdesc);
- paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc));
+ paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pud);
-
- pagetable_pud_dtor(ptdesc);
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- paravirt_tlb_remove_table(tlb, virt_to_page(pud));
+ paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
+ paravirt_tlb_remove_table(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
@@ -222,7 +227,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
if (pmds[i]) {
ptdesc = virt_to_ptdesc(pmds[i]);
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
mm_dec_nr_pmds(mm);
}
@@ -392,15 +397,14 @@ void __init pgtable_cache_init(void)
SLAB_PANIC, NULL);
}
-static inline pgd_t *_pgd_alloc(void)
+static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
{
/*
* If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
- PGD_ALLOCATION_ORDER);
+ return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -409,24 +413,23 @@ static inline pgd_t *_pgd_alloc(void)
return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}
-static inline void _pgd_free(pgd_t *pgd)
+static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
if (!SHARED_KERNEL_PMD)
- free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
+ __pgd_free(mm, pgd);
else
kmem_cache_free(pgd_cache, pgd);
}
#else
-static inline pgd_t *_pgd_alloc(void)
+static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
- PGD_ALLOCATION_ORDER);
+ return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
}
-static inline void _pgd_free(pgd_t *pgd)
+static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
+ __pgd_free(mm, pgd);
}
#endif /* CONFIG_X86_PAE */
@@ -436,7 +439,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
pmd_t *pmds[MAX_PREALLOCATED_PMDS];
- pgd = _pgd_alloc();
+ pgd = _pgd_alloc(mm);
if (pgd == NULL)
goto out;
@@ -479,7 +482,7 @@ out_free_pmds:
if (sizeof(pmds) != 0)
free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
- _pgd_free(pgd);
+ _pgd_free(mm, pgd);
out:
return NULL;
}
@@ -489,7 +492,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd_mop_up_pmds(mm, pgd);
pgd_dtor(pgd);
paravirt_pgd_free(mm, pgd);
- _pgd_free(pgd);
+ _pgd_free(mm, pgd);
}
/*
@@ -856,7 +859,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
free_page((unsigned long)pmd_sv);
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
free_page((unsigned long)pmd);
return 1;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a2becb85bea7..6cf881a942bb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -607,18 +607,15 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cond_mitigation(tsk);
/*
- * Stop remote flushes for the previous mm.
- * Skip kernel threads; we never send init_mm TLB flushing IPIs,
- * but the bitmap manipulation can cause cache line contention.
+ * Leave this CPU in prev's mm_cpumask. Atomic writes to
+ * mm_cpumask can be expensive under contention. The CPU
+ * will be removed lazily at TLB flush time.
*/
- if (prev != &init_mm) {
- VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
- mm_cpumask(prev)));
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- }
+ VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu,
+ mm_cpumask(prev)));
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
- if (next != &init_mm)
+ if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
@@ -760,10 +757,13 @@ static void flush_tlb_func(void *info)
if (!local) {
inc_irq_stat(irq_tlb_count);
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ }
- /* Can only happen on remote CPUs */
- if (f->mm && f->mm != loaded_mm)
- return;
+ /* The CPU was left in the mm_cpumask of the target mm. Clear it. */
+ if (f->mm && f->mm != loaded_mm) {
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
+ trace_tlb_flush(TLB_REMOTE_WRONG_CPU, 0);
+ return;
}
if (unlikely(loaded_mm == &init_mm))
@@ -893,9 +893,36 @@ done:
nr_invalidate);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static bool should_flush_tlb(int cpu, void *data)
+{
+ struct flush_tlb_info *info = data;
+
+ /* Lazy TLB will get flushed at the next context switch. */
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ return false;
+
+ /* No mm means kernel memory flush. */
+ if (!info->mm)
+ return true;
+
+ /* The target mm is loaded, and the CPU is not lazy. */
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+ return true;
+
+ /* In cpumask, but not the loaded mm? Periodically remove by flushing. */
+ if (info->trim_cpumask)
+ return true;
+
+ return false;
+}
+
+static bool should_trim_cpumask(struct mm_struct *mm)
{
- return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+ if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
+ WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
+ return true;
+ }
+ return false;
}
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
@@ -929,7 +956,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
if (info->freed_tables)
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
- on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+ on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
(void *)info, 1, cpumask);
}
@@ -980,6 +1007,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
info->freed_tables = freed_tables;
info->new_tlb_gen = new_tlb_gen;
info->initiating_cpu = smp_processor_id();
+ info->trim_cpumask = 0;
return info;
}
@@ -1022,6 +1050,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
* flush_tlb_func_local() directly in this case.
*/
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+ info->trim_cpumask = should_trim_cpumask(mm);
flush_tlb_multi(mm_cpumask(mm), info);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
lockdep_assert_irqs_enabled();
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 592fb9d97e77..efefeb82ab61 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -1010,4 +1010,34 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
+
+/*
+ * Putting PCIe root ports on Ryzen SoCs with USB4 controllers into D3hot
+ * may cause problems when the system attempts to wake up from s2idle.
+ *
+ * On the TUXEDO Sirius 16 Gen 1 with a specific old BIOS, this manifests as
+ * a system hang.
+ */
+static const struct dmi_system_id quirk_tuxeo_rp_d3_dmi_table[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "APX958"),
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "V1.00A00_20240108"),
+ },
+ },
+ {}
+};
+
+static void quirk_tuxeo_rp_d3(struct pci_dev *pdev)
+{
+ struct pci_dev *root_pdev;
+
+ if (dmi_check_system(quirk_tuxeo_rp_d3_dmi_table)) {
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ root_pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1502, quirk_tuxeo_rp_d3);
#endif /* CONFIG_SUSPEND */
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index a7ff189421c3..463b784499a8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -54,13 +54,11 @@
#include <asm/uv/uv.h>
static unsigned long efi_systab_phys __initdata;
-static unsigned long uga_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long efi_runtime, efi_nr_tables;
unsigned long efi_fw_vendor, efi_config_table;
static const efi_config_table_type_t arch_tables[] __initconst = {
- {UGA_IO_PROTOCOL_GUID, &uga_phys, "UGA" },
#ifdef CONFIG_X86_UV
{UV_SYSTEM_TABLE_GUID, &uv_systab_phys, "UVsystab" },
#endif
@@ -72,7 +70,6 @@ static const unsigned long * const efi_tables[] = {
&efi.acpi20,
&efi.smbios,
&efi.smbios3,
- &uga_phys,
#ifdef CONFIG_X86_UV
&uv_systab_phys,
#endif
@@ -891,13 +888,6 @@ bool efi_is_table_address(unsigned long phys_addr)
return false;
}
-char *efi_systab_show_arch(char *str)
-{
- if (uga_phys != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "UGA=0x%lx\n", uga_phys);
- return str;
-}
-
#define EFI_FIELD(var) efi_ ## var
#define EFI_ATTR_SHOW(name) \
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 846bf49f2508..553f330198f2 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -561,6 +561,11 @@ int __init efi_reuse_config(u64 tables, int nr_tables)
if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
((efi_config_table_64_t *)p)->table = data->smbios;
+
+ /* Do not bother to play with mem attr table across kexec */
+ if (!efi_guidcmp(guid, EFI_MEMORY_ATTRIBUTES_TABLE_GUID))
+ ((efi_config_table_64_t *)p)->table = EFI_INVALID_TABLE_ADDR;
+
p += sz;
}
early_memunmap(tablep, nr_tables * sz);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index 74ebd6882690..cf5dca2dbb91 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -136,11 +136,7 @@ void * __init prom_early_alloc(unsigned long size)
* fast enough on the platforms we care about while minimizing
* wasted bootmem) and hand off chunks of it to callers.
*/
- res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- chunk_size);
- BUG_ON(!res);
+ res = memblock_alloc_or_panic(chunk_size, SMP_CACHE_BYTES);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
free_mem = chunk_size;
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 27441e5863b2..e937be979ec8 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -841,10 +841,10 @@ static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
const char *symname)
{
+ int headtext = !strcmp(sec_name(sec->shdr.sh_info), ".head.text");
unsigned r_type = ELF64_R_TYPE(rel->r_info);
ElfW(Addr) offset = rel->r_offset;
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
-
if (sym->st_shndx == SHN_UNDEF)
return 0;
@@ -900,6 +900,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
break;
}
+ if (headtext) {
+ die("Absolute reference to symbol '%s' not permitted in .head.text\n",
+ symname);
+ break;
+ }
+
/*
* Relocation offsets for 64 bit kernels are output
* as 32 bits and sign extended back to 64 bits when
diff --git a/arch/x86/um/asm/archparam.h b/arch/x86/um/asm/archparam.h
deleted file mode 100644
index c17cf68dda0f..000000000000
--- a/arch/x86/um/asm/archparam.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
- * Copyright 2003 PathScale, Inc.
- * Licensed under the GPL
- */
-
-#ifndef __UM_ARCHPARAM_H
-#define __UM_ARCHPARAM_H
-
-#ifdef CONFIG_X86_32
-
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-#endif
-
-#endif
diff --git a/arch/x86/um/shared/sysdep/ptrace.h b/arch/x86/um/shared/sysdep/ptrace.h
index 2dd4ca6713f8..8f7476ff6e95 100644
--- a/arch/x86/um/shared/sysdep/ptrace.h
+++ b/arch/x86/um/shared/sysdep/ptrace.h
@@ -74,8 +74,6 @@ struct uml_pt_regs {
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
#define UPT_IS_USER(r) ((r)->is_user)
-extern int user_context(unsigned long sp);
-
extern int arch_init_registers(int pid);
#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 4e2b2e2ac9f9..7fdb37387886 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -270,57 +270,15 @@ static int read_sys_metadata_field(u64 field_id, u64 *data)
return 0;
}
-static int read_sys_metadata_field16(u64 field_id,
- int offset,
- struct tdx_tdmr_sysinfo *ts)
-{
- u16 *ts_member = ((void *)ts) + offset;
- u64 tmp;
- int ret;
-
- if (WARN_ON_ONCE(MD_FIELD_ID_ELE_SIZE_CODE(field_id) !=
- MD_FIELD_ID_ELE_SIZE_16BIT))
- return -EINVAL;
-
- ret = read_sys_metadata_field(field_id, &tmp);
- if (ret)
- return ret;
-
- *ts_member = tmp;
-
- return 0;
-}
-
-struct field_mapping {
- u64 field_id;
- int offset;
-};
-
-#define TD_SYSINFO_MAP(_field_id, _offset) \
- { .field_id = MD_FIELD_ID_##_field_id, \
- .offset = offsetof(struct tdx_tdmr_sysinfo, _offset) }
-
-/* Map TD_SYSINFO fields into 'struct tdx_tdmr_sysinfo': */
-static const struct field_mapping fields[] = {
- TD_SYSINFO_MAP(MAX_TDMRS, max_tdmrs),
- TD_SYSINFO_MAP(MAX_RESERVED_PER_TDMR, max_reserved_per_tdmr),
- TD_SYSINFO_MAP(PAMT_4K_ENTRY_SIZE, pamt_entry_size[TDX_PS_4K]),
- TD_SYSINFO_MAP(PAMT_2M_ENTRY_SIZE, pamt_entry_size[TDX_PS_2M]),
- TD_SYSINFO_MAP(PAMT_1G_ENTRY_SIZE, pamt_entry_size[TDX_PS_1G]),
-};
+#include "tdx_global_metadata.c"
-static int get_tdx_tdmr_sysinfo(struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+static int check_features(struct tdx_sys_info *sysinfo)
{
- int ret;
- int i;
+ u64 tdx_features0 = sysinfo->features.tdx_features0;
- /* Populate 'tdmr_sysinfo' fields using the mapping structure above: */
- for (i = 0; i < ARRAY_SIZE(fields); i++) {
- ret = read_sys_metadata_field16(fields[i].field_id,
- fields[i].offset,
- tdmr_sysinfo);
- if (ret)
- return ret;
+ if (!(tdx_features0 & TDX_FEATURES0_NO_RBP_MOD)) {
+ pr_err("frame pointer (RBP) clobber bug present, upgrade TDX module\n");
+ return -EINVAL;
}
return 0;
@@ -342,13 +300,13 @@ static int tdmr_size_single(u16 max_reserved_per_tdmr)
}
static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list,
- struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+ struct tdx_sys_info_tdmr *sysinfo_tdmr)
{
size_t tdmr_sz, tdmr_array_sz;
void *tdmr_array;
- tdmr_sz = tdmr_size_single(tdmr_sysinfo->max_reserved_per_tdmr);
- tdmr_array_sz = tdmr_sz * tdmr_sysinfo->max_tdmrs;
+ tdmr_sz = tdmr_size_single(sysinfo_tdmr->max_reserved_per_tdmr);
+ tdmr_array_sz = tdmr_sz * sysinfo_tdmr->max_tdmrs;
/*
* To keep things simple, allocate all TDMRs together.
@@ -367,7 +325,7 @@ static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list,
* at a given index in the TDMR list.
*/
tdmr_list->tdmr_sz = tdmr_sz;
- tdmr_list->max_tdmrs = tdmr_sysinfo->max_tdmrs;
+ tdmr_list->max_tdmrs = sysinfo_tdmr->max_tdmrs;
tdmr_list->nr_consumed_tdmrs = 0;
return 0;
@@ -921,25 +879,29 @@ static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list,
/*
* Construct a list of TDMRs on the preallocated space in @tdmr_list
* to cover all TDX memory regions in @tmb_list based on the TDX module
- * TDMR global information in @tdmr_sysinfo.
+ * TDMR global information in @sysinfo_tdmr.
*/
static int construct_tdmrs(struct list_head *tmb_list,
struct tdmr_info_list *tdmr_list,
- struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+ struct tdx_sys_info_tdmr *sysinfo_tdmr)
{
+ u16 pamt_entry_size[TDX_PS_NR] = {
+ sysinfo_tdmr->pamt_4k_entry_size,
+ sysinfo_tdmr->pamt_2m_entry_size,
+ sysinfo_tdmr->pamt_1g_entry_size,
+ };
int ret;
ret = fill_out_tdmrs(tmb_list, tdmr_list);
if (ret)
return ret;
- ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list,
- tdmr_sysinfo->pamt_entry_size);
+ ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list, pamt_entry_size);
if (ret)
return ret;
ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list,
- tdmr_sysinfo->max_reserved_per_tdmr);
+ sysinfo_tdmr->max_reserved_per_tdmr);
if (ret)
tdmrs_free_pamt_all(tdmr_list);
@@ -1098,9 +1060,18 @@ static int init_tdmrs(struct tdmr_info_list *tdmr_list)
static int init_tdx_module(void)
{
- struct tdx_tdmr_sysinfo tdmr_sysinfo;
+ struct tdx_sys_info sysinfo;
int ret;
+ ret = get_tdx_sys_info(&sysinfo);
+ if (ret)
+ return ret;
+
+ /* Check whether the kernel can support this module */
+ ret = check_features(&sysinfo);
+ if (ret)
+ return ret;
+
/*
* To keep things simple, assume that all TDX-protected memory
* will come from the page allocator. Make sure all pages in the
@@ -1117,17 +1088,13 @@ static int init_tdx_module(void)
if (ret)
goto out_put_tdxmem;
- ret = get_tdx_tdmr_sysinfo(&tdmr_sysinfo);
- if (ret)
- goto err_free_tdxmem;
-
/* Allocate enough space for constructing TDMRs */
- ret = alloc_tdmr_list(&tdx_tdmr_list, &tdmr_sysinfo);
+ ret = alloc_tdmr_list(&tdx_tdmr_list, &sysinfo.tdmr);
if (ret)
goto err_free_tdxmem;
/* Cover all TDX-usable memory regions in TDMRs */
- ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdmr_sysinfo);
+ ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &sysinfo.tdmr);
if (ret)
goto err_free_tdmrs;
diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
index b701f69485d3..4e3d533cdd61 100644
--- a/arch/x86/virt/vmx/tdx/tdx.h
+++ b/arch/x86/virt/vmx/tdx/tdx.h
@@ -3,6 +3,7 @@
#define _X86_VIRT_TDX_H
#include <linux/bits.h>
+#include "tdx_global_metadata.h"
/*
* This file contains both macros and data structures defined by the TDX
@@ -26,35 +27,6 @@
#define PT_NDA 0x0
#define PT_RSVD 0x1
-/*
- * Global scope metadata field ID.
- *
- * See Table "Global Scope Metadata", TDX module 1.5 ABI spec.
- */
-#define MD_FIELD_ID_MAX_TDMRS 0x9100000100000008ULL
-#define MD_FIELD_ID_MAX_RESERVED_PER_TDMR 0x9100000100000009ULL
-#define MD_FIELD_ID_PAMT_4K_ENTRY_SIZE 0x9100000100000010ULL
-#define MD_FIELD_ID_PAMT_2M_ENTRY_SIZE 0x9100000100000011ULL
-#define MD_FIELD_ID_PAMT_1G_ENTRY_SIZE 0x9100000100000012ULL
-
-/*
- * Sub-field definition of metadata field ID.
- *
- * See Table "MD_FIELD_ID (Metadata Field Identifier / Sequence Header)
- * Definition", TDX module 1.5 ABI spec.
- *
- * - Bit 33:32: ELEMENT_SIZE_CODE -- size of a single element of metadata
- *
- * 0: 8 bits
- * 1: 16 bits
- * 2: 32 bits
- * 3: 64 bits
- */
-#define MD_FIELD_ID_ELE_SIZE_CODE(_field_id) \
- (((_field_id) & GENMASK_ULL(33, 32)) >> 32)
-
-#define MD_FIELD_ID_ELE_SIZE_16BIT 1
-
struct tdmr_reserved_area {
u64 offset;
u64 size;
@@ -80,6 +52,9 @@ struct tdmr_info {
DECLARE_FLEX_ARRAY(struct tdmr_reserved_area, reserved_areas);
} __packed __aligned(TDMR_INFO_ALIGNMENT);
+/* Bit definitions of TDX_FEATURES0 metadata field */
+#define TDX_FEATURES0_NO_RBP_MOD BIT(18)
+
/*
* Do not put any hardware-defined TDX structure representations below
* this comment!
@@ -99,13 +74,6 @@ struct tdx_memblock {
int nid;
};
-/* "TDMR info" part of "Global Scope Metadata" for constructing TDMRs */
-struct tdx_tdmr_sysinfo {
- u16 max_tdmrs;
- u16 max_reserved_per_tdmr;
- u16 pamt_entry_size[TDX_PS_NR];
-};
-
/* Warn if kernel has less than TDMR_NR_WARN TDMRs after allocation */
#define TDMR_NR_WARN 4
diff --git a/arch/x86/virt/vmx/tdx/tdx_global_metadata.c b/arch/x86/virt/vmx/tdx/tdx_global_metadata.c
new file mode 100644
index 000000000000..8027a24d1c6e
--- /dev/null
+++ b/arch/x86/virt/vmx/tdx/tdx_global_metadata.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Automatically generated functions to read TDX global metadata.
+ *
+ * This file doesn't compile on its own as it lacks of inclusion
+ * of SEAMCALL wrapper primitive which reads global metadata.
+ * Include this file to other C file instead.
+ */
+
+static int get_tdx_sys_info_features(struct tdx_sys_info_features *sysinfo_features)
+{
+ int ret = 0;
+ u64 val;
+
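+	/* Read the TDX_FEATURES0 global metadata field. */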
+ if (!ret && !(ret = read_sys_metadata_field(0x0A00000300000008, &val)))
+ sysinfo_features->tdx_features0 = val;
+
+ return ret;
+}
+
+static int get_tdx_sys_info_tdmr(struct tdx_sys_info_tdmr *sysinfo_tdmr)
+{
+ int ret = 0;
+ u64 val;
+
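+	/*
+	 * The field IDs below correspond to the former MD_FIELD_ID_MAX_TDMRS,
+	 * MD_FIELD_ID_MAX_RESERVED_PER_TDMR and MD_FIELD_ID_PAMT_{4K,2M,1G}_ENTRY_SIZE
+	 * definitions removed from tdx.h.
+	 */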
+ if (!ret && !(ret = read_sys_metadata_field(0x9100000100000008, &val)))
+ sysinfo_tdmr->max_tdmrs = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9100000100000009, &val)))
+ sysinfo_tdmr->max_reserved_per_tdmr = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9100000100000010, &val)))
+ sysinfo_tdmr->pamt_4k_entry_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9100000100000011, &val)))
+ sysinfo_tdmr->pamt_2m_entry_size = val;
+ if (!ret && !(ret = read_sys_metadata_field(0x9100000100000012, &val)))
+ sysinfo_tdmr->pamt_1g_entry_size = val;
+
+ return ret;
+}
+
+static int get_tdx_sys_info(struct tdx_sys_info *sysinfo)
+{
+ int ret = 0;
+
+ ret = ret ?: get_tdx_sys_info_features(&sysinfo->features);
+ ret = ret ?: get_tdx_sys_info_tdmr(&sysinfo->tdmr);
+
+ return ret;
+}
diff --git a/arch/x86/virt/vmx/tdx/tdx_global_metadata.h b/arch/x86/virt/vmx/tdx/tdx_global_metadata.h
new file mode 100644
index 000000000000..6dd3c9695f59
--- /dev/null
+++ b/arch/x86/virt/vmx/tdx/tdx_global_metadata.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Automatically generated TDX global metadata structures. */
+#ifndef _X86_VIRT_TDX_AUTO_GENERATED_TDX_GLOBAL_METADATA_H
+#define _X86_VIRT_TDX_AUTO_GENERATED_TDX_GLOBAL_METADATA_H
+
+#include <linux/types.h>
+
+struct tdx_sys_info_features {
+ u64 tdx_features0;
+};
+
+struct tdx_sys_info_tdmr {
+ u16 max_tdmrs;
+ u16 max_reserved_per_tdmr;
+ u16 pamt_4k_entry_size;
+ u16 pamt_2m_entry_size;
+ u16 pamt_1g_entry_size;
+};
+
+struct tdx_sys_info {
+ struct tdx_sys_info_features features;
+ struct tdx_sys_info_tdmr tdmr;
+};
+
+#endif
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 55a4996d0c04..2c70cd35e72c 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -781,6 +781,7 @@ void xen_mm_pin_all(void)
{
struct page *page;
+ spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@@ -791,6 +792,7 @@ void xen_mm_pin_all(void)
}
spin_unlock(&pgd_lock);
+ spin_unlock(&init_mm.page_table_lock);
}
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
@@ -887,6 +889,7 @@ void xen_mm_unpin_all(void)
{
struct page *page;
+ spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@@ -898,6 +901,7 @@ void xen_mm_unpin_all(void)
}
spin_unlock(&pgd_lock);
+ spin_unlock(&init_mm.page_table_lock);
}
static void xen_enter_mmap(struct mm_struct *mm)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b52d3e17e2c1..56914e21e303 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -178,13 +178,7 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
if (unlikely(!slab_is_available())) {
- void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-
- if (!ptr)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
-
- return ptr;
+ return memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
return (void *)__get_free_page(GFP_KERNEL);
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 9252652afe59..894edf8d6d62 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -100,9 +100,6 @@ SYM_FUNC_START(xen_hypercall_hvm)
push %r10
push %r9
push %r8
-#ifdef CONFIG_FRAME_POINTER
- pushq $0 /* Dummy push for stack alignment. */
-#endif
#endif
/* Set the vendor specific function. */
call __xen_hypercall_setfunc
@@ -117,11 +114,8 @@ SYM_FUNC_START(xen_hypercall_hvm)
pop %ebx
pop %eax
#else
- lea xen_hypercall_amd(%rip), %rbx
- cmp %rax, %rbx
-#ifdef CONFIG_FRAME_POINTER
- pop %rax /* Dummy pop. */
-#endif
+ lea xen_hypercall_amd(%rip), %rcx
+ cmp %rax, %rcx
pop %r8
pop %r9
pop %r10
@@ -132,6 +126,7 @@ SYM_FUNC_START(xen_hypercall_hvm)
pop %rcx
pop %rax
#endif
+ FRAME_END
/* Use correct hypercall function. */
jz xen_hypercall_amd
jmp xen_hypercall_intel
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 7fc0f9126dd3..1919ee9c3dd6 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ return __pgd_alloc(mm, 0);
}
static inline void ptes_clear(pte_t *ptep)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 7ed1a2085bd7..47b5df86ab5c 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -160,9 +160,7 @@ struct thread_struct {
struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
#endif
- /* Make structure 16 bytes aligned. */
- int align[0] __attribute__ ((aligned(16)));
-};
+} __aligned(16);
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index e51f2060e830..f72e280363be 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -55,7 +55,7 @@ extern int initrd_below_start_ok;
#endif
#ifdef CONFIG_USE_OF
-void *dtb_start = __dtb_start;
+static void *dtb_start __initdata = __dtb_start;
#endif
extern unsigned long loops_per_jiffy;
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index f00d122aa806..f39c4d83173a 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -39,11 +39,7 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pmd_t *pmd = pmd_off_k(vaddr);
- pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
-
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
+ pte_t *pte = memblock_alloc_or_panic(n_pages * sizeof(pte_t), PAGE_SIZE);
pr_debug("%s: %p - %p\n", __func__, start, end);
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index d6d2b533a574..6ed009318d24 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/proc_fs.h>
@@ -75,7 +76,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
if (offset > dev->size || dev->size - offset < nbytes) {
pr_notice("Beyond-end %s (%ld %ld)\n",
- write ? "write" : "read", offset, nbytes);
+ str_write_read(write), offset, nbytes);
return;
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 45a395862fbc..9ed93d91d754 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1138,6 +1138,7 @@ static void blkcg_fill_root_iostats(void)
blkg_iostat_set(&blkg->iostat.cur, &tmp);
u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
+ class_dev_iter_exit(&iter);
}
static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
@@ -1545,6 +1546,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
struct request_queue *q = disk->queue;
struct blkg_policy_data *pd_prealloc = NULL;
struct blkcg_gq *blkg, *pinned_blkg = NULL;
+ unsigned int memflags;
int ret;
if (blkcg_policy_enabled(q, pol))
@@ -1559,7 +1561,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
return -EINVAL;
if (queue_is_mq(q))
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
retry:
spin_lock_irq(&q->queue_lock);
@@ -1623,7 +1625,7 @@ retry:
spin_unlock_irq(&q->queue_lock);
out:
if (queue_is_mq(q))
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
if (pinned_blkg)
blkg_put(pinned_blkg);
if (pd_prealloc)
@@ -1667,12 +1669,13 @@ void blkcg_deactivate_policy(struct gendisk *disk,
{
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
+ unsigned int memflags;
if (!blkcg_policy_enabled(q, pol))
return;
if (queue_is_mq(q))
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
mutex_lock(&q->blkcg_mutex);
spin_lock_irq(&q->queue_lock);
@@ -1696,7 +1699,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
mutex_unlock(&q->blkcg_mutex);
if (queue_is_mq(q))
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
diff --git a/block/blk-core.c b/block/blk-core.c
index 32fb28a6372c..d6c4fa3943b5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -430,7 +430,6 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
refcount_set(&q->refs, 1);
mutex_init(&q->debugfs_mutex);
mutex_init(&q->sysfs_lock);
- mutex_init(&q->sysfs_dir_lock);
mutex_init(&q->limits_lock);
mutex_init(&q->rq_qos_mutex);
spin_lock_init(&q->queue_lock);
diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
index c9eb4241e048..d479f5481b66 100644
--- a/block/blk-ia-ranges.c
+++ b/block/blk-ia-ranges.c
@@ -111,7 +111,6 @@ int disk_register_independent_access_ranges(struct gendisk *disk)
struct request_queue *q = disk->queue;
int i, ret;
- lockdep_assert_held(&q->sysfs_dir_lock);
lockdep_assert_held(&q->sysfs_lock);
if (!iars)
@@ -155,7 +154,6 @@ void disk_unregister_independent_access_ranges(struct gendisk *disk)
struct blk_independent_access_ranges *iars = disk->ia_ranges;
int i;
- lockdep_assert_held(&q->sysfs_dir_lock);
lockdep_assert_held(&q->sysfs_lock);
if (!iars)
@@ -289,7 +287,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
{
struct request_queue *q = disk->queue;
- mutex_lock(&q->sysfs_dir_lock);
mutex_lock(&q->sysfs_lock);
if (iars && !disk_check_ia_ranges(disk, iars)) {
kfree(iars);
@@ -313,6 +310,5 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
disk_register_independent_access_ranges(disk);
unlock:
mutex_unlock(&q->sysfs_lock);
- mutex_unlock(&q->sysfs_dir_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a5894ec9696e..65a1d4427ccf 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -3224,6 +3224,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
u32 qos[NR_QOS_PARAMS];
bool enable, user;
char *body, *p;
+ unsigned int memflags;
int ret;
blkg_conf_init(&ctx, input);
@@ -3247,7 +3248,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
ioc = q_to_ioc(disk->queue);
}
- blk_mq_freeze_queue(disk->queue);
+ memflags = blk_mq_freeze_queue(disk->queue);
blk_mq_quiesce_queue(disk->queue);
spin_lock_irq(&ioc->lock);
@@ -3347,7 +3348,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
wbt_enable_default(disk);
blk_mq_unquiesce_queue(disk->queue);
- blk_mq_unfreeze_queue(disk->queue);
+ blk_mq_unfreeze_queue(disk->queue, memflags);
blkg_conf_exit(&ctx);
return nbytes;
@@ -3355,7 +3356,7 @@ einval:
spin_unlock_irq(&ioc->lock);
blk_mq_unquiesce_queue(disk->queue);
- blk_mq_unfreeze_queue(disk->queue);
+ blk_mq_unfreeze_queue(disk->queue, memflags);
ret = -EINVAL;
err:
@@ -3414,6 +3415,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
{
struct blkg_conf_ctx ctx;
struct request_queue *q;
+ unsigned int memflags;
struct ioc *ioc;
u64 u[NR_I_LCOEFS];
bool user;
@@ -3441,7 +3443,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
ioc = q_to_ioc(q);
}
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&ioc->lock);
@@ -3493,7 +3495,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
spin_unlock_irq(&ioc->lock);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
blkg_conf_exit(&ctx);
return nbytes;
@@ -3502,7 +3504,7 @@ einval:
spin_unlock_irq(&ioc->lock);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
ret = -EINVAL;
err:
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index ebb522788d97..42c1e0b9a68f 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -749,9 +749,11 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
*/
enabled = atomic_read(&blkiolat->enable_cnt);
if (enabled != blkiolat->enabled) {
- blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
+ unsigned int memflags;
+
+ memflags = blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
blkiolat->enabled = enabled;
- blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
+ blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue, memflags);
}
}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index ad8d6a363f24..444798c5374f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -87,7 +87,6 @@ void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
return;
fallback:
- WARN_ON_ONCE(qmap->nr_queues > 1);
- blk_mq_clear_mq_map(qmap);
+ blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 156e9bb07abf..3feeeccf8a99 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -223,30 +223,27 @@ int blk_mq_sysfs_register(struct gendisk *disk)
unsigned long i, j;
int ret;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
if (ret < 0)
- goto out;
+ return ret;
kobject_uevent(q->mq_kobj, KOBJ_ADD);
+ mutex_lock(&q->tag_set->tag_list_lock);
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
- goto unreg;
+ goto out_unreg;
}
+ mutex_unlock(&q->tag_set->tag_list_lock);
+ return 0;
- q->mq_sysfs_init_done = true;
-
-out:
- return ret;
-
-unreg:
+out_unreg:
queue_for_each_hw_ctx(q, hctx, j) {
if (j < i)
blk_mq_unregister_hctx(hctx);
}
+ mutex_unlock(&q->tag_set->tag_list_lock);
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
@@ -259,15 +256,13 @@ void blk_mq_sysfs_unregister(struct gendisk *disk)
struct blk_mq_hw_ctx *hctx;
unsigned long i;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
+ mutex_lock(&q->tag_set->tag_list_lock);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
+ mutex_unlock(&q->tag_set->tag_list_lock);
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
-
- q->mq_sysfs_init_done = false;
}
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
@@ -275,15 +270,11 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned long i;
- mutex_lock(&q->sysfs_dir_lock);
- if (!q->mq_sysfs_init_done)
- goto unlock;
+ if (!blk_queue_registered(q))
+ return;
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
-
-unlock:
- mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -292,9 +283,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
unsigned long i;
int ret = 0;
- mutex_lock(&q->sysfs_dir_lock);
- if (!q->mq_sysfs_init_done)
- goto unlock;
+ if (!blk_queue_registered(q))
+ goto out;
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
@@ -302,8 +292,6 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
break;
}
-unlock:
- mutex_unlock(&q->sysfs_dir_lock);
-
+out:
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da39a1cac702..40490ac88045 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -210,12 +210,12 @@ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
{
blk_freeze_queue_start(q);
blk_mq_freeze_queue_wait(q);
}
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_nomemsave);
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
@@ -236,12 +236,12 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
return unfreeze;
}
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
{
if (__blk_mq_unfreeze_queue(q, false))
blk_unfreeze_release_lock(q);
}
-EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_nomemrestore);
/*
* non_owner variant of blk_freeze_queue_start
@@ -4223,13 +4223,14 @@ static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
bool shared)
{
struct request_queue *q;
+ unsigned int memflags;
lockdep_assert_held(&set->tag_list_lock);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
queue_set_hctx_shared(q, shared);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
}
@@ -4992,6 +4993,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
struct request_queue *q;
LIST_HEAD(head);
int prev_nr_hw_queues = set->nr_hw_queues;
+ unsigned int memflags;
int i;
lockdep_assert_held(&set->tag_list_lock);
@@ -5003,8 +5005,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
return;
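+	/*
+	 * Enter NOIO once for the whole update; each queue is then frozen and
+	 * unfrozen with the _nomemsave/_nomemrestore variants.
+	 */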
+ memflags = memalloc_noio_save();
list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_freeze_queue(q);
+ blk_mq_freeze_queue_nomemsave(q);
+
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
@@ -5052,7 +5056,8 @@ switch_back:
blk_mq_elv_switch_back(&head, q);
list_for_each_entry(q, &set->tag_list, tag_set_list)
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ memalloc_noio_restore(memflags);
/* Free the excess tags when nr_hw_queues shrink. */
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 42e842074715..8d3e052f91da 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -89,7 +89,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
if (percpu_ref_is_zero(&q->q_usage_counter))
ret = 0;
/* Switch q_usage_counter back to per-cpu mode. */
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue_nomemrestore(q);
if (ret < 0) {
spin_lock_irq(&q->queue_lock);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index eb9618cd68ad..d4d4f4dc0e23 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -299,6 +299,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
const struct rq_qos_ops *ops)
{
struct request_queue *q = disk->queue;
+ unsigned int memflags;
lockdep_assert_held(&q->rq_qos_mutex);
@@ -310,14 +311,14 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
* No IO can be in-flight when adding rqos, so freeze queue, which
* is fine since we only support rq_qos for blk-mq queue.
*/
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
if (rq_qos_id(q, rqos->id))
goto ebusy;
rqos->next = q->rq_qos;
q->rq_qos = rqos;
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
if (rqos->ops->debugfs_attrs) {
mutex_lock(&q->debugfs_mutex);
@@ -327,7 +328,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
return 0;
ebusy:
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return -EBUSY;
}
@@ -335,17 +336,18 @@ void rq_qos_del(struct rq_qos *rqos)
{
struct request_queue *q = rqos->disk->queue;
struct rq_qos **cur;
+ unsigned int memflags;
lockdep_assert_held(&q->rq_qos_mutex);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
if (*cur == rqos) {
*cur = rqos->next;
break;
}
}
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
mutex_lock(&q->debugfs_mutex);
blk_mq_debugfs_unregister_rqos(rqos);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index db12396ff5c7..c44dadc35e1e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -461,11 +461,12 @@ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
int queue_limits_commit_update_frozen(struct request_queue *q,
struct queue_limits *lim)
{
+ unsigned int memflags;
int ret;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
ret = queue_limits_commit_update(q, lim);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e09b455874bf..6f548a4376aa 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -681,7 +681,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
struct queue_sysfs_entry *entry = to_queue(attr);
struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
struct request_queue *q = disk->queue;
- unsigned int noio_flag;
+ unsigned int memflags;
ssize_t res;
if (!entry->store_limit && !entry->store)
@@ -711,11 +711,9 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
}
mutex_lock(&q->sysfs_lock);
- blk_mq_freeze_queue(q);
- noio_flag = memalloc_noio_save();
+ memflags = blk_mq_freeze_queue(q);
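+	/* The freeze also enters NOIO context; memflags restores it on unfreeze. */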
res = entry->store(disk, page, length);
- memalloc_noio_restore(noio_flag);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
mutex_unlock(&q->sysfs_lock);
return res;
}
@@ -764,7 +762,6 @@ int blk_register_queue(struct gendisk *disk)
struct request_queue *q = disk->queue;
int ret;
- mutex_lock(&q->sysfs_dir_lock);
kobject_init(&disk->queue_kobj, &blk_queue_ktype);
ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
if (ret < 0)
@@ -805,7 +802,6 @@ int blk_register_queue(struct gendisk *disk)
if (q->elevator)
kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
mutex_unlock(&q->sysfs_lock);
- mutex_unlock(&q->sysfs_dir_lock);
/*
* SCSI probing may synchronously create and destroy a lot of
@@ -830,7 +826,6 @@ out_debugfs_remove:
mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
- mutex_unlock(&q->sysfs_dir_lock);
return ret;
}
@@ -861,7 +856,6 @@ void blk_unregister_queue(struct gendisk *disk)
blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
mutex_unlock(&q->sysfs_lock);
- mutex_lock(&q->sysfs_dir_lock);
/*
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
@@ -878,7 +872,6 @@ void blk_unregister_queue(struct gendisk *disk)
/* Now that we've deleted all child objects, we can delete the queue. */
kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
kobject_del(&disk->queue_kobj);
- mutex_unlock(&q->sysfs_dir_lock);
blk_debugfs_remove(disk);
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 82dbaefcfa3b..8d149aff9fd0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1202,6 +1202,7 @@ static int blk_throtl_init(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct throtl_data *td;
+ unsigned int memflags;
int ret;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
@@ -1215,7 +1216,7 @@ static int blk_throtl_init(struct gendisk *disk)
* Freeze queue before activating policy, to synchronize with IO path,
* which is protected by 'q_usage_counter'.
*/
- blk_mq_freeze_queue(disk->queue);
+ memflags = blk_mq_freeze_queue(disk->queue);
blk_mq_quiesce_queue(disk->queue);
q->td = td;
@@ -1239,7 +1240,7 @@ static int blk_throtl_init(struct gendisk *disk)
out:
blk_mq_unquiesce_queue(disk->queue);
- blk_mq_unfreeze_queue(disk->queue);
+ blk_mq_unfreeze_queue(disk->queue, memflags);
return ret;
}
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 9d08a54c201e..761ea662ddc3 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1717,9 +1717,10 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
else
pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
if (ret) {
- blk_mq_freeze_queue(q);
+ unsigned int memflags = blk_mq_freeze_queue(q);
+
disk_free_zone_resources(disk);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
return ret;
diff --git a/block/elevator.c b/block/elevator.c
index b81216c48b6b..cd2ce4921601 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -570,6 +570,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
void elevator_init_mq(struct request_queue *q)
{
struct elevator_type *e;
+ unsigned int memflags;
int err;
WARN_ON_ONCE(blk_queue_registered(q));
@@ -590,13 +591,13 @@ void elevator_init_mq(struct request_queue *q)
*
* Disk isn't added yet, so verifying queue lock only manually.
*/
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_cancel_work_sync(q);
err = blk_mq_init_sched(q, e);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
if (err) {
pr_warn("\"%s\" elevator initialization failed, "
@@ -614,11 +615,12 @@ void elevator_init_mq(struct request_queue *q)
*/
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
+ unsigned int memflags;
int ret;
lockdep_assert_held(&q->sysfs_lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
if (q->elevator) {
@@ -639,7 +641,7 @@ int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
out_unfreeze:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
if (ret) {
pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
@@ -651,9 +653,11 @@ out_unfreeze:
void elevator_disable(struct request_queue *q)
{
+ unsigned int memflags;
+
lockdep_assert_held(&q->sysfs_lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
elv_unregister_queue(q);
@@ -664,7 +668,7 @@ void elevator_disable(struct request_queue *q)
blk_add_trace_msg(q, "elv switch: none");
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
/*
diff --git a/block/fops.c b/block/fops.c
index 6d5c4fc5a216..be9f1dbea9ce 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -783,11 +783,12 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
file_accessed(iocb->ki_filp);
ret = blkdev_direct_IO(iocb, to);
- if (ret >= 0) {
+ if (ret > 0) {
iocb->ki_pos += ret;
count -= ret;
}
- iov_iter_revert(to, count - iov_iter_count(to));
+ if (ret != -EIOCBQUEUED)
+ iov_iter_revert(to, count - iov_iter_count(to));
if (ret < 0 || !count)
goto reexpand;
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 6b0bfbccac08..74ae5f52b784 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -684,14 +684,6 @@ config CRYPTO_HCTR2
See https://eprint.iacr.org/2021/1441
-config CRYPTO_KEYWRAP
- tristate "KW (AES Key Wrap)"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
- and RFC3394) without padding.
-
config CRYPTO_LRW
tristate "LRW (Liskov Rivest Wagner)"
select CRYPTO_LIB_GF128MUL
@@ -1029,16 +1021,6 @@ config CRYPTO_STREEBOG
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
-config CRYPTO_VMAC
- tristate "VMAC"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- VMAC is a message authentication algorithm designed for
- very high speed on 64-bit architectures.
-
- See https://fastcrypto.org/vmac for further information.
-
config CRYPTO_WP512
tristate "Whirlpool"
select CRYPTO_HASH
@@ -1102,6 +1084,7 @@ config CRYPTO_CRC32
config CRYPTO_CRCT10DIF
tristate "CRCT10DIF"
select CRYPTO_HASH
+ select CRC_T10DIF
help
CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
diff --git a/crypto/Makefile b/crypto/Makefile
index 77abca715445..f67e853c4690 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -69,7 +69,6 @@ obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
-obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -95,7 +94,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
-obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
@@ -157,7 +155,8 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_generic.o
+CFLAGS_crct10dif_generic.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 4fdb53435827..6cbff298722b 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -516,7 +516,6 @@ static struct aead_alg crypto_aegis128_alg_generic = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
@@ -535,7 +534,6 @@ static struct aead_alg crypto_aegis128_alg_simd = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bcd9de009a91..b08b89ec26ec 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -27,6 +27,93 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+struct crypto_hash_walk {
+ char *data;
+
+ unsigned int offset;
+ unsigned int flags;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+};
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+ unsigned int offset = walk->offset;
+ unsigned int nbytes = min(walk->entrylen,
+ ((unsigned int)(PAGE_SIZE)) - offset);
+
+ walk->data = kmap_local_page(walk->pg);
+ walk->data += offset;
+ walk->entrylen -= nbytes;
+ return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+ struct scatterlist *sg;
+
+ sg = walk->sg;
+ walk->offset = sg->offset;
+ walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->offset = offset_in_page(walk->offset);
+ walk->entrylen = sg->length;
+
+ if (walk->entrylen > walk->total)
+ walk->entrylen = walk->total;
+ walk->total -= walk->entrylen;
+
+ return hash_walk_next(walk);
+}
+
+static int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk)
+{
+ walk->total = req->nbytes;
+
+ if (!walk->total) {
+ walk->entrylen = 0;
+ return 0;
+ }
+
+ walk->sg = req->src;
+ walk->flags = req->base.flags;
+
+ return hash_walk_new_entry(walk);
+}
+
+static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+ walk->data -= walk->offset;
+
+ kunmap_local(walk->data);
+ crypto_yield(walk->flags);
+
+ if (err)
+ return err;
+
+ if (walk->entrylen) {
+ walk->offset = 0;
+ walk->pg++;
+ return hash_walk_next(walk);
+ }
+
+ if (!walk->total)
+ return 0;
+
+ walk->sg = sg_next(walk->sg);
+
+ return hash_walk_new_entry(walk);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
* algorithm), this returns the underlying shash tfm.
@@ -137,77 +224,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
return 0;
}
-static int hash_walk_next(struct crypto_hash_walk *walk)
-{
- unsigned int offset = walk->offset;
- unsigned int nbytes = min(walk->entrylen,
- ((unsigned int)(PAGE_SIZE)) - offset);
-
- walk->data = kmap_local_page(walk->pg);
- walk->data += offset;
- walk->entrylen -= nbytes;
- return nbytes;
-}
-
-static int hash_walk_new_entry(struct crypto_hash_walk *walk)
-{
- struct scatterlist *sg;
-
- sg = walk->sg;
- walk->offset = sg->offset;
- walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
- walk->offset = offset_in_page(walk->offset);
- walk->entrylen = sg->length;
-
- if (walk->entrylen > walk->total)
- walk->entrylen = walk->total;
- walk->total -= walk->entrylen;
-
- return hash_walk_next(walk);
-}
-
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
-{
- walk->data -= walk->offset;
-
- kunmap_local(walk->data);
- crypto_yield(walk->flags);
-
- if (err)
- return err;
-
- if (walk->entrylen) {
- walk->offset = 0;
- walk->pg++;
- return hash_walk_next(walk);
- }
-
- if (!walk->total)
- return 0;
-
- walk->sg = sg_next(walk->sg);
-
- return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
-
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk)
-{
- walk->total = req->nbytes;
-
- if (!walk->total) {
- walk->entrylen = 0;
- return 0;
- }
-
- walk->sg = req->src;
- walk->flags = req->base.flags;
-
- return hash_walk_new_entry(walk);
-}
-EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 16f7c7a9d8ab..5318c214debb 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -407,6 +407,7 @@ EXPORT_SYMBOL_GPL(crypto_remove_final);
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
+ bool test_started = false;
LIST_HEAD(algs_to_put);
int err;
@@ -418,17 +419,19 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
- bool test_started = crypto_boot_test_finished();
-
+ test_started = crypto_boot_test_finished();
larval->test_started = test_started;
- if (test_started)
- crypto_schedule_test(larval);
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
- crypto_remove_final(&algs_to_put);
+
+ if (test_started)
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
@@ -642,10 +645,8 @@ int crypto_register_instance(struct crypto_template *tmpl,
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
- else if (larval) {
+ else if (larval)
larval->test_started = true;
- crypto_schedule_test(larval);
- }
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
@@ -655,7 +656,12 @@ unlock:
if (IS_ERR(larval))
return PTR_ERR(larval);
- crypto_remove_final(&algs_to_put);
+
+ if (larval)
+ crypto_schedule_test(larval);
+ else
+ crypto_remove_final(&algs_to_put);
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
@@ -1016,6 +1022,8 @@ static void __init crypto_start_tests(void)
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return;
+ set_crypto_boot_test_finished();
+
for (;;) {
struct crypto_larval *larval = NULL;
struct crypto_alg *q;
@@ -1038,7 +1046,6 @@ static void __init crypto_start_tests(void)
l->test_started = true;
larval = l;
- crypto_schedule_test(larval);
break;
}
@@ -1046,9 +1053,9 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
- }
- set_crypto_boot_test_finished();
+ crypto_schedule_test(larval);
+ }
}
static int __init crypto_algapi_init(void)
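(Read together, the algapi.c hunks above move crypto_schedule_test() out from
under crypto_alg_sem: the larval's test_started flag is still set while the
semaphore is held, but the test itself is scheduled -- or the leftover larvals
removed -- only after the write lock is dropped.  Condensed sketch of the
resulting crypto_register_alg() flow, illustrative only:

	down_write(&crypto_alg_sem);
	larval = __crypto_register_alg(alg, &algs_to_put);
	if (!IS_ERR_OR_NULL(larval)) {
		test_started = crypto_boot_test_finished();
		larval->test_started = test_started;
	}
	up_write(&crypto_alg_sem);

	if (IS_ERR(larval))
		return PTR_ERR(larval);
	if (test_started)
		crypto_schedule_test(larval);	/* now outside the semaphore */
	else
		crypto_remove_final(&algs_to_put);
	return 0;
)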
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 9f0cf61bbc6e..886e7c913688 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -33,7 +33,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
@@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
/*
 * map cipher key to initial key state (mu):
 */
for (i = 0; i < N; i++)
- kappa[i] = be32_to_cpu(key[i]);
+ kappa[i] = get_unaligned_be32(&in_key[4 * i]);
/*
* generate R + 1 round keys:
@@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
- u8 *ciphertext, const u8 *plaintext, const int R)
+ u8 *dst, const u8 *src, const int R)
{
- const __be32 *src = (const __be32 *)plaintext;
- __be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
@@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
- state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
+ state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
@@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
*/
for (i = 0; i < 4; i++)
- dst[i] = cpu_to_be32(inter[i]);
+ put_unaligned_be32(inter[i], &dst[4 * i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
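(The same conversion recurs in aria, khazad, seed and tea below: pointer casts
through __be32/__le32, which only worked because cra_alignmask forced the
buffers to be aligned, are replaced by the unaligned accessors from
<linux/unaligned.h>, after which the alignmask can be dropped.  Before/after
sketch of the idiom, illustrative only:

	/* before: caller must provide 32-bit aligned data, cra_alignmask = 3 */
	const __be32 *key = (const __be32 *)in_key;
	u32 w = be32_to_cpu(key[i]);

	/* after: any alignment is fine, no cra_alignmask needed */
	u32 w = get_unaligned_be32(&in_key[4 * i]);
)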
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index d96dfc4fdde6..bd359d3313c2 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -15,6 +15,7 @@
*/
#include <crypto/aria.h>
+#include <linux/unaligned.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
@@ -27,7 +28,6 @@ static const u32 key_rc[20] = {
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
- const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
@@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
ck = &key_rc[(key_len - 16) / 2];
- w0[0] = be32_to_cpu(key[0]);
- w0[1] = be32_to_cpu(key[1]);
- w0[2] = be32_to_cpu(key[2]);
- w0[3] = be32_to_cpu(key[3]);
+ w0[0] = get_unaligned_be32(&in_key[0]);
+ w0[1] = get_unaligned_be32(&in_key[4]);
+ w0[2] = get_unaligned_be32(&in_key[8]);
+ w0[3] = get_unaligned_be32(&in_key[12]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
@@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
- w1[0] = be32_to_cpu(key[4]);
- w1[1] = be32_to_cpu(key[5]);
+ w1[0] = get_unaligned_be32(&in_key[16]);
+ w1[1] = get_unaligned_be32(&in_key[20]);
if (key_len > 24) {
- w1[2] = be32_to_cpu(key[6]);
- w1[3] = be32_to_cpu(key[7]);
+ w1[2] = get_unaligned_be32(&in_key[24]);
+ w1[3] = get_unaligned_be32(&in_key[28]);
} else {
w1[2] = 0;
w1[3] = 0;
@@ -195,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
- reg0 = be32_to_cpu(src[0]);
- reg1 = be32_to_cpu(src[1]);
- reg2 = be32_to_cpu(src[2]);
- reg3 = be32_to_cpu(src[3]);
+ reg0 = get_unaligned_be32(&in[0]);
+ reg1 = get_unaligned_be32(&in[4]);
+ reg2 = get_unaligned_be32(&in[8]);
+ reg3 = get_unaligned_be32(&in[12]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
@@ -241,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
- dst[0] = cpu_to_be32(reg0);
- dst[1] = cpu_to_be32(reg1);
- dst[2] = cpu_to_be32(reg2);
- dst[3] = cpu_to_be32(reg3);
+ put_unaligned_be32(reg0, &out[0]);
+ put_unaligned_be32(reg1, &out[4]);
+ put_unaligned_be32(reg2, &out[8]);
+ put_unaligned_be32(reg3, &out[12]);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
@@ -284,7 +282,6 @@ static struct crypto_alg aria_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 43af5fa510c0..ba2d9d1ea235 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -18,16 +18,6 @@
#include "asymmetric_keys.h"
-const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
- [VERIFYING_MODULE_SIGNATURE] = "mod sig",
- [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig",
- [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig",
- [VERIFYING_KEY_SIGNATURE] = "key sig",
- [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig",
- [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig",
-};
-EXPORT_SYMBOL_GPL(key_being_used_for);
-
static LIST_HEAD(asymmetric_key_parsers);
static DECLARE_RWSEM(asymmetric_key_parsers_sem);
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 6a55d206fab3..783a30b27398 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -157,15 +157,19 @@ static struct shash_alg algs[] = {{
.base.cra_init = crc32_cra_init,
}};
+static int num_algs;
+
static int __init crc32_mod_init(void)
{
/* register the arch flavor only if it differs from the generic one */
- return crypto_register_shashes(algs, 1 + (&crc32_le != &crc32_le_base));
+ num_algs = 1 + ((crc32_optimizations() & CRC32_LE_OPTIMIZATION) != 0);
+
+ return crypto_register_shashes(algs, num_algs);
}
static void __exit crc32_mod_fini(void)
{
- crypto_unregister_shashes(algs, 1 + (&crc32_le != &crc32_le_base));
+ crypto_unregister_shashes(algs, num_algs);
}
subsys_initcall(crc32_mod_init);
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7c2357c30fdf..985da981d6e2 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = __crc32c_le_base(ctx->crc, data, length);
+ ctx->crc = crc32c_le_base(ctx->crc, data, length);
return 0;
}
@@ -108,7 +108,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~__crc32c_le_base(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out);
return 0;
}
@@ -197,15 +197,19 @@ static struct shash_alg algs[] = {{
.base.cra_init = crc32c_cra_init,
}};
+static int num_algs;
+
static int __init crc32c_mod_init(void)
{
/* register the arch flavor only if it differs from the generic one */
- return crypto_register_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base));
+ num_algs = 1 + ((crc32_optimizations() & CRC32C_OPTIMIZATION) != 0);
+
+ return crypto_register_shashes(algs, num_algs);
}
static void __exit crc32c_mod_fini(void)
{
- crypto_unregister_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base));
+ crypto_unregister_shashes(algs, num_algs);
}
subsys_initcall(crc32c_mod_init);
diff --git a/crypto/crct10dif_common.c b/crypto/crct10dif_common.c
deleted file mode 100644
index b2fab366f518..000000000000
--- a/crypto/crct10dif_common.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/crc-t10dif.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-/* Table generated using the following polynomium:
- * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
- * gt: 0x8bb7
- */
-static const __u16 t10_dif_crc_table[256] = {
- 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
- 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
- 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
- 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
- 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
- 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
- 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
- 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
- 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
- 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
- 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
- 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
- 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
- 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
- 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
- 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
- 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
- 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
- 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
- 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
- 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
- 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
- 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
- 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
- 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
- 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
- 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
- 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
- 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
- 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
- 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
- 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
-};
-
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
-{
- unsigned int i;
-
- for (i = 0 ; i < len ; i++)
- crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
-
- return crc;
-}
-EXPORT_SYMBOL(crc_t10dif_generic);
-
-MODULE_DESCRIPTION("T10 DIF CRC calculation common code");
-MODULE_LICENSE("GPL");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index e843982073bb..259cb01932cb 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -57,6 +57,15 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
return 0;
}
+static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc_t10dif_update(ctx->crc, data, length);
+ return 0;
+}
+
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -71,6 +80,13 @@ static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
return 0;
}
+static int __chksum_finup_arch(__u16 crc, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(__u16 *)out = crc_t10dif_update(crc, data, len);
+ return 0;
+}
+
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
@@ -79,37 +95,67 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
return __chksum_finup(ctx->crc, data, len, out);
}
+static int chksum_finup_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksum_finup_arch(ctx->crc, data, len, out);
+}
+
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
return __chksum_finup(0, data, length, out);
}
-static struct shash_alg alg = {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base = {
- .cra_name = "crct10dif",
- .cra_driver_name = "crct10dif-generic",
- .cra_priority = 100,
- .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
+static int chksum_digest_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ return __chksum_finup_arch(0, data, length, out);
+}
+
+static struct shash_alg algs[] = {{
+ .digestsize = CRC_T10DIF_DIGEST_SIZE,
+ .init = chksum_init,
+ .update = chksum_update,
+ .final = chksum_final,
+ .finup = chksum_finup,
+ .digest = chksum_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base.cra_name = "crct10dif",
+ .base.cra_driver_name = "crct10dif-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = CRC_T10DIF_DIGEST_SIZE,
+ .init = chksum_init,
+ .update = chksum_update_arch,
+ .final = chksum_final,
+ .finup = chksum_finup_arch,
+ .digest = chksum_digest_arch,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base.cra_name = "crct10dif",
+ .base.cra_driver_name = "crct10dif-" __stringify(ARCH),
+ .base.cra_priority = 150,
+ .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}};
+
+static int num_algs;
static int __init crct10dif_mod_init(void)
{
- return crypto_register_shash(&alg);
+ /* register the arch flavor only if it differs from the generic one */
+ num_algs = 1 + crc_t10dif_is_optimized();
+
+ return crypto_register_shashes(algs, num_algs);
}
static void __exit crct10dif_mod_fini(void)
{
- crypto_unregister_shash(&alg);
+ crypto_unregister_shashes(algs, num_algs);
}
subsys_initcall(crct10dif_mod_init);
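(Both shash entries share the cra_name "crct10dif", so the usual priority
rules pick between them: when crc_t10dif_is_optimized() is true the
arch-backed entry (cra_priority 150) is registered as well and wins over the
generic one (priority 100).  Illustrative lookup, not part of the patch:

	/* resolves to the higher-priority arch entry when registered,
	 * otherwise to "crct10dif-generic" */
	struct crypto_shash *tfm = crypto_alloc_shash("crct10dif", 0, 0);

	if (!IS_ERR(tfm))
		crypto_free_shash(tfm);
)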
diff --git a/crypto/fips.c b/crypto/fips.c
index 8a784018ebfc..2fa3a9ee61a1 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
+#include <linux/string_choices.h>
#include <generated/utsrelease.h>
int fips_enabled;
@@ -24,8 +25,7 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
static int fips_enable(char *str)
{
fips_enabled = !!simple_strtol(str, NULL, 0);
- printk(KERN_INFO "fips mode: %s\n",
- fips_enabled ? "enabled" : "disabled");
+ pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}
@@ -41,7 +41,7 @@ __setup("fips=", fips_enable);
static char fips_name[] = FIPS_MODULE_NAME;
static char fips_version[] = FIPS_MODULE_VERSION;
-static struct ctl_table crypto_sysctl_table[] = {
+static const struct ctl_table crypto_sysctl_table[] = {
{
.procname = "fips_enabled",
.data = &fips_enabled,
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
deleted file mode 100644
index 385ffdfd5a9b..000000000000
--- a/crypto/keywrap.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Key Wrapping: RFC3394 / NIST SP800-38F
- *
- * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL2
- * are required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * Note for using key wrapping:
- *
- * * The result of the encryption operation is the ciphertext starting
- * with the 2nd semiblock. The first semiblock is provided as the IV.
- * The IV used to start the encryption operation is the default IV.
- *
- * * The input for the decryption is the first semiblock handed in as an
- * IV. The ciphertext is the data starting with the 2nd semiblock. The
- * return code of the decryption operation will be EBADMSG in case an
- * integrity error occurs.
- *
- * To obtain the full result of an encryption as expected by SP800-38F, the
- * caller must allocate a buffer of plaintext + 8 bytes:
- *
- * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
- * u8 data[datalen];
- * u8 *iv = data;
- * u8 *pt = data + crypto_skcipher_ivsize(tfm);
- * <ensure that pt contains the plaintext of size ptlen>
- * sg_init_one(&sg, pt, ptlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
- *
- * ==> After encryption, data now contains full KW result as per SP800-38F.
- *
- * In case of decryption, ciphertext now already has the expected length
- * and must be segmented appropriately:
- *
- * unsigned int datalen = CTLEN;
- * u8 data[datalen];
- * <ensure that data contains full ciphertext>
- * u8 *iv = data;
- * u8 *ct = data + crypto_skcipher_ivsize(tfm);
- * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- * sg_init_one(&sg, ct, ctlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
- *
- * ==> After decryption (which hopefully does not return EBADMSG), the ct
- * pointer now points to the plaintext of size ctlen.
- *
- * Note 2: KWP is not implemented as this would defy in-place operation.
- * If somebody wants to wrap non-aligned data, he should simply pad
- * the input with zeros to fill it up to the 8 byte boundary.
- */
-
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-
-struct crypto_kw_block {
-#define SEMIBSIZE 8
- __be64 A;
- __be64 R;
-};
-
-/*
- * Fast forward the SGL to the "end" length minus SEMIBSIZE.
- * The start in the SGL defined by the fast-forward is returned with
- * the walk variable
- */
-static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
- struct scatterlist *sg,
- unsigned int end)
-{
- unsigned int skip = 0;
-
- /* The caller should only operate on full SEMIBLOCKs. */
- BUG_ON(end < SEMIBSIZE);
-
- skip = end - SEMIBSIZE;
- while (sg) {
- if (sg->length > skip) {
- scatterwalk_start(walk, sg);
- scatterwalk_advance(walk, skip);
- break;
- }
-
- skip -= sg->length;
- sg = sg_next(sg);
- }
-}
-
-static int crypto_kw_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 6 * ((req->cryptlen) >> 3);
- unsigned int i;
- int ret = 0;
-
- /*
- * Require at least 2 semiblocks (note, the 3rd semiblock that is
- * required by SP800-38F is the IV.
- */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
- /* Place the IV into block A */
- memcpy(&block.A, req->iv, SEMIBSIZE);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- while (nbytes) {
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t--;
- /* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
-
- /* move pointer by nbytes in the SGL */
- crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
- ret = -EBADMSG;
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return ret;
-}
-
-static int crypto_kw_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- struct crypto_kw_block block;
- struct scatterlist *src, *dst;
- u64 t = 1;
- unsigned int i;
-
- /*
- * Require at least 2 semiblocks (note, the 3rd semiblock that is
- * required by SP800-38F is the IV that occupies the first semiblock.
- * This means that the dst memory must be one semiblock larger than src.
- * Also ensure that the given data is aligned to semiblock.
- */
- if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
- return -EINVAL;
-
- /*
- * Place the predefined IV into block A -- for encrypt, the caller
- * does not need to provide an IV, but he needs to fetch the final IV.
- */
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
-
- /*
- * src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, src points to req->src and dst to req->dst. For any
- * subsequent round, the code operates on req->dst only.
- */
- src = req->src;
- dst = req->dst;
-
- for (i = 0; i < 6; i++) {
- struct scatter_walk src_walk, dst_walk;
- unsigned int nbytes = req->cryptlen;
-
- scatterwalk_start(&src_walk, src);
- scatterwalk_start(&dst_walk, dst);
-
- while (nbytes) {
- /* get the source block */
- scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
- false);
-
- /* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(cipher, (u8 *)&block,
- (u8 *)&block);
- /* perform KW operation: modify IV with counter */
- block.A ^= cpu_to_be64(t);
- t++;
-
- /* Copy block->R into place */
- scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
- true);
-
- nbytes -= SEMIBSIZE;
- }
-
- /* we now start to operate on the dst SGL only */
- src = req->dst;
- dst = req->dst;
- }
-
- /* establish the IV for the caller to pick up */
- memcpy(req->iv, &block.A, SEMIBSIZE);
-
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
-
- return 0;
-}
-
-static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
- int err;
-
- inst = skcipher_alloc_instance_simple(tmpl, tb);
- if (IS_ERR(inst))
- return PTR_ERR(inst);
-
- alg = skcipher_ialg_simple(inst);
-
- err = -EINVAL;
- /* Section 5.1 requirement for KW */
- if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
- goto out_free_inst;
-
- inst->alg.base.cra_blocksize = SEMIBSIZE;
- inst->alg.base.cra_alignmask = 0;
- inst->alg.ivsize = SEMIBSIZE;
-
- inst->alg.encrypt = crypto_kw_encrypt;
- inst->alg.decrypt = crypto_kw_decrypt;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err) {
-out_free_inst:
- inst->free(inst);
- }
-
- return err;
-}
-
-static struct crypto_template crypto_kw_tmpl = {
- .name = "kw",
- .create = crypto_kw_create,
- .module = THIS_MODULE,
-};
-
-static int __init crypto_kw_init(void)
-{
- return crypto_register_template(&crypto_kw_tmpl);
-}
-
-static void __exit crypto_kw_exit(void)
-{
- crypto_unregister_template(&crypto_kw_tmpl);
-}
-
-subsys_initcall(crypto_kw_init);
-module_exit(crypto_kw_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
-MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
-MODULE_ALIAS_CRYPTO("kw");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 70cafe73f974..7ad338ca2c18 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
@@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
- /* key is supposed to be 32-bit aligned */
- K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
- K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
+ K2 = get_unaligned_be64(&in_key[0]);
+ K1 = get_unaligned_be64(&in_key[8]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
+ u8 *dst, const u8 *src)
{
- const __be64 *src = (const __be64 *)plaintext;
- __be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
- state = be64_to_cpu(*src) ^ roundKey[0];
+ state = get_unaligned_be64(src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
@@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
- *dst = cpu_to_be64(state);
+ put_unaligned_be64(state, dst);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
diff --git a/crypto/proc.c b/crypto/proc.c
index 56c7c78df297..522b27d90d29 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -47,13 +47,10 @@ static int c_show(struct seq_file *m, void *p)
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
seq_printf(m, "internal : %s\n",
- (alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
- "yes" : "no");
- if (fips_enabled) {
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL));
+ if (fips_enabled)
seq_printf(m, "fips : %s\n",
- (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ?
- "no" : "yes");
- }
+ str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL));
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
diff --git a/crypto/seed.c b/crypto/seed.c
index d0506ade2a5f..d05d8ed909fa 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
@@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
- const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
- x1 = be32_to_cpu(key[0]);
- x2 = be32_to_cpu(key[1]);
- x3 = be32_to_cpu(key[2]);
- x4 = be32_to_cpu(key[3]);
+ x1 = get_unaligned_be32(&in_key[0]);
+ x2 = get_unaligned_be32(&in_key[4]);
+ x3 = get_unaligned_be32(&in_key[8]);
+ x4 = get_unaligned_be32(&in_key[12]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
@@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
@@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
/* decrypt a block of text */
@@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
@@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
@@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/sig.c b/crypto/sig.c
index 5e1f1f739da2..dfc7cae90802 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -15,8 +15,6 @@
#include "internal.h"
-#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
-
static void crypto_sig_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_sig *sig = __crypto_sig_tfm(tfm);
@@ -73,7 +71,7 @@ static const struct crypto_type crypto_sig_type = {
.report = crypto_sig_report,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_SIG_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
};
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f74e4d0d87a2..a9eb2dcf2898 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,7 +17,6 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
@@ -29,19 +28,10 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
- SKCIPHER_WALK_PHYS = 1 << 0,
- SKCIPHER_WALK_SLOW = 1 << 1,
- SKCIPHER_WALK_COPY = 1 << 2,
- SKCIPHER_WALK_DIFF = 1 << 3,
- SKCIPHER_WALK_SLEEP = 1 << 4,
-};
-
-struct skcipher_walk_buffer {
- struct list_head entry;
- struct scatter_walk dst;
- unsigned int len;
- u8 *data;
- u8 buffer[];
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
};
static const struct crypto_type crypto_skcipher_type;
@@ -73,16 +63,6 @@ static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
-{
- u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
- return max(start, end_page);
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
@@ -91,30 +71,44 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- u8 *addr;
+ u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
- addr = skcipher_get_spot(addr, bsize);
- scatterwalk_copychunks(addr, &walk->out, bsize,
- (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+ scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return 0;
}
-int skcipher_walk_done(struct skcipher_walk *walk, int err)
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
- unsigned int n = walk->nbytes;
- unsigned int nbytes = 0;
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
if (!n)
goto finish;
- if (likely(err >= 0)) {
- n -= err;
- nbytes = walk->total - n;
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
}
- if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
unmap_src:
@@ -126,34 +120,36 @@ unmap_src:
skcipher_map_dst(walk);
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
- } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (err > 0) {
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
- err = -EINVAL;
- nbytes = 0;
+ res = -EINVAL;
+ total = 0;
} else
n = skcipher_done_slow(walk, n);
}
- if (err > 0)
- err = 0;
+ if (res > 0)
+ res = 0;
- walk->total = nbytes;
+ walk->total = total;
walk->nbytes = 0;
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
- if (nbytes) {
- crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
- CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ scatterwalk_done(&walk->in, 0, total);
+ scatterwalk_done(&walk->out, 1, total);
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
return skcipher_walk_next(walk);
}
@@ -162,9 +158,6 @@ finish:
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
- if (walk->flags & SKCIPHER_WALK_PHYS)
- goto out;
-
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
@@ -173,104 +166,29 @@ finish:
free_page((unsigned long)walk->page);
out:
- return err;
+ return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err)
-{
- struct skcipher_walk_buffer *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
- u8 *data;
-
- if (err)
- goto done;
-
- data = p->data;
- if (!data) {
- data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
- data = skcipher_get_spot(data, walk->stride);
- }
-
- scatterwalk_copychunks(data, &p->dst, p->len, 1);
-
- if (offset_in_page(p->data) + p->len + walk->stride >
- PAGE_SIZE)
- free_page((unsigned long)p->data);
-
-done:
- list_del(&p->entry);
- kfree(p);
- }
-
- if (!err && walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_complete);
-
-static void skcipher_queue_write(struct skcipher_walk *walk,
- struct skcipher_walk_buffer *p)
-{
- p->dst = walk->out;
- list_add_tail(&p->entry, &walk->buffers);
-}
-
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
- bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
- struct skcipher_walk_buffer *p;
- unsigned a;
unsigned n;
u8 *buffer;
- void *v;
-
- if (!phys) {
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (buffer)
- goto ok;
- }
-
- /* Start with the minimum alignment of kmalloc. */
- a = crypto_tfm_ctx_alignment() - 1;
- n = bsize;
-
- if (phys) {
- /* Calculate the minimum alignment of p->buffer. */
- a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
- n += sizeof(*p);
- }
-
- /* Minimum size to align p->buffer by alignmask. */
- n += alignmask & ~a;
- /* Minimum size to ensure p->buffer does not straddle a page. */
- n += (bsize - 1) & ~(alignmask | a);
-
- v = kzalloc(n, skcipher_walk_gfp(walk));
- if (!v)
- return skcipher_walk_done(walk, -ENOMEM);
-
- if (phys) {
- p = v;
- p->len = bsize;
- skcipher_queue_write(walk, p);
- buffer = p->buffer;
- } else {
- walk->buffer = v;
- buffer = v;
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
}
-
-ok:
walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
walk->src.virt.addr = walk->dst.virt.addr;
scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
@@ -283,7 +201,6 @@ ok:
static int skcipher_next_copy(struct skcipher_walk *walk)
{
- struct skcipher_walk_buffer *p;
u8 *tmp = walk->page;
skcipher_map_src(walk);
@@ -292,24 +209,6 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
walk->src.virt.addr = tmp;
walk->dst.virt.addr = tmp;
-
- if (!(walk->flags & SKCIPHER_WALK_PHYS))
- return 0;
-
- p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
- if (!p)
- return -ENOMEM;
-
- p->data = walk->page;
- p->len = walk->nbytes;
- skcipher_queue_write(walk, p);
-
- if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
- PAGE_SIZE)
- walk->page = NULL;
- else
- walk->page += walk->nbytes;
-
return 0;
}
@@ -317,16 +216,10 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
- walk->src.phys.page = scatterwalk_page(&walk->in);
- walk->src.phys.offset = offset_in_page(walk->in.offset);
- walk->dst.phys.page = scatterwalk_page(&walk->out);
- walk->dst.phys.offset = offset_in_page(walk->out.offset);
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- return 0;
-
- diff = walk->src.phys.offset - walk->dst.phys.offset;
- diff |= walk->src.virt.page - walk->dst.virt.page;
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)scatterwalk_page(&walk->in) -
+ (u8 *)scatterwalk_page(&walk->out);
skcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
@@ -343,10 +236,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
- int err;
-
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
n = walk->total;
bsize = min(walk->stride, max(n, walk->blocksize));
@@ -358,9 +247,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
return skcipher_walk_done(walk, -EINVAL);
slow_path:
- err = skcipher_next_slow(walk, bsize);
- goto set_phys_lowmem;
+ return skcipher_next_slow(walk, bsize);
}
+ walk->nbytes = n;
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
if (!walk->page) {
@@ -370,58 +259,30 @@ slow_path:
if (!walk->page)
goto slow_path;
}
-
- walk->nbytes = min_t(unsigned, n,
- PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
- err = skcipher_next_copy(walk);
- goto set_phys_lowmem;
+ return skcipher_next_copy(walk);
}
- walk->nbytes = n;
-
return skcipher_next_fast(walk);
-
-set_phys_lowmem:
- if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
- walk->src.phys.page = virt_to_page(walk->src.virt.addr);
- walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
- walk->src.phys.offset &= PAGE_SIZE - 1;
- walk->dst.phys.offset &= PAGE_SIZE - 1;
- }
- return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
- unsigned a = crypto_tfm_ctx_alignment() - 1;
unsigned alignmask = walk->alignmask;
unsigned ivsize = walk->ivsize;
- unsigned bs = walk->stride;
- unsigned aligned_bs;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask + 1);
-
- /* Minimum size to align buffer by alignmask. */
- size = alignmask & ~a;
-
- if (walk->flags & SKCIPHER_WALK_PHYS)
- size += ivsize;
- else {
- size += aligned_bs + ivsize;
-
- /* Minimum size to ensure buffer does not straddle a page. */
- size += (bs - 1) & ~(alignmask | a);
- }
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
return -ENOMEM;
- iv = PTR_ALIGN(walk->buffer, alignmask + 1);
- iv = skcipher_get_spot(iv, bs) + aligned_bs;
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
@@ -444,16 +305,22 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
return skcipher_walk_next(walk);
}
-static int skcipher_walk_skcipher(struct skcipher_walk *walk,
- struct skcipher_request *req)
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req, bool atomic)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const struct skcipher_alg *alg =
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -461,13 +328,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
- walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- SKCIPHER_WALK_SLEEP : 0;
-
- walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->ivsize = crypto_skcipher_ivsize(tfm);
- walk->alignmask = crypto_skcipher_alignmask(tfm);
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_skcipher_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->ivsize = alg->co.ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
if (alg->co.base.cra_type != &crypto_skcipher_type)
walk->stride = alg->co.chunksize;
@@ -476,50 +344,24 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
return skcipher_walk_first(walk);
}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
-{
- int err;
-
- might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
- err = skcipher_walk_skcipher(walk, req);
-
- walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
-
- return err;
-}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req)
-{
- walk->flags |= SKCIPHER_WALK_PHYS;
-
- INIT_LIST_HEAD(&walk->buffers);
-
- return skcipher_walk_skcipher(walk, req);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_async);
-
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- int err;
+ const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
- walk->flags &= ~SKCIPHER_WALK_PHYS;
-
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
@@ -529,22 +371,17 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
scatterwalk_done(&walk->in, 0, walk->total);
scatterwalk_done(&walk->out, 0, walk->total);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- walk->flags |= SKCIPHER_WALK_SLEEP;
- else
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-
- walk->blocksize = crypto_aead_blocksize(tfm);
- walk->stride = crypto_aead_chunksize(tfm);
- walk->ivsize = crypto_aead_ivsize(tfm);
- walk->alignmask = crypto_aead_alignmask(tfm);
-
- err = skcipher_walk_first(walk);
-
- if (atomic)
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ /*
+ * Accessing 'alg' directly generates better code than using the
+ * crypto_aead_blocksize() and similar helper functions here, as it
+ * prevents the algorithm pointer from being repeatedly reloaded.
+ */
+ walk->blocksize = alg->base.cra_blocksize;
+ walk->stride = alg->chunksize;
+ walk->ivsize = alg->ivsize;
+ walk->alignmask = alg->base.cra_alignmask;
- return err;
+ return skcipher_walk_first(walk);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
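(The new kernel-doc for skcipher_walk_done() above spells out the contract
that existing callers already follow.  Typical caller loop in a cipher
implementation, for illustration only -- example_crypt_blocks() is a
hypothetical helper and bsize the algorithm's block size:

	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* process as many whole blocks as possible in place */
		example_crypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
				     nbytes & ~(bsize - 1), walk.iv);
		/* second argument = bytes *not* processed in this step */
		err = skcipher_walk_done(&walk, nbytes & (bsize - 1));
	}
	return err;
)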
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e9e7dceb606e..e1a74cb2cfbe 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1738,10 +1738,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("hmac(rmd160)"));
break;
- case 109:
- ret = min(ret, tcrypt_test("vmac64(aes)"));
- break;
-
case 111:
ret = min(ret, tcrypt_test("hmac(sha3-224)"));
break;
diff --git a/crypto/tea.c b/crypto/tea.c
index 896f863f3067..b315da8c89eb 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
@@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
sum -= TEA_DELTA;
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
@@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static struct crypto_alg tea_algs[3] = { {
@@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
@@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
@@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1f5f48ab18c7..e61490ba4095 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2885,18 +2885,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
if (ivsize) {
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
- if (vec->generates_iv && !enc)
- memcpy(iv, vec->iv_out, ivsize);
- else if (vec->iv)
+ if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
} else {
- if (vec->generates_iv) {
- pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
- driver, vec_name);
- return -EINVAL;
- }
iv = NULL;
}
@@ -3133,10 +3126,6 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
- /* Keywrap isn't supported here yet as it handles its IV differently. */
- if (strncmp(algname, "kw(", 3) == 0)
- return 0;
-
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
@@ -5409,13 +5398,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "kw(aes)",
- .test = alg_test_skcipher,
- .fips_allowed = 1,
- .suite = {
- .cipher = __VECS(aes_kw_tv_template)
- }
- }, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
@@ -5750,12 +5732,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(streebog512_tv_template)
}
}, {
- .alg = "vmac64(aes)",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(vmac64_aes_tv_template)
- }
- }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 430d33d9ac13..d754ab997186 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,8 +59,6 @@ struct hash_testvec {
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
* @fips_skip: Skip the test vector in FIPS mode
- * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
- * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
* @setkey_error: Expected error from setkey()
* @crypt_error: Expected error from encrypt() and decrypt()
*/
@@ -74,7 +72,6 @@ struct cipher_testvec {
unsigned short klen;
unsigned int len;
bool fips_skip;
- bool generates_iv;
int setkey_error;
int crypt_error;
};
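As an aside (illustrative only, placeholder bytes rather than a real vector): with @generates_iv gone, a struct cipher_testvec entry simply carries @iv, or leaves it NULL for an all-zero IV, matching the simplified copy in test_skcipher_vec_cfg() above.

/* Illustrative sketch only -- placeholder bytes, not a real test vector. */
static const struct cipher_testvec example_tv[] = {
	{
		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
		.klen	= 16,
		.iv	= "\x01\x01\x01\x01\x01\x01\x01\x01"
			  "\x01\x01\x01\x01\x01\x01\x01\x01",
		.ptext	= "\x00\x00\x00\x00\x00\x00\x00\x00"
			  "\x00\x00\x00\x00\x00\x00\x00\x00",
		.ctext	= "\x00\x00\x00\x00\x00\x00\x00\x00"
			  "\x00\x00\x00\x00\x00\x00\x00\x00",
		.len	= 16,
	},
};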
@@ -8561,159 +8558,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac64_string1[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
-};
-
-static const char vmac64_string2[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c',
-};
-
-static const char vmac64_string3[144] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
- 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
- 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
-};
-
-static const char vmac64_string4[33] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
- 'z',
-};
-
-static const char vmac64_string5[143] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
- ']', '%', '9', '2', '7', '!', 'A',
-};
-
-static const char vmac64_string6[145] = {
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- 'p', 't', '*', '7', 'l', 'i', '!', '#',
- 'w', '0', 'z', '/', '4', 'A', 'n',
-};
-
-static const struct hash_testvec vmac64_aes_tv_template[] = {
- { /* draft-krovetz-vmac-01 test vector 1 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
- .psize = 16,
- .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
- }, { /* draft-krovetz-vmac-01 test vector 2 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
- .psize = 19,
- .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
- }, { /* draft-krovetz-vmac-01 test vector 3 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 64,
- .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
- }, { /* draft-krovetz-vmac-01 test vector 4 */
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
- "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
- .psize = 316,
- .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 16,
- .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string1,
- .psize = sizeof(vmac64_string1),
- .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string2,
- .psize = sizeof(vmac64_string2),
- .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
- }, {
- .key = "abcdefghijklmnop",
- .ksize = 16,
- .plaintext = vmac64_string3,
- .psize = sizeof(vmac64_string3),
- .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string4,
- .psize = sizeof(vmac64_string4),
- .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string5,
- .psize = sizeof(vmac64_string5),
- .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .ksize = 16,
- .plaintext = vmac64_string6,
- .psize = sizeof(vmac64_string6),
- .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
- },
-};
-
/*
* SHA384 HMAC test vectors from RFC4231
*/
@@ -24349,42 +24193,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
};
/*
- * All key wrapping test vectors taken from
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
- *
- * Note: as documented in keywrap.c, the ivout for encryption is the first
- * semiblock of the ciphertext from the test vector. For decryption, iv is
- * the first semiblock of the ciphertext.
- */
-static const struct cipher_testvec aes_kw_tv_template[] = {
- {
- .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
- "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
- .klen = 16,
- .ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea"
- "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f",
- .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
- "\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
- .len = 16,
- .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
- .generates_iv = true,
- }, {
- .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
- "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
- "\x03\x86\xf9\x32\x78\x6e\xf7\x96"
- "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f",
- .klen = 32,
- .ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa"
- "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa",
- .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
- "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
- .len = 16,
- .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
- .generates_iv = true,
- },
-};
-
-/*
* ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
* test vectors, taken from Appendix B.2.9 and B.2.10:
* http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
diff --git a/crypto/vmac.c b/crypto/vmac.c
deleted file mode 100644
index 2ea384645ecf..000000000000
--- a/crypto/vmac.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * VMAC: Message Authentication Code using Universal Hashing
- *
- * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
- *
- * Copyright (c) 2009, Intel Corporation.
- * Copyright (c) 2018, Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-/*
- * Derived from:
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Last modified: 17 APR 08, 1700 PDT
- */
-
-#include <linux/unaligned.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <asm/byteorder.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-#define VMAC_NONCEBYTES 16
-
-/* per-transform (per-key) context */
-struct vmac_tfm_ctx {
- struct crypto_cipher *cipher;
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
-};
-
-/* per-request context */
-struct vmac_desc_ctx {
- union {
- u8 partial[VMAC_NHBYTES]; /* partial block */
- __le64 partial_words[VMAC_NHBYTES / 8];
- };
- unsigned int partial_size; /* size of the partial block */
- bool first_block_processed;
- u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
- union {
- u8 bytes[VMAC_NONCEBYTES];
- __be64 pads[VMAC_NONCEBYTES / 8];
- } nonce;
- unsigned int nonce_size; /* nonce bytes filled so far */
-};
-
-/*
- * Constants and masks
- */
-#define UINT64_C(x) x##ULL
-static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
-static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
-static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
-static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
-static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
-
-#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
-
-#ifdef __LITTLE_ENDIAN
-#define INDEX_HIGH 1
-#define INDEX_LOW 0
-#else
-#define INDEX_HIGH 0
-#define INDEX_LOW 1
-#endif
-
-/*
- * The following routines are used in this implementation. They are
- * written via macros to simulate zero-overhead call-by-reference.
- *
- * MUL64: 64x64->128-bit multiplication
- * PMUL64: assumes top bits cleared on inputs
- * ADD128: 128x128->128-bit addition
- */
-
-#define ADD128(rh, rl, ih, il) \
- do { \
- u64 _il = (il); \
- (rl) += (_il); \
- if ((rl) < (_il)) \
- (rh)++; \
- (rh) += (ih); \
- } while (0)
-
-#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
-
-#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m >> 32), (m << 32)); \
- } while (0)
-
-#define MUL64(rh, rl, i1, i2) \
- do { \
- u64 _i1 = (i1), _i2 = (i2); \
- u64 m1 = MUL32(_i1, _i2>>32); \
- u64 m2 = MUL32(_i1>>32, _i2); \
- rh = MUL32(_i1>>32, _i2>>32); \
- rl = MUL32(_i1, _i2); \
- ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
- ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
- } while (0)
-
-/*
- * For highest performance the L1 NH and L2 polynomial hashes should be
- * carefully implemented to take advantage of one's target architecture.
- * Here these two hash functions are defined multiple time; once for
- * 64-bit architectures, once for 32-bit SSE2 architectures, and once
- * for the rest (32-bit) architectures.
- * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
- * Optionally, nh_vmac_nhbytes can be defined (for multiples of
- * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
- * NH computations at once).
- */
-
-#ifdef CONFIG_64BIT
-
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-
-#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- do { \
- int i; u64 th, tl; \
- rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- } \
- } while (0)
-
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
- do { \
- int i; u64 th, tl; \
- rh1 = rl1 = rh = rl = 0; \
- for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
- ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
- ADD128(rh, rl, th, tl); \
- MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
- pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
- ADD128(rh1, rl1, th, tl); \
- } \
- } while (0)
-#endif
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- do { \
- u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
- /* compute ab*cd, put bd into result registers */ \
- PMUL64(t3h, t3l, al, kh); \
- PMUL64(t2h, t2l, ah, kl); \
- PMUL64(t1h, t1l, ah, 2*kh); \
- PMUL64(ah, al, al, kl); \
- /* add 2 * ac to result */ \
- ADD128(ah, al, t1h, t1l); \
- /* add together ad + bc */ \
- ADD128(t2h, t2l, t3h, t3l); \
- /* now (ah,al), (t2l,2*t2h) need summing */ \
- /* first add the high registers, carrying into t2h */ \
- ADD128(t2h, ah, z, t2l); \
- /* double t2h and add top bit of ah */ \
- t2h = 2 * t2h + (ah >> 63); \
- ah &= m63; \
- /* now add the low registers */ \
- ADD128(ah, al, mh, ml); \
- ADD128(ah, al, z, t2h); \
- } while (0)
-
-#else /* ! CONFIG_64BIT */
-
-#ifndef nh_16
-#define nh_16(mp, kp, nw, rh, rl) \
- do { \
- u64 t1, t2, m1, m2, t; \
- int i; \
- rh = rl = t = 0; \
- for (i = 0; i < nw; i += 2) { \
- t1 = pe64_to_cpup(mp+i) + kp[i]; \
- t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
- m2 = MUL32(t1 >> 32, t2); \
- m1 = MUL32(t1, t2 >> 32); \
- ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
- MUL32(t1, t2)); \
- rh += (u64)(u32)(m1 >> 32) \
- + (u32)(m2 >> 32); \
- t += (u64)(u32)m1 + (u32)m2; \
- } \
- ADD128(rh, rl, (t >> 32), (t << 32)); \
- } while (0)
-#endif
-
-static void poly_step_func(u64 *ahi, u64 *alo,
- const u64 *kh, const u64 *kl,
- const u64 *mh, const u64 *ml)
-{
-#define a0 (*(((u32 *)alo)+INDEX_LOW))
-#define a1 (*(((u32 *)alo)+INDEX_HIGH))
-#define a2 (*(((u32 *)ahi)+INDEX_LOW))
-#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
-#define k0 (*(((u32 *)kl)+INDEX_LOW))
-#define k1 (*(((u32 *)kl)+INDEX_HIGH))
-#define k2 (*(((u32 *)kh)+INDEX_LOW))
-#define k3 (*(((u32 *)kh)+INDEX_HIGH))
-
- u64 p, q, t;
- u32 t2;
-
- p = MUL32(a3, k3);
- p += p;
- p += *(u64 *)mh;
- p += MUL32(a0, k2);
- p += MUL32(a1, k1);
- p += MUL32(a2, k0);
- t = (u32)(p);
- p >>= 32;
- p += MUL32(a0, k3);
- p += MUL32(a1, k2);
- p += MUL32(a2, k1);
- p += MUL32(a3, k0);
- t |= ((u64)((u32)p & 0x7fffffff)) << 32;
- p >>= 31;
- p += (u64)(((u32 *)ml)[INDEX_LOW]);
- p += MUL32(a0, k0);
- q = MUL32(a1, k3);
- q += MUL32(a2, k2);
- q += MUL32(a3, k1);
- q += q;
- p += q;
- t2 = (u32)(p);
- p >>= 32;
- p += (u64)(((u32 *)ml)[INDEX_HIGH]);
- p += MUL32(a0, k1);
- p += MUL32(a1, k0);
- q = MUL32(a2, k3);
- q += MUL32(a3, k2);
- q += q;
- p += q;
- *(u64 *)(alo) = (p << 32) | t2;
- p >>= 32;
- *(u64 *)(ahi) = p + t;
-
-#undef a0
-#undef a1
-#undef a2
-#undef a3
-#undef k0
-#undef k1
-#undef k2
-#undef k3
-}
-
-#define poly_step(ah, al, kh, kl, mh, ml) \
- poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
-
-#endif /* end of specialized NH and poly definitions */
-
-/* At least nh_16 is defined. Defined others as needed here */
-#ifndef nh_16_2
-#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_16(mp, kp, nw, rh, rl); \
- nh_16(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-#ifndef nh_vmac_nhbytes
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
- nh_16(mp, kp, nw, rh, rl)
-#endif
-#ifndef nh_vmac_nhbytes_2
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
- do { \
- nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
- nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
- } while (0)
-#endif
-
-static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
-{
- u64 rh, rl, t, z = 0;
-
- /* fully reduce (p1,p2)+(len,0) mod p127 */
- t = p1 >> 63;
- p1 &= m63;
- ADD128(p1, p2, len, t);
- /* At this point, (p1,p2) is at most 2^127+(len<<64) */
- t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
- ADD128(p1, p2, z, t);
- p1 &= m63;
-
- /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
- t = p1 + (p2 >> 32);
- t += (t >> 32);
- t += (u32)t > 0xfffffffeu;
- p1 += (t >> 32);
- p2 += (p1 << 32);
-
- /* compute (p1+k1)%p64 and (p2+k2)%p64 */
- p1 += k1;
- p1 += (0 - (p1 < k1)) & 257;
- p2 += k2;
- p2 += (0 - (p2 < k2)) & 257;
-
- /* compute (p1+k1)*(p2+k2)%p64 */
- MUL64(rh, rl, p1, p2);
- t = rh >> 56;
- ADD128(t, rl, z, rh);
- rh <<= 8;
- ADD128(t, rl, z, rh);
- t += t << 8;
- rl += t;
- rl += (0 - (rl < t)) & 257;
- rl += (0 - (rl > p64-1)) & 257;
- return rl;
-}
-
-/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
-static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx,
- const __le64 *mptr, unsigned int blocks)
-{
- const u64 *kptr = tctx->nhkey;
- const u64 pkh = tctx->polykey[0];
- const u64 pkl = tctx->polykey[1];
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
- u64 rh, rl;
-
- if (!dctx->first_block_processed) {
- dctx->first_block_processed = true;
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- ADD128(ch, cl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- blocks--;
- }
-
- while (blocks--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
-
- dctx->polytmp[0] = ch;
- dctx->polytmp[1] = cl;
-}
-
-static int vmac_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- __be64 out[2];
- u8 in[16] = { 0 };
- unsigned int i;
- int err;
-
- if (keylen != VMAC_KEY_LEN)
- return -EINVAL;
-
- err = crypto_cipher_setkey(tctx->cipher, key, keylen);
- if (err)
- return err;
-
- /* Fill nh key */
- in[0] = 0x80;
- for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->nhkey[i] = be64_to_cpu(out[0]);
- tctx->nhkey[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- }
-
- /* Fill poly key */
- in[0] = 0xC0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
- tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
- in[15]++;
- }
-
- /* Fill ip key */
- in[0] = 0xE0;
- in[15] = 0;
- for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
- do {
- crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
- tctx->l3key[i] = be64_to_cpu(out[0]);
- tctx->l3key[i+1] = be64_to_cpu(out[1]);
- in[15]++;
- } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
- }
-
- return 0;
-}
-
-static int vmac_init(struct shash_desc *desc)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->partial_size = 0;
- dctx->first_block_processed = false;
- memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
- dctx->nonce_size = 0;
- return 0;
-}
-
-static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int n;
-
- /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
- if (dctx->nonce_size < VMAC_NONCEBYTES) {
- n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
- memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
- dctx->nonce_size += n;
- p += n;
- len -= n;
- }
-
- if (dctx->partial_size) {
- n = min(len, VMAC_NHBYTES - dctx->partial_size);
- memcpy(&dctx->partial[dctx->partial_size], p, n);
- dctx->partial_size += n;
- p += n;
- len -= n;
- if (dctx->partial_size == VMAC_NHBYTES) {
- vhash_blocks(tctx, dctx, dctx->partial_words, 1);
- dctx->partial_size = 0;
- }
- }
-
- if (len >= VMAC_NHBYTES) {
- n = round_down(len, VMAC_NHBYTES);
- /* TODO: 'p' may be misaligned here */
- vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
- p += n;
- len -= n;
- }
-
- if (len) {
- memcpy(dctx->partial, p, len);
- dctx->partial_size = len;
- }
-
- return 0;
-}
-
-static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
- struct vmac_desc_ctx *dctx)
-{
- unsigned int partial = dctx->partial_size;
- u64 ch = dctx->polytmp[0];
- u64 cl = dctx->polytmp[1];
-
- /* L1 and L2-hash the final block if needed */
- if (partial) {
- /* Zero-pad to next 128-bit boundary */
- unsigned int n = round_up(partial, 16);
- u64 rh, rl;
-
- memset(&dctx->partial[partial], 0, n - partial);
- nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
- rh &= m62;
- if (dctx->first_block_processed)
- poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
- rh, rl);
- else
- ADD128(ch, cl, rh, rl);
- }
-
- /* L3-hash the 128-bit output of L2-hash */
- return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
-}
-
-static int vmac_final(struct shash_desc *desc, u8 *out)
-{
- const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- int index;
- u64 hash, pad;
-
- if (dctx->nonce_size != VMAC_NONCEBYTES)
- return -EINVAL;
-
- /*
- * The VMAC specification requires a nonce at least 1 bit shorter than
- * the block cipher's block length, so we actually only accept a 127-bit
- * nonce. We define the unused bit to be the first one and require that
- * it be 0, so the needed prepending of a 0 bit is implicit.
- */
- if (dctx->nonce.bytes[0] & 0x80)
- return -EINVAL;
-
- /* Finish calculating the VHASH of the message */
- hash = vhash_final(tctx, dctx);
-
- /* Generate pseudorandom pad by encrypting the nonce */
- BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
- index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
- dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
- crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
- dctx->nonce.bytes);
- pad = be64_to_cpu(dctx->nonce.pads[index]);
-
- /* The VMAC is the sum of VHASH and the pseudorandom pad */
- put_unaligned_be64(hash + pad, out);
- return 0;
-}
-
-static int vmac_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- tctx->cipher = cipher;
- return 0;
-}
-
-static void vmac_exit_tfm(struct crypto_tfm *tfm)
-{
- struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(tctx->cipher);
-}
-
-static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct shash_instance *inst;
- struct crypto_cipher_spawn *spawn;
- struct crypto_alg *alg;
- u32 mask;
- int err;
-
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
- spawn = shash_instance_ctx(inst);
-
- err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, mask);
- if (err)
- goto err_free_inst;
- alg = crypto_spawn_cipher_alg(spawn);
-
- err = -EINVAL;
- if (alg->cra_blocksize != VMAC_NONCEBYTES)
- goto err_free_inst;
-
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
- if (err)
- goto err_free_inst;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
- inst->alg.base.cra_init = vmac_init_tfm;
- inst->alg.base.cra_exit = vmac_exit_tfm;
-
- inst->alg.descsize = sizeof(struct vmac_desc_ctx);
- inst->alg.digestsize = VMAC_TAG_LEN / 8;
- inst->alg.init = vmac_init;
- inst->alg.update = vmac_update;
- inst->alg.final = vmac_final;
- inst->alg.setkey = vmac_setkey;
-
- inst->free = shash_free_singlespawn_instance;
-
- err = shash_register_instance(tmpl, inst);
- if (err) {
-err_free_inst:
- shash_free_singlespawn_instance(inst);
- }
- return err;
-}
-
-static struct crypto_template vmac64_tmpl = {
- .name = "vmac64",
- .create = vmac_create,
- .module = THIS_MODULE,
-};
-
-static int __init vmac_module_init(void)
-{
- return crypto_register_template(&vmac64_tmpl);
-}
-
-static void __exit vmac_module_exit(void)
-{
- crypto_unregister_template(&vmac64_tmpl);
-}
-
-subsys_initcall(vmac_module_init);
-module_exit(vmac_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac64");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
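Reader aid for the deleted crypto/vmac.c above: the ADD128() macro the NH and polynomial hashes relied on implements 128-bit accumulation with manual carry propagation. A hedged, stand-alone restatement (not part of the patch):

/* Illustrative sketch only: (rh:rl) += (ih:il) with an explicit carry. */
static inline void add128(u64 *rh, u64 *rl, u64 ih, u64 il)
{
	*rl += il;
	if (*rl < il)		/* unsigned wrap-around => carry into the high word */
		(*rh)++;
	*rh += ih;
}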
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 97d4a032171f..f5b8497cf5ad 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -21,6 +21,11 @@
#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. The devices with
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index b83141f58319..9f212b17611a 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -199,7 +199,6 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
mutex_init(&ctx->ts_reg_lock);
dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
- get_task_comm(task_comm, current), ctx->asid);
+ current->comm, ctx->asid);
}
return 0;
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index e0cf3b4343bb..30277ae410d4 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
- msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+ secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 5409b2c656c8..596c52e8aa26 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -361,8 +361,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
- hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
- MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;
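The two habanalabs timeout hunks are mechanical conversions; a hedged sketch of the equivalence being relied on (illustrative only):

static unsigned long gaudi_locked_timeout(void)
{
	/* Equivalent to msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED * MSEC_PER_SEC),
	 * without the intermediate millisecond multiplication. */
	return secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
}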
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 1dd6e23172ca..8729a0c57d78 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
retcode = -EFAULT;
out_err:
- if (retcode) {
- char task_comm[TASK_COMM_LEN];
-
+ if (retcode)
dev_dbg_ratelimited(dev,
"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
- }
+ task_pid_nr(current), current->comm, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
@@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- char task_comm[TASK_COMM_LEN];
-
dev_dbg_ratelimited(hdev->dev_ctrl,
"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ task_pid_nr(current), current->comm, cmd, nr);
return -ENOTTY;
}
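On the habanalabs comm changes above: presumably the point is that a task reading its own ->comm for a debug print does not need the locked copy that get_task_comm() performs, so the on-stack TASK_COMM_LEN buffers can go. Illustrative one-liner only:

/* Illustrative sketch only: log the current task's name directly. */
dev_dbg_ratelimited(hdev->dev_ctrl, "pid=%d, comm=\"%s\"\n",
		    task_pid_nr(current), current->comm);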
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 1e8ffbe25eee..38cf1c342c72 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
ret = ivpu_hw_sched_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
}
return 0;
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
err_diagnose_failure:
ivpu_hw_diagnose_failure(vdev);
ivpu_mmu_evtq_dump(vdev);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 87d7411ae059..5060c5dd40d1 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -115,41 +115,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_jsm_state_dump(vdev);
- ivpu_dev_coredump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -309,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -325,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -342,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
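The ivpu_pm.c rework above moves the recovery worker and the PCI reset callbacks onto one common bracket. A hedged sketch of its shape (illustrative only, reusing the helpers introduced above):

static void example_reset_flow(struct ivpu_device *vdev)
{
	ivpu_pm_reset_begin(vdev);		/* disable runtime PM, mark reset pending */

	if (!pm_runtime_status_suspended(vdev->drm.dev))
		ivpu_suspend(vdev);		/* only touch the hardware while it is powered */

	ivpu_pm_reset_complete(vdev);		/* cold-boot prep, resume, re-enable runtime PM */
}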
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index ca87a0939135..f7fb7205028d 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -251,6 +251,10 @@ static int __init extlog_init(void)
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
+ if (!extlog_l1_hdr) {
+ rc = -ENOMEM;
+ goto err_release_l1_hdr;
+ }
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
@@ -268,6 +272,10 @@ static int __init extlog_init(void)
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
+ if (!extlog_l1_addr) {
+ rc = -ENOMEM;
+ goto err_release_l1_dir;
+ }
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
@@ -279,6 +287,10 @@ static int __init extlog_init(void)
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
+ if (!elog_addr) {
+ rc = -ENOMEM;
+ goto err_release_elog;
+ }
rc = -ENOMEM;
/* allocate buffer to save elog record */
@@ -300,6 +312,8 @@ err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
+err_release_l1_hdr:
+ release_mem_region(l1_dirbase, l1_hdr_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 79bbfe00d241..b8543a34caea 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_status acpi_hw_enable_all_runtime_gpes(void);
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 80a3481c0470..bfbb08b1e6af 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -442,9 +442,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +455,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +485,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -546,9 +546,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index bec0dcd1f9c3..00ac0d7bb8c9 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -81,6 +81,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid - 1 fake-numa nodes into account in both
+ * pxm_to_node_map()/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already binded to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "%d max nid when expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d total number of nodes\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d2f7fd7743a1..fc92e43d0fe9 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,28 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,99 +31,373 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _commmon_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _commmon_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ return _commmon_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @data: The available profile choices
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ unsigned long *aggregate = data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
+ bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ else
+ bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int err;
+
+ set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ aggregate, _aggregate_choices);
+ if (err)
+ return err;
+ }
+
+ /* no profile handler registered any more */
+ if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+ return _commmon_choices_show(aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Store and notify a class from legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+ /* no profile handler registered any more */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, choices))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
- }
-
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
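For orientation (illustrative only, all names hypothetical): the driver-side shape expected by the new ops-based registration added in the next hunk. probe() fills the choices bitmap, and profile_get()/profile_set() operate on the class device:

/* Illustrative sketch only -- hypothetical driver, not part of the patch. */
static int example_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int example_profile_get(struct device *dev, enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* a real driver reads the hardware here */
	return 0;
}

static int example_profile_set(struct device *dev, enum platform_profile_option profile)
{
	return 0;				/* a real driver programs the hardware here */
}

static const struct platform_profile_ops example_pprof_ops = {
	.probe		= example_probe,
	.profile_get	= example_profile_get,
	.profile_set	= example_profile_set,
};

/* At driver probe time: */
ppdev = platform_profile_register(dev, "example-driver", priv, &example_pprof_ops);
if (IS_ERR(ppdev))
	return PTR_ERR(ppdev);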
@@ -124,98 +410,249 @@ static struct attribute *platform_profile_attrs[] = {
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
+ return 0;
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycles profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+ /* never cycle into the custom profile, even if all drivers support it */
+ clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(choices,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ *
+ * Return: 0
+ */
+int platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+ int id;
+ guard(mutex)(&profile_lock);
+
+ id = pprof->minor;
+ device_unregister(&pprof->dev);
+ ida_free(&platform_profile_ida, id);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
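For context, a minimal driver-side sketch of the new registration API follows. It is not part of the patch: the demo_* names are hypothetical, only the probe() prototype is directly visible in the hunks above (ops->probe(drvdata, pprof->choices)), and the profile_get()/profile_set() prototypes are assumptions.

	#include <linux/platform_profile.h>

	static enum platform_profile_option demo_cur = PLATFORM_PROFILE_BALANCED;

	static int demo_probe(void *drvdata, unsigned long *choices)
	{
		/* advertise the profiles this handler supports */
		set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
		set_bit(PLATFORM_PROFILE_BALANCED, choices);
		set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
		return 0;
	}

	static int demo_profile_get(struct device *dev,
				    enum platform_profile_option *profile)
	{
		*profile = demo_cur;	/* a real driver would query hardware */
		return 0;
	}

	static int demo_profile_set(struct device *dev,
				    enum platform_profile_option profile)
	{
		demo_cur = profile;	/* a real driver would program hardware */
		return 0;
	}

	static const struct platform_profile_ops demo_profile_ops = {
		.probe		= demo_probe,
		.profile_get	= demo_profile_get,
		.profile_set	= demo_profile_set,
	};

	/* in the parent driver's probe(), with dev as the parent device: */
	ppdev = devm_platform_profile_register(dev, "demo", NULL, &demo_profile_ops);
	if (IS_ERR(ppdev))
		return PTR_ERR(ppdev);

With the devm variant, unregistration happens automatically through devres when the parent device is unbound, mirroring the devm_platform_profile_release() callback added above.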
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 747f83f7114d..e549914a636c 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
- if (!handler->handler_addr ||
- !handler->static_data_buffer_addr ||
- !handler->acpi_param_buffer_addr) {
+ if (!handler->handler_addr) {
buffer->prm_status = PRM_HANDLER_ERROR;
return AE_OK;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 1ee81e771ae6..436019d96027 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
@@ -1656,6 +1656,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 90aaec923889..b4cd14e7fa76 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -564,6 +564,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 93d340027b7f..9f4efa8f75a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1767,6 +1767,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index cb45ef5240da..068c1612660b 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -408,6 +408,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a4b98e95ab85..76052006bd87 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1971,7 +1971,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -3800,13 +3800,13 @@ err_invalid_target_handle:
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ proc->alloc.vm_start - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6927,6 +6927,11 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a738e7745865..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,12 +1115,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
@@ -1058,39 +1137,50 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
void *cb_arg)
__must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
spin_unlock(&lru->lock);
if (vma) {
@@ -1101,17 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
@@ -1145,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c02c8ebcb466..feecd7414241 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -59,34 +59,43 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: offset in @alloc->pages[] into the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped, each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,18 +105,18 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
@@ -153,9 +162,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return free_async_space;
}
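Since the per-page LRU entry no longer lives in a dedicated binder_lru_page array, the shrinker bookkeeping now travels with the page itself through page->private. A condensed view of the round trip, using only the helpers introduced in this patch (values illustrative):

	/* allocation side, see binder_page_alloc() */
	mdata->alloc = alloc;
	mdata->page_index = index;
	INIT_LIST_HEAD(&mdata->lru);
	set_page_private(page, (unsigned long)mdata);

	/* freelist side: recover the LRU entry straight from the page */
	list_lru_add(&binder_freelist, page_to_lru(page), page_to_nid(page), NULL);

	/* shrinker side: container_of() back to the metadata, then to the alloc */
	mdata = container_of(item, struct binder_shrinker_mdata, lru);
	alloc = mdata->alloc;

	/* release side, see binder_free_page() */
	kfree((struct binder_shrinker_mdata *)page_private(page));
	__free_page(page);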
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 81442fe20a69..c88735c54848 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,11 +104,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
+ alloc->pages[page_index] ?
"lru" : "free", page_index);
return false;
}
@@ -148,10 +148,10 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
* if binder shrinker ran during binder_alloc_free_buf
* calls above.
*/
- if (list_empty(&alloc->pages[i].lru)) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
pr_err_size_seq(sizes, seq);
pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ alloc->pages[i] ? "alloc" : "free", i);
binder_selftest_failures++;
}
}
@@ -168,9 +168,9 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
}
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
+ if (alloc->pages[i]) {
pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
+ list_empty(page_to_lru(alloc->pages[i])) ?
"alloc" : "lru", i);
binder_selftest_failures++;
}
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->mapped)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index f8d6be682f23..e4eb8357989c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -25,8 +25,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -582,4 +581,12 @@ struct binder_object {
};
};
+/**
+ * binder_add_device - Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ *
+ * Not reentrant as the list is not protected by any locks
+ */
+void binder_add_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..16de1b9e72f7 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index ad1fa7abc323..bc6bae76ccaf 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -207,6 +207,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8d27c567be1c..f813dbdc2346 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1987,7 +1987,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
/* legacy intx interrupts */
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
}
hpriv->irq = pci_irq_vector(pdev, 0);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 06781bdde0d2..8e895ae45c86 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -328,7 +328,7 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 mask_port_map; /* mask out particular bits */
+ u32 mask_port_map; /* Mask of valid ports */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
@@ -379,6 +379,17 @@ struct ahci_host_priv {
int port);
};
+/*
+ * Return true if a port should be ignored because it is excluded from
+ * the host port map.
+ */
+static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
+ unsigned int portid)
+{
+ return portid >= hpriv->nports ||
+ !(hpriv->mask_port_map & (1 << portid));
+}
+
extern int ahci_ignore_sss;
extern const struct attribute_group *ahci_shost_groups[];
@@ -397,7 +408,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy_rr = true, \
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
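The helper is used by the platform glue in the hunks that follow; the pattern is simply to skip excluded ports before touching per-port resources:

	for (i = 0; i < hpriv->nports; i++) {
		if (ahci_ignore_port(hpriv, i))
			continue;
		/* per-port work, e.g. phy_init(hpriv->phys[i]) */
	}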
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index ef569eae4ce4..29be74fedcf0 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -288,6 +288,9 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 1ec35778903d..2d6a08c23d6a 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -206,6 +206,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
@@ -215,6 +218,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 6b9b4a1dfa15..4336c8a6e208 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -176,7 +176,6 @@ static int st_ahci_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -221,9 +220,8 @@ static int st_ahci_resume(struct device *dev)
return ahci_platform_resume_host(dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
@@ -234,7 +232,7 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &st_ahci_pm_ops,
+ .pm = pm_sleep_ptr(&st_ahci_pm_ops),
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2f57ec00ab82..e70b6c089cf1 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -209,7 +209,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
-static struct pci_device_id ata_generic[] = {
+static const struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 093b940bc953..d441246fa357 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1725,7 +1725,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 7a8064520a35..53b2c7719dc5 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -49,6 +49,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -70,6 +73,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
disable_phys:
while (--i >= 0) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -88,6 +94,9 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -432,6 +441,20 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
return 0;
}
+static u32 ahci_platform_find_max_port_id(struct device *dev)
+{
+ u32 max_port = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ u32 port;
+
+ if (!of_property_read_u32(child, "reg", &port))
+ max_port = max(max_port, port);
+ }
+
+ return max_port;
+}
+
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -458,6 +481,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
u32 mask_port_map = 0;
+ u32 max_port;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
@@ -549,15 +573,17 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
goto err_out;
}
+ /* find maximum port id for allocating structures */
+ max_port = ahci_platform_find_max_port_id(dev);
/*
- * If no sub-node was found, we still need to set nports to
- * one in order to be able to use the
+ * Set nports according to the maximum port id, clamped at
+ * AHCI_MAX_PORTS; a warning for any invalid port id is
+ * generated later.
+ * When the DT has no sub-nodes, max_port is 0 and nports is 1,
+ * so that it is still possible to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (child_nodes)
- hpriv->nports = child_nodes;
- else
- hpriv->nports = 1;
+ hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1);
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
@@ -625,6 +651,8 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* If no sub-node was found, keep this for device tree
* compatibility
*/
+ hpriv->mask_port_map |= BIT(0);
+
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;
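A worked example of the new sizing, assuming a DT with enabled child nodes at reg = <0> and reg = <5> and that each enabled child sets its bit in mask_port_map in the existing child-node loop (not shown in this excerpt):

	/*
	 * max_port      = 5  ->  nports = min(AHCI_MAX_PORTS, 5 + 1) = 6
	 * mask_port_map = BIT(0) | BIT(5)
	 * ahci_ignore_port(hpriv, 1..4) returns true, so the PHY and
	 * regulator loops skip the unused slots instead of failing.
	 */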
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..63ec2f218431 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,6 +4143,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9c76fb1ad2ec..ba300cc0a3a3 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1313,7 +1313,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_device_configure - Default device_configure routine for libata
+ * ata_sas_sdev_configure - Default sdev_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
@@ -1323,14 +1323,14 @@ EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
* Zero.
*/
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap)
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_device_configure);
+EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2ce5befd2242..2796c0da8257 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1133,7 +1133,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
}
/**
- * ata_scsi_slave_alloc - Early setup of SCSI device
+ * ata_scsi_sdev_init - Early setup of SCSI device
* @sdev: SCSI device to examine
*
* This is called from scsi_alloc_sdev() when the scsi device
@@ -1143,7 +1143,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_alloc(struct scsi_device *sdev)
+int ata_scsi_sdev_init(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct device_link *link;
@@ -1166,10 +1166,10 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);
/**
- * ata_scsi_device_configure - Set SCSI device attributes
+ * ata_scsi_sdev_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
@@ -1181,8 +1181,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
@@ -1192,10 +1191,10 @@ int ata_scsi_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);
/**
- * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
@@ -1208,7 +1207,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
-void ata_scsi_slave_destroy(struct scsi_device *sdev)
+void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
@@ -1228,7 +1227,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
kfree(sdev->dma_drain_buf);
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 67f277e1c3bf..5a46c066abc3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
- unsigned int offset;
+ unsigned int offset, count;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -617,25 +617,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
+ /* don't overrun current sg */
+ count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
+
+ trace_ata_sff_pio_transfer_data(qc, offset, count);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
- if (offset + qc->sect_size > PAGE_SIZE) {
+ if (offset + count > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0,
- qc->sect_size - split_len);
+ ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
} else {
- ata_pio_xfer(qc, page, offset, qc->sect_size);
+ ata_pio_xfer(qc, page, offset, count);
}
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
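A concrete case the new clamp guards against, with illustrative numbers:

	/*
	 * sect_size = 512, cursg->length = 4096, cursg_ofs = 3840:
	 *   count = min(4096 - 3840, 512) = 256
	 * so the transfer stops at the end of the current sg entry
	 * instead of writing 256 bytes past it.
	 */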
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index aaef5924f636..308f86f9e2f0 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_reinit_one(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id atp867x_pci_tbl[] = {
+static const struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 4b01bb6880b0..fbf5f07ea357 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -812,8 +812,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int pata_macio_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -822,7 +822,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
int rc;
/* First call original */
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (rc)
return rc;
@@ -932,7 +932,7 @@ static const struct scsi_host_template pata_macio_sht = {
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
.max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE,
- .device_configure = pata_macio_device_configure,
+ .sdev_configure = pata_macio_sdev_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy_rr = true,
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index ced906bf56be..beb53bd990be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -97,7 +97,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
-static struct pci_device_id ata_tosh[] = {
+static const struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 0a9689862f71..09792aac7f9d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->private_data = hpriv;
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index d040799bf9cb..530ee26b3012 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,8 +26,6 @@
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
- * @sata0_reset: SATA0 reset handler
- * @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
@@ -38,8 +35,6 @@ struct sata_gemini {
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
- struct reset_control *sata0_reset;
- struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
@@ -224,18 +219,6 @@ void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg,
- unsigned int bridge)
-{
- if (bridge == 0)
- reset_control_reset(sg->sata0_reset);
- else
- reset_control_reset(sg->sata1_reset);
- msleep(10);
- return gemini_sata_setup_bridge(sg, bridge);
-}
-EXPORT_SYMBOL(gemini_sata_reset_bridge);
-
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
@@ -265,21 +248,6 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
- if (IS_ERR(sg->sata0_reset)) {
- dev_err(dev, "no SATA0 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata0_reset);
- }
- sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
- if (IS_ERR(sg->sata1_reset)) {
- dev_err(dev, "no SATA1 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata1_reset);
- }
-
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index 6f6e691d6007..b6e4a5c86e01 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -17,6 +17,5 @@ bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
#endif
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 21c72650f9cc..bcbf96867f89 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy_rr = true,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 823cce5ea1e9..f36e2915ccf1 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,8 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -319,8 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -382,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .device_configure = nv_adma_device_configure,
+ .sdev_configure = nv_adma_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy_rr = true,
@@ -393,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .device_configure = nv_swncq_device_configure,
+ .sdev_configure = nv_swncq_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy_rr = true,
@@ -663,8 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev,
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -1871,8 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev,
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 935b13e79dec..87f4cde6a686 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -380,7 +380,7 @@ static const struct scsi_host_template sil24_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations sil24_ops = {
@@ -1316,7 +1316,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
}
pci_set_master(pdev);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index ef8724986de3..b8b6d9eff3b8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 60ea45926cd1..52894ff49dcb 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d39b87537168..a53a2dfc1e17 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
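The hunks above convert several libata host drivers from pci_intx() to pcim_intx(), the device-managed variant whose effect is undone automatically when the driver unbinds. A minimal sketch of the resulting probe-path pattern (example_init_one() is hypothetical; only pcim_intx(), pci_enable_msi() and pci_set_master() are taken from the hunks, and a managed enable via pcim_enable_device() is assumed to have happened earlier):

#include <linux/pci.h>

/* Prefer MSI and mask legacy INTx with the managed helper, as in the
 * sata_sil24/sata_vsc hunks; otherwise keep legacy INTx enabled. */
static int example_init_one(struct pci_dev *pdev)
{
	if (pci_enable_msi(pdev) == 0)
		pcim_intx(pdev, 0);
	else
		pcim_intx(pdev, 1);

	pci_set_master(pdev);
	return 0;
}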
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index a802678a6f74..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 657c93c38b0d..6b9e65a42cd2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
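The drivers/base/bus.c hunks switch bus_for_each_dev() to the device_iter_t callback type and make bus_find_device() take the reference unconditionally once the match callback succeeds. A small sketch of an iterator written against the new prototype (example_count_dev() and example_count_bus_devices() are hypothetical):

#include <linux/device.h>

/* Callback with the device_iter_t signature: returning non-zero stops
 * the walk; here we simply count every device on the bus. */
static int example_count_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int example_count_bus_devices(const struct bus_type *bus)
{
	unsigned int count = 0;

	bus_for_each_dev(bus, NULL, &count, example_count_dev);
	return count;
}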
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 582b5a02a5c4..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -395,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -594,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -626,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
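With the hunks above, class_compat_create_link() and class_compat_remove_link() no longer take the optional device_link argument; the compat helper now only manages the class-side symlink. A hedged migration sketch for a caller that still wants the legacy "device" link (example_register_compat() is hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static int example_register_compat(struct class_compat *cls, struct device *dev)
{
	int err;

	err = class_compat_create_link(cls, dev);
	if (err)
		return err;

	/* The "device" link is no longer created by the core; add it
	 * explicitly if backwards compatibility still needs it. */
	if (dev->parent) {
		err = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					"device");
		if (err)
			class_compat_remove_link(cls, dev);
	}
	return err;
}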
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 94865c9d8adc..5a1f05198114 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -3980,7 +3980,7 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4010,7 +4010,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4043,14 +4043,14 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from();
*/
int device_for_each_child_reverse_from(struct device *parent,
- struct device *from, const void *data,
- int (*fn)(struct device *, const void *))
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init_node(&parent->p->klist_children, &i,
@@ -4079,8 +4079,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
@@ -4089,62 +4089,17 @@ struct device *device_find_child(struct device *parent, void *data,
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
EXPORT_SYMBOL_GPL(device_find_child);
-/**
- * device_find_child_by_name - device iterator for locating a child device.
- * @parent: parent struct device
- * @name: name of the child device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
-{
- struct klist_iter i;
- struct device *child;
-
- if (!parent)
- return NULL;
-
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
- break;
- klist_iter_exit(&i);
- return child;
-}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
-
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
@@ -5244,15 +5199,21 @@ int device_match_name(struct device *dev, const void *name)
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5264,13 +5225,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
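The drivers/base/core.c hunks change device_find_child() to the const-qualified device_match_t prototype and drop the device_find_child_by_name()/device_find_any_child() helpers from this file, while the device_match_*() helpers now return false for NULL keys. Under the new signature, a by-name child lookup can be expressed with the generic matcher (sketch only; example_find_child() is hypothetical):

#include <linux/device.h>

/* Returns a referenced child or NULL; callers must put_device() it. */
static struct device *example_find_child(struct device *parent,
					 const char *name)
{
	return device_find_child(parent, name, device_match_name);
}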
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fdaa24bb641a..a7e511849875 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -599,6 +599,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -614,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -630,6 +632,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
NULL
};
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index c795edad1b96..64840e5d5fcc 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk)
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -132,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs = devcd_dev_bin_attrs,
+ .bin_attrs_new = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
@@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
+ * In the above diagram, it looks like disabled_store() would be racing with parallelly
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * is called after kfree of devcd memory after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2152eec0c135..93e7779ef21e 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -750,25 +750,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
EXPORT_SYMBOL_GPL(__devm_add_action);
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * entry could have been found.
+ *
+ * This should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres rust abstraction.
+ *
+ * Causing the warning from regular driver code most likely indicates an abuse
+ * of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no entry could have been found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
/**
* devm_release_action() - release previously added custom action
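The devres hunk above replaces devm_remove_action() with devm_remove_action_nowarn(), which returns -ENOENT instead of warning when the action is no longer registered. A sketch of how a caller with independent lifetime management might use it (example_release(), example_teardown() and the cookie are hypothetical):

#include <linux/device.h>

static void example_release(void *data)
{
	/* undo whatever setup registered this action */
}

static void example_teardown(struct device *dev, void *cookie)
{
	/* devm_remove_action_nowarn() only unregisters the action, it does
	 * not call it, so invoke the release by hand on success; -ENOENT
	 * means the entry was already gone. */
	if (devm_remove_action_nowarn(dev, example_release, cookie) == -ENOENT)
		return;

	example_release(cookie);
}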
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b4eb5b89c4ee..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index ddb70e29eb42..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -25,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..d254ceb56d84 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,11 +356,11 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
+ .read_new = firmware_data_read,
+ .write_new = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
+ .bin_attrs_new = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
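The firmware-loader sysfs hunks move to the const bin_attribute callbacks (.read_new/.write_new) and reference them from the group through .bin_attrs_new. A skeletal example of that wiring (all names hypothetical; the read callback is a stub):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_data_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	return 0;	/* nothing to return in this sketch */
}

static const struct bin_attribute example_attr_data = {
	.attr = { .name = "data", .mode = 0444 },
	.read_new = example_data_read,
};

static const struct bin_attribute *const example_bin_attrs[] = {
	&example_attr_data,
	NULL
};

static const struct attribute_group example_group = {
	.bin_attrs_new = example_bin_attrs,
};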
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 67858eeb92ed..348c5dbbfa68 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -512,7 +512,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -524,7 +524,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index cbc9a7a75def..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -656,13 +656,15 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.set_active flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume)
+ if (skip_resume) {
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ } else if (dev->power.set_active) {
pm_runtime_set_active(dev);
+ dev->power.set_active = false;
+ }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -1278,8 +1280,14 @@ Skip:
dev->power.may_skip_resume))
dev->power.must_resume = true;
- if (dev->power.must_resume)
+ if (dev->power.must_resume) {
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
dpm_superior_set_must_resume(dev);
+ }
Complete:
complete_all(&dev->power.completion);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..c1392743df9c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Return if property @propname is true or false in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return if property @propname is true or false in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
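The property.c hunks export device_property_read_bool()/fwnode_property_read_bool() as first-class helpers that also consult the secondary fwnode. A one-line usage sketch (the property name "example,enable-foo" is made up):

#include <linux/property.h>

static bool example_foo_enabled(struct device *dev)
{
	return device_property_read_bool(dev, "example,enable-foo");
}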
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..b1726a3515f6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 3523dd82d7a0..4db7f6ce8ade 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -226,10 +226,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to insure all are errored */
- blk_mq_freeze_queue(d->blkq);
+ unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
- blk_mq_unfreeze_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq, memflags);
}
if (d->gd)
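This and the following block-driver hunks adapt to blk_mq_freeze_queue() now returning a cookie that must be handed back to blk_mq_unfreeze_queue(). The recurring freeze/quiesce pattern, extracted as a sketch (example_drain_queue() is hypothetical):

#include <linux/blk-mq.h>

static void example_drain_queue(struct request_queue *q)
{
	unsigned int memflags;

	/* Freeze returns the cookie (called memflags in the hunks) that
	 * the matching unfreeze call requires. */
	memflags = blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q, memflags);
}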
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 110f9aca2667..a81ade622a01 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ unsigned int memflags;
int ret;
if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
q = unit[drive].disk[type]->queue;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1ec7417c7f00..c05fe27a96b6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -586,6 +586,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
{
struct file *file = fget(arg);
struct file *old_file;
+ unsigned int memflags;
int error;
bool partscan;
bool is_loop;
@@ -623,14 +624,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
/* and ... switch */
disk_force_media_change(lo->lo_disk);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
mapping_set_gfp_mask(file->f_mapping,
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
@@ -1255,6 +1256,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
bool partscan = false;
bool size_changed = false;
+ unsigned int memflags;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
@@ -1272,7 +1274,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
}
/* I/O needs to be drained before changing lo_offset or lo_sizelimit */
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = loop_set_status_from_info(lo, info);
if (err)
@@ -1281,8 +1283,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
(info->lo_flags & LO_FLAGS_PARTSCAN);
- lo->lo_flags &= ~(LOOP_SET_STATUS_SETTABLE_FLAGS |
- LOOP_SET_STATUS_CLEARABLE_FLAGS);
+ lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
if (size_changed) {
@@ -1295,7 +1296,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_update_dio(lo);
out_unfreeze:
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
out_unlock:
@@ -1447,6 +1448,7 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
bool use_dio = !!arg;
+ unsigned int memflags;
if (lo->lo_state != Lo_bound)
return -ENXIO;
@@ -1460,18 +1462,19 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
vfs_fsync(lo->lo_backing_file, 0);
}
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
if (use_dio)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
else
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
return 0;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
struct queue_limits lim;
+ unsigned int memflags;
int err = 0;
if (lo->lo_state != Lo_bound)
@@ -1486,10 +1489,10 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
lim = queue_limits_start_update(lo->lo_queue);
loop_update_limits(lo, &lim, arg);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
return err;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b63a0f29a54a..7bdc7eb808ea 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1234,6 +1234,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
+ unsigned int memflags;
int err;
/* Arg will be cast to int, check it to avoid overflow */
@@ -1247,7 +1248,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
- blk_mq_freeze_queue(nbd->disk->queue);
+ memflags = blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1288,12 +1289,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
return 0;
put_socket:
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
sockfd_put(sock);
return err;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 5b393e4a1ddf..faafd7ff43d6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7281,9 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
- blk_mq_freeze_queue(rbd_dev->disk->queue);
+ unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
blk_mark_disk_dead(rbd_dev->disk);
- blk_mq_unfreeze_queue(rbd_dev->disk->queue);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 88dcae6ec575..282f81616a78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -918,12 +918,12 @@ struct vdc_check_port_data {
char *type;
};
-static int vdc_device_probed(struct device *dev, void *arg)
+static int vdc_device_probed(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vdc_check_port_data *port_data;
+ const struct vdc_check_port_data *port_data;
- port_data = (struct vdc_check_port_data *)arg;
+ port_data = (const struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
@@ -1113,6 +1113,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
+ unsigned int memflags;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,13 +1122,13 @@ static void vdc_queue_drain(struct vdc_port *port)
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 9914153b365b..3aedcb5add61 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
+ unsigned int memflags;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static int fd_eject(struct floppy_state *fs)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index bbaa26b523b8..6a61ec35f426 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1579,16 +1579,16 @@ static void virtblk_remove(struct virtio_device *vdev)
put_disk(vblk->disk);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtblk_freeze(struct virtio_device *vdev)
+static int virtblk_freeze_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
struct request_queue *q = vblk->disk->queue;
+ unsigned int memflags;
/* Ensure no requests in virtqueues before deleting vqs. */
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue_nowait(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
@@ -1602,7 +1602,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
return 0;
}
-static int virtblk_restore(struct virtio_device *vdev)
+static int virtblk_restore_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
@@ -1616,8 +1616,29 @@ static int virtblk_restore(struct virtio_device *vdev)
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
#endif
+static int virtblk_reset_prepare(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_reset_done(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1653,6 +1674,8 @@ static struct virtio_driver virtio_blk = {
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
+ .reset_prepare = virtblk_reset_prepare,
+ .reset_done = virtblk_reset_done,
};
static int __init virtio_blk_init(void)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 838064593f62..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
- ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
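The xen-blkback hunk replaces msecs_to_jiffies(10 * 1000) with the more readable secs_to_jiffies(10). For illustration only (example_deadline() is hypothetical):

#include <linux/jiffies.h>

static unsigned long example_deadline(void)
{
	return jiffies + secs_to_jiffies(10);
}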
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7903a4da40ac..9f5020b077c5 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -55,8 +55,8 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
@@ -112,17 +112,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -143,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -277,18 +287,6 @@ static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
}
#endif
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
-{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
-
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
-}
-
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
@@ -833,13 +831,10 @@ static ssize_t writeback_store(struct device *dev,
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
+ if (zram_read_from_zspool(zram, page, index))
+ goto next;
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- release_pp_slot(zram, pps);
- continue;
- }
-
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
@@ -879,7 +874,7 @@ static ssize_t writeback_store(struct device *dev,
zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
+ zram_set_handle(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
@@ -889,6 +884,8 @@ static ssize_t writeback_store(struct device *dev,
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
+
+ cond_resched();
}
if (blk_idx)
@@ -1505,7 +1502,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ free_block_bdev(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1533,56 +1530,73 @@ out:
zram_set_obj_size(zram, index, 0);
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
-
+ zstrm = zcomp_stream_get(zram->comps[prio]);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- copy_page(dst, src);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zram->comps[prio], zstrm,
- src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
zs_unmap_object(zram->mem_pool, handle);
+ zcomp_stream_put(zram->comps[prio]);
+
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1600,7 +1614,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
*/
zram_slot_unlock(zram, index);
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
parent);
}
@@ -1638,33 +1652,88 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ /*
+ * This function is called from preemptible context so we don't need
+ * to do optimistic and fallback to pessimistic handle allocation,
+ * like we do for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ src = kmap_local_page(page);
+ memcpy(dst, src, PAGE_SIZE);
+ kunmap_local(src);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ void *dst, *mem;
struct zcomp_strm *zstrm;
unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ bool same_filled;
+
+ /* First, free memory allocated to this slot (if any) */
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_slot_unlock(zram, index);
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
+ mem = kmap_local_page(page);
ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
- src, &comp_len);
- kunmap_local(src);
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1673,8 +1742,11 @@ compress_again:
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ return write_incompressible_page(zram, page, index);
+ }
+
/*
* handle allocation has 2 paths:
* a) fast path is executed with preemption disabled (for
@@ -1690,35 +1762,23 @@ compress_again:
*/
if (IS_ERR_VALUE(handle))
handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ goto compress_again;
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
-
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ if (!zram_can_store_page(zram)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
@@ -1726,41 +1786,19 @@ compress_again:
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
-
+ memcpy(dst, zstrm->buffer, comp_len);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
-
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_slot_lock(zram, index);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 134be414e210..db78d7c01b9a 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -62,10 +62,7 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct zram_table_entry {
- union {
- unsigned long handle;
- unsigned long element;
- };
+ unsigned long handle;
unsigned int flags;
spinlock_t lock;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1230045d78a5..aa5ec1d444a9 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1381,13 +1381,12 @@ static void btnxpuart_tx_work(struct work_struct *work)
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
- serdev_device_wait_until_sent(serdev, 0);
hdev->stat.byte_tx += len;
skb_pull(skb, len);
if (skb->len > 0) {
skb_queue_head(&nxpdev->txq, skb);
- break;
+ continue;
}
switch (hci_skb_pkt_type(skb)) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 9aa018d4f6f5..90966dfbd278 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -899,11 +899,6 @@ static void btusb_reset(struct hci_dev *hdev)
struct btusb_data *data;
int err;
- if (hdev->reset) {
- hdev->reset(hdev);
- return;
- }
-
data = hci_get_drvdata(hdev);
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -2639,8 +2634,15 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
+ /*
+ * The function usb_driver_claim_interface() is documented to need
+ * locks held if it's not called from a probe routine. The code here
+ * is called from the hci_power_on workqueue, so grab the lock.
+ */
+ device_lock(&btmtk_data->isopkt_intf->dev);
err = usb_driver_claim_interface(&btusb_driver,
btmtk_data->isopkt_intf, data);
+ device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
bt_dev_err(data->hdev, "Failed to claim iso interface");
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..52053f7c6d9a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
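The const-qualified callback above matches the device_match_t signature used by bus_find_device(), which hands the search data through as a const void pointer. A minimal lookup sketch, written as if it lived in dprc-driver.c; the helper name is an assumption and not part of the patch:

static struct fsl_mc_device *
example_find_child(const struct fsl_mc_obj_desc *obj_desc)
{
	struct device *dev;

	/* obj_desc is forwarded to __fsl_mc_device_match() as const data */
	dev = bus_find_device(&fsl_mc_bus_type, NULL, obj_desc,
			      __fsl_mc_device_match);

	/* bus_find_device() returns a referenced device; the caller must put_device() it */
	return dev ? to_fsl_mc_device(dev) : NULL;
}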
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 2916d1333649..d1f3d327ddd1 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -320,90 +320,90 @@ const struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index e8c92972f9df..9dcc7184817d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -357,6 +357,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 56ba4192c89c..c41119b9079f 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -245,6 +245,58 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -742,6 +794,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -949,7 +1004,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..1e57ebfb7622 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 51745ed1bbab..b163e043c687 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3612,7 +3612,7 @@ static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
}
/* Place files in /proc/sys/dev/cdrom */
-static struct ctl_table cdrom_table[] = {
+static const struct ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 76eac3653b1c..c573ed2ee71a 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -338,7 +338,10 @@ static void cdx_shutdown(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
if (cdx_drv && cdx_drv->shutdown)
cdx_drv->shutdown(cdx_dev);
}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 48fe96ab4649..e110857824fc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -724,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-static struct ctl_table hpet_table[] = {
+static const struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 05f17e3e6207..e63c316d8aaa 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static struct ctl_table ipmi_table[] = {
+static const struct ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 541edc26ec89..2cf595d2e10b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx);
#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
-static int misc_minor_alloc(void)
+static int misc_minor_alloc(int minor)
{
- int ret;
-
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
+ int ret = 0;
+
+ if (minor == MISC_DYNAMIC_MINOR) {
+ /* allocate free id */
+ ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+ if (ret >= 0) {
+ ret = DYNAMIC_MINORS - ret - 1;
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+ MINORMASK, GFP_KERNEL);
+ }
} else {
- ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
- MINORMASK, GFP_KERNEL);
+ /* specific minor, check if it is in dynamic or misc dynamic range */
+ if (minor < DYNAMIC_MINORS) {
+ minor = DYNAMIC_MINORS - minor - 1;
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else if (minor > MISC_DYNAMIC_MINOR) {
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else {
+ /* case of non-dynamic minors, no need to allocate id */
+ ret = 0;
+ }
}
return ret;
}
@@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc)
mutex_lock(&misc_mtx);
if (is_dynamic) {
- int i = misc_minor_alloc();
+ int i = misc_minor_alloc(misc->minor);
if (i < 0) {
err = -EBUSY;
@@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc)
misc->minor = i;
} else {
struct miscdevice *c;
+ int i;
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
@@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc)
goto out;
}
}
+
+ i = misc_minor_alloc(misc->minor);
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
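A hedged usage sketch for the allocator change above (device name, minor value, and fops are assumptions, not taken from the patch): registering a fixed minor inside the dynamic range now also reserves the matching slot in misc_minors_ida, so a later MISC_DYNAMIC_MINOR registration cannot be handed the same number.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_dev = {
	.minor = 10,		/* fixed minor below DYNAMIC_MINORS */
	.name  = "example",
	.fops  = &example_fops,
};

static int example_register(void)
{
	/* misc_minor_alloc(10) reserves IDA index DYNAMIC_MINORS - 10 - 1 = 117 */
	return misc_register(&example_dev);
}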
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23ee76bbb4aa..2581186fa61b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1665,7 +1665,7 @@ static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
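The hunk above swaps the devm allocation for kvmalloc() and ties the buffer to the device lifetime through a devm action. A minimal sketch of that general pattern; the helper names are illustrative, and it uses devm_add_action_or_reset(), which folds in the error-path free that the patch performs by hand:

#include <linux/device.h>
#include <linux/slab.h>

static void example_release(void *data)
{
	kvfree(data);
}

static int example_alloc(struct device *dev, size_t len, void **out)
{
	void *buf = kvmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* on registration failure the action runs immediately, freeing buf */
	ret = devm_add_action_or_reset(dev, example_release, buf);
	if (ret)
		return ret;

	*out = buf;
	return 0;
}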
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index c62b208b42f1..24442485e73e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -883,9 +883,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = kmap_atomic(buf->page);
+ src = kmap_local_page(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- kunmap_atomic(src);
+ kunmap_local(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
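A hedged aside on the conversion above: kmap_local_page() provides the same short-lived, CPU-local mapping as kmap_atomic() but without disabling preemption or pagefaults; mappings only need to be released in reverse order on the same CPU. A small sketch with an assumed helper name:

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_page(struct page *page, void *dst,
				   size_t offset, size_t len)
{
	char *src = kmap_local_page(page);

	/* caller is assumed to keep offset + len within PAGE_SIZE */
	memcpy(dst, src + offset, len);
	kunmap_local(src);
}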
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 6a763bc9ac1a..15bbdeb60b8e 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -489,7 +489,6 @@ static int en7581_pci_enable(struct clk_hw *hw)
REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val | mask, np_base + REG_PCI_CONTROL);
- msleep(250);
return 0;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f2117fef7c7d..9c75dcc9a534 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -449,10 +449,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
- if (!io)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*io));
+ io = memblock_alloc_or_panic(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index b39dee7b93af..f00019b078a7 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -23,7 +23,7 @@
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 704e84d00639..9e46960f6a86 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -18,6 +18,7 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
config ARM_AIROHA_SOC_CPUFREQ
tristate "Airoha EN7581 SoC CPUFreq support"
depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
select PM_OPP
default ARCH_AIROHA
help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 302df42d6887..463b69a2dff5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -909,11 +909,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost) {
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
- policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
- }
-
return result;
err_unreg:
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index dd9b8d6993d6..313550fa62d4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -699,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
- max_perf = cap_perf;
+ max_perf = cpudata->max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
@@ -747,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
return ret;
@@ -822,25 +821,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
-
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -850,8 +852,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2486a6c5256a..8f512448382f 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -611,7 +611,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
- policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+ caps->highest_perf : caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -619,7 +620,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1076e37a18ad..30ffbddc7ece 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1410,10 +1410,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
- policy->boost_enabled = true;
-
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1476,6 +1472,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1570,6 +1570,19 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ if (cpufreq_driver->set_boost &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
+ policy->boost_enabled = cpufreq_boost_enabled();
+ ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ if (ret) {
+ /* If set_boost() fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ policy->boost_enabled ? "enable" : "disable");
+ policy->boost_enabled = !policy->boost_enabled;
+ }
+ }
+
pr_debug("initialization complete\n");
return 0;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c6bdfc308e99..9cef71528076 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
unsigned int vddarm_max;
};
+#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
+#endif
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct s3c64xx_dvfs *dvfs;
- unsigned int old_freq, new_freq;
+ unsigned int new_freq = s3c64xx_freq_table[index].frequency;
int ret;
+#ifdef CONFIG_REGULATOR
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq;
+
old_freq = clk_get_rate(policy->clk) / 1000;
- new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
-#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 146f97068022..5fb5228f6bf1 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -72,6 +72,7 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
*/
if (use_osi) {
pd->power_off = psci_pd_power_off;
+ pd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 173ddcac540a..8fe5e1b47ef9 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -41,11 +41,7 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
- * supplied by the driver to the scheduler tick period length or to infinity if
- * the tick period length is less than the target residency of that state. In
- * the latter case, the governor also counts events with the measured idle
- * duration between the tick period length and the target residency of the
- * deepest idle state.
+ * supplied by the driver to infinity.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU
@@ -60,6 +56,10 @@
* into by the sleep length (these events are also referred to as "intercepts"
* below).
*
+ * The governor also counts "intercepts" with the measured idle duration below
+ * the tick period length and uses this information when deciding whether or not
+ * to stop the scheduler tick.
+ *
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
@@ -106,6 +106,12 @@
#include "gov.h"
/*
+ * Idle state exit latency threshold used for deciding whether or not to check
+ * the time till the closest expected timer event.
+ */
+#define LATENCY_THRESHOLD_NS (RESIDENCY_THRESHOLD_NS / 2)
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -124,18 +130,20 @@ struct teo_bin {
/**
* struct teo_cpu - CPU data used by the TEO cpuidle governor.
- * @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @tick_hits: Number of "hits" after TICK_NSEC.
+ * @tick_intercepts: "Intercepts" before TICK_NSEC.
+ * @short_idles: Wakeups after short idle periods.
+ * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
*/
struct teo_cpu {
- s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- unsigned int tick_hits;
+ unsigned int tick_intercepts;
+ unsigned int short_idles;
+ bool artificial_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -152,23 +160,17 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
s64 target_residency_ns;
u64 measured_ns;
- if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+
+ if (cpu_data->artificial_wakeup) {
/*
- * One of the safety nets has triggered or the wakeup was close
- * enough to the closest timer event expected at the idle state
- * selection time to be discarded.
+ * If one of the safety nets has triggered, assume that this
+ * might have been a long sleep.
*/
measured_ns = U64_MAX;
} else {
u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- /*
- * The computations below are to determine whether or not the
- * (saved) time till the next timer event and the measured idle
- * duration fall into the same "bin", so use last_residency_ns
- * for that instead of time_span_ns which includes the cpuidle
- * overhead.
- */
measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
@@ -176,14 +178,16 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_ns >= lat_ns)
+ if (measured_ns >= lat_ns) {
measured_ns -= lat_ns / 2;
- else
+ if (measured_ns < RESIDENCY_THRESHOLD_NS)
+ cpu_data->short_idles += PULSE;
+ } else {
measured_ns /= 2;
+ cpu_data->short_idles += PULSE;
+ }
}
- cpu_data->total = 0;
-
/*
* Decay the "hits" and "intercepts" metrics for all of the bins and
* find the bins that the sleep length and the measured idle duration
@@ -195,8 +199,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
bin->hits -= bin->hits >> DECAY_SHIFT;
bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
- cpu_data->total += bin->hits + bin->intercepts;
-
target_residency_ns = drv->states[i].target_residency_ns;
if (target_residency_ns <= cpu_data->sleep_length_ns) {
@@ -206,38 +208,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- /*
- * If the deepest state's target residency is below the tick length,
- * make a record of it to help teo_select() decide whether or not
- * to stop the tick. This effectively adds an extra hits-only bin
- * beyond the last state-related one.
- */
- if (target_residency_ns < TICK_NSEC) {
- cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;
-
- cpu_data->total += cpu_data->tick_hits;
-
- if (TICK_NSEC <= cpu_data->sleep_length_ns) {
- idx_timer = drv->state_count;
- if (TICK_NSEC <= measured_ns) {
- cpu_data->tick_hits += PULSE;
- goto end;
- }
- }
- }
-
+ cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration)
+ if (idx_timer == idx_duration) {
cpu_data->state_bins[idx_timer].hits += PULSE;
- else
+ } else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
+ if (TICK_NSEC <= measured_ns)
+ cpu_data->tick_intercepts += PULSE;
+ }
-end:
+ cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
cpu_data->total += PULSE;
}
@@ -285,14 +271,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
- unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -301,10 +285,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
dev->last_state_idx = -1;
}
- cpu_data->time_span_ns = local_clock();
/*
- * Set the expected sleep length to infinity in case of an early
- * return.
+ * Set the sleep length to infinity in case the invocation of
+ * tick_nohz_get_sleep_length() below is skipped, in which case it won't
+ * be known whether or not the subsequent wakeup is caused by a timer.
+ * It is generally fine to count the wakeup as an intercept then, except
+ * when the CPU is mostly woken up by timers, in which case opportunities
+ * to ask for a deeper idle state while no imminent timers are scheduled
+ * may be missed.
*/
cpu_data->sleep_length_ns = KTIME_MAX;
@@ -360,17 +348,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
- tick_intercept_sum = intercept_sum +
- cpu_data->state_bins[drv->state_count-1].intercepts;
-
/*
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states a shallower idle state is likely to be a
+ * all of the deeper states, a shallower idle state is likely to be a
* better choice.
*/
- prev_intercept_idx = idx;
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
@@ -396,41 +380,38 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* first enabled state that is deep enough.
*/
if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable)
+ !dev->states_usage[i].disable) {
idx = i;
- else
- idx = first_suitable_idx;
-
+ break;
+ }
+ idx = first_suitable_idx;
break;
}
if (dev->states_usage[i].disable)
continue;
- if (!teo_state_ok(i, drv)) {
+ if (teo_state_ok(i, drv)) {
/*
- * The current state is too shallow, but if an
- * alternative candidate state has been found,
- * it may still turn out to be a better choice.
+ * The current state is deep enough, but still
+ * there may be a better one.
*/
- if (first_suitable_idx != idx)
- continue;
-
- break;
+ first_suitable_idx = i;
+ continue;
}
- first_suitable_idx = i;
+ /*
+ * The current state is too shallow, so if no suitable
+ * states other than the initial candidate have been
+ * found, give up (the remaining states to check are
+ * shallower still), but otherwise the first suitable
+ * state other than the initial candidate may turn out
+ * to be preferable.
+ */
+ if (first_suitable_idx == idx)
+ break;
}
}
- if (!idx && prev_intercept_idx) {
- /*
- * We have to query the sleep length here otherwise we don't
- * know after wakeup if our guess was correct.
- */
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- cpu_data->sleep_length_ns = duration_ns;
- goto out_tick;
- }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -440,24 +421,39 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * Skip the timers check if state 0 is the current candidate one,
- * because an immediate non-timer wakeup is expected in that case.
+ * If either the candidate state is state 0 or its target residency is
+ * low enough, there is basically nothing more to do, but if the sleep
+ * length is not updated, the subsequent wakeup will be counted as an
+ * "intercept" which may be problematic in the cases when timer wakeups
+ * are dominant. Namely, it may effectively prevent deeper idle states
+ * from being selected at one point even if no imminent timers are
+ * scheduled.
+ *
+ * However, frequent timers in the RESIDENCY_THRESHOLD_NS range on one
+ * CPU are unlikely (user space has a default 50 us slack value for
+ * hrtimers and there are relatively few timers with a lower deadline
+ * value in the kernel), and even if they did happen, the potential
+ * benefit from using a deep idle state in that case would be
+ * questionable anyway for latency reasons. Thus if the measured idle
+ * duration falls into that range in the majority of cases, assume
+ * non-timer wakeups to be dominant and skip updating the sleep length
+ * to reduce latency.
+ *
+ * Also, if the latency constraint is sufficiently low, it will force
+ * shallow idle states regardless of the wakeup type, so the sleep
+ * length need not be known in that case.
*/
- if (!idx)
- goto out_tick;
-
- /*
- * If state 0 is a polling one, check if the target residency of
- * the current candidate state is low enough and skip the timers
- * check in that case too.
- */
- if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
+ if ((!idx || drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS) &&
+ (2 * cpu_data->short_idles >= cpu_data->total ||
+ latency_req < LATENCY_THRESHOLD_NS))
goto out_tick;
duration_ns = tick_nohz_get_sleep_length(&delta_tick);
cpu_data->sleep_length_ns = duration_ns;
+ if (!idx)
+ goto out_tick;
+
/*
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
@@ -474,7 +470,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* total wakeup events, do not stop the tick.
*/
if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
+ cpu_data->tick_intercepts > cpu_data->total / 2 + cpu_data->total / 8)
duration_ns = TICK_NSEC / 2;
end:
@@ -511,17 +507,16 @@ static void teo_reflect(struct cpuidle_device *dev, int state)
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
dev->last_state_idx = state;
- /*
- * If the wakeup was not "natural", but triggered by one of the safety
- * nets, assume that the CPU might have been idle for the entire sleep
- * length time.
- */
if (dev->poll_time_limit ||
(tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ /*
+ * The wakeup was not "genuine", but triggered by one of the
+ * safety nets.
+ */
dev->poll_time_limit = false;
- cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ cpu_data->artificial_wakeup = true;
} else {
- cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ cpu_data->artificial_wakeup = false;
}
}
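Every counter the teo changes above touch (the per-bin hits and intercepts, tick_intercepts, short_idles and total) is maintained with the same exponential-decay update: age the value by 1/2^DECAY_SHIFT, then add PULSE when the corresponding event occurred. An illustrative helper, written as if it lived in teo.c:

static unsigned int decay_and_pulse(unsigned int metric, bool event)
{
	/* older contributions shrink geometrically, so recent events dominate */
	metric -= metric >> DECAY_SHIFT;
	if (event)
		metric += PULSE;

	return metric;
}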
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0a9cdd31cbd9..19ab145f912e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -200,23 +200,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..fef18ffdb128 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
static const struct file_operations dbc_fops = {
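The ioctl rework above relies on the scope-based lock guard from <linux/cleanup.h>: after guard(mutex)(...) runs, the mutex is released automatically whenever the function returns, which is what lets the copy_from_user()/copy_to_user() failure paths return directly instead of jumping to an unlock label. A minimal sketch with illustrative names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static long example_locked_op(bool bad_input)
{
	guard(mutex)(&example_lock);	/* dropped automatically on every return */

	if (bad_input)
		return -EINVAL;

	return 0;
}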
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 96fde9437b4b..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -1209,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1396,6 +1395,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1428,6 +1438,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 19c1b5d3c954..d3f5d108b898 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -276,7 +282,7 @@ enum qm_alg_type {
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -333,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -396,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -501,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -654,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -688,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -855,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -1052,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1540,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1562,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1630,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1662,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+ dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1692,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1700,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1711,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1741,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1842,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1864,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
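Both hw_ops tables above exchange the same 64-bit inter-function message: the qm_ifc_cmd value sits in the low 32 bits and the optional payload in the high 32 bits, per the QM_IFC_CMD_MASK and QM_IFC_DATA_SHIFT definitions added earlier in this file. A small pack/unpack sketch, written as if it lived in qm.c; the helper names are assumptions:

static u64 example_ifc_pack(enum qm_ifc_cmd cmd, u32 data)
{
	return (u64)data << QM_IFC_DATA_SHIFT | cmd;
}

static void example_ifc_unpack(u64 msg, enum qm_ifc_cmd *cmd, u32 *data)
{
	*cmd = msg & QM_IFC_CMD_MASK;
	if (data)
		*data = msg >> QM_IFC_DATA_SHIFT;
}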
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2156,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2475,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2843,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3607,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3617,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -4109,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4122,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4137,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4298,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4527,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error, If it occurs, need to
* wait for soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4675,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4709,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4732,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4749,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
* Whether message is got successfully,
* VF needs to ack PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4795,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
* Get the msg from source by sending mailbox. Whether message is got
* successfully, destination needs to ack source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4822,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -5167,6 +5277,20 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
return qm_pre_store_caps(qm);
}
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
+}
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5186,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5205,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5247,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5461,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5598,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 356188bee6fb..4b9970230822 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ bool fallback;
};
/* SEC request of Crypto */
@@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ae9ebbb4103d..66bc07da9eb6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ dev_err(dev, "AUTH key length error!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1202,27 +1195,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
* the counter must be set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
+ cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ /* Hardware does not handle cases where authsize is less than 4 bytes */
+ if (unlikely(sz < MIN_MAC_LEN)) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
+ req->cryptlen <= authsize))) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
}
@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 8ec5333bb5aa..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1180,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* A line break may be required */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
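+
+/*
+ * Worked example (hypothetical contents, for illustration only): if
+ * qm->uacce->algs already held "deflate", hisi_dae_set_alg() above would
+ * extend it to "deflate\nhashagg"; if it were empty, the result would simply
+ * be "hashagg".  The comparison against QM_DEV_ALG_MAX_LEN reserves one extra
+ * byte for that line-break separator.
+ */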
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear any pending dae hw error source */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 2fecf346c3c9..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 9239b251c2d7..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
u32 err_status;
+ /* Get the latest device hardware error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
@@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
}
- hisi_zip_clear_hw_err_status(qm, err_status);
}
- return ACC_ERR_RECOVERED;
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
}
static void hisi_zip_err_info_init(struct hisi_qm *qm)
@@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
.get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1264,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1301,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 9e557649e5d0..c3776b0de51d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 449c6d3ab2db..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index c8241f5a0a26..f20ae7e35a0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -473,22 +473,6 @@ unlock_and_exit:
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
-static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
-{
- char alpha;
- u8 index;
- int ret;
-
- ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
- if (ret != 1)
- return -EINVAL;
-
- index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
- *rp_id = index;
-
- return 0;
-}
-
static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
unsigned int new_rp_num,
unsigned int rp_regs_index)
@@ -611,18 +595,11 @@ static int tl_rp_data_show(struct seq_file *s, void *unused)
{
struct adf_accel_dev *accel_dev = s->private;
u8 rp_regs_index;
- u8 max_rp;
- int ret;
if (!accel_dev)
return -EINVAL;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
- ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- return ret;
- }
+ rp_regs_index = debugfs_get_aux_num(s->file);
return tl_print_rp_data(accel_dev, s, rp_regs_index);
}
@@ -635,7 +612,6 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
struct adf_telemetry *telemetry;
unsigned int new_rp_num;
u8 rp_regs_index;
- u8 max_rp;
int ret;
accel_dev = seq_f->private;
@@ -643,15 +619,10 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
return -EINVAL;
telemetry = accel_dev->telemetry;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
mutex_lock(&telemetry->wr_lock);
- ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- goto unlock_and_exit;
- }
+ rp_regs_index = debugfs_get_aux_num(file);
ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
if (ret)
@@ -689,7 +660,8 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
for (i = 0; i < max_rp; i++) {
snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
ADF_TL_DBG_RP_ALPHA_INDEX(i));
- debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
+ &tl_rp_data_fops);
}
}
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index 14c302d2db79..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at least
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
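To make the descriptor-length rule above concrete, here is a minimal, illustrative sketch in plain C (an editorial addition, not code from the driver); the helper name is invented, and it assumes the overall request is block-aligned, as it is for the ECB/CBC paths in this file:

/*
 * Sketch only: a sequence of descriptor lengths is acceptable when every
 * descriptor carries at least one full cipher block and no partial block
 * is left over at the end, so a partial block can never spill into the
 * final descriptor.
 */
#include <stdbool.h>
#include <stddef.h>

static bool desc_lengths_valid(const unsigned int *lens, size_t n,
                               unsigned int block_size)
{
        unsigned int total = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (lens[i] < block_size)   /* each length >= block size */
                        return false;
                total += lens[i];
        }
        return (total % block_size) == 0;   /* stream ends on a block boundary */
}

/*
 * With an 8-byte block size:
 *   {0xe, 0xa, 0x8} -> valid
 *   {0xe, 0xb, 0x7} -> invalid (final descriptor shorter than a block)
 */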
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_info("queue_alloc() of %d fails\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
- iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* DES: ECB and CBC are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* 3DES: ECB and CBC are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* AES: ECB, CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_encrypt_chaining,
- },
- },
-
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
- .hash_init = (u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
- .hash_init = (u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
- .hash_init = (u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
- .hash_init = (u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
- mutex_unlock(&spu_lock);
- return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
- sprintf(p->irq_name, "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
- int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
- id = mdesc_get_property(mdesc, tgt, "id", NULL);
- if (table[*id] != NULL) {
- dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
- dev->dev.of_node);
- return -EINVAL;
- }
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
- printk("NO 'ino'\n");
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
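A rough usage sketch of the calling convention documented above, modeled on how the driver's spu_queue_register_workfn() and spu_queue_destroy() use it (editorial illustration only; the function name is invented and error handling is reduced to a single -EINVAL mapping):

/*
 * Sketch: configure a 64-byte-aligned, power-of-two-sized CWQ, then
 * unconfigure it again by passing the handle with a zero entry count.
 */
static int example_configure_cwq(void *q, unsigned long *qhandle)
{
        unsigned long hv_ret;

        hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q),
                                 CWQ_NUM_ENTRIES, qhandle);
        if (hv_ret != HV_EOK)
                return -EINVAL;

        /* ... submit work, advance the tail, etc. ... */

        /* unconfigure: queue handle in ARG1, zero entries */
        hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, *qhandle, 0, qhandle);
        return (hv_ret != HV_EOK) ? -EINVAL : 0;
}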
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e27b84616743..551dd32a8db0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..41d67780fd45 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 498cbd585ed1..a099460d5f21 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -40,8 +39,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -152,8 +149,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e228a31fe28d..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ ops->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9d130592cc0a..d734c9a56786 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..0b5cdd5676b1 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 28edd5822486..50e6a45b30ba 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -703,7 +703,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int commit_reap(struct device *dev, const void *data)
+static int commit_reap(struct device *dev, void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 9d58ab9d33c5..013b869b66cb 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -252,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
}
/* require dvsec ranges to be covered by a locked platform window */
-static int dvsec_range_allowed(struct device *dev, void *arg)
+static int dvsec_range_allowed(struct device *dev, const void *arg)
{
- struct range *dev_range = arg;
+ const struct range *dev_range = arg;
struct cxl_decoder *cxld;
if (!is_root_decoder(dev))
@@ -291,11 +291,11 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct device *dev = cxlds->dev;
int hdm_count, rc, i, ranges = 0;
int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index b3378d3f6acb..8853415c106a 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -51,17 +51,6 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL");
-bool is_cxl_nvdimm_bridge(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_bridge_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, "CXL");
-
-static int match_nvdimm_bridge(struct device *dev, void *data)
-{
- return is_cxl_nvdimm_bridge(dev);
-}
-
/**
* cxl_find_nvdimm_bridge() - find a bridge device relative to a port
* @port: any descendant port of an nvdimm-bridge associated
@@ -75,7 +64,9 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
if (!cxl_root)
return NULL;
- dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
+ dev = device_find_child(&cxl_root->port.dev,
+ &cxl_nvdimm_bridge_type,
+ device_match_type);
if (!dev)
return NULL;
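The pmem.c hunk drops the open-coded is_cxl_nvdimm_bridge()/match_nvdimm_bridge() pair in favour of the generic device_match_type() helper, so the bridge lookup reduces to comparing each child's device_type against cxl_nvdimm_bridge_type. The shape of that pattern, sketched with hypothetical names:

/* Hedged sketch: find a child device by its device_type; my_* names are illustrative. */
#include <linux/device.h>

static const struct device_type my_child_type = {
	.name = "my-child",
};

static struct device *my_find_child(struct device *parent)
{
	/*
	 * device_match_type() compares dev->type against the pointer passed
	 * as data, replacing a one-off match callback.  The reference
	 * returned by device_find_child() must still be dropped with
	 * put_device() once the caller is done with it.
	 */
	return device_find_child(parent, &my_child_type, device_match_type);
}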
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index b98b1ccffd1c..e8d11a988fd9 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -778,7 +778,7 @@ out:
return rc;
}
-static int check_commit_order(struct device *dev, const void *data)
+static int check_commit_order(struct device *dev, void *data)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
@@ -792,7 +792,7 @@ static int check_commit_order(struct device *dev, const void *data)
return 0;
}
-static int match_free_decoder(struct device *dev, void *data)
+static int match_free_decoder(struct device *dev, const void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
@@ -824,9 +824,9 @@ static int match_free_decoder(struct device *dev, void *data)
return 1;
}
-static int match_auto_decoder(struct device *dev, void *data)
+static int match_auto_decoder(struct device *dev, const void *data)
{
- struct cxl_region_params *p = data;
+ const struct cxl_region_params *p = data;
struct cxl_decoder *cxld;
struct range *r;
@@ -1733,10 +1733,12 @@ static struct cxl_port *next_port(struct cxl_port *port)
return port->parent_dport->port;
}
-static int match_switch_decoder_by_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev,
+ const void *data)
{
struct cxl_switch_decoder *cxlsd;
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
+
if (!is_switch_decoder(dev))
return 0;
@@ -3187,9 +3189,10 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev, void *data)
+static int match_root_decoder_by_range(struct device *dev,
+ const void *data)
{
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
struct cxl_root_decoder *cxlrd;
if (!is_root_decoder(dev))
@@ -3200,11 +3203,11 @@ static int match_root_decoder_by_range(struct device *dev, void *data)
return range_contains(r1, r2);
}
-static int match_region_by_range(struct device *dev, void *data)
+static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
struct cxl_region *cxlr;
- struct range *r = data;
+ const struct range *r = data;
int rc = 0;
if (!is_cxl_region(dev))
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 59cb35b40c7e..117c2e94c761 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -289,21 +289,17 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
return true;
}
-/**
- * cxl_find_regblock_instance() - Locate a register block by type / index
- * @pdev: The CXL PCI device to enumerate.
- * @type: Register Block Indicator id
- * @map: Enumeration output, clobbered on error
- * @index: Index into which particular instance of a regblock wanted in the
- * order found in register locator DVSEC.
- *
- * Return: 0 if register block enumerated, negative error code otherwise
+/*
+ * __cxl_find_regblock_instance() - Locate a register block or count instances by type / index
+ * Use CXL_INSTANCES_COUNT for @index if counting instances.
*
- * A CXL DVSEC may point to one or more register blocks, search for them
- * by @type and @index.
+ * __cxl_find_regblock_instance() may return:
+ * 0 - if register block enumerated.
+ * >= 0 - if counting instances.
+ * < 0 - error code otherwise.
*/
-int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index)
+static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index)
{
u32 regloc_size, regblocks;
int instance = 0;
@@ -342,8 +338,30 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
}
map->resource = CXL_RESOURCE_NONE;
+ if (index == CXL_INSTANCES_COUNT)
+ return instance;
+
return -ENODEV;
}
+
+/**
+ * cxl_find_regblock_instance() - Locate a register block by type / index
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ * @map: Enumeration output, clobbered on error
+ * @index: Index into which particular instance of a regblock wanted in the
+ * order found in register locator DVSEC.
+ *
+ * Return: 0 if register block enumerated, negative error code otherwise
+ *
+ * A CXL DVSEC may point to one or more register blocks, search for them
+ * by @type and @index.
+ */
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, unsigned int index)
+{
+ return __cxl_find_regblock_instance(pdev, type, map, index);
+}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
/**
@@ -360,7 +378,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
- return cxl_find_regblock_instance(pdev, type, map, 0);
+ return __cxl_find_regblock_instance(pdev, type, map, 0);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
@@ -371,19 +389,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
*
* Some regblocks may be repeated. Count how many instances.
*
- * Return: count of matching regblocks.
+ * Return: non-negative count of matching regblocks, negative error code otherwise.
*/
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
{
struct cxl_register_map map;
- int rc, count = 0;
- while (1) {
- rc = cxl_find_regblock_instance(pdev, type, &map, count);
- if (rc)
- return count;
- count++;
- }
+ return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
}
EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL");
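After this regs.c rework, lookup and counting share one walker: passing the CXL_INSTANCES_COUNT sentinel as @index makes __cxl_find_regblock_instance() return how many blocks of the requested type it saw, so cxl_count_regblock() can now also propagate negative errors (for example when the Register Locator DVSEC is absent). Callers therefore have to check for a negative count before iterating, as the cxl_pci_probe() hunk later in this diff does; roughly:

/*
 * Hedged sketch of the caller-side handling; it mirrors the cxl_pci_probe()
 * change further down rather than reproducing it verbatim.
 */
static int enumerate_pmu_blocks(struct pci_dev *pdev)
{
	struct cxl_register_map map;
	int count, rc;
	unsigned int i;

	count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
	if (count < 0)
		return count;	/* enumeration itself failed */

	for (i = 0; i < count; i++) {
		rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
		if (rc)
			return rc;
		/* ... map and register the i-th PMU instance ... */
	}

	return 0;
}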
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 8389a94adb1a..cea706b683b5 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -166,11 +166,13 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
+#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
+ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
)
/*
@@ -197,7 +199,8 @@ TRACE_EVENT(cxl_overflow,
__field(u16, hdr_related_handle) \
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
- __field(u8, hdr_maint_op_class)
+ __field(u8, hdr_maint_op_class) \
+ __field(u8, hdr_maint_op_sub_class)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -209,17 +212,19 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
- __entry->hdr_maint_op_class = (hdr).maint_op_class
+ __entry->hdr_maint_op_class = (hdr).maint_op_class; \
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
+ __entry->hdr_maint_op_sub_class, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -264,8 +269,30 @@ TRACE_EVENT(cxl_generic_event,
)
/*
+ * Component ID Format
+ * CXL 3.1 section 8.2.9.2.1; Table 8-44
+ */
+#define CXL_PLDM_COMPONENT_ID_ENTITY_VALID BIT(0)
+#define CXL_PLDM_COMPONENT_ID_RES_VALID BIT(1)
+
+#define show_comp_id_pldm_flags(flags) __print_flags(flags, " | ", \
+ { CXL_PLDM_COMPONENT_ID_ENTITY_VALID, "PLDM Entity ID" }, \
+ { CXL_PLDM_COMPONENT_ID_RES_VALID, "Resource ID" } \
+)
+
+#define show_pldm_entity_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_ENTITY_VALID) ? \
+ __print_hex(&comp_id[1], 6) : "0x00" : "0x00"
+
+#define show_pldm_resource_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_RES_VALID) ? \
+ __print_hex(&comp_id[7], 4) : "0x00" : "0x00"
+
+/*
* General Media Event Record - GMER
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
*/
#define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1)
@@ -279,10 +306,18 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
-#define show_gmer_mem_event_type(type) __print_symbolic(type, \
- { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x03
+#define CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x04
+#define CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_gmer_mem_event_type(type) __print_symbolic(type, \
+ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_GMER_TRANS_UNKNOWN 0x00
@@ -292,6 +327,8 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_ECS 0x07
+#define CXL_GMER_TRANS_MEDIA_INITIALIZATION 0x08
#define show_trans_type(type) __print_symbolic(type, \
{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
@@ -299,18 +336,57 @@ TRACE_EVENT(cxl_generic_event,
{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
- { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_ECS, "Internal Media Error Check Scrub" }, \
+ { CXL_GMER_TRANS_MEDIA_INITIALIZATION, "Media Initialization" } \
)
#define CXL_GMER_VALID_CHANNEL BIT(0)
#define CXL_GMER_VALID_RANK BIT(1)
#define CXL_GMER_VALID_DEVICE BIT(2)
#define CXL_GMER_VALID_COMPONENT BIT(3)
+#define CXL_GMER_VALID_COMPONENT_ID_FORMAT BIT(4)
#define show_valid_flags(flags) __print_flags(flags, "|", \
{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
{ CXL_GMER_VALID_RANK, "RANK" }, \
{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
- { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
+ { CXL_GMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_GMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+
+#define CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA BIT(0)
+#define CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED BIT(1)
+#define show_cme_threshold_ev_flags(flags) __print_flags(flags, "|", \
+ { \
+ CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA, \
+ "Corrected Memory Errors in Multiple Media Components" \
+ }, { \
+ CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED, \
+ "Exceeded Programmable Threshold" \
+ } \
+)
+
+#define CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR 0x01
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR 0x02
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR 0x03
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR 0x04
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR 0x05
+#define show_mem_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR, "Internal Datapath Error" }, \
+ { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR, \
+ "Media Link Command Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR, \
+ "Media Link Control Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR, \
+ "Media Link Data Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR, "Media Link CRC Error" \
+ } \
)
TRACE_EVENT(cxl_general_media,
@@ -336,6 +412,9 @@ TRACE_EVENT(cxl_general_media,
__field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __field(u32, cme_count)
+ __field(u8, sub_type)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -350,6 +429,7 @@ TRACE_EVENT(cxl_general_media,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->channel = rec->media_hdr.channel;
__entry->rank = rec->media_hdr.rank;
@@ -365,27 +445,40 @@ TRACE_EVENT(cxl_general_media,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
- "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "descriptor='%s' type='%s' sub_type='%s' " \
+ "transaction_type='%s' channel=%u rank=%u " \
+ "device=%x validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_gmer_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
- __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
show_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
/*
* DRAM Event Record - DER
*
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
*/
/*
* DRAM Event Record defines many fields the same as the General Media Event
@@ -395,11 +488,17 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
-#define show_dram_mem_event_type(type) __print_symbolic(type, \
- { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
- { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x04
+#define CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_dram_mem_event_type(type) __print_symbolic(type, \
+ { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_DER_VALID_CHANNEL BIT(0)
@@ -410,15 +509,21 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_VALID_ROW BIT(5)
#define CXL_DER_VALID_COLUMN BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK BIT(7)
-#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
- { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
- { CXL_DER_VALID_RANK, "RANK" }, \
- { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
- { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
- { CXL_DER_VALID_BANK, "BANK" }, \
- { CXL_DER_VALID_ROW, "ROW" }, \
- { CXL_DER_VALID_COLUMN, "COLUMN" }, \
- { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
+#define CXL_DER_VALID_COMPONENT BIT(8)
+#define CXL_DER_VALID_COMPONENT_ID_FORMAT BIT(9)
+#define CXL_DER_VALID_SUB_CHANNEL BIT(10)
+#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_DER_VALID_RANK, "RANK" }, \
+ { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_DER_VALID_BANK, "BANK" }, \
+ { CXL_DER_VALID_ROW, "ROW" }, \
+ { CXL_DER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" }, \
+ { CXL_DER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_DER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" }, \
+ { CXL_DER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
)
TRACE_EVENT(cxl_dram,
@@ -447,6 +552,12 @@ TRACE_EVENT(cxl_dram,
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ /* Following are out of order to pack trace record */
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u32, cvme_count)
+ __field(u8, sub_type)
+ __field(u8, sub_channel)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -460,6 +571,7 @@ TRACE_EVENT(cxl_dram,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
__entry->channel = rec->media_hdr.channel;
@@ -479,30 +591,47 @@ TRACE_EVENT(cxl_dram,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ __entry->sub_channel = rec->sub_channel;
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
),
- CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
"validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_dram_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
show_dram_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
+ __entry->cvme_count
)
);
/*
* Memory Module Event Record - MMER
*
- * CXL res 3.0 section 8.2.9.2.1.3; Table 8-45
+ * CXL res 3.1 section 8.2.9.2.1.3; Table 8-47
*/
#define CXL_MMER_HEALTH_STATUS_CHANGE 0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE 0x01
@@ -510,27 +639,35 @@ TRACE_EVENT(cxl_dram,
#define CXL_MMER_TEMP_CHANGE 0x03
#define CXL_MMER_DATA_PATH_ERROR 0x04
#define CXL_MMER_LSA_ERROR 0x05
+#define CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR 0x06
+#define CXL_MMER_MEMORY_MEDIA_FRU_ERROR 0x07
+#define CXL_MMER_POWER_MANAGEMENT_FAULT 0x08
#define show_dev_evt_type(type) __print_symbolic(type, \
{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
- { CXL_MMER_LSA_ERROR, "LSA Error" } \
+ { CXL_MMER_LSA_ERROR, "LSA Error" }, \
+ { CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR, "Unrecoverable Internal Sideband Bus Error" }, \
+ { CXL_MMER_MEMORY_MEDIA_FRU_ERROR, "Memory Media FRU Error" }, \
+ { CXL_MMER_POWER_MANAGEMENT_FAULT, "Power Management Fault" } \
)
/*
* Device Health Information - DHI
*
- * CXL res 3.0 section 8.2.9.8.3.1; Table 8-100
+ * CXL res 3.1 section 8.2.9.9.3.1; Table 8-133
*/
#define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2)
+#define CXL_DHI_HS_MEM_CAPACITY_DEGRADED BIT(3)
#define show_health_status_flags(flags) __print_flags(flags, "|", \
{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
+ { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" }, \
+ { CXL_DHI_HS_MEM_CAPACITY_DEGRADED, "MEM_CAPACITY_DEGRADED" } \
)
#define CXL_DHI_MS_NORMAL 0x00
@@ -584,6 +721,26 @@ TRACE_EVENT(cxl_dram,
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5)
+#define CXL_MMER_VALID_COMPONENT BIT(0)
+#define CXL_MMER_VALID_COMPONENT_ID_FORMAT BIT(1)
+#define show_mem_module_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_MMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+#define CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA 0x01
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA 0x02
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU 0x03
+#define show_dev_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA, "Invalid Config Data" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA, "Unsupported Config Data" }, \
+ { \
+ CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU, \
+ "Unsupported Memory Media FRU" \
+ } \
+)
+
TRACE_EVENT(cxl_memory_module,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
@@ -606,6 +763,9 @@ TRACE_EVENT(cxl_memory_module,
__field(u32, cor_per_err_cnt)
__field(s16, device_temp)
__field(u8, add_status)
+ __field(u8, event_sub_type)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u16, validity_flags)
),
TP_fast_assign(
@@ -614,6 +774,7 @@ TRACE_EVENT(cxl_memory_module,
/* Memory Module Event */
__entry->event_type = rec->event_type;
+ __entry->event_sub_type = rec->event_sub_type;
/* Device Health Info */
__entry->health_status = rec->info.health_status;
@@ -624,13 +785,20 @@ TRACE_EVENT(cxl_memory_module,
__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
__entry->add_status = rec->info.add_status;
+ __entry->validity_flags = get_unaligned_le16(rec->validity_flags);
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
),
- CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
- "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
+ CXL_EVT_TP_printk("event_type='%s' event_sub_type='%s' health_status='%s' " \
+ "media_status='%s' as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
- "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
+ "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u " \
+ "validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
show_dev_evt_type(__entry->event_type),
+ show_dev_event_sub_type(__entry->event_sub_type),
show_health_status_flags(__entry->health_status),
show_media_status(__entry->media_status),
show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
@@ -639,7 +807,14 @@ TRACE_EVENT(cxl_memory_module,
show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
__entry->life_used, __entry->device_temp,
__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
- __entry->cor_per_err_cnt
+ __entry->cor_per_err_cnt,
+ show_mem_module_valid_flags(__entry->validity_flags),
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
)
);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f6015f24ad38..bbbaa0d0a670 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -302,10 +302,11 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
+#define CXL_INSTANCES_COUNT -1
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index);
+ struct cxl_register_map *map, unsigned int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map);
int cxl_setup_regs(struct cxl_register_map *map);
@@ -821,7 +822,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+struct cxl_dev_state;
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -864,7 +866,6 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
-bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 6d94ff4a4f1a..a96e54c6259e 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -907,7 +907,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- int i, rc, pmu_count;
+ int rc, pmu_count;
+ unsigned int i;
bool irq_avail;
/*
@@ -1009,6 +1010,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+ if (pmu_count < 0)
+ return pmu_count;
+
for (i = 0; i < pmu_count; i++) {
struct cxl_pmu_regs pmu_regs;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 4c83f6a22e58..d2bfd1ff5492 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
if (rc < 0)
return rc;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e994d6e0779e..8afea2e23360 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -162,8 +162,8 @@ config DMA_SA11X0
config DMA_SUN4I
tristate "Allwinner A10 DMA SoCs support"
- depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
- default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV
+ default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -740,8 +740,6 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
-source "drivers/dma/ptdma/Kconfig"
-
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5b2a52f4f2ee..19ba465011a6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
index 7d1f51d69675..00d874872a8f 100644
--- a/drivers/dma/amd/Kconfig
+++ b/drivers/dma/amd/Kconfig
@@ -1,4 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+ tristate "AMD AE4DMA Engine"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+ depends on AMD_PTDMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD AE4DMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
+
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfer through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
config AMD_QDMA
tristate "AMD Queue-based DMA"
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
index 37212be9364f..11278c06374d 100644
--- a/drivers/dma/amd/Makefile
+++ b/drivers/dma/amd/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/ae4dma/Makefile b/drivers/dma/amd/ae4dma/Makefile
new file mode 100644
index 000000000000..e918f85a80ec
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
diff --git a/drivers/dma/amd/ae4dma/ae4dma-dev.c b/drivers/dma/amd/ae4dma/ae4dma-dev.c
new file mode 100644
index 000000000000..8de3bef41b58
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-dev.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
+static void ae4_pending_work(struct work_struct *work)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct pt_cmd *cmd;
+ u32 cridx;
+
+ for (;;) {
+ wait_event_interruptible(ae4cmd_q->q_w,
+ ((atomic64_read(&ae4cmd_q->done_cnt)) <
+ atomic64_read(&ae4cmd_q->intr_cnt)));
+
+ atomic64_inc(&ae4cmd_q->done_cnt);
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+ while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+ cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+
+ ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+ ae4cmd_q->q_cmd_count--;
+ ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+ complete_all(&ae4cmd_q->cmp);
+ }
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+}
+
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+ struct ae4_cmd_queue *ae4cmd_q = data;
+ struct pt_cmd_queue *cmd_q;
+ struct pt_device *pt;
+ u32 status;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ pt = cmd_q->pt;
+
+ pt->total_interrupts++;
+ atomic64_inc(&ae4cmd_q->intr_cnt);
+
+ status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+ if (status & BIT(0)) {
+ status &= GENMASK(31, 1);
+ writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+ }
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ int i;
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ if (!ae4cmd_q->pws)
+ break;
+
+ cancel_delayed_work_sync(&ae4cmd_q->p_work);
+ destroy_workqueue(ae4cmd_q->pws);
+ }
+}
+
+int ae4_core_init(struct ae4_device *ae4)
+{
+ struct pt_device *pt = &ae4->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q;
+ int i, ret = 0;
+
+ writel(max_hw_q, pt->io_regs);
+
+ for (i = 0; i < max_hw_q; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ ae4cmd_q->id = ae4->cmd_q_count;
+ ae4->cmd_q_count++;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ cmd_q->pt = pt;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+ dev_name(pt->dev), ae4cmd_q);
+ if (ret)
+ return ret;
+
+ cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+ cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ /* Update the device registers with queue information. */
+ writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+ writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+ INIT_LIST_HEAD(&ae4cmd_q->cmd);
+ init_waitqueue_head(&ae4cmd_q->q_w);
+
+ ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+ if (!ae4cmd_q->pws) {
+ ae4_destroy_work(ae4);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+ queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+ init_completion(&ae4cmd_q->cmp);
+ }
+
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ ae4_destroy_work(ae4);
+ else
+ ptdma_debugfs_setup(pt);
+
+ return ret;
+}
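ae4dma-dev.c sets up one producer/consumer ring per hardware queue: submission bumps tail_wi and mirrors it to AE4_WR_IDX_OFF, the interrupt handler only bumps intr_cnt and wakes the queue, and ae4_pending_work() then retires descriptors until its software read index (dridx) catches up with the read index reported by the hardware. The index arithmetic, reduced to a standalone sketch (plain C, hypothetical ring type, not the driver's structures):

/* Hedged sketch: the modulo ring-index bookkeeping used by the AE4 queues. */
#include <stdint.h>

#define RING_LEN 32	/* stands in for CMD_Q_LEN */

struct ring {
	uint32_t wr_idx;	/* producer: next slot to fill */
	uint32_t rd_idx;	/* consumer: next slot to retire */
};

static int ring_submit(struct ring *r)
{
	uint32_t next = (r->wr_idx + 1) % RING_LEN;

	if (next == r->rd_idx)	/* full: the producer must not lap the consumer */
		return -1;
	r->wr_idx = next;	/* the driver would mirror this to AE4_WR_IDX_OFF */
	return 0;
}

static unsigned int ring_drain(struct ring *r, uint32_t hw_rd_idx)
{
	unsigned int retired = 0;

	while (r->rd_idx != hw_rd_idx) {	/* retire what the hardware consumed */
		r->rd_idx = (r->rd_idx + 1) % RING_LEN;
		retired++;
	}
	return retired;
}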
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
new file mode 100644
index 000000000000..aad0dc4294a3
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+ int i, v, ret;
+
+ pdev = to_pci_dev(dev);
+
+ for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+ ae4_msix->msix_entry[v].entry = v;
+
+ ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+ if (ret != v) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ret;
+
+ } else {
+ ae4_msix->msix_count = ret;
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector;
+ }
+
+ return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+ pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+ ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int bar_mask;
+ int ret = 0;
+
+ ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+ if (!ae4)
+ return -ENOMEM;
+
+ ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+ if (!ae4->ae4_msix)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto ae4_error;
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+ if (ret)
+ goto ae4_error;
+
+ pt = &ae4->pt;
+ pt->dev = dev;
+ pt->ver = AE4_DMA_VERSION;
+
+ pt->io_regs = pcim_iomap_table(pdev)[0];
+ if (!pt->io_regs) {
+ ret = -ENOMEM;
+ goto ae4_error;
+ }
+
+ ret = ae4_get_irqs(ae4);
+ if (ret < 0)
+ goto ae4_error;
+
+ pci_set_master(pdev);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+ dev_set_drvdata(dev, ae4);
+
+ ret = ae4_core_init(ae4);
+ if (ret)
+ goto ae4_error;
+
+ return 0;
+
+ae4_error:
+ ae4_deinit(ae4);
+
+ return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+ struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+ ae4_destroy_work(ae4);
+ ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x14C8), },
+ { PCI_VDEVICE(AMD, 0x14DC), },
+ { PCI_VDEVICE(AMD, 0x149B), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+ .name = "ae4dma",
+ .id_table = ae4_pci_table,
+ .probe = ae4_pci_probe,
+ .remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
new file mode 100644
index 000000000000..265c5d436008
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES 16
+
+#define AE4_DESC_COMPLETED 0x03
+
+#define AE4_MAX_IDX_OFF 0x08
+#define AE4_RD_IDX_OFF 0x0c
+#define AE4_WR_IDX_OFF 0x10
+#define AE4_INTR_STS_OFF 0x14
+#define AE4_Q_BASE_L_OFF 0x18
+#define AE4_Q_BASE_H_OFF 0x1c
+#define AE4_Q_SZ 0x20
+
+#define AE4_DMA_VERSION 4
+#define CMD_AE4_DESC_DW0_VAL 2
+
+struct ae4_msix {
+ int msix_count;
+ struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+ struct ae4_device *ae4;
+ struct pt_cmd_queue cmd_q;
+ struct list_head cmd;
+ /* protect command operations */
+ struct mutex cmd_lock;
+ struct delayed_work p_work;
+ struct workqueue_struct *pws;
+ struct completion cmp;
+ wait_queue_head_t q_w;
+ atomic64_t intr_cnt;
+ atomic64_t done_cnt;
+ u64 q_cmd_count;
+ u32 dridx;
+ u32 tail_wi;
+ u32 id;
+};
+
+union dwou {
+ u32 dw0;
+ struct dword0 {
+ u8 byte0;
+ u8 byte1;
+ u16 timestamp;
+ } dws;
+};
+
+struct dword1 {
+ u8 status;
+ u8 err_code;
+ u16 desc_id;
+};
+
+struct ae4dma_desc {
+ union dwou dwouv;
+ struct dword1 dw1;
+ u32 length;
+ u32 rsvd;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dst_hi;
+ u32 dst_lo;
+};
+
+struct ae4_device {
+ struct pt_device pt;
+ struct ae4_msix *ae4_msix;
+ struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+ unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+ unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
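The descriptor defined in ae4dma.h above is eight 32-bit words: a control/timestamp dword, a status/error/ID dword, the transfer length, a reserved word, and split 64-bit source and destination addresses, i.e. 32 bytes per descriptor. An illustrative compile-time check of that arithmetic (not something the driver itself carries):

/* Hedged sketch: assert the 32-byte layout implied by struct ae4dma_desc. */
#include <linux/build_bug.h>
#include "ae4dma.h"	/* assumed include path for the structure definition */

/* dwouv + dw1 + length + rsvd + src_hi/lo + dst_hi/lo = 8 * 4 bytes */
static_assert(sizeof(struct ae4dma_desc) == 32);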
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/amd/ptdma/Makefile
index ce5410268a9a..ce5410268a9a 100644
--- a/drivers/dma/ptdma/Makefile
+++ b/drivers/dma/amd/ptdma/Makefile
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/amd/ptdma/ptdma-debugfs.c
index c8307d3044a3..c7c90bbf6fd8 100644
--- a/drivers/dma/ptdma/ptdma-debugfs.c
+++ b/drivers/dma/amd/ptdma/ptdma-debugfs.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
/* DebugFS helpers */
#define RI_VERSION_NUM 0x0000003F
@@ -23,11 +24,19 @@
static int pt_debugfs_info_show(struct seq_file *s, void *p)
{
struct pt_device *pt = s->private;
+ struct ae4_device *ae4;
unsigned int regval;
seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
- seq_printf(s, " # Queues: %d\n", 1);
- seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
+ seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
+ } else {
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+ }
regval = ioread32(pt->io_regs + CMD_PT_VERSION);
@@ -55,6 +64,7 @@ static int pt_debugfs_stats_show(struct seq_file *s, void *p)
static int pt_debugfs_queue_show(struct seq_file *s, void *p)
{
struct pt_cmd_queue *cmd_q = s->private;
+ struct pt_device *pt;
unsigned int regval;
if (!cmd_q)
@@ -62,18 +72,24 @@ static int pt_debugfs_queue_show(struct seq_file *s, void *p)
seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
- regval = ioread32(cmd_q->reg_control + 0x000C);
-
- seq_puts(s, " Enabled Interrupts:");
- if (regval & INT_EMPTY_QUEUE)
- seq_puts(s, " EMPTY");
- if (regval & INT_QUEUE_STOPPED)
- seq_puts(s, " STOPPED");
- if (regval & INT_ERROR)
- seq_puts(s, " ERROR");
- if (regval & INT_COMPLETION)
- seq_puts(s, " COMPLETION");
- seq_puts(s, "\n");
+ pt = cmd_q->pt;
+ if (pt->ver == AE4_DMA_VERSION) {
+ regval = readl(cmd_q->reg_control + 0x4);
+ seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+ } else {
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+ }
return 0;
}
@@ -84,8 +100,12 @@ DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
void ptdma_debugfs_setup(struct pt_device *pt)
{
- struct pt_cmd_queue *cmd_q;
struct dentry *debugfs_q_instance;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+ char name[30];
+ int i;
if (!debugfs_initialized())
return;
@@ -96,11 +116,28 @@ void ptdma_debugfs_setup(struct pt_device *pt)
debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
&pt_debugfs_stats_fops);
- cmd_q = &pt->cmd_q;
-
- debugfs_q_instance =
- debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
- debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
- &pt_debugfs_queue_fops);
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+ debugfs_q_instance =
+ debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+ } else {
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+ cmd_q = &pt->cmd_q;
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
}
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/amd/ptdma/ptdma-dev.c
index a2bf13ff18b6..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/amd/ptdma/ptdma-dev.c
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index f79240734807..35c84ec9608b 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -9,9 +9,58 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include "ptdma.h"
-#include "../dmaengine.h"
-#include "../virt-dma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+ "",
+ "ERR 01: INVALID HEADER DW0",
+ "ERR 02: INVALID STATUS",
+ "ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+ "ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+ "ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+ "ERR 06: INVALID ALIGNMENT",
+ "ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+ /* ERR 01 - 07 represents Invalid AE4 errors */
+ if (e <= 7)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+ /* ERR 08 - 15 represents Invalid Descriptor errors */
+ else if (e > 7 && e <= 15)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+ /* ERR 16 - 31 represents Firmware errors */
+ else if (e > 15 && e <= 31)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+ /* ERR 32 - 63 represents Fatal errors */
+ else if (e > 31 && e <= 63)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+ /* ERR 64 - 255 represents PTE errors */
+ else if (e > 63 && e <= 255)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+ else
+ dev_info(d->dev, "Unknown AE4DMA error");
+}
+
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct ae4dma_desc desc;
+ u8 status;
+
+ memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+ status = desc.dw1.status;
+ if (status && status != AE4_DESC_COMPLETED) {
+ cmd_q->cmd_error = desc.dw1.err_code;
+ if (cmd_q->cmd_error)
+ ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+ }
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
@@ -45,7 +94,71 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
kmem_cache_free(pt->dma_desc_cache, desc);
}
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ cmd_q = &ae4cmd_q->cmd_q;
+ } else {
+ cmd_q = &pt->cmd_q;
+ }
+
+ return cmd_q;
+}
+
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+ if (soc) {
+ desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+ desc->dwouv.dw0 &= ~DWORD0_SOC;
+ }
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+ ae4cmd_q->q_cmd_count++;
+ ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+ writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ struct ae4dma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+ desc.dw1.status = 0;
+ desc.dw1.err_code = 0;
+ desc.dw1.desc_id = 0;
+
+ desc.length = pt_engine->src_len;
+
+ desc.src_lo = upper_32_bits(pt_engine->src_dma);
+ desc.src_hi = lower_32_bits(pt_engine->src_dma);
+ desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+ desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+ return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
{
struct pt_passthru_engine *pt_engine;
struct pt_device *pt;
@@ -56,13 +169,18 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)
pt_cmd = &desc->pt_cmd;
pt = pt_cmd->pt;
- cmd_q = &pt->cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
pt_engine = &pt_cmd->passthru;
pt->tdata.cmd = pt_cmd;
/* Execute the command */
- pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+ else
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
return 0;
}
@@ -151,7 +269,7 @@ static void pt_cmd_callback(void *data, int err)
if (!desc)
break;
- ret = pt_dma_start_desc(desc);
+ ret = pt_dma_start_desc(desc, chan);
if (!ret)
break;
@@ -186,7 +304,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
struct pt_dma_desc *desc;
+ struct ae4_device *ae4;
struct pt_cmd *pt_cmd;
desc = pt_alloc_dma_desc(chan, flags);
@@ -194,7 +315,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
return NULL;
pt_cmd = &desc->pt_cmd;
- pt_cmd->pt = chan->pt;
+ pt_cmd->pt = pt;
pt_engine = &pt_cmd->passthru;
pt_cmd->engine = PT_ENGINE_PASSTHRU;
pt_engine->src_dma = src;
@@ -205,6 +326,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
desc->len = len;
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+
return desc;
}
@@ -258,24 +387,43 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0);
}
+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ int i;
+
+ for (i = 0; i < CMD_Q_LEN; i++)
+ ae4_check_status_error(ae4cmd_q, i);
+}
+
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pt_device *pt = to_pt_chan(c)->pt;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_dma_chan *chan = to_pt_chan(c);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_check_status_trans_ae4(pt, cmd_q);
+ else
+ pt_check_status_trans(pt, cmd_q);
- pt_check_status_trans(pt, cmd_q);
return dma_cookie_status(c, cookie, txstate);
}
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_stop_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_stop_queue(cmd_q);
spin_unlock_irqrestore(&chan->vc.lock, flags);
return 0;
@@ -285,10 +433,13 @@ static int pt_resume(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc = NULL;
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_start_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_start_queue(cmd_q);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -302,11 +453,17 @@ static int pt_resume(struct dma_chan *dma_chan)
static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
- struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head);
- iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_stop_queue(cmd_q);
+ else
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -319,14 +476,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
int pt_dmaengine_register(struct pt_device *pt)
{
- struct pt_dma_chan *chan;
struct dma_device *dma_dev = &pt->dma_dev;
- char *cmd_cache_name;
+ struct ae4_cmd_queue *ae4cmd_q = NULL;
+ struct ae4_device *ae4 = NULL;
+ struct pt_dma_chan *chan;
char *desc_cache_name;
- int ret;
+ char *cmd_cache_name;
+ int ret, i;
+
+ if (pt->ver == AE4_DMA_VERSION)
+ ae4 = container_of(pt, struct ae4_device, pt);
+
+ if (ae4)
+ pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+ sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+ else
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
- pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
- GFP_KERNEL);
if (!pt->pt_dma_chan)
return -ENOMEM;
@@ -368,9 +535,6 @@ int pt_dmaengine_register(struct pt_device *pt)
INIT_LIST_HEAD(&dma_dev->channels);
- chan = pt->pt_dma_chan;
- chan->pt = pt;
-
/* Set base and prep routines */
dma_dev->device_free_chan_resources = pt_free_chan_resources;
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -382,8 +546,21 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_terminate_all = pt_terminate_all;
dma_dev->device_synchronize = pt_synchronize;
- chan->vc.desc_free = pt_do_cleanup;
- vchan_init(&chan->vc, dma_dev);
+ if (ae4) {
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ chan = pt->pt_dma_chan + i;
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ chan->id = ae4cmd_q->id;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+ } else {
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
ret = dma_async_device_register(dma_dev);
if (ret)
@@ -399,6 +576,7 @@ err_cache:
return ret;
}
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);
void pt_dmaengine_unregister(struct pt_device *pt)
{
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/amd/ptdma/ptdma-pci.c
index 22739ff0c3c5..22739ff0c3c5 100644
--- a/drivers/dma/ptdma/ptdma-pci.c
+++ b/drivers/dma/amd/ptdma/ptdma-pci.c
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 39bc37268235..0a7939105e51 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -22,7 +22,7 @@
#include <linux/wait.h>
#include <linux/dmapool.h>
-#include "../virt-dma.h"
+#include "../../virt-dma.h"
#define MAX_PT_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -184,6 +184,7 @@ struct pt_dma_desc {
struct pt_dma_chan {
struct virt_dma_chan vc;
struct pt_device *pt;
+ u32 id;
};
struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
unsigned long total_interrupts;
struct pt_tasklet_data tdata;
+ int ver;
};
/*
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
index 66f00ad67351..8fb2d5e1df20 100644
--- a/drivers/dma/amd/qdma/qdma.c
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -283,16 +283,20 @@ static int qdma_check_queue_status(struct qdma_device *qdev,
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
- enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
- QDMA_CTXT_DESC_HW_H2C,
- QDMA_CTXT_DESC_CR_H2C,
- QDMA_CTXT_PFTCH, };
- enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
- QDMA_CTXT_DESC_HW_C2H,
- QDMA_CTXT_DESC_CR_C2H,
- QDMA_CTXT_PFTCH, };
+ static const enum qdma_ctxt_type h2c_types[] = {
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH,
+ };
+ static const enum qdma_ctxt_type c2h_types[] = {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH,
+ };
struct qdma_device *qdev = queue->qdev;
- enum qdma_ctxt_type *type;
+ const enum qdma_ctxt_type *type;
int ret, num, i;
if (queue->dir == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 7ba52dee40a9..20b10c15c696 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -875,6 +875,27 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
return chan;
}
+static int bcm2835_dma_suspend_late(struct device *dev)
+{
+ struct bcm2835_dmadev *od = dev_get_drvdata(dev);
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ void __iomem *chan_base = c->chan_base;
+
+ /* Check if DMA channel is busy */
+ if (readl(chan_base + BCM2835_DMA_ADDR))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops bcm2835_dma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+};
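The late-suspend hook only has to veto suspend while any channel still has an active control block loaded, which is why it merely scans the channel list and returns -EBUSY on the first busy one. A hedged user-space sketch of that walk, with a hypothetical channel array rather than the driver's list, looks like this:

#include <stdio.h>

#define DEMO_EBUSY 16

struct demo_chan {
	unsigned int active_addr;   /* non-zero while a control block is loaded */
};

/* Refuse to suspend while any channel is mid-transfer. */
static int demo_suspend_late(const struct demo_chan *chans, int nr)
{
	for (int i = 0; i < nr; i++)
		if (chans[i].active_addr)
			return -DEMO_EBUSY;
	return 0;
}

int main(void)
{
	struct demo_chan chans[3] = { { 0 }, { 0xdead0000u }, { 0 } };

	printf("suspend_late -> %d\n", demo_suspend_late(chans, 3));
	return 0;
}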
+
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
@@ -1033,6 +1054,7 @@ static struct platform_driver bcm2835_dma_driver = {
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ .pm = pm_ptr(&bcm2835_dma_pm_ops),
},
};
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b7f15ab96855..443b2430466c 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -480,8 +480,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
+ u32 burst = 0;
u16 csr = 0;
- u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -496,16 +496,30 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
- if (fsl_chan->is_multi_fifo) {
- /* set mloff to support multiple fifo */
- burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_maxburst : cfg->dst_maxburst;
- nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
- /* enable DMLOE/SMLOE */
- if (cfg->direction == DMA_MEM_TO_DEV) {
+ /*
+ * If either multi_fifo or a port window size is configured, use a
+ * minor loop offset: bits 29-10 hold the address offset and bits 9-0
+ * tell the DMA how much data to read from addr. Without either of
+ * those, use a major loop reading nbytes (29 bits) from addr.
+ */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->dst_maxburst * 4;
+ if (cfg->dst_port_window_size)
+ burst = cfg->dst_port_window_size * cfg->dst_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
- } else {
+ }
+ } else {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->src_maxburst * 4;
+ if (cfg->src_port_window_size)
+ burst = cfg->src_port_window_size * cfg->src_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
}
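With the branches rewritten, a minor-loop offset is applied only when a burst size is known, and the offset programmed into NBYTES is the negated burst so the engine rewinds the address after each minor loop. The sketch below packs an offset and a byte count the way the comment describes (offset in the upper bits, count in the low ten); the field positions are illustrative stand-ins, not the real TCD layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative split only: offset in bits 29..10, byte count in bits 9..0. */
#define DEMO_MLOFF_SHIFT 10
#define DEMO_MLOFF_MASK  0xFFFFFu          /* 20 bits */
#define DEMO_NBYTES_MASK 0x3FFu            /* 10 bits */

static uint32_t demo_pack_nbytes(int32_t mloff, uint32_t nbytes)
{
	return (((uint32_t)mloff & DEMO_MLOFF_MASK) << DEMO_MLOFF_SHIFT) |
	       (nbytes & DEMO_NBYTES_MASK);
}

int main(void)
{
	uint32_t window = 4, width = 4;        /* e.g. dst_port_window_size * dst_addr_width */
	int32_t burst = (int32_t)(window * width);

	/* A negative offset rewinds the address after every minor loop. */
	printf("nbytes word = 0x%08x\n", demo_pack_nbytes(-burst, 16));
	return 0;
}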
@@ -623,11 +637,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ if (fsl_chan->cfg.dst_port_window_size)
+ doff = fsl_chan->cfg.dst_addr_width;
} else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ if (fsl_chan->cfg.src_port_window_size)
+ soff = fsl_chan->cfg.src_addr_width;
} else {
/* DMA_DEV_TO_DEV */
src_addr = fsl_chan->cfg.src_addr;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index fe8f103d4a63..10a5565ddfd7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -68,6 +68,8 @@
#define EDMA_V3_CH_CSR_EEI BIT(2)
#define EDMA_V3_CH_CSR_DONE BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+#define EDMA_V3_CH_ES_ERR BIT(31)
+#define EDMA_V3_MP_ES_VLD BIT(31)
enum fsl_edma_pm_state {
RUNNING = 0,
@@ -241,6 +243,7 @@ struct fsl_edma_engine {
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
int txirq;
+ int txirq_16_31;
int errirq;
bool big_endian;
struct edma_regs regs;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 1a613236b3e4..f989b6c9c0a9 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -3,10 +3,11 @@
* drivers/dma/fsl-edma.c
*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2024 NXP
*
* Driver for the Freescale eDMA engine with flexible channel multiplexing
* capability for DMA request sources. The eDMA block can be found on some
- * Vybrid and Layerscape SoCs.
+ * Vybrid, Layerscape and S32G SoCs.
*/
#include <dt-bindings/dma/fsl-edma.h>
@@ -72,6 +73,60 @@ static irqreturn_t fsl_edma2_tx_handler(int irq, void *devi_id)
return fsl_edma_tx_handler(irq, fsl_chan->edma);
}
+static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
+ u8 start, u8 end)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct fsl_edma_chan *chan;
+ int i;
+
+ end = min(end, fsl_edma->n_chans);
+
+ for (i = start; i < end; i++) {
+ chan = &fsl_edma->chans[i];
+
+ fsl_edma3_tx_handler(irq, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
+}
+
+static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
+}
+
+static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct edma_regs *regs = &fsl_edma->regs;
+ unsigned int err, ch, ch_es;
+ struct fsl_edma_chan *chan;
+
+ err = edma_readl(fsl_edma, regs->es);
+ if (!(err & EDMA_V3_MP_ES_VLD))
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ chan = &fsl_edma->chans[ch];
+
+ ch_es = edma_readl_chreg(chan, ch_es);
+ if (!(ch_es & EDMA_V3_CH_ES_ERR))
+ continue;
+
+ edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
+ fsl_edma_disable_request(chan);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+
+ return IRQ_HANDLED;
+}
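The ORed error handler works in two stages: return IRQ_NONE if the management error-status valid bit is clear, otherwise scan every channel and acknowledge/disable only the ones whose channel error-status reports an error. A plain-C model of that two-level scan, using invented bit names, is sketched below.

#include <stdint.h>
#include <stdio.h>

#define DEMO_SUMMARY_VALID (1u << 31)      /* modeled on EDMA_V3_MP_ES_VLD */
#define DEMO_CHAN_ERR      (1u << 31)      /* modeled on EDMA_V3_CH_ES_ERR */

/* Two-level check: a global "any error" bit first, then per-channel flags. */
static int demo_handle_errors(uint32_t summary, uint32_t *chan_es, int nr)
{
	int handled = 0;

	if (!(summary & DEMO_SUMMARY_VALID))
		return 0;                      /* not our interrupt: IRQ_NONE */

	for (int i = 0; i < nr; i++) {
		if (!(chan_es[i] & DEMO_CHAN_ERR))
			continue;
		chan_es[i] &= ~DEMO_CHAN_ERR;  /* write-to-clear in the real hardware */
		handled++;
	}
	return handled;
}

int main(void)
{
	uint32_t es[4] = { 0, DEMO_CHAN_ERR, 0, DEMO_CHAN_ERR };

	printf("errors handled: %d\n", demo_handle_errors(DEMO_SUMMARY_VALID, es, 4));
	return 0;
}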
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -274,6 +329,49 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return 0;
}
+static int fsl_edma3_or_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
+ if (fsl_edma->txirq < 0)
+ return fsl_edma->txirq;
+
+ fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
+ if (fsl_edma->txirq_16_31 < 0)
+ return fsl_edma->txirq_16_31;
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
+ if (fsl_edma->errirq < 0)
+ return fsl_edma->errirq;
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx0_15 IRQ.\n");
+
+ if (fsl_edma->n_chans > 16) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
+ fsl_edma3_tx_16_31_handler, 0,
+ "eDMA tx16_31", fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx16_31 IRQ.\n");
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma3_or_err_handler, 0, "eDMA err",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA err IRQ.\n");
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -404,6 +502,14 @@ static struct fsl_edma_drvdata imx95_data5 = {
.setup_irq = fsl_edma3_irq_init,
};
+static const struct fsl_edma_drvdata s32g2_data = {
+ .dmamuxs = DMAMUX_NR,
+ .chreg_space_sz = EDMA_TCD,
+ .chreg_off = 0x4000,
+ .flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
+ .setup_irq = fsl_edma3_or_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
@@ -413,6 +519,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
+ { .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -545,10 +652,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- /* eDMAv3 mux register move to TCD area if ch_mux exist */
- if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
- break;
-
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -677,7 +780,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
ret = of_dma_controller_register(np,
- drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 57f1bf2ab20b..ff94ee892339 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
* global to avoid conflict file names.
*/
static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);
/*
* ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
struct idxd_device *idxd = wq->idxd;
int rc;
- mutex_lock(&ida_lock);
ida_free(&file_ida, ctx->id);
- mutex_unlock(&ida_lock);
/* Wait for in-flight operations to complete. */
if (wq_shared(wq)) {
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
}
idxd_cdev = wq->idxd_cdev;
- mutex_lock(&ida_lock);
ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
- mutex_unlock(&ida_lock);
if (ctx->id < 0) {
dev_warn(dev, "ida alloc failure\n");
goto failed_ida;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d84e21daa991..214b8039439f 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -374,6 +374,17 @@ struct idxd_device {
struct dentry *dbgfs_evl_file;
bool user_submission_safe;
+
+ struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+ struct idxd_device saved_idxd;
+ struct idxd_evl saved_evl;
+ struct idxd_engine **saved_engines;
+ struct idxd_wq **saved_wqs;
+ struct idxd_group **saved_groups;
+ unsigned long *saved_wq_enable_map;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +736,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
&desc->txd, &status);
}
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +751,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 140f8d772bee..b946f78f85e1 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -78,6 +78,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA on DMR platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA PTL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -723,67 +725,464 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_disable_sva(idxd->pdev);
}
-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
{
- struct device *dev = &pdev->dev;
- struct idxd_device *idxd;
- struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev)
+ err = device_driver_attach(drv, dev);
+
+ put_device(dev);
+
+ return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver == drv)
+ device_release_driver(dev);
+
+ put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count) \
+ do { \
+ int i; \
+ \
+ for (i = 0; i < (count); i++) \
+ kfree(saved_configs[i]); \
+ } while (0)
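idxd_free_saved_configs is a multi-statement macro, so it is wrapped in do { } while (0) to behave as a single statement (safe after an unbraced if) and to keep its loop counter out of the caller's scope. A tiny stand-alone illustration of the same pattern, with a hypothetical FREE_ALL macro, is shown below.

#include <stdio.h>
#include <stdlib.h>

/*
 * Without the do { } while (0) wrapper this macro would expand to more than
 * one statement, so only the first would be guarded by an unbraced "if" and
 * a following "else" would no longer pair up correctly.
 */
#define FREE_ALL(ptrs, count)				\
	do {						\
		int i;					\
		for (i = 0; i < (count); i++)		\
			free((ptrs)[i]);		\
	} while (0)

int main(void)
{
	void *bufs[2] = { malloc(8), malloc(8) };
	int have_bufs = 1;

	if (have_bufs)
		FREE_ALL(bufs, 2);    /* expands to a single statement */
	else
		printf("nothing to free\n");
	return 0;
}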
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+ struct idxd_engine **saved_engines,
+ struct idxd_wq **saved_wqs,
+ struct idxd_device *idxd)
+{
+ if (saved_groups)
+ idxd_free_saved_configs(saved_groups, idxd->max_groups);
+ if (saved_engines)
+ idxd_free_saved_configs(saved_engines, idxd->max_engines);
+ if (saved_wqs)
+ idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+ if (idxd->evl) {
+ memcpy(&idxd_saved->saved_evl, idxd->evl,
+ sizeof(struct idxd_evl));
+ }
+
+ struct idxd_group **saved_groups __free(kfree) =
+ kcalloc_node(idxd->max_groups,
+ sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group __free(kfree) =
+ kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+ dev_to_node(dev));
+
+ if (!saved_group) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+ saved_groups[i] = no_free_ptr(saved_group);
+ }
+
+ struct idxd_engine **saved_engines =
+ kcalloc_node(idxd->max_engines,
+ sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_engines) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine __free(kfree) =
+ kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_engine) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+ saved_engines[i] = no_free_ptr(saved_engine);
+ }
+
+ unsigned long *saved_wq_enable_map __free(bitmap) =
+ bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_wq_enable_map) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+ struct idxd_wq **saved_wqs __free(kfree) =
+ kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_wqs) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq __free(kfree) =
+ kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+ dev_to_node(dev));
+ struct idxd_wq *wq;
+
+ if (!saved_wq) {
+ /* Free saved groups, engines, and wqs */
+ idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ if (!test_bit(i, saved_wq_enable_map))
+ continue;
+
+ wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
+ memcpy(saved_wq, wq, sizeof(*saved_wq));
+ saved_wqs[i] = no_free_ptr(saved_wq);
+ mutex_unlock(&wq->wq_lock);
+ }
+
+ /* Save configurations */
+ idxd_saved->saved_groups = no_free_ptr(saved_groups);
+ idxd_saved->saved_engines = no_free_ptr(saved_engines);
+ idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+ idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+ return 0;
+}
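The save path relies on the kernel's scope-based cleanup helpers: each pointer declared with __free(kfree) is released automatically on any early return, and no_free_ptr() hands ownership out of the guard once the copy has succeeded. Outside the kernel the same ownership-transfer idea can be approximated as in the sketch below; this is only an analogy with an invented steal() helper, not the cleanup.h implementation.

#include <stdlib.h>
#include <string.h>

/* Hand ownership out of a guarded pointer, loosely mirroring no_free_ptr(). */
static char *steal(char **slot)
{
	char *p = *slot;

	*slot = NULL;
	return p;
}

/* Save a copy of 'src'; on failure nothing leaks, on success the caller owns it. */
static int save_copy(const char *src, char **out)
{
	char *tmp = malloc(strlen(src) + 1);   /* "guarded" temporary */

	if (!tmp)
		return -1;
	strcpy(tmp, src);
	*out = steal(&tmp);                    /* success: transfer ownership */
	free(tmp);                             /* tmp is NULL now, so this is a no-op */
	return 0;
}

int main(void)
{
	char *saved = NULL;

	if (save_copy("wq config", &saved) == 0)
		free(saved);
	return 0;
}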
+
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+ int i;
+
+ idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+ if (saved_evl)
+ idxd->evl->size = saved_evl->size;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group, *group;
+
+ saved_group = idxd_saved->saved_groups[i];
+ group = idxd->groups[i];
+
+ group->rdbufs_allowed = saved_group->rdbufs_allowed;
+ group->rdbufs_reserved = saved_group->rdbufs_reserved;
+ group->tc_a = saved_group->tc_a;
+ group->tc_b = saved_group->tc_b;
+ group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+ kfree(saved_group);
+ }
+ kfree(idxd_saved->saved_groups);
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine, *engine;
+
+ saved_engine = idxd_saved->saved_engines[i];
+ engine = idxd->engines[i];
+
+ engine->group = saved_engine->group;
+
+ kfree(saved_engine);
+ }
+ kfree(idxd_saved->saved_engines);
+
+ bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+ idxd->max_wqs);
+ bitmap_free(idxd_saved->saved_wq_enable_map);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq, *wq;
+ size_t len;
+
+ if (!test_bit(i, idxd->wq_enable_map))
+ continue;
+
+ saved_wq = idxd_saved->saved_wqs[i];
+ wq = idxd->wqs[i];
+
+ mutex_lock(&wq->wq_lock);
+
+ wq->group = saved_wq->group;
+ wq->flags = saved_wq->flags;
+ wq->threshold = saved_wq->threshold;
+ wq->size = saved_wq->size;
+ wq->priority = saved_wq->priority;
+ wq->type = saved_wq->type;
+ len = strlen(saved_wq->name) + 1;
+ strscpy(wq->name, saved_wq->name, len);
+ wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+ wq->max_batch_size = saved_wq->max_batch_size;
+ wq->enqcmds_retries = saved_wq->enqcmds_retries;
+ wq->descs = saved_wq->descs;
+ wq->idxd_chan = saved_wq->idxd_chan;
+ len = strlen(saved_wq->driver_name) + 1;
+ strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+ mutex_unlock(&wq->wq_lock);
+
+ kfree(saved_wq);
+ }
+
+ kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct device *dev = &idxd->pdev->dev;
+ const char *idxd_name;
int rc;
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, data);
- if (!idxd) {
- rc = -ENOMEM;
- goto err_idxd_alloc;
+ struct idxd_saved_states *idxd_saved __free(kfree) =
+ kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!idxd_saved) {
+ dev_err(dev, "HALT: no memory\n");
+
+ return;
}
- dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base) {
- rc = -ENOMEM;
- goto err_iomap;
+ /* Save IDXD configurations. */
+ rc = idxd_device_config_save(idxd, idxd_saved);
+ if (rc < 0) {
+ dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+ return;
+ }
+
+ idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+ /* Save PCI device state. */
+ pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ const char *idxd_name;
+ struct device *dev;
+ int rc, i;
+
+ if (!idxd->idxd_saved)
+ return;
+
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
+
+ /* Restore PCI device state. */
+ pci_restore_state(idxd->pdev);
+
+ /* Unbind idxd device from driver. */
+ idxd_unbind(&idxd_drv.drv, idxd_name);
+
+ /*
+ * Probe PCI device without allocating or changing
+ * idxd software data which keeps the same as before FLR.
+ */
+ idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+ /* Restore IDXD configurations. */
+ idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+ /* Re-configure IDXD device if allowed. */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ dev_err(dev, "HALT: %s config fails\n", idxd_name);
+ goto out;
+ }
+ }
+
+ /* Bind IDXD device to driver. */
+ rc = idxd_bind(&idxd_drv.drv, idxd_name);
+ if (rc < 0) {
+ dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+ goto out;
}
- dev_dbg(dev, "Set DMA masks\n");
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ /* Bind enabled wq in the IDXD device to driver. */
+ for (i = 0; i < idxd->max_wqs; i++) {
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ char wq_name[32];
+
+ wq->state = IDXD_WQ_DISABLED;
+ sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+ /*
+ * Bind to user driver depending on wq type.
+ *
+ * Currently only support user type WQ. Will support
+ * kernel type WQ in the future.
+ */
+ if (wq->type == IDXD_WQT_USER)
+ rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+ else
+ rc = -EINVAL;
+ if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
+ dev_err(dev,
+ "HALT: unable to re-enable wq %s\n",
+ dev_name(wq_confdev(wq)));
+ }
+ }
+ }
+out:
+ kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+ .reset_prepare = idxd_reset_prepare,
+ .reset_done = idxd_reset_done,
+};
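These two hooks bracket a function level reset: the PCI core invokes .reset_prepare before issuing the FLR (letting the driver save its configuration and PCI state) and .reset_done afterwards (re-probe, restore the configuration, re-bind). The following pseudo-driver sketch only illustrates that call order with invented names; it is not how the PCI core is implemented.

#include <stdio.h>

struct demo_err_handlers {
	void (*reset_prepare)(void);
	void (*reset_done)(void);
};

static void demo_prepare(void) { printf("save configs and PCI state\n"); }
static void demo_done(void)    { printf("restore state, re-probe, re-bind wqs\n"); }

static const struct demo_err_handlers demo_handlers = {
	.reset_prepare = demo_prepare,
	.reset_done    = demo_done,
};

/* Rough order of operations around an FLR triggered via pci_reset_function(). */
static void demo_reset_function(void)
{
	demo_handlers.reset_prepare();
	printf("issue FLR\n");
	demo_handlers.reset_done();
}

int main(void)
{
	demo_reset_function();
	return 0;
}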
+
+/*
+ * Probe the idxd PCI device.
+ * If idxd is not given, allocate it and set up its data.
+ *
+ * If idxd is given, it was already allocated and set up, so only
+ * configure the device without re-allocating or re-initializing the
+ * idxd data. This is useful for recovering from an FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ bool alloc_idxd = idxd ? false : true;
+ struct idxd_driver_data *data;
+ struct device *dev;
+ int rc;
+
+ pdev = idxd ? idxd->pdev : pdev;
+ dev = &pdev->dev;
+ data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+ rc = pci_enable_device(pdev);
if (rc)
- goto err;
+ return rc;
+
+ if (alloc_idxd) {
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
+
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ goto err;
+ }
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
- idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
- rc = idxd_probe(idxd);
- if (rc) {
- dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- goto err;
- }
+ if (alloc_idxd) {
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ goto err;
+ }
+
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
+ rc = idxd_register_devices(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ goto err_dev_register;
+ }
- if (data->load_device_defaults) {
- rc = data->load_device_defaults(idxd);
+ rc = idxd_device_init_debugfs(idxd);
if (rc)
- dev_warn(dev, "IDXD loading device defaults failed\n");
+ dev_warn(dev, "IDXD debugfs failed to setup\n");
}
- rc = idxd_register_devices(idxd);
- if (rc) {
- dev_err(dev, "IDXD sysfs setup failed\n");
- goto err_dev_register;
- }
+ if (!alloc_idxd) {
+ /* Release interrupts in the IDXD device. */
+ idxd_cleanup_interrupts(idxd);
- rc = idxd_device_init_debugfs(idxd);
- if (rc)
- dev_warn(dev, "IDXD debugfs failed to setup\n");
+ /* Re-enable interrupts in the IDXD device. */
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD interrupts failed to setup\n");
+ }
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
- idxd->user_submission_safe = data->user_submission_safe;
+ if (data)
+ idxd->user_submission_safe = data->user_submission_safe;
return 0;
@@ -798,6 +1197,11 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return idxd_pci_probe_alloc(NULL, pdev, id);
+}
+
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -864,6 +1268,7 @@ static struct pci_driver idxd_pci_driver = {
.probe = idxd_pci_probe,
.remove = idxd_remove,
.shutdown = idxd_shutdown,
+ .err_handler = &idxd_error_handler,
};
static int __init idxd_init_module(void)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index fc049c9c9892..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -383,15 +383,65 @@ static void process_evl_entries(struct idxd_device *idxd)
mutex_unlock(&evl->lock);
}
+static void idxd_device_flr(struct work_struct *work)
+{
+ struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+ int rc;
+
+ /*
+ * IDXD device requires a Function Level Reset (FLR).
+ * pci_reset_function() will reset the device with FLR.
+ */
+ rc = pci_reset_function(idxd->pdev);
+ if (rc)
+ dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ idxd->state = IDXD_DEV_HALTED;
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ /*
+ * If we need a software reset, we will throw the work
+ * on a system workqueue in order to allow interrupts
+ * for the device command completions.
+ */
+ INIT_WORK(&idxd->work, idxd_device_reinit);
+ queue_work(idxd->wq, &idxd->work);
+ } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_mask_error_interrupts(idxd);
+ dev_dbg(&idxd->pdev->dev,
+ "idxd halted, doing FLR. After FLR, configs are restored\n");
+ INIT_WORK(&idxd->work, idxd_device_flr);
+ queue_work(idxd->wq, &idxd->work);
+
+ } else {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
+ idxd_device_clear_state(idxd);
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need system reset");
+
+ return -ENXIO;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = ie_to_idxd(irq_entry);
struct device *dev = &idxd->pdev->dev;
- union gensts_reg gensts;
u32 val = 0;
int i;
- bool err = false;
u32 cause;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_HALT_STATE)
- goto halt;
+ return idxd_halt(idxd);
if (cause & IDXD_INTC_ERR) {
spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
for (i = 0; i < 4; i++)
dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
i, idxd->sw_err.bits[i]);
- err = true;
}
if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- if (!err)
- goto out;
-
-halt:
- gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
- if (gensts.state == IDXD_DEVICE_STATE_HALT) {
- idxd->state = IDXD_DEV_HALTED;
- if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
- /*
- * If we need a software reset, we will throw the work
- * on a system workqueue in order to allow interrupts
- * for the device command completions.
- */
- INIT_WORK(&idxd->work, idxd_device_reinit);
- queue_work(idxd->wq, &idxd->work);
- } else {
- idxd->state = IDXD_DEV_HALTED;
- idxd_wqs_quiesce(idxd);
- idxd_wqs_unmap_portal(idxd);
- idxd_device_clear_state(idxd);
- dev_err(&idxd->pdev->dev,
- "idxd halted, need %s.\n",
- gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
- "FLR" : "system reset");
- }
- }
-
-out:
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c426511f2104..006ba206ab1b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index f706eae0e76b..6af493f6ba77 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1979,13 +1979,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(group_confdev(group));
}
}
-
-int idxd_register_bus_type(void)
-{
- return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
- bus_unregister(&dsa_bus_type);
-}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 40b76b40bc30..fa6e4646fdc2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1369,10 +1369,9 @@ static int mv_xor_probe(struct platform_device *pdev)
return 0;
if (pdev->dev.of_node) {
- struct device_node *np;
int i = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
@@ -1388,7 +1387,6 @@ static int mv_xor_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENODEV;
- of_node_put(np);
goto err_channel_add;
}
@@ -1397,7 +1395,6 @@ static int mv_xor_probe(struct platform_device *pdev)
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
- of_node_put(np);
goto err_channel_add;
}
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
deleted file mode 100644
index b430edd709f9..000000000000
--- a/drivers/dma/ptdma/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config AMD_PTDMA
- tristate "AMD PassThru DMA Engine"
- depends on X86_64 && PCI
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the AMD PTDMA controller. This controller
- provides DMA capabilities to perform high bandwidth memory to
- memory and IO copy operations. It performs DMA transfer through
- queue-based descriptor management. This DMA controller is intended
- to be used with AMD Non-Transparent Bridge devices and not for
- general purpose peripheral DMA.
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index bbc3276992bb..c14557efd577 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -59,6 +59,9 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
+#define BAM_NDP_REVISION_START 0x20
+#define BAM_NDP_REVISION_END 0x27
+
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -398,6 +401,7 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
+ u32 bam_revision;
};
/**
@@ -441,8 +445,10 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
- writel_relaxed(DEFAULT_CNT_THRSHLD,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
+ BAM_NDP_REVISION_END))
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@@ -1000,9 +1006,10 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
-
- writel_relaxed(maxburst,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
+ BAM_NDP_REVISION_END))
+ writel_relaxed(maxburst,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@@ -1192,10 +1199,11 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- if (!bdev->num_ees) {
- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+ if (!bdev->num_ees)
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
- }
+
+ bdev->bam_revision = val & REVISION_MASK;
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 52a7c8f2498f..b1f0001cc99c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -18,6 +18,7 @@
#include "../virt-dma.h"
#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_IMMEDIATE_DMA 0x11
#define TRE_TYPE_GO 0x20
#define TRE_TYPE_CONFIG0 0x22
@@ -64,6 +65,7 @@
/* DMA TRE */
#define TRE_DMA_LEN GENMASK(23, 0)
+#define TRE_DMA_IMMEDIATE_LEN GENMASK(3, 0)
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
@@ -1711,6 +1713,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
dma_addr_t address;
struct gpi_tre *tre;
unsigned int i;
+ int len;
/* first create config tre if applicable */
if (direction == DMA_MEM_TO_DEV && spi->set_config) {
@@ -1763,14 +1766,30 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre_idx++;
address = sg_dma_address(sgl);
- tre->dword[0] = lower_32_bits(address);
- tre->dword[1] = upper_32_bits(address);
+ len = sg_dma_len(sgl);
- tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+ /* Use immediate DMA for write transfers of up to 8 bytes */
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ /*
+ * For immediate DMA the data length may be less than 8 bytes,
+ * so zero both dwords before copying the payload in.
+ */
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
- tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
- if (direction == DMA_MEM_TO_DEV)
- tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+
+ tre->dword[3] |= u32_encode_bits(direction == DMA_MEM_TO_DEV,
+ TRE_FLAGS_IEOT);
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
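For writes of at most 8 bytes the payload is carried inside the TRE itself: both dwords are zeroed and then overwritten by memcpy for however many bytes are present, instead of pointing the TRE at a DMA address. A stand-alone model of that packing is sketched below (plain C, illustrative only).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack up to 8 bytes of payload directly into two 32-bit TRE words. */
static void demo_pack_immediate(uint32_t dword[2], const void *buf, size_t len)
{
	dword[0] = 0;
	dword[1] = 0;                       /* len may be < 8, so clear both first */
	if (len > 2 * sizeof(dword[0]))
		len = 2 * sizeof(dword[0]);
	memcpy(dword, buf, len);
}

int main(void)
{
	uint32_t dword[2];
	const uint8_t payload[3] = { 0xaa, 0xbb, 0xcc };

	demo_pack_immediate(dword, payload, sizeof(payload));
	printf("dword0=0x%08x dword1=0x%08x\n", dword[0], dword[1]);
	return 0;
}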
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2679c1f09faf..0c45ce8c74aa 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -2023,6 +2023,10 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-gen4-dmac",
.data = &rcar_gen4_dmac_data,
}, {
+ /*
+ * Backward compatibility for v5.12 - v5.19, which did not yet
+ * use the combined "renesas,rcar-gen4-dmac" compatible
+ */
.compatible = "renesas,dmac-r8a779a0",
.data = &rcar_gen4_dmac_data,
},
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index f37cdf6f2179..24796aaaddfa 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -13,7 +13,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -31,12 +33,21 @@
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)
+#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 24)
+#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 8)
+
+#define SUN4I_MAX_BURST 8
+#define SUNIV_MAX_BURST 4
+
/** Normal DMA register values **/
/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_NDMA_DRQ_TYPE_SDRAM 0x11
+#define SUNIV_NDMA_DRQ_TYPE_LIMIT (0x17 + 1)
+
/** Normal DMA register layout **/
/* Dedicated DMA source/destination address mode values */
@@ -50,6 +61,9 @@
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)
+#define SUNIV_NDMA_CFG_CONT_MODE BIT(29)
+#define SUNIV_NDMA_CFG_WAIT_STATE(n) ((n) << 26)
+
/** Dedicated DMA register values **/
/* Dedicated DMA source/destination address mode values */
@@ -62,6 +76,9 @@
#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_DDMA_DRQ_TYPE_SDRAM 0x1
+#define SUNIV_DDMA_DRQ_TYPE_LIMIT (0x9 + 1)
+
/** Dedicated DMA register layout **/
/* Dedicated DMA configuration register layout */
@@ -115,6 +132,11 @@
#define SUN4I_DMA_NR_MAX_VCHANS \
(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+#define SUNIV_NDMA_NR_MAX_CHANNELS 4
+#define SUNIV_DDMA_NR_MAX_CHANNELS 4
+#define SUNIV_NDMA_NR_MAX_VCHANS (24 * 2 - 1)
+#define SUNIV_DDMA_NR_MAX_VCHANS 10
+
/* This set of SUN4I_DDMA timing parameters were found experimentally while
* working with the SPI driver and seem to make it behave correctly */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
@@ -132,6 +154,33 @@
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun4i_dma_config {
+ u32 ndma_nr_max_channels;
+ u32 ndma_nr_max_vchans;
+
+ u32 ddma_nr_max_channels;
+ u32 ddma_nr_max_vchans;
+
+ u32 dma_nr_max_channels;
+
+ void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
+ void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
+ int (*convert_burst)(u32 maxburst);
+
+ u8 ndma_drq_sdram;
+ u8 ddma_drq_sdram;
+
+ u8 max_burst;
+ bool has_reset;
+};
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -170,7 +219,7 @@ struct sun4i_dma_contract {
};
struct sun4i_dma_dev {
- DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+ unsigned long *pchans_used;
struct dma_device slave;
struct sun4i_dma_pchan *pchans;
struct sun4i_dma_vchan *vchans;
@@ -178,6 +227,8 @@ struct sun4i_dma_dev {
struct clk *clk;
int irq;
spinlock_t lock;
+ const struct sun4i_dma_config *cfg;
+ struct reset_control *rst;
};
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
@@ -200,7 +251,27 @@ static struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static int convert_burst(u32 maxburst)
+static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static int convert_burst_a10(u32 maxburst)
{
if (maxburst > 8)
return -EINVAL;
@@ -209,6 +280,15 @@ static int convert_burst(u32 maxburst)
return (maxburst >> 2);
}
+static int convert_burst_f1c100s(u32 maxburst)
+{
+ if (maxburst > 4)
+ return -EINVAL;
+
+ /* 1 -> 0, 4 -> 1 */
+ return (maxburst >> 2);
+}
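Both converters use the same maxburst >> 2 encoding; only the accepted maximum differs (8 on the A10 family, 4 on the F1C100s). A trivial check of that mapping, with a generic limit parameter standing in for the two variants:

#include <stdio.h>

/* Same encoding as both converters: the register field is simply maxburst >> 2. */
static int demo_encode_burst(unsigned int maxburst, unsigned int limit)
{
	if (maxburst > limit)
		return -1;
	return maxburst >> 2;              /* 1 -> 0, 4 -> 1, 8 -> 2 */
}

int main(void)
{
	printf("a10,   burst 8 -> %d\n", demo_encode_burst(8, 8));   /* accepted */
	printf("suniv, burst 8 -> %d\n", demo_encode_burst(8, 4));   /* rejected */
	return 0;
}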
+
static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
@@ -233,15 +313,15 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
int i, max;
/*
- * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
- * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+ * pchans 0-priv->cfg->ndma_nr_max_channels are normal, and
+ * priv->cfg->ndma_nr_max_channels+ are dedicated ones
*/
if (vchan->is_dedicated) {
- i = SUN4I_NDMA_NR_MAX_CHANNELS;
- max = SUN4I_DMA_NR_MAX_CHANNELS;
+ i = priv->cfg->ndma_nr_max_channels;
+ max = priv->cfg->dma_nr_max_channels;
} else {
i = 0;
- max = SUN4I_NDMA_NR_MAX_CHANNELS;
+ max = priv->cfg->ndma_nr_max_channels;
}
spin_lock_irqsave(&priv->lock, flags);
@@ -444,6 +524,7 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig,
enum dma_transfer_direction direction)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -467,13 +548,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
sconfig->src_addr_width, sconfig->dst_addr_width);
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -482,13 +563,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -510,6 +591,7 @@ static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -524,13 +606,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -539,13 +621,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -622,6 +704,7 @@ static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -638,8 +721,8 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
*/
sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- sconfig->src_maxburst = 8;
- sconfig->dst_maxburst = 8;
+ sconfig->src_maxburst = priv->cfg->max_burst;
+ sconfig->dst_maxburst = priv->cfg->max_burst;
if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, src, dest, len, sconfig);
@@ -654,11 +737,13 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
/* Configure memcpy mode */
if (vchan->is_dedicated) {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
} else {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
}
/* Fill the contract with our only promise */
@@ -673,6 +758,7 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -696,11 +782,11 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -793,6 +879,7 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -818,11 +905,11 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV)
@@ -1150,6 +1237,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->cfg = of_device_get_match_data(&pdev->dev);
+ if (!priv->cfg)
+ return -ENODEV;
+
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1164,6 +1255,13 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
+ if (priv->cfg->has_reset) {
+ priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
+ "Failed to get reset control\n");
+ }
+
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
@@ -1197,23 +1295,26 @@ static int sun4i_dma_probe(struct platform_device *pdev)
priv->slave.dev = &pdev->dev;
- priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+ priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
- if (!priv->vchans || !priv->pchans)
+ priv->pchans_used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!priv->vchans || !priv->pchans || !priv->pchans_used)
return -ENOMEM;
/*
- * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
- * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+ * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
+ * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
* dedicated ones
*/
- for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+ for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
priv->pchans[i].base = priv->base +
SUN4I_NDMA_CHANNEL_REG_BASE(i);
- for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+ for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
priv->pchans[i].base = priv->base +
SUN4I_DDMA_CHANNEL_REG_BASE(j);
priv->pchans[i].is_dedicated = 1;
@@ -1284,8 +1385,51 @@ static void sun4i_dma_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
}
+static struct sun4i_dma_config sun4i_a10_dma_cfg = {
+ .ndma_nr_max_channels = SUN4I_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUN4I_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUN4I_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUN4I_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUN4I_DMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_a10,
+ .set_src_data_width = set_src_data_width_a10,
+ .convert_burst = convert_burst_a10,
+
+ .ndma_drq_sdram = SUN4I_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUN4I_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUN4I_MAX_BURST,
+ .has_reset = false,
+};
+
+static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
+ .ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUNIV_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUNIV_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS +
+ SUNIV_DDMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_f1c100s,
+ .set_src_data_width = set_src_data_width_f1c100s,
+ .convert_burst = convert_burst_f1c100s,
+
+ .ndma_drq_sdram = SUNIV_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUNIV_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUNIV_MAX_BURST,
+ .has_reset = true,
+};
+
static const struct of_device_id sun4i_dma_match[] = {
- { .compatible = "allwinner,sun4i-a10-dma" },
+ { .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
+ { .compatible = "allwinner,suniv-f1c100s-dma",
+ .data = &suniv_f1c100s_dma_cfg },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2953008d42ef..6896da8ac7ef 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -43,6 +43,10 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
+#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
+#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
@@ -96,6 +100,7 @@ struct tegra_adma_chip_data {
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
bool has_outstanding_reqs;
+ void (*set_global_pg_config)(struct tegra_adma *tdma);
};
/*
@@ -151,6 +156,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ void __iomem *ch_base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long *dma_chan_mask;
@@ -159,6 +165,7 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ unsigned int ch_page_no;
const struct tegra_adma_chip_data *cdata;
@@ -176,6 +183,11 @@ static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
+static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
+}
+
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
@@ -217,13 +229,30 @@ static int tegra_adma_slave_config(struct dma_chan *dc,
return 0;
}
+static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
+{
+ /*
+ * Clear the default page1 channel group configs and program
+ * the global registers based on the actual page usage
+ */
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
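+ /* Each page has its own CHGRP/RX_REQ/TX_REQ register at a 4-byte stride from the bases above, hence the (ch_page_no * 0x4) offsets. */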
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
int ret;
- /* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
+ /* Clear any channel group global interrupts */
+ tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);
+
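+ /* With a page-only mapping there is no global region; the soft reset and page setup below are presumably handled by the owner of the global page, so stop here. */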
+ if (!tdma->base_addr)
+ return 0;
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
@@ -237,6 +266,9 @@ static int tegra_adma_init(struct tegra_adma *tdma)
if (ret)
return ret;
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+
/* Enable global ADMA registers */
tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
@@ -736,7 +768,9 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
struct tegra_adma_chan *tdc;
int i;
- tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (tdma->base_addr)
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
if (!tdma->global_cmd)
goto clk_disable;
@@ -777,7 +811,11 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
}
- tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->base_addr) {
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+ }
if (!tdma->global_cmd)
return 0;
@@ -817,6 +855,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.has_outstanding_reqs = false,
+ .set_global_pg_config = NULL,
};
static const struct tegra_adma_chip_data tegra186_chip_data = {
@@ -833,6 +872,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.has_outstanding_reqs = true,
+ .set_global_pg_config = tegra186_adma_global_page_config,
};
static const struct of_device_id tegra_adma_of_match[] = {
@@ -846,7 +886,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
- int ret, i;
+ struct resource *res_page, *res_base;
+ int ret, i, page_no;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -865,9 +906,35 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tdma->base_addr))
- return PTR_ERR(tdma->base_addr);
+ res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
+ if (res_page) {
+ tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
+ if (IS_ERR(tdma->ch_base_addr))
+ return PTR_ERR(tdma->ch_base_addr);
+
+ res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
+ if (res_base) {
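+ /* The page index follows from the offset of the 'page' region within the 'global' space; page 0 holds the global registers, so a usable channel page must be 1 or greater. */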
+ page_no = (res_page->start - res_base->start) / cdata->ch_base_offset;
+ if (page_no <= 0)
+ return -EINVAL;
+ tdma->ch_page_no = page_no - 1;
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ }
+ } else {
+ /* If no 'page' resource is found, assume the legacy reg DT binding */
+ res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_base) {
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ } else {
+ return -ENODEV;
+ }
+
+ tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
+ }
tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
if (IS_ERR(tdma->ahub_clk)) {
@@ -900,8 +967,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (!test_bit(i, tdma->dma_chan_mask))
continue;
- tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
- + (cdata->ch_reg_size * i);
+ tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 343e986e66e7..4ece125b2ae7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -208,7 +208,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2460,19 +2459,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
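+ /* tc_list no longer keeps the TC node, so drop the reference once its argument has been read. */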
+ of_node_put(tc_args.np);
}
/* See if we have optional dma-channel-mask array */
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b3f27b3f9209..7ed1956b4642 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4404,6 +4404,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4435,6 +4447,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, udma_of_match);
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 93772abc3b49..0d88b1a670e1 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -390,15 +390,11 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
*/
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
- int ret;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
- ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
- CHAN_CTRL_RUN_STOP);
- if (ret)
- return ret;
- return ret;
+ return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
}
/**
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 1bdd57de87a6..108a7287f4cd 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1404,16 +1404,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- j = chan->desc_submitcount;
- reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
- if (chan->direction == DMA_MEM_TO_DEV) {
- reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
- } else {
- reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
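+ /* Program the park pointer only when park mode is enabled; otherwise leave XILINX_DMA_REG_PARK_PTR untouched. */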
+ if (config->park) {
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ }
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
}
- dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index e458ce0c45ab..b11b43171063 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -350,7 +350,7 @@ static const struct dev_pm_ops fsa9480_pm_ops = {
};
static const struct i2c_device_id fsa9480_id[] = {
- { "fsa9480", 0 },
+ { "fsa9480" },
{}
};
MODULE_DEVICE_TABLE(i2c, fsa9480_id);
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 4616da7e5430..78ad86c4a3be 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -338,7 +338,7 @@ static const struct of_device_id ptn5150_dt_match[] = {
MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
static const struct i2c_device_id ptn5150_i2c_id[] = {
- { "ptn5150", 0 },
+ { "ptn5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c
index bdc2b7b3a246..82b60b927e41 100644
--- a/drivers/extcon/extcon-rtk-type-c.c
+++ b/drivers/extcon/extcon-rtk-type-c.c
@@ -1369,6 +1369,8 @@ static int extcon_rtk_type_c_probe(struct platform_device *pdev)
}
type_c->type_c_cfg = devm_kzalloc(dev, sizeof(*type_c_cfg), GFP_KERNEL);
+ if (!type_c->type_c_cfg)
+ return -ENOMEM;
memcpy(type_c->type_c_cfg, type_c_cfg, sizeof(*type_c_cfg));
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index a99fe35f1f0d..ec3e21ad2025 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -988,7 +988,7 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
-static int compare_configuration_rom(struct device *dev, void *data)
+static int compare_configuration_rom(struct device *dev, const void *data)
{
const struct fw_device *old = fw_device(dev);
const u32 *config_rom = data;
@@ -1039,7 +1039,7 @@ static void fw_device_init(struct work_struct *work)
//
// serialize config_rom access.
scoped_guard(rwsem_read, &fw_device_rwsem) {
- found = device_find_child(card->device, (void *)device->config_rom,
+ found = device_find_child(card->device, device->config_rom,
compare_configuration_rom);
}
if (found) {
diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
index 2f123c6b0a16..97478a96d1c9 100644
--- a/drivers/firewire/device-attribute-test.c
+++ b/drivers/firewire/device-attribute-test.c
@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c02aed11b590..edaedd156a6d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3301,8 +3301,7 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
}
}
-#ifdef CONFIG_PM
-static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
@@ -3319,7 +3318,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
-#endif
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
@@ -3888,39 +3886,25 @@ static void pci_remove(struct pci_dev *dev)
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused pci_suspend(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
software_reset(ohci);
- err = pci_save_state(dev);
- if (err) {
- ohci_err(ohci, "pci_save_state failed\n");
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
- pmac_ohci_off(dev);
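+ /* PCI state save/restore and power-state transitions are handled by the PCI core for drivers using dev_pm_ops, so they are no longer done by hand here. */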
+ pmac_ohci_off(pdev);
return 0;
}
-static int pci_resume(struct pci_dev *dev)
+
+static int __maybe_unused pci_resume(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
int err;
- pmac_ohci_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- ohci_err(ohci, "pci_enable_device failed\n");
- return err;
- }
+ pmac_ohci_on(pdev);
/* Some systems don't setup GUID register on resume from ram */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
@@ -3937,7 +3921,6 @@ static int pci_resume(struct pci_dev *dev)
return 0;
}
-#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
@@ -3946,15 +3929,14 @@ static const struct pci_device_id pci_table[] = {
MODULE_DEVICE_TABLE(pci, pci_table);
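+/* pci_suspend()/pci_resume() are referenced only via these ops, which omit them when CONFIG_PM_SLEEP is off; hence the __maybe_unused annotations above. */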
+static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
+
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
-#ifdef CONFIG_PM
- .resume = pci_resume,
- .suspend = pci_suspend,
-#endif
+ .driver.pm = &pci_pm_ops,
};
static int __init fw_ohci_init(void)
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 827dee0f57dd..1a19828114cf 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1490,7 +1490,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
return retval;
}
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+static int sbp2_scsi_sdev_init(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1506,8 +1506,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int sbp2_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sbp2_scsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1590,8 +1590,8 @@ static const struct scsi_host_template scsi_driver_template = {
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
- .slave_alloc = sbp2_scsi_slave_alloc,
- .device_configure = sbp2_scsi_device_configure,
+ .sdev_init = sbp2_scsi_sdev_init,
+ .sdev_configure = sbp2_scsi_sdev_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..9f35f69e0f9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -106,7 +106,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 157172a5f2b5..a3386bf36de5 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -238,10 +238,10 @@ static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
return 0;
}
-static int scmi_match_by_id_table(struct device *dev, void *data)
+static int scmi_match_by_id_table(struct device *dev, const void *data)
{
struct scmi_device *sdev = to_scmi_dev(dev);
- struct scmi_device_id *id_table = data;
+ const struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
(id_table->name && !strcmp(sdev->name, id_table->name));
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 48b12f81141d..10ea7962323e 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -442,7 +442,7 @@ struct scmi_transport_core_operations {
*/
struct scmi_transport {
struct device *supplier;
- struct scmi_desc *desc;
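+ /* Embedded copy instead of a pointer: the DT-tunable fields are patched per instance at probe time, presumably to avoid modifying a descriptor shared across transports. */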
+ struct scmi_desc desc;
struct scmi_transport_core_operations **core_ops;
};
@@ -468,7 +468,7 @@ static int __tag##_probe(struct platform_device *pdev) \
device_set_of_node_from_dev(&spdev->dev, dev); \
\
strans.supplier = dev; \
- strans.desc = &(__desc); \
+ memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \
strans.core_ops = &(__core_ops); \
\
ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1b5fb2c4ce86..60050da54bf2 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
@@ -43,6 +44,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
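+/* Alias format announced by loadable vendor protocol modules (via MODULE_ALIAS); used with request_module() when a vendor protocol is not yet registered. */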
+#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
+
static DEFINE_IDA(scmi_id);
static DEFINE_XARRAY(scmi_protocols);
@@ -276,6 +279,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
}
static const struct scmi_protocol *
+scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto;
+
+ proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ if (!proto) {
+ int ret;
+
+ pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
+ protocol_id, version->vendor_id);
+
+ /* Note that vendor_id is mandatory for vendor protocols */
+ ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
+ protocol_id, version->vendor_id);
+ if (ret) {
+ pr_warn("Problem loading module for protocol 0x%x\n",
+ protocol_id);
+ return NULL;
+ }
+
+ /* Lookup again, once modules loaded */
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ }
+
+ if (proto)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
const struct scmi_protocol *proto = NULL;
@@ -283,10 +324,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
proto = xa_load(&scmi_protocols, protocol_id);
else
- proto = scmi_vendor_protocol_lookup(protocol_id,
- version->vendor_id,
- version->sub_vendor_id,
- version->impl_ver);
+ proto = scmi_vendor_protocol_get(protocol_id, version);
+
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -294,11 +333,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
- if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
- pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
- protocol_id, proto->vendor_id ?: "",
- proto->sub_vendor_id ?: "", proto->impl_ver);
-
return proto;
}
@@ -366,7 +400,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return ret;
}
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+ pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
+ proto->id, proto->vendor_id, proto->sub_vendor_id,
+ proto->impl_ver);
return 0;
}
@@ -3028,7 +3064,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
int ret;
trans = dev_get_platdata(dev);
- if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ if (!trans || !trans->supplier || !trans->core_ops)
return NULL;
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
@@ -3043,33 +3079,33 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
- &trans->desc->max_rx_timeout_ms);
+ &trans->desc.max_rx_timeout_ms);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
- &trans->desc->max_msg_size);
+ &trans->desc.max_msg_size);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
ret = of_property_read_u32(dev->of_node, "arm,max-msg",
- &trans->desc->max_msg);
+ &trans->desc.max_msg);
if (ret && ret != -EINVAL)
dev_err(dev, "Malformed arm,max-msg DT property.\n");
dev_info(dev,
"SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
- trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
- trans->desc->max_msg);
+ trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
+ trans->desc.max_msg);
/* System wide atomic threshold for atomic ops .. if any */
if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
- &trans->desc->atomic_threshold))
+ &trans->desc.atomic_threshold))
dev_info(dev,
"SCMI System wide atomic threshold set to %u us\n",
- trans->desc->atomic_threshold);
+ trans->desc.atomic_threshold);
- return trans->desc;
+ return &trans->desc;
}
static int scmi_probe(struct platform_device *pdev)
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 9e89a6a763da..7cc0d616b8de 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -886,10 +886,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
- u8 id;
struct scmi_raw_mode_info *raw;
struct scmi_dbg_raw_data *rd;
- const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
if (!inode->i_private)
return -ENODEV;
@@ -915,8 +913,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
}
/* Grab channel ID from debugfs entry naming if any */
- if (!kstrtou8(id_str, 16, &id))
- rd->chan_id = id;
+ /* not set - reassigning 0 we already had after kzalloc() */
+ rd->chan_id = debugfs_get_aux_num(filp);
rd->raw = raw;
filp->private_data = rd;
@@ -1225,10 +1223,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
snprintf(cdir, 8, "0x%02X", channels[i]);
chd = debugfs_create_dir(cdir, top_chans);
- debugfs_create_file("message", 0600, chd, raw,
+ debugfs_create_file_aux_num("message", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_fops);
- debugfs_create_file("message_async", 0600, chd, raw,
+ debugfs_create_file_aux_num("message_async", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
}
}
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index b66df2981456..bd041c99b92b 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -378,6 +378,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
scmi_mailbox_desc, scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f632a62cfb3e..21abb571e4f2 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -301,6 +301,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "qcom,scmi-smc" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index 41aea33776a9..cb934db9b2b4 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -921,6 +921,7 @@ static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
index 17799eacf06c..aa176c1a5eef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -374,10 +374,11 @@ static const struct scmi_protocol scmi_imx_bbm = {
.ops = &scmi_imx_bbm_proto_ops,
.events = &scmi_imx_bbm_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_bbm);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI BBM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a86ab9b35953..83b69fc4fba5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -309,10 +309,11 @@ static const struct scmi_protocol scmi_imx_misc = {
.ops = &scmi_imx_misc_proto_ops,
.events = &scmi_imx_misc_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_misc);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI MISC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..0a883091259a 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,21 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+ depends on KUNIT && REGMAP
+ select FW_CS_DSP
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT && REGMAP
+ default KUNIT_ALL_TESTS
+ select FW_CS_DSP
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..49d84f7e59e6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, strlen(info));
+ info = tmp;
+ }
+
+ cs_dsp_mock_bin_add_raw_block(builder, 0, 0, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..161272e47bda
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* ZM */
+ 0x2000, /* XM */
+ 0x2000, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: Unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_id_hdr adsp2;
+ struct wmfw_v3_id_hdr halo;
+ } hdr;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
+ return be32_to_cpu(hdr.adsp2.ver);
+ case WMFW_HALO:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
+ return be32_to_cpu(hdr.halo.ver);
+ default:
+ KUNIT_FAIL(priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
+ "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ regmap_raw_read(priv->dsp->regmap,
+ xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs);
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+ /* Assume we're starting with bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+ header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
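+
+/*
+ * Illustrative usage (a minimal sketch, not part of the exported API):
+ * a test case would typically create the dummy XM header and write it to
+ * the cached regmap before powering up cs_dsp. The mock_algs array here
+ * is assumed to be defined by the test case.
+ *
+ *   struct cs_dsp_mock_xm_header *xm_hdr;
+ *
+ *   xm_hdr = cs_dsp_create_mock_xm_header(priv, mock_algs, ARRAY_SIZE(mock_algs));
+ *   KUNIT_ASSERT_NOT_NULL(priv->test, xm_hdr);
+ *   KUNIT_ASSERT_EQ(priv->test, cs_dsp_mock_xm_header_write_to_regmap(xm_hdr), 0);
+ */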
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0xffe30, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0x1130, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+ { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
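+
+/*
+ * For example (illustrative numbers only): on a Halo Core regmap, where
+ * each register holds 4 bytes, dropping 10 bytes drops 10 / 4 = 2 whole
+ * registers; the 2 trailing bytes are left in the cache, so an incorrect
+ * length calculation in a test shows up as unexpected dirty registers.
+ */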
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true the DSP system regs will be dropped from
+ * the cache before checking for dirty entries.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Return: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
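+
+/*
+ * Illustrative usage (a minimal sketch): after powering up cs_dsp with a
+ * mock firmware, a test drops every register range it expected to be
+ * written, then confirms nothing else was dirtied. reg_addr and len_bytes
+ * are assumed to have been calculated by the test case.
+ *
+ *   cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, len_bytes);
+ *   cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ *   KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+ */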
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success the priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
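+
+/*
+ * Illustrative usage (a minimal sketch): the test case chooses which DSP
+ * to simulate by setting the type and rev fields before calling
+ * cs_dsp_mock_regmap_init(). The cs_dsp and cs_dsp_test structures are
+ * assumed to be owned and initialized by the test case.
+ *
+ *   priv->dsp->type = WMFW_HALO;
+ *   priv->dsp->rev = 0;
+ *   KUNIT_ASSERT_EQ(priv->test, cs_dsp_mock_regmap_init(priv), 0);
+ */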
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5a3ac03ac37f
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a zero-padded copy with length a multiple of 4 */
+ tmp = kunit_kzalloc(builder->test_priv->test, round_up(info_len, 4), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ /* Copy only the original string; the padding stays zeroed */
+ memcpy(tmp, info, info_len);
+ info = tmp;
+ info_len = round_up(info_len, 4);
+ }
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+ bytes_needed = sizeof(__le32) * 2; /* type, offset and size */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32) * 2; /* flags, type and length */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ /* If format version isn't given use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
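+
+/*
+ * Illustrative usage (a minimal sketch): a test typically creates a
+ * builder, optionally describes an algorithm and its coefficients, then
+ * wraps the result in a struct firmware to pass to cs_dsp. The alg id and
+ * coeff_def shown here are assumed to be supplied by the test case.
+ *
+ *   struct cs_dsp_mock_wmfw_builder *builder;
+ *   struct firmware *fw;
+ *
+ *   builder = cs_dsp_mock_wmfw_init(priv, -1);
+ *   cs_dsp_mock_wmfw_start_alg_info_block(builder, 0xfafa, "ALG1", NULL);
+ *   cs_dsp_mock_wmfw_add_coeff_desc(builder, &coeff_def);
+ *   cs_dsp_mock_wmfw_end_alg_info_block(builder);
+ *   fw = cs_dsp_mock_wmfw_get_firmware(builder);
+ */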
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..1e161bbc5b4a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create an XM header with an algorithm list in the cached regmap.
+ * 3) Create a dummy wmfw file to satisfy cs_dsp.
+ * 4) Create bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Read back the cached values of the registers that should have been
+ * written and check they have the correct values.
+ * 7) All the registers that are expected to have been written are dropped from
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP, pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address is different from unpacked registers.
+ * Packed registers can only be accessed as a group of
+ * multiple registers, therefore can only read/write a group
+ * of multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
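+
+/*
+ * A worked example of the unpacked addressing above (illustrative values
+ * only): unpacked DSP word 10 of XM is at register XM_base + (10 * 4) on a
+ * Halo Core, at XM_base + (10 * 2) on a newer 32-bit ADSP2, and occupies
+ * the two 16-bit registers starting at XM_base + (10 * 2) on an older
+ * ADSP2. The helper cs_dsp_mock_reg_addr_inc_per_unpacked_word() returns
+ * the per-word register increment so the tests below don't hard-code
+ * these strides.
+ */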
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
+struct bin_test_param {
+ const char *name;
+ int mem_type;
+ unsigned int offset_words;
+ int alg_idx;
+};
+
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
+
+/*
+ * Convert a number of DSP words to the number of packed registers,
+ * rounded down to the nearest register.
+ * There are 3 registers for every 4 packed words.
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
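+
+/*
+ * For example (illustrative numbers only): DSP words 12..15 form one packed
+ * group, and _num_words_to_num_packed_regs(12) == (12 * 3) / 4 == 9, so on
+ * a Halo Core that group starts at register XM_PACKED_base + (9 * 4) and
+ * spans packed registers 9, 10 and 11.
+ */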
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of consecutive
+ * words, but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+ /* Add words to XM, YM and ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
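+/*
+ * The unpacked portion of these mixed patches uses simpler addressing:
+ * in the unpacked view of the same HALO memory each DSP word occupies
+ * one 32-bit register, so the bin payload offset is the word offset from
+ * the algorithm base multiplied by 4 and the register address is the
+ * unpacked mem base plus (word position * 4).  That is why the word
+ * immediately after a packed block ending at patch_pos_words + 3 is
+ * patched at offset ((patch_pos_words + 4) - alg_base_words) * 4 above.
+ */
+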
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
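+/*
+ * Taken together, the leading and trailing cases above cover patches
+ * that do not start or end on a packed boundary: the overhang of one to
+ * three DSP words is expressed either as individual unpacked words or as
+ * a single unpacked block, with the aligned remainder written as one
+ * packed block.  The tests that follow move on to multiple packed
+ * payloads within a single bin file.
+ */
+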
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
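+/*
+ * Because the eight payloads above patch consecutive packed blocks, the
+ * whole patched range is contiguous, so it can be verified with a single
+ * regmap_raw_read() of sizeof(readback) (8 * 3 * 4 = 96 bytes) starting
+ * at the first patched packed register.
+ */
+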
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
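+/*
+ * The unordered and sparse variants above exercise the fact that each
+ * data block in a bin file carries its own offset: nothing requires the
+ * payloads to be contiguous or sorted by address, which is why the
+ * sparse case reads back and verifies each block individually.
+ */
+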
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /* For each algorithm patch one packed block with values from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+ * For each algorithm index in alg_order[] patch one packed block in
+ * that algorithm with values from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that are on a packed boundary
+ * are written as a packed block. Offsets that are not on a packed boundary
+ * are written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+ * If the offset is on a packed boundary use a packed payload else
+ * use an unpacked word
+ */
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+ * Read back the packed registers that should have been written.
+ * Place the values into the expected location in readback[] so
+ * that the content of readback[] should match payload->packed[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+ * Read back the unpacked registers that should have been written.
+ * Place the values into the expected location in readback[] so
+ * that the content of readback[] should match payload->unpacked[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ ((patch_pos_words) * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
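+/*
+ * As well as patch data, bin files can carry informational blocks (a
+ * name string and one or more info strings).  The test below checks that
+ * such metadata blocks, including one large enough to need a 512-byte
+ * buffer, are accepted and skipped over without disturbing the patch
+ * that follows them.
+ */
+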
+/* Bin file with name and multiple info blocks */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
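+ /*
+ * Append the marker string repeatedly until strlcat() reports that
+ * the result no longer fits in the 512-byte buffer.
+ */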
+ for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512; )
+ ;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
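+/* (offsets 0..4 and 20..23 appear chosen to cover every offset % 4 alignment) */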
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/* Parameterize on choice of packed XM or YM with a range of word offsets */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory tests */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..5dcf62f19faf
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+
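+ /*
+ * A valid coefficient bin is expected to begin with the "WMDR"
+ * magic; each value written below differs from it in at least one
+ * byte, so every load should fail.
+ */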
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it as a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
+
+ { } /* terminator */
+};
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
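+/*
+ * Register offset (relative to dsp->base) and status bit used by the
+ * watchdog tests below; cs_dsp_adsp2_bus_error() is expected to treat
+ * this bit as a watchdog timeout indication.
+ */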
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+ /* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..83386cc978e3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
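+/* Action wrappers so these cleanup functions can be registered with kunit_add_action() */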
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Memory allocated for control cache must be large enough.
+ * This creates multiple controls of different sizes, so it only works
+ * on wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
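+ /* Name each control after its length in hex so every name is unique */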
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
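+ /* Advance the offset so the next control does not overlap this one */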
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled so should be
+ * readable through the control.
+ */
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmwares with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware, all with the same alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmwares with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms.
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
+ def.length_bytes = 8;
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
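+ /* Word offsets 0, 5 and 8 match the offset_dsp_words of CtlA, CtlB and CtlC above */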
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the DSP has been powered-up and
+ * then powered-down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, then the DSP has been powered-down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data in the cache, not from the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Change the values in the registers backing the control then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Control should readback the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Control should readback from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This is for backwards compatibility with old firmware versions)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Control should readback from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control; it should be written through to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * If the control is write-only, the cache will have been zero-initialized,
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started, stopped, and then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP and start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop firmware and zero the registers backing the control */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Write new data to the control, it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power-down DSP then power-up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the firmware again, the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run,
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_EXPECT_TRUE(test, ctl->set);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should readback the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
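+ /*
+ * The per-variant init functions below fill in a mock Halo or ADSP2
+ * register layout (memory regions and base addresses taken from the
+ * mock data) and then hand off to cs_dsp_ctl_cache_test_common_init()
+ * with the wmfw format version to be tested.
+ */
+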
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
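+ /*
+ * Each KUNIT_ARRAY_PARAM() below generates a <name>_gen_params()
+ * parameter generator from its array of test cases. Those generators
+ * are referenced by KUNIT_CASE_PARAM() in the test case tables, and
+ * cs_dsp_ctl_all_param_desc() formats the description reported for
+ * each generated case.
+ */
+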
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile write-only control of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
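+/*
+ * Note: each KUNIT_CASE_PARAM() entry below pairs a test function with one
+ * of the *_gen_params generators defined by KUNIT_ARRAY_PARAM() above, so
+ * the same test body runs once for every entry in that parameter table.
+ */
+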
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
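+/*
+ * Each suite below pairs one of the test case arrays above with the init
+ * callback for the corresponding core type and wmfw format version.
+ */
+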
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..cb90964740ea
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
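+/*
+ * Wrappers so that put_device() and cs_dsp_remove() can be registered as
+ * deferred KUnit cleanup actions.
+ */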
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
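+/* Mock algorithms referenced by the alg-info blocks built in these tests */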
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
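+/*
+ * Each test takes a local copy of mock_coeff_template and overrides only
+ * the fields relevant to that test before adding the coefficient
+ * descriptor to the mock wmfw.
+ */
+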
+/* Algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a maximum length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *name;
+
+ name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+ def.fullname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Short name from coeff descriptor should be used as control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a maximum length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char *name;
+ struct firmware *wmfw;
+
+ name = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+
+ def.shortname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *description;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name and description from coeff descriptor are variable-length
+ * fields, so they affect the position of subsequent fields.
+ * Test with a maximum length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname, *description;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
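+/* Test names of 1..15 characters, covering every 4-byte padding remainder */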
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
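+/*
+ * Lookup test names. Several are substrings or near-matches of others,
+ * which should exercise exact-name matching in cs_dsp_get_ctl().
+ */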
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to lookup various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmwares and
+ * test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the values of the (alg id, memory type) tuple are parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This test creates multiple algorithms with one control each, which
+ * works for both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This tests >=V2 format firmware, which can have multiple named
+ * controls in the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
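+/* Control names where some are prefixes or extensions of others */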
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames, the full length of both strings is
+ * considered, not only the characters of the shorter string, so
+ * "LEFT" is not treated as the same name as "LEFT2".
+ * This specifically tests for the bug that was fixed by commit:
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check")
+ */
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
+
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
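+/* Parameterize tests over each DSP memory region type (XM, YM, ZM) */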
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+/* Note: some control types mandate specific flag settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
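+/* Invalid combinations of control type and flags */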
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOSTBUFFER must be system + volatile + readable or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE},
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
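+/* Test cases for wmfw format V1 files */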
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
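+/* Test cases for wmfw format V2 and V3 files */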
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
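+/*
+ * Wrappers so that put_device(), cs_dsp_stop() and cs_dsp_remove()
+ * can be registered as deferred KUnit cleanup actions.
+ */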
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
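+/* Parameters for the parameterized control read/write test cases */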
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
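+/* Dummy algorithm entries spanning a range of IDs, versions and memory layouts */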
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
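+/* Default control definition; tests copy this and override fields as needed */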
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
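+/* Find the index of the entry in cs_dsp_ctl_rw_test_algs[] with the given algorithm ID */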
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
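+/* Return the base (in DSP words) of an algorithm's region in the given memory type */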
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
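+/* Build a second wmfw containing only the dummy XM header */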
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Write new data to the control, it should be written to the registers
+ * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate
+ * that the control content changed.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control, it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /*
+ * Change the register content and read the control, it should return
+ * the new register content
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control, it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* There should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* There should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
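+
+ /* Read from every possible seek offset and check the data returned from that offset */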
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
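+
+ /* Read from every possible seek offset and check the data returned from that offset */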
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
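+
+ /* Write at every possible seek offset and check that only data after the offset changed */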
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
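+
+ /* Write at every possible seek offset and check that only data after the offset changed */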
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
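+
+ /* Attempt to read from a seek offset beyond the end of the control data */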
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
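+
+ /* Attempt to read one byte more than the length of the control */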
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
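+
+ /* Attempt to write at a seek offset beyond the end of the control data */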
+ get_random_bytes(reg_vals, def.length_bytes);
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
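+
+ /* Attempt to write one byte more than the length of the control */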
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag only indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
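+
+ /* Attempt to write the read-only control */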
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* a control with flags == 0 is volatile while the firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* a control with flags == 0 is volatile while the firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
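+/* Test cases for ADSP2 cores, which have a ZM region in addition to XM and YM */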
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
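+/* Test cases for Halo Core, which has no ZM region */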
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Read back the cached values of the registers that should have been
+ *    written and check that they have the correct values.
+ * 5) All the registers that are expected to have been written are dropped from
+ *    the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty, there have been unexpected writes.
+ */
+
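+/* Wrappers so these cleanup functions can be deferred with kunit_add_action_or_reset() */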
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
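+/*
+ * Test case parameters: num_blocks is the payload length in minimum-sized
+ * register blocks, mem_type selects the target DSP memory region.
+ */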
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
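+/* Mock algorithm descriptor, used to generate the dummy XM header */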
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/*
+ * Test a wmfw file that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+ /* XM header payload was added to wmfw by test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload of length param->num_blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write several smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several smallest possible payloads of the given memory type
+ * in reverse address order
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads of length param->num_blocks.
+ * The payloads are not in address order and collectively do not patch
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
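+ /* Payload positions (in payload-sized blocks), deliberately not contiguous or in address order */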
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each at "random" locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to a multiple of the packed block length.
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to a multiple of the packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to a multiple of the packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to a multiple of the packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to a multiple of the packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ /* Add a bigger info block, then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
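+ /* Repeatedly append the marker string until the 512-byte buffer is full */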
+ for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512;)
+ ;
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
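+ /* Halo also has unpacked XM and YM, which use the ADSP2 memory type IDs */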
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
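+ /*
+ * The magic marker should be "WMFW". Try the magic of a different
+ * file type ("WMDR"), corrupt each character in turn, and zero the
+ * whole marker. All of these must be rejected.
+ */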
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
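+ /*
+ * Trim the file one byte at a time down to empty. Every truncated
+ * length is too short for a valid header and must be rejected.
+ */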
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
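+ /*
+ * Try every length shorter than the real header length, several
+ * values slightly longer, and some extreme values. All of them
+ * must be rejected with -EOVERFLOW.
+ */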
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
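+ /* None of these core type values match the DSP under test */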
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
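+ /*
+ * Trim the file one byte at a time until only the original header
+ * remains. Each truncated length cuts into the added block and
+ * must be rejected.
+ */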
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that strnlen() returns
+ * sizeof(alg_data->name), i.e. the name array has no NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set name string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
+ coeff = (void *)alg_data->data;
+
+ /* Write a string to the coeff name that overflows the array */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow coeff->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that strnlen() returns
+ * sizeof(coeff->name), i.e. the name array has no NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+ /*
+ * V1 controls do not have names, but cs_dsp parses the name
+ * field. It should load the file successfully and create the
+ * control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of coeff name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
+ shortlen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
+
+ /* Fullname increases in blocks of __le32 so increase past the current __le32 */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+ /* Description increases in blocks of __le32 so increase past the current __le32 */
+ description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+ description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be a XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 937be269fee8..13ea141c0def 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
return 0;
}
-static int __init match_pci_dev(struct device *dev, void *data)
+static int __init match_pci_dev(struct device *dev, const void *data)
{
- unsigned int devfn = *(unsigned int *)data;
+ unsigned int devfn = *(const unsigned int *)data;
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 60c64b81d2c3..8296bf985d1d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -148,9 +148,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_X86))
- str = efi_systab_show_arch(str);
-
return str - buf;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ed4e8ddbe76a..1141cd06011f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index c0c81ca4237e..fd6dc790c5a8 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len;
+ char *buf __free(efi_pool) = NULL;
efi_status_t status;
- char *str, *buf;
+ size_t len;
+ char *str;
if (!cmdline)
return EFI_SUCCESS;
@@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_parse_option_graphics(val + strlen("efifb:"));
}
}
- efi_bs_call(free_pool, buf);
return EFI_SUCCESS;
}
@@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
u64, const union efistub_event *);
struct { u32 hash_log_extend_event; } mixed_mode;
} method;
- struct efistub_measured_event *evt;
+ struct efistub_measured_event *evt __free(efi_pool) = NULL;
int size = struct_size(evt, tagged_event.tagged_event_data,
events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
@@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
if (status == EFI_SUCCESS)
return EFI_SUCCESS;
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 382b54f40603..874f63b4a383 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,6 +10,7 @@
*/
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si)
static struct screen_info *setup_graphics(void)
{
- efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- struct screen_info *si = NULL;
+ struct screen_info *si, tmp = {};
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info();
- if (!si)
- return NULL;
- status = efi_setup_gop(si, &gop_proto, size);
- if (status != EFI_SUCCESS) {
- free_screen_info(si);
- return NULL;
- }
- }
+ if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ return NULL;
+
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+
+ *si = tmp;
return si;
}
@@ -112,8 +104,8 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
+ char *cmdline __free(efi_pool) = NULL;
efi_status_t status;
- char *cmdline;
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -128,25 +120,24 @@ efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
status = efi_parse_options(cmdline);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse EFI load options\n");
+ return status;
+ }
}
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
cmdline[0] == 0) {
status = efi_parse_options(CONFIG_CMDLINE);
- if (status != EFI_SUCCESS)
- goto fail_free_cmdline;
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse built-in command line\n");
+ return status;
+ }
}
- *cmdline_ptr = cmdline;
+ *cmdline_ptr = no_free_ptr(cmdline);
return EFI_SUCCESS;
-
-fail_free_cmdline:
- efi_err("Failed to parse options\n");
- efi_bs_call(free_pool, cmdline);
- return status;
}
efi_status_t efi_stub_common(efi_handle_t handle,
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 76e44c185f29..d96d4494070d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -4,6 +4,7 @@
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/kern_levels.h>
@@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
+#define for_each_efi_handle(handle, array, num) \
+ for (int __i = 0; __i < (num) && \
+ ((handle = efi_get_handle_at((array), __i)) || true); \
+ __i++)
static inline
void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
-#define EFI_MMAP_NR_SLACK_SLOTS 8
+#define EFI_MMAP_NR_SLACK_SLOTS 32
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
@@ -314,7 +314,9 @@ union efi_boot_services {
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
- void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t **);
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
@@ -1053,6 +1055,7 @@ void efi_puts(const char *str);
__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T));
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
@@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+efi_status_t efi_setup_gop(struct screen_info *si);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index ea5da307d542..3785fb4986b4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option)
static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
unsigned long info_size;
-
u32 max_mode, cur_mode;
+ efi_status_t status;
int pf;
mode = efi_table_attr(gop, mode);
@@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cur_mode;
}
- status = efi_call_proto(gop, query_mode, cmdline.mode,
- &info_size, &info);
+ status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info);
if (status != EFI_SUCCESS) {
efi_err("Couldn't get mode information\n");
return cur_mode;
}
pf = info->pixel_format;
-
- efi_bs_call(free_pool, info);
-
if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
efi_err("Invalid PixelFormat\n");
return cur_mode;
@@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cmdline.mode;
}
+static u32 choose_mode(efi_graphics_output_protocol_t *gop,
+ bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *),
+ void *ctx)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
+
+ for (u32 m = 0; m < max_mode; m++) {
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
+ unsigned long info_size;
+ efi_status_t status;
+
+ status = efi_call_proto(gop, query_mode, m, &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (match(info, m, ctx))
+ return m;
+ }
+ return (unsigned long)ctx;
+}
+
static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
{
if (pixel_format == PIXEL_BIT_MASK) {
@@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
return 32;
}
-static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+static bool match_res(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
{
- efi_status_t status;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- mode = efi_table_attr(gop, mode);
+ return cmdline.res.width == info->horizontal_resolution &&
+ cmdline.res.height == info->vertical_resolution &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi));
+}
- cur_mode = efi_table_attr(mode, mode);
- info = efi_table_attr(mode, info);
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ if (match_res(efi_table_attr(mode, info), cur_mode, NULL))
return cur_mode;
- max_mode = efi_table_attr(mode, max_mode);
-
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return choose_mode(gop, match_res, (void *)cur_mode);
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+struct match {
+ u32 mode;
+ u32 area;
+ u8 depth;
+};
- efi_bs_call(free_pool, info);
+static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ u32 area = info->horizontal_resolution * info->vertical_resolution;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
+ u8 depth = pixel_bpp(pf, pi);
+ struct match *m = ctx;
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
- return m;
- }
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- efi_err("Couldn't find requested mode\n");
+ if (area > m->area || (area == m->area && depth > m->depth))
+ *m = (struct match){ mode, area, depth };
- return cur_mode;
+ return false;
}
static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode, best_mode, area;
- u8 depth;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h, a;
- u8 d;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ struct match match = {};
- info = efi_table_attr(mode, info);
-
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- best_mode = cur_mode;
- area = w * h;
- depth = pixel_bpp(pf, pi);
+ choose_mode(gop, match_auto, &match);
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return match.mode;
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ u32 cur_mode = (unsigned long)ctx;
+ int pf = info->pixel_format;
+ const char *dstr;
+ u8 depth = 0;
+ bool valid;
- efi_bs_call(free_pool, info);
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- a = w * h;
- if (a < area)
- continue;
- d = pixel_bpp(pf, pi);
- if (a > area || d > depth) {
- best_mode = m;
- area = a;
- depth = d;
- }
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ depth = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
- return best_mode;
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ mode,
+ (mode == cur_mode) ? '*' : ' ',
+ !valid ? '-' : ' ',
+ info->horizontal_resolution,
+ info->vertical_resolution,
+ dstr, depth);
+
+ return false;
}
static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
- u8 d;
- const char *dstr;
- bool valid;
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
efi_input_key_t key;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ efi_status_t status;
efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
efi_puts(" * = current mode\n"
" - = unusable mode\n");
- for (m = 0; m < max_mode; m++) {
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- efi_bs_call(free_pool, info);
-
- valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- d = 0;
- switch (pf) {
- case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
- dstr = "rgb";
- break;
- case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
- dstr = "bgr";
- break;
- case PIXEL_BIT_MASK:
- dstr = "";
- d = pixel_bpp(pf, pi);
- break;
- case PIXEL_BLT_ONLY:
- dstr = "blt";
- break;
- default:
- dstr = "xxx";
- break;
- }
-
- efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
- m,
- m == cur_mode ? '*' : ' ',
- !valid ? '-' : ' ',
- w, h, dstr, d);
- }
+ choose_mode(gop, match_list, (void *)cur_mode);
efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
@@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_graphics_output_protocol_t *
-find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *find_gop(unsigned long num,
+ const efi_handle_t handles[])
{
efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
- int i;
first_gop = NULL;
- for_each_efi_handle(h, handles, size, i) {
+ for_each_efi_handle(h, handles, num) {
efi_status_t status;
efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
-
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
void *dummy = NULL;
- status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID,
+ (void **)&gop);
if (status != EFI_SUCCESS)
continue;
@@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
if (status == EFI_SUCCESS)
return gop;
@@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
return first_gop;
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+efi_status_t efi_setup_gop(struct screen_info *si)
{
- efi_graphics_output_protocol_t *gop;
+ efi_handle_t *handles __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
+ efi_graphics_output_protocol_t *gop;
+ efi_status_t status;
+ unsigned long num;
- gop = find_gop(proto, size, handles);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num,
+ &handles);
+ if (status != EFI_SUCCESS)
+ return status;
- /* Did we find any GOPs? */
+ gop = find_gop(num, handles);
if (!gop)
return EFI_NOT_FOUND;
@@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
return EFI_SUCCESS;
}
-
-/*
- * See if we have Graphics Output Protocol
- */
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
-{
- efi_status_t status;
- void **gop_handle = NULL;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&gop_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
- &size, gop_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- status = setup_gop(si, proto, size, gop_handle);
-
-free_handle:
- efi_bs_call(free_pool, gop_handle);
- return status;
-}
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 6318c40bda38..4bc963e999eb 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
*/
static bool check_image_region(u64 base, u64 size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
bool ret = false;
int map_offset;
@@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, map);
-
return ret;
}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 4f1fa302234d..9c82259eea81 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -20,10 +20,10 @@
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl)
{
+ struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL;
int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
: EFI_LOADER_DATA;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
- struct efi_boot_memmap *m, tmp;
efi_status_t status;
unsigned long size;
@@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
*/
status = efi_bs_call(install_configuration_table, &tbl_guid, m);
if (status != EFI_SUCCESS)
- goto free_map;
+ return status;
}
m->buff_size = m->map_size = size;
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
&m->desc_size, &m->desc_ver);
- if (status != EFI_SUCCESS)
- goto uninstall_table;
+ if (status != EFI_SUCCESS) {
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+ return status;
+ }
- *map = m;
+ *map = no_free_ptr(m);
return EFI_SUCCESS;
-
-uninstall_table:
- if (install_cfg_tbl)
- efi_bs_call(install_configuration_table, &tbl_guid, NULL);
-free_map:
- efi_bs_call(free_pool, m);
- return status;
}
/**
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index 99fb25d2bcf5..1dccf77958d3 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -16,37 +16,20 @@
void efi_pci_disable_bridge_busmaster(void)
{
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long pci_handle_size = 0;
- efi_handle_t *pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
+ unsigned long pci_handle_num;
efi_handle_t handle;
efi_status_t status;
u16 class, command;
- int i;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL) {
- if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- efi_err("Failed to locate PCI I/O handles'\n");
- return;
- }
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
- (void **)&pci_handle);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &pci_handle_num, &pci_handle);
if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to locate PCI I/O handles\n");
return;
}
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, pci_handle);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to locate PCI I/O handles'\n");
- goto free_handle;
- }
-
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
unsigned long segment_nr, bus_nr, device_nr, func_nr;
@@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void)
efi_bs_call(disconnect_controller, handle, NULL, NULL);
}
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
status = efi_bs_call(handle_protocol, handle, &pci_proto,
@@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_SUCCESS)
efi_err("Failed to disable PCI busmastering\n");
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
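
The PCI and GOP hunks also switch from the query-size/allocate/query-again dance to a single locate_handle_buffer call that allocates the handle array itself. A minimal user-space analogue of the two API shapes (names and payload are illustrative, not EFI's):

#include <stdlib.h>
#include <string.h>

/* Old shape: the caller sizes and fills the buffer across two calls. */
static int list_items_twostep(size_t *len, int *buf)
{
	static const int items[] = { 1, 2, 3 };

	if (buf == NULL || *len < sizeof(items)) {
		*len = sizeof(items);
		return -1;		/* "buffer too small" */
	}
	memcpy(buf, items, sizeof(items));
	*len = sizeof(items);
	return 0;
}

/* New shape: the callee allocates and hands back buffer and count. */
static int list_items_buffer(size_t *count, int **out)
{
	static const int items[] = { 1, 2, 3 };
	int *buf = malloc(sizeof(items));

	if (!buf)
		return -1;
	memcpy(buf, items, sizeof(items));
	*count = sizeof(items) / sizeof(items[0]);
	*out = buf;			/* caller frees (or uses __free()) */
	return 0;
}

int main(void)
{
	size_t bytes = 0, count = 0;
	int *handles = NULL;

	(void)list_items_twostep(&bytes, NULL);	/* first call: learn size  */
	if (list_items_buffer(&count, &handles) == 0)
		free(handles);			/* single-call shape       */
	return 0;
}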
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index c41e7b2091cd..e5872e38d9a4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -59,9 +59,9 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long alloc_min,
unsigned long alloc_max)
{
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- struct efi_boot_memmap *map;
efi_status_t status;
int map_offset;
@@ -130,7 +130,5 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, map);
-
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index d694bcfa1074..99b45d1cd624 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,14 +23,14 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
unsigned long nr_pages;
int i;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
/*
* Enforce minimum alignment that EFI or Linux requires when
@@ -79,11 +79,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
if (i == map->map_size / map->desc_size)
- status = EFI_NOT_FOUND;
+ return EFI_NOT_FOUND;
- efi_bs_call(free_pool, map);
-fail:
- return status;
+ return EFI_SUCCESS;
}
/**
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 188c8000d245..863910e9eefc 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol {
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
- struct pci_setup_rom *rom = NULL;
+ struct pci_setup_rom *rom __free(efi_pool) = NULL;
efi_status_t status;
unsigned long size;
uint64_t romsize;
@@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
rom->pcilen = romsize;
- *__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->vendor\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->devid\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
&rom->device, &rom->function);
if (status != EFI_SUCCESS)
- goto free_struct;
+ return status;
memcpy(rom->romdata, romimage, romsize);
- return status;
-
-free_struct:
- efi_bs_call(free_pool, rom);
- return status;
+ *__rom = no_free_ptr(rom);
+ return EFI_SUCCESS;
}
/*
@@ -119,38 +115,23 @@ free_struct:
static void setup_efi_pci(struct boot_params *params)
{
efi_status_t status;
- void **pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long size = 0;
struct setup_data *data;
+ unsigned long num;
efi_handle_t h;
- int i;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&pci_handle);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
- return;
- }
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
- }
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &num, &pci_handle);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- for_each_efi_handle(h, pci_handle, size, i) {
+ for_each_efi_handle(h, pci_handle, num) {
efi_pci_io_protocol_t *pci = NULL;
struct pci_setup_rom *rom;
@@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params)
data = (struct setup_data *)rom;
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -405,116 +383,13 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t
-setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- efi_uga_draw_protocol_t *uga = NULL, *first_uga;
- efi_handle_t handle;
- int i;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- first_uga = NULL;
- for_each_efi_handle(handle, uga_handle, size, i) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
-
- status = efi_bs_call(handle_protocol, handle, uga_proto,
- (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- pciio = NULL;
- efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
-
- if (!width && !height)
- goto free_handle;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
-
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
-
-free_handle:
- efi_bs_call(free_pool, uga_handle);
-
- return status;
-}
-
static void setup_graphics(struct boot_params *boot_params)
{
- efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- struct screen_info *si;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- void **uga_handle = NULL;
-
- si = &boot_params->screen_info;
- memset(si, 0, sizeof(*si));
+ struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &graphics_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- status = efi_setup_gop(si, &graphics_proto, size);
-
- if (status != EFI_SUCCESS) {
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &uga_proto, NULL, &size, uga_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- setup_uga(si, &uga_proto, size);
- }
+ efi_setup_gop(si);
}
-
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
{
efi_bs_call(exit, handle, status, 0, NULL);
@@ -737,7 +612,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
__u32 nr_desc;
@@ -751,13 +626,14 @@ static efi_status_t allocate_e820(struct boot_params *params,
EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
}
- if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
- status = allocate_unaccepted_bitmap(nr_desc, map);
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return allocate_unaccepted_bitmap(nr_desc, map);
- efi_bs_call(free_pool, map);
- return status;
+ return EFI_SUCCESS;
}
struct exit_boot_struct {
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index cc807ed35aed..1e509595ac03 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
_ret_; \
})
+#ifdef CONFIG_EFI
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
-#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(void)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 2b4c2826f572..574930729ddd 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -152,7 +152,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
- struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
void *args_virt __free(qcom_tzmem) = NULL;
@@ -173,6 +172,11 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
+
+ if (!mempool)
+ return -EINVAL;
+
args_virt = qcom_tzmem_alloc(mempool,
SCM_SMC_N_EXT_ARGS * sizeof(u64),
flag);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 72bf87ddcd96..f0569bb9411f 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -217,7 +217,10 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
- return __scm ? __scm->mempool : NULL;
+ if (!qcom_scm_is_available())
+ return NULL;
+
+ return __scm->mempool;
}
static enum qcom_scm_convention __get_convention(void)
@@ -1279,6 +1282,220 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
+bool qcom_scm_has_wrapped_key_support(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_DERIVE_SW_SECRET) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_GENERATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_PREPARE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_IMPORT_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
+
+/**
+ * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ * @sw_secret_size: size of the software secret to derive in bytes
+ *
+ * Derive a software secret from an ephemerally-wrapped key for software crypto
+ * operations. This is done by calling into the secure execution environment,
+ * which then calls into the hardware to unwrap and derive the secret.
+ *
+ * For more information on sw_secret, see the "Hardware-wrapped keys" section of
+ * Documentation/block/inline-encryption.rst.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ sw_secret_size,
+ GFP_KERNEL);
+ if (!sw_secret_buf)
+ return -ENOMEM;
+
+ memcpy(eph_key_buf, eph_key, eph_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[1] = eph_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
+ desc.args[3] = sw_secret_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(sw_secret, sw_secret_buf, sw_secret_size);
+
+ memzero_explicit(eph_key_buf, eph_key_size);
+ memzero_explicit(sw_secret_buf, sw_secret_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
+
+/**
+ * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Generate a key using the built-in HW module in the SoC. The resulting key is
+ * returned wrapped with the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
+
+/**
+ * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
+ * added protection. The resulting key will only be valid for the current boot.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ memcpy(lt_key_buf, lt_key, lt_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[3] = eph_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(eph_key, eph_key_buf, eph_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ memzero_explicit(eph_key_buf, eph_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
+
+/**
+ * qcom_scm_import_ice_key() - Import key for storage encryption
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
+ * wrap the raw key using the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ raw_key_size,
+ GFP_KERNEL);
+ if (!raw_key_buf)
+ return -ENOMEM;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ memcpy(raw_key_buf, raw_key, raw_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
+ desc.args[1] = raw_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[3] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(raw_key_buf, raw_key_size);
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
+
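
Taken together, the four new calls form a wrapped-key lifecycle: generate (or import) a long-term wrapped key once, re-wrap it with the per-boot ephemeral key before programming ICE, and derive a software secret from the ephemeral key for the upper layers. A hypothetical caller might look like the sketch below; WRAPPED_KEY_SIZE and SW_SECRET_SIZE are placeholders (the real sizes come from the SoC and the storage driver, not from this API), and a real user would keep the keys off the stack and zeroize them:

/* Illustrative only: sizes and error handling are simplified placeholders. */
#define WRAPPED_KEY_SIZE	128	/* hypothetical SoC wrapped-key size */
#define SW_SECRET_SIZE		32	/* hypothetical software-secret size */

static int example_ice_key_cycle(void)
{
	u8 lt_key[WRAPPED_KEY_SIZE];	/* long-term wrapped key (storable)  */
	u8 eph_key[WRAPPED_KEY_SIZE];	/* valid for the current boot only   */
	u8 sw_secret[SW_SECRET_SIZE];
	int err;

	if (!qcom_scm_has_wrapped_key_support())
		return -EOPNOTSUPP;

	/* One-time provisioning: let the HW generate a long-term key. */
	err = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
	if (err)
		return err;

	/* Every boot: re-wrap with the per-boot ephemeral key. */
	err = qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
				       eph_key, sizeof(eph_key));
	if (err)
		return err;

	/* Derive the software secret used above the hardware (e.g. fscrypt). */
	return qcom_scm_derive_sw_secret(eph_key, sizeof(eph_key),
					 sw_secret, sizeof(sw_secret));
}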
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
@@ -1768,18 +1985,23 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
+ any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "asus,vivobook-s15" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,omnibook-x14" },
+ { .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
{ .compatible = "microsoft,arcata", },
+ { .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
+ { .compatible = "qcom,x1p42100-crd" },
{ }
};
@@ -1867,7 +2089,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!READ_ONCE(__scm);
+ /* Paired with smp_store_release() in qcom_scm_probe */
+ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
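
The availability check now pairs smp_load_acquire() with the smp_store_release() in probe, so a reader that observes a non-NULL __scm is also guaranteed to observe every store made to it before publication (and the new err: path unpublishes it on failure). A minimal user-space analogue with C11 atomics (not the kernel primitives themselves):

#include <stdatomic.h>
#include <stddef.h>

struct scm_like {
	int ready_value;
};

static struct scm_like instance;
static _Atomic(struct scm_like *) published;

/* Probe side: initialise fully, then publish with release semantics. */
static void publish(void)
{
	instance.ready_value = 42;
	atomic_store_explicit(&published, &instance, memory_order_release);
}

/* Reader side: acquire load; a non-NULL result implies initialised fields. */
static int read_if_available(void)
{
	struct scm_like *p =
		atomic_load_explicit(&published, memory_order_acquire);

	return p ? p->ready_value : -1;
}

int main(void)
{
	publish();
	return read_if_available() == 42 ? 0 : 1;
}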
@@ -2024,18 +2247,22 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Let all above stores be available after this */
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- if (irq != -ENXIO)
- return irq;
+ if (irq != -ENXIO) {
+ ret = irq;
+ goto err;
+ }
} else {
ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", __scm);
- if (ret < 0)
- return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ if (ret < 0) {
+ dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ goto err;
+ }
}
__get_convention();
@@ -2054,14 +2281,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV)
- return dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+ goto err;
+ }
ret = qcom_tzmem_enable(__scm->dev);
- if (ret)
- return dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
+ if (ret) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+ goto err;
+ }
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = 0;
@@ -2069,9 +2300,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
pool_config.max_size = SZ_256K;
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool))
- return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ if (IS_ERR(__scm->mempool)) {
+ dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+ goto err;
+ }
/*
* Initialize the QSEECOM interface.
@@ -2087,6 +2320,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
return 0;
+
+err:
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ smp_store_release(&__scm, NULL);
+
+ return ret;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index e36b2f67607f..097369d38b84 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -128,6 +128,10 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07
+#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08
+#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09
+#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c5c78b869561..3c52cb73237a 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -967,18 +967,15 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
/* first client will create kernel thread */
if (!chan->ctrl->task) {
chan->ctrl->task =
- kthread_create_on_node(svc_normal_to_secure_thread,
- (void *)chan->ctrl,
- cpu_to_node(cpu),
- "svc_smc_hvc_thread");
+ kthread_run_on_cpu(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu, "svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
- kthread_bind(chan->ctrl->task, cpu);
- wake_up_process(chan->ctrl->task);
}
pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
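
The stratix10 change leans on kthread_run_on_cpu() bundling what the driver previously open-coded. As far as the kthread API goes, the helper behaves roughly like the sketch below (an approximation, not the exact implementation in <linux/kthread.h>, which also records the CPU for hotplug and formats the thread name with the CPU number):

/* Sketch: create on the CPU's NUMA node, bind to that CPU, then wake. */
static struct task_struct *run_on_cpu_sketch(int (*threadfn)(void *),
					     void *data, unsigned int cpu,
					     const char *name)
{
	struct task_struct *t;

	t = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), "%s", name);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);
		wake_up_process(t);
	}
	return t;
}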
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..5aa7b8884374 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@
#include "dfl-afu.h"
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
afu->dma_regions = RB_ROOT;
}
/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;
ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ unlock_vm:
/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node **new, *parent = NULL;
- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);
new = &afu->dma_regions.rb_node;
@@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
struct dfl_afu *afu;
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}
/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;
while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
rb_erase(node, &afu->dma_regions);
if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);
if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
node = rb_next(node);
kfree(region);
@@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}
/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;
@@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;
/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}
/* Only accept continuous pages, return error else */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}
/* As pages are continuous then start to do DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}
*iova = region->iova;
- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}
return 0;
unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +372,34 @@ free_region:
/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;
- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}
if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);
return 0;
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)
/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;
- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;
/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);
/* Clear errors if err input matches with current port errors.*/
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);
/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);
done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;
return attr->mode;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 2fd4f07ed081..3bf8e7338dbe 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@
/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);
- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}
@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)
/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, it puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
@@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempts on MMIO access to AFU while in reset, will
* result errors reported via port error reporting sub feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;
- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}
static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
@@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +180,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +221,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -241,15 +242,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +259,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +297,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -317,18 +318,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -338,15 +339,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +357,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);
- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;
return attr->mode;
@@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
@@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
return 0;
}
-static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
void __user *arg)
{
struct dfl_fpga_port_region_info rinfo;
@@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;
- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
if (ret)
return ret;
@@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
@@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;
- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;
if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}
- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
}
static long
-afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_unmap unmap;
unsigned long minsz;
@@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(fdata, unmap.iova);
}
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *f;
long ret;
dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return afu_ioctl_check_extension(pdata, arg);
+ return afu_ioctl_check_extension(fdata, arg);
case DFL_FPGA_PORT_GET_INFO:
- return afu_ioctl_get_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_GET_REGION_INFO:
- return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_region_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_MAP:
- return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ return afu_ioctl_dma_map(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_UNMAP:
- return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
default:
/*
* Let sub-feature's ioctl function to handle the cmd
@@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 and other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = {
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_feature_dev_data *fdata;
struct dfl_afu_mmio_region region;
u64 offset;
int ret;
@@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
if (ret)
return ret;
@@ -851,46 +854,45 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_afu *afu;
afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
if (!afu)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);
return 0;
}
static int afu_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
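
As a point of reference for the conversion pattern above, a converted port sysfs handler ends up with roughly the following shape. This is an illustrative sketch, not part of the patch: example_status_show and the 0x10 register offset are made up, while to_dfl_feature_dev_data(), the fdata-based dfl_get_feature_ioaddr_by_id() and fdata->lock are the helpers this series switches the real handlers to.

static ssize_t example_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + 0x10);		/* placeholder register offset */
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)v);
}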
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 2e7b41629406..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@
/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
INIT_LIST_HEAD(&afu->regions);
}
@@ -39,7 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
@@ -47,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;
- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
@@ -63,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
/* check if @index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -80,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}
/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;
list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}
/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -118,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -127,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -144,7 +145,7 @@ exit:
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -152,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -162,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
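
A usage sketch of the converted region API, assuming the fdata-based prototypes above; the index, size and physical address values are arbitrary placeholders:

static int example_region_setup(struct dfl_feature_dev_data *fdata)
{
	struct dfl_afu_mmio_region region;
	int ret;

	/* add one 4 KiB MMIO region for the AFU */
	ret = afu_mmio_region_add(fdata, DFL_PORT_REGION_INDEX_AFU,
				  SZ_4K, 0xfed00000,
				  DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ);
	if (ret)
		return ret;

	/* look it back up by index; the copy is taken under fdata->lock */
	return afu_mmio_region_get_by_index(fdata, DFL_PORT_REGION_INDEX_AFU,
					    &region);
}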
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 7bef3e300aa2..03be4f0969c7 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -76,27 +76,27 @@ struct dfl_afu {
struct rb_root dma_regions;
};
-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when calling __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);
extern const struct dfl_feature_ops port_err_ops;
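
The DMA helpers follow the same calling convention. A minimal round-trip sketch under the fdata-based prototypes declared above (user_addr and length come from the caller; error handling is trimmed for brevity):

static int example_dma_roundtrip(struct dfl_feature_dev_data *fdata,
				 u64 user_addr, u64 length)
{
	u64 iova;
	int ret;

	ret = afu_dma_map_region(fdata, user_addr, length, &iova);
	if (ret)
		return ret;

	/* ... hand iova to the accelerator ... */

	return afu_dma_unmap_region(fdata, iova);
}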
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 950c606c59d4..28b0f9d062ac 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};
static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;
- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;
- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}
- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;
priv->port_ops = ops;
}
- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}
static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -85,8 +85,6 @@ static void fme_br_remove(struct platform_device *pdev)
fpga_bridge_unregister(br);
- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
}
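
Because the bridge now caches a dfl_feature_dev_data pointer rather than a reference-counted platform device, the put_device() call removed from fme_br_remove() above is no longer needed. A sketch of the lookup as used by the bridge, with dfl_fpga_cdev_find_port_data() and the fdata-based dfl_fpga_check_port_id() assumed from this series:

static struct dfl_feature_dev_data *
example_find_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	/* no device reference is taken, so there is nothing to put on teardown */
	return dfl_fpga_cdev_find_port_data(cdev, &port_id,
					    dfl_fpga_check_port_id);
}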
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;
return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}
static int fme_global_err_init(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index f8d89a4a6ccb..8aca2fb20e87 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -28,10 +28,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -47,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_ID);
@@ -65,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_MD);
@@ -79,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -94,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -109,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -135,10 +141,10 @@ static const struct attribute_group fme_hdr_group = {
.attrs = fme_hdr_attrs,
};
-static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -147,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
return dfl_fpga_cdev_release_port(cdev, port_id);
}
-static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -163,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_FME_PORT_RELEASE:
- return fme_hdr_ioctl_release_port(pdata, arg);
+ return fme_hdr_ioctl_release_port(fdata, arg);
case DFL_FPGA_FME_PORT_ASSIGN:
- return fme_hdr_ioctl_assign_port(pdata, arg);
+ return fme_hdr_ioctl_assign_port(fdata, arg);
}
return -ENODEV;
@@ -411,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;
val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
switch (attr) {
case hwmon_power_max:
@@ -438,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -589,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
},
};
-static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -598,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static int fme_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
- filp->private_data = pdata;
+ dfl_feature_dev_use_count(fdata));
+ filp->private_data = fdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;
@@ -650,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return fme_ioctl_check_extension(pdata, arg);
+ return fme_ioctl_check_extension(fdata, arg);
default:
/*
* Let sub-feature's ioctl function to handle the cmd.
@@ -658,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -672,27 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme *fme;
fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
if (!fme)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);
return 0;
}
static void fme_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}
static const struct file_operations fme_fops = {
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index cdcf6dea4cc9..b878b260af38 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -164,16 +163,16 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
+ * @fdata: fme feature dev data
* @feature: sub feature info
- * @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;
@@ -209,11 +208,11 @@ create_mgr_err:
/**
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);
- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;
fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -274,11 +273,11 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
/**
* dfl_fme_destroy_bridges - destroy all fpga bridge platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, and error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;
@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);
/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);
/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;
/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);
/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}
static long fme_pr_ioctl(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c406b949026f..7022657243c0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -119,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}
-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -156,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list);
/**
* dfl_fpga_port_ops_get - get matched port ops from the global list
- * @pdev: platform device to match with associated port ops.
+ * @fdata: feature dev data to match with associated port ops.
* Return: matched port ops on success, NULL otherwise.
*
* Please note that must dfl_fpga_port_ops_put after use the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;
@@ -171,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -222,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
* dfl_fpga_check_port_id - check the port id
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
* @pport_id: port id to compare.
*
* Return: 1 if port device matches with given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;
- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;
- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;
- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);
- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
@@ -351,10 +339,10 @@ static void release_dfl_dev(struct device *dev)
}
static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -380,11 +368,11 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;
- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
ddev->dfh_version = feature->dfh_version;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->cdev = fdata->dfl_cdev;
if (feature->param_size) {
ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
if (!ddev->params) {
@@ -435,11 +423,11 @@ put_dev:
return ERR_PTR(ret);
}
-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -447,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}
-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;
@@ -462,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}
- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -474,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;
err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}
@@ -504,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
*/
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature *feature;
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -520,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
- struct dfl_feature_platform_data *pdata,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
@@ -579,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature,
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;
while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
- ret = dfl_feature_instance_init(pdev, pdata,
- feature, drv);
+ ret = dfl_feature_instance_init(pdev, feature, drv);
if (ret)
goto exit;
}
@@ -596,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;
@@ -695,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -708,7 +694,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;
- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -743,50 +729,62 @@ struct dfl_feature_info {
u64 params[];
};
-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}
-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static void dfl_id_free_action(void *arg)
+{
+ struct dfl_feature_dev_data *fdata = arg;
+
+ dfl_id_free(fdata->type, fdata->pdev_id);
+}
+
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;
- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->type = type;
- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
/*
@@ -795,25 +793,15 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for port device.
* and it should always be 0 for fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
feature->dfh_version = finfo->dfh_version;
@@ -823,7 +811,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
finfo->params, finfo->param_size,
GFP_KERNEL);
if (!feature->params)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
feature->param_size = finfo->param_size;
}
@@ -840,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ return ERR_CAST(feature->ioaddr);
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
@@ -864,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}
- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;
- return ret;
+ return fdata;
}
-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * Register the current feature device. This is called when we need to
+ * switch to parsing another feature, or when all features on the given
+ * device feature list have been parsed.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = {};
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;
- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;
- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;
- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);
- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;
+
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
+
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;
+
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ struct dfl_feature *feature;
+
+ platform_device_unregister(fdata->dev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+}
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ /* reset the binfo for next FIU */
+ binfo->type = DFL_ID_MAX;
return 0;
}
@@ -921,22 +948,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;
- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}
- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}
@@ -1025,7 +1041,7 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* Instead, features with interrupt functionality provide
* the information in feature specific registers.
*/
- type = feature_dev_id_type(binfo->feature_dev);
+ type = binfo->type;
if (type == PORT_ID) {
switch (fid) {
case PORT_FEATURE_ID_UINT:
@@ -1217,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1227,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}
return 0;
@@ -1273,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1294,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);
ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1515,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
return 0;
}
@@ -1573,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}
+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;
@@ -1614,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);
- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);
remove_feature_devs(cdev);
@@ -1643,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
- * __dfl_fpga_cdev_find_port - find a port under given container device
+ * __dfl_fpga_cdev_find_port_data - find a port under given container device
*
* @cdev: container device
* @data: data passed to match function
@@ -1656,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
* NOTE: you will need to drop the device reference with put_device() after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;
- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
static int __init dfl_fpga_init(void)
{
@@ -1706,33 +1704,28 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;
- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1752,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);
cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1789,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_PORT_OFST(port_id));
@@ -1816,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1842,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;
mutex_lock(&cdev->lock);
@@ -1856,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1993,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -2013,9 +2002,9 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
if (IS_ERR(fds))
return PTR_ERR(fds);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
kfree(fds);
return ret;
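
A consequence of the split above is that a port's enumeration data now outlives its platform device: dfl_fpga_cdev_release_port() only unregisters fdata->dev, and dfl_fpga_cdev_assign_port() can later rebuild it from the same dfl_feature_dev_data. A minimal, hedged sketch of driving that cycle from a hypothetical caller (only the two exported helpers are real):

/* Illustrative only: release a port for VF use, then hand it back to the PF. */
static int example_recycle_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	int ret;

	/* Unregisters the port platform device; the dfl_feature_dev_data
	 * stays on cdev->port_dev_list with fdata->dev set to NULL. */
	ret = dfl_fpga_cdev_release_port(cdev, port_id);
	if (ret)
		return ret;

	/* ... port is now available for SR-IOV / VF assignment ... */

	/* Re-creates and re-registers the platform device from the
	 * retained dfl_feature_dev_data. */
	return dfl_fpga_cdev_assign_port(cdev, port_id);
}
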
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5063d73b0d82..95539f1213cb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -206,6 +207,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -219,15 +222,15 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);
/**
* struct dfl_feature_id - dfl private feature id
@@ -300,26 +303,32 @@ struct dfl_feature {
#define FEATURE_DEV_ID_UNUSED (-1)
/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to the container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -327,55 +336,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @cdev: cdev of feature dev.
+ * @fdata: dfl enumeration data for the dfl feature device.
+ */
+struct dfl_feature_platform_data {
+ struct cdev cdev;
+ struct dfl_feature_dev_data *fdata;
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;
if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;
- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;
return 0;
}
static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;
- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;
- pdata->open_count--;
+ fdata->open_count--;
}
static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}
static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}
static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}
struct dfl_feature_ops {
@@ -398,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev,
struct module *owner);
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
-static inline
-struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+static inline struct dfl_feature_dev_data *
+dfl_fpga_inode_to_feature_dev_data(struct inode *inode)
{
struct dfl_feature_platform_data *pdata;
pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata;
}
-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)
-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;
return NULL;
}
-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);
if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -437,10 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
+}
+
static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}
static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -522,26 +551,21 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *));
-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);
- return pdev;
+ return fdata;
}
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
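
For feature sub-drivers, the practical effect of these header changes is that everything hangs off dfl_feature_dev_data, reached through the slim platform-data shim. A hedged sketch of a probe path against the helpers declared above (the probe function itself is hypothetical; to_dfl_feature_dev_data(), dfl_get_feature_ioaddr_by_id() and FME_FEATURE_ID_HEADER are the names used in this patch):

static int example_feature_probe(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	void __iomem *base;

	/* Locate the header register block of this feature device. */
	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
	if (!base)
		return -ENODEV;

	mutex_lock(&fdata->lock);
	/* ... program feature registers through 'base' ... */
	mutex_unlock(&fdata->lock);

	return 0;
}
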
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index add5ad29a673..98b4d1633b25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
select IRQ_DOMAIN
help
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4cb455b2bdee..619b6fb9d833 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,8 +490,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
- pdev->id * 32;
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index be4c9981ebc4..d63c1030e6ac 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 686ae3d11ba3..b6c230fab840 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -413,11 +413,6 @@ static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip)
return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip);
}
-static int gpio_sim_dev_match_fwnode(struct device *dev, void *data)
-{
- return device_match_fwnode(dev, data);
-}
-
static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
{
struct gpio_sim_chip *chip;
@@ -503,7 +498,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (ret)
return ret;
- chip->dev = device_find_child(dev, swnode, gpio_sim_dev_match_fwnode);
+ chip->dev = device_find_child(dev, swnode, device_match_fwnode);
if (!chip->dev)
return -ENODEV;
@@ -1033,20 +1028,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
struct configfs_subsystem *subsys = dev->group.cg_subsys;
struct gpio_sim_bank *bank;
struct gpio_sim_line *line;
+ struct config_item *item;
/*
- * The device only needs to depend on leaf line entries. This is
+ * The device only needs to depend on leaf entries. This is
* sufficient to lock up all the configfs entries that the
* instantiated, alive device depends on.
*/
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
if (lock)
- WARN_ON(configfs_depend_item_unlocked(
- subsys, &line->group.cg_item));
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
else
- configfs_undepend_item_unlocked(
- &line->group.cg_item);
+ configfs_undepend_item_unlocked(item);
}
}
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2f51546b0b88..fbef3f471bd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -294,6 +294,7 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
@@ -302,6 +303,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
@@ -310,6 +312,7 @@ config DRM_GEM_DMA_HELPER
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 36053b3d48b3..d100bb7a137c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
- ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s>\n",
+ adev->num_ip_blocks, ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -6158,6 +6158,44 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * AMD dGPU, which may be represented by a virtual upstream bridge.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
+
+/**
* amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
*
* @adev: amdgpu_device pointer
@@ -6168,9 +6206,8 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- struct pci_dev *pdev;
enum pci_bus_speed speed_cap, platform_speed_cap;
- enum pcie_link_width platform_link_width;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -6192,11 +6229,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
&platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
- pdev = adev->pdev;
- speed_cap = pcie_get_speed_cap(pdev);
if (speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
@@ -6252,51 +6288,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* platform caps */
if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
switch (platform_link_width) {
case PCIE_LNK_X32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 492b09d84571..dce9323fb410 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -119,9 +119,10 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 60
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -280,7 +281,7 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
- * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
+ * The default is -1 (Use value specified by TTM).
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -399,7 +400,7 @@ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
* the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
*/
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
/**
* DOC: bapm (int)
@@ -457,7 +458,7 @@ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
* Enable experimental hw support (1 = enable). The default is 0 (disabled).
*/
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
* DOC: dc (int)
@@ -568,14 +569,14 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
* DOC: emu_mode (int)
* Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
*/
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
-module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
/**
* DOC: ras_enable (int)
@@ -730,7 +731,7 @@ module_param_named(noretry, amdgpu_noretry, int, 0644);
*/
MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
/**
* DOC: use_xgmi_p2p (int)
@@ -749,7 +750,7 @@ module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
* assigns queues to HQDs.
*/
int sched_policy = KFD_SCHED_POLICY_HWS;
-module_param(sched_policy, int, 0444);
+module_param_unsafe(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
@@ -799,7 +800,7 @@ MODULE_PARM_DESC(send_sigterm,
* Setting 1 enables halt on hang.
*/
int halt_if_hws_hang;
-module_param(halt_if_hws_hang, int, 0644);
+module_param_unsafe(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
@@ -808,7 +809,7 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
* check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
-module_param(hws_gws_support, bool, 0444);
+module_param_unsafe(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
@@ -841,7 +842,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
*/
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
-module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -849,7 +850,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
*/
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
-module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
/**
* DOC: pcie_p2p (bool)
@@ -953,7 +954,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0644);
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold is specifies the
@@ -1049,7 +1050,7 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* - 0x4: Disable GPU soft recovery, always do a full reset
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
-module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
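
The parameter changes in this file swap module_param_named()/module_param() for their _unsafe variants, which behave the same except that setting the parameter taints the kernel (TAINT_USER) and logs a notice, marking the configuration as unsupported. A hedged sketch of declaring such a parameter (the name is hypothetical):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative parameter; setting it on the kernel command line taints
 * the kernel because of the _unsafe variant. */
static int example_risky_knob;
MODULE_PARM_DESC(example_risky_knob, "Illustrative unsupported tuning knob (0 = off)");
module_param_named_unsafe(example_risky_knob, example_risky_knob, int, 0444);
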
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e0bc37557d2c..2ea98ec60220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -297,7 +297,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
if (ring->funcs->emit_wave_limit &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 016a6f6c4267..98528ee94c15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -846,7 +846,7 @@ out:
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
- uint32_t pcie_gen_mask;
+ uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
@@ -934,15 +934,18 @@ out:
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
- pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
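
The masks combined here keep the asic capabilities in the low half-word and the platform capabilities shifted up by the CAIL_*_SUPPORT_SHIFT amount, so ANDing a mask with its own shifted-down copy leaves only the capabilities both sides advertise. A self-contained sketch of that arithmetic, using made-up bit values rather than the real CAIL_* definitions:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SHIFT 16	/* stand-in for CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT */

int main(void)
{
	uint32_t asic_caps     = 0x003f;	/* pretend: widths x1..x16 supported */
	uint32_t platform_caps = 0x000f;	/* pretend: slot only does x1..x8    */
	uint32_t mlw_mask = asic_caps | (platform_caps << EXAMPLE_SHIFT);

	/* Same reduction as amdgpu_kms.c: intersect asic and platform caps. */
	uint32_t common = mlw_mask & (mlw_mask >> EXAMPLE_SHIFT);

	printf("common width caps: 0x%04x\n", common);	/* prints 0x000f */
	return 0;
}
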
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c6f93cbd6739..2df2444ee892 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -40,7 +40,7 @@
#define AMDGPU_MES_VERSION_MASK 0x00000fff
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
-#define AMDGPU_MES_MSCRATCH_SIZE 0x8000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 2db58b5812a8..5f60736051d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -107,6 +107,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src doorbell_invalid_irq;
struct amdgpu_irq_src pool_timeout_irq;
struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
int num_instances;
uint32_t sdma_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ff286940ab43..01ae2f88dec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 461fb8090ae0..208b7d1d8a27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
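
The new WRITE_COMPRESS_DISABLE field uses the same shift/mask packing as the existing copy-flag fields, so it is written with AMDGPU_COPY_FLAGS_SET() and read back with AMDGPU_COPY_FLAGS_GET(). A hedged sketch of the round trip (the wrapper function is hypothetical):

/* Illustrative only: pack the per-BO tiling decision into copy_flags. */
static u32 example_pack_copy_flags(bool write_compress_disable)
{
	u32 copy_flags = 0;

	/* One bit at shift 14, per the definitions above. */
	copy_flags |= AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
					    write_compress_disable ? 1 : 0);
	return copy_flags;
}

/*
 * The SDMA backend later recovers it the way sdma_v7_0 does:
 *   write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
 */
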
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index e209b5e101df..23b6f7a4aa4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -427,7 +427,7 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
return;
sched = entity->entity.rq->sched;
- if (sched->ready) {
+ if (drm_sched_wqueue_ready(sched)) {
ring = to_amdgpu_ring(entity->entity.rq->sched);
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 4b6e05750654..2523221a2519 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1352,6 +1352,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (adev->gfx.me_fw_version >= 2480 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2680 &&
+ adev->mes.fw_version[0] >= 100)
+ adev->gfx.enable_cleaner_shader = true;
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4013,17 +4021,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
-
- data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
- data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
-
- /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
- if (adev->sdma.num_instances > 1) {
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
- }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 5b537806b4da..901e924e69ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -756,7 +756,8 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr +
+ pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
if (enforce_isolation)
@@ -983,29 +984,50 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;
if (enable) {
- data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (amdgpu_mes_log_enable) {
+ u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
+ /* In case uni mes is not enabled, only program for pipe 0 */
+ if (adev->mes.event_log_size >= (pipe + 1) * log_size) {
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
+ lower_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
+ upper_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n",
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
+ }
+ }
+
+ data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
+ if (pipe == 0)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
+ else
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
+
+ /* unhalt MES and activate one pipe each loop */
+ data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
+ if (pipe)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
+ dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data);
+
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
+
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* unhalt MES and activate pipe0 */
- data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
if (amdgpu_emu_mode)
msleep(100);
else if (adev->enable_uni_mes)
@@ -1479,8 +1501,9 @@ static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
-
+ adev->mes.event_log_size = adev->enable_uni_mes ?
+ (AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) :
+ (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
r = amdgpu_mes_init(adev);
if (r)
return r;
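
With the larger scratch size, each MES pipe's slice of the event-log buffer is laid out as the log region followed by the mscratch region, which is exactly the addressing mes_v12_0_enable() programs into CP_MES_MSCRATCH_LO/HI above. A small sketch of that offset calculation (the helper itself is illustrative):

/* Illustrative only: scratch base address for a given MES pipe. */
static u64 example_mes_mscratch_base(u64 event_log_gpu_addr, u32 pipe)
{
	const u64 slice = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;

	/* Per pipe: [log buffer][mscratch]; scratch starts right after the log. */
	return event_log_gpu_addr + pipe * slice + AMDGPU_MES_LOG_BUFFER_SIZE;
}
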
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 48537eba225d..5e0066cd6c51 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -1406,6 +1406,12 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
&adev->sdma.srbm_write_irq);
if (r)
return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
+ &adev->sdma.ctxt_empty_irq);
+ if (r)
+ return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1814,6 +1820,16 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* There is nothing useful to be done here, only kept for debug */
+ dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{
@@ -2096,6 +2112,10 @@ static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
.process = sdma_v4_4_2_process_srbm_write_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
+ .process = sdma_v4_4_2_process_ctxt_empty_irq,
+};
+
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
@@ -2104,6 +2124,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
@@ -2112,6 +2133,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+ adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 9c17df2cf37b..7e10e94624e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1741,11 +1741,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_cm;
max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+ write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
@@ -1762,7 +1763,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
- ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 388b44ed5928..984f0e705078 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,7 +274,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820258,
+ 0xbf820001, 0xbf820259,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -390,141 +390,98 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840063, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf84005f, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
- 0xd2890000, 0x00000900,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
+ 0x00000903, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
- 0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -544,137 +501,181 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200c7,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200c7, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1302,7 +1303,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202d4,
+ 0xbf820001, 0xbf8202d5,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1419,99 +1420,37 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1530,31 +1469,50 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1574,215 +1532,259 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbefc0080, 0xbf11017c,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf8200e3,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200e3, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbefc0080,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2a05, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202df,
+ 0xbf820001, 0xbf8202e0,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1899,99 +1901,37 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2010,31 +1950,50 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -2054,51 +2013,31 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2137,139 +2076,203 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -3151,7 +3154,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
- 0xbf820001, 0xbf8202db,
+ 0xbf820001, 0xbf8202dc,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -3266,99 +3269,37 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3377,31 +3318,50 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -3421,51 +3381,31 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3504,139 +3444,203 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b79,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
@@ -4124,27 +4128,25 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
- 0xbf820001, 0xbf8202d8,
+ 0xbf820001, 0xbf8202ca,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
- 0xbf840008, 0xbf0d986d,
- 0xbf85001f, 0x866eff7b,
- 0x00000400, 0xbf850061,
- 0xbf8e0010, 0xb8fbf803,
- 0xbf82fffa, 0x866eff7b,
- 0x03800900, 0xbf850015,
- 0x866eff7b, 0x000071ff,
- 0xbf840008, 0x866fff7b,
- 0x00007080, 0xbf840001,
- 0xbeee1a87, 0xb8eff801,
- 0x8e6e8c6e, 0x866e6f6e,
- 0xbf85000a, 0xbf0d986d,
- 0xbf850003, 0x866eff6d,
- 0x00ff0000, 0xbf850005,
- 0xbf0d986d, 0xbf850004,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001a,
0x866eff7b, 0x00000400,
- 0xbf850046, 0xbeed1a9d,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x03c00900,
+ 0xbf850011, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf850006,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf85003a,
0xb8faf807, 0x867aff7a,
0x001f8000, 0x8e7a8b7a,
0x8979ff79, 0xfc000000,
@@ -4153,100 +4155,95 @@ static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
0xb8fbf813, 0x8efa887a,
0xbf0d8f7b, 0xbf840002,
0x877bff7b, 0xffff0000,
- 0xc0031cfd, 0x00000010,
- 0xc0071bbd, 0x00000000,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8979ff79, 0x00800000,
+ 0x87796e79, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
0xc0071ebd, 0x00000008,
- 0xbf8cc07f, 0x8e739773,
- 0x8979ff79, 0x01800000,
- 0x87797379, 0xbf0d986d,
- 0xbf840009, 0xbf0d9879,
- 0xbf850007, 0x896dff6d,
- 0x01ff0000, 0xba7f0583,
- 0x00000000, 0xbf0d9d6d,
- 0xbeed189d, 0xbf840012,
- 0xbef91898, 0xbeed189d,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0x866eff6d,
- 0x01ff0000, 0xbf850005,
- 0x8778ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbf820005, 0x866eff6d,
- 0x01000000, 0xbf850002,
- 0x806c846c, 0x826d806d,
- 0x866dff6d, 0x0000ffff,
- 0x8f7a8b79, 0x867aff7a,
- 0x001f8000, 0xb97af807,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e8378, 0xb96ee0c2,
- 0xbf800002, 0xb9780002,
- 0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbefa0080,
- 0xb97a0283, 0xb8faf807,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f7a8b79,
0x867aff7a, 0x001f8000,
- 0x8e7a8b7a, 0x8979ff79,
- 0xfc000000, 0x87797a79,
- 0xba7ff807, 0x00000000,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2985, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0xbef1007c,
- 0xbef00080, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0xb97af807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbf108080,
0x867aff7f, 0x04000000,
0xbeef0080, 0x876f6f7a,
0xb8f02985, 0x80708170,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 0eabb7a8cab9..6869e07a2fff 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -447,7 +447,9 @@ L_SAVE:
s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
+ // Clear VSKIP state now that MODE.VSKIP has been saved.
+ // If a user shader set it, vector instructions would be skipped.
+ s_setvskip 0,0
/* the first wave in the threadgroup */
s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK // extract first wave bit
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 1405e8affd48..d4593374e7a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2325,9 +2325,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
+ while (halt_if_hws_hang)
+ schedule();
if (reset_queues_on_hws_hang(dqm)) {
- while (halt_if_hws_hang)
- schedule();
dqm->is_hws_hang = true;
kfd_hws_hang(dqm);
retval = -ETIME;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4b275937d05e..d05d199b5e44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -278,10 +278,11 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ uint64_t npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
+ uint64_t mpages = 0;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
@@ -295,14 +296,16 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
- for (i = j = 0; i < npages; i++) {
+ for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
+ if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ mpages++;
+ }
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
@@ -353,9 +356,12 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- while (i--) {
+ for (i = 0; i < npages && mpages; i++) {
+ if (!dst[i])
+ continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 9df56f8e09f9..bcddd989c7f3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
if (pdd->already_dequeued)
return;
-
+ /* The MES context flush needs to filter out the case in which the
+ * KFD process is created without setting up the MES context and
+ * queue for creating a compute queue.
+ */
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
- if (dev->kfd->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0ec178ca7434..ac3fd81fecef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2034,6 +2034,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
+ adev->dm.dc->debug.force_disable_subvp = true;
+
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
@@ -11483,6 +11486,11 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
new_plane_state = drm_atomic_get_plane_state(state, plane);
old_plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
+ DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ return false;
+ }
+
if (old_plane_state->fb && new_plane_state->fb &&
get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
return true;
@@ -12318,10 +12326,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+ }
+
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 1f974ea3b0c6..1648226586e2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -89,7 +89,7 @@
#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
-#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK5_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index cecaadf741ad..f84e795e35f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2133,7 +2133,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 5bb8b78bf250..bf636b28e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,8 +63,7 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
- link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 46f9c05de16e..e1d500633dfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_limit := 3072
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 91c4f3b4bd5f..21fd466dba26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -28,15 +28,19 @@ dml2_ccflags := $(CC_FLAGS_FPU)
dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
-frame_warn_flag := -Wframe-larger-than=4096
-else
-frame_warn_flag := -Wframe-larger-than=3072
-endif
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 35bc917631ae..84a2de9a76d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -1736,7 +1736,7 @@ static void CalculateBytePerPixelAndBlockSizes(
#endif
} // CalculateBytePerPixelAndBlockSizes
-static dml_float_t CalculateTWait(
+static noinline_for_stack dml_float_t CalculateTWait(
dml_uint_t PrefetchMode,
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange,
dml_bool_t SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
@@ -4458,7 +4458,7 @@ static void CalculateSwathWidth(
}
} // CalculateSwathWidth
-static dml_float_t CalculateExtraLatency(
+static noinline_for_stack dml_float_t CalculateExtraLatency(
dml_uint_t RoundTripPingLatencyCycles,
dml_uint_t ReorderingBytes,
dml_float_t DCFCLK,
@@ -5915,7 +5915,7 @@ static dml_uint_t DSCDelayRequirement(
return DSCDelayRequirement_val;
}
-static dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
+static noinline_for_stack dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_bool_t NotUrgentLatencyHiding[],
dml_float_t ReadBandwidthLuma[],
@@ -6019,7 +6019,7 @@ static void CalculatePrefetchBandwithSupport(
#endif
}
-static dml_float_t CalculateBandwidthAvailableForImmediateFlip(
+static noinline_for_stack dml_float_t CalculateBandwidthAvailableForImmediateFlip(
dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_float_t ReadBandwidthLuma[],
@@ -6213,7 +6213,7 @@ static dml_uint_t CalculateMaxVStartup(
return max_vstartup_lines;
}
-static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
+static noinline_for_stack void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params,
dml_uint_t j,
dml_uint_t k)
@@ -6265,7 +6265,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
}
-static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
{
struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals;
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index b9c6b45f6872..0c8ec30ea672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -1017,7 +1017,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
@@ -1042,7 +1042,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index c4dbf27abaf8..8ed49a9df378 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -2778,7 +2778,7 @@ static double dml_get_return_bandwidth_available(
return return_bw_mbps;
}
-static void calculate_bandwidth_available(
+static noinline_for_stack void calculate_bandwidth_available(
double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
@@ -3625,7 +3625,7 @@ static void CalculateDCFCLKDeepSleepTdlut(
dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
-static void CalculateDCFCLKDeepSleep(
+static noinline_for_stack void CalculateDCFCLKDeepSleep(
const struct dml2_display_cfg *display_cfg,
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
@@ -4142,7 +4142,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
return true;
}
-static void CalculateODMMode(
+static noinline_for_stack void CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum dml2_output_format_class OutFormat,
@@ -4239,7 +4239,7 @@ static void CalculateODMMode(
#endif
}
-static void CalculateOutputLink(
+static noinline_for_stack void CalculateOutputLink(
struct dml2_core_internal_scratch *s,
double PHYCLK,
double PHYCLKD18,
@@ -5999,7 +5999,7 @@ static double calculate_impacted_Tsw(unsigned int exclude_plane_idx, unsigned in
}
// a global check against the aggregate effect of the per plane prefetch schedule
-static bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
+static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *p)
{
struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals *s = &scratch->CheckGlobalPrefetchAdmissibility_locals;
@@ -7012,7 +7012,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
}
}
-static void calculate_vactive_det_fill_latency(
+static noinline_for_stack void calculate_vactive_det_fill_latency(
const struct dml2_display_cfg *display_cfg,
unsigned int num_active_planes,
unsigned int bytes_required_l[],
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b416320873e1..b8a34abaf519 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -786,7 +786,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -1343,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
@@ -1383,7 +1383,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index e1da48b05d00..75fb77bca83b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -194,6 +194,9 @@ void dpp_reset(struct dpp *dpp_base)
dpp->filter_h = NULL;
dpp->filter_v = NULL;
+ memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
+ memset(&dpp_base->att, 0, sizeof(dpp_base->att));
+
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
}
@@ -480,10 +483,11 @@ void dpp1_set_cursor_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- REG_UPDATE(CURSOR0_CONTROL,
- CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 3b6ca7974e18..1236e0f9a256 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -154,9 +154,11 @@ void dpp401_set_cursor_position(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t cur_en = pos->enable ? 1 : 0;
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
void dpp401_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index fe741100c0f8..d347bb06577a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 7fb5523f9722..b98505b240a7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 5264dc26cce1..32a6be543105 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 5eb3da8d5206..dce7269959ce 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 8364c9f9231a..9b026600b90e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -546,6 +546,12 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
+void hubp_reset(struct hubp *hubp)
+{
+ memset(&hubp->pos, 0, sizeof(hubp->pos));
+ memset(&hubp->att, 0, sizeof(hubp->att));
+}
+
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
@@ -1351,8 +1357,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
void hubp1_init(struct hubp *hubp)
{
- //do nothing
+ hubp_reset(hubp);
}
+
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1365,6 +1372,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index a85dc3be786f..c7765e6f09e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -746,6 +746,8 @@ void hubp1_dcc_control(struct hubp *hubp,
bool enable,
enum hubp_ind_block_size independent_64b_blks);
+void hubp_reset(struct hubp *hubp);
+
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index c74f6a3313a2..91259b896e03 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -1058,11 +1058,13 @@ void hubp2_cursor_set_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
+ REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
@@ -1674,6 +1676,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
index 65c628078ca2..ec88ee424a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.set_cursor_position = hubp1_cursor_set_position,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index edbdb8c88d5c..e2740482e1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -811,6 +811,8 @@ static void hubp21_init(struct hubp *hubp)
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -823,6 +825,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp21_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 12b282ed7067..0da70b50e86d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -499,6 +499,10 @@ void hubp3_init(struct hubp *hubp)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn30_hubp_funcs = {
@@ -513,6 +517,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 46b804ed05fb..c2900c79a2d3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index 8b5bd73b8094..f3a21c623f44 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -181,6 +183,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index faf37febc6fb..5661d7a80d54 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 28ceceaf9e31..5ed195377a6c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -141,7 +141,7 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
void hubp401_init(struct hubp *hubp)
{
- //For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
+ hubp_reset(hubp);
}
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
@@ -742,11 +742,13 @@ void hubp401_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, x_pos,
@@ -998,6 +1000,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp401_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp401_cursor_set_position,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 681bb92c6069..44e405e9bc97 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1286,6 +1286,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 59fc1c114fbe..b907ad1acedd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
- if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -800,6 +801,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -956,6 +958,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
/*to do, need to support both case*/
hubp->power_gated = true;
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 2a530a4a39f7..b610beb075d5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -163,6 +163,8 @@ struct hubp_funcs {
void (*dcc_control)(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
+ void (*hubp_reset)(struct hubp *hubp);
+
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index a1ece3eecdf5..a08611cb8041 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -49,6 +49,17 @@
| CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
+
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 0x00000001
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 0x00000002
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 0x00000004
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 0x00000008
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 0x00000010
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 0x00000020
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 0x00000040
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK 0x0000FFFF
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT 0
+
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
@@ -56,6 +67,7 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
+#define CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
/* 1/2/4/8/16 lanes */
@@ -65,4 +77,10 @@
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+#define AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 05bdb4e020ae..6dccee403a3d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -345,10 +345,15 @@ enum DC_DEBUG_MASK {
*/
DC_DISABLE_ACPI_EDID = 0x8000,
- /*
+ /**
* @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
*/
DC_DISABLE_HDMI_CEC = 0x10000,
+
+ /**
+ * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
+ */
+ DC_DISABLE_SUBVP = 0x20000,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 7b65a27fb302..147bfb12fd75 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -93,7 +93,6 @@
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SelectPstatePolicy 0x44
-#define PPSMC_MSG_ResetSDMA2 0x45
#define PPSMC_MSG_ResetSDMA 0x4D
#define PPSMC_Message_Count 0x4E
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index b0dab9797c70..e4cd6a0d13da 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -276,8 +276,7 @@
__SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
- __SMU_DUMMY_MAP(ResetSDMA), \
- __SMU_DUMMY_MAP(ResetSDMA2),
+ __SMU_DUMMY_MAP(ResetSDMA),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 356d9422b411..8d4a96e23326 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -107,6 +107,7 @@ struct smu_13_0_dpm_context {
struct smu_13_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
};
enum smu_13_0_power_state {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f6b029354327..83163d7c7f00 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1732,7 +1732,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = 0;
/* Valid power data is available only from primary die */
if (aldebaran_is_primary(smu)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 8ab30b2f7119..da7bd9227afe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,38 +101,24 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
- return (smu->adev->flags & AMD_IS_APU) &&
- smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557600;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05550E00;
- default:
- return false;
- }
-}
-
-static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
-{
- if (smu->adev->flags & AMD_IS_APU)
- return smu->smc_fw_version >= 0x04556F00;
-
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- return smu->smc_fw_version >= 0x557900;
- case IP_VERSION(13, 0, 14):
- return smu->smc_fw_version >= 0x05551000;
- default:
- return false;
- }
-}
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
+
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(ALL),
+};
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
@@ -209,7 +195,6 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
- MSG_MAP(ResetSDMA2, PPSMC_MSG_ResetSDMA2, 0),
};
// clang-format on
@@ -297,6 +282,162 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps |= BIT_ULL(cap);
+}
+
+static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps &= ~BIT_ULL(cap);
+}
+
+static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ return !!(dpm_context->caps & BIT_ULL(cap));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver >= 0x05550E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver >= 0x05551000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x05550B00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver >= 0x5551200)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_12_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x00561900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+
+ if (fw_ver >= 0x00561700)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t fw_ver = smu->smc_fw_version;
+ uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x552F00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+ if (fw_ver < 0x554500)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
+
+ if (adev->flags & AMD_IS_APU) {
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+
+ if (fw_ver <= 0x4556900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
+ if (fw_ver >= 0x04556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x04556A00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ } else {
+ if (fw_ver >= 0x557600)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver < 0x00556000)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
+ if (fw_ver < 0x556300)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ if (fw_ver < 0x554800)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
+ if (fw_ver >= 0x556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver < 0x00555a00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ if (fw_ver < 0x00555600)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if (pgm == 0 && fw_ver >= 0x557900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ }
+ if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
+ ((pgm == 0) && (fw_ver >= 0x00557900)) ||
+ ((pgm == 4) && (fw_ver >= 0x4557000)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ return smu_v13_0_12_init_caps(smu);
+ case IP_VERSION(13, 0, 14):
+ return smu_v13_0_14_init_caps(smu);
+ default:
+ return smu_v13_0_6_init_caps(smu);
+ }
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+ int r;
+
+ r = smu_v13_0_check_fw_version(smu);
+ /* Initialize caps flags once fw version is fetched */
+ if (!r)
+ smu_v13_0_x_init_caps(smu);
+
+ return r;
+}
+
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
@@ -618,7 +759,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
int ret, i, retry = 100;
uint32_t table_version;
@@ -814,8 +955,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy not supported in older firmwares */
- if (!(smu->adev->flags & AMD_IS_APU) &&
- (smu->smc_fw_version < 0x00556000)) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
@@ -992,7 +1132,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -1005,7 +1145,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- if (smu->smc_fw_version >= 0x552F00) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
} else {
@@ -1692,7 +1832,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
- if (smu->smc_fw_version < 0x554800)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1837,9 +1977,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
- if (amdgpu_sriov_vf(adev) &&
- ((smu->smc_fw_version < 0x556600) ||
- (adev->flags & AMD_IS_APU)))
+ if (!smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
@@ -2043,7 +2182,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+ if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
*feature_mask = 0;
ret = 0;
}
@@ -2336,11 +2475,10 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
- bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = smu_v13_0_6_is_unified_metrics(smu);
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
MetricsTableX_t *metrics_x;
@@ -2348,6 +2486,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct amdgpu_xcp *xcp;
u16 link_width_level;
u32 inst_mask;
+ bool per_inst;
metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2421,7 +2560,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
* table for both pf & one vf for smu version 85.99.0 or higher else report only
* for pf from registers
*/
- if (smu->smc_fw_version >= 0x556300) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
gpu_metrics->pcie_link_speed =
pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2450,7 +2589,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_x->PCIeNAKSentCountAcc;
gpu_metrics->pcie_nak_rcvd_count_acc =
metrics_x->PCIeNAKReceivedCountAcc;
- if (smu_v13_0_6_is_other_end_count_available(smu))
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
metrics_x->PCIeOtherEndRecoveryAcc;
@@ -2475,17 +2614,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
- apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
- smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 6)) &&
- (smu->smc_fw_version >= 0x556F00);
- smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
- == IP_VERSION(13, 0, 14)) &&
- (smu->smc_fw_version >= 0x05550B00);
-
- per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2516,7 +2645,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
- if (smu_v13_0_6_is_blw_host_limit_available(smu))
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS)))
gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
[inst]);
@@ -2624,7 +2754,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
- if (smu->smc_fw_version < 0x554500)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
return 0;
/* Get SOC Max operating temperature */
@@ -2726,11 +2856,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
- if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2744,31 +2873,13 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
- uint32_t smu_program;
int ret = 0;
- smu_program = (smu->smc_fw_version >> 24) & 0xff;
- switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 6):
- if (((smu_program == 7) && (smu->smc_fw_version > 0x07550700)) ||
- ((smu_program == 0) && (smu->smc_fw_version > 0x00557700)))
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ResetSDMA, inst_mask, NULL);
- else if ((smu_program == 4) &&
- (smu->smc_fw_version > 0x4556e6c))
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ResetSDMA2, inst_mask, NULL);
- break;
- case IP_VERSION(13, 0, 14):
- if ((smu_program == 5) &&
- (smu->smc_fw_version > 0x05550f00))
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ResetSDMA2, inst_mask, NULL);
- break;
- default:
- break;
- }
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
+ return -EOPNOTSUPP;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA, inst_mask, NULL);
if (ret)
dev_err(smu->adev->dev,
"failed to send ResetSDMA event with mask 0x%x\n",
@@ -3087,7 +3198,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
if (instlo != 0x03b30400)
return false;
- if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
@@ -3373,9 +3484,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3413,7 +3525,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
/* pptable related */
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
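
The amdgpu smu_v13_0_6 hunks above replace scattered firmware-version comparisons with a capability bitmask: smu_v13_0_x_init_caps() fills the mask once, right after the firmware version is fetched, and every other call site only asks smu_v13_0_6_cap_supported(). The cap helpers themselves are defined outside this excerpt, so the standalone sketch below (hypothetical names and types, not the driver code) only illustrates the general shape of such a cap-bitmask API; the version thresholds are taken from the hunk above.

#include <stdbool.h>
#include <stdint.h>

/* One bit per capability, set once during init, queried afterwards. */
enum cap_bit { CAP_DPM, CAP_CTF_LIMIT, CAP_RMA_MSG, CAP_MAX };

struct caps {
	uint64_t mask;
};

static void cap_set(struct caps *c, enum cap_bit bit)   { c->mask |= 1ULL << bit; }
static void cap_clear(struct caps *c, enum cap_bit bit) { c->mask &= ~(1ULL << bit); }
static bool cap_supported(const struct caps *c, enum cap_bit bit)
{
	return c->mask & (1ULL << bit);
}

/* Version gating happens exactly once, mirroring smu_v13_0_6_init_caps(). */
static void init_caps(struct caps *c, uint32_t fw_ver)
{
	cap_set(c, CAP_DPM);
	cap_set(c, CAP_CTF_LIMIT);
	if (fw_ver < 0x552F00)
		cap_clear(c, CAP_DPM);
	if (fw_ver < 0x554500)
		cap_clear(c, CAP_CTF_LIMIT);
}
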
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..f30b3d5eeca5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 0e282b7b167c..b9eb67e3fa90 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -195,7 +195,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
- for (i = 0; i < 200; ++i) {
+ for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 251f94313717..aca442c25209 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -743,6 +743,15 @@ retry:
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
+ for (i = 0; i < count; i++) {
+ struct drm_connector *connector = connectors[i];
+
+ if (connector->has_tile)
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
+ connector->tile_h_loc, connector->tile_v_loc);
+ }
+
/*
* If the BIOS didn't enable everything it could, fall back to have the
* same user experiencing of lighting up as much as possible like the
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index c9008113111b..fb3614a7ba44 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1354,14 +1354,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
+static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
{
struct drm_mode_set *mode_set;
mutex_lock(&fb_helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &fb_helper->client) {
- mode_set->x = x;
- mode_set->y = y;
+ mode_set->x += dx;
+ mode_set->y += dy;
}
mutex_unlock(&fb_helper->client.modeset_mutex);
}
@@ -1370,16 +1370,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- int ret;
+ int ret, dx, dy;
- pan_set(fb_helper, var->xoffset, var->yoffset);
+ dx = var->xoffset - info->var.xoffset;
+ dy = var->yoffset - info->var.yoffset;
+ pan_set(fb_helper, dx, dy);
ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
} else
- pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
+ pan_set(fb_helper, -dx, -dy);
return ret;
}
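
The drm_fb_helper change above switches pan_set() from storing absolute offsets to applying a delta, so a failed commit can be undone simply by applying the negated delta. A minimal sketch of that apply/rollback pattern (simplified, not the DRM code itself):

struct viewport { int x, y; };

static void pan_apply(struct viewport *vp, int dx, int dy)
{
	vp->x += dx;
	vp->y += dy;
}

static int pan_commit(struct viewport *vp, int new_x, int new_y, int (*commit)(void))
{
	int dx = new_x - vp->x;
	int dy = new_y - vp->y;
	int ret;

	pan_apply(vp, dx, dy);
	ret = commit();
	if (ret)
		pan_apply(vp, -dx, -dy);	/* roll back on failure */
	return ret;
}
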
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index fc1e517e074a..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -41,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 1aa0b298c278..50ec0c3c7588 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -397,7 +397,6 @@ void intel_display_driver_resume_access(struct intel_display *display)
*/
bool intel_display_driver_check_access(struct intel_display *display)
{
- char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
@@ -406,12 +405,11 @@ bool intel_display_driver_check_access(struct intel_display *display)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
- get_task_comm(comm, current),
- task_pid_vnr(current));
+ current->comm, task_pid_vnr(current));
if (display->access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
- get_task_comm(comm, display->access.allowed_task),
+ display->access.allowed_task->comm,
task_pid_vnr(display->access.allowed_task));
drm_dbg_kms(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f1f3b1bb1e89..aa77ddcee42c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1791,7 +1791,7 @@ int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
@@ -2072,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
+ for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
+ if (valid_dsc_bpp[i] < dsc_min_bpp ||
+ valid_dsc_bpp[i] > dsc_max_bpp)
continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -2829,7 +2828,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
@@ -2840,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c44fc7dd86c..a65cf97ad12d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -341,6 +341,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
+
+ /* Allow using zero step to indicate one try */
+ if (!step)
+ break;
}
if (slots < 0) {
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7464b44c8bb3..1bab7c34a794 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -41,7 +41,7 @@ intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
if (DISPLAY_VER(display) >= 30) {
@@ -2188,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ff9764cac1e7..80e558042d97 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 12f1ba7ca9c1..cc05bd9e43b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
+ /* Assume no engines are running and set running state to false */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -5519,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2406cda75b7b..5384d1bb4923 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4802,7 +4802,7 @@ err_unlock:
return ret;
}
-static struct ctl_table oa_table[] = {
+static const struct ctl_table oa_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &i915_perf_stream_paranoid,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cd25e5afe55a..f22ad2882697 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -358,7 +358,7 @@ static const struct of_device_id mtk_drm_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
-static int mtk_drm_match(struct device *dev, void *data)
+static int mtk_drm_match(struct device *dev, const void *data)
{
if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1))
return true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 2cb2e5675807..cd659b9fd1d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
const u64 plength = 0x10000;
const u64 ioffset = plength;
const u64 ilength = 0x02000;
- char name[TASK_COMM_LEN];
int cid, ret;
u64 size;
@@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
chan->userd = &chan->user;
}
- get_task_comm(name, current);
- snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));
+ snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
&args, sizeof(args), &chan->user);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 21d2d9ca5e85..5664c4c71faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1175,7 +1175,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[32], tmpname[TASK_COMM_LEN];
+ char name[32];
int ret;
/* need to bring up power immediately if opening device */
@@ -1185,10 +1185,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
return ret;
}
- get_task_comm(tmpname, current);
rcu_read_lock();
snprintf(name, sizeof(name), "%s[%d]",
- tmpname, pid_nr(rcu_dereference(fpriv->pid)));
+ current->comm, pid_nr(rcu_dereference(fpriv->pid)));
rcu_read_unlock();
if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 89a699370a59..c67e1f906785 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -757,7 +757,6 @@ static void bochs_pci_remove(struct pci_dev *pdev)
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_put(dev);
}
static void bochs_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index da203045df9b..72b6a119412f 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -107,8 +107,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
- dma_fence_signal(&fence->base);
+
v3d->bin_job = NULL;
+ dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -118,8 +120,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
- dma_fence_signal(&fence->base);
+
v3d->render_job = NULL;
+ dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -129,8 +133,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
- dma_fence_signal(&fence->base);
+
v3d->csd_job = NULL;
+ dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -167,8 +173,10 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
- dma_fence_signal(&fence->base);
+
v3d->tfu_job = NULL;
+ dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index b3664c12843d..f92133a01195 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -189,10 +189,11 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct dma_buf_attachment *attach = obj->import_attach;
- struct dma_resv *resv = attach->dmabuf->resv;
if (attach) {
- dma_resv_lock(resv, NULL);
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
virtio_gpu_detach_object_fenced(bo);
@@ -200,10 +201,10 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
dma_buf_unmap_attachment(attach, bo->sgt,
DMA_BIDIRECTIONAL);
- dma_resv_unlock(resv);
+ dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(attach->dmabuf, attach);
- dma_buf_put(attach->dmabuf);
+ dma_buf_detach(dmabuf, attach);
+ dma_buf_put(dmabuf);
}
if (bo->created) {
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a49561e9f3c3..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -51,6 +51,10 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
@@ -78,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81dc7795c065..39fe485d2085 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -119,11 +119,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- /*
- * Don't add a new section header here because the mesa debug decoder
- * tool expects the context information to be in the 'GuC CT' section.
- */
- /* drm_puts(&p, "\n**** Contexts ****\n"); */
+ drm_puts(&p, "\n**** Contexts ****\n");
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -395,42 +391,34 @@ int xe_devcoredump_init(struct xe_device *xe)
/**
* xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
*
- * The output is split to multiple lines because some print targets, e.g. dmesg
- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
- * piece-meal fashion is not possible, each separate call to drm_puts() has a
- * line-feed automatically added! Therefore, the entire output line must be
- * constructed in a local buffer first, then printed in one atomic output call.
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
*
* There is also a scheduler yield call to prevent the 'task has been stuck for
* 120s' kernel hang check feature from firing when printing to a slow target
* such as dmesg over a serial port.
*
- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
- *
* @p: the printer object to output to
* @prefix: optional prefix to add to output string
+ * @suffix: optional suffix to add at the end. 0 disables it and is
+ * not added to the output, which is useful when using multiple calls
+ * to dump data to @p
* @blob: the Binary Large OBject to dump out
* @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
* @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
*/
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size)
{
const u32 *blob32 = (const u32 *)blob;
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
- /*
- * Splitting blobs across multiple lines is not compatible with the mesa
- * debug decoder tool. Note that even dropping the explicit '\n' below
- * doesn't help because the GuC log is so big some underlying implementation
- * still splits the lines at 512K characters. So just bail completely for
- * the moment.
- */
- return;
-
#define DMESG_MAX_LINE_LEN 800
-#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
@@ -462,7 +450,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
line_pos += strlen(line_buff + line_pos);
if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
drm_puts(p, line_buff);
@@ -474,10 +461,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
}
}
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
if (line_pos) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
-
drm_puts(p, line_buff);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 6a17e6d60102..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -29,7 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4de26470a4ae..4e1839b483a0 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -521,7 +521,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
start = jiffies;
- timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+ timeout = start + secs_to_jiffies(60); /* 60 sec! */
do {
if (signal_pending(current))
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 26e64530ada2..5d6fb79957b6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index e71fc3d2bda2..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
pf_enable_ggtt_guest_update(gt);
xe_gt_sriov_pf_service_update(gt);
- xe_gt_sriov_pf_migration_init(gt);
}
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 96fab779a906..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,6 +10,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 8b65c5e959cc..50c8076b5158 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1724,7 +1724,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
snapshot->g2h_outstanding);
if (snapshot->ctb)
- xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
+ xe_print_blob_ascii85(p, "CTB data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
} else {
drm_puts(p, "CT disabled\n");
}
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index df4cfb698cdb..2baa4d95571f 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -211,8 +211,10 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "Log data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
- xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
remain -= size;
}
}
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eeb96b5f49e2..fa873f3d0a9d 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -237,7 +237,6 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
unsigned long flags;
- bool pollin;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -282,11 +281,11 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
- pollin = available >= stream->wait_num_reports * report_size;
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -294,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -452,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -472,14 +475,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
@@ -2534,6 +2537,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
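
The xe_oa change above stops doing blind full-register writes to OACONTROL and instead restricts the update to the bits the OA code actually owns (OAG_OACONTROL_USED_BITS / OAM_OACONTROL_USED_BITS), so unrelated bits in the register keep their current values. A generic sketch of that masked read-modify-write, assuming the usual (mask, value) convention rather than the exact xe_mmio_rmw32() signature:

#include <stdint.h>

/* Only the bits in @mask change; everything else keeps its current value. */
static inline uint32_t rmw32(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

/* e.g. write32(reg, rmw32(read32(reg), used_bits, val)); */
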
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 8ec1b84cbb9e..57cf01efc07f 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -56,7 +56,7 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
}
-static struct ctl_table observation_ctl_table[] = {
+static const struct ctl_table observation_ctl_table[] = {
{
.procname = "observation_paranoid",
.data = &xe_observation_paranoid,
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 0b63fd48ea92..979f6d3239ba 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1564,7 +1564,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(dp->dev);
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
zynqmp_dp_disp_enable(dp, old_bridge_state);
/*
@@ -1624,7 +1624,6 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET,
ZYNQMP_DP_SOFTWARE_RESET_ALL);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1);
- mutex_unlock(&dp->lock);
}
static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
index 49875529c2a4..d771b8b199e0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -60,6 +60,7 @@ struct zynqmp_dpsub_audio;
* @layers: Video and graphics layers
* @dp: The DisplayPort controller
* @dma_align: DMA alignment constraint (must be a power of 2)
+ * @audio: DP audio data
*/
struct zynqmp_dpsub {
struct device *dev;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index e5620d7db569..799b8686a88a 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -43,6 +43,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
bool is_als_present;
+ bool is_sra_present;
};
struct amd_mp2_dev {
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 0c28ca349bcd..48cfd0c58241 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -122,7 +122,7 @@ int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
{
int rc;
- pci_intx(privdata->pdev, true);
+ pcim_intx(privdata->pdev, true);
rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
@@ -248,7 +248,7 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index db36d87d5634..e9929c4aa72e 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -30,6 +30,7 @@ static int amd_sfh_get_sensor_num(struct amd_mp2_dev *mp2, u8 *sensor_id)
case ACCEL_IDX:
case GYRO_IDX:
case MAG_IDX:
+ case SRA_IDX:
case ALS_IDX:
case HPD_IDX:
if (BIT(i) & slist->sl.sensors)
@@ -58,6 +59,8 @@ static const char *get_sensor_name(int idx)
return "gyroscope";
case MAG_IDX:
return "magnetometer";
+ case SRA_IDX:
+ return "SRA";
case ALS_IDX:
return "ALS";
case HPD_IDX:
@@ -130,6 +133,23 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
+
+ if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
+ break;
+
+ if (cl_data->sensor_idx[i] == SRA_IDX) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
+
+ cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ privdata->dev_en.is_sra_present = true;
+ continue;
+ }
+
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
@@ -181,6 +201,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
@@ -289,7 +311,7 @@ static void amd_mp2_pci_remove(void *privdata)
sfh_deinit_emp2();
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 4676f060da26..ffb98b4c36cb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -87,6 +87,41 @@ void sfh_interface_init(struct amd_mp2_dev *mp2)
emp2 = mp2;
}
+static int amd_sfh_mode_info(u32 *platform_type, u32 *laptop_placement)
+{
+ struct sfh_op_mode mode;
+
+ if (!platform_type || !laptop_placement)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_sra_present)
+ return -ENODEV;
+
+ mode.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 3));
+
+ *platform_type = mode.op_mode.devicemode;
+
+ if (mode.op_mode.ontablestate == 1) {
+ *laptop_placement = ON_TABLE;
+ } else if (mode.op_mode.ontablestate == 2) {
+ *laptop_placement = ON_LAP_MOTION;
+ } else if (mode.op_mode.inbagstate == 1) {
+ *laptop_placement = IN_BAG;
+ } else if (mode.op_mode.outbagstate == 1) {
+ *laptop_placement = OUT_OF_BAG;
+ } else if (mode.op_mode.ontablestate == 0 || mode.op_mode.inbagstate == 0 ||
+ mode.op_mode.outbagstate == 0) {
+ *laptop_placement = LP_UNKNOWN;
+ pr_warn_once("Unknown laptop placement\n");
+ } else if (mode.op_mode.ontablestate == 3 || mode.op_mode.inbagstate == 3 ||
+ mode.op_mode.outbagstate == 3) {
+ *laptop_placement = LP_UNDEFINED;
+ pr_warn_once("Undefined laptop placement\n");
+ }
+
+ return 0;
+}
+
static int amd_sfh_hpd_info(u8 *user_present)
{
struct hpd_status hpdstatus;
@@ -131,6 +166,9 @@ int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op)
return amd_sfh_hpd_info(&sfh_info->user_present);
case MT_ALS:
return amd_sfh_als_info(&sfh_info->ambient_light);
+ case MT_SRA:
+ return amd_sfh_mode_info(&sfh_info->platform_type,
+ &sfh_info->laptop_placement);
}
}
return -EINVAL;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 2c211d28764d..665c99ad779f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -22,8 +22,9 @@ enum sensor_index {
ACCEL_IDX,
GYRO_IDX,
MAG_IDX,
- ALS_IDX = 4,
- HPD_IDX = 5,
+ SRA_IDX,
+ ALS_IDX,
+ HPD_IDX,
MAX_IDX = 15,
};
@@ -164,6 +165,25 @@ struct hpd_status {
};
};
+struct sfh_op_mode {
+ union {
+ u32 val;
+ struct {
+ u32 mode : 3;
+ u32 lidstatus : 1;
+ u32 angle : 10;
+ u32 inbagstatedbg : 2;
+ u32 ontablestate : 2;
+ u32 inbagstate : 2;
+ u32 outbagstate : 2;
+ u32 inbagmlcstate : 1;
+ u32 powerstate : 2;
+ u32 data : 3;
+ u32 devicemode : 4;
+ } op_mode;
+ };
+};
+
void sfh_interface_init(struct amd_mp2_dev *mp2);
void sfh_deinit_emp2(void);
void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 3c6011a48dab..6e084c207414 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -944,16 +944,6 @@ void vmbus_initiate_unload(bool crash)
vmbus_wait_for_unload();
}
-static void check_ready_for_resume_event(void)
-{
- /*
- * If all the old primary channels have been fixed up, then it's safe
- * to resume.
- */
- if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
- complete(&vmbus_connection.ready_for_resume_event);
-}
-
static void vmbus_setup_channel_state(struct vmbus_channel *channel,
struct vmbus_channel_offer_channel *offer)
{
@@ -1109,8 +1099,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
/* Add the channel back to the array of channels. */
vmbus_channel_map_relid(oldchannel);
- check_ready_for_resume_event();
-
mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
@@ -1296,13 +1284,28 @@ EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
* vmbus_onoffers_delivered -
- * This is invoked when all offers have been delivered.
+ * The CHANNELMSG_ALLOFFERS_DELIVERED message arrives after all
+ * boot-time offers are delivered. A boot-time offer is for the primary
+ * channel for any virtual hardware configured in the VM at the time it boots.
+ * Boot-time offers include offers for physical devices assigned to the VM
+ * via Hyper-V's Discrete Device Assignment (DDA) functionality that are
+ * handled as virtual PCI devices in Linux (e.g., NVMe devices and GPUs).
+ * Boot-time offers do not include offers for VMBus sub-channels. Because
+ * devices can be hot-added to the VM after it is booted, additional channel
+ * offers that aren't boot-time offers can be received at any time after the
+ * all-offers-delivered message.
*
- * Nothing to do here.
+ * SR-IOV NIC Virtual Functions (VFs) assigned to a VM are not considered
+ * to be assigned to the VM at boot-time, and offers for VFs may occur after
+ * the all-offers-delivered message. VFs are optional accelerators to the
+ * synthetic VMBus NIC and are effectively hot-added only after the VMBus
+ * NIC channel is opened (once it knows the guest can support it, via the
+ * sriov bit in the netvsc protocol).
*/
static void vmbus_onoffers_delivered(
struct vmbus_channel_message_header *hdr)
{
+ complete(&vmbus_connection.all_offers_delivered_event);
}
/*
@@ -1578,7 +1581,8 @@ void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
}
/*
- * vmbus_request_offers - Send a request to get all our pending offers.
+ * vmbus_request_offers - Send a request to get all our pending offers
+ * and wait for all boot-time offers to arrive.
*/
int vmbus_request_offers(void)
{
@@ -1596,6 +1600,10 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+ /*
+ * This REQUESTOFFERS message will result in the host sending an all
+ * offers delivered message after all the boot-time offers are sent.
+ */
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
@@ -1607,6 +1615,29 @@ int vmbus_request_offers(void)
goto cleanup;
}
+ /*
+ * Wait for the host to send all boot-time offers.
+ * This is a best-effort mechanism: if the wait times out, a warning
+ * is printed and execution resumes.
+ */
+ if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
+ secs_to_jiffies(60))) {
+ pr_warn("timed out waiting for all boot-time offers to be delivered.\n");
+ }
+
+ /*
+ * Flush handling of offer messages (which may initiate work on
+ * other work queues).
+ */
+ flush_workqueue(vmbus_connection.work_queue);
+
+ /*
+ * Flush workqueue for processing the incoming offers. Subchannel
+ * offers and their processing can happen later, so there is no need to
+ * flush that workqueue here.
+ */
+ flush_workqueue(vmbus_connection.handle_primary_chan_wq);
+
cleanup:
kfree(msginfo);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f001ae880e1d..8351360bba16 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -34,8 +34,8 @@ struct vmbus_connection vmbus_connection = {
.ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
- .ready_for_resume_event = COMPLETION_INITIALIZER(
- vmbus_connection.ready_for_resume_event),
+ .all_offers_delivered_event = COMPLETION_INITIALIZER(
+ vmbus_connection.all_offers_delivered_event),
};
EXPORT_SYMBOL_GPL(vmbus_connection);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index a99112e6f0b8..fec2f18679e3 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -28,7 +28,7 @@
#include <linux/sizes.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
@@ -766,16 +766,18 @@ static void hv_online_page(struct page *pg, unsigned int order)
struct hv_hotadd_state *has;
unsigned long pfn = page_to_pfn(pg);
- guard(spinlock_irqsave)(&dm_device.ha_lock);
- list_for_each_entry(has, &dm_device.ha_region_list, list) {
- /* The page belongs to a different HAS. */
- if (pfn < has->start_pfn ||
- (pfn + (1UL << order) > has->end_pfn))
- continue;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /* The page belongs to a different HAS. */
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
+ continue;
- hv_bring_pgs_online(has, pfn, 1UL << order);
- break;
+ hv_bring_pgs_online(has, pfn, 1UL << order);
+ return;
+ }
}
+ generic_online_page(pg, order);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -1586,7 +1588,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
return -ENOSPC;
}
- hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->heat_type = HV_EXTMEM_HEAT_HINT_COLD_DISCARD;
hint->reserved = 0;
for_each_sg(sgl, sg, nents, i) {
union hv_gpa_page_range *range;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 7a35c82976e0..f2e6f55d6ca6 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
/*
@@ -141,7 +141,7 @@ static int sysctl_record_panic_msg = 1;
* sysctl option to allow the user to control whether kmsg data should be
* reported to Hyper-V on panic.
*/
-static struct ctl_table hv_ctl_table[] = {
+static const struct ctl_table hv_ctl_table[] = {
{
.procname = "hyperv_record_panic_msg",
.data = &sysctl_record_panic_msg,
@@ -278,6 +278,11 @@ static void hv_kmsg_dump_register(void)
}
}
+static inline bool hv_output_page_exists(void)
+{
+ return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
int __init hv_common_init(void)
{
int i;
@@ -340,19 +345,19 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_input_arg);
/* Allocate the per-CPU state for output arg for root */
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
hyperv_pcpu_output_arg = alloc_percpu(void *);
BUG_ON(!hyperv_pcpu_output_arg);
}
- hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+ hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
hv_common_free();
return -ENOMEM;
}
- for (i = 0; i < num_possible_cpus(); i++)
+ for (i = 0; i < nr_cpu_ids; i++)
hv_vp_index[i] = VP_INVAL;
return 0;
@@ -435,7 +440,7 @@ int hv_common_cpu_init(unsigned int cpu)
void **inputarg, **outputarg;
u64 msr_vp_index;
gfp_t flags;
- int pgcount = hv_root_partition ? 2 : 1;
+ const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
int ret;
@@ -453,7 +458,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (!mem)
return -ENOMEM;
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 7400a5a4d2bd..62795f6cbb00 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -27,7 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bde637a96c37..2e7f537d53cf 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -12,7 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 52cb744b4d7f..29780f3a7478 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -15,10 +15,10 @@
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
-#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include <hyperv/hvhdk.h>
#include "hv_trace.h"
@@ -287,18 +287,10 @@ struct vmbus_connection {
struct completion ready_for_suspend_event;
/*
- * The number of primary channels that should be "fixed up"
- * upon resume: these channels are re-offered upon resume, and some
- * fields of the channel offers (i.e. child_relid and connection_id)
- * can change, so the old offermsg must be fixed up, before the resume
- * callbacks of the VSC drivers start to further touch the channels.
+ * Completed once the host has offered all boot-time channels.
+ * Note that some channels may still be pending processing on a workqueue.
*/
- atomic_t nr_chan_fixup_on_resume;
- /*
- * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
- * drop to zero.
- */
- struct completion ready_for_resume_event;
+ struct completion all_offers_delivered_event;
};
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2892b8da20a5..0f6cd44fff29 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2427,11 +2427,6 @@ static int vmbus_bus_suspend(struct device *dev)
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
- pr_err("Can not suspend due to a previous failed resuming\n");
- return -EBUSY;
- }
-
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2456,22 +2451,18 @@ static int vmbus_bus_suspend(struct device *dev)
pr_err("Sub-channel not deleted!\n");
WARN_ON_ONCE(1);
}
-
- atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
}
mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_initiate_unload(false);
- /* Reset the event for the next resume. */
- reinit_completion(&vmbus_connection.ready_for_resume_event);
-
return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
+ struct vmbus_channel *channel;
struct vmbus_channel_msginfo *msginfo;
size_t msgsize;
int ret;
@@ -2502,13 +2493,23 @@ static int vmbus_bus_resume(struct device *dev)
if (ret != 0)
return ret;
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
-
vmbus_request_offers();
- if (wait_for_completion_timeout(
- &vmbus_connection.ready_for_resume_event, secs_to_jiffies(10)) == 0)
- pr_err("Some vmbus device is missing after suspending?\n");
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ continue;
+
+ /* hvsock channels are not expected to be present. */
+ if (is_hvsock_channel(channel))
+ continue;
+
+ pr_err("channel %pUl/%pUl not present after resume.\n",
+ &channel->offermsg.offer.if_type,
+ &channel->offermsg.offer.if_instance);
+ /* TODO: Clean up these channels here */
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index b7c0b1e3c23b..9703d60e9bbf 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -332,7 +332,7 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
static DEFINE_MUTEX(hwmon_pec_mutex);
-static int hwmon_match_device(struct device *dev, void *data)
+static int hwmon_match_device(struct device *dev, const void *data)
{
return dev->class == &hwmon_class;
}
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 6cee48a3e5c3..358152868d96 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -291,12 +291,6 @@ static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types typ
}
}
-static inline bool spd5118_parity8(u8 w)
-{
- w ^= w >> 4;
- return (0x6996 >> (w & 0xf)) & 1;
-}
-
/*
* Bank and vendor id are 8-bit fields with seven data bits and odd parity.
* Vendor IDs 0 and 0x7f are invalid.
@@ -304,7 +298,7 @@ static inline bool spd5118_parity8(u8 w)
*/
static bool spd5118_vendor_valid(u8 bank, u8 id)
{
- if (!spd5118_parity8(bank) || !spd5118_parity8(id))
+ if (parity8(bank) == 0 || parity8(id) == 0)
return false;
id &= 0x7f;
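The bank and vendor ID bytes carry seven data bits plus an odd-parity bit, so a valid byte must have an odd number of set bits overall; the patch assumes parity8() returns 1 for odd parity and 0 for even, which matches the open-coded helper it removes. A small stand-alone check of that rule (the parity helper below is a portable re-implementation for the sketch, not the kernel's):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Portable equivalent of the kernel's parity8(): 1 if an odd number of bits are set. */
static int parity8(uint8_t w)
{
	w ^= w >> 4;
	return (0x6996 >> (w & 0xf)) & 1;
}

static bool vendor_byte_valid(uint8_t byte)
{
	return parity8(byte) != 0;   /* seven data bits + odd parity bit */
}

int main(void)
{
	/* 0x80 has one bit set (odd) -> valid framing; 0x81 has two (even) -> invalid. */
	printf("0x80: %s\n", vendor_byte_valid(0x80) ? "valid" : "invalid");
	printf("0x81: %s\n", vendor_byte_valid(0x81) ? "valid" : "invalid");
	return 0;
}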
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index ea38ecf26fcb..0a9380350fb5 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -75,22 +75,54 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+static struct coresight_device *coresight_get_source(struct list_head *path)
+{
+ struct coresight_device *csdev;
+
+ if (!path)
+ return NULL;
+
+ csdev = list_first_entry(path, struct coresight_node, link)->csdev;
+ if (!coresight_is_device_source(csdev))
+ return NULL;
+
+ return csdev;
+}
+
+/**
+ * coresight_blocks_source - check whether a connection blocks the trace source
+ * of the path when the connection is bound to a specific source.
+ * @src: The source device of the trace path
+ * @conn: The connection of one outport
+ *
+ * Return: false if the connection has no source bound to it, or if the source
+ * of the path matches the source bound to the connection.
+ */
+static bool coresight_blocks_source(struct coresight_device *src,
+ struct coresight_connection *conn)
+{
+ return conn->filter_src_fwnode && (conn->filter_src_dev != src);
+}
+
static struct coresight_connection *
-coresight_find_out_connection(struct coresight_device *src_dev,
- struct coresight_device *dest_dev)
+coresight_find_out_connection(struct coresight_device *csdev,
+ struct coresight_device *out_dev,
+ struct coresight_device *trace_src)
{
int i;
struct coresight_connection *conn;
- for (i = 0; i < src_dev->pdata->nr_outconns; i++) {
- conn = src_dev->pdata->out_conns[i];
- if (conn->dest_dev == dest_dev)
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ conn = csdev->pdata->out_conns[i];
+ if (coresight_blocks_source(trace_src, conn))
+ continue;
+ if (conn->dest_dev == out_dev)
return conn;
}
- dev_err(&src_dev->dev,
- "couldn't find output connection, src_dev: %s, dest_dev: %s\n",
- dev_name(&src_dev->dev), dev_name(&dest_dev->dev));
+ dev_err(&csdev->dev,
+ "couldn't find output connection, csdev: %s, out_dev: %s\n",
+ dev_name(&csdev->dev), dev_name(&out_dev->dev));
return ERR_PTR(-ENODEV);
}
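With the new trace_src parameter, connection lookup skips outports that are bound, via a "filter-source" reference, to a source other than the one driving the path. A reduced stand-alone sketch of that filtering decision follows; the structs are simplified stand-ins that model only the two fields the check uses, not the real CoreSight types.

#include <stdio.h>
#include <stdbool.h>

struct coresight_device { const char *name; };

struct coresight_connection {
	void *filter_src_fwnode;                 /* non-NULL if bound to a source */
	struct coresight_device *filter_src_dev; /* the bound source, once resolved */
};

/* True when the connection is bound to some source other than @src. */
static bool blocks_source(struct coresight_device *src,
			  struct coresight_connection *conn)
{
	return conn->filter_src_fwnode && (conn->filter_src_dev != src);
}

int main(void)
{
	struct coresight_device tpdm0 = { "tpdm0" }, tpdm1 = { "tpdm1" };
	struct coresight_connection unbound = { 0 };
	struct coresight_connection bound_to_tpdm0 = { (void *)1, &tpdm0 };

	printf("unbound port, %s source: %s\n", tpdm1.name,
	       blocks_source(&tpdm1, &unbound) ? "blocked" : "allowed");
	printf("port bound to %s, %s source: %s\n",
	       bound_to_tpdm0.filter_src_dev->name, tpdm1.name,
	       blocks_source(&tpdm1, &bound_to_tpdm0) ? "blocked" : "allowed");
	return 0;
}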
@@ -251,7 +283,8 @@ static void coresight_disable_sink(struct coresight_device *csdev)
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
int link_subtype;
struct coresight_connection *inconn, *outconn;
@@ -259,8 +292,8 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (!parent || !child)
return -EINVAL;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn))
@@ -273,15 +306,16 @@ static int coresight_enable_link(struct coresight_device *csdev,
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
struct coresight_connection *inconn, *outconn;
if (!parent || !child)
return;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_ops(csdev)->disable(csdev, inconn, outconn);
}
@@ -375,7 +409,8 @@ static void coresight_disable_path_from(struct list_head *path,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- coresight_disable_link(csdev, parent, child);
+ coresight_disable_link(csdev, parent, child,
+ coresight_get_source(path));
break;
default:
break;
@@ -418,7 +453,9 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
u32 type;
struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
+ struct coresight_device *source;
+ source = coresight_get_source(path);
list_for_each_entry_reverse(nd, path, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -456,7 +493,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- ret = coresight_enable_link(csdev, parent, child);
+ ret = coresight_enable_link(csdev, parent, child, source);
if (ret)
goto err;
break;
@@ -619,6 +656,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
+ * @source: The trace source device of the path.
* @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
@@ -628,6 +666,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
* the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *source,
struct coresight_device *sink,
struct list_head *path)
{
@@ -641,7 +680,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
- if (_coresight_build_path(sink, sink, path) == 0) {
+ if (_coresight_build_path(sink, source, sink, path) == 0) {
found = true;
goto out;
}
@@ -652,8 +691,12 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *child_dev;
child_dev = csdev->pdata->out_conns[i]->dest_dev;
+
+ if (coresight_blocks_source(source, csdev->pdata->out_conns[i]))
+ continue;
+
if (child_dev &&
- _coresight_build_path(child_dev, sink, path) == 0) {
+ _coresight_build_path(child_dev, source, sink, path) == 0) {
found = true;
break;
}
@@ -698,7 +741,7 @@ struct list_head *coresight_build_path(struct coresight_device *source,
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(source, sink, path);
+ rc = _coresight_build_path(source, source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -927,6 +970,16 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < src_csdev->pdata->nr_outconns; i++) {
conn = src_csdev->pdata->out_conns[i];
+ /* Fix the filter source device before skipping the port */
+ if (conn->filter_src_fwnode && !conn->filter_src_dev) {
+ if (dst_csdev &&
+ (conn->filter_src_fwnode == dst_csdev->dev.fwnode) &&
+ !WARN_ON_ONCE(!coresight_is_device_source(dst_csdev)))
+ conn->filter_src_dev = dst_csdev;
+ else
+ still_orphan = true;
+ }
+
/* Skip the port if it's already connected. */
if (conn->dest_dev)
continue;
@@ -977,18 +1030,40 @@ static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
csdev, coresight_orphan_match);
}
+static int coresight_clear_filter_source(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *source = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
+ if (csdev->pdata->out_conns[i]->filter_src_dev == source)
+ csdev->pdata->out_conns[i]->filter_src_dev = NULL;
+ }
+ return 0;
+}
+
/* coresight_remove_conns - Remove other device's references to this device */
static void coresight_remove_conns(struct coresight_device *csdev)
{
int i, j;
struct coresight_connection *conn;
+ if (coresight_is_device_source(csdev))
+ bus_for_each_dev(&coresight_bustype, NULL, csdev,
+ coresight_clear_filter_source);
+
/*
* Remove the input connection references from the destination device
* for each output connection.
*/
for (i = 0; i < csdev->pdata->nr_outconns; i++) {
conn = csdev->pdata->out_conns[i];
+ if (conn->filter_src_fwnode) {
+ conn->filter_src_dev = NULL;
+ fwnode_handle_put(conn->filter_src_fwnode);
+ }
+
if (!conn->dest_dev)
continue;
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index 02ef2b945a0c..9be53be8964b 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -11,10 +11,12 @@
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
struct dummy_drvdata {
struct device *dev;
struct coresight_device *csdev;
+ u8 traceid;
};
DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
@@ -72,6 +74,32 @@ static const struct coresight_ops dummy_sink_cs_ops = {
.sink_ops = &dummy_sink_ops,
};
+/* Users can read the trace ID of the dummy source from this node. */
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct dummy_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->traceid;
+ return sysfs_emit(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(traceid);
+
+static struct attribute *coresight_dummy_attrs[] = {
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_dummy_group = {
+ .attrs = coresight_dummy_attrs,
+};
+
+static const struct attribute_group *coresight_dummy_groups[] = {
+ &coresight_dummy_group,
+ NULL,
+};
+
static int dummy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -79,6 +107,11 @@ static int dummy_probe(struct platform_device *pdev)
struct coresight_platform_data *pdata;
struct dummy_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret = 0, trace_id = 0;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
@@ -90,6 +123,26 @@ static int dummy_probe(struct platform_device *pdev)
desc.subtype.source_subtype =
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &dummy_source_cs_ops;
+ desc.groups = coresight_dummy_groups;
+
+ ret = coresight_get_static_trace_id(dev, &trace_id);
+ if (!ret) {
+ /* Get the static id if id is set in device tree. */
+ ret = coresight_trace_id_get_static_system_id(trace_id);
+ if (ret < 0) {
+ dev_err(dev, "Fail to get static id.\n");
+ return ret;
+ }
+ } else {
+ /* Get next available id if id is not set in device tree. */
+ trace_id = coresight_trace_id_get_system_id();
+ if (trace_id < 0) {
+ ret = trace_id;
+ return ret;
+ }
+ }
+ drvdata->traceid = (u8)trace_id;
+
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
desc.name = coresight_alloc_device_name(&sink_devs, dev);
if (!desc.name)
@@ -104,27 +157,35 @@ static int dummy_probe(struct platform_device *pdev)
}
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto free_id;
+ }
pdev->dev.platform_data = pdata;
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto free_id;
+ }
pm_runtime_enable(dev);
dev_dbg(dev, "Dummy device initialized\n");
- return 0;
+ ret = 0;
+ goto out;
+
+free_id:
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
+
+out:
+ return ret;
}
static void dummy_remove(struct platform_device *pdev)
@@ -132,6 +193,8 @@ static void dummy_remove(struct platform_device *pdev)
struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
}
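With the new traceid attribute, a trace decoder can learn which ID the dummy source occupies in the trace stream. A short user-space read of that node is sketched below; the sysfs path is an assumption about where the attribute typically lands and the device name will differ per platform.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the actual device name depends on the platform. */
	const char *path = "/sys/bus/coresight/devices/dummy_source0/traceid";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("dummy source trace ID: %s", buf);   /* e.g. "0x31\n" */
	fclose(f);
	return 0;
}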
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index dd8c74f893db..2c1a60577728 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -268,10 +269,28 @@ struct etm4_enable_arg {
*/
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
+ u64 trfcr;
+
/* If the CPU doesn't support FEAT_TRF, nothing to do */
if (!drvdata->trfcr)
return;
- cpu_prohibit_trace();
+
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+
+ write_trfcr(trfcr);
+ kvm_tracing_set_el1_configuration(trfcr);
+}
+
+static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata)
+{
+ u64 trfcr = drvdata->trfcr;
+
+ if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+ trfcr &= ~TRFCR_EL1_ExTRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+ trfcr &= ~TRFCR_EL1_E0TRE;
+
+ return trfcr;
}
/*
@@ -286,18 +305,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
*/
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
- u64 trfcr = drvdata->trfcr;
+ u64 trfcr, guest_trfcr;
/* If the CPU doesn't support FEAT_TRF, nothing to do */
- if (!trfcr)
+ if (!drvdata->trfcr)
return;
- if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
- trfcr &= ~TRFCR_ELx_ExTRE;
- if (drvdata->config.mode & ETM_MODE_EXCL_USER)
- trfcr &= ~TRFCR_ELx_E0TRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_HOST)
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ trfcr = etm4x_get_kern_user_filter(drvdata);
write_trfcr(trfcr);
+
+ /* Set filters for guests and pass to KVM */
+ if (drvdata->config.mode & ETM_MODE_EXCL_GUEST)
+ guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ guest_trfcr = etm4x_get_kern_user_filter(drvdata);
+
+ /* TRFCR_EL1 doesn't have CX so mask it out. */
+ guest_trfcr &= ~TRFCR_EL2_CX;
+ kvm_tracing_set_el1_configuration(guest_trfcr);
}
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
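etm4x_allow_trace() now derives two filter values from the perf mode bits: the TRFCR_EL1 value written on the host, and the value handed to KVM for guests, each dropping ExTRE/E0TRE when the corresponding exclude flag is set (the real code additionally clears CX from the guest value, since TRFCR_EL1 has no CX field). A compact stand-alone sketch of that derivation; the bit positions and macro names here are illustrative constants, not the architectural definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the TRFCR and mode bits used above. */
#define TRFCR_E0TRE     (1u << 0)   /* EL0 (user) trace enable */
#define TRFCR_ExTRE     (1u << 1)   /* kernel EL trace enable */
#define MODE_EXCL_KERN  (1u << 0)
#define MODE_EXCL_USER  (1u << 1)
#define MODE_EXCL_HOST  (1u << 2)
#define MODE_EXCL_GUEST (1u << 3)

static uint32_t kern_user_filter(uint32_t base, uint32_t mode)
{
	if (mode & MODE_EXCL_KERN)
		base &= ~TRFCR_ExTRE;
	if (mode & MODE_EXCL_USER)
		base &= ~TRFCR_E0TRE;
	return base;
}

int main(void)
{
	uint32_t base = TRFCR_ExTRE | TRFCR_E0TRE;
	uint32_t mode = MODE_EXCL_HOST;     /* host-side tracing excluded */

	uint32_t host  = (mode & MODE_EXCL_HOST) ?
			 base & ~(TRFCR_ExTRE | TRFCR_E0TRE) :
			 kern_user_filter(base, mode);
	uint32_t guest = (mode & MODE_EXCL_GUEST) ?
			 base & ~(TRFCR_ExTRE | TRFCR_E0TRE) :
			 kern_user_filter(base, mode);

	printf("host TRFCR  = 0x%x\nguest TRFCR = 0x%x\n", host, guest);
	return 0;
}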
@@ -655,6 +684,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
if (attr->exclude_user)
config->mode = ETM_MODE_EXCL_USER;
+ if (attr->exclude_host)
+ config->mode |= ETM_MODE_EXCL_HOST;
+
+ if (attr->exclude_guest)
+ config->mode |= ETM_MODE_EXCL_GUEST;
+
/* Always start from the default config */
etm4_set_default_config(config);
@@ -1141,9 +1176,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
* tracing at the kernel EL and EL0, forcing to use the
* virtual time as the timestamp.
*/
- trfcr = (TRFCR_ELx_TS_VIRTUAL |
- TRFCR_ELx_ExTRE |
- TRFCR_ELx_E0TRE);
+ trfcr = (TRFCR_EL1_TS_VIRTUAL |
+ TRFCR_EL1_ExTRE |
+ TRFCR_EL1_E0TRE);
/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
if (is_kernel_in_hyp_mode())
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a9f19629f3f8..c767f8ae4cf1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2319,11 +2319,11 @@ static ssize_t ts_source_show(struct device *dev,
goto out;
}
- switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
- case TRFCR_ELx_TS_VIRTUAL:
- case TRFCR_ELx_TS_GUEST_PHYSICAL:
- case TRFCR_ELx_TS_PHYSICAL:
- val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
+ switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) {
+ case TRFCR_EL1_TS_VIRTUAL:
+ case TRFCR_EL1_TS_GUEST_PHYSICAL:
+ case TRFCR_EL1_TS_PHYSICAL:
+ val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
break;
default:
val = -1;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 9e9165f62e81..1119762b5cec 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -817,7 +817,7 @@ enum etm_impdef_type {
* @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
- u32 mode;
+ u64 mode;
u32 pe_sel;
u32 cfg;
u32 eventctrl0;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 33efe1acbef7..8faf51469bb8 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -86,14 +86,14 @@ static int funnel_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
if (!rc)
first_enable = true;
}
if (!rc)
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -130,7 +130,7 @@ static void funnel_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 64e171eaad82..8192ba3279f0 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -243,6 +243,27 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn.dest_fwnode = fwnode_handle_get(rdev_fwnode);
conn.dest_port = rendpoint.port;
+ /*
+ * Get the firmware node of the filter source through the
+ * reference. This can be used to filter by source when
+ * building a path.
+ */
+ conn.filter_src_fwnode =
+ fwnode_find_reference(&ep->fwnode, "filter-source", 0);
+ if (IS_ERR(conn.filter_src_fwnode)) {
+ conn.filter_src_fwnode = NULL;
+ } else {
+ conn.filter_src_dev =
+ coresight_find_csdev_by_fwnode(conn.filter_src_fwnode);
+ if (conn.filter_src_dev &&
+ !coresight_is_device_source(conn.filter_src_dev)) {
+ dev_warn(dev, "port %d: Filter handle is not a trace source : %s\n",
+ conn.src_port, dev_name(&conn.filter_src_dev->dev));
+ conn.filter_src_dev = NULL;
+ conn.filter_src_fwnode = NULL;
+ }
+ }
+
new_conn = coresight_add_out_conn(dev, pdata, &conn);
if (IS_ERR_VALUE(new_conn)) {
fwnode_handle_put(conn.dest_fwnode);
@@ -796,6 +817,12 @@ int coresight_get_cpu(struct device *dev)
}
EXPORT_SYMBOL_GPL(coresight_get_cpu);
+int coresight_get_static_trace_id(struct device *dev, u32 *id)
+{
+ return fwnode_property_read_u32(dev_fwnode(dev), "arm,static-trace-id", id);
+}
+EXPORT_SYMBOL_GPL(coresight_get_static_trace_id);
+
struct coresight_platform_data *
coresight_get_platform_data(struct device *dev)
{
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 05f891ca6b5c..76403530f33e 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -42,6 +42,9 @@ extern const struct device_type coresight_dev_type[];
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+#define ETM_MODE_EXCL_HOST BIT(32)
+#define ETM_MODE_EXCL_GUEST BIT(33)
+
struct cs_pair_attribute {
struct device_attribute attr;
u32 lo_off;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 0fba87de6d1a..a1181c9048c0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -126,7 +126,7 @@ static int replicator_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&out->src_refcnt) == 0) {
+ if (out->src_refcnt == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
out->src_port);
@@ -134,7 +134,7 @@ static int replicator_enable(struct coresight_device *csdev,
first_enable = true;
}
if (!rc)
- atomic_inc(&out->src_refcnt);
+ out->src_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -180,7 +180,7 @@ static void replicator_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&out->src_refcnt) == 0) {
+ if (--out->src_refcnt == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
index 53840a2c41f2..303d71911870 100644
--- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
+++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
@@ -21,13 +21,4 @@ static inline void write_trfcr(u64 val)
isb();
}
-static inline u64 cpu_prohibit_trace(void)
-{
- u64 trfcr = read_trfcr();
-
- /* Prohibit tracing at EL0 & the kernel EL */
- write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
- /* Return the original value of the TRFCR */
- return trfcr;
-}
#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index bfca103f9f84..189a4abc2561 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -24,7 +24,7 @@ DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
static bool coresight_device_is_tpdm(struct coresight_device *csdev)
{
- return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return (coresight_is_device_source(csdev)) &&
(csdev->subtype.source_subtype ==
CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
}
@@ -110,6 +110,16 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
csdev->pdata->in_conns[i]->dest_port != inport)
continue;
+ /*
+ * If this port has a hardcoded filter, use the source
+ * device directly.
+ */
+ if (csdev->pdata->in_conns[i]->filter_src_fwnode) {
+ in = csdev->pdata->in_conns[i]->filter_src_dev;
+ if (!in)
+ continue;
+ }
+
if (coresight_device_is_tpdm(in)) {
if (drvdata->dsb_esize || drvdata->cmb_esize)
return -EEXIST;
@@ -124,7 +134,6 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
}
}
-
return rc;
}
@@ -190,10 +199,10 @@ static int tpda_enable(struct coresight_device *csdev,
int ret = 0;
spin_lock(&drvdata->spinlock);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
ret = __tpda_enable(drvdata, in->dest_port);
if (!ret) {
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
csdev->refcnt++;
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
}
@@ -223,7 +232,7 @@ static void tpda_disable(struct coresight_device *csdev,
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
__tpda_disable(drvdata, in->dest_port);
csdev->refcnt--;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index b7d99e91ab84..c38f9701665e 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -640,8 +640,7 @@ static ssize_t dsb_mode_store(struct device *dev,
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
- (val & ~TPDM_DSB_MODE_MASK))
+ if ((kstrtoul(buf, 0, &val)) || (val & ~TPDM_DSB_MODE_MASK))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1308,8 +1307,8 @@ static void tpdm_remove(struct amba_device *adev)
*/
static struct amba_id tpdm_ids[] = {
{
- .id = 0x000f0e00,
- .mask = 0x000fff00,
+ .id = 0x001f0e00,
+ .mask = 0x00ffff00,
},
{ 0, 0, NULL },
};
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index d98e12cb30ec..378af743be45 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -12,6 +12,12 @@
#include "coresight-trace-id.h"
+enum trace_id_flags {
+ TRACE_ID_ANY = 0x0,
+ TRACE_ID_PREFER_ODD = 0x1,
+ TRACE_ID_REQ_STATIC = 0x2,
+};
+
/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
@@ -74,21 +80,25 @@ static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
* Otherwise allocate next available ID.
*/
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
- int preferred_id, bool prefer_odd_id)
+ int preferred_id, unsigned int flags)
{
int id = 0;
/* for backwards compatibility, cpu IDs may use preferred value */
- if (IS_VALID_CS_TRACE_ID(preferred_id) &&
- !test_bit(preferred_id, id_map->used_ids)) {
- id = preferred_id;
- goto trace_id_allocated;
- } else if (prefer_odd_id) {
+ if (IS_VALID_CS_TRACE_ID(preferred_id)) {
+ if (!test_bit(preferred_id, id_map->used_ids)) {
+ id = preferred_id;
+ goto trace_id_allocated;
+ } else if (flags & TRACE_ID_REQ_STATIC)
+ return -EBUSY;
+ } else if (flags & TRACE_ID_PREFER_ODD) {
/* may use odd ids to avoid preferred legacy cpu IDs */
id = coresight_trace_id_find_odd_id(id_map);
if (id)
goto trace_id_allocated;
- }
+ } else if (!IS_VALID_CS_TRACE_ID(preferred_id) &&
+ (flags & TRACE_ID_REQ_STATIC))
+ return -EINVAL;
/*
* skip reserved bit 0, look at bitmap length of
@@ -153,7 +163,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
*/
id = coresight_trace_id_alloc_new_id(id_map,
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
- false);
+ TRACE_ID_ANY);
if (!IS_VALID_CS_TRACE_ID(id))
goto get_cpu_id_out_unlock;
@@ -188,14 +198,14 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
+static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map,
+ int preferred_id, unsigned int traceid_flags)
{
unsigned long flags;
int id;
spin_lock_irqsave(&id_map->lock, flags);
- /* prefer odd IDs for system components to avoid legacy CPU IDS */
- id = coresight_trace_id_alloc_new_id(id_map, 0, true);
+ id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
@@ -255,10 +265,19 @@ EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
int coresight_trace_id_get_system_id(void)
{
- return coresight_trace_id_map_get_system_id(&id_map_default);
+ /* prefer odd IDs for system components to avoid legacy CPU IDs */
+ return coresight_trace_id_map_get_system_id(&id_map_default, 0,
+ TRACE_ID_PREFER_ODD);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
+int coresight_trace_id_get_static_system_id(int trace_id)
+{
+ return coresight_trace_id_map_get_system_id(&id_map_default,
+ trace_id, TRACE_ID_REQ_STATIC);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id);
+
void coresight_trace_id_put_system_id(int id)
{
coresight_trace_id_map_put_system_id(&id_map_default, id);
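The allocator now takes a flags word instead of a bool: TRACE_ID_REQ_STATIC demands exactly the preferred ID (failing with -EBUSY if it is taken and -EINVAL if it is out of range), while TRACE_ID_PREFER_ODD keeps dynamically assigned system IDs away from the even legacy CPU IDs. The following is a small user-space model of that policy over a plain bitmap; the bound, flag names and control flow mirror the intent only, not the exact kernel implementation.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define MAX_IDS        0x70          /* illustrative upper bound for valid IDs */
#define ID_ANY         0x0
#define ID_PREFER_ODD  0x1
#define ID_REQ_STATIC  0x2

static bool used[MAX_IDS];

static bool id_valid(int id) { return id > 0 && id < MAX_IDS; }

static int alloc_id(int preferred, unsigned int flags)
{
	int id;

	if (id_valid(preferred)) {
		if (!used[preferred]) {
			used[preferred] = true;
			return preferred;
		}
		if (flags & ID_REQ_STATIC)
			return -EBUSY;   /* static request, but the ID is taken */
	} else if (flags & ID_REQ_STATIC) {
		return -EINVAL;          /* static request with an invalid ID */
	}

	if (flags & ID_PREFER_ODD) {
		for (id = 1; id < MAX_IDS; id += 2) {
			if (!used[id]) {
				used[id] = true;
				return id;
			}
		}
	}
	for (id = 1; id < MAX_IDS; id++) {
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	}
	return -EINVAL;
}

int main(void)
{
	printf("dynamic (prefer odd): %#x\n", alloc_id(0, ID_PREFER_ODD));
	printf("static 0x35:          %#x\n", alloc_id(0x35, ID_REQ_STATIC));
	printf("static 0x35 again:    %d\n",  alloc_id(0x35, ID_REQ_STATIC)); /* -EBUSY */
	return 0;
}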
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 9aae50a553ca..db68e1ec56b6 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -117,6 +117,15 @@ int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *i
int coresight_trace_id_get_system_id(void);
/**
+ * Allocate a CoreSight static trace ID for a system component.
+ *
+ * Used to allocate static IDs for system trace sources such as the dummy source.
+ *
+ * Return: Trace ID or -EINVAL if allocation is impossible.
+ */
+int coresight_trace_id_get_static_system_id(int id);
+
+/**
* Release an allocated system trace ID.
*
* Unconditionally release a trace ID allocated to a system component.
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 919804b12a67..fff67aac8418 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include "coresight-self-hosted-trace.h"
@@ -221,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
*/
trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_enable_trbe();
/* Synchronize the TRBE enable event */
isb();
@@ -239,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
*/
trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_disable_trbe();
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
@@ -253,8 +256,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
- trbe_drain_and_disable_local(cpudata);
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
@@ -1110,6 +1113,16 @@ static bool is_perf_trbe(struct perf_output_handle *handle)
return true;
}
+static u64 cpu_prohibit_trace(void)
+{
+ u64 trfcr = read_trfcr();
+
+ /* Prohibit tracing at EL0 & the kernel EL */
+ write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE));
+ /* Return the original value of the TRFCR */
+ return trfcr;
+}
+
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
struct perf_output_handle **handle_ptr = dev;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d72993355473..47d9e6c3bac0 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -857,8 +857,9 @@ static irqreturn_t intel_th_irq(int irq, void *data)
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
+ * @drvdata: data private to the driver
* @devres: resources indexed by th_mmio_idx
- * @irq: irq number
+ * @ndevres: number of entries in @devres
*/
struct intel_th *
intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index eec95c724b25..fc438f445771 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -756,6 +756,7 @@ config I2C_IMX
config I2C_IMX_LPI2C
tristate "IMX Low Power I2C interface"
depends on ARCH_MXC || COMPILE_TEST
+ select I2C_SLAVE
help
Say Y here if you want to use the Low Power IIC bus controller
on the Freescale i.MX processors.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index b6d1140eb678..171d29d2770e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1162,128 +1162,6 @@ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
}
}
-/* NOTE: Keep this list in sync with drivers/platform/x86/dell-smo8800.c */
-static const char *const acpi_smo8800_ids[] = {
- "SMO8800",
- "SMO8801",
- "SMO8810",
- "SMO8811",
- "SMO8820",
- "SMO8821",
- "SMO8830",
- "SMO8831",
-};
-
-static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
- u32 nesting_level,
- void *context,
- void **return_value)
-{
- struct acpi_device_info *info;
- acpi_status status;
- char *hid;
- int i;
-
- status = acpi_get_object_info(obj_handle, &info);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- if (!(info->valid & ACPI_VALID_HID))
- goto smo88xx_not_found;
-
- hid = info->hardware_id.string;
- if (!hid)
- goto smo88xx_not_found;
-
- i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
- if (i < 0)
- goto smo88xx_not_found;
-
- kfree(info);
-
- *return_value = NULL;
- return AE_CTRL_TERMINATE;
-
-smo88xx_not_found:
- kfree(info);
- return AE_OK;
-}
-
-static bool is_dell_system_with_lis3lv02d(void)
-{
- void *err = ERR_PTR(-ENOENT);
-
- if (!dmi_match(DMI_SYS_VENDOR, "Dell Inc."))
- return false;
-
- /*
- * Check that ACPI device SMO88xx is present and is functioning.
- * Function acpi_get_devices() already filters all ACPI devices
- * which are not present or are not functioning.
- * ACPI device SMO88xx represents our ST microelectronics lis3lv02d
- * accelerometer but unfortunately ACPI does not provide any other
- * information (like I2C address).
- */
- acpi_get_devices(NULL, check_acpi_smo88xx_device, NULL, &err);
-
- return !IS_ERR(err);
-}
-
-/*
- * Accelerometer's I2C address is not specified in DMI nor ACPI,
- * so it is needed to define mapping table based on DMI product names.
- */
-static const struct {
- const char *dmi_product_name;
- unsigned short i2c_addr;
-} dell_lis3lv02d_devices[] = {
- /*
- * Dell platform team told us that these Latitude devices have
- * ST microelectronics accelerometer at I2C address 0x29.
- */
- { "Latitude E5250", 0x29 },
- { "Latitude E5450", 0x29 },
- { "Latitude E5550", 0x29 },
- { "Latitude E6440", 0x29 },
- { "Latitude E6440 ATG", 0x29 },
- { "Latitude E6540", 0x29 },
- /*
- * Additional individual entries were added after verification.
- */
- { "Latitude 5480", 0x29 },
- { "Precision 3540", 0x29 },
- { "Precision M6800", 0x29 },
- { "Vostro V131", 0x1d },
- { "Vostro 5568", 0x29 },
- { "XPS 15 7590", 0x29 },
-};
-
-static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
-{
- struct i2c_board_info info;
- const char *dmi_product_name;
- int i;
-
- dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
- for (i = 0; i < ARRAY_SIZE(dell_lis3lv02d_devices); ++i) {
- if (strcmp(dmi_product_name,
- dell_lis3lv02d_devices[i].dmi_product_name) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(dell_lis3lv02d_devices)) {
- dev_warn(&priv->pci_dev->dev,
- "Accelerometer lis3lv02d is present on SMBus but its"
- " address is unknown, skipping registration\n");
- return;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
- i2c_new_client_device(&priv->adapter, &info);
-}
-
/* Register optional targets */
static void i801_probe_optional_targets(struct i801_priv *priv)
{
@@ -1303,9 +1181,6 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
if (dmi_name_in_vendors("FUJITSU"))
dmi_walk(dmi_check_onboard_devices, &priv->adapter);
- if (is_dell_system_with_lis3lv02d())
- register_dell_lis3lv02d_i2c_device(priv);
-
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c24ccefb015e..35a221e2c11c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -583,6 +583,9 @@ static int i2c_device_probe(struct device *dev)
goto err_detach_pm_domain;
}
+ client->debugfs = debugfs_create_dir(dev_name(&client->dev),
+ client->adapter->debugfs);
+
if (driver->probe)
status = driver->probe(client);
else
@@ -602,6 +605,7 @@ static int i2c_device_probe(struct device *dev)
return 0;
err_release_driver_resources:
+ debugfs_remove_recursive(client->debugfs);
devres_release_group(&client->dev, client->devres_group_id);
err_detach_pm_domain:
dev_pm_domain_detach(&client->dev, do_power_on);
@@ -627,6 +631,8 @@ static void i2c_device_remove(struct device *dev)
driver->remove(client);
}
+ debugfs_remove_recursive(client->debugfs);
+
devres_release_group(&client->dev, client->devres_group_id);
dev_pm_domain_detach(&client->dev, true);
@@ -1015,8 +1021,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
if (status)
goto out_remove_swnode;
- client->debugfs = debugfs_create_dir(dev_name(&client->dev), adap->debugfs);
-
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
@@ -1061,7 +1065,6 @@ void i2c_unregister_device(struct i2c_client *client)
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
- debugfs_remove_recursive(client->debugfs);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
}
@@ -1297,12 +1300,14 @@ new_device_store(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
- info.flags |= I2C_CLIENT_USER;
-
client = i2c_new_client_device(adap, &info);
if (IS_ERR(client))
return PTR_ERR(client);
+ /* Keep track of the added device */
+ mutex_lock(&adap->userspace_clients_lock);
+ list_add_tail(&client->detected, &adap->userspace_clients);
+ mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);
@@ -1310,15 +1315,6 @@ new_device_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(new_device);
-static int __i2c_find_user_addr(struct device *dev, void *addrp)
-{
- struct i2c_client *client = i2c_verify_client(dev);
- unsigned short addr = *(unsigned short *)addrp;
-
- return client && client->flags & I2C_CLIENT_USER &&
- i2c_encode_flags_to_addr(client) == addr;
-}
-
/*
* And of course let the users delete the devices they instantiated, if
* they got it wrong. This interface can only be used to delete devices
@@ -1333,7 +1329,7 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
- struct device *child_dev;
+ struct i2c_client *client, *next;
unsigned short addr;
char end;
int res;
@@ -1349,19 +1345,28 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- mutex_lock(&core_lock);
/* Make sure the device was added through sysfs */
- child_dev = device_find_child(&adap->dev, &addr, __i2c_find_user_addr);
- if (child_dev) {
- i2c_unregister_device(i2c_verify_client(child_dev));
- put_device(child_dev);
- } else {
- dev_err(dev, "Can't find userspace-created device at %#x\n", addr);
- count = -ENOENT;
+ res = -ENOENT;
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ if (i2c_encode_flags_to_addr(client) == addr) {
+ dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
+ "delete_device", client->name, client->addr);
+
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ res = count;
+ break;
+ }
}
- mutex_unlock(&core_lock);
+ mutex_unlock(&adap->userspace_clients_lock);
- return count;
+ if (res < 0)
+ dev_err(dev, "%s: Can't find device in list\n",
+ "delete_device");
+ return res;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
delete_device_store);
@@ -1532,6 +1537,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
+ mutex_init(&adap->userspace_clients_lock);
+ INIT_LIST_HEAD(&adap->userspace_clients);
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
@@ -1697,6 +1704,23 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
+static void i2c_do_del_adapter(struct i2c_driver *driver,
+ struct i2c_adapter *adapter)
+{
+ struct i2c_client *client, *_n;
+
+ /* Remove the devices we created ourselves as the result of hardware
+ * probing (using a driver's detect method) */
+ list_for_each_entry_safe(client, _n, &driver->clients, detected) {
+ if (client->adapter == adapter) {
+ dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
+ client->name, client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ }
+}
+
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -1712,6 +1736,12 @@ static int __unregister_dummy(struct device *dev, void *dummy)
return 0;
}
+static int __process_removed_adapter(struct device_driver *d, void *data)
+{
+ i2c_do_del_adapter(to_i2c_driver(d), data);
+ return 0;
+}
+
/**
* i2c_del_adapter - unregister I2C adapter
* @adap: the adapter being unregistered
@@ -1723,6 +1753,7 @@ static int __unregister_dummy(struct device *dev, void *dummy)
void i2c_del_adapter(struct i2c_adapter *adap)
{
struct i2c_adapter *found;
+ struct i2c_client *client, *next;
/* First make sure that this adapter was ever added */
mutex_lock(&core_lock);
@@ -1734,16 +1765,31 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
i2c_acpi_remove_space_handler(adap);
+ /* Tell drivers about this removal */
+ mutex_lock(&core_lock);
+ bus_for_each_drv(&i2c_bus_type, NULL, adap,
+ __process_removed_adapter);
+ mutex_unlock(&core_lock);
+
+ /* Remove devices instantiated from sysfs */
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
- mutex_lock(&core_lock);
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
- mutex_unlock(&core_lock);
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -1963,6 +2009,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -1980,13 +2027,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
}
EXPORT_SYMBOL(i2c_register_driver);
-static int __i2c_unregister_detected_client(struct device *dev, void *argp)
+static int __process_removed_driver(struct device *dev, void *data)
{
- struct i2c_client *client = i2c_verify_client(dev);
-
- if (client && client->flags & I2C_CLIENT_AUTO)
- i2c_unregister_device(client);
-
+ if (dev->type == &i2c_adapter_type)
+ i2c_do_del_adapter(data, to_i2c_adapter(dev));
return 0;
}
@@ -1997,12 +2041,7 @@ static int __i2c_unregister_detected_client(struct device *dev, void *argp)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- mutex_lock(&core_lock);
- /* Satisfy __must_check, function can't fail */
- if (driver_for_each_device(&driver->driver, NULL, NULL,
- __i2c_unregister_detected_client)) {
- }
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
pr_debug("driver [%s] unregistered\n", driver->driver.name);
@@ -2429,7 +2468,6 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
- info.flags = I2C_CLIENT_AUTO;
err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
@@ -2456,7 +2494,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_client_device(adapter, &info);
- if (IS_ERR(client))
+ if (!IS_ERR(client))
+ list_add_tail(&client->detected, &driver->clients);
+ else
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
info.type, info.addr);
}
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 42310c9a00c2..d5dc4180afbc 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1919,7 +1919,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
if (master->ops->set_speed) {
- master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
if (ret)
goto err_bus_cleanup;
}
@@ -2486,7 +2486,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
- int ret;
+ int ret, id = -ENODEV;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
@@ -2497,7 +2497,15 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->timeout = 1000;
adap->retries = 3;
- ret = i2c_add_adapter(adap);
+ if (master->dev.of_node)
+ id = of_alias_get_id(master->dev.of_node, "i2c");
+
+ if (id >= 0) {
+ adap->nr = id;
+ ret = i2c_add_numbered_adapter(adap);
+ } else {
+ ret = i2c_add_adapter(adap);
+ }
if (ret)
return ret;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 90dee3ec5520..77da199c7413 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -57,3 +57,14 @@ config MIPI_I3C_HCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci.
+
+config MIPI_I3C_HCI_PCI
+ tristate "MIPI I3C Host Controller Interface PCI support"
+ depends on MIPI_I3C_HCI
+ depends on PCI
+ help
+ Support for MIPI I3C Host Controller Interface compatible hardware
+ on the PCI bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci-pci.
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index d4b80eb8cecd..2fbf8b2addd0 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -251,14 +251,6 @@ struct dw_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
-static u8 even_parity(u8 p)
-{
- p ^= p >> 4;
- p &= 0xf;
-
- return (0x9669 >> p) & 1;
-}
-
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
@@ -848,7 +840,7 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
u32 olddevs, newdevs;
- u8 p, last_addr = 0;
+ u8 last_addr = 0;
int ret, pos;
ret = pm_runtime_resume_and_get(master->dev);
@@ -873,9 +865,9 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
}
master->devs[pos].addr = ret;
- p = even_parity(ret);
last_addr = ret;
- ret |= (p << 7);
+
+ ret |= parity8(ret) ? 0 : BIT(7);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
@@ -1647,6 +1639,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
void dw_i3c_common_remove(struct dw_i3c_master *master)
{
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
pm_runtime_disable(master->dev);
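During DAA the device address table stores the assigned 7-bit dynamic address together with a parity bit in bit 7, chosen so the full byte has odd parity; the open-coded even_parity() helper is replaced by setting BIT(7) only when parity8() reports even parity over the address. A stand-alone illustration of that encoding, using the GCC/Clang __builtin_parity builtin in place of the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Encode a 7-bit dynamic address with an odd-parity bit in bit 7. */
static uint8_t encode_dyn_addr(uint8_t addr)
{
	uint8_t byte = addr & 0x7f;

	if (!__builtin_parity(byte))    /* even so far -> set bit 7 to make it odd */
		byte |= 1u << 7;
	return byte;
}

int main(void)
{
	/* 0x09 has two bits set, so the parity bit must be added: 0x89. */
	printf("0x09 -> 0x%02x\n", encode_dyn_addr(0x09));
	/* 0x0b already has odd parity, so bit 7 stays clear. */
	printf("0x0b -> 0x%02x\n", encode_dyn_addr(0x0b));
	return 0;
}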
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 06c0592487d3..fedbe6624a1c 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -889,8 +889,7 @@ static u32 prepare_rr0_dev_address(u32 addr)
ret |= (addr & GENMASK(9, 7)) << 6;
/* RR0[0] = ~XOR(addr[6:0]) */
- if (!(hweight8(addr & 0x7f) & 1))
- ret |= 1;
+ ret |= parity8(addr & 0x7f) ? 0 : BIT(0);
return ret;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index 1f8cd5c48fde..e3d3ef757035 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -5,3 +5,4 @@ mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
dat_v1.o dct_v1.o \
hci_quirks.o
+obj-$(CONFIG_MIPI_I3C_HCI_PCI) += mipi-i3c-hci-pci.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 47b9b4d4ed3f..85c4916972e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -40,15 +40,6 @@
#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
-static inline bool dynaddr_parity(unsigned int addr)
-{
- addr |= 1 << 7;
- addr += addr >> 4;
- addr += addr >> 2;
- addr += addr >> 1;
- return (addr & 1);
-}
-
static int hci_dat_v1_init(struct i3c_hci *hci)
{
unsigned int dat_idx;
@@ -123,7 +114,7 @@ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
- (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ (parity8(address) ? 0 : DAT_0_DYNADDR_PARITY);
dat_w0_write(dat_idx, dat_w0);
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index e8e56a8d2057..491dfe70b660 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -758,9 +758,26 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
complete(&rh->op_done);
if (status & INTR_TRANSFER_ABORT) {
+ u32 ring_status;
+
dev_notice_ratelimited(&hci->master.dev,
"ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
+ ring_status = rh_reg_read(RING_STATUS);
+ if (!(ring_status & RING_STATUS_RUNNING) &&
+ status & INTR_TRANSFER_COMPLETION &&
+ status & INTR_TRANSFER_ERR) {
+ /*
+ * A ring stop followed by a run is an Intel-specific
+ * quirk required after resuming the halted controller.
+ * Do it only when the ring is not in the running state
+ * after a transfer error.
+ */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
+ }
}
if (status & INTR_WARN_INS_STOP_MODE)
dev_warn_ratelimited(&hci->master.dev,
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
new file mode 100644
index 000000000000..c6c3a3ec11ea
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI glue code for MIPI I3C HCI driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ */
+#include <linux/acpi.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+struct mipi_i3c_hci_pci_info {
+ int (*init)(struct pci_dev *pci);
+};
+
+#define INTEL_PRIV_OFFSET 0x2b0
+#define INTEL_PRIV_SIZE 0x28
+#define INTEL_PRIV_RESETS 0x04
+#define INTEL_PRIV_RESETS_RESET BIT(0)
+#define INTEL_PRIV_RESETS_RESET_DONE BIT(1)
+
+static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+
+static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
+{
+ unsigned long timeout;
+ void __iomem *priv;
+
+ priv = devm_ioremap(&pci->dev,
+ pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
+ INTEL_PRIV_SIZE);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Assert reset, wait for completion and release reset */
+ writel(0, priv + INTEL_PRIV_RESETS);
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (!(readl(priv + INTEL_PRIV_RESETS) &
+ INTEL_PRIV_RESETS_RESET_DONE)) {
+ if (time_after(jiffies, timeout))
+ break;
+ cpu_relax();
+ }
+ writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
+
+ return 0;
+}
+
+static struct mipi_i3c_hci_pci_info intel_info = {
+ .init = mipi_i3c_hci_pci_intel_init,
+};
+
+static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct mipi_i3c_hci_pci_info *info;
+ struct platform_device *pdev;
+ struct resource res[2];
+ int dev_id, ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = pci->irq;
+ res[1].end = pci->irq;
+
+ dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
+ if (dev_id < 0)
+ return dev_id;
+
+ pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->dev.parent = &pci->dev;
+ device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
+ if (info && info->init) {
+ ret = info->init(pci);
+ if (ret)
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ pci_set_drvdata(pci, pdev);
+
+ return 0;
+
+err:
+ platform_device_put(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+ return ret;
+}
+
+static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
+{
+ struct platform_device *pdev = pci_get_drvdata(pci);
+ int dev_id = pdev->id;
+
+ platform_device_unregister(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+}
+
+static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Panther Lake-H */
+ { PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
+ /* Panther Lake-P */
+ { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
+
+static struct pci_driver mipi_i3c_hci_pci_driver = {
+ .name = "mipi_i3c_hci_pci",
+ .id_table = mipi_i3c_hci_pci_devices,
+ .probe = mipi_i3c_hci_pci_probe,
+ .remove = mipi_i3c_hci_pci_remove,
+};
+
+module_pci_driver(mipi_i3c_hci_pci_driver);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index 3d5c8719db3d..517e494ba555 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -9,37 +9,93 @@
#define _ADXL345_H_
#define ADXL345_REG_DEVID 0x00
+#define ADXL345_REG_THRESH_TAP 0x1D
#define ADXL345_REG_OFSX 0x1E
#define ADXL345_REG_OFSY 0x1F
#define ADXL345_REG_OFSZ 0x20
#define ADXL345_REG_OFS_AXIS(index) (ADXL345_REG_OFSX + (index))
+
+/* Tap duration */
+#define ADXL345_REG_DUR 0x21
+/* Tap latency */
+#define ADXL345_REG_LATENT 0x22
+/* Tap window */
+#define ADXL345_REG_WINDOW 0x23
+/* Activity threshold */
+#define ADXL345_REG_THRESH_ACT 0x24
+/* Inactivity threshold */
+#define ADXL345_REG_THRESH_INACT 0x25
+/* Inactivity time */
+#define ADXL345_REG_TIME_INACT 0x26
+/* Axis enable control for activity and inactivity detection */
+#define ADXL345_REG_ACT_INACT_CTRL 0x27
+/* Free-fall threshold */
+#define ADXL345_REG_THRESH_FF 0x28
+/* Free-fall time */
+#define ADXL345_REG_TIME_FF 0x29
+/* Axis control for single tap or double tap */
+#define ADXL345_REG_TAP_AXIS 0x2A
+/* Source of single tap or double tap */
+#define ADXL345_REG_ACT_TAP_STATUS 0x2B
+/* Data rate and power mode control */
#define ADXL345_REG_BW_RATE 0x2C
#define ADXL345_REG_POWER_CTL 0x2D
+#define ADXL345_REG_INT_ENABLE 0x2E
+#define ADXL345_REG_INT_MAP 0x2F
+#define ADXL345_REG_INT_SOURCE 0x30
+#define ADXL345_REG_INT_SOURCE_MSK 0xFF
#define ADXL345_REG_DATA_FORMAT 0x31
-#define ADXL345_REG_DATAX0 0x32
-#define ADXL345_REG_DATAY0 0x34
-#define ADXL345_REG_DATAZ0 0x36
-#define ADXL345_REG_DATA_AXIS(index) \
- (ADXL345_REG_DATAX0 + (index) * sizeof(__le16))
+#define ADXL345_REG_XYZ_BASE 0x32
+#define ADXL345_REG_DATA_AXIS(index) \
+ (ADXL345_REG_XYZ_BASE + (index) * sizeof(__le16))
+
+#define ADXL345_REG_FIFO_CTL 0x38
+#define ADXL345_FIFO_CTL_SAMPLES_MSK GENMASK(4, 0)
+/* 0: INT1, 1: INT2 */
+#define ADXL345_FIFO_CTL_TRIGGER_MSK BIT(5)
+#define ADXL345_FIFO_CTL_MODE_MSK GENMASK(7, 6)
+#define ADXL345_REG_FIFO_STATUS 0x39
+#define ADXL345_REG_FIFO_STATUS_MSK 0x3F
+#define ADXL345_INT_OVERRUN BIT(0)
+#define ADXL345_INT_WATERMARK BIT(1)
+#define ADXL345_INT_FREE_FALL BIT(2)
+#define ADXL345_INT_INACTIVITY BIT(3)
+#define ADXL345_INT_ACTIVITY BIT(4)
+#define ADXL345_INT_DOUBLE_TAP BIT(5)
+#define ADXL345_INT_SINGLE_TAP BIT(6)
+#define ADXL345_INT_DATA_READY BIT(7)
+
+/*
+ * BW_RATE bits - Bandwidth and output data rate. The default value is
+ * 0x0A, which translates to a 100 Hz output data rate
+ */
#define ADXL345_BW_RATE GENMASK(3, 0)
+#define ADXL345_BW_LOW_POWER BIT(4)
#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
-#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
+#define ADXL345_POWER_CTL_WAKEUP GENMASK(1, 0)
+#define ADXL345_POWER_CTL_SLEEP BIT(2)
+#define ADXL345_POWER_CTL_MEASURE BIT(3)
+#define ADXL345_POWER_CTL_AUTO_SLEEP BIT(4)
+#define ADXL345_POWER_CTL_LINK BIT(5)
-#define ADXL345_DATA_FORMAT_RANGE GENMASK(1, 0) /* Set the g range */
-#define ADXL345_DATA_FORMAT_JUSTIFY BIT(2) /* Left-justified (MSB) mode */
-#define ADXL345_DATA_FORMAT_FULL_RES BIT(3) /* Up to 13-bits resolution */
-#define ADXL345_DATA_FORMAT_SPI_3WIRE BIT(6) /* 3-wire SPI mode */
-#define ADXL345_DATA_FORMAT_SELF_TEST BIT(7) /* Enable a self test */
-
+/* Set the g range */
+#define ADXL345_DATA_FORMAT_RANGE GENMASK(1, 0)
+/* Data is left justified */
+#define ADXL345_DATA_FORMAT_JUSTIFY BIT(2)
+/* Up to 13-bits resolution */
+#define ADXL345_DATA_FORMAT_FULL_RES BIT(3)
+#define ADXL345_DATA_FORMAT_SPI_3WIRE BIT(6)
+#define ADXL345_DATA_FORMAT_SELF_TEST BIT(7)
#define ADXL345_DATA_FORMAT_2G 0
#define ADXL345_DATA_FORMAT_4G 1
#define ADXL345_DATA_FORMAT_8G 2
#define ADXL345_DATA_FORMAT_16G 3
#define ADXL345_DEVID 0xE5
+#define ADXL345_FIFO_SIZE 32
/*
* In full-resolution mode, scale factor is maintained at ~4 mg/LSB
@@ -62,6 +118,7 @@ struct adxl345_chip_info {
};
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ bool fifo_delay_default,
int (*setup)(struct device*, struct regmap*));
#endif /* _ADXL345_H_ */
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index b1efab0f6404..d1b2d3985a40 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -7,6 +7,8 @@
* Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL345.pdf
*/
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -14,36 +16,92 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
#include "adxl345.h"
-struct adxl345_data {
+#define ADXL345_FIFO_BYPASS 0
+#define ADXL345_FIFO_FIFO 1
+#define ADXL345_FIFO_STREAM 2
+
+#define ADXL345_DIRS 3
+
+#define ADXL345_INT_NONE 0xff
+#define ADXL345_INT1 0
+#define ADXL345_INT2 1
+
+struct adxl345_state {
const struct adxl345_chip_info *info;
struct regmap *regmap;
+ bool fifo_delay; /* extra delay needed between FIFO reads on fast SPI */
+ int irq;
+ u8 intio;
+ u8 int_map;
+ u8 watermark;
+ u8 fifo_mode;
+ __le16 fifo_buf[ADXL345_DIRS * ADXL345_FIFO_SIZE + 1] __aligned(IIO_DMA_MINALIGN);
};
-#define ADXL345_CHANNEL(index, axis) { \
+#define ADXL345_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .address = index, \
+ .address = (reg), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (index), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
}
+enum adxl345_chans {
+ chan_x, chan_y, chan_z,
+};
+
static const struct iio_chan_spec adxl345_channels[] = {
- ADXL345_CHANNEL(0, X),
- ADXL345_CHANNEL(1, Y),
- ADXL345_CHANNEL(2, Z),
+ ADXL345_CHANNEL(0, chan_x, X),
+ ADXL345_CHANNEL(1, chan_y, Y),
+ ADXL345_CHANNEL(2, chan_z, Z),
};
+static const unsigned long adxl345_scan_masks[] = {
+ BIT(chan_x) | BIT(chan_y) | BIT(chan_z),
+ 0
+};
+
+static int adxl345_set_interrupts(struct adxl345_state *st)
+{
+ int ret;
+ unsigned int int_enable = st->int_map;
+ unsigned int int_map;
+
+ /*
+ * Any bits set to 0 in the INT map register send their respective
+ * interrupts to the INT1 pin, whereas bits set to 1 send their respective
+ * interrupts to the INT2 pin. Depending on which pin is in use (intio),
+ * the mask of enabled interrupts is therefore written either as-is or inverted.
+ */
+ int_map = FIELD_GET(ADXL345_REG_INT_SOURCE_MSK,
+ st->intio ? st->int_map : ~st->int_map);
+
+ ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, int_map);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, ADXL345_REG_INT_ENABLE, int_enable);
+}
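A standalone illustration, not driver code, of the mapping rule described in the comment above: with 0-bits routed to INT1 and 1-bits routed to INT2, the mask of enabled interrupts is written unchanged when INT2 is used and inverted when INT1 is used.

#include <stdint.h>
#include <stdio.h>

#define INT_OVERRUN   (1u << 0)	/* mirrors ADXL345_INT_OVERRUN */
#define INT_WATERMARK (1u << 1)	/* mirrors ADXL345_INT_WATERMARK */

/* INT_MAP value: 0-bits go to the INT1 pin, 1-bits go to the INT2 pin. */
static uint8_t int_map_for_pin(uint8_t enabled, int use_int2)
{
	return use_int2 ? enabled : (uint8_t)~enabled;
}

int main(void)
{
	uint8_t enabled = INT_WATERMARK | INT_OVERRUN;

	printf("INT1 map: 0x%02x\n", int_map_for_pin(enabled, 0));	/* 0xfc */
	printf("INT2 map: 0x%02x\n", int_map_for_pin(enabled, 1));	/* 0x03 */
	return 0;
}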
+
static int adxl345_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- struct adxl345_data *data = iio_priv(indio_dev);
+ struct adxl345_state *st = iio_priv(indio_dev);
__le16 accel;
long long samp_freq_nhz;
unsigned int regval;
@@ -56,7 +114,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
* ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
* and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
*/
- ret = regmap_bulk_read(data->regmap,
+ ret = regmap_bulk_read(st->regmap,
ADXL345_REG_DATA_AXIS(chan->address),
&accel, sizeof(accel));
if (ret < 0)
@@ -66,10 +124,10 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = data->info->uscale;
+ *val2 = st->info->uscale;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
- ret = regmap_read(data->regmap,
+ ret = regmap_read(st->regmap,
ADXL345_REG_OFS_AXIS(chan->address), &regval);
if (ret < 0)
return ret;
@@ -81,7 +139,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = regmap_read(data->regmap, ADXL345_REG_BW_RATE, &regval);
+ ret = regmap_read(st->regmap, ADXL345_REG_BW_RATE, &regval);
if (ret < 0)
return ret;
@@ -99,7 +157,7 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- struct adxl345_data *data = iio_priv(indio_dev);
+ struct adxl345_state *st = iio_priv(indio_dev);
s64 n;
switch (mask) {
@@ -108,14 +166,14 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
* 8-bit resolution at +/- 2g, that is 4x accel data scale
* factor
*/
- return regmap_write(data->regmap,
+ return regmap_write(st->regmap,
ADXL345_REG_OFS_AXIS(chan->address),
val / 4);
case IIO_CHAN_INFO_SAMP_FREQ:
n = div_s64(val * NANOHZ_PER_HZ + val2,
ADXL345_BASE_RATE_NANO_HZ);
- return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
+ return regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE,
clamp_val(ilog2(n), 0,
ADXL345_BW_RATE));
@@ -124,6 +182,24 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int fifo_mask = 0x1F;
+ int ret;
+
+ value = min(value, ADXL345_FIFO_SIZE - 1);
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_FIFO_CTL, fifo_mask, value);
+ if (ret)
+ return ret;
+
+ st->watermark = value;
+ st->int_map |= ADXL345_INT_WATERMARK;
+
+ return 0;
+}
+
static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
@@ -138,6 +214,33 @@ static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
+/**
+ * adxl345_set_measure_en() - Enable and disable measuring.
+ *
+ * @st: The device data.
+ * @en: Enable measurements, else standby mode.
+ *
+ * For lowest power operation, standby mode can be used. In standby mode,
+ * current consumption drops to about 0.1 uA (typical) and no measurements
+ * are made. Placing the device into standby mode preserves the contents of
+ * the FIFO.
+ *
+ * Return: Returns 0 if successful, or a negative error value.
+ */
+static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
+{
+ unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
+
+ return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
+}
+
+static void adxl345_powerdown(void *ptr)
+{
+ struct adxl345_state *st = ptr;
+
+ adxl345_set_measure_en(st, false);
+}
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"0.09765625 0.1953125 0.390625 0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600 3200"
);
@@ -151,37 +254,244 @@ static const struct attribute_group adxl345_attrs_group = {
.attrs = adxl345_attrs,
};
-static const struct iio_info adxl345_info = {
- .attrs = &adxl345_attrs_group,
- .read_raw = adxl345_read_raw,
- .write_raw = adxl345_write_raw,
- .write_raw_get_fmt = adxl345_write_raw_get_fmt,
+static int adxl345_set_fifo(struct adxl345_state *st)
+{
+ int ret;
+
+ /* FIFO should only be configured while in standby mode */
+ ret = adxl345_set_measure_en(st, false);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
+ FIELD_PREP(ADXL345_FIFO_CTL_SAMPLES_MSK,
+ st->watermark) |
+ FIELD_PREP(ADXL345_FIFO_CTL_TRIGGER_MSK,
+ st->intio) |
+ FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
+ st->fifo_mode));
+ if (ret < 0)
+ return ret;
+
+ return adxl345_set_measure_en(st, true);
+}
+
+/**
+ * adxl345_get_samples() - Read number of FIFO entries.
+ * @st: The initialized state instance of this driver.
+ *
+ * The sensor does not support treating any axis individually or excluding
+ * it from measurement; each FIFO entry always holds all three axes.
+ *
+ * Return: the number of FIFO entries, or a negative error code.
+ */
+static int adxl345_get_samples(struct adxl345_state *st)
+{
+ unsigned int regval = 0;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_FIFO_STATUS, &regval);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(ADXL345_REG_FIFO_STATUS_MSK, regval);
+}
+
+/**
+ * adxl345_fifo_transfer() - Read a given number of FIFO entries.
+ * @st: The sensor state instance of this driver.
+ * @samples: The number of FIFO entries (fifo_entry, one X/Y/Z set each) to read.
+ *
+ * It is recommended that a multiple-byte read of all registers be performed to
+ * prevent a change in data between reads of sequential registers. That is to
+ * read out the data registers X0, X1, Y0, Y1, Z0, Z1, i.e. 6 bytes at once.
+ *
+ * Return: 0 on success, or a negative error value.
+ */
+static int adxl345_fifo_transfer(struct adxl345_state *st, int samples)
+{
+ size_t count;
+ int i, ret = 0;
+
+ /* count is 3x the fifo_buf element size, i.e. 6 bytes */
+ count = sizeof(st->fifo_buf[0]) * ADXL345_DIRS;
+ for (i = 0; i < samples; i++) {
+ /* read 3x 2 byte elements from base address into next fifo_buf position */
+ ret = regmap_bulk_read(st->regmap, ADXL345_REG_XYZ_BASE,
+ st->fifo_buf + (i * count / 2), count);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * To ensure that the FIFO has completely popped, there must be at least 5
+ * us between the end of reading the data registers, signified by the
+ * transition to register 0x38 from 0x37 or the CS pin going high, and the
+ * start of new reads of the FIFO or reading the FIFO_STATUS register. For
+ * SPI operation at 1.5 MHz or lower, the register addressing portion of the
+ * transmission is sufficient delay to ensure the FIFO has completely
+ * popped. It is necessary for SPI operation greater than 1.5 MHz to
+ * de-assert the CS pin to ensure a total of 5 us, which is at most 3.4 us
+ * at 5 MHz operation.
+ */
+ if (st->fifo_delay && samples > 1)
+ udelay(3);
+ }
+ return ret;
+}
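A back-of-the-envelope check of the figures in the comment above, written as standalone C rather than driver code: the 8-bit register address occupies 8 SCLK cycles, so at 1.5 MHz it already spans more than the required 5 us, while at 5 MHz it covers only 1.6 us and up to 3.4 us must be added by de-asserting CS, which is why st->fifo_delay adds a small udelay between reads.

#include <stdio.h>

/* Extra gap (ns) still needed after the 8 address clocks at sclk_hz. */
static long long fifo_pop_gap_ns(long long sclk_hz)
{
	long long addr_ns = 8LL * 1000000000LL / sclk_hz;	/* 8 address bits */
	long long gap = 5000 - addr_ns;				/* 5 us required */

	return gap > 0 ? gap : 0;
}

int main(void)
{
	printf("1.5 MHz: %lld ns extra\n", fifo_pop_gap_ns(1500000));	/* 0 */
	printf("5.0 MHz: %lld ns extra\n", fifo_pop_gap_ns(5000000));	/* 3400 */
	return 0;
}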
+
+/**
+ * adxl345_fifo_reset() - Empty the FIFO in error condition.
+ * @st: The instance to the state object of the sensor.
+ *
+ * Drain all remaining elements from the FIFO. Reading the interrupt source
+ * register afterwards clears the pending interrupt sources.
+ */
+static void adxl345_fifo_reset(struct adxl345_state *st)
+{
+ unsigned int regval;
+ int samples;
+
+ adxl345_set_measure_en(st, false);
+
+ samples = adxl345_get_samples(st);
+ if (samples > 0)
+ adxl345_fifo_transfer(st, samples);
+
+ regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
+
+ adxl345_set_measure_en(st, true);
+}
+
+static int adxl345_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adxl345_set_interrupts(st);
+ if (ret < 0)
+ return ret;
+
+ st->fifo_mode = ADXL345_FIFO_STREAM;
+ return adxl345_set_fifo(st);
+}
+
+static int adxl345_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->fifo_mode = ADXL345_FIFO_BYPASS;
+ ret = adxl345_set_fifo(st);
+ if (ret < 0)
+ return ret;
+
+ st->int_map = 0x00;
+ return adxl345_set_interrupts(st);
+}
+
+static const struct iio_buffer_setup_ops adxl345_buffer_ops = {
+ .postenable = adxl345_buffer_postenable,
+ .predisable = adxl345_buffer_predisable,
};
-static int adxl345_powerup(void *regmap)
+static int adxl345_get_status(struct adxl345_state *st)
{
- return regmap_write(regmap, ADXL345_REG_POWER_CTL, ADXL345_POWER_CTL_MEASURE);
+ int ret;
+ unsigned int regval;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(ADXL345_REG_INT_SOURCE_MSK, regval);
}
-static void adxl345_powerdown(void *regmap)
+static int adxl345_fifo_push(struct iio_dev *indio_dev,
+ int samples)
{
- regmap_write(regmap, ADXL345_REG_POWER_CTL, ADXL345_POWER_CTL_STANDBY);
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int i, ret;
+
+ if (samples <= 0)
+ return -EINVAL;
+
+ ret = adxl345_fifo_transfer(st, samples);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ADXL345_DIRS * samples; i += ADXL345_DIRS)
+ iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+
+ return 0;
+}
+
+/**
+ * adxl345_irq_handler() - Handle irqs of the ADXL345.
+ * @irq: The irq being handled.
+ * @p: The struct iio_device pointer for the device.
+ *
+ * Return: IRQ_NONE if the device did not raise the interrupt, IRQ_HANDLED otherwise.
+ */
+static irqreturn_t adxl345_irq_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int int_stat;
+ int samples;
+
+ int_stat = adxl345_get_status(st);
+ if (int_stat <= 0)
+ return IRQ_NONE;
+
+ if (int_stat & ADXL345_INT_OVERRUN)
+ goto err;
+
+ if (int_stat & ADXL345_INT_WATERMARK) {
+ samples = adxl345_get_samples(st);
+ if (samples < 0)
+ goto err;
+
+ if (adxl345_fifo_push(indio_dev, samples) < 0)
+ goto err;
+ }
+ return IRQ_HANDLED;
+
+err:
+ adxl345_fifo_reset(st);
+
+ return IRQ_HANDLED;
}
+static const struct iio_info adxl345_info = {
+ .attrs = &adxl345_attrs_group,
+ .read_raw = adxl345_read_raw,
+ .write_raw = adxl345_write_raw,
+ .write_raw_get_fmt = adxl345_write_raw_get_fmt,
+ .hwfifo_set_watermark = adxl345_set_watermark,
+};
+
/**
- * adxl345_core_probe() - probe and setup for the adxl345 accelerometer,
- * also covers the adlx375 accelerometer
+ * adxl345_core_probe() - Probe and setup for the accelerometer.
* @dev: Driver model representation of the device
* @regmap: Regmap instance for the device
+ * @fifo_delay_default: Whether a delay between FIFO reads is needed (fast SPI)
* @setup: Setup routine to be executed right before the standard device
* setup
*
+ * For SPI operation faster than 1.5 MHz, it is necessary to deassert the CS
+ * pin between FIFO reads to guarantee a total gap of 5 us; the register
+ * addressing alone is then too short. The extra delay needed at 5 MHz
+ * operation is at most 3.4 us. This is not a concern when using I2C mode
+ * because the communication rate is low enough to ensure a sufficient delay
+ * between FIFO reads.
+ * Ref: "Retrieving Data from FIFO", p. 21 of 36, Data Sheet ADXL345 Rev. G
+ *
* Return: 0 on success, negative errno on error
*/
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ bool fifo_delay_default,
int (*setup)(struct device*, struct regmap*))
{
- struct adxl345_data *data;
+ struct adxl345_state *st;
struct iio_dev *indio_dev;
u32 regval;
unsigned int data_format_mask = (ADXL345_DATA_FORMAT_RANGE |
@@ -190,30 +500,32 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
ADXL345_DATA_FORMAT_SELF_TEST);
int ret;
- indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
- data = iio_priv(indio_dev);
- data->regmap = regmap;
- data->info = device_get_match_data(dev);
- if (!data->info)
+ st = iio_priv(indio_dev);
+ st->regmap = regmap;
+ st->info = device_get_match_data(dev);
+ if (!st->info)
return -ENODEV;
+ st->fifo_delay = fifo_delay_default;
- indio_dev->name = data->info->name;
+ indio_dev->name = st->info->name;
indio_dev->info = &adxl345_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adxl345_channels;
indio_dev->num_channels = ARRAY_SIZE(adxl345_channels);
+ indio_dev->available_scan_masks = adxl345_scan_masks;
if (setup) {
/* Perform optional initial bus specific configuration */
- ret = setup(dev, data->regmap);
+ ret = setup(dev, st->regmap);
if (ret)
return ret;
/* Enable full-resolution mode */
- ret = regmap_update_bits(data->regmap, ADXL345_REG_DATA_FORMAT,
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
data_format_mask,
ADXL345_DATA_FORMAT_FULL_RES);
if (ret)
@@ -222,14 +534,14 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
} else {
/* Enable full-resolution mode (init all data_format bits) */
- ret = regmap_write(data->regmap, ADXL345_REG_DATA_FORMAT,
+ ret = regmap_write(st->regmap, ADXL345_REG_DATA_FORMAT,
ADXL345_DATA_FORMAT_FULL_RES);
if (ret)
return dev_err_probe(dev, ret,
"Failed to set data range\n");
}
- ret = regmap_read(data->regmap, ADXL345_REG_DEVID, &regval);
+ ret = regmap_read(st->regmap, ADXL345_REG_DEVID, &regval);
if (ret < 0)
return dev_err_probe(dev, ret, "Error reading device ID\n");
@@ -238,14 +550,43 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
regval, ADXL345_DEVID);
/* Enable measurement mode */
- ret = adxl345_powerup(data->regmap);
+ ret = adxl345_set_measure_en(st, true);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to enable measurement mode\n");
- ret = devm_add_action_or_reset(dev, adxl345_powerdown, data->regmap);
+ ret = devm_add_action_or_reset(dev, adxl345_powerdown, st);
if (ret < 0)
return ret;
+ st->intio = ADXL345_INT1;
+ st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
+ if (st->irq < 0) {
+ st->intio = ADXL345_INT2;
+ st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT2");
+ if (st->irq < 0)
+ st->intio = ADXL345_INT_NONE;
+ }
+
+ if (st->intio != ADXL345_INT_NONE) {
+ /* FIFO_STREAM mode is going to be activated later */
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &adxl345_buffer_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, st->irq, NULL,
+ &adxl345_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
+ FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
+ ADXL345_FIFO_BYPASS));
+ if (ret < 0)
+ return ret;
+ }
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(adxl345_core_probe, "IIO_ADXL345");
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index cb23fb11fcd7..8c385dd6c01d 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -27,7 +27,7 @@ static int adxl345_i2c_probe(struct i2c_client *client)
if (IS_ERR(regmap))
return dev_err_probe(&client->dev, PTR_ERR(regmap), "Error initializing regmap\n");
- return adxl345_core_probe(&client->dev, regmap, NULL);
+ return adxl345_core_probe(&client->dev, regmap, false, NULL);
}
static const struct adxl345_chip_info adxl345_i2c_info = {
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index 968e7b390d4b..7e518aea17bf 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -12,6 +12,7 @@
#include "adxl345.h"
#define ADXL345_MAX_SPI_FREQ_HZ 5000000
+#define ADXL345_MAX_FREQ_NO_FIFO_DELAY 1500000
static const struct regmap_config adxl345_spi_regmap_config = {
.reg_bits = 8,
@@ -28,6 +29,7 @@ static int adxl345_spi_setup(struct device *dev, struct regmap *regmap)
static int adxl345_spi_probe(struct spi_device *spi)
{
struct regmap *regmap;
+ bool needs_delay;
/* Bail out if max_speed_hz exceeds 5 MHz */
if (spi->max_speed_hz > ADXL345_MAX_SPI_FREQ_HZ)
@@ -38,10 +40,11 @@ static int adxl345_spi_probe(struct spi_device *spi)
if (IS_ERR(regmap))
return dev_err_probe(&spi->dev, PTR_ERR(regmap), "Error initializing regmap\n");
+ needs_delay = spi->max_speed_hz > ADXL345_MAX_FREQ_NO_FIFO_DELAY;
if (spi->mode & SPI_3WIRE)
- return adxl345_core_probe(&spi->dev, regmap, adxl345_spi_setup);
+ return adxl345_core_probe(&spi->dev, regmap, needs_delay, adxl345_spi_setup);
else
- return adxl345_core_probe(&spi->dev, regmap, NULL);
+ return adxl345_core_probe(&spi->dev, regmap, needs_delay, NULL);
}
static const struct adxl345_chip_info adxl345_spi_info = {
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 009e6243c6cb..96ba028157ee 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -66,7 +66,7 @@ struct bma220_data {
struct {
s8 chans[3];
/* Ensure timestamp is naturally aligned. */
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 65aac60f1245..987212a7c038 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -129,6 +129,8 @@
#define FXLS8962AF_DEVICE_ID 0x62
#define FXLS8964AF_DEVICE_ID 0x84
+#define FXLS8974CF_DEVICE_ID 0x86
+#define FXLS8967AF_DEVICE_ID 0x87
/* Raw temp channel offset */
#define FXLS8962AF_TEMP_CENTER_VAL 25
@@ -766,6 +768,18 @@ static const struct fxls8962af_chip_info fxls_chip_info_table[] = {
.channels = fxls8962af_channels,
.num_channels = ARRAY_SIZE(fxls8962af_channels),
},
+ [fxls8967af] = {
+ .chip_id = FXLS8967AF_DEVICE_ID,
+ .name = "fxls8967af",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
+ [fxls8974cf] = {
+ .chip_id = FXLS8974CF_DEVICE_ID,
+ .name = "fxls8974cf",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
};
static const struct iio_info fxls8962af_info = {
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index 2e1bb43ef2a1..1b9156b6b2e3 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -30,6 +30,8 @@ static int fxls8962af_probe(struct i2c_client *client)
static const struct i2c_device_id fxls8962af_id[] = {
{ "fxls8962af", fxls8962af },
{ "fxls8964af", fxls8964af },
+ { "fxls8967af", fxls8967af },
+ { "fxls8974cf", fxls8974cf },
{}
};
MODULE_DEVICE_TABLE(i2c, fxls8962af_id);
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index 6eaa2803b26f..1c9adfc8c0dc 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -11,6 +11,8 @@ struct device;
enum {
fxls8962af,
fxls8964af,
+ fxls8967af,
+ fxls8974cf,
};
int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
diff --git a/drivers/iio/accel/kionix-kx022a-i2c.c b/drivers/iio/accel/kionix-kx022a-i2c.c
index b39a43ecadff..42388636ca31 100644
--- a/drivers/iio/accel/kionix-kx022a-i2c.c
+++ b/drivers/iio/accel/kionix-kx022a-i2c.c
@@ -38,7 +38,9 @@ static int kx022a_i2c_probe(struct i2c_client *i2c)
static const struct i2c_device_id kx022a_i2c_id[] = {
{ .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
{ .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx134-1211", .driver_data = (kernel_ulong_t)&kx134_chip_info },
{ .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
+ { .name = "kx134acr-lbz", .driver_data = (kernel_ulong_t)&kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, kx022a_i2c_id);
@@ -46,7 +48,9 @@ MODULE_DEVICE_TABLE(i2c, kx022a_i2c_id);
static const struct of_device_id kx022a_of_match[] = {
{ .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
{ .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "kionix,kx134-1211", .data = &kx134_chip_info },
{ .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
+ { .compatible = "rohm,kx134acr-lbz", .data = &kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
diff --git a/drivers/iio/accel/kionix-kx022a-spi.c b/drivers/iio/accel/kionix-kx022a-spi.c
index c38a47806a00..e30d21083dc8 100644
--- a/drivers/iio/accel/kionix-kx022a-spi.c
+++ b/drivers/iio/accel/kionix-kx022a-spi.c
@@ -38,7 +38,9 @@ static int kx022a_spi_probe(struct spi_device *spi)
static const struct spi_device_id kx022a_id[] = {
{ .name = "kx022a", .driver_data = (kernel_ulong_t)&kx022a_chip_info },
{ .name = "kx132-1211", .driver_data = (kernel_ulong_t)&kx132_chip_info },
+ { .name = "kx134-1211", .driver_data = (kernel_ulong_t)&kx134_chip_info },
{ .name = "kx132acr-lbz", .driver_data = (kernel_ulong_t)&kx132acr_chip_info },
+ { .name = "kx134acr-lbz", .driver_data = (kernel_ulong_t)&kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, kx022a_id);
@@ -46,7 +48,9 @@ MODULE_DEVICE_TABLE(spi, kx022a_id);
static const struct of_device_id kx022a_of_match[] = {
{ .compatible = "kionix,kx022a", .data = &kx022a_chip_info },
{ .compatible = "kionix,kx132-1211", .data = &kx132_chip_info },
+ { .compatible = "kionix,kx134-1211", .data = &kx134_chip_info },
{ .compatible = "rohm,kx132acr-lbz", .data = &kx132acr_chip_info },
+ { .compatible = "rohm,kx134acr-lbz", .data = &kx134acr_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, kx022a_of_match);
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 670bac21965b..5aeb3b951ac5 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -5,6 +5,7 @@
* ROHM/KIONIX accelerometer driver
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -407,11 +408,21 @@ static const int kx022a_scale_table[][2] = {
{ 0, 4788403 },
};
+/* KX134ACR-LBZ ranges are (+/-) 8, 16, 32, 64 G */
+static const int kx134acr_lbz_scale_table[][2] = {
+ { 0, 2394202 },
+ { 0, 4788403 },
+ { 0, 9576807 },
+ { 0, 19153613 },
+};
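A hedged aside on where these numbers come from, assuming 16-bit signed samples that span the full selected range: scale = range_g * 9.80665 / 2^15 in m/s^2 per LSB, stored as nano units for IIO_VAL_INT_PLUS_NANO. The standalone snippet below reproduces the table entries; it is an illustration, not driver code.

#include <stdio.h>

/* m/s^2 per LSB for a 16-bit signed sample at +/- range_g, in nano units. */
static long long scale_nano(int range_g)
{
	return (long long)(range_g * 9.80665 / 32768.0 * 1e9 + 0.5);
}

int main(void)
{
	int ranges[] = { 8, 16, 32, 64 };	/* KX134ACR-LBZ ranges */

	for (int i = 0; i < 4; i++)
		printf("+/-%2d g -> { 0, %lld }\n", ranges[i], scale_nano(ranges[i]));
	/* 2394202, 4788403, 9576807, 19153613 - matching the table above. */
	return 0;
}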
+
static int kx022a_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
+ struct kx022a_data *data = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*vals = (const int *)kx022a_accel_samp_freq_table;
@@ -420,9 +431,8 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SCALE:
- *vals = (const int *)kx022a_scale_table;
- *length = ARRAY_SIZE(kx022a_scale_table) *
- ARRAY_SIZE(kx022a_scale_table[0]);
+ *vals = (const int *)data->chip_info->scale_table;
+ *length = data->chip_info->scale_table_size;
*type = IIO_VAL_INT_PLUS_NANO;
return IIO_AVAIL_LIST;
default:
@@ -438,17 +448,17 @@ static void kx022a_reg2freq(unsigned int val, int *val1, int *val2)
*val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1];
}
-static void kx022a_reg2scale(unsigned int val, unsigned int *val1,
- unsigned int *val2)
+static void kx022a_reg2scale(struct kx022a_data *data, unsigned int val,
+ unsigned int *val1, unsigned int *val2)
{
val &= KX022A_MASK_GSEL;
val >>= KX022A_GSEL_SHIFT;
- *val1 = kx022a_scale_table[val][0];
- *val2 = kx022a_scale_table[val][1];
+ *val1 = data->chip_info->scale_table[val][0];
+ *val2 = data->chip_info->scale_table[val][1];
}
-static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
+static int __kx022a_turn_on_off(struct kx022a_data *data, bool on)
{
int ret;
@@ -469,7 +479,7 @@ static int kx022a_turn_off_lock(struct kx022a_data *data)
int ret;
mutex_lock(&data->mutex);
- ret = kx022a_turn_on_off_unlocked(data, false);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
mutex_unlock(&data->mutex);
@@ -480,7 +490,7 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
{
int ret;
- ret = kx022a_turn_on_off_unlocked(data, true);
+ ret = __kx022a_turn_on_off(data, true);
mutex_unlock(&data->mutex);
return ret;
@@ -543,11 +553,11 @@ static int kx022a_write_raw(struct iio_dev *idev,
kx022a_turn_on_unlock(data);
break;
case IIO_CHAN_INFO_SCALE:
- n = ARRAY_SIZE(kx022a_scale_table);
+ n = data->chip_info->scale_table_size / 2;
while (n-- > 0)
- if (val == kx022a_scale_table[n][0] &&
- val2 == kx022a_scale_table[n][1])
+ if (val == data->chip_info->scale_table[n][0] &&
+ val2 == data->chip_info->scale_table[n][1])
break;
if (n < 0) {
ret = -EINVAL;
@@ -642,7 +652,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
if (ret < 0)
return ret;
- kx022a_reg2scale(regval, val, val2);
+ kx022a_reg2scale(data, regval, val, val2);
return IIO_VAL_INT_PLUS_NANO;
}
@@ -912,18 +922,19 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
{
int ret = 0;
- ret = kx022a_turn_off_lock(data);
+ guard(mutex)(&data->mutex);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
return ret;
ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI);
if (ret)
- goto unlock_out;
+ return ret;
ret = regmap_clear_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state &= ~KX022A_STATE_FIFO;
@@ -931,12 +942,7 @@ static int kx022a_fifo_disable(struct kx022a_data *data)
kfree(data->fifo_buffer);
- return kx022a_turn_on_unlock(data);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
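The goto/unlock pairs disappear in this hunk because guard(mutex) from <linux/cleanup.h> (included at the top of this patch) releases the lock automatically on every return path. A minimal sketch of the pattern, with a hypothetical helper name:

/* Illustration only: the lock is dropped whenever the scope is left. */
static int example_locked_update(struct kx022a_data *data, bool on)
{
	guard(mutex)(&data->mutex);

	if (data->trigger_enabled)
		return -EBUSY;		/* no unlock_out label needed */

	return __kx022a_turn_on_off(data, on);
}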
static int kx022a_buffer_predisable(struct iio_dev *idev)
@@ -959,33 +965,29 @@ static int kx022a_fifo_enable(struct kx022a_data *data)
if (!data->fifo_buffer)
return -ENOMEM;
- ret = kx022a_turn_off_lock(data);
+ guard(mutex)(&data->mutex);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
return ret;
/* Update watermark to HW */
ret = kx022a_fifo_set_wmi(data);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable buffer */
ret = regmap_set_bits(data->regmap, data->chip_info->buf_cntl2,
KX022A_MASK_BUF_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state |= KX022A_STATE_FIFO;
ret = regmap_set_bits(data->regmap, data->ien_reg,
KX022A_MASK_WMI);
if (ret)
- goto unlock_out;
-
- return kx022a_turn_on_unlock(data);
-
-unlock_out:
- mutex_unlock(&data->mutex);
+ return ret;
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
static int kx022a_buffer_postenable(struct iio_dev *idev)
@@ -1053,7 +1055,7 @@ static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
struct kx022a_data *data = iio_priv(idev);
irqreturn_t ret = IRQ_NONE;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled) {
iio_trigger_poll_nested(data->trig);
@@ -1068,8 +1070,6 @@ static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
ret = IRQ_HANDLED;
}
- mutex_unlock(&data->mutex);
-
return ret;
}
@@ -1079,32 +1079,26 @@ static int kx022a_trigger_set_state(struct iio_trigger *trig,
struct kx022a_data *data = iio_trigger_get_drvdata(trig);
int ret = 0;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled == state)
- goto unlock_out;
+ return 0;
if (data->state & KX022A_STATE_FIFO) {
dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
- ret = -EBUSY;
- goto unlock_out;
+ return -EBUSY;
}
- ret = kx022a_turn_on_off_unlocked(data, false);
+ ret = __kx022a_turn_on_off(data, false);
if (ret)
- goto unlock_out;
+ return ret;
data->trigger_enabled = state;
ret = kx022a_set_drdy_irq(data, state);
if (ret)
- goto unlock_out;
-
- ret = kx022a_turn_on_off_unlocked(data, true);
-
-unlock_out:
- mutex_unlock(&data->mutex);
+ return ret;
- return ret;
+ return __kx022a_turn_on_off(data, true);
}
static const struct iio_trigger_ops kx022a_trigger_ops = {
@@ -1121,10 +1115,15 @@ static int kx022a_chip_init(struct kx022a_data *data)
return ret;
/*
- * I've seen I2C read failures if we poll too fast after the sensor
- * reset. Slight delay gives I2C block the time to recover.
+ * According to the power-on procedure documents, a delay of (at least)
+ * 2 ms is required after the software reset. This should be the same for
+ * all of KX022ACR-Z, KX132-1211, KX132ACR-LBZ and KX134ACR-LBZ.
+ *
+ * https://fscdn.rohm.com/kionix/en/document/AN010_KX022ACR-Z_Power-on_Procedure_E.pdf
+ * https://fscdn.rohm.com/kionix/en/document/TN027-Power-On-Procedure.pdf
+ * https://fscdn.rohm.com/kionix/en/document/AN011_KX134ACR-LBZ_Power-on_Procedure_E.pdf
*/
- msleep(1);
+ msleep(2);
ret = regmap_read_poll_timeout(data->regmap, data->chip_info->cntl2, val,
!(val & KX022A_MASK_SRST),
@@ -1158,6 +1157,9 @@ const struct kx022a_chip_info kx022a_chip_info = {
.regmap_config = &kx022a_regmap_config,
.channels = kx022a_channels,
.num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX022A_FIFO_LENGTH,
.who = KX022A_REG_WHO,
.id = KX022A_ID,
@@ -1183,6 +1185,9 @@ const struct kx022a_chip_info kx132_chip_info = {
.regmap_config = &kx132_regmap_config,
.channels = kx132_channels,
.num_channels = ARRAY_SIZE(kx132_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX132_FIFO_LENGTH,
.who = KX132_REG_WHO,
.id = KX132_ID,
@@ -1204,6 +1209,35 @@ const struct kx022a_chip_info kx132_chip_info = {
};
EXPORT_SYMBOL_NS_GPL(kx132_chip_info, "IIO_KX022A");
+const struct kx022a_chip_info kx134_chip_info = {
+ .name = "kx134-1211",
+ .regmap_config = &kx132_regmap_config,
+ .channels = kx132_channels,
+ .num_channels = ARRAY_SIZE(kx132_channels),
+ .scale_table = kx134acr_lbz_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx134acr_lbz_scale_table) *
+ ARRAY_SIZE(kx134acr_lbz_scale_table[0]),
+ .fifo_length = KX132_FIFO_LENGTH,
+ .who = KX132_REG_WHO,
+ .id = KX134_1211_ID,
+ .cntl = KX132_REG_CNTL,
+ .cntl2 = KX132_REG_CNTL2,
+ .odcntl = KX132_REG_ODCNTL,
+ .buf_cntl1 = KX132_REG_BUF_CNTL1,
+ .buf_cntl2 = KX132_REG_BUF_CNTL2,
+ .buf_clear = KX132_REG_BUF_CLEAR,
+ .buf_status1 = KX132_REG_BUF_STATUS_1,
+ .buf_smp_lvl_mask = KX132_MASK_BUF_SMP_LVL,
+ .buf_read = KX132_REG_BUF_READ,
+ .inc1 = KX132_REG_INC1,
+ .inc4 = KX132_REG_INC4,
+ .inc5 = KX132_REG_INC5,
+ .inc6 = KX132_REG_INC6,
+ .xout_l = KX132_REG_XOUT_L,
+ .get_fifo_bytes_available = kx132_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx134_chip_info, "IIO_KX022A");
+
/*
* Despite the naming, KX132ACR-LBZ is not similar to KX132-1211 but it is
* exact subset of KX022A. KX132ACR-LBZ is meant to be used for industrial
@@ -1216,6 +1250,9 @@ const struct kx022a_chip_info kx132acr_chip_info = {
.regmap_config = &kx022a_regmap_config,
.channels = kx022a_channels,
.num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx022a_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]),
.fifo_length = KX022A_FIFO_LENGTH,
.who = KX022A_REG_WHO,
.id = KX132ACR_LBZ_ID,
@@ -1236,6 +1273,34 @@ const struct kx022a_chip_info kx132acr_chip_info = {
};
EXPORT_SYMBOL_NS_GPL(kx132acr_chip_info, "IIO_KX022A");
+const struct kx022a_chip_info kx134acr_chip_info = {
+ .name = "kx134acr-lbz",
+ .regmap_config = &kx022a_regmap_config,
+ .channels = kx022a_channels,
+ .num_channels = ARRAY_SIZE(kx022a_channels),
+ .scale_table = kx134acr_lbz_scale_table,
+ .scale_table_size = ARRAY_SIZE(kx134acr_lbz_scale_table) *
+ ARRAY_SIZE(kx134acr_lbz_scale_table[0]),
+ .fifo_length = KX022A_FIFO_LENGTH,
+ .who = KX022A_REG_WHO,
+ .id = KX134ACR_LBZ_ID,
+ .cntl = KX022A_REG_CNTL,
+ .cntl2 = KX022A_REG_CNTL2,
+ .odcntl = KX022A_REG_ODCNTL,
+ .buf_cntl1 = KX022A_REG_BUF_CNTL1,
+ .buf_cntl2 = KX022A_REG_BUF_CNTL2,
+ .buf_clear = KX022A_REG_BUF_CLEAR,
+ .buf_status1 = KX022A_REG_BUF_STATUS_1,
+ .buf_read = KX022A_REG_BUF_READ,
+ .inc1 = KX022A_REG_INC1,
+ .inc4 = KX022A_REG_INC4,
+ .inc5 = KX022A_REG_INC5,
+ .inc6 = KX022A_REG_INC6,
+ .xout_l = KX022A_REG_XOUT_L,
+ .get_fifo_bytes_available = kx022a_get_fifo_bytes_available,
+};
+EXPORT_SYMBOL_NS_GPL(kx134acr_chip_info, "IIO_KX022A");
+
int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chip_info)
{
static const char * const regulator_names[] = {"io-vdd", "vdd"};
diff --git a/drivers/iio/accel/kionix-kx022a.h b/drivers/iio/accel/kionix-kx022a.h
index 7060438ad88c..0ed54f584223 100644
--- a/drivers/iio/accel/kionix-kx022a.h
+++ b/drivers/iio/accel/kionix-kx022a.h
@@ -14,6 +14,7 @@
#define KX022A_REG_WHO 0x0f
#define KX022A_ID 0xc8
#define KX132ACR_LBZ_ID 0xd8
+#define KX134ACR_LBZ_ID 0xcc
#define KX022A_REG_CNTL2 0x19
#define KX022A_MASK_SRST BIT(7)
@@ -77,6 +78,7 @@
#define KX132_REG_WHO 0x13
#define KX132_ID 0x3d
+#define KX134_1211_ID 0x46
#define KX132_FIFO_LENGTH 86
@@ -135,6 +137,14 @@ struct kx022a_data;
*
* @name: name of the device
* @regmap_config: pointer to register map configuration
+ * @scale_table: Table of scaling factors, one entry per supported
+ * acceleration measurement range. Each entry is a
+ * scaling factor consisting of two integers: the first
+ * value is the integer part and the second value is
+ * the fractional part in parts per billion.
+ * @scale_table_size: Total number of integers in the scale table.
* @channels: pointer to iio_chan_spec array
* @num_channels: number of iio_chan_spec channels
* @fifo_length: number of 16-bit samples in a full buffer
@@ -161,6 +171,8 @@ struct kx022a_data;
struct kx022a_chip_info {
const char *name;
const struct regmap_config *regmap_config;
+ const int (*scale_table)[2];
+ const int scale_table_size;
const struct iio_chan_spec *channels;
unsigned int num_channels;
unsigned int fifo_length;
@@ -187,6 +199,8 @@ int kx022a_probe_internal(struct device *dev, const struct kx022a_chip_info *chi
extern const struct kx022a_chip_info kx022a_chip_info;
extern const struct kx022a_chip_info kx132_chip_info;
+extern const struct kx022a_chip_info kx134_chip_info;
extern const struct kx022a_chip_info kx132acr_chip_info;
+extern const struct kx022a_chip_info kx134acr_chip_info;
#endif
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
index b3b82535f5c1..1d556a842a68 100644
--- a/drivers/iio/adc/ad4000.c
+++ b/drivers/iio/adc/ad4000.c
@@ -35,10 +35,6 @@
#define AD4000_SCALE_OPTIONS 2
-#define AD4000_TQUIET1_NS 190
-#define AD4000_TQUIET2_NS 60
-#define AD4000_TCONV_NS 320
-
#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \
{ \
.type = IIO_VOLTAGE, \
@@ -49,6 +45,7 @@
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
@@ -62,6 +59,12 @@
__AD4000_DIFF_CHANNEL((_sign), (_real_bits), \
((_real_bits) > 16 ? 32 : 16), (_reg_access))
+#define AD4000_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
+{ \
+ AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+}
+
#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\
{ \
.type = IIO_VOLTAGE, \
@@ -71,6 +74,7 @@
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
@@ -84,6 +88,12 @@
__AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \
((_real_bits) > 16 ? 32 : 16), (_reg_access))
+#define AD4000_PSEUDO_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
+{ \
+ AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+}
+
static const char * const ad4000_power_supplies[] = {
"vdd", "vio"
};
@@ -108,111 +118,280 @@ static const int ad4000_gains[] = {
454, 909, 1000, 1900,
};
+struct ad4000_time_spec {
+ int t_conv_ns;
+ int t_quiet2_ns;
+};
+
+/*
+ * Same timing specifications for all of AD4000, AD4001, ..., AD4008, AD4010,
+ * ADAQ4001, and ADAQ4003.
+ */
+static const struct ad4000_time_spec ad4000_t_spec = {
+ .t_conv_ns = 320,
+ .t_quiet2_ns = 60,
+};
+
+/* AD4020, AD4021, AD4022 */
+static const struct ad4000_time_spec ad4020_t_spec = {
+ .t_conv_ns = 350,
+ .t_quiet2_ns = 60,
+};
+
+/* AD7983, AD7984 */
+static const struct ad4000_time_spec ad7983_t_spec = {
+ .t_conv_ns = 500,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7980, AD7982 */
+static const struct ad4000_time_spec ad7980_t_spec = {
+ .t_conv_ns = 800,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7946, AD7686, AD7688, AD7988-5, AD7693 */
+static const struct ad4000_time_spec ad7686_t_spec = {
+ .t_conv_ns = 1600,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7690 */
+static const struct ad4000_time_spec ad7690_t_spec = {
+ .t_conv_ns = 2100,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7942, AD7685, AD7687 */
+static const struct ad4000_time_spec ad7687_t_spec = {
+ .t_conv_ns = 3200,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7691 */
+static const struct ad4000_time_spec ad7691_t_spec = {
+ .t_conv_ns = 3700,
+ .t_quiet2_ns = 0,
+};
+
+/* AD7988-1 */
+static const struct ad4000_time_spec ad7988_1_t_spec = {
+ .t_conv_ns = 9500,
+ .t_quiet2_ns = 0,
+};
+
struct ad4000_chip_info {
const char *dev_name;
- struct iio_chan_spec chan_spec;
- struct iio_chan_spec reg_access_chan_spec;
+ struct iio_chan_spec chan_spec[2];
+ struct iio_chan_spec reg_access_chan_spec[2];
+ const struct ad4000_time_spec *time_spec;
bool has_hardware_gain;
};
static const struct ad4000_chip_info ad4000_chip_info = {
.dev_name = "ad4000",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4001_chip_info = {
.dev_name = "ad4001",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4002_chip_info = {
.dev_name = "ad4002",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4003_chip_info = {
.dev_name = "ad4003",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4004_chip_info = {
.dev_name = "ad4004",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4005_chip_info = {
.dev_name = "ad4005",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4006_chip_info = {
.dev_name = "ad4006",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4007_chip_info = {
.dev_name = "ad4007",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4008_chip_info = {
.dev_name = "ad4008",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4010_chip_info = {
.dev_name = "ad4010",
- .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
- .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4011_chip_info = {
.dev_name = "ad4011",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
};
static const struct ad4000_chip_info ad4020_chip_info = {
.dev_name = "ad4020",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info ad4021_chip_info = {
.dev_name = "ad4021",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info ad4022_chip_info = {
.dev_name = "ad4022",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .time_spec = &ad4020_t_spec,
};
static const struct ad4000_chip_info adaq4001_chip_info = {
.dev_name = "adaq4001",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
};
static const struct ad4000_chip_info adaq4003_chip_info = {
.dev_name = "adaq4003",
- .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
- .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
};
+static const struct ad4000_chip_info ad7685_chip_info = {
+ .dev_name = "ad7685",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7686_chip_info = {
+ .dev_name = "ad7686",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7687_chip_info = {
+ .dev_name = "ad7687",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7688_chip_info = {
+ .dev_name = "ad7688",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7690_chip_info = {
+ .dev_name = "ad7690",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7690_t_spec,
+};
+
+static const struct ad4000_chip_info ad7691_chip_info = {
+ .dev_name = "ad7691",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7691_t_spec,
+};
+
+static const struct ad4000_chip_info ad7693_chip_info = {
+ .dev_name = "ad7693",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7942_chip_info = {
+ .dev_name = "ad7942",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .time_spec = &ad7687_t_spec,
+};
+
+static const struct ad4000_chip_info ad7946_chip_info = {
+ .dev_name = "ad7946",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .time_spec = &ad7686_t_spec,
+};
+
+static const struct ad4000_chip_info ad7980_chip_info = {
+ .dev_name = "ad7980",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7980_t_spec,
+};
+
+static const struct ad4000_chip_info ad7982_chip_info = {
+ .dev_name = "ad7982",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7980_t_spec,
+};
+
+static const struct ad4000_chip_info ad7983_chip_info = {
+ .dev_name = "ad7983",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7983_t_spec,
+};
+
+static const struct ad4000_chip_info ad7984_chip_info = {
+ .dev_name = "ad7984",
+ .chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .time_spec = &ad7983_t_spec,
+};
+
+static const struct ad4000_chip_info ad7988_1_chip_info = {
+ .dev_name = "ad7988-1",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7988_1_t_spec,
+};
+
+static const struct ad4000_chip_info ad7988_5_chip_info = {
+ .dev_name = "ad7988-5",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .time_spec = &ad7686_t_spec,
+};
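Supporting an additional part in this family mostly amounts to a channel description plus a shared timing spec. A hypothetical entry (device name invented purely for illustration) reusing the AD7686 timing would look like the sketch below; it would also need matching spi_device_id and of_device_id entries like the ones added at the end of this patch.

/* Hypothetical example only - not a real device entry. */
static const struct ad4000_chip_info adxxxx_example_chip_info = {
	.dev_name = "adxxxx-example",
	.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
	.time_spec = &ad7686_t_spec,	/* 1.6 us conversion, no quiet time */
};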
+
struct ad4000_state {
struct spi_device *spi;
struct gpio_desc *cnv_gpio;
@@ -224,6 +403,7 @@ struct ad4000_state {
bool span_comp;
u16 gain_milli;
int scale_tbl[AD4000_SCALE_OPTIONS][2];
+ const struct ad4000_time_spec *time_spec;
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
@@ -234,7 +414,7 @@ struct ad4000_state {
__be16 sample_buf16;
__be32 sample_buf32;
} data;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan __aligned(IIO_DMA_MINALIGN);
u8 tx_buf[2];
u8 rx_buf[2];
@@ -488,16 +668,15 @@ static const struct iio_info ad4000_info = {
static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int cnv_pulse_time = AD4000_TCONV_NS;
struct spi_transfer *xfers = st->xfers;
xfers[0].cs_change = 1;
- xfers[0].cs_change_delay.value = cnv_pulse_time;
+ xfers[0].cs_change_delay.value = st->time_spec->t_conv_ns;
xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &st->scan.data;
xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
- xfers[1].delay.value = AD4000_TQUIET2_NS;
+ xfers[1].delay.value = st->time_spec->t_quiet2_ns;
xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
spi_message_init_with_transfers(&st->msg, st->xfers, 2);
@@ -515,7 +694,6 @@ static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int cnv_to_sdi_time = AD4000_TCONV_NS;
struct spi_transfer *xfers = st->xfers;
/*
@@ -523,7 +701,7 @@ static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
* going low.
*/
xfers[0].cs_off = 1;
- xfers[0].delay.value = cnv_to_sdi_time;
+ xfers[0].delay.value = st->time_spec->t_conv_ns;
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &st->scan.data;
@@ -562,6 +740,7 @@ static int ad4000_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->spi = spi;
+ st->time_spec = chip->time_spec;
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies),
ad4000_power_supplies);
@@ -591,7 +770,7 @@ static int ad4000_probe(struct spi_device *spi)
switch (st->sdi_pin) {
case AD4000_SDI_MOSI:
indio_dev->info = &ad4000_reg_access_info;
- indio_dev->channels = &chip->reg_access_chan_spec;
+ indio_dev->channels = chip->reg_access_chan_spec;
/*
* In "3-wire mode", the ADC SDI line must be kept high when
@@ -603,7 +782,7 @@ static int ad4000_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
@@ -614,16 +793,16 @@ static int ad4000_probe(struct spi_device *spi)
break;
case AD4000_SDI_VIO:
indio_dev->info = &ad4000_info;
- indio_dev->channels = &chip->chan_spec;
- ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ indio_dev->channels = chip->chan_spec;
+ ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
break;
case AD4000_SDI_CS:
indio_dev->info = &ad4000_info;
- indio_dev->channels = &chip->chan_spec;
- ret = ad4000_prepare_4wire_mode_message(st, indio_dev->channels);
+ indio_dev->channels = chip->chan_spec;
+ ret = ad4000_prepare_4wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
return ret;
@@ -637,7 +816,7 @@ static int ad4000_probe(struct spi_device *spi)
}
indio_dev->name = chip->dev_name;
- indio_dev->num_channels = 1;
+ indio_dev->num_channels = 2;
ret = devm_mutex_init(dev, &st->lock);
if (ret)
@@ -658,7 +837,7 @@ static int ad4000_probe(struct spi_device *spi)
}
}
- ad4000_fill_scale_tbl(st, indio_dev->channels);
+ ad4000_fill_scale_tbl(st, &indio_dev->channels[0]);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
@@ -686,6 +865,21 @@ static const struct spi_device_id ad4000_id[] = {
{ "ad4022", (kernel_ulong_t)&ad4022_chip_info },
{ "adaq4001", (kernel_ulong_t)&adaq4001_chip_info },
{ "adaq4003", (kernel_ulong_t)&adaq4003_chip_info },
+ { "ad7685", (kernel_ulong_t)&ad7685_chip_info },
+ { "ad7686", (kernel_ulong_t)&ad7686_chip_info },
+ { "ad7687", (kernel_ulong_t)&ad7687_chip_info },
+ { "ad7688", (kernel_ulong_t)&ad7688_chip_info },
+ { "ad7690", (kernel_ulong_t)&ad7690_chip_info },
+ { "ad7691", (kernel_ulong_t)&ad7691_chip_info },
+ { "ad7693", (kernel_ulong_t)&ad7693_chip_info },
+ { "ad7942", (kernel_ulong_t)&ad7942_chip_info },
+ { "ad7946", (kernel_ulong_t)&ad7946_chip_info },
+ { "ad7980", (kernel_ulong_t)&ad7980_chip_info },
+ { "ad7982", (kernel_ulong_t)&ad7982_chip_info },
+ { "ad7983", (kernel_ulong_t)&ad7983_chip_info },
+ { "ad7984", (kernel_ulong_t)&ad7984_chip_info },
+ { "ad7988-1", (kernel_ulong_t)&ad7988_1_chip_info },
+ { "ad7988-5", (kernel_ulong_t)&ad7988_5_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad4000_id);
@@ -707,6 +901,21 @@ static const struct of_device_id ad4000_of_match[] = {
{ .compatible = "adi,ad4022", .data = &ad4022_chip_info },
{ .compatible = "adi,adaq4001", .data = &adaq4001_chip_info },
{ .compatible = "adi,adaq4003", .data = &adaq4003_chip_info },
+ { .compatible = "adi,ad7685", .data = &ad7685_chip_info },
+ { .compatible = "adi,ad7686", .data = &ad7686_chip_info },
+ { .compatible = "adi,ad7687", .data = &ad7687_chip_info },
+ { .compatible = "adi,ad7688", .data = &ad7688_chip_info },
+ { .compatible = "adi,ad7690", .data = &ad7690_chip_info },
+ { .compatible = "adi,ad7691", .data = &ad7691_chip_info },
+ { .compatible = "adi,ad7693", .data = &ad7693_chip_info },
+ { .compatible = "adi,ad7942", .data = &ad7942_chip_info },
+ { .compatible = "adi,ad7946", .data = &ad7946_chip_info },
+ { .compatible = "adi,ad7980", .data = &ad7980_chip_info },
+ { .compatible = "adi,ad7982", .data = &ad7982_chip_info },
+ { .compatible = "adi,ad7983", .data = &ad7983_chip_info },
+ { .compatible = "adi,ad7984", .data = &ad7984_chip_info },
+ { .compatible = "adi,ad7988-1", .data = &ad7988_1_chip_info },
+ { .compatible = "adi,ad7988-5", .data = &ad7988_5_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, ad4000_of_match);
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index 0146aed9069f..b79d135a5471 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -30,7 +30,7 @@
#include <linux/spi/spi.h>
#include <linux/units.h>
-#include <dt-bindings/iio/adi,ad4695.h>
+#include <dt-bindings/iio/adc/adi,ad4695.h>
/* AD4695 registers */
#define AD4695_REG_SPI_CONFIG_A 0x0000
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3d678c420cbf..6ae27cdd3250 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -95,6 +95,10 @@
#define AD7124_MAX_CONFIGS 8
#define AD7124_MAX_CHANNELS 16
+/* AD7124 input sources */
+#define AD7124_INPUT_TEMPSENSOR 16
+#define AD7124_INPUT_AVSS 17
+
enum ad7124_ids {
ID_AD7124_4,
ID_AD7124_8,
@@ -360,20 +364,21 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
return free_cfg_slot;
}
+/* Only called during probe, so dev_err_probe() can be used */
static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channel_config *cfg)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int refsel = cfg->refsel;
switch (refsel) {
case AD7124_REFIN1:
case AD7124_REFIN2:
case AD7124_AVDD_REF:
- if (IS_ERR(st->vref[refsel])) {
- dev_err(&st->sd.spi->dev,
- "Error, trying to use external voltage reference without a %s regulator.\n",
- ad7124_ref_names[refsel]);
- return PTR_ERR(st->vref[refsel]);
- }
+ if (IS_ERR(st->vref[refsel]))
+ return dev_err_probe(dev, PTR_ERR(st->vref[refsel]),
+ "Error, trying to use external voltage reference without a %s regulator.\n",
+ ad7124_ref_names[refsel]);
+
cfg->vref_mv = regulator_get_voltage(st->vref[refsel]);
/* Conversion from uV to mV */
cfg->vref_mv /= 1000;
@@ -384,8 +389,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
return 0;
default:
- dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Invalid reference %d\n", refsel);
}
}
@@ -571,6 +575,7 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.data_reg = AD7124_DATA,
.num_slots = 8,
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 64,
};
static int ad7124_read_raw(struct iio_dev *indio_dev,
@@ -588,26 +593,59 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->cfgs_lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&st->cfgs_lock);
- idx = st->channels[chan->address].cfg.pga_bits;
- *val = st->channels[chan->address].cfg.vref_mv;
- if (st->channels[chan->address].cfg.bipolar)
- *val2 = chan->scan_type.realbits - 1 + idx;
- else
- *val2 = chan->scan_type.realbits + idx;
+ idx = st->channels[chan->address].cfg.pga_bits;
+ *val = st->channels[chan->address].cfg.vref_mv;
+ if (st->channels[chan->address].cfg.bipolar)
+ *val2 = chan->scan_type.realbits - 1 + idx;
+ else
+ *val2 = chan->scan_type.realbits + idx;
+
+ mutex_unlock(&st->cfgs_lock);
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_TEMP:
+ /*
+ * According to the data sheet:
+ * Temperature (°C)
+ * = ((Conversion - 0x800000) / 13584) - 272.5
+ * = (Conversion - 0x800000 - 13584 * 272.5) / 13584
+ * = (Conversion - 12090248) / 13584
+ * So scale with 1000/13584 to yield m°C. Reducing the fraction
+ * by 8 gives 125/1698.
+ */
+ *val = 125;
+ *val2 = 1698;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
- mutex_unlock(&st->cfgs_lock);
- return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
- mutex_lock(&st->cfgs_lock);
- if (st->channels[chan->address].cfg.bipolar)
- *val = -(1 << (chan->scan_type.realbits - 1));
- else
- *val = 0;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&st->cfgs_lock);
+ if (st->channels[chan->address].cfg.bipolar)
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ else
+ *val = 0;
+
+ mutex_unlock(&st->cfgs_lock);
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ /* see calculation above */
+ *val = -12090248;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
- mutex_unlock(&st->cfgs_lock);
- return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
mutex_lock(&st->cfgs_lock);
*val = st->channels[chan->address].cfg.odr;
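Note: combining the temperature scale and offset above with the usual IIO convention (value = (raw + offset) * scale, here in m°C), a purely illustrative raw reading of 12429848 works out to (12429848 - 12090248) * 125 / 1698 = 339600 * 125 / 1698 = 25000 m°C, i.e. 25 °C, which matches the data sheet form ((12429848 - 0x800000) / 13584) - 272.5 = 297.5 - 272.5 = 25 °C. The raw value is an example for checking the arithmetic, not something taken from the driver.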
@@ -751,12 +789,14 @@ static const struct iio_info ad7124_info = {
.attrs = &ad7124_attrs_group,
};
+/* Only called during probe, so dev_err_probe() can be used */
static int ad7124_soft_reset(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int readval, timeout;
int ret;
- ret = ad_sd_reset(&st->sd, 64);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
return ret;
@@ -765,7 +805,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
do {
ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Error reading status register\n");
if (!(readval & AD7124_STATUS_POR_FLAG_MSK))
return 0;
@@ -774,39 +814,47 @@ static int ad7124_soft_reset(struct ad7124_state *st)
usleep_range(100, 2000);
} while (--timeout);
- dev_err(&st->sd.spi->dev, "Soft reset failed\n");
-
- return -EIO;
+ return dev_err_probe(dev, -EIO, "Soft reset failed\n");
}
static int ad7124_check_chip_id(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int readval, chip_id, silicon_rev;
int ret;
ret = ad_sd_read_reg(&st->sd, AD7124_ID, 1, &readval);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failure to read ID register\n");
chip_id = AD7124_DEVICE_ID_GET(readval);
silicon_rev = AD7124_SILICON_REV_GET(readval);
- if (chip_id != st->chip_info->chip_id) {
- dev_err(&st->sd.spi->dev,
- "Chip ID mismatch: expected %u, got %u\n",
- st->chip_info->chip_id, chip_id);
- return -ENODEV;
- }
+ if (chip_id != st->chip_info->chip_id)
+ return dev_err_probe(dev, -ENODEV,
+ "Chip ID mismatch: expected %u, got %u\n",
+ st->chip_info->chip_id, chip_id);
- if (silicon_rev == 0) {
- dev_err(&st->sd.spi->dev,
- "Silicon revision empty. Chip may not be present\n");
- return -ENODEV;
- }
+ if (silicon_rev == 0)
+ return dev_err_probe(dev, -ENODEV,
+ "Silicon revision empty. Chip may not be present\n");
return 0;
}
+/*
+ * Input specifiers 8 - 15 are explicitly reserved on the ad7124-4
+ * while they are valid on the ad7124-8. Values above 31 don't fit
+ * into the register field and so are always invalid.
+ */
+static bool ad7124_valid_input_select(unsigned int ain, const struct ad7124_chip_info *info)
+{
+ if (ain >= info->num_inputs && ain < 16)
+ return false;
+
+ return ain <= FIELD_MAX(AD7124_CHANNEL_AINM_MSK);
+}
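As a sketch of what this check accepts on an ad7124-4 (assuming its num_inputs is 8, as the comment above implies): ain = 5 passes, ain = 10 is rejected because it falls into the reserved 8 - 15 range, ain = 17 (AVSS) still passes since it is >= 16, and ain = 32 is rejected because it exceeds FIELD_MAX(AD7124_CHANNEL_AINM_MSK) = 31. On an ad7124-8 only the last case is rejected.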
+
static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
struct device *dev)
{
@@ -815,11 +863,23 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
struct ad7124_channel *channels;
struct iio_chan_spec *chan;
unsigned int ain[2], channel = 0, tmp;
+ unsigned int num_channels;
int ret;
- st->num_channels = device_get_child_node_count(dev);
- if (!st->num_channels)
- return dev_err_probe(dev, -ENODEV, "no channel children\n");
+ num_channels = device_get_child_node_count(dev);
+
+ /*
+ * The driver statically assigns one channel register to each logical
+ * channel defined in the device tree. So accept at most 16 such
+ * logical channels so that CONFIG_0 (i.e. the register following
+ * CHANNEL_15) is not treated as an additional channel register. The
+ * driver could be improved to lift this limitation.
+ */
+ if (num_channels > AD7124_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL, "Too many channels defined\n");
+
+ /* Add one for temperature */
+ st->num_channels = min(num_channels + 1, AD7124_MAX_CHANNELS);
chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
sizeof(*chan), GFP_KERNEL);
@@ -838,16 +898,23 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
device_for_each_child_node_scoped(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &channel);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to parse reg property of %pfwP\n", child);
- if (channel >= indio_dev->num_channels)
+ if (channel >= num_channels)
return dev_err_probe(dev, -EINVAL,
- "Channel index >= number of channels\n");
+ "Channel index >= number of channels in %pfwP\n", child);
ret = fwnode_property_read_u32_array(child, "diff-channels",
ain, 2);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to parse diff-channels property of %pfwP\n", child);
+
+ if (!ad7124_valid_input_select(ain[0], st->chip_info) ||
+ !ad7124_valid_input_select(ain[1], st->chip_info))
+ return dev_err_probe(dev, -EINVAL,
+ "diff-channels property of %pfwP contains invalid data\n", child);
st->channels[channel].nr = channel;
st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
@@ -874,17 +941,49 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
chan[channel].channel2 = ain[1];
}
+ if (num_channels < AD7124_MAX_CHANNELS) {
+ st->channels[num_channels] = (struct ad7124_channel) {
+ .nr = num_channels,
+ .ain = AD7124_CHANNEL_AINP(AD7124_INPUT_TEMPSENSOR) |
+ AD7124_CHANNEL_AINM(AD7124_INPUT_AVSS),
+ .cfg = {
+ .bipolar = true,
+ },
+ };
+
+ chan[num_channels] = (struct iio_chan_spec) {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ /*
+ * You might find it strange that a bipolar
+ * measurement yields an unsigned value, but
+ * this matches the device's manual.
+ */
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ .address = num_channels,
+ .scan_index = num_channels,
+ };
+ }
+
return 0;
}
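With the temperature channel registered this way, userspace should see the usual non-indexed IIO temperature attributes (in_temp_raw, in_temp_offset, in_temp_scale and in_temp_sampling_frequency, going by standard IIO naming) alongside the voltage channels. Note that the sensor channel is only added while a channel register remains free, i.e. when fewer than AD7124_MAX_CHANNELS logical channels are described in the device tree.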
static int ad7124_setup(struct ad7124_state *st)
{
+ struct device *dev = &st->sd.spi->dev;
unsigned int fclk, power_mode;
int i, ret;
fclk = clk_get_rate(st->mclk);
if (!fclk)
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Failed to get mclk rate\n");
/* The power mode changes the master clock frequency */
power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
@@ -893,7 +992,7 @@ static int ad7124_setup(struct ad7124_state *st)
if (fclk != ad7124_master_clk_freq_hz[power_mode]) {
ret = clk_set_rate(st->mclk, fclk);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to set mclk rate\n");
}
/* Set the power mode */
@@ -924,7 +1023,7 @@ static int ad7124_setup(struct ad7124_state *st)
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to setup CONTROL register\n");
return ret;
}
@@ -937,13 +1036,14 @@ static void ad7124_reg_disable(void *r)
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
+ struct device *dev = &spi->dev;
struct ad7124_state *st;
struct iio_dev *indio_dev;
int i, ret;
info = spi_get_device_match_data(spi);
if (!info)
- return -ENODEV;
+ return dev_err_probe(dev, -ENODEV, "Failed to get match data\n");
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -978,17 +1078,17 @@ static int ad7124_probe(struct spi_device *spi)
ret = regulator_enable(st->vref[i]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to enable regulator #%d\n", i);
ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
st->vref[i]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to register disable handler for regulator #%d\n", i);
}
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
- return PTR_ERR(st->mclk);
+ return dev_err_probe(dev, PTR_ERR(st->mclk), "Failed to get mclk\n");
ret = ad7124_soft_reset(st);
if (ret < 0)
@@ -1004,10 +1104,13 @@ static int ad7124_probe(struct spi_device *spi)
ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to setup triggers\n");
- return devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register iio device\n");
+ return 0;
}
static const struct of_device_id ad7124_of_match[] = {
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 8b03c1e5567e..6c4ed10ae580 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -150,6 +150,11 @@
#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
+#define AD7173_MODE_CAL_INT_ZERO 0x4 /* Internal Zero-Scale Calibration */
+#define AD7173_MODE_CAL_INT_FULL 0x5 /* Internal Full-Scale Calibration */
+#define AD7173_MODE_CAL_SYS_ZERO 0x6 /* System Zero-Scale Calibration */
+#define AD7173_MODE_CAL_SYS_FULL 0x7 /* System Full-Scale Calibration */
+
struct ad7173_device_info {
const unsigned int *sinc5_data_rates;
unsigned int num_sinc5_data_rates;
@@ -175,6 +180,7 @@ struct ad7173_device_info {
bool has_input_buf;
bool has_int_ref;
bool has_ref2;
+ bool has_internal_fs_calibration;
bool higher_gpio_bits;
u8 num_gpios;
};
@@ -193,9 +199,9 @@ struct ad7173_channel_config {
};
struct ad7173_channel {
- unsigned int chan_reg;
unsigned int ain;
struct ad7173_channel_config cfg;
+ u8 syscalib_mode;
};
struct ad7173_state {
@@ -273,6 +279,7 @@ static const struct ad7173_device_info ad4111_device_info = {
.has_input_buf = true,
.has_current_inputs = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -292,6 +299,7 @@ static const struct ad7173_device_info ad4112_device_info = {
.has_input_buf = true,
.has_current_inputs = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -327,6 +335,7 @@ static const struct ad7173_device_info ad4114_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
@@ -344,6 +353,7 @@ static const struct ad7173_device_info ad4115_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 8 * HZ_PER_MHZ,
.sinc5_data_rates = ad4115_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
@@ -361,6 +371,7 @@ static const struct ad7173_device_info ad4116_device_info = {
.has_temp = true,
.has_input_buf = true,
.has_int_ref = true,
+ .has_internal_fs_calibration = true,
.clock = 4 * HZ_PER_MHZ,
.sinc5_data_rates = ad4116_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
@@ -506,6 +517,105 @@ static const struct regmap_config ad7173_regmap_config = {
.read_flag_mask = BIT(6),
};
+enum {
+ AD7173_SYSCALIB_ZERO_SCALE,
+ AD7173_SYSCALIB_FULL_SCALE,
+};
+
+static const char * const ad7173_syscalib_modes[] = {
+ [AD7173_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7173_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7173_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ st->channels[chan->channel].syscalib_mode = mode;
+
+ return 0;
+}
+
+static int ad7173_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->channel].syscalib_mode;
+}
+
+static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret, mode;
+
+ ret = kstrtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ mode = st->channels[chan->channel].syscalib_mode;
+ if (sys_calib) {
+ if (mode == AD7173_SYSCALIB_ZERO_SCALE)
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_SYS_ZERO,
+ chan->address);
+ else
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_SYS_FULL,
+ chan->address);
+ }
+
+ return ret ? : len;
+}
+
+static const struct iio_enum ad7173_syscalib_mode_enum = {
+ .items = ad7173_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7173_syscalib_modes),
+ .set = ad7173_set_syscalib_mode,
+ .get = ad7173_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7173_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7173_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7173_syscalib_mode_enum),
+ { }
+};
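Assuming the standard IIO ext_info naming, these entries surface per-channel sysfs attributes roughly of the form in_voltageX_sys_calibration_mode (selecting "zero_scale" or "full_scale"), a shared ..._sys_calibration_mode_available listing both values, and a write-only in_voltageX_sys_calibration that, when written with a true value ("1"), runs the system calibration selected by the mode via ad_sd_calibrate(). The exact attribute names depend on how the channels are declared (indexed/differential), so these forms are illustrative rather than definitive.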
+
+static int ad7173_calibrate_all(struct ad7173_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < st->num_channels; i++) {
+ if (indio_dev->channels[i].type != IIO_VOLTAGE)
+ continue;
+
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_INT_ZERO, st->channels[i].ain);
+ if (ret < 0)
+ return ret;
+
+ if (st->info->has_internal_fs_calibration) {
+ ret = ad_sd_calibrate(&st->sd, AD7173_MODE_CAL_INT_FULL,
+ st->channels[i].ain);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ad7173_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask)
@@ -765,6 +875,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.data_reg = AD7173_REG_DATA,
+ .num_resetclks = 64,
};
static int ad7173_setup(struct iio_dev *indio_dev)
@@ -802,6 +913,10 @@ static int ad7173_setup(struct iio_dev *indio_dev)
if (!st->config_cnts)
return -ENOMEM;
+ ret = ad7173_calibrate_all(st, indio_dev);
+ if (ret)
+ return ret;
+
/* All channels are enabled by default after a reset */
return ad7173_disable_all(&st->sd);
}
@@ -1024,6 +1139,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
+ .ext_info = ad7173_calibsys_ext_info,
};
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
@@ -1317,7 +1433,6 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->address = chan_index;
chan->scan_index = chan_index;
chan->channel = ain[0];
- chan_st_priv->chan_reg = chan_index;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 1c87db0e0460..e96a5ae92375 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -361,6 +361,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.status_ch_mask = GENMASK(3, 0),
.num_slots = 4,
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 40,
};
static const struct ad_sigma_delta_info ad7194_sigma_delta_info = {
@@ -373,6 +374,7 @@ static const struct ad_sigma_delta_info ad7194_sigma_delta_info = {
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 40,
};
static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
@@ -565,7 +567,7 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
int i, ret, id;
/* reset the serial interface */
- ret = ad_sd_reset(&st->sd, 48);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
return ret;
usleep_range(500, 1000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index e35d55d03d86..d8e3c7a43678 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -175,17 +175,17 @@ static const struct iio_chan_spec ad7616_channels[] = {
AD7606_CHANNEL(15, 16),
};
-static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7607_chan_scale_setup(struct ad7606_state *st,
+static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7608_chan_scale_setup(struct ad7606_state *st,
+static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
-static int ad7609_chan_scale_setup(struct ad7606_state *st,
+static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
const struct ad7606_chip_info ad7605_4_info = {
@@ -323,9 +323,10 @@ int ad7606_reset(struct ad7606_state *st)
}
EXPORT_SYMBOL_NS_GPL(ad7606_reset, "IIO_AD7606");
-static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
if (!st->sw_mode_en) {
@@ -345,10 +346,12 @@ static int ad7606_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
+static int ad7606_get_chan_config(struct iio_dev *indio_dev, int ch,
bool *bipolar, bool *differential)
{
- unsigned int num_channels = st->chip_info->num_channels - 1;
+ struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int num_channels = st->chip_info->num_adc_channels;
+ unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
struct device *dev = st->dev;
int ret;
@@ -364,7 +367,7 @@ static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
continue;
/* channel number (here) is from 1 to num_channels */
- if (reg == 0 || reg > num_channels) {
+ if (reg < offset || reg > num_channels) {
dev_warn(dev,
"Invalid channel number (ignoring): %d\n", reg);
continue;
@@ -399,9 +402,10 @@ static int ad7606_get_chan_config(struct ad7606_state *st, int ch,
return 0;
}
-static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
bool bipolar, differential;
int ret;
@@ -413,7 +417,7 @@ static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
- ret = ad7606_get_chan_config(st, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
if (ret)
return ret;
@@ -455,9 +459,10 @@ static int ad7606c_18bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
+static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
bool bipolar, differential;
int ret;
@@ -469,7 +474,7 @@ static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
- ret = ad7606_get_chan_config(st, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
if (ret)
return ret;
@@ -512,9 +517,10 @@ static int ad7606c_16bit_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7607_chan_scale_setup(struct ad7606_state *st,
+static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -523,9 +529,10 @@ static int ad7607_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7608_chan_scale_setup(struct ad7606_state *st,
+static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -534,9 +541,10 @@ static int ad7608_chan_scale_setup(struct ad7606_state *st,
return 0;
}
-static int ad7609_chan_scale_setup(struct ad7606_state *st,
+static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
struct ad7606_chan_scale *cs = &st->chan_scales[ch];
cs->range = 0;
@@ -1146,8 +1154,8 @@ static int ad7606_sw_mode_setup(struct iio_dev *indio_dev)
static int ad7606_chan_scales_setup(struct iio_dev *indio_dev)
{
- unsigned int num_channels = indio_dev->num_channels - 1;
struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
struct iio_chan_spec *chans;
size_t size;
int ch, ret;
@@ -1161,8 +1169,8 @@ static int ad7606_chan_scales_setup(struct iio_dev *indio_dev)
memcpy(chans, indio_dev->channels, size);
indio_dev->channels = chans;
- for (ch = 0; ch < num_channels; ch++) {
- ret = st->chip_info->scale_setup_cb(st, &chans[ch + 1], ch);
+ for (ch = 0; ch < st->chip_info->num_adc_channels; ch++) {
+ ret = st->chip_info->scale_setup_cb(indio_dev, &chans[ch + offset], ch);
if (ret)
return ret;
}
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 998814a92b82..8778ffe515b3 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -69,7 +69,7 @@
struct ad7606_state;
-typedef int (*ad7606_scale_setup_cb_t)(struct ad7606_state *st,
+typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
/**
diff --git a/drivers/iio/adc/ad7625.c b/drivers/iio/adc/ad7625.c
index aefe3bf75c91..afa9bf4ddf3c 100644
--- a/drivers/iio/adc/ad7625.c
+++ b/drivers/iio/adc/ad7625.c
@@ -477,12 +477,12 @@ static int devm_ad7625_pwm_get(struct device *dev,
ref_clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ref_clk))
return dev_err_probe(dev, PTR_ERR(ref_clk),
- "failed to get ref_clk");
+ "failed to get ref_clk\n");
ref_clk_rate_hz = clk_get_rate(ref_clk);
if (!ref_clk_rate_hz)
return dev_err_probe(dev, -EINVAL,
- "failed to get ref_clk rate");
+ "failed to get ref_clk rate\n");
st->ref_clk_rate_hz = ref_clk_rate_hz;
@@ -533,7 +533,7 @@ static int devm_ad7625_regulator_setup(struct device *dev,
if (!st->info->has_internal_vref && !st->have_refin && !ref_mv)
return dev_err_probe(dev, -EINVAL,
- "Need either REFIN or REF");
+ "Need either REFIN or REF\n");
if (st->have_refin && ref_mv)
return dev_err_probe(dev, -EINVAL,
@@ -623,7 +623,7 @@ static int ad7625_probe(struct platform_device *pdev)
st->back = devm_iio_backend_get(dev, NULL);
if (IS_ERR(st->back))
return dev_err_probe(dev, PTR_ERR(st->back),
- "failed to get IIO backend");
+ "failed to get IIO backend\n");
ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
if (ret)
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index e1bf13fe2cd7..76118fe22db8 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -254,6 +254,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.addr_shift = 4,
.read_mask = BIT(3),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 32,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index d55c71566707..1b50d9643a63 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,6 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
.addr_shift = 3,
.read_mask = BIT(6),
.irq_flags = IRQF_TRIGGER_FALLING,
+ .num_resetclks = 32,
};
static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
@@ -265,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
- ret = ad_sd_reset(&st->sd, 32);
+ ret = ad_sd_reset(&st->sd);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index a5aea4e9f1a7..0ec9cda10f5f 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -75,7 +75,7 @@ struct ad7944_adc {
u16 u16;
u32 u32;
} raw;
- u64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} sample __aligned(IIO_DMA_MINALIGN);
};
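The aligned_s64 type used here (and in the other scan structs touched below) comes from <linux/types.h> in recent kernels and expresses the same requirement as the open-coded attribute it replaces; conceptually it is roughly

    typedef s64 aligned_s64 __aligned(8);  /* 8-byte aligned signed 64-bit type (sketch, not the verbatim kernel definition) */

so the in-buffer timestamp keeps 8-byte alignment even on 32-bit architectures where a plain s64 member may only be 4-byte aligned.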
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 3fd200b34161..d5d81581ab34 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -29,8 +29,11 @@
#define AD_SD_COMM_CHAN_MASK 0x3
#define AD_SD_REG_COMM 0x00
+#define AD_SD_REG_STATUS 0x00
#define AD_SD_REG_DATA 0x03
+#define AD_SD_REG_STATUS_RDY 0x80
+
/**
* ad_sd_set_comm() - Set communications register
*
@@ -109,7 +112,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
}, {
.rx_buf = val,
.len = size,
- .cs_change = sigma_delta->bus_locked,
+ .cs_change = sigma_delta->keep_cs_asserted,
},
};
struct spi_message m;
@@ -178,13 +181,12 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_read_reg, "IIO_AD_SIGMA_DELTA");
* ad_sd_reset() - Reset the serial interface
*
* @sigma_delta: The sigma delta device
- * @reset_length: Number of SCLKs with DIN = 1
*
* Returns 0 on success, an error code otherwise.
**/
-int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
- unsigned int reset_length)
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta)
{
+ unsigned int reset_length = sigma_delta->info->num_resetclks;
uint8_t *buf;
unsigned int size;
int ret;
@@ -202,6 +204,107 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_NS_GPL(ad_sd_reset, "IIO_AD_SIGMA_DELTA");
+static bool ad_sd_disable_irq(struct ad_sigma_delta *sigma_delta)
+{
+ guard(spinlock_irqsave)(&sigma_delta->irq_lock);
+
+ /* It's already off, return false to indicate nothing was changed */
+ if (sigma_delta->irq_dis)
+ return false;
+
+ sigma_delta->irq_dis = true;
+ disable_irq_nosync(sigma_delta->irq_line);
+ return true;
+}
+
+static void ad_sd_enable_irq(struct ad_sigma_delta *sigma_delta)
+{
+ guard(spinlock_irqsave)(&sigma_delta->irq_lock);
+
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->irq_line);
+}
+
+#define AD_SD_CLEAR_DATA_BUFLEN 9
+
+/* Called with `sigma_delta->bus_locked == true` only. */
+static int ad_sigma_delta_clear_pending_event(struct ad_sigma_delta *sigma_delta)
+{
+ bool pending_event;
+ unsigned int data_read_len = BITS_TO_BYTES(sigma_delta->info->num_resetclks);
+ u8 *data;
+ struct spi_transfer t[] = {
+ {
+ .len = 1,
+ }, {
+ .len = data_read_len,
+ }
+ };
+ struct spi_message m;
+ int ret;
+
+ /*
+ * Read R̅D̅Y̅ pin (if possible) or status register to check if there is an
+ * old event.
+ */
+ if (sigma_delta->rdy_gpiod) {
+ pending_event = gpiod_get_value(sigma_delta->rdy_gpiod);
+ } else {
+ unsigned int status_reg;
+
+ ret = ad_sd_read_reg(sigma_delta, AD_SD_REG_STATUS, 1, &status_reg);
+ if (ret)
+ return ret;
+
+ pending_event = !(status_reg & AD_SD_REG_STATUS_RDY);
+ }
+
+ if (!pending_event)
+ return 0;
+
+ /*
+ * In general the size of the data register is unknown. It varies from
+ * device to device, might be one byte longer if CONTROL.DATA_STATUS is
+ * set and even varies on some devices depending on which input is
+ * selected. So send one byte to start reading the data register and
+ * then just clock for some bytes with DIN (aka MOSI) high to not
+ * confuse the register access state machine after the data register was
+ * completely read. Note however that the sequence length must be
+ * shorter than the reset procedure.
+ */
+
+ data = kzalloc(data_read_len + 1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_message_init(&m);
+ if (sigma_delta->info->has_registers) {
+ unsigned int data_reg = sigma_delta->info->data_reg ?: AD_SD_REG_DATA;
+
+ data[0] = data_reg << sigma_delta->info->addr_shift;
+ data[0] |= sigma_delta->info->read_mask;
+ data[0] |= sigma_delta->comm;
+ t[0].tx_buf = data;
+ spi_message_add_tail(&t[0], &m);
+ }
+
+ /*
+ * The first transferred byte is part of the real data register,
+ * so it doesn't need to be 0xff. The remaining
+ * `data_read_len - 1` bytes of 0xff contain fewer than
+ * num_resetclks consecutive ones, so they cannot be mistaken
+ * for a reset.
+ */
+ t[1].tx_buf = data + 1;
+ data[1] = 0x00;
+ memset(data + 2, 0xff, data_read_len - 1);
+ spi_message_add_tail(&t[1], &m);
+
+ ret = spi_sync_locked(sigma_delta->spi, &m);
+
+ kfree(data);
+
+ return ret;
+}
+
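A quick sanity check of the flush length, taking num_resetclks = 64 from the AD7124 hunk above: data_read_len = BITS_TO_BYTES(64) = 8, so the second transfer clocks one 0x00 byte followed by 7 bytes of 0xff, i.e. at most 56 consecutive ones on DIN. That stays safely below the 64 high clocks needed to trigger a reset, exactly as the comment requires.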
int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
@@ -217,16 +320,18 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto out;
+
ret = ad_sigma_delta_set_mode(sigma_delta, mode);
if (ret < 0)
goto out;
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
time_left = wait_for_completion_timeout(&sigma_delta->completion, 2 * HZ);
if (time_left == 0) {
- sigma_delta->irq_dis = true;
- disable_irq_nosync(sigma_delta->irq_line);
+ ad_sd_disable_irq(sigma_delta);
ret = -EIO;
} else {
ret = 0;
@@ -292,10 +397,13 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto out_unlock;
+
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
ret = wait_for_completion_interruptible_timeout(
&sigma_delta->completion, HZ);
@@ -314,14 +422,13 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
&raw_sample);
out:
- if (!sigma_delta->irq_dis) {
- disable_irq_nosync(sigma_delta->irq_line);
- sigma_delta->irq_dis = true;
- }
+ ad_sd_disable_irq(sigma_delta);
- sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
ad_sigma_delta_disable_one(sigma_delta, chan->address);
+
+out_unlock:
+ sigma_delta->keep_cs_asserted = false;
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
iio_device_release_direct_mode(indio_dev);
@@ -392,12 +499,15 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
sigma_delta->bus_locked = true;
sigma_delta->keep_cs_asserted = true;
+ ret = ad_sigma_delta_clear_pending_event(sigma_delta);
+ if (ret)
+ goto err_unlock;
+
ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
if (ret)
goto err_unlock;
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
return 0;
@@ -414,10 +524,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
reinit_completion(&sigma_delta->completion);
wait_for_completion_timeout(&sigma_delta->completion, HZ);
- if (!sigma_delta->irq_dis) {
- disable_irq_nosync(sigma_delta->irq_line);
- sigma_delta->irq_dis = true;
- }
+ ad_sd_disable_irq(sigma_delta);
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
@@ -516,8 +623,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
irq_handled:
iio_trigger_notify_done(indio_dev->trig);
- sigma_delta->irq_dis = false;
- enable_irq(sigma_delta->irq_line);
+ ad_sd_enable_irq(sigma_delta);
return IRQ_HANDLED;
}
@@ -539,12 +645,31 @@ static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
{
struct ad_sigma_delta *sigma_delta = private;
- complete(&sigma_delta->completion);
- disable_irq_nosync(irq);
- sigma_delta->irq_dis = true;
- iio_trigger_poll(sigma_delta->trig);
+ /*
+ * AD7124 and a few others use the same physical line for interrupt
+ * reporting (R̅D̅Y̅) and MISO.
+ * As MISO toggles when reading a register, this likely results in a
+ * pending interrupt. This has two consequences: a) The irq might
+ * trigger immediately after it's enabled even though the conversion
+ * isn't done yet; and b) checking the STATUS register's R̅D̅Y̅ flag is
+ * off-limits as reading that would trigger another irq event.
+ *
+ * So read the MISO line as GPIO (if available) and only trigger the irq
+ * if the line is active. Without such a GPIO assume this is a valid
+ * interrupt.
+ *
+ * Also, as disable_irq_nosync() is used to disable the irq, only act if
+ * the irq wasn't already disabled.
+ */
+ if ((!sigma_delta->rdy_gpiod || gpiod_get_value(sigma_delta->rdy_gpiod)) &&
+ ad_sd_disable_irq(sigma_delta)) {
+ complete(&sigma_delta->completion);
+ iio_trigger_poll(sigma_delta->trig);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
}
/**
@@ -674,11 +799,24 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
}
}
+ spin_lock_init(&sigma_delta->irq_lock);
+
if (info->irq_line)
sigma_delta->irq_line = info->irq_line;
else
sigma_delta->irq_line = spi->irq;
+ sigma_delta->rdy_gpiod = devm_gpiod_get_optional(&spi->dev, "rdy", GPIOD_IN);
+ if (IS_ERR(sigma_delta->rdy_gpiod))
+ return dev_err_probe(&spi->dev, PTR_ERR(sigma_delta->rdy_gpiod),
+ "Failed to find rdy gpio\n");
+
+ if (sigma_delta->rdy_gpiod && !sigma_delta->irq_line) {
+ sigma_delta->irq_line = gpiod_to_irq(sigma_delta->rdy_gpiod);
+ if (sigma_delta->irq_line < 0)
+ return sigma_delta->irq_line;
+ }
+
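Since the GPIO is requested with devm_gpiod_get_optional(&spi->dev, "rdy", GPIOD_IN), the corresponding devicetree property would normally be named rdy-gpios (standard gpiolib naming); when no separate interrupt line is wired up, the same GPIO is turned into the irq via gpiod_to_irq() as shown above.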
iio_device_set_drvdata(indio_dev, sigma_delta);
return 0;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 30328626d9be..221a5fdc1eaa 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -66,8 +66,6 @@ struct dln2_adc {
/* Demux table */
unsigned int demux_count;
struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
- /* Precomputed timestamp padding offset and length */
- unsigned int ts_pad_offset, ts_pad_length;
};
struct dln2_adc_port_chan {
@@ -111,8 +109,6 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
if (iio_get_masklength(indio_dev) &&
(*indio_dev->active_scan_mask & 0xff) == 0xff) {
dln2_adc_add_demux(dln2, 0, 0, 16);
- dln2->ts_pad_offset = 0;
- dln2->ts_pad_length = 0;
return;
}
@@ -127,16 +123,6 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
out_loc += 2;
in_loc += 2;
}
-
- if (indio_dev->scan_timestamp) {
- size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
-
- dln2->ts_pad_offset = out_loc;
- dln2->ts_pad_length = ts_offset * sizeof(int64_t) - out_loc;
- } else {
- dln2->ts_pad_offset = 0;
- dln2->ts_pad_length = 0;
- }
}
static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
@@ -494,6 +480,8 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
if (ret < 0)
goto done;
+ memset(&data, 0, sizeof(data));
+
/* Demux operation */
for (i = 0; i < dln2->demux_count; ++i) {
t = &dln2->demux[i];
@@ -501,11 +489,6 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
(void *)dev_data.values + t->from, t->length);
}
- /* Zero padding space between values and timestamp */
- if (dln2->ts_pad_length)
- memset((void *)data.values + dln2->ts_pad_offset,
- 0, dln2->ts_pad_length);
-
iio_push_to_buffers_with_timestamp(indio_dev, &data,
iio_get_time_ns(indio_dev));
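Zeroing the whole scan buffer up front replaces the removed ts_pad_offset/ts_pad_length bookkeeping: any channel slots not written by the demux loop and the padding before the timestamp now start out as zero, so nothing stale is pushed to the buffer.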
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 48c95e12e791..40d14faa71c5 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -150,7 +150,7 @@ struct ina2xx_chip_info {
/* data buffer needs space for channel data and timestamp */
struct {
u16 chan[4];
- u64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 3d0a7d0eb7ee..565ca2e21c0c 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -39,7 +39,7 @@ struct max1118 {
/* Ensure natural alignment of buffer elements */
struct {
u8 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
u8 data __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index f0dc4b460903..76abafd47404 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -143,7 +143,7 @@ struct max11410_state {
int irq;
struct {
u32 data __aligned(IIO_DMA_MINALIGN);
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 9a0baea08ab6..e8d731bc34e0 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -161,6 +161,7 @@ struct max1363_chip_info {
* @vref_uv: Actual (external or internal) reference voltage
* @send: function used to send data to the chip
* @recv: function used to receive data from the chip
+ * @data: buffer to store channel data and timestamp
*/
struct max1363_state {
struct i2c_client *client;
@@ -186,6 +187,10 @@ struct max1363_state {
const char *buf, int count);
int (*recv)(const struct i2c_client *client,
char *buf, int count);
+ struct {
+ u8 buf[MAX1363_MAX_CHANNELS * 2];
+ aligned_s64 ts;
+ } data;
};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
@@ -1462,22 +1467,10 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1363_state *st = iio_priv(indio_dev);
- __u8 *rxbuf;
int b_sent;
- size_t d_size;
unsigned long numvals = bitmap_weight(st->current_mode->modemask,
MAX1363_MAX_CHANNELS);
- /* Ensure the timestamp is 8 byte aligned */
- if (st->chip_info->bits != 8)
- d_size = numvals*2;
- else
- d_size = numvals;
- if (indio_dev->scan_timestamp) {
- d_size += sizeof(s64);
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
- }
/* Monitor mode prevents reading. Whilst not currently implemented
* might as well have this test in here in the meantime as it does
* no harm.
@@ -1485,21 +1478,16 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (numvals == 0)
goto done;
- rxbuf = kmalloc(d_size, GFP_KERNEL);
- if (rxbuf == NULL)
- goto done;
if (st->chip_info->bits != 8)
- b_sent = st->recv(st->client, rxbuf, numvals * 2);
+ b_sent = st->recv(st->client, st->data.buf, numvals * 2);
else
- b_sent = st->recv(st->client, rxbuf, numvals);
+ b_sent = st->recv(st->client, st->data.buf, numvals);
if (b_sent < 0)
- goto done_free;
+ goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, rxbuf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
iio_get_time_ns(indio_dev));
-done_free:
- kfree(rxbuf);
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index b097f04172c8..6748b44d568d 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -122,7 +122,7 @@ struct mcp3911 {
const struct mcp3911_chip_info *chip;
struct {
u32 channels[MCP39XX_MAX_NUM_CHANNELS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
u8 tx_buf __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2d475b43e717..997def4a4d2f 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -155,10 +155,10 @@
*/
#define MESON_SAR_ADC_REG11 0x2c
#define MESON_SAR_ADC_REG11_BANDGAP_EN BIT(13)
- #define MESON_SAR_ADC_REG11_CMV_SEL BIT(6)
- #define MESON_SAR_ADC_REG11_VREF_VOLTAGE BIT(5)
- #define MESON_SAR_ADC_REG11_EOC BIT(1)
- #define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
+ #define MESON_SAR_ADC_REG11_CMV_SEL BIT(6)
+ #define MESON_SAR_ADC_REG11_VREF_VOLTAGE BIT(5)
+ #define MESON_SAR_ADC_REG11_EOC BIT(1)
+ #define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
#define MESON_SAR_ADC_REG13 0x34
#define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8)
@@ -315,19 +315,17 @@ static const struct iio_chan_spec meson_sar_adc_and_temp_iio_channels[] = {
struct meson_sar_adc_param {
bool has_bl30_integration;
unsigned long clock_rate;
- u32 bandgap_reg;
unsigned int resolution;
const struct regmap_config *regmap_config;
u8 temperature_trimming_bits;
unsigned int temperature_multiplier;
unsigned int temperature_divider;
u8 disable_ring_counter;
- bool has_reg11;
bool has_vref_select;
u8 vref_select;
u8 cmv_select;
u8 adc_eoc;
- enum meson_sar_adc_vref_sel vref_volatge;
+ enum meson_sar_adc_vref_sel vref_voltage;
};
struct meson_sar_adc_data {
@@ -976,7 +974,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN,
regval);
- if (priv->param->has_reg11) {
+ if (priv->param->regmap_config->max_register >= MESON_SAR_ADC_REG11) {
regval = FIELD_PREP(MESON_SAR_ADC_REG11_EOC, priv->param->adc_eoc);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_EOC, regval);
@@ -989,7 +987,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
}
regval = FIELD_PREP(MESON_SAR_ADC_REG11_VREF_VOLTAGE,
- priv->param->vref_volatge);
+ priv->param->vref_voltage);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_VREF_VOLTAGE, regval);
@@ -1013,16 +1011,15 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- const struct meson_sar_adc_param *param = priv->param;
- u32 enable_mask;
- if (param->bandgap_reg == MESON_SAR_ADC_REG11)
- enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
+ if (priv->param->regmap_config->max_register >= MESON_SAR_ADC_REG11)
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_BANDGAP_EN,
+ on_off ? MESON_SAR_ADC_REG11_BANDGAP_EN : 0);
else
- enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
-
- regmap_update_bits(priv->regmap, param->bandgap_reg, enable_mask,
- on_off ? enable_mask : 0);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
+ MESON_SAR_ADC_DELTA_10_TS_VBG_EN,
+ on_off ? MESON_SAR_ADC_DELTA_10_TS_VBG_EN : 0);
}
static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
@@ -1186,7 +1183,6 @@ static const struct iio_info meson_sar_adc_iio_info = {
static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
.has_bl30_integration = false,
.clock_rate = 1150000,
- .bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.temperature_trimming_bits = 4,
@@ -1197,7 +1193,6 @@ static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
static const struct meson_sar_adc_param meson_sar_adc_meson8b_param = {
.has_bl30_integration = false,
.clock_rate = 1150000,
- .bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
.temperature_trimming_bits = 5,
@@ -1208,35 +1203,29 @@ static const struct meson_sar_adc_param meson_sar_adc_meson8b_param = {
static const struct meson_sar_adc_param meson_sar_adc_gxbb_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
- .vref_volatge = 1,
+ .vref_voltage = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
.cmv_select = 1,
@@ -1245,11 +1234,9 @@ static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.has_bl30_integration = false,
.clock_rate = 1200000,
- .bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
- .has_reg11 = true,
.adc_eoc = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index b0f6727cfe38..90f61c47b1c4 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -12,6 +12,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/limits.h>
#include <linux/regmap.h>
#include <linux/units.h>
@@ -67,6 +68,14 @@ enum pac1921_mxsl {
#define PAC1921_DEFAULT_DI_GAIN 0 /* 2^(value): 1x gain (HW default) */
#define PAC1921_DEFAULT_NUM_SAMPLES 0 /* 2^(value): 1 sample (HW default) */
+#define PAC1921_ACPI_GET_uOHMS_VALS 0
+#define PAC1921_ACPI_GET_LABEL 1
+
+/* f7bb9932-86ee-4516-a236-7a7a742e55cb */
+static const guid_t pac1921_guid =
+ GUID_INIT(0xf7bb9932, 0x86ee, 0x4516, 0xa2,
+ 0x36, 0x7a, 0x7a, 0x74, 0x2e, 0x55, 0xcb);
+
/*
* Pre-computed scale factors for BUS voltage
* format: IIO_VAL_INT_PLUS_NANO
@@ -200,7 +209,7 @@ struct pac1921_priv {
struct {
u16 chan[PAC1921_NUM_MEAS_CHANS];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
@@ -782,7 +791,7 @@ static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
const char *buf, size_t len)
{
struct pac1921_priv *priv = iio_priv(indio_dev);
- u64 rshunt_uohm;
+ u32 rshunt_uohm;
int val, val_fract;
int ret;
@@ -793,10 +802,17 @@ static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
if (ret)
return ret;
- rshunt_uohm = val * MICRO + val_fract;
- if (rshunt_uohm == 0 || rshunt_uohm > INT_MAX)
+ /*
+ * This check validates that the shunt is neither zero nor larger than
+ * INT_MAX. It is done before the calculation so that val * MICRO
+ * cannot overflow.
+ */
+ if ((!val && !val_fract) || val > INT_MAX / MICRO ||
+ (val == INT_MAX / MICRO && val_fract > INT_MAX % MICRO))
return -EINVAL;
+ rshunt_uohm = val * MICRO + val_fract;
+
guard(mutex)(&priv->lock);
priv->rshunt_uohm = rshunt_uohm;
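With MICRO = 1000000 and INT_MAX = 2147483647, the bound works out to val <= 2147 and, when val == 2147, val_fract <= 483647; so for example 2147.483647 ohms is still accepted, while 2147.483648 ohms and 0.000000 ohms are rejected before the multiplication can overflow.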
@@ -1151,6 +1167,61 @@ static void pac1921_regulator_disable(void *data)
regulator_disable(regulator);
}
+/*
+ * Documentation related to the ACPI device definition
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC193X-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static int pac1921_match_acpi_device(struct iio_dev *indio_dev)
+{
+ acpi_handle handle;
+ union acpi_object *status;
+ char *label;
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->client->dev;
+
+ handle = ACPI_HANDLE(dev);
+
+ status = acpi_evaluate_dsm(handle, &pac1921_guid, 1,
+ PAC1921_ACPI_GET_uOHMS_VALS, NULL);
+ if (!status)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not read shunt from ACPI table\n");
+
+ priv->rshunt_uohm = status->package.elements[0].integer.value;
+ ACPI_FREE(status);
+
+ status = acpi_evaluate_dsm(handle, &pac1921_guid, 1,
+ PAC1921_ACPI_GET_LABEL, NULL);
+ if (!status)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not read label from ACPI table\n");
+
+ label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
+ GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+
+ indio_dev->label = label;
+ ACPI_FREE(status);
+
+ return 0;
+}
+
+static int pac1921_parse_of_fw(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->client->dev;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &priv->rshunt_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot read shunt resistor property\n");
+
+ return 0;
+}
+
static int pac1921_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1179,11 +1250,14 @@ static int pac1921_probe(struct i2c_client *client)
priv->di_gain = PAC1921_DEFAULT_DI_GAIN;
priv->n_samples = PAC1921_DEFAULT_NUM_SAMPLES;
- ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
- &priv->rshunt_uohm);
+ if (is_acpi_device_node(dev->fwnode))
+ ret = pac1921_match_acpi_device(indio_dev);
+ else
+ ret = pac1921_parse_of_fw(indio_dev);
if (ret)
return dev_err_probe(dev, ret,
- "Cannot read shunt resistor property\n");
+ "Parameter parsing error\n");
+
if (priv->rshunt_uohm == 0 || priv->rshunt_uohm > INT_MAX)
return dev_err_probe(dev, -EINVAL,
"Invalid shunt resistor: %u\n",
@@ -1246,11 +1320,18 @@ static const struct of_device_id pac1921_of_match[] = {
};
MODULE_DEVICE_TABLE(of, pac1921_of_match);
+static const struct acpi_device_id pac1921_acpi_match[] = {
+ { "MCHP1921" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pac1921_acpi_match);
+
static struct i2c_driver pac1921_driver = {
.driver = {
.name = "pac1921",
.pm = pm_sleep_ptr(&pac1921_pm_ops),
.of_match_table = pac1921_of_match,
+ .acpi_match_table = pac1921_acpi_match,
},
.probe = pac1921_probe,
.id_table = pac1921_id,
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index dfd47a6e1f4a..a29e54754c8f 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -363,7 +363,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
*/
struct {
u16 values[SARADC_MAX_CHANNELS];
- int64_t timestamp;
+ aligned_s64 timestamp;
} data;
int ret;
int i, j = 0;
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index 56ed948a8ae1..337bc8b31b2c 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -634,7 +634,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
struct device *dev = priv->dev;
struct {
u16 vals[RTQ6056_MAX_CHANNEL];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} data;
unsigned int raw;
int i = 0, bit, ret;
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index cd3a7e46ea53..883c167c0670 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -8,12 +8,13 @@
*/
#include <linux/bitfield.h>
-#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -32,20 +33,15 @@
#define RZG2L_ADM1_MS BIT(2)
#define RZG2L_ADM1_BS BIT(4)
#define RZG2L_ADM1_EGA_MASK GENMASK(13, 12)
-#define RZG2L_ADM2_CHSEL_MASK GENMASK(7, 0)
#define RZG2L_ADM3_ADIL_MASK GENMASK(31, 24)
#define RZG2L_ADM3_ADCMP_MASK GENMASK(23, 16)
-#define RZG2L_ADM3_ADCMP_E FIELD_PREP(RZG2L_ADM3_ADCMP_MASK, 0xe)
-#define RZG2L_ADM3_ADSMP_MASK GENMASK(15, 0)
#define RZG2L_ADINT 0x20
-#define RZG2L_ADINT_INTEN_MASK GENMASK(7, 0)
#define RZG2L_ADINT_CSEEN BIT(16)
#define RZG2L_ADINT_INTS BIT(31)
#define RZG2L_ADSTS 0x24
#define RZG2L_ADSTS_CSEST BIT(16)
-#define RZG2L_ADSTS_INTST_MASK GENMASK(7, 0)
#define RZG2L_ADIVC 0x28
#define RZG2L_ADIVC_DIVADC_MASK GENMASK(8, 0)
@@ -56,12 +52,28 @@
#define RZG2L_ADCR(n) (0x30 + ((n) * 0x4))
#define RZG2L_ADCR_AD_MASK GENMASK(11, 0)
-#define RZG2L_ADSMP_DEFAULT_SAMPLING 0x578
-
-#define RZG2L_ADC_MAX_CHANNELS 8
-#define RZG2L_ADC_CHN_MASK 0x7
+#define RZG2L_ADC_MAX_CHANNELS 9
#define RZG2L_ADC_TIMEOUT usecs_to_jiffies(1 * 4)
+/**
+ * struct rzg2l_adc_hw_params - ADC hardware specific parameters
+ * @default_adsmp: default ADC sampling period (see ADM3 register); index 0 is
+ * used for voltage channels, index 1 is used for the temperature channel
+ * @adsmp_mask: ADC sampling period mask (see ADM3 register)
+ * @adint_inten_mask: conversion end interrupt mask (see ADINT register)
+ * @default_adcmp: default ADC cmp (see ADM3 register)
+ * @num_channels: number of supported channels
+ * @adivc: specifies if the ADIVC register is available
+ */
+struct rzg2l_adc_hw_params {
+ u16 default_adsmp[2];
+ u16 adsmp_mask;
+ u16 adint_inten_mask;
+ u8 default_adcmp;
+ u8 num_channels;
+ bool adivc;
+};
+
struct rzg2l_adc_data {
const struct iio_chan_spec *channels;
u8 num_channels;
@@ -69,25 +81,36 @@ struct rzg2l_adc_data {
struct rzg2l_adc {
void __iomem *base;
- struct clk *pclk;
- struct clk *adclk;
struct reset_control *presetn;
struct reset_control *adrstn;
- struct completion completion;
const struct rzg2l_adc_data *data;
+ const struct rzg2l_adc_hw_params *hw_params;
+ struct completion completion;
struct mutex lock;
u16 last_val[RZG2L_ADC_MAX_CHANNELS];
+ bool was_rpm_active;
+};
+
+/**
+ * struct rzg2l_adc_channel - ADC channel descriptor
+ * @name: ADC channel name
+ * @type: ADC channel type
+ */
+struct rzg2l_adc_channel {
+ const char * const name;
+ enum iio_chan_type type;
};
-static const char * const rzg2l_adc_channel_name[] = {
- "adc0",
- "adc1",
- "adc2",
- "adc3",
- "adc4",
- "adc5",
- "adc6",
- "adc7",
+static const struct rzg2l_adc_channel rzg2l_adc_channels[] = {
+ { "adc0", IIO_VOLTAGE },
+ { "adc1", IIO_VOLTAGE },
+ { "adc2", IIO_VOLTAGE },
+ { "adc3", IIO_VOLTAGE },
+ { "adc4", IIO_VOLTAGE },
+ { "adc5", IIO_VOLTAGE },
+ { "adc6", IIO_VOLTAGE },
+ { "adc7", IIO_VOLTAGE },
+ { "adc8", IIO_TEMP },
};
static unsigned int rzg2l_adc_readl(struct rzg2l_adc *adc, u32 reg)
@@ -115,7 +138,7 @@ static void rzg2l_adc_pwr(struct rzg2l_adc *adc, bool on)
static void rzg2l_adc_start_stop(struct rzg2l_adc *adc, bool start)
{
- int timeout = 5;
+ int ret;
u32 reg;
reg = rzg2l_adc_readl(adc, RZG2L_ADM(0));
@@ -128,15 +151,10 @@ static void rzg2l_adc_start_stop(struct rzg2l_adc *adc, bool start)
if (start)
return;
- do {
- usleep_range(100, 200);
- reg = rzg2l_adc_readl(adc, RZG2L_ADM(0));
- timeout--;
- if (!timeout) {
- pr_err("%s stopping ADC timed out\n", __func__);
- break;
- }
- } while (((reg & RZG2L_ADM0_ADBSY) || (reg & RZG2L_ADM0_ADCE)));
+ ret = read_poll_timeout(rzg2l_adc_readl, reg, !(reg & (RZG2L_ADM0_ADBSY | RZG2L_ADM0_ADCE)),
+ 200, 1000, true, adc, RZG2L_ADM(0));
+ if (ret)
+ pr_err("%s stopping ADC timed out\n", __func__);
}
static void rzg2l_set_trigger(struct rzg2l_adc *adc)
@@ -158,8 +176,18 @@ static void rzg2l_set_trigger(struct rzg2l_adc *adc)
rzg2l_adc_writel(adc, RZG2L_ADM(1), reg);
}
+static u8 rzg2l_adc_ch_to_adsmp_index(u8 ch)
+{
+ if (rzg2l_adc_channels[ch].type == IIO_VOLTAGE)
+ return 0;
+
+ return 1;
+}
+
static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
+ u8 index = rzg2l_adc_ch_to_adsmp_index(ch);
u32 reg;
if (rzg2l_adc_readl(adc, RZG2L_ADM(0)) & RZG2L_ADM0_ADBSY)
@@ -169,10 +197,15 @@ static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
/* Select analog input channel subjected to conversion. */
reg = rzg2l_adc_readl(adc, RZG2L_ADM(2));
- reg &= ~RZG2L_ADM2_CHSEL_MASK;
+ reg &= ~GENMASK(hw_params->num_channels - 1, 0);
reg |= BIT(ch);
rzg2l_adc_writel(adc, RZG2L_ADM(2), reg);
+ reg = rzg2l_adc_readl(adc, RZG2L_ADM(3));
+ reg &= ~hw_params->adsmp_mask;
+ reg |= hw_params->default_adsmp[index];
+ rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
+
/*
* Setup ADINT
* INTS[31] - Select pulse signal
@@ -181,36 +214,26 @@ static int rzg2l_adc_conversion_setup(struct rzg2l_adc *adc, u8 ch)
*/
reg = rzg2l_adc_readl(adc, RZG2L_ADINT);
reg &= ~RZG2L_ADINT_INTS;
- reg &= ~RZG2L_ADINT_INTEN_MASK;
+ reg &= ~hw_params->adint_inten_mask;
reg |= (RZG2L_ADINT_CSEEN | BIT(ch));
rzg2l_adc_writel(adc, RZG2L_ADINT, reg);
return 0;
}
-static int rzg2l_adc_set_power(struct iio_dev *indio_dev, bool on)
-{
- struct device *dev = indio_dev->dev.parent;
-
- if (on)
- return pm_runtime_resume_and_get(dev);
-
- return pm_runtime_put_sync(dev);
-}
-
static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc, u8 ch)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
+ struct device *dev = indio_dev->dev.parent;
int ret;
- ret = rzg2l_adc_set_power(indio_dev, true);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
ret = rzg2l_adc_conversion_setup(adc, ch);
- if (ret) {
- rzg2l_adc_set_power(indio_dev, false);
- return ret;
- }
+ if (ret)
+ goto rpm_put;
reinit_completion(&adc->completion);
@@ -218,13 +241,16 @@ static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc
if (!wait_for_completion_timeout(&adc->completion, RZG2L_ADC_TIMEOUT)) {
rzg2l_adc_writel(adc, RZG2L_ADINT,
- rzg2l_adc_readl(adc, RZG2L_ADINT) & ~RZG2L_ADINT_INTEN_MASK);
- rzg2l_adc_start_stop(adc, false);
- rzg2l_adc_set_power(indio_dev, false);
- return -ETIMEDOUT;
+ rzg2l_adc_readl(adc, RZG2L_ADINT) & ~hw_params->adint_inten_mask);
+ ret = -ETIMEDOUT;
}
- return rzg2l_adc_set_power(indio_dev, false);
+ rzg2l_adc_start_stop(adc, false);
+
+rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
}
static int rzg2l_adc_read_raw(struct iio_dev *indio_dev,
@@ -233,24 +259,22 @@ static int rzg2l_adc_read_raw(struct iio_dev *indio_dev,
{
struct rzg2l_adc *adc = iio_priv(indio_dev);
int ret;
- u8 ch;
switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type != IIO_VOLTAGE)
+ case IIO_CHAN_INFO_RAW: {
+ if (chan->type != IIO_VOLTAGE && chan->type != IIO_TEMP)
return -EINVAL;
- mutex_lock(&adc->lock);
- ch = chan->channel & RZG2L_ADC_CHN_MASK;
- ret = rzg2l_adc_conversion(indio_dev, adc, ch);
- if (ret) {
- mutex_unlock(&adc->lock);
+ guard(mutex)(&adc->lock);
+
+ ret = rzg2l_adc_conversion(indio_dev, adc, chan->channel);
+ if (ret)
return ret;
- }
- *val = adc->last_val[ch];
- mutex_unlock(&adc->lock);
+
+ *val = adc->last_val[chan->channel];
return IIO_VAL_INT;
+ }
default:
return -EINVAL;
@@ -261,7 +285,7 @@ static int rzg2l_adc_read_label(struct iio_dev *iio_dev,
const struct iio_chan_spec *chan,
char *label)
{
- return sysfs_emit(label, "%s\n", rzg2l_adc_channel_name[chan->channel]);
+ return sysfs_emit(label, "%s\n", rzg2l_adc_channels[chan->channel].name);
}
static const struct iio_info rzg2l_adc_iio_info = {
@@ -272,6 +296,7 @@ static const struct iio_info rzg2l_adc_iio_info = {
static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
{
struct rzg2l_adc *adc = dev_id;
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
unsigned long intst;
u32 reg;
int ch;
@@ -284,11 +309,11 @@ static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- intst = reg & RZG2L_ADSTS_INTST_MASK;
+ intst = reg & GENMASK(hw_params->num_channels - 1, 0);
if (!intst)
return IRQ_NONE;
- for_each_set_bit(ch, &intst, RZG2L_ADC_MAX_CHANNELS)
+ for_each_set_bit(ch, &intst, hw_params->num_channels)
adc->last_val[ch] = rzg2l_adc_readl(adc, RZG2L_ADCR(ch)) & RZG2L_ADCR_AD_MASK;
/* clear the channel interrupt */
@@ -301,6 +326,7 @@ static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l_adc *adc)
{
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
struct iio_chan_spec *chan_array;
struct rzg2l_adc_data *data;
unsigned int channel;
@@ -313,15 +339,12 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
return -ENOMEM;
num_channels = device_get_child_node_count(&pdev->dev);
- if (!num_channels) {
- dev_err(&pdev->dev, "no channel children\n");
- return -ENODEV;
- }
+ if (!num_channels)
+ return dev_err_probe(&pdev->dev, -ENODEV, "no channel children\n");
- if (num_channels > RZG2L_ADC_MAX_CHANNELS) {
- dev_err(&pdev->dev, "num of channel children out of range\n");
- return -EINVAL;
- }
+ if (num_channels > hw_params->num_channels)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "num of channel children out of range\n");
chan_array = devm_kcalloc(&pdev->dev, num_channels, sizeof(*chan_array),
GFP_KERNEL);
@@ -334,14 +357,14 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
if (ret)
return ret;
- if (channel >= RZG2L_ADC_MAX_CHANNELS)
+ if (channel >= hw_params->num_channels)
return -EINVAL;
- chan_array[i].type = IIO_VOLTAGE;
+ chan_array[i].type = rzg2l_adc_channels[channel].type;
chan_array[i].indexed = 1;
chan_array[i].channel = channel;
chan_array[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- chan_array[i].datasheet_name = rzg2l_adc_channel_name[channel];
+ chan_array[i].datasheet_name = rzg2l_adc_channels[channel].name;
i++;
}
@@ -352,13 +375,13 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
return 0;
}
-static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
+static int rzg2l_adc_hw_init(struct device *dev, struct rzg2l_adc *adc)
{
- int timeout = 5;
+ const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
u32 reg;
int ret;
- ret = clk_prepare_enable(adc->pclk);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
@@ -367,21 +390,19 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
reg |= RZG2L_ADM0_SRESB;
rzg2l_adc_writel(adc, RZG2L_ADM(0), reg);
- while (!(rzg2l_adc_readl(adc, RZG2L_ADM(0)) & RZG2L_ADM0_SRESB)) {
- if (!timeout) {
- ret = -EBUSY;
- goto exit_hw_init;
- }
- timeout--;
- usleep_range(100, 200);
+ ret = read_poll_timeout(rzg2l_adc_readl, reg, reg & RZG2L_ADM0_SRESB,
+ 200, 1000, false, adc, RZG2L_ADM(0));
+ if (ret)
+ goto exit_hw_init;
+
+ if (hw_params->adivc) {
+ /* Only division by 4 can be set */
+ reg = rzg2l_adc_readl(adc, RZG2L_ADIVC);
+ reg &= ~RZG2L_ADIVC_DIVADC_MASK;
+ reg |= RZG2L_ADIVC_DIVADC_4;
+ rzg2l_adc_writel(adc, RZG2L_ADIVC, reg);
}
- /* Only division by 4 can be set */
- reg = rzg2l_adc_readl(adc, RZG2L_ADIVC);
- reg &= ~RZG2L_ADIVC_DIVADC_MASK;
- reg |= RZG2L_ADIVC_DIVADC_4;
- rzg2l_adc_writel(adc, RZG2L_ADIVC, reg);
-
/*
* Setup AMD3
* ADIL[31:24] - Should be always set to 0
@@ -391,35 +412,18 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
reg = rzg2l_adc_readl(adc, RZG2L_ADM(3));
reg &= ~RZG2L_ADM3_ADIL_MASK;
reg &= ~RZG2L_ADM3_ADCMP_MASK;
- reg &= ~RZG2L_ADM3_ADSMP_MASK;
- reg |= (RZG2L_ADM3_ADCMP_E | RZG2L_ADSMP_DEFAULT_SAMPLING);
+ reg &= ~hw_params->adsmp_mask;
+ reg |= FIELD_PREP(RZG2L_ADM3_ADCMP_MASK, hw_params->default_adcmp) |
+ hw_params->default_adsmp[0];
+
rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
exit_hw_init:
- clk_disable_unprepare(adc->pclk);
-
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
-static void rzg2l_adc_pm_runtime_disable(void *data)
-{
- struct device *dev = data;
-
- pm_runtime_disable(dev->parent);
-}
-
-static void rzg2l_adc_pm_runtime_set_suspended(void *data)
-{
- struct device *dev = data;
-
- pm_runtime_set_suspended(dev->parent);
-}
-
-static void rzg2l_adc_reset_assert(void *data)
-{
- reset_control_assert(data);
-}
-
static int rzg2l_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -434,6 +438,10 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
adc = iio_priv(indio_dev);
+ adc->hw_params = device_get_match_data(dev);
+ if (!adc->hw_params || adc->hw_params->num_channels > RZG2L_ADC_MAX_CHANNELS)
+ return -EINVAL;
+
ret = rzg2l_adc_parse_properties(pdev, adc);
if (ret)
return ret;
@@ -444,63 +452,28 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(adc->pclk)) {
- dev_err(dev, "Failed to get pclk");
- return PTR_ERR(adc->pclk);
- }
+ adc->adrstn = devm_reset_control_get_exclusive_deasserted(dev, "adrst-n");
+ if (IS_ERR(adc->adrstn))
+ return dev_err_probe(dev, PTR_ERR(adc->adrstn),
+ "failed to get/deassert adrst-n\n");
- adc->adclk = devm_clk_get(dev, "adclk");
- if (IS_ERR(adc->adclk)) {
- dev_err(dev, "Failed to get adclk");
- return PTR_ERR(adc->adclk);
- }
+ adc->presetn = devm_reset_control_get_exclusive_deasserted(dev, "presetn");
+ if (IS_ERR(adc->presetn))
+ return dev_err_probe(dev, PTR_ERR(adc->presetn),
+ "failed to get/deassert presetn\n");
- adc->adrstn = devm_reset_control_get_exclusive(dev, "adrst-n");
- if (IS_ERR(adc->adrstn)) {
- dev_err(dev, "failed to get adrstn\n");
- return PTR_ERR(adc->adrstn);
- }
-
- adc->presetn = devm_reset_control_get_exclusive(dev, "presetn");
- if (IS_ERR(adc->presetn)) {
- dev_err(dev, "failed to get presetn\n");
- return PTR_ERR(adc->presetn);
- }
-
- ret = reset_control_deassert(adc->adrstn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert adrstn pin, %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_reset_assert, adc->adrstn);
- if (ret) {
- dev_err(&pdev->dev, "failed to register adrstn assert devm action, %d\n",
- ret);
+ pm_runtime_set_autosuspend_delay(dev, 300);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
return ret;
- }
- ret = reset_control_deassert(adc->presetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert presetn pin, %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_reset_assert, adc->presetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to register presetn assert devm action, %d\n",
- ret);
- return ret;
- }
+ platform_set_drvdata(pdev, indio_dev);
- ret = rzg2l_adc_hw_init(adc);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize ADC HW, %d\n", ret);
- return ret;
- }
+ ret = rzg2l_adc_hw_init(dev, adc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to initialize ADC HW\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -513,72 +486,130 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
init_completion(&adc->completion);
- platform_set_drvdata(pdev, indio_dev);
-
indio_dev->name = DRIVER_NAME;
indio_dev->info = &rzg2l_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adc->data->channels;
indio_dev->num_channels = adc->data->num_channels;
- pm_runtime_set_suspended(dev);
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_pm_runtime_set_suspended, &indio_dev->dev);
- if (ret)
- return ret;
-
- pm_runtime_enable(dev);
- ret = devm_add_action_or_reset(&pdev->dev,
- rzg2l_adc_pm_runtime_disable, &indio_dev->dev);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
+static const struct rzg2l_adc_hw_params rzg2l_hw_params = {
+ .num_channels = 8,
+ .default_adcmp = 0xe,
+ .default_adsmp = { 0x578 },
+ .adsmp_mask = GENMASK(15, 0),
+ .adint_inten_mask = GENMASK(7, 0),
+ .adivc = true
+};
+
+static const struct rzg2l_adc_hw_params rzg3s_hw_params = {
+ .num_channels = 9,
+ .default_adcmp = 0x1d,
+ .default_adsmp = { 0x7f, 0xff },
+ .adsmp_mask = GENMASK(7, 0),
+ .adint_inten_mask = GENMASK(11, 0),
+};
+
static const struct of_device_id rzg2l_adc_match[] = {
- { .compatible = "renesas,rzg2l-adc",},
+ { .compatible = "renesas,r9a08g045-adc", .data = &rzg3s_hw_params },
+ { .compatible = "renesas,rzg2l-adc", .data = &rzg2l_hw_params },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_adc_match);
-static int __maybe_unused rzg2l_adc_pm_runtime_suspend(struct device *dev)
+static int rzg2l_adc_pm_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct rzg2l_adc *adc = iio_priv(indio_dev);
rzg2l_adc_pwr(adc, false);
- clk_disable_unprepare(adc->adclk);
- clk_disable_unprepare(adc->pclk);
return 0;
}
-static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
+static int rzg2l_adc_pm_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct rzg2l_adc *adc = iio_priv(indio_dev);
+
+ rzg2l_adc_pwr(adc, true);
+
+ return 0;
+}
+
+static int rzg2l_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct rzg2l_adc *adc = iio_priv(indio_dev);
+ struct reset_control_bulk_data resets[] = {
+ { .rstc = adc->presetn },
+ { .rstc = adc->adrstn },
+ };
int ret;
- ret = clk_prepare_enable(adc->pclk);
+ if (pm_runtime_suspended(dev)) {
+ adc->was_rpm_active = false;
+ } else {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ adc->was_rpm_active = true;
+ }
+
+ ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
if (ret)
- return ret;
+ goto rpm_restore;
+
+ return 0;
+
+rpm_restore:
+ if (adc->was_rpm_active)
+ pm_runtime_force_resume(dev);
+
+ return ret;
+}
+
+static int rzg2l_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct rzg2l_adc *adc = iio_priv(indio_dev);
+ struct reset_control_bulk_data resets[] = {
+ { .rstc = adc->adrstn },
+ { .rstc = adc->presetn },
+ };
+ int ret;
- ret = clk_prepare_enable(adc->adclk);
- if (ret) {
- clk_disable_unprepare(adc->pclk);
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(resets), resets);
+ if (ret)
return ret;
+
+ if (adc->was_rpm_active) {
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto resets_restore;
}
- rzg2l_adc_pwr(adc, true);
+ ret = rzg2l_adc_hw_init(dev, adc);
+ if (ret)
+ goto rpm_restore;
return 0;
+
+rpm_restore:
+ if (adc->was_rpm_active) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+resets_restore:
+ reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
+ return ret;
}
static const struct dev_pm_ops rzg2l_adc_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_adc_pm_runtime_suspend,
- rzg2l_adc_pm_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(rzg2l_adc_pm_runtime_suspend, rzg2l_adc_pm_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(rzg2l_adc_suspend, rzg2l_adc_resume)
};
static struct platform_driver rzg2l_adc_driver = {
@@ -586,7 +617,7 @@ static struct platform_driver rzg2l_adc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = rzg2l_adc_match,
- .pm = &rzg2l_adc_pm_ops,
+ .pm = pm_ptr(&rzg2l_adc_pm_ops),
},
};
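
The rzg2l_adc changes above replace two hand-rolled polling loops with read_poll_timeout() from <linux/iopoll.h>, and switch the raw-read path to guard(mutex)() from <linux/cleanup.h> so the lock is released automatically on every return path. A sketch of the polling conversion with hypothetical register names; nothing below is taken verbatim from the driver:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_STATUS	0x24		/* hypothetical status register */
#define EXAMPLE_BUSY	BIT(0)		/* hypothetical busy flag */

struct example_adc {
	void __iomem *base;
};

static u32 example_readl(struct example_adc *adc, u32 reg)
{
	return readl_relaxed(adc->base + reg);
}

static int example_wait_idle(struct example_adc *adc)
{
	u32 reg;

	/*
	 * Re-read EXAMPLE_STATUS via example_readl() until the busy flag
	 * clears: sleep up to 200 us between reads, give up after 1000 us
	 * total and return -ETIMEDOUT.
	 */
	return read_poll_timeout(example_readl, reg, !(reg & EXAMPLE_BUSY),
				 200, 1000, true, adc, EXAMPLE_STATUS);
}
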
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 6c2cb3dabbbf..1af9be071d8d 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -37,7 +37,7 @@ struct adc081c {
/* Ensure natural alignment of buffer elements */
struct {
u16 channel;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index bf98f9bf942a..da16876c32ae 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -29,7 +29,7 @@ struct adc084s021 {
/* Buffer used to align data */
struct {
__be16 channels[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
/*
* DMA (thus cache coherency maintenance) may require the
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 47fe8e16aee4..4355726b373a 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -448,7 +448,7 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
/* Ensure natural alignment of timestamp */
struct {
s16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
int chan, ret, res;
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index c268e27eec12..de019b3faa48 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -501,7 +501,7 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
struct ads1119_state *st = iio_priv(indio_dev);
struct {
s16 sample;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
unsigned int index;
int ret;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 31f1f229d97a..91a79ebc4bde 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -102,7 +102,7 @@ struct ads131e08_state {
struct completion completion;
struct {
u8 data[ADS131E08_NUM_DATA_BYTES_MAX];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} tmp_buf;
u8 tx_buf[3] __aligned(IIO_DMA_MINALIGN);
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index 169e3591320b..1e4a78677fe5 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -199,7 +199,7 @@ static irqreturn_t lmp92064_trigger_handler(int irq, void *p)
struct lmp92064_adc_priv *priv = iio_priv(indio_dev);
struct {
u16 values[2];
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
} data;
int ret;
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index b56f2503f14c..7dde5713973f 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -157,7 +157,7 @@ struct tsc2046_adc_priv {
/* Scan data for each channel */
u16 data[TI_TSC2046_MAX_CHAN];
/* Timestamp */
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan_buf;
/*
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 4d83c12975c5..513365d42aa5 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -173,10 +173,14 @@ struct vf610_adc {
/* Ensure the timestamp is naturally aligned */
struct {
u16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
+struct vf610_chip_info {
+ u8 num_channels;
+};
+
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
static const u32 vf610_lst_adder[] = { 3, 5, 7, 9, 13, 17, 21, 25 };
@@ -808,14 +812,31 @@ static const struct iio_info vf610_adc_iio_info = {
.attrs = &vf610_attribute_group,
};
+static const struct vf610_chip_info vf610_chip_info = {
+ .num_channels = ARRAY_SIZE(vf610_adc_iio_channels),
+};
+
+static const struct vf610_chip_info imx6sx_chip_info = {
+ .num_channels = 4,
+};
+
static const struct of_device_id vf610_adc_match[] = {
- { .compatible = "fsl,vf610-adc", },
+ { .compatible = "fsl,imx6sx-adc", .data = &imx6sx_chip_info},
+ { .compatible = "fsl,vf610-adc", .data = &vf610_chip_info},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vf610_adc_match);
+static void vf610_adc_action_remove(void *d)
+{
+ struct vf610_adc *info = d;
+
+ regulator_disable(info->vref);
+}
+
static int vf610_adc_probe(struct platform_device *pdev)
{
+ const struct vf610_chip_info *chip_info;
struct device *dev = &pdev->dev;
struct vf610_adc *info;
struct iio_dev *indio_dev;
@@ -823,10 +844,8 @@ static int vf610_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed allocating iio device\n");
- return -ENOMEM;
- }
+ if (!indio_dev)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "Failed allocating iio device\n");
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
@@ -835,6 +854,8 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
+ chip_info = device_get_match_data(dev);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -842,17 +863,12 @@ static int vf610_adc_probe(struct platform_device *pdev)
ret = devm_request_irq(info->dev, irq,
vf610_adc_isr, 0,
dev_name(&pdev->dev), indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed requesting irq, irq = %d\n", irq);
- info->clk = devm_clk_get(&pdev->dev, "adc");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
- PTR_ERR(info->clk));
- return PTR_ERR(info->clk);
- }
+ info->clk = devm_clk_get_enabled(&pdev->dev, "adc");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed getting clock\n");
info->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(info->vref))
@@ -862,6 +878,10 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, vf610_adc_action_remove, info);
+ if (ret)
+ return ret;
+
info->vref_uv = regulator_get_voltage(info->vref);
device_property_read_u32_array(dev, "fsl,adck-max-frequency", info->max_adck_rate, 3);
@@ -877,54 +897,23 @@ static int vf610_adc_probe(struct platform_device *pdev)
indio_dev->info = &vf610_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = vf610_adc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);
-
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not prepare or enable the clock.\n");
- goto error_adc_clk_enable;
- }
+ indio_dev->num_channels = chip_info->num_channels;
vf610_adc_cfg_init(info);
vf610_adc_hw_init(info);
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, &iio_triggered_buffer_setup_ops);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't initialise the buffer\n");
- goto error_iio_device_register;
- }
+ ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev, &iio_pollfunc_store_time,
+ NULL, &iio_triggered_buffer_setup_ops);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't initialise the buffer\n");
mutex_init(&info->lock);
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't register the device.\n");
- goto error_adc_buffer_init;
- }
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't register the device.\n");
return 0;
-
-error_adc_buffer_init:
- iio_triggered_buffer_cleanup(indio_dev);
-error_iio_device_register:
- clk_disable_unprepare(info->clk);
-error_adc_clk_enable:
- regulator_disable(info->vref);
-
- return ret;
-}
-
-static void vf610_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct vf610_adc *info = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(info->vref);
- clk_disable_unprepare(info->clk);
}
static int vf610_adc_suspend(struct device *dev)
@@ -972,7 +961,6 @@ static DEFINE_SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend,
static struct platform_driver vf610_adc_driver = {
.probe = vf610_adc_probe,
- .remove = vf610_adc_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = vf610_adc_match,
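
The vf610_adc conversion above is a devm cleanup: devm_clk_get_enabled() ties the clock lifetime to the device, devm_add_action_or_reset() registers the regulator_disable() call, and devm_iio_triggered_buffer_setup() plus devm_iio_device_register() take over teardown, which is why the .remove() callback can be deleted. A condensed sketch of that pattern with hypothetical names, not the driver's own code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_disable_vref(void *data)
{
	regulator_disable(data);
}

static int example_enable_resources(struct device *dev,
				    struct regulator *vref)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_enabled(dev, "adc");	/* disabled again at unbind */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = regulator_enable(vref);
	if (ret)
		return ret;

	/* undo the enable automatically on probe failure or unbind */
	return devm_add_action_or_reset(dev, example_disable_vref, vref);
}
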
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index d2e1529ad8fd..614e1c4189a9 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -206,7 +206,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: Parent device for the buffer
+ * @dev: DMA channel consumer device
* @channel: DMA channel name, typically "rx".
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
@@ -288,6 +288,21 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+/**
+ * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: DMA channel consumer device
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
@@ -321,7 +336,7 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer)
/**
* devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
- * @dev: Parent device for the buffer
+ * @dev: Device for devm ownership and DMA channel consumer device
* @indio_dev: IIO device to which to attach this buffer.
* @channel: DMA channel name, typically "rx".
* @dir: Direction of buffer (in or out)
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 00ab89b3138b..7d86ed8b02e6 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -2,6 +2,7 @@
#ifndef BME680_H_
#define BME680_H_
+#include <linux/pm.h>
#include <linux/regmap.h>
#define BME680_REG_CHIP_ID 0xD0
@@ -80,6 +81,7 @@
#define BME680_CALIB_RANGE_3_LEN 5
extern const struct regmap_config bme680_regmap_config;
+extern const struct dev_pm_ops bme680_dev_pm_ops;
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index d12270409c8a..9d73fd2cf52c 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -14,7 +14,10 @@
#include <linux/device.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
@@ -111,6 +114,8 @@ enum bme680_scan {
BME680_GAS,
};
+static const char *const bme680_supply_names[] = { "vdd", "vddio" };
+
struct bme680_data {
struct regmap *regmap;
struct bme680_calib bme680;
@@ -817,9 +822,9 @@ static int bme680_read_gas(struct bme680_data *data, int *comp_gas_res)
return 0;
}
-static int bme680_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+static int __bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
int chan_val, ret;
@@ -874,11 +879,11 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_TEMP:
- ret = bme680_read_temp(data, (s16 *)&chan_val);
+ ret = bme680_read_temp(data, &temp_chan_val);
if (ret)
return ret;
- *val = chan_val;
+ *val = temp_chan_val;
return IIO_VAL_INT;
case IIO_PRESSURE:
ret = bme680_read_press(data, &chan_val);
@@ -932,14 +937,33 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
}
}
+static int bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __bme680_read_raw(indio_dev, chan, val, val2, mask);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
static bool bme680_is_valid_oversampling(int rate)
{
return (rate > 0 && rate <= 16 && is_power_of_2(rate));
}
-static int bme680_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
@@ -984,6 +1008,25 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
}
}
+static int bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __bme680_write_raw(indio_dev, chan, val, val2, mask);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
static const char bme680_oversampling_ratio_show[] = "1 2 4 8 16";
static IIO_CONST_ATTR(oversampling_ratio_available,
@@ -1084,6 +1127,29 @@ out:
return IRQ_HANDLED;
}
+static int bme680_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int bme680_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bme680_buffer_setup_ops = {
+ .preenable = bme680_buffer_preenable,
+ .postdisable = bme680_buffer_postdisable,
+};
+
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name)
{
@@ -1114,6 +1180,14 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
data->heater_dur = 150; /* milliseconds */
data->preheat_curr_mA = 0;
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(bme680_supply_names),
+ bme680_supply_names);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get and enable supplies.\n");
+
+ fsleep(BME680_STARTUP_TIME_US);
+
ret = regmap_write(regmap, BME680_REG_SOFT_RESET, BME680_CMD_SOFTRESET);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to reset chip\n");
@@ -1149,15 +1223,47 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
bme680_trigger_handler,
- NULL);
+ &bme680_buffer_setup_ops);
if (ret)
return dev_err_probe(dev, ret,
"iio triggered buffer setup failed\n");
+ /* Enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, BME680_STARTUP_TIME_US);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(bme680_core_probe, "IIO_BME680");
+static int bme680_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ return bme680_set_mode(data, BME680_MODE_SLEEP);
+}
+
+static int bme680_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bme680_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = bme680_chip_config(data);
+ if (ret)
+ return ret;
+
+ return bme680_gas_config(data);
+}
+
+EXPORT_RUNTIME_DEV_PM_OPS(bme680_dev_pm_ops, bme680_runtime_suspend,
+ bme680_runtime_resume, NULL);
+
MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
MODULE_DESCRIPTION("Bosch BME680 Driver");
MODULE_LICENSE("GPL v2");
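
The bme680 core changes wrap the existing read/write paths in runtime-PM get/put calls and add matching buffer preenable/postdisable hooks, so the sensor is only powered while it is actually in use. The bracketing pattern, sketched with a hypothetical function rather than the driver's own code:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_do_measurement(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* ... talk to the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* power down after the autosuspend delay */
	return 0;
}
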
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 7a949228b4a6..ac7763f98a6a 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -51,6 +51,7 @@ static struct i2c_driver bme680_i2c_driver = {
.driver = {
.name = "bme680_i2c",
.of_match_table = bme680_of_i2c_match,
+ .pm = pm_ptr(&bme680_dev_pm_ops),
},
.probe = bme680_i2c_probe,
.id_table = bme680_i2c_id,
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index 3916a51ba68e..ecb24ba0ebc9 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -154,6 +154,7 @@ static struct spi_driver bme680_spi_driver = {
.driver = {
.name = "bme680_spi",
.of_match_table = bme680_of_spi_match,
+ .pm = pm_ptr(&bme680_dev_pm_ops),
},
.probe = bme680_spi_probe,
.id_table = bme680_spi_id,
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 17d1bc518bf2..451fb65dbe60 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -81,7 +81,7 @@ struct ccs811_data {
/* Ensures correct alignment of timestamp if present */
struct {
s16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 4a89cd5894d9..48d5ad2075b6 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -60,7 +60,7 @@ struct ens160_data {
struct mutex mutex;
struct {
__le16 chans[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan __aligned(IIO_DMA_MINALIGN);
u8 fw_version[3];
__le16 buf;
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index ac3080929f0b..d613c54cb28d 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -594,7 +594,7 @@ static irqreturn_t scd30_trigger_handler(int irq, void *p)
struct scd30_state *state = iio_priv(indio_dev);
struct {
int data[SCD30_MEAS_COUNT];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 52cad54e8572..50e3ac44422b 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -665,7 +665,7 @@ static irqreturn_t scd4x_trigger_handler(int irq, void *p)
struct scd4x_state *state = iio_priv(indio_dev);
struct {
uint16_t data[3];
- int64_t ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index c081b5caa475..97526ba87b93 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -109,8 +109,8 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
{
- const int64_t period_min = ts->min_period * ts->mult;
- const int64_t period_max = ts->max_period * ts->mult;
+ const int64_t period_min = (int64_t)ts->min_period * ts->mult;
+ const int64_t period_max = (int64_t)ts->max_period * ts->mult;
int64_t add_max, sub_max;
int64_t delta, jitter;
int64_t adjust;
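
The inv_sensors_timestamp change above is an integer-overflow fix: the period and multiplier fields are 32-bit, so the product was previously computed in 32 bits before being assigned to the 64-bit constant. Casting one operand to int64_t first forces a 64-bit multiply. A minimal illustration with hypothetical parameters:

#include <linux/types.h>

/* Hypothetical helper: widen one operand before multiplying. */
static s64 example_scaled_period(u32 period_ns, u32 mult)
{
	return (s64)period_ns * mult;	/* 64-bit multiply, no truncation */
}
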
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
index caa404edd9d0..78ac689de2fe 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -8,6 +8,8 @@
#include <linux/iio/kfifo_buf.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
#include "ssp_iio_sensor.h"
/**
@@ -70,8 +72,7 @@ EXPORT_SYMBOL_NS(ssp_common_buffer_postdisable, "IIO_SSP_SENSORS");
int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
unsigned int len, int64_t timestamp)
{
- __le32 time;
- int64_t calculated_time = 0;
+ int64_t calculated_time;
struct ssp_sensor_data *spd = iio_priv(indio_dev);
if (indio_dev->scan_bytes == 0)
@@ -82,11 +83,8 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
*/
memcpy(spd->buffer, buf, len);
- if (indio_dev->scan_timestamp) {
- memcpy(&time, &((char *)buf)[len], SSP_TIME_SIZE);
- calculated_time =
- timestamp + (int64_t)le32_to_cpu(time) * 1000000;
- }
+ calculated_time = timestamp +
+ (int64_t)get_unaligned_le32(buf + len) * MEGA;
return iio_push_to_buffers_with_timestamp(indio_dev, spd->buffer,
calculated_time);
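
The ssp_iio change replaces a memcpy() into a temporary __le32 plus le32_to_cpu() with get_unaligned_le32(), which reads a little-endian 32-bit value from an arbitrarily aligned buffer offset, and uses the MEGA constant from <linux/units.h> for the milliseconds-to-nanoseconds scaling. A sketch with a hypothetical buffer layout:

#include <linux/types.h>
#include <linux/unaligned.h>
#include <linux/units.h>

/* Hypothetical layout: sample data followed by a trailing LE32 offset in ms. */
static s64 example_sample_time(const u8 *buf, unsigned int len, s64 base_ns)
{
	return base_ns + (s64)get_unaligned_le32(buf + len) * MEGA;
}
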
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 5d01ba4edbf3..5690a37267d8 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -80,7 +80,7 @@ config AD5421
depends on SPI
help
Say yes here to build support for Analog Devices AD5421 loop-powered
- digital-to-analog convertors (DAC).
+ digital-to-analog converters (DAC).
To compile this driver as module choose M here: the module will be called
ad5421.
@@ -348,6 +348,14 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config BD79703
+ tristate "ROHM Semiconductor BD79703 DAC driver"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for ROHM Semiconductor BD79703 Digital
+ to Analog Converter (DAC).
+
config CIO_DAC
tristate "Measurement Computing CIO-DAC IIO driver"
depends on X86 && (ISA_BUS || PC104)
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 414c152be779..8dd6cce81ed1 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_AD8460) += ad8460.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_AD9739A) += ad9739a.o
obj-$(CONFIG_ADI_AXI_DAC) += adi-axi-dac.o
+obj-$(CONFIG_BD79703) += rohm-bd79703.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_DS4424) += ds4424.o
diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
index 0f495df2e5ce..03e0864f5084 100644
--- a/drivers/iio/dac/ad3552r-common.c
+++ b/drivers/iio/dac/ad3552r-common.c
@@ -22,11 +22,10 @@ EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, "IIO_AD3552R");
const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
- [AD3542R_CH_OUTPUT_RANGE_0__3V] = { 0, 3000 },
[AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
- [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 },
- [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 }
+ [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
+ [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 }
};
EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, "IIO_AD3552R");
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index 216c634f3eaf..8974df625670 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -329,6 +329,12 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
dev_info(st->dev, "Chip ID error. Expected 0x%x, Read 0x%x\n",
AD3552R_ID, id);
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS, 1);
+ if (ret)
+ return ret;
+
ret = st->data->bus_reg_write(st->back,
AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
0, 1);
diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
index fd5a3dfd1d1c..4b5581039ae9 100644
--- a/drivers/iio/dac/ad3552r.h
+++ b/drivers/iio/dac/ad3552r.h
@@ -131,7 +131,7 @@
#define AD3552R_CH1_ACTIVE BIT(1)
#define AD3552R_MAX_RANGES 5
-#define AD3542R_MAX_RANGES 6
+#define AD3542R_MAX_RANGES 5
#define AD3552R_QUAD_SPI 2
extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
@@ -189,16 +189,14 @@ enum ad3552r_ch_vref_select {
enum ad3542r_ch_output_range {
/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__2P5V,
- /* Range from 0 V to 3 V. Requires Rfb1x connection */
- AD3542R_CH_OUTPUT_RANGE_0__3V,
/* Range from 0 V to 5 V. Requires Rfb1x connection */
AD3542R_CH_OUTPUT_RANGE_0__5V,
/* Range from 0 V to 10 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_0__10V,
- /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
- AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
/* Range from -5 V to 5 V. Requires Rfb2x connection */
AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
+ /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
};
enum ad3552r_ch_output_range {
diff --git a/drivers/iio/dac/ad5624r.h b/drivers/iio/dac/ad5624r.h
index 14a439b06eb6..098fb5a7683d 100644
--- a/drivers/iio/dac/ad5624r.h
+++ b/drivers/iio/dac/ad5624r.h
@@ -41,11 +41,9 @@ struct ad5624r_chip_info {
};
/**
- * struct ad5446_state - driver instance specific data
- * @indio_dev: the industrial I/O device
+ * struct ad5624r_state - driver instance specific data
* @us: spi_device
* @chip_info: chip model specific constants, available modes etc
- * @reg: supply regulator
* @vref_mv: actual reference voltage used
* @pwr_down_mask power down mask
* @pwr_down_mode current power down mode
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 39b5dad0d6a5..9c727aa6ea18 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -95,11 +95,6 @@ static int ad5686_spi_probe(struct spi_device *spi)
ad5686_spi_write, ad5686_spi_read);
}
-static void ad5686_spi_remove(struct spi_device *spi)
-{
- ad5686_remove(&spi->dev);
-}
-
static const struct spi_device_id ad5686_spi_id[] = {
{"ad5310r", ID_AD5310R},
{"ad5672r", ID_AD5672R},
@@ -126,7 +121,6 @@ static struct spi_driver ad5686_spi_driver = {
.name = "ad5686",
},
.probe = ad5686_spi_probe,
- .remove = ad5686_spi_remove,
.id_table = ad5686_spi_id,
};
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 8dc578b08784..763af690c444 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -455,39 +455,28 @@ int ad5686_probe(struct device *dev,
struct ad5686_state *st;
struct iio_dev *indio_dev;
unsigned int val, ref_bit_msk;
+ bool has_external_vref;
u8 cmd;
- int ret, i, voltage_uv = 0;
+ int ret, i;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- dev_set_drvdata(dev, indio_dev);
st->dev = dev;
st->write = write;
st->read = read;
- st->reg = devm_regulator_get_optional(dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg);
- if (ret < 0)
- goto error_disable_reg;
-
- voltage_uv = ret;
- }
-
st->chip_info = &ad5686_chip_info_tbl[chip_type];
- if (voltage_uv)
- st->vref_mv = voltage_uv / 1000;
- else
- st->vref_mv = st->chip_info->int_vref_mv;
+ ret = devm_regulator_get_enable_read_voltage(dev, "vcc");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
+
+ has_external_vref = ret != -ENODEV;
+ st->vref_mv = has_external_vref ? ret / 1000 : st->chip_info->int_vref_mv;
/* Set all the power down mode for all channels to 1K pulldown */
for (i = 0; i < st->chip_info->num_channels; i++)
@@ -505,12 +494,12 @@ int ad5686_probe(struct device *dev,
case AD5310_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5310_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
case AD5683_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5683_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
case AD5686_REGMAP:
cmd = AD5686_CMD_INTERNAL_REFER_SETUP;
@@ -519,43 +508,22 @@ int ad5686_probe(struct device *dev,
case AD5693_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
ref_bit_msk = AD5693_REF_BIT_MSK;
- st->use_internal_vref = !voltage_uv;
+ st->use_internal_vref = !has_external_vref;
break;
default:
- ret = -EINVAL;
- goto error_disable_reg;
+ return -EINVAL;
}
- val = (voltage_uv | ref_bit_msk);
+ val = (has_external_vref | ref_bit_msk);
ret = st->write(st, cmd, 0, !!val);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- return 0;
+ return ret;
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad5686_probe, "IIO_AD5686");
-void ad5686_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5686_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-}
-EXPORT_SYMBOL_NS_GPL(ad5686_remove, "IIO_AD5686");
-
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
MODULE_LICENSE("GPL v2");
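
The ad5686 rework, like the ltc2632 and ltc2688 changes further down, collapses the optional-regulator boilerplate into devm_regulator_get_enable_read_voltage(), which gets, enables, and reads the supply in one devm-managed call; -ENODEV means no supply was described and the driver falls back to its internal reference. A sketch of that fallback logic with a hypothetical internal-reference value:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

#define EXAMPLE_INT_VREF_MV	2500	/* hypothetical internal reference */

static int example_get_vref_mv(struct device *dev)
{
	int ret;

	ret = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (ret < 0 && ret != -ENODEV)
		return ret;		/* real error: propagate */

	/* helper returns microvolts; -ENODEV selects the internal reference */
	return ret == -ENODEV ? EXAMPLE_INT_VREF_MV : ret / 1000;
}
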
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 760f852911df..e7d36bae3e59 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -115,10 +115,9 @@ struct ad5686_chip_info {
};
/**
- * struct ad5446_state - driver instance specific data
+ * struct ad5686_state - driver instance specific data
* @spi: spi_device
* @chip_info: chip model specific constants, available modes etc
- * @reg: supply regulator
* @vref_mv: actual reference voltage used
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
@@ -130,7 +129,6 @@ struct ad5686_chip_info {
struct ad5686_state {
struct device *dev;
const struct ad5686_chip_info *chip_info;
- struct regulator *reg;
unsigned short vref_mv;
unsigned int pwr_down_mask;
unsigned int pwr_down_mode;
@@ -157,7 +155,5 @@ int ad5686_probe(struct device *dev,
const char *name, ad5686_write_func write,
ad5686_read_func read);
-void ad5686_remove(struct device *dev);
-
#endif /* __DRIVERS_IIO_DAC_AD5686_H__ */
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index bbcda246c547..0156f32c12c8 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -65,11 +65,6 @@ static int ad5686_i2c_probe(struct i2c_client *i2c)
ad5686_i2c_write, ad5686_i2c_read);
}
-static void ad5686_i2c_remove(struct i2c_client *i2c)
-{
- ad5686_remove(&i2c->dev);
-}
-
static const struct i2c_device_id ad5686_i2c_id[] = {
{"ad5311r", ID_AD5311R},
{"ad5337r", ID_AD5337R},
@@ -116,7 +111,6 @@ static struct i2c_driver ad5686_i2c_driver = {
.of_match_table = ad5686_of_match,
},
.probe = ad5686_i2c_probe,
- .remove = ad5686_i2c_remove,
.id_table = ad5686_i2c_id,
};
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
index 1d4032670482..d3f49b5337d2 100644
--- a/drivers/iio/dac/ad7293.c
+++ b/drivers/iio/dac/ad7293.c
@@ -141,8 +141,6 @@ struct ad7293_state {
/* Protect against concurrent accesses to the device, page selection and data content */
struct mutex lock;
struct gpio_desc *gpio_reset;
- struct regulator *reg_avdd;
- struct regulator *reg_vdrive;
u8 page_select;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
@@ -777,6 +775,15 @@ static int ad7293_reset(struct ad7293_state *st)
static int ad7293_properties_parse(struct ad7293_state *st)
{
struct spi_device *spi = st->spi;
+ int ret;
+
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "failed to enable AVDD\n");
+
+ ret = devm_regulator_get_enable(&spi->dev, "vdrive");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "failed to enable VDRIVE\n");
st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
GPIOD_OUT_HIGH);
@@ -784,24 +791,9 @@ static int ad7293_properties_parse(struct ad7293_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_reset),
"failed to get the reset GPIO\n");
- st->reg_avdd = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->reg_avdd))
- return dev_err_probe(&spi->dev, PTR_ERR(st->reg_avdd),
- "failed to get the AVDD voltage\n");
-
- st->reg_vdrive = devm_regulator_get(&spi->dev, "vdrive");
- if (IS_ERR(st->reg_vdrive))
- return dev_err_probe(&spi->dev, PTR_ERR(st->reg_vdrive),
- "failed to get the VDRIVE voltage\n");
-
return 0;
}
-static void ad7293_reg_disable(void *data)
-{
- regulator_disable(data);
-}
-
static int ad7293_init(struct ad7293_state *st)
{
int ret;
@@ -816,48 +808,6 @@ static int ad7293_init(struct ad7293_state *st)
if (ret)
return ret;
- ret = regulator_enable(st->reg_avdd);
- if (ret) {
- dev_err(&spi->dev,
- "Failed to enable specified AVDD Voltage!\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
- st->reg_avdd);
- if (ret)
- return ret;
-
- ret = regulator_enable(st->reg_vdrive);
- if (ret) {
- dev_err(&spi->dev,
- "Failed to enable specified VDRIVE Voltage!\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
- st->reg_vdrive);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(st->reg_avdd);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read avdd regulator: %d\n", ret);
- return ret;
- }
-
- if (ret > 5500000 || ret < 4500000)
- return -EINVAL;
-
- ret = regulator_get_voltage(st->reg_vdrive);
- if (ret < 0) {
- dev_err(&spi->dev,
- "Failed to read vdrive regulator: %d\n", ret);
- return ret;
- }
- if (ret > 5500000 || ret < 1700000)
- return -EINVAL;
-
/* Check Chip ID */
ret = __ad7293_spi_read(st, AD7293_REG_DEVICE_ID, &chip_id);
if (ret)
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 919e8c880697..8a362fae2eca 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -23,8 +23,6 @@ struct ad8801_state {
unsigned char dac_cache[8]; /* Value write on each channel */
unsigned int vrefh_mv;
unsigned int vrefl_mv;
- struct regulator *vrefh_reg;
- struct regulator *vrefl_reg;
__be16 data __aligned(IIO_DMA_MINALIGN);
};
@@ -122,86 +120,34 @@ static int ad8801_probe(struct spi_device *spi)
state->spi = spi;
id = spi_get_device_id(spi);
- state->vrefh_reg = devm_regulator_get(&spi->dev, "vrefh");
- if (IS_ERR(state->vrefh_reg))
- return dev_err_probe(&spi->dev, PTR_ERR(state->vrefh_reg),
- "Vrefh regulator not specified\n");
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vrefh");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to get Vrefh voltage\n");
- ret = regulator_enable(state->vrefh_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vrefh regulator: %d\n",
- ret);
- return ret;
- }
-
- ret = regulator_get_voltage(state->vrefh_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vrefh regulator: %d\n",
- ret);
- goto error_disable_vrefh_reg;
- }
state->vrefh_mv = ret / 1000;
if (id->driver_data == ID_AD8803) {
- state->vrefl_reg = devm_regulator_get(&spi->dev, "vrefl");
- if (IS_ERR(state->vrefl_reg)) {
- ret = dev_err_probe(&spi->dev, PTR_ERR(state->vrefl_reg),
- "Vrefl regulator not specified\n");
- goto error_disable_vrefh_reg;
- }
-
- ret = regulator_enable(state->vrefl_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vrefl regulator: %d\n",
- ret);
- goto error_disable_vrefh_reg;
- }
-
- ret = regulator_get_voltage(state->vrefl_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vrefl regulator: %d\n",
- ret);
- goto error_disable_vrefl_reg;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vrefl");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to get Vrefl voltage\n");
+
state->vrefl_mv = ret / 1000;
- } else {
- state->vrefl_mv = 0;
- state->vrefl_reg = NULL;
}
- spi_set_drvdata(spi, indio_dev);
indio_dev->info = &ad8801_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad8801_channels;
indio_dev->num_channels = ARRAY_SIZE(ad8801_channels);
indio_dev->name = id->name;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n",
- ret);
- goto error_disable_vrefl_reg;
- }
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to register iio device\n");
return 0;
-
-error_disable_vrefl_reg:
- if (state->vrefl_reg)
- regulator_disable(state->vrefl_reg);
-error_disable_vrefh_reg:
- regulator_disable(state->vrefh_reg);
- return ret;
-}
-
-static void ad8801_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad8801_state *state = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (state->vrefl_reg)
- regulator_disable(state->vrefl_reg);
- regulator_disable(state->vrefh_reg);
}
static const struct spi_device_id ad8801_ids[] = {
@@ -216,7 +162,6 @@ static struct spi_driver ad8801_driver = {
.name = "ad8801",
},
.probe = ad8801_probe,
- .remove = ad8801_remove,
.id_table = ad8801_ids,
};
module_spi_driver(ad8801_driver);
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index a4fb2509c950..999348836d87 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -41,13 +41,11 @@ struct ltc2632_chip_info {
* @spi_dev: pointer to the spi_device struct
* @powerdown_cache_mask: used to show current channel powerdown state
* @vref_mv: used reference voltage (internal or external)
- * @vref_reg: regulator for the reference voltage
*/
struct ltc2632_state {
struct spi_device *spi_dev;
unsigned int powerdown_cache_mask;
int vref_mv;
- struct regulator *vref_reg;
};
enum ltc2632_supported_device_ids {
@@ -310,6 +308,7 @@ static int ltc2632_probe(struct spi_device *spi)
struct ltc2632_state *st;
struct iio_dev *indio_dev;
struct ltc2632_chip_info *chip_info;
+ bool has_external_vref;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -318,49 +317,31 @@ static int ltc2632_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
st->spi_dev = spi;
chip_info = (struct ltc2632_chip_info *)
spi_get_device_id(spi)->driver_data;
- st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (PTR_ERR(st->vref_reg) == -ENODEV) {
- /* use internal reference voltage */
- st->vref_reg = NULL;
- st->vref_mv = chip_info->vref_mv;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get vref regulator voltage\n");
- ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER,
- 0, 0, 0);
- if (ret) {
- dev_err(&spi->dev,
- "Set internal reference command failed, %d\n",
- ret);
- return ret;
- }
- } else if (IS_ERR(st->vref_reg)) {
- dev_err(&spi->dev,
- "Error getting voltage reference regulator\n");
- return PTR_ERR(st->vref_reg);
- } else {
- /* use external reference voltage */
- ret = regulator_enable(st->vref_reg);
- if (ret) {
- dev_err(&spi->dev,
- "enable reference regulator failed, %d\n",
- ret);
- return ret;
- }
- st->vref_mv = regulator_get_voltage(st->vref_reg) / 1000;
+ has_external_vref = ret != -ENODEV;
+ st->vref_mv = has_external_vref ? ret / 1000 : chip_info->vref_mv;
+ if (has_external_vref) {
ret = ltc2632_spi_write(spi, LTC2632_CMD_EXTERNAL_REFER,
- 0, 0, 0);
- if (ret) {
- dev_err(&spi->dev,
- "Set external reference command failed, %d\n",
- ret);
- return ret;
- }
+ 0, 0, 0);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Set external reference command failed\n");
+ } else {
+ ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER,
+ 0, 0, 0);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Set internal reference command failed\n");
}
indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name;
@@ -369,18 +350,7 @@ static int ltc2632_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
- return iio_device_register(indio_dev);
-}
-
-static void ltc2632_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ltc2632_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (st->vref_reg)
- regulator_disable(st->vref_reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ltc2632_id[] = {
@@ -472,7 +442,6 @@ static struct spi_driver ltc2632_driver = {
.of_match_table = ltc2632_of_match,
},
.probe = ltc2632_probe,
- .remove = ltc2632_remove,
.id_table = ltc2632_id,
};
module_spi_driver(ltc2632_driver);
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 376dca163c91..bdc857c7fa6d 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -842,7 +842,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st)
return 0;
}
-static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
+static int ltc2688_setup(struct ltc2688_state *st, bool has_external_vref)
{
struct device *dev = &st->spi->dev;
struct gpio_desc *gpio;
@@ -881,18 +881,13 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
if (ret)
return ret;
- if (!vref)
+ if (!has_external_vref)
return 0;
return regmap_set_bits(st->regmap, LTC2688_CMD_CONFIG,
LTC2688_CONFIG_EXT_REF);
}
-static void ltc2688_disable_regulator(void *regulator)
-{
- regulator_disable(regulator);
-}
-
static bool ltc2688_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -947,8 +942,8 @@ static int ltc2688_probe(struct spi_device *spi)
static const char * const regulators[] = { "vcc", "iovcc" };
struct ltc2688_state *st;
struct iio_dev *indio_dev;
- struct regulator *vref_reg;
struct device *dev = &spi->dev;
+ bool has_external_vref;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
@@ -973,34 +968,15 @@ static int ltc2688_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to enable regulators\n");
- vref_reg = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(vref_reg)) {
- if (PTR_ERR(vref_reg) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(vref_reg),
- "Failed to get vref regulator");
-
- vref_reg = NULL;
- /* internal reference */
- st->vref = 4096;
- } else {
- ret = regulator_enable(vref_reg);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to enable vref regulators\n");
-
- ret = devm_add_action_or_reset(dev, ltc2688_disable_regulator,
- vref_reg);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(vref_reg);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to get vref\n");
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "Failed to get vref regulator voltage\n");
- st->vref = ret / 1000;
- }
+ has_external_vref = ret != -ENODEV;
+ st->vref = has_external_vref ? ret / 1000 : 0;
- ret = ltc2688_setup(st, vref_reg);
+ ret = ltc2688_setup(st, has_external_vref);
if (ret)
return ret;
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 18ba3eaaad75..b062a18be5e7 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -32,7 +32,6 @@ enum max5821_device_ids {
struct max5821_data {
struct i2c_client *client;
- struct regulator *vref_reg;
unsigned short vref_mv;
bool powerdown[MAX5821_MAX_DAC_CHANNELS];
u8 powerdown_mode[MAX5821_MAX_DAC_CHANNELS];
@@ -295,11 +294,6 @@ static const struct iio_info max5821_info = {
.write_raw = max5821_write_raw,
};
-static void max5821_regulator_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int max5821_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -321,32 +315,10 @@ static int max5821_probe(struct i2c_client *client)
data->powerdown_mode[tmp] = MAX5821_100KOHM_TO_GND;
}
- data->vref_reg = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(data->vref_reg))
- return dev_err_probe(&client->dev, PTR_ERR(data->vref_reg),
- "Failed to get vref regulator\n");
-
- ret = regulator_enable(data->vref_reg);
- if (ret) {
- dev_err(&client->dev,
- "Failed to enable vref regulator: %d\n", ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&client->dev, max5821_regulator_disable,
- data->vref_reg);
- if (ret) {
- dev_err(&client->dev,
- "Failed to add action to managed regulator: %d\n", ret);
- return ret;
- }
-
- ret = regulator_get_voltage(data->vref_reg);
- if (ret < 0) {
- dev_err(&client->dev,
- "Failed to get voltage on regulator: %d\n", ret);
- return ret;
- }
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vref");
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to get vref regulator voltage\n");
data->vref_mv = ret / 1000;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 25bb1c0490af..1337fb02ccf5 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -379,7 +379,7 @@ static int mcp4725_probe_dt(struct device *dev,
struct mcp4725_platform_data *pdata)
{
/* check if is the vref-supply defined */
- pdata->use_vref = device_property_read_bool(dev, "vref-supply");
+ pdata->use_vref = device_property_present(dev, "vref-supply");
pdata->vref_buffered =
device_property_read_bool(dev, "microchip,vref-buffered");
diff --git a/drivers/iio/dac/rohm-bd79703.c b/drivers/iio/dac/rohm-bd79703.c
new file mode 100644
index 000000000000..e998ab51052e
--- /dev/null
+++ b/drivers/iio/dac/rohm-bd79703.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BD79703 ROHM Digital to Analog converter
+ *
+ * Copyright (c) 2024, ROHM Semiconductor.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#define BD79703_MAX_REGISTER 0xf
+#define BD79703_DAC_BITS 8
+#define BD79703_REG_OUT_ALL GENMASK(2, 0)
+
+/*
+ * The BD79703 uses 12-bit SPI commands. The first four bits (high bits) define
+ * the channel(s) which are operated on, and also the mode. The mode can be to
+ * set a DAC word only, or to set a DAC word and output it. The data-sheet is
+ * not very specific on how a previously set DAC word can be taken into use.
+ * Thus this driver only uses the 'set DAC and output it' mode.
+ *
+ * The BD79703 latches the last 12 bits when the chip-select is toggled. Thus
+ * we can use 16-bit transfers, which should be widely supported. To simplify
+ * this further, we treat the last 8 bits as a value and the first 8 bits as an
+ * address. This allows us to separate channels/mode by address and treat the
+ * 8-bit register value as the DAC word. The highest 4 bits of the address will
+ * be discarded when the transfer is latched.
+ */
+static const struct regmap_config bd79703_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BD79703_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+};
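A minimal standalone sketch (plain user-space C, not taken from the patch) of how the 8-bit regmap address and value bytes map onto the frame described above; the driver writes addresses 1..6 for channels 0..5 and 0x7 (BD79703_REG_OUT_ALL) for all outputs, and the chip latches only the low 12 bits, so the upper address nibble is ignored:

#include <stdint.h>
#include <stdio.h>

/* byte 0: channel/mode nibble, byte 1: 8-bit DAC word */
static uint16_t bd79703_frame(uint8_t address, uint8_t dac_word)
{
	return ((uint16_t)address << 8) | dac_word;
}

int main(void)
{
	/* full-scale on channel 0 -> frame 0x01ff, of which 0x1ff is latched */
	printf("0x%04x\n", bd79703_frame(1, 0xff));
	return 0;
}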
+
+struct bd79703_data {
+ struct regmap *regmap;
+ int vfs;
+};
+
+static int bd79703_read_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct bd79703_data *data = iio_priv(idev);
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ *val = data->vfs / 1000;
+ *val2 = BD79703_DAC_BITS;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
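A worked example of the resulting scale (the 2.5 V supply is hypothetical): devm_regulator_get_enable_read_voltage() below stores Vfs in microvolts in data->vfs, so with a 2.5 V supply read_raw() reports SCALE as 2500 / 2^8, and IIO_VAL_FRACTIONAL_LOG2 resolves that to roughly 9.77 mV per 8-bit DAC code.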
+
+static int bd79703_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct bd79703_data *data = iio_priv(idev);
+
+ if (val < 0 || val >= 1 << BD79703_DAC_BITS)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, chan->channel + 1, val);
+};
+
+static const struct iio_info bd79703_info = {
+ .read_raw = bd79703_read_raw,
+ .write_raw = bd79703_write_raw,
+};
+
+#define BD79703_CHAN(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (_chan), \
+}
+
+static const struct iio_chan_spec bd79703_channels[] = {
+ BD79703_CHAN(0),
+ BD79703_CHAN(1),
+ BD79703_CHAN(2),
+ BD79703_CHAN(3),
+ BD79703_CHAN(4),
+ BD79703_CHAN(5),
+};
+
+static int bd79703_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct bd79703_data *data;
+ struct iio_dev *idev;
+ int ret;
+
+ idev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!idev)
+ return -ENOMEM;
+
+ data = iio_priv(idev);
+
+ data->regmap = devm_regmap_init_spi(spi, &bd79703_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable VCC\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vfs");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get Vfs\n");
+
+ data->vfs = ret;
+ idev->channels = bd79703_channels;
+ idev->num_channels = ARRAY_SIZE(bd79703_channels);
+ idev->modes = INDIO_DIRECT_MODE;
+ idev->info = &bd79703_info;
+ idev->name = "bd79703";
+
+ /* Initialize all to output zero */
+ ret = regmap_write(data->regmap, BD79703_REG_OUT_ALL, 0);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, idev);
+}
+
+static const struct spi_device_id bd79703_id[] = {
+ { "bd79703", },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bd79703_id);
+
+static const struct of_device_id bd79703_of_match[] = {
+ { .compatible = "rohm,bd79703", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd79703_of_match);
+
+static struct spi_driver bd79703_driver = {
+ .driver = {
+ .name = "bd79703",
+ .of_match_table = bd79703_of_match,
+ },
+ .probe = bd79703_probe,
+ .id_table = bd79703_id,
+};
+module_spi_driver(bd79703_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("ROHM BD79703 DAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index 600e9725da78..223fc181109c 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -75,7 +75,7 @@ struct adxrs290_state {
/* Ensure correct alignment of timestamp when present */
struct {
s16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
};
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index ba877d067afb..deb3c6459dde 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -99,7 +99,7 @@ struct bmg160_data {
/* Ensure naturally aligned timestamp */
struct {
s16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u32 dps_range;
int ev_enable_state;
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 4cfa0d439560..a624400a239c 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -52,7 +52,7 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
*/
struct {
__be16 buf[ITG3200_SCAN_ELEMENTS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret = itg3200_read_all_channels(st->i2c, scan.buf);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index b6883e8b2a8b..d66224bed8e3 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -474,7 +474,7 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
int ret;
struct {
__be16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
s64 timestamp;
unsigned int datums_from_fifo = 0;
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 6b0aa3a3f025..2323974b805c 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -35,7 +35,7 @@ struct am2315_data {
/* Ensure timestamp is naturally aligned */
struct {
s16 chans[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 9b355380c9bf..a303f704b7ed 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -44,7 +44,7 @@ struct hdc100x_data {
/* Ensure natural alignment of timestamp */
struct {
__be16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index 721359e226cb..0215f11fc35e 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -40,7 +40,7 @@ struct hts221_hw {
/* Ensure natural alignment of timestamp */
struct {
__le16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 0a5d13d2240e..727e0a11eac1 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -878,11 +878,32 @@ static const struct iio_chan_spec adis16545_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(17),
};
+static const struct iio_chan_spec adis16489_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_PRESSURE_CHANNEL(),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTANG_CHANNEL_NO_SCAN(Z),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(X),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Y),
+ ADIS16480_DELTVEL_CHANNEL_NO_SCAN(Z),
+};
+
enum adis16480_variant {
ADIS16375,
ADIS16480,
ADIS16485,
+ ADIS16486,
+ ADIS16487,
ADIS16488,
+ ADIS16489,
ADIS16490,
ADIS16495_1,
ADIS16495_2,
@@ -1038,6 +1059,38 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0, 0),
},
+ [ADIS16486] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16486, &adis16480_timeouts, 0, 0),
+ },
+ [ADIS16487] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 5,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 50,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16487, &adis16485_timeouts, 0, 0),
+ },
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
@@ -1054,6 +1107,22 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0, 0),
},
+ [ADIS16489] = {
+ .channels = adis16489_channels,
+ .num_channels = ARRAY_SIZE(adis16489_channels),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 18,
+ .temp_scale = 5650, /* 5.65 milli degree Celsius */
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 200,
+ .int_clk = 2460000,
+ .max_dec_rate = 2048,
+ .has_sleep_cnt = true,
+ .filter_freqs = adis16480_def_filter_freqs,
+ .adis_data = ADIS16480_DATA(16489, &adis16480_timeouts, 0, 0),
+ },
[ADIS16490] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
@@ -1741,7 +1810,10 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16375", ADIS16375 },
{ "adis16480", ADIS16480 },
{ "adis16485", ADIS16485 },
+ { "adis16486", ADIS16486 },
+ { "adis16487", ADIS16487 },
{ "adis16488", ADIS16488 },
+ { "adis16489", ADIS16489 },
{ "adis16490", ADIS16490 },
{ "adis16495-1", ADIS16495_1 },
{ "adis16495-2", ADIS16495_2 },
@@ -1763,7 +1835,10 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16375" },
{ .compatible = "adi,adis16480" },
{ .compatible = "adi,adis16485" },
+ { .compatible = "adi,adis16486" },
+ { .compatible = "adi,adis16487" },
{ .compatible = "adi,adis16488" },
+ { .compatible = "adi,adis16489" },
{ .compatible = "adi,adis16490" },
{ .compatible = "adi,adis16495-1" },
{ .compatible = "adi,adis16495-2" },
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index f7d7f4442e65..7f386c5e58b4 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -174,7 +174,7 @@ struct bmi323_data {
__le16 fifo_buff[BMI323_FIFO_FULL_IN_WORDS] __aligned(IIO_DMA_MINALIGN);
struct {
__le16 channels[BMI323_CHAN_MAX];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
__le16 steps_count[BMI323_STEP_LEN];
};
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 0728d38260a1..597c402b98de 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -207,7 +207,7 @@ struct bno055_priv {
bool sw_reset;
struct {
__le16 chans[BNO055_SCAN_CH_COUNT];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} buf;
struct dentry *debugfs;
};
@@ -1193,7 +1193,7 @@ static ssize_t serialnumber_show(struct device *dev,
}
static ssize_t calibration_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct bno055_priv *priv = iio_priv(dev_to_iio_dev(kobj_to_dev(kobj)));
@@ -1348,16 +1348,16 @@ static struct attribute *bno055_attrs[] = {
NULL
};
-static BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
+static const BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
-static struct bin_attribute *bno055_bin_attrs[] = {
+static const struct bin_attribute *const bno055_bin_attrs[] = {
&bin_attr_calibration_data,
NULL
};
static const struct attribute_group bno055_attrs_group = {
.attrs = bno055_attrs,
- .bin_attrs = bno055_bin_attrs,
+ .bin_attrs_new = bno055_bin_attrs,
};
static const struct iio_info bno055_info = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 7968aa27f9fd..388520ec60b5 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -178,7 +178,7 @@ static const struct iio_chan_spec inv_icm42600_accel_channels[] = {
struct inv_icm42600_accel_buffer {
struct inv_icm42600_fifo_sensor_data accel;
int16_t temp;
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
};
#define INV_ICM42600_SCAN_MASK_ACCEL_3AXIS \
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index c6bb68bf5e14..591ed78a55bb 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -78,7 +78,7 @@ static const struct iio_chan_spec inv_icm42600_gyro_channels[] = {
struct inv_icm42600_gyro_buffer {
struct inv_icm42600_fifo_sensor_data gyro;
int16_t temp;
- int64_t timestamp __aligned(8);
+ aligned_s64 timestamp;
};
#define INV_ICM42600_SCAN_MASK_GYRO_3AXIS \
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 844b611b825a..5bcd5e797046 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -277,6 +277,14 @@ static const struct inv_mpu6050_hw hw_info[] = {
.temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
.startup_time = {INV_ICM20690_GYRO_STARTUP_TIME, INV_ICM20690_ACCEL_STARTUP_TIME},
},
+ { .whoami = INV_IAM20380_WHOAMI_VALUE,
+ .name = "IAM20380",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
+ },
{
.whoami = INV_IAM20680_WHOAMI_VALUE,
.name = "IAM20680",
@@ -1519,6 +1527,14 @@ static const struct iio_chan_spec inv_mpu6050_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
+static const struct iio_chan_spec inv_iam20380_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+};
+
static const struct iio_chan_spec inv_mpu6500_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
@@ -1623,6 +1639,10 @@ static const struct iio_chan_spec inv_mpu9250_channels[] = {
| BIT(INV_MPU9X50_SCAN_MAGN_Y) \
| BIT(INV_MPU9X50_SCAN_MAGN_Z))
+static const unsigned long inv_iam20380_scan_masks[] = {
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+};
+
static const unsigned long inv_mpu9x50_scan_masks[] = {
/* 3-axis accel */
INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
@@ -2026,6 +2046,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
break;
+ case INV_IAM20380:
+ indio_dev->channels = inv_iam20380_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_iam20380_channels);
+ indio_dev->available_scan_masks = inv_iam20380_scan_masks;
+ break;
case INV_ICM20600:
case INV_ICM20602:
indio_dev->channels = inv_mpu6500_channels;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 307a06f4df2e..91d77f94d204 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -34,6 +34,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
case INV_ICM20689:
case INV_ICM20600:
case INV_ICM20602:
+ case INV_IAM20380:
case INV_IAM20680:
/* no i2c auxiliary bus on the chip */
return false;
@@ -187,6 +188,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
+ {"iam20380", INV_IAM20380},
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
@@ -253,6 +255,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20690
},
{
+ .compatible = "invensense,iam20380",
+ .data = (void *)INV_IAM20380
+ },
+ {
.compatible = "invensense,iam20680",
.data = (void *)INV_IAM20680
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index a6862cf42639..211901f8b8eb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -84,6 +84,7 @@ enum inv_devices {
INV_ICM20600,
INV_ICM20602,
INV_ICM20690,
+ INV_IAM20380,
INV_IAM20680,
INV_IAM20680HP,
INV_IAM20680HT,
@@ -425,6 +426,7 @@ struct inv_mpu6050_state {
#define INV_ICM20600_WHOAMI_VALUE 0x11
#define INV_ICM20602_WHOAMI_VALUE 0x12
#define INV_ICM20690_WHOAMI_VALUE 0x20
+#define INV_IAM20380_WHOAMI_VALUE 0xB5
#define INV_IAM20680_WHOAMI_VALUE 0xA9
#define INV_IAM20680HP_WHOAMI_VALUE 0xF8
#define INV_IAM20680HT_WHOAMI_VALUE 0xFA
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index ab415874d699..20de6eb5cd35 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -79,6 +79,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"icm20600", INV_ICM20600},
{"icm20602", INV_ICM20602},
{"icm20690", INV_ICM20690},
+ {"iam20380", INV_IAM20380},
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
@@ -141,6 +142,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20690
},
{
+ .compatible = "invensense,iam20380",
+ .data = (void *)INV_IAM20380
+ },
+ {
.compatible = "invensense,iam20680",
.data = (void *)INV_IAM20680
},
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 89d687ec3099..3cabec3b152d 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -6,9 +6,6 @@ config IIO_ST_LSM6DSX
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
- select IIO_ST_LSM6DSX_I2C if (I2C)
- select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
- select IIO_ST_LSM6DSX_I3C if (I3C)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor.
@@ -42,16 +39,19 @@ config IIO_ST_LSM6DSX
will be called st_lsm6dsx.
config IIO_ST_LSM6DSX_I2C
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors I2C Interface"
+ depends on I2C && IIO_ST_LSM6DSX
+ default I2C && IIO_ST_LSM6DSX
select REGMAP_I2C
config IIO_ST_LSM6DSX_SPI
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors SPI Interface"
+ depends on SPI_MASTER && IIO_ST_LSM6DSX
+ default SPI_MASTER && IIO_ST_LSM6DSX
select REGMAP_SPI
config IIO_ST_LSM6DSX_I3C
- tristate
- depends on IIO_ST_LSM6DSX
+ tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors I3C Interface"
+ depends on I3C && IIO_ST_LSM6DSX
+ default I3C && IIO_ST_LSM6DSX
select REGMAP_I3C
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
index 6952d901316f..f968f32890d1 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
@@ -9,7 +9,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/i3c/device.h>
-#include <linux/i3c/master.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -30,15 +29,16 @@ static int st_lsm6dsx_i3c_probe(struct i3c_device *i3cdev)
};
const struct i3c_device_id *id = i3c_device_match_id(i3cdev,
st_lsm6dsx_i3c_ids);
+ struct device *dev = i3cdev_to_dev(i3cdev);
struct regmap *regmap;
regmap = devm_regmap_init_i3c(i3cdev, &st_lsm6dsx_i3c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&i3cdev->dev, "Failed to register i3c regmap %ld\n", PTR_ERR(regmap));
+ dev_err(dev, "Failed to register i3c regmap %ld\n", PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- return st_lsm6dsx_probe(&i3cdev->dev, 0, (uintptr_t)id->data, regmap);
+ return st_lsm6dsx_probe(dev, 0, (uintptr_t)id->data, regmap);
}
static struct i3c_driver st_lsm6dsx_driver = {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2708f87df719..a80f7cc25a27 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1137,7 +1137,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
int ret;
indio_dev->active_scan_mask = config->scan_mask;
- indio_dev->scan_timestamp = config->scan_timestamp;
+ ACCESS_PRIVATE(indio_dev, scan_timestamp) = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
iio_dev_opaque->currentmode = config->mode;
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 3b5a99815062..d70ebe3bf774 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -915,6 +915,41 @@ int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_sel_for_scale_using_time, "IIO_GTS_HELPER");
+/**
+ * iio_gts_find_gain_time_sel_for_scale - Fetch gain and time selectors for scale
+ * @gts: Gain time scale descriptor
+ * @scale_int: Integral part of the scale (typically val1)
+ * @scale_nano: Fractional part of the scale (nano or ppb)
+ * @gain_sel: Pointer to value where gain selector is stored.
+ * @time_sel: Pointer to value where time selector is stored.
+ *
+ * Wrapper around iio_gts_find_gain_sel_for_scale_using_time() to fetch the
+ * gain and time selectors for a given scale.
+ *
+ * Return: 0 on success and -EINVAL on error.
+ */
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel)
+{
+ int i, ret;
+
+ for (i = 0; i < gts->num_itime; i++) {
+ *time_sel = gts->itime_table[i].sel;
+ ret = iio_gts_find_gain_sel_for_scale_using_time(gts, *time_sel,
+ scale_int,
+ scale_nano,
+ gain_sel);
+ if (ret)
+ continue;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_time_sel_for_scale, "IIO_GTS_HELPER");
+
static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
{
const struct iio_itime_sel_mul *itime;
@@ -1086,6 +1121,48 @@ int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_by_old_gain_time, "IIO_GTS_HELPER");
+/**
+ * iio_gts_find_new_gain_by_gain_time_min - compensate for time change
+ * @gts: Gain time scale descriptor
+ * @old_gain: Previously set gain
+ * @old_time: Selector corresponding to the previously set time
+ * @new_time: Selector corresponding to the new time to be set
+ * @new_gain: Pointer to value where new gain is to be written
+ * @in_range: Indicate if the @new_gain was in the range of
+ * supported gains.
+ *
+ * Wrapper around iio_gts_find_new_gain_by_old_gain_time() that tries to
+ * set an optimal value if no exact match was found, defaulting to the
+ * minimum gain to avoid saturations if the optimal value is not in the
+ * range of supported gains.
+ *
+ * Return: 0 on success and a negative value if no gain was found.
+ */
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range)
+{
+ int ret;
+
+ *in_range = true;
+ ret = iio_gts_find_new_gain_by_old_gain_time(gts, old_gain, old_time,
+ new_time, new_gain);
+ if (*new_gain < 0)
+ return -EINVAL;
+
+ if (ret) {
+ *new_gain = iio_find_closest_gain_low(gts, *new_gain, in_range);
+ if (*new_gain < 0) {
+ *new_gain = iio_gts_get_min_gain(gts);
+ if (*new_gain < 0)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_by_gain_time_min, "IIO_GTS_HELPER");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
MODULE_DESCRIPTION("IIO light sensor gain-time-scale helpers");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 9050a59129e6..c174ebb7d5e6 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
+#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -989,6 +990,11 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
{
const struct iio_chan_spec_ext_info *ext_info;
+ if (!buf || offset_in_page(buf)) {
+ pr_err("iio: invalid ext_info read buffer\n");
+ return -EINVAL;
+ }
+
ext_info = iio_lookup_ext_info(chan, attr);
if (!ext_info)
return -EINVAL;
@@ -1014,6 +1020,11 @@ EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
+ if (!buf || offset_in_page(buf)) {
+ pr_err("iio: invalid label read buffer\n");
+ return -EINVAL;
+ }
+
return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 29ffa8491927..e34e551eef3e 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -329,22 +329,6 @@ config JSA1212
To compile this driver as a module, choose M here:
the module will be called jsa1212.
-config ROHM_BU27008
- tristate "ROHM BU27008 color (RGB+C/IR) sensor"
- depends on I2C
- select REGMAP_I2C
- select IIO_GTS_HELPER
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Enable support for the ROHM BU27008 color sensor.
- The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
- blue, clear and IR) with four configurable channels. Red and
- green being always available and two out of the rest three
- (blue, clear, IR) can be selected to be simultaneously measured.
- Typical application is adjusting LCD backlight of TVs,
- mobile phones and tablet PCs.
-
config ROHM_BU27034
tristate "ROHM BU27034 ambient light sensor"
depends on I2C
@@ -491,6 +475,19 @@ config OPT4001
If built as a dynamically linked module, it will be called
opt4001.
+config OPT4060
+ tristate "Texas Instruments OPT4060 RGBW Color Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say Y or M here, you get support for Texas Instruments
+ OPT4060 RGBW Color Sensor.
+
+ If built as a dynamically linked module, it will be called
+ opt4060.
+
config PA12203001
tristate "TXC PA12203001 light and proximity sensor"
depends on I2C
@@ -672,6 +669,7 @@ config VCNL4035
config VEML3235
tristate "VEML3235 ambient light sensor"
select REGMAP_I2C
+ select IIO_GTS_HELPER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML3235
@@ -683,6 +681,8 @@ config VEML3235
config VEML6030
tristate "VEML6030 and VEML6035 ambient light sensors"
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML6030
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index f14a37442712..11a4041b918a 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -42,8 +42,8 @@ obj-$(CONFIG_MAX44009) += max44009.o
obj-$(CONFIG_NOA1305) += noa1305.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_OPT4001) += opt4001.o
+obj-$(CONFIG_OPT4060) += opt4060.o
obj-$(CONFIG_PA12203001) += pa12203001.o
-obj-$(CONFIG_ROHM_BU27008) += rohm-bu27008.o
obj-$(CONFIG_ROHM_BU27034) += rohm-bu27034.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SI1133) += si1133.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index c1b43053fbc7..cf96e3dd8bc6 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -56,7 +56,7 @@ struct adjd_s311_data {
struct i2c_client *client;
struct {
s16 chans[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index be0068081ebb..37fffce35dd1 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -177,6 +177,12 @@ struct as73211_data {
BIT(AS73211_SCAN_INDEX_TEMP) | \
AS73211_SCAN_MASK_COLOR)
+static const unsigned long as73211_scan_masks[] = {
+ AS73211_SCAN_MASK_COLOR,
+ AS73211_SCAN_MASK_ALL,
+ 0
+};
+
static const struct iio_chan_spec as73211_channels[] = {
{
.type = IIO_TEMP,
@@ -636,7 +642,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
struct as73211_data *data = iio_priv(indio_dev);
struct {
__le16 chan[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int data_result, ret;
@@ -672,9 +678,12 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
/* AS73211 starts reading at address 2 */
ret = i2c_master_recv(data->client,
- (char *)&scan.chan[1], 3 * sizeof(scan.chan[1]));
+ (char *)&scan.chan[0], 3 * sizeof(scan.chan[0]));
if (ret < 0)
goto done;
+
+ /* Avoid pushing uninitialized data */
+ scan.chan[3] = 0;
}
if (data_result) {
@@ -682,9 +691,15 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
* Saturate all channels (in case of overflows). Temperature channel
* is not affected by overflows.
*/
- scan.chan[1] = cpu_to_le16(U16_MAX);
- scan.chan[2] = cpu_to_le16(U16_MAX);
- scan.chan[3] = cpu_to_le16(U16_MAX);
+ if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) {
+ scan.chan[1] = cpu_to_le16(U16_MAX);
+ scan.chan[2] = cpu_to_le16(U16_MAX);
+ scan.chan[3] = cpu_to_le16(U16_MAX);
+ } else {
+ scan.chan[0] = cpu_to_le16(U16_MAX);
+ scan.chan[1] = cpu_to_le16(U16_MAX);
+ scan.chan[2] = cpu_to_le16(U16_MAX);
+ }
}
iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
@@ -758,6 +773,7 @@ static int as73211_probe(struct i2c_client *client)
indio_dev->channels = data->spec_dev->channels;
indio_dev->num_channels = data->spec_dev->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = as73211_scan_masks;
ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
if (ret < 0)
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 63bf729df517..3b4056be54a0 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -739,7 +739,7 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
struct bh1745_data *data = iio_priv(indio_dev);
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u16 value;
int ret;
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index b6288dd25bbf..5b00ad2a014e 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -89,6 +89,15 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
chip->als_info = &cm3232_als_info_default;
+ /* Disable and reset device */
+ chip->regs_cmd = CM3232_CMD_ALS_DISABLE | CM3232_CMD_ALS_RESET;
+ ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
+ chip->regs_cmd);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "Error writing reg_cmd\n");
+ return ret;
+ }
+
/* Identify device */
ret = i2c_smbus_read_word_data(client, CM3232_REG_ADDR_ID);
if (ret < 0) {
@@ -99,15 +108,6 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
if ((ret & 0xFF) != chip->als_info->hw_id)
return -ENODEV;
- /* Disable and reset device */
- chip->regs_cmd = CM3232_CMD_ALS_DISABLE | CM3232_CMD_ALS_RESET;
- ret = i2c_smbus_write_byte_data(client, CM3232_REG_ADDR_CMD,
- chip->regs_cmd);
- if (ret < 0) {
- dev_err(&chip->client->dev, "Error writing reg_cmd\n");
- return ret;
- }
-
/* Register default value */
chip->regs_cmd = chip->als_info->regs_cmd_default;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index c83acbd78275..7ab64f5c623c 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -94,6 +94,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
if (chan->scan_index >= prox_state->num_channels)
return -EINVAL;
address = prox_state->channel2usage[chan->scan_index];
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index b176bf4c884b..326dc39e7929 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -54,7 +54,7 @@ struct isl29125_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 604f5f900a2e..669da0840eba 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1280,7 +1280,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
struct ltr501_data *data = iio_priv(indio_dev);
struct {
u16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
__le16 als_buf[2];
u8 mask = 0;
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index b935976871a6..e8b767680133 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -78,7 +78,7 @@ struct max44000_data {
/* Ensure naturally aligned timestamp */
struct {
u16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/opt4060.c b/drivers/iio/light/opt4060.c
new file mode 100644
index 000000000000..ab55f8d2ea0c
--- /dev/null
+++ b/drivers/iio/light/opt4060.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Axis Communications AB
+ *
+ * Datasheet: https://www.ti.com/lit/gpn/opt4060
+ *
+ * Device driver for the Texas Instruments OPT4060 RGBW Color Sensor.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/math64.h>
+#include <linux/units.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* OPT4060 register set */
+#define OPT4060_RED_MSB 0x00
+#define OPT4060_RED_LSB 0x01
+#define OPT4060_GREEN_MSB 0x02
+#define OPT4060_GREEN_LSB 0x03
+#define OPT4060_BLUE_MSB 0x04
+#define OPT4060_BLUE_LSB 0x05
+#define OPT4060_CLEAR_MSB 0x06
+#define OPT4060_CLEAR_LSB 0x07
+#define OPT4060_THRESHOLD_LOW 0x08
+#define OPT4060_THRESHOLD_HIGH 0x09
+#define OPT4060_CTRL 0x0a
+#define OPT4060_INT_CTRL 0x0b
+#define OPT4060_RES_CTRL 0x0c
+#define OPT4060_DEVICE_ID 0x11
+
+/* OPT4060 register mask */
+#define OPT4060_EXPONENT_MASK GENMASK(15, 12)
+#define OPT4060_MSB_MASK GENMASK(11, 0)
+#define OPT4060_LSB_MASK GENMASK(15, 8)
+#define OPT4060_COUNTER_MASK GENMASK(7, 4)
+#define OPT4060_CRC_MASK GENMASK(3, 0)
+
+/* OPT4060 device id mask */
+#define OPT4060_DEVICE_ID_MASK GENMASK(11, 0)
+
+/* OPT4060 control register masks */
+#define OPT4060_CTRL_QWAKE_MASK BIT(15)
+#define OPT4060_CTRL_RANGE_MASK GENMASK(13, 10)
+#define OPT4060_CTRL_CONV_TIME_MASK GENMASK(9, 6)
+#define OPT4060_CTRL_OPER_MODE_MASK GENMASK(5, 4)
+#define OPT4060_CTRL_LATCH_MASK BIT(3)
+#define OPT4060_CTRL_INT_POL_MASK BIT(2)
+#define OPT4060_CTRL_FAULT_COUNT_MASK GENMASK(1, 0)
+
+/* OPT4060 interrupt control register masks */
+#define OPT4060_INT_CTRL_THRESH_SEL GENMASK(6, 5)
+#define OPT4060_INT_CTRL_OUTPUT BIT(4)
+#define OPT4060_INT_CTRL_INT_CFG GENMASK(3, 2)
+#define OPT4060_INT_CTRL_THRESHOLD 0x0
+#define OPT4060_INT_CTRL_NEXT_CH 0x1
+#define OPT4060_INT_CTRL_ALL_CH 0x3
+
+/* OPT4060 result control register masks */
+#define OPT4060_RES_CTRL_OVERLOAD BIT(3)
+#define OPT4060_RES_CTRL_CONV_READY BIT(2)
+#define OPT4060_RES_CTRL_FLAG_H BIT(1)
+#define OPT4060_RES_CTRL_FLAG_L BIT(0)
+
+/* OPT4060 constants */
+#define OPT4060_DEVICE_ID_VAL 0x821
+
+/* OPT4060 operating modes */
+#define OPT4060_CTRL_OPER_MODE_OFF 0x0
+#define OPT4060_CTRL_OPER_MODE_FORCED 0x1
+#define OPT4060_CTRL_OPER_MODE_ONE_SHOT 0x2
+#define OPT4060_CTRL_OPER_MODE_CONTINUOUS 0x3
+
+/* OPT4060 conversion control register definitions */
+#define OPT4060_CTRL_CONVERSION_0_6MS 0x0
+#define OPT4060_CTRL_CONVERSION_1MS 0x1
+#define OPT4060_CTRL_CONVERSION_1_8MS 0x2
+#define OPT4060_CTRL_CONVERSION_3_4MS 0x3
+#define OPT4060_CTRL_CONVERSION_6_5MS 0x4
+#define OPT4060_CTRL_CONVERSION_12_7MS 0x5
+#define OPT4060_CTRL_CONVERSION_25MS 0x6
+#define OPT4060_CTRL_CONVERSION_50MS 0x7
+#define OPT4060_CTRL_CONVERSION_100MS 0x8
+#define OPT4060_CTRL_CONVERSION_200MS 0x9
+#define OPT4060_CTRL_CONVERSION_400MS 0xa
+#define OPT4060_CTRL_CONVERSION_800MS 0xb
+
+/* OPT4060 fault count control register definitions */
+#define OPT4060_CTRL_FAULT_COUNT_1 0x0
+#define OPT4060_CTRL_FAULT_COUNT_2 0x1
+#define OPT4060_CTRL_FAULT_COUNT_4 0x2
+#define OPT4060_CTRL_FAULT_COUNT_8 0x3
+
+/* OPT4060 scale light level range definitions */
+#define OPT4060_CTRL_LIGHT_SCALE_AUTO 12
+
+/* OPT4060 default values */
+#define OPT4060_DEFAULT_CONVERSION_TIME OPT4060_CTRL_CONVERSION_50MS
+
+/*
+ * enum opt4060_chan_type - OPT4060 channel types
+ * @OPT4060_RED: Red channel.
+ * @OPT4060_GREEN: Green channel.
+ * @OPT4060_BLUE: Blue channel.
+ * @OPT4060_CLEAR: Clear (white) channel.
+ * @OPT4060_ILLUM: Calculated illuminance channel.
+ * @OPT4060_NUM_CHANS: Number of channel types.
+ */
+enum opt4060_chan_type {
+ OPT4060_RED,
+ OPT4060_GREEN,
+ OPT4060_BLUE,
+ OPT4060_CLEAR,
+ OPT4060_ILLUM,
+ OPT4060_NUM_CHANS
+};
+
+struct opt4060_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+ u8 int_time;
+ int irq;
+ /*
+ * Mutex for protecting sensor irq settings. Switching between interrupt
+ * on each sample and on thresholds needs to be synchronized.
+ */
+ struct mutex irq_setting_lock;
+ /*
+ * Mutex for protecting event enabling.
+ */
+ struct mutex event_enabling_lock;
+ struct completion completion;
+ bool thresh_event_lo_active;
+ bool thresh_event_hi_active;
+};
+
+struct opt4060_channel_factor {
+ u32 mul;
+ u32 div;
+};
+
+static const int opt4060_int_time_available[][2] = {
+ { 0, 600 },
+ { 0, 1000 },
+ { 0, 1800 },
+ { 0, 3400 },
+ { 0, 6500 },
+ { 0, 12700 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 400000 },
+ { 0, 800000 },
+};
+
+/*
+ * Conversion time is the integration time plus the time to set the register;
+ * this is used as the integration time.
+ */
+static const int opt4060_int_time_reg[][2] = {
+ { 600, OPT4060_CTRL_CONVERSION_0_6MS },
+ { 1000, OPT4060_CTRL_CONVERSION_1MS },
+ { 1800, OPT4060_CTRL_CONVERSION_1_8MS },
+ { 3400, OPT4060_CTRL_CONVERSION_3_4MS },
+ { 6500, OPT4060_CTRL_CONVERSION_6_5MS },
+ { 12700, OPT4060_CTRL_CONVERSION_12_7MS },
+ { 25000, OPT4060_CTRL_CONVERSION_25MS },
+ { 50000, OPT4060_CTRL_CONVERSION_50MS },
+ { 100000, OPT4060_CTRL_CONVERSION_100MS },
+ { 200000, OPT4060_CTRL_CONVERSION_200MS },
+ { 400000, OPT4060_CTRL_CONVERSION_400MS },
+ { 800000, OPT4060_CTRL_CONVERSION_800MS },
+};
+
+static int opt4060_als_time_to_index(const u32 als_integration_time)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt4060_int_time_available); i++) {
+ if (als_integration_time == opt4060_int_time_available[i][1])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static u8 opt4060_calculate_crc(u8 exp, u32 mantissa, u8 count)
+{
+ u8 crc;
+
+ /*
+ * Calculates a 4-bit CRC from a 20-bit mantissa, 4-bit exponent and a 4-bit counter.
+ * crc[0] = XOR(mantissa[19:0], exp[3:0], count[3:0])
+ * crc[1] = XOR(mantissa[1,3,5,7,9,11,13,15,17,19], exp[1,3], count[1,3])
+ * crc[2] = XOR(mantissa[3,7,11,15,19], exp[3], count[3])
+ * crc[3] = XOR(mantissa[3,11,19])
+ */
+ crc = (hweight32(mantissa) + hweight32(exp) + hweight32(count)) % 2;
+ crc |= ((hweight32(mantissa & 0xAAAAA) + hweight32(exp & 0xA)
+ + hweight32(count & 0xA)) % 2) << 1;
+ crc |= ((hweight32(mantissa & 0x88888) + hweight32(exp & 0x8)
+ + hweight32(count & 0x8)) % 2) << 2;
+ crc |= (hweight32(mantissa & 0x80808) % 2) << 3;
+
+ return crc;
+}
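The same CRC can be written with XOR/parity, which makes the bit equations in the comment explicit; the following is an equivalent standalone user-space sketch (not from the patch), relying on the fact that a sum of popcounts modulo 2 is the XOR of the individual parities:

#include <stdint.h>

static uint8_t opt4060_crc_parity(uint8_t exp, uint32_t mantissa, uint8_t count)
{
	uint8_t crc;

	crc  =  __builtin_parity(mantissa) ^ __builtin_parity(exp) ^ __builtin_parity(count);
	crc |= (__builtin_parity(mantissa & 0xAAAAA) ^ __builtin_parity(exp & 0xA) ^
		__builtin_parity(count & 0xA)) << 1;
	crc |= (__builtin_parity(mantissa & 0x88888) ^ __builtin_parity(exp & 0x8) ^
		__builtin_parity(count & 0x8)) << 2;
	crc |=  __builtin_parity(mantissa & 0x80808) << 3;

	return crc;
}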
+
+static int opt4060_set_int_state(struct opt4060_chip *chip, u32 state)
+{
+ int ret;
+ unsigned int regval;
+
+ guard(mutex)(&chip->irq_setting_lock);
+
+ regval = FIELD_PREP(OPT4060_INT_CTRL_INT_CFG, state);
+ ret = regmap_update_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_INT_CFG, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set interrupt config\n");
+ return ret;
+}
+
+static int opt4060_set_sampling_mode(struct opt4060_chip *chip,
+ bool continuous)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &reg);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read ctrl register\n");
+ return ret;
+ }
+ reg &= ~OPT4060_CTRL_OPER_MODE_MASK;
+ if (continuous)
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_CONTINUOUS);
+ else
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_ONE_SHOT);
+
+ /*
+ * Trigger a new conversion by writing to the CTRL register. It is not
+ * possible to use regmap_update_bits() since that will only write when
+ * data is modified.
+ */
+ ret = regmap_write(chip->regmap, OPT4060_CTRL, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to set ctrl register\n");
+ return ret;
+}
+
+static bool opt4060_event_active(struct opt4060_chip *chip)
+{
+ return chip->thresh_event_lo_active || chip->thresh_event_hi_active;
+}
+
+static int opt4060_set_state_common(struct opt4060_chip *chip,
+ bool continuous_sampling,
+ bool continuous_irq)
+{
+ int ret = 0;
+
+ /* It is important to set up the irq before sampling to avoid missing samples. */
+ if (continuous_irq)
+ ret = opt4060_set_int_state(chip, OPT4060_INT_CTRL_ALL_CH);
+ else
+ ret = opt4060_set_int_state(chip, OPT4060_INT_CTRL_THRESHOLD);
+ if (ret) {
+ dev_err(chip->dev, "Failed to set irq state.\n");
+ return ret;
+ }
+
+ if (continuous_sampling || opt4060_event_active(chip))
+ ret = opt4060_set_sampling_mode(chip, true);
+ else
+ ret = opt4060_set_sampling_mode(chip, false);
+ if (ret)
+ dev_err(chip->dev, "Failed to set sampling state.\n");
+ return ret;
+}
+
+/*
+ * Function for setting the driver state for sampling and irq. Either direct
+ * mode of buffer mode will be claimed during the transition to prevent races
+ * between sysfs read, buffer or events.
+ */
+static int opt4060_set_driver_state(struct iio_dev *indio_dev,
+ bool continuous_sampling,
+ bool continuous_irq)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+any_mode_retry:
+ if (iio_device_claim_buffer_mode(indio_dev)) {
+ /*
+ * This one is a *bit* hacky. If we cannot claim buffer mode,
+ * then try direct mode so that we make sure things cannot
+ * concurrently change. And we just keep trying until we get one
+ * of the modes...
+ */
+ if (iio_device_claim_direct_mode(indio_dev))
+ goto any_mode_retry;
+ /*
+ * This path means that we managed to claim direct mode. In
+ * this case the buffer isn't enabled and it's okay to leave
+ * continuous mode for sampling and/or irq.
+ */
+ ret = opt4060_set_state_common(chip, continuous_sampling,
+ continuous_irq);
+ iio_device_release_direct_mode(indio_dev);
+ } else {
+ /*
+ * This path means that we managed to claim buffer mode. In
+ * this case the buffer is enabled and irq and sampling must go
+ * to or remain continuous, but only if the trigger is from this
+ * device.
+ */
+ if (!iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
+ ret = opt4060_set_state_common(chip, true, true);
+ else
+ ret = opt4060_set_state_common(chip, continuous_sampling,
+ continuous_irq);
+ iio_device_release_buffer_mode(indio_dev);
+ }
+ return ret;
+}
+
+/*
+ * This function is called with framework mutex locked.
+ */
+static int opt4060_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ return opt4060_set_state_common(chip, state, state);
+}
+
+static int opt4060_read_raw_value(struct opt4060_chip *chip,
+ unsigned long address, u32 *raw)
+{
+ int ret;
+ u16 result[2];
+ u32 mantissa_raw;
+ u16 msb, lsb;
+ u8 exp, count, crc, calc_crc;
+
+ ret = regmap_bulk_read(chip->regmap, address, result, 2);
+ if (ret) {
+ dev_err(chip->dev, "Reading channel data failed\n");
+ return ret;
+ }
+ exp = FIELD_GET(OPT4060_EXPONENT_MASK, result[0]);
+ msb = FIELD_GET(OPT4060_MSB_MASK, result[0]);
+ count = FIELD_GET(OPT4060_COUNTER_MASK, result[1]);
+ crc = FIELD_GET(OPT4060_CRC_MASK, result[1]);
+ lsb = FIELD_GET(OPT4060_LSB_MASK, result[1]);
+ mantissa_raw = (msb << 8) + lsb;
+ calc_crc = opt4060_calculate_crc(exp, mantissa_raw, count);
+ if (calc_crc != crc)
+ return -EIO;
+ *raw = mantissa_raw << exp;
+ return 0;
+}
+
+static int opt4060_trigger_new_samples(struct iio_dev *indio_dev)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * The conversion time should be 500us startup time plus the integration time
+ * times the number of channels. An exact timeout isn't critical; it's better
+ * not to log spurious timeout errors. Set the timeout to double the
+ * theoretical time plus an extra 100 ms margin.
+ */
+ unsigned int timeout_us = (500 + OPT4060_NUM_CHANS *
+ opt4060_int_time_reg[chip->int_time][0]) * 2 + 100000;
+
+ /* Setting the state in one shot mode with irq on each sample. */
+ ret = opt4060_set_driver_state(indio_dev, false, true);
+ if (ret)
+ return ret;
+
+ if (chip->irq) {
+ guard(mutex)(&chip->irq_setting_lock);
+ reinit_completion(&chip->completion);
+ if (wait_for_completion_timeout(&chip->completion,
+ usecs_to_jiffies(timeout_us)) == 0) {
+ dev_err(chip->dev, "Completion timed out.\n");
+ return -ETIME;
+ }
+ } else {
+ unsigned int ready;
+
+ ret = regmap_read_poll_timeout(chip->regmap, OPT4060_RES_CTRL,
+ ready, (ready & OPT4060_RES_CTRL_CONV_READY),
+ 1000, timeout_us);
+ if (ret)
+ dev_err(chip->dev, "Conversion ready did not finish within timeout.\n");
+ }
+ /* Setting the state in one shot mode with irq on thresholds. */
+ return opt4060_set_driver_state(indio_dev, false, false);
+}
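For a sense of the resulting wait time (worked example with the driver defaults): with the default 50 ms conversion time and the five entries of the channel enum, timeout_us evaluates to (500 + 5 * 50000) * 2 + 100000 = 601000, i.e. the completion wait or register poll above gives up after roughly 0.6 s.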
+
+static int opt4060_read_chan_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 adc_raw;
+ int ret;
+
+ ret = opt4060_trigger_new_samples(indio_dev);
+ if (ret) {
+ dev_err(chip->dev, "Failed to trigger new samples.\n");
+ return ret;
+ }
+
+ ret = opt4060_read_raw_value(chip, chan->address, &adc_raw);
+ if (ret) {
+ dev_err(chip->dev, "Reading raw channel data failed.\n");
+ return ret;
+ }
+ *val = adc_raw;
+ return IIO_VAL_INT;
+}
+
+/*
+ * Returns the scale values used for red, green and blue. Scales the raw value
+ * so that for a particular test light source, typically white, the measurement
+ * intensity is the same across different color channels.
+ */
+static int opt4060_get_chan_scale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ switch (chan->scan_index) {
+ case OPT4060_RED:
+ /* 2.4 */
+ *val = 2;
+ *val2 = 400000;
+ break;
+ case OPT4060_GREEN:
+ /* 1.0 */
+ *val = 1;
+ *val2 = 0;
+ break;
+ case OPT4060_BLUE:
+ /* 1.3 */
+ *val = 1;
+ *val2 = 300000;
+ break;
+ default:
+ dev_err(chip->dev, "Unexpected channel index.\n");
+ return -EINVAL;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int opt4060_calc_illuminance(struct opt4060_chip *chip, int *val)
+{
+ u32 lux_raw;
+ int ret;
+
+ /* The green wide spectral channel is used for illuminance. */
+ ret = opt4060_read_raw_value(chip, OPT4060_GREEN_MSB, &lux_raw);
+ if (ret) {
+ dev_err(chip->dev, "Reading raw channel data failed\n");
+ return ret;
+ }
+
+ /* Illuminance is calculated by ADC_RAW * 2.15e-3. */
+ *val = DIV_U64_ROUND_CLOSEST((u64)(lux_raw * 215), 1000);
+ return ret;
+}
+
+static int opt4060_read_illuminance(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ ret = opt4060_trigger_new_samples(indio_dev);
+ if (ret) {
+ dev_err(chip->dev, "Failed to trigger new samples.\n");
+ return ret;
+ }
+ ret = opt4060_calc_illuminance(chip, val);
+ if (ret) {
+ dev_err(chip->dev, "Failed to calculate illuminance.\n");
+ return ret;
+ }
+
+ return IIO_VAL_INT;
+}
+
+static int opt4060_set_int_time(struct opt4060_chip *chip)
+{
+ unsigned int regval;
+ int ret;
+
+ regval = FIELD_PREP(OPT4060_CTRL_CONV_TIME_MASK, chip->int_time);
+ ret = regmap_update_bits(chip->regmap, OPT4060_CTRL,
+ OPT4060_CTRL_CONV_TIME_MASK, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set integration time.\n");
+
+ return ret;
+}
+
+static int opt4060_power_down(struct opt4060_chip *chip)
+{
+ int ret;
+
+ ret = regmap_clear_bits(chip->regmap, OPT4060_CTRL, OPT4060_CTRL_OPER_MODE_MASK);
+ if (ret)
+ dev_err(chip->dev, "Failed to power down\n");
+
+ return ret;
+}
+
+static void opt4060_chip_off_action(void *chip)
+{
+ opt4060_power_down(chip);
+}
+
+#define _OPT4060_COLOR_CHANNEL(_color, _mask, _ev_spec, _num_ev_spec) \
+{ \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+ .info_mask_separate = _mask, \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .address = OPT4060_##_color##_MSB, \
+ .scan_index = OPT4060_##_color, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ .event_spec = _ev_spec, \
+ .num_event_specs = _num_ev_spec, \
+}
+
+#define OPT4060_COLOR_CHANNEL(_color, _mask) \
+ _OPT4060_COLOR_CHANNEL(_color, _mask, opt4060_event_spec, \
+ ARRAY_SIZE(opt4060_event_spec)) \
+
+#define OPT4060_COLOR_CHANNEL_NO_EVENTS(_color, _mask) \
+ _OPT4060_COLOR_CHANNEL(_color, _mask, NULL, 0) \
+
+#define OPT4060_LIGHT_CHANNEL(_channel) \
+{ \
+ .type = IIO_LIGHT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
+ .scan_index = OPT4060_##_channel, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_event_spec opt4060_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
+static const struct iio_chan_spec opt4060_channels[] = {
+ OPT4060_COLOR_CHANNEL(RED, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(GREEN, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(BLUE, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL(CLEAR, BIT(IIO_CHAN_INFO_RAW)),
+ OPT4060_LIGHT_CHANNEL(ILLUM),
+ IIO_CHAN_SOFT_TIMESTAMP(OPT4060_NUM_CHANS),
+};
+
+static const struct iio_chan_spec opt4060_channels_no_events[] = {
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(RED, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(GREEN, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(BLUE, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)),
+ OPT4060_COLOR_CHANNEL_NO_EVENTS(CLEAR, BIT(IIO_CHAN_INFO_RAW)),
+ OPT4060_LIGHT_CHANNEL(ILLUM),
+ IIO_CHAN_SOFT_TIMESTAMP(OPT4060_NUM_CHANS),
+};
+
+static int opt4060_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return opt4060_read_chan_raw(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return opt4060_get_chan_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_PROCESSED:
+ return opt4060_read_illuminance(indio_dev, chan, val);
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = opt4060_int_time_reg[chip->int_time][0];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int int_time;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ int_time = opt4060_als_time_to_index(val2);
+ if (int_time < 0)
+ return int_time;
+ chip->int_time = int_time;
+ return opt4060_set_int_time(chip);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 opt4060_calc_th_reg(u32 adc_val)
+{
+ u32 th_val, th_exp, bits;
+ /*
+ * The threshold registers take 4 bits of exponent and 12 bits of data
+ * ADC = TH_VAL << (8 + TH_EXP)
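+ *
+ * For example, adc_val = 0x345678 has fls() = 22, so TH_EXP = 2 and
+ * TH_VAL = (0x345678 >> 10) & 0xfff = 0xd15, giving the register value
+ * (2 << 12) + 0xd15 = 0x2d15.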
+ */
+ bits = fls(adc_val);
+
+ if (bits > 31)
+ th_exp = 11; /* Maximum exponent */
+ else if (bits > 20)
+ th_exp = bits - 20;
+ else
+ th_exp = 0;
+ th_val = (adc_val >> (8 + th_exp)) & 0xfff;
+
+ return (th_exp << 12) + th_val;
+}
+
+static u32 opt4060_calc_val_from_th_reg(u32 th_reg)
+{
+ /*
+ * The threshold registers take 4 bits of exponent and 12 bits of data
+ * ADC = TH_VAL << (8 + TH_EXP)
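+ *
+ * E.g. register value 0x2d15 decodes to 0xd15 << (8 + 2) = 0x345400.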
+ */
+ u32 th_val, th_exp;
+
+ th_exp = (th_reg >> 12) & 0xf;
+ th_val = th_reg & 0xfff;
+
+ return th_val << (8 + th_exp);
+}
+
+static int opt4060_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(opt4060_int_time_available) * 2;
+ *vals = (const int *)opt4060_int_time_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t opt4060_read_ev_period(struct opt4060_chip *chip, int *val,
+ int *val2)
+{
+ int ret, pers, fault_count, int_time;
+ u64 uval;
+
+ int_time = opt4060_int_time_reg[chip->int_time][0];
+
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &fault_count);
+ if (ret < 0)
+ return ret;
+
+ fault_count = fault_count & OPT4060_CTRL_FAULT_COUNT_MASK;
+ switch (fault_count) {
+ case OPT4060_CTRL_FAULT_COUNT_2:
+ pers = 2;
+ break;
+ case OPT4060_CTRL_FAULT_COUNT_4:
+ pers = 4;
+ break;
+ case OPT4060_CTRL_FAULT_COUNT_8:
+ pers = 8;
+ break;
+
+ default:
+ pers = 1;
+ break;
+ }
+
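+ /*
+ * The reported period is the fault count times the integration time,
+ * split into seconds and microseconds. E.g. a 100000 us integration
+ * time with a fault count of 4 is reported as 0 s + 400000 us.
+ */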
+ uval = mul_u32_u32(int_time, pers);
+ *val = div_u64_rem(uval, MICRO, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static ssize_t opt4060_write_ev_period(struct opt4060_chip *chip, int val,
+ int val2)
+{
+ u64 uval, int_time;
+ unsigned int regval, fault_count_val;
+
+ uval = mul_u32_u32(val, MICRO) + val2;
+ int_time = opt4060_int_time_reg[chip->int_time][0];
+
+ /* Round the requested period up to 1, 2, 4 or 8 times the integration time. */
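+ /* E.g. 250 ms with a 100 ms integration time selects the 4x fault count. */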
+ if (uval <= int_time)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_1;
+ else if (uval <= int_time * 2)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_2;
+ else if (uval <= int_time * 4)
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_4;
+ else
+ fault_count_val = OPT4060_CTRL_FAULT_COUNT_8;
+
+ regval = FIELD_PREP(OPT4060_CTRL_FAULT_COUNT_MASK, fault_count_val);
+ return regmap_update_bits(chip->regmap, OPT4060_CTRL,
+ OPT4060_CTRL_FAULT_COUNT_MASK, regval);
+}
+
+static int opt4060_get_channel_sel(struct opt4060_chip *chip, int *ch_sel)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(chip->regmap, OPT4060_INT_CTRL, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to get channel selection.\n");
+ return ret;
+ }
+ *ch_sel = FIELD_GET(OPT4060_INT_CTRL_THRESH_SEL, regval);
+ return ret;
+}
+
+static int opt4060_set_channel_sel(struct opt4060_chip *chip, int ch_sel)
+{
+ int ret;
+ u32 regval;
+
+ regval = FIELD_PREP(OPT4060_INT_CTRL_THRESH_SEL, ch_sel);
+ ret = regmap_update_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_THRESH_SEL, regval);
+ if (ret)
+ dev_err(chip->dev, "Failed to set channel selection.\n");
+ return ret;
+}
+
+static int opt4060_get_thresholds(struct opt4060_chip *chip, u32 *th_lo, u32 *th_hi)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(chip->regmap, OPT4060_THRESHOLD_LOW, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read THRESHOLD_LOW.\n");
+ return ret;
+ }
+ *th_lo = opt4060_calc_val_from_th_reg(regval);
+
+ ret = regmap_read(chip->regmap, OPT4060_THRESHOLD_HIGH, &regval);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read THRESHOLD_LOW.\n");
+ return ret;
+ }
+ *th_hi = opt4060_calc_val_from_th_reg(regval);
+
+ return ret;
+}
+
+static int opt4060_set_thresholds(struct opt4060_chip *chip, u32 th_lo, u32 th_hi)
+{
+ int ret;
+
+ ret = regmap_write(chip->regmap, OPT4060_THRESHOLD_LOW, opt4060_calc_th_reg(th_lo));
+ if (ret) {
+ dev_err(chip->dev, "Failed to write THRESHOLD_LOW.\n");
+ return ret;
+ }
+
+ ret = regmap_write(chip->regmap, OPT4060_THRESHOLD_HIGH, opt4060_calc_th_reg(th_hi));
+ if (ret)
+ dev_err(chip->dev, "Failed to write THRESHOLD_HIGH.\n");
+
+ return ret;
+}
+
+static int opt4060_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 th_lo, th_hi;
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = opt4060_get_thresholds(chip, &th_lo, &th_hi);
+ if (ret)
+ return ret;
+ if (dir == IIO_EV_DIR_FALLING) {
+ *val = th_lo;
+ ret = IIO_VAL_INT;
+ } else if (dir == IIO_EV_DIR_RISING) {
+ *val = th_hi;
+ ret = IIO_VAL_INT;
+ }
+ return ret;
+ case IIO_EV_INFO_PERIOD:
+ return opt4060_read_ev_period(chip, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ u32 th_lo, th_hi;
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = opt4060_get_thresholds(chip, &th_lo, &th_hi);
+ if (ret)
+ return ret;
+ if (dir == IIO_EV_DIR_FALLING)
+ th_lo = val;
+ else if (dir == IIO_EV_DIR_RISING)
+ th_hi = val;
+ return opt4060_set_thresholds(chip, th_lo, th_hi);
+ case IIO_EV_INFO_PERIOD:
+ return opt4060_write_ev_period(chip, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int opt4060_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ch_sel, ch_idx = chan->scan_index;
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = opt4060_get_channel_sel(chip, &ch_sel);
+ if (ret)
+ return ret;
+
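+ /*
+ * Report the event as enabled only if its direction is active and the
+ * threshold channel selection points at this channel.
+ */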
+ if (((dir == IIO_EV_DIR_FALLING) && chip->thresh_event_lo_active) ||
+ ((dir == IIO_EV_DIR_RISING) && chip->thresh_event_hi_active))
+ return ch_sel == ch_idx;
+
+ return ret;
+}
+
+static int opt4060_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ int ch_sel, ch_idx = chan->scan_index;
+ struct opt4060_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&chip->event_enabling_lock);
+
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = opt4060_get_channel_sel(chip, &ch_sel);
+ if (ret)
+ return ret;
+
+ if (state) {
+ /* Only one channel can be active at the same time */
+ if ((chip->thresh_event_lo_active || chip->thresh_event_hi_active) &&
+ (ch_idx != ch_sel))
+ return -EBUSY;
+ if (dir == IIO_EV_DIR_FALLING)
+ chip->thresh_event_lo_active = true;
+ else if (dir == IIO_EV_DIR_RISING)
+ chip->thresh_event_hi_active = true;
+ ret = opt4060_set_channel_sel(chip, ch_idx);
+ if (ret)
+ return ret;
+ } else {
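+ /* Only clear the active flags if this channel owns the threshold selection. */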
+ if (ch_idx == ch_sel) {
+ if (dir == IIO_EV_DIR_FALLING)
+ chip->thresh_event_lo_active = false;
+ else if (dir == IIO_EV_DIR_RISING)
+ chip->thresh_event_hi_active = false;
+ }
+ }
+
+ return opt4060_set_driver_state(indio_dev,
+ chip->thresh_event_hi_active |
+ chip->thresh_event_lo_active,
+ false);
+}
+
+static const struct iio_info opt4060_info = {
+ .read_raw = opt4060_read_raw,
+ .write_raw = opt4060_write_raw,
+ .write_raw_get_fmt = opt4060_write_raw_get_fmt,
+ .read_avail = opt4060_read_available,
+ .read_event_value = opt4060_read_event,
+ .write_event_value = opt4060_write_event,
+ .read_event_config = opt4060_read_event_config,
+ .write_event_config = opt4060_write_event_config,
+};
+
+static const struct iio_info opt4060_info_no_irq = {
+ .read_raw = opt4060_read_raw,
+ .write_raw = opt4060_write_raw,
+ .write_raw_get_fmt = opt4060_write_raw_get_fmt,
+ .read_avail = opt4060_read_available,
+};
+
+static int opt4060_load_defaults(struct opt4060_chip *chip)
+{
+ u16 reg;
+ int ret;
+
+ chip->int_time = OPT4060_DEFAULT_CONVERSION_TIME;
+
+ /* Set initial MIN/MAX thresholds */
+ ret = opt4060_set_thresholds(chip, 0, UINT_MAX);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting auto-range, latched window for thresholds, one-shot conversion
+ * and quick wake-up mode as default.
+ */
+ reg = FIELD_PREP(OPT4060_CTRL_RANGE_MASK,
+ OPT4060_CTRL_LIGHT_SCALE_AUTO);
+ reg |= FIELD_PREP(OPT4060_CTRL_CONV_TIME_MASK, chip->int_time);
+ reg |= FIELD_PREP(OPT4060_CTRL_OPER_MODE_MASK,
+ OPT4060_CTRL_OPER_MODE_ONE_SHOT);
+ reg |= OPT4060_CTRL_QWAKE_MASK | OPT4060_CTRL_LATCH_MASK;
+
+ ret = regmap_write(chip->regmap, OPT4060_CTRL, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to set configuration\n");
+
+ return ret;
+}
+
+static bool opt4060_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg <= OPT4060_CLEAR_LSB || reg == OPT4060_RES_CTRL;
+}
+
+static bool opt4060_writable_reg(struct device *dev, unsigned int reg)
+{
+ return reg >= OPT4060_THRESHOLD_LOW || reg >= OPT4060_INT_CTRL;
+}
+
+static bool opt4060_readonly_reg(struct device *dev, unsigned int reg)
+{
+ return reg == OPT4060_DEVICE_ID;
+}
+
+static bool opt4060_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* Volatile, writable and read-only registers are readable. */
+ return opt4060_volatile_reg(dev, reg) || opt4060_writable_reg(dev, reg) ||
+ opt4060_readonly_reg(dev, reg);
+}
+
+static const struct regmap_config opt4060_regmap_config = {
+ .name = "opt4060",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = OPT4060_DEVICE_ID,
+ .readable_reg = opt4060_readable_reg,
+ .writeable_reg = opt4060_writable_reg,
+ .volatile_reg = opt4060_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct iio_trigger_ops opt4060_trigger_ops = {
+ .set_trigger_state = opt4060_trigger_set_state,
+};
+
+static irqreturn_t opt4060_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct opt4060_chip *chip = iio_priv(idev);
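+ /* Scan buffer: one 32-bit slot per channel plus an aligned timestamp. */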
+ struct {
+ u32 chan[OPT4060_NUM_CHANS];
+ aligned_s64 ts;
+ } raw;
+ int i = 0;
+ int chan, ret;
+
+ /* If the trigger is not from this driver, a new sample is needed. */
+ if (iio_trigger_validate_own_device(idev->trig, idev))
+ opt4060_trigger_new_samples(idev);
+
+ memset(&raw, 0, sizeof(raw));
+
+ iio_for_each_active_channel(idev, chan) {
+ if (chan == OPT4060_ILLUM)
+ ret = opt4060_calc_illuminance(chip, &raw.chan[i++]);
+ else
+ ret = opt4060_read_raw_value(chip,
+ idev->channels[chan].address,
+ &raw.chan[i++]);
+ if (ret) {
+ dev_err(chip->dev, "Reading channel data failed\n");
+ goto err_read;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
+err_read:
+ iio_trigger_notify_done(idev->trig);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t opt4060_irq_thread(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct opt4060_chip *chip = iio_priv(idev);
+ int ret, dummy;
+ unsigned int int_res;
+
+ ret = regmap_read(chip->regmap, OPT4060_RES_CTRL, &int_res);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read interrupt reasons.\n");
+ return IRQ_NONE;
+ }
+
+ /* Read OPT4060_CTRL to clear interrupt */
+ ret = regmap_read(chip->regmap, OPT4060_CTRL, &dummy);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to clear interrupt\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle events */
+ if (int_res & (OPT4060_RES_CTRL_FLAG_H | OPT4060_RES_CTRL_FLAG_L)) {
+ u64 code;
+ int chan = 0;
+
+ ret = opt4060_get_channel_sel(chip, &chan);
+ if (ret) {
+ dev_err(chip->dev, "Failed to read threshold channel.\n");
+ return IRQ_NONE;
+ }
+
+ /* Check if the interrupt is from the lower threshold */
+ if (int_res & OPT4060_RES_CTRL_FLAG_L) {
+ code = IIO_MOD_EVENT_CODE(IIO_INTENSITY,
+ chan,
+ idev->channels[chan].channel2,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING);
+ iio_push_event(idev, code, iio_get_time_ns(idev));
+ }
+ /* Check if the interrupt is from the upper threshold */
+ if (int_res & OPT4060_RES_CTRL_FLAG_H) {
+ code = IIO_MOD_EVENT_CODE(IIO_INTENSITY,
+ chan,
+ idev->channels[chan].channel2,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING);
+ iio_push_event(idev, code, iio_get_time_ns(idev));
+ }
+ }
+
+ /* Handle conversion ready */
+ if (int_res & OPT4060_RES_CTRL_CONV_READY) {
+ /* Signal completion for potentially waiting reads */
+ complete(&chip->completion);
+
+ /* Handle data ready triggers */
+ if (iio_buffer_enabled(idev))
+ iio_trigger_poll_nested(chip->trig);
+ }
+ return IRQ_HANDLED;
+}
+
+static int opt4060_setup_buffer(struct opt4060_chip *chip, struct iio_dev *idev)
+{
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(chip->dev, idev,
+ &iio_pollfunc_store_time,
+ opt4060_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Buffer setup failed.\n");
+ return ret;
+}
+
+static int opt4060_setup_trigger(struct opt4060_chip *chip, struct iio_dev *idev)
+{
+ struct iio_trigger *data_trigger;
+ char *name;
+ int ret;
+
+ data_trigger = devm_iio_trigger_alloc(chip->dev, "%s-data-ready-dev%d",
+ idev->name, iio_device_id(idev));
+ if (!data_trigger)
+ return -ENOMEM;
+
+ /*
+ * The data trigger allows for sample capture on each new conversion
+ * ready interrupt.
+ */
+ chip->trig = data_trigger;
+ data_trigger->ops = &opt4060_trigger_ops;
+ iio_trigger_set_drvdata(data_trigger, idev);
+ ret = devm_iio_trigger_register(chip->dev, data_trigger);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Data ready trigger registration failed\n");
+
+ name = devm_kasprintf(chip->dev, GFP_KERNEL, "%s-opt4060",
+ dev_name(chip->dev));
+ if (!name)
+ return dev_err_probe(chip->dev, -ENOMEM, "Failed to alloc chip name\n");
+
+ ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL, opt4060_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, idev);
+ if (ret)
+ return dev_err_probe(chip->dev, ret, "Could not request IRQ\n");
+
+ init_completion(&chip->completion);
+
+ ret = devm_mutex_init(chip->dev, &chip->irq_setting_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(chip->dev, &chip->event_enabling_lock);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(chip->regmap, OPT4060_INT_CTRL,
+ OPT4060_INT_CTRL_OUTPUT,
+ OPT4060_INT_CTRL_OUTPUT);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "Failed to set interrupt as output\n");
+
+ return 0;
+}
+
+static int opt4060_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct opt4060_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+ unsigned int regval, dev_id;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable vdd supply\n");
+
+ chip->regmap = devm_regmap_init_i2c(client, &opt4060_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
+ "regmap initialization failed\n");
+
+ chip->dev = dev;
+ chip->irq = client->irq;
+
+ ret = regmap_reinit_cache(chip->regmap, &opt4060_regmap_config);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to reinit regmap cache\n");
+
+ ret = regmap_read(chip->regmap, OPT4060_DEVICE_ID, &regval);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to read the device ID register\n");
+
+ dev_id = FIELD_GET(OPT4060_DEVICE_ID_MASK, regval);
+ if (dev_id != OPT4060_DEVICE_ID_VAL)
+ dev_info(dev, "Device ID: %#04x unknown\n", dev_id);
+
+ if (chip->irq) {
+ indio_dev->info = &opt4060_info;
+ indio_dev->channels = opt4060_channels;
+ indio_dev->num_channels = ARRAY_SIZE(opt4060_channels);
+ } else {
+ indio_dev->info = &opt4060_info_no_irq;
+ indio_dev->channels = opt4060_channels_no_events;
+ indio_dev->num_channels = ARRAY_SIZE(opt4060_channels_no_events);
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = "opt4060";
+
+ ret = opt4060_load_defaults(chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to set sensor defaults\n");
+
+ ret = devm_add_action_or_reset(dev, opt4060_chip_off_action, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to setup power off action\n");
+
+ ret = opt4060_setup_buffer(chip, indio_dev);
+ if (ret)
+ return ret;
+
+ if (chip->irq) {
+ ret = opt4060_setup_trigger(chip, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id opt4060_id[] = {
+ { "opt4060", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, opt4060_id);
+
+static const struct of_device_id opt4060_of_match[] = {
+ { .compatible = "ti,opt4060" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, opt4060_of_match);
+
+static struct i2c_driver opt4060_driver = {
+ .driver = {
+ .name = "opt4060",
+ .of_match_table = opt4060_of_match,
+ },
+ .probe = opt4060_probe,
+ .id_table = opt4060_id,
+};
+module_i2c_driver(opt4060_driver);
+
+MODULE_AUTHOR("Per-Daniel Olsson <perdaniel.olsson@axis.com>");
+MODULE_DESCRIPTION("Texas Instruments OPT4060 RGBW color sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
deleted file mode 100644
index fa35dd32700c..000000000000
--- a/drivers/iio/light/rohm-bu27008.c
+++ /dev/null
@@ -1,1635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ROHM Colour Sensor driver for
- * - BU27008 RGBC sensor
- * - BU27010 RGBC + Flickering sensor
- *
- * Copyright (c) 2023, ROHM Semiconductor.
- */
-
-#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/property.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/units.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/iio-gts-helper.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-/*
- * A word about register address and mask definitions.
- *
- * At a quick glance to the data-sheet register tables, the BU27010 has all the
- * registers that the BU27008 has. On top of that the BU27010 adds couple of new
- * ones.
- *
- * So, all definitions BU27008_REG_* are there also for BU27010 but none of the
- * BU27010_REG_* are present on BU27008. This makes sense as BU27010 just adds
- * some features (Flicker FIFO, more power control) on top of the BU27008.
- *
- * Unfortunately, some of the wheel has been re-invented. Even though the names
- * of the registers have stayed the same, pretty much all of the functionality
- * provided by the registers has changed place. Contents of all MODE_CONTROL
- * registers on BU27008 and BU27010 are different.
- *
- * Chip-specific mapping from register addresses/bits to functionality is done
- * in bu27_chip_data structures.
- */
-#define BU27008_REG_SYSTEM_CONTROL 0x40
-#define BU27008_MASK_SW_RESET BIT(7)
-#define BU27008_MASK_PART_ID GENMASK(5, 0)
-#define BU27008_ID 0x1a
-#define BU27008_REG_MODE_CONTROL1 0x41
-#define BU27008_MASK_MEAS_MODE GENMASK(2, 0)
-#define BU27008_MASK_CHAN_SEL GENMASK(3, 2)
-
-#define BU27008_REG_MODE_CONTROL2 0x42
-#define BU27008_MASK_RGBC_GAIN GENMASK(7, 3)
-#define BU27008_MASK_IR_GAIN_LO GENMASK(2, 0)
-#define BU27008_SHIFT_IR_GAIN 3
-
-#define BU27008_REG_MODE_CONTROL3 0x43
-#define BU27008_MASK_VALID BIT(7)
-#define BU27008_MASK_INT_EN BIT(1)
-#define BU27008_INT_EN BU27008_MASK_INT_EN
-#define BU27008_INT_DIS 0
-#define BU27008_MASK_MEAS_EN BIT(0)
-#define BU27008_MEAS_EN BIT(0)
-#define BU27008_MEAS_DIS 0
-
-#define BU27008_REG_DATA0_LO 0x50
-#define BU27008_REG_DATA1_LO 0x52
-#define BU27008_REG_DATA2_LO 0x54
-#define BU27008_REG_DATA3_LO 0x56
-#define BU27008_REG_DATA3_HI 0x57
-#define BU27008_REG_MANUFACTURER_ID 0x92
-#define BU27008_REG_MAX BU27008_REG_MANUFACTURER_ID
-
-/* BU27010 specific definitions */
-
-#define BU27010_MASK_SW_RESET BIT(7)
-#define BU27010_ID 0x1b
-#define BU27010_REG_POWER 0x3e
-#define BU27010_MASK_POWER BIT(0)
-
-#define BU27010_REG_RESET 0x3f
-#define BU27010_MASK_RESET BIT(0)
-#define BU27010_RESET_RELEASE BU27010_MASK_RESET
-
-#define BU27010_MASK_MEAS_EN BIT(1)
-
-#define BU27010_MASK_CHAN_SEL GENMASK(7, 6)
-#define BU27010_MASK_MEAS_MODE GENMASK(5, 4)
-#define BU27010_MASK_RGBC_GAIN GENMASK(3, 0)
-
-#define BU27010_MASK_DATA3_GAIN GENMASK(7, 6)
-#define BU27010_MASK_DATA2_GAIN GENMASK(5, 4)
-#define BU27010_MASK_DATA1_GAIN GENMASK(3, 2)
-#define BU27010_MASK_DATA0_GAIN GENMASK(1, 0)
-
-#define BU27010_MASK_FLC_MODE BIT(7)
-#define BU27010_MASK_FLC_GAIN GENMASK(4, 0)
-
-#define BU27010_REG_MODE_CONTROL4 0x44
-/* If flicker is ever to be supported the IRQ must be handled as a field */
-#define BU27010_IRQ_DIS_ALL GENMASK(1, 0)
-#define BU27010_DRDY_EN BIT(0)
-#define BU27010_MASK_INT_SEL GENMASK(1, 0)
-
-#define BU27010_REG_MODE_CONTROL5 0x45
-#define BU27010_MASK_RGB_VALID BIT(7)
-#define BU27010_MASK_FLC_VALID BIT(6)
-#define BU27010_MASK_WAIT_EN BIT(3)
-#define BU27010_MASK_FIFO_EN BIT(2)
-#define BU27010_MASK_RGB_EN BIT(1)
-#define BU27010_MASK_FLC_EN BIT(0)
-
-#define BU27010_REG_DATA_FLICKER_LO 0x56
-#define BU27010_MASK_DATA_FLICKER_HI GENMASK(2, 0)
-#define BU27010_REG_FLICKER_COUNT 0x5a
-#define BU27010_REG_FIFO_LEVEL_LO 0x5b
-#define BU27010_MASK_FIFO_LEVEL_HI BIT(0)
-#define BU27010_REG_FIFO_DATA_LO 0x5d
-#define BU27010_REG_FIFO_DATA_HI 0x5e
-#define BU27010_MASK_FIFO_DATA_HI GENMASK(2, 0)
-#define BU27010_REG_MANUFACTURER_ID 0x92
-#define BU27010_REG_MAX BU27010_REG_MANUFACTURER_ID
-
-/**
- * enum bu27008_chan_type - BU27008 channel types
- * @BU27008_RED: Red channel. Always via data0.
- * @BU27008_GREEN: Green channel. Always via data1.
- * @BU27008_BLUE: Blue channel. Via data2 (when used).
- * @BU27008_CLEAR: Clear channel. Via data2 or data3 (when used).
- * @BU27008_IR: IR channel. Via data3 (when used).
- * @BU27008_LUX: Illuminance channel, computed using RGB and IR.
- * @BU27008_NUM_CHANS: Number of channel types.
- */
-enum bu27008_chan_type {
- BU27008_RED,
- BU27008_GREEN,
- BU27008_BLUE,
- BU27008_CLEAR,
- BU27008_IR,
- BU27008_LUX,
- BU27008_NUM_CHANS
-};
-
-/**
- * enum bu27008_chan - BU27008 physical data channel
- * @BU27008_DATA0: Always red.
- * @BU27008_DATA1: Always green.
- * @BU27008_DATA2: Blue or clear.
- * @BU27008_DATA3: IR or clear.
- * @BU27008_NUM_HW_CHANS: Number of physical channels
- */
-enum bu27008_chan {
- BU27008_DATA0,
- BU27008_DATA1,
- BU27008_DATA2,
- BU27008_DATA3,
- BU27008_NUM_HW_CHANS
-};
-
-/* We can always measure red and green at same time */
-#define ALWAYS_SCANNABLE (BIT(BU27008_RED) | BIT(BU27008_GREEN))
-
-/* We use these data channel configs. Ensure scan_masks below follow them too */
-#define BU27008_BLUE2_CLEAR3 0x0 /* buffer is R, G, B, C */
-#define BU27008_CLEAR2_IR3 0x1 /* buffer is R, G, C, IR */
-#define BU27008_BLUE2_IR3 0x2 /* buffer is R, G, B, IR */
-
-static const unsigned long bu27008_scan_masks[] = {
- /* buffer is R, G, B, C */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_CLEAR),
- /* buffer is R, G, C, IR */
- ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR),
- /* buffer is R, G, B, IR */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR),
- /* buffer is R, G, B, IR, LUX */
- ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR) | BIT(BU27008_LUX),
- 0
-};
-
-/*
- * Available scales with gain 1x - 1024x, timings 55, 100, 200, 400 mS
- * Time impacts to gain: 1x, 2x, 4x, 8x.
- *
- * => Max total gain is HWGAIN * gain by integration time (8 * 1024) = 8192
- *
- * Max amplification is (HWGAIN * MAX integration-time multiplier) 1024 * 8
- * = 8192. With NANO scale we get rid of accuracy loss when we start with the
- * scale 16.0 for HWGAIN1, INT-TIME 55 mS. This way the nano scale for MAX
- * total gain 8192 will be 1953125
- */
-#define BU27008_SCALE_1X 16
-
-/*
- * On BU27010 available scales with gain 1x - 4096x,
- * timings 55, 100, 200, 400 mS. Time impacts to gain: 1x, 2x, 4x, 8x.
- *
- * => Max total gain is HWGAIN * gain by integration time (8 * 4096)
- *
- * Using NANO precision for scale we must use scale 64x corresponding gain 1x
- * to avoid precision loss.
- */
-#define BU27010_SCALE_1X 64
-
-/* See the data sheet for the "Gain Setting" table */
-#define BU27008_GSEL_1X 0x00
-#define BU27008_GSEL_4X 0x08
-#define BU27008_GSEL_8X 0x09
-#define BU27008_GSEL_16X 0x0a
-#define BU27008_GSEL_32X 0x0b
-#define BU27008_GSEL_64X 0x0c
-#define BU27008_GSEL_256X 0x18
-#define BU27008_GSEL_512X 0x19
-#define BU27008_GSEL_1024X 0x1a
-
-static const struct iio_gain_sel_pair bu27008_gains[] = {
- GAIN_SCALE_GAIN(1, BU27008_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
- GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
- GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
- GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
- GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
- GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
-};
-
-static const struct iio_gain_sel_pair bu27008_gains_ir[] = {
- GAIN_SCALE_GAIN(2, BU27008_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27008_GSEL_4X),
- GAIN_SCALE_GAIN(8, BU27008_GSEL_8X),
- GAIN_SCALE_GAIN(16, BU27008_GSEL_16X),
- GAIN_SCALE_GAIN(32, BU27008_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27008_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27008_GSEL_256X),
- GAIN_SCALE_GAIN(512, BU27008_GSEL_512X),
- GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
-};
-
-#define BU27010_GSEL_1X 0x00 /* 000000 */
-#define BU27010_GSEL_4X 0x08 /* 001000 */
-#define BU27010_GSEL_16X 0x09 /* 001001 */
-#define BU27010_GSEL_64X 0x0e /* 001110 */
-#define BU27010_GSEL_256X 0x1e /* 011110 */
-#define BU27010_GSEL_1024X 0x2e /* 101110 */
-#define BU27010_GSEL_4096X 0x3f /* 111111 */
-
-static const struct iio_gain_sel_pair bu27010_gains[] = {
- GAIN_SCALE_GAIN(1, BU27010_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
- GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
- GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
- GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
-};
-
-static const struct iio_gain_sel_pair bu27010_gains_ir[] = {
- GAIN_SCALE_GAIN(2, BU27010_GSEL_1X),
- GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
- GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
- GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
- GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
- GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
-};
-
-#define BU27008_MEAS_MODE_100MS 0x00
-#define BU27008_MEAS_MODE_55MS 0x01
-#define BU27008_MEAS_MODE_200MS 0x02
-#define BU27008_MEAS_MODE_400MS 0x04
-
-#define BU27010_MEAS_MODE_100MS 0x00
-#define BU27010_MEAS_MODE_55MS 0x03
-#define BU27010_MEAS_MODE_200MS 0x01
-#define BU27010_MEAS_MODE_400MS 0x02
-
-#define BU27008_MEAS_TIME_MAX_MS 400
-
-static const struct iio_itime_sel_mul bu27008_itimes[] = {
- GAIN_SCALE_ITIME_US(400000, BU27008_MEAS_MODE_400MS, 8),
- GAIN_SCALE_ITIME_US(200000, BU27008_MEAS_MODE_200MS, 4),
- GAIN_SCALE_ITIME_US(100000, BU27008_MEAS_MODE_100MS, 2),
- GAIN_SCALE_ITIME_US(55000, BU27008_MEAS_MODE_55MS, 1),
-};
-
-static const struct iio_itime_sel_mul bu27010_itimes[] = {
- GAIN_SCALE_ITIME_US(400000, BU27010_MEAS_MODE_400MS, 8),
- GAIN_SCALE_ITIME_US(200000, BU27010_MEAS_MODE_200MS, 4),
- GAIN_SCALE_ITIME_US(100000, BU27010_MEAS_MODE_100MS, 2),
- GAIN_SCALE_ITIME_US(55000, BU27010_MEAS_MODE_55MS, 1),
-};
-
-/*
- * All the RGBC channels share the same gain.
- * IR gain can be fine-tuned from the gain set for the RGBC by 2 bit, but this
- * would yield quite complex gain setting. Especially since not all bit
- * compinations are supported. And in any case setting GAIN for RGBC will
- * always also change the IR-gain.
- *
- * On top of this, the selector '0' which corresponds to hw-gain 1X on RGBC,
- * corresponds to gain 2X on IR. Rest of the selctors correspond to same gains
- * though. This, however, makes it not possible to use shared gain for all
- * RGBC and IR settings even though they are all changed at the one go.
- */
-#define BU27008_CHAN(color, data, separate_avail) \
-{ \
- .type = IIO_INTENSITY, \
- .modified = 1, \
- .channel2 = IIO_MOD_LIGHT_##color, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate_available = (separate_avail), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
- .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), \
- .address = BU27008_REG_##data##_LO, \
- .scan_index = BU27008_##color, \
- .scan_type = { \
- .sign = 'u', \
- .realbits = 16, \
- .storagebits = 16, \
- .endianness = IIO_LE, \
- }, \
-}
-
-/* For raw reads we always configure DATA3 for CLEAR */
-static const struct iio_chan_spec bu27008_channels[] = {
- BU27008_CHAN(RED, DATA0, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(GREEN, DATA1, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(BLUE, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
- BU27008_CHAN(CLEAR, DATA2, BIT(IIO_CHAN_INFO_SCALE)),
- /*
- * We don't allow setting scale for IR (because of shared gain bits).
- * Hence we don't advertise available ones either.
- */
- BU27008_CHAN(IR, DATA3, 0),
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .channel = BU27008_LUX,
- .scan_index = BU27008_LUX,
- .scan_type = {
- .sign = 'u',
- .realbits = 64,
- .storagebits = 64,
- .endianness = IIO_CPU,
- },
- },
- IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
-};
-
-struct bu27008_data;
-
-struct bu27_chip_data {
- const char *name;
- int (*chip_init)(struct bu27008_data *data);
- int (*get_gain_sel)(struct bu27008_data *data, int *sel);
- int (*write_gain_sel)(struct bu27008_data *data, int sel);
- const struct regmap_config *regmap_cfg;
- const struct iio_gain_sel_pair *gains;
- const struct iio_gain_sel_pair *gains_ir;
- const struct iio_itime_sel_mul *itimes;
- int num_gains;
- int num_gains_ir;
- int num_itimes;
- int scale1x;
-
- int drdy_en_reg;
- int drdy_en_mask;
- int meas_en_reg;
- int meas_en_mask;
- int valid_reg;
- int chan_sel_reg;
- int chan_sel_mask;
- int int_time_mask;
- u8 part_id;
-};
-
-struct bu27008_data {
- const struct bu27_chip_data *cd;
- struct regmap *regmap;
- struct iio_trigger *trig;
- struct device *dev;
- struct iio_gts gts;
- struct iio_gts gts_ir;
- int irq;
-
- /*
- * Prevent changing gain/time config when scale is read/written.
- * Similarly, protect the integration_time read/change sequence.
- * Prevent changing gain/time when data is read.
- */
- struct mutex mutex;
-};
-
-static const struct regmap_range bu27008_volatile_ranges[] = {
- {
- .range_min = BU27008_REG_SYSTEM_CONTROL, /* SWRESET */
- .range_max = BU27008_REG_SYSTEM_CONTROL,
- }, {
- .range_min = BU27008_REG_MODE_CONTROL3, /* VALID */
- .range_max = BU27008_REG_MODE_CONTROL3,
- }, {
- .range_min = BU27008_REG_DATA0_LO, /* DATA */
- .range_max = BU27008_REG_DATA3_HI,
- },
-};
-
-static const struct regmap_range bu27010_volatile_ranges[] = {
- {
- .range_min = BU27010_REG_RESET, /* RSTB */
- .range_max = BU27008_REG_SYSTEM_CONTROL, /* RESET */
- }, {
- .range_min = BU27010_REG_MODE_CONTROL5, /* VALID bits */
- .range_max = BU27010_REG_MODE_CONTROL5,
- }, {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27010_REG_FIFO_DATA_HI,
- },
-};
-
-static const struct regmap_access_table bu27008_volatile_regs = {
- .yes_ranges = &bu27008_volatile_ranges[0],
- .n_yes_ranges = ARRAY_SIZE(bu27008_volatile_ranges),
-};
-
-static const struct regmap_access_table bu27010_volatile_regs = {
- .yes_ranges = &bu27010_volatile_ranges[0],
- .n_yes_ranges = ARRAY_SIZE(bu27010_volatile_ranges),
-};
-
-static const struct regmap_range bu27008_read_only_ranges[] = {
- {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27008_REG_DATA3_HI,
- }, {
- .range_min = BU27008_REG_MANUFACTURER_ID,
- .range_max = BU27008_REG_MANUFACTURER_ID,
- },
-};
-
-static const struct regmap_range bu27010_read_only_ranges[] = {
- {
- .range_min = BU27008_REG_DATA0_LO,
- .range_max = BU27010_REG_FIFO_DATA_HI,
- }, {
- .range_min = BU27010_REG_MANUFACTURER_ID,
- .range_max = BU27010_REG_MANUFACTURER_ID,
- }
-};
-
-static const struct regmap_access_table bu27008_ro_regs = {
- .no_ranges = &bu27008_read_only_ranges[0],
- .n_no_ranges = ARRAY_SIZE(bu27008_read_only_ranges),
-};
-
-static const struct regmap_access_table bu27010_ro_regs = {
- .no_ranges = &bu27010_read_only_ranges[0],
- .n_no_ranges = ARRAY_SIZE(bu27010_read_only_ranges),
-};
-
-static const struct regmap_config bu27008_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = BU27008_REG_MAX,
- .cache_type = REGCACHE_RBTREE,
- .volatile_table = &bu27008_volatile_regs,
- .wr_table = &bu27008_ro_regs,
- /*
- * All register writes are serialized by the mutex which protects the
- * scale setting/getting. This is needed because scale is combined by
- * gain and integration time settings and we need to ensure those are
- * not read / written when scale is being computed.
- *
- * As a result of this serializing, we don't need regmap locking. Note,
- * this is not true if we add any configurations which are not
- * serialized by the mutex and which may need for example a protected
- * read-modify-write cycle (eg. regmap_update_bits()). Please, revise
- * this when adding features to the driver.
- */
- .disable_locking = true,
-};
-
-static const struct regmap_config bu27010_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = BU27010_REG_MAX,
- .cache_type = REGCACHE_RBTREE,
- .volatile_table = &bu27010_volatile_regs,
- .wr_table = &bu27010_ro_regs,
- .disable_locking = true,
-};
-
-static int bu27008_write_gain_sel(struct bu27008_data *data, int sel)
-{
- int regval;
-
- regval = FIELD_PREP(BU27008_MASK_RGBC_GAIN, sel);
-
- /*
- * We do always set also the LOW bits of IR-gain because othervice we
- * would risk resulting an invalid GAIN register value.
- *
- * We could allow setting separate gains for RGBC and IR when the
- * values were such that HW could support both gain settings.
- * Eg, when the shared bits were same for both gain values.
- *
- * This, however, has a negligible benefit compared to the increased
- * software complexity when we would need to go through the gains
- * for both channels separately when the integration time changes.
- * This would end up with nasty logic for computing gain values for
- * both channels - and rejecting them if shared bits changed.
- *
- * We should then build the logic by guessing what a user prefers.
- * RGBC or IR gains correctly set while other jumps to odd value?
- * Maybe look-up a value where both gains are somehow optimized
- * <what this somehow is, is ATM unknown to us>. Or maybe user would
- * expect us to reject changes when optimal gains can't be set to both
- * channels w/given integration time. At best that would result
- * solution that works well for a very specific subset of
- * configurations but causes unexpected corner-cases.
- *
- * So, we keep it simple. Always set same selector to IR and RGBC.
- * We disallow setting IR (as I expect that most of the users are
- * interested in RGBC). This way we can show the user that the scales
- * for RGBC and IR channels are different (1X Vs 2X with sel 0) while
- * still keeping the operation deterministic.
- */
- regval |= FIELD_PREP(BU27008_MASK_IR_GAIN_LO, sel);
-
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL2,
- BU27008_MASK_RGBC_GAIN, regval);
-}
-
-static int bu27010_write_gain_sel(struct bu27008_data *data, int sel)
-{
- unsigned int regval;
- int ret, chan_selector;
-
- /*
- * Gain 'selector' is composed of two registers. Selector is 6bit value,
- * 4 high bits being the RGBC gain fieild in MODE_CONTROL1 register and
- * two low bits being the channel specific gain in MODE_CONTROL2.
- *
- * Let's take the 4 high bits of whole 6 bit selector, and prepare
- * the MODE_CONTROL1 value (RGBC gain part).
- */
- regval = FIELD_PREP(BU27010_MASK_RGBC_GAIN, (sel >> 2));
-
- ret = regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- BU27010_MASK_RGBC_GAIN, regval);
- if (ret)
- return ret;
-
- /*
- * Two low two bits of the selector must be written for all 4
- * channels in the MODE_CONTROL2 register. Copy these two bits for
- * all channels.
- */
- chan_selector = sel & GENMASK(1, 0);
-
- regval = FIELD_PREP(BU27010_MASK_DATA0_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA1_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA2_GAIN, chan_selector);
- regval |= FIELD_PREP(BU27010_MASK_DATA3_GAIN, chan_selector);
-
- return regmap_write(data->regmap, BU27008_REG_MODE_CONTROL2, regval);
-}
-
-static int bu27008_get_gain_sel(struct bu27008_data *data, int *sel)
-{
- int ret;
-
- /*
- * If we always "lock" the gain selectors for all channels to prevent
- * unsupported configs, then it does not matter which channel is used
- * we can just return selector from any of them.
- *
- * This, however is not true if we decide to support only 4X and 16X
- * and then individual gains for channels. Currently this is not the
- * case.
- *
- * If we some day decide to support individual gains, then we need to
- * have channel information here.
- */
-
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
- if (ret)
- return ret;
-
- *sel = FIELD_GET(BU27008_MASK_RGBC_GAIN, *sel);
-
- return 0;
-}
-
-static int bu27010_get_gain_sel(struct bu27008_data *data, int *sel)
-{
- int ret, tmp;
-
- /*
- * We always "lock" the gain selectors for all channels to prevent
- * unsupported configs. It does not matter which channel is used
- * we can just return selector from any of them.
- *
- * Read the channel0 gain.
- */
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
- if (ret)
- return ret;
-
- *sel = FIELD_GET(BU27010_MASK_DATA0_GAIN, *sel);
-
- /* Read the shared gain */
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &tmp);
- if (ret)
- return ret;
-
- /*
- * The gain selector is made as a combination of common RGBC gain and
- * the channel specific gain. The channel specific gain forms the low
- * bits of selector and RGBC gain is appended right after it.
- *
- * Compose the selector from channel0 gain and shared RGBC gain.
- */
- *sel |= FIELD_GET(BU27010_MASK_RGBC_GAIN, tmp) << fls(BU27010_MASK_DATA0_GAIN);
-
- return ret;
-}
-
-static int bu27008_chip_init(struct bu27008_data *data)
-{
- int ret;
-
- ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
- BU27008_MASK_SW_RESET, BU27008_MASK_SW_RESET);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
-
- /*
- * The data-sheet does not tell how long performing the IC reset takes.
- * However, the data-sheet says the minimum time it takes the IC to be
- * able to take inputs after power is applied, is 100 uS. I'd assume
- * > 1 mS is enough.
- */
- msleep(1);
-
- ret = regmap_reinit_cache(data->regmap, data->cd->regmap_cfg);
- if (ret)
- dev_err(data->dev, "Failed to reinit reg cache\n");
-
- return ret;
-}
-
-static int bu27010_chip_init(struct bu27008_data *data)
-{
- int ret;
-
- ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
- BU27010_MASK_SW_RESET, BU27010_MASK_SW_RESET);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
-
- msleep(1);
-
- /* Power ON*/
- ret = regmap_write_bits(data->regmap, BU27010_REG_POWER,
- BU27010_MASK_POWER, BU27010_MASK_POWER);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor power-on failed\n");
-
- msleep(1);
-
- /* Release blocks from reset */
- ret = regmap_write_bits(data->regmap, BU27010_REG_RESET,
- BU27010_MASK_RESET, BU27010_RESET_RELEASE);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor powering failed\n");
-
- msleep(1);
-
- /*
- * The IRQ enabling on BU27010 is done in a peculiar way. The IRQ
- * enabling is not a bit mask where individual IRQs could be enabled but
- * a field which values are:
- * 00 => IRQs disabled
- * 01 => Data-ready (RGBC/IR)
- * 10 => Data-ready (flicker)
- * 11 => Flicker FIFO
- *
- * So, only one IRQ can be enabled at a time and enabling for example
- * flicker FIFO would automagically disable data-ready IRQ.
- *
- * Currently the driver does not support the flicker. Hence, we can
- * just treat the RGBC data-ready as single bit which can be enabled /
- * disabled. This works for as long as the second bit in the field
- * stays zero. Here we ensure it gets zeroed.
- */
- return regmap_clear_bits(data->regmap, BU27010_REG_MODE_CONTROL4,
- BU27010_IRQ_DIS_ALL);
-}
-
-static const struct bu27_chip_data bu27010_chip = {
- .name = "bu27010",
- .chip_init = bu27010_chip_init,
- .get_gain_sel = bu27010_get_gain_sel,
- .write_gain_sel = bu27010_write_gain_sel,
- .regmap_cfg = &bu27010_regmap,
- .gains = &bu27010_gains[0],
- .gains_ir = &bu27010_gains_ir[0],
- .itimes = &bu27010_itimes[0],
- .num_gains = ARRAY_SIZE(bu27010_gains),
- .num_gains_ir = ARRAY_SIZE(bu27010_gains_ir),
- .num_itimes = ARRAY_SIZE(bu27010_itimes),
- .scale1x = BU27010_SCALE_1X,
- .drdy_en_reg = BU27010_REG_MODE_CONTROL4,
- .drdy_en_mask = BU27010_DRDY_EN,
- .meas_en_reg = BU27010_REG_MODE_CONTROL5,
- .meas_en_mask = BU27010_MASK_MEAS_EN,
- .valid_reg = BU27010_REG_MODE_CONTROL5,
- .chan_sel_reg = BU27008_REG_MODE_CONTROL1,
- .chan_sel_mask = BU27010_MASK_CHAN_SEL,
- .int_time_mask = BU27010_MASK_MEAS_MODE,
- .part_id = BU27010_ID,
-};
-
-static const struct bu27_chip_data bu27008_chip = {
- .name = "bu27008",
- .chip_init = bu27008_chip_init,
- .get_gain_sel = bu27008_get_gain_sel,
- .write_gain_sel = bu27008_write_gain_sel,
- .regmap_cfg = &bu27008_regmap,
- .gains = &bu27008_gains[0],
- .gains_ir = &bu27008_gains_ir[0],
- .itimes = &bu27008_itimes[0],
- .num_gains = ARRAY_SIZE(bu27008_gains),
- .num_gains_ir = ARRAY_SIZE(bu27008_gains_ir),
- .num_itimes = ARRAY_SIZE(bu27008_itimes),
- .scale1x = BU27008_SCALE_1X,
- .drdy_en_reg = BU27008_REG_MODE_CONTROL3,
- .drdy_en_mask = BU27008_MASK_INT_EN,
- .valid_reg = BU27008_REG_MODE_CONTROL3,
- .meas_en_reg = BU27008_REG_MODE_CONTROL3,
- .meas_en_mask = BU27008_MASK_MEAS_EN,
- .chan_sel_reg = BU27008_REG_MODE_CONTROL3,
- .chan_sel_mask = BU27008_MASK_CHAN_SEL,
- .int_time_mask = BU27008_MASK_MEAS_MODE,
- .part_id = BU27008_ID,
-};
-
-#define BU27008_MAX_VALID_RESULT_WAIT_US 50000
-#define BU27008_VALID_RESULT_WAIT_QUANTA_US 1000
-
-static int bu27008_chan_read_data(struct bu27008_data *data, int reg, int *val)
-{
- int ret, valid;
- __le16 tmp;
-
- ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
- valid, (valid & BU27008_MASK_VALID),
- BU27008_VALID_RESULT_WAIT_QUANTA_US,
- BU27008_MAX_VALID_RESULT_WAIT_US);
- if (ret)
- return ret;
-
- ret = regmap_bulk_read(data->regmap, reg, &tmp, sizeof(tmp));
- if (ret)
- dev_err(data->dev, "Reading channel data failed\n");
-
- *val = le16_to_cpu(tmp);
-
- return ret;
-}
-
-static int bu27008_get_gain(struct bu27008_data *data, struct iio_gts *gts, int *gain)
-{
- int ret, sel;
-
- ret = data->cd->get_gain_sel(data, &sel);
- if (ret)
- return ret;
-
- ret = iio_gts_find_gain_by_sel(gts, sel);
- if (ret < 0) {
- dev_err(data->dev, "unknown gain value 0x%x\n", sel);
- return ret;
- }
-
- *gain = ret;
-
- return 0;
-}
-
-static int bu27008_set_gain(struct bu27008_data *data, int gain)
-{
- int ret;
-
- ret = iio_gts_find_sel_by_gain(&data->gts, gain);
- if (ret < 0)
- return ret;
-
- return data->cd->write_gain_sel(data, ret);
-}
-
-static int bu27008_get_int_time_sel(struct bu27008_data *data, int *sel)
-{
- int ret, val;
-
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &val);
- if (ret)
- return ret;
-
- val &= data->cd->int_time_mask;
- val >>= ffs(data->cd->int_time_mask) - 1;
-
- *sel = val;
-
- return 0;
-}
-
-static int bu27008_set_int_time_sel(struct bu27008_data *data, int sel)
-{
- sel <<= ffs(data->cd->int_time_mask) - 1;
-
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- data->cd->int_time_mask, sel);
-}
-
-static int bu27008_get_int_time_us(struct bu27008_data *data)
-{
- int ret, sel;
-
- ret = bu27008_get_int_time_sel(data, &sel);
- if (ret)
- return ret;
-
- return iio_gts_find_int_time_by_sel(&data->gts, sel);
-}
-
-static int _bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
- int *val2)
-{
- struct iio_gts *gts;
- int gain, ret;
-
- if (ir)
- gts = &data->gts_ir;
- else
- gts = &data->gts;
-
- ret = bu27008_get_gain(data, gts, &gain);
- if (ret)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- return iio_gts_get_scale(gts, gain, ret, val, val2);
-}
-
-static int bu27008_get_scale(struct bu27008_data *data, bool ir, int *val,
- int *val2)
-{
- int ret;
-
- mutex_lock(&data->mutex);
- ret = _bu27008_get_scale(data, ir, val, val2);
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_set_int_time(struct bu27008_data *data, int time)
-{
- int ret;
-
- ret = iio_gts_find_sel_by_int_time(&data->gts, time);
- if (ret < 0)
- return ret;
-
- return bu27008_set_int_time_sel(data, ret);
-}
-
-/* Try to change the time so that the scale is maintained */
-static int bu27008_try_set_int_time(struct bu27008_data *data, int int_time_new)
-{
- int ret, old_time_sel, new_time_sel, old_gain, new_gain;
-
- mutex_lock(&data->mutex);
-
- ret = bu27008_get_int_time_sel(data, &old_time_sel);
- if (ret < 0)
- goto unlock_out;
-
- if (!iio_gts_valid_time(&data->gts, int_time_new)) {
- dev_dbg(data->dev, "Unsupported integration time %u\n",
- int_time_new);
-
- ret = -EINVAL;
- goto unlock_out;
- }
-
- /* If we already use requested time, then we're done */
- new_time_sel = iio_gts_find_sel_by_int_time(&data->gts, int_time_new);
- if (new_time_sel == old_time_sel)
- goto unlock_out;
-
- ret = bu27008_get_gain(data, &data->gts, &old_gain);
- if (ret)
- goto unlock_out;
-
- ret = iio_gts_find_new_gain_sel_by_old_gain_time(&data->gts, old_gain,
- old_time_sel, new_time_sel, &new_gain);
- if (ret) {
- int scale1, scale2;
- bool ok;
-
- _bu27008_get_scale(data, false, &scale1, &scale2);
- dev_dbg(data->dev,
- "Can't support time %u with current scale %u %u\n",
- int_time_new, scale1, scale2);
-
- if (new_gain < 0)
- goto unlock_out;
-
- /*
- * If caller requests for integration time change and we
- * can't support the scale - then the caller should be
- * prepared to 'pick up the pieces and deal with the
- * fact that the scale changed'.
- */
- ret = iio_find_closest_gain_low(&data->gts, new_gain, &ok);
- if (!ok)
- dev_dbg(data->dev, "optimal gain out of range\n");
-
- if (ret < 0) {
- dev_dbg(data->dev,
- "Total gain increase. Risk of saturation");
- ret = iio_gts_get_min_gain(&data->gts);
- if (ret < 0)
- goto unlock_out;
- }
- new_gain = ret;
- dev_dbg(data->dev, "scale changed, new gain %u\n", new_gain);
- }
-
- ret = bu27008_set_gain(data, new_gain);
- if (ret)
- goto unlock_out;
-
- ret = bu27008_set_int_time(data, int_time_new);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_meas_set(struct bu27008_data *data, bool enable)
-{
- if (enable)
- return regmap_set_bits(data->regmap, data->cd->meas_en_reg,
- data->cd->meas_en_mask);
- return regmap_clear_bits(data->regmap, data->cd->meas_en_reg,
- data->cd->meas_en_mask);
-}
-
-static int bu27008_chan_cfg(struct bu27008_data *data,
- struct iio_chan_spec const *chan)
-{
- int chan_sel;
-
- if (chan->scan_index == BU27008_BLUE)
- chan_sel = BU27008_BLUE2_CLEAR3;
- else
- chan_sel = BU27008_CLEAR2_IR3;
-
- /*
- * prepare bitfield for channel sel. The FIELD_PREP works only when
- * mask is constant. In our case the mask is assigned based on the
- * chip type. Hence the open-coded FIELD_PREP here. We don't bother
- * zeroing the irrelevant bits though - update_bits takes care of that.
- */
- chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
-
- return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
- BU27008_MASK_CHAN_SEL, chan_sel);
-}
-
-static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
- struct iio_chan_spec const *chan, int *val, int *val2)
-{
- int ret, int_time;
-
- ret = bu27008_chan_cfg(data, chan);
- if (ret)
- return ret;
-
- ret = bu27008_meas_set(data, true);
- if (ret)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- int_time = BU27008_MEAS_TIME_MAX_MS;
- else
- int_time = ret / USEC_PER_MSEC;
-
- msleep(int_time);
-
- ret = bu27008_chan_read_data(data, chan->address, val);
- if (!ret)
- ret = IIO_VAL_INT;
-
- if (bu27008_meas_set(data, false))
- dev_warn(data->dev, "measurement disabling failed\n");
-
- return ret;
-}
-
-#define BU27008_LUX_DATA_RED 0
-#define BU27008_LUX_DATA_GREEN 1
-#define BU27008_LUX_DATA_BLUE 2
-#define BU27008_LUX_DATA_IR 3
-#define LUX_DATA_SIZE (BU27008_NUM_HW_CHANS * sizeof(__le16))
-
-static int bu27008_read_lux_chans(struct bu27008_data *data, unsigned int time,
- __le16 *chan_data)
-{
- int ret, chan_sel, tmpret, valid;
-
- chan_sel = BU27008_BLUE2_IR3 << (ffs(data->cd->chan_sel_mask) - 1);
-
- ret = regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
- data->cd->chan_sel_mask, chan_sel);
- if (ret)
- return ret;
-
- ret = bu27008_meas_set(data, true);
- if (ret)
- return ret;
-
- msleep(time / USEC_PER_MSEC);
-
- ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
- valid, (valid & BU27008_MASK_VALID),
- BU27008_VALID_RESULT_WAIT_QUANTA_US,
- BU27008_MAX_VALID_RESULT_WAIT_US);
- if (ret)
- goto out;
-
- ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, chan_data,
- LUX_DATA_SIZE);
- if (ret)
- goto out;
-out:
- tmpret = bu27008_meas_set(data, false);
- if (tmpret)
- dev_warn(data->dev, "Stopping measurement failed\n");
-
- return ret;
-}
-
-/*
- * Following equation for computing lux out of register values was given by
- * ROHM HW colleagues;
- *
- * Red = RedData*1024 / Gain * 20 / meas_mode
- * Green = GreenData* 1024 / Gain * 20 / meas_mode
- * Blue = BlueData* 1024 / Gain * 20 / meas_mode
- * IR = IrData* 1024 / Gain * 20 / meas_mode
- *
- * where meas_mode is the integration time in mS / 10
- *
- * IRratio = (IR > 0.18 * Green) ? 0 : 1
- *
- * Lx = max(c1*Red + c2*Green + c3*Blue,0)
- *
- * for
- * IRratio 0: c1 = -0.00002237, c2 = 0.0003219, c3 = -0.000120371
- * IRratio 1: c1 = -0.00001074, c2 = 0.000305415, c3 = -0.000129367
- */
-
-/*
- * The max chan data is 0xffff. When we multiply it by 1024 * 20, we'll get
- * 0x4FFFB000 which still fits in 32-bit integer. This won't overflow.
- */
-#define NORM_CHAN_DATA_FOR_LX_CALC(chan, gain, time) (le16_to_cpu(chan) * \
- 1024 * 20 / (gain) / (time))
-static u64 bu27008_calc_nlux(struct bu27008_data *data, __le16 *lux_data,
- unsigned int gain, unsigned int gain_ir, unsigned int time)
-{
- unsigned int red, green, blue, ir;
- s64 c1, c2, c3, nlux;
-
- time /= 10000;
- ir = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_IR], gain_ir, time);
- red = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_RED], gain, time);
- green = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_GREEN], gain, time);
- blue = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_BLUE], gain, time);
-
- if ((u64)ir * 100LLU > (u64)green * 18LLU) {
- c1 = -22370;
- c2 = 321900;
- c3 = -120371;
- } else {
- c1 = -10740;
- c2 = 305415;
- c3 = -129367;
- }
- nlux = c1 * red + c2 * green + c3 * blue;
-
- return max_t(s64, 0, nlux);
-}
-
-static int bu27008_get_time_n_gains(struct bu27008_data *data,
- unsigned int *gain, unsigned int *gain_ir, unsigned int *time)
-{
- int ret;
-
- ret = bu27008_get_gain(data, &data->gts, gain);
- if (ret < 0)
- return ret;
-
- ret = bu27008_get_gain(data, &data->gts_ir, gain_ir);
- if (ret < 0)
- return ret;
-
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- /* Max integration time is 400000. Fits in signed int. */
- *time = ret;
-
- return 0;
-}
-
-struct bu27008_buf {
- __le16 chan[BU27008_NUM_HW_CHANS];
- u64 lux __aligned(8);
- s64 ts __aligned(8);
-};
-
-static int bu27008_buffer_fill_lux(struct bu27008_data *data,
- struct bu27008_buf *raw)
-{
- unsigned int gain, gain_ir, time;
- int ret;
-
- ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
- if (ret)
- return ret;
-
- raw->lux = bu27008_calc_nlux(data, raw->chan, gain, gain_ir, time);
-
- return 0;
-}
-
-static int bu27008_read_lux(struct bu27008_data *data, struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int *val, int *val2)
-{
- __le16 lux_data[BU27008_NUM_HW_CHANS];
- unsigned int gain, gain_ir, time;
- u64 nlux;
- int ret;
-
- ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
- if (ret)
- return ret;
-
- ret = bu27008_read_lux_chans(data, time, lux_data);
- if (ret)
- return ret;
-
- nlux = bu27008_calc_nlux(data, lux_data, gain, gain_ir, time);
- *val = (int)nlux;
- *val2 = nlux >> 32LLU;
-
- return IIO_VAL_INT_64;
-}
-
-static int bu27008_read_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int busy, ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- busy = iio_device_claim_direct_mode(idev);
- if (busy)
- return -EBUSY;
-
- mutex_lock(&data->mutex);
- if (chan->type == IIO_LIGHT)
- ret = bu27008_read_lux(data, idev, chan, val, val2);
- else
- ret = bu27008_read_one(data, idev, chan, val, val2);
- mutex_unlock(&data->mutex);
-
- iio_device_release_direct_mode(idev);
-
- return ret;
-
- case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_LIGHT) {
- *val = 0;
- *val2 = 1;
- return IIO_VAL_INT_PLUS_NANO;
- }
- ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR,
- val, val2);
- if (ret)
- return ret;
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case IIO_CHAN_INFO_INT_TIME:
- ret = bu27008_get_int_time_us(data);
- if (ret < 0)
- return ret;
-
- *val = 0;
- *val2 = ret;
-
- return IIO_VAL_INT_PLUS_MICRO;
-
- default:
- return -EINVAL;
- }
-}
-
-/* Called if the new scale could not be supported with existing int-time */
-static int bu27008_try_find_new_time_gain(struct bu27008_data *data, int val,
- int val2, int *gain_sel)
-{
- int i, ret, new_time_sel;
-
- for (i = 0; i < data->gts.num_itime; i++) {
- new_time_sel = data->gts.itime_table[i].sel;
- ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
- new_time_sel, val, val2, gain_sel);
- if (!ret)
- break;
- }
- if (i == data->gts.num_itime) {
- dev_err(data->dev, "Can't support scale %u %u\n", val, val2);
-
- return -EINVAL;
- }
-
- return bu27008_set_int_time_sel(data, new_time_sel);
-}
-
-static int bu27008_set_scale(struct bu27008_data *data,
- struct iio_chan_spec const *chan,
- int val, int val2)
-{
- int ret, gain_sel, time_sel;
-
- if (chan->scan_index == BU27008_IR)
- return -EINVAL;
-
- mutex_lock(&data->mutex);
-
- ret = bu27008_get_int_time_sel(data, &time_sel);
- if (ret < 0)
- goto unlock_out;
-
- ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
- val, val2, &gain_sel);
- if (ret) {
- ret = bu27008_try_find_new_time_gain(data, val, val2, &gain_sel);
- if (ret)
- goto unlock_out;
-
- }
- ret = data->cd->write_gain_sel(data, gain_sel);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
-static int bu27008_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
-
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_CHAN_INFO_INT_TIME:
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
-}
-
-static int bu27008_write_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int ret;
-
- /*
- * Do not allow changing scale when measurement is ongoing as doing so
- * could make values in the buffer inconsistent.
- */
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- ret = bu27008_set_scale(data, chan, val, val2);
- break;
- case IIO_CHAN_INFO_INT_TIME:
- if (val) {
- ret = -EINVAL;
- break;
- }
- ret = bu27008_try_set_int_time(data, val2);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- iio_device_release_direct_mode(idev);
-
- return ret;
-}
-
-static int bu27008_read_avail(struct iio_dev *idev,
- struct iio_chan_spec const *chan, const int **vals,
- int *type, int *length, long mask)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- switch (mask) {
- case IIO_CHAN_INFO_INT_TIME:
- return iio_gts_avail_times(&data->gts, vals, type, length);
- case IIO_CHAN_INFO_SCALE:
- if (chan->channel2 == IIO_MOD_LIGHT_IR)
- return iio_gts_all_avail_scales(&data->gts_ir, vals,
- type, length);
- return iio_gts_all_avail_scales(&data->gts, vals, type, length);
- default:
- return -EINVAL;
- }
-}
-
-static int bu27008_update_scan_mode(struct iio_dev *idev,
- const unsigned long *scan_mask)
-{
- struct bu27008_data *data = iio_priv(idev);
- int chan_sel;
-
- /* Configure channel selection */
- if (test_bit(BU27008_BLUE, idev->active_scan_mask)) {
- if (test_bit(BU27008_CLEAR, idev->active_scan_mask))
- chan_sel = BU27008_BLUE2_CLEAR3;
- else
- chan_sel = BU27008_BLUE2_IR3;
- } else {
- chan_sel = BU27008_CLEAR2_IR3;
- }
-
- chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
-
- return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
- data->cd->chan_sel_mask, chan_sel);
-}
-
-static const struct iio_info bu27008_info = {
- .read_raw = &bu27008_read_raw,
- .write_raw = &bu27008_write_raw,
- .write_raw_get_fmt = &bu27008_write_raw_get_fmt,
- .read_avail = &bu27008_read_avail,
- .update_scan_mode = bu27008_update_scan_mode,
- .validate_trigger = iio_validate_own_trigger,
-};
-
-static int bu27008_trigger_set_state(struct iio_trigger *trig, bool state)
-{
- struct bu27008_data *data = iio_trigger_get_drvdata(trig);
- int ret;
-
-
- if (state)
- ret = regmap_set_bits(data->regmap, data->cd->drdy_en_reg,
- data->cd->drdy_en_mask);
- else
- ret = regmap_clear_bits(data->regmap, data->cd->drdy_en_reg,
- data->cd->drdy_en_mask);
- if (ret)
- dev_err(data->dev, "Failed to set trigger state\n");
-
- return ret;
-}
-
-static void bu27008_trigger_reenable(struct iio_trigger *trig)
-{
- struct bu27008_data *data = iio_trigger_get_drvdata(trig);
-
- enable_irq(data->irq);
-}
-
-static const struct iio_trigger_ops bu27008_trigger_ops = {
- .set_trigger_state = bu27008_trigger_set_state,
- .reenable = bu27008_trigger_reenable,
-};
-
-static irqreturn_t bu27008_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *idev = pf->indio_dev;
- struct bu27008_data *data = iio_priv(idev);
- struct bu27008_buf raw;
- int ret, dummy;
-
- memset(&raw, 0, sizeof(raw));
-
- /*
- * After some measurements, it seems reading the
- * BU27008_REG_MODE_CONTROL3 debounces the IRQ line
- */
- ret = regmap_read(data->regmap, data->cd->valid_reg, &dummy);
- if (ret < 0)
- goto err_read;
-
- ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, &raw.chan,
- sizeof(raw.chan));
- if (ret < 0)
- goto err_read;
-
- if (test_bit(BU27008_LUX, idev->active_scan_mask)) {
- ret = bu27008_buffer_fill_lux(data, &raw);
- if (ret)
- goto err_read;
- }
-
- iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
-err_read:
- iio_trigger_notify_done(idev->trig);
-
- return IRQ_HANDLED;
-}
-
-static int bu27008_buffer_preenable(struct iio_dev *idev)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- return bu27008_meas_set(data, true);
-}
-
-static int bu27008_buffer_postdisable(struct iio_dev *idev)
-{
- struct bu27008_data *data = iio_priv(idev);
-
- return bu27008_meas_set(data, false);
-}
-
-static const struct iio_buffer_setup_ops bu27008_buffer_ops = {
- .preenable = bu27008_buffer_preenable,
- .postdisable = bu27008_buffer_postdisable,
-};
-
-static irqreturn_t bu27008_data_rdy_poll(int irq, void *private)
-{
- /*
- * The BU27008 keeps IRQ asserted until we read the VALID bit from
- * a register. We need to keep the IRQ disabled until then.
- */
- disable_irq_nosync(irq);
- iio_trigger_poll(private);
-
- return IRQ_HANDLED;
-}
-
-static int bu27008_setup_trigger(struct bu27008_data *data, struct iio_dev *idev)
-{
- struct iio_trigger *itrig;
- char *name;
- int ret;
-
- ret = devm_iio_triggered_buffer_setup(data->dev, idev,
- &iio_pollfunc_store_time,
- bu27008_trigger_handler,
- &bu27008_buffer_ops);
- if (ret)
- return dev_err_probe(data->dev, ret,
- "iio_triggered_buffer_setup_ext FAIL\n");
-
- itrig = devm_iio_trigger_alloc(data->dev, "%sdata-rdy-dev%d",
- idev->name, iio_device_id(idev));
- if (!itrig)
- return -ENOMEM;
-
- data->trig = itrig;
-
- itrig->ops = &bu27008_trigger_ops;
- iio_trigger_set_drvdata(itrig, data);
-
- name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-bu27008",
- dev_name(data->dev));
-
- ret = devm_request_irq(data->dev, data->irq,
- &bu27008_data_rdy_poll,
- 0, name, itrig);
- if (ret)
- return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
-
- ret = devm_iio_trigger_register(data->dev, itrig);
- if (ret)
- return dev_err_probe(data->dev, ret,
- "Trigger registration failed\n");
-
- /* set default trigger */
- idev->trig = iio_trigger_get(itrig);
-
- return 0;
-}
-
-static int bu27008_probe(struct i2c_client *i2c)
-{
- struct device *dev = &i2c->dev;
- struct bu27008_data *data;
- struct regmap *regmap;
- unsigned int part_id, reg;
- struct iio_dev *idev;
- int ret;
-
- idev = devm_iio_device_alloc(dev, sizeof(*data));
- if (!idev)
- return -ENOMEM;
-
- ret = devm_regulator_get_enable(dev, "vdd");
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get regulator\n");
-
- data = iio_priv(idev);
-
- data->cd = device_get_match_data(&i2c->dev);
- if (!data->cd)
- return -ENODEV;
-
- regmap = devm_regmap_init_i2c(i2c, data->cd->regmap_cfg);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "Failed to initialize Regmap\n");
-
-
- ret = regmap_read(regmap, BU27008_REG_SYSTEM_CONTROL, &reg);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to access sensor\n");
-
- part_id = FIELD_GET(BU27008_MASK_PART_ID, reg);
-
- if (part_id != data->cd->part_id)
- dev_warn(dev, "unknown device 0x%x\n", part_id);
-
- ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains,
- data->cd->num_gains, data->cd->itimes,
- data->cd->num_itimes, &data->gts);
- if (ret)
- return ret;
-
- ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains_ir,
- data->cd->num_gains_ir, data->cd->itimes,
- data->cd->num_itimes, &data->gts_ir);
- if (ret)
- return ret;
-
- mutex_init(&data->mutex);
- data->regmap = regmap;
- data->dev = dev;
- data->irq = i2c->irq;
-
- idev->channels = bu27008_channels;
- idev->num_channels = ARRAY_SIZE(bu27008_channels);
- idev->name = data->cd->name;
- idev->info = &bu27008_info;
- idev->modes = INDIO_DIRECT_MODE;
- idev->available_scan_masks = bu27008_scan_masks;
-
- ret = data->cd->chip_init(data);
- if (ret)
- return ret;
-
- if (i2c->irq) {
- ret = bu27008_setup_trigger(data, idev);
- if (ret)
- return ret;
- } else {
- dev_info(dev, "No IRQ, buffered mode disabled\n");
- }
-
- ret = devm_iio_device_register(dev, idev);
- if (ret)
- return dev_err_probe(dev, ret,
- "Unable to register iio device\n");
-
- return 0;
-}
-
-static const struct of_device_id bu27008_of_match[] = {
- { .compatible = "rohm,bu27008", .data = &bu27008_chip },
- { .compatible = "rohm,bu27010", .data = &bu27010_chip },
- { }
-};
-MODULE_DEVICE_TABLE(of, bu27008_of_match);
-
-static struct i2c_driver bu27008_i2c_driver = {
- .driver = {
- .name = "bu27008",
- .of_match_table = bu27008_of_match,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
- .probe = bu27008_probe,
-};
-module_i2c_driver(bu27008_i2c_driver);
-
-MODULE_DESCRIPTION("ROHM BU27008 and BU27010 colour sensor driver");
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("IIO_GTS_HELPER");
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 4f591c2278f2..cc25596cb248 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -205,7 +206,7 @@ struct bu27034_data {
struct {
u32 mlux;
__le16 channels[BU27034_NUM_HW_DATA_CHANS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
@@ -395,30 +396,26 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
int numg = ARRAY_SIZE(gains);
int ret, int_time_old, i;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bu27034_get_int_time(data);
if (ret < 0)
- goto unlock_out;
+ return ret;
int_time_old = ret;
if (!iio_gts_valid_time(&data->gts, time_us)) {
dev_err(data->dev, "Unsupported integration time %u\n",
time_us);
- ret = -EINVAL;
-
- goto unlock_out;
+ return -EINVAL;
}
- if (time_us == int_time_old) {
- ret = 0;
- goto unlock_out;
- }
+ if (time_us == int_time_old)
+ return 0;
for (i = 0; i < numg; i++) {
ret = bu27034_get_gain(data, gains[i].chan, &gains[i].old_gain);
if (ret)
- goto unlock_out;
+ return ret;
ret = iio_gts_find_new_gain_by_old_gain_time(&data->gts,
gains[i].old_gain,
@@ -434,7 +431,7 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
gains[i].chan, time_us, scale1, scale2);
if (gains[i].new_gain < 0)
- goto unlock_out;
+ return ret;
/*
* If caller requests for integration time change and we
@@ -455,7 +452,7 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
"Total gain increase. Risk of saturation");
ret = iio_gts_get_min_gain(&data->gts);
if (ret < 0)
- goto unlock_out;
+ return ret;
}
dev_dbg(data->dev, "chan %u scale changed\n",
gains[i].chan);
@@ -468,15 +465,10 @@ static int bu27034_try_set_int_time(struct bu27034_data *data, int time_us)
for (i = 0; i < numg; i++) {
ret = bu27034_set_gain(data, gains[i].chan, gains[i].new_gain);
if (ret)
- goto unlock_out;
+ return ret;
}
- ret = bu27034_set_int_time(data, time_us);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_set_int_time(data, time_us);
}
static int bu27034_set_scale(struct bu27034_data *data, int chan,
@@ -492,10 +484,10 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
return -EINVAL;
}
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, BU27034_REG_MODE_CONTROL1, &time_sel);
if (ret)
- goto unlock_out;
+ return ret;
ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, time_sel,
val, val2, &gain_sel);
@@ -518,7 +510,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
ret = bu27034_get_gain(data, gain.chan, &gain.old_gain);
if (ret)
- goto unlock_out;
+ return ret;
/*
* Iterate through all the times to see if we find one which
@@ -551,26 +543,20 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
if (!found) {
dev_dbg(data->dev,
"Can't set scale maintaining other channel\n");
- ret = -EINVAL;
-
- goto unlock_out;
+ return -EINVAL;
}
ret = bu27034_set_gain(data, gain.chan, gain.new_gain);
if (ret)
- goto unlock_out;
+ return ret;
ret = regmap_update_bits(data->regmap, BU27034_REG_MODE_CONTROL1,
BU27034_MASK_MEAS_MODE, new_time_sel);
if (ret)
- goto unlock_out;
+ return ret;
}
- ret = bu27034_write_gain_sel(data, chan, gain_sel);
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_write_gain_sel(data, chan, gain_sel);
}
/*
@@ -1221,42 +1207,33 @@ static int bu27034_buffer_enable(struct iio_dev *idev)
struct task_struct *task;
int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bu27034_meas_set(data, true);
if (ret)
- goto unlock_out;
+ return ret;
task = kthread_run(bu27034_buffer_thread, idev,
"bu27034-buffering-%u",
iio_device_id(idev));
- if (IS_ERR(task)) {
- ret = PTR_ERR(task);
- goto unlock_out;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);
data->task = task;
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return 0;
}
static int bu27034_buffer_disable(struct iio_dev *idev)
{
struct bu27034_data *data = iio_priv(idev);
- int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->task) {
kthread_stop(data->task);
data->task = NULL;
}
- ret = bu27034_meas_set(data, false);
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bu27034_meas_set(data, false);
}
static const struct iio_buffer_setup_ops bu27034_buffer_ops = {
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 56f5fbbf79ac..2ba917c5c138 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -203,7 +203,7 @@ struct rpr0521_data {
struct {
__le16 channels[3];
u8 garbage;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
index 283086887caf..1f93e3dc45c2 100644
--- a/drivers/iio/light/st_uvis25.h
+++ b/drivers/iio/light/st_uvis25.h
@@ -30,7 +30,7 @@ struct st_uvis25_hw {
/* Ensure timestamp is naturally aligned */
struct {
u8 chan;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 4fecdf10aeb1..884e43e4cda4 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -56,7 +56,7 @@ struct tcs3414_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 4186aac04902..2bd36a344ea5 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -67,7 +67,7 @@ struct tcs3472_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chans[4];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/light/veml3235.c b/drivers/iio/light/veml3235.c
index 66361c3012a3..77c9ae17ed47 100644
--- a/drivers/iio/light/veml3235.c
+++ b/drivers/iio/light/veml3235.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
+#include <linux/iio/iio-gts-helper.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -35,17 +36,33 @@ struct veml3235_data {
struct device *dev;
struct regmap *regmap;
struct veml3235_rf rf;
+ struct iio_gts gts;
};
-static const int veml3235_it_times[][2] = {
- { 0, 50000 },
- { 0, 100000 },
- { 0, 200000 },
- { 0, 400000 },
- { 0, 800000 },
+static const struct iio_itime_sel_mul veml3235_it_sel[] = {
+ GAIN_SCALE_ITIME_US(50000, 0, 1),
+ GAIN_SCALE_ITIME_US(100000, 1, 2),
+ GAIN_SCALE_ITIME_US(200000, 2, 4),
+ GAIN_SCALE_ITIME_US(400000, 3, 8),
+ GAIN_SCALE_ITIME_US(800000, 4, 16),
};
-static const int veml3235_scale_vals[] = { 1, 2, 4, 8 };
+/*
+ * The MSB (DG) doubles the value of the rest of the field, which leads to
+ * two possible combinations to obtain gain = 2 and gain = 4. The gain
+ * handling can be simplified by restricting DG = 1 to the only gain that
+ * really requires it, gain = 8. Note that "X10" is a reserved value.
+ */
+#define VEML3235_SEL_GAIN_X1 0
+#define VEML3235_SEL_GAIN_X2 1
+#define VEML3235_SEL_GAIN_X4 3
+#define VEML3235_SEL_GAIN_X8 7
+static const struct iio_gain_sel_pair veml3235_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML3235_SEL_GAIN_X1),
+ GAIN_SCALE_GAIN(2, VEML3235_SEL_GAIN_X2),
+ GAIN_SCALE_GAIN(4, VEML3235_SEL_GAIN_X4),
+ GAIN_SCALE_GAIN(8, VEML3235_SEL_GAIN_X8),
+};
static int veml3235_power_on(struct veml3235_data *data)
{
@@ -101,42 +118,58 @@ static const struct iio_chan_spec veml3235_channels[] = {
},
};
+static const struct regmap_range veml3235_readable_ranges[] = {
+ regmap_reg_range(VEML3235_REG_CONF, VEML3235_REG_ID),
+};
+
+static const struct regmap_access_table veml3235_readable_table = {
+ .yes_ranges = veml3235_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_readable_ranges),
+};
+
+static const struct regmap_range veml3235_writable_ranges[] = {
+ regmap_reg_range(VEML3235_REG_CONF, VEML3235_REG_CONF),
+};
+
+static const struct regmap_access_table veml3235_writable_table = {
+ .yes_ranges = veml3235_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_writable_ranges),
+};
+
+static const struct regmap_range veml3235_volatile_ranges[] = {
+ regmap_reg_range(VEML3235_REG_WH_DATA, VEML3235_REG_ALS_DATA),
+};
+
+static const struct regmap_access_table veml3235_volatile_table = {
+ .yes_ranges = veml3235_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml3235_volatile_ranges),
+};
+
static const struct regmap_config veml3235_regmap_config = {
.name = "veml3235_regmap",
.reg_bits = 8,
.val_bits = 16,
.max_register = VEML3235_REG_ID,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &veml3235_readable_table,
+ .wr_table = &veml3235_writable_table,
+ .volatile_table = &veml3235_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
static int veml3235_get_it(struct veml3235_data *data, int *val, int *val2)
{
- int ret, reg;
+ int ret, it_idx;
- ret = regmap_field_read(data->rf.it, &reg);
+ ret = regmap_field_read(data->rf.it, &it_idx);
if (ret)
return ret;
- switch (reg) {
- case 0:
- *val2 = 50000;
- break;
- case 1:
- *val2 = 100000;
- break;
- case 2:
- *val2 = 200000;
- break;
- case 3:
- *val2 = 400000;
- break;
- case 4:
- *val2 = 800000;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (ret < 0)
+ return ret;
+ *val2 = ret;
*val = 0;
return IIO_VAL_INT_PLUS_MICRO;
@@ -145,78 +178,78 @@ static int veml3235_get_it(struct veml3235_data *data, int *val, int *val2)
static int veml3235_set_it(struct iio_dev *indio_dev, int val, int val2)
{
struct veml3235_data *data = iio_priv(indio_dev);
- int ret, new_it;
+ int ret, gain_idx, it_idx, new_gain, prev_gain, prev_it;
+ bool in_range;
- if (val)
+ if (val || !iio_gts_valid_time(&data->gts, val2))
return -EINVAL;
- switch (val2) {
- case 50000:
- new_it = 0x00;
- break;
- case 100000:
- new_it = 0x01;
- break;
- case 200000:
- new_it = 0x02;
- break;
- case 400000:
- new_it = 0x03;
- break;
- case 800000:
- new_it = 0x04;
- break;
- default:
- return -EINVAL;
- }
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- ret = regmap_field_write(data->rf.it, new_it);
- if (ret) {
- dev_err(data->dev,
- "failed to update integration time: %d\n", ret);
+ ret = regmap_field_read(data->rf.gain, &gain_idx);
+ if (ret)
return ret;
- }
- return 0;
+ prev_it = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (prev_it < 0)
+ return prev_it;
+
+ if (prev_it == val2)
+ return 0;
+
+ prev_gain = iio_gts_find_gain_by_sel(&data->gts, gain_idx);
+ if (prev_gain < 0)
+ return prev_gain;
+
+ ret = iio_gts_find_new_gain_by_gain_time_min(&data->gts, prev_gain, prev_it,
+ val2, &new_gain, &in_range);
+ if (ret)
+ return ret;
+
+ if (!in_range)
+ dev_dbg(data->dev, "Optimal gain out of range\n");
+
+ ret = iio_gts_find_sel_by_int_time(&data->gts, val2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_field_write(data->rf.it, ret);
+ if (ret)
+ return ret;
+
+ ret = iio_gts_find_sel_by_gain(&data->gts, new_gain);
+ if (ret < 0)
+ return ret;
+
+ return regmap_field_write(data->rf.gain, ret);
}
-static int veml3235_set_gain(struct iio_dev *indio_dev, int val, int val2)
+static int veml3235_set_scale(struct iio_dev *indio_dev, int val, int val2)
{
struct veml3235_data *data = iio_priv(indio_dev);
- int ret, new_gain;
+ int ret, it_idx, gain_sel, time_sel;
- if (val2 != 0)
- return -EINVAL;
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- switch (val) {
- case 1:
- new_gain = 0x00;
- break;
- case 2:
- new_gain = 0x01;
- break;
- case 4:
- new_gain = 0x03;
- break;
- case 8:
- new_gain = 0x07;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_gain_time_sel_for_scale(&data->gts, val, val2,
+ &gain_sel, &time_sel);
+ if (ret)
+ return ret;
- ret = regmap_field_write(data->rf.gain, new_gain);
- if (ret) {
- dev_err(data->dev, "failed to set gain: %d\n", ret);
+ ret = regmap_field_write(data->rf.it, time_sel);
+ if (ret)
return ret;
- }
- return 0;
+ return regmap_field_write(data->rf.gain, gain_sel);
}
-static int veml3235_get_gain(struct veml3235_data *data, int *val)
+static int veml3235_get_scale(struct veml3235_data *data, int *val, int *val2)
{
- int ret, reg;
+ int gain, it, reg, ret;
ret = regmap_field_read(data->rf.gain, &reg);
if (ret) {
@@ -224,25 +257,25 @@ static int veml3235_get_gain(struct veml3235_data *data, int *val)
return ret;
}
- switch (reg & 0x03) {
- case 0:
- *val = 1;
- break;
- case 1:
- *val = 2;
- break;
- case 3:
- *val = 4;
- break;
- default:
- return -EINVAL;
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret) {
+ dev_err(data->dev, "failed to read integration time %d\n", ret);
+ return ret;
}
- /* Double gain */
- if (reg & 0x04)
- *val *= 2;
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ ret = iio_gts_get_scale(&data->gts, gain, it, val, val2);
+ if (ret)
+ return ret;
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_NANO;
}
static int veml3235_read_raw(struct iio_dev *indio_dev,
@@ -276,7 +309,7 @@ static int veml3235_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_INT_TIME:
return veml3235_get_it(data, val, val2);
case IIO_CHAN_INFO_SCALE:
- return veml3235_get_gain(data, val);
+ return veml3235_get_scale(data, val, val2);
default:
return -EINVAL;
}
@@ -287,17 +320,27 @@ static int veml3235_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct veml3235_data *data = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *vals = (int *)&veml3235_it_times;
- *length = 2 * ARRAY_SIZE(veml3235_it_times);
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_avail_times(&data->gts, vals, type, length);
+ case IIO_CHAN_INFO_SCALE:
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml3235_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
case IIO_CHAN_INFO_SCALE:
- *vals = (int *)&veml3235_scale_vals;
- *length = ARRAY_SIZE(veml3235_scale_vals);
- *type = IIO_VAL_INT;
- return IIO_AVAIL_LIST;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -311,7 +354,7 @@ static int veml3235_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_INT_TIME:
return veml3235_set_it(indio_dev, val, val2);
case IIO_CHAN_INFO_SCALE:
- return veml3235_set_gain(indio_dev, val, val2);
+ return veml3235_set_scale(indio_dev, val, val2);
}
return -EINVAL;
@@ -321,7 +364,7 @@ static void veml3235_read_id(struct veml3235_data *data)
{
int ret, reg;
- ret = regmap_field_read(data->rf.id, &reg);
+ ret = regmap_field_read(data->rf.id, &reg);
if (ret) {
dev_info(data->dev, "failed to read ID\n");
return;
@@ -371,6 +414,13 @@ static int veml3235_hw_init(struct iio_dev *indio_dev)
struct device *dev = data->dev;
int ret;
+ ret = devm_iio_init_iio_gts(data->dev, 0, 272640000,
+ veml3235_gain_sel, ARRAY_SIZE(veml3235_gain_sel),
+ veml3235_it_sel, ARRAY_SIZE(veml3235_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "failed to init iio gts\n");
+
/* Set gain to 1 and integration time to 100 ms */
ret = regmap_field_write(data->rf.gain, 0x00);
if (ret)
@@ -389,9 +439,10 @@ static int veml3235_hw_init(struct iio_dev *indio_dev)
}
static const struct iio_info veml3235_info = {
- .read_raw = veml3235_read_raw,
- .read_avail = veml3235_read_avail,
+ .read_raw = veml3235_read_raw,
+ .read_avail = veml3235_read_avail,
.write_raw = veml3235_write_raw,
+ .write_raw_get_fmt = veml3235_write_raw_get_fmt,
};
static int veml3235_probe(struct i2c_client *client)
@@ -493,3 +544,4 @@ module_i2c_driver(veml3235_driver);
MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
MODULE_DESCRIPTION("VEML3235 Ambient Light Sensor");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_GTS_HELPER");
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index ccb43dfd5cf7..9b71825eea9b 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -28,6 +28,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
/* Device registers */
#define VEML6030_REG_ALS_CONF 0x00
@@ -37,6 +39,7 @@
#define VEML6030_REG_ALS_DATA 0x04
#define VEML6030_REG_WH_DATA 0x05
#define VEML6030_REG_ALS_INT 0x06
+#define VEML6030_REG_DATA(ch) (VEML6030_REG_ALS_DATA + (ch))
/* Bit masks for specific functionality */
#define VEML6030_ALS_IT GENMASK(9, 6)
@@ -56,6 +59,12 @@
#define VEML6035_INT_CHAN BIT(3)
#define VEML6035_CHAN_EN BIT(2)
+enum veml6030_scan {
+ VEML6030_SCAN_ALS,
+ VEML6030_SCAN_WH,
+ VEML6030_SCAN_TIMESTAMP,
+};
+
struct veml603x_chip {
const char *name;
const int(*scale_vals)[][2];
@@ -242,6 +251,13 @@ static const struct iio_chan_spec veml6030_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.event_spec = veml6030_event_spec,
.num_event_specs = ARRAY_SIZE(veml6030_event_spec),
+ .scan_index = VEML6030_SCAN_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_INTENSITY,
@@ -253,7 +269,15 @@ static const struct iio_chan_spec veml6030_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_WH,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec veml7700_channels[] = {
@@ -266,6 +290,13 @@ static const struct iio_chan_spec veml7700_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_INTENSITY,
@@ -277,7 +308,15 @@ static const struct iio_chan_spec veml7700_channels[] = {
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6030_SCAN_WH,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
static const struct regmap_config veml6030_regmap_config = {
@@ -889,6 +928,37 @@ static irqreturn_t veml6030_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static irqreturn_t veml6030_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio = pf->indio_dev;
+ struct veml6030_data *data = iio_priv(iio);
+ unsigned int reg;
+ int ch, ret, i = 0;
+ struct {
+ u16 chans[2];
+ aligned_s64 timestamp;
+ } scan;
+
+ memset(&scan, 0, sizeof(scan));
+
+ iio_for_each_active_channel(iio, ch) {
+ ret = regmap_read(data->regmap, VEML6030_REG_DATA(ch),
+ &reg);
+ if (ret)
+ goto done;
+
+ scan.chans[i++] = reg;
+ }
+
+ iio_push_to_buffers_with_timestamp(iio, &scan, pf->timestamp);
+
+done:
+ iio_trigger_notify_done(iio->trig);
+
+ return IRQ_HANDLED;
+}
+
static int veml6030_set_info(struct iio_dev *indio_dev)
{
struct veml6030_data *data = iio_priv(indio_dev);
@@ -1077,6 +1147,12 @@ static int veml6030_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ veml6030_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register triggered buffer");
+
return devm_iio_device_register(&client->dev, indio_dev);
}
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index acd291f3e792..a70bf8a3c73b 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -360,7 +360,7 @@ static irqreturn_t af8133j_trigger_handler(int irq, void *p)
s64 timestamp = iio_get_time_ns(indio_dev);
struct {
__le16 values[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} sample;
int ret;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 8306a18706ac..08975c60e325 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -197,7 +197,7 @@ struct ak8974 {
/* Ensure timestamp is naturally aligned */
struct {
__le16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 18077fb463a9..ef1363126cc2 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -426,7 +426,7 @@ struct ak8975_data {
/* Ensure natural alignment of timestamp */
struct {
s16 channels[3];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 7f545740178e..88bb673e40d8 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -140,7 +140,7 @@ struct bmc150_magn_data {
/* Ensure timestamp is naturally aligned */
struct {
s32 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
struct iio_trigger *dready_trig;
bool dready_trigger_on;
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 60fbb5431c88..ffd669b1ee7c 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -44,7 +44,7 @@ struct hmc5843_data {
struct iio_mount_matrix orientation;
struct {
__be16 chans[3];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 5295dc0100e4..2fe8e97f2cf8 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -60,7 +60,7 @@ struct mag3110_data {
struct {
__be16 channels[3];
u8 temperature;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index c55a38650c0d..28012b20c64f 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -236,7 +236,7 @@ struct yas5xx {
*/
struct {
s32 channels[4];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index 2953403bef53..c309d991490c 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -7,6 +7,7 @@
* Author: Peter Rosin <peda@axentia.se>
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
@@ -237,49 +238,18 @@ static ssize_t mux_write_ext_info(struct iio_dev *indio_dev, uintptr_t private,
return ret;
}
-static int mux_configure_channel(struct device *dev, struct mux *mux,
- u32 state, const char *label, int idx)
+static int mux_configure_chan_ext_info(struct device *dev, struct mux *mux,
+ int idx, int num_ext_info)
{
struct mux_child *child = &mux->child[idx];
- struct iio_chan_spec *chan = &mux->chan[idx];
struct iio_chan_spec const *pchan = mux->parent->channel;
- char *page = NULL;
- int num_ext_info;
int i;
int ret;
- chan->indexed = 1;
- chan->output = pchan->output;
- chan->datasheet_name = label;
- chan->ext_info = mux->ext_info;
-
- ret = iio_get_channel_type(mux->parent, &chan->type);
- if (ret < 0) {
- dev_err(dev, "failed to get parent channel type\n");
- return ret;
- }
-
- if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
- chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
- if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
- chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
-
- if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
- chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
-
- if (state >= mux_control_states(mux->control)) {
- dev_err(dev, "too many channels\n");
- return -EINVAL;
- }
-
- chan->channel = state;
+ char *page __free(kfree) = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
- num_ext_info = iio_get_channel_ext_info_count(mux->parent);
- if (num_ext_info) {
- page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- }
child->ext_info_cache = devm_kcalloc(dev,
num_ext_info,
sizeof(*child->ext_info_cache),
@@ -318,8 +288,46 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache[i].size = ret;
}
- if (page)
- devm_kfree(dev, page);
+ return 0;
+}
+
+static int mux_configure_channel(struct device *dev, struct mux *mux, u32 state,
+ const char *label, int idx)
+{
+ struct iio_chan_spec *chan = &mux->chan[idx];
+ struct iio_chan_spec const *pchan = mux->parent->channel;
+ int num_ext_info;
+ int ret;
+
+ chan->indexed = 1;
+ chan->output = pchan->output;
+ chan->datasheet_name = label;
+ chan->ext_info = mux->ext_info;
+
+ ret = iio_get_channel_type(mux->parent, &chan->type);
+ if (ret < 0) {
+ dev_err(dev, "failed to get parent channel type\n");
+ return ret;
+ }
+
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
+
+ if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
+
+ if (state >= mux_control_states(mux->control)) {
+ dev_err(dev, "too many channels\n");
+ return -EINVAL;
+ }
+
+ chan->channel = state;
+
+ num_ext_info = iio_get_channel_ext_info_count(mux->parent);
+ if (num_ext_info)
+ return mux_configure_chan_ext_info(dev, mux, idx, num_ext_info);
return 0;
}
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 5376605b69b4..d44ab65c94cb 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1002,7 +1002,7 @@ static int bmp280_preinit(struct bmp280_data *data)
* after resetting, the device uses the complete power-on sequence so
* it needs to wait for the defined start-up time.
*/
- fsleep(data->start_up_time);
+ fsleep(data->start_up_time_us);
ret = regmap_read(data->regmap, BMP280_REG_STATUS, &reg);
if (ret)
@@ -1161,7 +1161,7 @@ const struct bmp280_chip_info bmp280_chip_info = {
.chip_id = bmp280_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp280_chip_ids),
.regmap_config = &bmp280_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -1347,7 +1347,7 @@ const struct bmp280_chip_info bme280_chip_info = {
.chip_id = bme280_chip_ids,
.num_chip_id = ARRAY_SIZE(bme280_chip_ids),
.regmap_config = &bme280_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bme280_channels,
.num_channels = ARRAY_SIZE(bme280_channels),
.avail_scan_masks = bme280_avail_scan_masks,
@@ -1414,7 +1414,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
return ret;
}
/* Wait for 2ms for command to be processed */
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
/* Check for command processing error */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &reg);
if (ret) {
@@ -1806,7 +1806,7 @@ static int bmp380_chip_config(struct bmp280_data *data)
* formula in datasheet section 3.9.2 with an offset of ~+15%
* as it seen as well in table 3.9.1.
*/
- msleep(150);
+ fsleep(150 * USEC_PER_MSEC);
/* Check config error flag */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
@@ -1957,7 +1957,7 @@ const struct bmp280_chip_info bmp380_chip_info = {
.num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
.regmap_config = &bmp380_regmap_config,
.spi_read_extra_byte = true,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp380_channels,
.num_channels = ARRAY_SIZE(bmp380_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -2006,7 +2006,8 @@ static int bmp580_soft_reset(struct bmp280_data *data)
dev_err(data->dev, "failed to send reset command to device\n");
return ret;
}
- usleep_range(2000, 2500);
+ /* From datasheet's table 4: electrical characteristics */
+ fsleep(2000);
/* Dummy read of chip_id */
ret = regmap_read(data->regmap, BMP580_REG_CHIP_ID, &reg);
@@ -2208,7 +2209,7 @@ static int bmp580_nvmem_read_impl(void *priv, unsigned int offset, void *val,
goto exit;
}
/* Wait standby transition time */
- usleep_range(2500, 3000);
+ fsleep(2500);
while (bytes >= sizeof(*dst)) {
addr = bmp580_nvmem_addrs[offset / sizeof(*dst)];
@@ -2274,7 +2275,7 @@ static int bmp580_nvmem_write_impl(void *priv, unsigned int offset, void *val,
goto exit;
}
/* Wait standby transition time */
- usleep_range(2500, 3000);
+ fsleep(2500);
while (bytes >= sizeof(*buf)) {
addr = bmp580_nvmem_addrs[offset / sizeof(*buf)];
@@ -2458,7 +2459,7 @@ static int bmp580_chip_config(struct bmp280_data *data)
return ret;
}
/* From datasheet's table 4: electrical characteristics */
- usleep_range(2500, 3000);
+ fsleep(2500);
/* Set default DSP mode settings */
reg_val = FIELD_PREP(BMP580_DSP_COMP_MASK, BMP580_DSP_PRESS_TEMP_COMP_EN) |
@@ -2649,7 +2650,7 @@ const struct bmp280_chip_info bmp580_chip_info = {
.chip_id = bmp580_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
.regmap_config = &bmp580_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp580_channels,
.num_channels = ARRAY_SIZE(bmp580_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -2720,7 +2721,7 @@ static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas)
delay_us =
conversion_time_max[data->oversampling_press];
- usleep_range(delay_us, delay_us + 1000);
+ fsleep(delay_us);
}
ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
@@ -2988,7 +2989,7 @@ const struct bmp280_chip_info bmp180_chip_info = {
.chip_id = bmp180_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
.regmap_config = &bmp180_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -3066,7 +3067,7 @@ const struct bmp280_chip_info bmp085_chip_info = {
.chip_id = bmp180_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
.regmap_config = &bmp180_regmap_config,
- .start_up_time = 2000,
+ .start_up_time_us = 2000,
.channels = bmp280_channels,
.num_channels = ARRAY_SIZE(bmp280_channels),
.avail_scan_masks = bmp280_avail_scan_masks,
@@ -3175,7 +3176,7 @@ int bmp280_common_probe(struct device *dev,
data->oversampling_temp = chip_info->oversampling_temp_default;
data->iir_filter_coeff = chip_info->iir_filter_coeff_default;
data->sampling_freq = chip_info->sampling_freq_default;
- data->start_up_time = chip_info->start_up_time;
+ data->start_up_time_us = chip_info->start_up_time_us;
/* Bring up regulators */
regulator_bulk_set_supply_names(data->supplies,
@@ -3201,7 +3202,7 @@ int bmp280_common_probe(struct device *dev,
return ret;
/* Wait to make sure we started up properly */
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
/* Bring chip out of reset if there is an assigned GPIO line */
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
@@ -3287,7 +3288,7 @@ int bmp280_common_probe(struct device *dev,
* Set autosuspend to two orders of magnitude larger than the
* start-up time.
*/
- pm_runtime_set_autosuspend_delay(dev, data->start_up_time / 10);
+ pm_runtime_set_autosuspend_delay(dev, data->start_up_time_us / 10);
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
@@ -3306,7 +3307,7 @@ static int bmp280_runtime_suspend(struct device *dev)
data->chip_info->set_mode(data, BMP280_SLEEP);
- fsleep(data->start_up_time);
+ fsleep(data->start_up_time_us);
return regulator_bulk_disable(BMP280_NUM_SUPPLIES, data->supplies);
}
@@ -3320,7 +3321,7 @@ static int bmp280_runtime_resume(struct device *dev)
if (ret)
return ret;
- usleep_range(data->start_up_time, data->start_up_time + 100);
+ fsleep(data->start_up_time_us);
ret = data->chip_info->chip_config(data);
if (ret)
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2df1175b6b85..5b2ee1d0ee46 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -434,7 +434,7 @@ struct bmp280_data {
struct bmp380_calib bmp380;
} calib;
struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
- unsigned int start_up_time; /* in microseconds */
+ unsigned int start_up_time_us;
/* log of base 2 of oversampling rate */
u8 oversampling_press;
@@ -470,8 +470,8 @@ struct bmp280_data {
/* Sensor data buffer */
u8 buf[BME280_BURST_READ_BYTES];
/* Calibration data buffers */
- __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
- __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
+ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / sizeof(__le16)];
+ __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / sizeof(__be16)];
u8 bme280_humid_cal_buf[BME280_CONTIGUOUS_CALIB_REGS];
u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
/* Miscellaneous, endianness-aware data buffers */
@@ -490,7 +490,7 @@ struct bmp280_chip_info {
const struct iio_chan_spec *channels;
int num_channels;
- unsigned int start_up_time;
+ unsigned int start_up_time_us;
const unsigned long *avail_scan_masks;
const int *oversampling_temp_avail;
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
index 9b40f46f575f..5db46784f4c6 100644
--- a/drivers/iio/pressure/hsc030pa.h
+++ b/drivers/iio/pressure/hsc030pa.h
@@ -58,7 +58,7 @@ struct hsc_data {
s32 p_offset_dec;
struct {
__be16 chan[2];
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 056c8271c49d..00c077b2a2a4 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -213,7 +213,7 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
/* Ensure buffer elements are naturally aligned */
struct {
s32 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
int ret;
diff --git a/drivers/iio/pressure/rohm-bm1390.c b/drivers/iio/pressure/rohm-bm1390.c
index f24d9f927681..9c1197f0e742 100644
--- a/drivers/iio/pressure/rohm-bm1390.c
+++ b/drivers/iio/pressure/rohm-bm1390.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -138,7 +139,7 @@ enum {
struct bm1390_data_buf {
u32 pressure;
__be16 temp;
- s64 ts __aligned(8);
+ aligned_s64 ts;
};
/* BM1390 has FIFO for 4 pressure samples */
@@ -263,14 +264,14 @@ static int bm1390_read_data(struct bm1390_data *data,
{
int ret, warn;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
/*
* We use 'continuous mode' even for raw read because according to the
* data-sheet an one-shot mode can't be used with IIR filter.
*/
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
if (ret)
- goto unlock_out;
+ return ret;
switch (chan->type) {
case IIO_PRESSURE:
@@ -287,10 +288,8 @@ static int bm1390_read_data(struct bm1390_data *data,
warn = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (warn)
dev_warn(data->dev, "Failed to stop measurement (%d)\n", warn);
-unlock_out:
- mutex_unlock(&data->mutex);
- return ret;
+ return 0;
}
static int bm1390_read_raw(struct iio_dev *idev,
@@ -543,38 +542,33 @@ static int bm1390_fifo_enable(struct iio_dev *idev)
if (data->irq <= 0)
return -EINVAL;
- mutex_lock(&data->mutex);
- if (data->trigger_enabled) {
- ret = -EBUSY;
- goto unlock_out;
- }
+ guard(mutex)(&data->mutex);
+
+ if (data->trigger_enabled)
+ return -EBUSY;
/* Update watermark to HW */
ret = bm1390_fifo_set_wmi(data);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable WMI_IRQ */
ret = regmap_set_bits(data->regmap, BM1390_REG_MODE_CTRL,
BM1390_MASK_WMI_EN);
if (ret)
- goto unlock_out;
+ return ret;
/* Enable FIFO */
ret = regmap_set_bits(data->regmap, BM1390_REG_FIFO_CTRL,
BM1390_MASK_FIFO_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state = BM1390_STATE_FIFO;
data->old_timestamp = iio_get_time_ns(idev);
- ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
}
static int bm1390_fifo_disable(struct iio_dev *idev)
@@ -584,27 +578,22 @@ static int bm1390_fifo_disable(struct iio_dev *idev)
msleep(1);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (ret)
- goto unlock_out;
+ return ret;
/* Disable FIFO */
ret = regmap_clear_bits(data->regmap, BM1390_REG_FIFO_CTRL,
BM1390_MASK_FIFO_EN);
if (ret)
- goto unlock_out;
+ return ret;
data->state = BM1390_STATE_SAMPLE;
/* Disable WMI_IRQ */
- ret = regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
+ return regmap_clear_bits(data->regmap, BM1390_REG_MODE_CTRL,
BM1390_MASK_WMI_EN);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
}
static int bm1390_buffer_postenable(struct iio_dev *idev)
@@ -688,25 +677,24 @@ static irqreturn_t bm1390_irq_thread_handler(int irq, void *private)
{
struct iio_dev *idev = private;
struct bm1390_data *data = iio_priv(idev);
- int ret = IRQ_NONE;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled) {
iio_trigger_poll_nested(data->trig);
- ret = IRQ_HANDLED;
- } else if (data->state == BM1390_STATE_FIFO) {
+ return IRQ_HANDLED;
+ }
+
+ if (data->state == BM1390_STATE_FIFO) {
int ok;
ok = __bm1390_fifo_flush(idev, BM1390_FIFO_LENGTH,
data->timestamp);
if (ok > 0)
- ret = IRQ_HANDLED;
+ return IRQ_HANDLED;
}
- mutex_unlock(&data->mutex);
-
- return ret;
+ return IRQ_NONE;
}
static int bm1390_set_drdy_irq(struct bm1390_data *data, bool en)
@@ -722,17 +710,16 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
bool state)
{
struct bm1390_data *data = iio_trigger_get_drvdata(trig);
- int ret = 0;
+ int ret;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (data->trigger_enabled == state)
- goto unlock_out;
+ return 0;
if (data->state == BM1390_STATE_FIFO) {
dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
- ret = -EBUSY;
- goto unlock_out;
+ return -EBUSY;
}
data->trigger_enabled = state;
@@ -740,13 +727,13 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
if (state) {
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_CONTINUOUS);
if (ret)
- goto unlock_out;
+ return ret;
} else {
int dummy;
ret = bm1390_meas_set(data, BM1390_MEAS_MODE_STOP);
if (ret)
- goto unlock_out;
+ return ret;
/*
* We need to read the status register in order to ACK the
@@ -758,12 +745,7 @@ static int bm1390_trigger_set_state(struct iio_trigger *trig,
dev_warn(data->dev, "status read failed\n");
}
- ret = bm1390_set_drdy_irq(data, state);
-
-unlock_out:
- mutex_unlock(&data->mutex);
-
- return ret;
+ return bm1390_set_drdy_irq(data, state);
}
static const struct iio_trigger_ops bm1390_trigger_ops = {
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 96fa97451cbf..9d3caf2bef18 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -63,7 +63,7 @@ struct as3935_state {
/* Ensure timestamp is naturally aligned */
struct {
u8 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/proximity/aw96103.c b/drivers/iio/proximity/aw96103.c
index cdd254da9e50..3472a2c36e44 100644
--- a/drivers/iio/proximity/aw96103.c
+++ b/drivers/iio/proximity/aw96103.c
@@ -433,7 +433,7 @@ static int aw96103_write_event_config(struct iio_dev *indio_dev,
state ? BIT(chan->channel) : 0);
}
-static struct iio_info iio_info = {
+static const struct iio_info iio_info = {
.read_raw = aw96103_read_raw,
.read_event_value = aw96103_read_event_val,
.write_event_value = aw96103_write_event_val,
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 4021feb7a7ac..e092a935dbac 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -14,6 +14,7 @@
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
@@ -100,6 +101,17 @@
#define HX9023S_INTERRUPT_MASK GENMASK(9, 0)
#define HX9023S_PROX_DEBOUNCE_MASK GENMASK(3, 0)
+#define FW_VER_OFFSET 2
+#define FW_REG_CNT_OFFSET 3
+#define FW_DATA_OFFSET 16
+
+struct hx9023s_bin {
+ u16 reg_count;
+ u16 fw_size;
+ u8 fw_ver;
+ u8 data[] __counted_by(fw_size);
+};
+
struct hx9023s_ch_data {
s16 raw; /* Raw Data*/
s16 lp; /* Low Pass Filter Data*/
@@ -134,7 +146,7 @@ struct hx9023s_data {
struct {
__le16 channels[HX9023S_CH_NUM];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
/*
@@ -998,6 +1010,77 @@ static int hx9023s_id_check(struct iio_dev *indio_dev)
return 0;
}
+static int hx9023s_bin_load(struct hx9023s_data *data, struct hx9023s_bin *bin)
+{
+ u8 *cfg_start = bin->data + FW_DATA_OFFSET;
+ u8 addr, val;
+ u16 i;
+ int ret;
+
+ for (i = 0; i < bin->reg_count; i++) {
+ addr = cfg_start[i * 2];
+ val = cfg_start[i * 2 + 1];
+ ret = regmap_write(data->regmap, addr, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data)
+{
+ struct hx9023s_bin *bin __free(kfree) =
+ kzalloc(fw->size + sizeof(*bin), GFP_KERNEL);
+ if (!bin)
+ return -ENOMEM;
+
+ memcpy(bin->data, fw->data, fw->size);
+ bin->fw_size = fw->size;
+ release_firmware(fw);
+
+ bin->fw_ver = bin->data[FW_VER_OFFSET];
+ bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
+
+ return hx9023s_bin_load(data, bin);
+}
+
+static void hx9023s_cfg_update(const struct firmware *fw, void *context)
+{
+ struct hx9023s_data *data = context;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ if (!fw || !fw->data) {
+ dev_warn(dev, "No firmware\n");
+ goto no_fw;
+ }
+
+ ret = hx9023s_send_cfg(fw, data);
+ if (ret) {
+ dev_warn(dev, "Firmware update failed: %d\n", ret);
+ goto no_fw;
+ }
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ dev_err(dev, "regcache sync failed\n");
+
+ return;
+
+no_fw:
+ ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
+ ARRAY_SIZE(hx9023s_reg_init_list));
+ if (ret) {
+ dev_err(dev, "Error loading default configuration\n");
+ return;
+ }
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ dev_err(dev, "regcache sync failed\n");
+}
+
static int hx9023s_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1036,18 +1119,14 @@ static int hx9023s_probe(struct i2c_client *client)
indio_dev->modes = INDIO_DIRECT_MODE;
i2c_set_clientdata(client, indio_dev);
- ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
- ARRAY_SIZE(hx9023s_reg_init_list));
- if (ret)
- return dev_err_probe(dev, ret, "device init failed\n");
-
ret = hx9023s_ch_cfg(data);
if (ret)
return dev_err_probe(dev, ret, "channel config failed\n");
- ret = regcache_sync(data->regmap);
+ ret = request_firmware_nowait(THIS_MODULE, true, "hx9023s.bin", dev,
+ GFP_KERNEL, data, hx9023s_cfg_update);
if (ret)
- return dev_err_probe(dev, ret, "regcache sync failed\n");
+ return dev_err_probe(dev, ret, "reg config failed\n");
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq,
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 614e65cb9d42..cfc75d001f20 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -45,7 +45,7 @@ struct mb1232_data {
/* Ensure correct alignment of data to push to IIO buffer */
struct {
s16 distance;
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
};
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 5c959730aecd..f3d054b06b4c 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -47,7 +47,7 @@ struct lidar_data {
/* Ensure timestamp is naturally aligned */
struct {
u16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
};
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index a75ea5042876..86cab113ef3d 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -66,7 +66,7 @@ struct srf08_data {
/* Ensure timestamp is naturally aligned */
struct {
s16 chan;
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
/* Sensor-Type */
diff --git a/drivers/iio/proximity/sx_common.h b/drivers/iio/proximity/sx_common.h
index fb14e6f06a6d..259b5c695233 100644
--- a/drivers/iio/proximity/sx_common.h
+++ b/drivers/iio/proximity/sx_common.h
@@ -125,7 +125,7 @@ struct sx_common_data {
/* Ensure correct alignment of timestamp when present. */
struct {
__be16 channels[SX_COMMON_MAX_NUM_CHANNELS];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} buffer;
unsigned int suspend_ctrl;
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index a414eef12e5e..b681129a99b6 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -164,7 +164,7 @@ struct ad2s1210_state {
struct {
__be16 chan[2];
/* Ensure timestamp is naturally aligned. */
- s64 timestamp __aligned(8);
+ aligned_s64 timestamp;
} scan;
/** SPI transmit buffer. */
u8 rx[2];
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 02b27f471baa..1998047a1f24 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -248,7 +248,7 @@ static irqreturn_t tmp006_trigger_handler(int irq, void *p)
struct tmp006_data *data = iio_priv(indio_dev);
struct {
s16 channels[2];
- s64 ts __aligned(8);
+ aligned_s64 ts;
} scan;
s32 ret;
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index bb60b2d7b2ec..e41cb741253b 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -38,6 +38,9 @@ static const void *triggers_table[][MAX_TRIGGERS] = {
{ TIM15_TRGO,},
{ TIM16_OC1,},
{ TIM17_OC1,},
+ { }, /* timer 18 */
+ { }, /* timer 19 */
+ { TIM20_TRGO, TIM20_TRGO2, TIM20_OC1, TIM20_OC2, TIM20_OC3, },
};
/* List the triggers accepted by each timer */
@@ -119,7 +122,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
unsigned int frequency)
{
unsigned long long prd, div;
- int prescaler = 0;
+ int prescaler = 0, ret;
u32 ccer;
/* Period and prescaler values depends of clock rate */
@@ -150,10 +153,12 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
if (ccer & TIM_CCER_CCXE)
return -EBUSY;
- mutex_lock(&priv->lock);
+ guard(mutex)(&priv->lock);
if (!priv->enabled) {
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_write(priv->regmap, TIM_PSC, prescaler);
@@ -173,7 +178,6 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
/* Enable controller */
regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
- mutex_unlock(&priv->lock);
return 0;
}
@@ -307,7 +311,7 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
struct iio_trigger *trig = to_iio_trigger(dev);
u32 mask, shift, master_mode_max;
- int i;
+ int i, ret;
if (stm32_timer_is_trgo2_name(trig->name)) {
mask = TIM_CR2_MMS2;
@@ -322,15 +326,16 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
for (i = 0; i <= master_mode_max; i++) {
if (!strncmp(master_mode_table[i], buf,
strlen(master_mode_table[i]))) {
- mutex_lock(&priv->lock);
+ guard(mutex)(&priv->lock);
if (!priv->enabled) {
/* Clock should be enabled first */
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
- mutex_unlock(&priv->lock);
return len;
}
}
@@ -482,6 +487,7 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -491,12 +497,14 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
/* fixed scale */
return -EINVAL;
- case IIO_CHAN_INFO_ENABLE:
- mutex_lock(&priv->lock);
+ case IIO_CHAN_INFO_ENABLE: {
+ guard(mutex)(&priv->lock);
if (val) {
if (!priv->enabled) {
priv->enabled = true;
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
}
regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
} else {
@@ -506,11 +514,12 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
clk_disable(priv->clk);
}
}
- mutex_unlock(&priv->lock);
+
return 0;
}
-
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
}
static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
@@ -602,6 +611,7 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
+ int ret;
if (sms < 0)
return sms;
@@ -609,12 +619,15 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
* Triggered mode sets the CEN bit automatically in hardware, so enable the
* counter clock first so it can be used. This keeps it in sync with CEN.
*/
- mutex_lock(&priv->lock);
- if (sms == 6 && !priv->enabled) {
- clk_enable(priv->clk);
- priv->enabled = true;
+ scoped_guard(mutex, &priv->lock) {
+ if (sms == 6 && !priv->enabled) {
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ priv->enabled = true;
+ }
}
- mutex_unlock(&priv->lock);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@@ -781,7 +794,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
return -EINVAL;
/* Create an IIO device only if we have triggers to be validated */
- if (*cfg->valids_table[index])
+ if (cfg->valids_table && *cfg->valids_table[index])
priv = stm32_setup_counter_device(dev);
else
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -794,7 +807,8 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
priv->triggers = triggers_table[index];
- priv->valids = cfg->valids_table[index];
+ if (cfg->valids_table && *cfg->valids_table[index])
+ priv->valids = cfg->valids_table[index];
stm32_timer_detect_trgo2(priv);
mutex_init(&priv->lock);
@@ -886,6 +900,16 @@ static const struct stm32_timer_trigger_cfg stm32h7_timer_trg_cfg = {
.num_valids_table = ARRAY_SIZE(stm32h7_valids_table),
};
+static const struct stm32_timer_trigger_cfg stm32mp25_timer_trg_cfg = {
+ /*
+ * valids_table is not used: the counter framework now supersedes the deprecated IIO
+ * counter interface (IIO_COUNT), so it is not supported here. num_valids_table is only
+ * kept to register the IIO HW triggers. valids_table should eventually be moved to
+ * the stm32-timer-cnt driver instead.
+ */
+ .num_valids_table = ARRAY_SIZE(triggers_table),
+};
+
static const struct of_device_id stm32_trig_of_match[] = {
{
.compatible = "st,stm32-timer-trigger",
@@ -893,6 +917,9 @@ static const struct of_device_id stm32_trig_of_match[] = {
}, {
.compatible = "st,stm32h7-timer-trigger",
.data = (void *)&stm32h7_timer_trg_cfg,
+ }, {
+ .compatible = "st,stm32mp25-timer-trigger",
+ .data = (void *)&stm32mp25_timer_trg_cfg,
},
{ /* end node */ },
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b7c078b7f7cf..f8413f8a9f26 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1127,41 +1127,6 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
-int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
- u16 pkey, u16 *index)
-{
- struct ib_pkey_cache *cache;
- unsigned long flags;
- int i;
- int ret = -ENOENT;
-
- if (!rdma_is_port_valid(device, port_num))
- return -EINVAL;
-
- read_lock_irqsave(&device->cache_lock, flags);
-
- cache = device->port_data[port_num].cache.pkey;
- if (!cache) {
- ret = -EINVAL;
- goto err;
- }
-
- *index = -1;
-
- for (i = 0; i < cache->table_len; ++i)
- if (cache->table[i] == pkey) {
- *index = i;
- ret = 0;
- break;
- }
-
-err:
- read_unlock_irqrestore(&device->cache_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_find_exact_cached_pkey);
-
int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ca9b956c034d..0ded91f056f3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -209,23 +209,6 @@ static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
printk("%s(NULL ib_device): %pV", level, vaf);
}
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
- const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- __ibdev_printk(level, ibdev, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ibdev_printk);
-
#define define_ibdev_printk_level(func, level) \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{ \
@@ -2296,6 +2279,33 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
EXPORT_SYMBOL(ib_device_get_netdev);
/**
+ * ib_query_netdev_port - Query the port number of a net_device
+ * associated with an ibdev
+ * @ibdev: IB device
+ * @ndev: Network device
+ * @port: IB port the net_device is connected to
+ */
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port)
+{
+ struct net_device *ib_ndev;
+ u32 port_num;
+
+ rdma_for_each_port(ibdev, port_num) {
+ ib_ndev = ib_device_get_netdev(ibdev, port_num);
+ if (ndev == ib_ndev) {
+ *port = port_num;
+ dev_put(ib_ndev);
+ return 0;
+ }
+ dev_put(ib_ndev);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ib_query_netdev_port);
+
+/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
* @ndev: netdev to locate
* @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
@@ -2761,6 +2771,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
+ SET_DEVICE_OP(dev_ops, report_port_event);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
@@ -2854,11 +2865,62 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
},
};
+void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
+{
+ enum ib_port_state curr_state;
+ struct ib_event ibevent = {};
+ u32 port;
+
+ if (ib_query_netdev_port(ibdev, ndev, &port))
+ return;
+
+ curr_state = ib_get_curr_port_state(ndev);
+
+ write_lock_irq(&ibdev->cache_lock);
+ if (ibdev->port_data[port].cache.last_port_state == curr_state) {
+ write_unlock_irq(&ibdev->cache_lock);
+ return;
+ }
+ ibdev->port_data[port].cache.last_port_state = curr_state;
+ write_unlock_irq(&ibdev->cache_lock);
+
+ ibevent.event = (curr_state == IB_PORT_DOWN) ?
+ IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
+ ibevent.device = ibdev;
+ ibevent.element.port_num = port;
+ ib_dispatch_event(&ibevent);
+}
+EXPORT_SYMBOL(ib_dispatch_port_state_event);
+
+static void handle_port_event(struct net_device *ndev, unsigned long event)
+{
+ struct ib_device *ibdev;
+
+ /* Currently, link events in bonding scenarios are still
+ * reported by drivers that support bonding.
+ */
+ if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
+ return;
+
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return;
+
+ if (ibdev->ops.report_port_event) {
+ ibdev->ops.report_port_event(ibdev, ndev, event);
+ goto put_ibdev;
+ }
+
+ ib_dispatch_port_state_event(ibdev, ndev);
+
+put_ibdev:
+ ib_device_put(ibdev);
+};
+
static int ib_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct net_device *ib_ndev;
struct ib_device *ibdev;
u32 port;
@@ -2868,15 +2930,21 @@ static int ib_netdevice_event(struct notifier_block *this,
if (!ibdev)
return NOTIFY_DONE;
- rdma_for_each_port(ibdev, port) {
- ib_ndev = ib_device_get_netdev(ibdev, port);
- if (ndev == ib_ndev)
- rdma_nl_notify_event(ibdev, port,
- RDMA_NETDEV_RENAME_EVENT);
- dev_put(ib_ndev);
+ if (ib_query_netdev_port(ibdev, ndev, &port)) {
+ ib_device_put(ibdev);
+ break;
}
+
+ rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
ib_device_put(ibdev);
break;
+
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_DOWN:
+ handle_port_event(ndev, event);
+ break;
+
default:
break;
}
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 64d9c492de64..8d3dfef9ebaa 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -462,86 +462,3 @@ int ib_ud_header_pack(struct ib_ud_header *header,
return len;
}
EXPORT_SYMBOL(ib_ud_header_pack);
-
-/**
- * ib_ud_header_unpack - Unpack UD header struct from wire format
- * @header:UD header struct
- * @buf:Buffer to pack into
- *
- * ib_ud_header_pack() unpacks the UD header structure @header from wire
- * format in the buffer @buf.
- */
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header)
-{
- ib_unpack(lrh_table, ARRAY_SIZE(lrh_table),
- buf, &header->lrh);
- buf += IB_LRH_BYTES;
-
- if (header->lrh.link_version != 0) {
- pr_warn("Invalid LRH.link_version %u\n",
- header->lrh.link_version);
- return -EINVAL;
- }
-
- switch (header->lrh.link_next_header) {
- case IB_LNH_IBA_LOCAL:
- header->grh_present = 0;
- break;
-
- case IB_LNH_IBA_GLOBAL:
- header->grh_present = 1;
- ib_unpack(grh_table, ARRAY_SIZE(grh_table),
- buf, &header->grh);
- buf += IB_GRH_BYTES;
-
- if (header->grh.ip_version != 6) {
- pr_warn("Invalid GRH.ip_version %u\n",
- header->grh.ip_version);
- return -EINVAL;
- }
- if (header->grh.next_header != 0x1b) {
- pr_warn("Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
- return -EINVAL;
- }
- break;
-
- default:
- pr_warn("Invalid LRH.link_next_header %u\n",
- header->lrh.link_next_header);
- return -EINVAL;
- }
-
- ib_unpack(bth_table, ARRAY_SIZE(bth_table),
- buf, &header->bth);
- buf += IB_BTH_BYTES;
-
- switch (header->bth.opcode) {
- case IB_OPCODE_UD_SEND_ONLY:
- header->immediate_present = 0;
- break;
- case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE:
- header->immediate_present = 1;
- break;
- default:
- pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
- return -EINVAL;
- }
-
- if (header->bth.transport_header_version != 0) {
- pr_warn("Invalid BTH.transport_header_version %u\n",
- header->bth.transport_header_version);
- return -EINVAL;
- }
-
- ib_unpack(deth_table, ARRAY_SIZE(deth_table),
- buf, &header->deth);
- buf += IB_DETH_BYTES;
-
- if (header->immediate_present)
- memcpy(&header->immediate_data, buf, sizeof header->immediate_data);
-
- return 0;
-}
-EXPORT_SYMBOL(ib_ud_header_unpack);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 11a080646916..e803f609ec87 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -171,45 +171,3 @@ void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
__ib_copy_path_rec_to_user(dst, src);
}
EXPORT_SYMBOL(ib_copy_path_rec_to_user);
-
-void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
- struct ib_user_path_rec *src)
-{
- u32 slid, dlid;
-
- memset(dst, 0, sizeof(*dst));
- if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
- (ib_is_opa_gid((union ib_gid *)src->dgid))) {
- dst->rec_type = SA_PATH_REC_TYPE_OPA;
- slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
- dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
- } else {
- dst->rec_type = SA_PATH_REC_TYPE_IB;
- slid = ntohs(src->slid);
- dlid = ntohs(src->dlid);
- }
- memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
- memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
-
- sa_path_set_dlid(dst, dlid);
- sa_path_set_slid(dst, slid);
- sa_path_set_raw_traffic(dst, src->raw_traffic);
- dst->flow_label = src->flow_label;
- dst->hop_limit = src->hop_limit;
- dst->traffic_class = src->traffic_class;
- dst->reversible = src->reversible;
- dst->numb_path = src->numb_path;
- dst->pkey = src->pkey;
- dst->sl = src->sl;
- dst->mtu_selector = src->mtu_selector;
- dst->mtu = src->mtu;
- dst->rate_selector = src->rate_selector;
- dst->rate = src->rate;
- dst->packet_life_time = src->packet_life_time;
- dst->preference = src->preference;
- dst->packet_life_time_selector = src->packet_life_time_selector;
-
- /* TODO: No need to set this */
- sa_path_set_dmac_zero(dst);
-}
-EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1211f4317a9f..aba96ca9bce5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 2975b11b79bf..b91a85a491d0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -204,7 +204,7 @@ struct bnxt_re_dev {
struct bnxt_re_nq_record *nqr;
/* Device Resources */
- struct bnxt_qplib_dev_attr dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
@@ -229,6 +229,9 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
struct dentry *dbg_root;
struct dentry *qp_debugfs;
+ unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 1e63f8091748..3ac47f4e6122 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -37,18 +37,9 @@
*
*/
-#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/delay.h>
-#include <rdma/ib_addr.h>
-
-#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -357,7 +348,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
!rdev->is_virtfn) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e3d26bd6de05..2de101d6e825 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -52,8 +52,6 @@
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
-#include "bnxt_ulp.h"
-
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -161,7 +159,7 @@ static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
struct bnxt_qplib_mrw *qplib_mr)
{
- if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+ if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
}
@@ -186,7 +184,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
@@ -275,7 +273,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -333,8 +331,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
- rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
- rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+ rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
+ rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
@@ -585,7 +583,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
@@ -1057,7 +1055,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
@@ -1277,7 +1275,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
rq = &qplqp->rq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (init_attr->srq) {
struct bnxt_re_srq *srq;
@@ -1314,7 +1312,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1340,7 +1338,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
entries = init_attr->cap.max_send_wr;
@@ -1393,7 +1391,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
@@ -1442,7 +1440,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
@@ -1612,7 +1610,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
ib_pd = ib_qp->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
@@ -1840,7 +1838,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
ib_pd = ib_srq->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
@@ -2044,7 +2042,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
unsigned int flags;
@@ -3091,7 +3089,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata = &attrs->driver_udata;
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_qplib_chip_ctx *cctx;
int cqe = attr->cqe;
int rc, entries;
@@ -3226,7 +3224,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!ibcq->uobject) {
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
return -EOPNOTSUPP;
@@ -4199,7 +4197,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
@@ -4291,7 +4289,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_re_ucontext *uctx =
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
struct bnxt_re_uctx_req ureq = {};
@@ -4467,9 +4465,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
- return -EFAULT;
- ret = vm_insert_page(vma, vma->vm_start,
- virt_to_page((void *)bnxt_entry->mem_offset));
+ ret = -EFAULT;
+ else
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
break;
default:
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index c143f273b759..e9e4da4dd576 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -79,17 +79,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);
-static void bnxt_re_stop_irq(void *handle);
-static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr);
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -153,6 +148,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
if (!rdev->chip_ctx)
return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
chip_ctx = rdev->chip_ctx;
rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
@@ -166,7 +165,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- int rc;
+ int rc = -ENOMEM;
en_dev = rdev->en_dev;
@@ -182,7 +181,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
- rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->qplib_res.dattr = rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
@@ -190,16 +192,20 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
- if (rc) {
- kfree(rdev->chip_ctx);
- rdev->chip_ctx = NULL;
- return rc;
- }
+ if (rc)
+ goto free_dev_attr;
if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
"platform doesn't support global atomics.");
return 0;
+free_dev_attr:
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
}
/* SR-IOV helper functions */
@@ -221,7 +227,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
struct bnxt_qplib_ctx *ctx;
int i;
- attr = &rdev->dev_attr;
+ attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
@@ -235,7 +241,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
+ rdev->dev_attr->tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
@@ -302,17 +308,123 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
&rdev->qplib_ctx);
}
-static void bnxt_re_shutdown(struct auxiliary_device *adev)
-{
- struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+struct bnxt_re_dcb_work {
+ struct work_struct work;
struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
- rdev = en_info->rdev;
- ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
}
-static void bnxt_re_stop_irq(void *handle)
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ destroy_workqueue(rdev->dcb_wq);
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_dcb_work *dcb_work;
+ u32 data1, data2;
+ u16 event_id;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnxt_re_stop_irq(void *handle, bool reset)
{
struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_qplib_rcfw *rcfw;
@@ -323,6 +435,14 @@ static void bnxt_re_stop_irq(void *handle)
rdev = en_info->rdev;
rcfw = &rdev->rcfw;
+ if (reset) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_DEVICE_FATAL);
+ }
+
for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
nq = &rdev->nqr->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
@@ -378,6 +498,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -839,17 +960,6 @@ static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
}
/* Device */
-
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
-{
- struct ib_device *ibdev =
- ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
- if (!ibdev)
- return NULL;
-
- return container_of(ibdev, struct bnxt_re_dev, ibdev);
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1627,12 +1737,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
- rdev->netdev, &rdev->dev_attr);
+ rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
if (rc)
goto fail;
@@ -1807,6 +1916,26 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1886,6 +2015,9 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_rem_pdev(rdev);
+ bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -2032,7 +2164,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rdev->pacing.dbr_pacing = false;
}
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto disable_rcfw;
@@ -2081,6 +2213,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
@@ -2099,6 +2236,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_debugfs_add_pdev(rdev);
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_net_register_async_event(rdev);
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
@@ -2117,6 +2257,30 @@ fail:
return rc;
}
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+{
+ struct bnxt_qplib_cc_param cc_param = {};
+
+ /* Do not enable congestion control on VFs */
+ if (rdev->is_virtfn)
+ return;
+
+ /* Currently enabling only for GenP5 adapters */
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (enable) {
+ cc_param.enable = 1;
+ cc_param.tos_ecn = 1;
+ }
+
+ cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
+ ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
+}
+
static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
struct bnxt_re_en_dev_info *en_info,
struct auxiliary_device *adev)
@@ -2163,20 +2327,10 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
goto re_dev_uninit;
}
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
- goto re_dev_unreg;
- }
bnxt_re_setup_cc(rdev, true);
return 0;
-re_dev_unreg:
- ib_unregister_device(&rdev->ibdev);
re_dev_uninit:
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -2186,79 +2340,6 @@ exit:
return rc;
}
-static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
-{
- struct bnxt_qplib_cc_param cc_param = {};
-
- /* Do not enable congestion control on VFs */
- if (rdev->is_virtfn)
- return;
-
- /* Currently enabling only for GenP5 adapters */
- if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- return;
-
- if (enable) {
- cc_param.enable = 1;
- cc_param.tos_ecn = 1;
- }
-
- cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
- CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
-
- if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
- ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
-}
-
-/*
- * "Notifier chain callback can be invoked for the same chain from
- * different CPUs at the same time".
- *
- * For cases when the netdev is already present, our call to the
- * register_netdevice_notifier() will actually get the rtnl_lock()
- * before sending NETDEV_REGISTER and (if up) NETDEV_UP
- * events.
- *
- * But for cases when the netdev is not already present, the notifier
- * chain is subjected to be invoked from different CPUs simultaneously.
- *
- * This is protected by the netdev_mutex.
- */
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
- struct bnxt_re_dev *rdev;
-
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
-
- if (real_dev != netdev)
- goto exit;
-
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev)
- return NOTIFY_DONE;
-
-
- switch (event) {
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- netif_carrier_ok(real_dev) ?
- IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR);
- break;
- default:
- break;
- }
- ib_device_put(&rdev->ibdev);
-exit:
- return NOTIFY_DONE;
-}
-
#define BNXT_ADEV_NAME "bnxt_en"
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
@@ -2316,13 +2397,9 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
if (rc)
- goto err;
- mutex_unlock(&bnxt_re_mutex);
- return 0;
+ kfree(en_info);
-err:
mutex_unlock(&bnxt_re_mutex);
- kfree(en_info);
return rc;
}
@@ -2375,6 +2452,16 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
return 0;
}
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+}
+
static const struct auxiliary_device_id bnxt_re_id_table[] = {
{ .name = BNXT_ADEV_NAME ".rdma", },
{},
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 0660101b5310..0d9487c889ff 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -343,6 +343,7 @@ struct bnxt_qplib_qp {
u32 msn;
u32 msn_tbl_sz;
bool is_host_msn_tbl;
+ u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 96ceec1e8199..02922a0987ad 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr)
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_dev_attr *dev_attr;
int rc;
- res->pdev = pdev;
res->netdev = netdev;
+ dev_attr = res->dattr;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index cbfc49a1a56d..be5d907a036b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -424,9 +424,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr);
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9df3e3271577..4ccd4405355a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
fw_ver[3] = resp.fw_rsvd;
}
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
@@ -1022,3 +1022,116 @@ free_mem:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index e6beeb514b7d..e626b05038a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -296,6 +296,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
@@ -325,8 +326,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
@@ -355,6 +355,8 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80970a1738f8..034b85c42255 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* The math here assumes sizeof cpl_pass_accept_req >= sizeof
* cpl_rx_pkt.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header)) - pktshift,
+ GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5c4522b426..955f061a55e9 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int count;
int rq_flushed = 0, sq_flushed;
unsigned long flag;
+ struct ib_event ev;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
if (schp != rchp)
spin_lock(&schp->lock);
spin_lock(&qhp->lock);
+ if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
+ qhp->ibqp.event_handler) {
+ ev.device = qhp->ibqp.device;
+ ev.element.qp = &qhp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
+ }
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index d7fc9d5eeefd..838182d0409c 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -57,15 +57,15 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
- unsigned int num_irq_vectors;
- int admin_msix_vector_idx;
+ u32 num_irq_vectors;
+ u32 admin_msix_vector_idx;
struct efa_irq admin_irq;
struct efa_stats stats;
/* Array of completion EQs */
struct efa_eq *eqs;
- unsigned int neqs;
+ u32 neqs;
/* Only stores CQs with interrupts enabled */
struct xarray cqs_xa;
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index 77282234ce68..4d9ca97e4296 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -65,7 +65,7 @@ struct efa_com_admin_queue {
u16 depth;
struct efa_com_admin_cq cq;
struct efa_com_admin_sq sq;
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
unsigned long state;
@@ -89,7 +89,7 @@ struct efa_com_aenq {
struct efa_aenq_handlers *aenq_handlers;
dma_addr_t dma_addr;
u32 cc; /* consumer counter */
- u16 msix_vector_idx;
+ u32 msix_vector_idx;
u16 depth;
u8 phase;
};
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index ad225823e6f2..4f03c0ec819f 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -141,8 +141,7 @@ static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
return 0;
}
-static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
- int vector)
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, u32 vector)
{
u32 cpu;
@@ -305,7 +304,7 @@ static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
efa_free_irq(dev, &eq->irq);
}
-static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u32 msix_vec)
{
int err;
@@ -328,21 +327,17 @@ err_free_comp_irq:
static int efa_create_eqs(struct efa_dev *dev)
{
- unsigned int neqs = dev->dev_attr.max_eq;
- int err;
- int i;
-
- neqs = min_t(unsigned int, neqs,
- dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+ u32 neqs = dev->dev_attr.max_eq;
+ int err, i;
+ neqs = min_t(u32, neqs, dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
return -ENOMEM;
for (i = 0; i < neqs; i++) {
- err = efa_create_eq(dev, &dev->eqs[i],
- i + EFA_COMP_EQS_VEC_BASE);
+ err = efa_create_eq(dev, &dev->eqs[i], i + EFA_COMP_EQS_VEC_BASE);
if (err)
goto err_destroy_eqs;
}
@@ -470,7 +465,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
- efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -643,12 +637,14 @@ err_disable_device:
return ERR_PTR(err);
}
-static void efa_remove_device(struct pci_dev *pdev)
+static void efa_remove_device(struct pci_dev *pdev,
+ enum efa_regs_reset_reason_types reset_reason)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
+ efa_com_dev_reset(edev, reset_reason);
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
@@ -676,7 +672,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_remove_device:
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
return err;
}
@@ -685,7 +681,7 @@ static void efa_remove(struct pci_dev *pdev)
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
- efa_remove_device(pdev);
+ efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}
static void efa_shutdown(struct pci_dev *pdev)
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
index 169038e3ceb1..267fc1f3c42a 100644
--- a/drivers/infiniband/hw/erdma/Kconfig
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -5,7 +5,7 @@ config INFINIBAND_ERDMA
depends on INFINIBAND_ADDR_TRANS
depends on INFINIBAND_USER_ACCESS
help
- This is a RDMA/iWarp driver for Alibaba Elastic RDMA Adapter(ERDMA),
+ This is an RDMA driver for the Alibaba Elastic RDMA Adapter (ERDMA),
which supports RDMA features in the Alibaba cloud environment.
To compile this driver as module, choose M here. The module will be
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 3c166359448d..2a023b99f992 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -16,7 +16,7 @@
#include "erdma_hw.h"
#define DRV_MODULE_NAME "erdma"
-#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
struct erdma_eq {
void *qbuf;
@@ -101,8 +101,6 @@ struct erdma_cmdq {
struct erdma_comp_wait *wait_pool;
spinlock_t lock;
- bool use_event;
-
struct erdma_cmdq_sq sq;
struct erdma_cmdq_cq cq;
struct erdma_eq eq;
@@ -148,6 +146,8 @@ struct erdma_devattr {
u32 max_mr;
u32 max_pd;
u32 max_mw;
+ u32 max_gid;
+ u32 max_ah;
u32 local_dma_key;
};
@@ -177,7 +177,8 @@ struct erdma_resource_cb {
enum {
ERDMA_RES_TYPE_PD = 0,
ERDMA_RES_TYPE_STAG_IDX = 1,
- ERDMA_RES_CNT = 2,
+ ERDMA_RES_TYPE_AH = 2,
+ ERDMA_RES_CNT = 3,
};
struct erdma_dev {
@@ -192,8 +193,6 @@ struct erdma_dev {
u8 __iomem *func_bar;
struct erdma_devattr attrs;
- /* physical port state (only one port per device) */
- enum ib_port_state state;
u32 mtu;
/* cmdq and aeq use the same msix vector */
@@ -215,6 +214,7 @@ struct erdma_dev {
struct dma_pool *db_pool;
struct dma_pool *resp_pool;
+ enum erdma_proto_type proto;
};
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
@@ -265,7 +265,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1);
+ u64 *resp0, u64 *resp1, bool sleepable);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
int erdma_ceqs_init(struct erdma_dev *dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 771059a8eb7d..1b23c698ec25 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -567,7 +567,8 @@ reject_conn:
static int erdma_proc_mpareply(struct erdma_cep *cep)
{
- struct erdma_qp_attrs qp_attrs;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_mod_qp_params_iwarp params;
struct erdma_qp *qp = cep->qp;
struct mpa_rr *rep;
int ret;
@@ -597,26 +598,29 @@ static int erdma_proc_mpareply(struct erdma_cep *cep)
return -EINVAL;
}
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.irq_size = cep->ird;
- qp_attrs.orq_size = cep->ord;
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ memset(&params, 0, sizeof(params));
+ params.state = ERDMA_QPS_IWARP_RTS;
+ params.irq_size = cep->ird;
+ params.orq_size = cep->ord;
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto out_err;
}
- qp->attrs.qp_type = ERDMA_QP_ACTIVE;
- if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_LLP_HANDLE |
+ ERDMA_QPA_IWARP_MPA | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_ORD;
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_MPA);
+ params.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ params.cc = COMPROMISE_CC;
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params, to_modify_attrs);
up_write(&qp->state_lock);
@@ -722,7 +726,7 @@ static int erdma_newconn_connected(struct erdma_cep *cep)
__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
@@ -1126,10 +1130,11 @@ error_put_qp:
int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
- struct erdma_dev *dev = to_edev(id->device);
struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_mod_qp_params_iwarp mod_qp_params;
+ enum erdma_qpa_mask_iwarp to_modify_attrs = 0;
+ struct erdma_dev *dev = to_edev(id->device);
struct erdma_qp *qp;
- struct erdma_qp_attrs qp_attrs;
int ret;
erdma_cep_set_inuse(cep);
@@ -1156,7 +1161,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
erdma_qp_get(qp);
down_write(&qp->state_lock);
- if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ if (qp->attrs.iwarp.state > ERDMA_QPS_IWARP_RTR) {
ret = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1181,11 +1186,11 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->cm_id = id;
id->add_ref(id);
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.orq_size = params->ord;
- qp_attrs.irq_size = params->ird;
+ memset(&mod_qp_params, 0, sizeof(mod_qp_params));
- qp_attrs.state = ERDMA_QP_STATE_RTS;
+ mod_qp_params.irq_size = params->ird;
+ mod_qp_params.orq_size = params->ord;
+ mod_qp_params.state = ERDMA_QPS_IWARP_RTS;
/* Associate QP with CEP */
erdma_cep_get(cep);
@@ -1194,19 +1199,21 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->state = ERDMA_EPSTATE_RDMA_MODE;
- qp->attrs.qp_type = ERDMA_QP_PASSIVE;
- qp->attrs.pd_len = params->private_data_len;
+ mod_qp_params.qp_type = ERDMA_QP_PASSIVE;
+ mod_qp_params.pd_len = params->private_data_len;
- if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
- qp->attrs.cc = COMPROMISE_CC;
+ to_modify_attrs = ERDMA_QPA_IWARP_STATE | ERDMA_QPA_IWARP_ORD |
+ ERDMA_QPA_IWARP_LLP_HANDLE | ERDMA_QPA_IWARP_IRD |
+ ERDMA_QPA_IWARP_MPA;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits)) {
+ to_modify_attrs |= ERDMA_QPA_IWARP_CC;
+ mod_qp_params.cc = COMPROMISE_CC;
+ }
/* move to rts */
- ret = erdma_modify_qp_internal(qp, &qp_attrs,
- ERDMA_QP_ATTR_STATE |
- ERDMA_QP_ATTR_ORD |
- ERDMA_QP_ATTR_LLP_HANDLE |
- ERDMA_QP_ATTR_IRD |
- ERDMA_QP_ATTR_MPA);
+ ret = erdma_modify_qp_state_iwarp(qp, &mod_qp_params, to_modify_attrs);
+
up_write(&qp->state_lock);
if (ret)
@@ -1214,7 +1221,7 @@ int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep->mpa.ext_data.bits = 0;
__mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
- cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.iwarp.cookie);
ret = erdma_send_mpareqrep(cep, params->private_data,
params->private_data_len);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index a3d8922d1ad1..b867aefe83b2 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -182,7 +182,6 @@ int erdma_cmdq_init(struct erdma_dev *dev)
int err;
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
- cmdq->use_event = false;
sema_init(&cmdq->credits, cmdq->max_outstandings);
@@ -223,8 +222,6 @@ err_destroy_sq:
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
- /* after device init successfully, change cmdq to event mode. */
- dev->cmdq.use_event = true;
arm_cmdq_cq(&dev->cmdq);
}
@@ -312,8 +309,7 @@ static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
/* Copy 16B comp data after cqe hdr to outer */
be32_to_cpu_array(comp_wait->comp_data, cqe + 2, 4);
- if (cmdq->use_event)
- complete(&comp_wait->wait_event);
+ complete(&comp_wait->wait_event);
return 0;
}
@@ -332,9 +328,6 @@ static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
if (erdma_poll_single_cmd_completion(cmdq))
break;
- if (comp_num && cmdq->use_event)
- arm_cmdq_cq(cmdq);
-
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}
@@ -342,8 +335,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
int got_event = 0;
- if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
- !cmdq->use_event)
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return;
while (get_next_valid_eqe(&cmdq->eq)) {
@@ -354,6 +346,7 @@ void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
if (got_event) {
cmdq->cq.cmdsn++;
erdma_polling_cmd_completions(cmdq);
+ arm_cmdq_cq(cmdq);
}
notify_eq(&cmdq->eq);
@@ -372,7 +365,7 @@ static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
if (time_is_before_jiffies(comp_timeout))
return -ETIME;
- msleep(20);
+ udelay(20);
}
return 0;
@@ -403,7 +396,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
}
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
- u64 *resp0, u64 *resp1)
+ u64 *resp0, u64 *resp1, bool sleepable)
{
struct erdma_comp_wait *comp_wait;
int ret;
@@ -411,7 +404,12 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return -ENODEV;
- down(&cmdq->credits);
+ if (!sleepable) {
+ while (down_trylock(&cmdq->credits))
+ ;
+ } else {
+ down(&cmdq->credits);
+ }
comp_wait = get_comp_wait(cmdq);
if (IS_ERR(comp_wait)) {
@@ -425,7 +423,7 @@ int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
push_cmdq_sqe(cmdq, req, req_size, comp_wait);
spin_unlock(&cmdq->sq.lock);
- if (cmdq->use_event)
+ if (sleepable)
ret = erdma_wait_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
else
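
With cmdq->use_event removed, the choice between sleeping and busy-polling is now made per command through the new sleepable argument: true waits for the command completion event, false spins on down_trylock() for a credit and polls the completion queue with udelay(), which is the only option before interrupts are armed or on paths that cannot sleep. A minimal sketch of the calling convention, mirroring the QUERY_FW_INFO request used elsewhere in this patch (the example function itself is hypothetical and not part of the patch):

static int example_query_fw_info(struct erdma_dev *dev, bool may_sleep)
{
	u64 req_hdr, fw_info0, fw_info1;

	erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_QUERY_FW_INFO);

	/*
	 * may_sleep == true: wait for the command completion event.
	 * may_sleep == false: busy-wait for a credit and poll the CQ.
	 */
	return erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr),
				   &fw_info0, &fw_info1, may_sleep);
}
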
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 70f89f0162aa..1f456327e63c 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -105,6 +105,22 @@ static const struct {
{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};
+static void erdma_process_ud_cqe(struct erdma_cqe *cqe, struct ib_wc *wc)
+{
+ u32 ud_info;
+
+ wc->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ ud_info = be32_to_cpu(cqe->ud.info);
+ wc->network_hdr_type = FIELD_GET(ERDMA_CQE_NTYPE_MASK, ud_info);
+ if (wc->network_hdr_type == ERDMA_NETWORK_TYPE_IPV4)
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ wc->src_qp = FIELD_GET(ERDMA_CQE_SQPN_MASK, ud_info);
+ wc->sl = FIELD_GET(ERDMA_CQE_SL_MASK, ud_info);
+ wc->pkey_index = 0;
+}
+
#define ERDMA_POLLCQ_NO_QP 1
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
@@ -168,6 +184,10 @@ static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
+ if (erdma_device_rocev2(dev) &&
+ (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_GSI))
+ erdma_process_ud_cqe(cqe, wc);
+
if (syndrome >= ERDMA_NUM_WC_STATUS)
syndrome = ERDMA_WC_GENERAL_ERR;
@@ -201,3 +221,48 @@ int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return npolled;
}
+
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_cqe *cqe, *dst_cqe;
+ u32 prev_cq_ci, cur_cq_ci;
+ u32 ncqe = 0, nqp_cqe = 0;
+ unsigned long flags;
+ u8 owner;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ prev_cq_ci = cq->kern_cq.ci;
+
+ while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) {
+ ++cq->kern_cq.ci;
+ ++ncqe;
+ }
+
+ while (ncqe > 0) {
+ cur_cq_ci = prev_cq_ci + ncqe - 1;
+ cqe = get_queue_entry(cq->kern_cq.qbuf, cur_cq_ci, cq->depth,
+ CQE_SHIFT);
+
+ if (be32_to_cpu(cqe->qpn) == qpn) {
+ ++nqp_cqe;
+ } else if (nqp_cqe) {
+ dst_cqe = get_queue_entry(cq->kern_cq.qbuf,
+ cur_cq_ci + nqp_cqe,
+ cq->depth, CQE_SHIFT);
+ owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ be32_to_cpu(dst_cqe->hdr));
+ cqe->hdr = cpu_to_be32(
+ (be32_to_cpu(cqe->hdr) &
+ ~ERDMA_CQE_HDR_OWNER_MASK) |
+ FIELD_PREP(ERDMA_CQE_HDR_OWNER_MASK, owner));
+ memcpy(dst_cqe, cqe, sizeof(*cqe));
+ }
+
+ --ncqe;
+ }
+
+ cq->kern_cq.ci = prev_cq_ci + nqp_cqe;
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+}
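
erdma_remove_cqes_of_qp() above walks the consumed CQEs from newest to oldest, shifting CQEs that belong to other QPs over those of the torn-down QP (adopting the owner bit of the destination slot) and finally advancing the consumer index past the removed entries. A small worked example, illustrative only and not part of the patch:

/*
 * Consumed window (old -> new), removing the CQEs of QP A:
 *
 *   slot: ci+0  ci+1  ci+2  ci+3
 *   qpn:   A     B     A     C
 *
 * Scanning from ci+3 down to ci+0:
 *   ci+3 (C): nqp_cqe = 0, nothing moved
 *   ci+2 (A): nqp_cqe = 1
 *   ci+1 (B): copied to ci+2, taking ci+2's owner bit
 *   ci+0 (A): nqp_cqe = 2
 *
 * kern_cq.ci ends up at ci+2, so polling resumes at B, then C,
 * and the two CQEs of QP A are never reported.
 */
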
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 9a72fec6d5cc..6486234a2360 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -236,7 +236,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -278,7 +279,8 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ false);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 05978f3b1475..ea4db53901a4 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/if_ether.h>
/* PCIe device related definition. */
#define ERDMA_PCI_WIDTH 64
@@ -21,8 +22,21 @@
#define ERDMA_NUM_MSIX_VEC 32U
#define ERDMA_MSIX_VECTOR_CMDQ 0
+/* RoCEv2 related */
+#define ERDMA_ROCEV2_GID_SIZE 16
+#define ERDMA_MAX_PKEYS 1
+#define ERDMA_DEFAULT_PKEY 0xFFFF
+
+/* erdma device protocol type */
+enum erdma_proto_type {
+ ERDMA_PROTO_IWARP = 0,
+ ERDMA_PROTO_ROCEV2 = 1,
+ ERDMA_PROTO_COUNT = 2,
+};
+
/* PCIe Bar0 Registers. */
#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_PROTO_REG 0xC
#define ERDMA_REGS_DEV_CTRL_REG 0x10
#define ERDMA_REGS_DEV_ST_REG 0x14
#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
@@ -136,7 +150,11 @@ enum CMDQ_RDMA_OPCODE {
CMDQ_OPCODE_DESTROY_CQ = 5,
CMDQ_OPCODE_REFLUSH = 6,
CMDQ_OPCODE_REG_MR = 8,
- CMDQ_OPCODE_DEREG_MR = 9
+ CMDQ_OPCODE_DEREG_MR = 9,
+ CMDQ_OPCODE_SET_GID = 14,
+ CMDQ_OPCODE_CREATE_AH = 15,
+ CMDQ_OPCODE_DESTROY_AH = 16,
+ CMDQ_OPCODE_QUERY_QP = 17,
};
enum CMDQ_COMMON_OPCODE {
@@ -284,6 +302,36 @@ struct erdma_cmdq_dereg_mr_req {
u32 cfg;
};
+/* create_av cfg0 */
+#define ERDMA_CMD_CREATE_AV_FL_MASK GENMASK(19, 0)
+#define ERDMA_CMD_CREATE_AV_NTYPE_MASK BIT(20)
+
+struct erdma_av_cfg {
+ u32 cfg0;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 sl;
+ u8 rsvd;
+ u16 udp_sport;
+ u16 sgid_index;
+ u8 dmac[ETH_ALEN];
+ u8 padding[2];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+};
+
+struct erdma_cmdq_create_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+ struct erdma_av_cfg av_cfg;
+};
+
+struct erdma_cmdq_destroy_ah_req {
+ u64 hdr;
+ u32 pdn;
+ u32 ahn;
+};
+
/* modify qp cfg */
#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
@@ -301,6 +349,36 @@ struct erdma_cmdq_modify_qp_req {
u32 recv_nxt;
};
+/* modify qp cfg1 for roce device */
+#define ERDMA_CMD_MODIFY_QP_DQPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_mod_qp_req_rocev2 {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 attr_mask;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ struct erdma_av_cfg av_cfg;
+};
+
+/* query qp response mask */
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK GENMASK_ULL(23, 0)
+#define ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK GENMASK_ULL(47, 24)
+#define ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK GENMASK_ULL(55, 48)
+#define ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK GENMASK_ULL(56, 56)
+
+struct erdma_cmdq_query_qp_req_rocev2 {
+ u64 hdr;
+ u32 qpn;
+};
+
+enum erdma_qp_type {
+ ERDMA_QPT_RC = 0,
+ ERDMA_QPT_UD = 1,
+};
+
/* create qp cfg0 */
#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
@@ -309,6 +387,9 @@ struct erdma_cmdq_modify_qp_req {
#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+/* create qp cfg2 */
+#define ERDMA_CMD_CREATE_QP_TYPE_MASK GENMASK(3, 0)
+
/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
@@ -342,6 +423,7 @@ struct erdma_cmdq_create_qp_req {
u64 rq_mtt_entry[3];
u32 db_cfg;
+ u32 cfg2;
};
struct erdma_cmdq_destroy_qp_req {
@@ -394,10 +476,33 @@ struct erdma_cmdq_query_stats_resp {
u64 rx_pps_meter_drop_packets_cnt;
};
+enum erdma_network_type {
+ ERDMA_NETWORK_TYPE_IPV4 = 0,
+ ERDMA_NETWORK_TYPE_IPV6 = 1,
+};
+
+enum erdma_set_gid_op {
+ ERDMA_SET_GID_OP_ADD = 0,
+ ERDMA_SET_GID_OP_DEL = 1,
+};
+
+/* set gid cfg */
+#define ERDMA_CMD_SET_GID_SGID_IDX_MASK GENMASK(15, 0)
+#define ERDMA_CMD_SET_GID_NTYPE_MASK BIT(16)
+#define ERDMA_CMD_SET_GID_OP_MASK BIT(31)
+
+struct erdma_cmdq_set_gid_req {
+ u64 hdr;
+ u32 cfg;
+ u8 gid[ERDMA_ROCEV2_GID_SIZE];
+};
+
/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_GID_MASK GENMASK_ULL(51, 48)
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_AH_MASK GENMASK_ULL(15, 8)
#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
/* cap qword 1 definition */
@@ -426,6 +531,10 @@ enum {
#define ERDMA_CQE_QTYPE_RQ 1
#define ERDMA_CQE_QTYPE_CMDQ 2
+#define ERDMA_CQE_NTYPE_MASK BIT(31)
+#define ERDMA_CQE_SL_MASK GENMASK(27, 20)
+#define ERDMA_CQE_SQPN_MASK GENMASK(19, 0)
+
struct erdma_cqe {
__be32 hdr;
__be32 qe_idx;
@@ -435,7 +544,16 @@ struct erdma_cqe {
__be32 inv_rkey;
};
__be32 size;
- __be32 rsvd[3];
+ union {
+ struct {
+ __be32 rsvd[3];
+ } rc;
+
+ struct {
+ __be32 rsvd[2];
+ __be32 info;
+ } ud;
+ };
};
struct erdma_sge {
@@ -487,7 +605,7 @@ struct erdma_write_sqe {
struct erdma_sge sgl[];
};
-struct erdma_send_sqe {
+struct erdma_send_sqe_rc {
__le64 hdr;
union {
__be32 imm_data;
@@ -498,6 +616,17 @@ struct erdma_send_sqe {
struct erdma_sge sgl[];
};
+struct erdma_send_sqe_ud {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+ __le32 qkey;
+ __le32 dst_qpn;
+ __le32 ahn;
+ __le32 rsvd;
+ struct erdma_sge sgl[];
+};
+
struct erdma_readreq_sqe {
__le64 hdr;
__le32 invalid_stag;
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 62f497a71004..f35b30235018 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -26,14 +26,6 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
goto done;
switch (event) {
- case NETDEV_UP:
- dev->state = IB_PORT_ACTIVE;
- erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
- break;
- case NETDEV_DOWN:
- dev->state = IB_PORT_DOWN;
- erdma_port_event(dev, IB_EVENT_PORT_ERR);
- break;
case NETDEV_CHANGEMTU:
if (dev->mtu != netdev->mtu) {
erdma_set_mtu(dev, netdev->mtu);
@@ -172,6 +164,8 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
+ dev->proto = erdma_reg_read32(dev, ERDMA_REGS_DEV_PROTO_REG);
+
dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
0);
@@ -390,7 +384,7 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
CMDQ_OPCODE_QUERY_DEVICE);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (err)
return err;
@@ -398,6 +392,8 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.max_gid = 1 << ERDMA_GET_CAP(MAX_GID, cap0);
+ dev->attrs.max_ah = 1 << ERDMA_GET_CAP(MAX_AH, cap0);
dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
@@ -415,12 +411,13 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+ dev->res_cb[ERDMA_RES_TYPE_AH].max_cap = dev->attrs.max_ah;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_QUERY_FW_INFO);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
- &cap1);
+ &cap1, true);
if (!err)
dev->attrs.fw_version =
FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
@@ -441,7 +438,8 @@ static int erdma_device_config(struct erdma_dev *dev)
req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_res_cb_init(struct erdma_dev *dev)
@@ -474,6 +472,29 @@ static void erdma_res_cb_free(struct erdma_dev *dev)
bitmap_free(dev->res_cb[i].bitmap);
}
+static const struct ib_device_ops erdma_device_ops_rocev2 = {
+ .get_link_layer = erdma_get_link_layer,
+ .add_gid = erdma_add_gid,
+ .del_gid = erdma_del_gid,
+ .query_pkey = erdma_query_pkey,
+ .create_ah = erdma_create_ah,
+ .destroy_ah = erdma_destroy_ah,
+ .query_ah = erdma_query_ah,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, erdma_ah, ibah),
+};
+
+static const struct ib_device_ops erdma_device_ops_iwarp = {
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+};
+
static const struct ib_device_ops erdma_device_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_ERDMA,
@@ -494,18 +515,9 @@ static const struct ib_device_ops erdma_device_ops = {
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
- .iw_accept = erdma_accept,
- .iw_add_ref = erdma_qp_get_ref,
- .iw_connect = erdma_connect,
- .iw_create_listen = erdma_create_listen,
- .iw_destroy_listen = erdma_destroy_listen,
- .iw_get_qp = erdma_get_ibqp,
- .iw_reject = erdma_reject,
- .iw_rem_ref = erdma_qp_put_ref,
.map_mr_sg = erdma_map_mr_sg,
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
- .modify_qp = erdma_modify_qp,
.post_recv = erdma_post_recv,
.post_send = erdma_post_send,
.poll_cq = erdma_poll_cq,
@@ -515,6 +527,7 @@ static const struct ib_device_ops erdma_device_ops = {
.query_qp = erdma_query_qp,
.req_notify_cq = erdma_req_notify_cq,
.reg_user_mr = erdma_reg_user_mr,
+ .modify_qp = erdma_modify_qp,
INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
@@ -537,7 +550,14 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;
- ibdev->node_type = RDMA_NODE_RNIC;
+ if (erdma_device_iwarp(dev)) {
+ ibdev->node_type = RDMA_NODE_RNIC;
+ ib_set_device_ops(ibdev, &erdma_device_ops_iwarp);
+ } else {
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ib_set_device_ops(ibdev, &erdma_device_ops_rocev2);
+ }
+
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
/*
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 4d1f9114cd97..25f6c49aec77 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -11,20 +11,20 @@
void erdma_qp_llp_close(struct erdma_qp *qp)
{
- struct erdma_qp_attrs qp_attrs;
+ struct erdma_mod_qp_params_iwarp params;
down_write(&qp->state_lock);
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_RTS:
- case ERDMA_QP_STATE_RTR:
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_TERMINATE:
- qp_attrs.state = ERDMA_QP_STATE_CLOSING;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_RTS:
+ case ERDMA_QPS_IWARP_RTR:
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_TERMINATE:
+ params.state = ERDMA_QPS_IWARP_CLOSING;
+ erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
break;
- case ERDMA_QP_STATE_CLOSING:
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ case ERDMA_QPS_IWARP_CLOSING:
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
break;
default:
break;
@@ -48,9 +48,10 @@ struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
return NULL;
}
-static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
int ret;
struct erdma_dev *dev = qp->dev;
@@ -59,12 +60,15 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
struct erdma_cep *cep = qp->cep;
struct sockaddr_storage local_addr, remote_addr;
- if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ if (!(mask & ERDMA_QPA_IWARP_LLP_HANDLE))
return -EINVAL;
- if (!(mask & ERDMA_QP_ATTR_MPA))
+ if (!(mask & ERDMA_QPA_IWARP_MPA))
return -EINVAL;
+ if (!(mask & ERDMA_QPA_IWARP_CC))
+ params->cc = qp->attrs.cc;
+
ret = getname_local(cep->sock, &local_addr);
if (ret < 0)
return ret;
@@ -73,18 +77,16 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
if (ret < 0)
return ret;
- qp->attrs.state = ERDMA_QP_STATE_RTS;
-
tp = tcp_sk(qp->cep->sock->sk);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
- FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, params->cc) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.cookie = be32_to_cpu(cep->mpa.ext_data.cookie);
req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
req.dport = to_sockaddr_in(remote_addr).sin_port;
@@ -92,33 +94,57 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt = tp->snd_nxt;
/* rsvd tcp seq for mpa-rsp in server. */
- if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
- req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ if (params->qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + params->pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ if (mask & ERDMA_QPA_IWARP_IRD)
+ qp->attrs.irq_size = params->irq_size;
+
+ if (mask & ERDMA_QPA_IWARP_ORD)
+ qp->attrs.orq_size = params->orq_size;
+
+ if (mask & ERDMA_QPA_IWARP_CC)
+ qp->attrs.cc = params->cc;
+
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
+
+ return 0;
}
-static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
- struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+static int
+erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ enum erdma_qpa_mask_iwarp mask)
{
struct erdma_dev *dev = qp->dev;
struct erdma_cmdq_modify_qp_req req;
-
- qp->attrs.state = attrs->state;
+ int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_MODIFY_QP);
- req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, params->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+ if (ret)
+ return ret;
+
+ qp->attrs.iwarp.state = params->state;
+
+ return 0;
}
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask)
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask)
{
bool need_reflush = false;
int drop_conn, ret = 0;
@@ -126,31 +152,31 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
if (!mask)
return 0;
- if (!(mask & ERDMA_QP_ATTR_STATE))
+ if (!(mask & ERDMA_QPA_IWARP_STATE))
return 0;
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- case ERDMA_QP_STATE_RTR:
- if (attrs->state == ERDMA_QP_STATE_RTS) {
- ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ switch (qp->attrs.iwarp.state) {
+ case ERDMA_QPS_IWARP_IDLE:
+ case ERDMA_QPS_IWARP_RTR:
+ if (params->state == ERDMA_QPS_IWARP_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, params, mask);
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
need_reflush = true;
if (qp->cep) {
erdma_cep_put(qp->cep);
qp->cep = NULL;
}
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
}
break;
- case ERDMA_QP_STATE_RTS:
+ case ERDMA_QPS_IWARP_RTS:
drop_conn = 0;
- if (attrs->state == ERDMA_QP_STATE_CLOSING ||
- attrs->state == ERDMA_QP_STATE_TERMINATE ||
- attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ if (params->state == ERDMA_QPS_IWARP_CLOSING ||
+ params->state == ERDMA_QPS_IWARP_TERMINATE ||
+ params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
drop_conn = 1;
need_reflush = true;
}
@@ -159,17 +185,17 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
erdma_qp_cm_drop(qp);
break;
- case ERDMA_QP_STATE_TERMINATE:
- if (attrs->state == ERDMA_QP_STATE_ERROR)
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ case ERDMA_QPS_IWARP_TERMINATE:
+ if (params->state == ERDMA_QPS_IWARP_ERROR)
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
break;
- case ERDMA_QP_STATE_CLOSING:
- if (attrs->state == ERDMA_QP_STATE_IDLE) {
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
- } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
- ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
- qp->attrs.state = ERDMA_QP_STATE_ERROR;
- } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ case ERDMA_QPS_IWARP_CLOSING:
+ if (params->state == ERDMA_QPS_IWARP_IDLE) {
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ } else if (params->state == ERDMA_QPS_IWARP_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, params, mask);
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ } else if (params->state != ERDMA_QPS_IWARP_CLOSING) {
return -ECONNABORTED;
}
break;
@@ -186,6 +212,98 @@ int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
return ret;
}
+static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ enum erdma_qpa_mask_rocev2 attr_mask)
+{
+ struct erdma_cmdq_mod_qp_req_rocev2 req;
+
+ memset(&req, 0, sizeof(req));
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK,
+ params->state);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_DQPN_MASK,
+ params->dst_qpn);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ req.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ erdma_set_av_cfg(&req.av_cfg, &params->av);
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_SQ_PSN)
+ req.sq_psn = params->sq_psn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_RQ_PSN)
+ req.rq_psn = params->rq_psn;
+
+ req.attr_mask = attr_mask;
+
+ return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
+ NULL, true);
+}
+
+static void erdma_reset_qp(struct erdma_qp *qp)
+{
+ qp->kern_qp.sq_pi = 0;
+ qp->kern_qp.sq_ci = 0;
+ qp->kern_qp.rq_pi = 0;
+ qp->kern_qp.rq_ci = 0;
+ memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
+ memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
+ memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
+ memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
+ erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
+ if (qp->rcq != qp->scq)
+ erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
+}
+
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask)
+{
+ struct erdma_dev *dev = to_edev(qp->ibqp.device);
+ int ret;
+
+ ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
+ if (ret)
+ return ret;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_STATE)
+ qp->attrs.rocev2.state = params->state;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_QKEY)
+ qp->attrs.rocev2.qkey = params->qkey;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_DST_QPN)
+ qp->attrs.rocev2.dst_qpn = params->dst_qpn;
+
+ if (attr_mask & ERDMA_QPA_ROCEV2_AV)
+ memcpy(&qp->attrs.rocev2.av, &params->av,
+ sizeof(struct erdma_av));
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_RESET)
+ erdma_reset_qp(qp);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res) &&
+ params->state == ERDMA_QPS_ROCEV2_ERROR) {
+ qp->flags |= ERDMA_QP_IN_FLUSHING;
+ mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
+ usecs_to_jiffies(100));
+ }
+
+ return 0;
+}
+
static void erdma_qp_safe_free(struct kref *ref)
{
struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
@@ -282,17 +400,57 @@ static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
return 0;
}
+static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ } else if (wr->opcode == IB_WR_SEND_WITH_INV) {
+ op = ERDMA_OP_SEND_WITH_INV;
+ sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
+ }
+
+ *hw_op = op;
+}
+
+static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
+ const struct ib_send_wr *wr, u32 *hw_op)
+{
+ const struct ib_ud_wr *uwr = ud_wr(wr);
+ struct erdma_ah *ah = to_eah(uwr->ah);
+ u32 op = ERDMA_OP_SEND;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ op = ERDMA_OP_SEND_WITH_IMM;
+ sqe->imm_data = wr->ex.imm_data;
+ }
+
+ *hw_op = op;
+
+ sqe->ahn = cpu_to_le32(ah->ahn);
+ sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
+ /* Not allowed to send control qkey */
+ if (uwr->remote_qkey & 0x80000000)
+ sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
+ else
+ sqe->qkey = cpu_to_le32(uwr->remote_qkey);
+}
+
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
const struct ib_send_wr *send_wr)
{
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_send_sqe_rc *rc_send_sqe;
+ struct erdma_send_sqe_ud *ud_send_sqe;
struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
- struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
struct erdma_sge *sge;
__le32 *length_field;
@@ -301,6 +459,10 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 attrs;
int ret;
+ if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
+ send_wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
SQEBB_SHIFT);
@@ -374,21 +536,20 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV:
- send_sqe = (struct erdma_send_sqe *)entry;
- hw_op = ERDMA_OP_SEND;
- if (op == IB_WR_SEND_WITH_IMM) {
- hw_op = ERDMA_OP_SEND_WITH_IMM;
- send_sqe->imm_data = send_wr->ex.imm_data;
- } else if (op == IB_WR_SEND_WITH_INV) {
- hw_op = ERDMA_OP_SEND_WITH_INV;
- send_sqe->invalid_stag =
- cpu_to_le32(send_wr->ex.invalidate_rkey);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rc_send_sqe = (struct erdma_send_sqe_rc *)entry;
+ init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
+ length_field = &rc_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_rc);
+ } else {
+ ud_send_sqe = (struct erdma_send_sqe_ud *)entry;
+ init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
+ length_field = &ud_send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe_ud);
}
- wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
- length_field = &send_sqe->length;
- wqe_size = sizeof(struct erdma_send_sqe);
- sgl_offset = wqe_size;
+ sgl_offset = wqe_size;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
break;
case IB_WR_REG_MR:
wqe_hdr |=
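
On the RoCEv2 path only the attributes named in the mask are pushed to hardware and mirrored into qp->attrs.rocev2; moving a kernel QP to RESET or ERROR additionally triggers the queue reset and flush handling shown above. A minimal usage sketch (the example function is hypothetical; the in-tree caller is erdma_modify_qp(), which derives the mask from the IB verbs attributes):

static int example_ud_qp_to_rts(struct erdma_qp *qp, u32 qkey, u32 sq_psn)
{
	struct erdma_mod_qp_params_rocev2 params = {};
	int ret;

	params.state = ERDMA_QPS_ROCEV2_RTS;
	params.qkey = qkey;
	params.sq_psn = sq_psn;

	/* Callers serialize state changes via qp->state_lock. */
	down_write(&qp->state_lock);
	ret = erdma_modify_qp_state_rocev2(qp, &params,
					   ERDMA_QPA_ROCEV2_STATE |
					   ERDMA_QPA_ROCEV2_QKEY |
					   ERDMA_QPA_ROCEV2_SQ_PSN);
	up_write(&qp->state_lock);

	return ret;
}
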
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 51d619edb6c5..af36a8d2df22 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -55,6 +55,13 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
ilog2(qp->attrs.rq_size)) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_RC);
+ else
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
+ ERDMA_QPT_UD);
+
if (rdma_is_kernel_res(&qp->ibqp.res)) {
u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
@@ -119,10 +126,10 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
}
}
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
- &resp1);
- if (!err)
- qp->attrs.cookie =
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
+ true);
+ if (!err && erdma_device_iwarp(dev))
+ qp->attrs.iwarp.cookie =
FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
return err;
@@ -178,7 +185,8 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
@@ -240,7 +248,8 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
}
}
- return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -336,6 +345,11 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ if (erdma_device_rocev2(dev)) {
+ attr->max_pkeys = ERDMA_MAX_PKEYS;
+ attr->max_ah = dev->attrs.max_ah;
+ }
+
if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
attr->atomic_cap = IB_ATOMIC_GLOB;
@@ -367,7 +381,14 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
memset(attr, 0, sizeof(*attr));
- attr->gid_tbl_len = 1;
+ if (erdma_device_iwarp(dev)) {
+ attr->gid_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = dev->attrs.max_gid;
+ attr->ip_gids = true;
+ attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
+
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->max_msg_sz = -1;
@@ -377,14 +398,10 @@ int erdma_query_port(struct ib_device *ibdev, u32 port,
ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
- if (netif_running(ndev) && netif_carrier_ok(ndev))
- dev->state = IB_PORT_ACTIVE;
- else
- dev->state = IB_PORT_DOWN;
- attr->state = dev->state;
+ attr->state = ib_get_curr_port_state(ndev);
out:
- if (dev->state == IB_PORT_ACTIVE)
+ if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
@@ -395,8 +412,18 @@ out:
int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
struct ib_port_immutable *port_immutable)
{
- port_immutable->gid_tbl_len = 1;
- port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ if (erdma_device_iwarp(dev)) {
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ port_immutable->gid_tbl_len = 1;
+ } else {
+ port_immutable->core_cap_flags =
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ port_immutable->gid_tbl_len = dev->attrs.max_gid;
+ port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
+ }
return 0;
}
@@ -438,7 +465,8 @@ static void erdma_flush_worker(struct work_struct *work)
req.qpn = QP_ID(qp);
req.sq_pi = qp->kern_qp.sq_pi;
req.rq_pi = qp->kern_qp.rq_pi;
- erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
}
static int erdma_qp_validate_cap(struct erdma_dev *dev,
@@ -459,7 +487,11 @@ static int erdma_qp_validate_cap(struct erdma_dev *dev,
static int erdma_qp_validate_attr(struct erdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_RC)
+ if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
return -EOPNOTSUPP;
if (attrs->srq)
@@ -937,7 +969,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
udata, struct erdma_ucontext, ibucontext);
struct erdma_ureq_create_qp ureq;
struct erdma_uresp_create_qp uresp;
- int ret;
+ void *old_entry;
+ int ret = 0;
ret = erdma_qp_validate_cap(dev, attrs);
if (ret)
@@ -956,9 +989,16 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
kref_init(&qp->ref);
init_completion(&qp->safe_free);
- ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
- XA_LIMIT(1, dev->attrs.max_qp - 1),
- &dev->next_alloc_qpn, GFP_KERNEL);
+ if (qp->ibqp.qp_type == IB_QPT_GSI) {
+ old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
+ if (xa_is_err(old_entry))
+ ret = xa_err(old_entry);
+ } else {
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ }
+
if (ret < 0) {
ret = -ENOMEM;
goto err_out;
@@ -995,7 +1035,12 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
- qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ if (erdma_device_iwarp(qp->dev))
+ qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
+ else
+ qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
+
INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
ret = create_qp_cmd(uctx, qp);
@@ -1219,7 +1264,8 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
return ret;
@@ -1244,7 +1290,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1269,13 +1316,20 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibqp->device);
struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
udata, struct erdma_ucontext, ibucontext);
- struct erdma_qp_attrs qp_attrs;
- int err;
struct erdma_cmdq_destroy_qp_req req;
+ union erdma_mod_qp_params params;
+ int err;
down_write(&qp->state_lock);
- qp_attrs.state = ERDMA_QP_STATE_ERROR;
- erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ if (erdma_device_iwarp(dev)) {
+ params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
+ erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ ERDMA_QPA_IWARP_STATE);
+ } else {
+ params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
+ erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ ERDMA_QPA_ROCEV2_STATE);
+ }
up_write(&qp->state_lock);
cancel_delayed_work_sync(&qp->reflush_dwork);
@@ -1284,7 +1338,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
return err;
@@ -1382,7 +1437,8 @@ static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
+ true);
if (ret)
return ret;
@@ -1417,7 +1473,8 @@ static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;
- ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d", ret);
@@ -1506,69 +1563,248 @@ void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
atomic_dec(&dev->num_ctx);
}
-static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
- [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
- [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
- [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
- [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
- [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
+ struct erdma_av *av, u16 sport)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ av->port = rdma_ah_get_port_num(ah_attr);
+ av->sgid_index = grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
+ av->traffic_class = grh->traffic_class;
+ av->sl = rdma_ah_get_sl(ah_attr);
+
+ av->flow_label = grh->flow_label;
+ av->udp_sport = sport;
+
+ ether_addr_copy(av->dmac, ah_attr->roce.dmac);
+ memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
+
+ if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
+ av->ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ av->ntype = ERDMA_NETWORK_TYPE_IPV6;
+}
+
+static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
+{
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_sl(ah_attr, av->sl);
+ rdma_ah_set_port_num(ah_attr, av->port);
+ rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
+
+ rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
+ av->hop_limit, av->traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, av->dgid);
+}
+
+static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
+ [ERDMA_PROTO_IWARP] = {
+ [IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
+ [IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
+ [IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
+ [IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
+ [IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
+ [IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
+ [IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
+ [IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
+ },
};
-int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata)
+static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
+ [ERDMA_PROTO_IWARP] = {
+ [ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
+ [ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
+ [ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
+ },
+ [ERDMA_PROTO_ROCEV2] = {
+ [ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
+ [ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
+ [ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
+ [ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
+ [ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
+ [ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
+ [ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
+ },
+};
+
+static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
{
- struct erdma_qp_attrs new_attrs;
- enum erdma_qp_attr_mask erdma_attr_mask = 0;
- struct erdma_qp *qp = to_eqp(ibqp);
- int ret = 0;
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
+static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
+{
+ return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
+}
- memset(&new_attrs, 0, sizeof(new_attrs));
+static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
+}
- if (attr_mask & IB_QP_STATE) {
- new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
+{
+ return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
+}
- erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ enum ib_qp_state cur_state, nxt_state;
+ struct erdma_dev *dev = qp->dev;
+ int ret = -EINVAL;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(&dev->ibdev, attr->port_num))
+ goto out;
+
+ if (erdma_device_rocev2(dev)) {
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state :
+ rocev2_to_ib_qps(qp->attrs.rocev2.state);
+
+ nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
+ cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
+ attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_AV) &&
+ erdma_check_gid_attr(
+ rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
+ goto out;
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= ERDMA_MAX_PKEYS)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void erdma_init_mod_qp_params_rocev2(
+ struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
+ int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
+{
+ enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
+ enum erdma_qps_rocev2 cur_state, nxt_state;
+ u16 udp_sport;
+
+ if (ib_attr_mask & IB_QP_CUR_STATE)
+ cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
+ else
+ cur_state = qp->attrs.rocev2.state;
+
+ if (ib_attr_mask & IB_QP_STATE)
+ nxt_state = ib_to_rocev2_qps(attr->qp_state);
+ else
+ nxt_state = cur_state;
+
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
+ params->state = nxt_state;
+
+ if (ib_attr_mask & IB_QP_QKEY) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
+ params->qkey = attr->qkey;
+ }
+
+ if (ib_attr_mask & IB_QP_SQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
+ params->sq_psn = attr->sq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_RQ_PSN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
+ params->rq_psn = attr->rq_psn;
+ }
+
+ if (ib_attr_mask & IB_QP_DEST_QPN) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
+ params->dst_qpn = attr->dest_qp_num;
}
+ if (ib_attr_mask & IB_QP_AV) {
+ to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
+ udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ QP_ID(qp), params->dst_qpn);
+ erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
+ }
+
+ *erdma_attr_mask = to_modify_attrs;
+}
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ union erdma_mod_qp_params params;
+ int ret = 0, erdma_attr_mask = 0;
+
down_write(&qp->state_lock);
- ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+ ret = erdma_check_qp_attrs(qp, attr, attr_mask);
+ if (ret)
+ goto out;
- up_write(&qp->state_lock);
+ if (erdma_device_iwarp(qp->dev)) {
+ if (attr_mask & IB_QP_STATE) {
+ erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
+ params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
+ }
+
+ ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
+ erdma_attr_mask);
+ } else {
+ erdma_init_mod_qp_params_rocev2(
+ qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
+
+ ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
+ erdma_attr_mask);
+ }
+out:
+ up_write(&qp->state_lock);
return ret;
}
static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
{
- switch (qp->attrs.state) {
- case ERDMA_QP_STATE_IDLE:
- return IB_QPS_INIT;
- case ERDMA_QP_STATE_RTR:
- return IB_QPS_RTR;
- case ERDMA_QP_STATE_RTS:
- return IB_QPS_RTS;
- case ERDMA_QP_STATE_CLOSING:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_TERMINATE:
- return IB_QPS_ERR;
- case ERDMA_QP_STATE_ERROR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
+ if (erdma_device_iwarp(qp->dev))
+ return iwarp_to_ib_qps(qp->attrs.iwarp.state);
+ else
+ return rocev2_to_ib_qps(qp->attrs.rocev2.state);
}
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
+ struct erdma_cmdq_query_qp_req_rocev2 req;
struct erdma_dev *dev;
struct erdma_qp *qp;
+ u64 resp0, resp1;
+ int ret;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1595,8 +1831,37 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_attr->qp_state = query_qp_state(qp);
- qp_attr->cur_qp_state = query_qp_state(qp);
+ if (erdma_device_rocev2(dev)) {
+ /* Query hardware to get some attributes */
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_QP);
+ req.qpn = QP_ID(qp);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
+ &resp1, true);
+ if (ret)
+ return ret;
+
+ qp_attr->sq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
+ qp_attr->rq_psn =
+ FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
+ qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->sq_draining = FIELD_GET(
+ ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
+
+ qp_attr->pkey_index = 0;
+ qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ erdma_av_to_attr(&qp->attrs.rocev2.av,
+ &qp_attr->ah_attr);
+ } else {
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ }
return 0;
}
@@ -1736,7 +2001,7 @@ void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
CMDQ_OPCODE_CONF_MTU);
req.mtu = mtu;
- erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
@@ -1806,7 +2071,8 @@ static int erdma_query_hw_stats(struct erdma_dev *dev,
req.target_addr = dma_addr;
req.target_length = ERDMA_HW_RESP_SIZE;
- err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
if (err)
goto out;
@@ -1839,3 +2105,159 @@ int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
+
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
+ const union ib_gid *gid)
+{
+ struct erdma_cmdq_set_gid_req req;
+ u8 ntype;
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
+ FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
+
+ if (op == ERDMA_SET_GID_OP_ADD) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)gid))
+ ntype = ERDMA_NETWORK_TYPE_IPV4;
+ else
+ ntype = ERDMA_NETWORK_TYPE_IPV6;
+
+ req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
+
+ memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
+ }
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_SET_GID);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ true);
+}
+
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct erdma_dev *dev = to_edev(attr->device);
+ int ret;
+
+ ret = erdma_check_gid_attr(attr);
+ if (ret)
+ return ret;
+
+ return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
+ &attr->gid);
+}
+
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
+ attr->index, NULL);
+}
+
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index >= ERDMA_MAX_PKEYS)
+ return -EINVAL;
+
+ *pkey = ERDMA_DEFAULT_PKEY;
+ return 0;
+}
+
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
+{
+ av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
+ FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
+
+ av_cfg->traffic_class = av->traffic_class;
+ av_cfg->hop_limit = av->hop_limit;
+ av_cfg->sl = av->sl;
+
+ av_cfg->udp_sport = av->udp_sport;
+ av_cfg->sgid_index = av->sgid_index;
+
+ ether_addr_copy(av_cfg->dmac, av->dmac);
+ memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
+}
+
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(init_attr->ah_attr);
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_create_ah_req req;
+ u32 udp_sport;
+ int ret;
+
+ ret = erdma_check_gid_attr(grh->sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
+ if (ret < 0)
+ return ret;
+
+ ah->ahn = ret;
+
+ if (grh->flow_label)
+ udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
+ else
+ udp_sport =
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
+
+ erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+ erdma_set_av_cfg(&req.av_cfg, &ah->av);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+ if (ret) {
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+ return ret;
+ }
+
+ return 0;
+}
+
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct erdma_dev *dev = to_edev(ibah->device);
+ struct erdma_pd *pd = to_epd(ibah->pd);
+ struct erdma_ah *ah = to_eah(ibah);
+ struct erdma_cmdq_destroy_ah_req req;
+ int ret;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_AH);
+
+ req.pdn = pd->pdn;
+ req.ahn = ah->ahn;
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
+ flags & RDMA_DESTROY_AH_SLEEPABLE);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
+
+ return 0;
+}
+
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct erdma_ah *ah = to_eah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ erdma_av_to_attr(&ah->av, ah_attr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c998acd39a78..f9408ccc8bad 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -136,6 +136,25 @@ struct erdma_user_dbrecords_page {
int refcnt;
};
+struct erdma_av {
+ u8 port;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 sl;
+ u8 sgid_index;
+ u16 udp_sport;
+ u32 flow_label;
+ u8 dmac[ETH_ALEN];
+ u8 dgid[ERDMA_ROCEV2_GID_SIZE];
+ enum erdma_network_type ntype;
+};
+
+struct erdma_ah {
+ struct ib_ah ibah;
+ struct erdma_av av;
+ u32 ahn;
+};
+
struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
@@ -176,33 +195,91 @@ struct erdma_kqp {
u8 sig_all;
};
-enum erdma_qp_state {
- ERDMA_QP_STATE_IDLE = 0,
- ERDMA_QP_STATE_RTR = 1,
- ERDMA_QP_STATE_RTS = 2,
- ERDMA_QP_STATE_CLOSING = 3,
- ERDMA_QP_STATE_TERMINATE = 4,
- ERDMA_QP_STATE_ERROR = 5,
- ERDMA_QP_STATE_UNDEF = 7,
- ERDMA_QP_STATE_COUNT = 8
+enum erdma_qps_iwarp {
+ ERDMA_QPS_IWARP_IDLE = 0,
+ ERDMA_QPS_IWARP_RTR = 1,
+ ERDMA_QPS_IWARP_RTS = 2,
+ ERDMA_QPS_IWARP_CLOSING = 3,
+ ERDMA_QPS_IWARP_TERMINATE = 4,
+ ERDMA_QPS_IWARP_ERROR = 5,
+ ERDMA_QPS_IWARP_UNDEF = 6,
+ ERDMA_QPS_IWARP_COUNT = 7,
+};
+
+enum erdma_qpa_mask_iwarp {
+ ERDMA_QPA_IWARP_STATE = (1 << 0),
+ ERDMA_QPA_IWARP_LLP_HANDLE = (1 << 2),
+ ERDMA_QPA_IWARP_ORD = (1 << 3),
+ ERDMA_QPA_IWARP_IRD = (1 << 4),
+ ERDMA_QPA_IWARP_SQ_SIZE = (1 << 5),
+ ERDMA_QPA_IWARP_RQ_SIZE = (1 << 6),
+ ERDMA_QPA_IWARP_MPA = (1 << 7),
+ ERDMA_QPA_IWARP_CC = (1 << 8),
};
-enum erdma_qp_attr_mask {
- ERDMA_QP_ATTR_STATE = (1 << 0),
- ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
- ERDMA_QP_ATTR_ORD = (1 << 3),
- ERDMA_QP_ATTR_IRD = (1 << 4),
- ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
- ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
- ERDMA_QP_ATTR_MPA = (1 << 7)
+enum erdma_qps_rocev2 {
+ ERDMA_QPS_ROCEV2_RESET = 0,
+ ERDMA_QPS_ROCEV2_INIT = 1,
+ ERDMA_QPS_ROCEV2_RTR = 2,
+ ERDMA_QPS_ROCEV2_RTS = 3,
+ ERDMA_QPS_ROCEV2_SQD = 4,
+ ERDMA_QPS_ROCEV2_SQE = 5,
+ ERDMA_QPS_ROCEV2_ERROR = 6,
+ ERDMA_QPS_ROCEV2_COUNT = 7,
+};
+
+enum erdma_qpa_mask_rocev2 {
+ ERDMA_QPA_ROCEV2_STATE = (1 << 0),
+ ERDMA_QPA_ROCEV2_QKEY = (1 << 1),
+ ERDMA_QPA_ROCEV2_AV = (1 << 2),
+ ERDMA_QPA_ROCEV2_SQ_PSN = (1 << 3),
+ ERDMA_QPA_ROCEV2_RQ_PSN = (1 << 4),
+ ERDMA_QPA_ROCEV2_DST_QPN = (1 << 5),
};
enum erdma_qp_flags {
ERDMA_QP_IN_FLUSHING = (1 << 0),
};
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+
+struct erdma_mod_qp_params_iwarp {
+ enum erdma_qps_iwarp state;
+ enum erdma_cc_alg cc;
+ u8 qp_type;
+ u8 pd_len;
+ u32 irq_size;
+ u32 orq_size;
+};
+
+struct erdma_qp_attrs_iwarp {
+ enum erdma_qps_iwarp state;
+ u32 cookie;
+};
+
+struct erdma_mod_qp_params_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 sq_psn;
+ u32 rq_psn;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
+union erdma_mod_qp_params {
+ struct erdma_mod_qp_params_iwarp iwarp;
+ struct erdma_mod_qp_params_rocev2 rocev2;
+};
+
+struct erdma_qp_attrs_rocev2 {
+ enum erdma_qps_rocev2 state;
+ u32 qkey;
+ u32 dst_qpn;
+ struct erdma_av av;
+};
+
struct erdma_qp_attrs {
- enum erdma_qp_state state;
enum erdma_cc_alg cc; /* Congestion control algorithm */
u32 sq_size;
u32 rq_size;
@@ -210,11 +287,10 @@ struct erdma_qp_attrs {
u32 irq_size;
u32 max_send_sge;
u32 max_recv_sge;
- u32 cookie;
-#define ERDMA_QP_ACTIVE 0
-#define ERDMA_QP_PASSIVE 1
- u8 qp_type;
- u8 pd_len;
+ union {
+ struct erdma_qp_attrs_iwarp iwarp;
+ struct erdma_qp_attrs_rocev2 rocev2;
+ };
};
struct erdma_qp {
@@ -286,11 +362,25 @@ static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
-int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
- enum erdma_qp_attr_mask mask);
+int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_iwarp *params,
+ int mask);
+int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
+ struct erdma_mod_qp_params_rocev2 *params,
+ int attr_mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
+static inline bool erdma_device_iwarp(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_IWARP;
+}
+
+static inline bool erdma_device_rocev2(struct erdma_dev *dev)
+{
+ return dev->proto == ERDMA_PROTO_ROCEV2;
+}
+
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
return container_of(ibctx, struct erdma_ucontext, ibucontext);
@@ -316,6 +406,21 @@ static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
return container_of(ibcq, struct erdma_cq, ibcq);
}
+static inline struct erdma_ah *to_eah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct erdma_ah, ibah);
+}
+
+static inline int erdma_check_gid_attr(const struct ib_gid_attr *attr)
+{
+ u8 ntype = rdma_gid_attr_network_type(attr);
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6)
+ return -EINVAL;
+
+ return 0;
+}
+
static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
@@ -360,6 +465,7 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+void erdma_remove_cqes_of_qp(struct ib_cq *ibcq, u32 qpn);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
@@ -370,5 +476,15 @@ struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port, int index);
+enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num);
+int erdma_add_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_del_gid(const struct ib_gid_attr *attr, void **context);
+int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av);
+int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int erdma_destroy_ah(struct ib_ah *ibah, u32 flags);
+int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
#endif
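The new to_eah() helper above follows the same container_of() idiom as the existing to_epd()/to_ecq() converters: the ib_ah the RDMA core hands out is embedded inside the driver's erdma_ah, and the wrapper recovers the outer structure from a pointer to the embedded member. A standalone sketch of the idiom, with stand-in types rather than the real ib_ah/erdma_ah definitions:

/*
 * Minimal userspace illustration of the container_of() pattern behind
 * to_eah(). Types here are placeholders for the embedded-object layout,
 * not the driver's actual structures.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_ah { int dummy; };

struct erdma_ah {
	struct ib_ah ibah;    /* embedded core object */
	unsigned int ahn;     /* driver-private AH number */
};

static struct erdma_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct erdma_ah, ibah);
}

int main(void)
{
	struct erdma_ah ah = { .ahn = 42 };
	struct ib_ah *core = &ah.ibah;   /* what the core passes back to the driver */

	printf("ahn via container_of: %u\n", to_eah(core)->ahn);
	return 0;
}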
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index eb38f81aeeb1..cb630551cf1a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -2339,20 +2339,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
-/*
- * this is used for formatting hw error messages...
- */
-struct hfi1_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-/* in intr.c... */
-void hfi1_format_hwerrors(u64 hwerrs,
- const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
-
#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 3737f632d62a..d8dd1a599631 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -47,37 +47,6 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
hfi1_event_pkey_change(ppd->dd, ppd->port);
}
-/**
- * format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
-
static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 49805a24bb0a..7259f4f55700 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -92,7 +92,7 @@ struct iowait_work {
*
* The lock field is used by waiters to record
* the seqlock_t that guards the list head.
- * Waiters explicity know that, but the destroy
+ * Waiters explicitly know that, but the destroy
* code that unwaits QPs does not.
*/
struct iowait {
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index d62ba5fdd80c..d94216c7d576 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -27,8 +27,8 @@ static struct hfi1_pportdata *hfi1_get_pportdata_kobj(struct kobject *kobj)
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -57,7 +57,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -65,7 +65,7 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct hfi1_pportdata *ppd = hfi1_get_pportdata_kobj(kobj);
@@ -93,9 +93,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_cc_bin_attributes[] = {
+static const struct bin_attribute *const port_cc_bin_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL
@@ -134,7 +134,7 @@ static struct attribute *port_cc_attributes[] = {
static const struct attribute_group port_cc_group = {
.name = "CCMgtA",
.attrs = port_cc_attributes,
- .bin_attrs = port_cc_bin_attributes,
+ .bin_attrs_new = port_cc_bin_attributes,
};
/* Start sc2vl */
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index ab3fbba70789..44cdb706fe27 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,21 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
- depends on NET_VENDOR_HISILICON
- depends on ARM64 || (COMPILE_TEST && 64BIT)
- depends on (HNS_DSAF && HNS_ENET) || HNS3
- help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
-
- To compile HIP08 driver as module, choose M here.
-
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
- depends on INFINIBAND_HNS && PCI && HNS3
- depends on INFINIBAND_HNS=m || HNS3=y
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HNS3
help
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v2.
+ To compile this driver, choose M here. This module will be called
+ hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index be1e1cdbcfa8..7917af8e6380 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,12 +5,9 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
- hns_roce_debugfs.o
+ hns_roce_debugfs.o hns_roce_hw_v2.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0144e7210d05..dded339802b3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -7185,9 +7185,22 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
+ bool linkup)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct net_device *netdev = handle->rinfo.netdev;
+
+ if (linkup || !hr_dev)
+ return;
+
+ ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
+}
+
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .link_status_change = hns_roce_hw_v2_link_status_change,
.reset_notify = hns_roce_hw_v2_reset_notify,
};
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index e1e3d3ae72b7..ddf02a462efa 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -59,10 +59,6 @@ int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index d7c8ea948bcd..c0c9441885d3 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -85,10 +85,6 @@ int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
int irdma_process_bh(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 0422787592d8..0e594122baa7 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -320,9 +320,6 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
case NETDEV_DOWN:
iwdev->iw_status = 0;
fallthrough;
- case NETDEV_UP:
- irdma_port_ibevent(iwdev);
- break;
default:
break;
}
@@ -972,74 +969,6 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
}
/**
- * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
- * @dev: function device struct
- * @val_mem: buffer for fpm
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
- * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
- * @dev: hardware control device structure
- * @val_mem: buffer with fpm values
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
- struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_pci_f *rf = dev_to_rf(dev);
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- cqp_request->param = NULL;
- cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
- cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
- cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
- cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
- cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
- cqp_info->post_sq = 1;
- cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_cqp_cq_create_cmd - create a cq for the cqp
* @dev: device pointer
* @cq: pointer to created cq
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index aa9ea6ba26e5..c592374f4a58 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
return PTR_ERR(*umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
if (err)
goto err_buf;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 529db874d67c..dd35e03402ab 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
- struct gid_entry *gids;
+ struct gid_entry *gids = NULL;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
spin_unlock_bh(&iboe->lock);
- if (!ret && hw_update) {
+ if (gids)
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
- kfree(gids);
- }
+
+ kfree(gids);
return ret;
}
@@ -2341,37 +2341,38 @@ static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev,
iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
- if (event == NETDEV_UP || event == NETDEV_DOWN) {
- enum ib_port_state port_state;
- struct ib_event ibev = { };
+ spin_unlock_bh(&iboe->lock);
- if (ib_get_cached_port_state(&ibdev->ib_dev, dev->dev_port + 1,
- &port_state))
- goto iboe_out;
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
+ mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+}
- if (event == NETDEV_UP &&
- (port_state != IB_PORT_ACTIVE ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_DOWN))
- goto iboe_out;
- if (event == NETDEV_DOWN &&
- (port_state != IB_PORT_DOWN ||
- iboe->last_port_state[dev->dev_port] != IB_PORT_ACTIVE))
- goto iboe_out;
- iboe->last_port_state[dev->dev_port] = port_state;
+static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev,
+ unsigned long event)
+{
+ struct mlx4_ib_dev *mlx4_ibdev =
+ container_of(ibdev, struct mlx4_ib_dev, ib_dev);
+ struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe;
- ibev.device = &ibdev->ib_dev;
- ibev.element.port_num = dev->dev_port + 1;
- ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR;
- ib_dispatch_event(&ibev);
- }
+ if (!net_eq(dev_net(ndev), &init_net))
+ return;
+
+ ASSERT_RTNL();
+
+ if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent)
+ return;
+
+ spin_lock_bh(&iboe->lock);
+
+ iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
+
+ if (event == NETDEV_UP || event == NETDEV_DOWN)
+ ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
-iboe_out:
spin_unlock_bh(&iboe->lock);
- if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
- event == NETDEV_UP || event == NETDEV_CHANGE)
- mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
+ if (event == NETDEV_UP || event == NETDEV_CHANGE)
+ mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -2569,6 +2570,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
+ .report_port_event = mlx4_ib_port_event,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index b52bceff7d97..f53b1846594c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -667,6 +667,9 @@ struct mlx4_uverbs_ex_query_device {
__u32 reserved;
};
+/* 4k - 4G */
+#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))
+
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -936,8 +939,19 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
{
return 0;
}
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts);
+static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+ u64 start,
+ int *num_of_mtts)
+{
+ unsigned long pg_sz;
+
+ pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
+ if (!pg_sz)
+ return -EOPNOTSUPP;
+
+ *num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
+ return order_base_2(pg_sz);
+}
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);
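The inline replacement for mlx4_ib_umem_calc_optimal_mtt_size() above delegates page-size selection to ib_umem_find_best_pgsz(): MLX4_PAGE_SIZE_SUPPORTED is a bitmask whose set bits (12 through 31) are the page sizes the device accepts, the core picks the largest one that fits the mapping, and order_base_2() turns it back into a shift. The userspace sketch below models only the mask test against a buffer's alignment and length; the real helper also inspects the DMA scatterlist and the IOVA, so treat this as a toy model of the selection, not the core's algorithm.

/*
 * Toy model of "largest supported page size that fits the buffer".
 * GENMASK_ULL is re-implemented here for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define MLX4_PAGE_SIZE_SUPPORTED GENMASK_ULL(31, 12)

static uint64_t best_pgsz(uint64_t supported, uint64_t start, uint64_t len)
{
	int bit;

	for (bit = 63; bit >= 0; bit--) {
		uint64_t sz = 1ULL << bit;

		if (!(supported & sz))
			continue;
		/* the page size must cover the buffer's alignment and length */
		if ((start | len) & (sz - 1))
			continue;
		return sz;
	}
	return 0; /* nothing fits -> the kernel helper returns -EOPNOTSUPP */
}

int main(void)
{
	uint64_t pg = best_pgsz(MLX4_PAGE_SIZE_SUPPORTED,
				0x200000, 0x400000); /* 2 MB aligned, 4 MB long */

	printf("chosen page size: %llu KB\n", (unsigned long long)(pg / 1024));
	return 0;
}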
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a40bf58bcdd3..e77645a673fb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,286 +87,20 @@ err_free:
return ERR_PTR(err);
}
-enum {
- MLX4_MAX_MTT_SHIFT = 31
-};
-
-static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
- struct mlx4_mtt *mtt,
- u64 mtt_size, u64 mtt_shift, u64 len,
- u64 cur_start_addr, u64 *pages,
- int *start_index, int *npages)
-{
- u64 cur_end_addr = cur_start_addr + len;
- u64 cur_end_addr_aligned = 0;
- u64 mtt_entries;
- int err = 0;
- int k;
-
- len += (cur_start_addr & (mtt_size - 1ULL));
- cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
- len += (cur_end_addr_aligned - cur_end_addr);
- if (len & (mtt_size - 1ULL)) {
- pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
- len, mtt_size);
- return -EINVAL;
- }
-
- mtt_entries = (len >> mtt_shift);
-
- /*
- * Align the MTT start address to the mtt_size.
- * Required to handle cases when the MR starts in the middle of an MTT
- * record. Was not required in old code since the physical addresses
- * provided by the dma subsystem were page aligned, which was also the
- * MTT size.
- */
- cur_start_addr = round_down(cur_start_addr, mtt_size);
- /* A new block is started ... */
- for (k = 0; k < mtt_entries; ++k) {
- pages[*npages] = cur_start_addr + (mtt_size * k);
- (*npages)++;
- /*
- * Be friendly to mlx4_write_mtt() and pass it chunks of
- * appropriate size.
- */
- if (*npages == PAGE_SIZE / sizeof(u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, *start_index,
- *npages, pages);
- if (err)
- return err;
-
- (*start_index) += *npages;
- *npages = 0;
- }
- }
-
- return 0;
-}
-
-static inline u64 alignment_of(u64 ptr)
-{
- return ilog2(ptr & (~(ptr - 1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
- u64 current_block_end,
- u64 block_shift)
-{
- /* Check whether the alignment of the new block is aligned as well as
- * the previous block.
- * Block address must start with zeros till size of entity_size.
- */
- if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the previous block-reduce the
- * mtt size accordingly. Here we take the last right bit which
- * is 1.
- */
- block_shift = alignment_of(next_block_start);
-
- /*
- * Check whether the alignment of the end of previous block - is it
- * aligned as well as the start of the block
- */
- if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the start of the block -
- * reduce the mtt size accordingly.
- */
- block_shift = alignment_of(current_block_end);
-
- return block_shift;
-}
-
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
- u64 *pages;
- u64 len = 0;
- int err = 0;
- u64 mtt_size;
- u64 cur_start_addr = 0;
- u64 mtt_shift;
- int start_index = 0;
- int npages = 0;
- struct scatterlist *sg;
- int i;
-
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- mtt_shift = mtt->page_shift;
- mtt_size = 1ULL << mtt_shift;
+ struct ib_block_iter biter;
+ int err, i = 0;
+ u64 addr;
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- if (cur_start_addr + len == sg_dma_address(sg)) {
- /* still the same block */
- len += sg_dma_len(sg);
- continue;
- }
- /*
- * A new block is started ...
- * If len is malaligned, write an extra mtt entry to cover the
- * misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr,
- pages, &start_index,
- &npages);
- if (err)
- goto out;
-
- cur_start_addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
- }
-
- /* Handle the last block */
- if (len > 0) {
- /*
- * If len is malaligned, write an extra mtt entry to cover
- * the misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr, pages,
- &start_index, &npages);
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
+ addr = rdma_block_iter_dma_address(&biter);
+ err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
if (err)
- goto out;
- }
-
- if (npages)
- err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
-
-out:
- free_page((unsigned long) pages);
- return err;
-}
-
-/*
- * Calculate optimal mtt size based on contiguous pages.
- * Function will return also the number of pages that are not aligned to the
- * calculated mtt_size to be added to total number of pages. For that we should
- * check the first chunk length & last chunk length and if not aligned to
- * mtt_size we should increment the non_aligned_pages number. All chunks in the
- * middle already handled as part of mtt shift calculation for both their start
- * & end addresses.
- */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts)
-{
- u64 block_shift = MLX4_MAX_MTT_SHIFT;
- u64 min_shift = PAGE_SHIFT;
- u64 last_block_aligned_end = 0;
- u64 current_block_start = 0;
- u64 first_block_start = 0;
- u64 current_block_len = 0;
- u64 last_block_end = 0;
- struct scatterlist *sg;
- u64 current_block_end;
- u64 misalignment_bits;
- u64 next_block_start;
- u64 total_len = 0;
- int i;
-
- *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-
- for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- /*
- * Initialization - save the first chunk start as the
- * current_block_start - block means contiguous pages.
- */
- if (current_block_len == 0 && current_block_start == 0) {
- current_block_start = sg_dma_address(sg);
- first_block_start = current_block_start;
- /*
- * Find the bits that are different between the physical
- * address and the virtual address for the start of the
- * MR.
- * umem_get aligned the start_va to a page boundary.
- * Therefore, we need to align the start va to the same
- * boundary.
- * misalignment_bits is needed to handle the case of a
- * single memory region. In this case, the rest of the
- * logic will not reduce the block size. If we use a
- * block size which is bigger than the alignment of the
- * misalignment bits, we might use the virtual page
- * number instead of the physical page number, resulting
- * in access to the wrong data.
- */
- misalignment_bits =
- (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
- current_block_start;
- block_shift = min(alignment_of(misalignment_bits),
- block_shift);
- }
-
- /*
- * Go over the scatter entries and check if they continue the
- * previous scatter entry.
- */
- next_block_start = sg_dma_address(sg);
- current_block_end = current_block_start + current_block_len;
- /* If we have a split (non-contig.) between two blocks */
- if (current_block_end != next_block_start) {
- block_shift = mlx4_ib_umem_calc_block_mtt
- (next_block_start,
- current_block_end,
- block_shift);
-
- /*
- * If we reached the minimum shift for 4k page we stop
- * the loop.
- */
- if (block_shift <= min_shift)
- goto end;
-
- /*
- * If not saved yet we are in first block - we save the
- * length of first block to calculate the
- * non_aligned_pages number at the end.
- */
- total_len += current_block_len;
-
- /* Start a new block */
- current_block_start = next_block_start;
- current_block_len = sg_dma_len(sg);
- continue;
- }
- /* The scatter entry is another part of the current block,
- * increase the block size.
- * An entry in the scatter can be larger than 4k (page) as of
- * dma mapping which merge some blocks together.
- */
- current_block_len += sg_dma_len(sg);
+ return err;
}
-
- /* Account for the last block in the total len */
- total_len += current_block_len;
- /* Add to the first block the misalignment that it suffers from. */
- total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
- last_block_end = current_block_start + current_block_len;
- last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
- total_len += (last_block_aligned_end - last_block_end);
-
- if (total_len & ((1ULL << block_shift) - 1ULL))
- pr_warn("misaligned total length detected (%llu, %llu)!",
- total_len, block_shift);
-
- *num_of_mtts = total_len >> block_shift;
-end:
- if (block_shift < min_shift) {
- /*
- * If shift is less than the min we set a warning and return the
- * min shift.
- */
- pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
-
- block_shift = min_shift;
- }
- return block_shift;
+ return 0;
}
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
@@ -424,6 +158,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_umem;
+ }
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
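The rewritten mlx4_ib_umem_write_mtt() above drops the hand-rolled aggregation in favour of rdma_umem_for_each_dma_block(): the umem is walked in fixed blocks of 1 << mtt->page_shift bytes and one MTT entry is written per block. The sketch below walks a flat address range the same way; the real iterator walks a DMA scatterlist, and mtt_write() here merely stands in for mlx4_write_mtt().

/*
 * Userspace sketch of per-block MTT population. Names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static int mtt_write(int index, uint64_t addr)
{
	printf("mtt[%d] = 0x%llx\n", index, (unsigned long long)addr);
	return 0;
}

static int write_mtt_range(uint64_t start, uint64_t len, unsigned int page_shift)
{
	uint64_t blk = 1ULL << page_shift;
	uint64_t addr = start & ~(blk - 1);    /* align down to the block size */
	uint64_t end = start + len;
	int i = 0, err;

	for (; addr < end; addr += blk) {
		err = mtt_write(i++, addr);
		if (err)
			return err;            /* propagate, like the kernel loop */
	}
	return 0;
}

int main(void)
{
	return write_mtt_range(0x10000500, 0x3000, 12); /* 4 KB blocks */
}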
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9d08aa99f3cb..50fd407103c7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -925,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
@@ -1108,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
}
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
- err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+ if (shift < 0) {
+ err = shift;
+ goto err_buf;
+ }
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
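The call sites patched in cq.c, mr.c and qp.c all follow the same convention: mlx4_ib_umem_calc_optimal_mtt_size() now returns either a non-negative page shift or a negative errno (for example -EOPNOTSUPP when no supported page size fits), so each caller must test the sign before handing the shift to mlx4_mtt_init(). A compact sketch of the convention, with illustrative names only:

/*
 * "Shift or -errno" return convention, modelled in userspace.
 */
#include <errno.h>
#include <stdio.h>

static int calc_shift(unsigned long supported_mask, unsigned long pgsz)
{
	int shift = 0;

	if (!(supported_mask & pgsz))
		return -EOPNOTSUPP;        /* no usable page size */
	while ((1UL << shift) < pgsz)
		shift++;
	return shift;                      /* log2 of the chosen page size */
}

int main(void)
{
	int shift = calc_shift(0xFFFFF000UL, 1UL << 12);

	if (shift < 0) {                   /* mirrors the new "if (shift < 0)" checks */
		fprintf(stderr, "mapping failed: %d\n", shift);
		return 1;
	}
	printf("using page shift %d\n", shift);
	return 0;
}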
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f5b59d02f4d3..81849eb671a1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -242,6 +242,10 @@ static int mlx5_netdev_event(struct notifier_block *this,
case NETDEV_DOWN: {
struct net_device *upper = NULL;
+ if (!netif_is_lag_master(ndev) && !netif_is_lag_port(ndev) &&
+ !mlx5_core_mp_enabled(mdev))
+ return NOTIFY_DONE;
+
if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
struct net_device *lag_ndev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a01b592aa716..974a45c92fbb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -669,6 +669,12 @@ struct mlx5_ib_mkey {
#define mlx5_update_odp_stats(mr, counter_name, value) \
atomic64_add(value, &((mr)->odp_stats.counter_name))
+#define mlx5_update_odp_stats_with_handled(mr, counter_name, value) \
+ do { \
+ mlx5_update_odp_stats(mr, counter_name, value); \
+ atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
+ } while (0)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
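The mlx5_update_odp_stats_with_handled() macro above uses token pasting so that a single invocation adds the per-event page count to <counter> and bumps the companion <counter>_handled event counter by one; the restrack.c hunk further down exposes both values. A userspace model of the same macro, with the kernel's atomic64_add() replaced by C11 atomics:

/*
 * Token-pasting statistics macro, modelled with <stdatomic.h>.
 * Field names are illustrative, not the driver's odp_stats layout.
 */
#include <stdatomic.h>
#include <stdio.h>

struct odp_stats {
	atomic_long faults;
	atomic_long faults_handled;
};

#define update_odp_stats_with_handled(stats, counter_name, value)      \
	do {                                                            \
		atomic_fetch_add(&(stats)->counter_name, (value));      \
		atomic_fetch_add(&(stats)->counter_name##_handled, 1);  \
	} while (0)

int main(void)
{
	struct odp_stats s = { 0 };

	update_odp_stats_with_handled(&s, faults, 32);  /* 32 pages, one fault event */
	update_odp_stats_with_handled(&s, faults, 8);

	printf("faults=%ld handled=%ld\n",
	       atomic_load(&s.faults), atomic_load(&s.faults_handled));
	return 0;
}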
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 45d9dc9c6c8f..bb02b6adbf2c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -2021,6 +2021,11 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ int ret = 0;
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
@@ -2032,7 +2037,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
- return 0;
+ goto out;
}
if (ent) {
@@ -2041,7 +2046,15 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
- return destroy_mkey(dev, mr);
+ ret = destroy_mkey(dev, mr);
+out:
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ return ret;
}
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4b37446758fd..f1e23583e6c0 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -228,13 +228,27 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
+ /*
+ * If userspace is racing freeing the parent implicit ODP MR then we can
+ * lose the race with parent destruction. In this case

+ * mlx5_ib_free_odp_mr() will free everything in the implicit_children
+ * xarray so NOP is fine. This child MR cannot be destroyed here because
+ * we are under its umem_mutex.
+ */
if (!refcount_inc_not_zero(&imr->mmkey.usecount))
return;
- xa_erase(&imr->implicit_children, idx);
+ xa_lock(&imr->implicit_children);
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ mr) {
+ xa_unlock(&imr->implicit_children);
+ return;
+ }
+
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
- xa_erase(&mr_to_mdev(mr)->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key));
+ __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
+ xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
@@ -268,6 +282,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
if (!umem_odp->npages)
goto out;
mr = umem_odp->private;
+ if (!mr)
+ goto out;
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
@@ -313,7 +329,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
- mlx5_update_odp_stats(mr, invalidations, invalidations);
+ mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
/*
* We are now sure that the device will not access the
@@ -500,18 +516,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
refcount_inc(&ret->mmkey.usecount);
goto out_lock;
}
- xa_unlock(&imr->implicit_children);
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
- ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL);
+ ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
- xa_erase(&imr->implicit_children, idx);
- goto out_mr;
+ __xa_erase(&imr->implicit_children, idx);
+ goto out_lock;
}
mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
}
+ xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -944,8 +960,7 @@ out:
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
- * Returns number of OS pages retrieved on success. The caller may continue to
- * the next data segment.
+ * Returns zero on success. The caller may continue to the next data segment.
* Can return the following error codes:
* -EAGAIN to designate a temporary error. The caller will abort handling the
* page fault and resolve it.
@@ -958,7 +973,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
- int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
+ int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
struct pf_frame *head = NULL, *frame;
struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -993,13 +1008,20 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
+ (io_virt & PAGE_MASK)) >>
+ PAGE_SHIFT;
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
+
+ if (ret < pages_in_range) {
+ ret = -EFAULT;
+ goto end;
+ }
- npages += ret;
ret = 0;
break;
@@ -1090,7 +1112,7 @@ end:
kfree(out);
*bytes_committed = 0;
- return ret ? ret : npages;
+ return ret;
}
/*
@@ -1109,8 +1131,7 @@ end:
* the committed bytes).
* @receive_queue: receive WQE end of sg list
*
- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
- * negative error code.
+ * Returns zero for success or a negative error code.
*/
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
@@ -1118,7 +1139,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, bool receive_queue)
{
- int ret = 0, npages = 0;
+ int ret = 0;
u64 io_virt;
__be32 key;
u32 byte_count;
@@ -1175,10 +1196,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
bytes_mapped);
if (ret < 0)
break;
- npages += ret;
}
- return ret < 0 ? ret : npages;
+ return ret;
}
/*
@@ -1414,12 +1434,6 @@ resolve_page_fault:
free_page((unsigned long)wqe_start);
}
-static int pages_in_range(u64 address, u32 length)
-{
- return (ALIGN(address + length, PAGE_SIZE) -
- (address & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
@@ -1458,7 +1472,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
- } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ } else if (ret < 0) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
@@ -1529,7 +1543,7 @@ static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
goto err;
}
- mlx5_update_odp_stats(mr, faults, ret);
+ mlx5_update_odp_stats_with_handled(mr, faults, ret);
mlx5r_deref_odp_mkey(mmkey);
if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
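The odp.c changes above fold the old pages_in_range() helper into pagefault_single_data_segment(): the number of OS pages spanned by the byte range [io_virt, io_virt + bcnt) is computed up front, and a fault that maps fewer pages than that now fails with -EFAULT instead of being reported as partial progress. The arithmetic is simple enough to check in isolation:

/*
 * Page-span arithmetic for a byte range, assuming 4 KB pages.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (UINT64_C(1) << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static uint64_t pages_in_range(uint64_t io_virt, uint64_t bcnt)
{
	return (ALIGN(io_virt + bcnt, PAGE_SIZE) - (io_virt & PAGE_MASK))
	       >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes straddling a page boundary -> 2 pages */
	printf("%" PRIu64 "\n", pages_in_range(0x1FD0, 100));
	/* one page, page aligned -> 1 page */
	printf("%" PRIu64 "\n", pages_in_range(0x3000, PAGE_SIZE));
	return 0;
}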
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
index affcf8fe943c..67841922c7b8 100644
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -96,9 +96,18 @@ static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
atomic64_read(&mr->odp_stats.faults)))
goto err_table;
if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_faults_handled",
+ atomic64_read(&mr->odp_stats.faults_handled)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
msg, "page_invalidations",
atomic64_read(&mr->odp_stats.invalidations)))
goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations_handled",
+ atomic64_read(&mr->odp_stats.invalidations_handled)))
+ goto err_table;
+
if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
atomic64_read(&mr->odp_stats.prefetch)))
goto err_table;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ba2cd68b53e6..805e37dc7621 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -214,8 +214,8 @@ static const struct attribute_group port_linkcontrol_group = {
* Congestion control table size followed by table entries
*/
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -241,7 +241,7 @@ static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
/*
* Congestion settings: port control, control map and an array of 16
@@ -249,8 +249,8 @@ static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t pos, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
int ret;
@@ -274,9 +274,9 @@ static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
+static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-static struct bin_attribute *port_ccmgta_attributes[] = {
+static const struct bin_attribute *const port_ccmgta_attributes[] = {
&bin_attr_cc_setting_bin,
&bin_attr_cc_table_bin,
NULL,
@@ -295,7 +295,7 @@ static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
static const struct attribute_group port_ccmgta_attribute_group = {
.name = "CCMgtA",
.is_bin_visible = qib_ccmgta_is_bin_visible,
- .bin_attrs = port_ccmgta_attributes,
+ .bin_attrs_new = port_ccmgta_attributes,
};
/* Start sl2vl */
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
index 7fe9502ce8d3..86a82a4da0aa 100644
--- a/drivers/infiniband/hw/usnic/usnic_abi.h
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp {
u64 bar_bus_addr;
u32 bar_len;
/*
- * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
+ * WQ, RQ, CQ are explicitly specified bc exposing a generic resources interface
* expands the scope of ABI to many files.
*/
u32 wq_cnt;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 13b654ddd3cc..4ddcd5860e0f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -151,34 +151,6 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
ib_event.element.port_num = 1;
ib_dispatch_event(&ib_event);
break;
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- if (!us_ibdev->ufdev->link_up &&
- netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- ib_event.event = IB_EVENT_PORT_ACTIVE;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else if (us_ibdev->ufdev->link_up &&
- !netif_carrier_ok(netdev)) {
- usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n",
- dev_name(&us_ibdev->ib_dev.dev));
- usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
- ib_event.event = IB_EVENT_PORT_ERR;
- ib_event.device = &us_ibdev->ib_dev;
- ib_event.element.port_num = 1;
- ib_dispatch_event(&ib_event);
- } else {
- usnic_dbg("Ignoring %s on %s\n",
- netdev_cmd_to_name(event),
- dev_name(&us_ibdev->ib_dev.dev));
- }
- break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
@@ -218,6 +190,50 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
mutex_unlock(&us_ibdev->usdev_lock);
}
+static void usnic_ib_handle_port_event(struct ib_device *ibdev,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ netdev_cmd_to_name(event),
+ dev_name(&us_ibdev->ib_dev.dev));
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
@@ -358,6 +374,7 @@ static const struct ib_device_ops usnic_dev_ops = {
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
+ .report_port_event = usnic_ib_handle_port_event,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 768aad364c89..1664d1d7d969 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -143,6 +143,46 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_report_event_handle(struct ib_device *ibdev,
+ struct net_device *ndev,
+ unsigned long event)
+{
+ struct pvrdma_dev *dev = container_of(ibdev, struct pvrdma_dev, ib_dev);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+ PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+ mb();
+
+ if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+ dev_err(&dev->pdev->dev,
+ "failed to activate device during link up\n");
+ else
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct ib_device_ops pvrdma_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_VMW_PVRDMA,
@@ -181,6 +221,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+ .report_port_event = pvrdma_report_event_handle,
INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
@@ -362,18 +403,6 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
}
-static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
- enum ib_event_type event)
-{
- struct ib_event ib_event;
-
- memset(&ib_event, 0, sizeof(ib_event));
- ib_event.device = &dev->ib_dev;
- ib_event.element.port_num = port;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
-}
-
static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
@@ -666,21 +695,8 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
switch (event) {
case NETDEV_REBOOT:
- case NETDEV_DOWN:
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
- case NETDEV_UP:
- pvrdma_write_reg(dev, PVRDMA_REG_CTL,
- PVRDMA_DEVICE_CTL_UNQUIESCE);
-
- mb();
-
- if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
- dev_err(&dev->pdev->dev,
- "failed to activate device during link up\n");
- else
- pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
- break;
case NETDEV_UNREGISTER:
ib_device_set_netdev(&dev->ib_dev, NULL, 1);
dev_put(dev->netdev);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8cc64ceeb356..132a87e52d5c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -571,11 +571,6 @@ static void rxe_port_event(struct rxe_dev *rxe,
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_ACTIVE;
-
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
dev_info(&rxe->ib_dev.dev, "set active\n");
}
@@ -583,11 +578,6 @@ void rxe_port_up(struct rxe_dev *rxe)
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_DOWN;
-
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
dev_info(&rxe->ib_dev.dev, "set down\n");
@@ -601,7 +591,7 @@ void rxe_set_port_state(struct rxe_dev *rxe)
if (!ndev)
return;
- if (netif_running(ndev) && netif_carrier_ok(ndev))
+ if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
rxe_port_up(rxe);
else
rxe_port_down(rxe);
@@ -623,18 +613,14 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_UNREGISTER:
ib_unregister_device_queued(&rxe->ib_dev);
break;
- case NETDEV_UP:
- rxe_port_up(rxe);
- break;
- case NETDEV_DOWN:
- rxe_port_down(rxe);
- break;
case NETDEV_CHANGEMTU:
rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
- rxe_set_port_state(rxe);
+ if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
break;
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index d2f57ead78ad..003f681e5dc0 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -129,7 +129,7 @@ enum rxe_device_param {
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
- RXE_PORT_MAX_MSG_SZ = 0x800000,
+ RXE_PORT_MAX_MSG_SZ = (1UL << 31),
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
RXE_PORT_LID = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 67567d62195e..d9cb682fd71f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
{
struct rxe_pool *pool = elem->pool;
struct xarray *xa = &pool->xa;
- static int timeout = RXE_POOL_TIMEOUT;
int ret, err = 0;
void *xa_ret;
@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
* return to rdma-core
*/
if (sleepable) {
- if (!completion_done(&elem->complete) && timeout) {
+ if (!completion_done(&elem->complete)) {
ret = wait_for_completion_timeout(&elem->complete,
- timeout);
+ msecs_to_jiffies(50000));
/* Shouldn't happen. There are still references to
* the object but, rather than deadlock, free the
* object or pass back to rdma-core.
*/
if (WARN_ON(!ret))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
} else {
- unsigned long until = jiffies + timeout;
+ unsigned long until = jiffies + RXE_POOL_TIMEOUT;
/* AH objects are unique in that the destroy_ah verb
* can be called in atomic context. This delay
@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
mdelay(1);
if (WARN_ON(!completion_done(&elem->complete)))
- err = -EINVAL;
+ err = -ETIMEDOUT;
}
if (pool->cleanup)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 8a5fc20fd186..6152a0fdfc8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -62,6 +62,7 @@ static int rxe_query_port(struct ib_device *ibdev,
ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
&attr->active_width);
+ attr->state = ib_get_curr_port_state(ndev);
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(ndev) & IFF_UP)
@@ -696,7 +697,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
for (i = 0; i < ibwr->num_sge; i++)
length += ibwr->sg_list[i].length;
- if (length > (1UL << 31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
rxe_err_qp(qp, "message length too long\n");
break;
}
@@ -980,8 +981,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- /* IBA max message size is 2^31 */
- if (length >= (1UL<<31)) {
+ if (length > RXE_PORT_MAX_MSG_SZ) {
err = -EINVAL;
rxe_dbg("message length too long\n");
goto err_out;
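The rxe changes above raise RXE_PORT_MAX_MSG_SZ to 2^31 bytes and make both the send and receive post paths validate the summed SGE lengths against that single constant (the receive path now uses '>' rather than '>=', matching the send side). A small sketch of the shared check, with illustrative structure names:

/*
 * Sum the SGE lengths of a work request and reject it if the total
 * exceeds the port's maximum message size.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RXE_PORT_MAX_MSG_SZ (1UL << 31)

struct sge { uint32_t length; };

static int validate_wr(const struct sge *sg_list, int num_sge)
{
	uint64_t length = 0;
	int i;

	for (i = 0; i < num_sge; i++)
		length += sg_list[i].length;

	if (length > RXE_PORT_MAX_MSG_SZ)
		return -EINVAL;          /* "message length too long" */
	return 0;
}

int main(void)
{
	struct sge ok[2]  = { { 1u << 30 }, { 1u << 30 } };        /* exactly 2 GB */
	struct sge bad[3] = { { 1u << 30 }, { 1u << 30 }, { 1 } }; /* one byte over */

	printf("ok:  %d\n", validate_wr(ok, 2));
	printf("bad: %d\n", validate_wr(bad, 3));
	return 0;
}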
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 14d3103aee6f..b17752bd1ecc 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -379,14 +379,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
sdev = to_siw_dev(base_dev);
switch (event) {
- case NETDEV_UP:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
- break;
-
- case NETDEV_DOWN:
- siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
- break;
-
case NETDEV_REGISTER:
/*
* Device registration now handled only by
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 7ca0297d68a4..5ac8bd450d24 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -189,10 +189,9 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
- attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ attr->state = ib_get_curr_port_state(ndev);
+ attr->phys_state = attr->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
- attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
/*
* All zero
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 4e17d546d4cc..bf38ac6f87c4 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
list_del(&dev->entry);
mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
ib_dealloc_pd(dev->ib_pd);
kfree(dev);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2916e77f589b..1378651735f6 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2844,7 +2844,8 @@ static int srp_target_alloc(struct scsi_target *starget)
return 0;
}
-static int srp_slave_configure(struct scsi_device *sdev)
+static int srp_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct srp_target_port *target = host_to_target(shost);
@@ -3067,7 +3068,7 @@ static const struct scsi_host_template srp_template = {
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
- .slave_configure = srp_slave_configure,
+ .sdev_configure = srp_sdev_configure,
.info = srp_target_info,
.init_cmd_priv = srp_init_cmd_priv,
.exit_cmd_priv = srp_exit_cmd_priv,
@@ -3978,7 +3979,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
return host;
put_host:
- device_del(&host->dev);
put_device(&host->dev);
return NULL;
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 3bdbd34314b3..88ecdf5218ee 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -152,20 +152,6 @@ config INPUT_EVDEV
To compile this driver as a module, choose M here: the
module will be called evdev.
-config INPUT_EVBUG
- tristate "Event debugging"
- help
- Say Y here if you have a problem with the input subsystem and
- want all events (keypresses, mouse movements), to be output to
- the system log. While this is useful for debugging, it's also
- a security threat - your keypresses include your passwords, of
- course.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called evbug.
-
config INPUT_KUNIT_TEST
tristate "KUnit tests for Input" if !KUNIT_ALL_TESTS
depends on INPUT && KUNIT
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index c78753274921..930b64d2115e 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_INPUT_LEDS) += input-leds.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
obj-$(CONFIG_INPUT_EVDEV) += evdev.o
-obj-$(CONFIG_INPUT_EVBUG) += evbug.o
obj-$(CONFIG_INPUT_KEYBOARD) += keyboard/
obj-$(CONFIG_INPUT_MOUSE) += mouse/
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
deleted file mode 100644
index e47bdf92088a..000000000000
--- a/drivers/input/evbug.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 1999-2001 Vojtech Pavlik
- */
-
-/*
- * Input driver event debug module - dumps all events into syslog
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/init.h>
-#include <linux/device.h>
-
-MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
-MODULE_DESCRIPTION("Input driver event debug module");
-MODULE_LICENSE("GPL");
-
-static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
-{
- printk(KERN_DEBUG pr_fmt("Event. Dev: %s, Type: %d, Code: %d, Value: %d\n"),
- dev_name(&handle->dev->dev), type, code, value);
-}
-
-static int evbug_connect(struct input_handler *handler, struct input_dev *dev,
- const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "evbug";
-
- error = input_register_handle(handle);
- if (error)
- goto err_free_handle;
-
- error = input_open_device(handle);
- if (error)
- goto err_unregister_handle;
-
- printk(KERN_DEBUG pr_fmt("Connected device: %s (%s at %s)\n"),
- dev_name(&dev->dev),
- dev->name ?: "unknown",
- dev->phys ?: "unknown");
-
- return 0;
-
- err_unregister_handle:
- input_unregister_handle(handle);
- err_free_handle:
- kfree(handle);
- return error;
-}
-
-static void evbug_disconnect(struct input_handle *handle)
-{
- printk(KERN_DEBUG pr_fmt("Disconnected device: %s\n"),
- dev_name(&handle->dev->dev));
-
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
-}
-
-static const struct input_device_id evbug_ids[] = {
- { .driver_info = 1 }, /* Matches all devices */
- { }, /* Terminating zero entry */
-};
-
-MODULE_DEVICE_TABLE(input, evbug_ids);
-
-static struct input_handler evbug_handler = {
- .event = evbug_event,
- .connect = evbug_connect,
- .disconnect = evbug_disconnect,
- .name = "evbug",
- .id_table = evbug_ids,
-};
-
-static int __init evbug_init(void)
-{
- return input_register_handler(&evbug_handler);
-}
-
-static void __exit evbug_exit(void)
-{
- input_unregister_handler(&evbug_handler);
-}
-
-module_init(evbug_init);
-module_exit(evbug_exit);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 609a5f01761b..b527308cb52e 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -93,7 +93,7 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
- int ret = 0;
+ int error;
int id;
if (!test_bit(EV_FF, dev->evbit))
@@ -114,22 +114,20 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
}
if (!test_bit(effect->type, ff->ffbit)) {
- ret = compat_effect(ff, effect);
- if (ret)
- return ret;
+ error = compat_effect(ff, effect);
+ if (error)
+ return error;
}
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
if (effect->id == -1) {
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
- if (id >= ff->max_effects) {
- ret = -ENOSPC;
- goto out;
- }
+ if (id >= ff->max_effects)
+ return -ENOSPC;
effect->id = id;
old = NULL;
@@ -137,30 +135,26 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
} else {
id = effect->id;
- ret = check_effect_access(ff, id, file);
- if (ret)
- goto out;
+ error = check_effect_access(ff, id, file);
+ if (error)
+ return error;
old = &ff->effects[id];
- if (!check_effects_compatible(effect, old)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!check_effects_compatible(effect, old))
+ return -EINVAL;
}
- ret = ff->upload(dev, effect, old);
- if (ret)
- goto out;
+ error = ff->upload(dev, effect, old);
+ if (error)
+ return error;
- spin_lock_irq(&dev->event_lock);
- ff->effects[id] = *effect;
- ff->effect_owners[id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->effects[id] = *effect;
+ ff->effect_owners[id] = file;
+ }
- out:
- mutex_unlock(&ff->mutex);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
@@ -178,17 +172,16 @@ static int erase_effect(struct input_dev *dev, int effect_id,
if (error)
return error;
- spin_lock_irq(&dev->event_lock);
- ff->playback(dev, effect_id, 0);
- ff->effect_owners[effect_id] = NULL;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ ff->playback(dev, effect_id, 0);
+ ff->effect_owners[effect_id] = NULL;
+ }
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
- spin_lock_irq(&dev->event_lock);
- ff->effect_owners[effect_id] = file;
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ ff->effect_owners[effect_id] = file;
return error;
}
@@ -210,16 +203,12 @@ static int erase_effect(struct input_dev *dev, int effect_id,
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
- int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
- mutex_lock(&ff->mutex);
- ret = erase_effect(dev, effect_id, file);
- mutex_unlock(&ff->mutex);
-
- return ret;
+ guard(mutex)(&ff->mutex);
+ return erase_effect(dev, effect_id, file);
}
EXPORT_SYMBOL_GPL(input_ff_erase);
@@ -239,13 +228,11 @@ int input_ff_flush(struct input_dev *dev, struct file *file)
dev_dbg(&dev->dev, "flushing now\n");
- mutex_lock(&ff->mutex);
+ guard(mutex)(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
- mutex_unlock(&ff->mutex);
-
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_flush);
@@ -303,8 +290,6 @@ EXPORT_SYMBOL_GPL(input_ff_event);
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
- struct ff_device *ff;
- size_t ff_dev_size;
int i;
if (!max_effects) {
@@ -317,25 +302,19 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
return -EINVAL;
}
- ff_dev_size = struct_size(ff, effect_owners, max_effects);
- if (ff_dev_size == SIZE_MAX) /* overflow */
- return -EINVAL;
-
- ff = kzalloc(ff_dev_size, GFP_KERNEL);
+ struct ff_device *ff __free(kfree) =
+ kzalloc(struct_size(ff, effect_owners, max_effects),
+ GFP_KERNEL);
if (!ff)
return -ENOMEM;
- ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
- GFP_KERNEL);
- if (!ff->effects) {
- kfree(ff);
+ ff->effects = kcalloc(max_effects, sizeof(*ff->effects), GFP_KERNEL);
+ if (!ff->effects)
return -ENOMEM;
- }
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
- dev->ff = ff;
dev->flush = input_ff_flush;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
@@ -348,6 +327,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
+ dev->ff = no_free_ptr(ff);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
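
[Note: the following is an illustrative sketch only, not part of the patch above. It shows the scope-based allocation pattern the ff-core.c (and later input-mt.c) hunks switch to: __free(kfree) frees the buffer automatically on any early return, and no_free_ptr() transfers ownership out once setup has succeeded. All names here are made up.]

#include <linux/cleanup.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_obj {
	unsigned int nr;
	int items[];
};

static struct demo_obj *demo_obj_create(unsigned int nr)
{
	/* Freed automatically on any early return while it still owns the memory. */
	struct demo_obj *obj __free(kfree) =
		kzalloc(struct_size(obj, items, nr), GFP_KERNEL);

	if (!obj)
		return NULL;

	obj->nr = nr;

	/* Further setup that may fail can simply "return NULL" here. */

	/* Success: hand ownership to the caller, suppressing the auto-free. */
	return no_free_ptr(obj);
}
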
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index c321cdabd214..e9120ba6bae0 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -401,13 +401,11 @@ static void ml_effect_timer(struct timer_list *t)
{
struct ml_device *ml = from_timer(ml, t, timer);
struct input_dev *dev = ml->dev;
- unsigned long flags;
pr_debug("timer: updating effects\n");
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
ml_play_effects(ml);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/*
@@ -465,7 +463,7 @@ static int ml_ff_upload(struct input_dev *dev,
struct ml_device *ml = dev->ff->private;
struct ml_effect_state *state = &ml->states[effect->id];
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
__clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -477,8 +475,6 @@ static int ml_ff_upload(struct input_dev *dev,
ml_schedule_timer(ml);
}
- spin_unlock_irq(&dev->event_lock);
-
return 0;
}
@@ -507,12 +503,11 @@ static void ml_ff_destroy(struct ff_device *ff)
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
- struct ml_device *ml;
struct ff_device *ff;
int error;
int i;
- ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL);
+ struct ml_device *ml __free(kfree) = kzalloc(sizeof(*ml), GFP_KERNEL);
if (!ml)
return -ENOMEM;
@@ -525,13 +520,10 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
set_bit(FF_GAIN, dev->ffbit);
error = input_ff_create(dev, FF_MEMLESS_EFFECTS);
- if (error) {
- kfree(ml);
+ if (error)
return error;
- }
ff = dev->ff;
- ff->private = ml;
ff->upload = ml_ff_upload;
ff->playback = ml_ff_playback;
ff->set_gain = ml_ff_set_gain;
@@ -548,6 +540,8 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
ml->states[i].effect = &ff->effects[i];
+ ff->private = no_free_ptr(ml);
+
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create_memless);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 6b04a674f832..337006dd9dcf 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -39,20 +39,20 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int flags)
{
- struct input_mt *mt = dev->mt;
- int i;
-
if (!num_slots)
return 0;
- if (mt)
- return mt->num_slots != num_slots ? -EINVAL : 0;
+
+ if (dev->mt)
+ return dev->mt->num_slots != num_slots ? -EINVAL : 0;
+
/* Arbitrary limit for avoiding too large memory allocation. */
if (num_slots > 1024)
return -EINVAL;
- mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ struct input_mt *mt __free(kfree) =
+ kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
if (!mt)
- goto err_mem;
+ return -ENOMEM;
mt->num_slots = num_slots;
mt->flags = flags;
@@ -86,21 +86,18 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int n2 = num_slots * num_slots;
mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL);
if (!mt->red)
- goto err_mem;
+ return -ENOMEM;
}
/* Mark slots as 'inactive' */
- for (i = 0; i < num_slots; i++)
+ for (unsigned int i = 0; i < num_slots; i++)
input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1);
/* Mark slots as 'unused' */
mt->frame = 1;
- dev->mt = mt;
+ dev->mt = no_free_ptr(mt);
return 0;
-err_mem:
- kfree(mt);
- return -ENOMEM;
}
EXPORT_SYMBOL(input_mt_init_slots);
@@ -285,14 +282,10 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
mt->frame++;
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
@@ -339,11 +332,8 @@ void input_mt_sync_frame(struct input_dev *dev)
return;
if (mt->flags & INPUT_MT_DROP_UNUSED) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
__input_mt_drop_unused(dev, mt);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 688e3cb1c2a0..9c57713a6151 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -162,7 +162,7 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
if (interval > poller->poll_interval_max)
return -EINVAL;
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
poller->poll_interval = interval;
@@ -172,8 +172,6 @@ static ssize_t input_dev_set_poll_interval(struct device *dev,
input_dev_poller_queue_work(poller);
}
- mutex_unlock(&input->mutex);
-
return count;
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7f0477e04ad2..c9e3ac64bcd0 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -115,23 +115,23 @@ static void input_pass_values(struct input_dev *dev,
lockdep_assert_held(&dev->event_lock);
- rcu_read_lock();
+ scoped_guard(rcu) {
+ handle = rcu_dereference(dev->grab);
+ if (handle) {
+ count = handle->handle_events(handle, vals, count);
+ break;
+ }
- handle = rcu_dereference(dev->grab);
- if (handle) {
- count = handle->handle_events(handle, vals, count);
- } else {
- list_for_each_entry_rcu(handle, &dev->h_list, d_node)
+ list_for_each_entry_rcu(handle, &dev->h_list, d_node) {
if (handle->open) {
count = handle->handle_events(handle, vals,
count);
if (!count)
break;
}
+ }
}
- rcu_read_unlock();
-
/* trigger auto repeat for key events */
if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
for (v = vals; v != vals + count; v++) {
@@ -390,13 +390,9 @@ void input_handle_event(struct input_dev *dev,
void input_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
- unsigned long flags;
-
if (is_event_supported(type, dev->evbit, EV_MAX)) {
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_handle_event(dev, type, code, value);
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_event);
@@ -417,18 +413,15 @@ void input_inject_event(struct input_handle *handle,
{
struct input_dev *dev = handle->dev;
struct input_handle *grab;
- unsigned long flags;
if (is_event_supported(type, dev->evbit, EV_MAX)) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
+ guard(rcu)();
- rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
input_handle_event(dev, type, code, value);
- rcu_read_unlock();
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_inject_event);
@@ -526,22 +519,15 @@ EXPORT_SYMBOL(input_copy_abs);
int input_grab_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->grab)
+ return -EBUSY;
- if (dev->grab) {
- retval = -EBUSY;
- goto out;
+ rcu_assign_pointer(dev->grab, handle);
}
- rcu_assign_pointer(dev->grab, handle);
-
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_grab_device);
@@ -576,9 +562,8 @@ void input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);
@@ -592,67 +577,57 @@ EXPORT_SYMBOL(input_release_device);
int input_open_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- int retval;
-
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->going_away) {
- retval = -ENODEV;
- goto out;
- }
+ int error;
- handle->open++;
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->going_away)
+ return -ENODEV;
- if (handle->handler->passive_observer)
- goto out;
+ handle->open++;
- if (dev->users++ || dev->inhibited) {
- /*
- * Device is already opened and/or inhibited,
- * so we can exit immediately and report success.
- */
- goto out;
- }
+ if (handle->handler->passive_observer)
+ return 0;
- if (dev->open) {
- retval = dev->open(dev);
- if (retval) {
- dev->users--;
- handle->open--;
+ if (dev->users++ || dev->inhibited) {
/*
- * Make sure we are not delivering any more events
- * through this handle
+ * Device is already opened and/or inhibited,
+ * so we can exit immediately and report success.
*/
- synchronize_rcu();
- goto out;
+ return 0;
}
- }
- if (dev->poller)
- input_dev_poller_start(dev->poller);
+ if (dev->open) {
+ error = dev->open(dev);
+ if (error) {
+ dev->users--;
+ handle->open--;
+ /*
+ * Make sure we are not delivering any more
+ * events through this handle.
+ */
+ synchronize_rcu();
+ return error;
+ }
+ }
- out:
- mutex_unlock(&dev->mutex);
- return retval;
+ if (dev->poller)
+ input_dev_poller_start(dev->poller);
+ }
+
+ return 0;
}
EXPORT_SYMBOL(input_open_device);
int input_flush_device(struct input_handle *handle, struct file *file)
{
struct input_dev *dev = handle->dev;
- int retval;
- retval = mutex_lock_interruptible(&dev->mutex);
- if (retval)
- return retval;
-
- if (dev->flush)
- retval = dev->flush(dev, file);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ if (dev->flush)
+ return dev->flush(dev, file);
+ }
- mutex_unlock(&dev->mutex);
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_flush_device);
@@ -667,7 +642,7 @@ void input_close_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
__input_release_device(handle);
@@ -688,8 +663,6 @@ void input_close_device(struct input_handle *handle)
*/
synchronize_rcu();
}
-
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);
@@ -726,11 +699,10 @@ static void input_disconnect_device(struct input_dev *dev)
* not to protect access to dev->going_away but rather to ensure
* that there are no threads in the middle of input_open_device()
*/
- mutex_lock(&dev->mutex);
- dev->going_away = true;
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ dev->going_away = true;
- spin_lock_irq(&dev->event_lock);
+ guard(spinlock_irq)(&dev->event_lock);
/*
* Simulate keyup events for all pressed keys so that handlers
@@ -743,8 +715,6 @@ static void input_disconnect_device(struct input_dev *dev)
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
-
- spin_unlock_irq(&dev->event_lock);
}
/**
@@ -901,14 +871,9 @@ static int input_default_setkeycode(struct input_dev *dev,
*/
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
- unsigned long flags;
- int retval;
+ guard(spinlock_irqsave)(&dev->event_lock);
- spin_lock_irqsave(&dev->event_lock, flags);
- retval = dev->getkeycode(dev, ke);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return dev->getkeycode(dev, ke);
}
EXPORT_SYMBOL(input_get_keycode);
@@ -923,18 +888,17 @@ EXPORT_SYMBOL(input_get_keycode);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke)
{
- unsigned long flags;
unsigned int old_keycode;
- int retval;
+ int error;
if (ke->keycode > KEY_MAX)
return -EINVAL;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
- retval = dev->setkeycode(dev, ke, &old_keycode);
- if (retval)
- goto out;
+ error = dev->setkeycode(dev, ke, &old_keycode);
+ if (error)
+ return error;
/* Make sure KEY_RESERVED did not get enabled. */
__clear_bit(KEY_RESERVED, dev->keybit);
@@ -962,10 +926,7 @@ int input_set_keycode(struct input_dev *dev,
EV_SYN, SYN_REPORT, 1);
}
- out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_set_keycode);
@@ -1799,26 +1760,21 @@ static void input_dev_toggle(struct input_dev *dev, bool activate)
*/
void input_reset_device(struct input_dev *dev)
{
- unsigned long flags;
-
- mutex_lock(&dev->mutex);
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(mutex)(&dev->mutex);
+ guard(spinlock_irqsave)(&dev->event_lock);
input_dev_toggle(dev, true);
if (input_dev_release_keys(dev))
input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
static int input_inhibit_device(struct input_dev *dev)
{
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->close)
@@ -1827,54 +1783,50 @@ static int input_inhibit_device(struct input_dev *dev)
input_dev_poller_stop(dev->poller);
}
- spin_lock_irq(&dev->event_lock);
- input_mt_release_slots(dev);
- input_dev_release_keys(dev);
- input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
- input_dev_toggle(dev, false);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ input_mt_release_slots(dev);
+ input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_dev_toggle(dev, false);
+ }
dev->inhibited = true;
-out:
- mutex_unlock(&dev->mutex);
return 0;
}
static int input_uninhibit_device(struct input_dev *dev)
{
- int ret = 0;
+ int error;
- mutex_lock(&dev->mutex);
+ guard(mutex)(&dev->mutex);
if (!dev->inhibited)
- goto out;
+ return 0;
if (dev->users) {
if (dev->open) {
- ret = dev->open(dev);
- if (ret)
- goto out;
+ error = dev->open(dev);
+ if (error)
+ return error;
}
if (dev->poller)
input_dev_poller_start(dev->poller);
}
dev->inhibited = false;
- spin_lock_irq(&dev->event_lock);
- input_dev_toggle(dev, true);
- spin_unlock_irq(&dev->event_lock);
-out:
- mutex_unlock(&dev->mutex);
- return ret;
+ scoped_guard(spinlock_irq, &dev->event_lock)
+ input_dev_toggle(dev, true);
+
+ return 0;
}
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1886,8 +1838,6 @@ static int input_dev_suspend(struct device *dev)
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1895,13 +1845,11 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Restore state of LEDs and sounds, if any were active. */
input_dev_toggle(input_dev, true);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1909,7 +1857,7 @@ static int input_dev_freeze(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/*
* Keys that are pressed now are unlikely to be
@@ -1918,8 +1866,6 @@ static int input_dev_freeze(struct device *dev)
if (input_dev_release_keys(input_dev))
input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -1927,13 +1873,11 @@ static int input_dev_poweroff(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- spin_lock_irq(&input_dev->event_lock);
+ guard(spinlock_irq)(&input_dev->event_lock);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
- spin_unlock_irq(&input_dev->event_lock);
-
return 0;
}
@@ -2274,18 +2218,16 @@ static void __input_unregister_device(struct input_dev *dev)
input_disconnect_device(dev);
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
+ scoped_guard(mutex, &input_mutex) {
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
device_del(&dev->dev);
}
@@ -2308,9 +2250,8 @@ static void devm_input_device_unregister(struct device *dev, void *res)
static void input_repeat_key(struct timer_list *t)
{
struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ guard(spinlock_irqsave)(&dev->event_lock);
if (!dev->inhibited &&
test_bit(dev->repeat_key, dev->key) &&
@@ -2324,8 +2265,6 @@ static void input_repeat_key(struct timer_list *t)
mod_timer(&dev->timer, jiffies +
msecs_to_jiffies(dev->rep[REP_PERIOD]));
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
@@ -2370,10 +2309,10 @@ static int input_device_tune_vals(struct input_dev *dev)
if (!vals)
return -ENOMEM;
- spin_lock_irq(&dev->event_lock);
- dev->max_vals = max_vals;
- swap(dev->vals, vals);
- spin_unlock_irq(&dev->event_lock);
+ scoped_guard(spinlock_irq, &dev->event_lock) {
+ dev->max_vals = max_vals;
+ swap(dev->vals, vals);
+ }
/* Because of swap() above, this frees the old vals memory */
kfree(vals);
@@ -2465,18 +2404,15 @@ int input_register_device(struct input_dev *dev)
path ? path : "N/A");
kfree(path);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- goto err_device_del;
+ error = -EINTR;
+ scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) {
+ list_add_tail(&dev->node, &input_dev_list);
- list_add_tail(&dev->node, &input_dev_list);
+ list_for_each_entry(handler, &input_handler_list, node)
+ input_attach_handler(dev, handler);
- list_for_each_entry(handler, &input_handler_list, node)
- input_attach_handler(dev, handler);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
+ input_wakeup_procfs_readers();
+ }
if (dev->devres_managed) {
dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
@@ -2556,20 +2492,17 @@ int input_register_handler(struct input_handler *handler)
if (error)
return error;
- INIT_LIST_HEAD(&handler->h_list);
+ scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) {
+ INIT_LIST_HEAD(&handler->h_list);
- error = mutex_lock_interruptible(&input_mutex);
- if (error)
- return error;
-
- list_add_tail(&handler->node, &input_handler_list);
+ list_add_tail(&handler->node, &input_handler_list);
- list_for_each_entry(dev, &input_dev_list, node)
- input_attach_handler(dev, handler);
+ list_for_each_entry(dev, &input_dev_list, node)
+ input_attach_handler(dev, handler);
- input_wakeup_procfs_readers();
+ input_wakeup_procfs_readers();
+ }
- mutex_unlock(&input_mutex);
return 0;
}
EXPORT_SYMBOL(input_register_handler);
@@ -2585,7 +2518,7 @@ void input_unregister_handler(struct input_handler *handler)
{
struct input_handle *handle, *next;
- mutex_lock(&input_mutex);
+ guard(mutex)(&input_mutex);
list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
handler->disconnect(handle);
@@ -2594,8 +2527,6 @@ void input_unregister_handler(struct input_handler *handler)
list_del_init(&handler->node);
input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);
@@ -2615,19 +2546,17 @@ int input_handler_for_each_handle(struct input_handler *handler, void *data,
int (*fn)(struct input_handle *, void *))
{
struct input_handle *handle;
- int retval = 0;
+ int retval;
- rcu_read_lock();
+ guard(rcu)();
list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
retval = fn(handle, data);
if (retval)
- break;
+ return retval;
}
- rcu_read_unlock();
-
- return retval;
+ return 0;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
@@ -2715,27 +2644,22 @@ int input_register_handle(struct input_handle *handle)
{
struct input_handler *handler = handle->handler;
struct input_dev *dev = handle->dev;
- int error;
input_handle_setup_event_handler(handle);
/*
* We take dev->mutex here to prevent race with
* input_release_device().
*/
- error = mutex_lock_interruptible(&dev->mutex);
- if (error)
- return error;
-
- /*
- * Filters go to the head of the list, normal handlers
- * to the tail.
- */
- if (handler->filter)
- list_add_rcu(&handle->d_node, &dev->h_list);
- else
- list_add_tail_rcu(&handle->d_node, &dev->h_list);
-
- mutex_unlock(&dev->mutex);
+ scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
+ /*
+ * Filters go to the head of the list, normal handlers
+ * to the tail.
+ */
+ if (handler->filter)
+ list_add_rcu(&handle->d_node, &dev->h_list);
+ else
+ list_add_tail_rcu(&handle->d_node, &dev->h_list);
+ }
/*
* Since we are supposed to be called from ->connect()
@@ -2771,9 +2695,8 @@ void input_unregister_handle(struct input_handle *handle)
/*
* Take dev->mutex to prevent race with input_release_device().
*/
- mutex_lock(&dev->mutex);
- list_del_rcu(&handle->d_node);
- mutex_unlock(&dev->mutex);
+ scoped_guard(mutex, &dev->mutex)
+ list_del_rcu(&handle->d_node);
synchronize_rcu();
}
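
[Note: illustrative sketch only, not part of the patch above. The input.c hunks replace manual lock/unlock (and goto-out unwinding) with the cleanup helpers from <linux/cleanup.h>: guard() holds a lock until the end of the enclosing scope, scoped_guard() holds it only for a braced statement, and scoped_cond_guard(mutex_intr, ...) covers the interruptible-lock cases. A minimal example with made-up names:]

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mutex);
static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_events;

static int demo_add_events(unsigned int n)
{
	/* Released automatically on every return path from this function. */
	guard(mutex)(&demo_mutex);

	if (!n)
		return -EINVAL;

	/* Spinlock held only for the duration of the braced statement. */
	scoped_guard(spinlock_irqsave, &demo_lock)
		demo_events += n;

	return 0;
}
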
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index f6e92db4d789..3a5873e5fcb3 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -14,6 +14,7 @@
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
+#include <linux/string_choices.h>
#define DRIVER_DESC "Microsoft SideWinder joystick family driver"
@@ -677,7 +678,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
case 48: /* Ambiguous */
if (j == 14) { /* ID length 14*3 -> FFP */
sw->type = SW_ID_FFP;
- sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on");
+ sprintf(comment, " [AC %s]", str_off_on(sw_get_bits(idbuf,38,1,3)));
} else
sw->type = SW_ID_PP;
break;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ff9bc87f2f70..8fe2a51df649 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -150,6 +150,7 @@ static const struct xpad_device {
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
@@ -305,6 +306,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -373,16 +375,19 @@ static const struct xpad_device {
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
- { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -514,6 +519,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
+ XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
@@ -530,6 +536,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
+ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
{ }
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ec94fcfa4cde..adf0f311996c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85,
+ 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85,
0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
index 993cdbda509e..4184dd2eaeeb 100644
--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
struct dir685_touchkeys {
@@ -48,7 +49,7 @@ static irqreturn_t dir685_tk_irq_thread(int irq, void *data)
changed = tk->cur_key ^ key;
for_each_set_bit(i, &changed, num_bits) {
dev_dbg(tk->dev, "key %d is %s\n", i,
- test_bit(i, &key) ? "down" : "up");
+ str_down_up(test_bit(i, &key)));
input_report_key(tk->input, tk->codes[i], test_bit(i, &key));
}
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index e26bf2956344..e19442c6f80f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -21,6 +21,7 @@
#include <linux/platform_data/lm8323.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Commands to send to the chip. */
#define LM8323_CMD_READ_ID 0x80 /* Read chip ID. */
@@ -269,7 +270,7 @@ static void process_keys(struct lm8323_chip *lm)
unsigned short keycode = lm->keymap[key];
dev_vdbg(&lm->client->dev, "key 0x%02x %s\n",
- key, isdown ? "down" : "up");
+ key, str_down_up(isdown));
if (lm->kp_enabled) {
input_event(lm->idev, EV_MSC, MSC_SCAN, key);
diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c
index f6e5fc807b4d..ab2e0a401904 100644
--- a/drivers/input/misc/ideapad_slidebar.c
+++ b/drivers/input/misc/ideapad_slidebar.c
@@ -121,7 +121,7 @@ static void slidebar_mode_set(u8 mode)
}
static bool slidebar_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended = false;
@@ -219,7 +219,7 @@ static int __init ideapad_probe(struct platform_device* pdev)
input_set_capability(slidebar_input_dev, EV_ABS, ABS_X);
input_set_abs_params(slidebar_input_dev, ABS_X, 0, 0xff, 0, 0);
- err = i8042_install_filter(slidebar_i8042_filter);
+ err = i8042_install_filter(slidebar_i8042_filter, NULL);
if (err) {
dev_err(&pdev->dev,
"Failed to install i8042 filter: %d\n", err);
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 0e646f1b257b..cdb9be737e48 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77693.h>
@@ -94,7 +95,7 @@ static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
on << MAINCTRL1_BIASEN_SHIFT);
if (error) {
dev_err(haptic->dev, "failed to %s bias: %d\n",
- on ? "enable" : "disable", error);
+ str_enable_disable(on), error);
return error;
}
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 08412239b8e6..0c661140fb88 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -38,6 +38,8 @@
#define MMA8450_CTRL_REG1 0x38
#define MMA8450_CTRL_REG2 0x39
+#define MMA8450_ID 0xc6
+#define MMA8450_WHO_AM_I 0x0f
static int mma8450_read(struct i2c_client *c, unsigned int off)
{
@@ -148,8 +150,20 @@ static void mma8450_close(struct input_dev *input)
*/
static int mma8450_probe(struct i2c_client *c)
{
+ struct i2c_adapter *adapter = c->adapter;
struct input_dev *input;
- int err;
+ int err, client_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return dev_err_probe(&c->dev, -EINVAL,
+ "I2C adapter doesn't support SMBUS BYTE");
+
+ client_id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);
+ if (client_id != MMA8450_ID)
+ return dev_err_probe(&c->dev, -EINVAL,
+ "unexpected chip ID 0x%x (vs 0x%x)\n",
+ client_id, MMA8450_ID);
input = devm_input_allocate_device(&c->dev);
if (!input)
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index eb4173f9c820..7ba8d166d68c 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -187,6 +187,12 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
return 0;
}
+static void bbnsm_pwrkey_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -223,6 +229,8 @@ static struct platform_driver bbnsm_pwrkey_driver = {
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
+ .remove = bbnsm_pwrkey_remove,
+
};
module_platform_driver(bbnsm_pwrkey_driver);
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 3666ba6d1f30..9711f5c7c78a 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#define MAX_MAGNITUDE_SHIFT 16
@@ -44,7 +45,7 @@ static int regulator_haptic_toggle(struct regulator_haptic *haptic, bool on)
if (error) {
dev_err(haptic->dev,
"failed to switch regulator %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a841883660fb..fee1796da3d0 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/string_choices.h>
#include <linux/input.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
@@ -199,7 +200,7 @@ static int elan_set_power(struct elan_tp_data *data, bool on)
} while (--repeat > 0);
dev_err(&data->client->dev, "failed to set power %s: %d\n",
- on ? "on" : "off", error);
+ str_on_off(on), error);
return error;
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2735f86c23cc..aba57abe6978 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -665,23 +665,50 @@ static void synaptics_pt_stop(struct serio *serio)
priv->pt_port = NULL;
}
+static int synaptics_pt_open(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = true;
+
+ return 0;
+}
+
+static void synaptics_pt_close(struct serio *serio)
+{
+ struct psmouse *parent = psmouse_from_serio(serio->parent);
+ struct synaptics_data *priv = parent->private;
+
+ guard(serio_pause_rx)(parent->ps2dev.serio);
+ priv->pt_port_open = false;
+}
+
static int synaptics_is_pt_packet(u8 *buf)
{
return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
}
-static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet)
+static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet)
{
- struct psmouse *child = psmouse_from_serio(ptport);
+ struct serio *ptport;
- if (child && child->state == PSMOUSE_ACTIVATED) {
- serio_interrupt(ptport, packet[1], 0);
- serio_interrupt(ptport, packet[4], 0);
- serio_interrupt(ptport, packet[5], 0);
- if (child->pktsize == 4)
- serio_interrupt(ptport, packet[2], 0);
- } else {
- serio_interrupt(ptport, packet[1], 0);
+ ptport = priv->pt_port;
+ if (!ptport)
+ return;
+
+ serio_interrupt(ptport, packet[1], 0);
+
+ if (priv->pt_port_open) {
+ struct psmouse *child = psmouse_from_serio(ptport);
+
+ if (child->state == PSMOUSE_ACTIVATED) {
+ serio_interrupt(ptport, packet[4], 0);
+ serio_interrupt(ptport, packet[5], 0);
+ if (child->pktsize == 4)
+ serio_interrupt(ptport, packet[2], 0);
+ }
}
}
@@ -720,6 +747,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
serio->write = synaptics_pt_write;
serio->start = synaptics_pt_start;
serio->stop = synaptics_pt_stop;
+ serio->open = synaptics_pt_open;
+ serio->close = synaptics_pt_close;
serio->parent = psmouse->ps2dev.serio;
psmouse->pt_activate = synaptics_pt_activate;
@@ -1216,11 +1245,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) &&
synaptics_is_pt_packet(psmouse->packet)) {
- if (priv->pt_port)
- synaptics_pass_pt_packet(priv->pt_port,
- psmouse->packet);
- } else
+ synaptics_pass_pt_packet(priv, psmouse->packet);
+ } else {
synaptics_process_packet(psmouse);
+ }
return PSMOUSE_FULL_PACKET;
}
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 899aee598632..3853165b6b3a 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -188,6 +188,7 @@ struct synaptics_data {
bool disable_gesture; /* disable gestures */
struct serio *pt_port; /* Pass-through serio port */
+ bool pt_port_open;
/*
* Last received Advanced Gesture Mode (AGM) packet. An AGM packet
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 509330a27880..cab5a4c5baf5 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -179,8 +179,8 @@ static struct platform_device *i8042_platform_device;
static struct notifier_block i8042_kbd_bind_notifier_block;
static bool i8042_handle_data(int irq);
-static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+static i8042_filter_t i8042_platform_filter;
+static void *i8042_platform_filter_context;
void i8042_lock_chip(void)
{
@@ -194,8 +194,7 @@ void i8042_unlock_chip(void)
}
EXPORT_SYMBOL(i8042_unlock_chip);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+int i8042_install_filter(i8042_filter_t filter, void *context)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -203,12 +202,12 @@ int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EBUSY;
i8042_platform_filter = filter;
+ i8042_platform_filter_context = context;
return 0;
}
EXPORT_SYMBOL(i8042_install_filter);
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *port))
+int i8042_remove_filter(i8042_filter_t filter)
{
guard(spinlock_irqsave)(&i8042_lock);
@@ -216,6 +215,7 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
return -EINVAL;
i8042_platform_filter = NULL;
+ i8042_platform_filter_context = NULL;
return 0;
}
EXPORT_SYMBOL(i8042_remove_filter);
@@ -480,7 +480,10 @@ static bool i8042_filter(unsigned char data, unsigned char str,
}
}
- if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
+ if (!i8042_platform_filter)
+ return false;
+
+ if (i8042_platform_filter(data, str, serio, i8042_platform_filter_context)) {
dbg("Filtered out by platform filter\n");
return true;
}
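
[Note: illustrative sketch only, not part of the patch above. After the i8042_filter_t conversion, a platform filter takes a fourth context argument supplied at i8042_install_filter() time, so per-filter state no longer has to live in file-scope globals keyed off the single installed filter. Names below are hypothetical.]

#include <linux/i8042.h>
#include <linux/serio.h>
#include <linux/types.h>

struct demo_filter_state {
	bool swallow_next;
};

static struct demo_filter_state demo_state;

static bool demo_i8042_filter(unsigned char data, unsigned char str,
			      struct serio *serio, void *context)
{
	struct demo_filter_state *state = context;

	if (state->swallow_next) {
		state->swallow_next = false;
		return true;	/* consume this byte, don't deliver it */
	}

	return false;		/* let i8042 deliver the byte normally */
}

/*
 * Registration from a driver's probe path, and matching removal:
 *
 *	err = i8042_install_filter(demo_i8042_filter, &demo_state);
 *	...
 *	i8042_remove_filter(demo_i8042_filter);
 */
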
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index f4e950920e84..eb3cc2befcdf 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -23,6 +23,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/bitops.h>
#include <linux/input/mt.h>
@@ -102,7 +103,7 @@ static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d",
- down ? "down" : "up", id, x, y, z);
+ str_down_up(down), id, x, y, z);
if (down) {
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 362fb9b0a198..1219f4f23d40 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -337,6 +337,15 @@ config INTERCONNECT_QCOM_SM8650
This is a driver for the Qualcomm Network-on-Chip on SM8650-based
platforms.
+config INTERCONNECT_QCOM_SM8750
+ tristate "Qualcomm SM8750 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8750-based
+ platforms.
+
config INTERCONNECT_QCOM_X1E80100
tristate "Qualcomm X1E80100 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 9997728c02bf..7887b1e8d69b 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -40,6 +40,7 @@ qnoc-sm8350-objs := sm8350.o
qnoc-sm8450-objs := sm8450.o
qnoc-sm8550-objs := sm8550.o
qnoc-sm8650-objs := sm8650.o
+qnoc-sm8750-objs := sm8750.o
qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
@@ -80,5 +81,6 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8650) += qnoc-sm8650.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8750) += qnoc-sm8750.o
obj-$(CONFIG_INTERCONNECT_QCOM_X1E80100) += qnoc-x1e80100.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/sm8750.c b/drivers/interconnect/qcom/sm8750.c
new file mode 100644
index 000000000000..69bc22222075
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8750.c
@@ -0,0 +1,1705 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8750-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+#define SM8750_MASTER_GPU_TCU 0
+#define SM8750_MASTER_SYS_TCU 1
+#define SM8750_MASTER_APPSS_PROC 2
+#define SM8750_MASTER_LLCC 3
+#define SM8750_MASTER_QDSS_BAM 4
+#define SM8750_MASTER_QSPI_0 5
+#define SM8750_MASTER_QUP_1 6
+#define SM8750_MASTER_QUP_2 7
+#define SM8750_MASTER_A1NOC_SNOC 8
+#define SM8750_MASTER_A2NOC_SNOC 9
+#define SM8750_MASTER_CAMNOC_HF 10
+#define SM8750_MASTER_CAMNOC_NRT_ICP_SF 11
+#define SM8750_MASTER_CAMNOC_RT_CDM_SF 12
+#define SM8750_MASTER_CAMNOC_SF 13
+#define SM8750_MASTER_GEM_NOC_CNOC 14
+#define SM8750_MASTER_GEM_NOC_PCIE_SNOC 15
+#define SM8750_MASTER_GFX3D 16
+#define SM8750_MASTER_LPASS_GEM_NOC 17
+#define SM8750_MASTER_LPASS_LPINOC 18
+#define SM8750_MASTER_LPIAON_NOC 19
+#define SM8750_MASTER_LPASS_PROC 20
+#define SM8750_MASTER_MDP 21
+#define SM8750_MASTER_MSS_PROC 22
+#define SM8750_MASTER_MNOC_HF_MEM_NOC 23
+#define SM8750_MASTER_MNOC_SF_MEM_NOC 24
+#define SM8750_MASTER_CDSP_PROC 25
+#define SM8750_MASTER_COMPUTE_NOC 26
+#define SM8750_MASTER_ANOC_PCIE_GEM_NOC 27
+#define SM8750_MASTER_SNOC_SF_MEM_NOC 28
+#define SM8750_MASTER_UBWC_P 29
+#define SM8750_MASTER_CDSP_HCP 30
+#define SM8750_MASTER_VIDEO_CV_PROC 31
+#define SM8750_MASTER_VIDEO_EVA 32
+#define SM8750_MASTER_VIDEO_MVP 33
+#define SM8750_MASTER_VIDEO_V_PROC 34
+#define SM8750_MASTER_CNOC_CFG 35
+#define SM8750_MASTER_CNOC_MNOC_CFG 36
+#define SM8750_MASTER_PCIE_ANOC_CFG 37
+#define SM8750_MASTER_QUP_CORE_0 38
+#define SM8750_MASTER_QUP_CORE_1 39
+#define SM8750_MASTER_QUP_CORE_2 40
+#define SM8750_MASTER_CRYPTO 41
+#define SM8750_MASTER_IPA 42
+#define SM8750_MASTER_QUP_3 43
+#define SM8750_MASTER_SOCCP_AGGR_NOC 44
+#define SM8750_MASTER_SP 45
+#define SM8750_MASTER_GIC 46
+#define SM8750_MASTER_PCIE_0 47
+#define SM8750_MASTER_QDSS_ETR 48
+#define SM8750_MASTER_QDSS_ETR_1 49
+#define SM8750_MASTER_SDCC_2 50
+#define SM8750_MASTER_SDCC_4 51
+#define SM8750_MASTER_UFS_MEM 52
+#define SM8750_MASTER_USB3_0 53
+#define SM8750_SLAVE_UBWC_P 54
+#define SM8750_SLAVE_EBI1 55
+#define SM8750_SLAVE_AHB2PHY_SOUTH 56
+#define SM8750_SLAVE_AHB2PHY_NORTH 57
+#define SM8750_SLAVE_AOSS 58
+#define SM8750_SLAVE_CAMERA_CFG 59
+#define SM8750_SLAVE_CLK_CTL 60
+#define SM8750_SLAVE_CRYPTO_0_CFG 61
+#define SM8750_SLAVE_DISPLAY_CFG 62
+#define SM8750_SLAVE_EVA_CFG 63
+#define SM8750_SLAVE_GFX3D_CFG 64
+#define SM8750_SLAVE_I2C 65
+#define SM8750_SLAVE_I3C_IBI0_CFG 66
+#define SM8750_SLAVE_I3C_IBI1_CFG 67
+#define SM8750_SLAVE_IMEM_CFG 68
+#define SM8750_SLAVE_IPA_CFG 69
+#define SM8750_SLAVE_IPC_ROUTER_CFG 70
+#define SM8750_SLAVE_CNOC_MSS 71
+#define SM8750_SLAVE_PCIE_CFG 72
+#define SM8750_SLAVE_PRNG 73
+#define SM8750_SLAVE_QDSS_CFG 74
+#define SM8750_SLAVE_QSPI_0 75
+#define SM8750_SLAVE_QUP_3 76
+#define SM8750_SLAVE_QUP_1 77
+#define SM8750_SLAVE_QUP_2 78
+#define SM8750_SLAVE_SDCC_2 79
+#define SM8750_SLAVE_SDCC_4 80
+#define SM8750_SLAVE_SOCCP 81
+#define SM8750_SLAVE_SPSS_CFG 82
+#define SM8750_SLAVE_TCSR 83
+#define SM8750_SLAVE_TLMM 84
+#define SM8750_SLAVE_TME_CFG 85
+#define SM8750_SLAVE_UFS_MEM_CFG 86
+#define SM8750_SLAVE_USB3_0 87
+#define SM8750_SLAVE_VENUS_CFG 88
+#define SM8750_SLAVE_VSENSE_CTRL_CFG 89
+#define SM8750_SLAVE_A1NOC_SNOC 90
+#define SM8750_SLAVE_A2NOC_SNOC 91
+#define SM8750_SLAVE_APPSS 92
+#define SM8750_SLAVE_GEM_NOC_CNOC 93
+#define SM8750_SLAVE_SNOC_GEM_NOC_SF 94
+#define SM8750_SLAVE_LLCC 95
+#define SM8750_SLAVE_LPASS_GEM_NOC 96
+#define SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC 97
+#define SM8750_SLAVE_LPICX_NOC_LPIAON_NOC 98
+#define SM8750_SLAVE_MNOC_HF_MEM_NOC 99
+#define SM8750_SLAVE_MNOC_SF_MEM_NOC 100
+#define SM8750_SLAVE_CDSP_MEM_NOC 101
+#define SM8750_SLAVE_MEM_NOC_PCIE_SNOC 102
+#define SM8750_SLAVE_ANOC_PCIE_GEM_NOC 103
+#define SM8750_SLAVE_CNOC_CFG 104
+#define SM8750_SLAVE_DDRSS_CFG 105
+#define SM8750_SLAVE_CNOC_MNOC_CFG 106
+#define SM8750_SLAVE_PCIE_ANOC_CFG 107
+#define SM8750_SLAVE_QUP_CORE_0 108
+#define SM8750_SLAVE_QUP_CORE_1 109
+#define SM8750_SLAVE_QUP_CORE_2 110
+#define SM8750_SLAVE_BOOT_IMEM 111
+#define SM8750_SLAVE_IMEM 112
+#define SM8750_SLAVE_BOOT_IMEM_2 113
+#define SM8750_SLAVE_SERVICE_CNOC 114
+#define SM8750_SLAVE_SERVICE_MNOC 115
+#define SM8750_SLAVE_SERVICE_PCIE_ANOC 116
+#define SM8750_SLAVE_PCIE_0 117
+#define SM8750_SLAVE_QDSS_STM 118
+#define SM8750_SLAVE_TCU 119
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8750_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8750_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_qup02 = {
+ .name = "qxm_qup02",
+ .id = SM8750_MASTER_QUP_3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8750_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8750_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8750_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8750_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8750_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8750_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8750_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .id = SM8750_MASTER_SOCCP_AGGR_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8750_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8750_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8750_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8750_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8750_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8750_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8750_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .id = SM8750_MASTER_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 33,
+ .links = { SM8750_SLAVE_AHB2PHY_SOUTH, SM8750_SLAVE_AHB2PHY_NORTH,
+ SM8750_SLAVE_CAMERA_CFG, SM8750_SLAVE_CLK_CTL,
+ SM8750_SLAVE_CRYPTO_0_CFG, SM8750_SLAVE_DISPLAY_CFG,
+ SM8750_SLAVE_EVA_CFG, SM8750_SLAVE_GFX3D_CFG,
+ SM8750_SLAVE_I2C, SM8750_SLAVE_I3C_IBI0_CFG,
+ SM8750_SLAVE_I3C_IBI1_CFG, SM8750_SLAVE_IMEM_CFG,
+ SM8750_SLAVE_CNOC_MSS, SM8750_SLAVE_PCIE_CFG,
+ SM8750_SLAVE_PRNG, SM8750_SLAVE_QDSS_CFG,
+ SM8750_SLAVE_QSPI_0, SM8750_SLAVE_QUP_3,
+ SM8750_SLAVE_QUP_1, SM8750_SLAVE_QUP_2,
+ SM8750_SLAVE_SDCC_2, SM8750_SLAVE_SDCC_4,
+ SM8750_SLAVE_SPSS_CFG, SM8750_SLAVE_TCSR,
+ SM8750_SLAVE_TLMM, SM8750_SLAVE_UFS_MEM_CFG,
+ SM8750_SLAVE_USB3_0, SM8750_SLAVE_VENUS_CFG,
+ SM8750_SLAVE_VSENSE_CTRL_CFG, SM8750_SLAVE_CNOC_MNOC_CFG,
+ SM8750_SLAVE_PCIE_ANOC_CFG, SM8750_SLAVE_QDSS_STM,
+ SM8750_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8750_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 12,
+ .links = { SM8750_SLAVE_AOSS, SM8750_SLAVE_IPA_CFG,
+ SM8750_SLAVE_IPC_ROUTER_CFG, SM8750_SLAVE_SOCCP,
+ SM8750_SLAVE_TME_CFG, SM8750_SLAVE_APPSS,
+ SM8750_SLAVE_CNOC_CFG, SM8750_SLAVE_DDRSS_CFG,
+ SM8750_SLAVE_BOOT_IMEM, SM8750_SLAVE_IMEM,
+ SM8750_SLAVE_BOOT_IMEM_2, SM8750_SLAVE_SERVICE_CNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8750_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8750_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8750_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8750_MASTER_APPSS_PROC,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 4,
+ .links = { SM8750_SLAVE_UBWC_P, SM8750_SLAVE_GEM_NOC_CNOC,
+ SM8750_SLAVE_LLCC, SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8750_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+ .name = "qnm_lpass_gemnoc",
+ .id = SM8750_MASTER_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8750_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8750_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8750_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8750_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8750_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8750_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
+ SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_ubwc_p = {
+ .name = "qnm_ubwc_p",
+ .id = SM8750_MASTER_UBWC_P,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8750_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .id = SM8750_MASTER_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .id = SM8750_MASTER_LPASS_LPINOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .id = SM8750_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8750_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8750_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_nrt_icp_sf = {
+ .name = "qnm_camnoc_nrt_icp_sf",
+ .id = SM8750_MASTER_CAMNOC_NRT_ICP_SF,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_rt_cdm_sf = {
+ .name = "qnm_camnoc_rt_cdm_sf",
+ .id = SM8750_MASTER_CAMNOC_RT_CDM_SF,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8750_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8750_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8750_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8750_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_eva = {
+ .name = "qnm_video_eva",
+ .id = SM8750_MASTER_VIDEO_EVA,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_mvp = {
+ .name = "qnm_video_mvp",
+ .id = SM8750_MASTER_VIDEO_MVP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8750_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .id = SM8750_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .id = SM8750_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+ .name = "qsm_pcie_anoc_cfg",
+ .id = SM8750_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3 = {
+ .name = "xm_pcie3",
+ .id = SM8750_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8750_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8750_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8750_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8750_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8750_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8750_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8750_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8750_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8750_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8750_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8750_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_eva_cfg = {
+ .name = "qhs_eva_cfg",
+ .id = SM8750_SLAVE_EVA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8750_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+ .name = "qhs_i2c",
+ .id = SM8750_SLAVE_I2C,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
+ .name = "qhs_i3c_ibi0_cfg",
+ .id = SM8750_SLAVE_I3C_IBI0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
+ .name = "qhs_i3c_ibi1_cfg",
+ .id = SM8750_SLAVE_I3C_IBI1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8750_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8750_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .id = SM8750_SLAVE_PCIE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8750_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8750_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8750_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup02 = {
+ .name = "qhs_qup02",
+ .id = SM8750_SLAVE_QUP_3,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8750_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8750_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8750_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8750_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8750_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8750_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8750_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8750_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8750_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8750_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8750_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .id = SM8750_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+ .name = "qss_pcie_anoc_cfg",
+ .id = SM8750_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8750_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8750_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8750_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8750_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8750_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .id = SM8750_SLAVE_SOCCP,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8750_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .id = SM8750_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .id = SM8750_SLAVE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8750_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+ .name = "qss_ddrss_cfg",
+ .id = SM8750_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SM8750_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8750_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_modem_boot_imem = {
+ .name = "qxs_modem_boot_imem",
+ .id = SM8750_SLAVE_BOOT_IMEM_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc_main = {
+ .name = "srvc_cnoc_main",
+ .id = SM8750_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .id = SM8750_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node chs_ubwc_p = {
+ .name = "chs_ubwc_p",
+ .id = SM8750_SLAVE_UBWC_P,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8750_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8750_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8750_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .id = SM8750_SLAVE_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .id = SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .id = SM8750_SLAVE_LPICX_NOC_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8750_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8750_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8750_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8750_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8750_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8750_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8750_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8750_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8750_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8750_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8750_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .enable_mask = BIT(0),
+ .keepalive = true,
+ .num_nodes = 44,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_eva_cfg, &qhs_gpuss_cfg,
+ &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg,
+ &qhs_imem_cfg, &qhs_mss_cfg,
+ &qhs_pcie_cfg, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb3_0, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+ &qss_pcie_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie, &qhs_aoss,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qss_ddrss_cfg, &qxs_boot_imem,
+ &qxs_imem, &qxs_modem_boot_imem,
+ &srvc_cnoc_main, &xs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 5,
+ .nodes = { &qhs_display_cfg, &qhs_i2c,
+ &qhs_qup02, &qhs_qup1,
+ &qhs_qup2 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 9,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_nrt_icp_sf,
+ &qnm_camnoc_rt_cdm_sf, &qnm_camnoc_sf,
+ &qnm_vapss_hcp, &qnm_video_cv_cpu,
+ &qnm_video_mvp, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 14,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &chm_apps, &qnm_gpu,
+ &qnm_mdsp, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_gemnoc,
+ &qnm_pcie, &qnm_snoc_sf,
+ &xm_gic, &chs_ubwc_p,
+ &qns_gem_noc_cnoc, &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_ubw0 = {
+ .name = "UBW0",
+ .num_nodes = 1,
+ .nodes = { &qnm_ubwc_p },
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_3] = &qxm_qup02,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8750_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SOCCP_AGGR_NOC] = &qxm_soccp,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8750_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8750_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_EVA_CFG] = &qhs_eva_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_I2C] = &qhs_i2c,
+ [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg,
+ [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_3] = &qhs_qup02,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm8750_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_BOOT_IMEM_2] = &qxs_modem_boot_imem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc_main,
+ [SLAVE_PCIE_0] = &xs_pcie,
+};
+
+static const struct qcom_icc_desc sm8750_cnoc_main = {
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_ubw0,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_UBWC_P] = &qnm_ubwc_p,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_UBWC_P] = &chs_ubwc_p,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct qcom_icc_desc sm8750_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_lpiaon_noc = {
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc sm8750_lpass_lpicx_noc = {
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sm8750_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_NRT_ICP_SF] = &qnm_camnoc_nrt_icp_sf,
+ [MASTER_CAMNOC_RT_CDM_SF] = &qnm_camnoc_rt_cdm_sf,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_EVA] = &qnm_video_eva,
+ [MASTER_VIDEO_MVP] = &qnm_video_mvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm8750_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8750_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct qcom_icc_desc sm8750_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc sm8750_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8750-aggre1-noc", .data = &sm8750_aggre1_noc},
+ { .compatible = "qcom,sm8750-aggre2-noc", .data = &sm8750_aggre2_noc},
+ { .compatible = "qcom,sm8750-clk-virt", .data = &sm8750_clk_virt},
+ { .compatible = "qcom,sm8750-config-noc", .data = &sm8750_config_noc},
+ { .compatible = "qcom,sm8750-cnoc-main", .data = &sm8750_cnoc_main},
+ { .compatible = "qcom,sm8750-gem-noc", .data = &sm8750_gem_noc},
+ { .compatible = "qcom,sm8750-lpass-ag-noc", .data = &sm8750_lpass_ag_noc},
+ { .compatible = "qcom,sm8750-lpass-lpiaon-noc", .data = &sm8750_lpass_lpiaon_noc},
+ { .compatible = "qcom,sm8750-lpass-lpicx-noc", .data = &sm8750_lpass_lpicx_noc},
+ { .compatible = "qcom,sm8750-mc-virt", .data = &sm8750_mc_virt},
+ { .compatible = "qcom,sm8750-mmss-noc", .data = &sm8750_mmss_noc},
+ { .compatible = "qcom,sm8750-nsp-noc", .data = &sm8750_nsp_noc},
+ { .compatible = "qcom,sm8750-pcie-anoc", .data = &sm8750_pcie_anoc},
+ { .compatible = "qcom,sm8750-system-noc", .data = &sm8750_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm8750",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("SM8750 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 47c46e4b739e..ec1b5e32b972 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -367,6 +367,18 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
'arm-smmu.disable_bypass' will continue to override this
config.
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ depends on ARM_SMMU
+ default y
+ help
+ Say Y here (the default) to apply the workaround that disables the
+ MMU-500's next-page prefetcher, which is affected by four known errata.
+
+ Say N here only if you are sure that none of the prefetch-related
+ errata apply to your platform.
+ Refer to silicon-errata.rst for details on the errata IDs.
+
config ARM_SMMU_QCOM
def_tristate y
depends on ARM_SMMU && ARCH_QCOM
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1bef5d55b2f9..68debf5ee2d7 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -16,7 +16,6 @@ irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
u8 cntrl_intr, u8 cntrl_log,
u32 status_run_mask, u32 status_overflow_mask);
@@ -41,13 +40,13 @@ void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
-extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern enum protection_domain_mode amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
+struct protection_domain *protection_domain_alloc(void);
void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
@@ -89,7 +88,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
*/
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
-void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
@@ -184,3 +182,6 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
#endif
+
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index fdb0357e0bb9..0bbda60d3cdc 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -220,6 +220,8 @@
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
+#define DTE_DATA1_SYSMGT_MASK GENMASK_ULL(41, 40)
+
#define DEV_ENTRY_IRQ_TBL_EN 0x80
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
@@ -407,8 +409,7 @@
#define DTE_FLAG_HAD (3ULL << 7)
#define DTE_FLAG_GIOV BIT_ULL(54)
#define DTE_FLAG_GV BIT_ULL(55)
-#define DTE_GLX_SHIFT (56)
-#define DTE_GLX_MASK (3)
+#define DTE_GLX GENMASK_ULL(57, 56)
#define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW BIT_ULL(62)
@@ -416,18 +417,18 @@
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DEV_DOMID_MASK 0xffffULL
-#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
-#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
-
-#define DTE_GCR3_SHIFT_A 58
-#define DTE_GCR3_SHIFT_B 16
-#define DTE_GCR3_SHIFT_C 43
+#define DTE_GCR3_14_12 GENMASK_ULL(60, 58)
+#define DTE_GCR3_30_15 GENMASK_ULL(31, 16)
+#define DTE_GCR3_51_31 GENMASK_ULL(63, 43)
#define DTE_GPT_LEVEL_SHIFT 54
+#define DTE_GPT_LEVEL_MASK GENMASK_ULL(55, 54)
#define GCR3_VALID 0x01ULL
+/* DTE[128:179] | DTE[184:191] */
+#define DTE_DATA2_INTR_MASK ~GENMASK_ULL(55, 52)
+
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
@@ -468,7 +469,7 @@ extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
do { \
if (amd_iommu_dump) \
- pr_info("AMD-Vi: " format, ## arg); \
+ pr_info(format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -516,6 +517,9 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
+#define for_each_ivhd_dte_flags(entry) \
+ list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list)
+
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -837,6 +841,7 @@ struct devid_map {
struct iommu_dev_data {
/*Protect against attach/detach races */
struct mutex mutex;
+ spinlock_t dte_lock; /* DTE lock for 256-bit access */
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
@@ -881,7 +886,21 @@ extern struct list_head amd_iommu_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u64 data[4];
+ union {
+ u64 data[4];
+ u128 data128[2];
+ };
+};
+
+/*
+ * Structure to store persistent DTE flags from IVHD
+ */
+struct ivhd_dte_flags {
+ struct list_head list;
+ u16 segid;
+ u16 devid_first;
+ u16 devid_last;
+ struct dev_table_entry dte;
};
/*
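As a side note on the mask rework in amd_iommu_types.h above: the open-coded shift/mask pairs are replaced with GENMASK_ULL() field masks so that FIELD_GET()/FIELD_PREP() from linux/bitfield.h can derive the shift from the mask. A small equivalence sketch follows, assuming only the standard helpers; the dte_glx_old()/dte_glx_new() names are hypothetical.

/*
 * Sketch of the equivalence, using DTE_GLX = GENMASK_ULL(57, 56) as an
 * example; dte_glx_old()/dte_glx_new() are hypothetical helpers.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DTE_GLX			GENMASK_ULL(57, 56)

static inline u64 dte_glx_old(u64 data)
{
	return (data >> 56) & 0x3;		/* old DTE_GLX_SHIFT / DTE_GLX_MASK */
}

static inline u64 dte_glx_new(u64 data)
{
	return FIELD_GET(DTE_GLX, data);	/* shift derived from the mask */
}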
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 614f216215ea..c5cd92edada0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,7 +152,7 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
-enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -174,8 +174,8 @@ bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
-LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
- system */
+LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
+LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */
/* Number of IOMMUs present in the system */
static int amd_iommus_present;
@@ -984,36 +984,12 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
+static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- dev_table[devid].data[i] |= (1UL << _bit);
-}
-
-static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __set_dev_entry_bit(dev_table, devid, bit);
-}
-
-static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
- u16 devid, u8 bit)
-{
- int i = (bit >> 6) & 0x03;
- int _bit = bit & 0x3f;
-
- return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
-}
-
-static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
-{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- return __get_dev_entry_bit(dev_table, devid, bit);
+ dte->data[i] |= (1UL << _bit);
}
static bool __copy_device_table(struct amd_iommu *iommu)
@@ -1081,11 +1057,9 @@ static bool __copy_device_table(struct amd_iommu *iommu)
}
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
- tmp |= DTE_FLAG_GV;
+ tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1136,42 +1110,107 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
{
- int sysmgt;
+ struct ivhd_dte_flags *e;
+ unsigned int best_len = UINT_MAX;
+ struct dev_table_entry *dte = NULL;
- sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+ for_each_ivhd_dte_flags(e) {
+ /*
+ * Need to go through the whole list to find the smallest range
+ * that contains the devid.
+ */
+ if ((e->segid == segid) &&
+ (e->devid_first <= devid) && (devid <= e->devid_last)) {
+ unsigned int len = e->devid_last - e->devid_first;
+
+ if (len < best_len) {
+ dte = &(e->dte);
+ best_len = len;
+ }
+ }
+ }
+ return dte;
+}
+
+static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
+{
+ struct ivhd_dte_flags *e;
- if (sysmgt == 0x01)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
+ for_each_ivhd_dte_flags(e) {
+ if ((e->segid == segid) &&
+ (e->devid_first == first) &&
+ (e->devid_last == last))
+ return true;
+ }
+ return false;
}
/*
* This function takes the device specific flags read from the ACPI
* table and sets up the device table entry with that information
*/
-static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
- u16 devid, u32 flags, u32 ext_flags)
+static void __init
+set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
+ u32 flags, u32 ext_flags)
{
- if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
- if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
- if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
- if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
- if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
- if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
- if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
+ int i;
+ struct dev_table_entry dte = {};
- amd_iommu_apply_erratum_63(iommu, devid);
+ /* Parse IVHD DTE setting flags and store information */
+ if (flags) {
+ struct ivhd_dte_flags *d;
- amd_iommu_set_rlookup_table(iommu, devid);
+ if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
+ return;
+
+ d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
+ if (!d)
+ return;
+
+ pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
+
+ if (flags & ACPI_DEVFLAG_INITPASS)
+ set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
+ if (flags & ACPI_DEVFLAG_EXTINT)
+ set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
+ if (flags & ACPI_DEVFLAG_NMI)
+ set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
+ if (flags & ACPI_DEVFLAG_SYSMGT1)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
+ if (flags & ACPI_DEVFLAG_SYSMGT2)
+ set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
+ if (flags & ACPI_DEVFLAG_LINT0)
+ set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
+ if (flags & ACPI_DEVFLAG_LINT1)
+ set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
+
+ /* Apply erratum 63, which needs info in initial_dte */
+ if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
+ dte.data[0] |= DTE_FLAG_IW;
+
+ memcpy(&d->dte, &dte, sizeof(dte));
+ d->segid = iommu->pci_seg->id;
+ d->devid_first = first;
+ d->devid_last = last;
+ list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
+ }
+
+ for (i = first; i <= last; i++) {
+ if (flags) {
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ memcpy(&dev_table[i], &dte, sizeof(dte));
+ }
+ amd_iommu_set_rlookup_table(iommu, i);
+ }
+}
+
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ u16 devid, u32 flags, u32 ext_flags)
+{
+ set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
}
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
@@ -1239,7 +1278,7 @@ static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
entry->cmd_line = cmd_line;
entry->root_devid = (entry->devid & (~0x7));
- pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
+ pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
entry->cmd_line ? "cmd" : "ivrs",
entry->hid, entry->uid, entry->root_devid);
@@ -1331,15 +1370,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
switch (e->type) {
case IVHD_DEV_ALL:
- DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
-
- for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
- set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+ DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
+ set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x\n",
+ DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1350,8 +1386,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_SELECT_RANGE_START:
- DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1364,8 +1399,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x devid_to: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1382,9 +1416,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS_RANGE:
- DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %04x:%02x:%02x.%x flags: %02x "
- "devid_to: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1401,8 +1433,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
- "flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1414,8 +1445,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT_RANGE:
- DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
@@ -1428,21 +1458,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias) {
+ if (alias)
pci_seg->alias_table[dev_i] = devid_to;
- set_dev_entry_from_acpi(iommu,
- devid_to, flags, ext_flags);
- }
- set_dev_entry_from_acpi(iommu, dev_i,
- flags, ext_flags);
}
+ set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -1461,11 +1488,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
var, (int)handle,
seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
ret = add_special_device(type, handle, &devid, false);
if (ret)
@@ -1525,11 +1553,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
- PCI_FUNC(devid));
+ PCI_FUNC(devid),
+ e->flags);
flags = e->flags;
@@ -1757,13 +1786,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling it.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ /* GAM requires GA mode. */
+ if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1773,13 +1797,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- /*
- * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
- * XT, GAM also requires GA mode. Therefore, we need to
- * check cmpxchg16b support before enabling them.
- */
- if (!boot_cpu_has(X86_FEATURE_CX16) ||
- ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
+ /* XT and GAM require GA mode. */
+ if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
}
@@ -2145,7 +2164,7 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
pr_info("V2 page table enabled (Paging mode : %d level)\n",
amd_iommu_gpt_level);
}
@@ -2575,9 +2594,9 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
return;
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
if (!amd_iommu_snp_en)
- __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
+ set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
}
}
@@ -2605,8 +2624,7 @@ static void init_device_table(void)
for_each_pci_segment(pci_seg) {
for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
- __set_dev_entry_bit(pci_seg->dev_table,
- devid, DEV_ENTRY_IRQ_TBL_EN);
+ set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
}
}
@@ -3033,6 +3051,11 @@ static int __init early_amd_iommu_init(void)
return -EINVAL;
}
+ if (!boot_cpu_has(X86_FEATURE_CX16)) {
+ pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
+ return -EINVAL;
+ }
+
/*
* Validate checksum here so we don't need to do it when
* we actually parse the table
@@ -3059,10 +3082,10 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
- if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (amd_iommu_pgtable == PD_MODE_V2) {
if (!amd_iommu_v2_pgtbl_supported()) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
}
}
@@ -3185,7 +3208,7 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ if (amd_iommu_pgtable != PD_MODE_V1) {
pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
goto disable_snp;
}
@@ -3398,25 +3421,23 @@ static bool amd_iommu_sme_check(void)
* IOMMUs
*
****************************************************************************/
-int __init amd_iommu_detect(void)
+void __init amd_iommu_detect(void)
{
int ret;
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return -ENODEV;
+ return;
if (!amd_iommu_sme_check())
- return -ENODEV;
+ return;
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
- return ret;
+ return;
amd_iommu_detected = true;
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
-
- return 1;
}
/****************************************************************************
@@ -3464,9 +3485,9 @@ static int __init parse_amd_iommu_options(char *str)
} else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ amd_iommu_pgtable = PD_MODE_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
- amd_iommu_pgtable = AMD_IOMMU_V2;
+ amd_iommu_pgtable = PD_MODE_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
} else if (strncmp(str, "nohugepages", 11) == 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 5aaeda77eef2..b48a72bd7b23 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -83,12 +83,142 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data);
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
+
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
+
/****************************************************************************
*
* Helper functions
*
****************************************************************************/
+static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
+{
+ /*
+ * Note:
+ * We use arch_cmpxchg128_local() because:
+ * - The cmpxchg16b instruction is needed mainly for the 128-bit store
+ * to the DTE (the compare-exchange itself is not required since this
+ * function is already protected by a spin_lock for this DTE).
+ * - Neither the LOCK prefix nor a retry loop is needed because of the spin_lock.
+ */
+ arch_cmpxchg128_local(ptr, *ptr, val);
+}
+
+static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ struct dev_table_entry old;
+
+ old.data128[1] = ptr->data128[1];
+ /*
+ * Preserve DTE_DATA2_INTR_MASK. This needs to be
+ * done here because it must run inside the
+ * spin_lock(&dev_data->dte_lock) context.
+ */
+ new->data[2] &= ~DTE_DATA2_INTR_MASK;
+ new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
+
+ amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
+}
+
+static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+ amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
+}
+
+/*
+ * Note:
+ * IOMMU reads the entire Device Table entry in a single 256-bit transaction
+ * but the driver programs the DTE using two 128-bit cmpxchg operations. So,
+ * the driver needs to ensure the following:
+ * - The DTE[V|GV] bit is written last when setting the entry.
+ * - The DTE[V|GV] bit is written first when clearing the entry.
+ *
+ * This function is used only by code that updates the DMA translation part
+ * of the DTE, so only control bits related to DMA are considered here.
+ */
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
+{
+ unsigned long flags;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+
+ if (!(ptr->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is not valid. */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!(new->data[0] & DTE_FLAG_V)) {
+ /* Existing DTE is valid. New DTE is not valid. */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has no guest page table.
+ */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
+ /*
+ * Both DTEs are valid.
+ * Existing DTE has guest page table,
+ * new DTE has no guest page table.
+ */
+ write_dte_lower128(ptr, new);
+ write_dte_upper128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
+ FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
+ /*
+ * Both DTEs are valid and have a guest page table,
+ * but with a different number of levels. So, we need
+ * to update both the upper and lower 128-bit values,
+ * which requires disabling and flushing the DTE.
+ */
+ struct dev_table_entry clear = {};
+
+ /* First disable DTE */
+ write_dte_lower128(ptr, &clear);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+
+ /* Then update DTE */
+ write_dte_upper128(ptr, new);
+ write_dte_lower128(ptr, new);
+ iommu_flush_dte_sync(iommu, dev_data->devid);
+ } else {
+ /*
+ * Both DTEs are valid and have a guest page table
+ * with the same number of levels. We only need to update
+ * the lower 128 bits, so there is no need to disable the DTE.
+ */
+ write_dte_lower128(ptr, new);
+ }
+
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
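Editorial sketch, not part of the patch: the case analysis above boils down to one ordering rule, shown here as a hypothetical standalone helper (the level-change case, which additionally clears and flushes the entry in between, is omitted for brevity):

#include <stdbool.h>

enum half_order { LOWER_HALF_FIRST, UPPER_HALF_FIRST };

/* Write the half holding V/GV first when validity is being removed or
 * downgraded, and last otherwise, so the IOMMU's single 256-bit read
 * never sees V/GV pointing at a half-updated entry. */
static enum half_order dte_write_order(bool old_v, bool new_v,
				       bool old_gv, bool new_gv)
{
	if (!old_v)
		return UPPER_HALF_FIRST;	/* entry becomes valid last */
	if (!new_v || (old_gv && !new_gv))
		return LOWER_HALF_FIRST;	/* drop V/GV before rewriting */
	return UPPER_HALF_FIRST;
}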
+
+static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+ struct dev_table_entry *dte)
+{
+ unsigned long flags;
+ struct dev_table_entry *ptr;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ ptr = &dev_table[dev_data->devid];
+
+ spin_lock_irqsave(&dev_data->dte_lock, flags);
+ dte->data128[0] = ptr->data128[0];
+ dte->data128[1] = ptr->data128[1];
+ spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->pd_mode == PD_MODE_V2));
@@ -209,6 +339,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return NULL;
mutex_init(&dev_data->mutex);
+ spin_lock_init(&dev_data->dte_lock);
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
@@ -216,7 +347,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
@@ -236,9 +367,11 @@ static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct dev_table_entry new;
struct amd_iommu *iommu;
- struct dev_table_entry *dev_table;
+ struct iommu_dev_data *dev_data, *alias_data;
u16 devid = pci_dev_id(pdev);
+ int ret = 0;
if (devid == alias)
return 0;
@@ -247,13 +380,27 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
if (!iommu)
return 0;
- amd_iommu_set_rlookup_table(iommu, alias);
- dev_table = get_dev_table(iommu);
- memcpy(dev_table[alias].data,
- dev_table[devid].data,
- sizeof(dev_table[alias].data));
+ /* Copy the data from pdev */
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ if (!dev_data) {
+ pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
+ ret = -EINVAL;
+ goto out;
+ }
+ get_dte256(iommu, dev_data, &new);
- return 0;
+ /* Setup alias */
+ alias_data = find_dev_data(iommu, alias);
+ if (!alias_data) {
+ pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
+ ret = -EINVAL;
+ goto out;
+ }
+ update_dte256(iommu, alias_data, &new);
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+out:
+ return ret;
}
static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
@@ -526,6 +673,12 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
return -ENOMEM;
dev_data->dev = dev;
+
+ /*
+ * dev_iommu_priv_set() needs to be called before setup_aliases().
+ * Otherwise, the subsequent call to dev_iommu_priv_get() will fail.
+ */
+ dev_iommu_priv_set(dev, dev_data);
setup_aliases(iommu, dev);
/*
@@ -539,8 +692,6 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
dev_data->flags = pdev_get_caps(to_pci_dev(dev));
}
- dev_iommu_priv_set(dev, dev_data);
-
return 0;
}
@@ -571,10 +722,13 @@ static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct dev_table_entry dte;
+ struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
+
+ get_dte256(iommu, dev_data, &dte);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -1261,6 +1415,15 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
return iommu_queue_command(iommu, &cmd);
}
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
+{
+ int ret;
+
+ ret = iommu_flush_dte(iommu, devid);
+ if (!ret)
+ iommu_completion_wait(iommu);
+}
+
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
@@ -1603,15 +1766,6 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
domain_flush_complete(domain);
}
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -1826,90 +1980,109 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
return ret;
}
+static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
+ struct dev_table_entry *new)
+{
+ /* All existing DTEs must have the V bit set */
+ new->data128[0] = DTE_FLAG_V;
+ new->data128[1] = 0;
+}
+
+/*
+ * Note:
+ * The old values for the GCR3 table and GPT have been cleared by the caller.
+ */
+static void set_dte_gcr3_table(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data,
+ struct dev_table_entry *target)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 gcr3;
+
+ if (!gcr3_info->gcr3_tbl)
+ return;
+
+ pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
+ __func__, dev_data->devid, gcr3_info->glx,
+ (unsigned long long)gcr3_info->gcr3_tbl);
+
+ gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+
+ target->data[0] |= DTE_FLAG_GV |
+ FIELD_PREP(DTE_GLX, gcr3_info->glx) |
+ FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
+ if (pdom_is_v2_pgtbl_mode(dev_data->domain))
+ target->data[0] |= DTE_FLAG_GIOV;
+
+ target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
+ FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
+
+ /* Guest page table can only support 4 and 5 levels */
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
+ else
+ target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+}
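The GCR3 root pointer is scattered across both 64-bit halves of the DTE. Assuming the field names above carry the gcr3 address bits their names suggest ([14:12], [30:15] and [51:31]; bits [11:0] are zero because the table is 4K aligned), the pointer recombines as in this illustrative sketch:

#include <stdint.h>

/* Illustration only: rebuild the GCR3 table root pointer from the three
 * DTE fields programmed by set_dte_gcr3_table(). */
static inline uint64_t gcr3_from_dte_fields(uint64_t bits_14_12,
					    uint64_t bits_30_15,
					    uint64_t bits_51_31)
{
	return (bits_14_12 << 12) | (bits_30_15 << 15) | (bits_51_31 << 31);
}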
+
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data)
{
- u64 pte_root = 0;
- u64 flags = 0;
- u32 old_domid;
- u16 devid = dev_data->devid;
u16 domid;
+ u32 old_domid;
+ struct dev_table_entry *initial_dte;
+ struct dev_table_entry new = {};
struct protection_domain *domain = dev_data->domain;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
if (gcr3_info && gcr3_info->gcr3_tbl)
domid = dev_data->gcr3_info.domid;
else
domid = domain->id;
+ make_clear_dte(dev_data, dte, &new);
+
if (domain->iop.mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] = iommu_virt_to_phys(domain->iop.root);
- pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
+ new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
/*
- * When SNP is enabled, Only set TV bit when IOMMU
- * page translation is in use.
+ * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
+ * A zero domain ID is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY
+ * check in do_iommu_domain_alloc().
*/
- if (!amd_iommu_snp_en || (domid != 0))
- pte_root |= DTE_FLAG_TV;
-
- flags = dev_table[devid].data[1];
-
- if (dev_data->ats_enabled)
- flags |= DTE_FLAG_IOTLB;
+ WARN_ON(amd_iommu_snp_en && (domid == 0));
+ new.data[0] |= DTE_FLAG_TV;
if (dev_data->ppr)
- pte_root |= 1ULL << DEV_ENTRY_PPR;
+ new.data[0] |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
- pte_root |= DTE_FLAG_HAD;
-
- if (gcr3_info && gcr3_info->gcr3_tbl) {
- u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
- u64 glx = gcr3_info->glx;
- u64 tmp;
+ new.data[0] |= DTE_FLAG_HAD;
- pte_root |= DTE_FLAG_GV;
- pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
-
- /* First mask out possible old values for GCR3 table */
- tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
- flags &= ~tmp;
-
- tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- flags &= ~tmp;
-
- /* Encode GCR3 table into DTE */
- tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
- pte_root |= tmp;
-
- tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
- flags |= tmp;
-
- tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
- flags |= tmp;
+ if (dev_data->ats_enabled)
+ new.data[1] |= DTE_FLAG_IOTLB;
- if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
- dev_table[devid].data[2] |=
- ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
- }
+ old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
+ new.data[1] |= domid;
- /* GIOV is supported with V2 page table mode only */
- if (pdom_is_v2_pgtbl_mode(domain))
- pte_root |= DTE_FLAG_GIOV;
+ /*
+ * Restore cached persistent DTE bits, which can be set from information
+ * in the IVRS table. See set_dev_entry_from_acpi().
+ */
+ initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
+ if (initial_dte) {
+ new.data128[0] |= initial_dte->data128[0];
+ new.data128[1] |= initial_dte->data128[1];
}
- flags &= ~DEV_DOMID_MASK;
- flags |= domid;
+ set_dte_gcr3_table(iommu, dev_data, &new);
- old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
- dev_table[devid].data[1] = flags;
- dev_table[devid].data[0] = pte_root;
+ update_dte256(iommu, dev_data, &new);
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1921,19 +2094,16 @@ static void set_dte_entry(struct amd_iommu *iommu,
}
}
-static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+/*
+ * Clear DMA-remap related flags to block all DMA (blocked domain)
+ */
+static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
{
- struct dev_table_entry *dev_table = get_dev_table(iommu);
-
- /* remove entry from the device table seen by the hardware */
- dev_table[devid].data[0] = DTE_FLAG_V;
-
- if (!amd_iommu_snp_en)
- dev_table[devid].data[0] |= DTE_FLAG_TV;
+ struct dev_table_entry new = {};
+ struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
- dev_table[devid].data[1] &= DTE_FLAG_MASK;
-
- amd_iommu_apply_erratum_63(iommu, devid);
+ make_clear_dte(dev_data, dte, &new);
+ update_dte256(iommu, dev_data, &new);
}
/* Update and flush DTE for the given device */
@@ -1944,7 +2114,7 @@ static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
if (set)
set_dte_entry(iommu, dev_data);
else
- clear_dte_entry(iommu, dev_data->devid);
+ clear_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
@@ -2007,7 +2177,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
struct protection_domain *pdom)
{
struct pdom_iommu_info *pdom_iommu_info, *curr;
- struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
unsigned long flags;
int ret = 0;
@@ -2036,10 +2205,6 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
goto out_unlock;
}
- /* Update NUMA Node ID */
- if (cfg->amd.nid == NUMA_NO_NODE)
- cfg->amd.nid = dev_to_node(&iommu->dev->dev);
-
out_unlock:
spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
@@ -2276,16 +2441,15 @@ void protection_domain_free(struct protection_domain *domain)
kfree(domain);
}
-static void protection_domain_init(struct protection_domain *domain, int nid)
+static void protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
xa_init(&domain->iommu_array);
- domain->iop.pgtbl.cfg.amd.nid = nid;
}
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
+struct protection_domain *protection_domain_alloc(void)
{
struct protection_domain *domain;
int domid;
@@ -2301,42 +2465,37 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
}
domain->id = domid;
- protection_domain_init(domain, nid);
+ protection_domain_init(domain);
return domain;
}
static int pdom_setup_pgtable(struct protection_domain *domain,
- unsigned int type, int pgtable)
+ struct device *dev)
{
struct io_pgtable_ops *pgtbl_ops;
+ enum io_pgtable_fmt fmt;
- /* No need to allocate io pgtable ops in passthrough mode */
- if (!(type & __IOMMU_DOMAIN_PAGING))
- return 0;
-
- switch (pgtable) {
- case AMD_IOMMU_V1:
- domain->pd_mode = PD_MODE_V1;
+ switch (domain->pd_mode) {
+ case PD_MODE_V1:
+ fmt = AMD_IOMMU_V1;
break;
- case AMD_IOMMU_V2:
- domain->pd_mode = PD_MODE_V2;
+ case PD_MODE_V2:
+ fmt = AMD_IOMMU_V2;
break;
- default:
- return -EINVAL;
}
- pgtbl_ops =
- alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
+ domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
if (!pgtbl_ops)
return -ENOMEM;
return 0;
}
-static inline u64 dma_max_address(int pgtable)
+static inline u64 dma_max_address(enum protection_domain_mode pgtable)
{
- if (pgtable == AMD_IOMMU_V1)
+ if (pgtable == PD_MODE_V1)
return ~0ULL;
/* V2 with 4/5 level page table */
@@ -2348,31 +2507,21 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
return iommu && (iommu->features & FEATURE_HDSUP);
}
-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
- struct device *dev,
- u32 flags, int pgtable)
+static struct iommu_domain *
+do_iommu_domain_alloc(struct device *dev, u32 flags,
+ enum protection_domain_mode pgtable)
{
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
struct protection_domain *domain;
- struct amd_iommu *iommu = NULL;
int ret;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
- /*
- * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
- * default to use IOMMU_DOMAIN_DMA[_FQ].
- */
- if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
- return ERR_PTR(-EINVAL);
-
- domain = protection_domain_alloc(type,
- dev ? dev_to_node(dev) : NUMA_NO_NODE);
+ domain = protection_domain_alloc();
if (!domain)
return ERR_PTR(-ENOMEM);
- ret = pdom_setup_pgtable(domain, type, pgtable);
+ domain->pd_mode = pgtable;
+ ret = pdom_setup_pgtable(domain, dev);
if (ret) {
pdom_id_free(domain->id);
kfree(domain);
@@ -2384,72 +2533,45 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
domain->domain.geometry.force_aperture = true;
domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
- if (iommu) {
- domain->domain.type = type;
- domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ domain->domain.ops = iommu->iommu.ops->default_domain_ops;
- if (dirty_tracking)
- domain->domain.dirty_ops = &amd_dirty_ops;
- }
+ if (dirty_tracking)
+ domain->domain.dirty_ops = &amd_dirty_ops;
return &domain->domain;
}
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
-{
- struct iommu_domain *domain;
- int pgtable = amd_iommu_pgtable;
-
- /*
- * Force IOMMU v1 page table when allocating
- * domain for pass-through devices.
- */
- if (type == IOMMU_DOMAIN_UNMANAGED)
- pgtable = AMD_IOMMU_V1;
-
- domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
- if (IS_ERR(domain))
- return NULL;
-
- return domain;
-}
-
static struct iommu_domain *
amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
- unsigned int type = IOMMU_DOMAIN_UNMANAGED;
- struct amd_iommu *iommu = NULL;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID;
- if (dev)
- iommu = get_amd_iommu_from_dev(dev);
-
if ((flags & ~supported_flags) || user_data)
return ERR_PTR(-EOPNOTSUPP);
- /* Allocate domain with v2 page table if IOMMU supports PASID. */
- if (flags & IOMMU_HWPT_ALLOC_PASID) {
+ switch (flags & supported_flags) {
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ /* Allocate domain with v1 page table for dirty tracking */
+ if (!amd_iommu_hd_support(iommu))
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
+ case IOMMU_HWPT_ALLOC_PASID:
+ /* Allocate domain with v2 page table if IOMMU supports PASID. */
if (!amd_iommu_pasid_supported())
- return ERR_PTR(-EOPNOTSUPP);
-
- return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
- }
-
- /* Allocate domain with v1 page table for dirty tracking */
- if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
- if (iommu && amd_iommu_hd_support(iommu)) {
- return do_iommu_domain_alloc(type, dev,
- flags, AMD_IOMMU_V1);
- }
-
- return ERR_PTR(-EOPNOTSUPP);
+ break;
+ return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
+ case 0:
+ /* If nothing specific is required, use the kernel command-line default */
+ return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
+ default:
+ break;
}
-
- /* If nothing specific is required use the kernel commandline default */
- return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
+ return ERR_PTR(-EOPNOTSUPP);
}
void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -2475,10 +2597,19 @@ static int blocked_domain_attach_device(struct iommu_domain *domain,
return 0;
}
+static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ amd_iommu_remove_dev_pasid(dev, pasid, old);
+ return 0;
+}
+
static struct iommu_domain blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocked_domain_attach_device,
+ .set_dev_pasid = blocked_domain_set_dev_pasid,
}
};
@@ -2498,7 +2629,7 @@ void amd_iommu_init_identity_domain(void)
identity_domain.id = pdom_id_alloc();
- protection_domain_init(&identity_domain, NUMA_NO_NODE);
+ protection_domain_init(&identity_domain);
}
/* Same as blocked domain except it supports only ops->attach_dev() */
@@ -2666,12 +2797,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct protection_domain *pdomain = to_pdomain(domain);
- struct dev_table_entry *dev_table;
+ struct dev_table_entry *dte;
struct iommu_dev_data *dev_data;
bool domain_flush = false;
struct amd_iommu *iommu;
unsigned long flags;
- u64 pte_root;
+ u64 new;
spin_lock_irqsave(&pdomain->lock, flags);
if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2680,16 +2811,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+ spin_lock(&dev_data->dte_lock);
iommu = get_amd_iommu_from_dev_data(dev_data);
-
- dev_table = get_dev_table(iommu);
- pte_root = dev_table[dev_data->devid].data[0];
-
- pte_root = (enable ? pte_root | DTE_FLAG_HAD :
- pte_root & ~DTE_FLAG_HAD);
+ dte = &get_dev_table(iommu)[dev_data->devid];
+ new = dte->data[0];
+ new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+ dte->data[0] = new;
+ spin_unlock(&dev_data->dte_lock);
/* Flush device DTE */
- dev_table[dev_data->devid].data[0] = pte_root;
device_flush_dte(dev_data);
domain_flush = true;
}
@@ -2890,7 +3020,6 @@ const struct iommu_ops amd_iommu_ops = {
.blocked_domain = &blocked_domain,
.release_domain = &release_domain,
.identity_domain = &identity_domain.domain,
- .domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device,
@@ -2901,7 +3030,6 @@ const struct iommu_ops amd_iommu_ops = {
.def_domain_type = amd_iommu_def_domain_type,
.dev_enable_feat = amd_iommu_dev_enable_feature,
.dev_disable_feat = amd_iommu_dev_disable_feature,
- .remove_dev_pasid = amd_iommu_remove_dev_pasid,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -2956,17 +3084,23 @@ out:
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- u64 dte;
- struct dev_table_entry *dev_table = get_dev_table(iommu);
+ u64 new;
+ struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+ struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
+
+ if (dev_data)
+ spin_lock(&dev_data->dte_lock);
- dte = dev_table[devid].data[2];
- dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
- dte |= iommu_virt_to_phys(table->table);
- dte |= DTE_IRQ_REMAP_INTCTL;
- dte |= DTE_INTTABLEN;
- dte |= DTE_IRQ_REMAP_ENABLE;
+ new = READ_ONCE(dte->data[2]);
+ new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+ new |= iommu_virt_to_phys(table->table);
+ new |= DTE_IRQ_REMAP_INTCTL;
+ new |= DTE_INTTABLEN;
+ new |= DTE_IRQ_REMAP_ENABLE;
+ WRITE_ONCE(dte->data[2], new);
- dev_table[devid].data[2] = dte;
+ if (dev_data)
+ spin_unlock(&dev_data->dte_lock);
}
static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index 8c73a30c2800..11150cfd6718 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -185,12 +185,13 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct protection_domain *pdom;
int ret;
- pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
+ pdom = protection_domain_alloc();
if (!pdom)
return ERR_PTR(-ENOMEM);
pdom->domain.ops = &amd_sva_domain_ops;
pdom->mn.ops = &sva_mn;
+ pdom->domain.type = IOMMU_DOMAIN_SVA;
ret = mmu_notifier_register(&pdom->mn, mm);
if (ret) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index c7cc613050d9..5aa2e7af58b4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -178,18 +178,12 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
- const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID;
struct arm_smmu_nested_domain *nested_domain;
struct iommu_hwpt_arm_smmuv3 arg;
bool enable_ats = false;
int ret;
- /*
- * Faults delivered to the nested domain are faults that originated by
- * the S1 in the domain. The core code will match all PASIDs when
- * delivering the fault due to user_pasid_table
- */
- if (flags & ~SUPPORTED_FLAGS)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
ret = iommu_copy_struct_from_user(&arg, user_data,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1d3e71569775..9ba596430e7c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -112,6 +112,15 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
* from the current CPU register
*/
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+
+ /*
+ * Note that we don't bother with S1PIE on the SMMU, we just rely on
+ * our default encoding scheme matching direct permissions anyway.
+ * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
+ * either is enabled for CPUs, just in case anyone imagines otherwise.
+ */
+ if (system_supports_poe() || system_supports_gcs())
+ dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
@@ -206,8 +215,12 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
unsigned long asid_bits;
u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
- if (vabits_actual == 52)
+ if (vabits_actual == 52) {
+ /* We don't support LPA2 */
+ if (PAGE_SIZE != SZ_64K)
+ return false;
feat_mask |= ARM_SMMU_FEAT_VAX;
+ }
if ((smmu->features & feat_mask) != feat_mask)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index a5c7002ff75b..358072b4e293 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <kunit/visibility.h>
#include <uapi/linux/iommufd.h>
@@ -83,8 +84,28 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
-static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_device *smmu, u32 flags);
+static const char * const event_str[] = {
+ [EVT_ID_BAD_STREAMID_CONFIG] = "C_BAD_STREAMID",
+ [EVT_ID_STE_FETCH_FAULT] = "F_STE_FETCH",
+ [EVT_ID_BAD_STE_CONFIG] = "C_BAD_STE",
+ [EVT_ID_STREAM_DISABLED_FAULT] = "F_STREAM_DISABLED",
+ [EVT_ID_BAD_SUBSTREAMID_CONFIG] = "C_BAD_SUBSTREAMID",
+ [EVT_ID_CD_FETCH_FAULT] = "F_CD_FETCH",
+ [EVT_ID_BAD_CD_CONFIG] = "C_BAD_CD",
+ [EVT_ID_TRANSLATION_FAULT] = "F_TRANSLATION",
+ [EVT_ID_ADDR_SIZE_FAULT] = "F_ADDR_SIZE",
+ [EVT_ID_ACCESS_FAULT] = "F_ACCESS",
+ [EVT_ID_PERMISSION_FAULT] = "F_PERMISSION",
+ [EVT_ID_VMS_FETCH_FAULT] = "F_VMS_FETCH",
+};
+
+static const char * const event_class_str[] = {
+ [0] = "CD fetch",
+ [1] = "Stage 1 translation table fetch",
+ [2] = "Input address caused fault",
+ [3] = "Reserved",
+};
+
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
static void parse_driver_options(struct arm_smmu_device *smmu)
@@ -1759,17 +1780,49 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
}
/* IRQ and event handlers */
-static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ struct arm_smmu_master *master;
+
+ event->id = FIELD_GET(EVTQ_0_ID, raw[0]);
+ event->sid = FIELD_GET(EVTQ_0_SID, raw[0]);
+ event->ssv = FIELD_GET(EVTQ_0_SSV, raw[0]);
+ event->ssid = event->ssv ? FIELD_GET(EVTQ_0_SSID, raw[0]) : IOMMU_NO_PASID;
+ event->privileged = FIELD_GET(EVTQ_1_PnU, raw[1]);
+ event->instruction = FIELD_GET(EVTQ_1_InD, raw[1]);
+ event->s2 = FIELD_GET(EVTQ_1_S2, raw[1]);
+ event->read = FIELD_GET(EVTQ_1_RnW, raw[1]);
+ event->stag = FIELD_GET(EVTQ_1_STAG, raw[1]);
+ event->stall = FIELD_GET(EVTQ_1_STALL, raw[1]);
+ event->class = FIELD_GET(EVTQ_1_CLASS, raw[1]);
+ event->iova = FIELD_GET(EVTQ_2_ADDR, raw[2]);
+ event->ipa = raw[3] & EVTQ_3_IPA;
+ event->fetch_addr = raw[3] & EVTQ_3_FETCH_ADDR;
+ event->ttrnw = FIELD_GET(EVTQ_1_TT_READ, raw[1]);
+ event->class_tt = false;
+ event->dev = NULL;
+
+ if (event->id == EVT_ID_PERMISSION_FAULT)
+ event->class_tt = (event->class == EVTQ_1_CLASS_TT);
+
+ mutex_lock(&smmu->streams_mutex);
+ master = arm_smmu_find_master(smmu, event->sid);
+ if (master)
+ event->dev = get_device(master->dev);
+ mutex_unlock(&smmu->streams_mutex);
+}
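For orientation, the decode above is plain FIELD_GET() work on the four event-queue dwords; with EVTQ_0_ID defined as GENMASK_ULL(7, 0) and EVTQ_0_SID assumed to be GENMASK_ULL(63, 32) in arm-smmu-v3.h, the first dword decodes as in this illustrative sketch (a low byte of 0x10 would be EVT_ID_TRANSLATION_FAULT):

#include <stdint.h>

/* Sketch only: extract the event ID and stream ID from raw[0], assuming
 * the EVTQ_0_* field layout from the header. */
static inline uint8_t evtq_id(uint64_t raw0)
{
	return raw0 & 0xff;		/* EVTQ_0_ID, bits 7:0 */
}

static inline uint32_t evtq_sid(uint64_t raw0)
{
	return raw0 >> 32;		/* EVTQ_0_SID, bits 63:32 (assumed) */
}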
+
+static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
+ struct arm_smmu_event *event)
{
int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
- bool ssid_valid = evt[0] & EVTQ_0_SSV;
- u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
- switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+ switch (event->id) {
case EVT_ID_TRANSLATION_FAULT:
case EVT_ID_ADDR_SIZE_FAULT:
case EVT_ID_ACCESS_FAULT:
@@ -1779,35 +1832,35 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
return -EOPNOTSUPP;
}
- if (!(evt[1] & EVTQ_1_STALL))
+ if (!event->stall)
return -EOPNOTSUPP;
- if (evt[1] & EVTQ_1_RnW)
+ if (event->read)
perm |= IOMMU_FAULT_PERM_READ;
else
perm |= IOMMU_FAULT_PERM_WRITE;
- if (evt[1] & EVTQ_1_InD)
+ if (event->instruction)
perm |= IOMMU_FAULT_PERM_EXEC;
- if (evt[1] & EVTQ_1_PnU)
+ if (event->privileged)
perm |= IOMMU_FAULT_PERM_PRIV;
flt->type = IOMMU_FAULT_PAGE_REQ;
flt->prm = (struct iommu_fault_page_request) {
.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .grpid = event->stag,
.perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ .addr = event->iova,
};
- if (ssid_valid) {
+ if (event->ssv) {
flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ flt->prm.pasid = event->ssid;
}
mutex_lock(&smmu->streams_mutex);
- master = arm_smmu_find_master(smmu, sid);
+ master = arm_smmu_find_master(smmu, event->sid);
if (!master) {
ret = -EINVAL;
goto out_unlock;
@@ -1819,29 +1872,82 @@ out_unlock:
return ret;
}
+static void arm_smmu_dump_raw_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *event)
+{
+ int i;
+
+ dev_err(smmu->dev, "event 0x%02x received:\n", event->id);
+
+ for (i = 0; i < EVTQ_ENT_DWORDS; ++i)
+ dev_err(smmu->dev, "\t0x%016llx\n", raw[i]);
+}
+
+#define ARM_SMMU_EVT_KNOWN(e) ((e)->id < ARRAY_SIZE(event_str) && event_str[(e)->id])
+#define ARM_SMMU_LOG_EVT_STR(e) ARM_SMMU_EVT_KNOWN(e) ? event_str[(e)->id] : "UNKNOWN"
+#define ARM_SMMU_LOG_CLIENT(e) (e)->dev ? dev_name((e)->dev) : "(unassigned sid)"
+
+static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw,
+ struct arm_smmu_event *evt,
+ struct ratelimit_state *rs)
+{
+ if (!__ratelimit(rs))
+ return;
+
+ arm_smmu_dump_raw_event(smmu, raw, evt);
+
+ switch (evt->id) {
+ case EVT_ID_TRANSLATION_FAULT:
+ case EVT_ID_ADDR_SIZE_FAULT:
+ case EVT_ID_ACCESS_FAULT:
+ case EVT_ID_PERMISSION_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->iova, evt->ipa);
+
+ dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x",
+ evt->privileged ? "priv" : "unpriv",
+ evt->instruction ? "inst" : "data",
+ str_read_write(evt->read),
+ evt->s2 ? "s2" : "s1", event_class_str[evt->class],
+ evt->class_tt ? (evt->ttrnw ? " ttd_read" : " ttd_write") : "",
+ evt->stall ? " stall" : "", evt->stag);
+
+ break;
+
+ case EVT_ID_STE_FETCH_FAULT:
+ case EVT_ID_CD_FETCH_FAULT:
+ case EVT_ID_VMS_FETCH_FAULT:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid, evt->fetch_addr);
+
+ break;
+
+ default:
+ dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x",
+ ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt),
+ evt->sid, evt->ssid);
+ }
+}
+
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
- int i, ret;
+ u64 evt[EVTQ_ENT_DWORDS];
+ struct arm_smmu_event event = {0};
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->evtq.q;
struct arm_smmu_ll_queue *llq = &q->llq;
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- u64 evt[EVTQ_ENT_DWORDS];
do {
while (!queue_remove_raw(q, evt)) {
- u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
-
- ret = arm_smmu_handle_evt(smmu, evt);
- if (!ret || !__ratelimit(&rs))
- continue;
-
- dev_info(smmu->dev, "event 0x%02x received:\n", id);
- for (i = 0; i < ARRAY_SIZE(evt); ++i)
- dev_info(smmu->dev, "\t0x%016llx\n",
- (unsigned long long)evt[i]);
+ arm_smmu_decode_event(smmu, evt, &event);
+ if (arm_smmu_handle_event(smmu, &event))
+ arm_smmu_dump_event(smmu, evt, &event, &rs);
+ put_device(event.dev);
cond_resched();
}
@@ -2353,39 +2459,12 @@ struct arm_smmu_domain *arm_smmu_domain_alloc(void)
if (!smmu_domain)
return ERR_PTR(-ENOMEM);
- mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
return smmu_domain;
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain;
-
- /*
- * Allocate the domain and initialise some of its data structures.
- * We can't really do anything meaningful until we've added a
- * master.
- */
- smmu_domain = arm_smmu_domain_alloc();
- if (IS_ERR(smmu_domain))
- return ERR_CAST(smmu_domain);
-
- if (dev) {
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- int ret;
-
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0);
- if (ret) {
- kfree(smmu_domain);
- return ERR_PTR(ret);
- }
- }
- return &smmu_domain->domain;
-}
-
static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2451,12 +2530,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_domain *smmu_domain);
bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- /* Restrict the stage to what we can actually support */
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
- if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
@@ -2745,9 +2818,14 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
* Translation Requests and Translated transactions are denied
* as though ATS is disabled for the stream (STE.EATS == 0b00),
* causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
- * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
- * enabled if we have arm_smmu_domain, those always have page
- * tables.
+ * (IHI0070Ea 5.2 Stream Table Entry).
+ *
+ * However, if we have installed a CD table and are using S1DSS
+ * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
+ * skipping stage 1".
+ *
+ * Disable ATS if we are going to create a normal 0b100 bypass
+ * STE.
*/
state->ats_enabled = !state->disable_ats &&
arm_smmu_ats_supported(master);
@@ -2853,15 +2931,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
state.master = master = dev_iommu_priv_get(dev);
smmu = master->smmu;
- mutex_lock(&smmu_domain->init_mutex);
-
- if (!smmu_domain->smmu) {
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- } else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
-
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
+ if (smmu_domain->smmu != smmu)
return ret;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
@@ -2918,16 +2988,9 @@ static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_cd target_cd;
- int ret = 0;
- mutex_lock(&smmu_domain->init_mutex);
- if (!smmu_domain->smmu)
- ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
- else if (smmu_domain->smmu != smmu)
- ret = -EINVAL;
- mutex_unlock(&smmu_domain->init_mutex);
- if (ret)
- return ret;
+ if (smmu_domain->smmu != smmu)
+ return -EINVAL;
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return -EINVAL;
@@ -3016,13 +3079,12 @@ out_unlock:
return ret;
}
-static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old_domain)
{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(old_domain);
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- struct arm_smmu_domain *smmu_domain;
-
- smmu_domain = to_smmu_domain(domain);
mutex_lock(&arm_smmu_asid_lock);
arm_smmu_clear_cd(master, pasid);
@@ -3043,6 +3105,7 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
sid_domain->ops->attach_dev(sid_domain, dev);
}
+ return 0;
}
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
@@ -3070,8 +3133,10 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
if (arm_smmu_ssids_in_use(&master->cd_table)) {
/*
* If a CD table has to be present then we need to run with ATS
- * on even though the RID will fail ATS queries with UR. This is
- * because we have no idea what the PASID's need.
+ * on because we have to assume a PASID is using ATS. For
+ * IDENTITY this will set things up so that S1DSS=bypass, which
+ * follows the explanation in "13.6.4 Full ATS skipping stage 1"
+ * and allows for ATS on the RID to work.
*/
state.cd_needs_ats = true;
arm_smmu_attach_prepare(&state, domain);
@@ -3124,6 +3189,7 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
static const struct iommu_domain_ops arm_smmu_blocked_ops = {
.attach_dev = arm_smmu_attach_dev_blocked,
+ .set_dev_pasid = arm_smmu_blocking_set_dev_pasid,
};
static struct iommu_domain arm_smmu_blocked_domain = {
@@ -3136,6 +3202,7 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID |
IOMMU_HWPT_ALLOC_NEST_PARENT;
@@ -3147,25 +3214,43 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
- if (flags & IOMMU_HWPT_ALLOC_PASID)
- return arm_smmu_domain_alloc_paging(dev);
-
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
- if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT) {
- if (!(master->smmu->features & ARM_SMMU_FEAT_NESTING)) {
+ switch (flags) {
+ case 0:
+ /* Prefer S1 if available */
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ break;
+ case IOMMU_HWPT_ALLOC_NEST_PARENT:
+ if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) {
ret = -EOPNOTSUPP;
goto err_free;
}
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
smmu_domain->nest_parent = true;
+ break;
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+ case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID:
+ case IOMMU_HWPT_ALLOC_PASID:
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) {
+ ret = -EOPNOTSUPP;
+ goto err_free;
+ }
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_free;
}
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
- ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags);
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu, flags);
if (ret)
goto err_free;
return &smmu_domain->domain;
@@ -3237,8 +3322,8 @@ static struct platform_driver arm_smmu_driver;
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -3543,7 +3628,6 @@ static struct iommu_ops arm_smmu_ops = {
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.hw_info = arm_smmu_hw_info,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
.domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
.probe_device = arm_smmu_probe_device,
@@ -3551,7 +3635,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .remove_dev_pasid = arm_smmu_remove_dev_pasid,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
@@ -4239,7 +4322,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
*/
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
- coherent ? "true" : "false");
+ str_true_false(coherent));
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
@@ -4663,7 +4746,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Initialise in-memory data structures */
ret = arm_smmu_init_structures(smmu);
if (ret)
- return ret;
+ goto err_free_iopf;
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
@@ -4674,22 +4757,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Reset the device */
ret = arm_smmu_device_reset(smmu);
if (ret)
- return ret;
+ goto err_disable;
/* And we're up. Go go go! */
ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
"smmu3.%pa", &ioaddr);
if (ret)
- return ret;
+ goto err_disable;
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
+ goto err_free_sysfs;
}
return 0;
+
+err_free_sysfs:
+ iommu_device_sysfs_remove(&smmu->iommu);
+err_disable:
+ arm_smmu_device_disable(smmu);
+err_free_iopf:
+ iopf_queue_free(smmu->evtq.iopf);
+ return ret;
}
static void arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 0107d3f333a1..bd9d7c85576a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -452,10 +452,18 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
+#define EVT_ID_BAD_STREAMID_CONFIG 0x02
+#define EVT_ID_STE_FETCH_FAULT 0x03
+#define EVT_ID_BAD_STE_CONFIG 0x04
+#define EVT_ID_STREAM_DISABLED_FAULT 0x06
+#define EVT_ID_BAD_SUBSTREAMID_CONFIG 0x08
+#define EVT_ID_CD_FETCH_FAULT 0x09
+#define EVT_ID_BAD_CD_CONFIG 0x0a
#define EVT_ID_TRANSLATION_FAULT 0x10
#define EVT_ID_ADDR_SIZE_FAULT 0x11
#define EVT_ID_ACCESS_FAULT 0x12
#define EVT_ID_PERMISSION_FAULT 0x13
+#define EVT_ID_VMS_FETCH_FAULT 0x25
#define EVTQ_0_SSV (1UL << 11)
#define EVTQ_0_SSID GENMASK_ULL(31, 12)
@@ -467,9 +475,11 @@ static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
#define EVTQ_1_RnW (1UL << 35)
#define EVTQ_1_S2 (1UL << 39)
#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
+#define EVTQ_1_CLASS_TT 0x01
#define EVTQ_1_TT_READ (1UL << 44)
#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
#define EVTQ_3_IPA GENMASK_ULL(51, 12)
+#define EVTQ_3_FETCH_ADDR GENMASK_ULL(51, 3)
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
@@ -789,6 +799,26 @@ struct arm_smmu_stream {
struct rb_node node;
};
+struct arm_smmu_event {
+ u8 stall : 1,
+ ssv : 1,
+ privileged : 1,
+ instruction : 1,
+ s2 : 1,
+ read : 1,
+ ttrnw : 1,
+ class_tt : 1;
+ u8 id;
+ u8 class;
+ u16 stag;
+ u32 sid;
+ u32 ssid;
+ u64 iova;
+ u64 ipa;
+ u64 fetch_addr;
+ struct device *dev;
+};
+
/* SMMU private data for each master */
struct arm_smmu_master {
struct arm_smmu_device *smmu;
@@ -813,7 +843,6 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
- struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
atomic_t nr_ats_masters;
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 6e41ddaa24d6..d525ab43a4ae 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -79,7 +79,6 @@
#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
#define VCMDQ_ADDR GENMASK(47, 5)
#define VCMDQ_LOG2SIZE GENMASK(4, 0)
-#define VCMDQ_LOG2SIZE_MAX 19
#define TEGRA241_VCMDQ_BASE 0x00000
#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008
@@ -505,12 +504,15 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
struct arm_smmu_queue *q = &cmdq->q;
char name[16];
+ u32 regval;
int ret;
snprintf(name, 16, "vcmdq%u", vcmdq->idx);
- /* Queue size, capped to ensure natural alignment */
- q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
+ /* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
+ regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
+ q->llq.max_n_shift =
+ min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
/* Use the common helper to init the VCMDQ, and then... */
ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 99030e6b16e7..db9b9a8e139c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -110,7 +110,6 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
- int i;
/*
* On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
* writes to the context bank ACTLRs will stick. And we just hope that
@@ -128,11 +127,12 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+#ifdef CONFIG_ARM_SMMU_MMU_500_CPRE_ERRATA
/*
* Disable MMU-500's not-particularly-beneficial next-page
* prefetcher for the sake of at least 5 known errata.
*/
- for (i = 0; i < smmu->num_context_banks; ++i) {
+ for (int i = 0; i < smmu->num_context_banks; ++i) {
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
reg &= ~ARM_MMU500_ACTLR_CPRE;
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
@@ -140,6 +140,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
if (reg & ARM_MMU500_ACTLR_CPRE)
dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n");
}
+#endif
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index 548783f3f8e8..d03b2239baad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -73,7 +73,7 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
if (__ratelimit(&rs)) {
dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
- cfg = qsmmu->cfg;
+ cfg = qsmmu->data->cfg;
if (!cfg)
return;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 6372f3e25c4b..59d02687280e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -16,6 +16,40 @@
#define QCOM_DUMMY_VAL -1
+/*
+ * SMMU-500 TRM defines BIT(0) as CMTLB (Enable context caching in the
+ * macro TLB) and BIT(1) as CPRE (Enable context caching in the prefetch
+ * buffer). The remaining bits are implementation defined and vary across
+ * SoCs.
+ */
+
+#define CPRE (1 << 1)
+#define CMTLB (1 << 0)
+#define PREFETCH_SHIFT 8
+#define PREFETCH_DEFAULT 0
+#define PREFETCH_SHALLOW (1 << PREFETCH_SHIFT)
+#define PREFETCH_MODERATE (2 << PREFETCH_SHIFT)
+#define PREFETCH_DEEP (3 << PREFETCH_SHIFT)
+#define GFX_ACTLR_PRR (1 << 5)
+
+static const struct of_device_id qcom_smmu_actlr_client_of_match[] = {
+ { .compatible = "qcom,adreno",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-gmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,adreno-smmu",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,fastrpc",
+ .data = (const void *) (PREFETCH_DEEP | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-mdss",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sc7280-venus",
+ .data = (const void *) (PREFETCH_SHALLOW | CPRE | CMTLB) },
+ { .compatible = "qcom,sm8550-mdss",
+ .data = (const void *) (PREFETCH_DEFAULT | CMTLB) },
+ { }
+};
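For reference, the .data values follow directly from the macros defined above: the Adreno/fastrpc entries resolve to 0x303 (deep prefetch, CPRE, CMTLB), the sc7280 entries to 0x103, and the sm8550-mdss entry to 0x001. An illustrative compile-time check of that arithmetic:

/* Illustration only: the ACTLR values encoded in .data above. */
_Static_assert(((3 << 8) | (1 << 1) | (1 << 0)) == 0x303,
	       "PREFETCH_DEEP | CPRE | CMTLB");
_Static_assert(((1 << 8) | (1 << 1) | (1 << 0)) == 0x103,
	       "PREFETCH_SHALLOW | CPRE | CMTLB");
_Static_assert(((0 << 8) | (1 << 0)) == 0x001,
	       "PREFETCH_DEFAULT | CMTLB");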
+
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
@@ -99,6 +133,47 @@ static void qcom_adreno_smmu_resume_translation(const void *cookie, bool termina
arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}
+static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ u32 reg = 0;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR);
+ reg &= ~GFX_ACTLR_PRR;
+ if (set)
+ reg |= FIELD_PREP(GFX_ACTLR_PRR, 1);
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_ACTLR, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
+static void qcom_adreno_smmu_set_prr_addr(const void *cookie, phys_addr_t page_addr)
+{
+ struct arm_smmu_domain *smmu_domain = (void *)cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(smmu->dev);
+ if (ret < 0) {
+ dev_err(smmu->dev, "failed to get runtime PM: %d\n", ret);
+ return;
+ }
+
+ writel_relaxed(lower_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_LADDR);
+ writel_relaxed(upper_32_bits(page_addr),
+ smmu->base + ARM_SMMU_GFX_PRR_CFG_UADDR);
+ pm_runtime_put_autosuspend(smmu->dev);
+}
+
#define QCOM_ADRENO_SMMU_GPU_SID 0
static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -207,13 +282,37 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
return true;
}
+static void qcom_smmu_set_actlr_dev(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
+ const struct of_device_id *client_match)
+{
+ const struct of_device_id *match =
+ of_match_device(client_match, dev);
+
+ if (!match) {
+ dev_dbg(dev, "no ACTLR settings present\n");
+ return;
+ }
+
+ arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR, (unsigned long)match->data);
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ const struct device_node *np = smmu_domain->smmu->dev->of_node;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
struct adreno_smmu_priv *priv;
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
/* Only enable split pagetables for the GPU device (SID 0) */
if (!qcom_adreno_smmu_is_gpu_device(dev))
return 0;
@@ -239,6 +338,14 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;
+ priv->set_prr_bit = NULL;
+ priv->set_prr_addr = NULL;
+
+ if (of_device_is_compatible(np, "qcom,smmu-500") &&
+ of_device_is_compatible(np, "qcom,adreno-smmu")) {
+ priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
+ priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
+ }
return 0;
}
@@ -269,8 +376,18 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct of_device_id *client_match;
+ int cbndx = smmu_domain->cfg.cbndx;
+
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+ client_match = qsmmu->data->client_match;
+
+ if (client_match)
+ qcom_smmu_set_actlr_dev(dev, smmu, cbndx, client_match);
+
return 0;
}
@@ -507,7 +624,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
- qsmmu->cfg = data->cfg;
+ qsmmu->data = data;
return &qsmmu->smmu;
}
@@ -550,6 +667,7 @@ static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
.impl = &qcom_smmu_500_impl,
.adreno_impl = &qcom_adreno_smmu_500_impl,
.cfg = &qcom_smmu_impl0_cfg,
+ .client_match = qcom_smmu_actlr_client_of_match,
};
/*
@@ -567,6 +685,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 3c134d1a6277..8addd453f5f1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -8,7 +8,7 @@
struct qcom_smmu {
struct arm_smmu_device smmu;
- const struct qcom_smmu_config *cfg;
+ const struct qcom_smmu_match_data *data;
bool bypass_quirk;
u8 bypass_cbndx;
u32 stall_enabled;
@@ -28,6 +28,7 @@ struct qcom_smmu_match_data {
const struct qcom_smmu_config *cfg;
const struct arm_smmu_impl *impl;
const struct arm_smmu_impl *adreno_impl;
+ const struct of_device_id * const client_match;
};
irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 650664e0f6e3..de205a34ffc6 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -34,6 +34,7 @@
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/fsl/mc.h>
@@ -1411,8 +1412,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
- fwnode);
+ struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1437,17 +1438,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
goto out_free;
} else {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-
- /*
- * Defer probe if the relevant SMMU instance hasn't finished
- * probing yet. This is a fragile hack and we'd ideally
- * avoid this race in the core code. Until that's ironed
- * out, however, this is the most pragmatic option on the
- * table.
- */
- if (!smmu)
- return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
- "smmu dev has not bound yet\n"));
}
ret = -EINVAL;
@@ -2117,7 +2107,7 @@ static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
}
dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
- cnt == 1 ? "" : "s");
+ str_plural(cnt));
iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}
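
str_plural() comes from the <linux/string_choices.h> include added above and simply hides the cnt == 1 ? "" : "s" ternary; the mtk_iommu hunks further down switch to str_write_read() and str_enable_disable() from the same header. A minimal user-space sketch of equivalent helpers, just to show what the calls expand to:

#include <stdio.h>
#include <stdbool.h>

/* User-space stand-ins mirroring the <linux/string_choices.h> helpers. */
static const char *str_plural(long num)		{ return num == 1 ? "" : "s"; }
static const char *str_write_read(bool w)	{ return w ? "write" : "read"; }
static const char *str_enable_disable(bool v)	{ return v ? "enable" : "disable"; }

int main(void)
{
	int cnt = 3;

	printf("preserved %d boot mapping%s\n", cnt, str_plural(cnt));
	printf("%s fault while trying to %s the port\n",
	       str_write_read(true), str_enable_disable(false));
	return 0;
}
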
@@ -2227,29 +2217,26 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
i, irq);
}
+ platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
+ arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
+
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
"smmu.%pa", &smmu->ioaddr);
- if (err) {
- dev_err(dev, "Failed to register iommu in sysfs\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register iommu in sysfs\n");
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
using_legacy_binding ? NULL : dev);
if (err) {
- dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);
- return err;
+ return dev_err_probe(dev, err, "Failed to register iommu\n");
}
- platform_set_drvdata(pdev, smmu);
-
- /* Check for RMRs and install bypass SMRs if any */
- arm_smmu_rmr_install_bypass_smr(smmu);
-
- arm_smmu_device_reset(smmu);
- arm_smmu_test_smr_masks(smmu);
-
/*
* We want to avoid touching dev->power.lock in fastpaths unless
* it's really going to do something useful - pm_runtime_enabled()
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index e2aeb511ae90..2dbf3243b5ad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -154,6 +154,8 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_SCTLR_M BIT(0)
#define ARM_SMMU_CB_ACTLR 0x4
+#define ARM_SMMU_GFX_PRR_CFG_LADDR 0x6008
+#define ARM_SMMU_GFX_PRR_CFG_UADDR 0x600C
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_RESUME_TERMINATE BIT(0)
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 8a5c17b97310..2a86aa5d54c6 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -164,8 +164,8 @@ static int __init hyperv_prepare_irq_remapping(void)
* max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu
* into ioapic_max_cpumask if its APIC ID is less than 256.
*/
- for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
- if (cpu_physical_id(i) < 256)
+ for (i = min_t(unsigned int, nr_cpu_ids - 1, 255); i >= 0; i--)
+ if (cpu_possible(i) && cpu_physical_id(i) < 256)
cpumask_set_cpu(i, &ioapic_max_cpumask);
return 0;
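
The point of the change above is that possible CPU IDs are not necessarily contiguous: num_possible_cpus() counts set bits in the possible mask, so using it as a loop bound can miss high-numbered possible CPUs, while nr_cpu_ids bounds the largest valid index and cpu_possible() filters the holes. A small stand-alone illustration with an invented, sparse "possible" mask:

#include <stdio.h>

#define NR_CPU_IDS 8

/* Invented sparse possible mask: CPUs 0, 2 and 6 are possible. */
static const int possible[NR_CPU_IDS] = { 1, 0, 1, 0, 0, 0, 1, 0 };

static int num_possible(void)
{
	int i, n = 0;

	for (i = 0; i < NR_CPU_IDS; i++)
		n += possible[i];
	return n;
}

int main(void)
{
	int i;

	/* Old-style bound: only 3 CPUs are possible, so CPU 6 is never seen. */
	for (i = num_possible() - 1; i >= 0; i--)
		if (possible[i])
			printf("old loop sees cpu %d\n", i);

	/* Fixed bound: walk all IDs and filter on the mask. */
	for (i = NR_CPU_IDS - 1; i >= 0; i--)
		if (possible[i])
			printf("new loop sees cpu %d\n", i);
	return 0;
}
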
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index d3bb0798092d..6c7528130cf9 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 09694cca8752..fc35cba59145 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -47,6 +47,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct cache_tag *tag, *temp;
+ struct list_head *prev;
unsigned long flags;
tag = kzalloc(sizeof(*tag), GFP_KERNEL);
@@ -65,6 +66,7 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
tag->dev = iommu->iommu.dev;
spin_lock_irqsave(&domain->cache_lock, flags);
+ prev = &domain->cache_tags;
list_for_each_entry(temp, &domain->cache_tags, node) {
if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
temp->users++;
@@ -73,8 +75,15 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
trace_cache_tag_assign(temp);
return 0;
}
+ if (temp->iommu == iommu)
+ prev = &temp->node;
}
- list_add_tail(&tag->node, &domain->cache_tags);
+ /*
+ * Link cache tags of same iommu unit together, so corresponding
+ * flush ops can be batched for iommu unit.
+ */
+ list_add(&tag->node, prev);
+
spin_unlock_irqrestore(&domain->cache_lock, flags);
trace_cache_tag_assign(tag);
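
The hunk above changes cache_tag_assign() from appending every new tag at the tail to inserting it right after the last existing tag that belongs to the same IOMMU, so tags of one unit stay contiguous and later flush loops can batch per IOMMU. A stand-alone sketch of that insertion pattern on a singly linked list, with hypothetical node and key names:

#include <stdio.h>
#include <stdlib.h>

struct tag {
	int iommu_id;			/* which IOMMU this tag belongs to */
	struct tag *next;
};

/* Insert after the last existing tag with the same iommu_id so same-unit
 * tags stay contiguous; otherwise append at the tail. */
static void insert_grouped(struct tag **head, struct tag *nt)
{
	struct tag **pos = head, **after = NULL;

	for (; *pos; pos = &(*pos)->next)
		if ((*pos)->iommu_id == nt->iommu_id)
			after = &(*pos)->next;

	if (!after)
		after = pos;		/* no match: append at tail */
	nt->next = *after;
	*after = nt;
}

int main(void)
{
	struct tag *head = NULL, *t;
	int ids[] = { 0, 1, 0, 1, 0 };

	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		t = calloc(1, sizeof(*t));
		t->iommu_id = ids[i];
		insert_grouped(&head, t);
	}
	for (t = head; t; t = t->next)
		printf("%d ", t->iommu_id);	/* prints: 0 0 0 1 1 */
	printf("\n");
	return 0;
}
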
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
deleted file mode 100644
index 9862dc20b35e..000000000000
--- a/drivers/iommu/intel/cap_audit.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * cap_audit.c - audit iommu capabilities for boot time and hot plug
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- * Lu Baolu <baolu.lu@linux.intel.com>
- */
-
-#define pr_fmt(fmt) "DMAR: " fmt
-
-#include "iommu.h"
-#include "cap_audit.h"
-
-static u64 intel_iommu_cap_sanity;
-static u64 intel_iommu_ecap_sanity;
-
-static inline void check_irq_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
-}
-
-static inline void check_dmar_capabilities(struct intel_iommu *a,
- struct intel_iommu *b)
-{
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
- MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
- MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
-
- CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
-}
-
-static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- bool mismatch = false;
- u64 old_cap = intel_iommu_cap_sanity;
- u64 old_ecap = intel_iommu_ecap_sanity;
-
- if (type == CAP_AUDIT_HOTPLUG_IRQR) {
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
- goto out;
- }
-
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
-
- /* Abort hot plug if the hot plug iommu feature is smaller than global */
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
- MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
-
-out:
- if (mismatch) {
- intel_iommu_cap_sanity = old_cap;
- intel_iommu_ecap_sanity = old_ecap;
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
-{
- struct dmar_drhd_unit *d;
- struct intel_iommu *i;
- int rc = 0;
-
- rcu_read_lock();
- if (list_empty(&dmar_drhd_units))
- goto out;
-
- for_each_active_iommu(i, d) {
- if (!iommu) {
- intel_iommu_ecap_sanity = i->ecap;
- intel_iommu_cap_sanity = i->cap;
- iommu = i;
- continue;
- }
-
- if (type == CAP_AUDIT_STATIC_DMAR)
- check_dmar_capabilities(iommu, i);
- else
- check_irq_capabilities(iommu, i);
- }
-
- /*
- * If the system is sane to support scalable mode, either SL or FL
- * should be sane.
- */
- if (intel_cap_smts_sanity() &&
- !intel_cap_flts_sanity() && !intel_cap_slts_sanity())
- rc = -EOPNOTSUPP;
-
-out:
- rcu_read_unlock();
- return rc;
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
-{
- switch (type) {
- case CAP_AUDIT_STATIC_DMAR:
- case CAP_AUDIT_STATIC_IRQR:
- return cap_audit_static(iommu, type);
- case CAP_AUDIT_HOTPLUG_DMAR:
- case CAP_AUDIT_HOTPLUG_IRQR:
- return cap_audit_hotplug(iommu, type);
- default:
- break;
- }
-
- return -EFAULT;
-}
-
-bool intel_cap_smts_sanity(void)
-{
- return ecap_smts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_pasid_sanity(void)
-{
- return ecap_pasid(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_nest_sanity(void)
-{
- return ecap_nest(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_flts_sanity(void)
-{
- return ecap_flts(intel_iommu_ecap_sanity);
-}
-
-bool intel_cap_slts_sanity(void)
-{
- return ecap_slts(intel_iommu_ecap_sanity);
-}
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
deleted file mode 100644
index d07b75938961..000000000000
--- a/drivers/iommu/intel/cap_audit.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * cap_audit.h - audit iommu capabilities header
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Author: Kyung Min Park <kyung.min.park@intel.com>
- */
-
-/*
- * Capability Register Mask
- */
-#define CAP_FL5LP_MASK BIT_ULL(60)
-#define CAP_PI_MASK BIT_ULL(59)
-#define CAP_FL1GP_MASK BIT_ULL(56)
-#define CAP_RD_MASK BIT_ULL(55)
-#define CAP_WD_MASK BIT_ULL(54)
-#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
-#define CAP_NFR_MASK GENMASK_ULL(47, 40)
-#define CAP_PSI_MASK BIT_ULL(39)
-#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
-#define CAP_FRO_MASK GENMASK_ULL(33, 24)
-#define CAP_ZLR_MASK BIT_ULL(22)
-#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
-#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
-#define CAP_CM_MASK BIT_ULL(7)
-#define CAP_PHMR_MASK BIT_ULL(6)
-#define CAP_PLMR_MASK BIT_ULL(5)
-#define CAP_RWBF_MASK BIT_ULL(4)
-#define CAP_AFL_MASK BIT_ULL(3)
-#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
-
-/*
- * Extended Capability Register Mask
- */
-#define ECAP_RPS_MASK BIT_ULL(49)
-#define ECAP_SMPWC_MASK BIT_ULL(48)
-#define ECAP_FLTS_MASK BIT_ULL(47)
-#define ECAP_SLTS_MASK BIT_ULL(46)
-#define ECAP_SLADS_MASK BIT_ULL(45)
-#define ECAP_VCS_MASK BIT_ULL(44)
-#define ECAP_SMTS_MASK BIT_ULL(43)
-#define ECAP_PDS_MASK BIT_ULL(42)
-#define ECAP_DIT_MASK BIT_ULL(41)
-#define ECAP_PASID_MASK BIT_ULL(40)
-#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
-#define ECAP_EAFS_MASK BIT_ULL(34)
-#define ECAP_NWFS_MASK BIT_ULL(33)
-#define ECAP_SRS_MASK BIT_ULL(31)
-#define ECAP_ERS_MASK BIT_ULL(30)
-#define ECAP_PRS_MASK BIT_ULL(29)
-#define ECAP_NEST_MASK BIT_ULL(26)
-#define ECAP_MTS_MASK BIT_ULL(25)
-#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
-#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
-#define ECAP_SC_MASK BIT_ULL(7)
-#define ECAP_PT_MASK BIT_ULL(6)
-#define ECAP_EIM_MASK BIT_ULL(4)
-#define ECAP_DT_MASK BIT_ULL(2)
-#define ECAP_QI_MASK BIT_ULL(1)
-#define ECAP_C_MASK BIT_ULL(0)
-
-/*
- * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
- * IOMMU gets audited.
- */
-#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(a) != cap##_##feature(b)) { \
- intel_iommu_##cap##_sanity &= ~(MASK); \
- pr_info("IOMMU feature %s inconsistent", #feature); \
- } \
-} while (0)
-
-#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
- DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
-
-#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
-do { \
- if (cap##_##feature(intel_iommu_##cap##_sanity)) \
- DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
- (b)->cap, cap, feature, MASK); \
-} while (0)
-
-#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
-do { \
- u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
- min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
- intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
- min_feature; \
-} while (0)
-
-#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
-do { \
- if ((intel_iommu_##cap##_sanity & (MASK)) > \
- (cap##_##feature((iommu)->cap))) \
- mismatch = true; \
- else \
- (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
- (intel_iommu_##cap##_sanity & (MASK)); \
-} while (0)
-
-enum cap_audit_type {
- CAP_AUDIT_STATIC_DMAR,
- CAP_AUDIT_STATIC_IRQR,
- CAP_AUDIT_HOTPLUG_DMAR,
- CAP_AUDIT_HOTPLUG_IRQR,
-};
-
-bool intel_cap_smts_sanity(void);
-bool intel_cap_pasid_sanity(void);
-bool intel_cap_nest_sanity(void);
-bool intel_cap_flts_sanity(void);
-bool intel_cap_slts_sanity(void);
-
-static inline bool scalable_mode_support(void)
-{
- return (intel_iommu_sm && intel_cap_smts_sanity());
-}
-
-static inline bool pasid_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_pasid_sanity();
-}
-
-static inline bool nested_mode_support(void)
-{
- return scalable_mode_support() && intel_cap_nest_sanity();
-}
-
-int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 79e0da9eb626..cc46098f875b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -29,7 +29,6 @@
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
-#include "cap_audit.h"
#include "perfmon.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -2118,10 +2117,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
- if (ret)
- goto free_iommu;
-
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -2617,10 +2612,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
struct intel_iommu *iommu = dmaru->iommu;
int ret;
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
- if (ret)
- goto out;
-
/*
* Disable translation if already enabled prior to OS handover.
*/
@@ -3250,10 +3241,15 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
return 0;
}
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old);
+
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocking_domain_attach_dev,
+ .set_dev_pasid = blocking_domain_set_dev_pasid,
}
};
@@ -3342,8 +3338,7 @@ intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool first_stage;
if (flags &
- (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING
- | IOMMU_HWPT_FAULT_ID_VALID)))
+ (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
return ERR_PTR(-EOPNOTSUPP);
if (nested_parent && !nested_supported(iommu))
return ERR_PTR(-EOPNOTSUPP);
@@ -4090,22 +4085,26 @@ void domain_remove_dev_pasid(struct iommu_domain *domain,
break;
}
}
- WARN_ON_ONCE(!dev_pasid);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
- intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
- kfree(dev_pasid);
+ if (!WARN_ON_ONCE(!dev_pasid)) {
+ intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
+ kfree(dev_pasid);
+ }
}
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain)
+static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
- domain_remove_dev_pasid(domain, dev, pasid);
+ domain_remove_dev_pasid(old, dev, pasid);
+
+ return 0;
}
struct dev_pasid_info *
@@ -4445,21 +4444,6 @@ static struct iommu_domain identity_domain = {
},
};
-static struct iommu_domain *intel_iommu_domain_alloc_paging(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
- struct dmar_domain *dmar_domain;
- bool first_stage;
-
- first_stage = first_level_by_default(iommu);
- dmar_domain = paging_domain_alloc(dev, first_stage);
- if (IS_ERR(dmar_domain))
- return ERR_CAST(dmar_domain);
-
- return &dmar_domain->domain;
-}
-
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
@@ -4468,7 +4452,6 @@ const struct iommu_ops intel_iommu_ops = {
.hw_info = intel_iommu_hw_info,
.domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = intel_svm_domain_alloc,
- .domain_alloc_paging = intel_iommu_domain_alloc_paging,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
.release_device = intel_iommu_release_device,
@@ -4478,7 +4461,6 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
- .remove_dev_pasid = intel_iommu_remove_dev_pasid,
.pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index f5402df72a9b..ad795c772f21 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -24,7 +24,6 @@
#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
-#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -727,9 +726,6 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
- if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
- return -ENODEV;
-
if (!dmar_ir_support())
return -ENODEV;
@@ -1533,10 +1529,6 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
- ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
- if (ret)
- return ret;
-
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 5b7d85f1e143..fb59a7d35958 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -244,11 +244,31 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ if (WARN_ON(!pte)) {
spin_unlock(&iommu->lock);
return;
}
+ if (!pasid_pte_is_present(pte)) {
+ if (!pasid_pte_is_fault_disabled(pte)) {
+ WARN_ON(READ_ONCE(pte->val[0]) != 0);
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ /*
+ * When a PASID is used for SVA by a device, it's possible
+ * that the pasid entry is non-present with the Fault
+ * Processing Disabled bit set. Clear the pasid entry and
+ * drain the PRQ for the PASID before return.
+ */
+ pasid_clear_entry(pte);
+ spin_unlock(&iommu->lock);
+ intel_iommu_drain_pasid_prq(dev, pasid);
+
+ return;
+ }
+
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 082f4fe20216..668d8ece6b14 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -73,6 +73,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
+/* Get FPD(Fault Processing Disable) bit of a PASID table entry */
+static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
+{
+ return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
+}
+
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 6b9bb58a414f..7632c80edea6 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -223,6 +223,34 @@ static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
return ptes_per_table - (i & (ptes_per_table - 1));
}
+/*
+ * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
+ * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
+ * 2) R_SRKBC: After de-ciphering the table for PA size and valid initial lookup
+ * a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
+ * b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
+ * c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
+ * d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
+ */
+static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+{
+ unsigned int ias = cfg->ias;
+ unsigned int oas = cfg->oas;
+
+ /* Covers 1 and 2.d */
+ if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
+ return (oas == 48) || (ias == 48);
+
+ /* Covers 2.a and 2.c */
+ if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
+ return (oas == 40) || (oas == 42);
+
+ /* Case 2.b */
+ return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
+ (data->start_level == 1) && (oas == 40);
+}
+
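
arm_lpae_concat_mandatory() encodes the four Arm ARM cases from its comment as a pure predicate on granule size, start level, IAS and OAS. A stand-alone transcription of the same decision on plain integers, handy for sanity-checking the cases listed above:

#include <stdio.h>
#include <stdbool.h>

#define SZ_4K  0x1000u
#define SZ_16K 0x4000u

/* Same decision as arm_lpae_concat_mandatory(), on plain values. */
static bool concat_mandatory(unsigned granule, int start_level,
			     unsigned ias, unsigned oas)
{
	/* Covers 1 and 2.d */
	if (granule == SZ_16K && start_level == 0)
		return oas == 48 || ias == 48;

	/* Covers 2.a and 2.c */
	if (granule == SZ_4K && start_level == 0)
		return oas == 40 || oas == 42;

	/* Case 2.b */
	return granule == SZ_16K && start_level == 1 && oas == 40;
}

int main(void)
{
	/* The four numbered cases from the comment, all expected to print 1. */
	printf("%d %d %d %d\n",
	       concat_mandatory(SZ_16K, 0, 48, 48),	/* 1 / 2.d */
	       concat_mandatory(SZ_4K,  0, 40, 40),	/* 2.a     */
	       concat_mandatory(SZ_4K,  0, 42, 42),	/* 2.c     */
	       concat_mandatory(SZ_16K, 1, 40, 40));	/* 2.b     */
	return 0;
}
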
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
@@ -676,85 +704,107 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
data->start_level, ptep);
}
+struct io_pgtable_walk_data {
+ struct io_pgtable *iop;
+ void *data;
+ int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size);
+ unsigned long flags;
+ u64 addr;
+ const u64 end;
+};
+
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl);
+
+struct iova_to_phys_data {
+ arm_lpae_iopte pte;
+ int lvl;
+};
+
+static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iova_to_phys_data *data = walk_data->data;
+ data->pte = *ptep;
+ data->lvl = lvl;
+ return 0;
+}
+
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = data->start_level;
-
- do {
- /* Valid IOPTE pointer? */
- if (!ptep)
- return 0;
-
- /* Grab the IOPTE we're interested in */
- ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
- pte = READ_ONCE(*ptep);
-
- /* Valid entry? */
- if (!pte)
- return 0;
+ struct iova_to_phys_data d;
+ struct io_pgtable_walk_data walk_data = {
+ .data = &d,
+ .visit = visit_iova_to_phys,
+ .addr = iova,
+ .end = iova + 1,
+ };
+ int ret;
- /* Leaf entry? */
- if (iopte_leaf(pte, lvl, data->iop.fmt))
- goto found_translation;
+ ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+ if (ret)
+ return 0;
- /* Take it to the next level */
- ptep = iopte_deref(pte, data);
- } while (++lvl < ARM_LPAE_MAX_LEVELS);
+ iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
+ return iopte_to_paddr(d.pte, data) | iova;
+}
- /* Ran out of page tables to walk */
+static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
+ data->ptes[lvl] = *ptep;
return 0;
-
-found_translation:
- iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
- return iopte_to_paddr(pte, data) | iova;
}
-struct io_pgtable_walk_data {
- struct iommu_dirty_bitmap *dirty;
- unsigned long flags;
- u64 addr;
- const u64 end;
-};
+static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
+ void *wd)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_walk_data walk_data = {
+ .data = wd,
+ .visit = visit_pgtable_walk,
+ .addr = iova,
+ .end = iova + 1,
+ };
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
+}
-static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep, int lvl)
+static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
- if (iopte_leaf(pte, lvl, iop->fmt)) {
- size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ int ret = walk_data->visit(walk_data, lvl, ptep, size);
+ if (ret)
+ return ret;
- if (iopte_writeable_dirty(pte)) {
- iommu_dirty_bitmap_record(walk_data->dirty,
- walk_data->addr, size);
- if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
- iopte_set_writeable_clean(ptep);
- }
+ if (iopte_leaf(pte, lvl, iop->fmt)) {
walk_data->addr += size;
return 0;
}
- if (WARN_ON(!iopte_table(pte, lvl)))
+ if (!iopte_table(pte, lvl)) {
return -EINVAL;
+ }
ptep = iopte_deref(pte, data);
- return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
+ return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}
-static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
- struct io_pgtable_walk_data *walk_data,
- arm_lpae_iopte *ptep,
- int lvl)
+static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
+ struct io_pgtable_walk_data *walk_data,
+ arm_lpae_iopte *ptep,
+ int lvl)
{
u32 idx;
int max_entries, ret;
@@ -769,7 +819,7 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
- ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
+ ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
@@ -777,6 +827,23 @@ static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
return 0;
}
+static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
+ arm_lpae_iopte *ptep, size_t size)
+{
+ struct iommu_dirty_bitmap *dirty = walk_data->data;
+
+ if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
+ return 0;
+
+ if (iopte_writeable_dirty(*ptep)) {
+ iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
+ if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
+ iopte_set_writeable_clean(ptep);
+ }
+
+ return 0;
+}
+
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
@@ -785,7 +852,9 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
- .dirty = dirty,
+ .iop = &data->iop,
+ .data = dirty,
+ .visit = visit_dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
@@ -800,7 +869,7 @@ static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
- return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
+ return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
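
The refactor above generalises the old dirty-only walker into __arm_lpae_iopte_walk() plus per-caller visit callbacks, so iova_to_phys, the new pgtable_walk op and dirty tracking share a single traversal. A stripped-down, user-space model of that callback pattern follows; a flat two-level int table stands in for the page-table levels and all names are hypothetical.

#include <stdio.h>

#define ENTRIES 4

struct walk_data {
	void *data;				/* visitor-private state */
	int (*visit)(struct walk_data *wd, int lvl, int val);
};

/* Generic walk: call the visitor on every entry of every level and stop
 * as soon as a visitor returns non-zero, like the io-pgtable walker. */
static int walk(struct walk_data *wd, int table[][ENTRIES], int lvl)
{
	for (int i = 0; i < ENTRIES; i++) {
		int ret = wd->visit(wd, lvl, table[lvl][i]);

		if (ret)
			return ret;
	}
	return lvl + 1 < 2 ? walk(wd, table, lvl + 1) : 0;
}

/* One visitor: sum every entry it sees. */
static int visit_sum(struct walk_data *wd, int lvl, int val)
{
	(void)lvl;
	*(int *)wd->data += val;
	return 0;
}

/* Another visitor: stop the walk at the first entry above a threshold. */
static int visit_find(struct walk_data *wd, int lvl, int val)
{
	(void)wd;
	(void)lvl;
	return val > 40 ? -1 : 0;
}

int main(void)
{
	int table[2][ENTRIES] = { { 1, 2, 3, 4 }, { 10, 20, 30, 50 } };
	int sum = 0;
	struct walk_data wd = { .data = &sum, .visit = visit_sum };

	walk(&wd, table, 0);
	printf("sum=%d\n", sum);			/* 120 */

	wd.visit = visit_find;
	printf("found=%d\n", walk(&wd, table, 0));	/* -1: visitor stopped walk */
	return 0;
}
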
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -882,6 +951,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
+ .pgtable_walk = arm_lpae_pgtable_walk,
};
return data;
@@ -1006,18 +1076,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
if (!data)
return NULL;
- /*
- * Concatenate PGDs at level 1 if possible in order to reduce
- * the depth of the stage-2 walk.
- */
- if (data->start_level == 0) {
- unsigned long pgd_pages;
-
- pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
- if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_bits += data->bits_per_level;
- data->start_level++;
- }
+ if (arm_lpae_concat_mandatory(cfg, data)) {
+ if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
+ ARM_LPAE_S2_MAX_CONCAT_PAGES))
+ return NULL;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
/* VTCR */
@@ -1364,15 +1428,14 @@ static int __init arm_lpae_do_selftests(void)
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] __initconst = {
+ static const unsigned int address_size[] __initconst = {
32, 36, 40, 42, 44, 48,
};
- int i, j, pass = 0, fail = 0;
+ int i, j, k, pass = 0, fail = 0;
struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
- .oas = 48,
.coherent_walk = true,
.iommu_dev = &dev,
};
@@ -1381,15 +1444,19 @@ static int __init arm_lpae_do_selftests(void)
set_dev_node(&dev, NUMA_NO_NODE);
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
- for (j = 0; j < ARRAY_SIZE(ias); ++j) {
- cfg.pgsize_bitmap = pgsize[i];
- cfg.ias = ias[j];
- pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
- pgsize[i], ias[j]);
- if (arm_lpae_run_tests(&cfg))
- fail++;
- else
- pass++;
+ for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+ /* Don't use ias > oas as it is not valid for stage-2. */
+ for (k = 0; k <= j; ++k) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = address_size[k];
+ cfg.oas = address_size[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+ pgsize[i], cfg.ias, cfg.oas);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
}
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 599030e1e890..870c3cdbd0f6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2819,7 +2819,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!ops)
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
if (fwspec)
return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
@@ -3312,6 +3312,16 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *blocked_domain = ops->blocked_domain;
+
+ WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain,
+ dev, pasid, domain));
+}
+
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
@@ -3330,11 +3340,9 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain,
err_revert:
last_gdev = device;
for_each_group_device(group, device) {
- const struct iommu_ops *ops = dev_iommu_ops(device->dev);
-
if (device == last_gdev)
break;
- ops->remove_dev_pasid(device->dev, pasid, domain);
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
return ret;
}
@@ -3344,12 +3352,9 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
struct iommu_domain *domain)
{
struct group_device *device;
- const struct iommu_ops *ops;
- for_each_group_device(group, device) {
- ops = dev_iommu_ops(device->dev);
- ops->remove_dev_pasid(device->dev, pasid, domain);
- }
+ for_each_group_device(group, device)
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
/*
@@ -3368,16 +3373,20 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
+ const struct iommu_ops *ops;
int ret;
- if (!domain->ops->set_dev_pasid)
- return -EOPNOTSUPP;
-
if (!group)
return -ENODEV;
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
- pasid == IOMMU_NO_PASID)
+ ops = dev_iommu_ops(dev);
+
+ if (!domain->ops->set_dev_pasid ||
+ !ops->blocked_domain ||
+ !ops->blocked_domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ if (ops != domain->owner || pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
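
With the remove_dev_pasid op gone from intel_iommu_ops (and from mock_ops in the selftest hunks below), the core now tears a PASID down by attaching the driver's blocked domain to it, which is what iommu_remove_dev_pasid() above does. A toy model of that inversion, with made-up domain and ops types:

#include <stdio.h>

struct domain;

struct domain_ops {
	int (*set_dev_pasid)(struct domain *d, int pasid, struct domain *old);
};

struct domain {
	const char *name;
	const struct domain_ops *ops;
};

static int blocked_set_dev_pasid(struct domain *d, int pasid, struct domain *old)
{
	printf("pasid %d: %s -> %s (translation blocked)\n",
	       pasid, old ? old->name : "none", d->name);
	return 0;
}

static const struct domain_ops blocked_ops = {
	.set_dev_pasid = blocked_set_dev_pasid,
};
static struct domain blocked_domain = { "blocked", &blocked_ops };

/* Removal is just "attach the blocked domain for this PASID". */
static void remove_dev_pasid(int pasid, struct domain *old)
{
	blocked_domain.ops->set_dev_pasid(&blocked_domain, pasid, old);
}

int main(void)
{
	struct domain paging = { "paging", NULL };

	remove_dev_pasid(42, &paging);
	return 0;
}
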
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index 1fe804e28a86..d9a937450e55 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;
+ struct list_head free_list;
unsigned long index;
if (!fault)
return;
+ INIT_LIST_HEAD(&free_list);
mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_for_each_entry_safe(group, next, &fault->deliver, node) {
if (group->attach_handle != &handle->handle)
continue;
+ list_move(&group->node, &free_list);
+ }
+ spin_unlock(&fault->lock);
+
+ list_for_each_entry_safe(group, next, &free_list, node) {
list_del(&group->node);
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
@@ -213,6 +221,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
{
struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
struct iopf_group *group, *next;
+ unsigned long index;
/*
* The iommufd object's reference count is zero at this point.
@@ -225,6 +234,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
iopf_free_group(group);
}
+ xa_for_each(&fault->response, index, group) {
+ xa_erase(&fault->response, index);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ xa_destroy(&fault->response);
+ mutex_destroy(&fault->mutex);
}
static void iommufd_compose_fault_message(struct iommu_fault *fault,
@@ -247,7 +263,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
{
size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
struct iommufd_fault *fault = filep->private_data;
- struct iommu_hwpt_pgfault data;
+ struct iommu_hwpt_pgfault data = {};
struct iommufd_device *idev;
struct iopf_group *group;
struct iopf_fault *iopf;
@@ -258,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
return -ESPIPE;
mutex_lock(&fault->mutex);
- while (!list_empty(&fault->deliver) && count > done) {
- group = list_first_entry(&fault->deliver,
- struct iopf_group, node);
-
- if (group->fault_count * fault_size > count - done)
+ while ((group = iommufd_fault_deliver_fetch(fault))) {
+ if (done >= count ||
+ group->fault_count * fault_size > count - done) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
rc = xa_alloc(&fault->response, &group->cookie, group,
xa_limit_32b, GFP_KERNEL);
- if (rc)
+ if (rc) {
+ iommufd_fault_deliver_restore(fault, group);
break;
+ }
idev = to_iommufd_handle(group->attach_handle)->idev;
list_for_each_entry(iopf, &group->faults, list) {
@@ -277,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
group->cookie);
if (copy_to_user(buf + done, &data, fault_size)) {
xa_erase(&fault->response, group->cookie);
+ iommufd_fault_deliver_restore(fault, group);
rc = -EFAULT;
break;
}
done += fault_size;
}
-
- list_del(&group->node);
}
mutex_unlock(&fault->mutex);
@@ -341,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
__poll_t pollflags = EPOLLOUT;
poll_wait(filep, &fault->wait_queue, wait);
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
if (!list_empty(&fault->deliver))
pollflags |= EPOLLIN | EPOLLRDNORM;
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
return pollflags;
}
@@ -386,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
INIT_LIST_HEAD(&fault->deliver);
xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
mutex_init(&fault->mutex);
+ spin_lock_init(&fault->lock);
init_waitqueue_head(&fault->wait_queue);
filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
@@ -434,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
hwpt = group->attach_handle->domain->fault_data;
fault = hwpt->fault;
- mutex_lock(&fault->mutex);
+ spin_lock(&fault->lock);
list_add_tail(&group->node, &fault->deliver);
- mutex_unlock(&fault->mutex);
+ spin_unlock(&fault->lock);
wake_up_interruptible(&fault->wait_queue);
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index ce03c3804651..598be26a14e2 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -140,8 +140,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (ops->domain_alloc_paging_flags) {
- hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
- user_data);
+ hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -280,6 +280,8 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
struct iommufd_hw_pagetable *hwpt;
int rc;
+ if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ return ERR_PTR(-EOPNOTSUPP);
if (!user_data->len)
return ERR_PTR(-EOPNOTSUPP);
if (!viommu->ops || !viommu->ops->alloc_domain_nested)
@@ -296,7 +298,9 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
hwpt_nested->parent = viommu->hwpt;
hwpt->domain =
- viommu->ops->alloc_domain_nested(viommu, flags, user_data);
+ viommu->ops->alloc_domain_nested(viommu,
+ flags & ~IOMMU_HWPT_FAULT_ID_VALID,
+ user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index b6d706cf2c66..0b1bafc7fd99 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -443,14 +443,39 @@ struct iommufd_fault {
struct iommufd_ctx *ictx;
struct file *filep;
- /* The lists of outstanding faults protected by below mutex. */
- struct mutex mutex;
+ spinlock_t lock; /* protects the deliver list */
struct list_head deliver;
+ struct mutex mutex; /* serializes response flows */
struct xarray response;
struct wait_queue_head wait_queue;
};
+/* Fetch the first node out of the fault->deliver list */
+static inline struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+ struct list_head *list = &fault->deliver;
+ struct iopf_group *group = NULL;
+
+ spin_lock(&fault->lock);
+ if (!list_empty(list)) {
+ group = list_first_entry(list, struct iopf_group, node);
+ list_del(&group->node);
+ }
+ spin_unlock(&fault->lock);
+ return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+ struct iopf_group *group)
+{
+ spin_lock(&fault->lock);
+ list_add(&group->node, &fault->deliver);
+ spin_unlock(&fault->lock);
+}
+
struct iommufd_attach_handle {
struct iommu_attach_handle handle;
struct iommufd_device *idev;
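
The fetch/restore helpers above shrink the new spinlock to just the list manipulation: the fault read path pops one group, does the copy-out work with no lock held, and pushes the group back onto the head of the deliver list if anything fails, as the fops_read hunk shows. A user-space sketch of that pop-under-lock, restore-on-failure shape, using a pthread mutex and a trivial stack with invented names:

#include <pthread.h>
#include <stdio.h>

#define DEPTH 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int stack[DEPTH];
static int top;				/* number of queued items */

/* Pop one item under the lock; -1 means the list was empty. */
static int fetch(void)
{
	int v = -1;

	pthread_mutex_lock(&lock);
	if (top > 0)
		v = stack[--top];
	pthread_mutex_unlock(&lock);
	return v;
}

/* Put an item back at the head after a failed consume. */
static void restore(int v)
{
	pthread_mutex_lock(&lock);
	stack[top++] = v;
	pthread_mutex_unlock(&lock);
}

/* Pretend consuming fails for odd values (stands in for copy_to_user etc). */
static int consume(int v)
{
	return (v & 1) ? -1 : 0;
}

int main(void)
{
	int v;

	restore(3);
	restore(2);
	while ((v = fetch()) >= 0) {
		if (consume(v)) {
			restore(v);	/* leave it queued for the next reader */
			break;
		}
		printf("delivered %d\n", v);
	}
	return 0;			/* prints "delivered 2"; 3 stays queued */
}
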
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index ab665cf38ef4..39a86a4a1d3a 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -130,7 +130,7 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
- unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+ unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
}
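
The one-character change above matters because the literal 1 has type int: for a page shift of 32 or more, 1 << pgshift shifts a 32-bit value by at least its width, which is undefined, while 1UL is 64 bits wide on LP64 kernels. A short illustration (the broken form is only described in a comment, not executed; a 64-bit unsigned long is assumed):

#include <stdio.h>

int main(void)
{
	unsigned int pgshift = 34;	/* e.g. a 16 GiB mapped chunk */

	/* (1 << pgshift) would shift a 32-bit int by more than its width:
	 * undefined behaviour, and certainly not 16 GiB. */
	unsigned long pgsize = 1UL << pgshift;

	printf("pgsize = %lu bytes\n", pgsize);	/* 17179869184 */
	return 0;
}
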
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 97c5e3567d33..ccf616462a1c 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -104,7 +104,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
if (wait_event_timeout(ictx->destroy_wait,
refcount_read(&to_destroy->shortterm_users) ==
0,
- msecs_to_jiffies(10000)))
+ msecs_to_jiffies(60000)))
return 0;
pr_crit("Time out waiting for iommufd object to become free\n");
@@ -307,9 +307,9 @@ union ucmd_buffer {
struct iommu_ioas_map map;
struct iommu_ioas_unmap unmap;
struct iommu_option option;
+ struct iommu_vdevice_alloc vdev;
struct iommu_vfio_ioas vfio_ioas;
struct iommu_viommu_alloc viommu;
- struct iommu_vdevice_alloc vdev;
#ifdef CONFIG_IOMMUFD_TEST
struct iommu_test_cmd test;
#endif
@@ -333,8 +333,8 @@ struct iommufd_ioctl_op {
}
static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
- IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc,
- out_fault_fd),
+ IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc,
+ struct iommu_fault_alloc, out_fault_fd),
IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
__reserved),
IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
@@ -355,20 +355,18 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
src_iova),
IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
struct iommu_ioas_iova_ranges, out_iova_alignment),
- IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map,
- iova),
+ IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova),
IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file,
struct iommu_ioas_map_file, iova),
IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
length),
- IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option,
- val64),
+ IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
+ IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
+ struct iommu_vdevice_alloc, virt_id),
IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
__reserved),
IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
struct iommu_viommu_alloc, out_viommu_id),
- IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
- struct iommu_vdevice_alloc, virt_id),
#ifdef CONFIG_IOMMUFD_TEST
IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
#endif
@@ -490,8 +488,8 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_DEVICE] = {
.destroy = iommufd_device_destroy,
},
- [IOMMUFD_OBJ_IOAS] = {
- .destroy = iommufd_ioas_destroy,
+ [IOMMUFD_OBJ_FAULT] = {
+ .destroy = iommufd_fault_destroy,
},
[IOMMUFD_OBJ_HWPT_PAGING] = {
.destroy = iommufd_hwpt_paging_destroy,
@@ -501,15 +499,15 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
.destroy = iommufd_hwpt_nested_destroy,
.abort = iommufd_hwpt_nested_abort,
},
- [IOMMUFD_OBJ_FAULT] = {
- .destroy = iommufd_fault_destroy,
- },
- [IOMMUFD_OBJ_VIOMMU] = {
- .destroy = iommufd_viommu_destroy,
+ [IOMMUFD_OBJ_IOAS] = {
+ .destroy = iommufd_ioas_destroy,
},
[IOMMUFD_OBJ_VDEVICE] = {
.destroy = iommufd_vdevice_destroy,
},
+ [IOMMUFD_OBJ_VIOMMU] = {
+ .destroy = iommufd_viommu_destroy,
+ },
#ifdef CONFIG_IOMMUFD_TEST
[IOMMUFD_OBJ_SELFTEST] = {
.destroy = iommufd_selftest_destroy,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a0de6d6d4e68..d40deb0a4f06 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -311,25 +311,6 @@ static const struct iommu_dirty_ops dirty_ops = {
.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};
-static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
-{
- struct mock_dev *mdev = to_mock_dev(dev);
- struct mock_iommu_domain *mock;
-
- mock = kzalloc(sizeof(*mock), GFP_KERNEL);
- if (!mock)
- return NULL;
- mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
- mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
- mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
- if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
- mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
- mock->domain.ops = mock_ops.default_domain_ops;
- mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- xa_init(&mock->pfns);
- return &mock->domain;
-}
-
static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
@@ -385,21 +366,30 @@ mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_NEST_PARENT;
- bool no_dirty_ops = to_mock_dev(dev)->flags &
- MOCK_FLAGS_DEVICE_NO_DIRTY;
- struct iommu_domain *domain;
+ struct mock_dev *mdev = to_mock_dev(dev);
+ bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+ struct mock_iommu_domain *mock;
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(dev);
- if (!domain)
+ mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+ if (!mock)
return ERR_PTR(-ENOMEM);
+ mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+ mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
+ mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
+ mock->domain.ops = mock_ops.default_domain_ops;
+ mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ xa_init(&mock->pfns);
+
if (has_dirty_flag)
- domain->dirty_ops = &dirty_ops;
- return domain;
+ mock->domain.dirty_ops = &dirty_ops;
+ return &mock->domain;
}
static void mock_domain_free(struct iommu_domain *domain)
@@ -595,7 +585,7 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
struct mock_iommu_domain_nested *mock_nested;
- if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ if (flags)
return ERR_PTR(-EOPNOTSUPP);
mock_nested = __mock_domain_alloc_nested(user_data);
@@ -713,7 +703,6 @@ static const struct iommu_ops mock_ops = {
.owner = THIS_MODULE,
.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
.hw_info = mock_domain_hw_info,
- .domain_alloc_paging = mock_domain_alloc_paging,
.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
.domain_alloc_nested = mock_domain_alloc_nested,
.capable = mock_domain_capable,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index ce40f0a419ea..2769e4544038 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -725,47 +725,32 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu->dev = &pdev->dev;
INIT_LIST_HEAD(&iommu->ctx_list);
- iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ iommu->pclk = devm_clk_get_prepared(iommu->dev, "smmu_pclk");
if (IS_ERR(iommu->pclk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
"could not get smmu_pclk\n");
- ret = clk_prepare(iommu->pclk);
- if (ret)
- return dev_err_probe(iommu->dev, ret,
- "could not prepare smmu_pclk\n");
-
- iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
- if (IS_ERR(iommu->clk)) {
- clk_unprepare(iommu->pclk);
+ iommu->clk = devm_clk_get_prepared(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
"could not get iommu_clk\n");
- }
-
- ret = clk_prepare(iommu->clk);
- if (ret) {
- clk_unprepare(iommu->pclk);
- return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
- goto fail;
+ return ret;
}
ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
- if (iommu->irq < 0) {
- ret = -ENODEV;
- goto fail;
- }
+ if (iommu->irq < 0)
+ return -ENODEV;
ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
if (ret) {
dev_err(iommu->dev, "could not get ncb\n");
- goto fail;
+ return ret;
}
iommu->ncb = val;
@@ -780,8 +765,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (!par) {
pr_err("Invalid PAR value detected\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
@@ -791,7 +775,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
- goto fail;
+ return ret;
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
@@ -800,23 +784,19 @@ static int msm_iommu_probe(struct platform_device *pdev)
"msm-smmu.%pa", &ioaddr);
if (ret) {
pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
- goto fail;
+ return ret;
}
ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
- goto fail;
+ return ret;
}
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
return ret;
-fail:
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
- return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
@@ -824,20 +804,11 @@ static const struct of_device_id msm_iommu_dt_match[] = {
{}
};
-static void msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
-
- clk_unprepare(iommu->clk);
- clk_unprepare(iommu->pclk);
-}
-
static struct platform_driver msm_iommu_driver = {
.driver = {
.name = "msm_iommu",
.of_match_table = msm_iommu_dt_match,
},
.probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index ab60901f8f92..034b0e670384 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -510,7 +511,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
bank->parent_dev,
"fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
- layer, write ? "write" : "read");
+ layer, str_write_read(write));
}
/* Interrupt clear */
@@ -602,7 +603,7 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ str_enable_disable(enable), dev_name(larb_mmu->dev),
portid_msk, regionid, upper_32_bits(region->iova_base));
if (enable)
@@ -630,8 +631,8 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
}
if (ret)
dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
- enable ? "enable" : "disable",
- dev_name(data->dev), portid_msk, ret);
+ str_enable_disable(enable), dev_name(data->dev),
+ portid_msk, ret);
}
return ret;
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b6de1ca00cef..a565b9e40f4a 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
@@ -243,7 +244,7 @@ static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
- enable ? "enable" : "disable", portid);
+ str_enable_disable(enable), portid);
if (enable)
larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e7a6a1611d19..97987cd78da9 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,8 +29,6 @@ static int of_iommu_xlate(struct device *dev,
return -ENODEV;
ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np));
- if (ret == -EPROBE_DEFER)
- return driver_deferred_probe_check_state(dev);
if (ret)
return ret;
diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c
index c7a89143014c..d82d2b00904c 100644
--- a/drivers/iommu/riscv/iommu-pci.c
+++ b/drivers/iommu/riscv/iommu-pci.c
@@ -101,6 +101,13 @@ static void riscv_iommu_pci_remove(struct pci_dev *pdev)
riscv_iommu_remove(iommu);
}
+static void riscv_iommu_pci_shutdown(struct pci_dev *pdev)
+{
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+
+ riscv_iommu_disable(iommu);
+}
+
static const struct pci_device_id riscv_iommu_pci_tbl[] = {
{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0},
{PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0},
@@ -112,6 +119,7 @@ static struct pci_driver riscv_iommu_pci_driver = {
.id_table = riscv_iommu_pci_tbl,
.probe = riscv_iommu_pci_probe,
.remove = riscv_iommu_pci_remove,
+ .shutdown = riscv_iommu_pci_shutdown,
.driver = {
.suppress_bind_attrs = true,
},
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 382ba2841849..725e919b97ef 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -11,18 +11,43 @@
*/
#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "iommu-bits.h"
#include "iommu.h"
+static void riscv_iommu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct riscv_iommu_device *iommu = dev_get_drvdata(dev);
+ u16 idx = desc->msi_index;
+ u64 addr;
+
+ addr = ((u64)msg->address_hi << 32) | msg->address_lo;
+
+ if (addr != (addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR)) {
+ dev_err_once(dev,
+ "uh oh, the IOMMU can't send MSIs to 0x%llx, sending to 0x%llx instead\n",
+ addr, addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR);
+ }
+
+ addr &= RISCV_IOMMU_MSI_CFG_TBL_ADDR;
+
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(idx), addr);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(idx), msg->data);
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(idx), 0);
+}
+
static int riscv_iommu_platform_probe(struct platform_device *pdev)
{
+ enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
struct resource *res = NULL;
- int vec;
+ int vec, ret;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -40,16 +65,6 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES);
iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
- /* For now we only support WSI */
- switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) {
- case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
- case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
- break;
- default:
- return dev_err_probe(dev, -ENODEV,
- "unable to use wire-signaled interrupts\n");
- }
-
iommu->irqs_count = platform_irq_count(pdev);
if (iommu->irqs_count <= 0)
return dev_err_probe(dev, -ENODEV,
@@ -57,13 +72,58 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT)
iommu->irqs_count = RISCV_IOMMU_INTR_COUNT;
- for (vec = 0; vec < iommu->irqs_count; vec++)
- iommu->irqs[vec] = platform_get_irq(pdev, vec);
+ igs = FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps);
+ switch (igs) {
+ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
+ case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
+ if (is_of_node(dev->fwnode))
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+
+ if (!dev_get_msi_domain(dev)) {
+ dev_warn(dev, "failed to find an MSI domain\n");
+ goto msi_fail;
+ }
+
+ ret = platform_device_msi_init_and_alloc_irqs(dev, iommu->irqs_count,
+ riscv_iommu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ goto msi_fail;
+ }
+
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = msi_get_virq(dev, vec);
+
+ /* Enable message-signaled interrupts, fctl.WSI */
+ if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) {
+ iommu->fctl ^= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+
+ dev_info(dev, "using MSIs\n");
+ break;
+
+msi_fail:
+ if (igs != RISCV_IOMMU_CAPABILITIES_IGS_BOTH) {
+ return dev_err_probe(dev, -ENODEV,
+ "unable to use wire-signaled interrupts\n");
+ }
- /* Enable wire-signaled interrupts, fctl.WSI */
- if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
- iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
- riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ fallthrough;
+
+ case RISCV_IOMMU_CAPABILITIES_IGS_WSI:
+ for (vec = 0; vec < iommu->irqs_count; vec++)
+ iommu->irqs[vec] = platform_get_irq(pdev, vec);
+
+ /* Enable wire-signaled interrupts, fctl.WSI */
+ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) {
+ iommu->fctl |= RISCV_IOMMU_FCTL_WSI;
+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
+ }
+ dev_info(dev, "using wire-signaled interrupts\n");
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "invalid IGS\n");
}
return riscv_iommu_init(iommu);
@@ -71,7 +131,18 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
static void riscv_iommu_platform_remove(struct platform_device *pdev)
{
- riscv_iommu_remove(dev_get_drvdata(&pdev->dev));
+ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
+ bool msi = !(iommu->fctl & RISCV_IOMMU_FCTL_WSI);
+
+ riscv_iommu_remove(iommu);
+
+ if (msi)
+ platform_device_msi_free_irqs_all(&pdev->dev);
+};
+
+static void riscv_iommu_platform_shutdown(struct platform_device *pdev)
+{
+ riscv_iommu_disable(dev_get_drvdata(&pdev->dev));
};
static const struct of_device_id riscv_iommu_of_match[] = {
@@ -82,6 +153,7 @@ static const struct of_device_id riscv_iommu_of_match[] = {
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
+ .shutdown = riscv_iommu_platform_shutdown,
.driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
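For riscv_iommu_write_msi_msg() above: the MSI doorbell address arrives split into address_hi/address_lo, is recombined into a 64-bit value, masked to the bits the MSI configuration table can hold, and then written to the per-vector table registers. A hedged, self-contained sketch of just the address handling (the mask value below is an assumption standing in for RISCV_IOMMU_MSI_CFG_TBL_ADDR from iommu-bits.h; the print is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* assumed stand-in for RISCV_IOMMU_MSI_CFG_TBL_ADDR */
	#define MSI_CFG_TBL_ADDR_MASK 0x00fffffffffffffcULL

	int main(void)
	{
		uint32_t address_hi = 0x00000001, address_lo = 0x2000; /* example doorbell */
		uint64_t addr = ((uint64_t)address_hi << 32) | address_lo;

		if (addr != (addr & MSI_CFG_TBL_ADDR_MASK))
			fprintf(stderr, "doorbell 0x%llx not representable, truncating\n",
				(unsigned long long)addr);

		addr &= MSI_CFG_TBL_ADDR_MASK; /* value programmed into MSI_CFG_TBL_ADDR(idx) */
		printf("programmed address: 0x%llx\n", (unsigned long long)addr);
		return 0;
	}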
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8a05def774bd..8f049d4a0e2c 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -240,6 +240,12 @@ static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu,
return rc;
}
+ /* Empty queue before enabling it */
+ if (queue->qid == RISCV_IOMMU_INTR_CQ)
+ riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0);
+ else
+ riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0);
+
/*
* Enable queue with interrupts, clear any memory fault if any.
* Wait for the hardware to acknowledge request and activate queue
@@ -645,9 +651,11 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
* This is best effort IOMMU translation shutdown flow.
* Disable IOMMU without waiting for hardware response.
*/
-static void riscv_iommu_disable(struct riscv_iommu_device *iommu)
+void riscv_iommu_disable(struct riscv_iommu_device *iommu)
{
- riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0);
+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
+ FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE,
+ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE));
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0);
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0);
@@ -1270,7 +1278,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
dma_addr_t iova)
{
struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
- unsigned long pte_size;
+ size_t pte_size;
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h
index b1c4664542b4..46df79dd5495 100644
--- a/drivers/iommu/riscv/iommu.h
+++ b/drivers/iommu/riscv/iommu.h
@@ -64,6 +64,7 @@ struct riscv_iommu_device {
int riscv_iommu_init(struct riscv_iommu_device *iommu);
void riscv_iommu_remove(struct riscv_iommu_device *iommu);
+void riscv_iommu_disable(struct riscv_iommu_device *iommu);
#define riscv_iommu_readl(iommu, addr) \
readl_relaxed((iommu)->reg + (addr))
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4b369419b32c..323cc665c357 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include "iommu-pages.h"
@@ -611,7 +612,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
dev_err(iommu->dev, "Page fault at %pad of type %s\n",
&iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ str_write_read(flags == IOMMU_FAULT_WRITE));
log_iova(iommu, i, iova);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index be063bfb50c4..c11b9965c4ad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -169,6 +169,7 @@ config IXP4XX_IRQ
config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
+ depends on MCHP_LAN966X_PCI || COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index da5250f0155c..2b1684c60e3c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index b337f6c05f18..4eebed39880a 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = d->host_data;
+ struct msi_domain_info *info = d->host_data;
+ struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 8e76d2913e6b..4441ffe149ea 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
- seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+ seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}
static struct irq_chip partition_irq_chip = {
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index c5c2e6929a2f..275df5005705 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -27,7 +27,7 @@ static void imsic_ipi_send(unsigned int cpu)
{
struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
- writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+ writel(IMSIC_IPI_ID, local->msi_va);
}
static void imsic_ipi_starting_cpu(void)
diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
index b0e366ade427..8ff6e7a1363b 100644
--- a/drivers/irqchip/irq-thead-c900-aclint-sswi.c
+++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
@@ -31,7 +31,7 @@ static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
static void thead_aclint_sswi_ipi_send(unsigned int cpu)
{
- writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu));
+ writel(0x1, per_cpu(sswi_cpu_regs, cpu));
}
static void thead_aclint_sswi_ipi_clear(void)
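The two IPI hunks above (imsic-early and thead-c900-aclint-sswi) switch the doorbell store from writel_relaxed() to writel(). The difference matters because writel() implies a write barrier before the MMIO store, so data published by the sending CPU is ordered before the IPI the target CPU receives. A hedged kernel-style sketch of the pattern (struct work_item and the doorbell pointer are hypothetical):

	#include <linux/io.h>
	#include <linux/compiler.h>

	struct work_item { int ready; }; /* hypothetical flag shared with the target CPU */

	static void example_send_ipi(void __iomem *doorbell, struct work_item *item)
	{
		/* 1. publish the data the remote CPU will consume */
		WRITE_ONCE(item->ready, 1);

		/*
		 * 2. writel() orders the store above before the doorbell write;
		 *    writel_relaxed() would give no such guarantee.
		 */
		writel(0x1, doorbell);
	}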
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 7d3b24c8ecae..4fe1a9c0bc1b 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -438,7 +438,7 @@ static int omnia_mcu_get_features(const struct i2c_client *mcu_client)
return reply;
}
-static int omnia_match_mcu_client(struct device *dev, void *data)
+static int omnia_match_mcu_client(struct device *dev, const void *data)
{
struct i2c_client *client;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index a01bc5090cdf..a1534cc6c641 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -492,11 +492,7 @@ int __init smu_init (void)
goto fail_np;
}
- smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
- if (!smu)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct smu_device));
-
+ smu = memblock_alloc_or_panic(sizeof(struct smu_device), SMP_CACHE_BYTES);
spin_lock_init(&smu->lock);
INIT_LIST_HEAD(&smu->cmd_list);
INIT_LIST_HEAD(&smu->cmd_i2c_list);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 8ecba7fb999e..ed52db272f4d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,17 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config EXYNOS_MBOX
+ tristate "Exynos Mailbox"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ help
+ Say Y here if you want to build the Samsung Exynos Mailbox controller
+ driver. The controller has 16 flag bits for hardware interrupt
+ generation and a shared register for passing mailbox messages.
+ When the controller is used by the ACPM interface the shared register
+ is ignored and the mailbox controller acts as a doorbell that raises
+ the interrupt to the ACPM firmware.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -178,6 +189,19 @@ config POLARFIRE_SOC_MAILBOX
If unsure, say N.
+config MCHP_SBI_IPC_MBOX
+ tristate "Microchip Inter-processor Communication (IPC) SBI driver"
+ depends on RISCV_SBI || COMPILE_TEST
+ depends on ARCH_MICROCHIP
+ help
+ Mailbox implementation for Microchip devices with an
+ Inter-processor communication (IPC) controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mailbox-mchp-ipc-sbi.
+
+ If unsure, say N.
+
config QCOM_APCS_IPC
tristate "Qualcomm APCS IPC driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5f4f5b0ce2cc..9a1542b55539 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
@@ -45,6 +47,8 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+obj-$(CONFIG_MCHP_SBI_IPC_MBOX) += mailbox-mchp-ipc-sbi.o
+
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c
new file mode 100644
index 000000000000..20049f0ec5ff
--- /dev/null
+++ b/drivers/mailbox/exynos-mailbox.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define EXYNOS_MBOX_MCUCTRL 0x0 /* Mailbox Control Register */
+#define EXYNOS_MBOX_INTCR0 0x24 /* Interrupt Clear Register 0 */
+#define EXYNOS_MBOX_INTMR0 0x28 /* Interrupt Mask Register 0 */
+#define EXYNOS_MBOX_INTSR0 0x2c /* Interrupt Status Register 0 */
+#define EXYNOS_MBOX_INTMSR0 0x30 /* Interrupt Mask Status Register 0 */
+#define EXYNOS_MBOX_INTGR1 0x40 /* Interrupt Generation Register 1 */
+#define EXYNOS_MBOX_INTMR1 0x48 /* Interrupt Mask Register 1 */
+#define EXYNOS_MBOX_INTSR1 0x4c /* Interrupt Status Register 1 */
+#define EXYNOS_MBOX_INTMSR1 0x50 /* Interrupt Mask Status Register 1 */
+
+#define EXYNOS_MBOX_INTMR0_MASK GENMASK(15, 0)
+#define EXYNOS_MBOX_INTGR1_MASK GENMASK(15, 0)
+
+#define EXYNOS_MBOX_CHAN_COUNT HWEIGHT32(EXYNOS_MBOX_INTGR1_MASK)
+
+/**
+ * struct exynos_mbox - driver's private data.
+ * @regs: mailbox registers base address.
+ * @mbox: pointer to the mailbox controller.
+ * @pclk: pointer to the mailbox peripheral clock.
+ */
+struct exynos_mbox {
+ void __iomem *regs;
+ struct mbox_controller *mbox;
+ struct clk *pclk;
+};
+
+static int exynos_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct exynos_mbox *exynos_mbox = dev_get_drvdata(dev);
+ struct exynos_mbox_msg *msg = data;
+
+ if (msg->chan_id >= exynos_mbox->mbox->num_chans) {
+ dev_err(dev, "Invalid channel ID %d\n", msg->chan_id);
+ return -EINVAL;
+ }
+
+ if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) {
+ dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type);
+ return -EINVAL;
+ };
+
+ writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops exynos_mbox_chan_ops = {
+ .send_data = exynos_mbox_send_data,
+};
+
+static struct mbox_chan *exynos_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int i;
+
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Return the first available channel. When we don't pass the
+ * channel ID from device tree, each channel populated by the driver is
+ * just a software construct or a virtual channel. We use 'void *data'
+ * in send_data() to pass the channel identifiers.
+ */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (mbox->chans[i].cl == NULL)
+ return &mbox->chans[i];
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct of_device_id exynos_mbox_match[] = {
+ { .compatible = "google,gs101-mbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_mbox_match);
+
+static int exynos_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_mbox *exynos_mbox;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int i;
+
+ exynos_mbox = devm_kzalloc(dev, sizeof(*exynos_mbox), GFP_KERNEL);
+ if (!exynos_mbox)
+ return -ENOMEM;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, EXYNOS_MBOX_CHAN_COUNT, sizeof(*chans),
+ GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ exynos_mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(exynos_mbox->regs))
+ return PTR_ERR(exynos_mbox->regs);
+
+ exynos_mbox->pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(exynos_mbox->pclk))
+ return dev_err_probe(dev, PTR_ERR(exynos_mbox->pclk),
+ "Failed to enable clock.\n");
+
+ mbox->num_chans = EXYNOS_MBOX_CHAN_COUNT;
+ mbox->chans = chans;
+ mbox->dev = dev;
+ mbox->ops = &exynos_mbox_chan_ops;
+ mbox->of_xlate = exynos_mbox_of_xlate;
+
+ for (i = 0; i < EXYNOS_MBOX_CHAN_COUNT; i++)
+ chans[i].mbox = mbox;
+
+ exynos_mbox->mbox = mbox;
+
+ platform_set_drvdata(pdev, exynos_mbox);
+
+ /* Mask out all interrupts. We support just polling channels for now. */
+ writel(EXYNOS_MBOX_INTMR0_MASK, exynos_mbox->regs + EXYNOS_MBOX_INTMR0);
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static struct platform_driver exynos_mbox_driver = {
+ .probe = exynos_mbox_probe,
+ .driver = {
+ .name = "exynos-acpm-mbox",
+ .of_match_table = exynos_mbox_match,
+ },
+};
+module_platform_driver(exynos_mbox_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos mailbox driver");
+MODULE_LICENSE("GPL");
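A sketch of how a consumer might ring a doorbell through this controller, based only on the struct exynos_mbox_msg fields that exynos_mbox_send_data() above inspects; the client device, the chosen channel bit, and the error handling are hypothetical:

	#include <linux/mailbox_client.h>
	#include <linux/mailbox/exynos-message.h>

	/* hypothetical client: grab any free channel and raise doorbell bit 3 */
	static int example_ring_doorbell(struct device *dev)
	{
		struct mbox_client cl = { .dev = dev, .tx_block = false };
		struct exynos_mbox_msg msg = {
			.chan_id   = 3, /* interrupt/flag bit to raise */
			.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL,
		};
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &msg); /* ends up in exynos_mbox_send_data() */
		mbox_free_channel(chan);
		return ret < 0 ? ret : 0;
	}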
diff --git a/drivers/mailbox/mailbox-mchp-ipc-sbi.c b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
new file mode 100644
index 000000000000..a6e52009a424
--- /dev/null
+++ b/drivers/mailbox/mailbox-mchp-ipc-sbi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Inter-Processor communication (IPC) driver
+ *
+ * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
+ *
+ * Author: Valentina Fernandez <valentina.fernandezalanis@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/mchp-ipc.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define IRQ_STATUS_BITS 12
+#define NUM_CHANS_PER_CLUSTER 5
+#define IPC_DMA_BIT_MASK 32
+#define SBI_EXT_MICROCHIP_TECHNOLOGY (SBI_EXT_VENDOR_START | \
+ MICROCHIP_VENDOR_ID)
+
+enum {
+ SBI_EXT_IPC_PROBE = 0x100,
+ SBI_EXT_IPC_CH_INIT,
+ SBI_EXT_IPC_SEND,
+ SBI_EXT_IPC_RECEIVE,
+ SBI_EXT_IPC_STATUS,
+};
+
+enum ipc_hw {
+ MIV_IHC,
+};
+
+/**
+ * struct mchp_ipc_mbox_info - IPC probe message format
+ *
+ * @hw_type: IPC implementation available in the hardware
+ * @num_channels: number of IPC channels available in the hardware
+ *
+ * Used to retrieve information on the IPC implementation
+ * using the SBI_EXT_IPC_PROBE SBI function id.
+ */
+struct mchp_ipc_mbox_info {
+ enum ipc_hw hw_type;
+ u8 num_channels;
+};
+
+/**
+ * struct mchp_ipc_init - IPC channel init message format
+ *
+ * @max_msg_size: maximum message size in bytes of a given channel
+ *
+ * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
+ * the max message size in bytes of the initialized channel.
+ */
+struct mchp_ipc_init {
+ u16 max_msg_size;
+};
+
+/**
+ * struct mchp_ipc_status - IPC status message format
+ *
+ * @status: interrupt status for all channels associated to a cluster
+ * @cluster: specifies the cluster instance that originated an irq
+ *
+ * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
+ * the message present and message clear interrupt status for all the
+ * channels associated to a cluster.
+ */
+struct mchp_ipc_status {
+ u32 status;
+ u8 cluster;
+};
+
+/**
+ * struct mchp_ipc_sbi_msg - IPC SBI payload message
+ *
+ * @buf_addr: physical address where the received data should be copied to
+ * @size: maximum size (in bytes) that can be stored in the buffer pointed to by @buf_addr
+ * @irq_type: mask representing the irq types that triggered an irq
+ *
+ * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function
+ * ids to send/receive a message from an associated processor using
+ * the IPC.
+ */
+struct mchp_ipc_sbi_msg {
+ u64 buf_addr;
+ u16 size;
+ u8 irq_type;
+};
+
+struct mchp_ipc_cluster_cfg {
+ void *buf_base;
+ phys_addr_t buf_base_addr;
+ int irq;
+};
+
+struct mchp_ipc_sbi_mbox {
+ struct device *dev;
+ struct mbox_chan *chans;
+ struct mchp_ipc_cluster_cfg *cluster_cfg;
+ void *buf_base;
+ unsigned long buf_base_addr;
+ struct mbox_controller controller;
+ enum ipc_hw hw_type;
+};
+
+static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel,
+ address, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static int mchp_ipc_sbi_send(u32 command, unsigned long address)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address,
+ 0, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+
+static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct mchp_ipc_sbi_mbox, controller);
+}
+
+static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg request;
+
+ request.buf_addr = chan_info->msg_buf_rx_addr;
+ request.size = chan_info->max_msg_size;
+ memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg));
+}
+
+static inline void mchp_ipc_process_received_data(struct mbox_chan *chan,
+ struct mchp_ipc_msg *ipc_msg)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_msg sbi_msg;
+
+ memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg));
+ ipc_msg->buf = (u32 *)chan_info->msg_buf_rx;
+ ipc_msg->size = sbi_msg.size;
+}
+
+static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct mchp_ipc_sbi_chan *chan_info;
+ struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data;
+ struct mchp_ipc_msg ipc_msg;
+ struct mchp_ipc_status status_msg;
+ int ret;
+ unsigned long hartid;
+ u32 i, chan_index, chan_id;
+
+ /* Find out the hart that originated the irq */
+ for_each_online_cpu(i) {
+ hartid = cpuid_to_hartid_map(i);
+ if (irq == ipc->cluster_cfg[hartid].irq)
+ break;
+ }
+
+ status_msg.cluster = hartid;
+ memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status));
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr);
+ if (ret < 0) {
+ dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status));
+
+ /*
+ * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify
+ * the channel(s) that have a message to be processed/acknowledged.
+ * The bits are organized in alternating format, where each pair of bits represents
+ * the status of the message present and message clear interrupts for each cluster/hart
+ * (from hart 0 to hart 5). Each cluster can have up to 5 fixed channels associated.
+ */
+
+ for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) {
+ /* Find out the destination hart that triggered the interrupt */
+ chan_index = i / 2;
+
+ /*
+ * The IP has no loopback channels, so we need to decrement the index when
+ * the target hart has a greater index than our own
+ */
+ if (chan_index >= status_msg.cluster)
+ chan_index--;
+
+ /*
+ * Calculate the channel id given the hart and channel index. Channel IDs
+ * are unique across all clusters of an IPC, and iterate contiguously
+ * across all clusters.
+ */
+ chan_id = status_msg.cluster * (NUM_CHANS_PER_CLUSTER + chan_index);
+
+ chan = &ipc->chans[chan_id];
+ chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ if (i % 2 == 0) {
+ mchp_ipc_prepare_receive_req(chan);
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ if (ret < 0)
+ continue;
+
+ mchp_ipc_process_received_data(chan, &ipc_msg);
+ mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg);
+
+ } else {
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
+ chan_info->buf_base_rx_addr);
+ mbox_chan_txdone(&ipc->chans[chan_id], ret);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int mchp_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ const struct mchp_ipc_msg *msg = data;
+ struct mchp_ipc_sbi_msg sbi_payload;
+
+ memcpy(chan_info->msg_buf_tx, msg->buf, msg->size);
+ sbi_payload.buf_addr = chan_info->msg_buf_tx_addr;
+ sbi_payload.size = msg->size;
+ memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload));
+
+ return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr);
+}
+
+static int mchp_ipc_startup(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox);
+ struct mchp_ipc_init ch_init_msg;
+ int ret;
+
+ /*
+ * The TX base buffer is used to transmit two types of messages:
+ * - struct mchp_ipc_init to initialize the channel
+ * - struct mchp_ipc_sbi_msg to transmit user data/payload
+ * Ensure the TX buffer size is large enough to accommodate either message type.
+ */
+ size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg));
+
+ chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx);
+
+ chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL);
+ if (!chan_info->buf_base_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_tx;
+ }
+
+ chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx);
+
+ ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id,
+ chan_info->buf_base_tx_addr);
+ if (ret < 0) {
+ dev_err(ipc->dev, "channel %u init failed\n", chan_info->id);
+ goto fail_free_buf_base_rx;
+ }
+
+ memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init));
+ chan_info->max_msg_size = ch_init_msg.max_msg_size;
+
+ chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_tx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_base_rx;
+ }
+
+ chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx);
+
+ chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
+ if (!chan_info->msg_buf_rx) {
+ ret = -ENOMEM;
+ goto fail_free_buf_msg_tx;
+ }
+
+ chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx);
+
+ switch (ipc->hw_type) {
+ case MIV_IHC:
+ return 0;
+ default:
+ goto fail_free_buf_msg_rx;
+ }
+
+ if (ret) {
+ dev_err(ipc->dev, "failed to register interrupt(s)\n");
+ goto fail_free_buf_msg_rx;
+ }
+
+ return ret;
+
+fail_free_buf_msg_rx:
+ kfree(chan_info->msg_buf_rx);
+fail_free_buf_msg_tx:
+ kfree(chan_info->msg_buf_tx);
+fail_free_buf_base_rx:
+ kfree(chan_info->buf_base_rx);
+fail_free_buf_base_tx:
+ kfree(chan_info->buf_base_tx);
+fail:
+ return ret;
+}
+
+static void mchp_ipc_shutdown(struct mbox_chan *chan)
+{
+ struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
+
+ kfree(chan_info->buf_base_tx);
+ kfree(chan_info->buf_base_rx);
+ kfree(chan_info->msg_buf_tx);
+ kfree(chan_info->msg_buf_rx);
+}
+
+static const struct mbox_chan_ops mchp_ipc_ops = {
+ .startup = mchp_ipc_startup,
+ .send_data = mchp_ipc_send_data,
+ .shutdown = mchp_ipc_shutdown,
+};
+
+static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller);
+ unsigned int chan_id = spec->args[0];
+
+ if (chan_id >= ipc->controller.num_chans) {
+ dev_err(ipc->dev, "invalid channel id %d\n", chan_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ipc->chans[chan_id];
+}
+
+static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc)
+{
+ struct platform_device *pdev = to_platform_device(ipc->dev);
+ char *irq_name;
+ int cpuid, ret;
+ unsigned long hartid;
+ bool irq_found = false;
+
+ for_each_online_cpu(cpuid) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid);
+ ret = platform_get_irq_byname_optional(pdev, irq_name);
+ if (ret <= 0)
+ continue;
+
+ ipc->cluster_cfg[hartid].irq = ret;
+ ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq,
+ mchp_ipc_cluster_aggr_isr, IRQF_SHARED,
+ "miv-ihc-irq", ipc);
+ if (ret)
+ return ret;
+
+ ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev,
+ sizeof(struct mchp_ipc_status),
+ GFP_KERNEL);
+
+ if (!ipc->cluster_cfg[hartid].buf_base)
+ return -ENOMEM;
+
+ ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base);
+
+ irq_found = true;
+ }
+
+ return irq_found;
+}
+
+static int mchp_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mchp_ipc_mbox_info ipc_info;
+ struct mchp_ipc_sbi_mbox *ipc;
+ struct mchp_ipc_sbi_chan *priv;
+ bool irq_avail = false;
+ int ret;
+ u32 chan_id;
+
+ ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY);
+ if (ret <= 0)
+ return dev_err_probe(dev, ret, "Microchip SBI extension not detected\n");
+
+ ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ipc);
+
+ ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL);
+ if (!ipc->buf_base)
+ return -ENOMEM;
+
+ ipc->buf_base_addr = __pa(ipc->buf_base);
+
+ ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "could not probe IPC SBI service\n");
+
+ memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info));
+ ipc->controller.num_chans = ipc_info.num_channels;
+ ipc->hw_type = ipc_info.hw_type;
+
+ ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL);
+ if (!ipc->chans)
+ return -ENOMEM;
+
+ ipc->dev = dev;
+ ipc->controller.txdone_irq = true;
+ ipc->controller.dev = ipc->dev;
+ ipc->controller.ops = &mchp_ipc_ops;
+ ipc->controller.chans = ipc->chans;
+ ipc->controller.of_xlate = mchp_ipc_mbox_xlate;
+
+ for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipc->chans[chan_id].con_priv = priv;
+ priv->id = chan_id;
+ }
+
+ if (ipc->hw_type == MIV_IHC) {
+ ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(),
+ sizeof(struct mchp_ipc_cluster_cfg),
+ GFP_KERNEL);
+ if (!ipc->cluster_cfg)
+ return -ENOMEM;
+
+ if (mchp_ipc_get_cluster_aggr_irq(ipc))
+ irq_avail = true;
+ }
+
+ if (!irq_avail)
+ return dev_err_probe(dev, -ENODEV, "missing interrupt property\n");
+
+ ret = devm_mbox_controller_register(dev, &ipc->controller);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Inter-Processor communication (IPC) registration failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id mchp_ipc_of_match[] = {
+ {.compatible = "microchip,sbi-ipc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp_ipc_of_match);
+
+static struct platform_driver mchp_ipc_driver = {
+ .driver = {
+ .name = "microchip_ipc",
+ .of_match_table = mchp_ipc_of_match,
+ },
+ .probe = mchp_ipc_probe,
+};
+
+module_platform_driver(mchp_ipc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Valentina Fernandez <valentina.fernandezalanis@microchip.com>");
+MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver");
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 4df546e3b7ea..d5d9effece97 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -251,7 +251,7 @@ static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform
return PTR_ERR(mbox->sysreg_scb);
mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->ctrl_base))
+ if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
return 0;
diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
index 4e84640ac3b8..a6b2aa9ae952 100644
--- a/drivers/mailbox/mailbox-th1520.c
+++ b/drivers/mailbox/mailbox-th1520.c
@@ -41,7 +41,7 @@
#ifdef CONFIG_PM_SLEEP
/* store MBOX context across system-wide suspend/resume transitions */
struct th1520_mbox_context {
- u32 intr_mask[TH_1520_MBOX_CHANS - 1];
+ u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif
@@ -387,8 +387,10 @@ static void __iomem *th1520_map_mmio(struct platform_device *pdev,
mapped = devm_ioremap(&pdev->dev, res->start + offset,
resource_size(res) - offset);
- if (IS_ERR(mapped))
+ if (!mapped) {
dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
+ return ERR_PTR(-ENOMEM);
+ }
return mapped;
}
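The th1520 fix above reflects a recurring pattern: devm_ioremap() returns NULL on failure rather than an ERR_PTR, so testing its result with IS_ERR() never fires. A minimal sketch of the corrected check (the helper name and offset handling are placeholders):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* hypothetical helper following the corrected pattern */
	static void __iomem *example_map(struct platform_device *pdev,
					 struct resource *res, resource_size_t offset)
	{
		void __iomem *base;

		base = devm_ioremap(&pdev->dev, res->start + offset,
				    resource_size(res) - offset);
		if (!base)			/* NULL, not ERR_PTR, on failure */
			return ERR_PTR(-ENOMEM);

		return base;
	}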
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f0d1fc0fb9ff..11c41e935a36 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -157,6 +157,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq5424-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 14c7907c6632..0b17a38ea6bf 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -14,6 +14,7 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
/* IPCC Register offsets */
+#define IPCC_REG_CONFIG 0x08
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
@@ -21,6 +22,7 @@
#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
#define IPCC_REG_CLIENT_CLEAR 0x38
+#define IPCC_CLEAR_ON_RECV_RD BIT(0)
#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
@@ -274,6 +276,7 @@ static int qcom_ipcc_pm_resume(struct device *dev)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ u32 config_value;
static int id;
char *name;
int ret;
@@ -288,6 +291,19 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (IS_ERR(ipcc->base))
return PTR_ERR(ipcc->base);
+ /*
+ * It is possible that boot firmware is using the same IPCC instance
+ * as the HLOS and has kept CLEAR_ON_RECV_RD set, which means the
+ * interrupt pending registers are cleared when RECV_ID is read.
+ * The register automatically updates to the next pending interrupt/client
+ * status based on priority.
+ */
+ config_value = readl(ipcc->base + IPCC_REG_CONFIG);
+ if (config_value & IPCC_CLEAR_ON_RECV_RD) {
+ config_value &= ~(IPCC_CLEAR_ON_RECV_RD);
+ writel(config_value, ipcc->base + IPCC_REG_CONFIG);
+ }
+
ipcc->irq = platform_get_irq(pdev, 0);
if (ipcc->irq < 0)
return ipcc->irq;
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 8d5e2d7dc03b..c1981f091bd1 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -388,7 +388,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
value &= ~HSP_SM_SHRD_MBOX_FULL;
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Need to clear all bits here since some producers, such as TCU, depend
@@ -398,6 +397,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
* explicitly, so we have to make sure we cover all possible cases.
*/
tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
@@ -433,7 +434,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
msg = (void *)(unsigned long)value;
- mbox_chan_received_data(channel->chan, msg);
/*
* Clear data registers and tag.
@@ -443,6 +443,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
+
+ mbox_chan_received_data(channel->chan, msg);
}
static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index aa5249da59b2..0c143beaafda 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -905,7 +905,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *nc, *np = pdev->dev.of_node;
- struct zynqmp_ipi_pdata __percpu *pdata;
+ struct zynqmp_ipi_pdata *pdata;
struct of_phandle_args out_irq;
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1ae2c71bb383..02a2919f4e5a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -59,6 +59,7 @@ struct convert_context {
struct bio *bio_out;
struct bvec_iter iter_out;
atomic_t cc_pending;
+ unsigned int tag_offset;
u64 cc_sector;
union {
struct skcipher_request *req;
@@ -1187,7 +1188,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
- bip->bip_iter.bi_sector = io->cc->start + io->sector;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
tag_len, offset_in_page(io->integrity_metadata));
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
if (bio_out)
ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
+ ctx->tag_offset = 0;
init_completion(&ctx->restart);
}
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
static blk_status_t crypt_convert(struct crypt_config *cc,
struct convert_context *ctx, bool atomic, bool reset_pending)
{
- unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
- r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+ r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
else
- r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+ r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
switch (r) {
/*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* exit and continue processing in a workqueue
*/
ctx->r.req = NULL;
+ ctx->tag_offset++;
ctx->cc_sector += sector_step;
- tag_offset++;
return BLK_STS_DEV_RESOURCE;
}
} else {
@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
*/
case -EINPROGRESS:
ctx->r.req = NULL;
+ ctx->tag_offset++;
ctx->cc_sector += sector_step;
- tag_offset++;
continue;
/*
* The request was already processed (synchronously).
@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
case 0:
atomic_dec(&ctx->cc_pending);
ctx->cc_sector += sector_step;
- tag_offset++;
+ ctx->tag_offset++;
if (!atomic)
cond_resched();
continue;
@@ -1719,6 +1720,7 @@ retry:
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_ioprio = io->base_bio->bi_ioprio;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
remaining_size = size;
@@ -1909,7 +1911,6 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_dec_pending(io);
return 1;
}
- clone->bi_iter.bi_sector = cc->start + io->sector;
crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
io->saved_bi_iter = clone->bi_iter;
dm_submit_bio_remap(io->base_bio, clone);
@@ -1925,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+
+ clone->bi_iter.bi_sector = cc->start + io->sector;
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
crypt_inc_pending(io);
- clone->bi_iter.bi_sector = cc->start + io->sector;
-
if (dm_crypt_integrity_io_alloc(io, clone)) {
crypt_dec_pending(io);
bio_put(clone);
@@ -2039,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
/* crypt_convert should have filled the clone bio */
BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_iter.bi_sector = cc->start + io->sector;
-
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
dm_submit_bio_remap(io->base_bio, clone);
@@ -2092,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
struct crypt_config *cc = io->cc;
struct convert_context *ctx = &io->ctx;
int crypt_finished;
- sector_t sector = io->sector;
blk_status_t r;
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- r = crypt_convert(cc, &io->ctx, true, false);
+ r = crypt_convert(cc, &io->ctx, false, false);
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -2109,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
}
/* Encryption was already finished, submit io now */
- if (crypt_finished) {
+ if (crypt_finished)
kcryptd_crypt_write_io_submit(io, 0);
- io->sector = sector;
- }
crypt_dec_pending(io);
}
@@ -2123,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct convert_context *ctx = &io->ctx;
struct bio *clone;
int crypt_finished;
- sector_t sector = io->sector;
blk_status_t r;
/*
* Prevent io from disappearing until this function completes.
*/
crypt_inc_pending(io);
- crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+ crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
if (unlikely(!clone)) {
@@ -2147,8 +2142,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.iter_in = clone->bi_iter;
}
- sector += bio_sectors(clone);
-
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2172,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
/* Encryption was already finished, submit io now */
- if (crypt_finished) {
+ if (crypt_finished)
kcryptd_crypt_write_io_submit(io, 0);
- io->sector = sector;
- }
dec:
crypt_dec_pending(io);
@@ -2203,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work)
wait_for_completion(&io->ctx.restart);
reinit_completion(&io->ctx.restart);
- r = crypt_convert(cc, &io->ctx, true, false);
+ r = crypt_convert(cc, &io->ctx, false, false);
if (r)
io->error = r;
@@ -2221,7 +2212,6 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
if (io->ctx.aead_recheck) {
- io->ctx.cc_sector = io->sector + cc->iv_offset;
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
} else {
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index d7a8e2f40db3..c37668790577 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -379,6 +379,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
atomic_inc(&io->count);
submit_bio(bio);
+ WARN_ON_ONCE(opf & REQ_ATOMIC && remaining);
} while (remaining);
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 49fb0f684193..66318aba4bdb 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -199,9 +199,10 @@ static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
- DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
+ DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO |
+ DM_TARGET_ATOMIC_WRITES,
.report_zones = linear_report_zones,
.module = THIS_MODULE,
.ctr = linear_ctr,
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index 461ee6b2044d..716807e511ee 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -116,7 +116,7 @@ static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
if (!s)
return -ENOMEM;
- s->path_map = kzalloc(nr_cpu_ids * sizeof(struct path_info *),
+ s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),
GFP_KERNEL);
if (!s->path_map)
goto free_selector;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1e0d3b9b75d6..6adc55fd90d3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3196,7 +3196,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (reshape_sectors || rs_is_raid1(rs)) {
/*
* We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
+ * raid set needs to run to provide the respective reshape
* check functions via its MD personality instance.
*
* So do the reshape check after md_run() succeeded.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9511dae5b556..8c6f1f7e6456 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
unsigned int i;
struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
struct mirror *m;
- blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
+ blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH | REQ_ATOMIC);
struct dm_io_request io_req = {
.bi_opf = REQ_OP_WRITE | op_flags,
.mem.type = DM_IO_BIO,
@@ -1483,8 +1483,9 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
+ .features = DM_TARGET_ATOMIC_WRITES,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
.map = mirror_map,
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4112071de0be..3786ac67cefe 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -465,8 +465,9 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 6, 0},
- .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
+ .version = {1, 7, 0},
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
+ DM_TARGET_ATOMIC_WRITES,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd8b796ae683..0ef5203387b2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1806,6 +1806,32 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
+static int device_not_atomic_write_capable(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ return !bdev_can_atomic_write(dev->bdev);
+}
+
+static bool dm_table_supports_atomic_writes(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_supports_atomic_writes(ti->type))
+ return false;
+
+ if (!ti->type->iterate_devices)
+ return false;
+
+ if (ti->type->iterate_devices(ti,
+ device_not_atomic_write_capable, NULL)) {
+ return false;
+ }
+ }
+ return true;
+}
+
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1854,6 +1880,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return r;
}
+ if (dm_table_supports_atomic_writes(t))
+ limits->features |= BLK_FEAT_ATOMIC_WRITES;
+
r = queue_limits_set(q, limits);
if (r)
return r;
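With the dm-table hook above, BLK_FEAT_ATOMIC_WRITES is only exposed when every target in the table both advertises DM_TARGET_ATOMIC_WRITES in its feature mask and maps exclusively onto devices for which bdev_can_atomic_write() is true, checked via iterate_devices. A hedged sketch of the opt-in from a target's side, mirroring the linear/stripe/mirror hunks earlier in this series (the target itself is hypothetical):

	#include <linux/module.h>
	#include <linux/device-mapper.h>

	/*
	 * Hypothetical pass-through target opting in; iterate_devices must also
	 * be implemented, since dm_table_supports_atomic_writes() rejects
	 * targets without it.
	 */
	static struct target_type example_target = {
		.name     = "example-passthrough",
		.version  = {1, 0, 0},
		.features = DM_TARGET_ATOMIC_WRITES,
		.module   = THIS_MODULE,
		/* .ctr, .dtr, .map and .iterate_devices omitted for brevity */
	};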
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 12ecf07a3841..4d1e42891d24 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1479,12 +1479,12 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
struct dm_target *ti, unsigned int num_bios,
- unsigned *len, gfp_t gfp_flag)
+ unsigned *len)
{
struct bio *bio;
- int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
+ int try;
- for (; try < 2; try++) {
+ for (try = 0; try < 2; try++) {
int bio_nr;
if (try && num_bios > 1)
@@ -1508,8 +1508,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned int *len,
- gfp_t gfp_flag)
+ unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
@@ -1526,7 +1525,7 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe
* Using alloc_multiple_bios(), even if num_bios is 1, to consistently
* support allocating using GFP_NOWAIT with GFP_NOIO fallback.
*/
- alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
+ alloc_multiple_bios(&blist, ci, ti, num_bios, len);
while ((clone = bio_list_pop(&blist))) {
if (num_bios > 1)
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
@@ -1564,7 +1563,7 @@ static void __send_empty_flush(struct clone_info *ci)
atomic_add(ti->num_flush_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
- NULL, GFP_NOWAIT);
+ NULL);
atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
}
} else {
@@ -1612,7 +1611,7 @@ static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
__max_io_len(ti, ci->sector, max_granularity, max_sectors));
atomic_add(num_bios, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
+ bios = __send_duplicate_bios(ci, ti, num_bios, &len);
/*
* alloc_io() takes one extra reference for submission, so the
* reference won't reach 0 without the following (+1) subtraction
@@ -1746,6 +1745,9 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+ if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
+ return BLK_STS_IOERR;
+
setup_split_accounting(ci, len);
if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
@@ -1849,7 +1851,7 @@ static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
* not go crazy with the clone allocation.
*/
alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
- NULL, GFP_NOIO);
+ NULL);
}
/* Get a clone and change it to a regular reset operation. */
@@ -1881,7 +1883,7 @@ static void __send_zone_reset_all_native(struct clone_info *ci,
unsigned int bios;
atomic_add(1, &ci->io->io_count);
- bios = __send_duplicate_bios(ci, ti, 1, NULL, GFP_NOIO);
+ bios = __send_duplicate_bios(ci, ti, 1, NULL);
atomic_sub(1 - bios, &ci->io->io_count);
ci->sector_count = 0;
@@ -1969,6 +1971,15 @@ static void dm_split_and_process_bio(struct mapped_device *md,
/* Only support nowait for normal IO */
if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
+ /*
+ * Don't support NOWAIT for FLUSH because it may allocate
+ * multiple bios and there's no easy way to undo the
+ * allocations.
+ */
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ bio_wouldblock_error(bio);
+ return;
+ }
io = alloc_io(md, bio, GFP_NOWAIT);
if (unlikely(!io)) {
/* Unable to do anything without dm_io. */
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index ec4ecd96e6b1..23c09d22fcdb 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2355,7 +2355,10 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
if (!bitmap)
return -ENOENT;
-
+ if (bitmap->mddev->bitmap_info.external)
+ return -ENOENT;
+ if (!bitmap->storage.sb_page) /* no superblock */
+ return -EINVAL;
sb = kmap_local_page(bitmap->storage.sb_page);
stats->sync_size = le64_to_cpu(sb->sync_size);
kunmap_local(sb);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index a382929ce7ba..369aed044b40 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -76,10 +76,8 @@ static int linear_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 866015b681af..30b3dbbce2d2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -294,7 +294,7 @@ void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
static struct ctl_table_header *raid_table_header;
-static struct ctl_table raid_table[] = {
+static const struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -8376,6 +8376,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
return 0;
spin_unlock(&all_mddevs_lock);
+
+ /* prevent the bitmap from being freed while it is being checked */
+ mutex_lock(&mddev->bitmap_info.mutex);
+
spin_lock(&mddev->lock);
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : ", mdname(mddev));
@@ -8451,6 +8455,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
spin_unlock(&mddev->lock);
+ mutex_unlock(&mddev->bitmap_info.mutex);
spin_lock(&all_mddevs_lock);
if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index c7ba4e6cbbc7..98c745d90f48 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
@@ -77,7 +78,7 @@ static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
/*----------------------------------------------------------------*/
struct shadow_info {
- struct hlist_node hlist;
+ struct rb_node node;
dm_block_t where;
};
@@ -95,7 +96,7 @@ struct dm_transaction_manager {
struct dm_space_map *sm;
spinlock_t lock;
- struct hlist_head buckets[DM_HASH_SIZE];
+ struct rb_root buckets[DM_HASH_SIZE];
struct prefetch_set prefetches;
};
@@ -106,14 +107,22 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
- struct shadow_info *si;
+ struct rb_node **node;
spin_lock(&tm->lock);
- hlist_for_each_entry(si, tm->buckets + bucket, hlist)
- if (si->where == b) {
+ node = &tm->buckets[bucket].rb_node;
+ while (*node) {
+ struct shadow_info *si =
+ rb_entry(*node, struct shadow_info, node);
+ if (b == si->where) {
r = 1;
break;
}
+ if (b < si->where)
+ node = &si->node.rb_left;
+ else
+ node = &si->node.rb_right;
+ }
spin_unlock(&tm->lock);
return r;
@@ -130,30 +139,41 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
si = kmalloc(sizeof(*si), GFP_NOIO);
if (si) {
+ struct rb_node **node, *parent;
si->where = b;
bucket = dm_hash_block(b, DM_HASH_MASK);
+
spin_lock(&tm->lock);
- hlist_add_head(&si->hlist, tm->buckets + bucket);
+ node = &tm->buckets[bucket].rb_node;
+ parent = NULL;
+ while (*node) {
+ struct shadow_info *si =
+ rb_entry(*node, struct shadow_info, node);
+ parent = *node;
+ if (b < si->where)
+ node = &si->node.rb_left;
+ else
+ node = &si->node.rb_right;
+ }
+ rb_link_node(&si->node, parent, node);
+ rb_insert_color(&si->node, &tm->buckets[bucket]);
spin_unlock(&tm->lock);
}
}
static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
- struct shadow_info *si;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- int i;
+ unsigned int i;
spin_lock(&tm->lock);
for (i = 0; i < DM_HASH_SIZE; i++) {
- bucket = tm->buckets + i;
- hlist_for_each_entry_safe(si, tmp, bucket, hlist)
+ while (!RB_EMPTY_ROOT(&tm->buckets[i])) {
+ struct shadow_info *si =
+ rb_entry(tm->buckets[i].rb_node, struct shadow_info, node);
+ rb_erase(&si->node, &tm->buckets[i]);
kfree(si);
-
- INIT_HLIST_HEAD(bucket);
+ }
}
-
spin_unlock(&tm->lock);
}
@@ -162,7 +182,7 @@ static void wipe_shadow_table(struct dm_transaction_manager *tm)
static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
struct dm_space_map *sm)
{
- int i;
+ unsigned int i;
struct dm_transaction_manager *tm;
tm = kmalloc(sizeof(*tm), GFP_KERNEL);
@@ -176,7 +196,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
spin_lock_init(&tm->lock);
for (i = 0; i < DM_HASH_SIZE; i++)
- INIT_HLIST_HEAD(tm->buckets + i);
+ tm->buckets[i] = RB_ROOT;
prefetch_init(&tm->prefetches);
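
The hunk above replaces the per-bucket hlist with a per-bucket rb_root, so both is_shadow() and insert_shadow() now descend the tree by comparing block numbers and remembering the link to patch. A minimal userspace sketch of that search-then-link descent follows; it uses a plain, unbalanced binary search tree purely to illustrate the pointer-to-link walk, whereas the kernel code links the new node with rb_link_node() and then rebalances with rb_insert_color(). All names in the sketch are made up for illustration.

/* Sketch of the descend-and-link pattern, not the kernel rbtree API. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t dm_block_t;

struct shadow_node {
	struct shadow_node *left, *right;
	dm_block_t where;
};

static int shadow_lookup(const struct shadow_node *n, dm_block_t b)
{
	while (n) {
		if (b == n->where)
			return 1;
		n = (b < n->where) ? n->left : n->right;
	}
	return 0;
}

static void shadow_insert(struct shadow_node **link, dm_block_t b)
{
	/* Descend until an empty link is found, then attach the node there. */
	while (*link) {
		struct shadow_node *n = *link;
		link = (b < n->where) ? &n->left : &n->right;
	}
	*link = calloc(1, sizeof(**link));	/* error handling omitted */
	(*link)->where = b;
}

int main(void)
{
	struct shadow_node *root = NULL;

	shadow_insert(&root, 42);
	shadow_insert(&root, 7);
	printf("%d %d\n", shadow_lookup(root, 7), shadow_lookup(root, 8)); /* 1 0 */
	return 0;
}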
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index c7d36010c890..ba6828ef540e 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -7,12 +7,13 @@
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
-#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index ca0db8d457b4..e10bd588a586 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -5,13 +5,14 @@
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
-#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/media/cec/core/cec-pin-error-inj.c b/drivers/media/cec/core/cec-pin-error-inj.c
index fc0968b9d40e..6e61a04b8168 100644
--- a/drivers/media/cec/core/cec-pin-error-inj.c
+++ b/drivers/media/cec/core/cec-pin-error-inj.c
@@ -4,8 +4,9 @@
*/
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/sched/types.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 330d5d5d86ab..a70451d99ebc 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -4,8 +4,9 @@
*/
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/sched/types.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index cf64e8871fe5..50cdc557c943 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -3,11 +3,12 @@
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <media/cec-notifier.h>
#include <media/cec-pin.h>
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index f944c59cf495..a468ea7e77a1 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -125,8 +125,6 @@ void flexcop_dma_free(struct flexcop_dma *dma);
int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
flexcop_dma_index_t no, int onoff);
-int flexcop_dma_control_size_irq(struct flexcop_device *fc,
- flexcop_dma_index_t no, int onoff);
int flexcop_dma_config(struct flexcop_device *fc, struct flexcop_dma *dma,
flexcop_dma_index_t dma_idx);
int flexcop_dma_xfer_control(struct flexcop_device *fc,
@@ -170,8 +168,6 @@ int flexcop_sram_init(struct flexcop_device *fc);
void flexcop_determine_revision(struct flexcop_device *fc);
void flexcop_device_name(struct flexcop_device *fc,
const char *prefix, const char *suffix);
-void flexcop_dump_reg(struct flexcop_device *fc,
- flexcop_ibi_register reg, int num);
/* from flexcop-hw-filter.c */
int flexcop_pid_feed_control(struct flexcop_device *fc,
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index 83d01d3a81cc..251c4f731ed1 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -70,16 +70,3 @@ void flexcop_device_name(struct flexcop_device *fc,
flexcop_bus_names[fc->bus_type],
flexcop_revision_names[fc->rev], suffix);
}
-
-void flexcop_dump_reg(struct flexcop_device *fc,
- flexcop_ibi_register reg, int num)
-{
- flexcop_ibi_value v;
- int i;
- for (i = 0; i < num; i++) {
- v = fc->read_ibi_reg(fc, reg+4*i);
- deb_rdump("0x%03x: %08x, ", reg+4*i, v.raw);
- }
- deb_rdump("\n");
-}
-EXPORT_SYMBOL(flexcop_dump_reg);
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 9ce5f010de3f..6063782e937a 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -731,7 +731,7 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
if (ret < 0) {
dvb_dmxdev_feed_restart(filter);
- filter->feed.sec->start_filtering(*secfeed);
+ *secfeed = NULL;
dprintk("could not get filter\n");
return ret;
}
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index d925ca24183b..415f1f91cc30 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
{
- u64 tmp;
-
- tmp = (u64) ifhz * 16777216;
- do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
-
- return (u32) tmp;
+ return div_u64(ifhz * 16777216ull,
+ (xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
}
static u32 cxd2841er_calc_iffreq(u32 ifhz)
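
The rewrite works because the "ull" suffix promotes the multiplication to 64 bits before div_u64() divides, so ifhz * 2^24 cannot wrap in 32-bit arithmetic. A standalone sketch of the same arithmetic, with div_u64() modelled as an ordinary 64-bit division and example numbers only:

/* Userspace check of the IF-frequency arithmetic: value = ifhz * 2^24 / xtal. */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_iffreq_xtal(uint32_t ifhz, uint32_t xtal_hz)
{
	/* The 'ull' literal forces the multiply to happen in 64 bits. */
	return (uint32_t)(ifhz * 16777216ull / xtal_hz);
}

int main(void)
{
	/* 5 MHz IF on a 24 MHz-crystal part: 5000000 * 2^24 / 48000000 = 1747626 */
	printf("%u\n", calc_iffreq_xtal(5000000, 48000000));
	return 0;
}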
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index e1ae0f9fad43..2cdab2f3d9dc 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3335,9 +3335,11 @@ static int ccs_probe(struct i2c_client *client)
rval = request_firmware(&fw, filename, &client->dev);
if (!rval) {
- ccs_data_parse(&sensor->sdata, fw->data, fw->size, &client->dev,
- true);
+ rval = ccs_data_parse(&sensor->sdata, fw->data, fw->size,
+ &client->dev, true);
release_firmware(fw);
+ if (rval)
+ goto out_power_off;
}
if (!(ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA) ||
@@ -3351,9 +3353,11 @@ static int ccs_probe(struct i2c_client *client)
rval = request_firmware(&fw, filename, &client->dev);
if (!rval) {
- ccs_data_parse(&sensor->mdata, fw->data, fw->size,
- &client->dev, true);
+ rval = ccs_data_parse(&sensor->mdata, fw->data,
+ fw->size, &client->dev, true);
release_firmware(fw);
+ if (rval)
+ goto out_release_sdata;
}
}
@@ -3566,15 +3570,15 @@ out_disable_runtime_pm:
out_cleanup:
ccs_cleanup(sensor);
+out_free_ccs_limits:
+ kfree(sensor->ccs_limits);
+
out_release_mdata:
kvfree(sensor->mdata.backing);
out_release_sdata:
kvfree(sensor->sdata.backing);
-out_free_ccs_limits:
- kfree(sensor->ccs_limits);
-
out_power_off:
ccs_power_off(&client->dev);
mutex_destroy(&sensor->mutex);
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 08400edf77ce..f469afcea680 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -10,6 +10,7 @@
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include "ccs-data-defs.h"
@@ -97,7 +98,7 @@ ccs_data_parse_length_specifier(const struct __ccs_data_length_specifier *__len,
plen = ((size_t)
(__len3->length[0] &
((1 << CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT) - 1))
- << 16) + (__len3->length[0] << 8) + __len3->length[1];
+ << 16) + (__len3->length[1] << 8) + __len3->length[2];
break;
}
default:
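
The corrected line assembles the 24-bit length big-endian from bytes 1 and 2 plus the masked low bits of byte 0, whose top bits carry the length-specifier type. A self-contained sketch of that assembly, with illustrative values standing in for the CCS struct and constants:

/* Assemble a 24-bit big-endian length from three bytes; the mask width is
 * illustrative (the driver derives it from CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT). */
#include <stdint.h>
#include <stdio.h>

static uint32_t parse_len24(const uint8_t len[3], unsigned int type_bits)
{
	uint8_t mask = (uint8_t)((1u << (8 - type_bits)) - 1);

	return ((uint32_t)(len[0] & mask) << 16) | (len[1] << 8) | len[2];
}

int main(void)
{
	const uint8_t raw[3] = { 0xc1, 0x02, 0x03 };	/* top 2 bits = specifier type */

	printf("0x%06x\n", parse_len24(raw, 2));	/* prints 0x010203 */
	return 0;
}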
@@ -948,15 +949,15 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, verbose);
if (rval)
- return rval;
+ goto out_cleanup;
rval = bin_backing_alloc(&bin);
if (rval)
- return rval;
+ goto out_cleanup;
rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, false);
if (rval)
- goto out_free;
+ goto out_cleanup;
if (verbose && ccsdata->version)
print_ccs_data_version(dev, ccsdata->version);
@@ -965,15 +966,17 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
rval = -EPROTO;
dev_dbg(dev, "parsing mismatch; base %p; now %p; end %p\n",
bin.base, bin.now, bin.end);
- goto out_free;
+ goto out_cleanup;
}
ccsdata->backing = bin.base;
return 0;
-out_free:
+out_cleanup:
kvfree(bin.base);
+ memset(ccsdata, 0, sizeof(*ccsdata));
+ dev_warn(dev, "failed to parse CCS static data: %d\n", rval);
return rval;
}
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index 79bddfee2e2e..fd2d2d5272bf 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -8,6 +8,7 @@
* Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
return ret;
}
+static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
+ u8 val)
+{
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret < 0)
+ dev_err(&priv->client->dev,
+ "Cannot update register 0x%02x %d!\n", reg, ret);
+
+ return ret;
+}
+
/*
* GPIO chip
*/
@@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c master init failed\n");
- ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
- v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
- v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
- ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+ ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
+ UB913_REG_GENERAL_CFG_PCLK_RISING,
+ FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
+ priv->pclk_polarity_rising));
+
+ if (ret)
+ return ret;
return 0;
}
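
The conversion above replaces an open-coded read-modify-write with regmap_update_bits() plus FIELD_PREP(), so only the PCLK-polarity bit is touched and read errors are no longer silently ignored before the write. A stripped-down userspace model of the pattern; field_prep() here is a stand-in for the kernel macro (no compile-time checks), and the register value and mask are illustrative:

/* Model of FIELD_PREP() + regmap_update_bits(): shift the value into the
 * mask's position, then rewrite only the masked bits of the register. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static uint8_t field_prep(uint8_t mask, uint8_t val)
{
	return (uint8_t)((val << __builtin_ctz(mask)) & mask);	/* gcc/clang builtin */
}

static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
	return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
	uint8_t general_cfg = 0x2c;		/* illustrative register contents */
	uint8_t pclk_rising = BIT(0);		/* illustrative single-bit mask */

	general_cfg = update_bits(general_cfg, pclk_rising,
				  field_prep(pclk_rising, 1));
	printf("0x%02x\n", general_cfg);	/* prints 0x2d */
	return 0;
}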
@@ -793,7 +810,6 @@ static void ub913_subdev_uninit(struct ub913_data *priv)
v4l2_async_unregister_subdev(&priv->sd);
ub913_v4l2_nf_unregister(priv);
v4l2_subdev_cleanup(&priv->sd);
- fwnode_handle_put(priv->sd.fwnode);
media_entity_cleanup(&priv->sd.entity);
}
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 725589b3e1c5..46569381b332 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -65,6 +65,9 @@
#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+#define UB953_REG_BC_CTRL 0x49
+#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
+
#define UB953_REG_REV_MASK_ID 0x50
#define UB953_REG_GENERAL_STATUS 0x52
@@ -397,8 +400,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
int ret;
/* Set all GPIOs to local input mode */
- ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
- ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ if (ret)
+ return ret;
gc->label = dev_name(dev);
gc->parent = dev;
@@ -618,6 +626,12 @@ static int ub953_log_status(struct v4l2_subdev *sd)
ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2);
dev_info(dev, "CRC error count %u\n", v1 | (v2 << 8));
+ /* Clear CRC error counter */
+ if (v1 || v2)
+ regmap_update_bits(priv->regmap, UB953_REG_BC_CTRL,
+ UB953_REG_BC_CTRL_CRC_ERR_CLR,
+ UB953_REG_BC_CTRL_CRC_ERR_CLR);
+
ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v);
dev_info(dev, "CSI error count %u\n", v);
@@ -958,10 +972,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
clkout_data->rate = clkout_rate;
}
-static void ub953_write_clkout_regs(struct ub953_data *priv,
- const struct ub953_clkout_data *clkout_data)
+static int ub953_write_clkout_regs(struct ub953_data *priv,
+ const struct ub953_clkout_data *clkout_data)
{
u8 clkout_ctrl0, clkout_ctrl1;
+ int ret;
if (priv->hw_data->is_ub971)
clkout_ctrl0 = clkout_data->m;
@@ -971,8 +986,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
clkout_ctrl1 = clkout_data->n;
- ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
- ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ if (ret)
+ return ret;
+
+ return 0;
}
static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
@@ -1052,9 +1074,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
clkout_data.rate, rate);
- ub953_write_clkout_regs(priv, &clkout_data);
-
- return 0;
+ return ub953_write_clkout_regs(priv, &clkout_data);
}
static const struct clk_ops ub953_clkout_ops = {
@@ -1079,7 +1099,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
/* Initialize clkout to 25MHz by default */
ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
- ub953_write_clkout_regs(priv, &clkout_data);
+ ret = ub953_write_clkout_regs(priv, &clkout_data);
+ if (ret)
+ return ret;
priv->clkout_clk_hw.init = &init;
@@ -1226,10 +1248,15 @@ static int ub953_hw_init(struct ub953_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c init failed\n");
- ub953_write(priv, UB953_REG_GENERAL_CFG,
- (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
- ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
- UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
+ v = 0;
+ v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
+ v |= (priv->num_data_lanes - 1) <<
+ UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
+ v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
+
+ ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
+ if (ret)
+ return ret;
return 0;
}
@@ -1288,7 +1315,6 @@ static void ub953_subdev_uninit(struct ub953_data *priv)
v4l2_async_unregister_subdev(&priv->sd);
ub953_v4l2_notifier_unregister(priv);
v4l2_subdev_cleanup(&priv->sd);
- fwnode_handle_put(priv->sd.fwnode);
media_entity_cleanup(&priv->sd.entity);
}
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 1b1ff7f7505b..5dde8452739b 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -43,6 +43,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <linux/workqueue.h>
#include <media/i2c/ds90ub9xx.h>
@@ -51,7 +52,16 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define MHZ(v) ((u32)((v) * 1000000U))
+#define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
+
+/*
+ * If this is defined, the i2c addresses from UB960_DEBUG_I2C_RX_ID to
+ * UB960_DEBUG_I2C_RX_ID + 3 can be used to access the paged RX port registers
+ * directly.
+ *
+ * Only for debug purposes.
+ */
+/* #define UB960_DEBUG_I2C_RX_ID 0x40 */
#define UB960_POLL_TIME_MS 500
@@ -349,12 +359,13 @@
#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
#define UB960_SR_FPD3_RX_ID_LEN 6
-#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
+#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n))
+
+#define UB9702_SR_REFCLK_FREQ 0x3d
/* Indirect register blocks */
#define UB960_IND_TARGET_PAT_GEN 0x00
#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
-#define UB960_IND_TARGET_CSI_CSIPLL_REG_1 0x92 /* UB9702 */
#define UB960_IND_TARGET_CSI_ANA 0x07
/* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
@@ -568,11 +579,23 @@ struct ub960_format_info {
};
static const struct ub960_format_info ub960_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .datatype = MIPI_CSI2_DT_RGB888, },
+
{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+ { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .datatype = MIPI_CSI2_DT_RAW8, },
+
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .datatype = MIPI_CSI2_DT_RAW10, },
+
{ .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
{ .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
{ .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
@@ -1552,7 +1575,12 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (missing == 0)
break;
- msleep(50);
+ /*
+ * The sleep time of 10 ms was found by testing to give a lock
+ * with a few iterations. It can be decreased on setups where
+ * the lock is achieved much faster.
+ */
+ fsleep(10 * USEC_PER_MSEC);
}
if (lock_mask)
@@ -1574,16 +1602,24 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
- ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
- if (ret)
- return ret;
+ if (priv->hw_data->is_ub9702) {
+ dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
+ nport, ((u64)v * HZ_PER_MHZ) >> 8);
+ } else {
+ ret = ub960_rxport_get_strobe_pos(priv, nport,
+ &strobe_pos);
+ if (ret)
+ return ret;
- ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
- if (ret)
- return ret;
+ ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+ if (ret)
+ return ret;
- dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
- nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
+ dev_dbg(dev,
+ "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
+ nport, strobe_pos, eq_level,
+ ((u64)v * HZ_PER_MHZ) >> 8);
+ }
}
return 0;
@@ -2412,7 +2448,6 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
} rx_data[UB960_MAX_RX_NPORTS] = {};
u8 vc_map[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
- unsigned int nport;
int ret;
ret = ub960_validate_stream_vcs(priv);
@@ -2482,7 +2517,8 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
*/
fwd_ctl = GENMASK(7, 4);
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for (unsigned int nport = 0; nport < priv->hw_data->num_rxports;
+ nport++) {
struct ub960_rxport *rxport = priv->rxports[nport];
u8 vc = vc_map[nport];
@@ -2522,7 +2558,7 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
for (i = 0; i < 8; i++)
ub960_rxport_write(priv, nport,
UB960_RR_VC_ID_MAP(i),
- nport);
+ (nport << 4) | nport);
}
break;
@@ -2939,20 +2975,78 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
.set_fmt = ub960_set_fmt,
};
+static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 eq_level;
+ s8 strobe_pos;
+ int ret;
+ u8 v;
+
+ /* Strobe */
+
+ ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\t%s strobe\n",
+ (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
+ "Manual");
+
+ if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
+ ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tStrobe range [%d, %d]\n",
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
+ }
+
+ ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
+
+ /* EQ */
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\t%s EQ\n",
+ (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
+ "Adaptive");
+
+ if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+ if (ret)
+ return;
+
+ dev_info(dev, "\tEQ range [%u, %u]\n",
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
+ }
+
+ if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
+ dev_info(dev, "\tEQ level %u\n", eq_level);
+}
+
static int ub960_log_status(struct v4l2_subdev *sd)
{
struct ub960_data *priv = sd_to_ub960(sd);
struct device *dev = &priv->client->dev;
struct v4l2_subdev_state *state;
unsigned int nport;
- unsigned int i;
u16 v16 = 0;
u8 v = 0;
u8 id[UB960_SR_FPD3_RX_ID_LEN];
state = v4l2_subdev_lock_and_get_active_state(sd);
- for (i = 0; i < sizeof(id); i++)
+ for (unsigned int i = 0; i < sizeof(id); i++)
ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
@@ -2986,9 +3080,6 @@ static int ub960_log_status(struct v4l2_subdev *sd)
for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
struct ub960_rxport *rxport = priv->rxports[nport];
- u8 eq_level;
- s8 strobe_pos;
- unsigned int i;
dev_info(dev, "RX %u\n", nport);
@@ -3009,7 +3100,7 @@ static int ub960_log_status(struct v4l2_subdev *sd)
dev_info(dev, "\trx_port_sts2 %#02x\n", v);
ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
- dev_info(dev, "\tlink freq %llu Hz\n", (v16 * 1000000ULL) >> 8);
+ dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
dev_info(dev, "\tparity errors %u\n", v16);
@@ -3023,47 +3114,11 @@ static int ub960_log_status(struct v4l2_subdev *sd)
ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
dev_info(dev, "\tcsi_err_counter %u\n", v);
- /* Strobe */
-
- ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
-
- dev_info(dev, "\t%s strobe\n",
- (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
- "Manual");
-
- if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
- ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
-
- dev_info(dev, "\tStrobe range [%d, %d]\n",
- ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
- ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
- }
-
- ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
-
- dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
-
- /* EQ */
-
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
-
- dev_info(dev, "\t%s EQ\n",
- (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
- "Adaptive");
-
- if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
-
- dev_info(dev, "\tEQ range [%u, %u]\n",
- (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
- (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
- }
-
- if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
- dev_info(dev, "\tEQ level %u\n", eq_level);
+ if (!priv->hw_data->is_ub9702)
+ ub960_log_status_ub960_sp_eq(priv, nport);
/* GPIOs */
- for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
+ for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
u8 ctl_reg;
u8 ctl_shift;
@@ -3834,13 +3889,16 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
if (ret)
goto err_pd_gpio;
- ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
+ else
+ ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
if (ret)
goto err_pd_gpio;
dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
!!(dev_sts & BIT(4)), refclk_freq,
- clk_get_rate(priv->refclk) / 1000000);
+ clk_get_rate(priv->refclk) / HZ_PER_MHZ);
/* Disable all RX ports by default */
ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
@@ -3974,6 +4032,12 @@ static int ub960_probe(struct i2c_client *client)
schedule_delayed_work(&priv->poll_work,
msecs_to_jiffies(UB960_POLL_TIME_MS));
+#ifdef UB960_DEBUG_I2C_RX_ID
+ for (unsigned int i = 0; i < priv->hw_data->num_rxports; i++)
+ ub960_write(priv, UB960_SR_I2C_RX_ID(i),
+ (UB960_DEBUG_I2C_RX_ID + i) << 1);
+#endif
+
return 0;
err_free_sers:
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 2184c90f7864..2b5a6ce7b1ae 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -814,7 +814,7 @@ out_unlock:
}
static ssize_t otp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index f5ee6bd3b52d..fbf7eba3d71d 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -170,12 +170,15 @@ enum imx290_model {
IMX290_MODEL_IMX290LQR,
IMX290_MODEL_IMX290LLR,
IMX290_MODEL_IMX327LQR,
+ IMX290_MODEL_IMX462LQR,
+ IMX290_MODEL_IMX462LLR,
};
struct imx290_model_info {
enum imx290_colour_variant colour_variant;
const struct cci_reg_sequence *init_regs;
size_t init_regs_num;
+ unsigned int max_analog_gain;
const char *name;
};
@@ -267,7 +270,6 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
{ IMX290_WINWV, 1097 },
{ IMX290_XSOUTSEL, IMX290_XSOUTSEL_XVSOUTSEL_VSYNC |
IMX290_XSOUTSEL_XHSOUTSEL_HSYNC },
- { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x3012), 0x64 },
{ CCI_REG8(0x3013), 0x00 },
};
@@ -275,6 +277,51 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
static const struct cci_reg_sequence imx290_global_init_settings_290[] = {
{ CCI_REG8(0x300f), 0x00 },
{ CCI_REG8(0x3010), 0x21 },
+ { CCI_REG8(0x3011), 0x00 },
+ { CCI_REG8(0x3016), 0x09 },
+ { CCI_REG8(0x3070), 0x02 },
+ { CCI_REG8(0x3071), 0x11 },
+ { CCI_REG8(0x309b), 0x10 },
+ { CCI_REG8(0x309c), 0x22 },
+ { CCI_REG8(0x30a2), 0x02 },
+ { CCI_REG8(0x30a6), 0x20 },
+ { CCI_REG8(0x30a8), 0x20 },
+ { CCI_REG8(0x30aa), 0x20 },
+ { CCI_REG8(0x30ac), 0x20 },
+ { CCI_REG8(0x30b0), 0x43 },
+ { CCI_REG8(0x3119), 0x9e },
+ { CCI_REG8(0x311c), 0x1e },
+ { CCI_REG8(0x311e), 0x08 },
+ { CCI_REG8(0x3128), 0x05 },
+ { CCI_REG8(0x313d), 0x83 },
+ { CCI_REG8(0x3150), 0x03 },
+ { CCI_REG8(0x317e), 0x00 },
+ { CCI_REG8(0x32b8), 0x50 },
+ { CCI_REG8(0x32b9), 0x10 },
+ { CCI_REG8(0x32ba), 0x00 },
+ { CCI_REG8(0x32bb), 0x04 },
+ { CCI_REG8(0x32c8), 0x50 },
+ { CCI_REG8(0x32c9), 0x10 },
+ { CCI_REG8(0x32ca), 0x00 },
+ { CCI_REG8(0x32cb), 0x04 },
+ { CCI_REG8(0x332c), 0xd3 },
+ { CCI_REG8(0x332d), 0x10 },
+ { CCI_REG8(0x332e), 0x0d },
+ { CCI_REG8(0x3358), 0x06 },
+ { CCI_REG8(0x3359), 0xe1 },
+ { CCI_REG8(0x335a), 0x11 },
+ { CCI_REG8(0x3360), 0x1e },
+ { CCI_REG8(0x3361), 0x61 },
+ { CCI_REG8(0x3362), 0x10 },
+ { CCI_REG8(0x33b0), 0x50 },
+ { CCI_REG8(0x33b2), 0x1a },
+ { CCI_REG8(0x33b3), 0x04 },
+};
+
+static const struct cci_reg_sequence imx290_global_init_settings_462[] = {
+ { CCI_REG8(0x300f), 0x00 },
+ { CCI_REG8(0x3010), 0x21 },
+ { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x3016), 0x09 },
{ CCI_REG8(0x3070), 0x02 },
{ CCI_REG8(0x3071), 0x11 },
@@ -328,6 +375,7 @@ static const struct cci_reg_sequence xclk_regs[][IMX290_NUM_CLK_REGS] = {
};
static const struct cci_reg_sequence imx290_global_init_settings_327[] = {
+ { CCI_REG8(0x3011), 0x02 },
{ CCI_REG8(0x309e), 0x4A },
{ CCI_REG8(0x309f), 0x4A },
{ CCI_REG8(0x313b), 0x61 },
@@ -876,14 +924,10 @@ static int imx290_ctrl_init(struct imx290 *imx290)
* up to 72.0dB (240) add further digital gain. Limit the range to
* analog gain only, support for digital gain can be added separately
* if needed.
- *
- * The IMX327 and IMX462 are largely compatible with the IMX290, but
- * have an analog gain range of 0.0dB to 29.4dB and 42dB of digital
- * gain. When support for those sensors gets added to the driver, the
- * gain control should be adjusted accordingly.
*/
v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
- V4L2_CID_ANALOGUE_GAIN, 0, 100, 1, 0);
+ V4L2_CID_ANALOGUE_GAIN, 0,
+ imx290->model->max_analog_gain, 1, 0);
/*
* Correct range will be determined through imx290_ctrl_update setting
@@ -1441,20 +1485,37 @@ static const struct imx290_model_info imx290_models[] = {
.colour_variant = IMX290_VARIANT_COLOUR,
.init_regs = imx290_global_init_settings_290,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_290),
+ .max_analog_gain = 100,
.name = "imx290",
},
[IMX290_MODEL_IMX290LLR] = {
.colour_variant = IMX290_VARIANT_MONO,
.init_regs = imx290_global_init_settings_290,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_290),
+ .max_analog_gain = 100,
.name = "imx290",
},
[IMX290_MODEL_IMX327LQR] = {
.colour_variant = IMX290_VARIANT_COLOUR,
.init_regs = imx290_global_init_settings_327,
.init_regs_num = ARRAY_SIZE(imx290_global_init_settings_327),
+ .max_analog_gain = 98,
.name = "imx327",
},
+ [IMX290_MODEL_IMX462LQR] = {
+ .colour_variant = IMX290_VARIANT_COLOUR,
+ .init_regs = imx290_global_init_settings_462,
+ .init_regs_num = ARRAY_SIZE(imx290_global_init_settings_462),
+ .max_analog_gain = 98,
+ .name = "imx462",
+ },
+ [IMX290_MODEL_IMX462LLR] = {
+ .colour_variant = IMX290_VARIANT_MONO,
+ .init_regs = imx290_global_init_settings_462,
+ .init_regs_num = ARRAY_SIZE(imx290_global_init_settings_462),
+ .max_analog_gain = 98,
+ .name = "imx462",
+ },
};
static int imx290_parse_dt(struct imx290 *imx290)
@@ -1653,6 +1714,12 @@ static const struct of_device_id imx290_of_match[] = {
}, {
.compatible = "sony,imx327lqr",
.data = &imx290_models[IMX290_MODEL_IMX327LQR],
+ }, {
+ .compatible = "sony,imx462lqr",
+ .data = &imx290_models[IMX290_MODEL_IMX462LQR],
+ }, {
+ .compatible = "sony,imx462llr",
+ .data = &imx290_models[IMX290_MODEL_IMX462LLR],
},
{ /* sentinel */ },
};
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 83149fa729c4..f3bec16b527c 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -954,6 +954,8 @@ static int imx296_identify_model(struct imx296 *sensor)
return ret;
}
+ usleep_range(2000, 5000);
+
ret = imx296_read(sensor, IMX296_SENSOR_INFO);
if (ret < 0) {
dev_err(sensor->dev, "failed to read sensor information (%d)\n",
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index 0bfe3046fcc8..c74097a59c42 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -547,7 +547,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
lpfr = imx412->vblank + imx412->cur_mode->height;
- dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
+ dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u\n",
exposure, gain, lpfr);
ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
@@ -594,7 +594,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_VBLANK:
imx412->vblank = imx412->vblank_ctrl->val;
- dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u",
+ dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u\n",
imx412->vblank,
imx412->vblank + imx412->cur_mode->height);
@@ -613,7 +613,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
exposure = ctrl->val;
analog_gain = imx412->again_ctrl->val;
- dev_dbg(imx412->dev, "Received exp %u, analog gain %u",
+ dev_dbg(imx412->dev, "Received exp %u, analog gain %u\n",
exposure, analog_gain);
ret = imx412_update_exp_gain(imx412, exposure, analog_gain);
@@ -622,7 +622,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
- dev_err(imx412->dev, "Invalid control %d", ctrl->id);
+ dev_err(imx412->dev, "Invalid control %d\n", ctrl->id);
ret = -EINVAL;
}
@@ -803,14 +803,14 @@ static int imx412_start_streaming(struct imx412 *imx412)
ret = imx412_write_regs(imx412, reg_list->regs,
reg_list->num_of_regs);
if (ret) {
- dev_err(imx412->dev, "fail to write initial registers");
+ dev_err(imx412->dev, "fail to write initial registers\n");
return ret;
}
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx412->sd.ctrl_handler);
if (ret) {
- dev_err(imx412->dev, "fail to setup handler");
+ dev_err(imx412->dev, "fail to setup handler\n");
return ret;
}
@@ -821,7 +821,7 @@ static int imx412_start_streaming(struct imx412 *imx412)
ret = imx412_write_reg(imx412, IMX412_REG_MODE_SELECT,
1, IMX412_MODE_STREAMING);
if (ret) {
- dev_err(imx412->dev, "fail to start streaming");
+ dev_err(imx412->dev, "fail to start streaming\n");
return ret;
}
@@ -895,7 +895,7 @@ static int imx412_detect(struct imx412 *imx412)
return ret;
if (val != IMX412_ID) {
- dev_err(imx412->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx412->dev, "chip id mismatch: %x!=%x\n",
IMX412_ID, val);
return -ENXIO;
}
@@ -927,7 +927,7 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(imx412->reset_gpio)) {
- dev_err(imx412->dev, "failed to get reset gpio %ld",
+ dev_err(imx412->dev, "failed to get reset gpio %ld\n",
PTR_ERR(imx412->reset_gpio));
return PTR_ERR(imx412->reset_gpio);
}
@@ -935,13 +935,13 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
/* Get sensor input clock */
imx412->inclk = devm_clk_get(imx412->dev, NULL);
if (IS_ERR(imx412->inclk)) {
- dev_err(imx412->dev, "could not get inclk");
+ dev_err(imx412->dev, "could not get inclk\n");
return PTR_ERR(imx412->inclk);
}
rate = clk_get_rate(imx412->inclk);
if (rate != IMX412_INCLK_RATE) {
- dev_err(imx412->dev, "inclk frequency mismatch");
+ dev_err(imx412->dev, "inclk frequency mismatch\n");
return -EINVAL;
}
@@ -966,14 +966,14 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX412_NUM_DATA_LANES) {
dev_err(imx412->dev,
- "number of CSI2 data lanes %d is not supported",
+ "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto done_endpoint_free;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(imx412->dev, "no link frequencies defined");
+ dev_err(imx412->dev, "no link frequencies defined\n");
ret = -EINVAL;
goto done_endpoint_free;
}
@@ -1034,7 +1034,7 @@ static int imx412_power_on(struct device *dev)
ret = clk_prepare_enable(imx412->inclk);
if (ret) {
- dev_err(imx412->dev, "fail to enable inclk");
+ dev_err(imx412->dev, "fail to enable inclk\n");
goto error_reset;
}
@@ -1145,7 +1145,7 @@ static int imx412_init_controls(struct imx412 *imx412)
imx412->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctrl_hdlr->error) {
- dev_err(imx412->dev, "control init failed: %d",
+ dev_err(imx412->dev, "control init failed: %d\n",
ctrl_hdlr->error);
v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
@@ -1183,7 +1183,7 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_parse_hw_config(imx412);
if (ret) {
- dev_err(imx412->dev, "HW configuration is not supported");
+ dev_err(imx412->dev, "HW configuration is not supported\n");
return ret;
}
@@ -1191,14 +1191,14 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_power_on(imx412->dev);
if (ret) {
- dev_err(imx412->dev, "failed to power-on the sensor");
+ dev_err(imx412->dev, "failed to power-on the sensor\n");
goto error_mutex_destroy;
}
/* Check module identity */
ret = imx412_detect(imx412);
if (ret) {
- dev_err(imx412->dev, "failed to find sensor: %d", ret);
+ dev_err(imx412->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
@@ -1208,7 +1208,7 @@ static int imx412_probe(struct i2c_client *client)
ret = imx412_init_controls(imx412);
if (ret) {
- dev_err(imx412->dev, "failed to init controls: %d", ret);
+ dev_err(imx412->dev, "failed to init controls: %d\n", ret);
goto error_power_off;
}
@@ -1222,14 +1222,14 @@ static int imx412_probe(struct i2c_client *client)
imx412->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx412->sd.entity, 1, &imx412->pad);
if (ret) {
- dev_err(imx412->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx412->dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&imx412->sd);
if (ret < 0) {
dev_err(imx412->dev,
- "failed to register async subdev: %d", ret);
+ "failed to register async subdev: %d\n", ret);
goto error_media_entity;
}
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index c484b753a718..9a5d118b87b0 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -11,6 +11,7 @@
#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -76,6 +77,14 @@
/* OTP registers from sensor */
#define OV2740_REG_OTP_CUSTOMER 0x7010
+static const char * const ov2740_supply_name[] = {
+ "AVDD",
+ "DOVDD",
+ "DVDD",
+};
+
+#define OV2740_NUM_SUPPLIES ARRAY_SIZE(ov2740_supply_name)
+
struct nvm_data {
struct nvmem_device *nvmem;
struct regmap *regmap;
@@ -523,9 +532,11 @@ struct ov2740 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
- /* GPIOs, clocks */
+ /* GPIOs, clocks, regulators */
struct gpio_desc *reset_gpio;
+ struct gpio_desc *powerdown_gpio;
struct clk *clk;
+ struct regulator_bulk_data supplies[OV2740_NUM_SUPPLIES];
/* Current mode */
const struct ov2740_mode *cur_mode;
@@ -644,6 +655,8 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return -ENXIO;
}
+ dev_dbg(&client->dev, "chip id: %x\n", val);
+
ov2740->identified = true;
return 0;
@@ -753,15 +766,17 @@ static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
static int ov2740_init_controls(struct ov2740 *ov2740)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
const struct ov2740_mode *cur_mode;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
+ struct v4l2_fwnode_device_properties props;
int size;
int ret;
ctrl_hdlr = &ov2740->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
if (ret)
return ret;
@@ -811,6 +826,13 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
0, 0, ov2740_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov2740_ctrl_ops, &props);
+
if (ctrl_hdlr->error) {
v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
@@ -1295,7 +1317,9 @@ static int ov2740_suspend(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
+ gpiod_set_value_cansleep(ov2740->powerdown_gpio, 1);
clk_disable_unprepare(ov2740->clk);
+ regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
return 0;
}
@@ -1305,10 +1329,17 @@ static int ov2740_resume(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
int ret;
- ret = clk_prepare_enable(ov2740->clk);
+ ret = regulator_bulk_enable(OV2740_NUM_SUPPLIES, ov2740->supplies);
if (ret)
return ret;
+ ret = clk_prepare_enable(ov2740->clk);
+ if (ret) {
+ regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov2740->powerdown_gpio, 0);
gpiod_set_value_cansleep(ov2740->reset_gpio, 0);
msleep(20);
@@ -1320,7 +1351,7 @@ static int ov2740_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ov2740 *ov2740;
bool full_power;
- int ret;
+ int i, ret;
ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
if (!ov2740)
@@ -1337,9 +1368,17 @@ static int ov2740_probe(struct i2c_client *client)
if (IS_ERR(ov2740->reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
"failed to get reset GPIO\n");
- } else if (ov2740->reset_gpio) {
+ }
+
+ ov2740->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2740->powerdown_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(ov2740->powerdown_gpio),
+ "failed to get powerdown GPIO\n");
+ }
+
+ if (ov2740->reset_gpio || ov2740->powerdown_gpio) {
/*
- * Ensure reset is asserted for at least 20 ms before
+ * Ensure reset/powerdown is asserted for at least 20 ms before
* ov2740_resume() deasserts it.
*/
msleep(20);
@@ -1350,6 +1389,13 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
+ for (i = 0; i < OV2740_NUM_SUPPLIES; i++)
+ ov2740->supplies[i].supply = ov2740_supply_name[i];
+
+ ret = devm_regulator_bulk_get(dev, OV2740_NUM_SUPPLIES, ov2740->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
/* ACPI does not always clear the reset GPIO / enable the clock */
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index da5cb5f45a4f..0dae0438aa80 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -1982,6 +1982,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
light_freq = 50;
} else {
/* 60Hz */
+ light_freq = 60;
}
}
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 9f52af6f047f..87e5d7ce5a47 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -40,7 +40,7 @@
/* Exposure control */
#define OV9282_REG_EXPOSURE 0x3500
#define OV9282_EXPOSURE_MIN 1
-#define OV9282_EXPOSURE_OFFSET 12
+#define OV9282_EXPOSURE_OFFSET 25
#define OV9282_EXPOSURE_STEP 1
#define OV9282_EXPOSURE_DEFAULT 0x0282
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index ff8058568240..2ef97be4dc54 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -123,23 +123,6 @@ static int flexcop_dma_remap(struct flexcop_device *fc,
return 0;
}
-int flexcop_dma_control_size_irq(struct flexcop_device *fc,
- flexcop_dma_index_t no,
- int onoff)
-{
- flexcop_ibi_value v = fc->read_ibi_reg(fc, ctrl_208);
-
- if (no & FC_DMA_1)
- v.ctrl_208.DMA1_IRQ_Enable_sig = onoff;
-
- if (no & FC_DMA_2)
- v.ctrl_208.DMA2_IRQ_Enable_sig = onoff;
-
- fc->write_ibi_reg(fc, ctrl_208, v);
- return 0;
-}
-EXPORT_SYMBOL(flexcop_dma_control_size_irq);
-
int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
flexcop_dma_index_t no,
int onoff)
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index c85eb8d25837..485a6cbeb15a 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -305,21 +305,6 @@ int cx18_gpio_register(struct cx18 *cx, u32 hw)
return v4l2_device_register_subdev(&cx->v4l2_dev, sd);
}
-void cx18_reset_ir_gpio(void *data)
-{
- struct cx18 *cx = to_cx18(data);
-
- if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0)
- return;
-
- CX18_DEBUG_INFO("Resetting IR microcontroller\n");
-
- v4l2_subdev_call(&cx->sd_resetctrl,
- core, reset, CX18_GPIO_RESET_Z8F0811);
-}
-EXPORT_SYMBOL(cx18_reset_ir_gpio);
-/* This symbol is exported for use by lirc_pvr150 for the IR-blaster */
-
/* Xceive tuner reset function */
int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value)
{
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 0fa4c7ad2286..8d5797dea7f5 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -17,5 +17,4 @@ enum cx18_gpio_reset_type {
CX18_GPIO_RESET_XC2028 = 2,
};
-void cx18_reset_ir_gpio(void *data);
int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
index e898902e83f3..d8db5aa5d528 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -847,10 +847,10 @@ int ipu6_buttress_init(struct ipu6_device *isp)
INIT_LIST_HEAD(&b->constraints);
isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
- dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
- isp->secure_mode ? "secure" : "non-secure",
- readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
- readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
+ dev_dbg(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
+ isp->secure_mode ? "secure" : "non-secure",
+ readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
+ readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.c b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
index 8b8142bcb2d5..b7013f6524ec 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-cpd.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.c
@@ -275,7 +275,7 @@ static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp,
return -EINVAL;
}
- dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+ dev_dbg(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len,
moduledata_size - mod_hdr->hdr_len,
moduledata_size);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 77f9c7319868..8df1d83a74b5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -1133,6 +1133,7 @@ static int isys_probe(struct auxiliary_device *auxdev,
free_fw_msg_bufs:
free_fw_msg_bufs(isys);
out_remove_pkg_dir_shared_buffer:
+ cpu_latency_qos_remove_request(&isys->pm_qos);
if (!isp->secure_mode)
ipu6_cpd_free_pkg_dir(adev);
remove_shared_buffer:
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index bc63dc81bcae..f90ffc4dad52 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -40,7 +40,9 @@
#include "mgb4_trigger.h"
#include "mgb4_core.h"
-#define MGB4_USER_IRQS 16
+#define MGB4_USER_IRQS 16
+#define MGB4_MGB4_BAR_ID 0
+#define MGB4_XDMA_BAR_ID 1
#define DIGITEQ_VID 0x1ed8
#define T100_DID 0x0101
@@ -123,7 +125,7 @@ static const struct hwmon_chip_info temp_chip_info = {
};
#endif
-static int match_i2c_adap(struct device *dev, void *data)
+static int match_i2c_adap(struct device *dev, const void *data)
{
return i2c_verify_adapter(dev) ? 1 : 0;
}
@@ -139,7 +141,7 @@ static struct i2c_adapter *get_i2c_adap(struct platform_device *pdev)
return dev ? to_i2c_adapter(dev) : NULL;
}
-static int match_spi_adap(struct device *dev, void *data)
+static int match_spi_adap(struct device *dev, const void *data)
{
return to_spi_device(dev) ? 1 : 0;
}
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index 9aec62514c0b..e86742d7b6c4 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -18,9 +18,6 @@
#define MGB4_VIN_DEVICES 2
#define MGB4_VOUT_DEVICES 2
-#define MGB4_MGB4_BAR_ID 0
-#define MGB4_XDMA_BAR_ID 1
-
#define MGB4_IS_GMSL(mgbdev) \
((mgbdev)->module_version >> 4 == 2)
#define MGB4_IS_FPDL3(mgbdev) \
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_in.c b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
index 0ba66a2cf145..9626fa59e3d3 100644
--- a/drivers/media/pci/mgb4/mgb4_sysfs_in.c
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
@@ -333,7 +333,7 @@ static ssize_t hsync_width_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
@@ -344,7 +344,7 @@ static ssize_t vsync_width_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
@@ -355,7 +355,7 @@ static ssize_t hback_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
@@ -366,7 +366,7 @@ static ssize_t hfront_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal);
+ vindev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
@@ -377,7 +377,7 @@ static ssize_t vback_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
@@ -388,7 +388,7 @@ static ssize_t vfront_porch_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
- vindev->config->regs.signal2);
+ vindev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 3f171c624b40..434eaf0440e2 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -143,8 +143,8 @@ static int get_timings(struct mgb4_vin_dev *vindev,
u32 status = mgb4_read_reg(video, regs->status);
u32 pclk = mgb4_read_reg(video, regs->pclk);
- u32 signal = mgb4_read_reg(video, regs->signal);
- u32 signal2 = mgb4_read_reg(video, regs->signal2);
+ u32 hsync = mgb4_read_reg(video, regs->hsync);
+ u32 vsync = mgb4_read_reg(video, regs->vsync);
u32 resolution = mgb4_read_reg(video, regs->resolution);
if (!(status & (1U << 2)))
@@ -161,12 +161,12 @@ static int get_timings(struct mgb4_vin_dev *vindev,
if (status & (1U << 13))
timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
timings->bt.pixelclock = pclk * 1000;
- timings->bt.hsync = (signal & 0x00FF0000) >> 16;
- timings->bt.vsync = (signal2 & 0x00FF0000) >> 16;
- timings->bt.hbackporch = (signal & 0x0000FF00) >> 8;
- timings->bt.hfrontporch = signal & 0x000000FF;
- timings->bt.vbackporch = (signal2 & 0x0000FF00) >> 8;
- timings->bt.vfrontporch = signal2 & 0x000000FF;
+ timings->bt.hsync = (hsync & 0x00FF0000) >> 16;
+ timings->bt.vsync = (vsync & 0x00FF0000) >> 16;
+ timings->bt.hbackporch = (hsync & 0x0000FF00) >> 8;
+ timings->bt.hfrontporch = hsync & 0x000000FF;
+ timings->bt.vbackporch = (vsync & 0x0000FF00) >> 8;
+ timings->bt.vfrontporch = vsync & 0x000000FF;
return 0;
}
@@ -864,9 +864,9 @@ static void create_debugfs(struct mgb4_vin_dev *vindev)
vindev->regs[5].name = "PCLK_FREQUENCY";
vindev->regs[5].offset = vindev->config->regs.pclk;
vindev->regs[6].name = "VIDEO_PARAMS_1";
- vindev->regs[6].offset = vindev->config->regs.signal;
+ vindev->regs[6].offset = vindev->config->regs.hsync;
vindev->regs[7].name = "VIDEO_PARAMS_2";
- vindev->regs[7].offset = vindev->config->regs.signal2;
+ vindev->regs[7].offset = vindev->config->regs.vsync;
vindev->regs[8].name = "PADDING_PIXELS";
vindev->regs[8].offset = vindev->config->regs.padding;
if (has_timeperframe(video)) {
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
index 8fd10c0a5554..2a2c829914ce 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.h
+++ b/drivers/media/pci/mgb4/mgb4_vin.h
@@ -22,8 +22,8 @@ struct mgb4_vin_regs {
u32 frame_period;
u32 sync;
u32 pclk;
- u32 signal;
- u32 signal2;
+ u32 hsync;
+ u32 vsync;
u32 padding;
u32 timer;
};
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 6b2791e29de1..14c5725bd4d8 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -24,10 +24,6 @@
#include "mgb4_cmt.h"
#include "mgb4_vout.h"
-#define DEFAULT_WIDTH 1280
-#define DEFAULT_HEIGHT 640
-#define DEFAULT_PERIOD (MGB4_HW_FREQ / 60)
-
ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
ATTRIBUTE_GROUPS(mgb4_gmsl_out);
@@ -180,7 +176,10 @@ static void stop_streaming(struct vb2_queue *vq)
xdma_disable_user_irq(mgbdev->xdev, irq);
cancel_work_sync(&voutdev->dma_work);
+
mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0x2, 0x0);
+ mgb4_write_reg(&mgbdev->video, voutdev->config->regs.padding, 0);
+
return_all_buffers(voutdev, VB2_BUF_STATE_ERROR);
}
@@ -196,6 +195,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
int rv;
u32 addr;
+ mgb4_write_reg(video, config->regs.padding, voutdev->padding);
mgb4_mask_reg(video, config->regs.config, 0x2, 0x2);
addr = mgb4_read_reg(video, config->regs.address);
@@ -359,7 +359,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
* pixelsize)) / pixelsize;
- mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
return 0;
}
@@ -661,11 +660,10 @@ static void fpga_init(struct mgb4_vout_dev *voutdev)
const struct mgb4_vout_regs *regs = &voutdev->config->regs;
mgb4_write_reg(video, regs->config, 0x00000011);
- mgb4_write_reg(video, regs->resolution,
- (DEFAULT_WIDTH << 16) | DEFAULT_HEIGHT);
+ mgb4_write_reg(video, regs->resolution, (1280 << 16) | 640);
mgb4_write_reg(video, regs->hsync, 0x00283232);
mgb4_write_reg(video, regs->vsync, 0x40141F1E);
- mgb4_write_reg(video, regs->frame_limit, DEFAULT_PERIOD);
+ mgb4_write_reg(video, regs->frame_limit, MGB4_HW_FREQ / 60);
mgb4_write_reg(video, regs->padding, 0x00000000);
voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 61150 >> 1) << 1;
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index a6738baab688..ac958a5fca78 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -77,9 +77,7 @@ static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
/* TODO: NTSC SPECIFIC */
/* Init and establish defaults */
params->samplesperline = 1440;
- params->numberoflines = 12;
params->numberoflines = 18;
- params->pitch = 1600;
params->pitch = 1440;
params->numpagetables = 2 +
((params->numberoflines * params->pitch) / PAGE_SIZE);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 1a9e2bccc413..6ec1480a6d18 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -362,7 +362,7 @@ static ssize_t sdram_offsets_show(struct device *dev,
}
static ssize_t sdram_show(struct file *file, struct kobject *kobj,
- struct bin_attribute *a, char *buf,
+ const struct bin_attribute *a, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -432,7 +432,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
sysfs_attr_init(&sdram_attr->attr);
sdram_attr->attr.name = "sdram";
sdram_attr->attr.mode = 0440;
- sdram_attr->read = sdram_show;
+ sdram_attr->read_new = sdram_show;
sdram_attr->size = solo_dev->sdram_size;
if (device_create_bin_file(dev, sdram_attr)) {
diff --git a/drivers/media/platform/broadcom/bcm2835-unicam.c b/drivers/media/platform/broadcom/bcm2835-unicam.c
index 3aed0e493c81..f10064107d54 100644
--- a/drivers/media/platform/broadcom/bcm2835-unicam.c
+++ b/drivers/media/platform/broadcom/bcm2835-unicam.c
@@ -199,6 +199,7 @@ struct unicam_device {
/* subdevice async notifier */
struct v4l2_async_notifier notifier;
unsigned int sequence;
+ bool frame_started;
/* Sensor node */
struct {
@@ -546,7 +547,8 @@ unicam_find_format_by_fourcc(u32 fourcc, u32 pad)
}
for (i = 0; i < num_formats; ++i) {
- if (formats[i].fourcc == fourcc)
+ if (formats[i].fourcc == fourcc ||
+ formats[i].unpacked_fourcc == fourcc)
return &formats[i];
}
@@ -638,7 +640,14 @@ static inline void unicam_reg_write_field(struct unicam_device *unicam, u32 offs
static void unicam_wr_dma_addr(struct unicam_node *node,
struct unicam_buffer *buf)
{
- dma_addr_t endaddr = buf->dma_addr + buf->size;
+ /*
+ * Due to a HW bug causing buffer overruns in circular buffer mode under
+ * certain (not yet fully known) conditions, the dummy buffer allocation
+ * is set to a single page size, but the hardware gets programmed with
+ * a buffer size of 0.
+ */
+ dma_addr_t endaddr = buf->dma_addr +
+ (buf != &node->dummy_buf ? buf->size : 0);
if (node->id == UNICAM_IMAGE_NODE) {
unicam_reg_write(node->dev, UNICAM_IBSA0, buf->dma_addr);
@@ -742,6 +751,8 @@ static irqreturn_t unicam_isr(int irq, void *dev)
* buffer forever.
*/
if (fe) {
+ bool inc_seq = unicam->frame_started;
+
/*
* Ensure we have swapped buffers already as we can't
* stop the peripheral. If no buffer is available, use a
@@ -761,11 +772,24 @@ static irqreturn_t unicam_isr(int irq, void *dev)
* + FS + LS). In this case, we cannot signal the buffer
* as complete, as the HW will reuse that buffer.
*/
- if (node->cur_frm && node->cur_frm != node->next_frm)
+ if (node->cur_frm && node->cur_frm != node->next_frm) {
unicam_process_buffer_complete(node, sequence);
+ inc_seq = true;
+ }
node->cur_frm = node->next_frm;
}
- unicam->sequence++;
+
+ /*
+ * Increment the sequence number conditionally on either an FS
+ * having already occurred, or in the FE + FS condition as
+ * caught in the FE handler above. This ensures the sequence
+ * number corresponds to the frames generated by the sensor, not
+ * the frames dequeued to userland.
+ */
+ if (inc_seq) {
+ unicam->sequence++;
+ unicam->frame_started = false;
+ }
}
if (ista & UNICAM_FSI) {
@@ -795,6 +819,7 @@ static irqreturn_t unicam_isr(int irq, void *dev)
}
unicam_queue_event_sof(unicam);
+ unicam->frame_started = true;
}
/*
@@ -816,11 +841,6 @@ static irqreturn_t unicam_isr(int irq, void *dev)
}
}
- if (unicam_reg_read(unicam, UNICAM_ICTL) & UNICAM_FCM) {
- /* Switch out of trigger mode if selected */
- unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
- unicam_reg_write_field(unicam, UNICAM_ICTL, 0, UNICAM_FCM);
- }
return IRQ_HANDLED;
}
@@ -984,8 +1004,7 @@ static void unicam_start_rx(struct unicam_device *unicam,
unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_DDL);
- /* Always start in trigger frame capture mode (UNICAM_FCM set) */
- val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_FCM | UNICAM_IBOB;
+ val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_IBOB;
line_int_freq = max(fmt->height >> 2, 128);
unicam_set_field(&val, line_int_freq, UNICAM_LCIE_MASK);
unicam_reg_write(unicam, UNICAM_ICTL, val);
@@ -1413,6 +1432,7 @@ static int unicam_sd_enable_streams(struct v4l2_subdev *sd,
if (unicam->pipe.nodes & BIT(UNICAM_METADATA_NODE))
unicam_start_metadata(unicam);
+ unicam->frame_started = false;
unicam_start_rx(unicam, state);
}
diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
index 9ec01228f907..b8360d37000a 100644
--- a/drivers/media/platform/marvell/mcam-core.c
+++ b/drivers/media/platform/marvell/mcam-core.c
@@ -935,7 +935,12 @@ static int mclk_enable(struct clk_hw *hw)
ret = pm_runtime_resume_and_get(cam->dev);
if (ret < 0)
return ret;
- clk_enable(cam->clk[0]);
+ ret = clk_enable(cam->clk[0]);
+ if (ret) {
+ pm_runtime_put(cam->dev);
+ return ret;
+ }
+
mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
mcam_ctlr_power_up(cam);
diff --git a/drivers/media/platform/marvell/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
index 3fd4fc1b9c48..d3da7ebb4a2b 100644
--- a/drivers/media/platform/marvell/mmp-driver.c
+++ b/drivers/media/platform/marvell/mmp-driver.c
@@ -232,12 +232,22 @@ static int mmpcam_probe(struct platform_device *pdev)
mcam_init_clk(mcam);
/*
+ * Register with V4L.
+ */
+
+ ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ /*
* Create a match of the sensor against its OF node.
*/
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
NULL);
- if (!ep)
- return -ENODEV;
+ if (!ep) {
+ ret = -ENODEV;
+ goto out_v4l2_device_unregister;
+ }
v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
@@ -246,7 +256,7 @@ static int mmpcam_probe(struct platform_device *pdev)
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
- goto out;
+ goto out_v4l2_device_unregister;
}
/*
@@ -254,7 +264,7 @@ static int mmpcam_probe(struct platform_device *pdev)
*/
ret = mccic_register(mcam);
if (ret)
- goto out;
+ goto out_v4l2_device_unregister;
/*
* Add OF clock provider.
@@ -283,6 +293,8 @@ static int mmpcam_probe(struct platform_device *pdev)
return 0;
out:
mccic_shutdown(mcam);
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&mcam->v4l2_dev);
return ret;
}
@@ -293,6 +305,7 @@ static void mmpcam_remove(struct platform_device *pdev)
struct mcam_camera *mcam = &cam->mcam;
mccic_shutdown(mcam);
+ v4l2_device_unregister(&mcam->v4l2_dev);
pm_runtime_force_suspend(mcam->dev);
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index ea2ea119dd2a..e5ccf673e152 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -114,19 +114,15 @@ static struct img_config *__get_config_offset(struct mdp_dev *mdp,
if (pp_idx >= mdp->mdp_data->pp_used)
goto err_param;
- if (CFG_CHECK(MT8183, p_id))
+ if (CFG_CHECK(MT8183, p_id)) {
cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
- else if (CFG_CHECK(MT8195, p_id))
- cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
- else
- goto err_param;
-
- if (CFG_CHECK(MT8183, p_id))
cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
- else if (CFG_CHECK(MT8195, p_id))
+ } else if (CFG_CHECK(MT8195, p_id)) {
+ cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);
- else
+ } else {
goto err_param;
+ }
if ((long)cfg_n - (long)mdp->vpu.config > bound) {
dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound);
@@ -325,8 +321,7 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
/* Enable mux settings */
for (index = 0; index < ctrl->num_sets; index++) {
set = &ctrl->sets[index];
- cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
- set->value, 0xFFFFFFFF);
+ cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, set->value);
}
/* Config sub-frame information */
for (index = (num_comp - 1); index >= 0; index--) {
@@ -381,8 +376,7 @@ static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
/* Disable mux settings */
for (index = 0; index < ctrl->num_sets; index++) {
set = &ctrl->sets[index];
- cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
- 0, 0xFFFFFFFF);
+ cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, 0);
}
return 0;
@@ -471,43 +465,6 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
return 0;
}
-static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
- size_t size)
-{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base)
- return -ENOMEM;
-
- pkt->buf_size = size;
- pkt->cl = (void *)client;
-
- dev = client->chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mdp_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
-{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
- pkt->va_base = NULL;
-}
-
static void mdp_auto_release_work(struct work_struct *work)
{
struct mdp_cmdq_cmd *cmd;
@@ -538,7 +495,7 @@ static void mdp_auto_release_work(struct work_struct *work)
wake_up(&mdp->callback_wq);
}
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt);
kfree(cmd->comps);
cmd->comps = NULL;
kfree(cmd);
@@ -578,7 +535,7 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
if (refcount_dec_and_test(&mdp->job_count))
wake_up(&mdp->callback_wq);
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt);
kfree(cmd->comps);
cmd->comps = NULL;
kfree(cmd);
@@ -607,20 +564,13 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
goto err_uninit;
}
- if (CFG_CHECK(MT8183, p_id))
- num_comp = CFG_GET(MT8183, config, num_components);
- else if (CFG_CHECK(MT8195, p_id))
- num_comp = CFG_GET(MT8195, config, num_components);
- else
- goto err_uninit;
-
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto err_uninit;
}
- ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
+ ret = cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
if (ret)
goto err_free_cmd;
@@ -632,6 +582,7 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
ret = -EINVAL;
goto err_destroy_pkt;
}
+
comps = kcalloc(num_comp, sizeof(*comps), GFP_KERNEL);
if (!comps) {
ret = -ENOMEM;
@@ -676,7 +627,8 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
dev_err(dev, "mdp_path_config error %d\n", pp_idx);
goto err_free_path;
}
- cmdq_pkt_finalize(&cmd->pkt);
+ cmdq_pkt_eoc(&cmd->pkt);
+ cmdq_pkt_jump_rel(&cmd->pkt, CMDQ_INST_SIZE, mdp->cmdq_shift_pa[pp_idx]);
for (i = 0; i < num_comp; i++) {
s32 inner_id = MDP_COMP_NONE;
@@ -699,6 +651,7 @@ static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
cmd->comps = comps;
cmd->num_comps = num_comp;
cmd->mdp_ctx = param->mdp_ctx;
+ cmd->pp_idx = pp_idx;
kfree(path);
return cmd;
@@ -710,7 +663,7 @@ err_free_path:
err_free_comps:
kfree(comps);
err_destroy_pkt:
- mdp_cmdq_pkt_destroy(&cmd->pkt);
+ cmdq_pkt_destroy(mdp->cmdq_clt[pp_idx], &cmd->pkt);
err_free_cmd:
kfree(cmd);
err_uninit:
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
index 53a30ad7e0b0..935ae9825728 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -35,6 +35,7 @@ struct mdp_cmdq_cmd {
struct mdp_comp *comps;
void *mdp_ctx;
u8 num_comps;
+ u8 pp_idx;
};
struct mdp_dev;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 8f62fb167156..683c066ed975 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -72,14 +72,14 @@ static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
/* Disable RSZ1 */
if (ctx->comp->inner_id == rdma0 && prz1)
- MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
- 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, prz1->reg_base,
+ PRZ_ENABLE, 0x0, BIT(0));
}
/* Reset RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
return 0;
}
@@ -98,26 +98,25 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
if (block10bit)
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
else
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
}
/* Setup smi control */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
- (7 << 4) + //burst type to 8
- (1 << 16), //enable pre-ultra
- 0x00030071);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
+ (7 << 4) + //burst type to 8
+ (1 << 16), //enable pre-ultra
+ 0x00030071);
/* Setup source frame info */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.src_ctrl);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
- 0x03C8FE0F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg, 0x03C8FE0F);
if (mdp_cfg)
if (mdp_cfg->rdma_support_10bit && en_ufo) {
@@ -126,17 +125,15 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_y);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_UFO_DEC_LENGTH_BASE_Y, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_c);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_UFO_DEC_LENGTH_BASE_C, reg);
/* Set 10bit source frame pitch */
if (block10bit) {
@@ -144,9 +141,9 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd_in_pxl);
- MM_REG_WRITE(cmd, subsys_id,
- base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
+ reg, 0x001FFFFF);
}
}
@@ -157,128 +154,121 @@ static int config_rdma_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8195, ctx->param, rdma.control);
rdma_con_mask = 0x1130;
}
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
- rdma_con_mask);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_CON, reg, rdma_con_mask);
/* Setup source buffer base */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg);
/* Setup source buffer end */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2, reg);
/* Setup source frame pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
+ reg, 0x001FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.sf_bkgd);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
- reg, 0x001FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
+ reg, 0x001FFFFF);
/* Setup color transform */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.transform);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
- reg, 0x0F110000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
+ reg, 0x0F110000);
if (!mdp_cfg || !mdp_cfg->rdma_esl_setting)
goto rdma_config_done;
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
- reg, 0x0FFF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
+ reg, 0x0FFF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
- reg, 0x0F7F007F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
+ reg, 0x0F7F007F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
- reg, 0x0F3F003F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
+ reg, 0x0F3F003F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con2);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
- reg, 0x3FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
+ reg, 0x3FFFFFFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con3);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
- reg, 0x0F3F003F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
+ reg, 0x0F3F003F);
rdma_config_done:
return 0;
@@ -297,15 +287,14 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
u32 reg = 0;
/* Enable RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
/* Set Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0, reg);
/* Set 10bit UFO mode */
if (mdp_cfg) {
@@ -315,8 +304,7 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset_0_p);
MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_SRC_OFFSET_0_P,
- reg, 0xFFFFFFFF);
+ MDP_RDMA_SRC_OFFSET_0_P, reg);
}
}
@@ -325,40 +313,38 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1, reg);
/* Set V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
- reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2, reg);
/* Set source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
+ 0x1FFF1FFF);
/* Set target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
- reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
+ reg, 0x1FFF1FFF);
/* Set crop offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
- reg, 0x003F001F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
+ reg, 0x003F001F);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
@@ -369,8 +355,8 @@ static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
}
if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
if ((csf_r - csf_l + 1) > 320)
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
return 0;
}
@@ -393,7 +379,7 @@ static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
/* Disable RDMA */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
return 0;
}
@@ -411,10 +397,10 @@ static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
/* Enable RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
if (CFG_CHECK(MT8195, p_id)) {
struct device *dev;
@@ -437,7 +423,7 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
u32 reg = 0;
if (mdp_cfg && mdp_cfg->rsz_etc_control)
- MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0);
if (CFG_CHECK(MT8183, p_id))
bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);
@@ -446,7 +432,7 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
if (bypass) {
/* Disable RSZ */
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
return 0;
}
@@ -454,29 +440,27 @@ static int config_rsz_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.control1);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
- 0x03FFFDF3);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, reg, 0x03FFFDF3);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.control2);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
- 0x0FFFC290);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x0FFFC290);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_x);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
- reg, 0x007FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP, reg,
+ 0x007FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_y);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
- reg, 0x007FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP, reg,
+ 0x007FFFFF);
return 0;
}
@@ -495,15 +479,13 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].control2);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
- 0x00003800);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x00003800);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg);
if (CFG_CHECK(MT8183, p_id)) {
csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
@@ -514,60 +496,56 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
}
if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
if ((csf_r - csf_l + 1) <= 16)
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
- BIT(27), BIT(27));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1,
+ BIT(27), BIT(27));
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left_subpix);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top_subpix);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
- reg, 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
+ reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left_subpix);
- MM_REG_WRITE(cmd, subsys_id,
- base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
- reg, 0x1FFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ reg, 0x1FFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg);
if (CFG_CHECK(MT8195, p_id)) {
struct device *dev;
@@ -596,19 +574,19 @@ static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].merge_cfg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_0, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_0, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_4, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_4, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_24, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_24, reg);
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_25, reg, 0xFFFFFFFF);
+ MDP_MERGE_CFG_25, reg);
/* Bypass mode */
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_CFG_12, BIT(0), 0xFFFFFFFF);
+ MDP_MERGE_CFG_12, BIT(0));
MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
- MDP_MERGE_ENABLE, BIT(0), 0xFFFFFFFF);
+ MDP_MERGE_ENABLE, BIT(0));
}
rsz_subfrm_done:
@@ -634,8 +612,8 @@ static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
}
if ((csf_r - csf_l + 1) <= 16)
- MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
- BIT(27));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
+ BIT(27));
}
return 0;
@@ -655,15 +633,15 @@ static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
/* Reset setting */
if (CFG_CHECK(MT8195, p_id))
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
return 0;
}
@@ -681,39 +659,36 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg);
if (mdp_cfg && mdp_cfg->wrot_support_10bit) {
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.scan_10bit);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_SCAN_10BIT,
- reg, 0x0000000F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SCAN_10BIT,
+ reg, 0x0000000F);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.pending_zero);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_PENDING_ZERO,
- reg, 0x04000000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_PENDING_ZERO,
+ reg, 0x04000000);
}
if (CFG_CHECK(MT8195, p_id)) {
reg = CFG_COMP(MT8195, ctx->param, wrot.bit_number);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL_2,
- reg, 0x00000007);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL_2,
+ reg, 0x00000007);
}
/* Write frame related registers */
@@ -721,14 +696,13 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.control);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.control);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
- 0xF131510F);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL, reg, 0xF131510F);
/* Write pre-ultra threshold */
if (CFG_CHECK(MT8195, p_id)) {
reg = CFG_COMP(MT8195, ctx->param, wrot.pre_ultra);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
- 0x00FFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
+ 0x00FFFFFF);
}
/* Write frame Y pitch */
@@ -736,37 +710,34 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
- 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE, reg, 0x0000FFFF);
/* Write frame UV pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
- 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_C, reg, 0xFFFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.stride[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
- 0xFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_V, reg, 0xFFFF);
/* Write matrix control */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.mat_ctrl);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);
/* Set the fixed ALPHA as 0xFF */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
- 0xFF000000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
+ 0xFF000000);
/* Set VIDO_EOL_SEL */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
/* Set VIDO_FIFO_TEST */
if (CFG_CHECK(MT8183, p_id))
@@ -775,8 +746,8 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8195, ctx->param, wrot.fifo_test);
if (reg != 0)
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
- reg, 0xFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_FIFO_TEST, reg,
+ 0xFFF);
/* Filter enable */
if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
@@ -784,13 +755,13 @@ static int config_wrot_frame(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.filter);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
- reg, 0x77);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
+ 0x77);
/* Turn off WROT DMA DCM */
if (CFG_CHECK(MT8195, p_id))
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN,
- (0x1 << 23) + (0x1 << 20), 0x900000);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN,
+ (0x1 << 23) + (0x1 << 20), 0x900000);
}
return 0;
@@ -808,57 +779,52 @@ static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR, reg, 0x0FFFFFFF);
/* Write U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_C, reg, 0x0FFFFFFF);
/* Write V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_V, reg,
+ 0x0FFFFFFF);
/* Write source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_IN_SIZE, reg, 0x1FFF1FFF);
/* Write target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_TAR_SIZE, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
- 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CROP_OFST, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
else if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].main_buf);
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
- reg, 0x1FFF7F00);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
+ 0x1FFF7F00);
/* Enable WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
return 0;
}
@@ -881,11 +847,11 @@ static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
- 0x77);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
+ 0x77);
/* Disable WROT */
- MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
return 0;
}
@@ -904,9 +870,9 @@ static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* Reset WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
- MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
+ MM_REG_POLL_MASK(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
return 0;
}
@@ -918,40 +884,35 @@ static int config_wdma_frame(struct mdp_comp_ctx *ctx,
u8 subsys_id = ctx->comp->subsys_id;
u32 reg = 0;
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050);
/* Setup frame information */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, reg,
- 0x0F01B8F0);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CFG, reg, 0x0F01B8F0);
/* Setup frame base address */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg);
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg,
- 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg);
/* Setup Y pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
- reg, 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE, reg,
+ 0x0000FFFF);
/* Setup UV pitch */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
- reg, 0x0000FFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_UV_PITCH, reg,
+ 0x0000FFFF);
/* Set the fixed ALPHA as 0xFF */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
- 0x800000FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
+ 0x800000FF);
return 0;
}
@@ -966,36 +927,33 @@ static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
/* Write Y pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write U pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write V pixel offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
- reg, 0x0FFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET, reg,
+ 0x0FFFFFFF);
/* Write source size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_SRC_SIZE, reg, 0x3FFF3FFF);
/* Write target size */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg, 0x3FFF3FFF);
/* Write clip offset */
if (CFG_CHECK(MT8183, p_id))
reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, reg,
- 0x3FFF3FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_COORD, reg, 0x3FFF3FFF);
/* Enable WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
return 0;
}
@@ -1007,7 +965,7 @@ static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
/* Disable WDMA */
- MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
return 0;
}
@@ -1033,19 +991,17 @@ static int reset_luma_hist(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
/* Reset histogram */
for (i = 0; i <= hist_num; i++)
- MM_REG_WRITE_MASK(cmd, subsys_id, base,
- (MDP_LUMA_HIST_INIT + (i << 2)),
- 0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ (MDP_LUMA_HIST_INIT + (i << 2)), 0);
if (mdp_cfg->tdshp_constrain)
MM_REG_WRITE(cmd, subsys_id, base,
- MDP_DC_TWO_D_W1_RESULT_INIT, 0, 0xFFFFFFFF);
+ MDP_DC_TWO_D_W1_RESULT_INIT, 0);
if (mdp_cfg->tdshp_contour)
for (i = 0; i < hist_num; i++)
- MM_REG_WRITE_MASK(cmd, subsys_id, base,
- (MDP_CONTOUR_HIST_INIT + (i << 2)),
- 0, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base,
+ (MDP_CONTOUR_HIST_INIT + (i << 2)), 0);
return 0;
}
@@ -1055,9 +1011,9 @@ static int init_tdshp(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
/* Enable FIFO */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));
return reset_luma_hist(ctx, cmd);
}
@@ -1072,7 +1028,7 @@ static int config_tdshp_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.cfg);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));
return 0;
}
@@ -1086,26 +1042,24 @@ static int config_tdshp_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE,
- reg, MDP_TDSHP_INPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET,
- reg, 0x00FF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET, reg,
+ 0x00FF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE,
- reg, MDP_TDSHP_OUTPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg);
return 0;
}
@@ -1122,21 +1076,19 @@ static int init_color(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_START, 0x1, BIT(1) | BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_WIN_X_MAIN, 0xFFFF0000, 0xFFFFFFFF);
- MM_REG_WRITE(cmd, subsys_id, base,
- MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000, 0xFFFFFFFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_START, 0x1,
+ BIT(1) | BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_X_MAIN, 0xFFFF0000);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000);
/* Reset color matrix */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));
/* Enable interrupt */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);
return 0;
}
@@ -1151,8 +1103,7 @@ static int config_color_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.start);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START,
- reg, MDP_COLOR_START_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START, reg);
return 0;
}
@@ -1166,13 +1117,13 @@ static int config_color_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_hsize);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
- reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
+ reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_vsize);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
- reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
+ reg, 0x00003FFF);
return 0;
}
@@ -1190,9 +1141,9 @@ static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u8 subsys_id = ctx->comp->subsys_id;
/* CCORR enable */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
/* Relay mode */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
return 0;
}
@@ -1214,8 +1165,8 @@ static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
hsize = csf_r - csf_l + 1;
vsize = csf_b - csf_t + 1;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
- (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_SIZE,
+ (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
return 0;
}
@@ -1231,7 +1182,7 @@ static int init_aal(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u16 subsys_id = ctx->comp->subsys_id;
/* Always set MDP_AAL enable to 1 */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));
return 0;
}
@@ -1246,11 +1197,11 @@ static int config_aal_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.cfg_main);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.cfg);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));
return 0;
}
@@ -1264,18 +1215,16 @@ static int config_aal_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE,
- reg, MDP_AAL_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip_ofst);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET,
- reg, 0x00FF00FF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET, reg,
+ 0x00FF00FF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE,
- reg, MDP_AAL_OUTPUT_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE, reg);
return 0;
}
@@ -1293,7 +1242,7 @@ static int init_hdr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
u16 subsys_id = ctx->comp->subsys_id;
/* Always set MDP_HDR enable to 1 */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));
return 0;
}
@@ -1308,11 +1257,11 @@ static int config_hdr_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.top);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.relay);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));
return 0;
}
@@ -1326,37 +1275,36 @@ static int config_hdr_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].win_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS,
- reg, MDP_HDR_TILE_POS_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].src);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hdr_top);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));
/* Enable histogram */
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_addr);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));
return 0;
}
@@ -1373,8 +1321,8 @@ static int init_fg(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));
return 0;
}
@@ -1389,11 +1337,11 @@ static int config_fg_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.ctrl_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.ck_en);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);
return 0;
}
@@ -1407,11 +1355,11 @@ static int config_fg_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_0);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg);
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_1);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg);
return 0;
}
@@ -1428,14 +1376,11 @@ static int init_ovl(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN,
- BIT(0), MDP_OVL_EN_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN, BIT(0));
/* Set to relay mode */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON,
- BIT(9), MDP_OVL_SRC_CON_MASK);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON,
- BIT(0), MDP_OVL_DP_CON_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, BIT(9));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON, BIT(0));
return 0;
}
@@ -1450,11 +1395,11 @@ static int config_ovl_frame(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.L0_con);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.src_con);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));
+ MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));
return 0;
}
@@ -1468,14 +1413,12 @@ static int config_ovl_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].L0_src_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE,
- reg, MDP_OVL_L0_SRC_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE, reg);
/* Setup output size */
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].roi_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE,
- reg, MDP_OVL_ROI_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE, reg);
return 0;
}
@@ -1492,13 +1435,10 @@ static int init_pad(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
phys_addr_t base = ctx->comp->reg_base;
u16 subsys_id = ctx->comp->subsys_id;
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON,
- BIT(1), MDP_PAD_CON_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON, BIT(1));
/* Reset */
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE,
- 0, MDP_PAD_W_SIZE_MASK);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE,
- 0, MDP_PAD_H_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE, 0);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE, 0);
return 0;
}
@@ -1512,8 +1452,7 @@ static int config_pad_subfrm(struct mdp_comp_ctx *ctx,
if (CFG_CHECK(MT8195, p_id))
reg = CFG_COMP(MT8195, ctx->param, pad.subfrms[index].pic_size);
- MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE,
- reg, MDP_PAD_PIC_SIZE_MASK);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE, reg);
return 0;
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
index 3e5d2da1c807..681906c16419 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
@@ -9,18 +9,18 @@
#include "mtk-mdp3-cmdq.h"
-#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask, ...) \
- cmdq_pkt_write_mask(&((cmd)->pkt), id, \
- (base) + (ofst), (val), (mask), ##__VA_ARGS__)
-
-#define MM_REG_WRITE(cmd, id, base, ofst, val, mask, ...) \
+#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask) \
do { \
typeof(mask) (m) = (mask); \
- MM_REG_WRITE_MASK(cmd, id, base, ofst, val, \
+ cmdq_pkt_write_mask(&((cmd)->pkt), id, (base) + (ofst), \
+ (val), \
(((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
- (0xffffffff) : (m), ##__VA_ARGS__); \
+ (0xffffffff) : (m)); \
} while (0)
+#define MM_REG_WRITE(cmd, id, base, ofst, val) \
+ cmdq_pkt_write(&((cmd)->pkt), id, (base) + (ofst), (val))
+
#define MM_REG_WAIT(cmd, evt) \
do { \
typeof(cmd) (c) = (cmd); \
@@ -49,20 +49,17 @@ do { \
cmdq_pkt_set_event(&((c)->pkt), (e)); \
} while (0)
-#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask, ...) \
+#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask) \
do { \
typeof(_mask) (_m) = (_mask); \
cmdq_pkt_poll_mask(&((cmd)->pkt), id, \
- (base) + (ofst), (val), (_m), ##__VA_ARGS__); \
+ (base) + (ofst), (val), \
+ (((_m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (_m)); \
} while (0)
-#define MM_REG_POLL(cmd, id, base, ofst, val, mask, ...) \
-do { \
- typeof(mask) (m) = (mask); \
- MM_REG_POLL_MASK((cmd), id, base, ofst, val, \
- (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
- (0xffffffff) : (m), ##__VA_ARGS__); \
-} while (0)
+#define MM_REG_POLL(cmd, id, base, ofst, val) \
+ cmdq_pkt_poll(&((cmd)->pkt), id, (base) + (ofst), (val))
enum mtk_mdp_comp_id {
MDP_COMP_NONE = -1, /* Invalid engine */
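The reworked MM_REG_WRITE_MASK above keeps the mask-collapse rule that the old MM_REG_WRITE wrapper carried: when the caller's mask already covers the register's full documented field mask, the hardware write is issued with 0xffffffff instead of the narrower mask. A minimal standalone sketch of that selection follows; MDP_EXAMPLE_REG_MASK is a hypothetical value used only for illustration, not a real MDP register.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register field mask, for illustration only. */
#define MDP_EXAMPLE_REG_MASK 0x00FF00FFu

/* Mirrors the (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? 0xffffffff : (m)
 * selection done inside MM_REG_WRITE_MASK. */
static uint32_t effective_mask(uint32_t m, uint32_t field_mask)
{
	return ((m & field_mask) == field_mask) ? 0xffffffffu : m;
}

int main(void)
{
	/* A mask covering the whole field collapses to a full-word write. */
	printf("0x%08x\n", effective_mask(MDP_EXAMPLE_REG_MASK, MDP_EXAMPLE_REG_MASK));
	/* A partial mask is passed through unchanged. */
	printf("0x%08x\n", effective_mask(0x1u, MDP_EXAMPLE_REG_MASK));
	return 0;
}

Callers that update a whole register drop the mask argument entirely and use plain MM_REG_WRITE, which after this change maps straight onto cmdq_pkt_write().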
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 5e94ff0d0756..f571f561f070 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -312,6 +312,8 @@ static int mdp_probe(struct platform_device *pdev)
ret = PTR_ERR(mdp->cmdq_clt[i]);
goto err_mbox_destroy;
}
+
+ mdp->cmdq_shift_pa[i] = cmdq_get_shift_pa(mdp->cmdq_clt[i]->chan);
}
init_waitqueue_head(&mdp->callback_wq);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
index 430251f63754..05cade1d098e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -126,6 +126,7 @@ struct mdp_dev {
u32 id_count;
struct ida mdp_ida;
struct cmdq_client *cmdq_clt[MDP_PP_MAX];
+ u8 cmdq_shift_pa[MDP_PP_MAX];
wait_queue_head_t callback_wq;
struct v4l2_device v4l2_dev;
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 4f5d75645b2b..024cd8ee1709 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -1665,9 +1665,9 @@ static int npcm_video_ece_init(struct npcm_video *video)
dev_info(dev, "Support HEXTILE pixel format\n");
ece_pdev = of_find_device_by_node(ece_node);
- if (IS_ERR(ece_pdev)) {
+ if (!ece_pdev) {
dev_err(dev, "Failed to find ECE device\n");
- return PTR_ERR(ece_pdev);
+ return -ENODEV;
}
of_node_put(ece_node);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 7f5fe551179b..1221b309a916 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -2677,11 +2677,12 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg)
int i;
for (i = 0; i < jpeg->num_domains; i++) {
- if (jpeg->pd_dev[i] && !pm_runtime_suspended(jpeg->pd_dev[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]) &&
+ !pm_runtime_suspended(jpeg->pd_dev[i]))
pm_runtime_force_suspend(jpeg->pd_dev[i]);
- if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_link[i]))
device_link_del(jpeg->pd_link[i]);
- if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i]))
+ if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]))
dev_pm_domain_detach(jpeg->pd_dev[i], true);
jpeg->pd_dev[i] = NULL;
jpeg->pd_link[i] = NULL;
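The mxc-jpeg cleanup above replaces the split "ptr && !IS_ERR(ptr)" checks with IS_ERR_OR_NULL(), which rejects both a NULL pointer and an ERR_PTR()-encoded error in a single test. A small standalone sketch of that convention, assuming simplified stand-ins for the kernel helpers (the constant 4095 mirrors the kernel's MAX_ERRNO):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for ERR_PTR()/IS_ERR()/IS_ERR_OR_NULL(). */
static void *err_ptr(long err)           { return (void *)err; }
static int is_err(const void *p)         { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static int is_err_or_null(const void *p) { return !p || is_err(p); }

int main(void)
{
	int valid;

	printf("%d\n", is_err_or_null(NULL));          /* 1: NULL is rejected */
	printf("%d\n", is_err_or_null(err_ptr(-12)));  /* 1: an error pointer is rejected */
	printf("%d\n", is_err_or_null(&valid));        /* 0: a real pointer passes */
	return 0;
}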
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index aaf58063677c..1e79b1211b60 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -307,6 +307,19 @@ static const struct mxc_isi_plat_data mxc_imx8mp_data = {
.has_36bit_dma = true,
};
+static const struct mxc_isi_plat_data mxc_imx8ulp_data = {
+ .model = MXC_ISI_IMX8ULP,
+ .num_ports = 1,
+ .num_channels = 1,
+ .reg_offset = 0x0,
+ .ier_reg = &mxc_imx8_isi_ier_v2,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+ .clks = mxc_imx8mn_clks,
+ .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
+ .buf_active_reverse = true,
+ .has_36bit_dma = false,
+};
+
static const struct mxc_isi_plat_data mxc_imx93_data = {
.model = MXC_ISI_IMX93,
.num_ports = 1,
@@ -528,6 +541,7 @@ static void mxc_isi_remove(struct platform_device *pdev)
static const struct of_device_id mxc_isi_of_match[] = {
{ .compatible = "fsl,imx8mn-isi", .data = &mxc_imx8mn_data },
{ .compatible = "fsl,imx8mp-isi", .data = &mxc_imx8mp_data },
+ { .compatible = "fsl,imx8ulp-isi", .data = &mxc_imx8ulp_data },
{ .compatible = "fsl,imx93-isi", .data = &mxc_imx93_data },
{ /* sentinel */ },
};
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 2810ebe9b5f7..9c7fe9e5f941 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -158,6 +158,7 @@ struct mxc_gasket_ops {
enum model {
MXC_ISI_IMX8MN,
MXC_ISI_IMX8MP,
+ MXC_ISI_IMX8ULP,
MXC_ISI_IMX93,
};
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
index c0ba34ea82fd..8654150728a8 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
@@ -861,6 +861,7 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
const struct mxc_isi_format_info *info,
const struct v4l2_pix_format_mplane *pix)
{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb2);
unsigned int i;
for (i = 0; i < info->mem_planes; i++) {
@@ -875,6 +876,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
vb2_set_plane_payload(vb2, i, size);
}
+ v4l2_buf->field = pix->field;
+
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index df7e93a5a4f6..f341f7b7fd8a 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -505,9 +505,9 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u32 val;
switch (csiphy->camss->res->version) {
- case CAMSS_845:
- r = &lane_regs_sdm845[0][0];
- array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
+ case CAMSS_7280:
+ r = &lane_regs_sm8250[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
break;
case CAMSS_8250:
r = &lane_regs_sm8250[0][0];
@@ -517,6 +517,10 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
r = &lane_regs_sc8280xp[0][0];
array_size = ARRAY_SIZE(lane_regs_sc8280xp[0]);
break;
+ case CAMSS_845:
+ r = &lane_regs_sdm845[0][0];
+ array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
+ break;
default:
WARN(1, "unknown cspi version\n");
return;
@@ -557,9 +561,10 @@ static bool csiphy_is_gen2(u32 version)
bool ret = false;
switch (version) {
- case CAMSS_845:
+ case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
ret = true;
break;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 5af2b382a843..3791c2d8a6cf 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -103,6 +103,11 @@ const struct csiphy_formats csiphy_formats_8x96 = {
.formats = formats_8x96
};
+const struct csiphy_formats csiphy_formats_sc7280 = {
+ .nformats = ARRAY_SIZE(formats_sdm845),
+ .formats = formats_sdm845
+};
+
const struct csiphy_formats csiphy_formats_sdm845 = {
.nformats = ARRAY_SIZE(formats_sdm845),
.formats = formats_sdm845
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index eebc1ff1cfab..90cc3f976643 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -26,6 +26,12 @@ struct csiphy_lane {
u8 pol;
};
+/**
+ * struct csiphy_lanes_cfg - CSIPHY lanes configuration
+ * @num_data: number of data lanes
+ * @data: data lanes configuration
+ * @clk: clock lane configuration (only for D-PHY)
+ */
struct csiphy_lanes_cfg {
int num_data;
struct csiphy_lane *data;
@@ -111,6 +117,7 @@ void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
extern const struct csiphy_formats csiphy_formats_8x16;
extern const struct csiphy_formats csiphy_formats_8x96;
+extern const struct csiphy_formats csiphy_formats_sc7280;
extern const struct csiphy_formats csiphy_formats_sdm845;
extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 80a62ba11295..95f6a1ac7eaf 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -334,11 +334,12 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
break;
- case CAMSS_8x96:
case CAMSS_660:
- case CAMSS_845:
+ case CAMSS_7280:
+ case CAMSS_8x96:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -1693,9 +1694,10 @@ static int vfe_bpl_align(struct vfe_device *vfe)
int ret = 8;
switch (vfe->camss->res->version) {
- case CAMSS_845:
+ case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_845:
ret = 16;
break;
default:
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 9fb31f4c18ad..a85e9df0f301 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1266,6 +1266,310 @@ static const struct resources_icc icc_res_sm8250[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_7280[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 300000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sc7280
+ }
+ },
+};
+
+static const struct camss_subdev_resources csid_res_7280[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe2_csid", "vfe2_cphy_rx", "vfe2" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 }
+ },
+
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe_lite0_csid", "vfe_lite0_cphy_rx", "vfe_lite0" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 }
+ },
+
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 */
+ {
+ .regulators = {},
+
+ .clock = { "vfe_lite1_csid", "vfe_lite1_cphy_rx", "vfe_lite1" },
+ .clock_rate = { { 300000000, 400000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 }
+ },
+
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_7280[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe0",
+ "vfe0_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe1",
+ "vfe1_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 */
+ {
+ .regulators = {},
+
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe2",
+ "vfe2_axi", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 380000000, 510000000, 637000000, 760000000 },
+ { 0 },
+ { 0 } },
+
+ .reg = { "vfe2" },
+ .interrupt = { "vfe2" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .hw_ops = &vfe_ops_170,
+ .has_pd = true,
+ .pd_name = "ife2",
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 (lite) */
+ {
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
+ "vfe_lite0", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 },
+ { 0 } },
+
+ .regulators = {},
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 (lite) */
+ {
+ .clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
+ "vfe_lite1", "gcc_cam_hf_axi" },
+ .clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
+ { 80000000 },
+ { 0 },
+ { 320000000, 400000000, 480000000, 600000000 },
+ { 0 } },
+
+ .regulators = {},
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_sc7280[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
/* CSIPHY0 */
{
@@ -1995,6 +2299,24 @@ static int camss_init_subdevices(struct camss *camss)
/*
* camss_link_entities - Register subdev nodes and create links
+ * camss_link_err - print error in case link creation fails
+ * @src_name: name for source of the link
+ * @sink_name: name for sink of the link
+ */
+inline void camss_link_err(struct camss *camss,
+ const char *src_name,
+ const char *sink_name,
+ int ret)
+{
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ src_name,
+ sink_name,
+ ret);
+}
+
+/*
+ * camss_link_entities - Register subdev nodes and create links
* @camss: CAMSS device
*
* Return 0 on success or a negative error code on failure
@@ -2012,11 +2334,10 @@ static int camss_link_entities(struct camss *camss)
MSM_CSID_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->csiphy[i].subdev.entity.name,
- camss->csid[j].subdev.entity.name,
- ret);
+ camss_link_err(camss,
+ camss->csiphy[i].subdev.entity.name,
+ camss->csid[j].subdev.entity.name,
+ ret);
return ret;
}
}
@@ -2031,11 +2352,10 @@ static int camss_link_entities(struct camss *camss)
MSM_ISPIF_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->csid[i].subdev.entity.name,
- camss->ispif->line[j].subdev.entity.name,
- ret);
+ camss_link_err(camss,
+ camss->csid[i].subdev.entity.name,
+ camss->ispif->line[j].subdev.entity.name,
+ ret);
return ret;
}
}
@@ -2053,11 +2373,9 @@ static int camss_link_entities(struct camss *camss)
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- ispif->entity.name,
- vfe->entity.name,
- ret);
+ camss_link_err(camss, ispif->entity.name,
+ vfe->entity.name,
+ ret);
return ret;
}
}
@@ -2074,11 +2392,9 @@ static int camss_link_entities(struct camss *camss)
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- csid->entity.name,
- vfe->entity.name,
- ret);
+ camss_link_err(camss, csid->entity.name,
+ vfe->entity.name,
+ ret);
return ret;
}
}
@@ -2227,9 +2543,9 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
input, MSM_CSIPHY_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- sensor->name, input->name, ret);
+ camss_link_err(camss, sensor->name,
+ input->name,
+ ret);
return ret;
}
}
@@ -2622,14 +2938,29 @@ static const struct camss_resources sc8280xp_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sc7280_resources = {
+ .version = CAMSS_7280,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_7280,
+ .csid_res = csid_res_7280,
+ .vfe_res = vfe_res_7280,
+ .icc_res = icc_res_sc7280,
+ .icc_path_num = ARRAY_SIZE(icc_res_sc7280),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_7280),
+ .csid_num = ARRAY_SIZE(csid_res_7280),
+ .vfe_num = ARRAY_SIZE(vfe_res_7280),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
{ .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+ { .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
+ { .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
- { .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 9da7f48f5dd7..9a046eea334f 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -77,13 +77,14 @@ enum pm_domain {
};
enum camss_version {
+ CAMSS_660,
+ CAMSS_7280,
CAMSS_8x16,
CAMSS_8x53,
CAMSS_8x96,
- CAMSS_660,
- CAMSS_845,
CAMSS_8250,
CAMSS_8280XP,
+ CAMSS_845,
};
enum icc_count {
diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig
index bfd50e8f3421..bc2e410b29cb 100644
--- a/drivers/media/platform/qcom/venus/Kconfig
+++ b/drivers/media/platform/qcom/venus/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_QCOM_VENUS
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV && QCOM_SMEM
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select OF_DYNAMIC if ARCH_QCOM
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 2d27c5167246..77d48578ecd2 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -286,6 +286,89 @@ static irqreturn_t venus_isr_thread(int irq, void *dev_id)
return ret;
}
+#if defined(CONFIG_OF_DYNAMIC)
+static int venus_add_video_core(struct venus_core *core, const char *node_name,
+ const char *compat)
+{
+ struct of_changeset *ocs = core->ocs;
+ struct device *dev = core->dev;
+ struct device_node *np, *enp;
+ int ret;
+
+ if (!node_name)
+ return 0;
+
+ enp = of_find_node_by_name(dev->of_node, node_name);
+ if (enp) {
+ of_node_put(enp);
+ return 0;
+ }
+
+ np = of_changeset_create_node(ocs, dev->of_node, node_name);
+ if (!np) {
+ dev_err(dev, "Unable to create new node\n");
+ return -ENODEV;
+ }
+
+ ret = of_changeset_add_prop_string(ocs, np, "compatible", compat);
+ if (ret)
+ dev_err(dev, "unable to add %s\n", compat);
+
+ of_node_put(np);
+
+ return ret;
+}
+
+static int venus_add_dynamic_nodes(struct venus_core *core)
+{
+ struct device *dev = core->dev;
+ int ret;
+
+ core->ocs = kmalloc(sizeof(*core->ocs), GFP_KERNEL);
+ if (!core->ocs)
+ return -ENOMEM;
+
+ of_changeset_init(core->ocs);
+
+ ret = venus_add_video_core(core, core->res->dec_nodename, "venus-decoder");
+ if (ret)
+ goto err;
+
+ ret = venus_add_video_core(core, core->res->enc_nodename, "venus-encoder");
+ if (ret)
+ goto err;
+
+ ret = of_changeset_apply(core->ocs);
+ if (ret) {
+ dev_err(dev, "applying changeset fail ret %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ of_changeset_destroy(core->ocs);
+ kfree(core->ocs);
+ core->ocs = NULL;
+ return ret;
+}
+
+static void venus_remove_dynamic_nodes(struct venus_core *core)
+{
+ if (core->ocs) {
+ of_changeset_revert(core->ocs);
+ of_changeset_destroy(core->ocs);
+ kfree(core->ocs);
+ }
+}
+#else
+static int venus_add_dynamic_nodes(struct venus_core *core)
+{
+ return 0;
+}
+
+static void venus_remove_dynamic_nodes(struct venus_core *core) {}
+#endif
+
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -365,9 +448,15 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
+ if (core->res->dec_nodename || core->res->enc_nodename) {
+ ret = venus_add_dynamic_nodes(core);
+ if (ret)
+ goto err_runtime_disable;
+ }
+
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
- goto err_runtime_disable;
+ goto err_remove_dynamic_nodes;
ret = venus_firmware_init(core);
if (ret)
@@ -411,6 +500,8 @@ err_firmware_deinit:
venus_firmware_deinit(core);
err_of_depopulate:
of_platform_depopulate(dev);
+err_remove_dynamic_nodes:
+ venus_remove_dynamic_nodes(core);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
@@ -443,6 +534,8 @@ static void venus_remove(struct platform_device *pdev)
venus_firmware_deinit(core);
+ venus_remove_dynamic_nodes(core);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -506,18 +599,14 @@ err_cpucfg_path:
void venus_close_common(struct venus_inst *inst)
{
/*
- * First, remove the inst from the ->instances list, so that
- * to_instance() will return NULL.
- */
- hfi_session_destroy(inst);
- /*
- * Second, make sure we don't have IRQ/IRQ-thread currently running
+ * Make sure we don't have IRQ/IRQ-thread currently running
* or pending execution, which would race with the inst destruction.
*/
synchronize_irq(inst->core->irq);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
+ hfi_session_destroy(inst);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
@@ -582,6 +671,8 @@ static const struct venus_resources msm8916_res = {
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
.fwname = "qcom/venus-1.8/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl msm8996_freq_table[] = {
@@ -791,6 +882,8 @@ static const struct venus_resources sdm845_res_v2 = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.2/venus.mbn",
+ .dec_nodename = "video-core0",
+ .enc_nodename = "video-core1",
};
static const struct freq_tbl sc7180_freq_table[] = {
@@ -839,6 +932,8 @@ static const struct venus_resources sc7180_res = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.4/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl sm8250_freq_table[] = {
@@ -894,6 +989,8 @@ static const struct venus_resources sm8250_res = {
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.fwname = "qcom/vpu-1.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct freq_tbl sc7280_freq_table[] = {
@@ -956,6 +1053,8 @@ static const struct venus_resources sc7280_res = {
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/vpu-2.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
};
static const struct of_device_id venus_dt_match[] = {
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 44f1c3bc4186..abeeafa86697 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -90,6 +90,8 @@ struct venus_resources {
u32 cp_nonpixel_start;
u32 cp_nonpixel_size;
const char *fwname;
+ const char *enc_nodename;
+ const char *dec_nodename;
};
enum venus_fmt {
@@ -169,6 +171,7 @@ struct venus_format {
* @root: debugfs root directory
* @venus_ver: the venus firmware version
* @dump_core: a flag indicating that a core dump is required
+ * @ocs: OF changeset pointer
*/
struct venus_core {
void __iomem *base;
@@ -231,6 +234,7 @@ struct venus_core {
u32 rev;
} venus_ver;
unsigned long dump_core;
+ struct of_changeset *ocs;
};
struct vdec_controls {
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index e00aedb41d16..675e6fd1e9fa 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -138,29 +138,6 @@ int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
return core->ops->core_trigger_ssr(core, type);
}
-int hfi_core_ping(struct venus_core *core)
-{
- int ret;
-
- mutex_lock(&core->lock);
-
- ret = core->ops->core_ping(core, 0xbeef);
- if (ret)
- goto unlock;
-
- ret = wait_for_completion_timeout(&core->done, TIMEOUT);
- if (!ret) {
- ret = -ETIMEDOUT;
- goto unlock;
- }
- ret = 0;
- if (core->error != HFI_ERR_NONE)
- ret = -ENODEV;
-unlock:
- mutex_unlock(&core->lock);
- return ret;
-}
-
static int wait_session_msg(struct venus_inst *inst)
{
int ret;
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index f25d412d6553..0338841d5992 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -108,7 +108,6 @@ struct hfi_inst_ops {
struct hfi_ops {
int (*core_init)(struct venus_core *core);
int (*core_deinit)(struct venus_core *core);
- int (*core_ping)(struct venus_core *core, u32 cookie);
int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type);
int (*session_init)(struct venus_inst *inst, u32 session_type,
@@ -152,7 +151,6 @@ int hfi_core_deinit(struct venus_core *core, bool blocking);
int hfi_core_suspend(struct venus_core *core);
int hfi_core_resume(struct venus_core *core, bool force);
int hfi_core_trigger_ssr(struct venus_core *core, u32 type);
-int hfi_core_ping(struct venus_core *core);
int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops);
void hfi_session_destroy(struct venus_inst *inst);
int hfi_session_init(struct venus_inst *inst, u32 pixfmt);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index f9437b6412b9..a9167867063c 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1178,16 +1178,6 @@ static int venus_core_deinit(struct venus_core *core)
return 0;
}
-static int venus_core_ping(struct venus_core *core, u32 cookie)
-{
- struct venus_hfi_device *hdev = to_hfi_priv(core);
- struct hfi_sys_ping_pkt pkt;
-
- pkt_sys_ping(&pkt, cookie);
-
- return venus_iface_cmdq_write(hdev, &pkt, false);
-}
-
static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
@@ -1639,7 +1629,6 @@ static int venus_suspend(struct venus_core *core)
static const struct hfi_ops venus_hfi_ops = {
.core_init = venus_core_init,
.core_deinit = venus_core_deinit,
- .core_ping = venus_core_ping,
.core_trigger_ssr = venus_core_trigger_ssr,
.session_init = venus_session_init,
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 98c22b9f9372..9f82882b77bc 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -1697,10 +1697,6 @@ static int vdec_open(struct file *file)
if (ret)
goto err_free;
- ret = hfi_session_create(inst, &vdec_hfi_ops);
- if (ret)
- goto err_ctrl_deinit;
-
vdec_inst_init(inst);
ida_init(&inst->dpb_ids);
@@ -1712,15 +1708,19 @@ static int vdec_open(struct file *file)
inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
- goto err_session_destroy;
+ goto err_ctrl_deinit;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
- goto err_m2m_release;
+ goto err_m2m_dev_release;
}
+ ret = hfi_session_create(inst, &vdec_hfi_ops);
+ if (ret)
+ goto err_m2m_ctx_release;
+
v4l2_fh_init(&inst->fh, core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
@@ -1730,10 +1730,10 @@ static int vdec_open(struct file *file)
return 0;
-err_m2m_release:
+err_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+err_m2m_dev_release:
v4l2_m2m_release(inst->m2m_dev);
-err_session_destroy:
- hfi_session_destroy(inst);
err_ctrl_deinit:
v4l2_ctrl_handler_free(&inst->ctrl_handler);
err_free:
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index c1c543535aaf..c7f8e37dba9b 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1492,10 +1492,6 @@ static int venc_open(struct file *file)
if (ret)
goto err_free;
- ret = hfi_session_create(inst, &venc_hfi_ops);
- if (ret)
- goto err_ctrl_deinit;
-
venc_inst_init(inst);
/*
@@ -1505,15 +1501,19 @@ static int venc_open(struct file *file)
inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
- goto err_session_destroy;
+ goto err_ctrl_deinit;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
- goto err_m2m_release;
+ goto err_m2m_dev_release;
}
+ ret = hfi_session_create(inst, &venc_hfi_ops);
+ if (ret)
+ goto err_m2m_ctx_release;
+
v4l2_fh_init(&inst->fh, core->vdev_enc);
inst->fh.ctrl_handler = &inst->ctrl_handler;
@@ -1523,10 +1523,10 @@ static int venc_open(struct file *file)
return 0;
-err_m2m_release:
+err_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+err_m2m_dev_release:
v4l2_m2m_release(inst->m2m_dev);
-err_session_destroy:
- hfi_session_destroy(inst);
err_ctrl_deinit:
v4l2_ctrl_handler_free(&inst->ctrl_handler);
err_free:
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 27ffdd28cbf7..0a53dd47d7bf 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -183,17 +183,19 @@ struct rcar_csi2;
#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n) (0x23840 + ((n) * 2)) /* n = 0 - 11 */
#define V4H_CORE_DIG_RW_COMMON_REG(n) (0x23880 + ((n) * 2)) /* n = 0 - 15 */
#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n) (0x239e0 + ((n) * 2)) /* n = 0 - 3 */
-#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
/* V4H C-PHY */
#define V4H_CORE_DIG_RW_TRIO0_REG(n) (0x22100 + ((n) * 2)) /* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO1_REG(n) (0x22500 + ((n) * 2)) /* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO2_REG(n) (0x22900 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_0_RW_CFG_0_REG 0x2a000
#define V4H_CORE_DIG_CLANE_0_RW_LP_0_REG 0x2a080
#define V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(n) (0x2a100 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG 0x2a480
#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n) (0x2a500 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG 0x2a800
#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG 0x2a880
#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n) (0x2a900 + ((n) * 2)) /* n = 0 - 6 */
@@ -672,6 +674,21 @@ static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
return NULL;
}
+struct rcsi2_cphy_line_order {
+ enum v4l2_mbus_csi2_cphy_line_orders_type order;
+ u16 cfg;
+ u16 ctrl29;
+};
+
+static const struct rcsi2_cphy_line_order rcsi2_cphy_line_orders[] = {
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC, .cfg = 0x0, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB, .cfg = 0xa, .ctrl29 = 0x1 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC, .cfg = 0xc, .ctrl29 = 0x1 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA, .cfg = 0x5, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB, .cfg = 0x3, .ctrl29 = 0x0 },
+ { .order = V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA, .cfg = 0x9, .ctrl29 = 0x1 }
+};
+
enum rcar_csi2_pads {
RCAR_CSI2_SINK,
RCAR_CSI2_SOURCE_VC0,
@@ -722,6 +739,7 @@ struct rcar_csi2 {
bool cphy;
unsigned short lanes;
unsigned char lane_swap[4];
+ enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[3];
};
static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
@@ -754,11 +772,24 @@ static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
iowrite32(data, priv->base + reg);
}
+static u16 rcsi2_read16(struct rcar_csi2 *priv, unsigned int reg)
+{
+ return ioread16(priv->base + reg);
+}
+
static void rcsi2_write16(struct rcar_csi2 *priv, unsigned int reg, u16 data)
{
iowrite16(data, priv->base + reg);
}
+static void rcsi2_modify16(struct rcar_csi2 *priv, unsigned int reg, u16 data, u16 mask)
+{
+ u16 val;
+
+ val = rcsi2_read16(priv, reg) & ~mask;
+ rcsi2_write16(priv, reg, val | data);
+}
+
static int rcsi2_phtw_write(struct rcar_csi2 *priv, u8 data, u8 code)
{
unsigned int timeout;
@@ -1112,6 +1143,26 @@ static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv,
return 0;
}
+static void rsci2_set_line_order(struct rcar_csi2 *priv,
+ enum v4l2_mbus_csi2_cphy_line_orders_type order,
+ unsigned int cfgreg, unsigned int ctrlreg)
+{
+ const struct rcsi2_cphy_line_order *info = NULL;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(rcsi2_cphy_line_orders); i++) {
+ if (rcsi2_cphy_line_orders[i].order == order) {
+ info = &rcsi2_cphy_line_orders[i];
+ break;
+ }
+ }
+
+ if (!info)
+ return;
+
+ rcsi2_modify16(priv, cfgreg, info->cfg, 0x000f);
+ rcsi2_modify16(priv, ctrlreg, info->ctrl29, 0x0100);
+}
+
static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match)
{
unsigned int timeout;
@@ -1189,12 +1240,18 @@ static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(1), conf->trio1);
rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(1), conf->trio1);
- /*
- * Configure pin-swap.
- * TODO: This registers is not documented yet, the values should depend
- * on the 'clock-lanes' and 'data-lanes' devicetree properties.
- */
- rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG, 0xf5);
+ /* Configure data line order. */
+ rsci2_set_line_order(priv, priv->line_orders[0],
+ V4H_CORE_DIG_CLANE_0_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9));
+ rsci2_set_line_order(priv, priv->line_orders[1],
+ V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(9));
+ rsci2_set_line_order(priv, priv->line_orders[2],
+ V4H_CORE_DIG_CLANE_2_RW_CFG_0_REG,
+ V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(9));
+
+ /* TODO: This register is not documented. */
rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000);
/* Leave Shutdown mode */
@@ -1349,15 +1406,15 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
static const struct phtw_value step2[] = {
{ .data = 0x00, .code = 0x00 },
{ .data = 0x80, .code = 0xe0 },
- { .data = 0x01, .code = 0xe1 },
+ { .data = 0x31, .code = 0xe1 },
{ .data = 0x06, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x08, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x0a, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x0c, .code = 0x00 },
- { .data = 0x0f, .code = 0x11 },
+ { .data = 0x11, .code = 0x11 },
{ .data = 0x01, .code = 0x00 },
{ .data = 0x31, .code = 0xaa },
{ .data = 0x05, .code = 0x00 },
@@ -1370,6 +1427,11 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
{ .data = 0x05, .code = 0x09 },
};
+ static const struct phtw_value step3[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x06, .code = 0xab },
+ };
+
if (priv->info->hsfreqrange) {
ret = rcsi2_set_phypll(priv, mbps);
if (ret)
@@ -1400,7 +1462,7 @@ static int rcsi2_init_common_v4m(struct rcar_csi2 *priv, unsigned int mbps)
return ret;
}
- return ret;
+ return rcsi2_phtw_write_array(priv, step3, ARRAY_SIZE(step3));
}
static int rcsi2_start_receiver_v4m(struct rcar_csi2 *priv,
@@ -1732,6 +1794,9 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
}
}
+ for (i = 0; i < ARRAY_SIZE(priv->line_orders); i++)
+ priv->line_orders[i] = vep->bus.mipi_csi2.line_orders[i];
+
return 0;
}
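The rcar-csi2 change above replaces the hard-coded pin-swap write with a per-lane lookup in rcsi2_cphy_line_orders[], keyed on the line order parsed from the endpoint. A standalone sketch of that table-driven lookup, using the same order/cfg/ctrl29 values but plain stand-in types instead of the V4L2 enum:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum line_order { ORDER_ABC, ORDER_ACB, ORDER_BAC, ORDER_BCA, ORDER_CAB, ORDER_CBA };

struct line_order_cfg {
	enum line_order order;
	uint16_t cfg;     /* low nibble written to the lane's RW_CFG_0 register */
	uint16_t ctrl29;  /* bit written to the lane's AFE CTRL_2(9) register */
};

static const struct line_order_cfg line_orders[] = {
	{ ORDER_ABC, 0x0, 0x0 },
	{ ORDER_ACB, 0xa, 0x1 },
	{ ORDER_BAC, 0xc, 0x1 },
	{ ORDER_BCA, 0x5, 0x0 },
	{ ORDER_CAB, 0x3, 0x0 },
	{ ORDER_CBA, 0x9, 0x1 },
};

/* Returns the matching entry, or NULL for an unknown order
 * (the driver silently skips the register writes in that case). */
static const struct line_order_cfg *find_order(enum line_order order)
{
	for (size_t i = 0; i < sizeof(line_orders) / sizeof(line_orders[0]); i++)
		if (line_orders[i].order == order)
			return &line_orders[i];
	return NULL;
}

int main(void)
{
	const struct line_order_cfg *c = find_order(ORDER_BAC);

	if (c)
		printf("cfg=0x%x ctrl29=0x%x\n", c->cfg, c->ctrl29);
	return 0;
}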
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index 17a1af507a27..cd69c8a686d3 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -558,7 +558,7 @@ static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count
goto assert_aresetn;
}
- /* Allocate scratch buffer. */
+ /* Allocate scratch buffer */
cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
&cru->scratch_phys, GFP_KERNEL);
if (!cru->scratch) {
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 8a48e9d91f96..4396348811c8 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index 11c3d7234757..bf55beec0fac 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
index e8917e5630a4..cc6bd7f5b030 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.h
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
#ifndef __RGA_HW_H__
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 1739ac0c8e92..3dccab5fa4a1 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 8105bb2efe57..530e12de73c4 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Jacob Chen <jacob-chen@iotwrt.com>
*/
#ifndef __RGA_H__
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 02339cd94486..6dcefd144d5a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -35,8 +35,6 @@
#define RKISP1_SP_DEV_NAME RKISP1_DRIVER_NAME "_selfpath"
#define RKISP1_MP_DEV_NAME RKISP1_DRIVER_NAME "_mainpath"
-#define RKISP1_MIN_BUFFERS_NEEDED 3
-
enum rkisp1_plane {
RKISP1_PLANE_Y = 0,
RKISP1_PLANE_CB = 1,
@@ -1561,7 +1559,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
q->ops = &rkisp1_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
- q->min_queued_buffers = RKISP1_MIN_BUFFERS_NEEDED;
+ q->min_queued_buffers = 1;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
q->dev = cap->rkisp1->dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 0100b9c3edbe..dc65a7924f8a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -228,6 +228,9 @@ static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
break;
}
+ if (ret)
+ break;
+
/* Parse the endpoint and validate the bus type. */
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (ret) {
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
index 7a48fad1df16..ac67a04e5eeb 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.c
@@ -12,137 +12,6 @@
#include "fimc-is-errno.h"
-const char *fimc_is_param_strerr(unsigned int error)
-{
- switch (error) {
- case ERROR_COMMON_CMD:
- return "ERROR_COMMON_CMD: Invalid Command";
- case ERROR_COMMON_PARAMETER:
- return "ERROR_COMMON_PARAMETER: Invalid Parameter";
- case ERROR_COMMON_SETFILE_LOAD:
- return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
- case ERROR_COMMON_SETFILE_ADJUST:
- return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
- case ERROR_COMMON_SETFILE_INDEX:
- return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
- case ERROR_COMMON_INPUT_PATH:
- return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
- case ERROR_COMMON_INPUT_INIT:
- return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
- case ERROR_COMMON_OUTPUT_PATH:
- return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
- case ERROR_COMMON_OUTPUT_INIT:
- return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
- case ERROR_CONTROL_BYPASS:
- return "ERROR_CONTROL_BYPASS";
- case ERROR_OTF_INPUT_FORMAT:
- return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
- case ERROR_OTF_INPUT_WIDTH:
- return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
- case ERROR_OTF_INPUT_HEIGHT:
- return "ERROR_OTF_INPUT_HEIGHT: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_OTF_INPUT_BIT_WIDTH:
- return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_INPUT_WIDTH:
- return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
- case ERROR_DMA_INPUT_HEIGHT:
- return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
- case ERROR_DMA_INPUT_FORMAT:
- return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
- case ERROR_DMA_INPUT_BIT_WIDTH:
- return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_INPUT_ORDER:
- return "ERROR_DMA_INPUT_ORDER: Invalid order(DRC: YYCbCr,YCbYCr,FD:NO,YYCbCr,YCbYCr,CbCr,CrCb)";
- case ERROR_DMA_INPUT_PLANE:
- return "ERROR_DMA_INPUT_PLANE: Invalid plane (DRC: 3, FD: 1, 2, 3)";
- case ERROR_OTF_OUTPUT_WIDTH:
- return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
- case ERROR_OTF_OUTPUT_HEIGHT:
- return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
- case ERROR_OTF_OUTPUT_FORMAT:
- return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
- case ERROR_OTF_OUTPUT_BIT_WIDTH:
- return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
- case ERROR_DMA_OUTPUT_WIDTH:
- return "ERROR_DMA_OUTPUT_WIDTH";
- case ERROR_DMA_OUTPUT_HEIGHT:
- return "ERROR_DMA_OUTPUT_HEIGHT";
- case ERROR_DMA_OUTPUT_FORMAT:
- return "ERROR_DMA_OUTPUT_FORMAT";
- case ERROR_DMA_OUTPUT_BIT_WIDTH:
- return "ERROR_DMA_OUTPUT_BIT_WIDTH";
- case ERROR_DMA_OUTPUT_PLANE:
- return "ERROR_DMA_OUTPUT_PLANE";
- case ERROR_DMA_OUTPUT_ORDER:
- return "ERROR_DMA_OUTPUT_ORDER";
-
- /* Sensor Error(100~199) */
- case ERROR_SENSOR_I2C_FAIL:
- return "ERROR_SENSOR_I2C_FAIL";
- case ERROR_SENSOR_INVALID_FRAMERATE:
- return "ERROR_SENSOR_INVALID_FRAMERATE";
- case ERROR_SENSOR_INVALID_EXPOSURETIME:
- return "ERROR_SENSOR_INVALID_EXPOSURETIME";
- case ERROR_SENSOR_INVALID_SIZE:
- return "ERROR_SENSOR_INVALID_SIZE";
- case ERROR_SENSOR_INVALID_SETTING:
- return "ERROR_SENSOR_INVALID_SETTING";
- case ERROR_SENSOR_ACTUATOR_INIT_FAIL:
- return "ERROR_SENSOR_ACTUATOR_INIT_FAIL";
- case ERROR_SENSOR_INVALID_AF_POS:
- return "ERROR_SENSOR_INVALID_AF_POS";
- case ERROR_SENSOR_UNSUPPORT_FUNC:
- return "ERROR_SENSOR_UNSUPPORT_FUNC";
- case ERROR_SENSOR_UNSUPPORT_PERI:
- return "ERROR_SENSOR_UNSUPPORT_PERI";
- case ERROR_SENSOR_UNSUPPORT_AF:
- return "ERROR_SENSOR_UNSUPPORT_AF";
-
- /* ISP Error (200~299) */
- case ERROR_ISP_AF_BUSY:
- return "ERROR_ISP_AF_BUSY";
- case ERROR_ISP_AF_INVALID_COMMAND:
- return "ERROR_ISP_AF_INVALID_COMMAND";
- case ERROR_ISP_AF_INVALID_MODE:
- return "ERROR_ISP_AF_INVALID_MODE";
-
- /* DRC Error (300~399) */
- /* FD Error (400~499) */
- case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
- return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
- case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
- return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
- case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
- return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
- case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
- return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID\n";
- case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
- return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
- case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
- return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
- case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
- return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
- case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
- return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
- case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
- return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
- case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
- return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
- case ERROR_FD_CONFIG_ORIENTATION_STATE:
- return "ERROR_FD_CONFIG_ORIENTATION_STATE";
- case ERROR_FD_CONFIG_ORIENTATION_INVALID:
- return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
- case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
- return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
- case ERROR_FD_RESULT:
- return "ERROR_FD_RESULT";
- case ERROR_FD_MODE:
- return "ERROR_FD_MODE";
- default:
- return "Unknown";
- }
-}
-
const char *fimc_is_strerr(unsigned int error)
{
error &= ~IS_ERROR_TIME_OUT_FLAG;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
index 809e117331c0..fa8204ffec7b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-errno.h
@@ -240,6 +240,5 @@ enum fimc_is_error {
};
const char *fimc_is_strerr(unsigned int error);
-const char *fimc_is_param_strerr(unsigned int error);
#endif /* FIMC_IS_ERR_H_ */
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
index 9c816ae3b3e5..443362da8cc8 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.c
@@ -204,15 +204,6 @@ int __is_hw_update_params(struct fimc_is *is)
return ret;
}
-void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
-{
- struct isp_param *isp;
-
- isp = &is->config[is->config_index].isp;
- mf->width = isp->otf_input.width;
- mf->height = isp->otf_input.height;
-}
-
void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf)
{
unsigned int index = is->config_index;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
index 206904674927..10ad02f36fed 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-param.h
@@ -994,7 +994,6 @@ void fimc_is_set_initial_params(struct fimc_is *is);
unsigned int __get_pending_param_count(struct fimc_is *is);
int __is_hw_update_params(struct fimc_is *is);
-void __is_get_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
void __is_set_frame_size(struct fimc_is *is, struct v4l2_mbus_framefmt *mf);
void __is_set_sensor(struct fimc_is *is, int fps);
void __is_set_isp_aa_ae(struct fimc_is *is);
diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index 63f3eecdd7e6..452880b5350c 100644
--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
@@ -940,13 +940,19 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
state->supplies);
goto unlock;
}
- clk_enable(state->clock[CSIS_CLK_GATE]);
+ ret = clk_enable(state->clock[CSIS_CLK_GATE]);
+ if (ret) {
+ phy_power_off(state->phy);
+ regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+ state->supplies);
+ goto unlock;
+ }
}
if (state->flags & ST_STREAMING)
s5pcsis_start_stream(state);
state->flags &= ~ST_SUSPENDED;
- unlock:
+unlock:
mutex_unlock(&state->lock);
return ret ? -EAGAIN : 0;
}
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
index de6e8f151849..221e3c447f36 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-core.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
@@ -527,10 +527,19 @@ static void s3c_camif_remove(struct platform_device *pdev)
static int s3c_camif_runtime_resume(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(camif->clock[CLK_GATE]);
+ if (ret)
+ return ret;
- clk_enable(camif->clock[CLK_GATE]);
/* null op on s3c244x */
- clk_enable(camif->clock[CLK_CAM]);
+ ret = clk_enable(camif->clock[CLK_CAM]);
+ if (ret) {
+ clk_disable(camif->clock[CLK_GATE]);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 2fe3c9228ac5..5f80931f056d 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -774,8 +774,10 @@ static int s5p_mfc_open(struct file *file)
int ret = 0;
mfc_debug_enter();
- if (mutex_lock_interruptible(&dev->mfc_mutex))
- return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&dev->mfc_mutex)) {
+ ret = -ERESTARTSYS;
+ goto err_enter;
+ }
dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
/* Allocate memory for context */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -946,6 +948,7 @@ err_no_ctx:
err_alloc:
dev->num_inst--;
mutex_unlock(&dev->mfc_mutex);
+err_enter:
mfc_debug_leave();
return ret;
}
diff --git a/drivers/media/platform/st/stm32/Kconfig b/drivers/media/platform/st/stm32/Kconfig
index 9df9a2a17728..f12e67bcc9bc 100644
--- a/drivers/media/platform/st/stm32/Kconfig
+++ b/drivers/media/platform/st/stm32/Kconfig
@@ -1,6 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-only
# V4L drivers
+config VIDEO_STM32_CSI
+ tristate "STM32 Camera Serial Interface (CSI) support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_STM32 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This module makes the STM32 Camera Serial Interface (CSI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-csi.
+
config VIDEO_STM32_DCMI
tristate "STM32 Digital Camera Memory Interface (DCMI) support"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/st/stm32/Makefile b/drivers/media/platform/st/stm32/Makefile
index 7ed8297b9b19..9ae57897f030 100644
--- a/drivers/media/platform/st/stm32/Makefile
+++ b/drivers/media/platform/st/stm32/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STM32_CSI) += stm32-csi.o
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp/
stm32-dma2d-objs := dma2d/dma2d.o dma2d/dma2d-hw.o
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
new file mode 100644
index 000000000000..48941aae8c9b
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Camera Serial Interface
+ *
+ * Copyright (C) STMicroelectronics SA 2024
+ * Author: Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define STM32_CSI_CR 0x0000
+#define STM32_CSI_CR_CSIEN BIT(0)
+#define STM32_CSI_CR_VCXSTART(x) BIT(2 + ((x) * 4))
+#define STM32_CSI_CR_VCXSTOP(x) BIT(3 + ((x) * 4))
+#define STM32_CSI_PCR 0x0004
+#define STM32_CSI_PCR_DL1EN BIT(3)
+#define STM32_CSI_PCR_DL0EN BIT(2)
+#define STM32_CSI_PCR_CLEN BIT(1)
+#define STM32_CSI_PCR_PWRDOWN BIT(0)
+#define STM32_CSI_VCXCFGR1(x) ((((x) + 1) * 0x0010) + 0x0)
+#define STM32_CSI_VCXCFGR1_ALLDT BIT(0)
+#define STM32_CSI_VCXCFGR1_DT0EN BIT(1)
+#define STM32_CSI_VCXCFGR1_DT1EN BIT(2)
+#define STM32_CSI_VCXCFGR1_CDTFT_SHIFT 8
+#define STM32_CSI_VCXCFGR1_DT0_SHIFT 16
+#define STM32_CSI_VCXCFGR1_DT0FT_SHIFT 24
+#define STM32_CSI_VCXCFGR2(x) ((((x) + 1) * 0x0010) + 0x4)
+#define STM32_CSI_VCXCFGR2_DT1_SHIFT 0
+#define STM32_CSI_VCXCFGR2_DT1FT_SHIFT 8
+#define STM32_CSI_INPUT_BPP8 2
+#define STM32_CSI_INPUT_BPP10 3
+#define STM32_CSI_INPUT_BPP12 4
+#define STM32_CSI_INPUT_BPP14 5
+#define STM32_CSI_LMCFGR 0x0070
+#define STM32_CSI_LMCFGR_LANENB_SHIFT 8
+#define STM32_CSI_LMCFGR_DLMAP_SHIFT 16
+#define STM32_CSI_IER0 0x0080
+#define STM32_CSI_IER1 0x0084
+#define STM32_CSI_SR0 0x0090
+#define STM32_CSI_SR0_SYNCERRF BIT(30)
+#define STM32_CSI_SR0_SPKTERRF BIT(28)
+#define STM32_CSI_SR0_IDERRF BIT(27)
+#define STM32_CSI_SR0_CECCERRF BIT(26)
+#define STM32_CSI_SR0_ECCERRF BIT(25)
+#define STM32_CSI_SR0_CRCERRF BIT(24)
+#define STM32_CSI_SR0_CCFIFOFF BIT(21)
+#define STM32_CSI_SR0_VCXSTATEF(x) BIT(17 + (x))
+#define STM32_CSI_SR1 0x0094
+#define STM32_CSI_SR1_ECTRLDL1F BIT(12)
+#define STM32_CSI_SR1_ESYNCESCDL1F BIT(11)
+#define STM32_CSI_SR1_EESCDL1F BIT(10)
+#define STM32_CSI_SR1_ESOTSYNCDL1F BIT(9)
+#define STM32_CSI_SR1_ESOTDL1F BIT(8)
+#define STM32_CSI_SR1_ECTRLDL0F BIT(4)
+#define STM32_CSI_SR1_ESYNCESCDL0F BIT(3)
+#define STM32_CSI_SR1_EESCDL0F BIT(2)
+#define STM32_CSI_SR1_ESOTSYNCDL0F BIT(1)
+#define STM32_CSI_SR1_ESOTDL0F BIT(0)
+#define STM32_CSI_FCR0 0x0100
+#define STM32_CSI_FCR1 0x0104
+#define STM32_CSI_SPDFR 0x0110
+#define STM32_CSI_DT_MASK 0x3f
+#define STM32_CSI_VC_MASK 0x03
+#define STM32_CSI_ERR1 0x0114
+#define STM32_CSI_ERR1_IDVCERR_SHIFT 22
+#define STM32_CSI_ERR1_IDDTERR_SHIFT 16
+#define STM32_CSI_ERR1_CECCVCERR_SHIFT 14
+#define STM32_CSI_ERR1_CECCDTERR_SHIFT 8
+#define STM32_CSI_ERR1_CRCVCERR_SHIFT 6
+#define STM32_CSI_ERR1_CRCDTERR_SHIFT 0
+#define STM32_CSI_ERR2 0x0118
+#define STM32_CSI_ERR2_SYNCVCERR_SHIFT 18
+#define STM32_CSI_ERR2_SPKTVCERR_SHIFT 6
+#define STM32_CSI_ERR2_SPKTDTERR_SHIFT 0
+#define STM32_CSI_PRCR 0x1000
+#define STM32_CSI_PRCR_PEN BIT(1)
+#define STM32_CSI_PMCR 0x1004
+#define STM32_CSI_PFCR 0x1008
+#define STM32_CSI_PFCR_CCFR_MASK GENMASK(5, 0)
+#define STM32_CSI_PFCR_CCFR_SHIFT 0
+#define STM32_CSI_PFCR_HSFR_MASK GENMASK(14, 8)
+#define STM32_CSI_PFCR_HSFR_SHIFT 8
+#define STM32_CSI_PFCR_DLD BIT(16)
+#define STM32_CSI_PTCR0 0x1010
+#define STM32_CSI_PTCR0_TCKEN BIT(0)
+#define STM32_CSI_PTCR1 0x1014
+#define STM32_CSI_PTCR1_TWM BIT(16)
+#define STM32_CSI_PTCR1_TDI_MASK GENMASK(7, 0)
+#define STM32_CSI_PTCR1_TDI_SHIFT 0
+#define STM32_CSI_PTSR 0x1018
+
+#define STM32_CSI_LANES_MAX 2
+
+#define STM32_CSI_SR0_ERRORS (STM32_CSI_SR0_SYNCERRF | STM32_CSI_SR0_SPKTERRF |\
+ STM32_CSI_SR0_IDERRF | STM32_CSI_SR0_CECCERRF |\
+ STM32_CSI_SR0_ECCERRF | STM32_CSI_SR0_CRCERRF |\
+ STM32_CSI_SR0_CCFIFOFF)
+#define STM32_CSI_SR1_DL0_ERRORS (STM32_CSI_SR1_ECTRLDL0F | STM32_CSI_SR1_ESYNCESCDL0F |\
+ STM32_CSI_SR1_EESCDL0F | STM32_CSI_SR1_ESOTSYNCDL0F |\
+ STM32_CSI_SR1_ESOTDL0F)
+#define STM32_CSI_SR1_DL1_ERRORS (STM32_CSI_SR1_ECTRLDL1F | STM32_CSI_SR1_ESYNCESCDL1F |\
+ STM32_CSI_SR1_EESCDL1F | STM32_CSI_SR1_ESOTSYNCDL1F |\
+ STM32_CSI_SR1_ESOTDL1F)
+#define STM32_CSI_SR1_ERRORS (STM32_CSI_SR1_DL0_ERRORS | STM32_CSI_SR1_DL1_ERRORS)
+
+enum stm32_csi_pads {
+ STM32_CSI_PAD_SINK,
+ STM32_CSI_PAD_SOURCE,
+ STM32_CSI_PAD_MAX,
+};
+
+struct stm32_csi_event {
+ u32 mask;
+ const char * const name;
+};
+
+static const struct stm32_csi_event stm32_csi_events_sr0[] = {
+ {STM32_CSI_SR0_SYNCERRF, "Synchronization error"},
+ {STM32_CSI_SR0_SPKTERRF, "Short packet error"},
+ {STM32_CSI_SR0_IDERRF, "Data type ID error"},
+ {STM32_CSI_SR0_CECCERRF, "Corrected ECC error"},
+ {STM32_CSI_SR0_ECCERRF, "ECC error"},
+ {STM32_CSI_SR0_CRCERRF, "CRC error"},
+ {STM32_CSI_SR0_CCFIFOFF, "Clk changer FIFO full error"},
+};
+
+#define STM32_CSI_NUM_SR0_EVENTS ARRAY_SIZE(stm32_csi_events_sr0)
+
+static const struct stm32_csi_event stm32_csi_events_sr1[] = {
+ {STM32_CSI_SR1_ECTRLDL1F, "L1: D-PHY control error"},
+ {STM32_CSI_SR1_ESYNCESCDL1F,
+ "L1: D-PHY low power data transmission synchro error"},
+ {STM32_CSI_SR1_EESCDL1F, "L1: D-PHY escape entry error"},
+ {STM32_CSI_SR1_ESOTSYNCDL1F,
+ "L1: Start of transmission synchro error"},
+ {STM32_CSI_SR1_ESOTDL1F, "L1: Start of transmission error"},
+ {STM32_CSI_SR1_ECTRLDL0F, "L0: D-PHY control error"},
+ {STM32_CSI_SR1_ESYNCESCDL0F,
+ "L0: D-PHY low power data transmission synchro error"},
+ {STM32_CSI_SR1_EESCDL0F, "L0: D-PHY escape entry error"},
+ {STM32_CSI_SR1_ESOTSYNCDL0F,
+ "L0: Start of transmission synchro error"},
+ {STM32_CSI_SR1_ESOTDL0F, "L0: Start of transmission error"},
+};
+
+#define STM32_CSI_NUM_SR1_EVENTS ARRAY_SIZE(stm32_csi_events_sr1)
+
+enum stm32_csi_clk {
+ STM32_CSI_CLK_PCLK,
+ STM32_CSI_CLK_TXESC,
+ STM32_CSI_CLK_CSI2PHY,
+ STM32_CSI_CLK_NB,
+};
+
+static const char * const stm32_csi_clks_id[] = {
+ "pclk",
+ "txesc",
+ "csi2phy",
+};
+
+struct stm32_csi_dev {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk_bulk_data clks[STM32_CSI_CLK_NB];
+ struct regulator_bulk_data supplies[2];
+
+ u8 lanes[STM32_CSI_LANES_MAX];
+ u8 num_lanes;
+
+ /*
+ * spinlock slock is used to protect the srX_counters tables, which are
+ * accessed from both log_status and interrupt context
+ */
+ spinlock_t slock;
+
+ u32 sr0_counters[STM32_CSI_NUM_SR0_EVENTS];
+ u32 sr1_counters[STM32_CSI_NUM_SR1_EVENTS];
+
+ struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
+ struct media_pad pads[STM32_CSI_PAD_MAX];
+
+ /* Remote source */
+ struct v4l2_subdev *s_subdev;
+ u32 s_subdev_pad_nb;
+};
+
+struct stm32_csi_fmts {
+ u32 code;
+ u32 datatype;
+ u32 input_fmt;
+ u8 bpp;
+};
+
+#define FMT_MBUS_DT_DTFMT_BPP(mbus, dt, input, byteperpixel) \
+ { \
+ .code = MEDIA_BUS_FMT_##mbus, \
+ .datatype = MIPI_CSI2_DT_##dt, \
+ .input_fmt = STM32_CSI_INPUT_##input, \
+ .bpp = byteperpixel, \
+ }
+static const struct stm32_csi_fmts stm32_csi_formats[] = {
+ /* YUV 422 8 bit */
+ FMT_MBUS_DT_DTFMT_BPP(UYVY8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(YUYV8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(YVYU8_1X16, YUV422_8B, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(VYUY8_1X16, YUV422_8B, BPP8, 8),
+
+ /* Raw Bayer */
+ /* 8 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SBGGR8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG8_1X8, RAW8, BPP8, 8),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB8_1X8, RAW8, BPP8, 8),
+ /* 10 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG10_1X10, RAW10, BPP10, 10),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB10_1X10, RAW10, BPP10, 10),
+ /* 12 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG12_1X12, RAW12, BPP12, 12),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB12_1X12, RAW12, BPP12, 12),
+ /* 14 bit */
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SGBRG14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SGRBG14_1X14, RAW14, BPP14, 14),
+ FMT_MBUS_DT_DTFMT_BPP(SRGGB14_1X14, RAW14, BPP14, 14),
+
+ /* RGB 565 */
+ FMT_MBUS_DT_DTFMT_BPP(RGB565_1X16, RGB565, BPP8, 8),
+
+ /* JPEG (datatype isn't used) */
+ FMT_MBUS_DT_DTFMT_BPP(JPEG_1X8, NULL, BPP8, 8),
+};
+
+struct stm32_csi_mbps_phy_reg {
+ unsigned int mbps;
+ unsigned int hsfreqrange;
+ unsigned int osc_freq_target;
+};
+
+/*
+ * Table describing configuration of the PHY depending on the
+ * intended Bit Rate. From table 5-8 Frequency Ranges and Defaults
+ * of the Synopsys DWC MIPI PHY databook
+ */
+static const struct stm32_csi_mbps_phy_reg snps_stm32mp25[] = {
+ { .mbps = 80, .hsfreqrange = 0x00, .osc_freq_target = 460 },
+ { .mbps = 90, .hsfreqrange = 0x10, .osc_freq_target = 460 },
+ { .mbps = 100, .hsfreqrange = 0x20, .osc_freq_target = 460 },
+ { .mbps = 110, .hsfreqrange = 0x30, .osc_freq_target = 460 },
+ { .mbps = 120, .hsfreqrange = 0x01, .osc_freq_target = 460 },
+ { .mbps = 130, .hsfreqrange = 0x11, .osc_freq_target = 460 },
+ { .mbps = 140, .hsfreqrange = 0x21, .osc_freq_target = 460 },
+ { .mbps = 150, .hsfreqrange = 0x31, .osc_freq_target = 460 },
+ { .mbps = 160, .hsfreqrange = 0x02, .osc_freq_target = 460 },
+ { .mbps = 170, .hsfreqrange = 0x12, .osc_freq_target = 460 },
+ { .mbps = 180, .hsfreqrange = 0x22, .osc_freq_target = 460 },
+ { .mbps = 190, .hsfreqrange = 0x32, .osc_freq_target = 460 },
+ { .mbps = 205, .hsfreqrange = 0x03, .osc_freq_target = 460 },
+ { .mbps = 220, .hsfreqrange = 0x13, .osc_freq_target = 460 },
+ { .mbps = 235, .hsfreqrange = 0x23, .osc_freq_target = 460 },
+ { .mbps = 250, .hsfreqrange = 0x33, .osc_freq_target = 460 },
+ { .mbps = 275, .hsfreqrange = 0x04, .osc_freq_target = 460 },
+ { .mbps = 300, .hsfreqrange = 0x14, .osc_freq_target = 460 },
+ { .mbps = 325, .hsfreqrange = 0x25, .osc_freq_target = 460 },
+ { .mbps = 350, .hsfreqrange = 0x35, .osc_freq_target = 460 },
+ { .mbps = 400, .hsfreqrange = 0x05, .osc_freq_target = 460 },
+ { .mbps = 450, .hsfreqrange = 0x16, .osc_freq_target = 460 },
+ { .mbps = 500, .hsfreqrange = 0x26, .osc_freq_target = 460 },
+ { .mbps = 550, .hsfreqrange = 0x37, .osc_freq_target = 460 },
+ { .mbps = 600, .hsfreqrange = 0x07, .osc_freq_target = 460 },
+ { .mbps = 650, .hsfreqrange = 0x18, .osc_freq_target = 460 },
+ { .mbps = 700, .hsfreqrange = 0x28, .osc_freq_target = 460 },
+ { .mbps = 750, .hsfreqrange = 0x39, .osc_freq_target = 460 },
+ { .mbps = 800, .hsfreqrange = 0x09, .osc_freq_target = 460 },
+ { .mbps = 850, .hsfreqrange = 0x19, .osc_freq_target = 460 },
+ { .mbps = 900, .hsfreqrange = 0x29, .osc_freq_target = 460 },
+ { .mbps = 950, .hsfreqrange = 0x3a, .osc_freq_target = 460 },
+ { .mbps = 1000, .hsfreqrange = 0x0a, .osc_freq_target = 460 },
+ { .mbps = 1050, .hsfreqrange = 0x1a, .osc_freq_target = 460 },
+ { .mbps = 1100, .hsfreqrange = 0x2a, .osc_freq_target = 460 },
+ { .mbps = 1150, .hsfreqrange = 0x3b, .osc_freq_target = 460 },
+ { .mbps = 1200, .hsfreqrange = 0x0b, .osc_freq_target = 460 },
+ { .mbps = 1250, .hsfreqrange = 0x1b, .osc_freq_target = 460 },
+ { .mbps = 1300, .hsfreqrange = 0x2b, .osc_freq_target = 460 },
+ { .mbps = 1350, .hsfreqrange = 0x3c, .osc_freq_target = 460 },
+ { .mbps = 1400, .hsfreqrange = 0x0c, .osc_freq_target = 460 },
+ { .mbps = 1450, .hsfreqrange = 0x1c, .osc_freq_target = 460 },
+ { .mbps = 1500, .hsfreqrange = 0x2c, .osc_freq_target = 460 },
+ { .mbps = 1550, .hsfreqrange = 0x3d, .osc_freq_target = 285 },
+ { .mbps = 1600, .hsfreqrange = 0x0d, .osc_freq_target = 295 },
+ { .mbps = 1650, .hsfreqrange = 0x1d, .osc_freq_target = 304 },
+ { .mbps = 1700, .hsfreqrange = 0x2e, .osc_freq_target = 313 },
+ { .mbps = 1750, .hsfreqrange = 0x3e, .osc_freq_target = 322 },
+ { .mbps = 1800, .hsfreqrange = 0x0e, .osc_freq_target = 331 },
+ { .mbps = 1850, .hsfreqrange = 0x1e, .osc_freq_target = 341 },
+ { .mbps = 1900, .hsfreqrange = 0x2f, .osc_freq_target = 350 },
+ { .mbps = 1950, .hsfreqrange = 0x3f, .osc_freq_target = 359 },
+ { .mbps = 2000, .hsfreqrange = 0x0f, .osc_freq_target = 368 },
+ { .mbps = 2050, .hsfreqrange = 0x40, .osc_freq_target = 377 },
+ { .mbps = 2100, .hsfreqrange = 0x41, .osc_freq_target = 387 },
+ { .mbps = 2150, .hsfreqrange = 0x42, .osc_freq_target = 396 },
+ { .mbps = 2200, .hsfreqrange = 0x43, .osc_freq_target = 405 },
+ { .mbps = 2250, .hsfreqrange = 0x44, .osc_freq_target = 414 },
+ { .mbps = 2300, .hsfreqrange = 0x45, .osc_freq_target = 423 },
+ { .mbps = 2350, .hsfreqrange = 0x46, .osc_freq_target = 432 },
+ { .mbps = 2400, .hsfreqrange = 0x47, .osc_freq_target = 442 },
+ { .mbps = 2450, .hsfreqrange = 0x48, .osc_freq_target = 451 },
+ { .mbps = 2500, .hsfreqrange = 0x49, .osc_freq_target = 460 },
+ { /* sentinel */ }
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_REC709,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
+static const struct stm32_csi_fmts *stm32_csi_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(stm32_csi_formats); i++)
+ if (stm32_csi_formats[i].code == code)
+ return &stm32_csi_formats[i];
+
+ return NULL;
+}
+
+static inline struct stm32_csi_dev *to_csidev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct stm32_csi_dev, sd);
+}
+
+static int stm32_csi_setup_lane_merger(struct stm32_csi_dev *csidev)
+{
+ u32 lmcfgr = 0;
+ int i;
+
+ for (i = 0; i < csidev->num_lanes; i++) {
+ if (!csidev->lanes[i] || csidev->lanes[i] > STM32_CSI_LANES_MAX) {
+ dev_err(csidev->dev, "Invalid lane id (%d)\n", csidev->lanes[i]);
+ return -EINVAL;
+ }
+ lmcfgr |= (csidev->lanes[i] << ((i * 4) + STM32_CSI_LMCFGR_DLMAP_SHIFT));
+ }
+
+ lmcfgr |= (csidev->num_lanes << STM32_CSI_LMCFGR_LANENB_SHIFT);
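+ /* e.g. data-lanes = <1 2> gives lmcfgr = (1 << 16) | (2 << 20) | (2 << 8) = 0x210200 */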
+
+ writel_relaxed(lmcfgr, csidev->base + STM32_CSI_LMCFGR);
+
+ return 0;
+}
+
+static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
+ u32 addr, u32 val)
+{
+ /* Based on the sequence described in section 5.2.3.2 of the DesignWare document */
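+ /* Three phases: program the testcode MSBs, then the testcode LSBs, then the data byte */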
+ /* For writing the 4-bit testcode MSBs */
+ /* Set testen to high */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Place 0x00 in testdin */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /*
+ * Set testclk to low (with the falling edge on testclk, the testdin
+ * signal content is latched internally)
+ */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR1);
+
+ /* Place the 8-bit word corresponding to the testcode MSBs in testdin */
+ writel_relaxed(((addr >> 8) & STM32_CSI_PTCR1_TDI_MASK) << STM32_CSI_PTCR1_TDI_SHIFT,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* For writing the 8-bit testcode LSBs */
+ /* Set testclk to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to high */
+ writel_relaxed(STM32_CSI_PTCR1_TWM, csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Place the 8-bit word corresponding to the testcode LSBs in testdin */
+ writel_relaxed((addr & STM32_CSI_PTCR1_TDI_MASK) <<
+ STM32_CSI_PTCR1_TDI_SHIFT | STM32_CSI_PTCR1_TWM,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /*
+ * Set testclk to low (with the falling edge on testclk, the testdin
+ * signal content is latched internally)
+ */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set testen to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR1);
+
+ /* For writing the data */
+ /* Place the 8-bit data value in testdin */
+ writel_relaxed((val & STM32_CSI_PTCR1_TDI_MASK) << STM32_CSI_PTCR1_TDI_SHIFT,
+ csidev->base + STM32_CSI_PTCR1);
+
+ /* Set testclk to high (test data is programmed internally) */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+
+ /* Finish by setting testclk to low */
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+}
+
+static int stm32_csi_start(struct stm32_csi_dev *csidev,
+ struct v4l2_subdev_state *state)
+{
+ const struct stm32_csi_mbps_phy_reg *phy_regs;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ const struct stm32_csi_fmts *fmt;
+ unsigned long phy_clk_frate;
+ unsigned int mbps;
+ u32 lanes_ie = 0;
+ u32 lanes_en = 0;
+ s64 link_freq;
+ int ret;
+ u32 ccfr;
+
+ dev_dbg(csidev->dev, "Starting the CSI2\n");
+
+ /* Get the bpp value on pad0 (input of CSI) */
+ sink_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SINK);
+ fmt = stm32_csi_code_to_fmt(sink_fmt->code);
+
+ /* Get the remote sensor link frequency */
+ if (!csidev->s_subdev)
+ return -EIO;
+
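+ /* the divider 2 * num_lanes reflects that the D-PHY transfers data on both clock edges on each lane */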
+ link_freq = v4l2_get_link_freq(csidev->s_subdev->ctrl_handler,
+ fmt->bpp, 2 * csidev->num_lanes);
+ if (link_freq < 0)
+ return link_freq;
+
+ /* mbps is expressed in Mbps, hence link_freq * 2 / 1000000 */
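+ /* e.g. a 500 MHz link frequency gives 1000 Mbps */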
+ mbps = div_s64(link_freq, 500000);
+ dev_dbg(csidev->dev, "Computed Mbps: %u\n", mbps);
+
+ for (phy_regs = snps_stm32mp25; phy_regs->mbps != 0; phy_regs++)
+ if (phy_regs->mbps >= mbps)
+ break;
+
+ if (!phy_regs->mbps) {
+ dev_err(csidev->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ dev_dbg(csidev->dev, "PHY settings: (%u Mbps, %u HS FRange, %u OSC Freq)\n",
+ phy_regs->mbps, phy_regs->hsfreqrange,
+ phy_regs->osc_freq_target);
+
+ /* Prepare lanes related configuration bits */
+ lanes_ie |= STM32_CSI_SR1_DL0_ERRORS;
+ lanes_en |= STM32_CSI_PCR_DL0EN;
+ if (csidev->num_lanes == 2) {
+ lanes_ie |= STM32_CSI_SR1_DL1_ERRORS;
+ lanes_en |= STM32_CSI_PCR_DL1EN;
+ }
+
+ ret = pm_runtime_get_sync(csidev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Retrieve CSI2PHY clock rate to compute CCFR value */
+ phy_clk_frate = clk_get_rate(csidev->clks[STM32_CSI_CLK_CSI2PHY].clk);
+ if (!phy_clk_frate) {
+ pm_runtime_put(csidev->dev);
+ dev_err(csidev->dev, "CSI2PHY clock rate invalid (0)\n");
+ return -EINVAL;
+ }
+
+ ret = stm32_csi_setup_lane_merger(csidev);
+ if (ret) {
+ pm_runtime_put(csidev->dev);
+ return ret;
+ }
+
+ /* Enable the CSI */
+ writel_relaxed(STM32_CSI_CR_CSIEN, csidev->base + STM32_CSI_CR);
+
+ /* Enable some global CSI related interrupts - bits are same as SR0 */
+ writel_relaxed(STM32_CSI_SR0_ERRORS, csidev->base + STM32_CSI_IER0);
+
+ /* Enable lanes related error interrupts */
+ writel_relaxed(lanes_ie, csidev->base + STM32_CSI_IER1);
+
+ /* Initialization of the D-PHY */
+ /* Stop the D-PHY */
+ writel_relaxed(0, csidev->base + STM32_CSI_PRCR);
+
+ /* Keep the D-PHY in power down state */
+ writel_relaxed(0, csidev->base + STM32_CSI_PCR);
+
+ /* Enable testclr clock during 15ns */
+ writel_relaxed(STM32_CSI_PTCR0_TCKEN, csidev->base + STM32_CSI_PTCR0);
+ udelay(1);
+ writel_relaxed(0, csidev->base + STM32_CSI_PTCR0);
+
+ /* Set the cfgclkfreqrange (CCFR) and hsfreqrange fields of PFCR */
+ phy_clk_frate /= 1000000;
+ ccfr = (phy_clk_frate - 17) * 4;
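+ /* e.g. a 27 MHz CSI2PHY clock gives ccfr = (27 - 17) * 4 = 40 */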
+ writel_relaxed((ccfr << STM32_CSI_PFCR_CCFR_SHIFT) |
+ (phy_regs->hsfreqrange << STM32_CSI_PFCR_HSFR_SHIFT),
+ csidev->base + STM32_CSI_PFCR);
+
+ /* set reg @08 deskew_polarity_rw 1'b1 */
+ stm32_csi_phy_reg_write(csidev, 0x08, 0x38);
+
+ /* set reg @0xE4 counter_for_des_en_config_if_rx 0x10 + DLL prog EN */
+ /* This is because 13 <= cfgclkfreqrange[5:0] <= 38 */
+ stm32_csi_phy_reg_write(csidev, 0xe4, 0x11);
+
+ /* set reg @0xe2 & reg @0xe3 value DLL target oscillation freq */
+ /* Based on the table page 77, osc_freq_target */
+ stm32_csi_phy_reg_write(csidev, 0xe2, phy_regs->osc_freq_target & 0xFF);
+ stm32_csi_phy_reg_write(csidev, 0xe3, (phy_regs->osc_freq_target >> 8) & 0x0F);
+
+ writel_relaxed(STM32_CSI_PFCR_DLD | readl_relaxed(csidev->base + STM32_CSI_PFCR),
+ csidev->base + STM32_CSI_PFCR);
+
+ /* Enable Lanes */
+ writel_relaxed(lanes_en | STM32_CSI_PCR_CLEN, csidev->base + STM32_CSI_PCR);
+ writel_relaxed(lanes_en | STM32_CSI_PCR_CLEN | STM32_CSI_PCR_PWRDOWN,
+ csidev->base + STM32_CSI_PCR);
+
+ writel_relaxed(STM32_CSI_PRCR_PEN, csidev->base + STM32_CSI_PRCR);
+
+ /* Remove the force */
+ writel_relaxed(0, csidev->base + STM32_CSI_PMCR);
+
+ return ret;
+}
+
+static void stm32_csi_stop(struct stm32_csi_dev *csidev)
+{
+ dev_dbg(csidev->dev, "Stopping the CSI2\n");
+
+ /* Disable the D-PHY */
+ writel_relaxed(0, csidev->base + STM32_CSI_PCR);
+
+ /* Disable ITs */
+ writel_relaxed(0, csidev->base + STM32_CSI_IER0);
+ writel_relaxed(0, csidev->base + STM32_CSI_IER1);
+
+ /* Disable the CSI */
+ writel_relaxed(0, csidev->base + STM32_CSI_CR);
+
+ pm_runtime_put(csidev->dev);
+}
+
+static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
+ struct v4l2_subdev_state *state, u32 vc)
+{
+ struct v4l2_mbus_framefmt *mbus_fmt;
+ const struct stm32_csi_fmts *fmt;
+ u32 cfgr1 = 0;
+ int ret = 0;
+ u32 status;
+
+ mbus_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
+ fmt = stm32_csi_code_to_fmt(mbus_fmt->code);
+
+ /* If the mbus code is JPEG, don't enable filtering */
+ if (mbus_fmt->code == MEDIA_BUS_FMT_JPEG_1X8) {
+ cfgr1 |= STM32_CSI_VCXCFGR1_ALLDT;
+ cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_CDTFT_SHIFT;
+ dev_dbg(csidev->dev, "VC%d: enable AllDT mode\n", vc);
+ } else {
+ cfgr1 |= fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
+ cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_DT0FT_SHIFT;
+ cfgr1 |= STM32_CSI_VCXCFGR1_DT0EN;
+ dev_dbg(csidev->dev, "VC%d: enable DT0(0x%x)/DT0FT(0x%x)\n",
+ vc, fmt->datatype, fmt->input_fmt);
+ }
+ writel_relaxed(cfgr1, csidev->base + STM32_CSI_VCXCFGR1(vc));
+
+ /* Enable processing of the virtual-channel and wait for its status */
+ writel_relaxed(STM32_CSI_CR_VCXSTART(vc) | STM32_CSI_CR_CSIEN,
+ csidev->base + STM32_CSI_CR);
+
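+ /* poll the VC state flag every 1 ms, for up to 1 s */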
+ ret = readl_relaxed_poll_timeout(csidev->base + STM32_CSI_SR0,
+ status,
+ status & STM32_CSI_SR0_VCXSTATEF(vc),
+ 1000, 1000000);
+ if (ret) {
+ dev_err(csidev->dev, "failed to start VC(%d)\n", vc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_csi_stop_vc(struct stm32_csi_dev *csidev, u32 vc)
+{
+ int ret = 0;
+ u32 status;
+
+ /* Stop the Virtual Channel */
+ writel_relaxed(STM32_CSI_CR_VCXSTOP(vc) | STM32_CSI_CR_CSIEN,
+ csidev->base + STM32_CSI_CR);
+
+ ret = readl_relaxed_poll_timeout(csidev->base + STM32_CSI_SR0,
+ status,
+ !(status & STM32_CSI_SR0_VCXSTATEF(vc)),
+ 1000, 1000000);
+ if (ret) {
+ dev_err(csidev->dev, "failed to stop VC(%d)\n", vc);
+ return ret;
+ }
+
+ /* Disable all DTs */
+ writel_relaxed(0, csidev->base + STM32_CSI_VCXCFGR1(vc));
+ writel_relaxed(0, csidev->base + STM32_CSI_VCXCFGR2(vc));
+
+ return 0;
+}
+
+static int stm32_csi_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int ret;
+
+ ret = v4l2_subdev_disable_streams(csidev->s_subdev,
+ csidev->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret)
+ return ret;
+
+ /* Stop the VC0 */
+ ret = stm32_csi_stop_vc(csidev, 0);
+ if (ret)
+ dev_err(csidev->dev, "Failed to stop VC0\n");
+
+ stm32_csi_stop(csidev);
+
+ return 0;
+}
+
+static int stm32_csi_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int ret;
+
+ ret = stm32_csi_start(csidev, state);
+ if (ret)
+ return ret;
+
+ /* Configure & start the VC0 */
+ ret = stm32_csi_start_vc(csidev, state, 0);
+ if (ret) {
+ dev_err(csidev->dev, "Failed to start VC0\n");
+ stm32_csi_stop(csidev);
+ return ret;
+ }
+
+ ret = v4l2_subdev_enable_streams(csidev->s_subdev,
+ csidev->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret) {
+ stm32_csi_stop_vc(csidev, 0);
+ stm32_csi_stop(csidev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++)
+ *v4l2_subdev_state_get_format(state, i) = fmt_default;
+
+ return 0;
+}
+
+static int stm32_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(stm32_csi_formats))
+ return -EINVAL;
+
+ code->code = stm32_csi_formats[code->index].code;
+ return 0;
+}
+
+static int stm32_csi_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+ const struct stm32_csi_fmts *fmt;
+
+ fmt = stm32_csi_code_to_fmt(format->format.code);
+ if (!fmt) {
+ dev_dbg(csidev->dev, "Unsupported code %d, use default\n",
+ format->format.code);
+ format->format.code = fmt_default.code;
+ }
+
+ framefmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SINK);
+
+ if (format->pad == STM32_CSI_PAD_SOURCE)
+ format->format = *framefmt;
+ else
+ *framefmt = format->format;
+
+ framefmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
+ *framefmt = format->format;
+
+ return 0;
+}
+
+static int stm32_csi_log_status(struct v4l2_subdev *sd)
+{
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&csidev->slock, flags);
+
+ for (i = 0; i < STM32_CSI_NUM_SR0_EVENTS; i++) {
+ if (csidev->sr0_counters[i])
+ dev_info(csidev->dev, "%s events: %d\n",
+ stm32_csi_events_sr0[i].name,
+ csidev->sr0_counters[i]);
+ }
+
+ for (i = 0; i < STM32_CSI_NUM_SR1_EVENTS; i++) {
+ if (csidev->sr1_counters[i])
+ dev_info(csidev->dev, "%s events: %d\n",
+ stm32_csi_events_sr1[i].name,
+ csidev->sr1_counters[i]);
+ }
+
+ spin_unlock_irqrestore(&csidev->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops stm32_csi_core_ops = {
+ .log_status = stm32_csi_log_status,
+};
+
+static const struct v4l2_subdev_video_ops stm32_csi_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops stm32_csi_pad_ops = {
+ .enum_mbus_code = stm32_csi_enum_mbus_code,
+ .set_fmt = stm32_csi_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enable_streams = stm32_csi_enable_streams,
+ .disable_streams = stm32_csi_disable_streams,
+};
+
+static const struct v4l2_subdev_ops stm32_csi_subdev_ops = {
+ .core = &stm32_csi_core_ops,
+ .pad = &stm32_csi_pad_ops,
+ .video = &stm32_csi_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops stm32_csi_subdev_internal_ops = {
+ .init_state = stm32_csi_init_state,
+};
+
+static int stm32_csi_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *s_subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct v4l2_subdev *sd = notifier->sd;
+ struct stm32_csi_dev *csidev = to_csidev(sd);
+ int remote_pad;
+
+ remote_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
+ s_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (remote_pad < 0) {
+ dev_err(csidev->dev, "Couldn't find output pad for subdev %s\n",
+ s_subdev->name);
+ return remote_pad;
+ }
+
+ csidev->s_subdev = s_subdev;
+ csidev->s_subdev_pad_nb = remote_pad;
+
+ return media_create_pad_link(&csidev->s_subdev->entity,
+ remote_pad, &csidev->sd.entity,
+ STM32_CSI_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations stm32_csi_notifier_ops = {
+ .bound = stm32_csi_async_bound,
+};
+
+static irqreturn_t stm32_csi_irq_thread(int irq, void *arg)
+{
+ struct stm32_csi_dev *csidev = arg;
+ unsigned long flags;
+ u32 sr0, sr1;
+ int i;
+
+ sr0 = readl_relaxed(csidev->base + STM32_CSI_SR0);
+ sr1 = readl_relaxed(csidev->base + STM32_CSI_SR1);
+
+ /* Clear interrupt */
+ writel_relaxed(sr0 & STM32_CSI_SR0_ERRORS,
+ csidev->base + STM32_CSI_FCR0);
+ writel_relaxed(sr1 & STM32_CSI_SR1_ERRORS,
+ csidev->base + STM32_CSI_FCR1);
+
+ spin_lock_irqsave(&csidev->slock, flags);
+
+ for (i = 0; i < STM32_CSI_NUM_SR0_EVENTS; i++)
+ if (sr0 & stm32_csi_events_sr0[i].mask)
+ csidev->sr0_counters[i]++;
+
+ for (i = 0; i < STM32_CSI_NUM_SR1_EVENTS; i++)
+ if (sr1 & stm32_csi_events_sr1[i].mask)
+ csidev->sr1_counters[i]++;
+
+ spin_unlock_irqrestore(&csidev->slock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_csi_get_resources(struct stm32_csi_dev *csidev,
+ struct platform_device *pdev)
+{
+ int irq, ret, i;
+
+ csidev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(csidev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csidev->base),
+ "Failed to ioremap resource\n");
+
+ for (i = 0; i < STM32_CSI_CLK_NB; i++)
+ csidev->clks[i].id = stm32_csi_clks_id[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, STM32_CSI_CLK_NB,
+ csidev->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Couldn't get clks\n");
+
+ csidev->supplies[0].supply = "vdd";
+ csidev->supplies[1].supply = "vdda18";
+ ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request regulator vdd\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ stm32_csi_irq_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), csidev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Unable to request irq");
+
+ return 0;
+}
+
+static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ /* Get bus characteristics from devicetree */
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csidev->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep) {
+ dev_err(csidev->dev, "Could not find the endpoint\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(csidev->dev, "Could not parse v4l2 endpoint\n");
+ return ret;
+ }
+
+ csidev->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (csidev->num_lanes > STM32_CSI_LANES_MAX) {
+ dev_err(csidev->dev, "Unsupported number of data-lanes: %d\n",
+ csidev->num_lanes);
+ return -EINVAL;
+ }
+
+ memcpy(csidev->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
+ sizeof(csidev->lanes));
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(csidev->dev), NULL);
+ if (!ep) {
+ dev_err(csidev->dev, "Failed to get next endpoint\n");
+ return -EINVAL;
+ }
+
+ v4l2_async_subdev_nf_init(&csidev->notifier, &csidev->sd);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csidev->notifier, ep,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ dev_err(csidev->dev, "Failed to add fwnode remote subdev\n");
+ return PTR_ERR(asd);
+ }
+
+ csidev->notifier.ops = &stm32_csi_notifier_ops;
+
+ ret = v4l2_async_nf_register(&csidev->notifier);
+ if (ret) {
+ dev_err(csidev->dev, "Failed to register notifier\n");
+ v4l2_async_nf_cleanup(&csidev->notifier);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int stm32_csi_probe(struct platform_device *pdev)
+{
+ struct stm32_csi_dev *csidev;
+ struct reset_control *rstc;
+ int ret;
+
+ csidev = devm_kzalloc(&pdev->dev, sizeof(*csidev), GFP_KERNEL);
+ if (!csidev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, csidev);
+ csidev->dev = &pdev->dev;
+
+ spin_lock_init(&csidev->slock);
+
+ ret = stm32_csi_get_resources(csidev, pdev);
+ if (ret)
+ goto err_free_priv;
+
+ ret = stm32_csi_parse_dt(csidev);
+ if (ret)
+ goto err_free_priv;
+
+ csidev->sd.owner = THIS_MODULE;
+ csidev->sd.dev = &pdev->dev;
+ csidev->sd.internal_ops = &stm32_csi_subdev_internal_ops;
+ v4l2_subdev_init(&csidev->sd, &stm32_csi_subdev_ops);
+ v4l2_set_subdevdata(&csidev->sd, &pdev->dev);
+ snprintf(csidev->sd.name, sizeof(csidev->sd.name), "%s",
+ dev_name(&pdev->dev));
+
+ /* Create our media pads */
+ csidev->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csidev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csidev->pads[STM32_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csidev->pads[STM32_CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csidev->sd.entity, STM32_CSI_PAD_MAX,
+ csidev->pads);
+ if (ret)
+ goto err_cleanup;
+
+ ret = v4l2_subdev_init_finalize(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = v4l2_async_register_subdev(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ /* Reset device */
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
+ goto err_cleanup;
+ }
+
+ ret = reset_control_assert(rstc);
+ if (ret) {
+ ret = dev_err_probe(&pdev->dev, ret,
+ "Failed to assert the reset line\n");
+ goto err_cleanup;
+ }
+
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ ret = dev_err_probe(&pdev->dev, ret,
+ "Failed to deassert the reset line\n");
+ goto err_cleanup;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev,
+ "Probed CSI with %u lanes\n", csidev->num_lanes);
+
+ return 0;
+
+err_cleanup:
+ v4l2_async_nf_cleanup(&csidev->notifier);
+err_free_priv:
+ return ret;
+}
+
+static void stm32_csi_remove(struct platform_device *pdev)
+{
+ struct stm32_csi_dev *csidev = platform_get_drvdata(pdev);
+
+ v4l2_async_unregister_subdev(&csidev->sd);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int stm32_csi_runtime_suspend(struct device *dev)
+{
+ struct stm32_csi_dev *csidev = dev_get_drvdata(dev);
+ int ret;
+
+ clk_bulk_disable_unprepare(STM32_CSI_CLK_NB, csidev->clks);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret < 0)
+ dev_err(dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int stm32_csi_runtime_resume(struct device *dev)
+{
+ struct stm32_csi_dev *csidev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(csidev->supplies),
+ csidev->supplies);
+ if (ret)
+ goto error_out;
+
+ ret = clk_bulk_prepare_enable(STM32_CSI_CLK_NB, csidev->clks);
+ if (ret)
+ goto error_disable_supplies;
+
+ return 0;
+
+error_disable_supplies:
+ ret = regulator_bulk_disable(ARRAY_SIZE(csidev->supplies), csidev->supplies);
+ if (ret < 0)
+ dev_err(dev, "cannot disable regulators %d\n", ret);
+error_out:
+ dev_err(csidev->dev, "Failed to resume: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id stm32_csi_of_table[] = {
+ { .compatible = "st,stm32mp25-csi", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_csi_of_table);
+
+static const struct dev_pm_ops stm32_csi_pm_ops = {
+ RUNTIME_PM_OPS(stm32_csi_runtime_suspend,
+ stm32_csi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_csi_driver = {
+ .driver = {
+ .name = "stm32-csi",
+ .of_match_table = stm32_csi_of_table,
+ .pm = pm_ptr(&stm32_csi_pm_ops),
+ },
+ .probe = stm32_csi_probe,
+ .remove = stm32_csi_remove,
+};
+
+module_platform_driver(stm32_csi_driver);
+
+MODULE_AUTHOR("Alain Volmat <alain.volmat@foss.st.com>");
+MODULE_DESCRIPTION("STM32 CSI controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
index 8920d9388a21..159105fb40b8 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-stm32-dcmipp-y := dcmipp-core.o dcmipp-common.o dcmipp-parallel.o dcmipp-byteproc.o dcmipp-bytecap.o
+stm32-dcmipp-y := dcmipp-core.o dcmipp-common.o dcmipp-input.o dcmipp-byteproc.o dcmipp-bytecap.o
obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp.o
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
index 7edd49bfe7e5..1c1b6b48918e 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
@@ -56,15 +56,32 @@ struct dcmipp_bytecap_pix_map {
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
+ PIXMAP_MBUS_PFMT(RGB565_1X16, RGB565),
PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
+ PIXMAP_MBUS_PFMT(YUYV8_1X16, YUYV),
PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
+ PIXMAP_MBUS_PFMT(YVYU8_1X16, YVYU),
PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
+ PIXMAP_MBUS_PFMT(UYVY8_1X16, UYVY),
PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
+ PIXMAP_MBUS_PFMT(VYUY8_1X16, VYUY),
PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
+ PIXMAP_MBUS_PFMT(SBGGR10_1X10, SBGGR10),
+ PIXMAP_MBUS_PFMT(SGBRG10_1X10, SGBRG10),
+ PIXMAP_MBUS_PFMT(SGRBG10_1X10, SGRBG10),
+ PIXMAP_MBUS_PFMT(SRGGB10_1X10, SRGGB10),
+ PIXMAP_MBUS_PFMT(SBGGR12_1X12, SBGGR12),
+ PIXMAP_MBUS_PFMT(SGBRG12_1X12, SGBRG12),
+ PIXMAP_MBUS_PFMT(SGRBG12_1X12, SGRBG12),
+ PIXMAP_MBUS_PFMT(SRGGB12_1X12, SRGGB12),
+ PIXMAP_MBUS_PFMT(SBGGR14_1X14, SBGGR14),
+ PIXMAP_MBUS_PFMT(SGBRG14_1X14, SGBRG14),
+ PIXMAP_MBUS_PFMT(SGRBG14_1X14, SGRBG14),
+ PIXMAP_MBUS_PFMT(SRGGB14_1X14, SRGGB14),
PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};
@@ -112,6 +129,7 @@ struct dcmipp_bytecap_device {
u32 sequence;
struct media_pipeline pipe;
struct v4l2_subdev *s_subdev;
+ u32 s_subdev_pad_nb;
enum dcmipp_state state;
@@ -250,34 +268,34 @@ static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
{
const struct dcmipp_bytecap_pix_map *vpix;
unsigned int index = f->index;
- unsigned int i;
+ unsigned int i, prev_pixelformat = 0;
- if (f->mbus_code) {
- /*
- * If a media bus code is specified, only enumerate formats
- * compatible with it.
- */
- for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
- vpix = &dcmipp_bytecap_pix_map_list[i];
- if (vpix->code != f->mbus_code)
- continue;
+ /*
+ * List all formats (or only the ones matching f->mbus_code), taking
+ * care to remove duplicated entries (due to support of both the
+ * parallel and CSI 16-bit formats).
+ */
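+ /* the dedup below relies on duplicated pixelformats being adjacent in the list */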
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ vpix = &dcmipp_bytecap_pix_map_list[i];
+ /* Skip formats not matching requested mbus code */
+ if (f->mbus_code && vpix->code != f->mbus_code)
+ continue;
- if (index == 0)
- break;
+ /* Skip duplicated pixelformat */
+ if (vpix->pixelformat == prev_pixelformat)
+ continue;
- index--;
- }
+ prev_pixelformat = vpix->pixelformat;
- if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
- return -EINVAL;
- } else {
- /* Otherwise, enumerate all formats. */
- if (f->index >= ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
- return -EINVAL;
+ if (index == 0)
+ break;
- vpix = &dcmipp_bytecap_pix_map_list[f->index];
+ index--;
}
+ if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
+ return -EINVAL;
+
f->pixelformat = vpix->pixelformat;
return 0;
@@ -337,33 +355,6 @@ static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
};
-static int dcmipp_pipeline_s_stream(struct dcmipp_bytecap_device *vcap,
- int state)
-{
- struct media_pad *pad;
- int ret;
-
- /*
- * Get source subdev - since link is IMMUTABLE, pointer is cached
- * within the dcmipp_bytecap_device structure
- */
- if (!vcap->s_subdev) {
- pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
- vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
- }
-
- ret = v4l2_subdev_call(vcap->s_subdev, video, s_stream, state);
- if (ret < 0) {
- dev_err(vcap->dev, "failed to %s streaming (%d)\n",
- state ? "start" : "stop", ret);
- return ret;
- }
-
- return 0;
-}
-
static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
struct dcmipp_buf *buf)
{
@@ -395,11 +386,24 @@ static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
struct media_entity *entity = &vcap->vdev.entity;
struct dcmipp_buf *buf;
+ struct media_pad *pad;
int ret;
vcap->sequence = 0;
memset(&vcap->count, 0, sizeof(vcap->count));
+ /*
+ * Get source subdev - since link is IMMUTABLE, pointer is cached
+ * within the dcmipp_bytecap_device structure
+ */
+ if (!vcap->s_subdev) {
+ pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+ vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
+ vcap->s_subdev_pad_nb = pad->index;
+ }
+
ret = pm_runtime_resume_and_get(vcap->dev);
if (ret < 0) {
dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
@@ -414,7 +418,8 @@ static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
goto err_pm_put;
}
- ret = dcmipp_pipeline_s_stream(vcap, 1);
+ ret = v4l2_subdev_enable_streams(vcap->s_subdev,
+ vcap->s_subdev_pad_nb, BIT_ULL(0));
if (ret)
goto err_media_pipeline_stop;
@@ -482,7 +487,10 @@ static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
int ret;
u32 status;
- dcmipp_pipeline_s_stream(vcap, 0);
+ ret = v4l2_subdev_disable_streams(vcap->s_subdev,
+ vcap->s_subdev_pad_nb, BIT_ULL(0));
+ if (ret)
+ dev_warn(vcap->dev, "Failed to disable stream\n");
/* Stop the media pipeline */
media_pipeline_stop(vcap->vdev.entity.pads);
@@ -810,8 +818,7 @@ static int dcmipp_bytecap_link_validate(struct media_link *link)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = link->source->index,
};
- const struct dcmipp_bytecap_pix_map *vpix;
- int ret;
+ int ret, i;
ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
if (ret < 0)
@@ -825,10 +832,17 @@ static int dcmipp_bytecap_link_validate(struct media_link *link)
return -EINVAL;
}
- vpix = dcmipp_bytecap_pix_map_by_pixelformat(vcap->format.pixelformat);
- if (source_fmt.format.code != vpix->code) {
- dev_err(vcap->dev, "Wrong mbus_code 0x%x, (0x%x expected)\n",
- vpix->code, source_fmt.format.code);
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ if (dcmipp_bytecap_pix_map_list[i].pixelformat ==
+ vcap->format.pixelformat &&
+ dcmipp_bytecap_pix_map_list[i].code ==
+ source_fmt.format.code)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list)) {
+ dev_err(vcap->dev, "mbus code 0x%x do not match capture device format (0x%x)\n",
+ vcap->format.pixelformat, source_fmt.format.code);
return -EINVAL;
}
@@ -887,7 +901,7 @@ struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
q->dev = dev;
/* DCMIPP requires 16 bytes aligned buffers */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Failed to set DMA mask\n");
goto err_mutex_destroy;
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
index 5a361ad6b023..3c742a546441 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
@@ -48,15 +48,32 @@ struct dcmipp_byteproc_pix_map {
}
static const struct dcmipp_byteproc_pix_map dcmipp_byteproc_pix_map_list[] = {
PIXMAP_MBUS_BPP(RGB565_2X8_LE, 2),
+ PIXMAP_MBUS_BPP(RGB565_1X16, 2),
PIXMAP_MBUS_BPP(YUYV8_2X8, 2),
+ PIXMAP_MBUS_BPP(YUYV8_1X16, 2),
PIXMAP_MBUS_BPP(YVYU8_2X8, 2),
+ PIXMAP_MBUS_BPP(YVYU8_1X16, 2),
PIXMAP_MBUS_BPP(UYVY8_2X8, 2),
+ PIXMAP_MBUS_BPP(UYVY8_1X16, 2),
PIXMAP_MBUS_BPP(VYUY8_2X8, 2),
+ PIXMAP_MBUS_BPP(VYUY8_1X16, 2),
PIXMAP_MBUS_BPP(Y8_1X8, 1),
PIXMAP_MBUS_BPP(SBGGR8_1X8, 1),
PIXMAP_MBUS_BPP(SGBRG8_1X8, 1),
PIXMAP_MBUS_BPP(SGRBG8_1X8, 1),
PIXMAP_MBUS_BPP(SRGGB8_1X8, 1),
+ PIXMAP_MBUS_BPP(SBGGR10_1X10, 2),
+ PIXMAP_MBUS_BPP(SGBRG10_1X10, 2),
+ PIXMAP_MBUS_BPP(SGRBG10_1X10, 2),
+ PIXMAP_MBUS_BPP(SRGGB10_1X10, 2),
+ PIXMAP_MBUS_BPP(SBGGR12_1X12, 2),
+ PIXMAP_MBUS_BPP(SGBRG12_1X12, 2),
+ PIXMAP_MBUS_BPP(SGRBG12_1X12, 2),
+ PIXMAP_MBUS_BPP(SRGGB12_1X12, 2),
+ PIXMAP_MBUS_BPP(SBGGR14_1X14, 2),
+ PIXMAP_MBUS_BPP(SGBRG14_1X14, 2),
+ PIXMAP_MBUS_BPP(SGRBG14_1X14, 2),
+ PIXMAP_MBUS_BPP(SRGGB14_1X14, 2),
PIXMAP_MBUS_BPP(JPEG_1X8, 1),
};
@@ -78,7 +95,6 @@ struct dcmipp_byteproc_device {
struct v4l2_subdev sd;
struct device *dev;
void __iomem *regs;
- bool streaming;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -239,11 +255,10 @@ static int dcmipp_byteproc_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
struct v4l2_rect *crop, *compose;
- if (byteproc->streaming)
+ if (v4l2_subdev_is_streaming(sd))
return -EBUSY;
mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
@@ -382,30 +397,19 @@ static int dcmipp_byteproc_set_selection(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_pad_ops dcmipp_byteproc_pad_ops = {
- .enum_mbus_code = dcmipp_byteproc_enum_mbus_code,
- .enum_frame_size = dcmipp_byteproc_enum_frame_size,
- .get_fmt = v4l2_subdev_get_fmt,
- .set_fmt = dcmipp_byteproc_set_fmt,
- .get_selection = dcmipp_byteproc_get_selection,
- .set_selection = dcmipp_byteproc_set_selection,
-};
-
static int dcmipp_byteproc_configure_scale_crop
- (struct dcmipp_byteproc_device *byteproc)
+ (struct dcmipp_byteproc_device *byteproc,
+ struct v4l2_subdev_state *state)
{
const struct dcmipp_byteproc_pix_map *vpix;
- struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *sink_fmt;
u32 hprediv, vprediv;
struct v4l2_rect *compose, *crop;
u32 val = 0;
- state = v4l2_subdev_lock_and_get_active_state(&byteproc->sd);
sink_fmt = v4l2_subdev_state_get_format(state, 0);
compose = v4l2_subdev_state_get_compose(state, 0);
crop = v4l2_subdev_state_get_crop(state, 1);
- v4l2_subdev_unlock_state(state);
/* find output format bpp */
vpix = dcmipp_byteproc_pix_map_by_code(sink_fmt->code);
@@ -460,48 +464,73 @@ static int dcmipp_byteproc_configure_scale_crop
return 0;
}
-static int dcmipp_byteproc_s_stream(struct v4l2_subdev *sd, int enable)
+static int dcmipp_byteproc_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
{
struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
struct v4l2_subdev *s_subdev;
- struct media_pad *pad;
- int ret = 0;
+ struct media_pad *s_pad;
+ int ret;
/* Get source subdev */
- pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
return -EINVAL;
- s_subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- if (enable) {
- ret = dcmipp_byteproc_configure_scale_crop(byteproc);
- if (ret)
- return ret;
-
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(byteproc->dev,
- "failed to start source subdev streaming (%d)\n",
- ret);
- return ret;
- }
- } else {
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(byteproc->dev,
- "failed to stop source subdev streaming (%d)\n",
- ret);
- return ret;
- }
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = dcmipp_byteproc_configure_scale_crop(byteproc, state);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_enable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
}
- byteproc->streaming = enable;
+ return 0;
+}
+
+static int dcmipp_byteproc_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = v4l2_subdev_disable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
+ }
return 0;
}
+static const struct v4l2_subdev_pad_ops dcmipp_byteproc_pad_ops = {
+ .enum_mbus_code = dcmipp_byteproc_enum_mbus_code,
+ .enum_frame_size = dcmipp_byteproc_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_byteproc_set_fmt,
+ .get_selection = dcmipp_byteproc_get_selection,
+ .set_selection = dcmipp_byteproc_set_selection,
+ .enable_streams = dcmipp_byteproc_enable_streams,
+ .disable_streams = dcmipp_byteproc_disable_streams,
+};
+
static const struct v4l2_subdev_video_ops dcmipp_byteproc_video_ops = {
- .s_stream = dcmipp_byteproc_s_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_ops dcmipp_byteproc_ops = {
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
index 7a7cf43baf24..fe5f97233f5e 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
@@ -199,11 +199,11 @@ static inline void __reg_clear(struct device *dev, void __iomem *base, u32 reg,
}
/* DCMIPP subdev init / release entry points */
-struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
+struct dcmipp_ent_device *dcmipp_inp_ent_init(struct device *dev,
const char *entity_name,
struct v4l2_device *v4l2_dev,
void __iomem *regs);
-void dcmipp_par_ent_release(struct dcmipp_ent_device *ved);
+void dcmipp_inp_ent_release(struct dcmipp_ent_device *ved);
struct dcmipp_ent_device *
dcmipp_byteproc_ent_init(struct device *dev, const char *entity_name,
struct v4l2_device *v4l2_dev, void __iomem *regs);
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index 3806f7c6e2fe..71acf539e1f3 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -40,6 +40,7 @@ struct dcmipp_device {
/* Hardware resources */
void __iomem *regs;
+ struct clk *mclk;
struct clk *kclk;
/* The pipeline configuration */
@@ -87,6 +88,7 @@ struct dcmipp_pipeline_config {
size_t num_ents;
const struct dcmipp_ent_link *links;
size_t num_links;
+ u32 hw_revision;
};
/* --------------------------------------------------------------------------
@@ -95,9 +97,9 @@ struct dcmipp_pipeline_config {
static const struct dcmipp_ent_config stm32mp13_ent_config[] = {
{
- .name = "dcmipp_parallel",
- .init = dcmipp_par_ent_init,
- .release = dcmipp_par_ent_release,
+ .name = "dcmipp_input",
+ .init = dcmipp_inp_ent_init,
+ .release = dcmipp_inp_ent_release,
},
{
.name = "dcmipp_dump_postproc",
@@ -111,22 +113,58 @@ static const struct dcmipp_ent_config stm32mp13_ent_config[] = {
},
};
-#define ID_PARALLEL 0
+#define ID_INPUT 0
#define ID_DUMP_BYTEPROC 1
#define ID_DUMP_CAPTURE 2
static const struct dcmipp_ent_link stm32mp13_ent_links[] = {
- DCMIPP_ENT_LINK(ID_PARALLEL, 1, ID_DUMP_BYTEPROC, 0,
+ DCMIPP_ENT_LINK(ID_INPUT, 1, ID_DUMP_BYTEPROC, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
+#define DCMIPP_STM32MP13_VERR 0x10
static const struct dcmipp_pipeline_config stm32mp13_pipe_cfg = {
.ents = stm32mp13_ent_config,
.num_ents = ARRAY_SIZE(stm32mp13_ent_config),
.links = stm32mp13_ent_links,
- .num_links = ARRAY_SIZE(stm32mp13_ent_links)
+ .num_links = ARRAY_SIZE(stm32mp13_ent_links),
+ .hw_revision = DCMIPP_STM32MP13_VERR
+};
+
+static const struct dcmipp_ent_config stm32mp25_ent_config[] = {
+ {
+ .name = "dcmipp_input",
+ .init = dcmipp_inp_ent_init,
+ .release = dcmipp_inp_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_postproc",
+ .init = dcmipp_byteproc_ent_init,
+ .release = dcmipp_byteproc_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_capture",
+ .init = dcmipp_bytecap_ent_init,
+ .release = dcmipp_bytecap_ent_release,
+ },
+};
+
+static const struct dcmipp_ent_link stm32mp25_ent_links[] = {
+ DCMIPP_ENT_LINK(ID_INPUT, 1, ID_DUMP_BYTEPROC, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
+
+#define DCMIPP_STM32MP25_VERR 0x30
+static const struct dcmipp_pipeline_config stm32mp25_pipe_cfg = {
+ .ents = stm32mp25_ent_config,
+ .num_ents = ARRAY_SIZE(stm32mp25_ent_config),
+ .links = stm32mp25_ent_links,
+ .num_links = ARRAY_SIZE(stm32mp25_ent_links),
+ .hw_revision = DCMIPP_STM32MP25_VERR
};
#define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
@@ -209,6 +247,7 @@ err_init_entity:
static const struct of_device_id dcmipp_of_match[] = {
{ .compatible = "st,stm32mp13-dcmipp", .data = &stm32mp13_pipe_cfg },
+ { .compatible = "st,stm32mp25-dcmipp", .data = &stm32mp25_pipe_cfg },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, dcmipp_of_match);
@@ -258,13 +297,22 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
{
struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
unsigned int ret;
- int src_pad;
+ int src_pad, i;
struct dcmipp_ent_device *sink;
- struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_PARALLEL };
+ struct v4l2_fwnode_endpoint vep = { 0 };
struct fwnode_handle *ep;
+ enum v4l2_mbus_type supported_types[] = {
+ V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI2_DPHY
+ };
+ int supported_types_nb = ARRAY_SIZE(supported_types);
dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
+ /* Only MP25 supports CSI input */
+ if (!of_device_is_compatible(dcmipp->dev->of_node,
+ "st,stm32mp25-dcmipp"))
+ supported_types_nb--;
+
/*
* Link this sub-device to DCMIPP, it could be
* a parallel camera sensor or a CSI-2 to parallel bridge
@@ -281,21 +329,23 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -ENODEV;
}
- /* Check for parallel bus-type first, then bt656 */
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (ret) {
- vep.bus_type = V4L2_MBUS_BT656;
+ /* Check for supported MBUS type */
+ for (i = 0; i < supported_types_nb; i++) {
+ vep.bus_type = supported_types[i];
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- if (ret) {
- dev_err(dcmipp->dev, "Could not parse the endpoint\n");
- fwnode_handle_put(ep);
- return ret;
- }
+ if (!ret)
+ break;
}
fwnode_handle_put(ep);
- if (vep.bus.parallel.bus_width == 0) {
+ if (ret) {
+ dev_err(dcmipp->dev, "Could not parse the endpoint\n");
+ return ret;
+ }
+
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY &&
+ vep.bus.parallel.bus_width == 0) {
dev_err(dcmipp->dev, "Invalid parallel interface bus-width\n");
return -ENODEV;
}
@@ -308,11 +358,13 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -ENODEV;
}
- /* Parallel input device detected, connect it to parallel subdev */
- sink = dcmipp->entity[ID_PARALLEL];
- sink->bus.flags = vep.bus.parallel.flags;
- sink->bus.bus_width = vep.bus.parallel.bus_width;
- sink->bus.data_shift = vep.bus.parallel.data_shift;
+ /* Connect input device to the dcmipp_input subdev */
+ sink = dcmipp->entity[ID_INPUT];
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ sink->bus.flags = vep.bus.parallel.flags;
+ sink->bus.bus_width = vep.bus.parallel.bus_width;
+ sink->bus.data_shift = vep.bus.parallel.data_shift;
+ }
sink->bus_type = vep.bus_type;
ret = media_create_pad_link(&subdev->entity, src_pad, sink->ent, 0,
MEDIA_LNK_FL_IMMUTABLE |
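
The hunk above replaces the two-step parallel/BT656 fallback with a loop: vep.bus_type is set to each candidate in supported_types[] and v4l2_fwnode_endpoint_parse() is retried until one succeeds, with V4L2_MBUS_CSI2_DPHY dropped from the candidate list on non-MP25 parts. A minimal standalone sketch of the same try-each-candidate pattern, assuming a toy parse_endpoint() stub and made-up bus-name strings rather than the real fwnode API:

#include <stdio.h>
#include <string.h>

/* toy stand-in for v4l2_fwnode_endpoint_parse(): "succeeds" only when the
 * candidate bus type matches what the (made-up) firmware describes */
static int parse_endpoint(const char *fw_bus, const char *candidate)
{
	return strcmp(fw_bus, candidate) ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	const char *candidates[] = { "parallel", "bt656", "csi2-dphy" };
	int n = sizeof(candidates) / sizeof(candidates[0]);
	const char *fw_bus = "bt656";	/* what the devicetree endpoint uses */
	int is_mp25 = 0;
	int i, ret = -22;

	/* MP13 has no CSI input, so drop the last candidate, as the driver does */
	if (!is_mp25)
		n--;

	for (i = 0; i < n; i++) {
		ret = parse_endpoint(fw_bus, candidates[i]);
		if (!ret)
			break;
	}

	if (ret)
		printf("Could not parse the endpoint\n");
	else
		printf("matched bus type: %s\n", candidates[i]);
	return 0;
}
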
@@ -411,7 +463,7 @@ static int dcmipp_graph_init(struct dcmipp_device *dcmipp)
static int dcmipp_probe(struct platform_device *pdev)
{
struct dcmipp_device *dcmipp;
- struct clk *kclk;
+ struct clk *kclk, *mclk;
const struct dcmipp_pipeline_config *pipe_cfg;
struct reset_control *rstc;
int irq;
@@ -471,12 +523,20 @@ static int dcmipp_probe(struct platform_device *pdev)
return ret;
}
- kclk = devm_clk_get(&pdev->dev, NULL);
+ kclk = devm_clk_get(&pdev->dev, "kclk");
if (IS_ERR(kclk))
return dev_err_probe(&pdev->dev, PTR_ERR(kclk),
"Unable to get kclk\n");
dcmipp->kclk = kclk;
+ if (!of_device_is_compatible(pdev->dev.of_node, "st,stm32mp13-dcmipp")) {
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
+ "Unable to get mclk\n");
+ dcmipp->mclk = mclk;
+ }
+
dcmipp->entity = devm_kcalloc(&pdev->dev, dcmipp->pipe_cfg->num_ents,
sizeof(*dcmipp->entity), GFP_KERNEL);
if (!dcmipp->entity)
@@ -496,6 +556,7 @@ static int dcmipp_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(dcmipp->mdev.model, DCMIPP_MDEV_MODEL_NAME,
sizeof(dcmipp->mdev.model));
+ dcmipp->mdev.hw_revision = pipe_cfg->hw_revision;
dcmipp->mdev.dev = &pdev->dev;
media_device_init(&dcmipp->mdev);
@@ -538,6 +599,7 @@ static int dcmipp_runtime_suspend(struct device *dev)
struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
clk_disable_unprepare(dcmipp->kclk);
+ clk_disable_unprepare(dcmipp->mclk);
return 0;
}
@@ -547,9 +609,17 @@ static int dcmipp_runtime_resume(struct device *dev)
struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
int ret;
+ ret = clk_prepare_enable(dcmipp->mclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to prepare_enable mclk\n", __func__);
+ return ret;
+ }
+
ret = clk_prepare_enable(dcmipp->kclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(dcmipp->mclk);
dev_err(dev, "%s: Failed to prepare_enable kclk\n", __func__);
+ }
return ret;
}
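
With the optional "mclk" added above, the runtime-PM callbacks keep a strict ordering: resume enables mclk before kclk and unwinds mclk if kclk fails, while suspend releases the clocks in reverse order. A small self-contained sketch of that ordering, using printf stubs in place of the kernel clk API (the stubs below are placeholders, not the real functions):

#include <stdio.h>

static int clk_prepare_enable(const char *clk)
{
	printf("enable %s\n", clk);
	return 0;	/* pretend it always succeeds */
}

static void clk_disable_unprepare(const char *clk)
{
	printf("disable %s\n", clk);
}

static int dcmipp_resume_sketch(void)
{
	int ret;

	ret = clk_prepare_enable("mclk");
	if (ret)
		return ret;

	ret = clk_prepare_enable("kclk");
	if (ret)
		clk_disable_unprepare("mclk");	/* unwind mclk on failure */

	return ret;
}

static void dcmipp_suspend_sketch(void)
{
	/* reverse order of resume */
	clk_disable_unprepare("kclk");
	clk_disable_unprepare("mclk");
}

int main(void)
{
	if (!dcmipp_resume_sketch())
		dcmipp_suspend_sketch();
	return 0;
}

In the kernel the mclk pointer simply stays NULL on MP13 and the common clock framework treats a NULL clock as a no-op, so the same resume/suspend paths serve both variants.
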
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c
new file mode 100644
index 000000000000..7e5311b67d7e
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-input.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/v4l2-mediabus.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_PRCR 0x104
+#define DCMIPP_PRCR_FORMAT_SHIFT 16
+#define DCMIPP_PRCR_FORMAT_YUV422 0x1e
+#define DCMIPP_PRCR_FORMAT_RGB565 0x22
+#define DCMIPP_PRCR_FORMAT_RAW8 0x2a
+#define DCMIPP_PRCR_FORMAT_RAW10 0x2b
+#define DCMIPP_PRCR_FORMAT_RAW12 0x2c
+#define DCMIPP_PRCR_FORMAT_RAW14 0x2d
+#define DCMIPP_PRCR_FORMAT_G8 0x4a
+#define DCMIPP_PRCR_FORMAT_BYTE_STREAM 0x5a
+#define DCMIPP_PRCR_ESS BIT(4)
+#define DCMIPP_PRCR_PCKPOL BIT(5)
+#define DCMIPP_PRCR_HSPOL BIT(6)
+#define DCMIPP_PRCR_VSPOL BIT(7)
+#define DCMIPP_PRCR_ENABLE BIT(14)
+#define DCMIPP_PRCR_SWAPCYCLES BIT(25)
+
+#define DCMIPP_PRESCR 0x108
+#define DCMIPP_PRESUR 0x10c
+
+#define DCMIPP_CMCR 0x204
+#define DCMIPP_CMCR_INSEL BIT(0)
+
+#define DCMIPP_P0FSCR 0x404
+#define DCMIPP_P0FSCR_DTMODE_MASK GENMASK(17, 16)
+#define DCMIPP_P0FSCR_DTMODE_SHIFT 16
+#define DCMIPP_P0FSCR_DTMODE_DTIDA 0x00
+#define DCMIPP_P0FSCR_DTMODE_ALLDT 0x03
+#define DCMIPP_P0FSCR_DTIDA_MASK GENMASK(5, 0)
+#define DCMIPP_P0FSCR_DTIDA_SHIFT 0
+
+#define IS_SINK(pad) (!(pad))
+#define IS_SRC(pad) ((pad))
+
+struct dcmipp_inp_pix_map {
+ unsigned int code_sink;
+ unsigned int code_src;
+ /* Parallel related information */
+ u8 prcr_format;
+ u8 prcr_swapcycles;
+ /* CSI related information */
+ unsigned int dt;
+};
+
+#define PIXMAP_SINK_SRC_PRCR_SWAP(sink, src, prcr, swap, data_type) \
+ { \
+ .code_sink = MEDIA_BUS_FMT_##sink, \
+ .code_src = MEDIA_BUS_FMT_##src, \
+ .prcr_format = DCMIPP_PRCR_FORMAT_##prcr, \
+ .prcr_swapcycles = swap, \
+ .dt = data_type, \
+ }
+static const struct dcmipp_inp_pix_map dcmipp_inp_pix_map_list[] = {
+ /* RGB565 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_LE, RGB565_2X8_LE, RGB565, 1, MIPI_CSI2_DT_RGB565),
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_BE, RGB565_2X8_LE, RGB565, 0, MIPI_CSI2_DT_RGB565),
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_1X16, RGB565_1X16, RGB565, 0, MIPI_CSI2_DT_RGB565),
+ /* YUV422 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, YUYV8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_1X16, YUYV8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, UYVY8_2X8, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, UYVY8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_1X16, UYVY8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, YUYV8_2X8, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_2X8, YVYU8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_1X16, YVYU8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_2X8, VYUY8_2X8, YUV422, 1, MIPI_CSI2_DT_YUV422_8B),
+ PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_1X16, VYUY8_1X16, YUV422, 0, MIPI_CSI2_DT_YUV422_8B),
+ /* GREY */
+ PIXMAP_SINK_SRC_PRCR_SWAP(Y8_1X8, Y8_1X8, G8, 0, MIPI_CSI2_DT_RAW8),
+ /* Raw Bayer */
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR8_1X8, SBGGR8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG8_1X8, SGBRG8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG8_1X8, SGRBG8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB8_1X8, SRGGB8_1X8, RAW8, 0, MIPI_CSI2_DT_RAW8),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR10_1X10, SBGGR10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG10_1X10, SGBRG10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG10_1X10, SGRBG10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB10_1X10, SRGGB10_1X10, RAW10, 0, MIPI_CSI2_DT_RAW10),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR12_1X12, SBGGR12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG12_1X12, SGBRG12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG12_1X12, SGRBG12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB12_1X12, SRGGB12_1X12, RAW12, 0, MIPI_CSI2_DT_RAW12),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR14_1X14, SBGGR14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG14_1X14, SGBRG14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG14_1X14, SGRBG14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB14_1X14, SRGGB14_1X14, RAW14, 0, MIPI_CSI2_DT_RAW14),
+ /* JPEG */
+ PIXMAP_SINK_SRC_PRCR_SWAP(JPEG_1X8, JPEG_1X8, BYTE_STREAM, 0, 0),
+};
+
+/*
+ * Search through the pix_map table, skipping consecutive entries with the
+ * same code
+ */
+static inline const struct dcmipp_inp_pix_map *dcmipp_inp_pix_map_by_index
+ (unsigned int index,
+ unsigned int pad)
+{
+ unsigned int i = 0;
+ u32 prev_code = 0, cur_code;
+
+ while (i < ARRAY_SIZE(dcmipp_inp_pix_map_list)) {
+ if (IS_SRC(pad))
+ cur_code = dcmipp_inp_pix_map_list[i].code_src;
+ else
+ cur_code = dcmipp_inp_pix_map_list[i].code_sink;
+
+ if (cur_code == prev_code) {
+ i++;
+ continue;
+ }
+ prev_code = cur_code;
+
+ if (index == 0)
+ break;
+ i++;
+ index--;
+ }
+
+ if (i >= ARRAY_SIZE(dcmipp_inp_pix_map_list))
+ return NULL;
+
+ return &dcmipp_inp_pix_map_list[i];
+}
+
+static inline const struct dcmipp_inp_pix_map *dcmipp_inp_pix_map_by_code
+ (u32 code_sink, u32 code_src)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dcmipp_inp_pix_map_list); i++) {
+ if ((dcmipp_inp_pix_map_list[i].code_sink == code_sink &&
+ dcmipp_inp_pix_map_list[i].code_src == code_src) ||
+ (dcmipp_inp_pix_map_list[i].code_sink == code_src &&
+ dcmipp_inp_pix_map_list[i].code_src == code_sink) ||
+ (dcmipp_inp_pix_map_list[i].code_sink == code_sink &&
+ code_src == 0) ||
+ (code_sink == 0 &&
+ dcmipp_inp_pix_map_list[i].code_src == code_src))
+ return &dcmipp_inp_pix_map_list[i];
+ }
+ return NULL;
+}
+
+struct dcmipp_inp_device {
+ struct dcmipp_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ void __iomem *regs;
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = DCMIPP_FMT_WIDTH_DEFAULT,
+ .height = DCMIPP_FMT_HEIGHT_DEFAULT,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = DCMIPP_COLORSPACE_DEFAULT,
+ .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
+ .quantization = DCMIPP_QUANTIZATION_DEFAULT,
+ .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
+};
+
+static int dcmipp_inp_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_state_get_format(sd_state, i);
+ *mf = fmt_default;
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct dcmipp_inp_pix_map *vpix =
+ dcmipp_inp_pix_map_by_index(code->index, code->pad);
+
+ if (!vpix)
+ return -EINVAL;
+
+ code->code = IS_SRC(code->pad) ? vpix->code_src : vpix->code_sink;
+
+ return 0;
+}
+
+static int dcmipp_inp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table */
+ vpix = dcmipp_inp_pix_map_by_code(IS_SINK(fse->pad) ? fse->code : 0,
+ IS_SRC(fse->pad) ? fse->code : 0);
+ if (!vpix)
+ return -EINVAL;
+
+ fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
+ fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
+ fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
+ fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static void dcmipp_inp_adjust_fmt(struct dcmipp_inp_device *inp,
+ struct v4l2_mbus_framefmt *fmt, __u32 pad)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+
+ /* Only accept code in the pix map table */
+ vpix = dcmipp_inp_pix_map_by_code(IS_SINK(pad) ? fmt->code : 0,
+ IS_SRC(pad) ? fmt->code : 0);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
+ /* Exclude JPEG if BT656 bus is selected */
+ if (vpix && vpix->code_sink == MEDIA_BUS_FMT_JPEG_1X8 &&
+ inp->ved.bus_type == V4L2_MBUS_BT656)
+ fmt->code = fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
+ DCMIPP_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
+ DCMIPP_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ dcmipp_colorimetry_clamp(fmt);
+}
+
+static int dcmipp_inp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct dcmipp_inp_device *inp = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (v4l2_subdev_is_streaming(sd))
+ return -EBUSY;
+
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+
+ /* Set the new format */
+ dcmipp_inp_adjust_fmt(inp, &fmt->format, fmt->pad);
+
+ dev_dbg(inp->dev, "%s: format update: old:%dx%d (0x%x, %d, %d, %d, %d) new:%dx%d (0x%x, %d, %d, %d, %d)\n",
+ inp->sd.name,
+ /* old */
+ mf->width, mf->height, mf->code,
+ mf->colorspace, mf->quantization,
+ mf->xfer_func, mf->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *mf = fmt->format;
+
+ /* When setting the sink format, report that format on the src pad */
+ if (IS_SINK(fmt->pad)) {
+ mf = v4l2_subdev_state_get_format(sd_state, 1);
+ *mf = fmt->format;
+ dcmipp_inp_adjust_fmt(inp, mf, 1);
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_configure_parallel(struct dcmipp_inp_device *inp,
+ struct v4l2_subdev_state *state)
+{
+ u32 val = 0;
+ const struct dcmipp_inp_pix_map *vpix;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ /* Set vertical synchronization polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (inp->ved.bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= DCMIPP_PRCR_PCKPOL;
+
+ /*
+ * BT656 embedded synchronisation bus mode.
+ *
+ * Default SAV/EAV mode is supported here with default codes
+ * SAV=0xff000080 & EAV=0xff00009d.
+ * With DCMIPP this means LSC=SAV=0x80 & LEC=EAV=0x9d.
+ */
+ if (inp->ved.bus_type == V4L2_MBUS_BT656) {
+ val |= DCMIPP_PRCR_ESS;
+
+ /* Unmask all codes */
+ reg_write(inp, DCMIPP_PRESUR, 0xffffffff);/* FEC:LEC:LSC:FSC */
+
+ /* Trig on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
+ reg_write(inp, DCMIPP_PRESCR, 0xff9d80ff);/* FEC:LEC:LSC:FSC */
+ }
+
+ /* Set format */
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ src_fmt = v4l2_subdev_state_get_format(state, 1);
+
+ vpix = dcmipp_inp_pix_map_by_code(sink_fmt->code, src_fmt->code);
+ if (!vpix) {
+ dev_err(inp->dev, "Invalid sink/src format configuration\n");
+ return -EINVAL;
+ }
+
+ val |= vpix->prcr_format << DCMIPP_PRCR_FORMAT_SHIFT;
+
+ /* swap cycles */
+ if (vpix->prcr_swapcycles)
+ val |= DCMIPP_PRCR_SWAPCYCLES;
+
+ reg_write(inp, DCMIPP_PRCR, val);
+
+ /* Select the DCMIPP parallel interface */
+ reg_write(inp, DCMIPP_CMCR, 0);
+
+ /* Enable parallel interface */
+ reg_set(inp, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+
+ return 0;
+}
+
+static int dcmipp_inp_configure_csi(struct dcmipp_inp_device *inp,
+ struct v4l2_subdev_state *state)
+{
+ const struct dcmipp_inp_pix_map *vpix;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ /* Get format information */
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ src_fmt = v4l2_subdev_state_get_format(state, 1);
+
+ vpix = dcmipp_inp_pix_map_by_code(sink_fmt->code, src_fmt->code);
+ if (!vpix) {
+ dev_err(inp->dev, "Invalid sink/src format configuration\n");
+ return -EINVAL;
+ }
+
+ /* Apply configuration on each input pipe */
+ reg_clear(inp, DCMIPP_P0FSCR,
+ DCMIPP_P0FSCR_DTMODE_MASK | DCMIPP_P0FSCR_DTIDA_MASK);
+
+ /* In case of JPEG we don't know the DT so we allow all data */
+ /*
+ * TODO - dt == 0 is checked for the time being so that other
+ * unknown data-types are also allowed
+ */
+ if (!vpix->dt)
+ reg_set(inp, DCMIPP_P0FSCR,
+ DCMIPP_P0FSCR_DTMODE_ALLDT << DCMIPP_P0FSCR_DTMODE_SHIFT);
+ else
+ reg_set(inp, DCMIPP_P0FSCR,
+ vpix->dt << DCMIPP_P0FSCR_DTIDA_SHIFT |
+ DCMIPP_P0FSCR_DTMODE_DTIDA);
+
+ /* Select the DCMIPP CSI interface */
+ reg_write(inp, DCMIPP_CMCR, DCMIPP_CMCR_INSEL);
+
+ return 0;
+}
+
+static int dcmipp_inp_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret = 0;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ if (inp->ved.bus_type == V4L2_MBUS_PARALLEL ||
+ inp->ved.bus_type == V4L2_MBUS_BT656)
+ ret = dcmipp_inp_configure_parallel(inp, state);
+ else if (inp->ved.bus_type == V4L2_MBUS_CSI2_DPHY)
+ ret = dcmipp_inp_configure_csi(inp, state);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_enable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(inp->dev,
+ "failed to start source subdev streaming (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmipp_inp_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *s_pad;
+ int ret;
+
+ /* Get source subdev */
+ s_pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!s_pad || !is_media_entity_v4l2_subdev(s_pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(s_pad->entity);
+
+ ret = v4l2_subdev_disable_streams(s_subdev, s_pad->index, BIT_ULL(0));
+ if (ret < 0) {
+ dev_err(inp->dev,
+ "failed to stop source subdev streaming (%d)\n", ret);
+ return ret;
+ }
+
+ if (inp->ved.bus_type == V4L2_MBUS_PARALLEL ||
+ inp->ved.bus_type == V4L2_MBUS_BT656) {
+ /* Disable parallel interface */
+ reg_clear(inp, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops dcmipp_inp_pad_ops = {
+ .enum_mbus_code = dcmipp_inp_enum_mbus_code,
+ .enum_frame_size = dcmipp_inp_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_inp_set_fmt,
+ .enable_streams = dcmipp_inp_enable_streams,
+ .disable_streams = dcmipp_inp_disable_streams,
+};
+
+static const struct v4l2_subdev_video_ops dcmipp_inp_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_ops dcmipp_inp_ops = {
+ .pad = &dcmipp_inp_pad_ops,
+ .video = &dcmipp_inp_video_ops,
+};
+
+static void dcmipp_inp_release(struct v4l2_subdev *sd)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(sd, struct dcmipp_inp_device, sd);
+
+ kfree(inp);
+}
+
+static const struct v4l2_subdev_internal_ops dcmipp_inp_int_ops = {
+ .init_state = dcmipp_inp_init_state,
+ .release = dcmipp_inp_release,
+};
+
+void dcmipp_inp_ent_release(struct dcmipp_ent_device *ved)
+{
+ struct dcmipp_inp_device *inp =
+ container_of(ved, struct dcmipp_inp_device, ved);
+
+ dcmipp_ent_sd_unregister(ved, &inp->sd);
+}
+
+struct dcmipp_ent_device *dcmipp_inp_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs)
+{
+ struct dcmipp_inp_device *inp;
+ const unsigned long pads_flag[] = {
+ MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
+ };
+ int ret;
+
+ /* Allocate the inp struct */
+ inp = kzalloc(sizeof(*inp), GFP_KERNEL);
+ if (!inp)
+ return ERR_PTR(-ENOMEM);
+
+ inp->regs = regs;
+
+ /* Initialize ved and sd */
+ ret = dcmipp_ent_sd_register(&inp->ved, &inp->sd, v4l2_dev,
+ entity_name, MEDIA_ENT_F_VID_IF_BRIDGE,
+ ARRAY_SIZE(pads_flag), pads_flag,
+ &dcmipp_inp_int_ops, &dcmipp_inp_ops,
+ NULL, NULL);
+ if (ret) {
+ kfree(inp);
+ return ERR_PTR(ret);
+ }
+
+ inp->dev = dev;
+
+ return &inp->ved;
+}
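
dcmipp_inp_pix_map_by_index() above implements the mbus-code enumeration: it walks the table and counts back-to-back entries sharing a code only once, so a code that appears in consecutive table rows is reported a single time by VIDIOC_SUBDEV_ENUM_MBUS_CODE. A standalone sketch of that walk over a made-up code table (the hex values below are illustrative only, not real MEDIA_BUS_FMT_* codes):

#include <stdio.h>

/* trimmed-down stand-in for dcmipp_inp_pix_map_list: only the sink codes */
static const unsigned int sink_codes[] = {
	0x1008, 0x1008, 0x100e,	/* two rows sharing a code, then a new one */
	0x2008, 0x2008, 0x2009,
};

/* same walk as dcmipp_inp_pix_map_by_index(): consecutive duplicates count once */
static int code_by_index(unsigned int index, unsigned int *code)
{
	unsigned int i = 0, prev = 0;

	while (i < sizeof(sink_codes) / sizeof(sink_codes[0])) {
		if (sink_codes[i] == prev) {
			i++;
			continue;
		}
		prev = sink_codes[i];

		if (index == 0)
			break;
		i++;
		index--;
	}

	if (i >= sizeof(sink_codes) / sizeof(sink_codes[0]))
		return -1;

	*code = sink_codes[i];
	return 0;
}

int main(void)
{
	unsigned int code, idx;

	/* enumerate exactly like ENUM_MBUS_CODE would: stop at the first miss */
	for (idx = 0; !code_by_index(idx, &code); idx++)
		printf("index %u -> 0x%04x\n", idx, code);
	return 0;
}
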
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
deleted file mode 100644
index 62c5c3331cfe..000000000000
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for STM32 Digital Camera Memory Interface Pixel Processor
- *
- * Copyright (C) STMicroelectronics SA 2023
- * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
- * Alain Volmat <alain.volmat@foss.st.com>
- * for STMicroelectronics.
- */
-
-#include <linux/v4l2-mediabus.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-subdev.h>
-
-#include "dcmipp-common.h"
-
-#define DCMIPP_PRCR 0x104
-#define DCMIPP_PRCR_FORMAT_SHIFT 16
-#define DCMIPP_PRCR_FORMAT_YUV422 0x1e
-#define DCMIPP_PRCR_FORMAT_RGB565 0x22
-#define DCMIPP_PRCR_FORMAT_RAW8 0x2a
-#define DCMIPP_PRCR_FORMAT_G8 0x4a
-#define DCMIPP_PRCR_FORMAT_BYTE_STREAM 0x5a
-#define DCMIPP_PRCR_ESS BIT(4)
-#define DCMIPP_PRCR_PCKPOL BIT(5)
-#define DCMIPP_PRCR_HSPOL BIT(6)
-#define DCMIPP_PRCR_VSPOL BIT(7)
-#define DCMIPP_PRCR_ENABLE BIT(14)
-#define DCMIPP_PRCR_SWAPCYCLES BIT(25)
-
-#define DCMIPP_PRESCR 0x108
-#define DCMIPP_PRESUR 0x10c
-
-#define IS_SINK(pad) (!(pad))
-#define IS_SRC(pad) ((pad))
-
-struct dcmipp_par_pix_map {
- unsigned int code_sink;
- unsigned int code_src;
- u8 prcr_format;
- u8 prcr_swapcycles;
-};
-
-#define PIXMAP_SINK_SRC_PRCR_SWAP(sink, src, prcr, swap) \
- { \
- .code_sink = MEDIA_BUS_FMT_##sink, \
- .code_src = MEDIA_BUS_FMT_##src, \
- .prcr_format = DCMIPP_PRCR_FORMAT_##prcr, \
- .prcr_swapcycles = swap, \
- }
-static const struct dcmipp_par_pix_map dcmipp_par_pix_map_list[] = {
- /* RGB565 */
- PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_LE, RGB565_2X8_LE, RGB565, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_BE, RGB565_2X8_LE, RGB565, 0),
- /* YUV422 */
- PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, YUYV8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, UYVY8_2X8, YUV422, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, UYVY8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, YUYV8_2X8, YUV422, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_2X8, YVYU8_2X8, YUV422, 1),
- PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_2X8, VYUY8_2X8, YUV422, 1),
- /* GREY */
- PIXMAP_SINK_SRC_PRCR_SWAP(Y8_1X8, Y8_1X8, G8, 0),
- /* Raw Bayer */
- PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR8_1X8, SBGGR8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG8_1X8, SGBRG8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG8_1X8, SGRBG8_1X8, RAW8, 0),
- PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB8_1X8, SRGGB8_1X8, RAW8, 0),
- /* JPEG */
- PIXMAP_SINK_SRC_PRCR_SWAP(JPEG_1X8, JPEG_1X8, BYTE_STREAM, 0),
-};
-
-/*
- * Search through the pix_map table, skipping two consecutive entry with the
- * same code
- */
-static inline const struct dcmipp_par_pix_map *dcmipp_par_pix_map_by_index
- (unsigned int index,
- unsigned int pad)
-{
- unsigned int i = 0;
- u32 prev_code = 0, cur_code;
-
- while (i < ARRAY_SIZE(dcmipp_par_pix_map_list)) {
- if (IS_SRC(pad))
- cur_code = dcmipp_par_pix_map_list[i].code_src;
- else
- cur_code = dcmipp_par_pix_map_list[i].code_sink;
-
- if (cur_code == prev_code) {
- i++;
- continue;
- }
- prev_code = cur_code;
-
- if (index == 0)
- break;
- i++;
- index--;
- }
-
- if (i >= ARRAY_SIZE(dcmipp_par_pix_map_list))
- return NULL;
-
- return &dcmipp_par_pix_map_list[i];
-}
-
-static inline const struct dcmipp_par_pix_map *dcmipp_par_pix_map_by_code
- (u32 code_sink, u32 code_src)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(dcmipp_par_pix_map_list); i++) {
- if ((dcmipp_par_pix_map_list[i].code_sink == code_sink &&
- dcmipp_par_pix_map_list[i].code_src == code_src) ||
- (dcmipp_par_pix_map_list[i].code_sink == code_src &&
- dcmipp_par_pix_map_list[i].code_src == code_sink) ||
- (dcmipp_par_pix_map_list[i].code_sink == code_sink &&
- code_src == 0) ||
- (code_sink == 0 &&
- dcmipp_par_pix_map_list[i].code_src == code_src))
- return &dcmipp_par_pix_map_list[i];
- }
- return NULL;
-}
-
-struct dcmipp_par_device {
- struct dcmipp_ent_device ved;
- struct v4l2_subdev sd;
- struct device *dev;
- void __iomem *regs;
- bool streaming;
-};
-
-static const struct v4l2_mbus_framefmt fmt_default = {
- .width = DCMIPP_FMT_WIDTH_DEFAULT,
- .height = DCMIPP_FMT_HEIGHT_DEFAULT,
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .field = V4L2_FIELD_NONE,
- .colorspace = DCMIPP_COLORSPACE_DEFAULT,
- .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
- .quantization = DCMIPP_QUANTIZATION_DEFAULT,
- .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
-};
-
-static int dcmipp_par_init_state(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
-{
- unsigned int i;
-
- for (i = 0; i < sd->entity.num_pads; i++) {
- struct v4l2_mbus_framefmt *mf;
-
- mf = v4l2_subdev_state_get_format(sd_state, i);
- *mf = fmt_default;
- }
-
- return 0;
-}
-
-static int dcmipp_par_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- const struct dcmipp_par_pix_map *vpix =
- dcmipp_par_pix_map_by_index(code->index, code->pad);
-
- if (!vpix)
- return -EINVAL;
-
- code->code = IS_SRC(code->pad) ? vpix->code_src : vpix->code_sink;
-
- return 0;
-}
-
-static int dcmipp_par_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- const struct dcmipp_par_pix_map *vpix;
-
- if (fse->index)
- return -EINVAL;
-
- /* Only accept code in the pix map table */
- vpix = dcmipp_par_pix_map_by_code(IS_SINK(fse->pad) ? fse->code : 0,
- IS_SRC(fse->pad) ? fse->code : 0);
- if (!vpix)
- return -EINVAL;
-
- fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
- fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
- fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
- fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
-
- return 0;
-}
-
-static void dcmipp_par_adjust_fmt(struct dcmipp_par_device *par,
- struct v4l2_mbus_framefmt *fmt, __u32 pad)
-{
- const struct dcmipp_par_pix_map *vpix;
-
- /* Only accept code in the pix map table */
- vpix = dcmipp_par_pix_map_by_code(IS_SINK(pad) ? fmt->code : 0,
- IS_SRC(pad) ? fmt->code : 0);
- if (!vpix)
- fmt->code = fmt_default.code;
-
- /* Exclude JPEG if BT656 bus is selected */
- if (vpix && vpix->code_sink == MEDIA_BUS_FMT_JPEG_1X8 &&
- par->ved.bus_type == V4L2_MBUS_BT656)
- fmt->code = fmt_default.code;
-
- fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
- DCMIPP_FRAME_MAX_WIDTH) & ~1;
- fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
- DCMIPP_FRAME_MAX_HEIGHT) & ~1;
-
- if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
- fmt->field = fmt_default.field;
-
- dcmipp_colorimetry_clamp(fmt);
-}
-
-static int dcmipp_par_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct dcmipp_par_device *par = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *mf;
-
- if (par->streaming)
- return -EBUSY;
-
- mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
-
- /* Set the new format */
- dcmipp_par_adjust_fmt(par, &fmt->format, fmt->pad);
-
- dev_dbg(par->dev, "%s: format update: old:%dx%d (0x%x, %d, %d, %d, %d) new:%dx%d (0x%x, %d, %d, %d, %d)\n",
- par->sd.name,
- /* old */
- mf->width, mf->height, mf->code,
- mf->colorspace, mf->quantization,
- mf->xfer_func, mf->ycbcr_enc,
- /* new */
- fmt->format.width, fmt->format.height, fmt->format.code,
- fmt->format.colorspace, fmt->format.quantization,
- fmt->format.xfer_func, fmt->format.ycbcr_enc);
-
- *mf = fmt->format;
-
- /* When setting the sink format, report that format on the src pad */
- if (IS_SINK(fmt->pad)) {
- mf = v4l2_subdev_state_get_format(sd_state, 1);
- *mf = fmt->format;
- dcmipp_par_adjust_fmt(par, mf, 1);
- }
-
- return 0;
-}
-
-static const struct v4l2_subdev_pad_ops dcmipp_par_pad_ops = {
- .enum_mbus_code = dcmipp_par_enum_mbus_code,
- .enum_frame_size = dcmipp_par_enum_frame_size,
- .get_fmt = v4l2_subdev_get_fmt,
- .set_fmt = dcmipp_par_set_fmt,
-};
-
-static int dcmipp_par_configure(struct dcmipp_par_device *par)
-{
- u32 val = 0;
- const struct dcmipp_par_pix_map *vpix;
- struct v4l2_subdev_state *state;
- struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_mbus_framefmt *src_fmt;
-
- /* Set vertical synchronization polarity */
- if (par->ved.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
- val |= DCMIPP_PRCR_VSPOL;
-
- /* Set horizontal synchronization polarity */
- if (par->ved.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- val |= DCMIPP_PRCR_HSPOL;
-
- /* Set pixel clock polarity */
- if (par->ved.bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- val |= DCMIPP_PRCR_PCKPOL;
-
- /*
- * BT656 embedded synchronisation bus mode.
- *
- * Default SAV/EAV mode is supported here with default codes
- * SAV=0xff000080 & EAV=0xff00009d.
- * With DCMIPP this means LSC=SAV=0x80 & LEC=EAV=0x9d.
- */
- if (par->ved.bus_type == V4L2_MBUS_BT656) {
- val |= DCMIPP_PRCR_ESS;
-
- /* Unmask all codes */
- reg_write(par, DCMIPP_PRESUR, 0xffffffff);/* FEC:LEC:LSC:FSC */
-
- /* Trig on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
- reg_write(par, DCMIPP_PRESCR, 0xff9d80ff);/* FEC:LEC:LSC:FSC */
- }
-
- /* Set format */
- state = v4l2_subdev_lock_and_get_active_state(&par->sd);
- sink_fmt = v4l2_subdev_state_get_format(state, 0);
- src_fmt = v4l2_subdev_state_get_format(state, 1);
- v4l2_subdev_unlock_state(state);
-
- vpix = dcmipp_par_pix_map_by_code(sink_fmt->code, src_fmt->code);
- if (!vpix) {
- dev_err(par->dev, "Invalid sink/src format configuration\n");
- return -EINVAL;
- }
-
- val |= vpix->prcr_format << DCMIPP_PRCR_FORMAT_SHIFT;
-
- /* swap cycles */
- if (vpix->prcr_swapcycles)
- val |= DCMIPP_PRCR_SWAPCYCLES;
-
- reg_write(par, DCMIPP_PRCR, val);
-
- return 0;
-}
-
-static int dcmipp_par_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct dcmipp_par_device *par =
- container_of(sd, struct dcmipp_par_device, sd);
- struct v4l2_subdev *s_subdev;
- struct media_pad *pad;
- int ret = 0;
-
- /* Get source subdev */
- pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
- s_subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- if (enable) {
- ret = dcmipp_par_configure(par);
- if (ret)
- return ret;
-
- /* Enable parallel interface */
- reg_set(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
-
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(par->dev,
- "failed to start source subdev streaming (%d)\n",
- ret);
- return ret;
- }
- } else {
- ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
- if (ret < 0) {
- dev_err(par->dev,
- "failed to stop source subdev streaming (%d)\n",
- ret);
- return ret;
- }
-
- /* Disable parallel interface */
- reg_clear(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
- }
-
- par->streaming = enable;
-
- return ret;
-}
-
-static const struct v4l2_subdev_video_ops dcmipp_par_video_ops = {
- .s_stream = dcmipp_par_s_stream,
-};
-
-static const struct v4l2_subdev_ops dcmipp_par_ops = {
- .pad = &dcmipp_par_pad_ops,
- .video = &dcmipp_par_video_ops,
-};
-
-static void dcmipp_par_release(struct v4l2_subdev *sd)
-{
- struct dcmipp_par_device *par =
- container_of(sd, struct dcmipp_par_device, sd);
-
- kfree(par);
-}
-
-static const struct v4l2_subdev_internal_ops dcmipp_par_int_ops = {
- .init_state = dcmipp_par_init_state,
- .release = dcmipp_par_release,
-};
-
-void dcmipp_par_ent_release(struct dcmipp_ent_device *ved)
-{
- struct dcmipp_par_device *par =
- container_of(ved, struct dcmipp_par_device, ved);
-
- dcmipp_ent_sd_unregister(ved, &par->sd);
-}
-
-struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
- const char *entity_name,
- struct v4l2_device *v4l2_dev,
- void __iomem *regs)
-{
- struct dcmipp_par_device *par;
- const unsigned long pads_flag[] = {
- MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
- };
- int ret;
-
- /* Allocate the par struct */
- par = kzalloc(sizeof(*par), GFP_KERNEL);
- if (!par)
- return ERR_PTR(-ENOMEM);
-
- par->regs = regs;
-
- /* Initialize ved and sd */
- ret = dcmipp_ent_sd_register(&par->ved, &par->sd, v4l2_dev,
- entity_name, MEDIA_ENT_F_VID_IF_BRIDGE,
- ARRAY_SIZE(pads_flag), pads_flag,
- &dcmipp_par_int_ops, &dcmipp_par_ops,
- NULL, NULL);
- if (ret) {
- kfree(par);
- return ERR_PTR(ret);
- }
-
- par->dev = dev;
-
- return &par->ved;
-}
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 811260dc3c77..edc217eed293 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -227,6 +227,7 @@ struct hantro_dev {
* @src_fmt: V4L2 pixel format of active source format.
* @vpu_dst_fmt: Descriptor of active destination format.
* @dst_fmt: V4L2 pixel format of active destination format.
+ * @ref_fmt: V4L2 pixel format of the reference frames format.
*
* @ctrl_handler: Control handler used to register controls.
* @jpeg_quality: User-specified JPEG compression quality.
@@ -255,6 +256,7 @@ struct hantro_ctx {
struct v4l2_pix_format_mplane src_fmt;
const struct hantro_fmt *vpu_dst_fmt;
struct v4l2_pix_format_mplane dst_fmt;
+ struct v4l2_pix_format_mplane ref_fmt;
struct v4l2_ctrl_handler ctrl_handler;
int jpeg_quality;
@@ -332,12 +334,19 @@ struct hantro_vp9_decoded_buffer_info {
u32 bit_depth : 4;
};
+struct hantro_av1_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ size_t chroma_offset;
+ size_t mv_offset;
+};
+
struct hantro_decoded_buffer {
/* Must be the first field in this struct. */
struct v4l2_m2m_buffer base;
union {
struct hantro_vp9_decoded_buffer_info vp9;
+ struct hantro_av1_decoded_buffer_info av1;
};
};
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index 5c1d799d8618..aae0b562fabb 100644
--- a/drivers/media/platform/verisilicon/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -47,7 +47,7 @@ irqreturn_t hantro_g2_irq(int irq, void *dev_id)
size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx)
{
- return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+ return ctx->ref_fmt.plane_fmt[0].bytesperline * ctx->ref_fmt.height;
}
size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx)
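
The change above derives the G2 chroma-plane offset from the reference-frame format rather than from width * height * bit_depth / 8: bytesperline already accounts for any per-line padding of the tiled reference format, so bytesperline * height gives the real size of the luma plane. A small arithmetic sketch with made-up numbers (the bytesperline value below is hypothetical and depends on the exact reference format):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* illustrative numbers only: a 10-bit 3840x2160 reference frame whose
	 * rows are padded so that bytesperline is larger than the packed
	 * width * bit_depth / 8 value used before this change */
	unsigned int width = 3840, height = 2160, bit_depth = 10;
	unsigned int bytesperline = 4864;	/* hypothetical, format dependent */

	size_t old_offset = (size_t)width * height * bit_depth / 8;
	size_t new_offset = (size_t)bytesperline * height;

	printf("old chroma offset: %zu\n", old_offset);
	printf("new chroma offset: %zu\n", new_offset);
	return 0;
}
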
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 232c93eea7ee..c435a393e0cb 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -194,35 +194,25 @@ void hantro_postproc_free(struct hantro_ctx *ctx)
static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx)
{
- struct v4l2_pix_format_mplane pix_mp;
- const struct hantro_fmt *fmt;
unsigned int buf_size;
- /* this should always pick native format */
- fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
- if (!fmt)
- return 0;
-
- v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
- ctx->src_fmt.height);
-
- buf_size = pix_mp.plane_fmt[0].sizeimage;
+ buf_size = ctx->ref_fmt.plane_fmt[0].sizeimage;
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
- buf_size += hantro_h264_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_h264_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
- buf_size += hantro_vp9_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_vp9_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) {
- buf_size += hantro_hevc_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_hevc_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
if (ctx->hevc_dec.use_compression)
- buf_size += hantro_hevc_compressed_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_hevc_compressed_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
}
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
- buf_size += hantro_av1_mv_size(pix_mp.width,
- pix_mp.height);
+ buf_size += hantro_av1_mv_size(ctx->ref_fmt.width,
+ ctx->ref_fmt.height);
return buf_size;
}
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 2513adfbd825..2bce940a5822 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -126,6 +126,24 @@ hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
return NULL;
}
+static int
+hantro_set_reference_frames_format(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *fmt;
+ int dst_bit_depth = hantro_get_format_depth(ctx->vpu_dst_fmt->fourcc);
+
+ fmt = hantro_get_default_fmt(ctx, false, dst_bit_depth, HANTRO_AUTO_POSTPROC);
+ if (!fmt)
+ return -EINVAL;
+
+ ctx->ref_fmt.width = ctx->src_fmt.width;
+ ctx->ref_fmt.height = ctx->src_fmt.height;
+
+ v4l2_apply_frmsize_constraints(&ctx->ref_fmt.width, &ctx->ref_fmt.height, &fmt->frmsize);
+ return v4l2_fill_pixfmt_mp(&ctx->ref_fmt, fmt->fourcc,
+ ctx->ref_fmt.width, ctx->ref_fmt.height);
+}
+
const struct hantro_fmt *
hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
int bit_depth, bool need_postproc)
@@ -595,6 +613,9 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->dst_fmt = *pix_mp;
+ ret = hantro_set_reference_frames_format(ctx);
+ if (ret)
+ return ret;
/*
* Current raw format might have become invalid with newly
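
hantro_set_reference_frames_format() above sizes the (possibly non-displayed) reference buffers from the native decoder format: it starts from the source width/height, applies the native format's frmsize constraints, and fills ctx->ref_fmt through v4l2_fill_pixfmt_mp(). A rough standalone sketch of the step-alignment part, assuming the step values seen in the imx8m G2 table later in this diff (step_width 8, step_height 32); the real v4l2_apply_frmsize_constraints() helper also clamps to the min/max bounds and may round to the nearest step rather than always up:

#include <stdio.h>

/* round v up to the next multiple of step; a simplified stand-in for the
 * alignment applied by v4l2_apply_frmsize_constraints() */
static unsigned int align_up(unsigned int v, unsigned int step)
{
	return ((v + step - 1) / step) * step;
}

int main(void)
{
	/* hypothetical decode resolution that is not step-aligned */
	unsigned int width = 1919, height = 1081;

	printf("ref_fmt: %ux%u -> %ux%u\n", width, height,
	       align_up(width, 8), align_up(height, 32));
	return 0;
}
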
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
index f850d8bddef6..35799da534ed 100644
--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -187,23 +187,23 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
- .step_width = TILE_MB_DIM,
+ .step_width = 8,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
- .step_height = TILE_MB_DIM,
+ .step_height = 32,
},
},
{
- .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .fourcc = V4L2_PIX_FMT_NV15_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
- .step_width = TILE_MB_DIM,
+ .step_width = 8,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
- .step_height = TILE_MB_DIM,
+ .step_height = 32,
},
},
{
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
index e54f5fac325b..69b5d9e12926 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -686,8 +686,6 @@ rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
struct hantro_dev *vpu = ctx->dev;
struct hantro_decoded_buffer *dst;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
- size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
- size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
int cur_width = frame->frame_width_minus_1 + 1;
int cur_height = frame->frame_height_minus_1 + 1;
int scale_width =
@@ -744,8 +742,8 @@ rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
dst = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
- chroma_addr = luma_addr + cr_offset;
- mv_addr = luma_addr + mv_offset;
+ chroma_addr = luma_addr + dst->av1.chroma_offset;
+ mv_addr = luma_addr + dst->av1.mv_offset;
hantro_write_addr(vpu, AV1_REFERENCE_Y(ref), luma_addr);
hantro_write_addr(vpu, AV1_REFERENCE_CB(ref), chroma_addr);
@@ -2089,6 +2087,9 @@ rockchip_vpu981_av1_dec_set_output_buffer(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ dst->av1.chroma_offset = cr_offset;
+ dst->av1.mv_offset = mv_offset;
+
hantro_write_addr(vpu, AV1_TILE_OUT_LU, luma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_CH, chroma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_MV, mv_addr);
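
The AV1 hunks above stop recomputing the reference chroma/MV offsets from the current decode geometry and instead store, in hantro_decoded_buffer, the offsets that were in effect when each frame was written out; rockchip_vpu981_av1_dec_set_ref() then reads them back, so a reference produced at a different resolution or before a format change is still addressed where its planes actually sit. A small self-contained sketch of that store-then-reuse pattern, with made-up plane offsets and addresses:

#include <stddef.h>
#include <stdio.h>

/* stand-in for the av1 member added to struct hantro_decoded_buffer */
struct decoded_buf {
	size_t chroma_offset;
	size_t mv_offset;
};

/* remember the offsets at output-buffer setup time ... */
static void remember_offsets(struct decoded_buf *buf, size_t cr_offset,
			     size_t mv_offset)
{
	buf->chroma_offset = cr_offset;
	buf->mv_offset = mv_offset;
}

/* ... and reuse them when that buffer later serves as a reference */
static void program_reference(unsigned long luma_addr,
			      const struct decoded_buf *buf)
{
	printf("Y  @ %#lx\n", luma_addr);
	printf("CB @ %#lx\n", luma_addr + (unsigned long)buf->chroma_offset);
	printf("MV @ %#lx\n", luma_addr + (unsigned long)buf->mv_offset);
}

int main(void)
{
	struct decoded_buf ref = { 0 };

	remember_offsets(&ref, 0x200000, 0x300000);	/* made-up offsets */
	program_reference(0x80000000ul, &ref);
	return 0;
}
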
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index d52eccdc7eb9..72776d08046a 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -221,10 +221,6 @@ config USB_RAREMONO
source "drivers/media/radio/si470x/Kconfig"
source "drivers/media/radio/si4713/Kconfig"
-# TI's ST based wl128x FM radio
-
-source "drivers/media/radio/wl128x/Kconfig"
-
#
# ISA drivers configuration
#
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index cfb6af7d3bc3..1ff46f3a6ed3 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
-obj-$(CONFIG_RADIO_WL128X) += wl128x/
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
deleted file mode 100644
index 3e7713872e3f..000000000000
--- a/drivers/media/radio/wl128x/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# TI's wl128x FM driver based on TI's ST driver.
-#
-config RADIO_WL128X
- tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_DEV && RFKILL && TTY && TI_ST
- depends on GPIOLIB || COMPILE_TEST
- help
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/userspace-api/media/index.rst>.
diff --git a/drivers/media/radio/wl128x/Makefile b/drivers/media/radio/wl128x/Makefile
deleted file mode 100644
index 4396ca416cfa..000000000000
--- a/drivers/media/radio/wl128x/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for TI's shared transport driver based wl128x
-# FM radio.
-#
-obj-$(CONFIG_RADIO_WL128X) += fm_drv.o
-fm_drv-objs := fmdrv_common.o fmdrv_rx.o fmdrv_tx.o fmdrv_v4l2.o
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
deleted file mode 100644
index 03117a41dbd4..000000000000
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * Common header for all FM driver sub-modules.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FM_DRV_H
-#define _FM_DRV_H
-
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-
-#define FM_DRV_VERSION "0.1.1"
-#define FM_DRV_NAME "ti_fmdrv"
-#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
-#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
-
-/* Flag info */
-#define FM_INTTASK_RUNNING 0
-#define FM_INTTASK_SCHEDULE_PENDING 1
-#define FM_FW_DW_INPROGRESS 2
-#define FM_CORE_READY 3
-#define FM_CORE_TRANSPORT_READY 4
-#define FM_AF_SWITCH_INPROGRESS 5
-#define FM_CORE_TX_XMITING 6
-
-#define FM_TUNE_COMPLETE 0x1
-#define FM_BAND_LIMIT 0x2
-
-#define FM_DRV_TX_TIMEOUT (5*HZ) /* 5 seconds */
-#define FM_DRV_RX_SEEK_TIMEOUT (20*HZ) /* 20 seconds */
-
-#define fmerr(format, ...) \
- printk(KERN_ERR "fmdrv: " format, ## __VA_ARGS__)
-#define fmwarn(format, ...) \
- printk(KERN_WARNING "fmdrv: " format, ##__VA_ARGS__)
-#ifdef DEBUG
-#define fmdbg(format, ...) \
- printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
-#else /* DEBUG */
-#define fmdbg(format, ...) do {} while(0)
-#endif
-enum {
- FM_MODE_OFF,
- FM_MODE_TX,
- FM_MODE_RX,
- FM_MODE_ENTRY_MAX
-};
-
-#define FM_RX_RDS_INFO_FIELD_MAX 8 /* 4 Group * 2 Bytes */
-
-/* RX RDS data format */
-struct fm_rdsdata_format {
- union {
- struct {
- u8 buff[FM_RX_RDS_INFO_FIELD_MAX];
- } groupdatabuff;
- struct {
- u16 pidata;
- u8 blk_b[2];
- u8 blk_c[2];
- u8 blk_d[2];
- } groupgeneral;
- struct {
- u16 pidata;
- u8 blk_b[2];
- u8 af[2];
- u8 ps[2];
- } group0A;
- struct {
- u16 pi[2];
- u8 blk_b[2];
- u8 ps[2];
- } group0B;
- } data;
-};
-
-/* FM region (Europe/US, Japan) info */
-struct region_info {
- u32 chanl_space;
- u32 bot_freq;
- u32 top_freq;
- u8 fm_band;
-};
-struct fmdev;
-typedef void (*int_handler_prototype) (struct fmdev *);
-
-/* FM Interrupt processing related info */
-struct fm_irq {
- u8 stage;
- u16 flag; /* FM interrupt flag */
- u16 mask; /* FM interrupt mask */
- /* Interrupt process timeout handler */
- struct timer_list timer;
- u8 retry;
- int_handler_prototype *handlers;
-};
-
-/* RDS info */
-struct fm_rds {
- u8 flag; /* RX RDS on/off status */
- u8 last_blk_idx; /* Last received RDS block */
-
- /* RDS buffer */
- wait_queue_head_t read_queue;
- u32 buf_size; /* Size is always multiple of 3 */
- u32 wr_idx;
- u32 rd_idx;
- u8 *buff;
-};
-
-#define FM_RDS_MAX_AF_LIST 25
-
-/*
- * Current RX channel Alternate Frequency cache.
- * This info is used to switch to other freq (AF)
- * when current channel signal strength is below RSSI threshold.
- */
-struct tuned_station_info {
- u16 picode;
- u32 af_cache[FM_RDS_MAX_AF_LIST];
- u8 afcache_size;
- u8 af_list_max;
-};
-
-/* FM RX mode info */
-struct fm_rx {
- struct region_info region; /* Current selected band */
- u32 freq; /* Current RX frquency */
- u8 mute_mode; /* Current mute mode */
- u8 deemphasis_mode; /* Current deemphasis mode */
- /* RF dependent soft mute mode */
- u8 rf_depend_mute;
- u16 volume; /* Current volume level */
- u16 rssi_threshold; /* Current RSSI threshold level */
- /* Holds the index of the current AF jump */
- u8 afjump_idx;
- /* Will hold the frequency before the jump */
- u32 freq_before_jump;
- u8 rds_mode; /* RDS operation mode (RDS/RDBS) */
- u8 af_mode; /* Alternate frequency on/off */
- struct tuned_station_info stat_info;
- struct fm_rds rds;
-};
-
-#define FMTX_RDS_TXT_STR_SIZE 25
-/*
- * FM TX RDS data
- *
- * @ text_type: is the text following PS or RT
- * @ text: radio text string which could either be PS or RT
- * @ af_freq: alternate frequency for Tx
- * TODO: to be declared in application
- */
-struct tx_rds {
- u8 text_type;
- u8 text[FMTX_RDS_TXT_STR_SIZE];
- u8 flag;
- u32 af_freq;
-};
-/*
- * FM TX global data
- *
- * @ pwr_lvl: Power Level of the Transmission from mixer control
- * @ xmit_state: Transmission state = Updated locally upon Start/Stop
- * @ audio_io: i2S/Analog
- * @ tx_frq: Transmission frequency
- */
-struct fmtx_data {
- u8 pwr_lvl;
- u8 xmit_state;
- u8 audio_io;
- u8 region;
- u16 aud_mode;
- u32 preemph;
- u32 tx_frq;
- struct tx_rds rds;
-};
-
-/* FM driver operation structure */
-struct fmdev {
- struct video_device *radio_dev; /* V4L2 video device pointer */
- struct v4l2_device v4l2_dev; /* V4L2 top level struct */
- struct snd_card *card; /* Card which holds FM mixer controls */
- u16 asci_id;
- spinlock_t rds_buff_lock; /* To protect access to RDS buffer */
- spinlock_t resp_skb_lock; /* To protect access to received SKB */
-
- long flag; /* FM driver state machine info */
- int streg_cbdata; /* status of ST registration */
-
- struct sk_buff_head rx_q; /* RX queue */
- struct work_struct rx_bh_work; /* RX BH Work */
-
- struct sk_buff_head tx_q; /* TX queue */
- struct work_struct tx_bh_work; /* TX BH Work */
- unsigned long last_tx_jiffies; /* Timestamp of last pkt sent */
- atomic_t tx_cnt; /* Number of packets can send at a time */
-
- struct sk_buff *resp_skb; /* Response from the chip */
- /* Main task completion handler */
- struct completion maintask_comp;
- /* Opcode of last command sent to the chip */
- u8 pre_op;
- /* Handler used for wakeup when response packet is received */
- struct completion *resp_comp;
- struct fm_irq irq_info;
- u8 curr_fmmode; /* Current FM chip mode (TX, RX, OFF) */
- struct fm_rx rx; /* FM receiver info */
- struct fmtx_data tx_data;
-
- /* V4L2 ctrl framework handler*/
- struct v4l2_ctrl_handler ctrl_handler;
-
- /* For core assisted locking */
- struct mutex mutex;
-};
-#endif
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
deleted file mode 100644
index 4d032436691c..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ /dev/null
@@ -1,1676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * This sub-module of FM driver is common for FM RX and TX
- * functionality. This module is responsible for:
- * 1) Forming group of Channel-8 commands to perform particular
- * functionality (eg., frequency set require more than
- * one Channel-8 command to be sent to the chip).
- * 2) Sending each Channel-8 command to the chip and reading
- * response back over Shared Transport.
- * 3) Managing TX and RX Queues and BH bh Works.
- * 4) Handling FM Interrupt packet and taking appropriate action.
- * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
- * firmware files based on mode selection)
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/nospec.h>
-#include <linux/jiffies.h>
-
-#include "fmdrv.h"
-#include "fmdrv_v4l2.h"
-#include "fmdrv_common.h"
-#include <linux/ti_wilink_st.h>
-#include "fmdrv_rx.h"
-#include "fmdrv_tx.h"
-
-/* Region info */
-static struct region_info region_configs[] = {
- /* Europe/US */
- {
- .chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
- .bot_freq = 87500, /* 87.5 MHz */
- .top_freq = 108000, /* 108 MHz */
- .fm_band = 0,
- },
- /* Japan */
- {
- .chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
- .bot_freq = 76000, /* 76 MHz */
- .top_freq = 90000, /* 90 MHz */
- .fm_band = 1,
- },
-};
-
-/* Band selection */
-static u8 default_radio_region; /* Europe/US */
-module_param(default_radio_region, byte, 0);
-MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
-
-/* RDS buffer blocks */
-static u32 default_rds_buf = 300;
-module_param(default_rds_buf, uint, 0444);
-MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
-
-/* Radio Nr */
-static u32 radio_nr = -1;
-module_param(radio_nr, int, 0444);
-MODULE_PARM_DESC(radio_nr, "Radio Nr");
-
-/* FM irq handlers forward declaration */
-static void fm_irq_send_flag_getcmd(struct fmdev *);
-static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
-static void fm_irq_handle_hw_malfunction(struct fmdev *);
-static void fm_irq_handle_rds_start(struct fmdev *);
-static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
-static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
-static void fm_irq_handle_rds_finish(struct fmdev *);
-static void fm_irq_handle_tune_op_ended(struct fmdev *);
-static void fm_irq_handle_power_enb(struct fmdev *);
-static void fm_irq_handle_low_rssi_start(struct fmdev *);
-static void fm_irq_afjump_set_pi(struct fmdev *);
-static void fm_irq_handle_set_pi_resp(struct fmdev *);
-static void fm_irq_afjump_set_pimask(struct fmdev *);
-static void fm_irq_handle_set_pimask_resp(struct fmdev *);
-static void fm_irq_afjump_setfreq(struct fmdev *);
-static void fm_irq_handle_setfreq_resp(struct fmdev *);
-static void fm_irq_afjump_enableint(struct fmdev *);
-static void fm_irq_afjump_enableint_resp(struct fmdev *);
-static void fm_irq_start_afjump(struct fmdev *);
-static void fm_irq_handle_start_afjump_resp(struct fmdev *);
-static void fm_irq_afjump_rd_freq(struct fmdev *);
-static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
-static void fm_irq_handle_low_rssi_finish(struct fmdev *);
-static void fm_irq_send_intmsk_cmd(struct fmdev *);
-static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);
-
-/*
- * When FM common module receives interrupt packet, following handlers
- * will be executed one after another to service the interrupt(s)
- */
-enum fmc_irq_handler_index {
- FM_SEND_FLAG_GETCMD_IDX,
- FM_HANDLE_FLAG_GETCMD_RESP_IDX,
-
- /* HW malfunction irq handler */
- FM_HW_MAL_FUNC_IDX,
-
- /* RDS threshold reached irq handler */
- FM_RDS_START_IDX,
- FM_RDS_SEND_RDS_GETCMD_IDX,
- FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
- FM_RDS_FINISH_IDX,
-
- /* Tune operation ended irq handler */
- FM_HW_TUNE_OP_ENDED_IDX,
-
- /* TX power enable irq handler */
- FM_HW_POWER_ENB_IDX,
-
- /* Low RSSI irq handler */
- FM_LOW_RSSI_START_IDX,
- FM_AF_JUMP_SETPI_IDX,
- FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
- FM_AF_JUMP_SETPI_MASK_IDX,
- FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
- FM_AF_JUMP_SET_AF_FREQ_IDX,
- FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
- FM_AF_JUMP_ENABLE_INT_IDX,
- FM_AF_JUMP_ENABLE_INT_RESP_IDX,
- FM_AF_JUMP_START_AFJUMP_IDX,
- FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
- FM_AF_JUMP_RD_FREQ_IDX,
- FM_AF_JUMP_RD_FREQ_RESP_IDX,
- FM_LOW_RSSI_FINISH_IDX,
-
- /* Interrupt process post action */
- FM_SEND_INTMSK_CMD_IDX,
- FM_HANDLE_INTMSK_CMD_RESP_IDX,
-};
-
-/* FM interrupt handler table */
-static int_handler_prototype int_handler_table[] = {
- fm_irq_send_flag_getcmd,
- fm_irq_handle_flag_getcmd_resp,
- fm_irq_handle_hw_malfunction,
- fm_irq_handle_rds_start, /* RDS threshold reached irq handler */
- fm_irq_send_rdsdata_getcmd,
- fm_irq_handle_rdsdata_getcmd_resp,
- fm_irq_handle_rds_finish,
- fm_irq_handle_tune_op_ended,
- fm_irq_handle_power_enb, /* TX power enable irq handler */
- fm_irq_handle_low_rssi_start,
- fm_irq_afjump_set_pi,
- fm_irq_handle_set_pi_resp,
- fm_irq_afjump_set_pimask,
- fm_irq_handle_set_pimask_resp,
- fm_irq_afjump_setfreq,
- fm_irq_handle_setfreq_resp,
- fm_irq_afjump_enableint,
- fm_irq_afjump_enableint_resp,
- fm_irq_start_afjump,
- fm_irq_handle_start_afjump_resp,
- fm_irq_afjump_rd_freq,
- fm_irq_afjump_rd_freq_resp,
- fm_irq_handle_low_rssi_finish,
- fm_irq_send_intmsk_cmd, /* Interrupt process post action */
- fm_irq_handle_intmsk_cmd_resp
-};
-
-static long (*g_st_write) (struct sk_buff *skb);
-static struct completion wait_for_fmdrv_reg_comp;
-
-static inline void fm_irq_call(struct fmdev *fmdev)
-{
- fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
-}
-
-/* Continue next function in interrupt handler table */
-static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
-{
- fmdev->irq_info.stage = stage;
- fm_irq_call(fmdev);
-}
-
-static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
-{
- fmdev->irq_info.stage = stage;
- mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
-}
-
-#ifdef FM_DUMP_TXRX_PKT
- /* To dump outgoing FM Channel-8 packets */
-inline void dump_tx_skb_data(struct sk_buff *skb)
-{
- int len, len_org;
- u8 index;
- struct fm_cmd_msg_hdr *cmd_hdr;
-
- cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
- printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
- fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
- cmd_hdr->len, cmd_hdr->op,
- cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);
-
- len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
- if (len_org > 0) {
- printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
- len = min(len_org, 14);
- for (index = 0; index < len; index++)
- printk(KERN_CONT "%x ",
- skb->data[FM_CMD_MSG_HDR_SIZE + index]);
- printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
- }
- printk(KERN_CONT "\n");
-}
-
- /* To dump incoming FM Channel-8 packets */
-inline void dump_rx_skb_data(struct sk_buff *skb)
-{
- int len, len_org;
- u8 index;
- struct fm_event_msg_hdr *evt_hdr;
-
- evt_hdr = (struct fm_event_msg_hdr *)skb->data;
- printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
- evt_hdr->hdr, evt_hdr->len,
- evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
- (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
-
- len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
- if (len_org > 0) {
- printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
- len = min(len_org, 14);
- for (index = 0; index < len; index++)
- printk(KERN_CONT "%x ",
- skb->data[FM_EVT_MSG_HDR_SIZE + index]);
- printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
- }
- printk(KERN_CONT "\n");
-}
-#endif
-
-void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
-{
- fmdev->rx.region = region_configs[region_to_set];
-}
-
-/*
- * FM common sub-module will queue this bh work whenever it receives
- * an FM packet from the ST driver.
- */
-static void recv_bh_work(struct work_struct *t)
-{
- struct fmdev *fmdev;
- struct fm_irq *irq_info;
- struct fm_event_msg_hdr *evt_hdr;
- struct sk_buff *skb;
- u8 num_fm_hci_cmds;
- unsigned long flags;
-
- fmdev = from_work(fmdev, t, rx_bh_work);
- irq_info = &fmdev->irq_info;
- /* Process all packets in the RX queue */
- while ((skb = skb_dequeue(&fmdev->rx_q))) {
- if (skb->len < sizeof(struct fm_event_msg_hdr)) {
- fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
- skb,
- skb->len, sizeof(struct fm_event_msg_hdr));
- kfree_skb(skb);
- continue;
- }
-
- evt_hdr = (void *)skb->data;
- num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;
-
- /* FM interrupt packet? */
- if (evt_hdr->op == FM_INTERRUPT) {
- /* FM interrupt handler started already? */
- if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
- set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
- if (irq_info->stage != 0) {
- fmerr("Inval stage resetting to zero\n");
- irq_info->stage = 0;
- }
-
- /*
- * Execute first function in interrupt handler
- * table.
- */
- irq_info->handlers[irq_info->stage](fmdev);
- } else {
- set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
- }
- kfree_skb(skb);
- }
- /* Anyone waiting for this with completion handler? */
- else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- fmdev->resp_skb = skb;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
- complete(fmdev->resp_comp);
-
- fmdev->resp_comp = NULL;
- atomic_set(&fmdev->tx_cnt, 1);
- }
- /* Is this for interrupt handler? */
- else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
- if (fmdev->resp_skb != NULL)
- fmerr("Response SKB ptr not NULL\n");
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- fmdev->resp_skb = skb;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- /* Execute interrupt handler where state index points */
- irq_info->handlers[irq_info->stage](fmdev);
-
- kfree_skb(skb);
- atomic_set(&fmdev->tx_cnt, 1);
- } else {
- fmerr("Nobody claimed SKB(%p),purging\n", skb);
- }
-
- /*
- * Check flow control field. If Num_FM_HCI_Commands field is
- * not zero, queue FM TX bh work.
- */
- if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
- if (!skb_queue_empty(&fmdev->tx_q))
- queue_work(system_bh_wq, &fmdev->tx_bh_work);
- }
-}
-
-/* send_bh_work is scheduled when an FM packet has to be sent to the chip */
-static void send_bh_work(struct work_struct *t)
-{
- struct fmdev *fmdev;
- struct sk_buff *skb;
- int len;
-
- fmdev = from_work(fmdev, t, tx_bh_work);
-
- if (!atomic_read(&fmdev->tx_cnt))
- return;
-
- /* Check whether a timeout happened for the last transmitted packet */
- if (time_is_before_jiffies(fmdev->last_tx_jiffies + FM_DRV_TX_TIMEOUT)) {
- fmerr("TX timeout occurred\n");
- atomic_set(&fmdev->tx_cnt, 1);
- }
-
- /* Send queued FM TX packets */
- skb = skb_dequeue(&fmdev->tx_q);
- if (!skb)
- return;
-
- atomic_dec(&fmdev->tx_cnt);
- fmdev->pre_op = fm_cb(skb)->fm_op;
-
- if (fmdev->resp_comp != NULL)
- fmerr("Response completion handler is not NULL\n");
-
- fmdev->resp_comp = fm_cb(skb)->completion;
-
- /* Write FM packet to ST driver */
- len = g_st_write(skb);
- if (len < 0) {
- kfree_skb(skb);
- fmdev->resp_comp = NULL;
- fmerr("TX bh work failed to send skb(%p)\n", skb);
- atomic_set(&fmdev->tx_cnt, 1);
- } else {
- fmdev->last_tx_jiffies = jiffies;
- }
-}
-
-/*
- * Queues FM Channel-8 packet to FM TX queue and schedules FM TX bh work for
- * transmission
- */
-static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
- int payload_len, struct completion *wait_completion)
-{
- struct sk_buff *skb;
- struct fm_cmd_msg_hdr *hdr;
- int size;
-
- if (fm_op >= FM_INTERRUPT) {
- fmerr("Invalid fm opcode - %d\n", fm_op);
- return -EINVAL;
- }
- if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
- fmerr("Payload data is NULL during fw download\n");
- return -EINVAL;
- }
- if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
- size =
- FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
- else
- size = payload_len;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb) {
- fmerr("No memory to create new SKB\n");
- return -ENOMEM;
- }
- /*
- * Don't fill FM header info for the commands which come from
- * FM firmware file.
- */
- if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
- test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
- /* Fill command header info */
- hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
- hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER; /* 0x08 */
-
- /* 3 (fm_opcode,rd_wr,dlen) + payload len) */
- hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;
-
- /* FM opcode */
- hdr->op = fm_op;
-
- /* read/write type */
- hdr->rd_wr = type;
- hdr->dlen = payload_len;
- fm_cb(skb)->fm_op = fm_op;
-
- /*
- * If firmware download has finished and the command is
- * not a read command then payload is != NULL - a write
- * command with u16 payload - convert to be16
- */
- if (payload != NULL)
- *(__be16 *)payload = cpu_to_be16(*(u16 *)payload);
-
- } else if (payload != NULL) {
- fm_cb(skb)->fm_op = *((u8 *)payload + 2);
- }
- if (payload != NULL)
- skb_put_data(skb, payload, payload_len);
-
- fm_cb(skb)->completion = wait_completion;
- skb_queue_tail(&fmdev->tx_q, skb);
- queue_work(system_bh_wq, &fmdev->tx_bh_work);
-
- return 0;
-}
-
-/* Sends FM Channel-8 command to the chip and waits for the response */
-int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
- unsigned int payload_len, void *response, int *response_len)
-{
- struct sk_buff *skb;
- struct fm_event_msg_hdr *evt_hdr;
- unsigned long flags;
- int ret;
-
- init_completion(&fmdev->maintask_comp);
- ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
- &fmdev->maintask_comp);
- if (ret)
- return ret;
-
- if (!wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT)) {
- fmerr("Timeout(%d sec),didn't get regcompletion signal from RX bh work\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- if (!fmdev->resp_skb) {
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
- fmerr("Response SKB is missing\n");
- return -EFAULT;
- }
- skb = fmdev->resp_skb;
- fmdev->resp_skb = NULL;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- evt_hdr = (void *)skb->data;
- if (evt_hdr->status != 0) {
- fmerr("Received event pkt status(%d) is not zero\n",
- evt_hdr->status);
- kfree_skb(skb);
- return -EIO;
- }
- /* Send response data to caller */
- if (response != NULL && response_len != NULL && evt_hdr->dlen &&
- evt_hdr->dlen <= payload_len) {
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(response, skb->data, evt_hdr->dlen);
- *response_len = evt_hdr->dlen;
- } else if (response_len != NULL && evt_hdr->dlen == 0) {
- *response_len = 0;
- }
- kfree_skb(skb);
-
- return 0;
-}
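
Callers use fmc_send_cmd() as a blocking request/response primitive. As an illustration (example_read_flag is a hypothetical helper, not part of the original file), a synchronous read of the 2-byte FLAG register could look like the sketch below; the returned bytes are big-endian, as fm_irq_handle_flag_getcmd_resp() shows:

	/* Hypothetical caller sketch: read the 2-byte FLAG register synchronously.
	 * FLAG_GET and REG_RD come from fmdrv_common.h; the response still needs
	 * a be16_to_cpu() conversion by the caller. */
	static int example_read_flag(struct fmdev *fmdev, u16 *flag)
	{
		int resp_len;

		return fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(*flag),
				    flag, &resp_len);
	}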
-
-/* --- Helper functions used in FM interrupt handlers ---*/
-static inline int check_cmdresp_status(struct fmdev *fmdev,
- struct sk_buff **skb)
-{
- struct fm_event_msg_hdr *fm_evt_hdr;
- unsigned long flags;
-
- del_timer(&fmdev->irq_info.timer);
-
- spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
- *skb = fmdev->resp_skb;
- fmdev->resp_skb = NULL;
- spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
-
- fm_evt_hdr = (void *)(*skb)->data;
- if (fm_evt_hdr->status != 0) {
- fmerr("irq: opcode %x response status is not zero Initiating irq recovery process\n",
- fm_evt_hdr->op);
-
- mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
- return -1;
- }
-
- return 0;
-}
-
-static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
-{
- struct sk_buff *skb;
-
- if (!check_cmdresp_status(fmdev, &skb))
- fm_irq_call_stage(fmdev, stage);
-}
-
-/*
- * Interrupt process timeout handler.
- * One of the irq handlers did not get a proper response from the chip, so
- * take recovery action here. FM interrupts are disabled at the beginning of
- * interrupt processing. Therefore reset the stage index to re-enable the
- * default interrupts, so that the next interrupt is processed as usual.
- */
-static void int_timeout_handler(struct timer_list *t)
-{
- struct fmdev *fmdev;
- struct fm_irq *fmirq;
-
- fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
- fmdev = from_timer(fmdev, t, irq_info.timer);
- fmirq = &fmdev->irq_info;
- fmirq->retry++;
-
- if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
- /* Stop recovery action (interrupt reenable process) and
- * reset stage index & retry count values */
- fmirq->stage = 0;
- fmirq->retry = 0;
- fmerr("Recovery action failed duringirq processing, max retry reached\n");
- return;
- }
- fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
-}
-
-/* --------- FM interrupt handlers ------------*/
-static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
-{
- u16 flag;
-
- /* Send FLAG_GET command to know the source of the interrupt */
- if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
- fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
-}
-
-static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- struct fm_event_msg_hdr *fm_evt_hdr;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- fm_evt_hdr = (void *)skb->data;
- if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
- return;
-
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);
-
- fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
- fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);
-
- /* Continue next function in interrupt handler table */
- fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
-}
-
-static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
- fmerr("irq: HW MAL int received - do nothing\n");
-
- /* Continue next function in interrupt handler table */
- fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
-}
-
-static void fm_irq_handle_rds_start(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
- fmdbg("irq: rds threshold reached\n");
- fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
- } else {
- /* Continue next function in interrupt handler table */
- fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
- }
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
-{
- /* Send the command to read RDS data from the chip */
- if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
- (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
- fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
-}
-
-/* Keeps track of current RX channel AF (Alternate Frequency) */
-static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
-{
- struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
- u8 reg_idx = fmdev->rx.region.fm_band;
- u8 index;
- u32 freq;
-
- /* The first AF indicates the number of AFs that follow. Reset the list */
- if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
- fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
- fmdev->rx.stat_info.afcache_size = 0;
- fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
- return;
- }
-
- if (af < FM_RDS_MIN_AF)
- return;
- if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
- return;
- if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
- return;
-
- freq = fmdev->rx.region.bot_freq + (af * 100);
- if (freq == fmdev->rx.freq) {
- fmdbg("Current freq(%d) is matching with received AF(%d)\n",
- fmdev->rx.freq, freq);
- return;
- }
- /* Do check in AF cache */
- for (index = 0; index < stat_info->afcache_size; index++) {
- if (stat_info->af_cache[index] == freq)
- break;
- }
- /* Reached the limit of the list - ignore the next AF */
- if (index == stat_info->af_list_max) {
- fmdbg("AF cache is full\n");
- return;
- }
- /*
- * If we reached the end of the list then this AF is not
- * in the list - add it.
- */
- if (index == stat_info->afcache_size) {
- fmdbg("Storing AF %d to cache index %d\n", freq, index);
- stat_info->af_cache[index] = freq;
- stat_info->afcache_size++;
- }
-}
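
The AF handling above follows the usual RDS convention: codes 1-204 map to the band bottom plus code x 100 kHz, while codes 225-249 only announce how many AFs follow. A standalone sketch of the frequency mapping (the 87500 kHz band bottom is an assumption for illustration, not taken from this file):

	#include <stdio.h>

	/* AF code -> frequency in kHz, with bot_freq given in kHz. */
	static unsigned int af_to_khz(unsigned int bot_freq, unsigned int af)
	{
		return bot_freq + af * 100;
	}

	int main(void)
	{
		/* AF code 10 with an assumed 87500 kHz band bottom -> 88500 kHz. */
		printf("%u kHz\n", af_to_khz(87500, 10));
		return 0;
	}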
-
-/*
- * Converts RDS buffer data from big endian format
- * to little endian format.
- */
-static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
- struct fm_rdsdata_format *rds_format)
-{
- u8 index = 0;
- u8 *rds_buff;
-
- /*
- * Since in Orca the 2 RDS Data bytes are in little endian and
- * in Dolphin they are in big endian, the parsing of the RDS data
- * is chip dependent
- */
- if (fmdev->asci_id != 0x6350) {
- rds_buff = &rds_format->data.groupdatabuff.buff[0];
- while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
- swap(rds_buff[index], rds_buff[index + 1]);
- index += 2;
- }
- }
-}
-
-static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- struct fm_rdsdata_format rds_fmt;
- struct fm_rds *rds = &fmdev->rx.rds;
- unsigned long group_idx, flags;
- u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
- u8 type, blk_idx, idx;
- u16 cur_picode;
- u32 rds_len;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- /* Skip header info */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- rds_data = skb->data;
- rds_len = skb->len;
-
- /* Parse the RDS data */
- while (rds_len >= FM_RDS_BLK_SIZE) {
- meta_data = rds_data[2];
- /* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
- type = (meta_data & 0x07);
-
- /* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
- blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
- fmdbg("Block index:%d(%s)\n", blk_idx,
- (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");
-
- if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
- break;
-
- if (blk_idx > FM_RDS_BLK_IDX_D) {
- fmdbg("Block sequence mismatch\n");
- rds->last_blk_idx = -1;
- break;
- }
-
- /* Skip checkword (control) byte and copy only data byte */
- idx = array_index_nospec(blk_idx * (FM_RDS_BLK_SIZE - 1),
- FM_RX_RDS_INFO_FIELD_MAX - (FM_RDS_BLK_SIZE - 1));
-
- memcpy(&rds_fmt.data.groupdatabuff.buff[idx], rds_data,
- FM_RDS_BLK_SIZE - 1);
-
- rds->last_blk_idx = blk_idx;
-
- /* If completed a whole group then handle it */
- if (blk_idx == FM_RDS_BLK_IDX_D) {
- fmdbg("Good block received\n");
- fm_rdsparse_swapbytes(fmdev, &rds_fmt);
-
- /*
- * Extract PI code and store in local cache.
- * We need this during AF switch processing.
- */
- cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
- if (fmdev->rx.stat_info.picode != cur_picode)
- fmdev->rx.stat_info.picode = cur_picode;
-
- fmdbg("picode:%d\n", cur_picode);
-
- group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
- fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
- (group_idx % 2) ? "B" : "A");
-
- group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
- if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
- fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
- fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
- }
- }
- rds_len -= FM_RDS_BLK_SIZE;
- rds_data += FM_RDS_BLK_SIZE;
- }
-
- /* Copy raw rds data to internal rds buffer */
- rds_data = skb->data;
- rds_len = skb->len;
-
- spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
- while (rds_len > 0) {
- /*
- * Fill RDS buffer as per V4L2 specification.
- * Store control byte
- */
- type = (rds_data[2] & 0x07);
- blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
- tmpbuf[2] = blk_idx; /* Offset name */
- tmpbuf[2] |= blk_idx << 3; /* Received offset */
-
- /* Store data byte */
- tmpbuf[0] = rds_data[0];
- tmpbuf[1] = rds_data[1];
-
- memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
- rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;
-
- /* Check for overflow & start over */
- if (rds->wr_idx == rds->rd_idx) {
- fmdbg("RDS buffer overflow\n");
- rds->wr_idx = 0;
- rds->rd_idx = 0;
- break;
- }
- rds_len -= FM_RDS_BLK_SIZE;
- rds_data += FM_RDS_BLK_SIZE;
- }
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
-
- /* Wakeup read queue */
- if (rds->wr_idx != rds->rd_idx)
- wake_up_interruptible(&rds->read_queue);
-
- fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
-}
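
Two small transformations in the handler above are easy to miss: the chip's block type (0=A, 1=B, 2=C, 3=C', 4=D, 5=E) is folded onto a contiguous index where C and C' share slot 2, and the third byte of each stored tuple packs that index twice (offset name in bits 0-2, received offset in bits 3-5). A standalone sketch:

	#include <stdio.h>

	/* Block types as received from the chip: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E. */
	static unsigned char block_index(unsigned char type)
	{
		return type <= 2 ? type : type - 1;	/* C and C' share index 2 */
	}

	/* Pack the control byte: offset name in bits 0-2, received offset in
	 * bits 3-5 (no error bits set in this sketch). */
	static unsigned char control_byte(unsigned char blk_idx)
	{
		return blk_idx | (blk_idx << 3);
	}

	int main(void)
	{
		unsigned char type = 3;			/* block C' */
		unsigned char idx = block_index(type);	/* -> 2 */

		printf("idx=%u ctrl=0x%02x\n", idx, control_byte(idx));
		return 0;
	}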
-
-static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
-{
- fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
-}
-
-static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) & fmdev->
- irq_info.mask) {
- fmdbg("irq: tune ended/bandlimit reached\n");
- if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
- fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
- } else {
- complete(&fmdev->maintask_comp);
- fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
- }
- } else
- fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_handle_power_enb(struct fmdev *fmdev)
-{
- if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
- fmdbg("irq: Power Enabled/Disabled\n");
- complete(&fmdev->maintask_comp);
- }
-
- fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
-}
-
-static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
-{
- if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
- (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
- (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
- (fmdev->rx.stat_info.afcache_size != 0)) {
- fmdbg("irq: rssi level has fallen below threshold level\n");
-
- /* Disable further low RSSI interrupts */
- fmdev->irq_info.mask &= ~FM_LEV_EVENT;
-
- fmdev->rx.afjump_idx = 0;
- fmdev->rx.freq_before_jump = fmdev->rx.freq;
- fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
- } else {
- /* Continue next function in interrupt handler table */
- fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
- }
-
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Set PI code - must be updated if the AF list is not empty */
- payload = fmdev->rx.stat_info.picode;
- if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
-}
-
-static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
-}
-
-/*
- * Set PI mask.
- * 0xFFFF = Enable PI code matching
- * 0x0000 = Disable PI code matching
- */
-static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
-{
- u16 payload;
-
- payload = 0x0000;
- if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
-}
-
-static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
-}
-
-static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
-{
- u16 frq_index;
- u16 payload;
-
- fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
- frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
- fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- payload = frq_index;
- if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
-}
-
-static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
-}
-
-static void fm_irq_afjump_enableint(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Enable FR (tuning operation ended) interrupt */
- payload = FM_FR_EVENT;
- if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
-}
-
-static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
-{
- fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
-}
-
-static void fm_irq_start_afjump(struct fmdev *fmdev)
-{
- u16 payload;
-
- payload = FM_TUNER_AF_JUMP_MODE;
- if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
-}
-
-static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
- set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
- clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
-}
-
-static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
-{
- u16 payload;
-
- if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
-}
-
-static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
- u16 read_freq;
- u32 curr_freq, jumped_freq;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
-
- /* Skip header info and copy only response data */
- skb_pull(skb, sizeof(struct fm_event_msg_hdr));
- memcpy(&read_freq, skb->data, sizeof(read_freq));
- read_freq = be16_to_cpu((__force __be16)read_freq);
- curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);
-
- jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];
-
- /* If the frequency was changed the jump succeeded */
- if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
- fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
- fmdev->rx.freq = curr_freq;
- fm_rx_reset_rds_cache(fmdev);
-
- /* AF feature is on, enable low level RSSI interrupt */
- if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
-
- fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
- } else { /* jump to the next freq in the AF list */
- fmdev->rx.afjump_idx++;
-
- /* If we reached the end of the list - stop searching */
- if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
- fmdbg("AF switch processing failed\n");
- fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
- } else { /* AF List is not over - try next one */
-
- fmdbg("Trying next freq in AF cache\n");
- fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
- }
- }
- fm_irq_call(fmdev);
-}
-
-static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
-{
- fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
-}
-
-static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
-{
- u16 payload;
-
- /* Re-enable FM interrupts */
- payload = fmdev->irq_info.mask;
-
- if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL))
- fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
-}
-
-static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
-{
- struct sk_buff *skb;
-
- if (check_cmdresp_status(fmdev, &skb))
- return;
- /*
- * This is last function in interrupt table to be executed.
- * So, reset stage index to 0.
- */
- fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
-
- /* Start processing any pending interrupt */
- if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
- fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
- else
- clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
-}
-
-/* Returns availability of RDS data in internal buffer */
-int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
- struct poll_table_struct *pts)
-{
- poll_wait(file, &fmdev->rx.rds.read_queue, pts);
- if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
- return 0;
-
- return -EAGAIN;
-}
-
-/* Copies RDS data from internal buffer to user buffer */
-int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
- u8 __user *buf, size_t count)
-{
- u32 block_count;
- u8 tmpbuf[FM_RDS_BLK_SIZE];
- unsigned long flags;
- int ret;
-
- if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
- ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
- (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
- if (ret)
- return -EINTR;
- }
-
- /* Calculate block count from byte count */
- count /= FM_RDS_BLK_SIZE;
- block_count = 0;
- ret = 0;
-
- while (block_count < count) {
- spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
-
- if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
- break;
- }
- memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
- FM_RDS_BLK_SIZE);
- fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
- if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
- fmdev->rx.rds.rd_idx = 0;
-
- spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
-
- if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
- break;
-
- block_count++;
- buf += FM_RDS_BLK_SIZE;
- ret += FM_RDS_BLK_SIZE;
- }
- return ret;
-}
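
The copy loop above is a plain ring-buffer drain in FM_RDS_BLK_SIZE (3-byte) steps, with the read index wrapping at buf_size. A self-contained sketch of the same index handling, leaving out the locking and copy_to_user() details:

	#include <string.h>

	#define BLK 3

	struct ring {
		unsigned char *buff;
		unsigned int buf_size;	/* multiple of BLK */
		unsigned int rd_idx;
		unsigned int wr_idx;
	};

	/* Copy at most max_blocks blocks out of the ring; returns bytes copied. */
	static int ring_read(struct ring *r, unsigned char *dst, unsigned int max_blocks)
	{
		int copied = 0;

		while (max_blocks-- && r->rd_idx != r->wr_idx) {
			memcpy(dst + copied, &r->buff[r->rd_idx], BLK);
			r->rd_idx += BLK;
			if (r->rd_idx >= r->buf_size)
				r->rd_idx = 0;
			copied += BLK;
		}
		return copied;
	}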
-
-int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_freq(fmdev, freq_to_set);
-
- case FM_MODE_TX:
- return fm_tx_set_freq(fmdev, freq_to_set);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
-{
- if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
- fmerr("RX frequency is not set\n");
- return -EPERM;
- }
- if (cur_tuned_frq == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- *cur_tuned_frq = fmdev->rx.freq;
- return 0;
-
- case FM_MODE_TX:
- *cur_tuned_frq = 0; /* TODO : Change this later */
- return 0;
-
- default:
- return -EINVAL;
- }
-
-}
-
-int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_region(fmdev, region_to_set);
-
- case FM_MODE_TX:
- return fm_tx_set_region(fmdev, region_to_set);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_mute_mode(fmdev, mute_mode_toset);
-
- case FM_MODE_TX:
- return fm_tx_set_mute_mode(fmdev, mute_mode_toset);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_stereo_mono(fmdev, mode);
-
- case FM_MODE_TX:
- return fm_tx_set_stereo_mono(fmdev, mode);
-
- default:
- return -EINVAL;
- }
-}
-
-int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- switch (fmdev->curr_fmmode) {
- case FM_MODE_RX:
- return fm_rx_set_rds_mode(fmdev, rds_en_dis);
-
- case FM_MODE_TX:
- return fm_tx_set_rds_mode(fmdev, rds_en_dis);
-
- default:
- return -EINVAL;
- }
-}
-
-/* Sends power off command to the chip */
-static int fm_power_down(struct fmdev *fmdev)
-{
- u16 payload;
- int ret;
-
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmerr("FM core is not ready\n");
- return -EPERM;
- }
- if (fmdev->curr_fmmode == FM_MODE_OFF) {
- fmdbg("FM chip is already in OFF state\n");
- return 0;
- }
-
- payload = 0x0;
- ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return fmc_release(fmdev);
-}
-
-/* Reads init command from FM firmware file and loads to the chip */
-static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
-{
- const struct firmware *fw_entry;
- struct bts_header *fw_header;
- struct bts_action *action;
- struct bts_action_delay *delay;
- u8 *fw_data;
- int ret, fw_len;
-
- set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
-
- ret = request_firmware(&fw_entry, fw_name,
- &fmdev->radio_dev->dev);
- if (ret < 0) {
- fmerr("Unable to read firmware(%s) content\n", fw_name);
- return ret;
- }
- fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
-
- fw_data = (void *)fw_entry->data;
- fw_len = fw_entry->size;
-
- fw_header = (struct bts_header *)fw_data;
- if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
- fmerr("%s not a legal TI firmware file\n", fw_name);
- ret = -EINVAL;
- goto rel_fw;
- }
- fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);
-
- /* Skip file header info, we already verified it */
- fw_data += sizeof(struct bts_header);
- fw_len -= sizeof(struct bts_header);
-
- while (fw_data && fw_len > 0) {
- action = (struct bts_action *)fw_data;
-
- switch (action->type) {
- case ACTION_SEND_COMMAND: /* Send */
- ret = fmc_send_cmd(fmdev, 0, 0, action->data,
- action->size, NULL, NULL);
- if (ret)
- goto rel_fw;
-
- break;
-
- case ACTION_DELAY: /* Delay */
- delay = (struct bts_action_delay *)action->data;
- mdelay(delay->msec);
- break;
- }
-
- fw_data += (sizeof(struct bts_action) + (action->size));
- fw_len -= (sizeof(struct bts_action) + (action->size));
- }
- fmdbg("Transferred only %d of %d bytes of the firmware to chip\n",
- fw_entry->size - fw_len, fw_entry->size);
-rel_fw:
- release_firmware(fw_entry);
- clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
-
- return ret;
-}
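
The firmware image is parsed as a header followed by variable-length actions; the struct below is a hypothetical reconstruction implied only by the pointer arithmetic above (the real bts_action layout lives in a header not shown here), and the walker mirrors the loop's advance of sizeof(action) + action->size per entry:

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical layout inferred from the parsing loop above. */
	struct bts_action_sketch {
		uint16_t type;
		uint16_t size;
		uint8_t  data[];
	} __attribute__((packed));

	/* Walk the action list that follows the file header. */
	static void walk_actions(const uint8_t *p, size_t len,
				 void (*handle)(const struct bts_action_sketch *))
	{
		while (len >= sizeof(struct bts_action_sketch)) {
			const struct bts_action_sketch *a = (const void *)p;
			size_t step = sizeof(*a) + a->size;

			if (step > len)
				break;
			handle(a);
			p += step;
			len -= step;
		}
	}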
-
-/* Loads default RX configuration to the chip */
-static int load_default_rx_configuration(struct fmdev *fmdev)
-{
- int ret;
-
- ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
- if (ret < 0)
- return ret;
-
- return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
-}
-
-/* Does FM power on sequence */
-static int fm_power_up(struct fmdev *fmdev, u8 mode)
-{
- u16 payload;
- __be16 asic_id = 0, asic_ver = 0;
- int resp_len, ret;
- u8 fw_name[50];
-
- if (mode >= FM_MODE_ENTRY_MAX) {
- fmerr("Invalid firmware download option\n");
- return -EINVAL;
- }
-
- /*
- * Initialize FM common module. FM GPIO toggling is
- * taken care of in the Shared Transport driver.
- */
- ret = fmc_prepare(fmdev);
- if (ret < 0) {
- fmerr("Unable to prepare FM Common\n");
- return ret;
- }
-
- payload = FM_ENABLE;
- if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL))
- goto rel;
-
- /* Allow the chip to settle down in Channel-8 mode */
- msleep(20);
-
- if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
- sizeof(asic_id), &asic_id, &resp_len))
- goto rel;
-
- if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
- sizeof(asic_ver), &asic_ver, &resp_len))
- goto rel;
-
- fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- ret = fm_download_firmware(fmdev, fw_name);
- if (ret < 0) {
- fmdbg("Failed to download firmware file %s\n", fw_name);
- goto rel;
- }
- sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
- FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
- be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
-
- ret = fm_download_firmware(fmdev, fw_name);
- if (ret < 0) {
- fmdbg("Failed to download firmware file %s\n", fw_name);
- goto rel;
- } else
- return ret;
-rel:
- return fmc_release(fmdev);
-}
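
The requested firmware names are simply the prefix plus the ASIC ID in hex and the version in decimal. For example, with a hypothetical ASIC ID of 0x6350 and version 1, the common image would be requested as fmc_ch8_6350.1.bts:

	#include <stdio.h>

	int main(void)
	{
		char fw_name[50];
		unsigned int asic_id = 0x6350;	/* hypothetical values */
		int asic_ver = 1;

		snprintf(fw_name, sizeof(fw_name), "%s_%x.%d.bts",
			 "fmc_ch8", asic_id, asic_ver);
		printf("%s\n", fw_name);	/* fmc_ch8_6350.1.bts */
		return 0;
	}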
-
-/* Set FM Modes(TX, RX, OFF) */
-int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
-{
- int ret = 0;
-
- if (fm_mode >= FM_MODE_ENTRY_MAX) {
- fmerr("Invalid FM mode\n");
- return -EINVAL;
- }
- if (fmdev->curr_fmmode == fm_mode) {
- fmdbg("Already fm is in mode(%d)\n", fm_mode);
- return ret;
- }
-
- switch (fm_mode) {
- case FM_MODE_OFF: /* OFF Mode */
- ret = fm_power_down(fmdev);
- if (ret < 0) {
- fmerr("Failed to set OFF mode\n");
- return ret;
- }
- break;
-
- case FM_MODE_TX: /* TX Mode */
- case FM_MODE_RX: /* RX Mode */
- /* Power down before switching to TX or RX mode */
- if (fmdev->curr_fmmode != FM_MODE_OFF) {
- ret = fm_power_down(fmdev);
- if (ret < 0) {
- fmerr("Failed to set OFF mode\n");
- return ret;
- }
- msleep(30);
- }
- ret = fm_power_up(fmdev, fm_mode);
- if (ret < 0) {
- fmerr("Failed to load firmware\n");
- return ret;
- }
- }
- fmdev->curr_fmmode = fm_mode;
-
- /* Set default configuration */
- if (fmdev->curr_fmmode == FM_MODE_RX) {
- fmdbg("Loading default rx configuration..\n");
- ret = load_default_rx_configuration(fmdev);
- if (ret < 0)
- fmerr("Failed to load default values\n");
- }
-
- return ret;
-}
-
-/* Returns current FM mode (TX, RX, OFF) */
-int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
-{
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmerr("FM core is not ready\n");
- return -EPERM;
- }
- if (fmmode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *fmmode = fmdev->curr_fmmode;
- return 0;
-}
-
-/* Called by ST layer when FM packet is available */
-static long fm_st_receive(void *arg, struct sk_buff *skb)
-{
- struct fmdev *fmdev;
-
- fmdev = arg;
-
- if (skb == NULL) {
- fmerr("Invalid SKB received from ST\n");
- return -EFAULT;
- }
-
- if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
- fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
- return -EINVAL;
- }
-
- memcpy(skb_push(skb, 1), &skb->cb[0], 1);
- skb_queue_tail(&fmdev->rx_q, skb);
- queue_work(system_bh_wq, &fmdev->rx_bh_work);
-
- return 0;
-}
-
-/*
- * Called by ST layer to indicate protocol registration completion
- * status.
- */
-static void fm_st_reg_comp_cb(void *arg, int data)
-{
- struct fmdev *fmdev;
-
- fmdev = (struct fmdev *)arg;
- fmdev->streg_cbdata = data;
- complete(&wait_for_fmdrv_reg_comp);
-}
-
-/*
- * This function will be called from FM V4L2 open function.
- * Register with ST driver and initialize driver data.
- */
-int fmc_prepare(struct fmdev *fmdev)
-{
- static struct st_proto_s fm_st_proto;
- int ret;
-
- if (test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmdbg("FM Core is already up\n");
- return 0;
- }
-
- memset(&fm_st_proto, 0, sizeof(fm_st_proto));
- fm_st_proto.recv = fm_st_receive;
- fm_st_proto.match_packet = NULL;
- fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
- fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
- fm_st_proto.priv_data = fmdev;
- fm_st_proto.chnl_id = 0x08;
- fm_st_proto.max_frame_size = 0xff;
- fm_st_proto.hdr_len = 1;
- fm_st_proto.offset_len_in_hdr = 0;
- fm_st_proto.len_size = 1;
- fm_st_proto.reserve = 1;
-
- ret = st_register(&fm_st_proto);
- if (ret == -EINPROGRESS) {
- init_completion(&wait_for_fmdrv_reg_comp);
- fmdev->streg_cbdata = -EINPROGRESS;
- fmdbg("%s waiting for ST reg completion signal\n", __func__);
-
- if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
- FM_ST_REG_TIMEOUT)) {
- fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
- jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
- if (fmdev->streg_cbdata != 0) {
- fmerr("ST reg comp CB called with error status %d\n",
- fmdev->streg_cbdata);
- return -EAGAIN;
- }
-
- ret = 0;
- } else if (ret < 0) {
- fmerr("st_register failed %d\n", ret);
- return -EAGAIN;
- }
-
- if (fm_st_proto.write != NULL) {
- g_st_write = fm_st_proto.write;
- } else {
- fmerr("Failed to get ST write func pointer\n");
- ret = st_unregister(&fm_st_proto);
- if (ret < 0)
- fmerr("st_unregister failed %d\n", ret);
- return -EAGAIN;
- }
-
- spin_lock_init(&fmdev->rds_buff_lock);
- spin_lock_init(&fmdev->resp_skb_lock);
-
- /* Initialize TX queue and TX bh work */
- skb_queue_head_init(&fmdev->tx_q);
- INIT_WORK(&fmdev->tx_bh_work, send_bh_work);
-
- /* Initialize RX Queue and RX bh work */
- skb_queue_head_init(&fmdev->rx_q);
- INIT_WORK(&fmdev->rx_bh_work, recv_bh_work);
-
- fmdev->irq_info.stage = 0;
- atomic_set(&fmdev->tx_cnt, 1);
- fmdev->resp_comp = NULL;
-
- timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
- /*TODO: add FM_STIC_EVENT later */
- fmdev->irq_info.mask = FM_MAL_EVENT;
-
- /* Region info */
- fmdev->rx.region = region_configs[default_radio_region];
-
- fmdev->rx.mute_mode = FM_MUTE_OFF;
- fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- fmdev->rx.freq = FM_UNDEFINED_FREQ;
- fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
- fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
- fmdev->irq_info.retry = 0;
-
- fm_rx_reset_rds_cache(fmdev);
- init_waitqueue_head(&fmdev->rx.rds.read_queue);
-
- fm_rx_reset_station_info(fmdev);
- set_bit(FM_CORE_READY, &fmdev->flag);
-
- return ret;
-}
-
-/*
- * This function will be called from FM V4L2 release function.
- * Unregister from ST driver.
- */
-int fmc_release(struct fmdev *fmdev)
-{
- static struct st_proto_s fm_st_proto;
- int ret;
-
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- fmdbg("FM Core is already down\n");
- return 0;
- }
- /* Service pending read */
- wake_up_interruptible(&fmdev->rx.rds.read_queue);
-
- cancel_work_sync(&fmdev->tx_bh_work);
- cancel_work_sync(&fmdev->rx_bh_work);
-
- skb_queue_purge(&fmdev->tx_q);
- skb_queue_purge(&fmdev->rx_q);
-
- fmdev->resp_comp = NULL;
- fmdev->rx.freq = 0;
-
- memset(&fm_st_proto, 0, sizeof(fm_st_proto));
- fm_st_proto.chnl_id = 0x08;
-
- ret = st_unregister(&fm_st_proto);
-
- if (ret < 0)
- fmerr("Failed to de-register FM from ST %d\n", ret);
- else
- fmdbg("Successfully unregistered from ST\n");
-
- clear_bit(FM_CORE_READY, &fmdev->flag);
- return ret;
-}
-
-/*
- * Module init function. Ask FM V4L module to register video device.
- * Allocate memory for FM driver context and RX RDS buffer.
- */
-static int __init fm_drv_init(void)
-{
- struct fmdev *fmdev = NULL;
- int ret = -ENOMEM;
-
- fmdbg("FM driver version %s\n", FM_DRV_VERSION);
-
- fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
- if (NULL == fmdev) {
- fmerr("Can't allocate operation structure memory\n");
- return ret;
- }
- fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
- fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
- if (NULL == fmdev->rx.rds.buff) {
- fmerr("Can't allocate rds ring buffer\n");
- goto rel_dev;
- }
-
- ret = fm_v4l2_init_video_device(fmdev, radio_nr);
- if (ret < 0)
- goto rel_rdsbuf;
-
- fmdev->irq_info.handlers = int_handler_table;
- fmdev->curr_fmmode = FM_MODE_OFF;
- fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
- fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
- return ret;
-
-rel_rdsbuf:
- kfree(fmdev->rx.rds.buff);
-rel_dev:
- kfree(fmdev);
-
- return ret;
-}
-
-/* Module exit function. Ask FM V4L module to unregister video device */
-static void __exit fm_drv_exit(void)
-{
- struct fmdev *fmdev = NULL;
-
- fmdev = fm_v4l2_deinit_video_device();
- if (fmdev != NULL) {
- kfree(fmdev->rx.rds.buff);
- kfree(fmdev);
- }
-}
-
-module_init(fm_drv_init);
-module_exit(fm_drv_exit);
-
-/* ------------- Module Info ------------- */
-MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
-MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
-MODULE_VERSION(FM_DRV_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/wl128x/fmdrv_common.h b/drivers/media/radio/wl128x/fmdrv_common.h
deleted file mode 100644
index 6a287eadae75..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_common.h
+++ /dev/null
@@ -1,389 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM Common module header file
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_COMMON_H
-#define _FMDRV_COMMON_H
-
-#define FM_ST_REG_TIMEOUT msecs_to_jiffies(6000) /* 6 sec */
-#define FM_PKT_LOGICAL_CHAN_NUMBER 0x08 /* Logical channel 8 */
-
-#define REG_RD 0x1
-#define REG_WR 0x0
-
-struct fm_reg_table {
- u8 opcode;
- u8 type;
- u8 *name;
-};
-
-#define STEREO_GET 0
-#define RSSI_LVL_GET 1
-#define IF_COUNT_GET 2
-#define FLAG_GET 3
-#define RDS_SYNC_GET 4
-#define RDS_DATA_GET 5
-#define FREQ_SET 10
-#define AF_FREQ_SET 11
-#define MOST_MODE_SET 12
-#define MOST_BLEND_SET 13
-#define DEMPH_MODE_SET 14
-#define SEARCH_LVL_SET 15
-#define BAND_SET 16
-#define MUTE_STATUS_SET 17
-#define RDS_PAUSE_LVL_SET 18
-#define RDS_PAUSE_DUR_SET 19
-#define RDS_MEM_SET 20
-#define RDS_BLK_B_SET 21
-#define RDS_MSK_B_SET 22
-#define RDS_PI_MASK_SET 23
-#define RDS_PI_SET 24
-#define RDS_SYSTEM_SET 25
-#define INT_MASK_SET 26
-#define SEARCH_DIR_SET 27
-#define VOLUME_SET 28
-#define AUDIO_ENABLE_SET 29
-#define PCM_MODE_SET 30
-#define I2S_MODE_CONFIG_SET 31
-#define POWER_SET 32
-#define INTX_CONFIG_SET 33
-#define PULL_EN_SET 34
-#define HILO_SET 35
-#define SWITCH2FREF 36
-#define FREQ_DRIFT_REPORT 37
-
-#define PCE_GET 40
-#define FIRM_VER_GET 41
-#define ASIC_VER_GET 42
-#define ASIC_ID_GET 43
-#define MAN_ID_GET 44
-#define TUNER_MODE_SET 45
-#define STOP_SEARCH 46
-#define RDS_CNTRL_SET 47
-
-#define WRITE_HARDWARE_REG 100
-#define CODE_DOWNLOAD 101
-#define RESET 102
-
-#define FM_POWER_MODE 254
-#define FM_INTERRUPT 255
-
-/* Transmitter API */
-
-#define CHANL_SET 55
-#define CHANL_BW_SET 56
-#define REF_SET 57
-#define POWER_ENB_SET 90
-#define POWER_ATT_SET 58
-#define POWER_LEV_SET 59
-#define AUDIO_DEV_SET 60
-#define PILOT_DEV_SET 61
-#define RDS_DEV_SET 62
-#define TX_BAND_SET 65
-#define PUPD_SET 91
-#define AUDIO_IO_SET 63
-#define PREMPH_SET 64
-#define MONO_SET 66
-#define MUTE 92
-#define MPX_LMT_ENABLE 67
-#define PI_SET 93
-#define ECC_SET 69
-#define PTY 70
-#define AF 71
-#define DISPLAY_MODE 74
-#define RDS_REP_SET 77
-#define RDS_CONFIG_DATA_SET 98
-#define RDS_DATA_SET 99
-#define RDS_DATA_ENB 94
-#define TA_SET 78
-#define TP_SET 79
-#define DI_SET 80
-#define MS_SET 81
-#define PS_SCROLL_SPEED 82
-#define TX_AUDIO_LEVEL_TEST 96
-#define TX_AUDIO_LEVEL_TEST_THRESHOLD 73
-#define TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
-#define RX_ANTENNA_SELECT 87
-#define I2C_DEV_ADDR_SET 86
-#define REF_ERR_CALIB_PARAM_SET 88
-#define REF_ERR_CALIB_PERIODICITY_SET 89
-#define SOC_INT_TRIGGER 52
-#define SOC_AUDIO_PATH_SET 83
-#define SOC_PCMI_OVERRIDE 84
-#define SOC_I2S_OVERRIDE 85
-#define RSSI_BLOCK_SCAN_FREQ_SET 95
-#define RSSI_BLOCK_SCAN_START 97
-#define RSSI_BLOCK_SCAN_DATA_GET 5
-#define READ_FMANT_TUNE_VALUE 104
-
-/* SKB helpers */
-struct fm_skb_cb {
- __u8 fm_op;
- struct completion *completion;
-};
-
-#define fm_cb(skb) ((struct fm_skb_cb *)(skb->cb))
-
-/* FM Channel-8 command message format */
-struct fm_cmd_msg_hdr {
- __u8 hdr; /* Logical Channel-8 */
- __u8 len; /* Number of bytes follows */
- __u8 op; /* FM Opcode */
- __u8 rd_wr; /* Read/Write command */
- __u8 dlen; /* Length of payload */
-} __attribute__ ((packed));
-
-#define FM_CMD_MSG_HDR_SIZE 5 /* sizeof(struct fm_cmd_msg_hdr) */
-
-/* FM Channel-8 event message format */
-struct fm_event_msg_hdr {
- __u8 header; /* Logical Channel-8 */
- __u8 len; /* Number of bytes follows */
- __u8 status; /* Event status */
- __u8 num_fm_hci_cmds; /* Number of pkts the host allowed to send */
- __u8 op; /* FM Opcode */
- __u8 rd_wr; /* Read/Write command */
- __u8 dlen; /* Length of payload */
-} __attribute__ ((packed));
-
-#define FM_EVT_MSG_HDR_SIZE 7 /* sizeof(struct fm_event_msg_hdr) */
-
-/* TI's magic number in firmware file */
-#define FM_FW_FILE_HEADER_MAGIC 0x42535442
-
-#define FM_ENABLE 1
-#define FM_DISABLE 0
-
-/* FLAG_GET register bits */
-#define FM_FR_EVENT BIT(0)
-#define FM_BL_EVENT BIT(1)
-#define FM_RDS_EVENT BIT(2)
-#define FM_BBLK_EVENT BIT(3)
-#define FM_LSYNC_EVENT BIT(4)
-#define FM_LEV_EVENT BIT(5)
-#define FM_IFFR_EVENT BIT(6)
-#define FM_PI_EVENT BIT(7)
-#define FM_PD_EVENT BIT(8)
-#define FM_STIC_EVENT BIT(9)
-#define FM_MAL_EVENT BIT(10)
-#define FM_POW_ENB_EVENT BIT(11)
-
-/*
- * FM firmware file name prefixes. ASIC ID and ASIC version will be appended
- * to these later.
- */
-#define FM_FMC_FW_FILE_START ("fmc_ch8")
-#define FM_RX_FW_FILE_START ("fm_rx_ch8")
-#define FM_TX_FW_FILE_START ("fm_tx_ch8")
-
-#define FM_UNDEFINED_FREQ 0xFFFFFFFF
-
-/* Band types */
-#define FM_BAND_EUROPE_US 0
-#define FM_BAND_JAPAN 1
-
-/* Seek directions */
-#define FM_SEARCH_DIRECTION_DOWN 0
-#define FM_SEARCH_DIRECTION_UP 1
-
-/* Tuner modes */
-#define FM_TUNER_STOP_SEARCH_MODE 0
-#define FM_TUNER_PRESET_MODE 1
-#define FM_TUNER_AUTONOMOUS_SEARCH_MODE 2
-#define FM_TUNER_AF_JUMP_MODE 3
-
-/* Min and Max volume */
-#define FM_RX_VOLUME_MIN 0
-#define FM_RX_VOLUME_MAX 70
-
-/* Volume gain step */
-#define FM_RX_VOLUME_GAIN_STEP 0x370
-
-/* Mute modes */
-#define FM_MUTE_ON 0
-#define FM_MUTE_OFF 1
-#define FM_MUTE_ATTENUATE 2
-
-#define FM_RX_UNMUTE_MODE 0x00
-#define FM_RX_RF_DEP_MODE 0x01
-#define FM_RX_AC_MUTE_MODE 0x02
-#define FM_RX_HARD_MUTE_LEFT_MODE 0x04
-#define FM_RX_HARD_MUTE_RIGHT_MODE 0x08
-#define FM_RX_SOFT_MUTE_FORCE_MODE 0x10
-
-/* RF dependent mute mode */
-#define FM_RX_RF_DEPENDENT_MUTE_ON 1
-#define FM_RX_RF_DEPENDENT_MUTE_OFF 0
-
-/* RSSI threshold min and max */
-#define FM_RX_RSSI_THRESHOLD_MIN -128
-#define FM_RX_RSSI_THRESHOLD_MAX 127
-
-/* Stereo/Mono mode */
-#define FM_STEREO_MODE 0
-#define FM_MONO_MODE 1
-#define FM_STEREO_SOFT_BLEND 1
-
-/* FM RX De-emphasis filter modes */
-#define FM_RX_EMPHASIS_FILTER_50_USEC 0
-#define FM_RX_EMPHASIS_FILTER_75_USEC 1
-
-/* FM RDS modes */
-#define FM_RDS_DISABLE 0
-#define FM_RDS_ENABLE 1
-
-#define FM_NO_PI_CODE 0
-
-/* FM and RX RDS block enable/disable */
-#define FM_RX_PWR_SET_FM_ON_RDS_OFF 0x1
-#define FM_RX_PWR_SET_FM_AND_RDS_BLK_ON 0x3
-#define FM_RX_PWR_SET_FM_AND_RDS_BLK_OFF 0x0
-
-/* RX RDS */
-#define FM_RX_RDS_FLUSH_FIFO 0x1
-#define FM_RX_RDS_FIFO_THRESHOLD 64 /* tuples */
-#define FM_RDS_BLK_SIZE 3 /* 3 bytes */
-
-/* RDS block types */
-#define FM_RDS_BLOCK_A 0
-#define FM_RDS_BLOCK_B 1
-#define FM_RDS_BLOCK_C 2
-#define FM_RDS_BLOCK_Ctag 3
-#define FM_RDS_BLOCK_D 4
-#define FM_RDS_BLOCK_E 5
-
-#define FM_RDS_BLK_IDX_A 0
-#define FM_RDS_BLK_IDX_B 1
-#define FM_RDS_BLK_IDX_C 2
-#define FM_RDS_BLK_IDX_D 3
-#define FM_RDS_BLK_IDX_UNKNOWN 0xF0
-
-#define FM_RDS_STATUS_ERR_MASK 0x18
-
-/*
- * Represents an RDS group type & version.
- * There are 15 groups, each group has 2 versions: A and B.
- */
-#define FM_RDS_GROUP_TYPE_MASK_0A BIT(0)
-#define FM_RDS_GROUP_TYPE_MASK_0B BIT(1)
-#define FM_RDS_GROUP_TYPE_MASK_1A BIT(2)
-#define FM_RDS_GROUP_TYPE_MASK_1B BIT(3)
-#define FM_RDS_GROUP_TYPE_MASK_2A BIT(4)
-#define FM_RDS_GROUP_TYPE_MASK_2B BIT(5)
-#define FM_RDS_GROUP_TYPE_MASK_3A BIT(6)
-#define FM_RDS_GROUP_TYPE_MASK_3B BIT(7)
-#define FM_RDS_GROUP_TYPE_MASK_4A BIT(8)
-#define FM_RDS_GROUP_TYPE_MASK_4B BIT(9)
-#define FM_RDS_GROUP_TYPE_MASK_5A BIT(10)
-#define FM_RDS_GROUP_TYPE_MASK_5B BIT(11)
-#define FM_RDS_GROUP_TYPE_MASK_6A BIT(12)
-#define FM_RDS_GROUP_TYPE_MASK_6B BIT(13)
-#define FM_RDS_GROUP_TYPE_MASK_7A BIT(14)
-#define FM_RDS_GROUP_TYPE_MASK_7B BIT(15)
-#define FM_RDS_GROUP_TYPE_MASK_8A BIT(16)
-#define FM_RDS_GROUP_TYPE_MASK_8B BIT(17)
-#define FM_RDS_GROUP_TYPE_MASK_9A BIT(18)
-#define FM_RDS_GROUP_TYPE_MASK_9B BIT(19)
-#define FM_RDS_GROUP_TYPE_MASK_10A BIT(20)
-#define FM_RDS_GROUP_TYPE_MASK_10B BIT(21)
-#define FM_RDS_GROUP_TYPE_MASK_11A BIT(22)
-#define FM_RDS_GROUP_TYPE_MASK_11B BIT(23)
-#define FM_RDS_GROUP_TYPE_MASK_12A BIT(24)
-#define FM_RDS_GROUP_TYPE_MASK_12B BIT(25)
-#define FM_RDS_GROUP_TYPE_MASK_13A BIT(26)
-#define FM_RDS_GROUP_TYPE_MASK_13B BIT(27)
-#define FM_RDS_GROUP_TYPE_MASK_14A BIT(28)
-#define FM_RDS_GROUP_TYPE_MASK_14B BIT(29)
-#define FM_RDS_GROUP_TYPE_MASK_15A BIT(30)
-#define FM_RDS_GROUP_TYPE_MASK_15B BIT(31)
-
-/* RX Alternate Frequency info */
-#define FM_RDS_MIN_AF 1
-#define FM_RDS_MAX_AF 204
-#define FM_RDS_MAX_AF_JAPAN 140
-#define FM_RDS_1_AF_FOLLOWS 225
-#define FM_RDS_25_AF_FOLLOWS 249
-
-/* RDS system type (RDS/RBDS) */
-#define FM_RDS_SYSTEM_RDS 0
-#define FM_RDS_SYSTEM_RBDS 1
-
-/* AF on/off */
-#define FM_RX_RDS_AF_SWITCH_MODE_ON 1
-#define FM_RX_RDS_AF_SWITCH_MODE_OFF 0
-
-/* Retry count when interrupt process goes wrong */
-#define FM_IRQ_TIMEOUT_RETRY_MAX 5 /* 5 times */
-
-/* Audio IO set values */
-#define FM_RX_AUDIO_ENABLE_I2S 0x01
-#define FM_RX_AUDIO_ENABLE_ANALOG 0x02
-#define FM_RX_AUDIO_ENABLE_I2S_AND_ANALOG 0x03
-#define FM_RX_AUDIO_ENABLE_DISABLE 0x00
-
-/* HI/LO set values */
-#define FM_RX_IFFREQ_TO_HI_SIDE 0x0
-#define FM_RX_IFFREQ_TO_LO_SIDE 0x1
-#define FM_RX_IFFREQ_HILO_AUTOMATIC 0x2
-
-/*
- * Default RX mode configuration. Chip will be configured
- * with these default values after loading the RX firmware.
- */
-#define FM_DEFAULT_RX_VOLUME 10
-#define FM_DEFAULT_RSSI_THRESHOLD 3
-
-/* Range for TX power level in units of dB/uV */
-#define FM_PWR_LVL_LOW 91
-#define FM_PWR_LVL_HIGH 122
-
-/* Chip specific default TX power level value */
-#define FM_PWR_LVL_DEF 4
-
-/* FM TX Pre-emphasis filter values */
-#define FM_TX_PREEMPH_OFF 1
-#define FM_TX_PREEMPH_50US 0
-#define FM_TX_PREEMPH_75US 2
-
-/* FM TX antenna impedance values */
-#define FM_TX_ANT_IMP_50 0
-#define FM_TX_ANT_IMP_200 1
-#define FM_TX_ANT_IMP_500 2
-
-/* Functions exported by FM common sub-module */
-int fmc_prepare(struct fmdev *);
-int fmc_release(struct fmdev *);
-
-void fmc_update_region_info(struct fmdev *, u8);
-int fmc_send_cmd(struct fmdev *, u8, u16,
- void *, unsigned int, void *, int *);
-int fmc_is_rds_data_available(struct fmdev *, struct file *,
- struct poll_table_struct *);
-int fmc_transfer_rds_from_internal_buff(struct fmdev *, struct file *,
- u8 __user *, size_t);
-
-int fmc_set_freq(struct fmdev *, u32);
-int fmc_set_mode(struct fmdev *, u8);
-int fmc_set_region(struct fmdev *, u8);
-int fmc_set_mute_mode(struct fmdev *, u8);
-int fmc_set_stereo_mono(struct fmdev *, u16);
-int fmc_set_rds_mode(struct fmdev *, u8);
-
-int fmc_get_freq(struct fmdev *, u32 *);
-int fmc_get_region(struct fmdev *, u8 *);
-int fmc_get_mode(struct fmdev *, u8 *);
-
-/*
- * channel spacing
- */
-#define FM_CHANNEL_SPACING_50KHZ 1
-#define FM_CHANNEL_SPACING_100KHZ 2
-#define FM_CHANNEL_SPACING_200KHZ 4
-#define FM_FREQ_MUL 50
-
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
deleted file mode 100644
index 419cf2e03bcf..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This sub-module of FM driver implements FM RX functionality.
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include "fmdrv.h"
-#include "fmdrv_common.h"
-#include "fmdrv_rx.h"
-
-void fm_rx_reset_rds_cache(struct fmdev *fmdev)
-{
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- fmdev->rx.rds.last_blk_idx = 0;
- fmdev->rx.rds.wr_idx = 0;
- fmdev->rx.rds.rd_idx = 0;
-
- if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
-}
-
-void fm_rx_reset_station_info(struct fmdev *fmdev)
-{
- fmdev->rx.stat_info.picode = FM_NO_PI_CODE;
- fmdev->rx.stat_info.afcache_size = 0;
- fmdev->rx.stat_info.af_list_max = 0;
-}
-
-int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
-{
- unsigned long timeleft;
- u16 payload, curr_frq, intr_flag;
- u32 curr_frq_in_khz;
- u32 resp_len;
- int ret;
-
- if (freq < fmdev->rx.region.bot_freq || freq > fmdev->rx.region.top_freq) {
- fmerr("Invalid frequency %d\n", freq);
- return -EINVAL;
- }
-
- /* Set audio enable */
- payload = FM_RX_AUDIO_ENABLE_I2S_AND_ANALOG;
-
- ret = fmc_send_cmd(fmdev, AUDIO_ENABLE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set hilo to automatic selection */
- payload = FM_RX_IFFREQ_HILO_AUTOMATIC;
- ret = fmc_send_cmd(fmdev, HILO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Calculate frequency index and set*/
- payload = (freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts if we had any */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable FR, BL interrupts */
- intr_flag = fmdev->irq_info.mask;
- fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Start tune */
- payload = FM_TUNER_PRESET_MODE;
- ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- goto exit;
-
- /* Wait for tune ended interrupt */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended int\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- ret = -ETIMEDOUT;
- goto exit;
- }
-
- /* Read freq back to confirm */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2, &curr_frq, &resp_len);
- if (ret < 0)
- goto exit;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
-
- if (curr_frq_in_khz != freq) {
- pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
- curr_frq_in_khz, freq);
- }
-
- /* Update local cache */
- fmdev->rx.freq = curr_frq_in_khz;
-exit:
- /* Re-enable default FM interrupts */
- fmdev->irq_info.mask = intr_flag;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Reset RDS cache and current station pointers */
- fm_rx_reset_rds_cache(fmdev);
- fm_rx_reset_station_info(fmdev);
-
- return ret;
-}
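
Frequencies are handled in kHz and converted to a 50 kHz register index relative to the band bottom (FM_FREQ_MUL is 50), both when tuning and when reading the frequency back. A worked sketch, assuming an 87500 kHz band bottom for illustration:

	#include <stdio.h>

	#define FREQ_MUL 50	/* register step in kHz, mirrors FM_FREQ_MUL */

	int main(void)
	{
		unsigned int bot_freq = 87500;	/* assumed band bottom in kHz */
		unsigned int freq = 100000;	/* 100.0 MHz */
		unsigned int idx = (freq - bot_freq) / FREQ_MUL;

		/* idx = 250; reading back: bot_freq + idx * FREQ_MUL = 100000 kHz */
		printf("index=%u freq=%u kHz\n", idx, bot_freq + idx * FREQ_MUL);
		return 0;
	}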
-
-static int fm_rx_set_channel_spacing(struct fmdev *fmdev, u32 spacing)
-{
- u16 payload;
- int ret;
-
- if (spacing > 0 && spacing <= 50000)
- spacing = FM_CHANNEL_SPACING_50KHZ;
- else if (spacing > 50000 && spacing <= 100000)
- spacing = FM_CHANNEL_SPACING_100KHZ;
- else
- spacing = FM_CHANNEL_SPACING_200KHZ;
-
- /* set channel spacing */
- payload = spacing;
- ret = fmc_send_cmd(fmdev, CHANL_BW_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.region.chanl_space = spacing * FM_FREQ_MUL;
-
- return ret;
-}
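
The spacing request in Hz collapses onto three register values, expressed as multiples of the 50 kHz step, and the cached chanl_space ends up back in kHz after multiplying by FM_FREQ_MUL. A sketch of the same mapping:

	/* Map a spacing request in Hz onto the 50 kHz register step count. */
	static unsigned int spacing_to_steps(unsigned int hz)
	{
		if (hz > 0 && hz <= 50000)
			return 1;	/* 50 kHz  */
		if (hz > 50000 && hz <= 100000)
			return 2;	/* 100 kHz */
		return 4;		/* 200 kHz */
	}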
-
-int fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
- u32 wrap_around, u32 spacing)
-{
- u32 resp_len;
- u16 curr_frq, next_frq, last_frq;
- u16 payload, int_reason, intr_flag;
- u16 offset, space_idx;
- unsigned long timeleft;
- int ret;
-
- /* Set channel spacing */
- ret = fm_rx_set_channel_spacing(fmdev, spacing);
- if (ret < 0) {
- fmerr("Failed to set channel spacing\n");
- return ret;
- }
-
- /* Read the current frequency from chip */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL,
- sizeof(curr_frq), &curr_frq, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- last_frq = (fmdev->rx.region.top_freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
-
- /* Check the offset in order to be aligned to the channel spacing */
- space_idx = fmdev->rx.region.chanl_space / FM_FREQ_MUL;
- offset = curr_frq % space_idx;
-
- next_frq = seek_upward ? curr_frq + space_idx /* Seek Up */ :
- curr_frq - space_idx /* Seek Down */ ;
-
- /*
- * Add or subtract offset in order to stay aligned to the channel
- * spacing.
- */
- if ((short)next_frq < 0)
- next_frq = last_frq - offset;
- else if (next_frq > last_frq)
- next_frq = 0 + offset;
-
-again:
- /* Set calculated next frequency to perform seek */
- payload = next_frq;
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set search direction (0:Seek Down, 1:Seek Up) */
- payload = (seek_upward ? FM_SEARCH_DIRECTION_UP : FM_SEARCH_DIRECTION_DOWN);
- ret = fmc_send_cmd(fmdev, SEARCH_DIR_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts if we had any */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2, NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable FR, BL interrupts */
- intr_flag = fmdev->irq_info.mask;
- fmdev->irq_info.mask = (FM_FR_EVENT | FM_BL_EVENT);
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Start seek */
- payload = FM_TUNER_AUTONOMOUS_SEARCH_MODE;
- ret = fmc_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Wait for tune ended/band limit reached interrupt */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_RX_SEEK_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended int\n",
- jiffies_to_msecs(FM_DRV_RX_SEEK_TIMEOUT) / 1000);
- return -ENODATA;
- }
-
- int_reason = fmdev->irq_info.flag & (FM_TUNE_COMPLETE | FM_BAND_LIMIT);
-
- /* Re-enable default FM interrupts */
- fmdev->irq_info.mask = intr_flag;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- if (int_reason & FM_BL_EVENT) {
- if (wrap_around == 0) {
- fmdev->rx.freq = seek_upward ?
- fmdev->rx.region.top_freq :
- fmdev->rx.region.bot_freq;
- } else {
- fmdev->rx.freq = seek_upward ?
- fmdev->rx.region.bot_freq :
- fmdev->rx.region.top_freq;
- /* Calculate frequency index to write */
- next_frq = (fmdev->rx.freq -
- fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
- goto again;
- }
- } else {
- /* Read freq to know where the tune operation stopped */
- ret = fmc_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, 2,
- &curr_frq, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_frq = be16_to_cpu((__force __be16)curr_frq);
- fmdev->rx.freq = (fmdev->rx.region.bot_freq +
- ((u32)curr_frq * FM_FREQ_MUL));
-
- }
- /* Reset RDS cache and current station pointers */
- fm_rx_reset_rds_cache(fmdev);
- fm_rx_reset_station_info(fmdev);
-
- return ret;
-}
-
-int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (vol_to_set > FM_RX_VOLUME_MAX) {
- fmerr("Volume is not within(%d-%d) range\n",
- FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX);
- return -EINVAL;
- }
- vol_to_set *= FM_RX_VOLUME_GAIN_STEP;
-
- payload = vol_to_set;
- ret = fmc_send_cmd(fmdev, VOLUME_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.volume = vol_to_set;
- return ret;
-}
-
-/* Get volume */
-int fm_rx_get_volume(struct fmdev *fmdev, u16 *curr_vol)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_vol == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_vol = fmdev->rx.volume / FM_RX_VOLUME_GAIN_STEP;
-
- return 0;
-}
-
-/* To get current band's bottom and top frequency */
-int fm_rx_get_band_freq_range(struct fmdev *fmdev, u32 *bot_freq, u32 *top_freq)
-{
- if (bot_freq != NULL)
- *bot_freq = fmdev->rx.region.bot_freq;
-
- if (top_freq != NULL)
- *top_freq = fmdev->rx.region.top_freq;
-
- return 0;
-}
-
-/* Returns current band index (0-Europe/US; 1-Japan) */
-void fm_rx_get_region(struct fmdev *fmdev, u8 *region)
-{
- *region = fmdev->rx.region.fm_band;
-}
-
-/* Sets band (0-Europe/US; 1-Japan) */
-int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
-{
- u16 payload;
- u32 new_frq = 0;
- int ret;
-
- if (region_to_set != FM_BAND_EUROPE_US &&
- region_to_set != FM_BAND_JAPAN) {
- fmerr("Invalid band\n");
- return -EINVAL;
- }
-
- if (fmdev->rx.region.fm_band == region_to_set) {
- fmerr("Requested band is already configured\n");
- return 0;
- }
-
- /* Send cmd to set the band */
- payload = (u16)region_to_set;
- ret = fmc_send_cmd(fmdev, BAND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmc_update_region_info(fmdev, region_to_set);
-
- /* Check whether current RX frequency is within band boundary */
- if (fmdev->rx.freq < fmdev->rx.region.bot_freq)
- new_frq = fmdev->rx.region.bot_freq;
- else if (fmdev->rx.freq > fmdev->rx.region.top_freq)
- new_frq = fmdev->rx.region.top_freq;
-
- if (new_frq) {
- fmdbg("Current freq is not within band limit boundary,switching to %d KHz\n",
- new_frq);
- /* Current RX frequency is not in range. So, update it */
- ret = fm_rx_set_freq(fmdev, new_frq);
- }
-
- return ret;
-}
-
-/* Reads current mute mode (Mute Off/On/Attenuate)*/
-int fm_rx_get_mute_mode(struct fmdev *fmdev, u8 *curr_mute_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_mute_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_mute_mode = fmdev->rx.mute_mode;
-
- return 0;
-}
-
-static int fm_config_rx_mute_reg(struct fmdev *fmdev)
-{
- u16 payload, muteval;
- int ret;
-
- muteval = 0;
- switch (fmdev->rx.mute_mode) {
- case FM_MUTE_ON:
- muteval = FM_RX_AC_MUTE_MODE;
- break;
-
- case FM_MUTE_OFF:
- muteval = FM_RX_UNMUTE_MODE;
- break;
-
- case FM_MUTE_ATTENUATE:
- muteval = FM_RX_SOFT_MUTE_FORCE_MODE;
- break;
- }
- if (fmdev->rx.rf_depend_mute == FM_RX_RF_DEPENDENT_MUTE_ON)
- muteval |= FM_RX_RF_DEP_MODE;
- else
- muteval &= ~FM_RX_RF_DEP_MODE;
-
- payload = muteval;
- ret = fmc_send_cmd(fmdev, MUTE_STATUS_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Configures mute mode (Mute Off/On/Attenuate) */
-int fm_rx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- u8 org_state;
- int ret;
-
- if (fmdev->rx.mute_mode == mute_mode_toset)
- return 0;
-
- org_state = fmdev->rx.mute_mode;
- fmdev->rx.mute_mode = mute_mode_toset;
-
- ret = fm_config_rx_mute_reg(fmdev);
- if (ret < 0) {
- fmdev->rx.mute_mode = org_state;
- return ret;
- }
-
- return 0;
-}
-
-/* Gets RF dependent soft mute mode enable/disable status */
-int fm_rx_get_rfdepend_softmute(struct fmdev *fmdev, u8 *curr_mute_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_mute_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_mute_mode = fmdev->rx.rf_depend_mute;
-
- return 0;
-}
-
-/* Sets RF dependent soft mute mode */
-int fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
-{
- u8 org_state;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (rfdepend_mute != FM_RX_RF_DEPENDENT_MUTE_ON &&
- rfdepend_mute != FM_RX_RF_DEPENDENT_MUTE_OFF) {
- fmerr("Invalid RF dependent soft mute\n");
- return -EINVAL;
- }
- if (fmdev->rx.rf_depend_mute == rfdepend_mute)
- return 0;
-
- org_state = fmdev->rx.rf_depend_mute;
- fmdev->rx.rf_depend_mute = rfdepend_mute;
-
- ret = fm_config_rx_mute_reg(fmdev);
- if (ret < 0) {
- fmdev->rx.rf_depend_mute = org_state;
- return ret;
- }
-
- return 0;
-}
-
-/* Returns the signal strength level of current channel */
-int fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
-{
- __be16 curr_rssi_lel;
- u32 resp_len;
- int ret;
-
- if (rssilvl == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
- /* Read current RSSI level */
- ret = fmc_send_cmd(fmdev, RSSI_LVL_GET, REG_RD, NULL, 2,
- &curr_rssi_lel, &resp_len);
- if (ret < 0)
- return ret;
-
- *rssilvl = be16_to_cpu(curr_rssi_lel);
-
- return 0;
-}
-
-/*
- * Sets the signal strength level that once reached
- * will stop the auto search process
- */
-int fm_rx_set_rssi_threshold(struct fmdev *fmdev, short rssi_lvl_toset)
-{
- u16 payload;
- int ret;
-
- if (rssi_lvl_toset < FM_RX_RSSI_THRESHOLD_MIN ||
- rssi_lvl_toset > FM_RX_RSSI_THRESHOLD_MAX) {
- fmerr("Invalid RSSI threshold level\n");
- return -EINVAL;
- }
- payload = (u16)rssi_lvl_toset;
- ret = fmc_send_cmd(fmdev, SEARCH_LVL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.rssi_threshold = rssi_lvl_toset;
-
- return 0;
-}
-
-/* Returns current RX RSSI threshold value */
-int fm_rx_get_rssi_threshold(struct fmdev *fmdev, short *curr_rssi_lvl)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_rssi_lvl == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_rssi_lvl = fmdev->rx.rssi_threshold;
-
- return 0;
-}
-
-/* Sets RX stereo/mono modes */
-int fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (mode != FM_STEREO_MODE && mode != FM_MONO_MODE) {
- fmerr("Invalid mode\n");
- return -EINVAL;
- }
-
- /* Set stereo/mono mode */
- payload = (u16)mode;
- ret = fmc_send_cmd(fmdev, MOST_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set stereo blending mode */
- payload = FM_STEREO_SOFT_BLEND;
- ret = fmc_send_cmd(fmdev, MOST_BLEND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Gets current RX stereo/mono mode */
-int fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
-{
- __be16 curr_mode;
- u32 resp_len;
- int ret;
-
- if (mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- ret = fmc_send_cmd(fmdev, MOST_MODE_SET, REG_RD, NULL, 2,
- &curr_mode, &resp_len);
- if (ret < 0)
- return ret;
-
- *mode = be16_to_cpu(curr_mode);
-
- return 0;
-}
-
-/* Choose RX de-emphasis filter mode (50us/75us) */
-int fm_rx_set_deemphasis_mode(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (mode != FM_RX_EMPHASIS_FILTER_50_USEC &&
- mode != FM_RX_EMPHASIS_FILTER_75_USEC) {
- fmerr("Invalid rx de-emphasis mode (%d)\n", mode);
- return -EINVAL;
- }
-
- payload = mode;
- ret = fmc_send_cmd(fmdev, DEMPH_MODE_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.deemphasis_mode = mode;
-
- return 0;
-}
-
-/* Gets current RX de-emphasis filter mode */
-int fm_rx_get_deemph_mode(struct fmdev *fmdev, u16 *curr_deemphasis_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_deemphasis_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_deemphasis_mode = fmdev->rx.deemphasis_mode;
-
- return 0;
-}
-
-/* Enable/Disable RX RDS */
-int fm_rx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- u16 payload;
- int ret;
-
- if (rds_en_dis != FM_RDS_ENABLE && rds_en_dis != FM_RDS_DISABLE) {
- fmerr("Invalid rds option\n");
- return -EINVAL;
- }
-
- if (rds_en_dis == FM_RDS_ENABLE
- && fmdev->rx.rds.flag == FM_RDS_DISABLE) {
- /* Turn on RX RDS and RDS circuit */
- payload = FM_RX_PWR_SET_FM_AND_RDS_BLK_ON;
- ret = fmc_send_cmd(fmdev, POWER_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Clear and reset RDS FIFO */
- payload = FM_RX_RDS_FLUSH_FIFO;
- ret = fmc_send_cmd(fmdev, RDS_CNTRL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Read flags - just to clear any pending interrupts. */
- ret = fmc_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, 2,
- NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set RDS FIFO threshold value */
- payload = FM_RX_RDS_FIFO_THRESHOLD;
- ret = fmc_send_cmd(fmdev, RDS_MEM_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Enable RDS interrupt */
- fmdev->irq_info.mask |= FM_RDS_EVENT;
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0) {
- fmdev->irq_info.mask &= ~FM_RDS_EVENT;
- return ret;
- }
-
- /* Update our local flag */
- fmdev->rx.rds.flag = FM_RDS_ENABLE;
- } else if (rds_en_dis == FM_RDS_DISABLE
- && fmdev->rx.rds.flag == FM_RDS_ENABLE) {
- /* Turn off RX RDS */
- payload = FM_RX_PWR_SET_FM_ON_RDS_OFF;
- ret = fmc_send_cmd(fmdev, POWER_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Reset RDS pointers */
- fmdev->rx.rds.last_blk_idx = 0;
- fmdev->rx.rds.wr_idx = 0;
- fmdev->rx.rds.rd_idx = 0;
- fm_rx_reset_station_info(fmdev);
-
- /* Update RDS local cache */
- fmdev->irq_info.mask &= ~(FM_RDS_EVENT);
- fmdev->rx.rds.flag = FM_RDS_DISABLE;
- }
-
- return 0;
-}
-
-/* Returns current RX RDS enable/disable status */
-int fm_rx_get_rds_mode(struct fmdev *fmdev, u8 *curr_rds_en_dis)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (curr_rds_en_dis == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *curr_rds_en_dis = fmdev->rx.rds.flag;
-
- return 0;
-}
-
-/* Sets RDS operation mode (RDS/RDBS) */
-int fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (rds_mode != FM_RDS_SYSTEM_RDS && rds_mode != FM_RDS_SYSTEM_RBDS) {
- fmerr("Invalid rds mode\n");
- return -EINVAL;
- }
- /* Set RDS operation mode */
- payload = (u16)rds_mode;
- ret = fmc_send_cmd(fmdev, RDS_SYSTEM_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.rds_mode = rds_mode;
-
- return 0;
-}
-
-/* Configures Alternate Frequency switch mode */
-int fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (af_mode != FM_RX_RDS_AF_SWITCH_MODE_ON &&
- af_mode != FM_RX_RDS_AF_SWITCH_MODE_OFF) {
- fmerr("Invalid af mode\n");
- return -EINVAL;
- }
- /* Enable/disable low RSSI interrupt based on af_mode */
- if (af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
- fmdev->irq_info.mask |= FM_LEV_EVENT;
- else
- fmdev->irq_info.mask &= ~FM_LEV_EVENT;
-
- payload = fmdev->irq_info.mask;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->rx.af_mode = af_mode;
-
- return 0;
-}
-
-/* Returns Alternate Frequency switch status */
-int fm_rx_get_af_switch(struct fmdev *fmdev, u8 *af_mode)
-{
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- if (af_mode == NULL) {
- fmerr("Invalid memory\n");
- return -ENOMEM;
- }
-
- *af_mode = fmdev->rx.af_mode;
-
- return 0;
-}
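
The seek routine removed above works in channel-index units: the current frequency is converted to an index relative to the band bottom, stepped by one spacing index, and wrapped at the band limits while keeping the same offset from the spacing grid. A standalone sketch of that index arithmetic follows; the band, spacing and scale constants are illustrative assumptions, not the driver's real region table.

    /* Standalone sketch of the seek index arithmetic; constants are assumed. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int freq_mul  = 50;                    /* assumed kHz per index unit */
        unsigned int bot_khz   = 87500, top_khz = 108000;
        unsigned int space_khz = 100;
        int seek_up = 1;

        unsigned int last_idx  = (top_khz - bot_khz) / freq_mul;  /* band top, index units */
        unsigned int space_idx = space_khz / freq_mul;            /* one channel step */
        unsigned int curr_idx  = (98100 - bot_khz) / freq_mul;    /* tuned to 98.1 MHz */
        unsigned int offset    = curr_idx % space_idx;            /* distance from the grid */

        int next_idx = seek_up ? (int)curr_idx + (int)space_idx
                               : (int)curr_idx - (int)space_idx;

        /* Wrap at the band limits while staying on the same grid offset */
        if (next_idx < 0)
            next_idx = (int)(last_idx - offset);
        else if (next_idx > (int)last_idx)
            next_idx = (int)offset;

        printf("next index %d -> %u kHz\n", next_idx,
               bot_khz + (unsigned int)next_idx * freq_mul);
        return 0;
    }
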
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.h b/drivers/media/radio/wl128x/fmdrv_rx.h
deleted file mode 100644
index 2748e99662c3..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_rx.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM RX module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_RX_H
-#define _FMDRV_RX_H
-
-int fm_rx_set_freq(struct fmdev *, u32);
-int fm_rx_set_mute_mode(struct fmdev *, u8);
-int fm_rx_set_stereo_mono(struct fmdev *, u16);
-int fm_rx_set_rds_mode(struct fmdev *, u8);
-int fm_rx_set_rds_system(struct fmdev *, u8);
-int fm_rx_set_volume(struct fmdev *, u16);
-int fm_rx_set_rssi_threshold(struct fmdev *, short);
-int fm_rx_set_region(struct fmdev *, u8);
-int fm_rx_set_rfdepend_softmute(struct fmdev *, u8);
-int fm_rx_set_deemphasis_mode(struct fmdev *, u16);
-int fm_rx_set_af_switch(struct fmdev *, u8);
-
-void fm_rx_reset_rds_cache(struct fmdev *);
-void fm_rx_reset_station_info(struct fmdev *);
-
-int fm_rx_seek(struct fmdev *, u32, u32, u32);
-
-int fm_rx_get_rds_mode(struct fmdev *, u8 *);
-int fm_rx_get_mute_mode(struct fmdev *, u8 *);
-int fm_rx_get_volume(struct fmdev *, u16 *);
-int fm_rx_get_band_freq_range(struct fmdev *,
- u32 *, u32 *);
-int fm_rx_get_stereo_mono(struct fmdev *, u16 *);
-int fm_rx_get_rssi_level(struct fmdev *, u16 *);
-int fm_rx_get_rssi_threshold(struct fmdev *, short *);
-int fm_rx_get_rfdepend_softmute(struct fmdev *, u8 *);
-int fm_rx_get_deemph_mode(struct fmdev *, u16 *);
-int fm_rx_get_af_switch(struct fmdev *, u8 *);
-void fm_rx_get_region(struct fmdev *, u8 *);
-
-int fm_rx_set_chanl_spacing(struct fmdev *, u8);
-int fm_rx_get_chanl_spacing(struct fmdev *, u8 *);
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.c b/drivers/media/radio/wl128x/fmdrv_tx.c
deleted file mode 100644
index c589de02f4f5..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_tx.c
+++ /dev/null
@@ -1,413 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This sub-module of FM driver implements FM TX functionality.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#include <linux/delay.h>
-#include "fmdrv.h"
-#include "fmdrv_common.h"
-#include "fmdrv_tx.h"
-
-int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
-{
- u16 payload;
- int ret;
-
- if (fmdev->tx_data.aud_mode == mode)
- return 0;
-
- fmdbg("stereo mode: %d\n", mode);
-
- /* Set Stereo/Mono mode */
- payload = (1 - mode);
- ret = fmc_send_cmd(fmdev, MONO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fmdev->tx_data.aud_mode = mode;
-
- return ret;
-}
-
-static int set_rds_text(struct fmdev *fmdev, u8 *rds_text)
-{
- u16 payload;
- int ret;
-
- ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text,
- strlen(rds_text), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Scroll mode */
- payload = (u16)0x1;
- ret = fmc_send_cmd(fmdev, DISPLAY_MODE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int set_rds_data_mode(struct fmdev *fmdev, u8 mode)
-{
- u16 payload;
- int ret;
-
- /* Setting unique PI TODO: how unique? */
- payload = (u16)0xcafe;
- ret = fmc_send_cmd(fmdev, PI_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set decoder id */
- payload = (u16)0xa;
- ret = fmc_send_cmd(fmdev, DI_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: RDS_MODE_GET? */
- return 0;
-}
-
-static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
-{
- u16 payload;
- int ret;
-
- len |= type << 8;
- payload = len;
- ret = fmc_send_cmd(fmdev, RDS_CONFIG_DATA_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: LENGTH_GET? */
- return 0;
-}
-
-int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
-{
- u16 payload;
- int ret;
- u8 rds_text[] = "Zoom2\n";
-
- fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis,
- FM_RDS_ENABLE, FM_RDS_DISABLE);
-
- if (rds_en_dis == FM_RDS_ENABLE) {
- /* Set RDS length */
- set_rds_len(fmdev, 0, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
-
- /* Set RDS mode */
- set_rds_data_mode(fmdev, 0x0);
- }
-
- /* Send command to enable RDS */
- if (rds_en_dis == FM_RDS_ENABLE)
- payload = 0x01;
- else
- payload = 0x00;
-
- ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- if (rds_en_dis == FM_RDS_ENABLE) {
- /* Set RDS length */
- set_rds_len(fmdev, 0, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
- }
- fmdev->tx_data.rds.flag = rds_en_dis;
-
- return 0;
-}
-
-int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- fm_tx_set_rds_mode(fmdev, 0);
-
- /* Set RDS length */
- set_rds_len(fmdev, rds_type, strlen(rds_text));
-
- /* Set RDS text */
- set_rds_text(fmdev, rds_text);
-
- /* Set RDS mode */
- set_rds_data_mode(fmdev, 0x0);
-
- payload = 1;
- ret = fmc_send_cmd(fmdev, RDS_DATA_ENB, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int fm_tx_set_af(struct fmdev *fmdev, u32 af)
-{
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- fmdbg("AF: %d\n", af);
-
- af = (af - 87500) / 100;
- payload = (u16)af;
- ret = fmc_send_cmd(fmdev, TA_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int fm_tx_set_region(struct fmdev *fmdev, u8 region)
-{
- u16 payload;
- int ret;
-
- if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) {
- fmerr("Invalid band\n");
- return -EINVAL;
- }
-
- /* Send command to set the band */
- payload = (u16)region;
- ret = fmc_send_cmd(fmdev, TX_BAND_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
-{
- u16 payload;
- int ret;
-
- fmdbg("tx: mute mode %d\n", mute_mode_toset);
-
- payload = mute_mode_toset;
- ret = fmc_send_cmd(fmdev, MUTE, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Set TX Audio I/O */
-static int set_audio_io(struct fmdev *fmdev)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload;
- int ret;
-
- /* Set Audio I/O Enable */
- payload = tx->audio_io;
- ret = fmc_send_cmd(fmdev, AUDIO_IO_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: is audio set? */
- return 0;
-}
-
-/* Start TX Transmission */
-static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- unsigned long timeleft;
- u16 payload;
- int ret;
-
- /* Enable POWER_ENB interrupts */
- payload = FM_POW_ENB_EVENT;
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Set Power Enable */
- payload = new_xmit_state;
- ret = fmc_send_cmd(fmdev, POWER_ENB_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* Wait for Power Enabled */
- init_completion(&fmdev->maintask_comp);
- timeleft = wait_for_completion_timeout(&fmdev->maintask_comp,
- FM_DRV_TX_TIMEOUT);
- if (!timeleft) {
- fmerr("Timeout(%d sec),didn't get tune ended interrupt\n",
- jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
- return -ETIMEDOUT;
- }
-
- set_bit(FM_CORE_TX_XMITING, &fmdev->flag);
- tx->xmit_state = new_xmit_state;
-
- return 0;
-}
-
-/* Set TX power level */
-int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
-{
- u16 payload;
- struct fmtx_data *tx = &fmdev->tx_data;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
- fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl);
-
- /* If the core isn't ready update global variable */
- if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
- tx->pwr_lvl = new_pwr_lvl;
- return 0;
- }
-
- /* Set power level: Application will specify power level value in
- * units of dB/uV, whereas range and step are specific to FM chip.
- * For TI's WL chips, convert application specified power level value
- * to chip specific value by subtracting 122 from it. Refer to TI FM
- * data sheet for details.
- * */
-
- payload = (FM_PWR_LVL_HIGH - new_pwr_lvl);
- ret = fmc_send_cmd(fmdev, POWER_LEV_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- /* TODO: is the power level set? */
- tx->pwr_lvl = new_pwr_lvl;
-
- return 0;
-}
-
-/*
- * Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us)
- * Convert V4L2 specified filter values to chip specific filter values.
- */
-int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- switch (preemphasis) {
- case V4L2_PREEMPHASIS_DISABLED:
- payload = FM_TX_PREEMPH_OFF;
- break;
- case V4L2_PREEMPHASIS_50_uS:
- payload = FM_TX_PREEMPH_50US;
- break;
- case V4L2_PREEMPHASIS_75_uS:
- payload = FM_TX_PREEMPH_75US;
- break;
- }
-
- ret = fmc_send_cmd(fmdev, PREMPH_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- tx->preemph = payload;
-
- return ret;
-}
-
-/* Get the TX tuning capacitor value.*/
-int fm_tx_get_tune_cap_val(struct fmdev *fmdev)
-{
- u16 curr_val;
- u32 resp_len;
- int ret;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- ret = fmc_send_cmd(fmdev, READ_FMANT_TUNE_VALUE, REG_RD,
- NULL, sizeof(curr_val), &curr_val, &resp_len);
- if (ret < 0)
- return ret;
-
- curr_val = be16_to_cpu((__force __be16)curr_val);
-
- return curr_val;
-}
-
-/* Set TX Frequency */
-int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set)
-{
- struct fmtx_data *tx = &fmdev->tx_data;
- u16 payload, chanl_index;
- int ret;
-
- if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) {
- enable_xmit(fmdev, 0);
- clear_bit(FM_CORE_TX_XMITING, &fmdev->flag);
- }
-
- /* Enable FR, BL interrupts */
- payload = (FM_FR_EVENT | FM_BL_EVENT);
- ret = fmc_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- tx->tx_frq = (unsigned long)freq_to_set;
- fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq);
-
- chanl_index = freq_to_set / 10;
-
- /* Set current tuner channel */
- payload = chanl_index;
- ret = fmc_send_cmd(fmdev, CHANL_SET, REG_WR, &payload,
- sizeof(payload), NULL, NULL);
- if (ret < 0)
- return ret;
-
- fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl);
- fm_tx_set_preemph_filter(fmdev, tx->preemph);
-
- tx->audio_io = 0x01; /* I2S */
- set_audio_io(fmdev);
-
- enable_xmit(fmdev, 0x01); /* Enable transmission */
-
- tx->aud_mode = FM_STEREO_MODE;
- tx->rds.flag = FM_RDS_DISABLE;
-
- return 0;
-}
-
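
set_rds_len() in the removed TX code packs a single 16-bit word with the RDS text type in the high byte and the text length in the low byte before writing it via RDS_CONFIG_DATA_SET. A small standalone sketch of that packing, assuming the same layout as the code above:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack an RDS config word: type in bits 15..8, text length in bits 7..0,
     * mirroring "len |= type << 8" in the removed set_rds_len(). */
    static uint16_t pack_rds_len(uint8_t type, uint16_t len)
    {
        return (uint16_t)((type << 8) | (len & 0xff));
    }

    int main(void)
    {
        const char *rds_text = "Zoom2\n";          /* same sample text as the driver */
        uint16_t payload = pack_rds_len(0, (uint16_t)strlen(rds_text));

        printf("payload = 0x%04x\n", payload);     /* 0x0006 for a 6-byte text, type 0 */
        return 0;
    }
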
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.h b/drivers/media/radio/wl128x/fmdrv_tx.h
deleted file mode 100644
index aebdadf9e99b..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_tx.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * FM TX module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_TX_H
-#define _FMDRV_TX_H
-
-int fm_tx_set_freq(struct fmdev *, u32);
-int fm_tx_set_pwr_lvl(struct fmdev *, u8);
-int fm_tx_set_region(struct fmdev *, u8);
-int fm_tx_set_mute_mode(struct fmdev *, u8);
-int fm_tx_set_stereo_mono(struct fmdev *, u16);
-int fm_tx_set_rds_mode(struct fmdev *, u8);
-int fm_tx_set_radio_text(struct fmdev *, u8 *, u8);
-int fm_tx_set_af(struct fmdev *, u32);
-int fm_tx_set_preemph_filter(struct fmdev *, u32);
-int fm_tx_get_tune_cap_val(struct fmdev *);
-
-#endif
-
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
deleted file mode 100644
index 1c146d14dbbd..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- * This file provides interfaces to V4L2 subsystem.
- *
- * This module registers with the V4L2 subsystem as a radio
- * data system interface (/dev/radio). During registration,
- * it exposes two sets of function pointers.
- *
- * 1) File operation related API (open, close, read, write, poll...etc).
- * 2) Set of V4L2 IOCTL compliant API.
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Author: Manjunatha Halli <manjunatha_halli@ti.com>
- */
-
-#include <linux/export.h>
-
-#include "fmdrv.h"
-#include "fmdrv_v4l2.h"
-#include "fmdrv_common.h"
-#include "fmdrv_rx.h"
-#include "fmdrv_tx.h"
-
-static struct video_device gradio_dev;
-static u8 radio_disconnected;
-
-/* -- V4L2 RADIO (/dev/radioX) device file operation interfaces --- */
-
-/* Read RX RDS data */
-static ssize_t fm_v4l2_fops_read(struct file *file, char __user * buf,
- size_t count, loff_t *ppos)
-{
- u8 rds_mode;
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
-
- if (!radio_disconnected) {
- fmerr("FM device is already disconnected\n");
- return -EIO;
- }
-
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
-
- /* Turn on RDS mode if it is disabled */
- ret = fm_rx_get_rds_mode(fmdev, &rds_mode);
- if (ret < 0) {
- fmerr("Unable to read current rds mode\n");
- goto read_unlock;
- }
-
- if (rds_mode == FM_RDS_DISABLE) {
- ret = fmc_set_rds_mode(fmdev, FM_RDS_ENABLE);
- if (ret < 0) {
- fmerr("Failed to enable rds mode\n");
- goto read_unlock;
- }
- }
-
- /* Copy RDS data from internal buffer to user buffer */
- ret = fmc_transfer_rds_from_internal_buff(fmdev, file, buf, count);
-read_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-/* Write TX RDS data */
-static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- struct tx_rds rds;
- int ret;
- struct fmdev *fmdev;
-
- ret = copy_from_user(&rds, buf, sizeof(rds));
- rds.text[sizeof(rds.text) - 1] = '\0';
- fmdbg("(%d)type: %d, text %s, af %d\n",
- ret, rds.text_type, rds.text, rds.af_freq);
- if (ret)
- return -EFAULT;
-
- fmdev = video_drvdata(file);
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
- fm_tx_set_radio_text(fmdev, rds.text, rds.text_type);
- fm_tx_set_af(fmdev, rds.af_freq);
- mutex_unlock(&fmdev->mutex);
-
- return sizeof(rds);
-}
-
-static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
-{
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
- mutex_lock(&fmdev->mutex);
- ret = fmc_is_rds_data_available(fmdev, file, pts);
- mutex_unlock(&fmdev->mutex);
- if (ret < 0)
- return EPOLLIN | EPOLLRDNORM;
-
- return 0;
-}
-
-/*
- * Handle open request for "/dev/radioX" device.
- * Start with FM RX mode as default.
- */
-static int fm_v4l2_fops_open(struct file *file)
-{
- int ret;
- struct fmdev *fmdev = NULL;
-
- /* Don't allow multiple open */
- if (radio_disconnected) {
- fmerr("FM device is already opened\n");
- return -EBUSY;
- }
-
- fmdev = video_drvdata(file);
-
- if (mutex_lock_interruptible(&fmdev->mutex))
- return -ERESTARTSYS;
- ret = fmc_prepare(fmdev);
- if (ret < 0) {
- fmerr("Unable to prepare FM CORE\n");
- goto open_unlock;
- }
-
- fmdbg("Load FM RX firmware..\n");
-
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret < 0) {
- fmerr("Unable to load FM RX firmware\n");
- goto open_unlock;
- }
- radio_disconnected = 1;
-
-open_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-static int fm_v4l2_fops_release(struct file *file)
-{
- int ret;
- struct fmdev *fmdev;
-
- fmdev = video_drvdata(file);
- if (!radio_disconnected) {
- fmdbg("FM device is already closed\n");
- return 0;
- }
-
- mutex_lock(&fmdev->mutex);
- ret = fmc_set_mode(fmdev, FM_MODE_OFF);
- if (ret < 0) {
- fmerr("Unable to turn off the chip\n");
- goto release_unlock;
- }
-
- ret = fmc_release(fmdev);
- if (ret < 0) {
- fmerr("FM CORE release failed\n");
- goto release_unlock;
- }
- radio_disconnected = 0;
-
-release_unlock:
- mutex_unlock(&fmdev->mutex);
- return ret;
-}
-
-/* V4L2 RADIO (/dev/radioX) device IOCTL interfaces */
-static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- strscpy(capability->driver, FM_DRV_NAME, sizeof(capability->driver));
- strscpy(capability->card, FM_DRV_CARD_SHORT_NAME,
- sizeof(capability->card));
- sprintf(capability->bus_info, "UART");
- return 0;
-}
-
-static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct fmdev *fmdev = container_of(ctrl->handler,
- struct fmdev, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = fm_tx_get_tune_cap_val(fmdev);
- break;
- default:
- fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
- break;
- }
-
- return 0;
-}
-
-static int fm_v4l2_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct fmdev *fmdev = container_of(ctrl->handler,
- struct fmdev, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME: /* set volume */
- return fm_rx_set_volume(fmdev, (u16)ctrl->val);
-
- case V4L2_CID_AUDIO_MUTE: /* set mute */
- return fmc_set_mute_mode(fmdev, (u8)ctrl->val);
-
- case V4L2_CID_TUNE_POWER_LEVEL:
- /* set TX power level - ext control */
- return fm_tx_set_pwr_lvl(fmdev, (u8)ctrl->val);
-
- case V4L2_CID_TUNE_PREEMPHASIS:
- return fm_tx_set_preemph_filter(fmdev, (u8) ctrl->val);
-
- default:
- return -EINVAL;
- }
-}
-
-static int fm_v4l2_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- memset(audio, 0, sizeof(*audio));
- strscpy(audio->name, "Radio", sizeof(audio->name));
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int fm_v4l2_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/* Get tuner attributes. If current mode is NOT RX, return error */
-static int fm_v4l2_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u32 bottom_freq;
- u32 top_freq;
- u16 stereo_mono_mode;
- u16 rssilvl;
- int ret;
-
- if (tuner->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_RX)
- return -EPERM;
-
- ret = fm_rx_get_band_freq_range(fmdev, &bottom_freq, &top_freq);
- if (ret != 0)
- return ret;
-
- ret = fm_rx_get_stereo_mono(fmdev, &stereo_mono_mode);
- if (ret != 0)
- return ret;
-
- ret = fm_rx_get_rssi_level(fmdev, &rssilvl);
- if (ret != 0)
- return ret;
-
- strscpy(tuner->name, "FM", sizeof(tuner->name));
- tuner->type = V4L2_TUNER_RADIO;
- /* Store rangelow and rangehigh freq in units of 62.5 Hz */
- tuner->rangelow = bottom_freq * 16;
- tuner->rangehigh = top_freq * 16;
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO |
- ((fmdev->rx.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0);
- tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_LOW |
- V4L2_TUNER_CAP_HWSEEK_BOUNDED |
- V4L2_TUNER_CAP_HWSEEK_WRAP;
- tuner->audmode = (stereo_mono_mode ?
- V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO);
-
- /*
- * The actual RSSI value lies between -128 and +127.
- * Convert it to the 0..255 range by adding +128.
- */
- rssilvl += 128;
-
- /*
- * The returned signal strength value should be within 0 to 65535.
- * Find the correct signal ratio by multiplying by (65535/255) = 257.
- */
- tuner->signal = rssilvl * 257;
- tuner->afc = 0;
-
- return ret;
-}
-
-/*
- * Set tuner attributes. If current mode is NOT RX, set to RX.
- * Currently, we set only audio mode (mono/stereo) and RDS state (on/off).
- * Should we set other tuner attributes, too?
- */
-static int fm_v4l2_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u16 aud_mode;
- u8 rds_mode;
- int ret;
-
- if (tuner->index != 0)
- return -EINVAL;
-
- aud_mode = (tuner->audmode == V4L2_TUNER_MODE_STEREO) ?
- FM_STEREO_MODE : FM_MONO_MODE;
- rds_mode = (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ?
- FM_RDS_ENABLE : FM_RDS_DISABLE;
-
- if (fmdev->curr_fmmode != FM_MODE_RX) {
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret < 0) {
- fmerr("Failed to set RX mode\n");
- return ret;
- }
- }
-
- ret = fmc_set_stereo_mono(fmdev, aud_mode);
- if (ret < 0) {
- fmerr("Failed to set RX stereo/mono mode\n");
- return ret;
- }
-
- ret = fmc_set_rds_mode(fmdev, rds_mode);
- if (ret < 0)
- fmerr("Failed to set RX RDS mode\n");
-
- return ret;
-}
-
-/* Get tuner or modulator radio frequency */
-static int fm_v4l2_vidioc_g_freq(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct fmdev *fmdev = video_drvdata(file);
- int ret;
-
- ret = fmc_get_freq(fmdev, &freq->frequency);
- if (ret < 0) {
- fmerr("Failed to get frequency\n");
- return ret;
- }
-
- /* Frequency unit of 62.5 Hz*/
- freq->frequency = (u32) freq->frequency * 16;
-
- return 0;
-}
-
-/* Set tuner or modulator radio frequency */
-static int fm_v4l2_vidioc_s_freq(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct fmdev *fmdev = video_drvdata(file);
-
- /*
- * As V4L2_TUNER_CAP_LOW is set to 1, the user sends the frequency
- * in units of 62.5 Hz.
- */
- return fmc_set_freq(fmdev, freq->frequency / 16);
-}
-
-/* Set hardware frequency seek. If current mode is NOT RX, set it RX. */
-static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct fmdev *fmdev = video_drvdata(file);
- int ret;
-
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
- if (fmdev->curr_fmmode != FM_MODE_RX) {
- ret = fmc_set_mode(fmdev, FM_MODE_RX);
- if (ret != 0) {
- fmerr("Failed to set RX mode\n");
- return ret;
- }
- }
-
- ret = fm_rx_seek(fmdev, seek->seek_upward, seek->wrap_around,
- seek->spacing);
- if (ret < 0)
- fmerr("RX seek failed - %d\n", ret);
-
- return ret;
-}
-/* Get modulator attributes. If mode is not TX, return no attributes. */
-static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv,
- struct v4l2_modulator *mod)
-{
- struct fmdev *fmdev = video_drvdata(file);
-
- if (mod->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_TX)
- return -EPERM;
-
- mod->txsubchans = ((fmdev->tx_data.aud_mode == FM_STEREO_MODE) ?
- V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO) |
- ((fmdev->tx_data.rds.flag == FM_RDS_ENABLE) ?
- V4L2_TUNER_SUB_RDS : 0);
-
- mod->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_LOW;
-
- return 0;
-}
-
-/* Set modulator attributes. If mode is not TX, set to TX. */
-static int fm_v4l2_vidioc_s_modulator(struct file *file, void *priv,
- const struct v4l2_modulator *mod)
-{
- struct fmdev *fmdev = video_drvdata(file);
- u8 rds_mode;
- u16 aud_mode;
- int ret;
-
- if (mod->index != 0)
- return -EINVAL;
-
- if (fmdev->curr_fmmode != FM_MODE_TX) {
- ret = fmc_set_mode(fmdev, FM_MODE_TX);
- if (ret != 0) {
- fmerr("Failed to set TX mode\n");
- return ret;
- }
- }
-
- aud_mode = (mod->txsubchans & V4L2_TUNER_SUB_STEREO) ?
- FM_STEREO_MODE : FM_MONO_MODE;
- rds_mode = (mod->txsubchans & V4L2_TUNER_SUB_RDS) ?
- FM_RDS_ENABLE : FM_RDS_DISABLE;
- ret = fm_tx_set_stereo_mono(fmdev, aud_mode);
- if (ret < 0) {
- fmerr("Failed to set mono/stereo mode for TX\n");
- return ret;
- }
- ret = fm_tx_set_rds_mode(fmdev, rds_mode);
- if (ret < 0)
- fmerr("Failed to set rds mode for TX\n");
-
- return ret;
-}
-
-static const struct v4l2_file_operations fm_drv_fops = {
- .owner = THIS_MODULE,
- .read = fm_v4l2_fops_read,
- .write = fm_v4l2_fops_write,
- .poll = fm_v4l2_fops_poll,
- .unlocked_ioctl = video_ioctl2,
- .open = fm_v4l2_fops_open,
- .release = fm_v4l2_fops_release,
-};
-
-static const struct v4l2_ctrl_ops fm_ctrl_ops = {
- .s_ctrl = fm_v4l2_s_ctrl,
- .g_volatile_ctrl = fm_g_volatile_ctrl,
-};
-static const struct v4l2_ioctl_ops fm_drv_ioctl_ops = {
- .vidioc_querycap = fm_v4l2_vidioc_querycap,
- .vidioc_g_audio = fm_v4l2_vidioc_g_audio,
- .vidioc_s_audio = fm_v4l2_vidioc_s_audio,
- .vidioc_g_tuner = fm_v4l2_vidioc_g_tuner,
- .vidioc_s_tuner = fm_v4l2_vidioc_s_tuner,
- .vidioc_g_frequency = fm_v4l2_vidioc_g_freq,
- .vidioc_s_frequency = fm_v4l2_vidioc_s_freq,
- .vidioc_s_hw_freq_seek = fm_v4l2_vidioc_s_hw_freq_seek,
- .vidioc_g_modulator = fm_v4l2_vidioc_g_modulator,
- .vidioc_s_modulator = fm_v4l2_vidioc_s_modulator
-};
-
-/* V4L2 RADIO device parent structure */
-static const struct video_device fm_viddev_template = {
- .fops = &fm_drv_fops,
- .ioctl_ops = &fm_drv_ioctl_ops,
- .name = FM_DRV_NAME,
- .release = video_device_release_empty,
- /*
- * To ensure both the tuner and modulator ioctls are accessible we
- * set the vfl_dir to M2M to indicate this.
- *
- * It is not really a mem2mem device of course, but it can both receive
- * and transmit using the same radio device. It's the only radio driver
- * that does this and it should really be split in two radio devices,
- * but that would affect applications using this driver.
- */
- .vfl_dir = VFL_DIR_M2M,
- .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_MODULATOR | V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE,
-};
-
-int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
-{
- struct v4l2_ctrl *ctrl;
- int ret;
-
- strscpy(fmdev->v4l2_dev.name, FM_DRV_NAME,
- sizeof(fmdev->v4l2_dev.name));
- ret = v4l2_device_register(NULL, &fmdev->v4l2_dev);
- if (ret < 0)
- return ret;
-
- /* Init mutex for core locking */
- mutex_init(&fmdev->mutex);
-
- /* Setup FM driver's V4L2 properties */
- gradio_dev = fm_viddev_template;
-
- video_set_drvdata(&gradio_dev, fmdev);
-
- gradio_dev.lock = &fmdev->mutex;
- gradio_dev.v4l2_dev = &fmdev->v4l2_dev;
-
- /* Register with V4L2 subsystem as RADIO device */
- if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
- v4l2_device_unregister(&fmdev->v4l2_dev);
- fmerr("Could not register video device\n");
- return -ENOMEM;
- }
-
- fmdev->radio_dev = &gradio_dev;
-
- /* Register to v4l2 ctrl handler framework */
- fmdev->radio_dev->ctrl_handler = &fmdev->ctrl_handler;
-
- ret = v4l2_ctrl_handler_init(&fmdev->ctrl_handler, 5);
- if (ret < 0) {
- fmerr("(fmdev): Can't init ctrl handler\n");
- v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
- video_unregister_device(fmdev->radio_dev);
- v4l2_device_unregister(&fmdev->v4l2_dev);
- return -EBUSY;
- }
-
- /*
- * Following controls are handled by V4L2 control framework.
- * Added in ascending ID order.
- */
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, FM_RX_VOLUME_MIN,
- FM_RX_VOLUME_MAX, 1, FM_RX_VOLUME_MAX);
-
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
-
- v4l2_ctrl_new_std_menu(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS,
- 0, V4L2_PREEMPHASIS_75_uS);
-
- v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, FM_PWR_LVL_LOW,
- FM_PWR_LVL_HIGH, 1, FM_PWR_LVL_HIGH);
-
- ctrl = v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0,
- 255, 1, 255);
-
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
-
- return 0;
-}
-
-void *fm_v4l2_deinit_video_device(void)
-{
- struct fmdev *fmdev;
-
-
- fmdev = video_get_drvdata(&gradio_dev);
-
- /* Unregister to v4l2 ctrl handler framework*/
- v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
-
- /* Unregister RADIO device from V4L2 subsystem */
- video_unregister_device(&gradio_dev);
-
- v4l2_device_unregister(&fmdev->v4l2_dev);
-
- return fmdev;
-}
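
The removed g_tuner handler rescales the chip's signed RSSI reading (-128..127) into the 0..65535 range V4L2 expects for tuner->signal: add 128 to get 0..255, then multiply by 257 because 255 * 257 = 65535. A standalone check of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Map a signed chip RSSI (-128..127) onto V4L2's 0..65535 signal scale,
     * as done in the removed g_tuner handler: shift by +128, then scale by 257. */
    static uint16_t rssi_to_v4l2_signal(int8_t rssi)
    {
        uint16_t level = (uint16_t)(rssi + 128);   /* 0..255 */
        return (uint16_t)(level * 257);            /* 0..65535, since 255 * 257 == 65535 */
    }

    int main(void)
    {
        printf("%u %u %u\n",
               rssi_to_v4l2_signal(-128),   /* 0     */
               rssi_to_v4l2_signal(0),      /* 32896 */
               rssi_to_v4l2_signal(127));   /* 65535 */
        return 0;
    }
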
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.h b/drivers/media/radio/wl128x/fmdrv_v4l2.h
deleted file mode 100644
index 963214e9d6f2..000000000000
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * FM Driver for Connectivity chip of Texas Instruments.
- *
- * FM V4L2 module header.
- *
- * Copyright (C) 2011 Texas Instruments
- */
-
-#ifndef _FMDRV_V4L2_H
-#define _FMDRV_V4L2_H
-
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-
-int fm_v4l2_init_video_device(struct fmdev *, int);
-void *fm_v4l2_deinit_video_device(void);
-
-#endif
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 276bf3c8a8cb..8af94246e591 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -194,8 +194,10 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
if (rc)
return rc;
- if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
+ if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) {
+ usb_kill_urb(ir->urb_out);
return -ETIMEDOUT;
+ }
return rc;
}
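
The iguanair fix above cancels the outstanding OUT URB when the completion wait times out, so a late completion cannot run against state the caller has already given up on. A hedged sketch of the general submit/wait/kill pattern; the context structure and field names are placeholders, not the iguanair driver's layout.

    #include <linux/usb.h>
    #include <linux/completion.h>
    #include <linux/errno.h>

    struct my_ctx {
        struct urb *urb_out;
        struct completion done;
    };

    /* Sketch only: submit an URB, wait for its completion with a timeout,
     * and cancel it if the wait expires so it cannot complete later. */
    static int send_and_wait(struct my_ctx *ctx, unsigned long timeout)
    {
        int rc;

        reinit_completion(&ctx->done);

        rc = usb_submit_urb(ctx->urb_out, GFP_KERNEL);
        if (rc)
            return rc;

        if (wait_for_completion_timeout(&ctx->done, timeout) == 0) {
            usb_kill_urb(ctx->urb_out);   /* ensure the URB is really finished */
            return -ETIMEDOUT;
        }

        return 0;
    }
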
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index b02ded52f19e..3a526dea6532 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -37,7 +37,7 @@ static void imon_ir_data(struct imon *imon)
if (packet_no == 0xff)
return;
- dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
+ dev_dbg(imon->dev, "data: %8ph", imon->ir_buf);
/*
* Only the first 5 bytes contain IR data. Right shift so we move
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index cd7af4d88b7f..044767eb3a38 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -28,7 +28,6 @@
#include <linux/workqueue.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
-#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
#define DRIVER_VERSION "1.95"
@@ -658,8 +657,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
if (len == 2)
dev_dbg(dev, "Get hw/sw rev?");
else
- dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[offset + 2]);
+ dev_dbg(dev, "hw/sw rev %4ph",
+ &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index e1dd8adeba46..438483c62fac 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
mux_args.mux_buf_sz = mux_buf_sz;
- dvb->streaming = true;
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
if (!dvb->mux)
return -ENOMEM;
+
+ dvb->streaming = true;
vidtv_mux_start_thread(dvb->mux);
dev_dbg_ratelimited(dev, "Started streaming\n");
@@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
{
struct device *dev = &dvb->pdev->dev;
+ if (!dvb->streaming) {
+ dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
+ return 0;
+ }
+
dvb->streaming = false;
vidtv_mux_stop_thread(dvb->mux);
vidtv_mux_destroy(dvb->mux);
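
The vidtv change above only marks the bridge as streaming once vidtv_mux_init() has succeeded, and turns a stop without a prior successful start into a no-op, so the mux is never torn down while NULL. A standalone sketch of that start/stop discipline using placeholder types:

    #include <stdbool.h>
    #include <stdlib.h>

    struct demo_dvb {
        bool  streaming;
        void *mux;
    };

    /* Only mark the device as streaming once every allocation has succeeded. */
    static int demo_start(struct demo_dvb *dvb)
    {
        dvb->mux = malloc(64);          /* stand-in for vidtv_mux_init() */
        if (!dvb->mux)
            return -1;

        dvb->streaming = true;
        return 0;
    }

    /* Stopping when nothing was started must be a harmless no-op. */
    static void demo_stop(struct demo_dvb *dvb)
    {
        if (!dvb->streaming)
            return;

        dvb->streaming = false;
        free(dvb->mux);
        dvb->mux = NULL;
    }

    int main(void)
    {
        struct demo_dvb dvb = { 0 };

        demo_stop(&dvb);                /* safe even though start never ran */
        if (demo_start(&dvb) == 0)
            demo_stop(&dvb);
        return 0;
    }
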
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 1006a2798eef..90d2ef067594 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -112,70 +112,6 @@ static int fc0013_sleep(struct dvb_frontend *fe)
return 0;
}
-int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
-{
- struct fc0013_priv *priv = fe->tuner_priv;
- int ret;
- u8 rc_cal;
- int val;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
-
- /* push rc_cal value, get rc_cal value */
- ret = fc0013_writereg(priv, 0x10, 0x00);
- if (ret)
- goto error_out;
-
- /* get rc_cal value */
- ret = fc0013_readreg(priv, 0x10, &rc_cal);
- if (ret)
- goto error_out;
-
- rc_cal &= 0x0f;
-
- val = (int)rc_cal + rc_val;
-
- /* forcing rc_cal */
- ret = fc0013_writereg(priv, 0x0d, 0x11);
- if (ret)
- goto error_out;
-
- /* modify rc_cal value */
- if (val > 15)
- ret = fc0013_writereg(priv, 0x10, 0x0f);
- else if (val < 0)
- ret = fc0013_writereg(priv, 0x10, 0x00);
- else
- ret = fc0013_writereg(priv, 0x10, (u8)val);
-
-error_out:
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
-
- return ret;
-}
-EXPORT_SYMBOL(fc0013_rc_cal_add);
-
-int fc0013_rc_cal_reset(struct dvb_frontend *fe)
-{
- struct fc0013_priv *priv = fe->tuner_priv;
- int ret;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
-
- ret = fc0013_writereg(priv, 0x0d, 0x01);
- if (!ret)
- ret = fc0013_writereg(priv, 0x10, 0x00);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
-
- return ret;
-}
-EXPORT_SYMBOL(fc0013_rc_cal_reset);
-
static int fc0013_set_vhf_track(struct fc0013_priv *priv, u32 freq)
{
int ret;
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index 74ce5903f199..47ab36342ee8 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -16,8 +16,6 @@ extern struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
u8 i2c_address, int dual_master,
enum fc001x_xtal_freq xtal_freq);
-extern int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val);
-extern int fc0013_rc_cal_reset(struct dvb_frontend *fe);
#else
static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
@@ -28,15 +26,6 @@ static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
return NULL;
}
-static inline int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
-{
- return 0;
-}
-
-static inline int fc0013_rc_cal_reset(struct dvb_frontend *fe)
-{
- return 0;
-}
#endif
#endif
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 6139ef5d891d..1cfec76b72f3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -2704,7 +2704,6 @@ int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value)
dev->gpio_dir = value;
status = cx231xx_set_gpio_bit(dev, dev->gpio_dir,
dev->gpio_val);
- value = 0;
}
if (pin_value == 0)
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 0d2c42819d39..218f712f56b1 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -322,13 +322,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ /* demod access via firmware interface */
+ u32 reg;
+
if (msg[0].len < 3 || msg[1].len < 1) {
ret = -EOPNOTSUPP;
goto unlock;
}
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
+
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
@@ -385,13 +388,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ /* demod access via firmware interface */
+ u32 reg;
+
if (msg[0].len < 3) {
ret = -EOPNOTSUPP;
goto unlock;
}
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
+
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
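
Both af9035 hunks hoist the 'u32 reg' declaration to the top of the block so the newly added message-length check runs before the three buffer bytes are combined into the register address (and no declaration follows a statement). A standalone sketch of validating the length before assembling the value from the first three bytes:

    #include <stdint.h>
    #include <stddef.h>

    /* Combine the first three message bytes into a register address, but only
     * after checking that the buffer really holds three bytes, mirroring the
     * reordering in the af9035 hunks above. */
    static int parse_reg(const uint8_t *buf, size_t len, uint32_t *reg_out)
    {
        uint32_t reg;

        if (len < 3)
            return -1;                  /* validate before touching buf[0..2] */

        reg = (uint32_t)buf[0] << 16 | (uint32_t)buf[1] << 8 | buf[2];
        *reg_out = reg;
        return 0;
    }
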
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 8a34e6c0d6a6..f0537b741d13 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -373,6 +373,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *lme_int = adap_to_priv(adap);
struct usb_host_endpoint *ep;
+ int ret;
lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -390,11 +391,20 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
/* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+ if (!ep) {
+ usb_free_urb(lme_int->lme_urb);
+ return -ENODEV;
+ }
if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
- usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
+ ret = usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
+ if (ret) {
+ usb_free_urb(lme_int->lme_urb);
+ return ret;
+ }
+
info("INT Interrupt Service Started");
return 0;
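
The lmedm04 hunk adds the missing error handling on the interrupt URB path: bail out if usb_pipe_endpoint() finds no endpoint, and check the return value of usb_submit_urb(), freeing the freshly allocated URB in both failure cases. A hedged sketch of that pattern; 'my_priv' and its fields are placeholders rather than the lme2510 structures, and the URB setup itself is elided.

    #include <linux/usb.h>
    #include <linux/errno.h>

    struct my_priv {
        struct urb *urb;
    };

    static int demo_int_start(struct usb_device *udev, struct my_priv *priv)
    {
        struct usb_host_endpoint *ep;
        int ret;

        priv->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!priv->urb)
            return -ENOMEM;

        /* ... interrupt URB fill / pipe setup elided ... */

        ep = usb_pipe_endpoint(udev, priv->urb->pipe);
        if (!ep) {
            usb_free_urb(priv->urb);    /* do not leak the URB on failure */
            return -ENODEV;
        }

        ret = usb_submit_urb(priv->urb, GFP_KERNEL);
        if (ret) {
            usb_free_urb(priv->urb);
            return ret;
        }

        return 0;
    }
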
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 4fe26e82e3d1..4e58476d305e 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1579,6 +1579,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
+static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_fh *new_handle)
+{
+ lockdep_assert_held(&handle->chain->ctrl_mutex);
+
+ if (new_handle) {
+ if (ctrl->handle)
+ dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ "UVC non compliance: Setting an async control with a pending operation.");
+
+ if (new_handle == ctrl->handle)
+ return;
+
+ if (ctrl->handle) {
+ WARN_ON(!ctrl->handle->pending_async_ctrls);
+ if (ctrl->handle->pending_async_ctrls)
+ ctrl->handle->pending_async_ctrls--;
+ }
+
+ ctrl->handle = new_handle;
+ handle->pending_async_ctrls++;
+ return;
+ }
+
+ /* Cannot clear the handle for a control not owned by us.*/
+ if (WARN_ON(ctrl->handle != handle))
+ return;
+
+ ctrl->handle = NULL;
+ if (WARN_ON(!handle->pending_async_ctrls))
+ return;
+ handle->pending_async_ctrls--;
+}
+
void uvc_ctrl_status_event(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const u8 *data)
{
@@ -1588,8 +1622,12 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
mutex_lock(&chain->ctrl_mutex);
+ /* Flush the control cache, the data might have changed. */
+ ctrl->loaded = 0;
+
handle = ctrl->handle;
- ctrl->handle = NULL;
+ if (handle)
+ uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
s32 value = __uvc_ctrl_get_value(mapping, data);
@@ -1640,10 +1678,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
struct uvc_device *dev = chain->dev;
struct uvc_ctrl_work *w = &dev->async_ctrl;
- if (list_empty(&ctrl->info.mappings)) {
- ctrl->handle = NULL;
+ if (list_empty(&ctrl->info.mappings))
return false;
- }
w->data = data;
w->urb = urb;
@@ -1673,13 +1709,13 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
{
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
- u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
unsigned int i;
unsigned int j;
for (i = 0; i < xctrls_count; ++i) {
- ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
/* Notification will be sent from an Interrupt event. */
continue;
@@ -1811,7 +1847,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
+ struct uvc_fh *handle,
+ struct uvc_entity *entity,
+ int rollback,
+ struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1859,6 +1898,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
*err_ctrl = ctrl;
return ret;
}
+
+ if (!rollback && handle &&
+ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ uvc_ctrl_set_handle(handle, ctrl, handle);
}
return 0;
@@ -1895,8 +1938,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
- &err_ctrl);
+ ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
+ rollback, &err_ctrl);
if (ret < 0) {
if (ctrls)
ctrls->error_idx =
@@ -1941,6 +1984,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
s32 max;
int ret;
+ lockdep_assert_held(&chain->ctrl_mutex);
+
if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
return -EACCES;
@@ -2046,9 +2091,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
- if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
- ctrl->handle = handle;
-
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
@@ -2377,7 +2419,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
+ ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2770,6 +2812,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
+{
+ struct uvc_entity *entity;
+
+ guard(mutex)(&handle->chain->ctrl_mutex);
+
+ if (!handle->pending_async_ctrls)
+ return;
+
+ list_for_each_entry(entity, &handle->chain->dev->entities, list) {
+ for (unsigned int i = 0; i < entity->ncontrols; ++i) {
+ if (entity->controls[i].handle != handle)
+ continue;
+ uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
+ }
+ }
+
+ WARN_ON(handle->pending_async_ctrls);
+}
+
/*
* Cleanup device controls.
*/
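
The uvc_ctrl changes introduce explicit ownership bookkeeping for asynchronous controls: uvc_ctrl_set_handle() moves a control's pending operation from one file handle to another while keeping each handle's pending_async_ctrls count in step, and uvc_ctrl_cleanup_fh() clears any controls still pointing at a handle when it goes away. A tiny standalone model of that counting, with illustrative types rather than the uvc structures:

    #include <assert.h>
    #include <stddef.h>

    struct demo_handle { unsigned int pending; };
    struct demo_ctrl   { struct demo_handle *owner; };

    /* Transfer ownership of a pending async operation between handles,
     * keeping each handle's pending count consistent. */
    static void demo_set_owner(struct demo_ctrl *ctrl, struct demo_handle *h)
    {
        if (ctrl->owner == h)
            return;
        if (ctrl->owner)
            ctrl->owner->pending--;      /* old owner loses one pending op */
        ctrl->owner = h;
        if (h)
            h->pending++;                /* new owner gains one pending op */
    }

    /* On handle release, drop every control that still points at it. */
    static void demo_cleanup_handle(struct demo_ctrl *ctrls, size_t n,
                                    struct demo_handle *h)
    {
        for (size_t i = 0; i < n; i++)
            if (ctrls[i].owner == h)
                demo_set_owner(&ctrls[i], NULL);
        assert(h->pending == 0);
    }

    int main(void)
    {
        struct demo_handle h = { 0 };
        struct demo_ctrl c[2] = { { 0 }, { 0 } };

        demo_set_owner(&c[0], &h);
        demo_set_owner(&c[1], &h);
        demo_cleanup_handle(c, 2, &h);   /* mirrors uvc_ctrl_cleanup_fh() */
        return 0;
    }
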
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index b3c8411dc05c..deadbcea5e22 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -32,7 +32,7 @@
unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_hw_timestamps_param;
-unsigned int uvc_no_drop_param;
+unsigned int uvc_no_drop_param = 1;
static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
@@ -220,20 +220,127 @@ static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
* Descriptors parsing
*/
+static int uvc_parse_frame(struct uvc_device *dev,
+ struct uvc_streaming *streaming,
+ struct uvc_format *format, struct uvc_frame *frame,
+ u32 **intervals, u8 ftype, int width_multiplier,
+ const unsigned char *buffer, int buflen)
+{
+ struct usb_host_interface *alts = streaming->intf->cur_altsetting;
+ unsigned int maxIntervalIndex;
+ unsigned int interval;
+ unsigned int i, n;
+
+ if (ftype != UVC_VS_FRAME_FRAME_BASED)
+ n = buflen > 25 ? buffer[25] : 0;
+ else
+ n = buflen > 21 ? buffer[21] : 0;
+
+ n = n ? n : 3;
+
+ if (buflen < 26 + 4 * n) {
+ uvc_dbg(dev, DESCR,
+ "device %d videostreaming interface %d FRAME error\n",
+ dev->udev->devnum, alts->desc.bInterfaceNumber);
+ return -EINVAL;
+ }
+
+ frame->bFrameIndex = buffer[3];
+ frame->bmCapabilities = buffer[4];
+ frame->wWidth = get_unaligned_le16(&buffer[5]) * width_multiplier;
+ frame->wHeight = get_unaligned_le16(&buffer[7]);
+ frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
+ frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
+ if (ftype != UVC_VS_FRAME_FRAME_BASED) {
+ frame->dwMaxVideoFrameBufferSize =
+ get_unaligned_le32(&buffer[17]);
+ frame->dwDefaultFrameInterval =
+ get_unaligned_le32(&buffer[21]);
+ frame->bFrameIntervalType = buffer[25];
+ } else {
+ frame->dwMaxVideoFrameBufferSize = 0;
+ frame->dwDefaultFrameInterval =
+ get_unaligned_le32(&buffer[17]);
+ frame->bFrameIntervalType = buffer[21];
+ }
+
+ /*
+ * Copy the frame intervals.
+ *
+ * Some bogus devices report dwMinFrameInterval equal to
+ * dwMaxFrameInterval and have dwFrameIntervalStep set to zero. Setting
+ * all null intervals to 1 fixes the problem and some other divisions
+ * by zero that could happen.
+ */
+ frame->dwFrameInterval = *intervals;
+
+ for (i = 0; i < n; ++i) {
+ interval = get_unaligned_le32(&buffer[26 + 4 * i]);
+ (*intervals)[i] = interval ? interval : 1;
+ }
+
+ /*
+ * Apply more fixes, quirks and workarounds to handle incorrect or
+ * broken descriptors.
+ */
+
+ /*
+ * Several UVC chipsets screw up dwMaxVideoFrameBufferSize completely.
+ * Observed behaviours range from setting the value to 1.1x the actual
+ * frame size to hardwiring the 16 low bits to 0. This results in a
+ * higher than necessary memory usage as well as a wrong image size
+ * information. For uncompressed formats this can be fixed by computing
+ * the value from the frame size.
+ */
+ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED))
+ frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth
+ * frame->wHeight / 8;
+
+ /*
+ * Clamp the default frame interval to the boundaries. A zero
+ * bFrameIntervalType value indicates a continuous frame interval
+ * range, with dwFrameInterval[0] storing the minimum value and
+ * dwFrameInterval[1] storing the maximum value.
+ */
+ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1;
+ frame->dwDefaultFrameInterval =
+ clamp(frame->dwDefaultFrameInterval,
+ frame->dwFrameInterval[0],
+ frame->dwFrameInterval[maxIntervalIndex]);
+
+ /*
+ * Some devices report frame intervals that are not functional. If the
+ * corresponding quirk is set, restrict operation to the first interval
+ * only.
+ */
+ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
+ frame->bFrameIntervalType = 1;
+ (*intervals)[0] = frame->dwDefaultFrameInterval;
+ }
+
+ uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
+ frame->wWidth, frame->wHeight,
+ 10000000 / frame->dwDefaultFrameInterval,
+ (100000000 / frame->dwDefaultFrameInterval) % 10);
+
+ *intervals += n;
+
+ return buffer[0];
+}
+
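The interval fields parsed above are in 100 ns units, which is why the debug message prints 10000000 / dwDefaultFrameInterval as a frame rate and why the default interval is clamped between the advertised minimum and maximum. A minimal standalone sketch of the same arithmetic, with made-up values (not part of the patch):

/* interval_demo.c - 100 ns frame-interval clamp and fps conversion */
#include <stdio.h>
#include <stdint.h>

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* hypothetical descriptor values, in 100 ns units */
	uint32_t intervals[] = { 333333, 400000, 666666 };	/* ~30, 25 and 15 fps */
	uint32_t dflt = 1000000;	/* bogus default, outside the advertised range */

	dflt = clamp_u32(dflt, intervals[0], intervals[2]);
	printf("- (%u.%u fps)\n", 10000000 / dflt, (100000000 / dflt) % 10);
	return 0;
}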
static int uvc_parse_format(struct uvc_device *dev,
struct uvc_streaming *streaming, struct uvc_format *format,
struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer,
int buflen)
{
- struct usb_interface *intf = streaming->intf;
- struct usb_host_interface *alts = intf->cur_altsetting;
+ struct usb_host_interface *alts = streaming->intf->cur_altsetting;
const struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
unsigned int width_multiplier = 1;
- unsigned int interval;
unsigned int i, n;
u8 ftype;
+ int ret;
format->type = buffer[2];
format->index = buffer[3];
@@ -371,111 +478,19 @@ static int uvc_parse_format(struct uvc_device *dev,
* Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
- while (ftype && buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
- buffer[2] == ftype) {
- unsigned int maxIntervalIndex;
-
- frame = &frames[format->nframes];
- if (ftype != UVC_VS_FRAME_FRAME_BASED)
- n = buflen > 25 ? buffer[25] : 0;
- else
- n = buflen > 21 ? buffer[21] : 0;
-
- n = n ? n : 3;
-
- if (buflen < 26 + 4*n) {
- uvc_dbg(dev, DESCR,
- "device %d videostreaming interface %d FRAME error\n",
- dev->udev->devnum,
- alts->desc.bInterfaceNumber);
- return -EINVAL;
- }
-
- frame->bFrameIndex = buffer[3];
- frame->bmCapabilities = buffer[4];
- frame->wWidth = get_unaligned_le16(&buffer[5])
- * width_multiplier;
- frame->wHeight = get_unaligned_le16(&buffer[7]);
- frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
- frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
- if (ftype != UVC_VS_FRAME_FRAME_BASED) {
- frame->dwMaxVideoFrameBufferSize =
- get_unaligned_le32(&buffer[17]);
- frame->dwDefaultFrameInterval =
- get_unaligned_le32(&buffer[21]);
- frame->bFrameIntervalType = buffer[25];
- } else {
- frame->dwMaxVideoFrameBufferSize = 0;
- frame->dwDefaultFrameInterval =
- get_unaligned_le32(&buffer[17]);
- frame->bFrameIntervalType = buffer[21];
- }
-
- /*
- * Copy the frame intervals.
- *
- * Some bogus devices report dwMinFrameInterval equal to
- * dwMaxFrameInterval and have dwFrameIntervalStep set to
- * zero. Setting all null intervals to 1 fixes the problem and
- * some other divisions by zero that could happen.
- */
- frame->dwFrameInterval = *intervals;
-
- for (i = 0; i < n; ++i) {
- interval = get_unaligned_le32(&buffer[26+4*i]);
- (*intervals)[i] = interval ? interval : 1;
- }
-
- /*
- * Apply more fixes, quirks and workarounds to handle incorrect
- * or broken descriptors.
- */
-
- /*
- * Several UVC chipsets screw up dwMaxVideoFrameBufferSize
- * completely. Observed behaviours range from setting the
- * value to 1.1x the actual frame size to hardwiring the
- * 16 low bits to 0. This results in a higher than necessary
- * memory usage as well as a wrong image size information. For
- * uncompressed formats this can be fixed by computing the
- * value from the frame size.
- */
- if (!(format->flags & UVC_FMT_FLAG_COMPRESSED))
- frame->dwMaxVideoFrameBufferSize = format->bpp
- * frame->wWidth * frame->wHeight / 8;
-
- /*
- * Clamp the default frame interval to the boundaries. A zero
- * bFrameIntervalType value indicates a continuous frame
- * interval range, with dwFrameInterval[0] storing the minimum
- * value and dwFrameInterval[1] storing the maximum value.
- */
- maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1;
- frame->dwDefaultFrameInterval =
- clamp(frame->dwDefaultFrameInterval,
- frame->dwFrameInterval[0],
- frame->dwFrameInterval[maxIntervalIndex]);
-
- /*
- * Some devices report frame intervals that are not functional.
- * If the corresponding quirk is set, restrict operation to the
- * first interval only.
- */
- if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
- frame->bFrameIntervalType = 1;
- (*intervals)[0] = frame->dwDefaultFrameInterval;
+ if (ftype) {
+ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
+ buffer[2] == ftype) {
+ frame = &frames[format->nframes];
+ ret = uvc_parse_frame(dev, streaming, format, frame,
+ intervals, ftype, width_multiplier,
+ buffer, buflen);
+ if (ret < 0)
+ return ret;
+ format->nframes++;
+ buflen -= ret;
+ buffer += ret;
}
-
- uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n",
- frame->wWidth, frame->wHeight,
- 10000000 / frame->dwDefaultFrameInterval,
- (100000000 / frame->dwDefaultFrameInterval) % 10);
-
- format->nframes++;
- *intervals += n;
-
- buflen -= buffer[0];
- buffer += buffer[0];
}
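Each class-specific frame descriptor starts with its own length in buffer[0], which is what uvc_parse_frame() returns so that the loop above can step to the next descriptor. A toy sketch of walking length-prefixed records, with made-up data:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* three records, each prefixed by its own total length */
	const uint8_t buf[] = { 3, 'a', 'b',  4, 'c', 'd', 'e',  2, 'f' };
	const uint8_t *p = buf;
	int len = sizeof(buf);

	while (len > 1 && p[0] >= 2 && p[0] <= len) {
		printf("record of %u bytes\n", p[0]);
		len -= p[0];
		p += p[0];
	}
	return 0;
}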
if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
@@ -775,27 +790,14 @@ static const u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
- u16 id, unsigned int num_pads,
- unsigned int extra_size)
+static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ unsigned int num_pads, unsigned int extra_size)
{
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
unsigned int i;
- /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
- if (id == 0) {
- dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
- return ERR_PTR(-EINVAL);
- }
-
- /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
- if (uvc_entity_by_id(dev, id)) {
- dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
- return ERR_PTR(-EINVAL);
- }
-
extra_size = roundup(extra_size, sizeof(*entity->pads));
if (num_pads)
num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
@@ -805,7 +807,7 @@ static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
entity->id = id;
entity->type = type;
@@ -917,10 +919,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
break;
}
- unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
- buffer[3], p + 1, 2 * n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+ p + 1, 2*n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1029,10 +1031,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
- buffer[3], 1, n + p);
- if (IS_ERR(term))
- return PTR_ERR(term);
+ term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+ 1, n + p);
+ if (term == NULL)
+ return -ENOMEM;
if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
@@ -1088,10 +1090,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return 0;
}
- term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
- buffer[3], 1, 0);
- if (IS_ERR(term))
- return PTR_ERR(term);
+ term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+ 1, 0);
+ if (term == NULL)
+ return -ENOMEM;
memcpy(term->baSourceID, &buffer[7], 1);
@@ -1110,10 +1112,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
- p + 1, 0);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->baSourceID, &buffer[5], p);
@@ -1133,9 +1134,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->baSourceID, &buffer[4], 1);
unit->processing.wMaxMultiplier =
@@ -1162,10 +1163,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
- p + 1, n);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+ if (unit == NULL)
+ return -ENOMEM;
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1295,20 +1295,19 @@ static int uvc_gpio_parse(struct uvc_device *dev)
struct gpio_desc *gpio_privacy;
int irq;
- gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
+ gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
GPIOD_IN);
if (IS_ERR_OR_NULL(gpio_privacy))
return PTR_ERR_OR_ZERO(gpio_privacy);
irq = gpiod_to_irq(gpio_privacy);
if (irq < 0)
- return dev_err_probe(&dev->udev->dev, irq,
+ return dev_err_probe(&dev->intf->dev, irq,
"No IRQ for privacy GPIO\n");
- unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
- UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (IS_ERR(unit))
- return PTR_ERR(unit);
+ unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (!unit)
+ return -ENOMEM;
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
@@ -1329,15 +1328,27 @@ static int uvc_gpio_parse(struct uvc_device *dev)
static int uvc_gpio_init_irq(struct uvc_device *dev)
{
struct uvc_entity *unit = dev->gpio_unit;
+ int ret;
if (!unit || unit->gpio.irq < 0)
return 0;
- return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
- uvc_gpio_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING,
- "uvc_privacy_gpio", dev);
+ ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ "uvc_privacy_gpio", dev);
+
+ unit->gpio.initialized = !ret;
+
+ return ret;
+}
+
+static void uvc_gpio_deinit(struct uvc_device *dev)
+{
+ if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized)
+ return;
+
+ free_irq(dev->gpio_unit->gpio.irq, dev);
}
/* ------------------------------------------------------------------------
@@ -1934,6 +1945,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
{
struct uvc_streaming *stream;
+ uvc_gpio_deinit(dev);
+
list_for_each_entry(stream, &dev->streams, list) {
/* Nothing to do here, continue. */
if (!video_is_registered(&stream->vdev))
@@ -1995,7 +2008,7 @@ int uvc_register_video_device(struct uvc_device *dev,
int ret;
/* Initialize the video buffers queue. */
- ret = uvc_queue_init(queue, type, !uvc_no_drop_param);
+ ret = uvc_queue_init(queue, type);
if (ret)
return ret;
@@ -2424,8 +2437,25 @@ module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644);
MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
-module_param_named(nodrop, uvc_no_drop_param, uint, 0644);
+
+static int param_set_nodrop(const char *val, const struct kernel_param *kp)
+{
+ pr_warn_once("uvcvideo: "
+ DEPRECATED
+ "nodrop parameter will be eventually removed.\n");
+ return param_set_bool(val, kp);
+}
+
+static const struct kernel_param_ops param_ops_nodrop = {
+ .set = param_set_nodrop,
+ .get = param_get_uint,
+};
+
+param_check_uint(nodrop, &uvc_no_drop_param);
+module_param_cb(nodrop, &param_ops_nodrop, &uvc_no_drop_param, 0644);
+__MODULE_PARM_TYPE(nodrop, "uint");
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
+
module_param_named(quirks, uvc_quirks_param, uint, 0644);
MODULE_PARM_DESC(quirks, "Forced device quirks");
module_param_named(trace, uvc_dbg_param, uint, 0644);
@@ -2802,6 +2832,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0c45,
+ .idProduct = 0x6366,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* MT6227 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2830,6 +2869,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Kurokesu C1 PRO */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x16d0,
+ .idProduct = 0x0ed1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* Syntek (HP Spartan) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 26ee85657fc8..2ee142621042 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -208,8 +208,7 @@ static const struct vb2_ops uvc_meta_queue_qops = {
.stop_streaming = uvc_stop_streaming,
};
-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
- int drop_corrupted)
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
int ret;
@@ -239,7 +238,6 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
mutex_init(&queue->mutex);
spin_lock_init(&queue->irqlock);
INIT_LIST_HEAD(&queue->irqqueue);
- queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
return 0;
}
@@ -472,14 +470,15 @@ static void uvc_queue_buffer_complete(struct kref *ref)
struct vb2_buffer *vb = &buf->buf.vb2_buf;
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
+ if (buf->error && !uvc_no_drop_param) {
uvc_queue_buffer_requeue(queue, buf);
return;
}
buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
- vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
}
/*
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 06c867510c8f..ee01dce4b783 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -262,8 +262,6 @@ int uvc_status_init(struct uvc_device *dev)
if (ep == NULL)
return 0;
- uvc_input_init(dev);
-
dev->status = kzalloc(sizeof(*dev->status), GFP_KERNEL);
if (!dev->status)
return -ENOMEM;
@@ -271,6 +269,7 @@ int uvc_status_init(struct uvc_device *dev)
dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->int_urb) {
kfree(dev->status);
+ dev->status = NULL;
return -ENOMEM;
}
@@ -289,11 +288,16 @@ int uvc_status_init(struct uvc_device *dev)
dev->status, sizeof(*dev->status), uvc_status_complete,
dev, interval);
+ uvc_input_init(dev);
+
return 0;
}
void uvc_status_unregister(struct uvc_device *dev)
{
+ if (!dev->status)
+ return;
+
uvc_status_suspend(dev);
uvc_input_unregister(dev);
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 97c5407f6603..93c6cdb23881 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -26,6 +26,8 @@
#include "uvcvideo.h"
+static int uvc_acquire_privileges(struct uvc_fh *handle);
+
static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
struct uvc_control_mapping *map,
const struct uvc_xu_control_mapping *xmap)
@@ -361,9 +363,11 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
return ret;
}
-static int uvc_v4l2_get_format(struct uvc_streaming *stream,
- struct v4l2_format *fmt)
+static int uvc_ioctl_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format;
const struct uvc_frame *frame;
int ret = 0;
@@ -395,14 +399,20 @@ done:
return ret;
}
-static int uvc_v4l2_set_format(struct uvc_streaming *stream,
- struct v4l2_format *fmt)
+static int uvc_ioctl_s_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
const struct uvc_format *format;
const struct uvc_frame *frame;
int ret;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
if (fmt->type != stream->type)
return -EINVAL;
@@ -426,10 +436,12 @@ done:
return ret;
}
-static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
- struct v4l2_streamparm *parm)
+static int uvc_ioctl_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
u32 numerator, denominator;
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
if (parm->type != stream->type)
return -EINVAL;
@@ -461,9 +473,11 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
return 0;
}
-static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
- struct v4l2_streamparm *parm)
+static int uvc_ioctl_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
struct v4l2_fract timeperframe;
const struct uvc_format *format;
@@ -472,6 +486,10 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
unsigned int i;
int ret;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
if (parm->type != stream->type)
return -EINVAL;
@@ -573,6 +591,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
* - VIDIOC_S_INPUT
* - VIDIOC_S_PARM
* - VIDIOC_S_FMT
+ * - VIDIOC_CREATE_BUFS
* - VIDIOC_REQBUFS
*/
static int uvc_acquire_privileges(struct uvc_fh *handle)
@@ -652,6 +671,8 @@ static int uvc_v4l2_release(struct file *file)
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
+ uvc_ctrl_cleanup_fh(handle);
+
/* Only free resources if this is a privileged handle. */
if (uvc_has_privileges(handle))
uvc_queue_release(&stream->queue);
@@ -685,11 +706,13 @@ static int uvc_ioctl_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
+static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- const struct uvc_format *format;
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
enum v4l2_buf_type type = fmt->type;
+ const struct uvc_format *format;
u32 index = fmt->index;
if (fmt->type != stream->type || fmt->index >= stream->nformats)
@@ -707,82 +730,8 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
return 0;
}
-static int uvc_ioctl_enum_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_ioctl_enum_fmt(stream, fmt);
-}
-
-static int uvc_ioctl_enum_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_fmtdesc *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_ioctl_enum_fmt(stream, fmt);
-}
-
-static int uvc_ioctl_g_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_format(stream, fmt);
-}
-
-static int uvc_ioctl_g_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_format(stream, fmt);
-}
-
-static int uvc_ioctl_s_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_format(stream, fmt);
-}
-
-static int uvc_ioctl_s_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_format(stream, fmt);
-}
-
-static int uvc_ioctl_try_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *fmt)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- struct uvc_streaming_control probe;
-
- return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
-}
-
-static int uvc_ioctl_try_fmt_vid_out(struct file *file, void *fh,
- struct v4l2_format *fmt)
+static int uvc_ioctl_try_fmt(struct file *file, void *fh,
+ struct v4l2_format *fmt)
{
struct uvc_fh *handle = fh;
struct uvc_streaming *stream = handle->stream;
@@ -1212,29 +1161,6 @@ static int uvc_ioctl_g_selection(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_parm(struct file *file, void *fh,
- struct v4l2_streamparm *parm)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
-
- return uvc_v4l2_get_streamparm(stream, parm);
-}
-
-static int uvc_ioctl_s_parm(struct file *file, void *fh,
- struct v4l2_streamparm *parm)
-{
- struct uvc_fh *handle = fh;
- struct uvc_streaming *stream = handle->stream;
- int ret;
-
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
-
- return uvc_v4l2_set_streamparm(stream, parm);
-}
-
static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -1543,15 +1469,17 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
#endif
const struct v4l2_ioctl_ops uvc_ioctl_ops = {
+ .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt,
+ .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt,
+ .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt,
+ .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt,
+ .vidioc_g_parm = uvc_ioctl_g_parm,
+ .vidioc_s_parm = uvc_ioctl_s_parm,
.vidioc_querycap = uvc_ioctl_querycap,
- .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt_vid_cap,
- .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt_vid_out,
- .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt_vid_cap,
- .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt_vid_out,
- .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt_vid_cap,
- .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt,
+ .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt,
+ .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt,
+ .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt,
.vidioc_reqbufs = uvc_ioctl_reqbufs,
.vidioc_querybuf = uvc_ioctl_querybuf,
.vidioc_qbuf = uvc_ioctl_qbuf,
@@ -1570,8 +1498,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
.vidioc_querymenu = uvc_ioctl_querymenu,
.vidioc_g_selection = uvc_ioctl_g_selection,
- .vidioc_g_parm = uvc_ioctl_g_parm,
- .vidioc_s_parm = uvc_ioctl_s_parm,
.vidioc_enum_framesizes = uvc_ioctl_enum_framesizes,
.vidioc_enum_frameintervals = uvc_ioctl_enum_frameintervals,
.vidioc_subscribe_event = uvc_ioctl_subscribe_event,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index e00f38dd07d9..e3567aeb0007 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/unaligned.h>
+#include <media/jpeg.h>
#include <media/v4l2-common.h>
#include "uvcvideo.h"
@@ -79,6 +80,27 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (likely(ret == size))
return 0;
+ /*
+ * Some devices return shorter USB control packets than expected if the
+ * returned value can fit in less bytes. Zero all the bytes that the
+ * device has not written.
+ *
+ * This quirk is applied to all controls, regardless of their data type.
+ * Most controls are little-endian integers, in which case the missing
+ * bytes become 0 MSBs. For other data types, a different heuristic
+ * could be implemented if a device is found needing it.
+ *
+ * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need
+ * to be excluded because its size is always 1.
+ */
+ if (ret > 0 && query != UVC_GET_INFO) {
+ memset(data + ret, 0, size - ret);
+ dev_warn_once(&dev->udev->dev,
+ "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
+ uvc_query_name(query), cs, unit, ret, size);
+ return 0;
+ }
+
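The zero-padding above works because most UVC controls are little-endian integers, so the bytes the device omitted become the most significant ones. A hypothetical userspace sketch of the same idea; decode_le() is an illustrative helper, not a driver function:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Zero-extend a short little-endian read so the value decodes unchanged. */
static uint32_t decode_le(uint8_t *buf, int got, int expected)
{
	if (got < expected)
		memset(buf + got, 0, expected - got);	/* missing MSBs read as 0 */
	return buf[0] | buf[1] << 8 |
	       (uint32_t)buf[2] << 16 | (uint32_t)buf[3] << 24;
}

int main(void)
{
	uint8_t buf[4] = { 0x2a, 0x01 };	/* device returned 2 of the 4 expected bytes */

	printf("%u\n", decode_le(buf, 2, 4));	/* prints 298 */
	return 0;
}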
if (ret != -EPIPE) {
dev_err(&dev->udev->dev,
"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
@@ -96,8 +118,12 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
error = *(u8 *)data;
*(u8 *)data = tmp;
- if (ret != 1)
+ if (ret != 1) {
+ dev_err_ratelimited(&dev->udev->dev,
+ "Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n",
+ uvc_query_name(query), cs, unit, ret);
return ret < 0 ? ret : -EPIPE;
+ }
uvc_dbg(dev, CONTROL, "Control error %u\n", error);
@@ -297,8 +323,9 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
goto out;
} else if (ret != size) {
dev_err(&stream->intf->dev,
- "Failed to query (%u) UVC %s control : %d (exp. %u).\n",
- query, probe ? "probe" : "commit", ret, size);
+ "Failed to query (%s) UVC %s control : %d (exp. %u).\n",
+ uvc_query_name(query), probe ? "probe" : "commit",
+ ret, size);
ret = (ret == -EPROTO) ? -EPROTO : -EIO;
goto out;
}
@@ -1116,6 +1143,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
static int uvc_video_decode_start(struct uvc_streaming *stream,
struct uvc_buffer *buf, const u8 *data, int len)
{
+ u8 header_len;
u8 fid;
/*
@@ -1129,6 +1157,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EINVAL;
}
+ header_len = data[0];
fid = data[1] & UVC_STREAM_FID;
/*
@@ -1210,9 +1239,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EAGAIN;
}
+ /*
+ * Some cameras, when running two parallel streams (one MJPEG alongside
+ * another non-MJPEG stream), are known to lose the EOF packet for a frame.
+ * We can detect the end of a frame by checking for a new SOI marker, as
+ * the SOI always lies on the packet boundary between two frames for
+ * these devices.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
+ (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
+ stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
+ const u8 *packet = data + header_len;
+
+ if (len >= header_len + 2 &&
+ packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
+ buf->bytesused != 0) {
+ buf->state = UVC_BUF_STATE_READY;
+ buf->error = 1;
+ stream->last_fid ^= UVC_STREAM_FID;
+ return -EAGAIN;
+ }
+ }
+
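The end-of-frame heuristic above only needs the two-byte JPEG start-of-image marker (0xff 0xd8) at the start of the packet payload. A standalone sketch of the check; the JPEG_MARKER_SOI value is assumed to match the definition in <media/jpeg.h>:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define JPEG_MARKER_SOI	0xd8	/* assumed value of the kernel define */

static bool packet_starts_new_frame(const uint8_t *data, int len, int header_len)
{
	const uint8_t *payload = data + header_len;

	return len >= header_len + 2 &&
	       payload[0] == 0xff && payload[1] == JPEG_MARKER_SOI;
}

int main(void)
{
	/* 12-byte UVC payload header followed by the SOI of the next frame */
	const uint8_t pkt[16] = { 0x0c, 0x8d, [12] = 0xff, 0xd8, 0xff, 0xe0 };

	printf("%d\n", packet_starts_new_frame(pkt, sizeof(pkt), pkt[0]));	/* 1 */
	return 0;
}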
stream->last_fid = fid;
- return data[0];
+ return header_len;
}
static inline enum dma_data_direction uvc_stream_dir(
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 07f9921d83f2..5e388f05f3fc 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -76,6 +76,7 @@
#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
+#define UVC_QUIRK_MJPEG_NO_EOF 0x00020000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -234,6 +235,7 @@ struct uvc_entity {
u8 *bmControls;
struct gpio_desc *gpio_privacy;
int irq;
+ bool initialized;
} gpio;
};
@@ -316,7 +318,6 @@ struct uvc_buffer {
};
#define UVC_QUEUE_DISCONNECTED (1 << 0)
-#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
struct uvc_video_queue {
struct vb2_queue queue;
@@ -337,7 +338,11 @@ struct uvc_video_chain {
struct uvc_entity *processing; /* Processing unit */
struct uvc_entity *selector; /* Selector unit */
- struct mutex ctrl_mutex; /* Protects ctrl.info */
+ struct mutex ctrl_mutex; /*
+ * Protects ctrl.info,
+ * ctrl.handle and
+ * uvc_fh.pending_async_ctrls
+ */
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
@@ -612,6 +617,7 @@ struct uvc_fh {
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
enum uvc_handle_state state;
+ unsigned int pending_async_ctrls;
};
struct uvc_driver {
@@ -674,8 +680,7 @@ extern struct uvc_driver uvc_driver;
struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
- int drop_corrupted);
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type);
void uvc_queue_release(struct uvc_video_queue *queue);
int uvc_request_buffers(struct uvc_video_queue *queue,
struct v4l2_requestbuffers *rb);
@@ -797,6 +802,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
+
/* Utility functions */
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index f19c8adf2c61..cb153ce42c45 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -127,7 +127,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
{
struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
bool have_clk_lane = false, have_data_lanes = false,
- have_lane_polarities = false;
+ have_lane_polarities = false, have_line_orders = false;
unsigned int flags = 0, lanes_used = 0;
u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
u32 clock_lane = 0;
@@ -197,6 +197,17 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
have_lane_polarities = true;
}
+ rval = fwnode_property_count_u32(fwnode, "line-orders");
+ if (rval > 0) {
+ if (rval != num_data_lanes) {
+ pr_warn("invalid number of line-orders entries (need %u, got %u)\n",
+ num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ have_line_orders = true;
+ }
+
if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
clock_lane = v;
pr_debug("clock lane position %u\n", v);
@@ -250,6 +261,36 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
} else {
pr_debug("no lane polarities defined, assuming not inverted\n");
}
+
+ if (have_line_orders) {
+ fwnode_property_read_u32_array(fwnode,
+ "line-orders", array,
+ num_data_lanes);
+
+ for (i = 0; i < num_data_lanes; i++) {
+ static const char * const orders[] = {
+ "ABC", "ACB", "BAC", "BCA", "CAB", "CBA"
+ };
+
+ if (array[i] >= ARRAY_SIZE(orders)) {
+ pr_warn("lane %u invalid line-order assuming ABC (got %u)\n",
+ i, array[i]);
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+ continue;
+ }
+
+ bus->line_orders[i] = array[i];
+ pr_debug("lane %u line order %s", i,
+ orders[array[i]]);
+ }
+ } else {
+ for (i = 0; i < num_data_lanes; i++)
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+
+ pr_debug("no line orders defined, assuming ABC\n");
+ }
}
return 0;
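The line-orders parser above maps each data lane to one of six C-PHY wire orders, indexed 0 through 5, and falls back to ABC for out-of-range values. A small standalone sketch of that lookup, with plain strings standing in for the V4L2_MBUS_CSI2_CPHY_LINE_ORDER_* constants:

#include <stdio.h>

static const char *const orders[] = { "ABC", "ACB", "BAC", "BCA", "CAB", "CBA" };

static const char *line_order_name(unsigned int v)
{
	/* out-of-range values fall back to ABC, mirroring the parser above */
	return v < sizeof(orders) / sizeof(orders[0]) ? orders[v] : orders[0];
}

int main(void)
{
	unsigned int lanes[] = { 0, 5, 9 };	/* made-up property values */

	for (unsigned int i = 0; i < 3; i++)
		printf("lane %u line order %s\n", i, line_order_name(lanes[i]));
	return 0;
}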
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 4bb91359e3a9..937d358697e1 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -329,7 +329,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
if (!(sink->flags & MEDIA_PAD_FL_SINK))
return -EINVAL;
- fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
+ fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) {
struct fwnode_handle *remote_ep;
int src_idx, sink_idx, ret;
struct media_pad *src;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 50eb9f49512b..e2a75a52563f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -358,17 +358,6 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
- enum gpmc_clk_domain cd)
-{
- return ticks * gpmc_get_clk_period(cs, cd) / 1000;
-}
-
-unsigned int gpmc_ticks_to_ns(unsigned int ticks)
-{
- return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
-}
-
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
return ticks * gpmc_get_fclk_period();
@@ -415,6 +404,13 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
}
#ifdef CONFIG_OMAP_GPMC_DEBUG
+
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
+{
+ return ticks * gpmc_get_clk_period(cs, cd) / 1000;
+}
+
/**
* get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
* @cs: Chip Select Region
@@ -1295,21 +1291,6 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
}
EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
-int gpmc_get_client_irq(unsigned int irq_config)
-{
- if (!gpmc_irq_domain) {
- pr_warn("%s called before GPMC IRQ domain available\n",
- __func__);
- return 0;
- }
-
- /* we restrict this to NAND IRQs only */
- if (irq_config >= GPMC_NR_NAND_IRQS)
- return 0;
-
- return irq_create_mapping(gpmc_irq_domain, irq_config);
-}
-
static int gpmc_irq_endis(unsigned long hwirq, bool endis)
{
u32 regval;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 7193f848d17e..9b7d30a21a5b 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -474,14 +474,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
ram_code = tegra_read_ram_code();
- for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
- np = of_find_node_by_name(np, "emc-tables")) {
+ for_each_child_of_node(dev->of_node, np) {
+ if (!of_node_name_eq(np, "emc-tables"))
+ continue;
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
struct device_node *lpddr2_np;
bool cfg_mismatches = false;
- lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ lpddr2_np = of_get_child_by_name(np, "lpddr2");
if (lpddr2_np) {
const struct lpddr2_info *info;
@@ -518,7 +519,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
}
if (cfg_mismatches) {
- of_node_put(np);
continue;
}
}
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index d54dc3cfff73..c8b83c9edbd5 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -13,7 +13,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -69,39 +71,27 @@
#define ACR_SSTROBE_MASK BIT(31)
#define ASIZE_16BIT 1
-#define CONFIG_MASK (TA(TA_MAX) | \
- RHOLD(RHOLD_MAX) | \
- RSTROBE(RSTROBE_MAX) | \
- RSETUP(RSETUP_MAX) | \
- WHOLD(WHOLD_MAX) | \
- WSTROBE(WSTROBE_MAX) | \
- WSETUP(WSETUP_MAX) | \
- EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
- ASIZE_MAX)
+#define TIMINGS_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX))
+
+#define CONFIG_MASK (EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | ASIZE_MAX)
/**
- * struct aemif_cs_data: structure to hold cs parameters
+ * struct aemif_cs_data: structure to hold CS parameters
+ * @timings: timings configuration
* @cs: chip-select number
- * @wstrobe: write strobe width, ns
- * @rstrobe: read strobe width, ns
- * @wsetup: write setup width, ns
- * @whold: write hold width, ns
- * @rsetup: read setup width, ns
- * @rhold: read hold width, ns
- * @ta: minimum turn around time, ns
* @enable_ss: enable/disable select strobe mode
* @enable_ew: enable/disable extended wait mode
* @asize: width of the asynchronous device's data bus
*/
struct aemif_cs_data {
+ struct aemif_cs_timings timings;
u8 cs;
- u16 wstrobe;
- u16 rstrobe;
- u8 wsetup;
- u8 whold;
- u8 rsetup;
- u8 rhold;
- u8 ta;
u8 enable_ss;
u8 enable_ew;
u8 asize;
@@ -115,6 +105,7 @@ struct aemif_cs_data {
* @num_cs: number of assigned chip-selects
* @cs_offset: start number of cs nodes
* @cs_data: array of chip-select settings
+ * @config_cs_lock: lock used to access CS configuration
*/
struct aemif_device {
void __iomem *base;
@@ -123,20 +114,94 @@ struct aemif_device {
u8 num_cs;
int cs_offset;
struct aemif_cs_data cs_data[NUM_CS];
+ struct mutex config_cs_lock;
};
/**
+ * aemif_check_cs_timings() - Check the validity of a CS timing configuration.
+ * @timings: timings configuration
+ *
+ * @return: 0 if the timing configuration is valid, negative error number otherwise.
+ */
+int aemif_check_cs_timings(struct aemif_cs_timings *timings)
+{
+ if (timings->ta > TA_MAX)
+ return -EINVAL;
+
+ if (timings->rhold > RHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->rstrobe > RSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->rsetup > RSETUP_MAX)
+ return -EINVAL;
+
+ if (timings->whold > WHOLD_MAX)
+ return -EINVAL;
+
+ if (timings->wstrobe > WSTROBE_MAX)
+ return -EINVAL;
+
+ if (timings->wsetup > WSETUP_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_check_cs_timings);
+
+/**
+ * aemif_set_cs_timings() - Set the timing configuration of a given chip select.
+ * @aemif: aemif device to configure
+ * @cs: index of the chip select to configure
+ * @timings: timings configuration to set
+ *
+ * @return: 0 on success, else negative errno.
+ */
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs,
+ struct aemif_cs_timings *timings)
+{
+ unsigned int offset;
+ u32 val, set;
+ int ret;
+
+ if (!timings || !aemif)
+ return -EINVAL;
+
+ if (cs > aemif->num_cs)
+ return -EINVAL;
+
+ ret = aemif_check_cs_timings(timings);
+ if (ret)
+ return ret;
+
+ set = TA(timings->ta) | RHOLD(timings->rhold) | RSTROBE(timings->rstrobe) |
+ RSETUP(timings->rsetup) | WHOLD(timings->whold) |
+ WSTROBE(timings->wstrobe) | WSETUP(timings->wsetup);
+
+ offset = A1CR_OFFSET + cs * 4;
+
+ mutex_lock(&aemif->config_cs_lock);
+ val = readl(aemif->base + offset);
+ val &= ~TIMINGS_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aemif_set_cs_timings);
+
+/**
* aemif_calc_rate - calculate timing data.
* @pdev: platform device to calculate for
* @wanted: The cycle time needed in nanoseconds.
* @clk: The input clock rate in kHz.
- * @max: The maximum divider value that can be programmed.
*
- * On success, returns the calculated timing value minus 1 for easy
- * programming into AEMIF timing registers, else negative errno.
+ * @return: the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers.
*/
-static int aemif_calc_rate(struct platform_device *pdev, int wanted,
- unsigned long clk, int max)
+static u32 aemif_calc_rate(struct platform_device *pdev, int wanted, unsigned long clk)
{
int result;
@@ -149,10 +214,6 @@ static int aemif_calc_rate(struct platform_device *pdev, int wanted,
if (result < 0)
result = 0;
- /* ... But configuring tighter timings is not an option. */
- else if (result > max)
- result = -EINVAL;
-
return result;
}
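aemif_calc_rate() now returns the raw tick count and leaves range checking to aemif_check_cs_timings(); its division is elided from the hunk above. A hedged sketch of the conversion, assuming (as this driver does) that the clock rate is passed in kHz and that the register encodes ticks minus one:

#include <stdio.h>

/* ticks = ceil(wanted_ns * clk_khz / 1e6) - 1, floored at 0 (assumed rounding) */
static unsigned int ns_to_aemif_ticks(unsigned int wanted_ns, unsigned long clk_khz)
{
	unsigned long long num = (unsigned long long)wanted_ns * clk_khz;
	long long result = (num + 1000000 - 1) / 1000000 - 1;

	return result < 0 ? 0 : result;
}

int main(void)
{
	/* at a 100 MHz AEMIF clock, a 70 ns strobe needs 7 ticks, encoded as 6 */
	printf("%u\n", ns_to_aemif_ticks(70, 100000));
	return 0;
}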
@@ -174,48 +235,25 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
- unsigned long clk_rate = aemif->clk_rate;
unsigned offset;
u32 set, val;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
- ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
- rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
- rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
- rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
- whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
- wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
- wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
-
- if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
- whold < 0 || wstrobe < 0 || wsetup < 0) {
- dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
- __func__);
- return -EINVAL;
- }
-
- set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
- WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
-
- set |= (data->asize & ACR_ASIZE_MASK);
+ set = (data->asize & ACR_ASIZE_MASK);
if (data->enable_ew)
set |= ACR_EW_MASK;
if (data->enable_ss)
set |= ACR_SSTROBE_MASK;
+ mutex_lock(&aemif->config_cs_lock);
val = readl(aemif->base + offset);
val &= ~CONFIG_MASK;
val |= set;
writel(val, aemif->base + offset);
+ mutex_unlock(&aemif->config_cs_lock);
- return 0;
-}
-
-static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
-{
- return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+ return aemif_set_cs_timings(aemif, data->cs - aemif->cs_offset, &data->timings);
}
/**
@@ -231,19 +269,18 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
struct aemif_cs_data *data = &aemif->cs_data[csnum];
- unsigned long clk_rate = aemif->clk_rate;
u32 val, offset;
offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
val = readl(aemif->base + offset);
- data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
- data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
- data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
- data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
- data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
- data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
- data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->timings.ta = TA_VAL(val);
+ data->timings.rhold = RHOLD_VAL(val);
+ data->timings.rstrobe = RSTROBE_VAL(val);
+ data->timings.rsetup = RSETUP_VAL(val);
+ data->timings.whold = WHOLD_VAL(val);
+ data->timings.wstrobe = WSTROBE_VAL(val);
+ data->timings.wsetup = WSETUP_VAL(val);
data->enable_ew = EW_VAL(val);
data->enable_ss = SSTROBE_VAL(val);
data->asize = val & ASIZE_MAX;
@@ -261,6 +298,7 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
struct device_node *np)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
+ unsigned long clk_rate = aemif->clk_rate;
struct aemif_cs_data *data;
u32 cs;
u32 val;
@@ -288,32 +326,33 @@ static int of_aemif_parse_abus_config(struct platform_device *pdev,
/* override the values from device node */
if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
- data->ta = val;
+ data->timings.ta = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
- data->rhold = val;
+ data->timings.rhold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
- data->rstrobe = val;
+ data->timings.rstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
- data->rsetup = val;
+ data->timings.rsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
- data->whold = val;
+ data->timings.whold = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
- data->wstrobe = val;
+ data->timings.wstrobe = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
- data->wsetup = val;
+ data->timings.wsetup = aemif_calc_rate(pdev, val, clk_rate);
if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
if (val == 16)
data->asize = 1;
data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
- return 0;
+
+ return aemif_check_cs_timings(&data->timings);
}
static const struct of_device_id aemif_of_match[] = {
@@ -351,6 +390,7 @@ static int aemif_probe(struct platform_device *pdev)
if (IS_ERR(aemif->base))
return PTR_ERR(aemif->base);
+ mutex_init(&aemif->config_cs_lock);
if (np) {
/*
* For every controller device node, there is a cs device node
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index ae4e8b8e6eb7..043b9ec756ff 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -200,7 +200,7 @@ static int memstick_dummy_check(struct memstick_dev *card)
/**
* memstick_detect_change - schedule media detection on memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_detect_change(struct memstick_host *host)
{
@@ -210,13 +210,15 @@ EXPORT_SYMBOL(memstick_detect_change);
/**
* memstick_next_req - called by host driver to obtain next request to process
- * @host - host to use
- * @mrq - pointer to stick the request to
+ * @host: host to use
+ * @mrq: pointer to stick the request to
*
* Host calls this function from idle state (*mrq == NULL) or after finishing
* previous request (*mrq should point to it). If previous request was
- * unsuccessful, it is retried for predetermined number of times. Return value
- * of 0 means that new request was assigned to the host.
+ * unsuccessful, it is retried for a predetermined number of times.
+ *
+ * Returns: 0 if a new request was assigned to the host, otherwise a
+ * negative error code.
*/
int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq)
{
@@ -242,7 +244,7 @@ EXPORT_SYMBOL(memstick_next_req);
/**
* memstick_new_req - notify the host that some requests are pending
- * @host - host to use
+ * @host: host to use
*/
void memstick_new_req(struct memstick_host *host)
{
@@ -256,9 +258,9 @@ EXPORT_SYMBOL(memstick_new_req);
/**
* memstick_init_req_sg - set request fields needed for bulk data transfer
- * @mrq - request to use
- * @tpc - memstick Transport Protocol Command
- * @sg - TPC argument
+ * @mrq: request to use
+ * @tpc: memstick Transport Protocol Command
+ * @sg: TPC argument
*/
void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
const struct scatterlist *sg)
@@ -281,10 +283,10 @@ EXPORT_SYMBOL(memstick_init_req_sg);
/**
* memstick_init_req - set request fields needed for short data transfer
- * @mrq - request to use
- * @tpc - memstick Transport Protocol Command
- * @buf - TPC argument buffer
- * @length - TPC argument size
+ * @mrq: request to use
+ * @tpc: memstick Transport Protocol Command
+ * @buf: TPC argument buffer
+ * @length: TPC argument size
*
* The intended use of this function (transfer of data items several bytes
* in size) allows us to just copy the value between request structure and
@@ -360,7 +362,9 @@ static int h_memstick_set_rw_addr(struct memstick_dev *card,
/**
* memstick_set_rw_addr - issue SET_RW_REG_ADDR request and wait for it to
* complete
- * @card - media device to use
+ * @card: media device to use
+ *
+ * Returns: error setting for the current request
*/
int memstick_set_rw_addr(struct memstick_dev *card)
{
@@ -487,6 +491,8 @@ out_power_off:
* memstick_alloc_host - allocate a memstick_host structure
* @extra: size of the user private data to allocate
* @dev: parent device of the host
+ *
+ * Returns: %NULL on failure or the allocated &memstick_host pointer on success
*/
struct memstick_host *memstick_alloc_host(unsigned int extra,
struct device *dev)
@@ -507,7 +513,9 @@ EXPORT_SYMBOL(memstick_alloc_host);
/**
* memstick_add_host - start request processing on memstick host
- * @host - host to use
+ * @host: host to use
+ *
+ * Returns: %0 on success or a negative error code on failure
*/
int memstick_add_host(struct memstick_host *host)
{
@@ -543,7 +551,7 @@ EXPORT_SYMBOL(memstick_add_host);
/**
* memstick_remove_host - stop request processing on memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_remove_host(struct memstick_host *host)
{
@@ -565,7 +573,7 @@ EXPORT_SYMBOL(memstick_remove_host);
/**
* memstick_free_host - free memstick host
- * @host - host to use
+ * @host: host to use
*/
void memstick_free_host(struct memstick_host *host)
{
@@ -576,7 +584,7 @@ EXPORT_SYMBOL(memstick_free_host);
/**
* memstick_suspend_host - notify bus driver of host suspension
- * @host - host to use
+ * @host: host to use
*/
void memstick_suspend_host(struct memstick_host *host)
{
@@ -588,7 +596,7 @@ EXPORT_SYMBOL(memstick_suspend_host);
/**
* memstick_resume_host - notify bus driver of host resumption
- * @host - host to use
+ * @host: host to use
*/
void memstick_resume_host(struct memstick_host *host)
{
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index ee61b70aa677..8f587c0efd9d 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -96,7 +96,7 @@ static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
static int mptfc_target_alloc(struct scsi_target *starget);
-static int mptfc_slave_alloc(struct scsi_device *sdev);
+static int mptfc_sdev_init(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
@@ -113,10 +113,10 @@ static const struct scsi_host_template mptfc_driver_template = {
.info = mptscsih_info,
.queuecommand = mptfc_qcmd,
.target_alloc = mptfc_target_alloc,
- .slave_alloc = mptfc_slave_alloc,
- .slave_configure = mptscsih_slave_configure,
+ .sdev_init = mptfc_sdev_init,
+ .sdev_configure = mptscsih_sdev_configure,
.target_destroy = mptfc_target_destroy,
- .slave_destroy = mptscsih_slave_destroy,
+ .sdev_destroy = mptscsih_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = mptfc_abort,
@@ -503,7 +503,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
/*
* if already mapped, remap here. If not mapped,
* target_alloc will allocate vtarget and map,
- * slave_alloc will fill in vdevice from vtarget.
+ * sdev_init will fill in vdevice from vtarget.
*/
if (ri->starget) {
vtarget = ri->starget->hostdata;
@@ -631,7 +631,7 @@ mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device
* Init memory once per LUN.
*/
static int
-mptfc_slave_alloc(struct scsi_device *sdev)
+mptfc_sdev_init(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd;
VirtTarget *vtarget;
@@ -651,7 +651,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kmalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d0549a4daf76..7e79da9684ed 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1710,7 +1710,7 @@ mptsas_firmware_event_work(struct work_struct *work)
static int
-mptsas_slave_configure(struct scsi_device *sdev)
+mptsas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -1736,7 +1736,7 @@ mptsas_slave_configure(struct scsi_device *sdev)
mptsas_add_device_component_starget(ioc, scsi_target(sdev));
out:
- return mptscsih_slave_configure(sdev);
+ return mptscsih_sdev_configure(sdev, lim);
}
static int
@@ -1867,7 +1867,7 @@ mptsas_target_destroy(struct scsi_target *starget)
static int
-mptsas_slave_alloc(struct scsi_device *sdev)
+mptsas_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -1880,7 +1880,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kzalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
@@ -2005,10 +2005,10 @@ static const struct scsi_host_template mptsas_driver_template = {
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
- .slave_alloc = mptsas_slave_alloc,
- .slave_configure = mptsas_slave_configure,
+ .sdev_init = mptsas_sdev_init,
+ .sdev_configure = mptsas_sdev_configure,
.target_destroy = mptsas_target_destroy,
- .slave_destroy = mptscsih_slave_destroy,
+ .sdev_destroy = mptscsih_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_timed_out = mptsas_eh_timed_out,
.eh_abort_handler = mptscsih_abort,
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c3f25cc33ff..a9604ba3c805 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1071,7 +1071,7 @@ EXPORT_SYMBOL(mptscsih_flush_running_cmds);
*
* Returns: None.
*
- * Called from slave_destroy.
+ * Called from sdev_destroy.
*/
static void
mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
@@ -2331,7 +2331,7 @@ EXPORT_SYMBOL(mptscsih_raid_id_to_num);
* Called if no device present or device being unloaded
*/
void
-mptscsih_slave_destroy(struct scsi_device *sdev)
+mptscsih_sdev_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
@@ -2399,7 +2399,7 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* Return non-zero if fails.
*/
int
-mptscsih_slave_configure(struct scsi_device *sdev)
+mptscsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *sh = sdev->host;
VirtTarget *vtarget;
@@ -3302,8 +3302,8 @@ EXPORT_SYMBOL(mptscsih_resume);
EXPORT_SYMBOL(mptscsih_show_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
-EXPORT_SYMBOL(mptscsih_slave_destroy);
-EXPORT_SYMBOL(mptscsih_slave_configure);
+EXPORT_SYMBOL(mptscsih_sdev_destroy);
+EXPORT_SYMBOL(mptscsih_sdev_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
EXPORT_SYMBOL(mptscsih_target_reset);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index e3d92c392673..ece451c575e1 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -116,8 +116,9 @@ extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
u8 id, u64 lun, int ctx2abort, ulong timeout);
-extern void mptscsih_slave_destroy(struct scsi_device *device);
-extern int mptscsih_slave_configure(struct scsi_device *device);
+extern void mptscsih_sdev_destroy(struct scsi_device *device);
+extern int mptscsih_sdev_configure(struct scsi_device *device,
+ struct queue_limits *lim);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_target_reset(struct scsi_cmnd * SCpnt);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 4184d0c70ac3..a3901fbfac4f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -713,7 +713,7 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
mptspi_read_parameters(sdev->sdev_target);
}
-static int mptspi_slave_alloc(struct scsi_device *sdev)
+static int mptspi_sdev_init(struct scsi_device *sdev)
{
MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget;
@@ -727,7 +727,7 @@ static int mptspi_slave_alloc(struct scsi_device *sdev)
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
- printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ printk(MYIOC_s_ERR_FMT "sdev_init kmalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
@@ -746,7 +746,8 @@ static int mptspi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int mptspi_slave_configure(struct scsi_device *sdev)
+static int mptspi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
@@ -754,7 +755,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
mptspi_initTarget(hd, vtarget, sdev);
- ret = mptscsih_slave_configure(sdev);
+ ret = mptscsih_sdev_configure(sdev, lim);
if (ret)
return ret;
@@ -799,7 +800,7 @@ mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
return mptscsih_qcmd(SCpnt);
}
-static void mptspi_slave_destroy(struct scsi_device *sdev)
+static void mptspi_sdev_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
@@ -817,7 +818,7 @@ static void mptspi_slave_destroy(struct scsi_device *sdev)
mptspi_write_spi_device_pg1(starget, &pg1);
}
- mptscsih_slave_destroy(sdev);
+ mptscsih_sdev_destroy(sdev);
}
static const struct scsi_host_template mptspi_driver_template = {
@@ -828,10 +829,10 @@ static const struct scsi_host_template mptspi_driver_template = {
.info = mptscsih_info,
.queuecommand = mptspi_qcmd,
.target_alloc = mptspi_target_alloc,
- .slave_alloc = mptspi_slave_alloc,
- .slave_configure = mptspi_slave_configure,
+ .sdev_init = mptspi_sdev_init,
+ .sdev_configure = mptspi_sdev_configure,
.target_destroy = mptspi_target_destroy,
- .slave_destroy = mptspi_slave_destroy,
+ .sdev_destroy = mptspi_sdev_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 09cbe3f0ab1e..56bc72c7ce4a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -517,7 +517,6 @@ config OPEN_DICE
config NTSYNC
tristate "NT synchronization primitive emulation"
- depends on BROKEN
help
This module provides kernel support for emulation of Windows NT
synchronization primitives. It is not a hardware driver.
@@ -613,8 +612,7 @@ config MARVELL_CN10K_DPI
config MCHP_LAN966X_PCI
tristate "Microchip LAN966x PCIe Support"
depends on PCI
- select OF
- select OF_OVERLAY
+ depends on OF_OVERLAY
select IRQ_DOMAIN
help
This enables the support for the LAN966x PCIe device.
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 40bf953185c7..545aad06d088 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 2bb1dd2511f9..fc64474b8241 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -714,7 +714,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -829,7 +829,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
}
static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
@@ -849,8 +849,8 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
return ret;
}
/* size is computed at run-time */
-static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
- c2port_write_flash_data, 0);
+static const BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
+ c2port_write_flash_data, 0);
/*
* Class attributes
@@ -869,14 +869,27 @@ static struct attribute *c2port_attrs[] = {
NULL,
};
-static struct bin_attribute *c2port_bin_attrs[] = {
+static const struct bin_attribute *const c2port_bin_attrs[] = {
&bin_attr_flash_data,
NULL,
};
+static size_t c2port_bin_attr_size(struct kobject *kobj,
+ const struct bin_attribute *attr,
+ int i)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ if (attr == &bin_attr_flash_data)
+ return c2dev->ops->blocks_num * c2dev->ops->block_size;
+
+ return attr->size;
+}
+
static const struct attribute_group c2port_group = {
.attrs = c2port_attrs,
- .bin_attrs = c2port_bin_attrs,
+ .bin_attrs_new = c2port_bin_attrs,
+ .bin_size = c2port_bin_attr_size,
};
static const struct attribute_group *c2port_groups[] = {
@@ -912,8 +925,7 @@ struct c2port_device *c2port_device_register(char *name,
if (ret < 0)
goto error_idr_alloc;
c2dev->id = ret;
-
- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+ c2dev->ops = ops;
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
@@ -924,7 +936,6 @@ struct c2port_device *c2port_device_register(char *name,
dev_set_drvdata(c2dev->dev, c2dev);
strscpy(c2dev->name, name, sizeof(c2dev->name));
- c2dev->ops = ops;
mutex_init(&c2dev->mutex);
/* By default C2 port access is off */
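The sysfs updates in this series (here and in the cxl, ds1682, idt_89hpesx, max6875, ocxl and pch_phub hunks below) all follow one pattern: binary attributes become const and their callbacks move to the read_new/write_new members, which take a const struct bin_attribute. A minimal driver-side sketch of the resulting shape, with hypothetical names (foo_read, foo_attr) that are not part of any hunk above:

static ssize_t foo_read(struct file *filp, struct kobject *kobj,
			const struct bin_attribute *attr,
			char *buf, loff_t off, size_t count)
{
	/* copy at most count bytes of attribute data, starting at off, into buf */
	return 0;
}

/* The attribute itself can be const, since nothing modifies it at runtime. */
static const struct bin_attribute foo_attr = {
	.attr = { .name = "foo_data", .mode = 0444 },
	.size = 16,
	.read_new = foo_read,	/* read_new/write_new take the const attribute */
};

Groups whose attribute size is only known at runtime can additionally supply a .bin_size callback via .bin_attrs_new, as the c2port hunk above does, instead of patching a writable bin_attribute.size at registration time.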
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 77b0490a1b38..e0174da5e9fc 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -286,6 +286,7 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
+ u8 interrupt_val = 0;
u16 *buf;
if (!status)
@@ -308,6 +309,20 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
+ rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
+ /* Cross check presence with interrupts */
+ if (*status & XD_CD)
+ if (!(interrupt_val & XD_INT))
+ *status &= ~XD_CD;
+
+ if (*status & SD_CD)
+ if (!(interrupt_val & SD_INT))
+ *status &= ~SD_CD;
+
+ if (*status & MS_CD)
+ if (!(interrupt_val & MS_INT))
+ *status &= ~MS_CD;
+
/* usb_control_msg may return positive when success */
if (ret < 0)
return ret;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 409bd1c39663..b1fc6446bd4b 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -444,7 +444,7 @@ static ssize_t api_version_compatible_show(struct device *device,
}
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
@@ -538,7 +538,7 @@ static ssize_t class_show(struct kobject *kobj,
}
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
@@ -620,7 +620,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
cr->config_attr.attr.name = "config";
cr->config_attr.attr.mode = S_IRUSR;
cr->config_attr.size = afu->crs_len;
- cr->config_attr.read = afu_read_config;
+ cr->config_attr.read_new = afu_read_config;
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
@@ -693,7 +693,7 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
afu->attr_eb.attr.name = "afu_err_buff";
afu->attr_eb.attr.mode = S_IRUGO;
afu->attr_eb.size = afu->eb_len;
- afu->attr_eb.read = afu_eb_read;
+ afu->attr_eb.read_new = afu_eb_read;
rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
if (rc) {
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 4175df7ef011..5d5a70a62e98 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -154,7 +154,7 @@ static const struct attribute_group ds1682_group = {
* User data attribute
*/
static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -172,7 +172,7 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -194,8 +194,8 @@ static const struct bin_attribute ds1682_eeprom_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = DS1682_EEPROM_SIZE,
- .read = ds1682_eeprom_read,
- .write = ds1682_eeprom_write,
+ .read_new = ds1682_eeprom_read,
+ .write_new = ds1682_eeprom_write,
};
static int ds1682_nvmem_read(void *priv, unsigned int offset, void *val,
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 43421fe37d33..1fc632ebf22f 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -847,7 +847,7 @@ err_mutex_unlock:
* @count: Number of bytes to write
*/
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -871,7 +871,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
* @count: Number of bytes to write
*/
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
@@ -1017,7 +1017,7 @@ static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
* NOTE Size will be changed in compliance with OF node. EEPROM attribute will
* be read-only as well if the corresponding flag is specified in OF node.
*/
-static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
+static const BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
/*
* csr_dbgfs_ops - CSR debugfs-node read/write operations
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 6fab2ffa736b..1c36ad153e78 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -104,7 +104,7 @@ exit_up:
}
static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -127,7 +127,7 @@ static const struct bin_attribute user_eeprom_attr = {
.mode = S_IRUGO,
},
.size = USER_EEPROM_SIZE,
- .read = max6875_read,
+ .read_new = max6875_read,
};
static int max6875_probe(struct i2c_client *client)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 48d08eeb2d20..7b7a22c91fe4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -139,14 +139,14 @@ struct fastrpc_mmap_rsp_msg {
};
struct fastrpc_mmap_req_msg {
- s32 pgid;
+ s32 client_id;
u32 flags;
u64 vaddr;
s32 num;
};
struct fastrpc_mem_map_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
s32 offset;
u32 flags;
@@ -156,20 +156,20 @@ struct fastrpc_mem_map_req_msg {
};
struct fastrpc_munmap_req_msg {
- s32 pgid;
+ s32 client_id;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
- s32 pgid;
+ s32 client_id;
s32 fd;
u64 vaddrin;
u64 len;
};
struct fastrpc_msg {
- int pid; /* process group id */
+ int client_id; /* process client id */
int tid; /* thread id */
u64 ctx; /* invoke caller context */
u32 handle; /* handle to invoke */
@@ -234,7 +234,7 @@ struct fastrpc_invoke_ctx {
int nbufs;
int retval;
int pid;
- int tgid;
+ int client_id;
u32 sc;
u32 *crc;
u64 ctxid;
@@ -299,7 +299,7 @@ struct fastrpc_user {
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
- int tgid;
+ int client_id;
int pd;
bool is_secure_dev;
/* Lock for lists */
@@ -614,7 +614,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
- ctx->tgid = user->tgid;
+ ctx->client_id = user->client_id;
ctx->cctx = cctx;
init_completion(&ctx->work);
INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
@@ -992,7 +992,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma)
- pages[i].addr += ctx->args[i].ptr -
+ pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
vma->vm_start;
mmap_read_unlock(current->mm);
@@ -1019,8 +1019,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
(pkt_size - rlen);
pages[i].addr = pages[i].addr & PAGE_MASK;
- pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
- pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
+ pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
args = args + mlen;
rlen -= mlen;
@@ -1115,11 +1115,11 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
int ret;
cctx = fl->cctx;
- msg->pid = fl->tgid;
+ msg->client_id = fl->client_id;
msg->tid = current->pid;
if (kernel)
- msg->pid = 0;
+ msg->client_id = 0;
msg->ctx = ctx->ctxid | fl->pd;
msg->handle = handle;
@@ -1244,7 +1244,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
int err;
bool scm_done = false;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 pageslen;
} inbuf;
@@ -1293,7 +1293,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
}
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = init.namelen;
inbuf.pageslen = 0;
fl->pd = USER_PD;
@@ -1363,7 +1363,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
int memlen;
int err;
struct {
- int pgid;
+ int client_id;
u32 namelen;
u32 filelen;
u32 pageslen;
@@ -1395,7 +1395,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
- inbuf.pgid = fl->tgid;
+ inbuf.client_id = fl->client_id;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
@@ -1469,8 +1469,9 @@ err:
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
- struct fastrpc_channel_ctx *cctx)
+ struct fastrpc_user *fl)
{
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_session_ctx *session = NULL;
unsigned long flags;
int i;
@@ -1480,6 +1481,8 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
session = &cctx->session[i];
+ /* any non-zero ID will work, session_idx + 1 is the simplest one */
+ fl->client_id = i + 1;
break;
}
}
@@ -1501,12 +1504,12 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
- int tgid = 0;
+ int client_id = 0;
u32 sc;
- tgid = fl->tgid;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ client_id = fl->client_id;
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
@@ -1579,11 +1582,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
- fl->tgid = current->tgid;
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
- fl->sctx = fastrpc_session_alloc(cctx);
+ fl->sctx = fastrpc_session_alloc(fl);
if (!fl->sctx) {
dev_err(&cctx->rpdev->dev, "No session available\n");
mutex_destroy(&fl->mutex);
@@ -1647,11 +1649,11 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
- int tgid = fl->tgid;
+ int client_id = fl->client_id;
u32 sc;
- args[0].ptr = (u64)(uintptr_t) &tgid;
- args[0].length = sizeof(tgid);
+ args[0].ptr = (u64)(uintptr_t) &client_id;
+ args[0].length = sizeof(client_id);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
fl->pd = pd;
@@ -1803,7 +1805,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
int err;
u32 sc;
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
@@ -1889,7 +1891,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.flags = req.flags;
req_msg.vaddr = req.vaddrin;
req_msg.num = sizeof(pages);
@@ -1978,7 +1980,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
return -EINVAL;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
@@ -2031,7 +2033,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
+ req_msg.client_id = fl->client_id;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
@@ -2344,7 +2346,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
if (err)
- goto fdev_error;
+ goto populate_error;
break;
default:
err = -EINVAL;
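For the fastrpc_get_args() hunk above, the switch from the raw argument pointer to the page-aligned DMA buffer address when computing pg_start/pg_end is easiest to see with a worked example (illustrative values only, assuming 4 KiB pages):

/*
 * rpra[i].buf.pv = 0x7f0001230, len = 0x2000, PAGE_SHIFT = 12
 *
 *   pg_start = (0x7f0001230 & PAGE_MASK) >> 12               = 0x7f0001
 *   pg_end   = ((0x7f0001230 + 0x2000 - 1) & PAGE_MASK) >> 12 = 0x7f0003
 *   size     = (pg_end - pg_start + 1) * PAGE_SIZE           = 3 pages
 *
 * The unaligned buffer straddles three pages; deriving the span from the
 * page-aligned buf.pv keeps pages[i].size consistent with pages[i].addr,
 * which is masked with PAGE_MASK a few lines earlier.
 */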
diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c
index 255d3022dae8..d0c6113dcff3 100644
--- a/drivers/misc/keba/cp500.c
+++ b/drivers/misc/keba/cp500.c
@@ -126,8 +126,9 @@ static struct cp500_devs cp520_devices = {
};
struct cp500_nvmem {
- struct nvmem_device *nvmem;
+ struct nvmem_device *base_nvmem;
unsigned int offset;
+ struct nvmem_device *nvmem;
};
struct cp500 {
@@ -581,8 +582,8 @@ static int cp500_nvmem_read(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_read(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_read(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
@@ -595,15 +596,16 @@ static int cp500_nvmem_write(void *priv, unsigned int offset, void *val,
struct cp500_nvmem *nvmem = priv;
int ret;
- ret = nvmem_device_write(nvmem->nvmem, nvmem->offset + offset, bytes,
- val);
+ ret = nvmem_device_write(nvmem->base_nvmem, nvmem->offset + offset,
+ bytes, val);
if (ret != bytes)
return ret;
return 0;
}
-static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
+static int cp500_nvmem_register(struct cp500 *cp500,
+ struct nvmem_device *base_nvmem)
{
struct device *dev = &cp500->pci_dev->dev;
struct nvmem_config nvmem_config = {};
@@ -625,27 +627,52 @@ static int cp500_nvmem_register(struct cp500 *cp500, struct nvmem_device *nvmem)
nvmem_config.reg_read = cp500_nvmem_read;
nvmem_config.reg_write = cp500_nvmem_write;
- cp500->nvmem_cpu.nvmem = nvmem;
+ cp500->nvmem_cpu.base_nvmem = base_nvmem;
cp500->nvmem_cpu.offset = CP500_EEPROM_CPU_OFFSET;
nvmem_config.name = CP500_EEPROM_CPU_NAME;
nvmem_config.size = CP500_EEPROM_CPU_SIZE;
nvmem_config.priv = &cp500->nvmem_cpu;
- tmp = devm_nvmem_register(dev, &nvmem_config);
+ tmp = nvmem_register(&nvmem_config);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
+ cp500->nvmem_cpu.nvmem = tmp;
- cp500->nvmem_user.nvmem = nvmem;
+ cp500->nvmem_user.base_nvmem = base_nvmem;
cp500->nvmem_user.offset = CP500_EEPROM_USER_OFFSET;
nvmem_config.name = CP500_EEPROM_USER_NAME;
nvmem_config.size = CP500_EEPROM_USER_SIZE;
nvmem_config.priv = &cp500->nvmem_user;
- tmp = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(tmp))
+ tmp = nvmem_register(&nvmem_config);
+ if (IS_ERR(tmp)) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+
return PTR_ERR(tmp);
+ }
+ cp500->nvmem_user.nvmem = tmp;
return 0;
}
+static void cp500_nvmem_unregister(struct cp500 *cp500)
+{
+ int notified;
+
+ if (cp500->nvmem_user.nvmem) {
+ nvmem_unregister(cp500->nvmem_user.nvmem);
+ cp500->nvmem_user.nvmem = NULL;
+ }
+ if (cp500->nvmem_cpu.nvmem) {
+ nvmem_unregister(cp500->nvmem_cpu.nvmem);
+ cp500->nvmem_cpu.nvmem = NULL;
+ }
+
+ /* CPU and user nvmem use the same base_nvmem, put only once */
+ notified = atomic_read(&cp500->nvmem_notified);
+ if (notified)
+ nvmem_device_put(cp500->nvmem_cpu.base_nvmem);
+}
+
static int cp500_nvmem_match(struct device *dev, const void *data)
{
const struct cp500 *cp500 = data;
@@ -663,13 +690,6 @@ static int cp500_nvmem_match(struct device *dev, const void *data)
return 0;
}
-static void cp500_devm_nvmem_put(void *data)
-{
- struct nvmem_device *nvmem = data;
-
- nvmem_device_put(nvmem);
-}
-
static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
void *data)
{
@@ -698,10 +718,6 @@ static int cp500_nvmem(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
- ret = devm_add_action_or_reset(dev, cp500_devm_nvmem_put, nvmem);
- if (ret)
- return ret;
-
ret = cp500_nvmem_register(cp500, nvmem);
if (ret)
return ret;
@@ -932,12 +948,17 @@ static void cp500_remove(struct pci_dev *pci_dev)
{
struct cp500 *cp500 = pci_get_drvdata(pci_dev);
+ /*
+ * Unregister the CPU and user nvmem devices and put base_nvmem before
+ * the parent auxiliary device of base_nvmem is unregistered.
+ */
+ nvmem_unregister_notifier(&cp500->nvmem_notifier);
+ cp500_nvmem_unregister(cp500);
+
cp500_unregister_auxiliary_devs(cp500);
cp500_disable(cp500);
- nvmem_unregister_notifier(&cp500->nvmem_notifier);
-
pci_set_drvdata(pci_dev, 0);
pci_free_irq_vectors(pci_dev);
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index 3c1359d8d4e6..04756302b878 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -147,6 +147,9 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), false);
+ break;
default:
ret = -ENOTSUPP;
break;
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/misc/misc_minor_kunit.c
new file mode 100644
index 000000000000..293e0fb7e43e
--- /dev/null
+++ b/drivers/misc/misc_minor_kunit.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+/* dynamic minor (2) */
+static struct miscdevice dev_dynamic_minor = {
+ .minor = 2,
+ .name = "dev_dynamic_minor",
+};
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
+ misc_deregister(&dev_dynamic_minor);
+}
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_dynamic_minor),
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "misc_minor_test",
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_DESCRIPTION("misc minor testing");
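The three cases above exercise how misc_register() treats the .minor field: an explicitly requested number is kept, while MISC_DYNAMIC_MINOR asks the core to allocate a free one. A hedged, stand-alone sketch of the two registration styles (device names are hypothetical):

#include <linux/miscdevice.h>

/* Explicit minor: misc_register() keeps the requested number. */
static struct miscdevice explicit_dev = {
	.minor = 2,			/* still 2 after registration */
	.name  = "explicit_example",
};

/* Dynamic minor: the core allocates a free number during registration. */
static struct miscdevice dynamic_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* replaced with the allocated minor */
	.name  = "dynamic_example",
};

Either device is registered with misc_register() and torn down with misc_deregister(), exactly as the KUnit cases do.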
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 4954553b7baa..055395cde42b 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -6,11 +6,17 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <uapi/linux/ntsync.h>
@@ -19,6 +25,8 @@
enum ntsync_type {
NTSYNC_TYPE_SEM,
+ NTSYNC_TYPE_MUTEX,
+ NTSYNC_TYPE_EVENT,
};
/*
@@ -30,10 +38,13 @@ enum ntsync_type {
*
* Both rely on struct file for reference counting. Individual
* ntsync_obj objects take a reference to the device when created.
+ * Wait operations take a reference to each object being waited on for
+ * the duration of the wait.
*/
struct ntsync_obj {
spinlock_t lock;
+ int dev_locked;
enum ntsync_type type;
@@ -46,22 +57,344 @@ struct ntsync_obj {
__u32 count;
__u32 max;
} sem;
+ struct {
+ __u32 count;
+ pid_t owner;
+ bool ownerdead;
+ } mutex;
+ struct {
+ bool manual;
+ bool signaled;
+ } event;
} u;
+
+ /*
+ * any_waiters is protected by the object lock, but all_waiters is
+ * protected by the device wait_all_lock.
+ */
+ struct list_head any_waiters;
+ struct list_head all_waiters;
+
+ /*
+ * Hint describing how many tasks are queued on this object in a
+ * wait-all operation.
+ *
+ * Any time we do a wake, we may need to wake "all" waiters as well as
+ * "any" waiters. In order to atomically wake "all" waiters, we must
+ * lock all of the objects, and that means grabbing the wait_all_lock
+ * below (and, due to lock ordering rules, before locking this object).
+ * However, wait-all is a rare operation, and grabbing the wait-all
+ * lock for every wake would create unnecessary contention.
+ * Therefore we first check whether all_hint is zero, and, if it is,
+ * we skip trying to wake "all" waiters.
+ *
+ * Since wait requests must originate from user-space threads, we're
+ * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
+ */
+ atomic_t all_hint;
+};
+
+struct ntsync_q_entry {
+ struct list_head node;
+ struct ntsync_q *q;
+ struct ntsync_obj *obj;
+ __u32 index;
+};
+
+struct ntsync_q {
+ struct task_struct *task;
+ __u32 owner;
+
+ /*
+ * Protected via atomic_try_cmpxchg(). Only the thread that wins the
+ * compare-and-swap may actually change object states and wake this
+ * task.
+ */
+ atomic_t signaled;
+
+ bool all;
+ bool ownerdead;
+ __u32 count;
+ struct ntsync_q_entry entries[];
};
struct ntsync_device {
+ /*
+ * Wait-all operations must atomically grab all objects, and be totally
+ * ordered with respect to each other and wait-any operations.
+ * If one thread is trying to acquire several objects, another thread
+ * cannot touch the object at the same time.
+ *
+ * This device-wide lock is used to serialize wait-for-all
+ * operations, and operations on an object that is involved in a
+ * wait-for-all.
+ */
+ struct mutex wait_all_lock;
+
struct file *file;
};
/*
+ * Single objects are locked using obj->lock.
+ *
+ * Multiple objects are 'locked' while holding dev->wait_all_lock.
+ * In this case however, individual objects are not locked by holding
+ * obj->lock, but by setting obj->dev_locked.
+ *
+ * This means that in order to lock a single object, the sequence is slightly
+ * more complicated than usual. Specifically it needs to check obj->dev_locked
+ * after acquiring obj->lock, if set, it needs to drop the lock and acquire
+ * dev->wait_all_lock in order to serialize against the multi-object operation.
+ */
+
+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ /*
+ * By setting obj->dev_locked inside obj->lock, it is ensured that
+ * anyone holding obj->lock must see the value.
+ */
+ obj->dev_locked = 1;
+ spin_unlock(&obj->lock);
+}
+
+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev == dev);
+ spin_lock(&obj->lock);
+ obj->dev_locked = 0;
+ spin_unlock(&obj->lock);
+}
+
+static void obj_lock(struct ntsync_obj *obj)
+{
+ struct ntsync_device *dev = obj->dev;
+
+ for (;;) {
+ spin_lock(&obj->lock);
+ if (likely(!obj->dev_locked))
+ break;
+
+ spin_unlock(&obj->lock);
+ mutex_lock(&dev->wait_all_lock);
+ spin_lock(&obj->lock);
+ /*
+ * obj->dev_locked should be set and released under the same
+ * wait_all_lock section; since we now own this lock, it should
+ * be clear.
+ */
+ lockdep_assert(!obj->dev_locked);
+ spin_unlock(&obj->lock);
+ mutex_unlock(&dev->wait_all_lock);
+ }
+}
+
+static void obj_unlock(struct ntsync_obj *obj)
+{
+ spin_unlock(&obj->lock);
+}
+
+static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ bool all;
+
+ obj_lock(obj);
+ all = atomic_read(&obj->all_hint);
+ if (unlikely(all)) {
+ obj_unlock(obj);
+ mutex_lock(&dev->wait_all_lock);
+ dev_lock_obj(dev, obj);
+ }
+
+ return all;
+}
+
+static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
+{
+ if (all) {
+ dev_unlock_obj(dev, obj);
+ mutex_unlock(&dev->wait_all_lock);
+ } else {
+ obj_unlock(obj);
+ }
+}
+
+#define ntsync_assert_held(obj) \
+ lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
+ ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
+ (obj)->dev_locked))
+
+static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
+{
+ ntsync_assert_held(obj);
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ return !!obj->u.sem.count;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
+ return false;
+ return obj->u.mutex.count < UINT_MAX;
+ case NTSYNC_TYPE_EVENT:
+ return obj->u.event.signaled;
+ }
+
+ WARN(1, "bad object type %#x\n", obj->type);
+ return false;
+}
+
+/*
+ * "locked_obj" is an optional pointer to an object which is already locked and
+ * should not be locked again. This is necessary so that changing an object's
+ * state and waking it can be a single atomic operation.
+ */
+static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
+ struct ntsync_obj *locked_obj)
+{
+ __u32 count = q->count;
+ bool can_wake = true;
+ int signaled = -1;
+ __u32 i;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ if (locked_obj)
+ lockdep_assert(locked_obj->dev_locked);
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_lock_obj(dev, q->entries[i].obj);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (!is_signaled(q->entries[i].obj, q->owner)) {
+ can_wake = false;
+ break;
+ }
+ }
+
+ if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
+ for (i = 0; i < count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ obj->u.sem.count--;
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ if (obj->u.mutex.ownerdead)
+ q->ownerdead = true;
+ obj->u.mutex.ownerdead = false;
+ obj->u.mutex.count++;
+ obj->u.mutex.owner = q->owner;
+ break;
+ case NTSYNC_TYPE_EVENT:
+ if (!obj->u.event.manual)
+ obj->u.event.signaled = false;
+ break;
+ }
+ }
+ wake_up_process(q->task);
+ }
+
+ for (i = 0; i < count; i++) {
+ if (q->entries[i].obj != locked_obj)
+ dev_unlock_obj(dev, q->entries[i].obj);
+ }
+}
+
+static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+ struct ntsync_q_entry *entry;
+
+ lockdep_assert_held(&dev->wait_all_lock);
+ lockdep_assert(obj->dev_locked);
+
+ list_for_each_entry(entry, &obj->all_waiters, node)
+ try_wake_all(dev, entry->q, obj);
+}
+
+static void try_wake_any_sem(struct ntsync_obj *sem)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(sem);
+ lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
+
+ list_for_each_entry(entry, &sem->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!sem->u.sem.count)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ sem->u.sem.count--;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_mutex(struct ntsync_obj *mutex)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(mutex);
+ lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);
+
+ list_for_each_entry(entry, &mutex->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (mutex->u.mutex.count == UINT_MAX)
+ break;
+ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
+ continue;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (mutex->u.mutex.ownerdead)
+ q->ownerdead = true;
+ mutex->u.mutex.ownerdead = false;
+ mutex->u.mutex.count++;
+ mutex->u.mutex.owner = q->owner;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+static void try_wake_any_event(struct ntsync_obj *event)
+{
+ struct ntsync_q_entry *entry;
+
+ ntsync_assert_held(event);
+ lockdep_assert(event->type == NTSYNC_TYPE_EVENT);
+
+ list_for_each_entry(entry, &event->any_waiters, node) {
+ struct ntsync_q *q = entry->q;
+ int signaled = -1;
+
+ if (!event->u.event.signaled)
+ break;
+
+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
+ if (!event->u.event.manual)
+ event->u.event.signaled = false;
+ wake_up_process(q->task);
+ }
+ }
+}
+
+/*
* Actually change the semaphore state, returning -EOVERFLOW if it is made
* invalid.
*/
-static int post_sem_state(struct ntsync_obj *sem, __u32 count)
+static int release_sem_state(struct ntsync_obj *sem, __u32 count)
{
__u32 sum;
- lockdep_assert_held(&sem->lock);
+ ntsync_assert_held(sem);
if (check_add_overflow(sem->u.sem.count, count, &sum) ||
sum > sem->u.sem.max)
@@ -71,11 +404,13 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count)
return 0;
}
-static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
+static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
{
+ struct ntsync_device *dev = sem->dev;
__u32 __user *user_args = argp;
__u32 prev_count;
__u32 args;
+ bool all;
int ret;
if (copy_from_user(&args, argp, sizeof(args)))
@@ -84,12 +419,17 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
if (sem->type != NTSYNC_TYPE_SEM)
return -EINVAL;
- spin_lock(&sem->lock);
+ all = ntsync_lock_obj(dev, sem);
prev_count = sem->u.sem.count;
- ret = post_sem_state(sem, args);
+ ret = release_sem_state(sem, args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, sem);
+ try_wake_any_sem(sem);
+ }
- spin_unlock(&sem->lock);
+ ntsync_unlock_obj(dev, sem, all);
if (!ret && put_user(prev_count, user_args))
ret = -EFAULT;
@@ -97,13 +437,229 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
return ret;
}
-static int ntsync_obj_release(struct inode *inode, struct file *file)
+/*
+ * Actually change the mutex state, returning -EPERM if not the owner.
+ */
+static int unlock_mutex_state(struct ntsync_obj *mutex,
+ const struct ntsync_mutex_args *args)
{
- struct ntsync_obj *obj = file->private_data;
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != args->owner)
+ return -EPERM;
+
+ if (!--mutex->u.mutex.count)
+ mutex->u.mutex.owner = 0;
+ return 0;
+}
+
+static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ __u32 prev_count;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+ if (!args.owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ prev_count = mutex->u.mutex.count;
+ ret = unlock_mutex_state(mutex, &args);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (!ret && put_user(prev_count, &user_args->count))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+/*
+ * Actually change the mutex state to mark its owner as dead,
+ * returning -EPERM if not the owner.
+ */
+static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
+{
+ ntsync_assert_held(mutex);
+
+ if (mutex->u.mutex.owner != owner)
+ return -EPERM;
+
+ mutex->u.mutex.ownerdead = true;
+ mutex->u.mutex.owner = 0;
+ mutex->u.mutex.count = 0;
+ return 0;
+}
+
+static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_device *dev = mutex->dev;
+ __u32 owner;
+ bool all;
+ int ret;
+
+ if (get_user(owner, (__u32 __user *)argp))
+ return -EFAULT;
+ if (!owner)
+ return -EINVAL;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ ret = kill_mutex_state(mutex, owner);
+ if (!ret) {
+ if (all)
+ try_wake_all_obj(dev, mutex);
+ try_wake_any_mutex(mutex);
+ }
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ return ret;
+}
+
+static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = true;
+ if (all)
+ try_wake_all_obj(dev, event);
+ try_wake_any_event(event);
+ if (pulse)
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_device *dev = event->dev;
+ __u32 prev_state;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ prev_state = event->u.event.signaled;
+ event->u.event.signaled = false;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (put_user(prev_state, (__u32 __user *)argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp)
+{
+ struct ntsync_sem_args __user *user_args = argp;
+ struct ntsync_device *dev = sem->dev;
+ struct ntsync_sem_args args;
+ bool all;
+
+ if (sem->type != NTSYNC_TYPE_SEM)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, sem);
+
+ args.count = sem->u.sem.count;
+ args.max = sem->u.sem.max;
+
+ ntsync_unlock_obj(dev, sem, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp)
+{
+ struct ntsync_mutex_args __user *user_args = argp;
+ struct ntsync_device *dev = mutex->dev;
+ struct ntsync_mutex_args args;
+ bool all;
+ int ret;
+
+ if (mutex->type != NTSYNC_TYPE_MUTEX)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, mutex);
+
+ args.count = mutex->u.mutex.count;
+ args.owner = mutex->u.mutex.owner;
+ ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0;
+
+ ntsync_unlock_obj(dev, mutex, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return ret;
+}
+
+static int ntsync_event_read(struct ntsync_obj *event, void __user *argp)
+{
+ struct ntsync_event_args __user *user_args = argp;
+ struct ntsync_device *dev = event->dev;
+ struct ntsync_event_args args;
+ bool all;
+
+ if (event->type != NTSYNC_TYPE_EVENT)
+ return -EINVAL;
+
+ all = ntsync_lock_obj(dev, event);
+
+ args.manual = event->u.event.manual;
+ args.signaled = event->u.event.signaled;
+
+ ntsync_unlock_obj(dev, event, all);
+
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ return -EFAULT;
+ return 0;
+}
+
+static void ntsync_free_obj(struct ntsync_obj *obj)
+{
fput(obj->dev->file);
kfree(obj);
+}
+static int ntsync_obj_release(struct inode *inode, struct file *file)
+{
+ ntsync_free_obj(file->private_data);
return 0;
}
@@ -114,8 +670,24 @@ static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
- case NTSYNC_IOC_SEM_POST:
- return ntsync_sem_post(obj, argp);
+ case NTSYNC_IOC_SEM_RELEASE:
+ return ntsync_sem_release(obj, argp);
+ case NTSYNC_IOC_SEM_READ:
+ return ntsync_sem_read(obj, argp);
+ case NTSYNC_IOC_MUTEX_UNLOCK:
+ return ntsync_mutex_unlock(obj, argp);
+ case NTSYNC_IOC_MUTEX_KILL:
+ return ntsync_mutex_kill(obj, argp);
+ case NTSYNC_IOC_MUTEX_READ:
+ return ntsync_mutex_read(obj, argp);
+ case NTSYNC_IOC_EVENT_SET:
+ return ntsync_event_set(obj, argp, false);
+ case NTSYNC_IOC_EVENT_RESET:
+ return ntsync_event_reset(obj, argp);
+ case NTSYNC_IOC_EVENT_PULSE:
+ return ntsync_event_set(obj, argp, true);
+ case NTSYNC_IOC_EVENT_READ:
+ return ntsync_event_read(obj, argp);
default:
return -ENOIOCTLCMD;
}
@@ -140,6 +712,9 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
obj->dev = dev;
get_file(dev->file);
spin_lock_init(&obj->lock);
+ INIT_LIST_HEAD(&obj->any_waiters);
+ INIT_LIST_HEAD(&obj->all_waiters);
+ atomic_set(&obj->all_hint, 0);
return obj;
}
@@ -165,7 +740,6 @@ static int ntsync_obj_get_fd(struct ntsync_obj *obj)
static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
{
- struct ntsync_sem_args __user *user_args = argp;
struct ntsync_sem_args args;
struct ntsync_obj *sem;
int fd;
@@ -182,12 +756,398 @@ static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
sem->u.sem.count = args.count;
sem->u.sem.max = args.max;
fd = ntsync_obj_get_fd(sem);
- if (fd < 0) {
- kfree(sem);
- return fd;
+ if (fd < 0)
+ ntsync_free_obj(sem);
+
+ return fd;
+}
+
+static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_mutex_args args;
+ struct ntsync_obj *mutex;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ if (!args.owner != !args.count)
+ return -EINVAL;
+
+ mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
+ if (!mutex)
+ return -ENOMEM;
+ mutex->u.mutex.count = args.count;
+ mutex->u.mutex.owner = args.owner;
+ fd = ntsync_obj_get_fd(mutex);
+ if (fd < 0)
+ ntsync_free_obj(mutex);
+
+ return fd;
+}
+
+static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_event_args args;
+ struct ntsync_obj *event;
+ int fd;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
+ if (!event)
+ return -ENOMEM;
+ event->u.event.manual = args.manual;
+ event->u.event.signaled = args.signaled;
+ fd = ntsync_obj_get_fd(event);
+ if (fd < 0)
+ ntsync_free_obj(event);
+
+ return fd;
+}
+
+static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
+{
+ struct file *file = fget(fd);
+ struct ntsync_obj *obj;
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &ntsync_obj_fops) {
+ fput(file);
+ return NULL;
+ }
+
+ obj = file->private_data;
+ if (obj->dev != dev) {
+ fput(file);
+ return NULL;
}
- return put_user(fd, &user_args->sem);
+ return obj;
+}
+
+static void put_obj(struct ntsync_obj *obj)
+{
+ fput(obj->file);
+}
+
+static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
+{
+ ktime_t timeout = ns_to_ktime(args->timeout);
+ clockid_t clock = CLOCK_MONOTONIC;
+ ktime_t *timeout_ptr;
+ int ret = 0;
+
+ timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
+
+ if (args->flags & NTSYNC_WAIT_REALTIME)
+ clock = CLOCK_REALTIME;
+
+ do {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&q->signaled) != -1) {
+ ret = 0;
+ break;
+ }
+ ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
+ } while (ret < 0);
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
+/*
+ * Allocate and initialize the ntsync_q structure, but do not queue us yet.
+ */
+static int setup_wait(struct ntsync_device *dev,
+ const struct ntsync_wait_args *args, bool all,
+ struct ntsync_q **ret_q)
+{
+ int fds[NTSYNC_MAX_WAIT_COUNT + 1];
+ const __u32 count = args->count;
+ struct ntsync_q *q;
+ __u32 total_count;
+ __u32 i, j;
+
+ if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
+ return -EINVAL;
+
+ if (args->count > NTSYNC_MAX_WAIT_COUNT)
+ return -EINVAL;
+
+ total_count = count;
+ if (args->alert)
+ total_count++;
+
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs),
+ array_size(count, sizeof(*fds))))
+ return -EFAULT;
+ if (args->alert)
+ fds[count] = args->alert;
+
+ q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ q->task = current;
+ q->owner = args->owner;
+ atomic_set(&q->signaled, -1);
+ q->all = all;
+ q->ownerdead = false;
+ q->count = count;
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = get_obj(dev, fds[i]);
+
+ if (!obj)
+ goto err;
+
+ if (all) {
+ /* Check that the objects are all distinct. */
+ for (j = 0; j < i; j++) {
+ if (obj == q->entries[j].obj) {
+ put_obj(obj);
+ goto err;
+ }
+ }
+ }
+
+ entry->obj = obj;
+ entry->q = q;
+ entry->index = i;
+ }
+
+ *ret_q = q;
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ put_obj(q->entries[j].obj);
+ kfree(q);
+ return -EINVAL;
+}
+
+static void try_wake_any_obj(struct ntsync_obj *obj)
+{
+ switch (obj->type) {
+ case NTSYNC_TYPE_SEM:
+ try_wake_any_sem(obj);
+ break;
+ case NTSYNC_TYPE_MUTEX:
+ try_wake_any_mutex(obj);
+ break;
+ case NTSYNC_TYPE_EVENT:
+ try_wake_any_event(obj);
+ break;
+ }
+}
+
+static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ __u32 i, total_count;
+ struct ntsync_q *q;
+ int signaled;
+ bool all;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, false, &q);
+ if (ret < 0)
+ return ret;
+
+ total_count = args.count;
+ if (args.alert)
+ total_count++;
+
+ /* queue ourselves */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /*
+ * Check if we are already signaled.
+ *
+ * Note that the API requires that normal objects are checked before
+ * the alert event. Hence we queue the alert event last, and check
+ * objects in order.
+ */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_obj *obj = q->entries[i].obj;
+
+ if (atomic_read(&q->signaled) != -1)
+ break;
+
+ all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ for (i = 0; i < total_count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
+}
+
+static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
+{
+ struct ntsync_wait_args args;
+ struct ntsync_q *q;
+ int signaled;
+ __u32 i;
+ int ret;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ ret = setup_wait(dev, &args, true, &q);
+ if (ret < 0)
+ return ret;
+
+ /* queue ourselves */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ atomic_inc(&obj->all_hint);
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire obj->lock
+ * here.
+ */
+ list_add_tail(&entry->node, &obj->all_waiters);
+ }
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+
+ dev_lock_obj(dev, obj);
+ list_add_tail(&entry->node, &obj->any_waiters);
+ dev_unlock_obj(dev, obj);
+ }
+
+ /* check if we are already signaled */
+
+ try_wake_all(dev, q, NULL);
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ /*
+ * Check if the alert event is signaled, making sure to do so only
+ * after checking if the other objects are signaled.
+ */
+
+ if (args.alert) {
+ struct ntsync_obj *obj = q->entries[args.count].obj;
+
+ if (atomic_read(&q->signaled) == -1) {
+ bool all = ntsync_lock_obj(dev, obj);
+ try_wake_any_obj(obj);
+ ntsync_unlock_obj(dev, obj, all);
+ }
+ }
+
+ /* sleep */
+
+ ret = ntsync_schedule(q, &args);
+
+ /* and finally, unqueue */
+
+ mutex_lock(&dev->wait_all_lock);
+
+ for (i = 0; i < args.count; i++) {
+ struct ntsync_q_entry *entry = &q->entries[i];
+ struct ntsync_obj *obj = entry->obj;
+
+ /*
+ * obj->all_waiters is protected by dev->wait_all_lock rather
+ * than obj->lock, so there is no need to acquire it here.
+ */
+ list_del(&entry->node);
+
+ atomic_dec(&obj->all_hint);
+
+ put_obj(obj);
+ }
+
+ mutex_unlock(&dev->wait_all_lock);
+
+ if (args.alert) {
+ struct ntsync_q_entry *entry = &q->entries[args.count];
+ struct ntsync_obj *obj = entry->obj;
+ bool all;
+
+ all = ntsync_lock_obj(dev, obj);
+ list_del(&entry->node);
+ ntsync_unlock_obj(dev, obj, all);
+
+ put_obj(obj);
+ }
+
+ signaled = atomic_read(&q->signaled);
+ if (signaled != -1) {
+ struct ntsync_wait_args __user *user_args = argp;
+
+ /* even if we caught a signal, we need to communicate success */
+ ret = q->ownerdead ? -EOWNERDEAD : 0;
+
+ if (put_user(signaled, &user_args->index))
+ ret = -EFAULT;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ }
+
+ kfree(q);
+ return ret;
}
static int ntsync_char_open(struct inode *inode, struct file *file)
@@ -198,6 +1158,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file)
if (!dev)
return -ENOMEM;
+ mutex_init(&dev->wait_all_lock);
+
file->private_data = dev;
dev->file = file;
return nonseekable_open(inode, file);
@@ -219,8 +1181,16 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)parm;
switch (cmd) {
+ case NTSYNC_IOC_CREATE_EVENT:
+ return ntsync_create_event(dev, argp);
+ case NTSYNC_IOC_CREATE_MUTEX:
+ return ntsync_create_mutex(dev, argp);
case NTSYNC_IOC_CREATE_SEM:
return ntsync_create_sem(dev, argp);
+ case NTSYNC_IOC_WAIT_ALL:
+ return ntsync_wait_all(dev, argp);
+ case NTSYNC_IOC_WAIT_ANY:
+ return ntsync_wait_any(dev, argp);
default:
return -ENOIOCTLCMD;
}
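To see how the new object types and wait ioctls compose from userspace, here is a rough sketch; it is not part of the patch and assumes the uapi layout from <linux/ntsync.h> and the /dev/ntsync misc device node:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ntsync.h>

int main(void)
{
	struct ntsync_sem_args sem_args = { .count = 0, .max = 2 };
	struct ntsync_wait_args wait_args;
	__u32 release_count = 1;
	int dev, sem, ret;

	dev = open("/dev/ntsync", O_RDWR);
	if (dev < 0)
		return 1;

	/* Object-creation ioctls now return the new object's fd directly. */
	sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args);
	if (sem < 0)
		return 1;

	/* Release one count; the previous count is written back on success. */
	ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &release_count);

	/*
	 * Wait on the semaphore. The timeout is an absolute CLOCK_MONOTONIC
	 * value in nanoseconds; all-ones (U64_MAX) means wait forever.
	 */
	memset(&wait_args, 0, sizeof(wait_args));
	wait_args.timeout = ~(__u64)0;
	wait_args.objs = (uintptr_t)&sem;	/* array of object fds */
	wait_args.count = 1;
	wait_args.owner = getpid();		/* any non-zero owner id */
	ret = ioctl(dev, NTSYNC_IOC_WAIT_ANY, &wait_args);
	if (ret == 0)
		printf("woken by object index %u\n", wait_args.index);

	close(sem);
	close(dev);
	return 0;
}

NTSYNC_IOC_WAIT_ALL takes the same argument structure but only succeeds once every listed object is signaled, as implemented by ntsync_wait_all() above.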
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index 07520d6e6dc5..e849641687a0 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -94,7 +94,7 @@ static struct device_attribute afu_attrs[] = {
};
static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
@@ -155,7 +155,7 @@ int ocxl_sysfs_register_afu(struct ocxl_file_info *info)
info->attr_global_mmio.attr.name = "global_mmio_area";
info->attr_global_mmio.attr.mode = 0600;
info->attr_global_mmio.size = info->afu->config.global_mmio_size;
- info->attr_global_mmio.read = global_mmio_read;
+ info->attr_global_mmio.read_new = global_mmio_read;
info->attr_global_mmio.mmap = global_mmio_mmap;
rc = device_create_bin_file(&info->dev, &info->attr_global_mmio);
if (rc) {
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 8d2b7135738e..6121c0940cd1 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -483,7 +483,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
}
static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int rom_signature;
@@ -553,7 +553,7 @@ return_err_nomutex:
}
static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int err;
@@ -655,8 +655,8 @@ static const struct bin_attribute pch_bin_attr = {
.mode = S_IRUGO | S_IWUSR,
},
.size = PCH_PHUB_OROM_SIZE + 1,
- .read = pch_phub_bin_read,
- .write = pch_phub_bin_write,
+ .read_new = pch_phub_bin_read,
+ .write_new = pch_phub_bin_write,
};
static int pch_phub_probe(struct pci_dev *pdev,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 3aaaf47fa4ee..d5ac71a49386 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,9 @@
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
+#define PCI_ENDPOINT_TEST_CAPS 0x30
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
@@ -166,43 +169,47 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
test->irq_type = IRQ_TYPE_UNDEFINED;
}
-static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
+static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
int type)
{
- int irq = -1;
+ int irq;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- bool res = true;
switch (type) {
case IRQ_TYPE_INTX:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get Legacy interrupt\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI interrupts\n");
+ return irq;
+ }
+
break;
case IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
- if (irq < 0)
+ if (irq < 0) {
dev_err(dev, "Failed to get MSI-X interrupts\n");
+ return irq;
+ }
+
break;
default:
dev_err(dev, "Invalid IRQ type selected\n");
- }
-
- if (irq < 0) {
- irq = 0;
- res = false;
+ return -EINVAL;
}
test->irq_type = type;
test->num_irqs = irq;
- return res;
+ return 0;
}
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
@@ -217,22 +224,22 @@ static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
test->num_irqs = 0;
}
-static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
int i;
- int err;
+ int ret;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, test->name, test);
- if (err)
+ if (ret)
goto fail;
}
- return true;
+ return 0;
fail:
switch (irq_type) {
@@ -252,7 +259,7 @@ fail:
break;
}
- return false;
+ return ret;
}
static const u32 bar_test_pattern[] = {
@@ -277,16 +284,16 @@ static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
return memcmp(write_buf, read_buf, size);
}
-static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
- int j, bar_size, buf_size, iters, remain;
+ int j, bar_size, buf_size, iters;
void *write_buf __free(kfree) = NULL;
void *read_buf __free(kfree) = NULL;
struct pci_dev *pdev = test->pdev;
if (!test->bar[barno])
- return false;
+ return -ENOMEM;
bar_size = pci_resource_len(pdev, barno);
@@ -301,28 +308,105 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
write_buf = kmalloc(buf_size, GFP_KERNEL);
if (!write_buf)
- return false;
+ return -ENOMEM;
read_buf = kmalloc(buf_size, GFP_KERNEL);
if (!read_buf)
- return false;
+ return -ENOMEM;
iters = bar_size / buf_size;
for (j = 0; j < iters; j++)
if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
write_buf, read_buf, buf_size))
- return false;
+ return -EIO;
+
+ return 0;
+}
+
+static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
+{
+ u32 val;
+
+ /* Keep the BAR pattern in the top byte. */
+ val = bar_test_pattern[barno] & 0xff000000;
+ /* Store the (partial) offset in the remaining bytes. */
+ val |= offset & 0x00ffffff;
+
+ return val;
+}
+
+static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ int j, size;
+
+ size = pci_resource_len(pdev, barno);
+
+ if (barno == test->test_reg_bar)
+ size = 0x4;
+
+ for (j = 0; j < size; j += 4)
+ writel_relaxed(bar_test_pattern_with_offset(barno, j),
+ test->bar[barno] + j);
+}
+
+static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ int j, size;
+ u32 val;
+
+ size = pci_resource_len(pdev, barno);
+
+ if (barno == test->test_reg_bar)
+ size = 0x4;
+
+ for (j = 0; j < size; j += 4) {
+ u32 expected = bar_test_pattern_with_offset(barno, j);
+
+ val = readl_relaxed(test->bar[barno] + j);
+ if (val != expected) {
+ dev_err(dev,
+ "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
+ barno, j, val, expected);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
+{
+ enum pci_barno bar;
+ int ret;
+
+ /* Write all BARs in order (without reading). */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ if (test->bar[bar])
+ pci_endpoint_test_bars_write_bar(test, bar);
- remain = bar_size % buf_size;
- if (remain)
- if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
- write_buf, read_buf, remain))
- return false;
+ /*
+ * Read all BARs in order (without writing).
+ * If there is an address translation issue on the EP, writing one BAR
+ * might have overwritten another BAR. Ensure that this is not the case.
+ * (Reading back the BAR directly after writing cannot detect this.)
+ */
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (test->bar[bar]) {
+ ret = pci_endpoint_test_bars_read_bar(test, bar);
+ if (ret)
+ return ret;
+ }
+ }
- return true;
+ return 0;
}
-static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
u32 val;
@@ -334,16 +418,17 @@ static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
- return true;
+ return 0;
}
-static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
u16 msi_num, bool msix)
{
- u32 val;
struct pci_dev *pdev = test->pdev;
+ u32 val;
+ int ret;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
@@ -354,9 +439,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
- return false;
+ return -ETIMEDOUT;
+
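+ /* Check that the completion was raised by the expected IRQ vector */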
+ ret = pci_irq_vector(pdev, msi_num - 1);
+ if (ret < 0)
+ return ret;
+
+ if (ret != test->last_irq)
+ return -EIO;
- return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
+ return 0;
}
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
@@ -375,11 +467,10 @@ static int pci_endpoint_test_validate_xfer_params(struct device *dev,
return 0;
}
-static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
+static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
void *src_addr;
void *dst_addr;
u32 flags = 0;
@@ -398,17 +489,17 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -418,22 +509,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_src_addr, size + alignment);
orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
size + alignment, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_src_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_src_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_src_phys_addr;
}
@@ -457,15 +547,15 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
+ ret = -ENOMEM;
goto err_dst_addr;
}
orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
size + alignment, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_dst_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_dst_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map destination buffer address\n");
- ret = false;
goto err_dst_phys_addr;
}
@@ -498,8 +588,8 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
dst_crc32 = crc32_le(~0, dst_addr, size);
- if (dst_crc32 == src_crc32)
- ret = true;
+ if (dst_crc32 != src_crc32)
+ ret = -EIO;
err_dst_phys_addr:
kfree(orig_dst_addr);
@@ -510,16 +600,13 @@ err_dst_addr:
err_src_phys_addr:
kfree(orig_src_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
+static int pci_endpoint_test_write(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
u32 reg;
@@ -534,17 +621,17 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
int irq_type = test->irq_type;
size_t size;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err != 0) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -554,23 +641,22 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
get_random_bytes(orig_addr, size + alignment);
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -603,24 +689,21 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
wait_for_completion(&test->irq_raised);
reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
- if (reg & STATUS_READ_SUCCESS)
- ret = true;
+ if (!(reg & STATUS_READ_SUCCESS))
+ ret = -EIO;
dma_unmap_single(dev, orig_phys_addr, size + alignment,
DMA_TO_DEVICE);
err_phys_addr:
kfree(orig_addr);
-
-err:
return ret;
}
-static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
+static int pci_endpoint_test_read(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
- bool ret = false;
u32 flags = 0;
bool use_dma;
size_t size;
@@ -634,17 +717,17 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
- int err;
+ int ret;
- err = copy_from_user(&param, (void __user *)arg, sizeof(param));
- if (err) {
+ ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (ret) {
dev_err(dev, "Failed to get transfer param\n");
- return false;
+ return -EFAULT;
}
- err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
- if (err)
- return false;
+ ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (ret)
+ return ret;
size = param.size;
@@ -654,21 +737,20 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- goto err;
+ return -EINVAL;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
- ret = false;
- goto err;
+ return -ENOMEM;
}
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, orig_phys_addr)) {
+ ret = dma_mapping_error(dev, orig_phys_addr);
+ if (ret) {
dev_err(dev, "failed to map source buffer address\n");
- ret = false;
goto err_phys_addr;
}
@@ -700,50 +782,51 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
DMA_FROM_DEVICE);
crc32 = crc32_le(~0, addr, size);
- if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
- ret = true;
+ if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+ ret = -EIO;
err_phys_addr:
kfree(orig_addr);
-err:
return ret;
}
-static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
+static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- return true;
+
+ return 0;
}
-static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
+ int ret;
if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
- return false;
+ return -EINVAL;
}
if (test->irq_type == req_irq_type)
- return true;
+ return 0;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
- if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
- goto err;
-
- if (!pci_endpoint_test_request_irq(test))
- goto err;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
+ if (ret)
+ return ret;
- return true;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret) {
+ pci_endpoint_test_free_irq_vectors(test);
+ return ret;
+ }
-err:
- pci_endpoint_test_free_irq_vectors(test);
- return false;
+ return 0;
}
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
@@ -768,6 +851,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
+ case PCITEST_BARS:
+ ret = pci_endpoint_test_bars(test);
+ break;
case PCITEST_INTX_IRQ:
ret = pci_endpoint_test_intx_irq(test);
break;
@@ -805,10 +891,24 @@ static const struct file_operations pci_endpoint_test_fops = {
.unlocked_ioctl = pci_endpoint_test_ioctl,
};
+static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 caps;
+
+ caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
+ dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
+
+ /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
+ if (caps & CAP_UNALIGNED_ACCESS)
+ test->alignment = 0;
+}
+
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err;
+ int ret;
int id;
char name[24];
enum pci_barno bar;
@@ -847,24 +947,23 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- err = pci_enable_device(pdev);
- if (err) {
+ ret = pci_enable_device(pdev);
+ if (ret) {
dev_err(dev, "Cannot enable PCI device\n");
- return err;
+ return ret;
}
- err = pci_request_regions(pdev, DRV_MODULE_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (ret) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
pci_set_master(pdev);
- if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
+ if (ret)
goto err_disable_irq;
- }
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -879,7 +978,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->base = test->bar[test_reg_bar];
if (!test->base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
@@ -889,7 +988,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
if (id < 0) {
- err = id;
+ ret = id;
dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
@@ -897,27 +996,28 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
test->name = kstrdup(name, GFP_KERNEL);
if (!test->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_ida_remove;
}
- if (!pci_endpoint_test_request_irq(test)) {
- err = -EINVAL;
+ ret = pci_endpoint_test_request_irq(test);
+ if (ret)
goto err_kfree_test_name;
- }
+
+ pci_endpoint_test_get_capabilities(test);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
misc_device->fops = &pci_endpoint_test_fops;
- err = misc_register(misc_device);
- if (err) {
+ ret = misc_register(misc_device);
+ if (ret) {
dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
@@ -949,7 +1049,7 @@ err_disable_irq:
err_disable_pdev:
pci_disable_device(pdev);
- return err;
+ return ret;
}
static void pci_endpoint_test_remove(struct pci_dev *pdev)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 61b66e318488..7a3c34306de9 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -93,7 +93,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit; /* = 0 */
static int xpc_disengage_max_timelimit = 120;
-static struct ctl_table xpc_sys_xpc_hb[] = {
+static const struct ctl_table xpc_sys_xpc_hb[] = {
{
.procname = "hb_interval",
.data = &xpc_hb_interval,
@@ -111,7 +111,7 @@ static struct ctl_table xpc_sys_xpc_hb[] = {
.extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval},
};
-static struct ctl_table xpc_sys_xpc[] = {
+static const struct ctl_table xpc_sys_xpc[] = {
{
.procname = "disengage_timelimit",
.data = &xpc_disengage_timelimit,
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index e40b027a88e2..e5069882457e 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -23,7 +23,7 @@
#define SRAM_GRANULARITY 32
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -38,7 +38,7 @@ static ssize_t sram_read(struct file *filp, struct kobject *kobj,
}
static ssize_t sram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
@@ -83,8 +83,8 @@ static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
return -ENOMEM;
part->battr.attr.mode = S_IRUSR | S_IWUSR;
- part->battr.read = sram_read;
- part->battr.write = sram_write;
+ part->battr.read_new = sram_read;
+ part->battr.write_new = sram_write;
part->battr.size = block->size;
return device_create_bin_file(sram->dev, &part->battr);
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index f576e6a890e8..7584d0ba9396 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -27,6 +27,7 @@
struct mchp48_caps {
unsigned int size;
unsigned int page_size;
+ bool auto_disable_wel;
};
struct mchp48l640_flash {
@@ -194,9 +195,15 @@ static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
else
goto fail;
- ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
- if (ret)
- goto fail;
+ if (flash->caps->auto_disable_wel) {
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+ } else {
+ ret = mchp48l640_write_prepare(flash, false);
+ if (ret)
+ goto fail;
+ }
kfree(cmd);
return 0;
@@ -293,6 +300,13 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
static const struct mchp48_caps mchp48l640_caps = {
.size = SZ_8K,
.page_size = 32,
+ .auto_disable_wel = true,
+};
+
+static const struct mchp48_caps mb85rs128ty_caps = {
+ .size = SZ_16K,
+ .page_size = 256,
+ .auto_disable_wel = false,
};
static int mchp48l640_probe(struct spi_device *spi)
@@ -353,6 +367,10 @@ static const struct of_device_id mchp48l640_of_table[] = {
.compatible = "microchip,48l640",
.data = &mchp48l640_caps,
},
+ {
+ .compatible = "fujitsu,mb85rs128ty",
+ .data = &mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
@@ -362,6 +380,10 @@ static const struct spi_device_id mchp48l640_spi_ids[] = {
.name = "48l640",
.driver_data = (kernel_ulong_t)&mchp48l640_caps,
},
+ {
+ .name = "mb85rs128ty",
+ .driver_data = (kernel_ulong_t)&mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index f756c60a4931..fd9ec165e61a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/security.h>
struct phram_mtd_list {
struct mtd_info mtd;
@@ -410,19 +411,23 @@ static int __init init_phram(void)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&phram_driver);
if (ret)
return ret;
#ifndef MODULE
- if (phram_paramline[0])
+ if (phram_paramline[0]) {
ret = phram_setup(phram_paramline);
+ if (ret)
+ platform_driver_unregister(&phram_driver);
+ }
phram_init_called = 1;
#endif
- if (ret)
- platform_driver_unregister(&phram_driver);
-
return ret;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index dba584fa2a53..f2266145b821 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2104,7 +2104,6 @@ static void stfsm_remove(struct platform_device *pdev)
WARN_ON(mtd_device_unregister(&fsm->mtd));
}
-#ifdef CONFIG_PM_SLEEP
static int stfsmfsm_suspend(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
@@ -2120,9 +2119,8 @@ static int stfsmfsm_resume(struct device *dev)
return clk_prepare_enable(fsm->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
@@ -2136,7 +2134,7 @@ static struct platform_driver stfsm_driver = {
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
- .pm = &stfsm_pm_ops,
+ .pm = pm_sleep_ptr(&stfsm_pm_ops),
},
};
module_platform_driver(stfsm_driver);
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 217f4e69233f..82a1e7b7e4d8 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev)
priv->hbdev.np = of_get_next_child(np, NULL);
ret = of_address_to_resource(priv->hbdev.np, 0, &res);
if (ret)
- return ret;
+ goto put_node;
- if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ if (of_property_present(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
- if (IS_ERR(control))
- return PTR_ERR(control);
+ if (IS_ERR(control)) {
+ ret = PTR_ERR(control);
+ goto put_node;
+ }
ret = mux_control_select(control, 1);
if (ret) {
dev_err(dev, "Failed to select HBMC mux\n");
- return ret;
+ goto put_node;
}
priv->mux_ctrl = control;
}
priv->hbdev.map.size = resource_size(&res);
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(priv->hbdev.map.virt))
- return PTR_ERR(priv->hbdev.map.virt);
+ if (IS_ERR(priv->hbdev.map.virt)) {
+ ret = PTR_ERR(priv->hbdev.map.virt);
+ goto disable_mux;
+ }
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
@@ -226,6 +230,8 @@ release_dma:
disable_mux:
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
+put_node:
+ of_node_put(priv->hbdev.np);
return ret;
}
@@ -241,6 +247,7 @@ static void am654_hbmc_remove(struct platform_device *pdev)
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
+ of_node_put(priv->hbdev.np);
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ee7e1d908986..847c11542f02 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -404,6 +404,7 @@ out_list_del:
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
+ unsigned int memflags;
lockdep_assert_held(&mtd_table_mutex);
@@ -420,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
spin_unlock_irqrestore(&old->queue_lock, flags);
/* freeze+quiesce queue to ensure all requests are flushed */
- blk_mq_freeze_queue(old->rq);
+ memflags = blk_mq_freeze_queue(old->rq);
blk_mq_quiesce_queue(old->rq);
blk_mq_unquiesce_queue(old->rq);
- blk_mq_unfreeze_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq, memflags);
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19e1291ac4d5..da1586a36574 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,7 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-
+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index f66385faf631..0dc2ea4fc857 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
+ *retlen = ops.retlen;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
new file mode 100644
index 000000000000..e0ed25b5afea
--- /dev/null
+++ b/drivers/mtd/nand/qpic_common.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+
+/**
+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
+ * @nandc: qpic nand controller
+ *
+ * This function frees the bam transaction memory
+ */
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ kfree(bam_txn);
+}
+EXPORT_SYMBOL(qcom_free_bam_transaction);
+
+/**
+ * qcom_alloc_bam_transaction() - allocate BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate and initialize the BAM transaction structure
+ */
+struct bam_transaction *
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
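+ /*
+ * Single allocation: the transaction struct is followed by the
+ * per-codeword command elements and the command/data SG lists.
+ */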
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+
+ bam_txn->data_sgl = bam_txn_buf;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
+
+/**
+ * qcom_clear_bam_transaction() - Clears the BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will clear the BAM transaction indexes.
+ */
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->supports_bam)
+ return;
+
+ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ bam_txn->last_data_desc = NULL;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_clear_bam_transaction);
+
+/**
+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
+ * @data: data pointer
+ *
+ * This function is a callback for DMA descriptor completion
+ */
+void qcom_qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ complete(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
+
+/**
+ * qcom_nandc_dev_to_mem() - DMA sync the register read buffer for CPU or device
+ * @nandc: qpic nand controller
+ * @is_cpu: true to sync for CPU access, false to sync for device access
+ *
+ * This function DMA syncs the register read buffer for CPU or device access
+ */
+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+{
+ if (!nandc->props->supports_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
+
+/**
+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
+ * @nandc: qpic nand controller
+ * @chan: dma channel
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function maps the scatter gather list for DMA transfer and forms the
+ * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
+ * descriptor queue, which will be submitted to the DMA engine.
+ */
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
+
+/**
+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the command descriptor for BAM DMA
+ * which will be used for NAND register reads and writes.
+ */
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE | DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
+
+/**
+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the data descriptor for BAM DMA which
+ * will be used for NAND data reads and writes.
+ */
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+ * is not set, form the DMA descriptor
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
+
+/**
+ * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: adm dma transaction size in bytes
+ * @flow_control: flow controller
+ *
+ * This function will prepare a descriptor for ADM DMA
+ */
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct qcom_adm_peripheral_config periph_conf = {};
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_slave_config slave_conf = {0};
+ enum dma_transfer_direction dir_eng;
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
+
+/**
+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
+ * @nandc: qpic nand controller
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer.
+ */
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_read_reg_dma);
+
+/**
+ * qcom_write_reg_dma() - write a given number of registers
+ * @nandc: qpic nand controller
+ * @vaddr: contiguous memory from where register value will
+ * be written
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to write a given number of
+ * contiguous registers
+ */
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_write_reg_dma);
+
+/**
+ * qcom_read_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ */
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_read_data_dma);
+
+/**
+ * qcom_write_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ */
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_write_data_dma);
+
+/**
+ * qcom_submit_descs() - submit dma descriptor
+ * @nandc: qpic nand controller
+ *
+ * This function will submit all the prepared DMA descriptors,
+ * both command and data
+ */
+int qcom_submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->supports_bam) {
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ ret = -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->supports_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_submit_descs);
+
+/**
+ * qcom_clear_read_regs() - reset the read register buffer
+ * @nandc: qpic nand controller
+ *
+ * This function resets the register read buffer for the next NAND operation
+ */
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ qcom_nandc_dev_to_mem(nandc, false);
+}
+EXPORT_SYMBOL(qcom_clear_read_regs);
+
+/**
+ * qcom_nandc_unalloc() - unallocate qpic nand controller
+ * @nandc: qpic nand controller
+ *
+ * This function will free the memory allocated for the qpic nand controller
+ */
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->supports_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+EXPORT_SYMBOL(qcom_nandc_unalloc);
+
+/**
+ * qcom_nandc_alloc() - Allocate qpic nand controller
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate memory for qpic nand controller
+ */
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+ * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->supports_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+ * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_nandc_alloc);
+
+MODULE_DESCRIPTION("QPIC controller common api");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index d0aaccf72d78..b8035df8f732 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -279,8 +279,8 @@ config MTD_NAND_SH_FLCTL
config MTD_NAND_DAVINCI
tristate "DaVinci/Keystone NAND controller"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on COMPILE_TEST || ARCH_DAVINCI || ARCH_KEYSTONE
+ depends on HAS_IOMEM && TI_AEMIF
help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
@@ -454,6 +454,14 @@ config MTD_NAND_TS72XX
help
Enables support for NAND controller on ts72xx SBCs.
+config MTD_NAND_NUVOTON_MA35
+ tristate "Nuvoton MA35 SoC NAND controller"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the NAND controller found on
+ the Nuvoton MA35 series SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d0b0e6b83568..99e79c448847 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
+obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 9c253a511e45..fea5b6119956 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2342,6 +2342,11 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
status = brcmnand_waitfunc(chip);
+ if (status < 0) {
+ ret = status;
+ goto out;
+ }
+
if (status & NAND_STATUS_FAIL) {
dev_info(ctrl->dev, "program failed at %llx\n",
(unsigned long long)addr);
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 1f8354acfb50..3986553881d0 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -10,9 +10,11 @@
* Dirk Behme <Dirk.Behme@gmail.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
@@ -43,6 +45,9 @@
#define MASK_ALE 0x08
#define MASK_CLE 0x10
+#define MAX_TSU_PS 3000 /* Input setup time in ps */
+#define MAX_TH_PS 1600 /* Input hold time in ps */
+
struct davinci_nand_pdata {
uint32_t mask_ale;
uint32_t mask_cle;
@@ -66,6 +71,7 @@ struct davinci_nand_pdata {
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * on-die == NAND_ECC_ENGINE_TYPE_ON_DIE
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
@@ -117,6 +123,9 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
+
+ struct clk *clk;
+ struct aemif_device *aemif;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -479,6 +488,44 @@ static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
.free = hwecc4_ooblayout_small_free,
};
+static int hwecc4_ooblayout_large_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ if (section >= nregions)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_large_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ /* First region is used for BBT */
+ if (section >= (nregions - 1))
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16);
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_large_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_large_ecc,
+ .free = hwecc4_ooblayout_large_free,
+};
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -525,6 +572,8 @@ nand_davinci_get_pdata(struct platform_device *pdev)
pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ if (!strncmp("on-die", mode, 6))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
}
if (!device_property_read_u32(&pdev->dev,
"ti,davinci-ecc-bits", &prop))
@@ -580,6 +629,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
pdata->ecc_bits = 0;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
@@ -638,9 +688,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd,
- nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM)
+ mtd_set_ooblayout(mtd, &hwecc4_large_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
} else {
return -EIO;
}
@@ -724,7 +777,7 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
case NAND_OP_WAITRDY_INSTR:
timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
- status, status & BIT(0), 100,
+ status, status & BIT(0), 5,
timeout_us);
if (ret)
return ret;
@@ -764,9 +817,82 @@ static int davinci_nand_exec_op(struct nand_chip *chip,
return 0;
}
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
+
+static int davinci_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ const struct nand_sdr_timings *sdr;
+ struct aemif_cs_timings timings;
+ s32 cfg, min, cyc_ns;
+ int ret;
+
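+ /* AEMIF clock period in ns, used by TO_CYCLES() to convert the ps timings below */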
+ cyc_ns = 1000000000 / clk_get_rate(info->clk);
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ cfg = TO_CYCLES(sdr->tCLR_min, cyc_ns) - 1;
+ timings.rsetup = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tREA_max + MAX_TSU_PS, cyc_ns),
+ TO_CYCLES(sdr->tRP_min, cyc_ns)) - 1;
+ timings.rstrobe = cfg > 0 ? cfg : 0;
+
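+ /* Extend the read strobe until setup + strobe covers tCEA plus the input setup time */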
+ min = TO_CYCLES(sdr->tCEA_max + MAX_TSU_PS, cyc_ns) - 2;
+ while ((s32)(timings.rsetup + timings.rstrobe) < min)
+ timings.rstrobe++;
+
+ cfg = TO_CYCLES((s32)(MAX_TH_PS - sdr->tCHZ_max), cyc_ns) - 1;
+ timings.rhold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tRC_min, cyc_ns) - 3;
+ while ((s32)(timings.rsetup + timings.rstrobe + timings.rhold) < min)
+ timings.rhold++;
+
+ cfg = TO_CYCLES((s32)(sdr->tRHZ_max - (timings.rhold + 1) * cyc_ns * 1000), cyc_ns);
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCHZ_max, cyc_ns)) - 1;
+ timings.ta = cfg > 0 ? cfg : 0;
+
+ cfg = TO_CYCLES(sdr->tWP_min, cyc_ns) - 1;
+ timings.wstrobe = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLS_min, cyc_ns), TO_CYCLES(sdr->tALS_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCS_min, cyc_ns)) - 1;
+ timings.wsetup = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tDS_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe) < min)
+ timings.wstrobe++;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLH_min, cyc_ns), TO_CYCLES(sdr->tALH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tDH_min, cyc_ns)) - 1;
+ timings.whold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tWC_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe + timings.whold) < min)
+ timings.whold++;
+
+ dev_dbg(&info->pdev->dev, "RSETUP %x RSTROBE %x RHOLD %x\n",
+ timings.rsetup, timings.rstrobe, timings.rhold);
+ dev_dbg(&info->pdev->dev, "TA %x\n", timings.ta);
+ dev_dbg(&info->pdev->dev, "WSETUP %x WSTROBE %x WHOLD %x\n",
+ timings.wsetup, timings.wstrobe, timings.whold);
+
+ ret = aemif_check_cs_timings(&timings);
+ if (ret || chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return ret;
+
+ return aemif_set_cs_timings(info->aemif, info->core_chipsel, &timings);
+}
+
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
.exec_op = davinci_nand_exec_op,
+ .setup_interface = davinci_nand_setup_interface,
};
static int nand_davinci_probe(struct platform_device *pdev)
@@ -822,9 +948,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
+ info->clk = devm_clk_get_enabled(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get clock");
+
info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
+ info->aemif = dev_get_drvdata(pdev->dev.parent);
mtd = nand_to_mtd(&info->chip);
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
new file mode 100644
index 000000000000..c23b537948d5
--- /dev/null
+++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* NFI Registers */
+#define MA35_NFI_REG_DMACTL 0x400
+#define DMA_EN BIT(0)
+#define DMA_RST BIT(1)
+#define DMA_BUSY BIT(9)
+
+#define MA35_NFI_REG_DMASA 0x408
+#define MA35_NFI_REG_GCTL 0x800
+#define GRST BIT(0)
+#define NAND_EN BIT(3)
+
+#define MA35_NFI_REG_NANDCTL 0x8A0
+#define SWRST BIT(0)
+#define DMA_R_EN BIT(1)
+#define DMA_W_EN BIT(2)
+#define ECC_CHK BIT(7)
+#define PROT3BEN BIT(8)
+#define PSIZE_2K BIT(16)
+#define PSIZE_4K BIT(17)
+#define PSIZE_8K GENMASK(17, 16)
+#define PSIZE_MASK GENMASK(17, 16)
+#define BCH_T24 BIT(18)
+#define BCH_T8 BIT(20)
+#define BCH_T12 BIT(21)
+#define BCH_NONE (0x0)
+#define BCH_MASK GENMASK(22, 18)
+#define ECC_EN BIT(23)
+#define DISABLE_CS0 BIT(25)
+
+#define MA35_NFI_REG_NANDINTEN 0x8A8
+#define MA35_NFI_REG_NANDINTSTS 0x8AC
+#define INT_DMA BIT(0)
+#define INT_ECC BIT(2)
+#define INT_RB0 BIT(10)
+
+#define MA35_NFI_REG_NANDCMD 0x8B0
+#define MA35_NFI_REG_NANDADDR 0x8B4
+#define ENDADDR BIT(31)
+
+#define MA35_NFI_REG_NANDDATA 0x8B8
+#define MA35_NFI_REG_NANDRACTL 0x8BC
+#define MA35_NFI_REG_NANDECTL 0x8C0
+#define ENABLE_WP 0x0
+#define DISABLE_WP BIT(0)
+
+#define MA35_NFI_REG_NANDECCES0 0x8D0
+#define ECC_STATUS_MASK GENMASK(1, 0)
+#define ECC_ERR_CNT_MASK GENMASK(4, 0)
+
+#define MA35_NFI_REG_NANDECCEA0 0x900
+#define MA35_NFI_REG_NANDECCED0 0x960
+#define MA35_NFI_REG_NANDRA0 0xA00
+
+/* Definitions for the BCH hardware ECC engine */
+/* total padding bytes per 512/1024-byte data segment */
+#define MA35_BCH_PADDING_512 32
+#define MA35_BCH_PADDING_1024 64
+/* BCH parity code length for a 512-byte data segment */
+#define MA35_PARITY_BCH8 15
+#define MA35_PARITY_BCH12 23
+/* BCH parity code length for a 1024-byte data segment */
+#define MA35_PARITY_BCH24 45
+
+#define MA35_MAX_NSELS (2)
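+/*
+ * The upper halfword of the first redundant-area word stays at 0xffff on an
+ * erased page; the write path clears it before programming, so a non-zero
+ * value here means the page has never been written.
+ */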
+#define PREFIX_RA_IS_EMPTY(reg) FIELD_GET(GENMASK(31, 16), (reg))
+
+struct ma35_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u32 eccstatus;
+ u8 nsels;
+ u8 sels[] __counted_by(nsels);
+};
+
+struct ma35_nand_info {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion complete;
+ struct list_head chips;
+
+ u8 *buffer;
+ unsigned long assigned_cs;
+};
+
+static inline struct ma35_nand_chip *to_ma35_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct ma35_nand_chip, chip);
+}
+
+static int ma35_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = chip->ecc.total;
+ oob_region->offset = mtd->oobsize - oob_region->length;
+
+ return 0;
+}
+
+static int ma35_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - chip->ecc.total - 2;
+ oob_region->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ma35_ooblayout_ops = {
+ .free = ma35_ooblayout_free,
+ .ecc = ma35_ooblayout_ecc,
+};
+
+static inline void ma35_clear_spare(struct nand_chip *chip, int size)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ int i;
+
+ for (i = 0; i < size / 4; i++)
+ writel(0xff, nand->regs + MA35_NFI_REG_NANDRA0);
+}
+
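+/*
+ * Extract @size bytes from the 32-bit redundant-area word at @offset; bytes
+ * are copied lowest-first, or highest-first when @swap is set.
+ */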
+static inline void read_remaining_bytes(struct ma35_nand_info *nand, u32 *buf,
+ u32 offset, int size, int swap)
+{
+ u32 value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset);
+ u8 *ptr = (u8 *)buf;
+ int i, shift;
+
+ for (i = 0; i < size; i++) {
+ shift = (swap ? 3 - i : i) * 8;
+ ptr[i] = (value >> shift) & 0xff;
+ }
+}
+
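+/*
+ * Copy @size bytes of OOB data from the controller redundant-area SRAM,
+ * starting at byte @offset, into @buf.
+ */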
+static inline void ma35_read_spare(struct nand_chip *chip, int size, u32 *buf, u32 offset)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 off = round_down(offset, 4);
+ int len = offset % 4;
+ int i;
+
+ if (len) {
+ read_remaining_bytes(nand, buf, off, 4 - len, 1);
+ off += 4;
+ size -= (4 - len);
+ }
+
+ for (i = 0; i < size / 4; i++)
+ *buf++ = readl(nand->regs + MA35_NFI_REG_NANDRA0 + off + (i * 4));
+
+ read_remaining_bytes(nand, buf, off + (size & ~3), size % 4, 0);
+}
+
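+/*
+ * Copy @size bytes of OOB data from @buf into the controller redundant-area
+ * SRAM, whole words first and then any trailing partial word.
+ */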
+static inline void ma35_write_spare(struct nand_chip *chip, int size, u32 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 value;
+ int i, j;
+ u8 *ptr;
+
+ for (i = 0, j = 0; i < size / 4; i++, j += 4)
+ writel(*buf++, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+
+ ptr = (u8 *)buf;
+ switch (size % 4) {
+ case 1:
+ writel(*ptr, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 2:
+ value = *ptr | (*(ptr + 1) << 8);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 3:
+ value = *ptr | (*(ptr + 1) << 8) | (*(ptr + 2) << 16);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ma35_nand_target_enable(struct nand_chip *chip, unsigned int cs)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 reg;
+
+ switch (cs) {
+ case 0:
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~DISABLE_CS0, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ reg |= INT_RB0;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ma35_nand_hwecc_init(struct nand_chip *chip, struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ nand->buffer = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!nand->buffer)
+ return -ENOMEM;
+
+ /* Redundant area size */
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ /* Protect redundant 3 bytes and disable ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= (PROT3BEN | ECC_CHK);
+ reg &= ~ECC_EN;
+
+ if (chip->ecc.strength != 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
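+ /* each NANDECCESx status register reports on four ECC steps */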
+ nvtnand->eccstatus = (chip->ecc.steps < 4) ? 1 : chip->ecc.steps / 4;
+ /* Set BCH algorithm */
+ reg &= ~BCH_MASK;
+ switch (chip->ecc.strength) {
+ case 8:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH8;
+ reg |= BCH_T8;
+ break;
+ case 12:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH12;
+ reg |= BCH_T12;
+ break;
+ case 24:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH24;
+ reg |= BCH_T24;
+ break;
+ default:
+ dev_err(nand->dev, "ECC strength unsupported\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.bytes = chip->ecc.total / chip->ecc.steps;
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+ return 0;
+}
+
+/* Correct data by BCH algorithm */
+static void ma35_nfi_correct(struct nand_chip *chip, u8 index,
+ u8 err_cnt, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 temp_data[24], temp_addr[24];
+ u32 padding_len, parity_len;
+ u32 value, offset, remain;
+ u32 err_data[6];
+ u8 i, j;
+
+ /* Configurations */
+ if (chip->ecc.strength <= 8) {
+ parity_len = MA35_PARITY_BCH8;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 12) {
+ parity_len = MA35_PARITY_BCH12;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 24) {
+ parity_len = MA35_PARITY_BCH24;
+ padding_len = MA35_BCH_PADDING_1024;
+ } else {
+ dev_err(nand->dev, "Invalid BCH_TSEL = 0x%lx\n",
+ readl(nand->regs + MA35_NFI_REG_NANDCTL) & BCH_MASK);
+ return;
+ }
+
+ /*
+ * Read the valid BCH_ECC_DATAx registers and parse them into
+ * temp_data[]; work out how many registers hold valid data first,
+ * since each register packs 4 error bytes
+ */
+ j = (err_cnt + 3) / 4;
+ j = (j > 6) ? 6 : j;
+ for (i = 0; i < j; i++)
+ err_data[i] = readl(nand->regs + MA35_NFI_REG_NANDECCED0 + i * 4);
+
+ for (i = 0; i < j; i++) {
+ temp_data[i * 4 + 0] = err_data[i] & 0xff;
+ temp_data[i * 4 + 1] = (err_data[i] >> 8) & 0xff;
+ temp_data[i * 4 + 2] = (err_data[i] >> 16) & 0xff;
+ temp_data[i * 4 + 3] = (err_data[i] >> 24) & 0xff;
+ }
+
+ /*
+ * Read the valid REG_BCH_ECC_ADDRx registers and parse them into
+ * temp_addr[]; work out how many registers hold valid data first,
+ * since each register packs 2 error addresses
+ */
+ j = (err_cnt + 1) / 2;
+ j = (j > 12) ? 12 : j;
+ for (i = 0; i < j; i++) {
+ temp_addr[i * 2 + 0] = readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ & 0x07ff;
+ temp_addr[i * 2 + 1] = (readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ >> 16) & 0x07ff;
+ }
+
+ /* point to the beginning of the field that holds the data error */
+ addr += index * chip->ecc.size;
+
+ /* correct each error byte */
+ for (i = 0; i < err_cnt; i++) {
+ u32 corrected_index = temp_addr[i];
+
+ if (corrected_index < chip->ecc.size) {
+ /* for wrong data in field */
+ *(addr + corrected_index) ^= temp_data[i];
+ } else if (corrected_index < (chip->ecc.size + 3)) {
+ /* for wrong first-3-bytes in redundancy area */
+ corrected_index -= chip->ecc.size;
+ temp_addr[i] += (parity_len * index); /* field offset */
+
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ value ^= temp_data[i] << (8 * corrected_index);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0);
+ } else {
+ /*
+ * for wrong parity code in redundancy area
+ * ERR_ADDRx = [data in field] + [3 bytes] + [xx] + [parity code]
+ * |<-- padding bytes -->|
+ * The ERR_ADDRx for last parity code always = field size + padding size.
+ * The first parity code = field size + padding size - parity code length.
+ * For example, for BCH T12, the first parity code = 512 + 32 - 23 = 521.
+ * That is, the error byte offset within the parity code is
+ * ERR_ADDRx - (field size + padding size - parity code length).
+ */
+ corrected_index -= (chip->ecc.size + padding_len - parity_len);
+
+ /*
+ * final address = first parity code of first field +
+ * offset of fields +
+ * offset within field
+ */
+ offset = (readl(nand->regs + MA35_NFI_REG_NANDRACTL) & 0x1ff) -
+ (parity_len * chip->ecc.steps) +
+ (parity_len * index) + corrected_index;
+
+ remain = offset % 4;
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ value ^= temp_data[i] << (8 * remain);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ }
+ }
+}
+
+static int ma35_nfi_ecc_check(struct nand_chip *chip, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int maxbitflips = 0;
+ int cnt = 0;
+ u32 status;
+ int i, j;
+
+ for (j = 0; j < nvtnand->eccstatus; j++) {
+ status = readl(nand->regs + MA35_NFI_REG_NANDECCES0 + j * 4);
+ if (!status)
+ continue;
+
+ for (i = 0; i < 4; i++) {
+ if ((status & ECC_STATUS_MASK) == 0x01) {
+ /* Correctable error */
+ cnt = (status >> 2) & ECC_ERR_CNT_MASK;
+ ma35_nfi_correct(chip, j * 4 + i, cnt, addr);
+ maxbitflips = max_t(u32, maxbitflips, cnt);
+ mtd->ecc_stats.corrected += cnt;
+ } else {
+ /* Uncorrectable error */
+ mtd->ecc_stats.failed++;
+ dev_err(nand->dev, "uncorrectable error! 0x%4x\n", status);
+ return -EBADMSG;
+ }
+ status >>= 8;
+ }
+ }
+ return maxbitflips;
+}
+
+static void ma35_nand_dmac_init(struct ma35_nand_info *nand)
+{
+ /* DMAC reset and enable */
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+
+ /* Clear DMA finished flag and enable */
+ writel(INT_DMA | INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTEN);
+}
+
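+/*
+ * Write data to the chip: a full page is transferred by the DMA engine,
+ * any other length is written byte by byte through the data port.
+ */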
+static int ma35_nand_do_write(struct nand_chip *chip, const u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ dma_addr_t dma_addr;
+ int ret = 0, i;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ writel(addr[i], nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* To mark this page as dirty. */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (reg & 0xffff0000)
+ writel(reg & 0xffff, nand->regs + MA35_NFI_REG_NANDRA0);
+
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+ dma_sync_single_for_device(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_W_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "write timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
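+/*
+ * Read data from the chip: a full page is transferred by the DMA engine and
+ * the ECC result (max bitflips or -EBADMSG) is returned, any other length is
+ * read byte by byte through the data port.
+ */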
+static int ma35_nand_do_read(struct nand_chip *chip, u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0, cnt = 0, i;
+ dma_addr_t dma_addr;
+ u32 reg;
+
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ addr[i] = readb(nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* Setup and start DMA using dma_addr */
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_R_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "read timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_FROM_DEVICE);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (reg & INT_ECC) {
+ cnt = ma35_nfi_ecc_check(chip, addr);
+ if (cnt < 0) {
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | SWRST,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ }
+ writel(INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ }
+
+ ret = ret < 0 ? ret : cnt;
+ return ret;
+}
+
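+/*
+ * Build a full page in nand->buffer: fill it with 0xff, copy in only the ECC
+ * steps covering the requested range and clear their per-step bits in
+ * NANDRACTL.
+ */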
+static int ma35_nand_format_subpage(struct nand_chip *chip, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 page_off = round_down(offset, chip->ecc.size);
+ u32 end = DIV_ROUND_UP(page_off + len, chip->ecc.size);
+ u32 start = page_off / chip->ecc.size;
+ u32 reg;
+ int i;
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL) | 0xffff0000;
+ memset(nand->buffer, 0xff, mtd->writesize);
+ for (i = start; i < end; i++) {
+ memcpy(nand->buffer + i * chip->ecc.size,
+ buf + i * chip->ecc.size, chip->ecc.size);
+ reg &= ~(1 << (i + 16));
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg, oobpoi, index;
+ int i;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ ma35_nand_format_subpage(chip, offset, data_len, buf);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, nand->buffer, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
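+ /* read back the parity generated for the programmed steps into oob_poi */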
+ oobpoi = mtd->oobsize - chip->ecc.total;
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ index = i * chip->ecc.bytes;
+ if (!(reg & (1 << (i + 16)))) {
+ ma35_read_spare(chip, chip->ecc.bytes,
+ (u32 *)(chip->oob_poi + oobpoi + index),
+ oobpoi + index);
+ }
+ }
+
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, buf, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ ma35_read_spare(chip, chip->ecc.total,
+ (u32 *)(chip->oob_poi + (mtd->oobsize - chip->ecc.total)),
+ mtd->oobsize - chip->ecc.total);
+
+ /* Disable HW ECC engine */
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_read_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, u8 *buf, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
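+ /* copy the on-flash OOB into the controller redundant area for the ECC check */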
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, offset, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf + offset, data_len);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf, mtd->writesize);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_oob_hwecc(struct nand_chip *chip, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ /* copy OOB data to controller redundant area for page read */
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg))
+ memset((void *)chip->oob_poi, 0xff, mtd->oobsize);
+
+ return 0;
+}
+
+static inline void ma35_hw_init(struct ma35_nand_info *nand)
+{
+ u32 reg;
+
+ /* Disable flash wp. */
+ writel(DISABLE_WP, nand->regs + MA35_NFI_REG_NANDECTL);
+
+ /* resets the internal state machine and counters */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= SWRST;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+}
+
+static irqreturn_t ma35_nand_irq(int irq, void *id)
+{
+ struct ma35_nand_info *nand = (struct ma35_nand_info *)id;
+ u32 isr;
+
+ isr = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (isr & INT_DMA) {
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ complete(&nand->complete);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int ma35_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16 bits bus width not supported");
+ return -EINVAL;
+ }
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL) & (~PSIZE_MASK);
+ switch (mtd->writesize) {
+ case SZ_2K:
+ writel(reg | PSIZE_2K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_4K:
+ writel(reg | PSIZE_4K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_8K:
+ writel(reg | PSIZE_8K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ default:
+ dev_err(dev, "Unsupported page size");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+ chip->ecc.write_subpage = ma35_nand_write_subpage_hwecc;
+ chip->ecc.write_page = ma35_nand_write_page_hwecc;
+ chip->ecc.read_subpage = ma35_nand_read_subpage_hwecc;
+ chip->ecc.read_page = ma35_nand_read_page_hwecc;
+ chip->ecc.read_oob = ma35_nand_read_oob_hwecc;
+ return ma35_nand_hwecc_init(chip, nand);
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ma35_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(instr->ctx.cmd.opcode, nand->regs + MA35_NFI_REG_NANDCMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ if (i == (instr->ctx.addr.naddrs - 1))
+ writel(instr->ctx.addr.addrs[i] | ENDADDR,
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ else
+ writel(instr->ctx.addr.addrs[i],
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ret = ma35_nand_do_read(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ret = ma35_nand_do_write(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ return readl_poll_timeout(nand->regs + MA35_NFI_REG_NANDINTSTS, status,
+ status & INT_RB0, 20,
+ instr->ctx.waitrdy.timeout_ms * MSEC_PER_SEC);
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ma35_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret = 0;
+ u32 i;
+
+ if (check_only)
+ return 0;
+
+ ma35_nand_target_enable(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ma35_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops ma35_nfc_ops = {
+ .attach_chip = ma35_nand_attach_chip,
+ .exec_op = ma35_nfc_exec_op,
+};
+
+static int ma35_nand_chip_init(struct device *dev, struct ma35_nand_info *nand,
+ struct device_node *np)
+{
+ struct ma35_nand_chip *nvtnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ int ret;
+ u32 cs;
+ int i;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (!nsels || nsels > MA35_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ nvtnand = devm_kzalloc(dev, struct_size(nvtnand, sels, nsels),
+ GFP_KERNEL);
+ if (!nvtnand)
+ return -ENOMEM;
+
+ nvtnand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (cs >= MA35_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nand->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ nvtnand->sels[i] = cs;
+ }
+
+ chip = &nvtnand->chip;
+ chip->controller = &nand->controller;
+
+ nand_set_flash_node(chip, np);
+ nand_set_controller_data(chip, nand);
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ mtd_set_ooblayout(mtd, &ma35_ooblayout_ops);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nvtnand->node, &nand->chips);
+
+ return 0;
+}
+
+static void ma35_chips_cleanup(struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
+ chip = &nvtnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&nvtnand->node);
+ }
+}
+
+static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
+{
+ struct device_node *np = dev->of_node, *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = ma35_nand_chip_init(dev, nand, nand_np);
+ if (ret) {
+ ma35_chips_cleanup(nand);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ma35_nand_probe(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand;
+ int ret = 0;
+
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand_controller_init(&nand->controller);
+ INIT_LIST_HEAD(&nand->chips);
+ nand->controller.ops = &ma35_nfc_ops;
+
+ init_completion(&nand->complete);
+
+ nand->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nand->regs))
+ return PTR_ERR(nand->regs);
+
+ nand->dev = &pdev->dev;
+
+ nand->clk = devm_clk_get_enabled(&pdev->dev, "nand_gate");
+ if (IS_ERR(nand->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(nand->clk),
+ "failed to find NAND clock\n");
+
+ nand->irq = platform_get_irq(pdev, 0);
+ if (nand->irq < 0)
+ return dev_err_probe(&pdev->dev, nand->irq,
+ "failed to get platform irq\n");
+
+ ret = devm_request_irq(&pdev->dev, nand->irq, ma35_nand_irq,
+ IRQF_TRIGGER_HIGH, "ma35d1-nand-controller", nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request NAND irq\n");
+ return -ENXIO;
+ }
+
+ platform_set_drvdata(pdev, nand);
+
+ writel(GRST | NAND_EN, nand->regs + MA35_NFI_REG_GCTL);
+ ma35_hw_init(nand);
+ ret = ma35_nand_chips_init(&pdev->dev, nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init NAND chips\n");
+ clk_disable(nand->clk);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ma35_nand_remove(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand = platform_get_drvdata(pdev);
+
+ ma35_chips_cleanup(nand);
+}
+
+static const struct of_device_id ma35_nand_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-nand-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35_nand_of_match);
+
+static struct platform_driver ma35_nand_driver = {
+ .driver = {
+ .name = "ma35d1-nand-controller",
+ .of_match_table = ma35_nand_of_match,
+ },
+ .probe = ma35_nand_probe,
+ .remove = ma35_nand_remove,
+};
+
+module_platform_driver(ma35_nand_driver);
+
+MODULE_DESCRIPTION("Nuvoton ma35 NAND driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 636bba2528bf..d2d2aeee42a7 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -15,431 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-/* NANDc reg offsets */
-#define NAND_FLASH_CMD 0x00
-#define NAND_ADDR0 0x04
-#define NAND_ADDR1 0x08
-#define NAND_FLASH_CHIP_SELECT 0x0c
-#define NAND_EXEC_CMD 0x10
-#define NAND_FLASH_STATUS 0x14
-#define NAND_BUFFER_STATUS 0x18
-#define NAND_DEV0_CFG0 0x20
-#define NAND_DEV0_CFG1 0x24
-#define NAND_DEV0_ECC_CFG 0x28
-#define NAND_AUTO_STATUS_EN 0x2c
-#define NAND_DEV1_CFG0 0x30
-#define NAND_DEV1_CFG1 0x34
-#define NAND_READ_ID 0x40
-#define NAND_READ_STATUS 0x44
-#define NAND_DEV_CMD0 0xa0
-#define NAND_DEV_CMD1 0xa4
-#define NAND_DEV_CMD2 0xa8
-#define NAND_DEV_CMD_VLD 0xac
-#define SFLASHC_BURST_CFG 0xe0
-#define NAND_ERASED_CW_DETECT_CFG 0xe8
-#define NAND_ERASED_CW_DETECT_STATUS 0xec
-#define NAND_EBI2_ECC_BUF_CFG 0xf0
-#define FLASH_BUF_ACC 0x100
-
-#define NAND_CTRL 0xf00
-#define NAND_VERSION 0xf08
-#define NAND_READ_LOCATION_0 0xf20
-#define NAND_READ_LOCATION_1 0xf24
-#define NAND_READ_LOCATION_2 0xf28
-#define NAND_READ_LOCATION_3 0xf2c
-#define NAND_READ_LOCATION_LAST_CW_0 0xf40
-#define NAND_READ_LOCATION_LAST_CW_1 0xf44
-#define NAND_READ_LOCATION_LAST_CW_2 0xf48
-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-
-/* dummy register offsets, used by write_reg_dma */
-#define NAND_DEV_CMD1_RESTORE 0xdead
-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-
-/* NAND_FLASH_CMD bits */
-#define PAGE_ACC BIT(4)
-#define LAST_PAGE BIT(5)
-
-/* NAND_FLASH_CHIP_SELECT bits */
-#define NAND_DEV_SEL 0
-#define DM_EN BIT(2)
-
-/* NAND_FLASH_STATUS bits */
-#define FS_OP_ERR BIT(4)
-#define FS_READY_BSY_N BIT(5)
-#define FS_MPU_ERR BIT(8)
-#define FS_DEVICE_STS_ERR BIT(16)
-#define FS_DEVICE_WP BIT(23)
-
-/* NAND_BUFFER_STATUS bits */
-#define BS_UNCORRECTABLE_BIT BIT(8)
-#define BS_CORRECTABLE_ERR_MSK 0x1f
-
-/* NAND_DEVn_CFG0 bits */
-#define DISABLE_STATUS_AFTER_WRITE 4
-#define CW_PER_PAGE 6
-#define UD_SIZE_BYTES 9
-#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-#define ECC_PARITY_SIZE_BYTES_RS 19
-#define SPARE_SIZE_BYTES 23
-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
-#define NUM_ADDR_CYCLES 27
-#define STATUS_BFR_READ 30
-#define SET_RD_MODE_AFTER_STATUS 31
-
-/* NAND_DEVn_CFG0 bits */
-#define DEV0_CFG1_ECC_DISABLE 0
-#define WIDE_FLASH 1
-#define NAND_RECOVERY_CYCLES 2
-#define CS_ACTIVE_BSY 5
-#define BAD_BLOCK_BYTE_NUM 6
-#define BAD_BLOCK_IN_SPARE_AREA 16
-#define WR_RD_BSY_GAP 17
-#define ENABLE_BCH_ECC 27
-
-/* NAND_DEV0_ECC_CFG bits */
-#define ECC_CFG_ECC_DISABLE 0
-#define ECC_SW_RESET 1
-#define ECC_MODE 4
-#define ECC_PARITY_SIZE_BYTES_BCH 8
-#define ECC_NUM_DATA_BYTES 16
-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-#define ECC_FORCE_CLK_OPEN 30
-
-/* NAND_DEV_CMD1 bits */
-#define READ_ADDR 0
-
-/* NAND_DEV_CMD_VLD bits */
-#define READ_START_VLD BIT(0)
-#define READ_STOP_VLD BIT(1)
-#define WRITE_START_VLD BIT(2)
-#define ERASE_START_VLD BIT(3)
-#define SEQ_READ_START_VLD BIT(4)
-
-/* NAND_EBI2_ECC_BUF_CFG bits */
-#define NUM_STEPS 0
-
-/* NAND_ERASED_CW_DETECT_CFG bits */
-#define ERASED_CW_ECC_MASK 1
-#define AUTO_DETECT_RES 0
-#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
-
-/* NAND_ERASED_CW_DETECT_STATUS bits */
-#define PAGE_ALL_ERASED BIT(7)
-#define CODEWORD_ALL_ERASED BIT(6)
-#define PAGE_ERASED BIT(5)
-#define CODEWORD_ERASED BIT(4)
-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-
-/* NAND_READ_LOCATION_n bits */
-#define READ_LOCATION_OFFSET 0
-#define READ_LOCATION_SIZE 16
-#define READ_LOCATION_LAST 31
-
-/* Version Mask */
-#define NAND_VERSION_MAJOR_MASK 0xf0000000
-#define NAND_VERSION_MAJOR_SHIFT 28
-#define NAND_VERSION_MINOR_MASK 0x0fff0000
-#define NAND_VERSION_MINOR_SHIFT 16
-
-/* NAND OP_CMDs */
-#define OP_PAGE_READ 0x2
-#define OP_PAGE_READ_WITH_ECC 0x3
-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
-#define OP_PAGE_READ_ONFI_READ 0x5
-#define OP_PROGRAM_PAGE 0x6
-#define OP_PAGE_PROGRAM_WITH_ECC 0x7
-#define OP_PROGRAM_PAGE_SPARE 0x9
-#define OP_BLOCK_ERASE 0xa
-#define OP_CHECK_STATUS 0xc
-#define OP_FETCH_ID 0xb
-#define OP_RESET_DEVICE 0xd
-
-/* Default Value for NAND_DEV_CMD_VLD */
-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
- ERASE_START_VLD | SEQ_READ_START_VLD)
-
-/* NAND_CTRL bits */
-#define BAM_MODE_EN BIT(0)
-
-/*
- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
- * the driver calls the chunks 'step' or 'codeword' interchangeably
- */
-#define NANDC_STEP_SIZE 512
-
-/*
- * the largest page size we support is 8K, this will have 16 steps/codewords
- * of 512 bytes each
- */
-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
-
-/* we read at most 3 registers per codeword scan */
-#define MAX_REG_RD (3 * MAX_NUM_STEPS)
-
-/* ECC modes supported by the controller */
-#define ECC_NONE BIT(0)
-#define ECC_RS_4BIT BIT(1)
-#define ECC_BCH_4BIT BIT(2)
-#define ECC_BCH_8BIT BIT(3)
-
-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-
-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-/*
- * Returns the actual register address for all NAND_DEV_ registers
- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
- */
-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-
-/* Returns the NAND register physical address */
-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-
-/* Returns the dma address for reg read buffer */
-#define reg_buf_dma_addr(chip, vaddr) \
- ((chip)->reg_read_dma + \
- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-
-#define QPIC_PER_CW_CMD_ELEMENTS 32
-#define QPIC_PER_CW_CMD_SGL 32
-#define QPIC_PER_CW_DATA_SGL 8
-
-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-
-/*
- * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
- */
-/* Don't set the EOT in current tx BAM sgl */
-#define NAND_BAM_NO_EOT BIT(0)
-/* Set the NWD flag in current BAM sgl */
-#define NAND_BAM_NWD BIT(1)
-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-#define NAND_BAM_NEXT_SGL BIT(2)
-/*
- * Erased codeword status is being used two times in single transfer so this
- * flag will determine the current value of erased codeword status register
- */
-#define NAND_ERASED_CW_SET BIT(4)
-
-#define MAX_ADDRESS_CYCLE 5
-
-/*
- * This data type corresponds to the BAM transaction which will be used for all
- * NAND transfers.
- * @bam_ce - the array of BAM command elements
- * @cmd_sgl - sgl for NAND BAM command pipe
- * @data_sgl - sgl for NAND BAM consumer/producer pipe
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
- * @txn_done - completion for NAND transfer.
- * @bam_ce_pos - the index in bam_ce which is available for next sgl
- * @bam_ce_start - the index in bam_ce which marks the start position ce
- * for current sgl. It will be used for size calculation
- * for current sgl
- * @cmd_sgl_pos - current index in command sgl.
- * @cmd_sgl_start - start index in command sgl.
- * @tx_sgl_pos - current index in data sgl for tx.
- * @tx_sgl_start - start index in data sgl for tx.
- * @rx_sgl_pos - current index in data sgl for rx.
- * @rx_sgl_start - start index in data sgl for rx.
- * @wait_second_completion - wait for second DMA desc completion before making
- * the NAND transfer completion.
- */
-struct bam_transaction {
- struct bam_cmd_element *bam_ce;
- struct scatterlist *cmd_sgl;
- struct scatterlist *data_sgl;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
- struct completion txn_done;
- u32 bam_ce_pos;
- u32 bam_ce_start;
- u32 cmd_sgl_pos;
- u32 cmd_sgl_start;
- u32 tx_sgl_pos;
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
- bool wait_second_completion;
-};
-
-/*
- * This data type corresponds to the nand dma descriptor
- * @dma_desc - low level DMA engine descriptor
- * @list - list for desc_info
- *
- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
- * ADM
- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dir - DMA transfer direction
- */
-struct desc_info {
- struct dma_async_tx_descriptor *dma_desc;
- struct list_head node;
-
- union {
- struct scatterlist adm_sgl;
- struct {
- struct scatterlist *bam_sgl;
- int sgl_cnt;
- };
- };
- enum dma_data_direction dir;
-};
-
-/*
- * holds the current register values that we want to write. acts as a contiguous
- * chunk of memory which we use to write the controller registers through DMA.
- */
-struct nandc_regs {
- __le32 cmd;
- __le32 addr0;
- __le32 addr1;
- __le32 chip_sel;
- __le32 exec;
-
- __le32 cfg0;
- __le32 cfg1;
- __le32 ecc_bch_cfg;
-
- __le32 clrflashstatus;
- __le32 clrreadstatus;
-
- __le32 cmd1;
- __le32 vld;
-
- __le32 orig_cmd1;
- __le32 orig_vld;
-
- __le32 ecc_buf_cfg;
- __le32 read_location0;
- __le32 read_location1;
- __le32 read_location2;
- __le32 read_location3;
- __le32 read_location_last0;
- __le32 read_location_last1;
- __le32 read_location_last2;
- __le32 read_location_last3;
-
- __le32 erased_cw_detect_cfg_clr;
- __le32 erased_cw_detect_cfg_set;
-};
-
-/*
- * NAND controller data struct
- *
- * @dev: parent device
- *
- * @base: MMIO base
- *
- * @core_clk: controller clock
- * @aon_clk: another controller clock
- *
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- *
- * @props: properties of current NAND controller,
- * initialized via DT match data
- *
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
- *
- * @chan: dma channel
- * @cmd_crci: ADM DMA CRCI for command flow control
- * @data_crci: ADM DMA CRCI for data flow control
- *
- * @desc_list: DMA descriptor list (list of desc_infos)
- *
- * @data_buffer: our local DMA buffer for page read/writes,
- * used when we can't use the buffer provided
- * by upper layers directly
- * @reg_read_buf: local buffer for reading back registers via DMA
- *
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
- * @reg_read_dma: contains dma address for register read buffer
- *
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
- * @max_cwperpage: maximum QPIC codewords required. calculated
- * from all connected NAND devices pagesize
- *
- * @reg_read_pos: marker for data read in reg_read_buf
- *
- * @cmd1/vld: some fixed controller register values
- *
- * @exec_opwrite: flag to select correct number of code word
- * while reading status
- */
-struct qcom_nand_controller {
- struct device *dev;
-
- void __iomem *base;
-
- struct clk *core_clk;
- struct clk *aon_clk;
-
- struct nandc_regs *regs;
- struct bam_transaction *bam_txn;
-
- const struct qcom_nandc_props *props;
-
- struct nand_controller controller;
- struct list_head host_list;
-
- union {
- /* will be used only by QPIC for BAM DMA */
- struct {
- struct dma_chan *tx_chan;
- struct dma_chan *rx_chan;
- struct dma_chan *cmd_chan;
- };
-
- /* will be used only by EBI2 for ADM DMA */
- struct {
- struct dma_chan *chan;
- unsigned int cmd_crci;
- unsigned int data_crci;
- };
- };
-
- struct list_head desc_list;
-
- u8 *data_buffer;
- __le32 *reg_read_buf;
-
- phys_addr_t base_phys;
- dma_addr_t base_dma;
- dma_addr_t reg_read_dma;
-
- int buf_size;
- int buf_count;
- int buf_start;
- unsigned int max_cwperpage;
-
- int reg_read_pos;
-
- u32 cmd1, vld;
- bool exec_opwrite;
-};
+#include <linux/mtd/nand-qpic-common.h>
/*
* NAND special boot partitions
@@ -471,9 +47,9 @@ struct qcom_op {
unsigned int data_instr_idx;
unsigned int rdy_timeout_ms;
unsigned int rdy_delay_ns;
- u32 addr1_reg;
- u32 addr2_reg;
- u32 cmd_reg;
+ __le32 addr1_reg;
+ __le32 addr2_reg;
+ __le32 cmd_reg;
u8 flag;
};
@@ -544,243 +120,113 @@ struct qcom_nand_host {
bool bch_enabled;
};
-/*
- * This data type corresponds to the NAND controller properties which varies
- * among different NAND controllers.
- * @ecc_modes - ecc mode for NAND
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
- * @is_bam - whether NAND controller is using BAM
- * @is_qpic - whether NAND CTRL is part of qpic IP
- * @qpic_v2 - flag to indicate QPIC IP version 2
- * @use_codeword_fixup - whether NAND has different layout for boot partitions
- */
-struct qcom_nandc_props {
- u32 ecc_modes;
- u32 dev_cmd_reg_start;
- bool is_bam;
- bool is_qpic;
- bool qpic_v2;
- bool use_codeword_fixup;
-};
-
-/* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- devm_kfree(nandc->dev, bam_txn);
-}
-
-/* Allocates and Initializes the BAM transaction */
-static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn;
- size_t bam_txn_size;
- unsigned int num_cw = nandc->max_cwperpage;
- void *bam_txn_buf;
-
- bam_txn_size =
- sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-
- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
- if (!bam_txn_buf)
- return NULL;
-
- bam_txn = bam_txn_buf;
- bam_txn_buf += sizeof(*bam_txn);
-
- bam_txn->bam_ce = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-
- bam_txn->cmd_sgl = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-
- bam_txn->data_sgl = bam_txn_buf;
-
- init_completion(&bam_txn->txn_done);
-
- return bam_txn;
-}
-
-/* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (!nandc->props->is_bam)
- return;
-
- bam_txn->bam_ce_pos = 0;
- bam_txn->bam_ce_start = 0;
- bam_txn->cmd_sgl_pos = 0;
- bam_txn->cmd_sgl_start = 0;
- bam_txn->tx_sgl_pos = 0;
- bam_txn->tx_sgl_start = 0;
- bam_txn->rx_sgl_pos = 0;
- bam_txn->rx_sgl_start = 0;
- bam_txn->last_data_desc = NULL;
- bam_txn->wait_second_completion = false;
-
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_DATA_SGL);
-
- reinit_completion(&bam_txn->txn_done);
-}
-
-/* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
-{
- struct bam_transaction *bam_txn = data;
-
- /*
- * In case of data transfer with NAND, 2 callbacks will be generated.
- * One for command channel and another one for data channel.
- * If current transaction has data descriptors
- * (i.e. wait_second_completion is true), then set this to false
- * and wait for second DMA descriptor completion.
- */
- if (bam_txn->wait_second_completion)
- bam_txn->wait_second_completion = false;
- else
- complete(&bam_txn->txn_done);
-}
-
-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
return container_of(chip, struct qcom_nand_host, chip);
}
-static inline struct qcom_nand_controller *
+static struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
- return container_of(chip->controller, struct qcom_nand_controller,
- controller);
+ return (struct qcom_nand_controller *)
+ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
}
-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
return ioread32(nandc->base + offset);
}
-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
- u32 val)
+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
{
iowrite32(val, nandc->base + offset);
}
-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
- bool is_cpu)
+/* Helper to check whether this is the last CW or not */
+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
- if (!nandc->props->is_bam)
- return;
-
- if (is_cpu)
- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- else
- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
+ return cw == (ecc->steps - 1);
}
-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+/**
+ * nandc_set_read_loc_first() - to set read location first register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function will set location register value
+ */
+static void nandc_set_read_loc_first(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
- switch (offset) {
- case NAND_FLASH_CMD:
- return &regs->cmd;
- case NAND_ADDR0:
- return &regs->addr0;
- case NAND_ADDR1:
- return &regs->addr1;
- case NAND_FLASH_CHIP_SELECT:
- return &regs->chip_sel;
- case NAND_EXEC_CMD:
- return &regs->exec;
- case NAND_FLASH_STATUS:
- return &regs->clrflashstatus;
- case NAND_DEV0_CFG0:
- return &regs->cfg0;
- case NAND_DEV0_CFG1:
- return &regs->cfg1;
- case NAND_DEV0_ECC_CFG:
- return &regs->ecc_bch_cfg;
- case NAND_READ_STATUS:
- return &regs->clrreadstatus;
- case NAND_DEV_CMD1:
- return &regs->cmd1;
- case NAND_DEV_CMD1_RESTORE:
- return &regs->orig_cmd1;
- case NAND_DEV_CMD_VLD:
- return &regs->vld;
- case NAND_DEV_CMD_VLD_RESTORE:
- return &regs->orig_vld;
- case NAND_EBI2_ECC_BUF_CFG:
- return &regs->ecc_buf_cfg;
- case NAND_READ_LOCATION_0:
- return &regs->read_location0;
- case NAND_READ_LOCATION_1:
- return &regs->read_location1;
- case NAND_READ_LOCATION_2:
- return &regs->read_location2;
- case NAND_READ_LOCATION_3:
- return &regs->read_location3;
- case NAND_READ_LOCATION_LAST_CW_0:
- return &regs->read_location_last0;
- case NAND_READ_LOCATION_LAST_CW_1:
- return &regs->read_location_last1;
- case NAND_READ_LOCATION_LAST_CW_2:
- return &regs->read_location_last2;
- case NAND_READ_LOCATION_LAST_CW_3:
- return &regs->read_location_last3;
- default:
- return NULL;
- }
-}
-
-static void nandc_set_reg(struct nand_chip *chip, int offset,
- u32 val)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) |
+ ((is_last_read_loc) << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg_base == NAND_READ_LOCATION_0)
+ nandc->regs->read_location0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_1)
+ nandc->regs->read_location1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_2)
+ nandc->regs->read_location2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_3)
+ nandc->regs->read_location3 = locreg_val;
+}
+
+/**
+ * nandc_set_read_loc_last - to set read location last register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function will set location last register value
+ */
+static void nandc_set_read_loc_last(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nandc_regs *regs = nandc->regs;
- __le32 *reg;
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) |
+ ((is_last_read_loc) << READ_LOCATION_LAST));
- reg = offset_to_nandc_reg(regs, offset);
+ locreg_val = cpu_to_le32(val);
- if (reg)
- *reg = cpu_to_le32(val);
-}
-
-/* Helper to check the code word, whether it is last cw or not */
-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
-{
- return cw == (ecc->steps - 1);
+ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
+ nandc->regs->read_location_last0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
+ nandc->regs->read_location_last1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
+ nandc->regs->read_location_last2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
+ nandc->regs->read_location_last3 = locreg_val;
}
/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
- int cw_offset, int read_size, int is_last_read_loc)
+ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg_base = NAND_READ_LOCATION_0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
reg_base = NAND_READ_LOCATION_LAST_CW_0;
reg_base += reg * 4;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
return nandc_set_read_loc_last(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
else
@@ -792,12 +238,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
+ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
+ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
}
/*
@@ -811,41 +258,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
struct nand_chip *chip = &host->chip;
- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
else
- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
} else {
- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
}
if (host->use_ecc) {
- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE);
- cfg1 = host->cfg1;
- ecc_bch_cfg = host->ecc_bch_cfg;
+ cfg1 = cpu_to_le32(host->cfg1);
+ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE);
- cfg1 = host->cfg1_raw;
- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ cfg1 = cpu_to_le32(host->cfg1_raw);
+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
}
- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = cmd;
+ nandc->regs->cfg0 = cfg0;
+ nandc->regs->cfg1 = cfg1;
+ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
+
+ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
+ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
+ nandc->regs->exec = cpu_to_le32(1);
if (read)
nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
@@ -853,366 +302,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
/*
- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
- * which will be submitted to DMA engine.
- */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
-{
- struct desc_info *desc;
- struct scatterlist *sgl;
- unsigned int sgl_cnt;
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- enum dma_transfer_direction dir_eng;
- struct dma_async_tx_descriptor *dma_desc;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- if (chan == nandc->cmd_chan) {
- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else if (chan == nandc->tx_chan) {
- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else {
- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- }
-
- sg_mark_end(sgl + sgl_cnt - 1);
- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- if (ret == 0) {
- dev_err(nandc->dev, "failure in mapping desc\n");
- kfree(desc);
- return -ENOMEM;
- }
-
- desc->sgl_cnt = sgl_cnt;
- desc->bam_sgl = sgl;
-
- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
- flags);
-
- if (!dma_desc) {
- dev_err(nandc->dev, "failure in prep desc\n");
- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- kfree(desc);
- return -EINVAL;
- }
-
- desc->dma_desc = dma_desc;
-
- /* update last data/command descriptor */
- if (chan == nandc->cmd_chan)
- bam_txn->last_cmd_desc = dma_desc;
- else
- bam_txn->last_data_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-}
-
-/*
- * Prepares the command descriptor for BAM DMA which will be used for NAND
- * register reads and writes. The command descriptor requires the command
- * to be formed in command element type so this function uses the command
- * element from bam transaction ce array and fills the same with required
- * data. A single SGL can contain multiple command elements so
- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
- * after the current command element.
- */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
-{
- int bam_ce_size;
- int i, ret;
- struct bam_cmd_element *bam_ce_buffer;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-
- /* fill the command desc */
- for (i = 0; i < size; i++) {
- if (read)
- bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
- reg_buf_dma_addr(nandc,
- (__le32 *)vaddr + i));
- else
- bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
- *((__le32 *)vaddr + i));
- }
-
- bam_txn->bam_ce_pos += size;
-
- /* use the separate sgl after this command */
- if (flags & NAND_BAM_NEXT_SGL) {
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
- bam_ce_size = (bam_txn->bam_ce_pos -
- bam_txn->bam_ce_start) *
- sizeof(struct bam_cmd_element);
- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
- bam_ce_buffer, bam_ce_size);
- bam_txn->cmd_sgl_pos++;
- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-
- if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * Prepares the data descriptor for BAM DMA which will be used for NAND
- * data reads and writes.
- */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
-{
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (read) {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
- vaddr, size);
- bam_txn->rx_sgl_pos++;
- } else {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
- vaddr, size);
- bam_txn->tx_sgl_pos++;
-
- /*
- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
- * is not set, form the DMA descriptor
- */
- if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
-{
- struct desc_info *desc;
- struct dma_async_tx_descriptor *dma_desc;
- struct scatterlist *sgl;
- struct dma_slave_config slave_conf;
- struct qcom_adm_peripheral_config periph_conf = {};
- enum dma_transfer_direction dir_eng;
- int ret;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- sgl = &desc->adm_sgl;
-
- sg_init_one(sgl, vaddr, size);
-
- if (read) {
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- } else {
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- }
-
- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
- if (ret == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- memset(&slave_conf, 0x00, sizeof(slave_conf));
-
- slave_conf.device_fc = flow_control;
- if (read) {
- slave_conf.src_maxburst = 16;
- slave_conf.src_addr = nandc->base_dma + reg_off;
- if (nandc->data_crci) {
- periph_conf.crci = nandc->data_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- } else {
- slave_conf.dst_maxburst = 16;
- slave_conf.dst_addr = nandc->base_dma + reg_off;
- if (nandc->cmd_crci) {
- periph_conf.crci = nandc->cmd_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- }
-
- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
- if (ret) {
- dev_err(nandc->dev, "failed to configure dma channel\n");
- goto err;
- }
-
- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
- if (!dma_desc) {
- dev_err(nandc->dev, "failed to prepare desc\n");
- ret = -EINVAL;
- goto err;
- }
-
- desc->dma_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-err:
- kfree(desc);
-
- return ret;
-}
-
-/*
- * read_reg_dma: prepares a descriptor to read a given number of
- * contiguous registers to the reg_read_buf pointer
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to read
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- void *vaddr;
-
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
-
- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, first);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, true, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
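
Each read_reg_dma() call reserves the next num_regs slots of reg_read_buf before any transfer starts; callers later pull the values out of those same slots (with le32_to_cpu()) once the descriptors complete. A stripped-down model of the reservation step (plain uint32_t, no DMA, made-up names):

#include <stdint.h>
#include <stddef.h>

#define MAX_REG_RD 64

struct rd_model {
	uint32_t buf[MAX_REG_RD];
	size_t pos;
};

/* Reserve 'num_regs' contiguous slots; the DMA result will land there. */
static uint32_t *reserve_read_slots(struct rd_model *m, size_t num_regs)
{
	uint32_t *vaddr = &m->buf[m->pos];

	m->pos += num_regs;
	return vaddr;
}

int main(void)
{
	struct rd_model m = { .pos = 0 };
	uint32_t *status = reserve_read_slots(&m, 2);	/* e.g. two status words */
	uint32_t *erased = reserve_read_slots(&m, 1);	/* e.g. erased-CW status */

	(void)status;
	(void)erased;
	return 0;
}
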
-
-/*
- * write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- struct nandc_regs *regs = nandc->regs;
- void *vaddr;
-
- vaddr = offset_to_nandc_reg(regs, first);
-
- if (first == NAND_ERASED_CW_DETECT_CFG) {
- if (flags & NAND_ERASED_CW_SET)
- vaddr = &regs->erased_cw_detect_cfg_set;
- else
- vaddr = &regs->erased_cw_detect_cfg_clr;
- }
-
- if (first == NAND_EXEC_CMD)
- flags |= NAND_BAM_NWD;
-
- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-
- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, false, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
- * controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to write to
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-}
-
-/*
- * write_data_dma: prepares a DMA descriptor to transfer data from
- * 'vaddr' to the controller's internal buffer
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to read from
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-}
-
-/*
* Helper to prepare DMA descriptors for configuring registers
* before reading a NAND page.
*/
@@ -1220,13 +309,14 @@ static void config_nand_page_read(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
@@ -1239,23 +329,23 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int reg = NAND_READ_LOCATION_0;
+ __le32 *reg = &nandc->regs->read_location0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
- reg = NAND_READ_LOCATION_LAST_CW_0;
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg = &nandc->regs->read_location_last0;
- if (nandc->props->is_bam)
- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
+ if (nandc->props->supports_bam)
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
} else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
@@ -1279,11 +369,11 @@ static void config_nand_page_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1294,95 +384,14 @@ static void config_nand_cw_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-}
-
-/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
- dma_cookie_t cookie = 0;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- int ret = 0;
-
- if (nandc->props->is_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
- if (ret)
- goto err_unmap_free_desc;
- }
- }
-
- list_for_each_entry(desc, &nandc->desc_list, node)
- cookie = dmaengine_submit(desc->dma_desc);
-
- if (nandc->props->is_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
- if (bam_txn->last_data_desc) {
- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
- bam_txn->last_data_desc->callback_param = bam_txn;
- bam_txn->wait_second_completion = true;
- }
-
- dma_async_issue_pending(nandc->tx_chan);
- dma_async_issue_pending(nandc->rx_chan);
- dma_async_issue_pending(nandc->cmd_chan);
-
- if (!wait_for_completion_timeout(&bam_txn->txn_done,
- QPIC_NAND_COMPLETION_TIMEOUT))
- ret = -ETIMEDOUT;
- } else {
- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- ret = -ETIMEDOUT;
- }
-
-err_unmap_free_desc:
- /*
- * Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
- */
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
-
- if (nandc->props->is_bam)
- dma_unmap_sg(nandc->dev, desc->bam_sgl,
- desc->sgl_cnt, desc->dir);
- else
- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
- desc->dir);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- kfree(desc);
- }
-
- return ret;
-}
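
submit_descs() is the tail end of the pattern used throughout this file: every read_/write_*_dma() helper only appends a desc_info node to nandc->desc_list, and nothing touches the hardware until submit_descs() walks the list, submits each descriptor, issues the channels and waits, then frees everything even on error. The shape, reduced to a self-contained sketch (generic strings standing in for real descriptors):

#include <stdio.h>
#include <stdlib.h>

struct desc {
	const char *what;
	struct desc *next;
};

static struct desc *head;
static struct desc **tail = &head;

/* Prepare phase: just queue work, touch no hardware. */
static int queue_desc(const char *what)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return -1;
	d->what = what;
	d->next = NULL;
	*tail = d;
	tail = &d->next;
	return 0;
}

/* Submit phase: walk the list once, then free every node. */
static void submit_all(void)
{
	struct desc *d = head, *n;

	while (d) {
		n = d->next;
		printf("submit: %s\n", d->what);
		free(d);
		d = n;
	}
	head = NULL;
	tail = &head;
}

int main(void)
{
	queue_desc("write ADDR0/ADDR1");
	queue_desc("write CFG0/CFG1");
	queue_desc("read FLASH_BUF_ACC into data_buf");
	queue_desc("read FLASH_STATUS");
	submit_all();
	return 0;
}
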
-
-/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
-{
- nandc->reg_read_pos = 0;
- nandc_read_buffer_sync(nandc, false);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1446,7 +455,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1473,13 +482,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
host->use_ecc = false;
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
raw_cw = ecc->steps - 1;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
@@ -1497,7 +506,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
read_loc += data_size1;
@@ -1512,18 +521,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
config_nand_cw_read(chip, false, raw_cw);
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1621,7 +630,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
@@ -1734,7 +743,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_size = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
if (data_buf && oob_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
nandc_set_read_loc(chip, i, 1, data_size,
@@ -1750,8 +759,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
config_nand_cw_read(chip, true, i);
if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
@@ -1766,8 +775,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
if (data_buf)
@@ -1776,7 +785,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -1797,7 +806,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
int size;
int ret;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
@@ -1809,9 +818,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
@@ -1897,14 +906,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
@@ -1945,8 +954,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
@@ -1973,8 +982,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
set_address(host, 0, page);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -1995,8 +1004,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_size = ecc->bytes;
}
- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
@@ -2008,8 +1017,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
@@ -2018,7 +1027,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write page\n");
return ret;
@@ -2043,8 +1052,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2070,28 +1079,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
return ret;
@@ -2121,7 +1130,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2136,11 +1145,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
return ret;
@@ -2167,7 +1176,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
*/
host->use_ecc = false;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
@@ -2194,8 +1203,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
/*
* to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2212,11 +1221,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
return ret;
@@ -2455,15 +1464,15 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
- if (nandc->props->is_bam)
- free_bam_transaction(nandc);
+ if (nandc->props->supports_bam)
+ qcom_free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
- if (nandc->props->is_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (nandc->props->supports_bam) {
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
@@ -2485,44 +1494,43 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_data << UD_SIZE_BYTES
- | 0 << DISABLE_STATUS_AFTER_WRITE
- | 5 << NUM_ADDR_CYCLES
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
- | 0 << STATUS_BFR_READ
- | 1 << SET_RD_MODE_AFTER_STATUS
- | host->spare_bytes << SPARE_SIZE_BYTES;
-
- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | bad_block_byte << BAD_BLOCK_BYTE_NUM
- | 0 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | host->bch_enabled << ENABLE_BCH_ECC;
-
- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_size << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES;
-
- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE;
-
- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
- | 0 << ECC_SW_RESET
- | host->cw_data << ECC_NUM_DATA_BYTES
- | 1 << ECC_FORCE_CLK_OPEN
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
- if (!nandc->props->qpic_v2)
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
+
+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+
+ if (!nandc->props->qpic_version2)
host->ecc_buf_cfg = 0x203 << NUM_STEPS;
host->clrflashstatus = FS_READY_BSY_N;
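
The cfg0/cfg1 rework in the hunk above is mechanical: for a contiguous mask, FIELD_PREP(mask, val) from <linux/bitfield.h> shifts val up to the mask's lowest set bit, which is exactly what the old "val << SHIFT" expressions did. A standalone sanity check of that equivalence (userspace re-implementation of the idea; the UD_SIZE_BYTES bit layout here is an assumption for illustration):

#include <assert.h>
#include <stdint.h>

/* Userspace stand-in for FIELD_PREP(): shift to the mask's lowest set bit. */
#define PREP(mask, val)	((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	const uint32_t ud_size_bytes_mask = 0x3ffu << 9;	/* assumed: bits 18:9 */
	const uint32_t cw_data = 516;				/* example value */

	/* old style: cw_data << UD_SIZE_BYTES, with an assumed shift of 9 */
	assert(PREP(ud_size_bytes_mask, cw_data) == (cw_data << 9));
	return 0;
}
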
@@ -2556,7 +1564,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
cmd = OP_FETCH_ID;
break;
case NAND_CMD_PARAM:
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
cmd = OP_PAGE_READ_ONFI_READ;
else
cmd = OP_PAGE_READ;
@@ -2609,7 +1617,7 @@ static int qcom_parse_instructions(struct nand_chip *chip,
if (ret < 0)
return ret;
- q_op->cmd_reg = ret;
+ q_op->cmd_reg = cpu_to_le32(ret);
q_op->rdy_delay_ns = instr->delay_ns;
break;
@@ -2619,10 +1627,10 @@ static int qcom_parse_instructions(struct nand_chip *chip,
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
- q_op->addr1_reg |= addrs[i] << (i * 8);
+ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
if (naddrs > 4)
- q_op->addr2_reg |= addrs[4];
+ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
q_op->rdy_delay_ns = instr->delay_ns;
break;
@@ -2663,7 +1671,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
unsigned long start = jiffies + msecs_to_jiffies(time_ms);
u32 flash;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
do {
flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2703,23 +1711,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting status descriptor\n");
goto err_out;
}
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < num_cw; i++) {
flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2760,23 +1768,21 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ nandc->regs->exec = cpu_to_le32(1);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting read id descriptor\n");
goto err_out;
@@ -2786,7 +1792,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
err_out:
@@ -2807,15 +1813,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
if (q_op.flag == OP_PROGRAM_PAGE) {
goto wait_rdy;
- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
- } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
+ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
return 0;
}
@@ -2823,20 +1828,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- if (q_op.cmd_reg == OP_BLOCK_ERASE)
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting misc descriptor\n");
goto err_out;
@@ -2864,46 +1869,46 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
if (ret)
return ret;
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = 0;
+ nandc->regs->addr1 = 0;
+
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
}
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->exec = cpu_to_le32(1);
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
+ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
}
instr = q_op.data_instr;
@@ -2912,9 +1917,9 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
nandc_set_read_loc(chip, 0, 0, 0, len, 1);
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
nandc->buf_count = len;
@@ -2922,16 +1927,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
config_nand_single_cw_page_read(chip, false, 0);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting param page descriptor\n");
goto err_out;
@@ -3015,151 +2021,24 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.exec_op = qcom_nand_exec_op,
};
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
-}
-
-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-{
- int ret;
-
- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(nandc->dev, "failed to set DMA mask\n");
- return ret;
- }
-
- /*
- * we use the internal buffer for reading ONFI params, reading small
- * data like ID and status, and performing read-copy-write operations
- * when partially writing to a codeword. 532 is the maximum possible
- * size of a codeword for our NAND controller.
- */
- nandc->buf_size = 532;
-
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
- if (!nandc->data_buffer)
- return -ENOMEM;
-
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
- if (!nandc->regs)
- return -ENOMEM;
-
- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
- sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
- if (!nandc->reg_read_buf)
- return -ENOMEM;
-
- if (nandc->props->is_bam) {
- nandc->reg_read_dma =
- dma_map_single(nandc->dev, nandc->reg_read_buf,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
- return -EIO;
- }
-
- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
- if (IS_ERR(nandc->tx_chan)) {
- ret = PTR_ERR(nandc->tx_chan);
- nandc->tx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "tx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
- if (IS_ERR(nandc->rx_chan)) {
- ret = PTR_ERR(nandc->rx_chan);
- nandc->rx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
- if (IS_ERR(nandc->cmd_chan)) {
- ret = PTR_ERR(nandc->cmd_chan);
- nandc->cmd_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "cmd DMA channel request failed\n");
- goto unalloc;
- }
-
- /*
- * Initially allocate BAM transaction to read ONFI param page.
- * After detecting all the devices, this BAM transaction will
- * be freed and the next BAM transaction will be allocated with
- * maximum codeword size
- */
- nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- ret = -ENOMEM;
- goto unalloc;
- }
- } else {
- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
- if (IS_ERR(nandc->chan)) {
- ret = PTR_ERR(nandc->chan);
- nandc->chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rxtx DMA channel request failed\n");
- return ret;
- }
- }
-
- INIT_LIST_HEAD(&nandc->desc_list);
- INIT_LIST_HEAD(&nandc->host_list);
-
- nand_controller_init(&nandc->controller);
- nandc->controller.ops = &qcom_nandc_ops;
-
- return 0;
-unalloc:
- qcom_nandc_unalloc(nandc);
- return ret;
-}
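
The removed alloc/unalloc pair follows the usual "acquire in order, release whatever stuck" arrangement: each channel pointer is reset to NULL on failure so one teardown helper can run safely after any partial setup. The shape, reduced to a generic sketch (plain malloc/free standing in for the DMA channel and mapping calls):

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }	/* free() tolerates NULL */

struct ctx { struct res *tx, *rx, *cmd; };

static void ctx_unalloc(struct ctx *c)
{
	release(c->cmd);
	release(c->rx);
	release(c->tx);
}

static int ctx_alloc(struct ctx *c)
{
	c->tx = c->rx = c->cmd = NULL;

	c->tx = acquire();
	if (!c->tx)
		goto unalloc;
	c->rx = acquire();
	if (!c->rx)
		goto unalloc;
	c->cmd = acquire();
	if (!c->cmd)
		goto unalloc;
	return 0;

unalloc:
	ctx_unalloc(c);
	return -1;
}

int main(void)
{
	struct ctx c;

	return ctx_alloc(&c);
}
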
-
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
u32 nand_ctrl;
+ nand_controller_init(nandc->controller);
+ nandc->controller->ops = &qcom_nandc_ops;
+
/* kill onenand */
- if (!nandc->props->is_qpic)
+ if (!nandc->props->nandc_part_of_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- if (!nandc->props->qpic_v2)
+ if (!nandc->props->qpic_version2)
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nand_ctrl = nandc_read(nandc, NAND_CTRL);
/*
@@ -3176,7 +2055,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
}
/* save the original values of these registers */
- if (!nandc->props->qpic_v2) {
+ if (!nandc->props->qpic_version2) {
nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
nandc->vld = NAND_DEV_CMD_VLD_VAL;
}
@@ -3288,7 +2167,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
chip->legacy.block_bad = qcom_nandc_block_bad;
chip->legacy.block_markbad = qcom_nandc_block_markbad;
- chip->controller = &nandc->controller;
+ chip->controller = nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
@@ -3349,7 +2228,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
struct device_node *np = nandc->dev->of_node;
int ret;
- if (!nandc->props->is_bam) {
+ if (!nandc->props->supports_bam) {
ret = of_property_read_u32(np, "qcom,cmd-crci",
&nandc->cmd_crci);
if (ret) {
@@ -3371,17 +2250,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
static int qcom_nandc_probe(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc;
+ struct nand_controller *controller;
const void *dev_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
+ GFP_KERNEL);
if (!nandc)
return -ENOMEM;
+ controller = (struct nand_controller *)&nandc[1];
platform_set_drvdata(pdev, nandc);
nandc->dev = dev;
+ nandc->controller = controller;
dev_data = of_device_get_match_data(dev);
if (!dev_data) {
@@ -3474,30 +2357,30 @@ static void qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
- .is_bam = false,
+ .supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
- .qpic_v2 = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
+ .qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
};
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 19cc77288ebb..1e61ab21893a 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
-spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 7936ea546b03..6046c73f8424 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -21,8 +21,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index 82b377c06812..bb5298911137 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -15,8 +15,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 94f33c8be031..da4713692674 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -294,6 +294,9 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
/* Only enable or disable the engine */
@@ -901,9 +904,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -942,11 +953,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
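
Both hunks above add the same fallback: chips flagged SPINAND_NO_RAW_ACCESS reject MTD_OPS_RAW with -EOPNOTSUPP, and the bad-block-marker access is then retried with the on-die ECC engine left enabled. The retry shape, as a standalone sketch (read_marker() is a made-up stand-in, not the spinand API):

#include <errno.h>
#include <stdbool.h>

enum mode { MODE_RAW, MODE_ECC };

/* Pretend the chip has no raw access, as a SPINAND_NO_RAW_ACCESS part would. */
static int read_marker(enum mode mode, unsigned char marker[2])
{
	if (mode == MODE_RAW)
		return -EOPNOTSUPP;
	marker[0] = 0xff;
	marker[1] = 0xff;
	return 0;
}

static bool block_is_bad(void)
{
	unsigned char marker[2] = { 0, 0 };
	int ret;

	ret = read_marker(MODE_RAW, marker);
	if (ret == -EOPNOTSUPP)
		ret = read_marker(MODE_ECC, marker);	/* retry with ECC enabled */

	return ret || marker[0] != 0xff || marker[1] != 0xff;
}

int main(void)
{
	return block_is_bad() ? 1 : 0;
}
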
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -1117,6 +1131,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
&xtx_spinand_manufacturer,
@@ -1198,10 +1213,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -1220,13 +1238,17 @@ spinand_select_op_variant(struct spinand_device *spinand,
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(&op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
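
With this change spinand_select_op_variant() no longer returns the first variant the controller supports; it keeps scanning and returns the supported variant with the smallest summed spi_mem_calc_op_duration(). The selection logic in isolation (hypothetical structs, not the spi-mem types):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct variant {
	const char *name;
	bool supported;		/* spi_mem_supports_op() in the driver */
	uint64_t duration_ns;	/* summed per-chunk op duration */
};

/* Return the fastest supported variant, or NULL if none is usable. */
static const struct variant *pick_variant(const struct variant *v, size_t n)
{
	const struct variant *best = NULL;
	uint64_t best_ns = UINT64_MAX;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!v[i].supported || v[i].duration_ns >= best_ns)
			continue;
		best = &v[i];
		best_ns = v[i].duration_ns;
	}
	return best;
}

int main(void)
{
	const struct variant variants[] = {
		{ "quad DTR", false, 1000 },	/* fastest, but unsupported */
		{ "quad",     true,  1500 },
		{ "single",   true,  4000 },
	};

	return pick_variant(variants, 3) == &variants[1] ? 0 : 1;
}
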
/**
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 4597a82de23a..323a20901fc9 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -15,8 +15,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index e0d2d9257045..ecd5f6bffa33 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -14,8 +14,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -81,6 +81,16 @@ static const struct spinand_info foresee_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&f35sqa002g_ooblayout,
f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
};
static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 6023cba748bb..d620bb02a20a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -28,32 +28,32 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index d277c3220fdc..3dc4d63d6832 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,8 +28,8 @@ struct macronix_priv {
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 12601bc4227a..ad0bb9755a09 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -33,8 +33,8 @@ static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -48,8 +48,8 @@ static SPINAND_OP_VARIANTS(x4_update_cache_variants,
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 519ade513c1f..6e7cc6995380 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -26,8 +26,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
new file mode 100644
index 000000000000..961df0d74984
--- /dev/null
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 SkyHigh Memory Limited
+ *
+ * Author: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+ * Co-Author: KR Kim <kr.kim@skyhighmemory.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_SKYHIGH 0x01
+#define SKYHIGH_STATUS_ECC_1TO2_BITFLIPS (1 << 4)
+#define SKYHIGH_STATUS_ECC_3TO6_BITFLIPS (2 << 4)
+#define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4)
+#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ /* ECC bytes are stored in hidden area. */
+ return -ERANGE;
+}
+
+static int skyhigh_spinand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* ECC bytes are stored in hidden area. Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops skyhigh_spinand_ooblayout = {
+ .ecc = skyhigh_spinand_ooblayout_ecc,
+ .free = skyhigh_spinand_ooblayout_free,
+};
+
+static int skyhigh_spinand_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case SKYHIGH_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case SKYHIGH_STATUS_ECC_1TO2_BITFLIPS:
+ return 2;
+
+ case SKYHIGH_STATUS_ECC_3TO6_BITFLIPS:
+ return 6;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
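
The status decode above follows the usual spinand convention: the return value is the worst case of the reported bitflip range (2 for the "1 to 2" bucket, 6 for the "3 to 6" bucket), which the core uses as the max_bitflips figure reported to MTD. A compact recap of the mapping, for illustration only (field values taken from the defines above):

#include <stdio.h>

/* Status field (bits 5:4, per the defines above) -> value returned to the core. */
static const struct { unsigned int status; int ret; } skyhigh_ecc_map[] = {
	{ 0 << 4, 0 },		/* no bitflips */
	{ 1 << 4, 2 },		/* 1-2 bitflips corrected */
	{ 2 << 4, 6 },		/* 3-6 bitflips corrected */
	{ 3 << 4, -1 },		/* uncorrectable (-EBADMSG in the driver) */
};

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("status 0x%02x -> %d\n",
		       skyhigh_ecc_map[i].status, skyhigh_ecc_map[i].ret);
	return 0;
}
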
+
+static const struct spinand_info skyhigh_spinand_table[] = {
+ SPINAND_INFO("S35ML01G301",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML01G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML02G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML04G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+};
+
+static int skyhigh_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * Config_Protect_En (bit 1 in the Block Lock register) must be set to 1
+ * before writing other bits. Do it here, before the core unlocks all
+ * blocks by writing the block protection bits.
+ */
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK,
+ SKYHIGH_CONFIG_PROTECT_EN);
+}
+
+static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = {
+ .init = skyhigh_spinand_init,
+};
+
+const struct spinand_manufacturer skyhigh_spinand_manufacturer = {
+ .id = SPINAND_MFR_SKYHIGH,
+ .name = "SkyHigh",
+ .chips = skyhigh_spinand_table,
+ .nchips = ARRAY_SIZE(skyhigh_spinand_table),
+ .ops = &skyhigh_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index bbbcaa87c0bc..2e2106b2705f 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -17,8 +17,8 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 7180e615ac97..8394a1b1fb0c 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/units.h>
#define SPINAND_MFR_WINBOND 0xEF
@@ -17,13 +18,31 @@
#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+/*
+ * "X2" in the core is equivalent to "dual output" in the datasheets,
+ * "X4" in the core is equivalent to "quad output" in the datasheets.
+ */
+
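+/*
+ * The last argument of the entries below caps the clock frequency of that
+ * variant: DTR reads are limited to 80 MHz and the plain single-rate read
+ * to 54 MHz.
+ */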
+static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -194,7 +213,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -223,7 +242,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 66a4255bdf06..3f539ca0de86 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -27,8 +27,8 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 45d1153a04a0..82c592f0a1e1 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -238,6 +238,10 @@ static const struct flash_info atmel_nor_parts[] = {
.flags = SPI_NOR_HAS_LOCK,
.no_sfdp_flags = SECT_4K,
.fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x87, 0x01),
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
},
};
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index b6f374ded390..19eb98bd6821 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -17,6 +17,7 @@
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -3576,7 +3577,8 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
static int spi_nor_probe(struct spi_mem *spimem)
{
struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct device *dev = &spi->dev;
+ struct flash_platform_data *data = dev_get_platdata(dev);
struct spi_nor *nor;
/*
* Enable all caps by default. The core will mask them after
@@ -3586,13 +3588,17 @@ static int spi_nor_probe(struct spi_mem *spimem)
char *flash_name;
int ret;
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, dev->of_node);
spi_mem_set_drvdata(spimem, nor);
@@ -3628,9 +3634,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
*/
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
+ devm_kfree(dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 5c33740ed7f5..ceff412f7d65 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -448,7 +448,11 @@ struct spi_nor_id {
* @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
* older chips).
* @name: (obsolete) the name of the flash. Do not set it for new additions.
- * @size: the size of the flash in bytes.
+ * @size: the size of the flash in bytes. The flash size is one of the
+ * properties parsed from SFDP, so we use it as an indicator of
+ * whether SFDP parsing is needed for a particular flash, i.e.
+ * non-legacy flash_info entries have a size of zero iff SFDP
+ * should be used.
* @sector_size: (optional) the size listed here is what works with
* SPINOR_OP_SE, which isn't necessarily called a "sector" by
* the vendor. Defaults to 64k.
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 830da21eea08..99936fd25d43 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -143,12 +143,6 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
- .id = SNOR_ID(0xc2, 0x25, 0x39),
- .name = "mx25u25635f",
- .size = SZ_32M,
- .no_sfdp_flags = SECT_4K,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- }, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx25u51245g",
.size = SZ_64M,
@@ -230,7 +224,8 @@ static int macronix_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
/* Read flash ID to make sure the switch was successful. */
- ret = spi_nor_read_id(nor, 4, 4, buf, SNOR_PROTO_8_8_8_DTR);
+ ret = spi_nor_read_id(nor, nor->addr_nbytes, 4, buf,
+ SNOR_PROTO_8_8_8_DTR);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
return ret;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 5a88a6096ca8..bf08dbf5e742 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -958,6 +958,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s25hx_t_fixups
}, {
+ /* S28HL256T */
+ .id = SNOR_ID(0x34, 0x5a, 0x19),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5a, 0x1a),
.name = "s28hl512t",
.mfr_flags = USE_CLPEF,
@@ -968,6 +973,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
+ /* S28HL02GT */
+ .id = SNOR_ID(0x34, 0x5a, 0x1c),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5b, 0x19),
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 5e9eb268073d..4f12ff755df0 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -50,7 +50,7 @@ static struct attribute *spi_nor_sysfs_entries[] = {
};
static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
@@ -62,9 +62,9 @@ static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
sfdp_size);
}
-static BIN_ATTR_RO(sfdp, 0);
+static const BIN_ATTR_RO(sfdp, 0);
-static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+static const struct bin_attribute *const spi_nor_sysfs_bin_entries[] = {
&bin_attr_sfdp,
NULL
};
@@ -104,7 +104,7 @@ static const struct attribute_group spi_nor_sysfs_group = {
.is_visible = spi_nor_sysfs_is_visible,
.is_bin_visible = spi_nor_sysfs_is_bin_visible,
.attrs = spi_nor_sysfs_entries,
- .bin_attrs = spi_nor_sysfs_bin_entries,
+ .bin_attrs_new = spi_nor_sysfs_bin_entries,
};
const struct attribute_group *spi_nor_sysfs_groups[] = {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 30be4ed68fad..ef6a22f372f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1537,7 +1537,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
if (token) {
int err = kstrtoint(token, 10, &p->ubi_num);
- if (err) {
+ if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6bb80d7714bc..b700a0efaa93 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -828,6 +828,70 @@ out_free:
return err;
}
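+/**
+ * ubi_get_ec_info - copy per-PEB erase counters to user-space.
+ * @ubi: UBI device description object
+ * @ureq: user-space request describing the range of PEBs to query
+ *
+ * Fill the user-supplied erase counter array for the requested range,
+ * reporting %UBI_UNKNOWN for bad or untracked PEBs, and store the number
+ * of entries actually filled back in @ureq->read_length.
+ *
+ * Return: zero on success, a negative error code otherwise.
+ */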
+static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq)
+{
+ struct ubi_ecinfo_req req;
+ struct ubi_wl_entry *wl;
+ int read_cnt;
+ int peb;
+ int end_peb;
+
+ /* Copy the input arguments */
+ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ /* Check input arguments */
+ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count)
+ return -EINVAL;
+
+ if (check_add_overflow(req.start, req.length, &end_peb))
+ return -EINVAL;
+
+ if (end_peb > ubi->peb_count)
+ end_peb = ubi->peb_count;
+
+ /* Check access rights before filling erase_counters array */
+ if (!access_ok((void __user *)ureq->erase_counters,
+ (end_peb-req.start) * sizeof(int32_t)))
+ return -EFAULT;
+
+ /* Fill erase counter array */
+ read_cnt = 0;
+ for (peb = req.start; peb < end_peb; read_cnt++, peb++) {
+ int ec;
+
+ if (ubi_io_is_bad(ubi, peb)) {
+ if (__put_user(UBI_UNKNOWN, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ continue;
+ }
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[peb];
+ if (wl)
+ ec = wl->ec;
+ else
+ ec = UBI_UNKNOWN;
+
+ spin_unlock(&ubi->wl_lock);
+
+ if (__put_user(ec, ureq->erase_counters+read_cnt))
+ return -EFAULT;
+
+ }
+
+ /* Return actual read length */
+ req.read_length = read_cnt;
+
+ /* Copy everything except erase counter array */
+ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -991,6 +1055,12 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ case UBI_IOCECNFO:
+ {
+ err = ubi_get_ec_info(ubi, argp);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 26cc53ad34ec..c792b9bcab9b 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -549,7 +549,6 @@ struct ubi_debug_info {
* @peb_buf: a buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf
* @ckvol_mutex: serializes static volume checking when opening
- * @wl_reboot_notifier: close all wear-leveling work before reboot
*
* @dbg: debugging information for this UBI device
*/
@@ -652,7 +651,6 @@ struct ubi_device {
void *peb_buf;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct notifier_block wl_reboot_notifier;
struct ubi_debug_info dbg;
};
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 4f6f339d8fb8..fbd399cf6503 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -89,7 +89,6 @@
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include "ubi.h"
#include "wl.h"
@@ -128,8 +127,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
-static int ubi_wl_reboot_notifier(struct notifier_block *n,
- unsigned long state, void *cmd);
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -1953,13 +1950,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (!ubi->ro_mode && !ubi->fm_disabled)
ubi_ensure_anchor_pebs(ubi);
#endif
-
- if (!ubi->wl_reboot_notifier.notifier_call) {
- ubi->wl_reboot_notifier.notifier_call = ubi_wl_reboot_notifier;
- ubi->wl_reboot_notifier.priority = 1; /* Higher than MTD */
- register_reboot_notifier(&ubi->wl_reboot_notifier);
- }
-
return 0;
out_free:
@@ -2005,17 +1995,6 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-static int ubi_wl_reboot_notifier(struct notifier_block *n,
- unsigned long state, void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, wl_reboot_notifier);
- ubi_wl_close(ubi);
-
- return NOTIFY_DONE;
-}
-
/**
* self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 78c0022697ec..02be4ba37257 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -42,7 +42,7 @@ struct mux_state {
unsigned int state;
};
-static struct class mux_class = {
+static const struct class mux_class = {
.name = "mux",
};
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index b19492a7f6ad..8adbec7c5084 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -63,13 +63,8 @@ void bond_debug_unregister(struct bonding *bond)
void bond_debug_reregister(struct bonding *bond)
{
- struct dentry *d;
-
- d = debugfs_rename(bonding_debug_root, bond->debug_dir,
- bonding_debug_root, bond->dev->name);
- if (!IS_ERR(d)) {
- bond->debug_dir = d;
- } else {
+ int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
+ if (err) {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7b78c2bada81..e45bba240cbc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1538,17 +1538,20 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_PARTIAL)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_GSO_SOFTWARE)
+#define BOND_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
+
static void bond_compute_features(struct bonding *bond)
{
+ netdev_features_t gso_partial_features = BOND_GSO_PARTIAL_FEATURES;
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t gso_partial_features = NETIF_F_GSO_ESP;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1582,8 +1585,9 @@ static void bond_compute_features(struct bonding *bond)
BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */
- if (slave->dev->hw_enc_features & NETIF_F_GSO_PARTIAL)
- gso_partial_features &= slave->dev->gso_partial_features;
+ gso_partial_features = netdev_increment_features(gso_partial_features,
+ slave->dev->gso_partial_features,
+ BOND_GSO_PARTIAL_FEATURES);
mpls_features = netdev_increment_features(mpls_features,
slave->dev->mpls_features,
@@ -1598,12 +1602,8 @@ static void bond_compute_features(struct bonding *bond)
}
bond_dev->hard_header_len = max_hard_header_len;
- if (gso_partial_features & NETIF_F_GSO_ESP)
- bond_dev->gso_partial_features |= NETIF_F_GSO_ESP;
- else
- bond_dev->gso_partial_features &= ~NETIF_F_GSO_ESP;
-
done:
+ bond_dev->gso_partial_features = gso_partial_features;
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
@@ -6046,6 +6046,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..b35808d3d07f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -505,21 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..71e50fc65c14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 589a1008601c..7b8b5b39c7bb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2854,6 +2854,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 7c88b9f05c4c..e4a7f37036ed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -298,6 +298,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -311,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -346,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
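+/* Forward a firmware async event completion to the registered ULP driver,
+ * provided the ULP installed an async notifier and registered for this
+ * event ID. Runs under RCU, so the notifier must not sleep.
+ */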
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -360,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 54ad9f8273d7..7fa3b8d1ebd2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -30,7 +30,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -126,6 +128,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..3b082114f2e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9cc8db10a8d6..d9d675f1ebfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -55,6 +55,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -7424,7 +7425,7 @@ static void tg3_napi_enable(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++) {
tnapi = &tp->napi[i];
- napi_enable(&tnapi->napi);
+ napi_enable_locked(&tnapi->napi);
if (tnapi->tx_buffers) {
netif_queue_set_napi(tp->dev, txq_idx,
NETDEV_QUEUE_TYPE_TX,
@@ -7445,9 +7446,10 @@ static void tg3_napi_init(struct tg3 *tp)
int i;
for (i = 0; i < tp->irq_cnt; i++) {
- netif_napi_add(tp->dev, &tp->napi[i].napi,
- i ? tg3_poll_msix : tg3_poll);
- netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
}
}
@@ -11259,6 +11261,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11271,7 +11275,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11299,6 +11305,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11318,12 +11325,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11683,9 +11692,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12569,6 +12580,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12597,6 +12609,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12678,6 +12691,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12707,6 +12721,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -13911,6 +13926,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13922,6 +13938,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -14365,6 +14382,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14384,6 +14402,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18164,6 +18183,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18180,6 +18200,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18192,6 +18213,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18208,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
@@ -18260,7 +18338,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18314,7 +18394,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18340,12 +18422,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18357,6 +18441,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8735e333034c..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 68725506a095..f7c4ce8e9a26 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -840,6 +840,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -913,7 +915,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9a63fbc69408..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure the drivers are unloaded one by one */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 12ba380eb701..4e44f28288f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -963,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a7e3b22f641c..9ff797fb36c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -6002,9 +6002,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index db7845009252..3f17b3073e50 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12919,9 +12919,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 163c6e59ea4c..9ba767740a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3410,8 +3410,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cbfaaa5b7d02..2d7a18fcc3be 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ /* IAVF_VLAN_REMOVE means that the VLAN wasn't removed yet.
+ * It is safe to just set the state back to active here.
+ */
+ f->state = IAVF_VLAN_ACTIVE;
}
clearout:
@@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->state = IAVF_VLAN_REMOVE;
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ /* IAVF_VLAN_ADD means that the VLAN wasn't even added yet.
+ * Remove it from the list.
+ */
+ if (f->state == IAVF_VLAN_ADD) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ } else {
+ f->state = IAVF_VLAN_REMOVE;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index d116e2b10bce..dbdb83567364 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -981,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 01536a382e54..bdee499f991a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1498,7 +1498,6 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3072634bf049..f241493a6ac8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
- { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 8f2ad1c172c0..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool {
int rx_equ_post1;
int rx_equ_bflf;
int rx_equ_bfhf;
- int rx_equ_drate;
int rx_equ_ctle_gainhf;
int rx_equ_ctle_gainlf;
int rx_equ_ctle_gaindc;
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
index 6509d807627c..4f56d53d56b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser.h
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
-#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
-#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
@@ -431,13 +429,13 @@ struct ice_parser_rt {
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
- u8 bst_key[ICE_BST_KEY_SIZE];
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
- u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
index dedf5e854e4b..3995d662e050 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
else
key[idd] = imem->b_kb.prio;
- idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
- key[i] = rt->pkt_buf[ho + idd - i];
+ key[i] = rt->pkt_buf[j];
else
key[i] = 0;
}
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- key[0], key[1], key[2], key[3], key[4],
- key[5], key[6], key[7], key[8], key[9]);
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
}
static u16 ice_bit_rev_u16(u16 v, int len)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..9c9ea4c1b93b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
+static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -574,7 +573,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
xdp_buff_set_frags_flag(xdp);
}
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
return -ENOMEM;
- }
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
}
/**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
+ *
+ * This function is intended to be called right before running the XDP
+ * program so that the page recycling mechanism can make a correct
+ * decision regarding the underlying pages; this is needed because the
+ * XDP program can change the refcount of a page.
+ */
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+ for (int i = 0; i < nr_frags; i++) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+}
+
+/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
rx_buf->page_offset + headlen, size,
xdp->frame_sz);
} else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
+ /* buffer is unused, restore biased page count in Rx buffer;
+ * data was copied onto skb's linear part so there's no
* need for adjusting page offset and we can reuse this buffer
* as-is
*/
- rx_buf->act = ICE_SKB_CONSUMED;
+ rx_buf->pagecnt_bias++;
}
if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+ * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @verdict: return code from XDP program execution
+ *
+ * Walk through the gathered fragments and satisfy the internal page
+ * recycling mechanism; the action taken here depends on the verdict
+ * returned by the XDP program.
+ */
+static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ u32 *xdp_xmit, u32 ntc, u32 verdict)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+ int i;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+ for (i = 0; i < post_xdp_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+
+ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ *xdp_xmit |= verdict;
+ } else if (verdict & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (verdict == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+ /* Handle buffers that represented frags released by the XDP prog;
+ * for these we keep pagecnt_bias as-is, since the refcount of the
+ * struct page has already been decremented within the XDP prog and
+ * we do not have to increase the biased refcnt.
+ */
+ for (; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+ ice_put_rx_buf(rx_ring, buf);
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
+}
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
break;
}
if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ ice_get_pgcnts(rx_ring);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
+ xdp_verdict = ICE_XDP_CONSUMED;
}
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1257,23 +1330,6 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..806bce701df3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -201,7 +201,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int act;
unsigned int pagecnt_bias;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6cf32b404127 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index 4849590a5591..b28991dd1870 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
/* strip off FW internal code */
desc_err = le16_to_cpu(desc->ret_val) & 0xff;
@@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f71d3182580b..b6c515d14cbf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
- adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ adapter->init_wq = alloc_workqueue("%s-%s-init",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->init_wq) {
@@ -183,7 +184,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
- adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ adapter->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
@@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
@@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mbx_wq_alloc;
}
- adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->stats_wq) {
@@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_stats_wq_alloc;
}
- adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d46c95f91b0d..3d2413b8684f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -517,8 +517,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
@@ -612,14 +614,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
@@ -3077,12 +3081,21 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
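Two separate fixes are folded into the idpf_virtchnl.c diff: the transaction lock is now taken before the salt comparison, so a concurrent completion cannot recycle the slot between the check and the state switch, and idpf_vc_core_deinit() shuts the transaction manager down before the deinit/interrupt-release steps unless a remove is in progress. A compressed sketch of the reply-side ordering, using the driver's lock helpers but an illustrative body:

    idpf_vc_xn_lock(xn);
    if (xn->salt != salt) {		/* validated under the lock */
    	idpf_vc_xn_unlock(xn);
    	return -EINVAL;
    }
    /* ... state machine continues under the same lock ... */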
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 82f4333fb426..4fe121b9f94b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4432,6 +4432,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
*/
if (pp->is_stopped) {
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
return 0;
}
netif_tx_stop_all_queues(pp->dev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 148144f5b61d..a1f9ec03c2ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -917,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -945,32 +944,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -996,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -2704,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2854,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2933,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
{
- int err, lmac_id;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
-
- return err;
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
@@ -2980,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
}
}
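The rvu_debugfs.c conversion stops deriving context from the parent dentry name and instead stores a small integer with each file through the new debugfs aux-number helpers. A minimal sketch of the pairing (the file name, struct names and stored value are made up for illustration):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *s, void *unused)
    {
    	/* recover the integer stashed at creation time; no d_parent
    	 * string matching needed any more */
    	int blkaddr = debugfs_get_aux_num(s->file);

    	seq_printf(s, "blkaddr=%d\n", blkaddr);
    	return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(demo);

    static void demo_debugfs_init(struct dentry *parent, void *priv, int blkaddr)
    {
    	/* the extra argument travels with the file itself */
    	debugfs_create_file_aux_num("qsize", 0600, parent, priv,
    				    blkaddr, &demo_fops);
    }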
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 25bf6ec44289..a1bada9eaaf6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs)
- skge->debugfs = debugfs_rename(skge_debug,
- skge->debugfs,
- skge_debug, dev->name);
+ debugfs_change_name(skge->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
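skge, sky2 and stmmac below all make the same substitution: debugfs_change_name() renames the directory in place with a printf-style format, and the hunks drop the explicit NULL check and dentry reassignment that debugfs_rename() required, so the helper evidently copes with a missing dentry on its own. A hedged sketch of the notifier usage (the priv lookup is hypothetical):

    static int demo_netdev_event(struct notifier_block *nb,
    			     unsigned long event, void *ptr)
    {
    	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
    	struct demo_priv *priv = demo_priv_from_dev(dev);	/* hypothetical lookup */

    	if (event == NETDEV_CHANGENAME)
    		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);

    	return NOTIFY_DONE;
    }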
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 988fa28cfb5f..d7121c836508 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4494,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (sky2->debugfs) {
- sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
- sky2_debug, dev->name);
- }
+ debugfs_change_name(sky2->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 415d784de741..09f448f29124 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -266,11 +266,11 @@
#define REG_GDM3_FWD_CFG GDM3_BASE
#define GDM3_PAD_EN_MASK BIT(28)
-#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
+#define REG_GDM4_FWD_CFG GDM4_BASE
#define GDM4_PAD_EN_MASK BIT(28)
#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bd41b75d246e..a814b63ed97e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2087,7 +2087,7 @@ static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
struct mlx5e_xdpsq *xdpsq;
int err;
- xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
if (!xdpsq)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 720f577929db..499e5e39d513 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
}
}
-static void nv_napi_enable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_enable(&np->napi);
-}
-
-static void nv_napi_disable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_disable(&np->napi);
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev,
if (netif_running(dev)) {
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev,
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
@@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev)
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
+ netdev_lock(dev);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
@@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev)
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable_locked(&np->napi);
if (ret) {
netif_carrier_on(dev);
@@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev)
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
+ netdev_unlock(dev);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
@@ -5632,7 +5620,7 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
synchronize_irq(np->pci_dev->irq);
del_timer_sync(&np->oom_kick);
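The forcedeth conversion (and the niu, via-rhine and 8139too ones further down) follows the netdev instance-lock rules: plain napi_enable()/napi_disable() replace the one-line wrappers, while paths that already hold the instance lock across device programming switch to netdev_lock()/napi_enable_locked(). A condensed sketch of the open-path shape, not the driver's exact code:

    netdev_lock(dev);
    spin_lock_irq(&np->lock);
    /* ... program registers, start rx/tx ... */
    napi_enable_locked(&np->napi);	/* instance lock already held */
    spin_unlock_irq(&np->lock);
    netdev_unlock(dev);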
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9ce0e8a64ba8..a73dcaffa8c5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
+ netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
@@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- napi_enable(&tp->napi);
+ napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);
spin_unlock_bh(&tp->rx_lock);
+ netdev_unlock(dev);
}
static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index bc395294a32d..c9f4976a3527 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -3217,10 +3217,15 @@ static int ravb_suspend(struct device *dev)
netif_device_detach(ndev);
- if (priv->wol_enabled)
- return ravb_wol_setup(ndev);
+ rtnl_lock();
+ if (priv->wol_enabled) {
+ ret = ravb_wol_setup(ndev);
+ rtnl_unlock();
+ return ret;
+ }
ret = ravb_close(ndev);
+ rtnl_unlock();
if (ret)
return ret;
@@ -3245,19 +3250,20 @@ static int ravb_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
/* If WoL is enabled restore the interface. */
- if (priv->wol_enabled) {
+ if (priv->wol_enabled)
ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- } else {
+ else
ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
/* Reopening the interface will restore the device to the working state. */
ret = ravb_open(ndev);
+ rtnl_unlock();
if (ret < 0)
goto out_rpm_put;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8887b8921009..5fc8027c92c7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index edbf8994455d..b34ebb916b89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6535,11 +6535,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -7221,6 +7217,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index d7459866d24c..72177fea1cfb 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np)
int i;
for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
+ napi_enable_locked(&np->ldg[i].napi);
}
static void niu_disable_napi(struct niu *np)
@@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev)
if (err)
goto out_free_channels;
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
spin_lock_irq(&np->lock);
@@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work)
niu_reset_buffers(np);
+ netdev_lock(np->dev);
spin_lock_irqsave(&np->lock, flags);
err = niu_init_hw(np);
@@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(np->dev);
}
static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -6761,7 +6765,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
niu_free_channels(np);
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
err = niu_alloc_channels(np);
if (err)
@@ -9937,6 +9943,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
spin_lock_irqsave(&np->lock, flags);
+ netdev_lock(dev);
err = niu_init_hw(np);
if (!err) {
np->timer.expires = jiffies + HZ;
@@ -9945,6 +9952,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(dev);
return err;
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 894911f3d560..e56ebbdd428d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1568,7 +1568,7 @@ static void init_registers(struct net_device *dev)
if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
- napi_enable(&rp->napi);
+ napi_enable_locked(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
@@ -1696,7 +1696,10 @@ static int rhine_open(struct net_device *dev)
rhine_power_init(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
init_registers(dev);
+ netdev_unlock(dev);
netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
__func__, ioread16(ioaddr + ChipCmd),
@@ -1727,6 +1730,8 @@ static void rhine_reset_task(struct work_struct *work)
napi_disable(&rp->napi);
netif_tx_disable(dev);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1740,6 +1745,7 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
@@ -2541,9 +2547,12 @@ static int rhine_resume(struct device *device)
alloc_tbufs(dev);
rhine_reset_rbufs(rp);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_device_attach(dev);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 3b23f3d3ca2b..5c80fbee7913 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -74,7 +74,7 @@ static void nsim_get_ringparam(struct net_device *dev,
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX;
- if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
}
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 0e58aa7f0374..66b3215db3ac 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -331,7 +331,6 @@ enum nsim_dev_hwstats_do {
};
struct nsim_dev_hwstats_fops {
- const struct file_operations fops;
enum nsim_dev_hwstats_do action;
enum netdev_offload_xstats_type type;
};
@@ -342,13 +341,12 @@ nsim_dev_hwstats_do_write(struct file *file,
size_t count, loff_t *ppos)
{
struct nsim_dev_hwstats *hwstats = file->private_data;
- struct nsim_dev_hwstats_fops *hwsfops;
+ const struct nsim_dev_hwstats_fops *hwsfops;
struct list_head *hwsdev_list;
int ifindex;
int err;
- hwsfops = container_of(debugfs_real_fops(file),
- struct nsim_dev_hwstats_fops, fops);
+ hwsfops = debugfs_get_aux(file);
err = kstrtoint_from_user(data, count, 0, &ifindex);
if (err)
@@ -381,14 +379,13 @@ nsim_dev_hwstats_do_write(struct file *file,
return count;
}
+static struct debugfs_short_fops debugfs_ops = {
+ .write = nsim_dev_hwstats_do_write,
+ .llseek = generic_file_llseek,
+};
+
#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
{ \
- .fops = { \
- .open = simple_open, \
- .write = nsim_dev_hwstats_do_write, \
- .llseek = generic_file_llseek, \
- .owner = THIS_MODULE, \
- }, \
.action = ACTION, \
.type = TYPE, \
}
@@ -433,12 +430,12 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_fail_fops.fops);
+ debugfs_create_file_aux("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops, &debugfs_ops);
+ debugfs_create_file_aux("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops, &debugfs_ops);
+ debugfs_create_file_aux("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops, &debugfs_ops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
&nsim_dev_hwstats_traffic_work);
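Instead of embedding a full file_operations in every nsim_dev_hwstats_fops entry, the hwstats code now shares one debugfs_short_fops table and passes the per-file descriptor as the aux pointer. A hedged sketch of the pattern with made-up names:

    struct demo_action { int action; };

    static ssize_t demo_write(struct file *file, const char __user *buf,
    			  size_t count, loff_t *ppos)
    {
    	/* per-file context travels as the aux pointer, not in a
    	 * per-file copy of the ops table */
    	const struct demo_action *act = debugfs_get_aux(file);

    	return act->action ? count : -EINVAL;
    }

    static const struct debugfs_short_fops demo_short_fops = {
    	.write	= demo_write,
    	.llseek	= generic_file_llseek,
    };

    static const struct demo_action demo_enable = { .action = 1 };

    static void demo_debugfs_init(struct dentry *parent, void *priv)
    {
    	debugfs_create_file_aux("enable_demo", 0200, parent, priv,
    				&demo_enable, &demo_short_fops);
    }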
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index dcf073bc4802..96d54c08043d 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -134,6 +134,7 @@ struct netdevsim {
u32 sleep;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
+ struct dentry *ddir;
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 02dc3123eb6c..640b4983a9a0 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -112,9 +112,11 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
rtnl_lock();
- udp_tunnel_nic_reset_ntf(dev);
+ if (dev->reg_state == NETREG_REGISTERED) {
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ udp_tunnel_nic_reset_ntf(dev);
+ }
rtnl_unlock();
return count;
@@ -144,23 +146,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
else
ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
- debugfs_create_u32("udp_ports_inject_error", 0600,
- ns->nsim_dev_port->ddir,
+ ns->udp_ports.ddir = debugfs_create_dir("udp_ports",
+ ns->nsim_dev_port->ddir);
+
+ debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir,
&ns->udp_ports.inject_error);
ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table0", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[0]);
ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table1", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[1]);
- debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
+ debugfs_create_file("reset", 0200, ns->udp_ports.ddir,
dev, &nsim_udp_tunnels_info_reset_fops);
/* Note: it's not normal to allocate the info struct like this!
@@ -196,6 +198,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
void nsim_udp_tunnels_info_destroy(struct net_device *dev)
{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ debugfs_remove_recursive(ns->udp_ports.ddir);
kfree(dev->udp_tunnel_nic_info);
dev->udp_tunnel_nic_info = NULL;
}
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 4494b3e39ce2..a3996471a1c9 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -95,6 +95,10 @@
#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+struct mv88q2xxx_priv {
+ bool enable_temp;
+};
+
struct mmd_val {
int devad;
u32 regnum;
@@ -710,17 +714,12 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
char *hwmon_name;
- int ret;
-
- /* Enable temperature sense */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
+ priv->enable_temp = true;
hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
if (IS_ERR(hwmon_name))
return PTR_ERR(hwmon_name);
@@ -743,6 +742,14 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
static int mv88q2xxx_probe(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
return mv88q2xxx_hwmon_probe(phydev);
}
@@ -810,6 +817,18 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
static int mv88q222x_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ int ret;
+
+ /* Enable temperature sense */
+ if (priv->enable_temp) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
return mv88q222x_revb0_config_init(phydev);
else
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 323717a4821f..34231b5b9175 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1297,6 +1297,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev)
if (ret)
return ret;
+ usleep_range(2000, 2050);
+
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
VEND1_DEVICE_CONTROL, ret,
!(ret & DEVICE_CONTROL_RESET), 20000,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28624cca91f8..acf96f262488 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -574,18 +574,14 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
return ret;
}
-static inline bool tun_capable(struct tun_struct *tun)
+static inline bool tun_not_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
struct net *net = dev_net(tun->dev);
- if (ns_capable(net->user_ns, CAP_NET_ADMIN))
- return 1;
- if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner))
- return 1;
- if (gid_valid(tun->group) && in_egroup_p(tun->group))
- return 1;
- return 0;
+ return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN);
}
static void tun_set_real_num_queues(struct tun_struct *tun)
@@ -2782,7 +2778,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
!!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
- if (!tun_capable(tun))
+ if (tun_not_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
if (err < 0)
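Note that the inverted helper is not a pure De Morgan rewrite of the removed one: the old tun_capable() granted access when either a configured owner or a configured group matched, while the restored logic denies access if any configured credential fails to match, unless the caller has CAP_NET_ADMIN. In boolean form (illustrative, not driver code):

    /* allowed = ns_capable(CAP_NET_ADMIN)
     *        || ((!owner_set || euid == owner) &&
     *            (!group_set || in_egroup(group)));
     */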
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 46afb95ffabe..a19789b57190 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -61,7 +61,18 @@
#define IPHETH_USBINTF_PROTO 1
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
-#define IPHETH_NCM_HEADER_SIZE (12 + 96) /* NCMH + NCM0 */
+/* On iOS devices, NCM headers in RX have a fixed size regardless of DPE count:
+ * - NTH16 (NCMH): 12 bytes, as per CDC NCM 1.0 spec
+ * - NDP16 (NCM0): 96 bytes, of which
+ * - NDP16 fixed header: 8 bytes
+ * - maximum of 22 DPEs (21 datagrams + trailer), 4 bytes each
+ */
+#define IPHETH_NDP16_MAX_DPE 22
+#define IPHETH_NDP16_HEADER_SIZE (sizeof(struct usb_cdc_ncm_ndp16) + \
+ IPHETH_NDP16_MAX_DPE * \
+ sizeof(struct usb_cdc_ncm_dpe16))
+#define IPHETH_NCM_HEADER_SIZE (sizeof(struct usb_cdc_ncm_nth16) + \
+ IPHETH_NDP16_HEADER_SIZE)
#define IPHETH_TX_BUF_SIZE ETH_FRAME_LEN
#define IPHETH_RX_BUF_SIZE_LEGACY (IPHETH_IP_ALIGN + ETH_FRAME_LEN)
#define IPHETH_RX_BUF_SIZE_NCM 65536
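For reference, the new constants add up to the same 108 bytes as the old (12 + 96) literal: sizeof(struct usb_cdc_ncm_nth16) is 12, sizeof(struct usb_cdc_ncm_ndp16) is 8, and 22 entries of sizeof(struct usb_cdc_ncm_dpe16) = 4 bytes contribute 88, so IPHETH_NCM_HEADER_SIZE = 12 + (8 + 88) = 108.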
@@ -207,15 +218,23 @@ static int ipheth_rcvbulk_callback_legacy(struct urb *urb)
return ipheth_consume_skb(buf, len, dev);
}
+/* In "NCM mode", the iOS device encapsulates RX (phone->computer) traffic
+ * in NCM Transfer Blocks (similarly to CDC NCM). However, unlike reverse
+ * tethering (handled by the `cdc_ncm` driver), regular tethering is not
+ * compliant with the CDC NCM spec, as the device is missing the necessary
+ * descriptors, and TX (computer->phone) traffic is not encapsulated
+ * at all. Thus `ipheth` implements a very limited subset of the spec with
+ * the sole purpose of parsing RX URBs.
+ */
static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
{
struct usb_cdc_ncm_nth16 *ncmh;
struct usb_cdc_ncm_ndp16 *ncm0;
struct usb_cdc_ncm_dpe16 *dpe;
struct ipheth_device *dev;
+ u16 dg_idx, dg_len;
int retval = -EINVAL;
char *buf;
- int len;
dev = urb->context;
@@ -226,40 +245,42 @@ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
ncmh = urb->transfer_buffer;
if (ncmh->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN) ||
- le16_to_cpu(ncmh->wNdpIndex) >= urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ /* On iOS, NDP16 directly follows NTH16 */
+ ncmh->wNdpIndex != cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)))
+ goto rx_error;
- ncm0 = urb->transfer_buffer + le16_to_cpu(ncmh->wNdpIndex);
- if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN) ||
- le16_to_cpu(ncmh->wHeaderLength) + le16_to_cpu(ncm0->wLength) >=
- urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ ncm0 = urb->transfer_buffer + sizeof(struct usb_cdc_ncm_nth16);
+ if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN))
+ goto rx_error;
dpe = ncm0->dpe16;
- while (le16_to_cpu(dpe->wDatagramIndex) != 0 &&
- le16_to_cpu(dpe->wDatagramLength) != 0) {
- if (le16_to_cpu(dpe->wDatagramIndex) >= urb->actual_length ||
- le16_to_cpu(dpe->wDatagramIndex) +
- le16_to_cpu(dpe->wDatagramLength) > urb->actual_length) {
+ for (int dpe_i = 0; dpe_i < IPHETH_NDP16_MAX_DPE; ++dpe_i, ++dpe) {
+ dg_idx = le16_to_cpu(dpe->wDatagramIndex);
+ dg_len = le16_to_cpu(dpe->wDatagramLength);
+
+ /* Null DPE must be present after last datagram pointer entry
+ * (3.3.1 USB CDC NCM spec v1.0)
+ */
+ if (dg_idx == 0 && dg_len == 0)
+ return 0;
+
+ if (dg_idx < IPHETH_NCM_HEADER_SIZE ||
+ dg_idx >= urb->actual_length ||
+ dg_len > urb->actual_length - dg_idx) {
dev->net->stats.rx_length_errors++;
return retval;
}
- buf = urb->transfer_buffer + le16_to_cpu(dpe->wDatagramIndex);
- len = le16_to_cpu(dpe->wDatagramLength);
+ buf = urb->transfer_buffer + dg_idx;
- retval = ipheth_consume_skb(buf, len, dev);
+ retval = ipheth_consume_skb(buf, dg_len, dev);
if (retval != 0)
return retval;
-
- dpe++;
}
- return 0;
+rx_error:
+ dev->net->stats.rx_errors++;
+ return retval;
}
static void ipheth_rcvbulk_callback(struct urb *urb)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 01a3b2417a54..ddff6f19ff98 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -71,6 +71,14 @@
#define MSR_SPEED (1<<3)
#define MSR_LINK (1<<2)
+/* USB endpoints */
+enum rtl8150_usb_ep {
+ RTL8150_USB_EP_CONTROL = 0,
+ RTL8150_USB_EP_BULK_IN = 1,
+ RTL8150_USB_EP_BULK_OUT = 2,
+ RTL8150_USB_EP_INT_IN = 3,
+};
+
/* Interrupt pipe data */
#define INT_TSR 0x00
#define INT_RSR 0x01
@@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
rtl8150_t *dev;
struct net_device *netdev;
+ static const u8 bulk_ep_addr[] = {
+ RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
+ RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
+ 0};
+ static const u8 int_ep_addr[] = {
+ RTL8150_USB_EP_INT_IN | USB_DIR_IN,
+ 0};
netdev = alloc_etherdev(sizeof(rtl8150_t));
if (!netdev)
@@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ /* Verify that all required endpoints are present */
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr)) {
+ dev_err(&intf->dev, "couldn't find required endpoints\n");
+ goto out;
+ }
+
tasklet_setup(&dev->tl, rx_fixup);
spin_lock_init(&dev->rx_pool_lock);
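usb_check_bulk_endpoints() and usb_check_int_endpoints() take a zero-terminated list of endpoint addresses and confirm that the interface's current altsetting exposes each one with the matching transfer type, so a malformed or malicious device is rejected before any URB is built. A hedged sketch of the same check with generic names:

    /* in probe(), before any URBs are allocated */
    static const u8 demo_bulk_eps[] = { 0x01 | USB_DIR_IN, 0x02 | USB_DIR_OUT, 0 };
    static const u8 demo_int_eps[]  = { 0x03 | USB_DIR_IN, 0 };

    if (!usb_check_bulk_endpoints(intf, demo_bulk_eps) ||
        !usb_check_int_endpoints(intf, demo_int_eps)) {
    	dev_err(&intf->dev, "missing expected endpoints\n");
    	return -ENODEV;
    }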
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 1341374a4588..616ecc38d172 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
- tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+ tq = &adapter->tx_queue[cpu % tq_number];
return tq;
}
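reciprocal_scale(val, n) computes (u32)(((u64)val * n) >> 32), i.e. it assumes val is spread across the whole 32-bit range; for a raw CPU number it is effectively always zero, so every out-of-range CPU was funnelled to tx_queue[0]. The plain modulo spreads those CPUs across the rings instead:

    u32 cpu = 9, tq_number = 4;
    u32 idx_old = reciprocal_scale(cpu, tq_number);	/* (9ULL * 4) >> 32 == 0 */
    u32 idx_new = cpu % tq_number;			/* 9 % 4 == 1 */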
@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
u32 buf_size;
u32 dw2;
+ spin_lock_irq(&tq->tx_lock);
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
+ spin_unlock_irq(&tq->tx_lock);
return -ENOSPC;
}
@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
+ spin_unlock_irq(&tq->tx_lock);
return -EFAULT;
+ }
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
+ spin_unlock_irq(&tq->tx_lock);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
if (tq->stopped)
return -ENETDOWN;
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
}
}
tq->stats.xdp_xmit += i;
+ __netif_tx_unlock(nq);
return i;
}
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index d2023e7131bd..6e6e9f05509a 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -411,6 +411,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
struct tunnel_msg *tmsg;
struct net_device *dev;
+ if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
+ return -EINVAL;
+ }
+
tmsg = nlmsg_data(cb->nlh);
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 57281a135dd7..bf192529e3fe 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
+ timeout = jiffies + secs_to_jiffies(3);
ath11k_debugfs_fw_stats_reset(ar);
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index bb40889d7c72..2d734567000a 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -54,7 +54,6 @@ struct carl9170_debugfs_fops {
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
- const struct file_operations fops;
enum carl9170_device_state req_dev_state;
};
@@ -62,7 +61,7 @@ struct carl9170_debugfs_fops {
static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct carl9170_debugfs_fops *dfops;
+ const struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL, *res_buf = NULL;
ssize_t ret = 0;
@@ -75,8 +74,7 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
if (!ar)
return -ENODEV;
- dfops = container_of(debugfs_real_fops(file),
- struct carl9170_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read)
return -ENOSYS;
@@ -113,7 +111,7 @@ out_free:
static ssize_t carl9170_debugfs_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
{
- struct carl9170_debugfs_fops *dfops;
+ const struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL;
int err = 0;
@@ -128,8 +126,7 @@ static ssize_t carl9170_debugfs_write(struct file *file,
if (!ar)
return -ENODEV;
- dfops = container_of(debugfs_real_fops(file),
- struct carl9170_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write)
return -ENOSYS;
@@ -165,6 +162,11 @@ out_free:
return err;
}
+static struct debugfs_short_fops debugfs_fops = {
+ .read = carl9170_debugfs_read,
+ .write = carl9170_debugfs_write,
+};
+
#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
_attr, _dstate) \
static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
@@ -173,12 +175,6 @@ static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
.write = _write, \
.attr = _attr, \
.req_dev_state = _dstate, \
- .fops = { \
- .open = simple_open, \
- .read = carl9170_debugfs_read, \
- .write = carl9170_debugfs_write, \
- .owner = THIS_MODULE \
- }, \
}
#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
@@ -816,9 +812,9 @@ void carl9170_debugfs_register(struct ar9170 *ar)
ar->hw->wiphy->debugfsdir);
#define DEBUGFS_ADD(name) \
- debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \
- ar->debug_dir, ar, \
- &carl_debugfs_##name ## _ops.fops)
+ debugfs_create_file_aux(#name, carl_debugfs_##name ##_ops.attr, \
+ ar->debug_dir, ar, &carl_debugfs_##name ## _ops, \
+ &debugfs_fops)
DEBUGFS_ADD(usb_tx_anch_urbs);
DEBUGFS_ADD(usb_rx_pool_urbs);
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index efa98444e3fb..5a49970afc8c 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -30,7 +30,6 @@ static struct dentry *rootdir;
struct b43_debugfs_fops {
ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
- struct file_operations fops;
/* Offset of struct b43_dfs_file in struct b43_dfsentry */
size_t file_struct_offset;
};
@@ -491,7 +490,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct b43_wldev *dev;
- struct b43_debugfs_fops *dfops;
+ const struct b43_debugfs_fops *dfops;
struct b43_dfs_file *dfile;
ssize_t ret;
char *buf;
@@ -511,8 +510,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -555,7 +553,7 @@ static ssize_t b43_debugfs_write(struct file *file,
size_t count, loff_t *ppos)
{
struct b43_wldev *dev;
- struct b43_debugfs_fops *dfops;
+ const struct b43_debugfs_fops *dfops;
char *buf;
int err = 0;
@@ -573,8 +571,7 @@ static ssize_t b43_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
@@ -602,16 +599,16 @@ out_unlock:
}
+static struct debugfs_short_fops debugfs_ops = {
+ .read = b43_debugfs_read,
+ .write = b43_debugfs_write,
+ .llseek = generic_file_llseek,
+};
+
#define B43_DEBUGFS_FOPS(name, _read, _write) \
static struct b43_debugfs_fops fops_##name = { \
.read = _read, \
.write = _write, \
- .fops = { \
- .open = simple_open, \
- .read = b43_debugfs_read, \
- .write = b43_debugfs_write, \
- .llseek = generic_file_llseek, \
- }, \
.file_struct_offset = offsetof(struct b43_dfsentry, \
file_##name), \
}
@@ -703,9 +700,9 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- debugfs_create_file(__stringify(name), \
+ debugfs_create_file_aux(__stringify(name), \
mode, e->subdir, dev, \
- &fops_##name.fops); \
+ &fops_##name, &debugfs_ops); \
} while (0)
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 6b0e8d117061..5d04bcc216e5 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -31,7 +31,6 @@ static struct dentry *rootdir;
struct b43legacy_debugfs_fops {
ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
- struct file_operations fops;
/* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
size_t file_struct_offset;
/* Take wl->irq_lock before calling read/write? */
@@ -188,7 +187,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct b43legacy_wldev *dev;
- struct b43legacy_debugfs_fops *dfops;
+ const struct b43legacy_debugfs_fops *dfops;
struct b43legacy_dfs_file *dfile;
ssize_t ret;
char *buf;
@@ -208,8 +207,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43legacy_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -257,7 +255,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
size_t count, loff_t *ppos)
{
struct b43legacy_wldev *dev;
- struct b43legacy_debugfs_fops *dfops;
+ const struct b43legacy_debugfs_fops *dfops;
char *buf;
int err = 0;
@@ -275,8 +273,7 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(debugfs_real_fops(file),
- struct b43legacy_debugfs_fops, fops);
+ dfops = debugfs_get_aux(file);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
@@ -308,17 +305,16 @@ out_unlock:
return err ? err : count;
}
+static struct debugfs_short_fops debugfs_ops = {
+ .read = b43legacy_debugfs_read,
+ .write = b43legacy_debugfs_write,
+ .llseek = generic_file_llseek
+};
#define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \
static struct b43legacy_debugfs_fops fops_##name = { \
.read = _read, \
.write = _write, \
- .fops = { \
- .open = simple_open, \
- .read = b43legacy_debugfs_read, \
- .write = b43legacy_debugfs_write, \
- .llseek = generic_file_llseek, \
- }, \
.file_struct_offset = offsetof(struct b43legacy_dfsentry, \
file_##name), \
.take_irqlock = _take_irqlock, \
@@ -386,9 +382,9 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- debugfs_create_file(__stringify(name), mode, \
+ debugfs_create_file_aux(__stringify(name), mode, \
e->subdir, dev, \
- &fops_##name.fops); \
+ &fops_##name, &debugfs_ops); \
} while (0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 0949e7975ff1..b70d20128f98 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
rfi->cur_idx = cur_idx;
}
} else {
- /* explicity window move updating the expected index */
+ /* explicitly window move updating the expected index */
exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index a259f4dd9540..413973d05b43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1479,14 +1479,13 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
napi_enable(&dev->mt76.napi[0]);
- napi_schedule(&dev->mt76.napi[0]);
-
napi_enable(&dev->mt76.napi[1]);
+
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.napi[0]);
napi_schedule(&dev->mt76.napi[1]);
local_bh_enable();
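The same reshuffle repeats through the mt76 restart/resume paths below: napi_enable() may now sleep (it takes the netdev instance lock), so it is hoisted out of the local_bh_disable() section and only the napi_schedule() calls stay inside. The corrected shape, condensed:

    napi_enable(&dev->mt76.tx_napi);		/* may sleep, BHs enabled */
    mt76_for_each_q_rx(&dev->mt76, i)
    	napi_enable(&dev->mt76.napi[i]);

    local_bh_disable();
    napi_schedule(&dev->mt76.tx_napi);
    mt76_for_each_q_rx(&dev->mt76, i)
    	napi_schedule(&dev->mt76.napi[i]);
    local_bh_enable();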
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 9a278589df4e..68010e27f065 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -164,12 +164,16 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
+
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index a0ca3bbdfcaf..c2e4e6aabd9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -262,12 +262,14 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 1eb955f3ca13..b456ccd00d58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -282,14 +282,16 @@ static int mt76x0e_resume(struct pci_dev *pdev)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
mt76_queue_rx_reset(dev, i);
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
-
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 7d840ad4ae65..a82c75ba26e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -504,12 +504,14 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_worker_enable(&dev->mt76.tx_worker);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
-
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ napi_schedule(&dev->mt76.tx_napi);
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 67c9d1caa0bd..727bfdd00b40 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -151,12 +151,15 @@ mt76x2e_resume(struct pci_dev *pdev)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 13bdc0a7174c..2ba6eb3038ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1356,10 +1356,15 @@ mt7915_mac_restart(struct mt7915_dev *dev)
mt7915_dma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
+ }
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc) {
napi_schedule(&dev->mt76.napi[i]);
}
}
@@ -1419,8 +1424,9 @@ out:
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
@@ -1570,9 +1576,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
@@ -1581,8 +1590,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index ba870e1b05fb..a0c9df3c2cc7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -523,12 +523,15 @@ static int mt7921_pci_resume(struct device *device)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 2452b1a2d118..881812ba03ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -81,9 +81,12 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
mt792x_wpdma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
+ }
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_schedule(&dev->mt76.napi[i]);
}
local_bh_enable();
@@ -115,8 +118,8 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index f36893e20c61..c7b5dc1dbb34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -556,12 +556,15 @@ static int mt7925_pci_resume(struct device *device)
mt76_worker_enable(&mdev->tx_worker);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
- napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ napi_schedule(&mdev->napi[i]);
+ }
napi_schedule(&mdev->tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
index faedbf766d1a..4578d16bf456 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
@@ -101,12 +101,15 @@ int mt7925e_mac_reset(struct mt792x_dev *dev)
mt792x_wpdma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
- napi_schedule(&dev->mt76.napi[i]);
}
napi_enable(&dev->mt76.tx_napi);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_schedule(&dev->mt76.napi[i]);
+ }
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index bc8cba4dca47..019c925ae600 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1695,7 +1695,6 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt7996_dma_reset(dev, true);
- local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
mt76_queue_is_wed_rro(&mdev->q_rx[i]))
@@ -1703,10 +1702,11 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
}
- local_bh_enable();
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
@@ -1764,8 +1764,8 @@ out:
if (phy3)
clear_bit(MT76_RESET, &phy3->mt76->state);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
@@ -1958,23 +1958,23 @@ void mt7996_mac_reset_work(struct work_struct *work)
if (phy3)
clear_bit(MT76_RESET, &phy3->mt76->state);
- local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
continue;
napi_enable(&dev->mt76.napi[i]);
+ local_bh_disable();
napi_schedule(&dev->mt76.napi[i]);
+ local_bh_enable();
}
- local_bh_enable();
tasklet_schedule(&dev->mt76.irq_tasklet);
mt76_worker_enable(&dev->mt76.tx_worker);
- local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
+ local_bh_disable();
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index f66eb43094d4..3adcfac2886f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -204,7 +204,7 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
if (!priv->msi_enabled) {
pr_warn("legacy PCIE interrupts enabled\n");
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
}
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2237715e42eb..0ccf4a9e523a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1212,7 +1212,7 @@ enum nd_ioctl_mode {
DIMM_IOCTL,
};
-static int match_dimm(struct device *dev, void *data)
+static int match_dimm(struct device *dev, const void *data)
{
long id = (long) data;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 030dbde6b088..9e84ab411564 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -67,13 +67,6 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
return claimed;
}
-static int namespace_match(struct device *dev, void *data)
-{
- char *name = data;
-
- return strcmp(name, dev_name(dev)) == 0;
-}
-
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -168,7 +161,7 @@ ssize_t nd_namespace_store(struct device *dev,
goto out;
}
- found = device_find_child(dev->parent, name, namespace_match);
+ found = device_find_child_by_name(dev->parent, name);
if (!found) {
dev_dbg(dev, "'%s' not found under %s\n", name,
dev_name(dev->parent));
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 76b615d4d5b9..818d4e49aab5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1700,7 +1700,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
- if (status < 0)
+
+ /*
+ * It's either a kernel error or the host observed a connection
+ * loss. In either case it is not possible to communicate with the
+ * controller, so enter the error code path.
+ */
+ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
return status;
/*
@@ -2132,15 +2138,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
struct queue_limits lim;
+ unsigned int memflags;
int ret;
lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
- blk_mq_freeze_queue(ns->disk->queue);
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
/* Hide the block-interface for these devices */
if (!ret)
@@ -2155,6 +2162,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
+ unsigned int memflags;
sector_t capacity;
unsigned lbaf;
int ret;
@@ -2186,7 +2194,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
lim = queue_limits_start_update(ns->disk->queue);
- blk_mq_freeze_queue(ns->disk->queue);
+ memflags = blk_mq_freeze_queue(ns->disk->queue);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2219,7 +2227,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
goto out;
}
@@ -2235,7 +2243,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
- blk_mq_unfreeze_queue(ns->disk->queue);
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
if (blk_queue_is_zoned(ns->queue)) {
ret = blk_revalidate_disk_zones(ns->disk);
@@ -2291,9 +2299,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
if (!ret && nvme_ns_head_multipath(ns->head)) {
struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
+ unsigned int memflags;
lim = queue_limits_start_update(ns->head->disk->queue);
- blk_mq_freeze_queue(ns->head->disk->queue);
+ memflags = blk_mq_freeze_queue(ns->head->disk->queue);
/*
* queue_limits mixes values that are the hardware limitations
* for bio splitting with what is the device configuration.
@@ -2325,7 +2334,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
+ blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
}
return ret;
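A reading aid for the blk_mq_freeze_queue() changes above (sketch only, not part of the patch; example_commit_limits is a made-up name): the freeze helper now returns a flags value that the caller must hand back to blk_mq_unfreeze_queue(), presumably the saved memory-allocation scope flags, which would also explain the blk_mq_unfreeze_queue_nomemrestore() variant used in the multipath hunk below.

/* Illustrative sketch of the updated freeze/unfreeze pairing */
static int example_commit_limits(struct gendisk *disk, struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(disk->queue);
	ret = queue_limits_commit_update(disk->queue, lim);
	blk_mq_unfreeze_queue(disk->queue, memflags);

	return ret;
}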
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 094be164ffdc..f4f1866fbd5b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -781,11 +781,19 @@ restart:
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
+ enum nvme_ctrl_state state;
+ unsigned long flags;
+
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ spin_lock_irqsave(&ctrl->lock, flags);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ state = nvme_ctrl_state(&ctrl->ctrl);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ switch (state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
/*
@@ -2079,7 +2087,8 @@ done:
nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ if (terminate_assoc &&
+ nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
@@ -2533,6 +2542,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/*
* if an error (io timeout, etc) while (re)connecting, the remote
* port requested terminating of the association (disconnect_ls)
@@ -2540,9 +2551,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ if (state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
- set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
@@ -2550,7 +2560,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ if (state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3167,12 +3177,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
- if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
- ret = -EIO;
if (ret)
goto out_term_aen_ops;
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (!test_bit(ASSOC_FAILED, &ctrl->flags))
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ else
+ ret = -EIO;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ if (ret)
+ goto out_term_aen_ops;
ctrl->ctrl.nr_reconnects = 0;
@@ -3578,8 +3594,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
goto fail_ctrl;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a85d190942bd..2a7635565083 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -60,7 +60,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry)
if (h->disk)
- blk_mq_unfreeze_queue(h->disk->queue);
+ blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
}
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 278bed4e35bb..9197a5b173fd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2153,14 +2153,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
return 0;
out_free_bufs:
- while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
- dma_free_attrs(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr),
- DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
- }
-
kfree(bufs);
out_free_descs:
dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -3147,7 +3139,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
*/
- if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index b68a9e5f1ea3..3a41b9ab0f13 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e670dc185a96..acc138bbf8f2 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1068,6 +1068,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
goto out;
}
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
out:
nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index a7ff05b3be29..eb406c90c167 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -287,7 +287,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
args.subsysnqn = d->subsysnqn;
args.hostnqn = d->hostnqn;
args.hostid = &d->hostid;
- args.kato = c->kato;
+ args.kato = le32_to_cpu(c->kato);
ctrl = nvmet_alloc_ctrl(&args);
if (!ctrl)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c1f574fe3280..83be0657e6df 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -272,7 +272,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
- if (req->cmd->rw.control & NVME_RW_LR)
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
opf |= REQ_FAILFAST_DEV;
if (is_pci_p2pdma_page(sg_page(req->sg)))
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b540216c0c9a..4be8d22d2d8d 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -589,7 +589,7 @@ struct nvmet_alloc_ctrl_args {
const struct nvmet_fabrics_ops *ops;
struct device *p2p_client;
u32 kato;
- u32 result;
+ __le32 result;
u16 error_loc;
u16 status;
};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d6494dfc20a7..fff85bbf0ecd 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -213,7 +213,7 @@ static struct attribute *nvmem_attrs[] = {
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -246,7 +246,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct device *dev;
@@ -340,7 +340,7 @@ static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
struct nvmem_cell_entry *entry;
@@ -374,22 +374,22 @@ destroy_cell:
}
/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
+static const struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
.mode = 0644,
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
-static struct bin_attribute *nvmem_bin_attributes[] = {
+static const struct bin_attribute *const nvmem_bin_attributes[] = {
&bin_attr_rw_nvmem,
NULL,
};
static const struct attribute_group nvmem_bin_group = {
- .bin_attrs = nvmem_bin_attributes,
+ .bin_attrs_new = nvmem_bin_attributes,
.attrs = nvmem_attrs,
.is_bin_visible = nvmem_bin_attr_is_visible,
.bin_size = nvmem_bin_attr_size,
@@ -401,12 +401,12 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
+static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
},
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
+ .read_new = bin_attr_nvmem_read,
+ .write_new = bin_attr_nvmem_write,
};
/*
@@ -461,6 +461,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
.name = "cells",
};
struct nvmem_cell_entry *entry;
+ const struct bin_attribute **pattrs;
struct bin_attribute *attrs;
unsigned int ncells = 0, i = 0;
int ret = 0;
@@ -472,9 +473,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
/* Allocate an array of attributes with a sentinel */
ncells = list_count_nodes(&nvmem->cells);
- group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
- sizeof(struct bin_attribute *), GFP_KERNEL);
- if (!group.bin_attrs) {
+ pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!pattrs) {
ret = -ENOMEM;
goto unlock_mutex;
}
@@ -494,17 +495,19 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
entry->bit_offset);
attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
attrs[i].size = entry->bytes;
- attrs[i].read = &nvmem_cell_attr_read;
+ attrs[i].read_new = &nvmem_cell_attr_read;
attrs[i].private = entry;
if (!attrs[i].attr.name) {
ret = -ENOMEM;
goto unlock_mutex;
}
- group.bin_attrs[i] = &attrs[i];
+ pattrs[i] = &attrs[i];
i++;
}
+ group.bin_attrs_new = pattrs;
+
ret = device_add_group(&nvmem->dev, &group);
if (ret)
goto unlock_mutex;
@@ -1790,6 +1793,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si
return -EINVAL;
if (cell->bit_offset || cell->nbits) {
+ if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
+ return -EINVAL;
buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
if (IS_ERR(buf))
return PTR_ERR(buf);
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index 1ba494497698..ca6dd71d8a2e 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -71,13 +71,15 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
u32 *buf;
void *p;
int i;
+ u8 skipbytes;
- index = offset;
- num_bytes = round_up(bytes, 4);
- count = num_bytes >> 2;
+ if (offset + bytes > priv->data->size)
+ bytes = priv->data->size - offset;
- if (count > ((priv->data->size >> 2) - index))
- count = (priv->data->size >> 2) - index;
+ index = offset >> 2;
+ skipbytes = offset - (index << 2);
+ num_bytes = round_up(bytes + skipbytes, 4);
+ count = num_bytes >> 2;
p = kzalloc(num_bytes, GFP_KERNEL);
if (!p)
@@ -100,7 +102,7 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
*buf++ = readl_relaxed(reg + (i << 2));
}
- memcpy(val, (u8 *)p, bytes);
+ memcpy(val, ((u8 *)p) + skipbytes, bytes);
mutex_unlock(&priv->lock);
@@ -109,6 +111,26 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
return 0;
};
+static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ unsigned int offset, void *data, size_t bytes)
+{
+ u8 *buf = data;
+ int i;
+
+ /* Deal with some post processing of nvmem cell data */
+ if (id && !strcmp(id, "mac-address"))
+ for (i = 0; i < bytes / 2; i++)
+ swap(buf[i], buf[bytes - i - 1]);
+
+ return 0;
+}
+
+static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell)
+{
+ cell->read_post_process = imx_ocotp_cell_pp;
+}
+
static int imx_ele_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -131,10 +153,12 @@ static int imx_ele_ocotp_probe(struct platform_device *pdev)
priv->config.owner = THIS_MODULE;
priv->config.size = priv->data->size;
priv->config.reg_read = priv->data->reg_read;
- priv->config.word_size = 4;
+ priv->config.word_size = 1;
priv->config.stride = 1;
priv->config.priv = priv;
priv->config.read_only = true;
+ priv->config.add_legacy_fixed_of_cells = true;
+ priv->config.fixup_dt_cell_info = imx_ocotp_fixup_dt_cell_info;
mutex_init(&priv->lock);
nvmem = devm_nvmem_register(dev, &priv->config);
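To make the new index/skipbytes arithmetic in imx_ocotp_reg_read() above concrete, here is a small worked example (illustrative only, the numbers are not from the patch):

/*
 * Worked example for imx_ocotp_reg_read(): offset = 6, bytes = 3.
 *
 *   index     = 6 >> 2             = 1   start reading at word 1
 *   skipbytes = 6 - (1 << 2)       = 2   offset 6 is 2 bytes into word 1
 *   num_bytes = round_up(3 + 2, 4) = 8   read two whole words (bytes 4..11)
 *   count     = 8 >> 2             = 2
 *
 * memcpy(val, p + skipbytes, bytes) then copies bytes 6..8, so a read that
 * is not 4-byte aligned no longer returns data shifted by the skipped bytes.
 */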
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 9aa8f42faa4c..4f1cca6eab71 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -144,6 +144,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.owner = THIS_MODULE;
sdam->sdam_config.add_legacy_fixed_of_cells = true;
sdam->sdam_config.stride = 1;
+ sdam->sdam_config.size = sdam->size;
sdam->sdam_config.word_size = 1;
sdam->sdam_config.reg_read = sdam_read;
sdam->sdam_config.reg_write = sdam_write;
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
index 7f907c5a445e..b39d628cb60a 100644
--- a/drivers/nvmem/rmem.c
+++ b/drivers/nvmem/rmem.c
@@ -3,28 +3,40 @@
* Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
*/
+#include <linux/crc32.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
struct rmem {
struct device *dev;
struct nvmem_device *nvmem;
struct reserved_mem *mem;
+};
+
+struct rmem_match_data {
+ int (*checksum)(struct rmem *priv);
+};
- phys_addr_t size;
+struct __packed rmem_eyeq5_header {
+ u32 magic;
+ u32 version;
+ u32 size;
};
+#define RMEM_EYEQ5_MAGIC ((u32)0xDABBAD00)
+
static int rmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct rmem *priv = context;
- size_t available = priv->mem->size;
- loff_t off = offset;
void *addr;
- int count;
+
+ if ((phys_addr_t)offset + bytes > priv->mem->size)
+ return -EIO;
/*
* Only map the reserved memory at this point to avoid potential rogue
@@ -36,26 +48,79 @@ static int rmem_read(void *context, unsigned int offset,
* An alternative would be setting the memory as RO, set_memory_ro(),
* but as of Dec 2020 this isn't possible on arm64.
*/
- addr = memremap(priv->mem->base, available, MEMREMAP_WB);
+ addr = memremap(priv->mem->base, priv->mem->size, MEMREMAP_WB);
if (!addr) {
dev_err(priv->dev, "Failed to remap memory region\n");
return -ENOMEM;
}
- count = memory_read_from_buffer(val, bytes, &off, addr, available);
+ memcpy(val, addr + offset, bytes);
memunmap(addr);
- if (count < 0)
- return count;
+ return 0;
+}
+
+static int rmem_eyeq5_checksum(struct rmem *priv)
+{
+ void *buf __free(kfree) = NULL;
+ struct rmem_eyeq5_header header;
+ u32 computed_crc, *target_crc;
+ size_t data_size;
+ int ret;
+
+ ret = rmem_read(priv, 0, &header, sizeof(header));
+ if (ret)
+ return ret;
+
+ if (header.magic != RMEM_EYEQ5_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Avoid massive kmalloc() if header read is invalid;
+ * the check would be done by the next rmem_read() anyway.
+ */
+ if (header.size > priv->mem->size)
+ return -EINVAL;
+
+ /*
+ * 0 +-------------------+
+ * | Header (12 bytes) | \
+ * +-------------------+ |
+ * | | | data to be CRCed
+ * | ... | |
+ * | | /
+ * data_size +-------------------+
+ * | CRC (4 bytes) |
+ * header.size +-------------------+
+ */
+
+ buf = kmalloc(header.size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rmem_read(priv, 0, buf, header.size);
+ if (ret)
+ return ret;
- return count == bytes ? 0 : -EIO;
+ data_size = header.size - sizeof(*target_crc);
+ target_crc = buf + data_size;
+ computed_crc = crc32(U32_MAX, buf, data_size) ^ U32_MAX;
+
+ if (computed_crc == *target_crc)
+ return 0;
+
+ dev_err(priv->dev,
+ "checksum failed: computed %#x, expected %#x, header (%#x, %#x, %#x)\n",
+ computed_crc, *target_crc, header.magic, header.version, header.size);
+ return -EINVAL;
}
static int rmem_probe(struct platform_device *pdev)
{
struct nvmem_config config = { };
struct device *dev = &pdev->dev;
+ const struct rmem_match_data *match_data = device_get_match_data(dev);
struct reserved_mem *mem;
struct rmem *priv;
@@ -78,10 +143,22 @@ static int rmem_probe(struct platform_device *pdev)
config.size = mem->size;
config.reg_read = rmem_read;
+ if (match_data && match_data->checksum) {
+ int ret = match_data->checksum(priv);
+
+ if (ret)
+ return ret;
+ }
+
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
}
+static const struct rmem_match_data rmem_eyeq5_match_data = {
+ .checksum = rmem_eyeq5_checksum,
+};
+
static const struct of_device_id rmem_match[] = {
+ { .compatible = "mobileye,eyeq5-bootloader-config", .data = &rmem_eyeq5_match_data },
{ .compatible = "nvmem-rmem", },
{ /* sentinel */ },
};
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 8770004d9b08..125833e5ce52 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -16,25 +16,10 @@
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
-#include "of_private.h"
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
-#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
-/* Debug utility */
-#ifdef DEBUG
-static void of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while (na--)
- pr_cont(" %08x", be32_to_cpu(*(addr++)));
- pr_cont("\n");
-}
-#else
-static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
@@ -200,17 +185,15 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
{
- u64 end = start;
-
if (overflows_type(start, r->start))
return -EOVERFLOW;
- if (size && check_add_overflow(end, size - 1, &end))
- return -EOVERFLOW;
- if (overflows_type(end, r->end))
- return -EOVERFLOW;
r->start = start;
- r->end = end;
+
+ if (!size)
+ r->end = wrapping_sub(typeof(r->end), r->start, 1);
+ else if (size && check_add_overflow(r->start, size - 1, &r->end))
+ return -EOVERFLOW;
return 0;
}
@@ -828,6 +811,8 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
else
range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
+
+ range->parent_bus_addr = of_read_number(parser->range + na, parser->pna);
range->size = of_read_number(parser->range + parser->pna + na, ns);
parser->range += np;
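One note on the __of_address_resource_bounds() hunk above (an interpretation, not stated in the patch): resource_size() computes end - start + 1, so a zero-sized range has to encode end as start - 1, and wrapping_sub() produces that value even when start is 0 without tripping the overflow helpers. The function name below is made up for illustration.

/* Illustrative sketch: why an empty range encodes end = start - 1 */
static resource_size_t example_resource_size(const struct resource *r)
{
	return r->end - r->start + 1;	/* evaluates to 0 when end wrapped to start - 1 */
}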
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6f5abea2462a..af6c68bbb427 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -894,10 +894,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
/* The path could begin with an alias */
if (*path != '/') {
int len;
- const char *p = separator;
+ const char *p = strchrnul(path, '/');
- if (!p)
- p = strchrnul(path, '/');
+ if (separator && separator < p)
+ p = separator;
len = p - path;
/* of_aliases must not be NULL */
@@ -1027,19 +1027,15 @@ struct device_node *of_find_node_with_property(struct device_node *from,
const char *prop_name)
{
struct device_node *np;
- const struct property *pp;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np) {
- for (pp = np->properties; pp; pp = pp->next) {
- if (of_prop_cmp(pp->name, prop_name) == 0) {
- of_node_get(np);
- goto out;
- }
+ if (__of_find_property(np, prop_name, NULL)) {
+ of_node_get(np);
+ break;
}
}
-out:
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -1453,8 +1449,8 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
- static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
- static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
+ static const __be32 dummy_mask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
+ static const __be32 dummy_pass[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(0) };
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
int i, ret, map_len, match;
@@ -1546,7 +1542,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
* specifier into the out_args structure, keeping the
* bits specified in <list>-map-pass-thru.
*/
- match_array = map - new_size;
for (i = 0; i < new_size; i++) {
__be32 val = *(map - new_size + i);
@@ -1555,6 +1550,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
val |= cpu_to_be32(out_args->args[i]) & pass[i];
}
+ initial_match_array[i] = val;
out_args->args[i] = be32_to_cpu(val);
}
out_args->args_count = list_size = new_size;
@@ -1822,8 +1818,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
* for storing the resulting tree
*
* The function scans all the properties of the 'aliases' node and populates
- * the global lookup table with the properties. It returns the
- * number of alias properties found, or an error code in case of failure.
+ * the global lookup table with the properties.
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0121100372b4..aedd0e2dcd89 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) "OF: fdt: " fmt
-#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
@@ -497,6 +496,7 @@ static void __init fdt_reserve_elfcorehdr(void)
void __init early_init_fdt_scan_reserved_mem(void)
{
int n;
+ int res;
u64 base, size;
if (!initial_boot_params)
@@ -507,7 +507,11 @@ void __init early_init_fdt_scan_reserved_mem(void)
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
- fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ res = fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
+ if (res) {
+ pr_err("Invalid memory reservation block index %d\n", n);
+ break;
+ }
if (!size)
break;
memblock_reserve(base, size);
@@ -1126,13 +1130,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- void *ptr = memblock_alloc(size, align);
-
- if (!ptr)
- panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
- __func__, size, align);
-
- return ptr;
+ return memblock_alloc_or_panic(size, align);
}
bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
@@ -1215,14 +1213,7 @@ void __init unflatten_device_tree(void)
/* Save the statically-placed regions in the reserved_mem array */
fdt_scan_reserved_mem_reg_nodes();
- /* Don't use the bootloader provided DTB if ACPI is enabled */
- if (!acpi_disabled)
- fdt = NULL;
-
- /*
- * Populate an empty root node when ACPI is enabled or bootloader
- * doesn't provide one.
- */
+ /* Populate an empty root node when bootloader doesn't provide one */
if (!fdt) {
fdt = (void *) __dtb_empty_root_begin;
/* fdt_totalsize() will be used for copy size */
@@ -1264,18 +1255,9 @@ void __init unflatten_and_copy_device_tree(void)
}
#ifdef CONFIG_SYSFS
-static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- memcpy(buf, initial_boot_params + off, count);
- return count;
-}
-
static int __init of_fdt_raw_init(void)
{
- static struct bin_attribute of_fdt_raw_attr =
- __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
+ static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(fdt);
if (!initial_boot_params)
return 0;
@@ -1285,8 +1267,9 @@ static int __init of_fdt_raw_init(void)
pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
- return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
+ bin_attr_fdt.private = initial_boot_params;
+ bin_attr_fdt.size = fdt_totalsize(initial_boot_params);
+ return sysfs_create_bin_file(firmware_kobj, &bin_attr_fdt);
}
late_initcall(of_fdt_raw_init);
#endif
diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c
index 9804d7f06705..f358d2c80754 100644
--- a/drivers/of/fdt_address.c
+++ b/drivers/of/fdt_address.c
@@ -17,23 +17,10 @@
#include <linux/of_fdt.h>
#include <linux/sizes.h>
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
- (ns) > 0)
-
-/* Debug utility */
-#ifdef DEBUG
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
-{
- pr_debug("%s", s);
- while(na--)
- pr_cont(" %08x", *(addr++));
- pr_cont("\n");
-}
-#else
-static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { }
-#endif
+/* Uncomment me to enable of_dump_addr() debugging output */
+// #define DEBUG
+
+#include "of_private.h"
/* Callbacks for bus specific translators */
struct of_bus {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 98b1cf78ecac..6c843d54ebb1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -171,7 +171,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
struct device_node *ipar, *tnode, *old = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+ const __be32 *tmp, dummy_imask[] = { [0 ... (MAX_PHANDLE_ARGS - 1)] = cpu_to_be32(~0) };
u32 intsize = 1, addrsize;
int i, rc = -EINVAL;
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index cab9b169dc67..aa887166f0d2 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -29,7 +29,7 @@ const struct kobj_type of_node_ktype = {
};
static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct property *pp = container_of(bin_attr, struct property, attr);
@@ -77,7 +77,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
pp->attr.attr.name = safe_name(&np->kobj, pp->name);
pp->attr.attr.mode = secure ? 0400 : 0444;
pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
+ pp->attr.read_new = of_node_property_read;
rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index ea5a0951ec5e..f3e1193c8ded 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -119,6 +119,8 @@ extern void *__unflatten_device_tree(const void *blob,
void *(*dt_alloc)(u64 size, u64 align),
bool detached);
+void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
+
/**
* General utilities for working with live trees.
*
@@ -188,4 +190,22 @@ void __init fdt_scan_reserved_mem_reg_nodes(void);
bool of_fdt_device_is_available(const void *blob, unsigned long node);
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
+#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
+
+/* Debug utility */
+#ifdef DEBUG
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na)
+{
+ pr_debug("%s", s);
+ while (na--)
+ pr_cont(" %08x", be32_to_cpu(*(addr++)));
+ pr_cont("\n");
+}
+#else
+static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 45517b9e57b1..75e819f66a56 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -52,7 +52,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
memblock_phys_free(base, size);
}
- kmemleak_ignore_phys(base);
+ if (!err)
+ kmemleak_ignore_phys(base);
return err;
}
@@ -262,6 +263,11 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
uname);
continue;
}
+
+ if (len > t_len)
+ pr_warn("%s() ignores %d regions in node '%s'\n",
+ __func__, len / t_len - 1, uname);
+
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
@@ -409,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_addr_cells * sizeof(__be32)) {
+ if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ align = dt_mem_next_cell(dt_root_size_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
@@ -435,13 +441,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
return -EINVAL;
}
- base = 0;
-
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
+ base = 0;
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 7eda43c66c91..cb0cb374b21f 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -19,6 +19,8 @@
#include <linux/of.h>
#include <linux/of_pdt.h>
+#include "of_private.h"
+
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
#if defined(CONFIG_SPARC)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9bafcff3e628..c6d8afb284e8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -24,16 +24,6 @@
#include "of_private.h"
-const struct of_device_id of_default_bus_match_table[] = {
- { .compatible = "simple-bus", },
- { .compatible = "simple-mfd", },
- { .compatible = "isa", },
-#ifdef CONFIG_ARM_AMBA
- { .compatible = "arm,amba-bus", },
-#endif /* CONFIG_ARM_AMBA */
- {} /* Empty terminated list */
-};
-
/**
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
@@ -484,8 +474,17 @@ int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
- return of_platform_populate(root, of_default_bus_match_table, lookup,
- parent);
+ static const struct of_device_id match_table[] = {
+ { .compatible = "simple-bus", },
+ { .compatible = "simple-mfd", },
+ { .compatible = "isa", },
+#ifdef CONFIG_ARM_AMBA
+ { .compatible = "arm,amba-bus", },
+#endif /* CONFIG_ARM_AMBA */
+ {} /* Empty terminated list */
+ };
+
+ return of_platform_populate(root, match_table, lookup, parent);
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index cfc8aea002e4..208d922cc24c 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -32,6 +32,32 @@
#include "of_private.h"
/**
+ * of_property_read_bool - Find a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
+ *
+ * Return: true if the property exists, false otherwise.
+ */
+bool of_property_read_bool(const struct device_node *np, const char *propname)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ /*
+ * Boolean properties should not have a value. Testing for property
+ * presence should either use of_property_present() or just read the
+ * property value and check the returned error code.
+ */
+ if (prop && prop->length)
+ pr_warn("%pOF: Read of boolean property '%s' with a value.\n", np, propname);
+
+ return prop ? true : false;
+}
+EXPORT_SYMBOL(of_property_read_bool);
+
+/**
* of_graph_is_present() - check graph's presence
* @node: pointer to device_node containing graph port
*
@@ -966,6 +992,12 @@ of_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
static bool of_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
+ return of_property_present(to_of_node(fwnode), propname);
+}
+
+static bool of_fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
return of_property_read_bool(to_of_node(fwnode), propname);
}
@@ -1390,9 +1422,9 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
addrcells = of_bus_n_addr_cells(np);
imap = of_get_property(np, "interrupt-map", &imaplen);
- imaplen /= sizeof(*imap);
if (!imap)
return NULL;
+ imaplen /= sizeof(*imap);
imap_end = imap + imaplen;
@@ -1560,6 +1592,7 @@ const struct fwnode_operations of_fwnode_ops = {
.device_dma_supported = of_fwnode_device_dma_supported,
.device_get_dma_attr = of_fwnode_device_get_dma_attr,
.property_present = of_fwnode_property_present,
+ .property_read_bool = of_fwnode_property_read_bool,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
.get_name = of_fwnode_get_name,
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index cd310b26b50c..4171f43cf01c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -33,6 +33,11 @@
reg = <0x100>;
};
};
+
+ test-device@2 {
+ compatible = "test,rust-device";
+ reg = <0x2>;
+ };
};
platform-tests-2 {
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0fa0c0fd9a6a..f88ddb1cf5d7 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -161,6 +161,15 @@ static void __init of_unittest_find_node_by_name(void)
"option alias path test, subcase #1 failed\n");
of_node_put(np);
+ np = of_find_node_opts_by_path("testcase-alias/phandle-tests/consumer-a:testaliasoption",
+ &options);
+ name = kasprintf(GFP_KERNEL, "%pOF", np);
+ unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name) &&
+ !strcmp("testaliasoption", options),
+ "option alias path test, subcase #2 failed\n");
+ of_node_put(np);
+ kfree(name);
+
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
@@ -3680,13 +3689,7 @@ static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
- void *ptr = memblock_alloc(size, align);
-
- if (!ptr)
- panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
- __func__, size, align);
-
- return ptr;
+ return memblock_alloc_or_panic(size, align);
}
/*
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 105de7c3274a..8fc6238b1728 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -217,7 +217,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
{
struct opp_device *new_dev = NULL, *iter;
const struct device *dev;
- struct dentry *dentry;
+ int err;
/* Look for next opp-dev */
list_for_each_entry(iter, &opp_table->dev_list, node)
@@ -234,16 +234,14 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
opp_set_dev_name(dev, opp_table->dentry_name);
- dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
- opp_table->dentry_name);
- if (IS_ERR(dentry)) {
+ err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name);
+ if (err) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
__func__, dev_name(opp_dev->dev), dev_name(dev));
return;
}
- new_dev->dentry = dentry;
- opp_table->dentry = dentry;
+ new_dev->dentry = opp_table->dentry = opp_dev->dentry;
}
/**
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 3644997a8342..24d4f3a3ec3d 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -266,10 +266,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
/* WCH CARDS */
- { 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
- { 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
- { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
- { 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_1S1P,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p },
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1P,
+ 0x4348, 0x3253, 0, 0, wch_ch353_2s1p },
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_0S1P,
+ 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p },
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S1P,
+ 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p },
/* BrainBoxes PX272/PX306 MIO card */
{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6afff1f1b143..c6b266c772c8 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -410,7 +410,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
+ if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 5c62e1a3ba52..33d6bf460ffe 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -635,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -671,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index c8d5c90aa4d4..90ace941090f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -33,6 +33,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -55,6 +56,22 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -70,6 +87,7 @@ enum imx_pcie_variants {
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
IMX95_EP,
};
@@ -87,6 +105,7 @@ enum imx_pcie_variants {
* workaround suspend resume on some devices which are affected by this errata.
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -103,6 +122,7 @@ struct imx_pcie_drvdata {
const char *gpr;
const char * const *clk_names;
const u32 clks_cnt;
+ const u32 clks_optional_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
@@ -111,19 +131,18 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ const struct dw_pcie_host_ops *ops;
};
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
- bool link_is_up;
struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -139,6 +158,9 @@ struct imx_pcie {
struct device *pd_pcie_phy;
struct phy *phy;
const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -234,11 +256,11 @@ static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
id = imx_pcie->controller_id;
- /* If mode_mask is 0, then generic PHY driver is used to set the mode */
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
if (!drvdata->mode_mask[0])
return;
- /* If mode_mask[id] is zero, means each controller have its individual gpr */
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
if (!drvdata->mode_mask[id])
id = 0;
@@ -375,14 +397,15 @@ static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- /* TODO: Currently this code assumes external oscillator is being used */
+ /* TODO: This code assumes external oscillator is being used */
regmap_update_bits(imx_pcie->iomuxc_gpr,
imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
- * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
- * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
*/
if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
regmap_update_bits(imx_pcie->iomuxc_gpr,
@@ -393,13 +416,6 @@ static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
return 0;
}
-static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
-{
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-
- return 0;
-}
-
static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -576,7 +592,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
@@ -589,7 +605,7 @@ static int imx_pcie_attach_pd(struct device *dev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
@@ -598,10 +614,9 @@ static int imx_pcie_attach_pd(struct device *dev)
static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (enable)
- regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
return 0;
}
@@ -611,10 +626,10 @@ static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
/* power up core phy and enable ref clock */
regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+ * The async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
@@ -630,19 +645,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
int offset = imx_pcie_grp_offset(imx_pcie);
- if (enable) {
- regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
- regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- }
-
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
return 0;
}
static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- if (!enable)
- regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
return 0;
}
@@ -775,6 +791,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_deassert(imx_pcie->pciephy_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, false);
@@ -884,6 +901,7 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
if (imx_pcie->drvdata->flags &
IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
+
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -892,7 +910,6 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
* which will cause the following code to report false
* failure.
*/
-
ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
@@ -908,13 +925,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
@@ -930,6 +945,184 @@ static void imx_pcie_stop_link(struct dw_pcie *pci)
imx_pcie_ltssm_disable(dev);
}
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device_node *target;
+ struct device *dev;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ dev = imx_pcie->pci->dev;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * That would require a 1:1 RID-to-streamID mapping, which the
+ * hardware can't support because the streamID is only 6 bits.
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+ * 0 NULL RID out of range. A 1:1 RID-to-streamID
+ * map would be needed, but current hardware
+ * can't support it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+ of_node_put(target); /* msi-map has a streamID entry for this RID */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+ * The MSI glue layer automatically prepends a 2-bit controller ID
+ * to the streamID, so mask off these 2 bits to get the streamID.
+ * The IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
+}
+
static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -946,6 +1139,11 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
}
}
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
imx_pcie_assert_core_reset(imx_pcie);
if (imx_pcie->drvdata->init_phy)
@@ -966,7 +1164,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_clk_disable;
}
- ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
if (ret) {
dev_err(dev, "unable to set PCIe PHY mode\n");
goto err_phy_exit;
@@ -1033,9 +1233,31 @@ static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
return cpu_addr - entry->offset;
}
+/*
+ * In old DWC implementations, the PCIE_ATU_INHIBIT_PAYLOAD bit in the iATU
+ * Ctrl2 register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
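+ /* PME_TO_Ack cannot be checked; wait the recommended 1-10 ms and proceed */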
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
+}
+
static const struct dw_pcie_host_ops imx_pcie_host_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1082,16 +1304,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.align = SZ_64K,
};
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
/*
- * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
- * ================================================================================================
- * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
- * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
- * BAR1 should be disabled if BAR0 is 64bit.
- * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
- * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
- * BAR4 | Enable | 32-bit | 1M | Programmable Size
- * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
*/
static const struct pci_epc_features imx95_pcie_epc_features = {
.msi_capable = true,
@@ -1118,7 +1351,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
@@ -1128,28 +1360,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
-
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
-
- /*
- * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
- * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
- * core code can fetch that from DT. But once all platform DTs were fixed, this and the
- * above "dbi_base2" setting should be removed.
- */
- if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
- pci->dbi_base2 = NULL;
-
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
@@ -1176,43 +1386,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
return 0;
}
-static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
-{
- struct device *dev = imx_pcie->pci->dev;
-
- /* Some variants have a turnoff reset in DT */
- if (imx_pcie->turnoff_reset) {
- reset_control_assert(imx_pcie->turnoff_reset);
- reset_control_deassert(imx_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
- }
-
- /* Others poke directly at IOMUXC registers */
- switch (imx_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
-
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
-}
-
static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
@@ -1236,7 +1409,6 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1251,9 +1423,7 @@ static int imx_pcie_suspend_noirq(struct device *dev)
imx_pcie_assert_core_reset(imx_pcie);
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
} else {
- imx_pcie_pm_turnoff(imx_pcie);
- imx_pcie_stop_link(imx_pcie->pci);
- imx_pcie_host_exit(pp);
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
}
return 0;
@@ -1263,7 +1433,6 @@ static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
@@ -1275,6 +1444,7 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret)
return ret;
+
/*
* Using PCIE_TEST_PD seems to disable MSI and powers down the
* root complex. This is why we have to setup the rc again and
@@ -1283,17 +1453,12 @@ static int imx_pcie_resume_noirq(struct device *dev)
ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
} else {
- ret = imx_pcie_host_init(pp);
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
if (ret)
return ret;
- imx_pcie_msi_save_restore(imx_pcie, false);
- dw_pcie_setup_rc(pp);
-
- if (imx_pcie->link_is_up)
- imx_pcie_start_link(imx_pcie->pci);
}
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
@@ -1311,9 +1476,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int i, ret, req_cnt;
u16 val;
- int i;
imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
if (!imx_pcie)
@@ -1325,11 +1489,17 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx_pcie_host_ops;
imx_pcie->pci = pci;
imx_pcie->drvdata = of_device_get_match_data(dev);
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
+
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
if (np) {
@@ -1363,9 +1533,13 @@ static int imx_pcie_probe(struct platform_device *pdev)
imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt;
+ ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks);
if (ret)
return ret;
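+ /* The optional "ref" clock, if present, fills the last slot of the bulk array */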
+ imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(imx_pcie->clks[req_cnt].clk))
+ return PTR_ERR(imx_pcie->clks[req_cnt].clk);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
@@ -1391,7 +1565,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx_pcie->controller_id = 1;
break;
@@ -1399,13 +1572,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
break;
}
- /* Grab turnoff reset */
- imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx_pcie->turnoff_reset);
- }
-
if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
imx_pcie->iomuxc_gpr =
@@ -1484,6 +1650,7 @@ static int imx_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
} else {
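+ /* Let the DWC core send PME_Turn_Off through an outbound iATU MSG window */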
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1513,6 +1680,7 @@ static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
+static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};
static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
@@ -1548,6 +1716,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx6sx_pcie_init_phy,
.enable_ref_clk = imx6sx_pcie_enable_ref_clk,
.core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1565,6 +1734,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.init_phy = imx_pcie_init_phy,
.enable_ref_clk = imx6q_pcie_enable_ref_clk,
.core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
@@ -1576,14 +1746,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx7d_pcie_init_phy,
.enable_ref_clk = imx7d_pcie_enable_ref_clk,
.core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
.flags = IMX_PCIE_FLAG_HAS_APP_RESET |
- IMX_PCIE_FLAG_HAS_PHY_RESET,
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
@@ -1621,15 +1791,19 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX8Q] = {
.variant = IMX8Q,
.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
- IMX_PCIE_FLAG_CPU_ADDR_FIXUP,
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.clk_names = imx8q_clks,
.clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX_PCIE_FLAG_HAS_SERDES,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .clk_names = imx95_clks,
+ .clks_cnt = ARRAY_SIZE(imx95_clks),
+ .clks_optional_cnt = 1,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1678,6 +1852,14 @@ static const struct imx_pcie_drvdata drvdata[] = {
.epc_features = &imx8m_pcie_epc_features,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .clk_names = imx8q_clks,
+ .clks_cnt = ARRAY_SIZE(imx8q_clks),
+ },
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
@@ -1707,6 +1889,7 @@ static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ee6f52568133..239a05b36e8e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct ls_pcie *pcie;
struct resource *dbi_base;
u32 index[2];
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
if (pcie->drvdata->scfg_support) {
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 2,
+ index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(pcie->scfg);
}
- ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
-
pcie->index = index[1];
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index f8e7283dacd4..234c8cbcae3a 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f3ac7d46a855..8e07d432e74f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -128,7 +128,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t cpu_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -145,7 +146,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ cpu_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -222,19 +223,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
return -EINVAL;
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- type = PCIE_ATU_TYPE_MEM;
- else
- type = PCIE_ATU_TYPE_IO;
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
- if (ret)
- return ret;
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
- if (ep->epf_bar[bar])
- return 0;
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
@@ -246,9 +258,21 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
- ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d2291c3ceb8b..ffaded8f2df7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -436,18 +436,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -530,8 +530,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
@@ -918,7 +924,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- int ret = 0;
+ int ret;
/*
* If L1SS is supported, then do not put the link into L2 as some
@@ -927,25 +933,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (pci->pp.ops->pme_turn_off)
+ if (pci->pp.ops->pme_turn_off) {
pci->pp.ops->pme_turn_off(&pci->pp);
- else
+ } else {
ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- if (ret)
- return ret;
-
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
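+ /*
+ * An LTSSM state at or below DETECT_WAIT means the link never came
+ * up, so there is no L2 transition to wait for.
+ */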
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+ /* Only log message when LTSSM isn't in DETECT or POLL */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d6cbc8b5b2c..145e7f579072 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -597,11 +597,12 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 cpu_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
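+ /*
+ * In BAR match mode the lower address bits are passed through
+ * untranslated, so the target address must also be size-aligned.
+ */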
+ if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(cpu_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
@@ -970,7 +971,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
if (pci->edma.nr_irqs == 1)
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 347ab74ac35a..501d9ddfea16 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -330,6 +330,7 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
DW_PCIE_LTSSM_L0 = 0x11,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
@@ -379,6 +380,7 @@ struct dw_pcie_rp {
bool use_atu_msg;
int msg_atu_index;
struct resource *msg_res;
+ bool use_linkup_irq;
};
struct dw_pcie_ep_ops {
@@ -491,16 +493,13 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 cpu_addr, u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-int dw_pcie_suspend_noirq(struct dw_pcie *pci);
-int dw_pcie_resume_noirq(struct dw_pcie *pci);
-
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
dw_pcie_write_dbi(pci, reg, 0x4, val);
@@ -678,6 +677,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
}
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
@@ -686,6 +687,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 1170e1107508..93698abff4d9 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -389,6 +389,34 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
{
struct rockchip_pcie *rockchip = arg;
@@ -418,14 +446,29 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
{
+ struct device *dev = &pdev->dev;
struct dw_pcie_rp *pp;
+ int irq, ret;
u32 val;
if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
return -ENODEV;
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
/* LTSSM enable control mode */
val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -435,8 +478,19 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
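+ /* Link Up is signalled via the "sys" IRQ, so the DWC core need not wait for the link */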
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
- return dw_pcie_host_init(pp);
+ return ret;
}
static int rockchip_pcie_configure_ep(struct platform_device *pdev,
@@ -450,14 +504,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
return -ENODEV;
irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL,
rockchip_pcie_ep_sys_irq_thread,
- IRQF_ONESHOT, "pcie-sys", rockchip);
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
if (ret) {
dev_err(dev, "failed to request PCIe sys IRQ\n");
return ret;
@@ -491,7 +543,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
pci_epc_init_notify(rockchip->pci.ep.epc);
/* unmask DLL up/down indicator and hot reset/link-down reset */
- rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC);
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
}
@@ -553,7 +606,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
switch (data->mode) {
case DW_PCIE_RC_TYPE:
- ret = rockchip_pcie_configure_rc(rockchip);
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
if (ret)
goto deinit_clk;
break;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc102d8bd58c..e4d3366ead1f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
pci_lock_rescan_remove();
pci_rescan_bus(pp->bridge->bus);
pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
} else {
dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
status);
@@ -1703,6 +1705,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
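+ /* A "global" IRQ reports Link Up events, so the DWC core can skip the link-up wait */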
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
@@ -1716,7 +1722,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_host_deinit;
}
- irq = platform_get_irq_byname_optional(pdev, "global");
if (irq > 0) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
qcom_pcie_global_irq_thread,
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index cf5f59a745b3..f441bfd6f96a 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -75,6 +75,8 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 46d3afe1d308..665f35f9d826 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index fefab2758a06..a7e51bc1c2fe 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -26,7 +26,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
@@ -667,12 +666,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,12 +701,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
+ struct apple_pcie_port *port;
u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
@@ -721,45 +728,6 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
-
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
-};
-
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -799,23 +767,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
return 0;
}
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
- if (ret)
- return ret;
-
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
-}
-
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
.init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -830,7 +785,7 @@ static const struct of_device_id apple_pcie_of_match[] = {
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = apple_pcie_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index be52e3a123ab..aa24ac9aaecc 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -125,6 +125,8 @@
#define MAX_NUM_PHY_RESETS 3
+#define PCIE_MTK_RESET_TIME_US 10
+
/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS 100
@@ -133,10 +135,18 @@ struct mtk_gen3_pcie;
#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or the suspend/resume phase
+ * to avoid hardware bugs/issues.
+ */
+};
+
/**
* struct mtk_gen3_pcie_pdata - differentiate between host generations
* @power_up: pcie power_up callback
* @phy_resets: phy reset lines SoC data.
+ * @flags: pcie device flags.
*/
struct mtk_gen3_pcie_pdata {
int (*power_up)(struct mtk_gen3_pcie *pcie);
@@ -144,6 +154,7 @@ struct mtk_gen3_pcie_pdata {
const char *id[MAX_NUM_PHY_RESETS];
int num_resets;
} phy_resets;
+ u32 flags;
};
/**
@@ -438,22 +449,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug when asserting/releasing the PCIE_PE_RSTB
+ * signal, causing occasional PCIe link down. To work around the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -913,11 +935,20 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
u32 val;
/*
- * Wait for the time needed to complete the bulk assert in
- * mtk_pcie_setup for EN7581 SoC.
+ * The controller may have been left out of reset by the bootloader,
+ * so make sure that we get a clean start by asserting the resets here.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ /* Wait for the time needed to complete the reset line assertion. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
err = phy_init(pcie->phy);
if (err) {
dev_err(dev, "failed to initialize PHY\n");
@@ -940,17 +971,11 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
* Wait for the time needed to complete the bulk de-assert above.
* This time is specific for EN7581 SoC.
*/
- mdelay(PCIE_EN7581_RESET_TIME_MS);
+ msleep(PCIE_EN7581_RESET_TIME_MS);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
- if (err) {
- dev_err(dev, "failed to prepare clock\n");
- goto err_clk_prepare;
- }
-
val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
@@ -963,17 +988,22 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
- err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
if (err) {
dev_err(dev, "failed to prepare clock\n");
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
}
+ /*
+ * Airoha EN7581 performs the PCIe reset via clk callbacks since it has a
+ * hw issue with the PCIE_PE_RSTB signal. Wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
return 0;
-err_clk_enable:
- clk_bulk_unprepare(pcie->num_clks, pcie->clks);
-err_clk_prepare:
+err_clk_prepare_enable:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
@@ -990,6 +1020,15 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader,
+ * so make sure that we get a clean start by asserting the resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
if (err) {
@@ -1074,14 +1113,6 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
* counter since the bulk is shared.
*/
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
- /*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
- */
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie);
@@ -1231,10 +1262,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 states successfully");
@@ -1285,6 +1318,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
.id[2] = "phy-lane2",
.num_resets = 3,
},
+ .flags = SKIP_PCIE_RSTB,
};
static const struct of_device_id mtk_pcie_of_match[] = {
@@ -1301,6 +1335,7 @@ static struct platform_driver mtk_pcie_driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 047e2cef5afc..c5e0d025bc43 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 1064b7b06cef..85ea36df2f59 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -40,6 +40,10 @@
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/INTX IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -784,6 +788,7 @@ static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
goto err_epc_mem_exit;
}
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index b9ade7632e11..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -69,55 +69,23 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
if (rockchip->is_rc)
rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
@@ -129,29 +97,10 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
"failed to get PERST# GPIO\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
@@ -169,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -195,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -275,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -375,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index a51b087ce878..11def598534b 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,9 +11,11 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -309,22 +311,31 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+static const char * const rockchip_pci_core_rsts[] = {
+ "mgmt-sticky",
+ "core",
+ "mgmt",
+ "pipe",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
@@ -358,7 +369,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index a0f5e1d67b04..81e8bfae53d0 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -30,11 +30,14 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
@@ -80,14 +83,21 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: A bitmask for the miscellaneous interrupt status
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -269,6 +279,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -279,11 +290,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -465,6 +476,8 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -483,15 +496,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -609,10 +622,21 @@ err_parse_dt:
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +648,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
{}
};
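[Editor's note — illustrative sketch, not part of the patch. The hunks above key all CPM5_HOST1 behaviour off per-compatible match data; the sketch below shows how a probe path would typically resolve that data, assuming the driver already uses of_device_get_match_data() (the exact probe code is not shown in this diff).]

	/* Hypothetical helper; names are for illustration only */
	static int example_resolve_variant(struct platform_device *pdev,
					   struct xilinx_cpm_pcie *port)
	{
		/* &cpm_host, &cpm5_host or &cpm5_host1 from the match table above */
		port->variant = of_device_get_match_data(&pdev->dev);
		if (!port->variant)
			return -ENODEV;

		/* CPM5 host1 now carries its own IR status/enable offsets */
		if (port->variant->ir_enable)
			dev_dbg(&pdev->dev, "local IR enable at %#x\n",
				port->variant->ir_enable);
		return 0;
	}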
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 6630cacef301..3fdfffdf0270 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -7,20 +7,27 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/wordpart.h>
#include "../../pci.h"
#include "pcie-plda.h"
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u
@@ -607,6 +614,91 @@ static void mc_disable_interrupts(struct mc_pcie *port)
writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
}
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This cfg needs two windows because of how the
+ * MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * Always need same two tables in this case. Need two tables
+ * due to hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
+
static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -627,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
if (ret)
return ret;
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
port->plda.event_ops = &mc_event_ops;
port->plda.event_irq_chip = &mc_event_irq_chip;
port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
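[Editor's note — hedged sketch, not part of the patch. It only restates the SRCADDR_PARAM encoding used by mc_pcie_setup_inbound_atr() above, assuming the ATR_SIZE_MASK/ATR_IMPL_ENABLE definitions added to pcie-plda.h later in this series; the helper name is made up.]

	/* Compose the SRCADDR_PARAM word for a 1 GB window at PCIe 0x80000000 */
	static u32 example_atr_srcaddr_param(u64 pcie_addr, u64 size)
	{
		u32 atr_sz = ilog2(size) - 1;	/* SZ_1G -> 29 */
		u32 val;

		val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
		val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
		val |= ATR_IMPL_ENABLE;

		return val;	/* 0x8000003b for pcie_addr = 0x80000000, size = SZ_1G */
	}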
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 8533dc618d45..4153214ca410 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -8,11 +8,14 @@
* Author: Daire McNamara <daire.mcnamara@microchip.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
#include "pcie-plda.h"
@@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_PARAM);
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_SRCADDR_PARAM);
@@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
val = upper_32_bits(pci_addr);
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
-EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port)
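[Editor's note — hedged usage sketch, not part of the patch. With the split above, plda_pcie_setup_window() no longer programs the default inbound PCIE WIN0; a caller that still wants that behaviour has to invoke the new helper explicitly. The wrapper name below is hypothetical.]

	static void example_plda_setup(struct plda_pcie_rp *port,
				       phys_addr_t axi, phys_addr_t pci,
				       size_t size)
	{
		/* outbound window 0, as before */
		plda_pcie_setup_window(port->bridge_addr, 0, axi, pci, size);

		/* default inbound translation, now a separate call */
		plda_pcie_setup_inbound_address_translation(port);
	}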
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
index 0e7dc0d8e5ba..61ece26065ea 100644
--- a/drivers/pci/controller/plda/pcie-plda.h
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -89,14 +89,15 @@
/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
@@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev,
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
phys_addr_t axi_addr, phys_addr_t pci_addr,
size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
struct plda_pcie_rp *port);
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 3b59a86a764b..3431a7df3e0d 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -101,7 +101,7 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
* @bar: BAR the range is within
* @offset: offset from the BAR's start address
* @maxlen: length in bytes, beginning at @offset
- * @name: name associated with the request
+ * @name: name of the driver requesting the resource
* @req_flags: flags for the request, e.g., for kernel-exclusive requests
*
* Returns: 0 on success, a negative error code on failure.
@@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar)
return mask & BIT(bar);
}
-/*
- * This is a copy of pci_intx() used to bypass the problem of recursive
- * function calls due to the hybrid nature of pci_intx().
- */
-static void __pcim_intx(struct pci_dev *pdev, int enable)
-{
- u16 pci_command, new;
-
- pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
-
- if (enable)
- new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
- else
- new = pci_command | PCI_COMMAND_INTX_DISABLE;
-
- if (new != pci_command)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-}
-
static void pcim_intx_restore(struct device *dev, void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pcim_intx_devres *res = data;
- __pcim_intx(pdev, res->orig_intx);
+ pci_intx(pdev, res->orig_intx);
}
-static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
+static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
{
- struct pcim_intx_devres *res;
-
- res = devres_find(dev, pcim_intx_restore, NULL, NULL);
- if (res)
- return res;
+ u16 pci_command;
- res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
- if (res)
- devres_add(dev, res);
-
- return res;
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
}
/**
@@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
int pcim_intx(struct pci_dev *pdev, int enable)
{
struct pcim_intx_devres *res;
+ struct device *dev = &pdev->dev;
- res = get_or_create_intx_devres(&pdev->dev);
- if (!res)
- return -ENOMEM;
+ /*
+ * pcim_intx() must only restore the INTx value that existed before the
+ * driver was loaded, i.e., before it called pcim_intx() for the
+ * first time.
+ */
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (!res) {
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ save_orig_intx(pdev, res);
+ devres_add(dev, res);
+ }
- res->orig_intx = !enable;
- __pcim_intx(pdev, enable);
+ pci_intx(pdev, enable);
return 0;
}
+EXPORT_SYMBOL_GPL(pcim_intx);
static void pcim_disable_device(void *pdev_raw)
{
@@ -723,7 +709,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* pcim_iomap_region - Request and iomap a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of a BAR to map
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
*
@@ -790,7 +776,7 @@ EXPORT_SYMBOL(pcim_iounmap_region);
* pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
- * @name: Name associated with the requests
+ * @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -855,9 +841,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
/**
* pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -874,9 +860,9 @@ EXPORT_SYMBOL(pcim_request_region);
/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
* @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
*
* Returns: 0 on success, a negative error code on failure.
*
@@ -932,7 +918,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev)
/**
* pcim_request_all_regions - Request all regions
* @pdev: PCI device to map IO resources for
- * @name: name associated with the request
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
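[Editor's note — hedged sketch, not part of the patch. After the rework above, pcim_intx() records the INTx state that existed before the driver first touched it and restores exactly that state via devres on unbind, while pci_intx() is a plain unmanaged write. The probe function below is hypothetical.]

	static int example_probe(struct pci_dev *pdev)
	{
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		/* original (pre-probe) INTx setting is restored on driver unbind */
		return pcim_intx(pdev, 1);
	}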
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index ef6677f34116..b94e205ae10b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,6 +44,8 @@
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -74,6 +76,7 @@ struct pci_epf_test_reg {
u32 irq_type;
u32 irq_number;
u32 flags;
+ u32 caps;
} __packed;
static struct pci_epf_header test_header = {
@@ -251,7 +254,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -328,8 +331,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
void *copy_buf = NULL, *buf;
if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
ret = -EINVAL;
goto set_status;
}
@@ -739,6 +742,20 @@ static void pci_epf_test_clear_bar(struct pci_epf *epf)
}
}
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -763,6 +780,8 @@ static int pci_epf_test_epc_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index bed7c7d1fe3c..9e9ca5f8e8f8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -60,26 +60,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
int ret = -EINVAL;
struct pci_epc *epc;
struct device *dev;
- struct class_dev_iter iter;
- class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (strcmp(epc_name, dev_name(dev)))
- continue;
+ dev = class_find_device_by_name(&pci_epc_class, epc_name);
+ if (!dev)
+ goto err;
- epc = to_pci_epc(dev);
- if (!try_module_get(epc->ops->owner)) {
- ret = -EINVAL;
- goto err;
- }
-
- class_dev_iter_exit(&iter);
- get_device(&epc->dev);
+ epc = to_pci_epc(dev);
+ if (try_module_get(epc->ops->owner))
return epc;
- }
err:
- class_dev_iter_exit(&iter);
+ put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
@@ -609,10 +600,20 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
+ int ret;
- if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+
+ if (epc_features->bar[bar].type == BAR_FIXED &&
+ (epc_features->bar[bar].fixed_size != epf_bar->size))
+ return -EINVAL;
+
+ if (!is_power_of_2(epf_bar->size))
return -EINVAL;
if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
@@ -942,7 +943,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
int r;
- r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
epc);
dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8fa2797d4169..50bc2892a36c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
mutex_lock(&epf_pf->lock);
clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->epf_pf = NULL;
list_del(&epf_vf->list);
mutex_unlock(&epf_pf->lock);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8f3a0a33f362..b3aa34e3a4a2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
@@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
.name = "apci_table",
.mode = S_IRUGO,
},
- .read = ibm_read_apci_table,
+ .read_new = ibm_read_apci_table,
.write = NULL,
};
static struct acpiphp_attention_info ibm_attention_info =
@@ -353,7 +353,7 @@ read_table_done:
* our solution is to only allow reading the table in all at once.
*/
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4be402fe9ab9..9e4770cdd4d5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -747,6 +747,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
const char *res_name;
struct pci_dev *pdev;
+ u32 sriovbars[PCI_SRIOV_NUM_BARS];
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
@@ -783,6 +784,10 @@ found:
if (!iov)
return -ENOMEM;
+ /* Sizing SR-IOV BARs with VF Enable cleared - no decode */
+ __pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
+ pos + PCI_SRIOV_BAR, sriovbars);
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
@@ -796,7 +801,8 @@ found:
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ pos + PCI_SRIOV_BAR + i * 4,
+ &sriovbars[i]);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 52f770bcc481..7a806f5c0d20 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -190,7 +190,8 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn);
*
* Returns 0 on success or a negative error-code on failure.
*/
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+static int of_pci_parse_bus_range(struct device_node *node,
+ struct resource *res)
{
u32 bus_range[2];
int error;
@@ -207,7 +208,6 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
return 0;
}
-EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
@@ -302,8 +302,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
- * @busno: bus number associated with the bridge root bus
- * @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
@@ -319,7 +317,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
@@ -343,14 +340,15 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
- bus_range->start = busno;
- bus_range->end = bus_max;
+ bus_range->start = 0;
+ bus_range->end = 0xff;
bus_range->flags = IORESOURCE_BUS;
- dev_info(dev, " No bus range found for %pOF, using %pR\n",
- dev_node, bus_range);
} else {
- if (bus_range->end > bus_range->start + bus_max)
- bus_range->end = bus_range->start + bus_max;
+ if (bus_range->end > 0xff) {
+ dev_warn(dev, " Invalid end bus number in %pR, defaulting to 0xff\n",
+ bus_range);
+ bus_range->end = 0xff;
+ }
}
pci_add_resource(resources, bus_range);
@@ -597,7 +595,7 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ err = devm_of_pci_get_host_bridge_resources(dev, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 886c236e5de6..58fbafac7c6a 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -26,7 +26,7 @@ struct of_pci_addr_pair {
* side and the child address is the corresponding address on the secondary
* side.
*/
-struct of_pci_range {
+struct of_pci_range_entry {
u32 child_addr[OF_PCI_ADDRESS_CELLS];
u32 parent_addr[OF_PCI_ADDRESS_CELLS];
u32 size[OF_PCI_SIZE_CELLS];
@@ -101,7 +101,7 @@ static int of_pci_prop_bus_range(struct pci_dev *pdev,
static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
- struct of_pci_range *rp;
+ struct of_pci_range_entry *rp;
struct resource *res;
int i, j, ret;
u32 flags, num;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 7abd4f546d3c..0cb7e0aaba0e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -161,7 +161,7 @@ out:
return ret;
}
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
.attr = { .name = "allocate", .mode = 0660 },
.mmap = p2pmem_alloc_mmap,
/*
@@ -180,14 +180,14 @@ static struct attribute *p2pmem_attrs[] = {
NULL,
};
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
&p2pmem_alloc_attr,
NULL,
};
static const struct attribute_group p2pmem_group = {
.attrs = p2pmem_attrs,
- .bin_attrs = p2pmem_bin_attrs,
+ .bin_attrs_new = p2pmem_bin_attrs,
.name = "p2pmem",
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7679d75d71e5..b46ce1a2c554 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -694,7 +695,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(boot_vga);
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -769,7 +770,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -837,9 +838,9 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
&bin_attr_config,
NULL,
};
@@ -856,7 +857,7 @@ static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_config_attr_group = {
- .bin_attrs = pci_dev_config_attrs,
+ .bin_attrs_new = pci_dev_config_attrs,
.bin_size = pci_dev_config_attr_bin_size,
};
@@ -887,8 +888,8 @@ pci_llseek_resource(struct file *filep,
* callback routine (pci_legacy_read).
*/
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -912,8 +913,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* callback routine (pci_legacy_write).
*/
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -1003,8 +1004,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.name = "legacy_io";
b->legacy_io->size = 0xffff;
b->legacy_io->attr.mode = 0600;
- b->legacy_io->read = pci_read_legacy_io;
- b->legacy_io->write = pci_write_legacy_io;
+ b->legacy_io->read_new = pci_read_legacy_io;
+ b->legacy_io->write_new = pci_write_legacy_io;
/* See pci_create_attr() for motivation */
b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1099,7 +1100,7 @@ static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
@@ -1142,14 +1143,14 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
int ret;
@@ -1210,8 +1211,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
sprintf(res_attr_name, "resource%d", num);
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ res_attr->read_new = pci_read_resource_io;
+ res_attr->write_new = pci_write_resource_io;
if (arch_can_pci_mmap_io())
res_attr->mmap = pci_mmap_resource_uc;
} else {
@@ -1292,7 +1293,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1318,7 +1319,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
* device corresponding to @kobj.
*/
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1344,9 +1345,9 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
&bin_attr_rom,
NULL,
};
@@ -1372,7 +1373,7 @@ static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
}
static const struct attribute_group pci_dev_rom_attr_group = {
- .bin_attrs = pci_dev_rom_attrs,
+ .bin_attrs_new = pci_dev_rom_attrs,
.is_bin_visible = pci_dev_rom_attr_is_visible,
.bin_size = pci_dev_rom_attr_bin_size,
};
@@ -1421,6 +1422,113 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *tmp_options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ tmp_options = options;
+ while ((name = strsep(&tmp_options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ /* Leave previous methods unchanged if input is invalid */
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ return -EINVAL;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ return count;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 661f98c6c63a..869d204a70a3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -1100,34 +1099,6 @@ static void pci_enable_acs(struct pci_dev *dev)
}
/**
- * pcie_read_tlp_log - read TLP Header Log
- * @dev: PCIe device
- * @where: PCI Config offset of TLP Header Log
- * @tlp_log: TLP Log structure to fill
- *
- * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
- *
- * Return: 0 on success and filled TLP Log structure, <0 on error.
- */
-int pcie_read_tlp_log(struct pci_dev *dev, int where,
- struct pcie_tlp_log *tlp_log)
-{
- int i, ret;
-
- memset(tlp_log, 0, sizeof(*tlp_log));
-
- for (i = 0; i < 4; i++) {
- ret = pci_read_config_dword(dev, where + i * 4,
- &tlp_log->dw[i]);
- if (ret)
- return pcibios_err_to_errno(ret);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
-
-/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
@@ -2059,6 +2030,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
return pci_enable_resources(dev, bars);
}
+static int pci_host_bridge_enable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+ int err;
+
+ if (host_bridge && host_bridge->enable_device) {
+ err = host_bridge->enable_device(host_bridge, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void pci_host_bridge_disable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge && host_bridge->disable_device)
+ host_bridge->disable_device(host_bridge, dev);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -2074,9 +2067,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (bridge)
pcie_aspm_powersave_config_link(bridge);
+ err = pci_host_bridge_enable_device(dev);
+ if (err)
+ return err;
+
err = pcibios_enable_device(dev, bars);
if (err < 0)
- return err;
+ goto err_enable;
pci_fixup_device(pci_fixup_enable, dev);
if (dev->msi_enabled || dev->msix_enabled)
@@ -2091,6 +2088,12 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
}
return 0;
+
+err_enable:
+ pci_host_bridge_disable_device(dev);
+
+ return err;
+
}
/**
@@ -2274,6 +2277,8 @@ void pci_disable_device(struct pci_dev *dev)
if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
+ pci_host_bridge_disable_device(dev);
+
do_pci_disable_device(dev);
dev->is_busmaster = 0;
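[Editor's note — hedged sketch, not part of the patch. The two hunks above route pci_enable_device()/pci_disable_device() through optional pci_host_bridge enable_device()/disable_device() callbacks; the sketch shows how a host bridge driver could install one. The callback signature matches the calls above; the function names are invented for illustration.]

	static int example_bridge_enable_device(struct pci_host_bridge *bridge,
						struct pci_dev *dev)
	{
		/* e.g. program a per-device filter before the device is enabled */
		return 0;
	}

	static void example_bridge_setup(struct pci_host_bridge *bridge)
	{
		bridge->enable_device  = example_bridge_enable_device;
		/* bridge->disable_device may be set symmetrically */
	}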
@@ -3941,15 +3946,14 @@ EXPORT_SYMBOL(pci_release_region);
* __pci_request_region - Reserved PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resource
* @exclusive: whether the region access is exclusive or not
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
@@ -3959,13 +3963,13 @@ EXPORT_SYMBOL(pci_release_region);
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
- const char *res_name, int exclusive)
+ const char *name, int exclusive)
{
if (pci_is_managed(pdev)) {
if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, res_name);
+ return pcim_request_region_exclusive(pdev, bar, name);
- return pcim_request_region(pdev, bar, res_name);
+ return pcim_request_region(pdev, bar, name);
}
if (pci_resource_len(pdev, bar) == 0)
@@ -3973,11 +3977,11 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name))
+ pci_resource_len(pdev, bar), name))
goto err_out;
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name,
+ pci_resource_len(pdev, bar), name,
exclusive))
goto err_out;
}
@@ -3994,14 +3998,13 @@ err_out:
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resource
*
* Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4011,9 +4014,9 @@ err_out:
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
- return __pci_request_region(pdev, bar, res_name, 0);
+ return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);
@@ -4036,13 +4039,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *name, int excl)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
- if (__pci_request_region(pdev, i, res_name, excl))
+ if (__pci_request_region(pdev, i, name, excl))
goto err_out;
return 0;
@@ -4059,7 +4062,7 @@ err_out:
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
+ * @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4069,9 +4072,9 @@ err_out:
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name, 0);
+ return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
@@ -4079,7 +4082,7 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* pci_request_selected_regions_exclusive - Request regions exclusively
* @pdev: PCI device to request regions from
* @bars: bit mask of BARs to request
- * @res_name: name to be associated with the requests
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
@@ -4089,9 +4092,9 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name,
+ return __pci_request_selected_regions(pdev, bars, name,
IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@@ -4114,12 +4117,11 @@ EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved by
+ * owner @name. Do not access any address inside the PCI regions unless this
+ * call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
@@ -4129,22 +4131,22 @@ EXPORT_SYMBOL(pci_release_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
*
* Mark all PCI regions associated with PCI device @pdev as being reserved
- * by owner @res_name. Do not access any address inside the PCI regions
+ * by owner @name. Do not access any address inside the PCI regions
* unless this call returns successfully.
*
* pci_request_regions_exclusive() will mark the region so that /dev/mem
@@ -4158,10 +4160,10 @@ EXPORT_SYMBOL(pci_request_regions);
* when pcim_enable_device() has been called in advance. This hybrid feature is
* DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4488,11 +4490,6 @@ void pci_disable_parity(struct pci_dev *dev)
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device @pdev
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
@@ -4505,15 +4502,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- /* Preserve the "hybrid" behavior for backwards compatibility */
- if (pci_is_managed(pdev)) {
- WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
- return;
- }
+ if (new == pci_command)
+ return;
- pci_write_config_word(pdev, PCI_COMMAND, new);
- }
+ pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx);
@@ -5204,7 +5196,7 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
-static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ },
{ pci_dev_specific_reset, .name = "device_specific" },
{ pci_dev_acpi_reset, .name = "acpi" },
@@ -5215,129 +5207,6 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ cxl_reset_bus_function, .name = "cxl_bus" },
};
-static ssize_t reset_method_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len = 0;
- int i, m;
-
- for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
- m = pdev->reset_methods[i];
- if (!m)
- break;
-
- len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
- pci_reset_fn_methods[m].name);
- }
-
- if (len)
- len += sysfs_emit_at(buf, len, "\n");
-
- return len;
-}
-
-static int reset_method_lookup(const char *name)
-{
- int m;
-
- for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
- if (sysfs_streq(name, pci_reset_fn_methods[m].name))
- return m;
- }
-
- return 0; /* not found */
-}
-
-static ssize_t reset_method_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- char *options, *tmp_options, *name;
- int m, n;
- u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
-
- if (sysfs_streq(buf, "")) {
- pdev->reset_methods[0] = 0;
- pci_warn(pdev, "All device reset methods disabled by user");
- return count;
- }
-
- if (sysfs_streq(buf, "default")) {
- pci_init_reset_methods(pdev);
- return count;
- }
-
- options = kstrndup(buf, count, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- n = 0;
- tmp_options = options;
- while ((name = strsep(&tmp_options, " ")) != NULL) {
- if (sysfs_streq(name, ""))
- continue;
-
- name = strim(name);
-
- m = reset_method_lookup(name);
- if (!m) {
- pci_err(pdev, "Invalid reset method '%s'", name);
- goto error;
- }
-
- if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
- pci_err(pdev, "Unsupported reset method '%s'", name);
- goto error;
- }
-
- if (n == PCI_NUM_RESET_METHODS - 1) {
- pci_err(pdev, "Too many reset methods\n");
- goto error;
- }
-
- reset_methods[n++] = m;
- }
-
- reset_methods[n] = 0;
-
- /* Warn if dev-specific supported but not highest priority */
- if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
- reset_methods[0] != 1)
- pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
- memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
- kfree(options);
- return count;
-
-error:
- /* Leave previous methods unchanged */
- kfree(options);
- return -EINVAL;
-}
-static DEVICE_ATTR_RW(reset_method);
-
-static struct attribute *pci_dev_reset_method_attrs[] = {
- &dev_attr_reset_method.attr,
- NULL,
-};
-
-static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-
- if (!pci_reset_supported(pdev))
- return 0;
-
- return a->mode;
-}
-
-const struct attribute_group pci_dev_reset_method_attr_group = {
- .attrs = pci_dev_reset_method_attrs,
- .is_visible = pci_dev_reset_method_attr_is_visible,
-};
-
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2e40fc63ba31..01e51db8d285 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#include <linux/pci.h>
+struct pcie_tlp_log;
+
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
@@ -315,8 +317,10 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
+ struct resource *res, unsigned int reg, u32 *sizes);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -547,6 +551,12 @@ struct aer_err_info {
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log);
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -565,6 +575,7 @@ void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
+unsigned int dpc_tlp_log_len(struct pci_dev *dev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) { }
static inline void pci_restore_dpc_state(struct pci_dev *dev) { }
@@ -766,6 +777,7 @@ struct pci_reset_fn_method {
int (*reset_fn)(struct pci_dev *pdev, bool probe);
char *name;
};
+extern const struct pci_reset_fn_method pci_reset_fn_methods[];
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
@@ -797,7 +809,6 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
#ifdef CONFIG_OF
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
@@ -814,12 +825,6 @@ bool of_pci_supply_present(struct device_node *np);
#else
static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
of_get_pci_domain_nr(struct device_node *node)
{
return -1;
@@ -960,8 +965,6 @@ static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
-extern const struct attribute_group pci_dev_reset_method_attr_group;
-
#ifdef CONFIG_X86_INTEL_MID
bool pci_use_mid_pm(void);
int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 53ccab62314d..173829aa02e6 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -7,7 +7,7 @@ pcieportdrv-y := portdrv.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o
obj-y += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o err.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o tlp.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 34ce9f834d0c..508474e17183 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -665,12 +665,6 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
-{
- pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
-}
-
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -725,7 +719,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- __print_tlp_header(dev, &info->tlp);
+ pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
@@ -797,7 +791,7 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
aer->uncor_severity);
if (tlp_header_valid)
- __print_tlp_header(dev, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
@@ -1248,7 +1242,10 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
+ aer + PCI_ERR_PREFIX_LOG,
+ aer_tlp_log_len(dev, aercc),
+ &info->tlp);
}
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 28567d457613..da3e7edcf49d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -81,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
+ struct pci_dev *parent = pdev->bus->self;
struct pci_cap_saved_state *save_state;
- u16 l1ss = pdev->l1ss;
u32 *cap;
/*
+ * If this is a Downstream Port, we never restore the L1SS state
+ * directly; we only restore it when we restore the state of the
+ * Upstream Port below it.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ /*
* Save L1 substate configuration. The ASPM L0s/L1 configuration
* in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
*/
- if (!l1ss)
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
return;
- save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
+
+ /*
+ * Save parent's L1 substate configuration so we have it for
+ * pci_restore_aspm_l1ss_state(pdev) to restore.
+ */
+ save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 2b6ef7efa3c1..242cabd5eeeb 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,7 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, log, prefix;
+ u32 status, mask, sev, syserr, exc, log;
struct pcie_tlp_log tlp_log;
int i;
@@ -215,22 +215,18 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
first_error == i ? " (First)" : "");
}
- if (pdev->dpc_rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
goto clear_status;
- pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
- pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
+ dpc_tlp_log_len(pdev), &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
- if (pdev->dpc_rp_log_size < 5)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
- pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
- pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
- }
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
@@ -404,7 +400,9 @@ void pci_dpc_init(struct pci_dev *pdev)
if (!pdev->dpc_rp_log_size) {
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
+ pdev->dpc_rp_log_size > PCIE_STD_NUM_TLP_HEADERLOG + 1 +
+ PCIE_STD_MAX_TLP_PREFIXLOG) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
new file mode 100644
index 000000000000..0860b5da837f
--- /dev/null
+++ b/drivers/pci/pcie/tlp.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe TLP Log handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/aer.h>
+#include <linux/array_size.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "../pci.h"
+
+/**
+ * aer_tlp_log_len - Calculate AER Capability TLP Header/Prefix Log length
+ * @dev: PCIe device
+ * @aercc: AER Capabilities and Control register value
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
+{
+ return PCIE_STD_NUM_TLP_HEADERLOG +
+ ((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
+ dev->eetlp_prefix_max : 0);
+}
+
+#ifdef CONFIG_PCIE_DPC
+/**
+ * dpc_tlp_log_len - Calculate DPC RP PIO TLP Header/Prefix Log length
+ * @dev: PCIe device
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int dpc_tlp_log_len(struct pci_dev *dev)
+{
+ /* Remove ImpSpec Log register from the count */
+ if (dev->dpc_rp_log_size >= PCIE_STD_NUM_TLP_HEADERLOG + 1)
+ return dev->dpc_rp_log_size - 1;
+
+ return dev->dpc_rp_log_size;
+}
+#endif
+
+/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @where2: PCI Config offset of TLP Prefix Log
+ * @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @log: TLP Log structure to fill
+ *
+ * Fill @log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, struct pcie_tlp_log *log)
+{
+ unsigned int i;
+ int off, ret;
+ u32 *to;
+
+ memset(log, 0, sizeof(*log));
+
+ for (i = 0; i < tlp_len; i++) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG) {
+ off = where + i * 4;
+ to = &log->dw[i];
+ } else {
+ off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
+ to = &log->prefix[i - PCIE_STD_NUM_TLP_HEADERLOG];
+ }
+
+ ret = pci_read_config_dword(dev, off, to);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return 0;
+}
+
+#define EE_PREFIX_STR " E-E Prefixes:"
+
+/**
+ * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
+ * @dev: PCIe device
+ * @log: TLP Log structure
+ * @pfx: String prefix
+ *
+ * Prints TLP Header and Prefix Log information held by @log.
+ */
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx)
+{
+ char buf[11 * (PCIE_STD_NUM_TLP_HEADERLOG + ARRAY_SIZE(log->prefix)) +
+ sizeof(EE_PREFIX_STR)];
+ unsigned int i;
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
+ log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
+
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len, EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
+
+ pci_err(dev, "%sTLP Header: %s\n", pfx, buf);
+}
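
For orientation (not part of the patch): a minimal sketch of how a port driver could consume the new helpers, mirroring the DPC RP PIO hunk earlier in this diff. The function name is hypothetical, CONFIG_PCIE_DPC is assumed, and the sketch assumes the helpers and PCIE_STD_NUM_TLP_HEADERLOG are visible the same way they are in the new tlp.c (via linux/aer.h, linux/pci.h and drivers/pci/pci.h).

/* Hypothetical caller mirroring the DPC RP PIO path changed above. */
#include <linux/aer.h>
#include <linux/pci.h>

#include "../pci.h"	/* pcie_read_tlp_log(), pcie_print_tlp_log(), dpc_tlp_log_len() */

static void example_dump_dpc_tlp_log(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap;
	struct pcie_tlp_log tlp_log;

	/* Nothing to dump if the port logs fewer than the 4 standard DWORDs */
	if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
		return;

	/*
	 * Header Log and Prefix Log live at different offsets; the helper
	 * splits the dpc_tlp_log_len() DWORDs across the two regions.
	 */
	if (pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
			      cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
			      dpc_tlp_log_len(pdev), &tlp_log))
		return;

	pcie_print_tlp_log(pdev, &tlp_log, "");
}

An AER caller would be analogous, with aer_tlp_log_len(dev, aercc) supplying the length and the AER Header Log / Prefix Log offsets passed as the two register bases.
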
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2e81ab0f5a25..b6536ed599c3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,40 +165,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
+ *
+ * Provided @sizes array must be sufficiently sized to store results for
+ * @count u32 BARs. Caller is responsible for disabling decode to specified
+ * BAR range around calling this function. This function is intended to avoid
+ * disabling decode around sizing each BAR individually, which can result in
+ * non-trivial overhead in virtualized environments with very large PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes, bool rom)
+{
+ u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+ int i;
+
+ for (i = 0; i < count; i++, pos += 4, sizes++) {
+ pci_read_config_dword(dev, pos, &orig);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, sizes);
+ pci_write_config_dword(dev, pos, orig);
+ }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
* __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos, u32 *sizes)
{
- u32 l = 0, sz = 0, mask;
+ u32 l = 0, sz;
u64 l64, sz64, mask64;
- u16 orig_cmd;
struct pci_bus_region region, inverted_region;
const char *res_name = pci_resource_name(dev, res - dev->resource);
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
- /* No printks while decoding is disabled! */
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
- }
- }
-
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, l | mask);
- pci_read_config_dword(dev, pos, &sz);
- pci_write_config_dword(dev, pos, l);
+ sz = sizes[0];
/*
* All bits set in sz means the device isn't working properly.
@@ -238,18 +264,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (res->flags & IORESOURCE_MEM_64) {
pci_read_config_dword(dev, pos + 4, &l);
- pci_write_config_dword(dev, pos + 4, ~0);
- pci_read_config_dword(dev, pos + 4, &sz);
- pci_write_config_dword(dev, pos + 4, l);
+ sz = sizes[1];
l64 |= ((u64)l << 32);
sz64 |= ((u64)sz << 32);
mask64 |= ((u64)~0 << 32);
}
- if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
if (!sz64)
goto fail;
@@ -320,7 +341,11 @@ out:
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
+ u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
+ u16 orig_cmd;
+
+ BUILD_BUG_ON(howmany > PCI_STD_NUM_BARS);
if (dev->non_compliant_bars)
return;
@@ -329,10 +354,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->is_virtfn)
return;
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+ if (rom)
+ __pci_size_rom(dev, rom, &rombar);
+
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
- pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ pos += __pci_read_base(dev, pci_bar_unknown,
+ res, reg, &stdbars[pos]);
}
if (rom) {
@@ -340,7 +383,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
- __pci_read_base(dev, pci_bar_mem32, res, rom);
+ __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
}
}
@@ -2251,8 +2294,8 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
-#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ unsigned int eetlp_max;
int pcie_type;
u32 cap;
@@ -2264,15 +2307,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
return;
pcie_type = pci_pcie_type(dev);
+
+ eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+ /* 00b means 4 */
+ eetlp_max = eetlp_max ?: 4;
+
if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pcie_type == PCI_EXP_TYPE_RC_END)
- dev->eetlp_prefix_path = 1;
+ dev->eetlp_prefix_max = eetlp_max;
else {
bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->eetlp_prefix_path)
- dev->eetlp_prefix_path = 1;
+ if (bridge && bridge->eetlp_prefix_max)
+ dev->eetlp_prefix_max = eetlp_max;
}
-#endif
}
static void pci_configure_serr(struct pci_dev *dev)
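(A concrete example of the new bookkeeping, illustrative rather than taken from the patch: a Root Port whose Device Capabilities 2 End-End TLP Prefix field reads 0b00 gets eetlp_prefix_max = 4 per the "00b means 4" rule above, so when the AER Capability advertises PCI_ERR_CAP_PREFIX_LOG_PRESENT, aer_tlp_log_len() reports PCIE_STD_NUM_TLP_HEADERLOG + 4 = 8 DWORDs to read.)
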
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 76f4df75b08a..b84ff7bade82 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,7 @@
* file, where their drivers can use them.
*/
+#include <linux/aer.h>
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/types.h>
@@ -5984,6 +5985,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
+
+
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
* These IDs are used to forward responses to the originator on the other
@@ -6233,8 +6245,9 @@ static void dpc_log_size(struct pci_dev *dev)
return;
if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
- pci_info(dev, "Overriding RP PIO Log Size to 4\n");
- dev->dpc_rp_log_size = 4;
+ pci_info(dev, "Overriding RP PIO Log Size to %d\n",
+ PCIE_STD_NUM_TLP_HEADERLOG);
+ dev->dpc_rp_log_size = PCIE_STD_NUM_TLP_HEADERLOG;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
@@ -6253,6 +6266,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c7e1089ffdaf..b14dfab04d84 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }, \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }
+
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
@@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 1e604fbbda65..07de59ca2ebf 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -360,7 +360,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
return err;
}
- set_ctrl_reg_req_en(pdev, pdev->tph_mode);
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a469bcbc0da7..3d29b2602d0f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -271,8 +271,8 @@ void pci_vpd_init(struct pci_dev *dev)
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -295,8 +295,8 @@ static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -317,9 +317,9 @@ static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
+static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-static struct bin_attribute *vpd_attrs[] = {
+static const struct bin_attribute *const vpd_attrs[] = {
&bin_attr_vpd,
NULL,
};
@@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
}
const struct attribute_group pci_dev_vpd_attr_group = {
- .bin_attrs = vpd_attrs,
+ .bin_attrs_new = vpd_attrs,
.is_bin_visible = vpd_attr_is_visible,
};
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index b5cc11abc962..0e360feb3432 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -1279,7 +1279,7 @@ static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int
return 0;
}
-static struct ctl_table armv8_pmu_sysctl_table[] = {
+static const struct ctl_table armv8_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 194c153e5d71..698de8ddf895 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -1317,7 +1317,7 @@ static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
return 0;
}
-static struct ctl_table sbi_pmu_sysctl_table[] = {
+static const struct ctl_table sbi_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index cd159a71b23c..29b8fd4b9351 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index d3ccf547ba1c..45004f598e4d 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -331,25 +331,17 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
{
u32 pclk = cfg->pixclk;
u32 fld_tg_code;
- u32 pclk_khz;
- u8 div = 1;
-
- switch (cfg->pixclk) {
- case 22250000 ... 47500000:
- div = 1;
- break;
- case 50349650 ... 99000000:
- div = 2;
- break;
- case 100699300 ... 198000000:
- div = 4;
- break;
- case 205000000 ... 297000000:
- div = 8;
- break;
+ u32 int_pllclk;
+ u8 div;
+
+ /* Find int_pllclk speed */
+ for (div = 0; div < 4; div++) {
+ int_pllclk = pclk / (1 << div);
+ if (int_pllclk < (50 * MHZ))
+ break;
}
- writeb(FIELD_PREP(REG12_CK_DIV_MASK, ilog2(div)), phy->regs + PHY_REG(12));
+ writeb(FIELD_PREP(REG12_CK_DIV_MASK, div), phy->regs + PHY_REG(12));
/*
* Calculation for the frequency lock detector target code (fld_tg_code)
@@ -362,10 +354,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
* settings rounding up always too. TODO: Check if that is
* correct.
*/
- pclk /= div;
- pclk_khz = pclk / 1000;
- fld_tg_code = 256 * 1000 * 1000 / pclk_khz * 24;
- fld_tg_code = DIV_ROUND_UP(fld_tg_code, 1000);
+
+ fld_tg_code = DIV_ROUND_UP(24 * MHZ * 256, int_pllclk);
/* FLD_TOL and FLD_RP_CODE taken from downstream driver */
writeb(FIELD_PREP(REG13_TG_CODE_LOW_MASK, fld_tg_code),
@@ -406,16 +396,15 @@ static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u1
continue;
/*
- * TODO: Ref Manual doesn't state the range of _m
- * so this should be further refined if possible.
- * This range was set based on the original values
- * in the lookup table
+ * The Ref manual doesn't explicitly state the range of M,
+ * but it does show it as an 8-bit value, so reject
+ * any value above 255.
*/
tmp = (u64)fout * (_p * _s);
do_div(tmp, 24 * MHZ);
- _m = tmp;
- if (_m < 0x30 || _m > 0x7b)
+ if (tmp > 255)
continue;
+ _m = tmp;
/*
* Rev 2 of the Ref Manual states the
@@ -440,9 +429,13 @@ static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u1
min_delta = delta;
best_freq = tmp;
}
+
+ /* If we have an exact match, stop looking for a better value */
+ if (!delta)
+ goto done;
}
}
-
+done:
if (best_freq) {
*p = best_p;
*m = best_m;
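
(A quick numeric check of the reworked lock-detector math, with an illustrative pixel clock not taken from the patch: for pclk = 148,500,000 Hz the new loop stops at div = 2, since 148.5 MHz -> 74.25 MHz -> 37.125 MHz is the first value below 50 MHz, giving int_pllclk = 37,125,000 Hz and fld_tg_code = DIV_ROUND_UP(24 MHz * 256, 37,125,000) = 166 -- the same 166 the old div = 4, kHz-based rounding produced for that clock, and the same divider code 2 written to PHY_REG(12).)
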
diff --git a/drivers/phy/hisilicon/phy-hi3670-pcie.c b/drivers/phy/hisilicon/phy-hi3670-pcie.c
index 0ac9634b398d..dbc7dcce682b 100644
--- a/drivers/phy/hisilicon/phy-hi3670-pcie.c
+++ b/drivers/phy/hisilicon/phy-hi3670-pcie.c
@@ -16,15 +16,20 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#define AXI_CLK_FREQ 207500000
#define REF_CLK_FREQ 100000000
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index fefc02d921e6..71f9c14fb50d 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -422,7 +422,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
/* wait until clocks are ready */
mdelay(1);
- /* exlicitly disable 40B, the bits isn't clear on reset */
+ /* explicitly disable 40B, the bits isn't clear on reset */
regmap_read(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), &val);
val &= ~MVEBU_COMPHY_CONF6_40B;
regmap_write(priv->regmap, MVEBU_COMPHY_CONF6(lane->id), val);
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
index bbfe11d6a69d..b38f3ae26b3f 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
@@ -9,6 +9,8 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/types.h>
#include <linux/units.h>
#include <linux/nvmem-consumer.h>
@@ -478,8 +480,50 @@ static int mtk_hdmi_phy_configure(struct phy *phy, union phy_configure_opts *opt
return ret;
}
+static int mtk_hdmi_phy_pwr5v_enable(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ mtk_phy_set_bits(hdmi_phy->regs + HDMI_CTL_1, RG_HDMITX_PWR5V_O);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_pwr5v_disable(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ mtk_phy_clear_bits(hdmi_phy->regs + HDMI_CTL_1, RG_HDMITX_PWR5V_O);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_pwr5v_is_enabled(struct regulator_dev *rdev)
+{
+ struct mtk_hdmi_phy *hdmi_phy = rdev_get_drvdata(rdev);
+
+ return !!(readl(hdmi_phy->regs + HDMI_CTL_1) & RG_HDMITX_PWR5V_O);
+}
+
+static const struct regulator_ops mtk_hdmi_pwr5v_regulator_ops = {
+ .enable = mtk_hdmi_phy_pwr5v_enable,
+ .disable = mtk_hdmi_phy_pwr5v_disable,
+ .is_enabled = mtk_hdmi_phy_pwr5v_is_enabled
+};
+
+static const struct regulator_desc mtk_hdmi_phy_pwr5v_desc = {
+ .name = "hdmi-pwr5v",
+ .id = -1,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .ops = &mtk_hdmi_pwr5v_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8195_conf = {
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+ .hdmi_phy_regulator_desc = &mtk_hdmi_phy_pwr5v_desc,
.hdmi_phy_clk_ops = &mtk_hdmi_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
index 22a68dc9550c..e26caaf4d104 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.h
@@ -103,6 +103,9 @@
#define HDMI_ANA_CTL 0x7c
#define REG_ANA_HDMI20_FIFO_EN BIT(16)
+#define HDMI_CTL_1 0xc4
+#define RG_HDMITX_PWR5V_O BIT(9)
+
#define HDMI_CTL_3 0xcc
#define REG_HDMITXPLL_DIV GENMASK(4, 0)
#define REG_HDMITX_REF_XTAL_SEL BIT(7)
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index d2e824771f9d..52a7d525ff9b 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -75,6 +75,28 @@ static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
+static int mtk_hdmi_phy_register_regulators(struct mtk_hdmi_phy *hdmi_phy)
+{
+ const struct regulator_desc *vreg_desc = hdmi_phy->conf->hdmi_phy_regulator_desc;
+ const struct regulator_init_data vreg_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ }
+ };
+ struct regulator_config vreg_config = {
+ .dev = hdmi_phy->dev,
+ .driver_data = hdmi_phy,
+ .init_data = &vreg_init_data,
+ .of_node = hdmi_phy->dev->of_node
+ };
+
+ hdmi_phy->rdev = devm_regulator_register(hdmi_phy->dev, vreg_desc, &vreg_config);
+ if (IS_ERR(hdmi_phy->rdev))
+ return PTR_ERR(hdmi_phy->rdev);
+
+ return 0;
+}
+
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -150,6 +172,12 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
if (hdmi_phy->conf->pll_default_off)
hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+ if (hdmi_phy->conf->hdmi_phy_regulator_desc) {
+ ret = mtk_hdmi_phy_register_regulators(hdmi_phy);
+ if (ret)
+ return ret;
+ }
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
hdmi_phy->pll);
}
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index 71c02d043485..99d917e0036a 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#include <linux/types.h>
struct mtk_hdmi_phy;
@@ -20,6 +22,7 @@ struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
unsigned long flags;
bool pll_default_off;
+ const struct regulator_desc *hdmi_phy_regulator_desc;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -32,6 +35,7 @@ struct mtk_hdmi_phy {
struct mtk_hdmi_phy_conf *conf;
struct clk *pll;
struct clk_hw pll_hw;
+ struct regulator_dev *rdev;
unsigned long pll_rate;
unsigned char drv_imp_clk;
unsigned char drv_imp_d2;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 3f7095ec5978..a496fbe3352b 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -381,17 +381,12 @@ static const char *const u3_phy_files[] = {
static int u2_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
- const char *fname = file_dentry(sf->file)->d_iname;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
u32 max = 0;
u32 tmp = 0;
u32 val = 0;
- int ret;
-
- ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
- if (ret < 0)
- return ret;
+ int ret = debugfs_get_aux_num(sf->file);
switch (ret) {
case U2P_EYE_VRT:
@@ -438,7 +433,7 @@ static int u2_phy_params_show(struct seq_file *sf, void *unused)
break;
}
- seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+ seq_printf(sf, "%s : %d [0, %d]\n", u2_phy_files[ret], val, max);
return 0;
}
@@ -451,23 +446,18 @@ static int u2_phy_params_open(struct inode *inode, struct file *file)
static ssize_t u2_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u2phy_banks *u2_banks = &inst->u2_banks;
void __iomem *com = u2_banks->com;
ssize_t rc;
u32 val;
- int ret;
+ int ret = debugfs_get_aux_num(file);
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
- ret = match_string(u2_phy_files, ARRAY_SIZE(u2_phy_files), fname);
- if (ret < 0)
- return (ssize_t)ret;
-
switch (ret) {
case U2P_EYE_VRT:
mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL, val);
@@ -516,23 +506,18 @@ static void u2_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
int i;
for (i = 0; i < count; i++)
- debugfs_create_file(u2_phy_files[i], 0644, inst->phy->debugfs,
- inst, &u2_phy_fops);
+ debugfs_create_file_aux_num(u2_phy_files[i], 0644, inst->phy->debugfs,
+ inst, i, &u2_phy_fops);
}
static int u3_phy_params_show(struct seq_file *sf, void *unused)
{
struct mtk_phy_instance *inst = sf->private;
- const char *fname = file_dentry(sf->file)->d_iname;
struct u3phy_banks *u3_banks = &inst->u3_banks;
u32 val = 0;
u32 max = 0;
u32 tmp;
- int ret;
-
- ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
- if (ret < 0)
- return ret;
+ int ret = debugfs_get_aux_num(sf->file);
switch (ret) {
case U3P_EFUSE_EN:
@@ -564,7 +549,7 @@ static int u3_phy_params_show(struct seq_file *sf, void *unused)
break;
}
- seq_printf(sf, "%s : %d [0, %d]\n", fname, val, max);
+ seq_printf(sf, "%s : %d [0, %d]\n", u3_phy_files[ret], val, max);
return 0;
}
@@ -577,23 +562,18 @@ static int u3_phy_params_open(struct inode *inode, struct file *file)
static ssize_t u3_phy_params_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *fname = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtk_phy_instance *inst = sf->private;
struct u3phy_banks *u3_banks = &inst->u3_banks;
void __iomem *phyd = u3_banks->phyd;
ssize_t rc;
u32 val;
- int ret;
+ int ret = debugfs_get_aux_num(sf->file);
rc = kstrtouint_from_user(ubuf, USER_BUF_LEN(count), 0, &val);
if (rc)
return rc;
- ret = match_string(u3_phy_files, ARRAY_SIZE(u3_phy_files), fname);
- if (ret < 0)
- return (ssize_t)ret;
-
switch (ret) {
case U3P_EFUSE_EN:
mtk_phy_update_field(phyd + U3P_U3_PHYD_RSV,
@@ -636,8 +616,8 @@ static void u3_phy_dbgfs_files_create(struct mtk_phy_instance *inst)
int i;
for (i = 0; i < count; i++)
- debugfs_create_file(u3_phy_files[i], 0644, inst->phy->debugfs,
- inst, &u3_phy_fops);
+ debugfs_create_file_aux_num(u3_phy_files[i], 0644, inst->phy->debugfs,
+ inst, i, &u3_phy_fops);
}
static int phy_type_show(struct seq_file *sf, void *unused)
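
To isolate the pattern the tphy hunks above switch to (a hypothetical miniature, not part of the patch): the per-file index that used to be recovered by matching the dentry name against a string table is now stored as the file's aux number at creation time and read back in the handlers. The demo names and helpers below are made up; only debugfs_create_file_aux_num()/debugfs_get_aux_num() are taken from the patch.

#include <linux/array_size.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static const char * const demo_files[] = { "alpha", "beta", "gamma" };

static int demo_show(struct seq_file *sf, void *unused)
{
	/* Recover the index stored when the file was created. */
	int idx = debugfs_get_aux_num(sf->file);

	seq_printf(sf, "%s : %d\n", demo_files[idx], idx);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_create(struct dentry *parent, void *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_files); i++)
		debugfs_create_file_aux_num(demo_files[i], 0444, parent,
					    data, i, &demo_fops);
}
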
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 413f76e2d174..8dfdce605a90 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -749,8 +749,8 @@ EXPORT_SYMBOL_GPL(devm_phy_put);
/**
* of_phy_simple_xlate() - returns the phy instance from phy provider
- * @dev: the PHY provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the PHY provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by phy provider for the common case where #phy-cells is
* 0. For other cases where #phy-cells is greater than '0', the phy provider
@@ -760,21 +760,14 @@ EXPORT_SYMBOL_GPL(devm_phy_put);
struct phy *of_phy_simple_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct phy *phy;
- struct class_dev_iter iter;
-
- class_dev_iter_init(&iter, &phy_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- phy = to_phy(dev);
- if (args->np != phy->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return phy;
- }
+ target_dev = class_find_device_by_of_node(&phy_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return to_phy(target_dev);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
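
(A note on the rewrite above, inferred from the code rather than stated in the patch: class_find_device_by_of_node() returns the matched device with a reference held, which the old open-coded iterator never kept past class_dev_iter_exit(); dropping it immediately with put_device() before returning to_phy() keeps the function's reference-counting behaviour identical to the previous loop.)
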
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 3bae39381fd0..b09fa00e9fe7 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -400,6 +400,57 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sar2130p_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x2e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC, 0x0c),
+};
+
static const struct qmp_phy_init_tbl sm6350_usb3_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
@@ -1730,6 +1781,51 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
.dp_dp_phy = 0x2200,
};
+static const struct qmp_phy_cfg sar2130p_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = sar2130p_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sar2130p_usb3_serdes_tbl),
+ .tx_tbl = sm8550_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl),
+ .rx_tbl = sm8550_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl),
+ .pcs_tbl = sm8550_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v6_usb3phy_regs_layout,
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
.offsets = &qmp_combo_offsets_v3,
@@ -3768,6 +3864,10 @@ err_node_put:
static const struct of_device_id qmp_combo_of_match_table[] = {
{
+ .compatible = "qcom,sar2130p-qmp-usb3-dp-phy",
+ .data = &sar2130p_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
.data = &sc7180_usb3dpphy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 873f2f9844c6..018bbb300830 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -728,6 +728,83 @@ static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
};
+static const struct qmp_phy_init_tbl qcs615_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x9),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x3),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0xd),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0xa),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl qcs615_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x4),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_OSC_DTCT_ACTIONS, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB, 0x0),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_SIGDET_CNTRL, 0x7),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe),
+};
+
static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -1773,7 +1850,7 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rc_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
};
-static const struct qmp_phy_init_tbl sdx55_qmp_pcie_ep_pcs_misc_tbl[] = {
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_ep_pcs_lane1_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
@@ -1907,6 +1984,9 @@ static const struct qmp_phy_init_tbl sdx65_qmp_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG2, 0x0d),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
+static const struct qmp_phy_init_tbl sdx65_qmp_pcie_pcs_lane1_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
@@ -2582,8 +2662,6 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = {
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
};
@@ -2724,10 +2802,106 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ep_pcs_alt_tbl[] =
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_INSIG_SW_CTRL7, 0x00),
};
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC_3, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_LANE1_INSIG_SW_CTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_LANE1_INSIG_MX_CTRL2, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_tx_tbl[] = {
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_TX_BIST_MODE_LANENO, 0x00, 2),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G3S2_PRE_GAIN, 0x2e),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+};
+
+static const struct qmp_phy_init_tbl sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1, 0x1e),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x14),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
u16 pcs_misc;
+ u16 pcs_lane1;
u16 tx;
u16 rx;
u16 tx2;
@@ -2752,6 +2926,8 @@ struct qmp_phy_cfg_tbls {
int pcs_num;
const struct qmp_phy_init_tbl *pcs_misc;
int pcs_misc_num;
+ const struct qmp_phy_init_tbl *pcs_lane1;
+ int pcs_lane1_num;
const struct qmp_phy_init_tbl *ln_shrd;
int ln_shrd_num;
};
@@ -2811,6 +2987,7 @@ struct qmp_pcie {
void __iomem *serdes;
void __iomem *pcs;
void __iomem *pcs_misc;
+ void __iomem *pcs_lane1;
void __iomem *tx;
void __iomem *rx;
void __iomem *tx2;
@@ -2927,6 +3104,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v4_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
.pcs_misc = 0x1600,
+ .pcs_lane1 = 0x1e00,
.tx = 0x0000,
.rx = 0x0200,
.tx2 = 0x0800,
@@ -2957,6 +3135,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
.pcs_misc = 0x1400,
+ .pcs_lane1 = 0x1e00,
.tx = 0x0000,
.rx = 0x0200,
.tx2 = 0x0800,
@@ -3132,6 +3311,31 @@ static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = {
.pipe_clock_rate = 250000000,
};
+static const struct qmp_phy_cfg qcs615_pciephy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_pcie_offsets_v2,
+
+ .tbls = {
+ .serdes = qcs615_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(qcs615_pcie_serdes_tbl),
+ .tx = qcs615_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(qcs615_pcie_tx_tbl),
+ .rx = qcs615_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(qcs615_pcie_rx_tbl),
+ .pcs = qcs615_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(qcs615_pcie_pcs_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v2_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
@@ -3283,6 +3487,49 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.skip_start_delay = true,
};
+static const struct qmp_phy_cfg sar2130p_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v5,
+
+ .tbls = {
+ .tx = sm8550_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8550_qmp_gen3x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_rx_tbl),
+ .pcs = sm8550_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_lane1 = sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_pcs_lane1_tbl),
+ },
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_serdes_tbl),
+ .tx = sar2130p_qmp_gen3x2_pcie_rc_tx_tbl,
+ .tx_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_tx_tbl),
+ .pcs = sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_rc_pcs_tbl),
+ .pcs_misc = sm8550_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .tbls_ep = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_serdes_tbl),
+ .pcs = sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_pcs_tbl),
+ .pcs_misc = sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sar2130p_qmp_gen3x2_pcie_ep_pcs_misc_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.lanes = 2,
@@ -3440,8 +3687,8 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.tbls_ep = &(const struct qmp_phy_cfg_tbls) {
.serdes = sdx55_qmp_pcie_ep_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_serdes_tbl),
- .pcs_misc = sdx55_qmp_pcie_ep_pcs_misc_tbl,
- .pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_misc_tbl),
+ .pcs_lane1 = sdx55_qmp_pcie_ep_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_lane1_tbl),
},
.reset_list = sdm845_pciephy_reset_l,
@@ -3540,6 +3787,8 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_tbl),
.pcs_misc = sdx65_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_misc_tbl),
+ .pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl),
},
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
@@ -3739,6 +3988,8 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
.pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ .pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl,
+ .pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl),
},
.tbls_rc = &(const struct qmp_phy_cfg_tbls) {
@@ -3945,6 +4196,7 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
void __iomem *pcs_misc = qmp->pcs_misc;
+ void __iomem *pcs_lane1 = qmp->pcs_lane1;
void __iomem *ln_shrd = qmp->ln_shrd;
if (!tbls)
@@ -3969,6 +4221,7 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+ qmp_configure(qmp->dev, pcs_lane1, tbls->pcs_lane1, tbls->pcs_lane1_num);
if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) {
qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl,
@@ -4420,6 +4673,14 @@ static int qmp_pcie_parse_dt_legacy(struct qmp_pcie *qmp, struct device_node *np
}
}
+ /*
+ * For all platforms where legacy bindings existed, PCS_LANE1 was
+ * mapped as a part of the PCS_MISC region.
+ */
+ if (!IS_ERR(qmp->pcs_misc) && cfg->offsets->pcs_lane1 != 0)
+ qmp->pcs_lane1 = qmp->pcs_misc +
+ (cfg->offsets->pcs_lane1 - cfg->offsets->pcs_misc);
+
clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(clk)) {
return dev_err_probe(dev, PTR_ERR(clk),
@@ -4487,6 +4748,7 @@ static int qmp_pcie_parse_dt(struct qmp_pcie *qmp)
qmp->serdes = base + offs->serdes;
qmp->pcs = base + offs->pcs;
qmp->pcs_misc = base + offs->pcs_misc;
+ qmp->pcs_lane1 = base + offs->pcs_lane1;
qmp->tx = base + offs->tx;
qmp->rx = base + offs->rx;
@@ -4612,12 +4874,18 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
}, {
+ .compatible = "qcom,qcs615-qmp-gen3x1-pcie-phy",
+ .data = &qcs615_pciephy_cfg,
+ }, {
.compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy",
.data = &sa8775p_qmp_gen4x2_pciephy_cfg,
}, {
.compatible = "qcom,sa8775p-qmp-gen4x4-pcie-phy",
.data = &sa8775p_qmp_gen4x4_pciephy_cfg,
}, {
+ .compatible = "qcom,sar2130p-qmp-gen3x2-pcie-phy",
+ .data = &sar2130p_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sc8180x-qmp-pcie-phy",
.data = &sc8180x_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
index ac872a9eff9a..ab892d1067c2 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v4_20.h
@@ -13,7 +13,8 @@
#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
-#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
-#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x028
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
index cdf8c04ea078..283d63c81593 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -17,7 +17,8 @@
#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
-#define QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2 0xa24
-#define QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2 0xa28
+
+#define QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2 0x028
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
index 0ca79333d942..45397cb3c0c6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
@@ -14,4 +14,7 @@
#define QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
#define QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_PCIE_V6_PCS_LANE1_INSIG_SW_CTRL2 0x024
+#define QPHY_PCIE_V6_PCS_LANE1_INSIG_MX_CTRL2 0x028
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
index bf36399d0057..1ecf4b5beba6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
@@ -34,6 +34,7 @@
#define QPHY_V2_PCS_USB_PCS_STATUS 0x17c /* USB */
#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1a8
#define QPHY_V2_PCS_OSC_DTCT_ACTIONS 0x1ac
+#define QPHY_V2_PCS_SIGDET_CNTRL 0x1b0
#define QPHY_V2_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
index 08299d2b78f0..aa5afb921f12 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
@@ -17,6 +17,8 @@
#define QPHY_V6_PCS_LOCK_DETECT_CONFIG3 0x0cc
#define QPHY_V6_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V6_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V6_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V6_PCS_RX_SIGDET_LVL 0x188
#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
index 23ffcfae9efa..f47fdc9cecda 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -6,6 +6,7 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
+#define QSERDES_V6_TX_BIST_MODE_LANENO 0x00
#define QSERDES_V6_TX_CLKBUF_ENABLE 0x08
#define QSERDES_V6_TX_TX_EMP_POST1_LVL 0x0c
#define QSERDES_V6_TX_TX_DRV_LVL 0x14
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index c9c337840715..787721570457 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -2298,6 +2298,9 @@ err_node_put:
static const struct of_device_id qmp_usb_of_match_table[] = {
{
+ .compatible = "qcom,ipq5424-qmp-usb3-phy",
+ .data = &ipq9574_usb3phy_cfg,
+ }, {
.compatible = "qcom,ipq6018-qmp-usb3-phy",
.data = &ipq6018_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index c52655a383ce..1f5f7df14d5a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -151,6 +151,34 @@ static const struct qusb2_phy_init_tbl ipq6018_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9F),
};
+static const struct qusb2_phy_init_tbl ipq5424_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL, 0x14),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0x00),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x53),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xc3),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE5, 0x00),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TEST, 0x80),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+};
+
+static const struct qusb2_phy_init_tbl qcs615_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xc8),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0xb3),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE3, 0x83),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xc0),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+};
+
static const unsigned int ipq6018_regs_layout[] = {
[QUSB2PHY_PLL_STATUS] = 0x38,
[QUSB2PHY_PORT_TUNE1] = 0x80,
@@ -331,6 +359,27 @@ static const struct qusb2_phy_cfg ipq6018_phy_cfg = {
.autoresume_en = BIT(0),
};
+static const struct qusb2_phy_cfg ipq5424_phy_cfg = {
+ .tbl = ipq5424_init_tbl,
+ .tbl_num = ARRAY_SIZE(ipq5424_init_tbl),
+ .regs = ipq6018_regs_layout,
+
+ .disable_ctrl = POWER_DOWN,
+ .mask_core_ready = PLL_LOCKED,
+ .autoresume_en = BIT(0),
+};
+
+static const struct qusb2_phy_cfg qcs615_phy_cfg = {
+ .tbl = qcs615_init_tbl,
+ .tbl_num = ARRAY_SIZE(qcs615_init_tbl),
+ .regs = ipq6018_regs_layout,
+
+ .disable_ctrl = (CLAMP_N_EN | FREEZIO_N | POWER_DOWN),
+ .mask_core_ready = PLL_LOCKED,
+ /* autoresume not used */
+ .autoresume_en = BIT(0),
+};
+
static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = {
.tbl = qusb2_v2_init_tbl,
.tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl),
@@ -905,6 +954,9 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq5424-qusb2-phy",
+ .data = &ipq5424_phy_cfg,
+ }, {
.compatible = "qcom,ipq6018-qusb2-phy",
.data = &ipq6018_phy_cfg,
}, {
@@ -923,6 +975,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,msm8998-qusb2-phy",
.data = &msm8998_phy_cfg,
}, {
+ .compatible = "qcom,qcs615-qusb2-phy",
+ .data = &qcs615_phy_cfg,
+ }, {
.compatible = "qcom,qcm2290-qusb2-phy",
.data = &sm6115_phy_cfg,
}, {
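The qusb2 hunks above add per-SoC init tables and configuration structs and register them through the ".data" member of the of_device_id table. As a minimal, hedged sketch (not the driver's actual probe path; names around the lookup are illustrative only), this is how such match data is typically retrieved at probe time:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_phy_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matching of_device_id entry. */
	const struct qusb2_phy_cfg *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -EINVAL;

	/* cfg->tbl and cfg->tbl_num would then drive the register programming. */
	return 0;
}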
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 2eb3329ca23f..a1532ef8bbe9 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -37,6 +37,10 @@
#define PHYREG8 0x1C
#define PHYREG8_SSC_EN BIT(4)
+#define PHYREG10 0x24
+#define PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
+#define PHYREG10_SSC_PCM_3500PPM 7
+
#define PHYREG11 0x28
#define PHYREG11_SU_TRIM_0_7 0xF0
@@ -61,17 +65,26 @@
#define PHYREG16 0x3C
#define PHYREG16_SSC_CNT_VALUE 0x5f
+#define PHYREG17 0x40
+
#define PHYREG18 0x44
#define PHYREG18_PLL_LOOP 0x32
+#define PHYREG21 0x50
+#define PHYREG21_RX_SQUELCH_VAL 0x0D
+
#define PHYREG27 0x6C
#define PHYREG27_RX_TRIM_RK3588 0x4C
+#define PHYREG30 0x74
+
#define PHYREG32 0x7C
#define PHYREG32_SSC_MASK GENMASK(7, 4)
+#define PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
#define PHYREG32_SSC_DIR_SHIFT 4
#define PHYREG32_SSC_UPWARD 0
#define PHYREG32_SSC_DOWNWARD 1
+#define PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
#define PHYREG32_SSC_OFFSET_SHIFT 6
#define PHYREG32_SSC_OFFSET_500PPM 1
@@ -79,6 +92,7 @@
#define PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
#define PHYREG33_PLL_KVCO_SHIFT 2
#define PHYREG33_PLL_KVCO_VALUE 2
+#define PHYREG33_PLL_KVCO_VALUE_RK3576 4
struct rockchip_combphy_priv;
@@ -98,6 +112,7 @@ struct rockchip_combphy_grfcfg {
struct combphy_reg pipe_rxterm_set;
struct combphy_reg pipe_txelec_set;
struct combphy_reg pipe_txcomp_set;
+ struct combphy_reg pipe_clk_24m;
struct combphy_reg pipe_clk_25m;
struct combphy_reg pipe_clk_100m;
struct combphy_reg pipe_phymode_sel;
@@ -584,6 +599,266 @@ static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
.combphy_cfg = rk3568_combphy_cfg,
};
+static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
+{
+ const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
+ unsigned long rate;
+ u32 val;
+
+ switch (priv->type) {
+ case PHY_TYPE_PCIE:
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+ break;
+
+ case PHY_TYPE_USB3:
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ /* Enable adaptive CTLE for USB3.0 Rx */
+ val = readl(priv->mmio + PHYREG15);
+ val |= PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + PHYREG15);
+
+ /* Set PLL KVCO fine tuning signals */
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+
+ /* Set PLL LPF R1 to su_trim[10:7]=1001 */
+ writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+
+ /* Set PLL input clock divider 1/2 */
+ val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+
+ /* Set PLL loop divider */
+ writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+
+ /* Set PLL KVCO to min and set PLL charge pump current to max */
+ writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+
+ /* Set Rx squelch input filter bandwidth */
+ writel(PHYREG21_RX_SQUELCH_VAL, priv->mmio + PHYREG21);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ break;
+
+ case PHY_TYPE_SATA:
+ /* Enable adaptive CTLE for SATA Rx */
+ val = readl(priv->mmio + PHYREG15);
+ val |= PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + PHYREG15);
+
+ /* Set tx_rterm = 50 ohm and rx_rterm = 43.5 ohm */
+ val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
+ val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + PHYREG7);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_sata, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_sata, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_con0_for_sata, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_con1_for_sata, true);
+ break;
+
+ default:
+ dev_err(priv->dev, "incompatible PHY type\n");
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(priv->refclk);
+
+ switch (rate) {
+ case REF_CLOCK_24MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
+ if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
+ /* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
+ val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
+ val, PHYREG15);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ } else if (priv->type == PHY_TYPE_PCIE) {
+ /* PLL KVCO fine tuning */
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_pck invert and rx msb to disable */
+ writel(0x00, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b011
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x02, priv->mmio + PHYREG12);
+ writel(0x57, priv->mmio + PHYREG14);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ }
+ break;
+
+ case REF_CLOCK_25MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_25m, true);
+ break;
+
+ case REF_CLOCK_100MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
+ if (priv->type == PHY_TYPE_PCIE) {
+ /* gate_tx_pck_sel length select work for L1SS */
+ writel(0xc0, priv->mmio + PHYREG30);
+
+ /* PLL KVCO fine tuning */
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
+ writel(0x4c, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], bypass PLL loop divider code, and
+ * PLL LPF R1 adjust bits[9:7]=3'b101
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x43, priv->mmio + PHYREG12);
+ writel(0x88, priv->mmio + PHYREG13);
+ writel(0x56, priv->mmio + PHYREG14);
+ } else if (priv->type == PHY_TYPE_SATA) {
+ /* downward spread spectrum +500ppm */
+ val = FIELD_PREP(PHYREG32_SSC_DIR_MASK, PHYREG32_SSC_DOWNWARD);
+ val |= FIELD_PREP(PHYREG32_SSC_OFFSET_MASK, PHYREG32_SSC_OFFSET_500PPM);
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+
+ /* ssc ppm adjust to 3500ppm */
+ rockchip_combphy_updatel(priv, PHYREG10_SSC_PCM_MASK,
+ PHYREG10_SSC_PCM_3500PPM,
+ PHYREG10);
+ }
+ break;
+
+ default:
+ dev_err(priv->dev, "Unsupported rate: %lu\n", rate);
+ return -EINVAL;
+ }
+
+ if (priv->ext_refclk) {
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Set up rx_trim: PLL LPF C1 85pf R1 2.5kohm */
+ writel(0x0c, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], bypass PLL loop divider code, and
+ * PLL LPF R1 adjust bits[9:7]=3'b101.
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x43, priv->mmio + PHYREG12);
+ writel(0x88, priv->mmio + PHYREG13);
+ writel(0x56, priv->mmio + PHYREG14);
+ }
+ }
+
+ if (priv->enable_ssc) {
+ val = readl(priv->mmio + PHYREG8);
+ val |= PHYREG8_SSC_EN;
+ writel(val, priv->mmio + PHYREG8);
+
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_24MHz) {
+ /* Set PLL loop divider */
+ writel(0x00, priv->mmio + PHYREG17);
+ writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+
+ /* Set up rx_pck invert and rx msb to disable */
+ writel(0x00, priv->mmio + PHYREG27);
+
+ /*
+ * Set up SU adjust signal:
+ * su_trim[7:0], PLL KVCO adjust bits[2:0] to min
+ * su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b101
+ * su_trim[23:16], CKRCV adjust
+ * su_trim[31:24], CKDRV adjust
+ */
+ writel(0x90, priv->mmio + PHYREG11);
+ writel(0x02, priv->mmio + PHYREG12);
+ writel(0x08, priv->mmio + PHYREG13);
+ writel(0x57, priv->mmio + PHYREG14);
+ writel(0x40, priv->mmio + PHYREG15);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
+ writel(val, priv->mmio + PHYREG33);
+ }
+ }
+
+ return 0;
+}
+
+static const struct rockchip_combphy_grfcfg rk3576_combphy_grfcfgs = {
+ /* pipe-phy-grf */
+ .pcie_mode_set = { 0x0000, 5, 0, 0x00, 0x11 },
+ .usb_mode_set = { 0x0000, 5, 0, 0x00, 0x04 },
+ .pipe_rxterm_set = { 0x0000, 12, 12, 0x00, 0x01 },
+ .pipe_txelec_set = { 0x0004, 1, 1, 0x00, 0x01 },
+ .pipe_txcomp_set = { 0x0004, 4, 4, 0x00, 0x01 },
+ .pipe_clk_24m = { 0x0004, 14, 13, 0x00, 0x00 },
+ .pipe_clk_25m = { 0x0004, 14, 13, 0x00, 0x01 },
+ .pipe_clk_100m = { 0x0004, 14, 13, 0x00, 0x02 },
+ .pipe_phymode_sel = { 0x0008, 1, 1, 0x00, 0x01 },
+ .pipe_rate_sel = { 0x0008, 2, 2, 0x00, 0x01 },
+ .pipe_rxterm_sel = { 0x0008, 8, 8, 0x00, 0x01 },
+ .pipe_txelec_sel = { 0x0008, 12, 12, 0x00, 0x01 },
+ .pipe_txcomp_sel = { 0x0008, 15, 15, 0x00, 0x01 },
+ .pipe_clk_ext = { 0x000c, 9, 8, 0x02, 0x01 },
+ .pipe_phy_status = { 0x0034, 6, 6, 0x01, 0x00 },
+ .con0_for_pcie = { 0x0000, 15, 0, 0x00, 0x1000 },
+ .con1_for_pcie = { 0x0004, 15, 0, 0x00, 0x0000 },
+ .con2_for_pcie = { 0x0008, 15, 0, 0x00, 0x0101 },
+ .con3_for_pcie = { 0x000c, 15, 0, 0x00, 0x0200 },
+ .con0_for_sata = { 0x0000, 15, 0, 0x00, 0x0129 },
+ .con1_for_sata = { 0x0004, 15, 0, 0x00, 0x0000 },
+ .con2_for_sata = { 0x0008, 15, 0, 0x00, 0x80c1 },
+ .con3_for_sata = { 0x000c, 15, 0, 0x00, 0x0407 },
+ /* php-grf */
+ .pipe_con0_for_sata = { 0x001C, 2, 0, 0x00, 0x2 },
+ .pipe_con1_for_sata = { 0x0020, 2, 0, 0x00, 0x2 },
+};
+
+static const struct rockchip_combphy_cfg rk3576_combphy_cfgs = {
+ .num_phys = 2,
+ .phy_ids = {
+ 0x2b050000,
+ 0x2b060000
+ },
+ .grfcfg = &rk3576_combphy_grfcfgs,
+ .combphy_cfg = rk3576_combphy_cfg,
+};
+
static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
{
const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
@@ -776,6 +1051,10 @@ static const struct of_device_id rockchip_combphy_of_match[] = {
.data = &rk3568_combphy_cfgs,
},
{
+ .compatible = "rockchip,rk3576-naneng-combphy",
+ .data = &rk3576_combphy_cfgs,
+ },
+ {
.compatible = "rockchip,rk3588-naneng-combphy",
.data = &rk3588_combphy_cfgs,
},
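The rk3576 configuration added above leans on GENMASK()/FIELD_PREP() mask handling plus a read-modify-write helper. A hedged sketch of that pattern follows; example_update_bits() merely stands in for the driver's rockchip_combphy_updatel(), and the register offset and field values are illustrative, not taken from the hardware manual:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

#define EXAMPLE_SSC_DIR_MASK	GENMASK(5, 4)
#define EXAMPLE_SSC_DOWNWARD	1

static void example_update_bits(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~mask;
	tmp |= val & mask;
	writel(tmp, reg);
}

static void example_set_ssc_downward(void __iomem *base)
{
	/* FIELD_PREP() shifts the value into the bit position the mask describes. */
	example_update_bits(base + 0x7c, EXAMPLE_SSC_DIR_MASK,
			    FIELD_PREP(EXAMPLE_SSC_DIR_MASK, EXAMPLE_SSC_DOWNWARD));
}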
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 51cc5ece0e63..bd44af36c67a 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -124,7 +124,7 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
regmap_write(rk_phy->reg_base,
rk_phy->phy_data->pcie_laneoff,
@@ -132,27 +132,22 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
PHY_LANE_IDLE_MASK,
PHY_LANE_IDLE_A_SHIFT + inst->index));
- if (--rk_phy->pwr_cnt)
- goto err_out;
+ if (--rk_phy->pwr_cnt) {
+ return 0;
+ }
err = reset_control_assert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
- goto err_restore;
+ rk_phy->pwr_cnt++;
+ regmap_write(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_laneoff,
+ HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+ PHY_LANE_IDLE_MASK,
+ PHY_LANE_IDLE_A_SHIFT + inst->index));
+ return err;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
-
-err_restore:
- rk_phy->pwr_cnt++;
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
@@ -162,17 +157,18 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
u32 status;
- unsigned long timeout;
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
- if (rk_phy->pwr_cnt++)
- goto err_out;
+ if (rk_phy->pwr_cnt++) {
+ return 0;
+ }
err = reset_control_deassert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
- goto err_pwr_cnt;
+ rk_phy->pwr_cnt--;
+ return err;
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
@@ -191,21 +187,11 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
* so we make it large enough here. And we use loop-break
* method which should not be harmful.
*/
- timeout = jiffies + msecs_to_jiffies(1000);
-
- err = -EINVAL;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (status & PHY_PLL_LOCKED) {
- dev_dbg(&phy->dev, "pll locked!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
-
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ status & PHY_PLL_LOCKED,
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll lock timeout!\n");
goto err_pll_lock;
@@ -214,19 +200,11 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);
- err = -ETIMEDOUT;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (!(status & PHY_PLL_OUTPUT)) {
- dev_dbg(&phy->dev, "pll output enable done!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
-
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ !(status & PHY_PLL_OUTPUT),
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll output enable timeout!\n");
goto err_pll_lock;
@@ -236,33 +214,22 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
PHY_CFG_ADDR_MASK,
PHY_CFG_ADDR_SHIFT));
- err = -EINVAL;
- while (time_before(jiffies, timeout)) {
- regmap_read(rk_phy->reg_base,
- rk_phy->phy_data->pcie_status,
- &status);
- if (status & PHY_PLL_LOCKED) {
- dev_dbg(&phy->dev, "pll relocked!\n");
- err = 0;
- break;
- }
- msleep(20);
- }
+ err = regmap_read_poll_timeout(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ status,
+ status & PHY_PLL_LOCKED,
+ 200, 100000);
if (err) {
dev_err(&phy->dev, "pll relock timeout!\n");
goto err_pll_lock;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
+ return err;
err_pll_lock:
reset_control_assert(rk_phy->phy_rst);
-err_pwr_cnt:
rk_phy->pwr_cnt--;
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
@@ -272,33 +239,19 @@ static int rockchip_pcie_phy_init(struct phy *phy)
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
int err = 0;
- mutex_lock(&rk_phy->pcie_mutex);
-
- if (rk_phy->init_cnt++)
- goto err_out;
+ guard(mutex)(&rk_phy->pcie_mutex);
- err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
- if (err) {
- dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
- goto err_refclk;
+ if (rk_phy->init_cnt++) {
+ return 0;
}
err = reset_control_assert(rk_phy->phy_rst);
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
- goto err_reset;
+ rk_phy->init_cnt--;
+ return err;
}
-err_out:
- mutex_unlock(&rk_phy->pcie_mutex);
- return 0;
-
-err_reset:
-
- clk_disable_unprepare(rk_phy->clk_pciephy_ref);
-err_refclk:
- rk_phy->init_cnt--;
- mutex_unlock(&rk_phy->pcie_mutex);
return err;
}
@@ -307,15 +260,12 @@ static int rockchip_pcie_phy_exit(struct phy *phy)
struct phy_pcie_instance *inst = phy_get_drvdata(phy);
struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
- mutex_lock(&rk_phy->pcie_mutex);
+ guard(mutex)(&rk_phy->pcie_mutex);
if (--rk_phy->init_cnt)
goto err_init_cnt;
- clk_disable_unprepare(rk_phy->clk_pciephy_ref);
-
err_init_cnt:
- mutex_unlock(&rk_phy->pcie_mutex);
return 0;
}
@@ -371,18 +321,14 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
mutex_init(&rk_phy->pcie_mutex);
rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
- if (IS_ERR(rk_phy->phy_rst)) {
- if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
- dev_err(dev,
- "missing phy property for reset controller\n");
- return PTR_ERR(rk_phy->phy_rst);
- }
-
- rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
- if (IS_ERR(rk_phy->clk_pciephy_ref)) {
- dev_err(dev, "refclk not found.\n");
- return PTR_ERR(rk_phy->clk_pciephy_ref);
- }
+ if (IS_ERR(rk_phy->phy_rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rk_phy->phy_rst),
+ "missing phy property for reset controller\n");
+
+ rk_phy->clk_pciephy_ref = devm_clk_get_enabled(dev, "refclk");
+ if (IS_ERR(rk_phy->clk_pciephy_ref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rk_phy->clk_pciephy_ref),
+ "failed to get phyclk\n");
/* parse #phy-cells to see if it's legacy PHY model */
if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
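Two conversions run through the phy-rockchip-pcie.c hunks above: scope-based locking with guard(mutex) from <linux/cleanup.h>, and regmap_read_poll_timeout() replacing the open-coded jiffies polling loops. A hedged sketch combining both (the register, bit, and function names are placeholders, not the driver's):

#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#define EXAMPLE_PLL_LOCKED	BIT(9)

static int example_wait_pll_locked(struct regmap *map, unsigned int reg,
				   struct mutex *lock)
{
	u32 status;

	/* The mutex is released automatically on every return path. */
	guard(mutex)(lock);

	/* Poll every 200 us, time out after 100 ms, as in the hunks above. */
	return regmap_read_poll_timeout(map, reg, status,
					status & EXAMPLE_PLL_LOCKED,
					200, 100000);
}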
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 122ae0fdc785..d9701b6106d5 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
* Kever Yang <kever.yang@rock-chips.com>
*
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index f10afa3d7ff5..e2330b0894d6 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -33,6 +33,7 @@ config PHY_SAMSUNG_UFS
tristate "Exynos SoC series UFS PHY driver"
depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
select GENERIC_PHY
+ select MFD_SYSCON
help
Enable this to support the Samsung Exynos SoC UFS PHY driver for
Samsung Exynos SoCs. This driver provides the interface for UFS host
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 6c5d41552649..8e9ccd39f97e 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -13,11 +13,11 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/soc/samsung/exynos-pmu.h>
#include "phy-samsung-ufs.h"
@@ -268,8 +268,8 @@ static int samsung_ufs_phy_probe(struct platform_device *pdev)
goto out;
}
- phy->reg_pmu = exynos_get_pmu_regmap_by_phandle(dev->of_node,
- "samsung,pmu-syscon");
+ phy->reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,pmu-syscon");
if (IS_ERR(phy->reg_pmu)) {
err = PTR_ERR(phy->reg_pmu);
dev_err(dev, "failed syscon remap for pmu\n");
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
index c591c958f1eb..f30cfb42b210 100644
--- a/drivers/phy/tegra/Kconfig
+++ b/drivers/phy/tegra/Kconfig
@@ -13,7 +13,8 @@ config PHY_TEGRA_XUSB
config PHY_TEGRA194_P2U
tristate "NVIDIA Tegra194 PIPE2UPHY PHY driver"
- depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || COMPILE_TEST
select GENERIC_PHY
help
- Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x SOCs.
+ Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x
+ and 234 SOCs.
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 84af6aae36d1..a96be8f244e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "../pinctrl-utils.h"
@@ -254,7 +255,7 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio,
- level_low ? "true" : "false", falling ? "true" : "false");
+ str_true_false(level_low), str_true_false(falling));
return 0;
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 014297a3fbd2..0f32866a4aef 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -1068,7 +1069,7 @@ static void lochnagar_gpio_set(struct gpio_chip *chip,
value = !!value;
dev_dbg(priv->dev, "Set GPIO %s to %s\n",
- pin->name, value ? "high" : "low");
+ pin->name, str_high_low(value));
switch (pin->type) {
case LN_PTYPE_MUX:
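The two pinctrl hunks above replace hand-rolled "cond ? "true" : "false"" ladders with the <linux/string_choices.h> helpers. A hedged illustration; the function and its arguments are made up for the example:

#include <linux/device.h>
#include <linux/string_choices.h>

static void example_log_gpio(struct device *dev, bool level_low, bool value)
{
	/* str_true_false() maps to "true"/"false", str_high_low() to "high"/"low". */
	dev_dbg(dev, "level_low:%s value:%s\n",
		str_true_false(level_low), str_high_low(value));
}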
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index b3eec63c00ba..4bdbf6bb26e2 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1256,6 +1256,20 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
DL_FLAG_AUTOREMOVE_CONSUMER);
}
+static void pinctrl_cond_disable_mux_setting(struct pinctrl_state *state,
+ struct pinctrl_setting *target_setting)
+{
+ struct pinctrl_setting *setting;
+
+ list_for_each_entry(setting, &state->settings, node) {
+ if (target_setting && (&setting->node == &target_setting->node))
+ break;
+
+ if (setting->type == PIN_MAP_TYPE_MUX_GROUP)
+ pinmux_disable_setting(setting);
+ }
+}
+
/**
* pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
@@ -1263,7 +1277,7 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
*/
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
- struct pinctrl_setting *setting, *setting2;
+ struct pinctrl_setting *setting;
struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
@@ -1274,11 +1288,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &old_state->settings, node) {
- if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
- continue;
- pinmux_disable_setting(setting);
- }
+ pinctrl_cond_disable_mux_setting(old_state, NULL);
}
p->state = NULL;
@@ -1322,7 +1332,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
}
if (ret < 0) {
- goto unapply_new_state;
+ goto unapply_mux_setting;
}
/* Do not link hogs (circular dependency) */
@@ -1334,23 +1344,23 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
return 0;
+unapply_mux_setting:
+ pinctrl_cond_disable_mux_setting(state, NULL);
+ goto restore_old_state;
+
unapply_new_state:
dev_err(p->dev, "Error applying setting, reverse things back\n");
- list_for_each_entry(setting2, &state->settings, node) {
- if (&setting2->node == &setting->node)
- break;
- /*
- * All we can do here is pinmux_disable_setting.
- * That means that some pins are muxed differently now
- * than they were before applying the setting (We can't
- * "unmux a pin"!), but it's not a big deal since the pins
- * are free to be muxed by another apply_setting.
- */
- if (setting2->type == PIN_MAP_TYPE_MUX_GROUP)
- pinmux_disable_setting(setting2);
- }
+ /*
+ * All we can do here is pinmux_disable_setting.
+ * That means that some pins are muxed differently now
+ * than they were before applying the setting (We can't
+ * "unmux a pin"!), but it's not a big deal since the pins
+ * are free to be muxed by another apply_setting.
+ */
+ pinctrl_cond_disable_mux_setting(state, setting);
+restore_old_state:
/* There's no infinite recursive loop here because p->state is NULL */
if (old_state)
pinctrl_select_state(p, old_state);
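pinctrl_cond_disable_mux_setting(), introduced above, folds two previously duplicated loops into one: it walks a state's settings, touches only PIN_MAP_TYPE_MUX_GROUP entries, and stops early once the optional target setting is reached, so a NULL target disables every mux setting in the state. A hedged, generic sketch of that walk-until-target list pattern (the struct and helper below are placeholders, not pinctrl types):

#include <linux/list.h>
#include <linux/printk.h>

struct example_setting {
	struct list_head node;
	bool is_mux;
};

static void example_disable_until(struct list_head *settings,
				  struct example_setting *target)
{
	struct example_setting *s;

	list_for_each_entry(s, settings, node) {
		/* Comparing node addresses stops the walk at the target entry. */
		if (target && &s->node == &target->node)
			break;

		if (s->is_mux)
			pr_debug("would disable this mux setting\n");
	}
}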
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index a417a031659c..58f32818a0e6 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -202,6 +202,13 @@ config PINCTRL_MT7986
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
+config PINCTRL_MT7988
+ bool "Mediatek MT7988 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_MOORE
+
config PINCTRL_MT8167
bool "MediaTek MT8167 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 1405d434218e..721ae83476d0 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
obj-$(CONFIG_PINCTRL_MT7981) += pinctrl-mt7981.o
obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
+obj-$(CONFIG_PINCTRL_MT7988) += pinctrl-mt7988.o
obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
new file mode 100644
index 000000000000..68b4097792b8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -0,0 +1,1556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The MT7988 driver is based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+enum mt7988_pinctrl_reg_page {
+ GPIO_BASE,
+ IOCFG_TR_BASE,
+ IOCFG_BR_BASE,
+ IOCFG_RB_BASE,
+ IOCFG_LB_BASE,
+ IOCFG_TL_BASE,
+};
+
+#define MT7988_PIN(_number, _name) MTK_PIN(_number, _name, 0, _number, DRV_GRP4)
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
+static const struct mtk_pin_field_calc mt7988_pin_mode_range[] = {
+ PIN_FIELD(0, 83, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_dir_range[] = {
+ PIN_FIELD(0, 83, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_di_range[] = {
+ PIN_FIELD(0, 83, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_do_range[] = {
+ PIN_FIELD(0, 83, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x30, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x30, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x30, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x30, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x30, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x40, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x50, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x50, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x50, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x50, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x50, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x50, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x50, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x50, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x50, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x50, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x50, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x50, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x50, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x50, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x50, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x50, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x50, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x50, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x50, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x50, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x50, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x50, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x40, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x40, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x40, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x40, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x40, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x40, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x40, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x30, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x30, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0x30, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x30, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x40, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x40, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x40, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x40, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0xc0, 0x10, 13, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0xc0, 0x10, 14, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0xc0, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0xc0, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0xc0, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0xc0, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(7, 7, 4, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0xb0, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0xe0, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0xe0, 0x10, 21, 1),
+ PIN_FIELD_BASE(13, 13, 1, 0xe0, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0xe0, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0xc0, 0x10, 7, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0xc0, 0x10, 8, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0xc0, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0xc0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0xb0, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x140, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x140, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x140, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x140, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x140, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x140, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x140, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x140, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x140, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x140, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x140, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x140, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x150, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x140, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x140, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x140, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x150, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x140, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x140, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x140, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x140, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x140, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x140, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x140, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x140, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x140, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x140, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x140, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x140, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x140, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x140, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x140, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x140, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x140, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0xe0, 0x10, 14, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0xe0, 0x10, 15, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0xe0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0xe0, 0x10, 4, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0xe0, 0x10, 5, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0xe0, 0x10, 6, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0xe0, 0x10, 3, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0xe0, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0xe0, 0x10, 20, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0xe0, 0x10, 8, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0xe0, 0x10, 9, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0xe0, 0x10, 10, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0xe0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0xe0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0xc0, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0xc0, 0x10, 5, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0xc0, 0x10, 6, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(75, 75, 4, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0xb0, 0x10, 12, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0xe0, 0x10, 18, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0xe0, 0x10, 19, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0xe0, 0x10, 16, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0xe0, 0x10, 17, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pu_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x60, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pd_range[] = {
+ PIN_FIELD_BASE(7, 7, 4, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(13, 13, 1, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(14, 14, 1, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(15, 15, 5, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(16, 16, 5, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(17, 17, 5, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(18, 18, 5, 0x40, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(63, 63, 1, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(71, 71, 5, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(72, 72, 5, 0x40, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(75, 75, 4, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 4, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(77, 77, 4, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 4, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(79, 79, 4, 0x40, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(1, 1, 5, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(2, 2, 5, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(3, 3, 5, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(4, 4, 5, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(5, 5, 5, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(6, 6, 5, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(7, 7, 4, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x00, 0x10, 28, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x00, 0x10, 9, 3),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(12, 12, 1, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(13, 13, 1, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(14, 14, 1, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(20, 20, 4, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x10, 0x10, 21, 3),
+ PIN_FIELD_BASE(22, 22, 3, 0x20, 0x10, 9, 3),
+ PIN_FIELD_BASE(23, 23, 3, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(24, 24, 3, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(25, 25, 3, 0x20, 0x10, 3, 3),
+ PIN_FIELD_BASE(26, 26, 3, 0x20, 0x10, 6, 3),
+ PIN_FIELD_BASE(27, 27, 3, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(28, 28, 3, 0x20, 0x10, 15, 3),
+ PIN_FIELD_BASE(29, 29, 3, 0x20, 0x10, 18, 3),
+ PIN_FIELD_BASE(30, 30, 3, 0x20, 0x10, 21, 3),
+ PIN_FIELD_BASE(31, 31, 3, 0x20, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 3, 0x20, 0x10, 24, 3),
+ PIN_FIELD_BASE(33, 33, 3, 0x30, 0x10, 6, 3),
+ PIN_FIELD_BASE(34, 34, 3, 0x30, 0x10, 3, 3),
+ PIN_FIELD_BASE(35, 35, 3, 0x20, 0x10, 27, 3),
+ PIN_FIELD_BASE(36, 36, 3, 0x30, 0x10, 0, 3),
+ PIN_FIELD_BASE(37, 37, 3, 0x30, 0x10, 9, 3),
+ PIN_FIELD_BASE(38, 38, 3, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(39, 39, 3, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(40, 40, 3, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(41, 41, 3, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(42, 42, 3, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(43, 43, 3, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(44, 44, 3, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(45, 45, 3, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(46, 46, 3, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(47, 47, 3, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(48, 48, 3, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(49, 49, 3, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(50, 50, 3, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(51, 51, 3, 0x10, 0x10, 6, 3),
+ PIN_FIELD_BASE(52, 52, 3, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(53, 53, 3, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(54, 54, 3, 0x10, 0x10, 18, 3),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x10, 0x10, 12, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x10, 0x10, 15, 3),
+ PIN_FIELD_BASE(57, 57, 1, 0x10, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 1, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(59, 59, 1, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(60, 60, 1, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(61, 61, 1, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(62, 62, 1, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(63, 63, 1, 0x20, 0x10, 0, 3),
+ PIN_FIELD_BASE(64, 64, 1, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(65, 65, 1, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(66, 66, 1, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(67, 67, 1, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 1, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(70, 70, 5, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x10, 0x10, 0, 3),
+ PIN_FIELD_BASE(74, 74, 4, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(75, 75, 4, 0x10, 0x10, 3, 3),
+ PIN_FIELD_BASE(76, 76, 4, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(77, 77, 4, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(78, 78, 4, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(79, 79, 4, 0x10, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x10, 0x10, 24, 3),
+ PIN_FIELD_BASE(81, 81, 1, 0x10, 0x10, 27, 3),
+ PIN_FIELD_BASE(82, 82, 1, 0x10, 0x10, 18, 3),
+ PIN_FIELD_BASE(83, 83, 1, 0x10, 0x10, 21, 3),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x50, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x60, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x50, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x70, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x70, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x70, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x70, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x70, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x70, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x70, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x70, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x70, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x70, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x70, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x70, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x70, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x70, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x70, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x70, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x70, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x70, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x70, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x70, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x70, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x70, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x70, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x70, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x60, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x60, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x60, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x60, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x60, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x60, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x50, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x50, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x60, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x60, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x60, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x60, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x60, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x60, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x60, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x60, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x60, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x80, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x70, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x70, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0x90, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0x90, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0x90, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0x90, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x90, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x90, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x90, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x90, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x90, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x90, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x90, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xa0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x90, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x90, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x90, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xa0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x90, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0x90, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x90, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x80, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x80, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x80, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x80, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x80, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x80, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x80, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x80, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x80, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x80, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x60, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x70, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x80, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x80, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x80, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x80, 0x10, 15, 1),
+};
+
+static const struct mtk_pin_field_calc mt7988_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 5, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(1, 1, 5, 0x70, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(3, 3, 5, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 5, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(5, 5, 5, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(6, 6, 5, 0x70, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(11, 11, 1, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x90, 0x10, 18, 1),
+
+ PIN_FIELD_BASE(19, 19, 4, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 4, 0x80, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(21, 21, 3, 0xb0, 0x10, 17, 1),
+ PIN_FIELD_BASE(22, 22, 3, 0xb0, 0x10, 23, 1),
+ PIN_FIELD_BASE(23, 23, 3, 0xb0, 0x10, 20, 1),
+ PIN_FIELD_BASE(24, 24, 3, 0xb0, 0x10, 19, 1),
+ PIN_FIELD_BASE(25, 25, 3, 0xb0, 0x10, 21, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0xb0, 0x10, 22, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0xb0, 0x10, 18, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0xb0, 0x10, 25, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0xb0, 0x10, 26, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0xb0, 0x10, 27, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0xb0, 0x10, 24, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0xb0, 0x10, 28, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0xc0, 0x10, 0, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0xb0, 0x10, 31, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0xb0, 0x10, 29, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0xb0, 0x10, 30, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0xc0, 0x10, 1, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0xb0, 0x10, 11, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0xb0, 0x10, 10, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0xb0, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0xb0, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0xb0, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0xb0, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 3, 0xb0, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 3, 0xb0, 0x10, 6, 1),
+ PIN_FIELD_BASE(46, 46, 3, 0xb0, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 3, 0xb0, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0xb0, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0xb0, 0x10, 2, 1),
+ PIN_FIELD_BASE(50, 50, 3, 0xb0, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 3, 0xb0, 0x10, 12, 1),
+ PIN_FIELD_BASE(52, 52, 3, 0xb0, 0x10, 13, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0xb0, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0xb0, 0x10, 16, 1),
+
+ PIN_FIELD_BASE(55, 55, 1, 0x90, 0x10, 12, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x90, 0x10, 13, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x90, 0x10, 11, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x90, 0x10, 10, 1),
+
+ PIN_FIELD_BASE(69, 69, 5, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 5, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(73, 73, 4, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(74, 74, 4, 0x80, 0x10, 0, 1),
+
+ PIN_FIELD_BASE(80, 80, 1, 0x90, 0x10, 16, 1),
+ PIN_FIELD_BASE(81, 81, 1, 0x90, 0x10, 17, 1),
+ PIN_FIELD_BASE(82, 82, 1, 0x90, 0x10, 14, 1),
+ PIN_FIELD_BASE(83, 83, 1, 0x90, 0x10, 15, 1),
+};
+
+static const unsigned int mt7988_pull_type[] = {
+ MTK_PULL_PUPD_R1R0_TYPE,/*0*/ MTK_PULL_PUPD_R1R0_TYPE,/*1*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*2*/ MTK_PULL_PUPD_R1R0_TYPE,/*3*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*4*/ MTK_PULL_PUPD_R1R0_TYPE,/*5*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/ MTK_PULL_PD_TYPE, /*15*/
+ MTK_PULL_PD_TYPE, /*16*/ MTK_PULL_PD_TYPE, /*17*/
+ MTK_PULL_PD_TYPE, /*18*/ MTK_PULL_PUPD_R1R0_TYPE,/*19*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*20*/ MTK_PULL_PUPD_R1R0_TYPE,/*21*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PUPD_R1R0_TYPE,/*29*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*30*/ MTK_PULL_PUPD_R1R0_TYPE,/*31*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*32*/ MTK_PULL_PUPD_R1R0_TYPE,/*33*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*34*/ MTK_PULL_PUPD_R1R0_TYPE,/*35*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*36*/ MTK_PULL_PUPD_R1R0_TYPE,/*37*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*38*/ MTK_PULL_PUPD_R1R0_TYPE,/*39*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*40*/ MTK_PULL_PUPD_R1R0_TYPE,/*41*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*42*/ MTK_PULL_PUPD_R1R0_TYPE,/*43*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PUPD_R1R0_TYPE,/*57*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*58*/ MTK_PULL_PUPD_R1R0_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PUPD_R1R0_TYPE,/*61*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE, /*63*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*64*/ MTK_PULL_PUPD_R1R0_TYPE,/*65*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PD_TYPE, /*71*/
+ MTK_PULL_PD_TYPE, /*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PUPD_R1R0_TYPE,/*83*/
+};
+
+static const struct mtk_pin_reg_calc mt7988_reg_cals[] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7988_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7988_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7988_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7988_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7988_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7988_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt7988_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt7988_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7988_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7988_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7988_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7988_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7988_pins[] = {
+ MT7988_PIN(0, "UART2_RXD"),
+ MT7988_PIN(1, "UART2_TXD"),
+ MT7988_PIN(2, "UART2_CTS"),
+ MT7988_PIN(3, "UART2_RTS"),
+ MT7988_PIN(4, "GPIO_A"),
+ MT7988_PIN(5, "SMI_0_MDC"),
+ MT7988_PIN(6, "SMI_0_MDIO"),
+ MT7988_PIN(7, "PCIE30_2L_0_WAKE_N"),
+ MT7988_PIN(8, "PCIE30_2L_0_CLKREQ_N"),
+ MT7988_PIN(9, "PCIE30_1L_1_WAKE_N"),
+ MT7988_PIN(10, "PCIE30_1L_1_CLKREQ_N"),
+ MT7988_PIN(11, "GPIO_P"),
+ MT7988_PIN(12, "WATCHDOG"),
+ MT7988_PIN(13, "GPIO_RESET"),
+ MT7988_PIN(14, "GPIO_WPS"),
+ MT7988_PIN(15, "PMIC_I2C_SCL"),
+ MT7988_PIN(16, "PMIC_I2C_SDA"),
+ MT7988_PIN(17, "I2C_1_SCL"),
+ MT7988_PIN(18, "I2C_1_SDA"),
+ MT7988_PIN(19, "PCIE30_2L_0_PRESET_N"),
+ MT7988_PIN(20, "PCIE30_1L_1_PRESET_N"),
+ MT7988_PIN(21, "PWMD1"),
+ MT7988_PIN(22, "SPI0_WP"),
+ MT7988_PIN(23, "SPI0_HOLD"),
+ MT7988_PIN(24, "SPI0_CSB"),
+ MT7988_PIN(25, "SPI0_MISO"),
+ MT7988_PIN(26, "SPI0_MOSI"),
+ MT7988_PIN(27, "SPI0_CLK"),
+ MT7988_PIN(28, "SPI1_CSB"),
+ MT7988_PIN(29, "SPI1_MISO"),
+ MT7988_PIN(30, "SPI1_MOSI"),
+ MT7988_PIN(31, "SPI1_CLK"),
+ MT7988_PIN(32, "SPI2_CLK"),
+ MT7988_PIN(33, "SPI2_MOSI"),
+ MT7988_PIN(34, "SPI2_MISO"),
+ MT7988_PIN(35, "SPI2_CSB"),
+ MT7988_PIN(36, "SPI2_HOLD"),
+ MT7988_PIN(37, "SPI2_WP"),
+ MT7988_PIN(38, "EMMC_RSTB"),
+ MT7988_PIN(39, "EMMC_DSL"),
+ MT7988_PIN(40, "EMMC_CK"),
+ MT7988_PIN(41, "EMMC_CMD"),
+ MT7988_PIN(42, "EMMC_DATA_7"),
+ MT7988_PIN(43, "EMMC_DATA_6"),
+ MT7988_PIN(44, "EMMC_DATA_5"),
+ MT7988_PIN(45, "EMMC_DATA_4"),
+ MT7988_PIN(46, "EMMC_DATA_3"),
+ MT7988_PIN(47, "EMMC_DATA_2"),
+ MT7988_PIN(48, "EMMC_DATA_1"),
+ MT7988_PIN(49, "EMMC_DATA_0"),
+ MT7988_PIN(50, "PCM_FS_I2S_LRCK"),
+ MT7988_PIN(51, "PCM_CLK_I2S_BCLK"),
+ MT7988_PIN(52, "PCM_DRX_I2S_DIN"),
+ MT7988_PIN(53, "PCM_DTX_I2S_DOUT"),
+ MT7988_PIN(54, "PCM_MCK_I2S_MCLK"),
+ MT7988_PIN(55, "UART0_RXD"),
+ MT7988_PIN(56, "UART0_TXD"),
+ MT7988_PIN(57, "PWMD0"),
+ MT7988_PIN(58, "JTAG_JTDI"),
+ MT7988_PIN(59, "JTAG_JTDO"),
+ MT7988_PIN(60, "JTAG_JTMS"),
+ MT7988_PIN(61, "JTAG_JTCLK"),
+ MT7988_PIN(62, "JTAG_JTRST_N"),
+ MT7988_PIN(63, "USB_DRV_VBUS_P1"),
+ MT7988_PIN(64, "LED_A"),
+ MT7988_PIN(65, "LED_B"),
+ MT7988_PIN(66, "LED_C"),
+ MT7988_PIN(67, "LED_D"),
+ MT7988_PIN(68, "LED_E"),
+ MT7988_PIN(69, "GPIO_B"),
+ MT7988_PIN(70, "GPIO_C"),
+ MT7988_PIN(71, "I2C_2_SCL"),
+ MT7988_PIN(72, "I2C_2_SDA"),
+ MT7988_PIN(73, "PCIE30_2L_1_PRESET_N"),
+ MT7988_PIN(74, "PCIE30_1L_0_PRESET_N"),
+ MT7988_PIN(75, "PCIE30_2L_1_WAKE_N"),
+ MT7988_PIN(76, "PCIE30_2L_1_CLKREQ_N"),
+ MT7988_PIN(77, "PCIE30_1L_0_WAKE_N"),
+ MT7988_PIN(78, "PCIE30_1L_0_CLKREQ_N"),
+ MT7988_PIN(79, "USB_DRV_VBUS_P0"),
+ MT7988_PIN(80, "UART1_RXD"),
+ MT7988_PIN(81, "UART1_TXD"),
+ MT7988_PIN(82, "UART1_CTS"),
+ MT7988_PIN(83, "UART1_RTS"),
+};
+
+/* jtag */
+static const int mt7988_tops_jtag0_0_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_tops_jtag0_0_funcs[] = { 2, 2, 2, 2, 2 };
+
+static const int mt7988_wo0_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo0_jtag_funcs[] = { 3, 3, 3, 3, 3 };
+
+static const int mt7988_wo1_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo1_jtag_funcs[] = { 4, 4, 4, 4, 4 };
+
+static const int mt7988_wo2_jtag_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_wo2_jtag_funcs[] = { 5, 5, 5, 5, 5 };
+
+static const int mt7988_jtag_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_jtag_funcs[] = { 1, 1, 1, 1, 1 };
+
+static const int mt7988_tops_jtag0_1_pins[] = { 58, 59, 60, 61, 62 };
+static int mt7988_tops_jtag0_1_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* int_usxgmii */
+static const int mt7988_int_usxgmii_pins[] = { 2, 3 };
+static int mt7988_int_usxgmii_funcs[] = { 3, 3 };
+
+/* pwm */
+static const int mt7988_pwm0_pins[] = { 57 };
+static int mt7988_pwm0_funcs[] = { 1 };
+
+static const int mt7988_pwm1_pins[] = { 21 };
+static int mt7988_pwm1_funcs[] = { 1 };
+
+static const int mt7988_pwm2_pins[] = { 80 };
+static int mt7988_pwm2_funcs[] = { 2 };
+
+static const int mt7988_pwm2_0_pins[] = { 58 };
+static int mt7988_pwm2_0_funcs[] = { 5 };
+
+static const int mt7988_pwm3_pins[] = { 81 };
+static int mt7988_pwm3_funcs[] = { 2 };
+
+static const int mt7988_pwm3_0_pins[] = { 59 };
+static int mt7988_pwm3_0_funcs[] = { 5 };
+
+static const int mt7988_pwm4_pins[] = { 82 };
+static int mt7988_pwm4_funcs[] = { 2 };
+
+static const int mt7988_pwm4_0_pins[] = { 60 };
+static int mt7988_pwm4_0_funcs[] = { 5 };
+
+static const int mt7988_pwm5_pins[] = { 83 };
+static int mt7988_pwm5_funcs[] = { 2 };
+
+static const int mt7988_pwm5_0_pins[] = { 61 };
+static int mt7988_pwm5_0_funcs[] = { 5 };
+
+static const int mt7988_pwm6_pins[] = { 69 };
+static int mt7988_pwm6_funcs[] = { 3 };
+
+static const int mt7988_pwm6_0_pins[] = { 62 };
+static int mt7988_pwm6_0_funcs[] = { 5 };
+
+static const int mt7988_pwm7_pins[] = { 70 };
+static int mt7988_pwm7_funcs[] = { 3 };
+
+static const int mt7988_pwm7_0_pins[] = { 4 };
+static int mt7988_pwm7_0_funcs[] = { 3 };
+
+/* dfd */
+static const int mt7988_dfd_pins[] = { 0, 1, 2, 3, 4 };
+static int mt7988_dfd_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2c */
+static const int mt7988_xfi_phy0_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy0_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c0_pins[] = { 0, 1 };
+static int mt7988_xfi_phy1_i2c0_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c0_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c0_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy_pll_i2c1_pins[] = { 3, 4 };
+static int mt7988_xfi_phy_pll_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_0_pins[] = { 5, 6 };
+static int mt7988_i2c0_0_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c1_sfp_pins[] = { 5, 6 };
+static int mt7988_i2c1_sfp_funcs[] = { 4, 4 };
+
+static const int mt7988_xfi_pextp_phy0_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_pextp_phy1_i2c_pins[] = { 5, 6 };
+static int mt7988_xfi_pextp_phy1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c0_1_pins[] = { 15, 16 };
+static int mt7988_i2c0_1_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u30_phy_i2c0_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c0_pins[] = { 15, 16 };
+static int mt7988_u32_phy_i2c0_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy0_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy0_i2c1_funcs[] = { 5, 5 };
+
+static const int mt7988_xfi_phy1_i2c1_pins[] = { 15, 16 };
+static int mt7988_xfi_phy1_i2c1_funcs[] = { 6, 6 };
+
+static const int mt7988_xfi_phy_pll_i2c2_pins[] = { 15, 16 };
+static int mt7988_xfi_phy_pll_i2c2_funcs[] = { 7, 7 };
+
+static const int mt7988_i2c1_0_pins[] = { 17, 18 };
+static int mt7988_i2c1_0_funcs[] = { 1, 1 };
+
+static const int mt7988_u30_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u30_phy_i2c1_funcs[] = { 2, 2 };
+
+static const int mt7988_u32_phy_i2c1_pins[] = { 17, 18 };
+static int mt7988_u32_phy_i2c1_funcs[] = { 3, 3 };
+
+static const int mt7988_xfi_phy_pll_i2c3_pins[] = { 17, 18 };
+static int mt7988_xfi_phy_pll_i2c3_funcs[] = { 4, 4 };
+
+static const int mt7988_sgmii0_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii0_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_sgmii1_i2c_pins[] = { 17, 18 };
+static int mt7988_sgmii1_i2c_funcs[] = { 6, 6 };
+
+static const int mt7988_i2c1_2_pins[] = { 69, 70 };
+static int mt7988_i2c1_2_funcs[] = { 2, 2 };
+
+static const int mt7988_i2c2_0_pins[] = { 69, 70 };
+static int mt7988_i2c2_0_funcs[] = { 4, 4 };
+
+static const int mt7988_i2c2_1_pins[] = { 71, 72 };
+static int mt7988_i2c2_1_funcs[] = { 1, 1 };
+
+/* eth */
+static const int mt7988_mdc_mdio0_pins[] = { 5, 6 };
+static int mt7988_mdc_mdio0_funcs[] = { 1, 1 };
+
+static const int mt7988_2p5g_ext_mdio_pins[] = { 28, 29 };
+static int mt7988_2p5g_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_gbe_ext_mdio_pins[] = { 30, 31 };
+static int mt7988_gbe_ext_mdio_funcs[] = { 6, 6 };
+
+static const int mt7988_mdc_mdio1_pins[] = { 69, 70 };
+static int mt7988_mdc_mdio1_funcs[] = { 1, 1 };
+
+/* pcie */
+static const int mt7988_pcie_wake_n0_0_pins[] = { 7 };
+static int mt7988_pcie_wake_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_0_pins[] = { 8 };
+static int mt7988_pcie_clk_req_n0_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n3_0_pins[] = { 9 };
+static int mt7988_pcie_wake_n3_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n3_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n3_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n0_1_pins[] = { 10 };
+static int mt7988_pcie_clk_req_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_p0_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p0_phy_i2c_funcs[] = { 3, 3 };
+
+static const int mt7988_pcie_p1_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p1_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p3_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_pcie_p3_phy_i2c_funcs[] = { 4, 4 };
+
+static const int mt7988_pcie_p2_phy_i2c_pins[] = { 7, 8 };
+static int mt7988_pcie_p2_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_ckm_phy_i2c_pins[] = { 9, 10 };
+static int mt7988_ckm_phy_i2c_funcs[] = { 5, 5 };
+
+static const int mt7988_pcie_wake_n0_1_pins[] = { 13 };
+static int mt7988_pcie_wake_n0_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_wake_n3_1_pins[] = { 14 };
+static int mt7988_pcie_wake_n3_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_0_pereset_pins[] = { 19 };
+static int mt7988_pcie_2l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_1_pereset_pins[] = { 20 };
+static int mt7988_pcie_1l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_1_pins[] = { 63 };
+static int mt7988_pcie_clk_req_n2_1_funcs[] = { 2 };
+
+static const int mt7988_pcie_2l_1_pereset_pins[] = { 73 };
+static int mt7988_pcie_2l_1_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_1l_0_pereset_pins[] = { 74 };
+static int mt7988_pcie_1l_0_pereset_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n1_0_pins[] = { 75 };
+static int mt7988_pcie_wake_n1_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n1_pins[] = { 76 };
+static int mt7988_pcie_clk_req_n1_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_0_pins[] = { 77 };
+static int mt7988_pcie_wake_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_clk_req_n2_0_pins[] = { 78 };
+static int mt7988_pcie_clk_req_n2_0_funcs[] = { 1 };
+
+static const int mt7988_pcie_wake_n2_1_pins[] = { 79 };
+static int mt7988_pcie_wake_n2_1_funcs[] = { 2 };
+
+/* pmic */
+static const int mt7988_pmic_pins[] = { 11 };
+static int mt7988_pmic_funcs[] = { 1 };
+
+/* watchdog */
+static const int mt7988_watchdog_pins[] = { 12 };
+static int mt7988_watchdog_funcs[] = { 1 };
+
+/* spi */
+static const int mt7988_spi0_wp_hold_pins[] = { 22, 23 };
+static int mt7988_spi0_wp_hold_funcs[] = { 1, 1 };
+
+static const int mt7988_spi0_pins[] = { 24, 25, 26, 27 };
+static int mt7988_spi0_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi1_pins[] = { 28, 29, 30, 31 };
+static int mt7988_spi1_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_pins[] = { 32, 33, 34, 35 };
+static int mt7988_spi2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_spi2_wp_hold_pins[] = { 36, 37 };
+static int mt7988_spi2_wp_hold_funcs[] = { 1, 1 };
+
+/* flash */
+static const int mt7988_snfi_pins[] = { 22, 23, 24, 25, 26, 27 };
+static int mt7988_snfi_funcs[] = { 2, 2, 2, 2, 2, 2 };
+
+static const int mt7988_emmc_45_pins[] = {
+ 21, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37
+};
+static int mt7988_emmc_45_funcs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_sdcard_pins[] = { 32, 33, 34, 35, 36, 37 };
+static int mt7988_sdcard_funcs[] = { 5, 5, 5, 5, 5, 5 };
+
+static const int mt7988_emmc_51_pins[] = { 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49 };
+static int mt7988_emmc_51_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* uart */
+static const int mt7988_uart2_pins[] = { 0, 1, 2, 3 };
+static int mt7988_uart2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_tops_uart0_0_pins[] = { 22, 23 };
+static int mt7988_tops_uart0_0_funcs[] = { 3, 3 };
+
+static const int mt7988_uart2_0_pins[] = { 28, 29, 30, 31 };
+static int mt7988_uart2_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_0_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart1_0_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_1_pins[] = { 32, 33, 34, 35 };
+static int mt7988_uart2_1_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_net_wo0_uart_txd_0_pins[] = { 28 };
+static int mt7988_net_wo0_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_0_pins[] = { 29 };
+static int mt7988_net_wo1_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_0_pins[] = { 30 };
+static int mt7988_net_wo2_uart_txd_0_funcs[] = { 3 };
+
+static const int mt7988_tops_uart1_0_pins[] = { 28, 29 };
+static int mt7988_tops_uart1_0_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart0_1_pins[] = { 30, 31 };
+static int mt7988_tops_uart0_1_funcs[] = { 4, 4 };
+
+static const int mt7988_tops_uart1_1_pins[] = { 36, 37 };
+static int mt7988_tops_uart1_1_funcs[] = { 3, 3 };
+
+static const int mt7988_uart0_pins[] = { 55, 56 };
+static int mt7988_uart0_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart0_2_pins[] = { 55, 56 };
+static int mt7988_tops_uart0_2_funcs[] = { 2, 2 };
+
+static const int mt7988_uart2_2_pins[] = { 50, 51, 52, 53 };
+static int mt7988_uart2_2_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart1_1_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart1_1_funcs[] = { 2, 2, 2, 2 };
+
+static const int mt7988_uart2_3_pins[] = { 58, 59, 60, 61 };
+static int mt7988_uart2_3_funcs[] = { 3, 3, 3, 3 };
+
+static const int mt7988_uart1_2_pins[] = { 80, 81, 82, 83 };
+static int mt7988_uart1_2_funcs[] = { 1, 1, 1, 1 };
+
+static const int mt7988_uart1_2_lite_pins[] = { 80, 81 };
+static int mt7988_uart1_2_lite_funcs[] = { 1, 1 };
+
+static const int mt7988_tops_uart1_2_pins[] = { 80, 81 };
+static int mt7988_tops_uart1_2_funcs[] = { 4, 4, };
+
+static const int mt7988_net_wo0_uart_txd_1_pins[] = { 80 };
+static int mt7988_net_wo0_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo1_uart_txd_1_pins[] = { 81 };
+static int mt7988_net_wo1_uart_txd_1_funcs[] = { 3 };
+
+static const int mt7988_net_wo2_uart_txd_1_pins[] = { 82 };
+static int mt7988_net_wo2_uart_txd_1_funcs[] = { 3 };
+
+/* udi */
+static const int mt7988_udi_pins[] = { 32, 33, 34, 35, 36 };
+static int mt7988_udi_funcs[] = { 4, 4, 4, 4, 4 };
+
+/* i2s */
+static const int mt7988_i2s_pins[] = { 50, 51, 52, 53, 54 };
+static int mt7988_i2s_funcs[] = { 1, 1, 1, 1, 1 };
+
+/* pcm */
+static const int mt7988_pcm_pins[] = { 50, 51, 52, 53 };
+static int mt7988_pcm_funcs[] = { 1, 1, 1, 1 };
+
+/* led */
+static const int mt7988_gbe0_led1_pins[] = { 58 };
+static int mt7988_gbe0_led1_funcs[] = { 6 };
+static const int mt7988_gbe1_led1_pins[] = { 59 };
+static int mt7988_gbe1_led1_funcs[] = { 6 };
+static const int mt7988_gbe2_led1_pins[] = { 60 };
+static int mt7988_gbe2_led1_funcs[] = { 6 };
+static const int mt7988_gbe3_led1_pins[] = { 61 };
+static int mt7988_gbe3_led1_funcs[] = { 6 };
+
+static const int mt7988_2p5gbe_led1_pins[] = { 62 };
+static int mt7988_2p5gbe_led1_funcs[] = { 6 };
+
+static const int mt7988_gbe0_led0_pins[] = { 64 };
+static int mt7988_gbe0_led0_funcs[] = { 1 };
+static const int mt7988_gbe1_led0_pins[] = { 65 };
+static int mt7988_gbe1_led0_funcs[] = { 1 };
+static const int mt7988_gbe2_led0_pins[] = { 66 };
+static int mt7988_gbe2_led0_funcs[] = { 1 };
+static const int mt7988_gbe3_led0_pins[] = { 67 };
+static int mt7988_gbe3_led0_funcs[] = { 1 };
+
+static const int mt7988_2p5gbe_led0_pins[] = { 68 };
+static int mt7988_2p5gbe_led0_funcs[] = { 1 };
+
+/* usb */
+static const int mt7988_drv_vbus_p1_pins[] = { 63 };
+static int mt7988_drv_vbus_p1_funcs[] = { 1 };
+
+static const int mt7988_drv_vbus_pins[] = { 79 };
+static int mt7988_drv_vbus_funcs[] = { 1 };
+
+static const struct group_desc mt7988_groups[] = {
+ /* @GPIO(0,1,2,3): uart2 */
+ PINCTRL_PIN_GROUP("uart2", mt7988_uart2),
+ /* @GPIO(0,1,2,3,4): tops_jtag0_0 */
+ PINCTRL_PIN_GROUP("tops_jtag0_0", mt7988_tops_jtag0_0),
+ /* @GPIO(2,3): int_usxgmii */
+ PINCTRL_PIN_GROUP("int_usxgmii", mt7988_int_usxgmii),
+ /* @GPIO(0,1,2,3,4): dfd */
+ PINCTRL_PIN_GROUP("dfd", mt7988_dfd),
+ /* @GPIO(0,1): xfi_phy0_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c0", mt7988_xfi_phy0_i2c0),
+ /* @GPIO(0,1): xfi_phy1_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c0", mt7988_xfi_phy1_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c0 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c0", mt7988_xfi_phy_pll_i2c0),
+ /* @GPIO(3,4): xfi_phy_pll_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c1", mt7988_xfi_phy_pll_i2c1),
+ /* @GPIO(4): pwm7_0 */
+ PINCTRL_PIN_GROUP("pwm7_0", mt7988_pwm7_0),
+ /* @GPIO(5,6) i2c0_0 */
+ PINCTRL_PIN_GROUP("i2c0_0", mt7988_i2c0_0),
+ /* @GPIO(5,6) i2c1_sfp */
+ PINCTRL_PIN_GROUP("i2c1_sfp", mt7988_i2c1_sfp),
+ /* @GPIO(5,6) xfi_pextp_phy0_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy0_i2c", mt7988_xfi_pextp_phy0_i2c),
+ /* @GPIO(5,6) xfi_pextp_phy1_i2c */
+ PINCTRL_PIN_GROUP("xfi_pextp_phy1_i2c", mt7988_xfi_pextp_phy1_i2c),
+ /* @GPIO(5,6) mdc_mdio0 */
+ PINCTRL_PIN_GROUP("mdc_mdio0", mt7988_mdc_mdio0),
+ /* @GPIO(7): pcie_wake_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_0", mt7988_pcie_wake_n0_0),
+ /* @GPIO(8): pcie_clk_req_n0_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_0", mt7988_pcie_clk_req_n0_0),
+ /* @GPIO(9): pcie_wake_n3_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_0", mt7988_pcie_wake_n3_0),
+ /* @GPIO(10): pcie_clk_req_n3 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n3", mt7988_pcie_clk_req_n3),
+ /* @GPIO(10): pcie_clk_req_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n0_1", mt7988_pcie_clk_req_n0_1),
+ /* @GPIO(7,8) pcie_p0_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p0_phy_i2c", mt7988_pcie_p0_phy_i2c),
+ /* @GPIO(7,8) pcie_p1_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p1_phy_i2c", mt7988_pcie_p1_phy_i2c),
+ /* @GPIO(7,8) pcie_p2_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p2_phy_i2c", mt7988_pcie_p2_phy_i2c),
+ /* @GPIO(9,10) pcie_p3_phy_i2c */
+ PINCTRL_PIN_GROUP("pcie_p3_phy_i2c", mt7988_pcie_p3_phy_i2c),
+ /* @GPIO(9,10) ckm_phy_i2c */
+ PINCTRL_PIN_GROUP("ckm_phy_i2c", mt7988_ckm_phy_i2c),
+ /* @GPIO(11): pmic */
+ PINCTRL_PIN_GROUP("pcie_pmic", mt7988_pmic),
+ /* @GPIO(12): watchdog */
+ PINCTRL_PIN_GROUP("watchdog", mt7988_watchdog),
+ /* @GPIO(13): pcie_wake_n0_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n0_1", mt7988_pcie_wake_n0_1),
+ /* @GPIO(14): pcie_wake_n3_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n3_1", mt7988_pcie_wake_n3_1),
+ /* @GPIO(15,16) i2c0_1 */
+ PINCTRL_PIN_GROUP("i2c0_1", mt7988_i2c0_1),
+ /* @GPIO(15,16) u30_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c0", mt7988_u30_phy_i2c0),
+ /* @GPIO(15,16) u32_phy_i2c0 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c0", mt7988_u32_phy_i2c0),
+ /* @GPIO(15,16) xfi_phy0_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy0_i2c1", mt7988_xfi_phy0_i2c1),
+ /* @GPIO(15,16) xfi_phy1_i2c1 */
+ PINCTRL_PIN_GROUP("xfi_phy1_i2c1", mt7988_xfi_phy1_i2c1),
+ /* @GPIO(15,16) xfi_phy_pll_i2c2 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c2", mt7988_xfi_phy_pll_i2c2),
+ /* @GPIO(17,18) i2c1_0 */
+ PINCTRL_PIN_GROUP("i2c1_0", mt7988_i2c1_0),
+ /* @GPIO(17,18) u30_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u30_phy_i2c1", mt7988_u30_phy_i2c1),
+ /* @GPIO(17,18) u32_phy_i2c1 */
+ PINCTRL_PIN_GROUP("u32_phy_i2c1", mt7988_u32_phy_i2c1),
+ /* @GPIO(17,18) xfi_phy_pll_i2c3 */
+ PINCTRL_PIN_GROUP("xfi_phy_pll_i2c3", mt7988_xfi_phy_pll_i2c3),
+ /* @GPIO(17,18) sgmii0_i2c */
+ PINCTRL_PIN_GROUP("sgmii0_i2c", mt7988_sgmii0_i2c),
+ /* @GPIO(17,18) sgmii1_i2c */
+ PINCTRL_PIN_GROUP("sgmii1_i2c", mt7988_sgmii1_i2c),
+ /* @GPIO(19): pcie_2l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_0_pereset", mt7988_pcie_2l_0_pereset),
+ /* @GPIO(20): pcie_1l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_1_pereset", mt7988_pcie_1l_1_pereset),
+ /* @GPIO(21): pwm1 */
+ PINCTRL_PIN_GROUP("pwm1", mt7988_pwm1),
+ /* @GPIO(22,23) spi0_wp_hold */
+ PINCTRL_PIN_GROUP("spi0_wp_hold", mt7988_spi0_wp_hold),
+ /* @GPIO(24,25,26,27) spi0 */
+ PINCTRL_PIN_GROUP("spi0", mt7988_spi0),
+ /* @GPIO(28,29,30,31) spi1 */
+ PINCTRL_PIN_GROUP("spi1", mt7988_spi1),
+ /* @GPIO(32,33,34,35) spi2 */
+ PINCTRL_PIN_GROUP("spi2", mt7988_spi2),
+ /* @GPIO(36,37) spi2_wp_hold */
+ PINCTRL_PIN_GROUP("spi2_wp_hold", mt7988_spi2_wp_hold),
+ /* @GPIO(22,23,24,25,26,27) snfi */
+ PINCTRL_PIN_GROUP("snfi", mt7988_snfi),
+ /* @GPIO(22,23) tops_uart0_0 */
+ PINCTRL_PIN_GROUP("tops_uart0_0", mt7988_tops_uart0_0),
+ /* @GPIO(28,29,30,31) uart2_0 */
+ PINCTRL_PIN_GROUP("uart2_0", mt7988_uart2_0),
+ /* @GPIO(32,33,34,35) uart1_0 */
+ PINCTRL_PIN_GROUP("uart1_0", mt7988_uart1_0),
+ /* @GPIO(32,33,34,35) uart2_1 */
+ PINCTRL_PIN_GROUP("uart2_1", mt7988_uart2_1),
+ /* @GPIO(28) net_wo0_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_0", mt7988_net_wo0_uart_txd_0),
+ /* @GPIO(29) net_wo1_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_0", mt7988_net_wo1_uart_txd_0),
+ /* @GPIO(30) net_wo2_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_0", mt7988_net_wo2_uart_txd_0),
+ /* @GPIO(28,29) tops_uart1_0 */
+ PINCTRL_PIN_GROUP("tops_uart0_0", mt7988_tops_uart1_0),
+ /* @GPIO(30,31) tops_uart0_1 */
+ PINCTRL_PIN_GROUP("tops_uart0_1", mt7988_tops_uart0_1),
+ /* @GPIO(36,37) tops_uart1_1 */
+ PINCTRL_PIN_GROUP("tops_uart1_1", mt7988_tops_uart1_1),
+ /* @GPIO(32,33,34,35,36) udi */
+ PINCTRL_PIN_GROUP("udi", mt7988_udi),
+ /* @GPIO(21,28,29,30,31,32,33,34,35,36,37) emmc_45 */
+ PINCTRL_PIN_GROUP("emmc_45", mt7988_emmc_45),
+ /* @GPIO(32,33,34,35,36,37) sdcard */
+ PINCTRL_PIN_GROUP("sdcard", mt7988_sdcard),
+ /* @GPIO(38,39,40,41,42,43,44,45,46,47,48,49) emmc_51 */
+ PINCTRL_PIN_GROUP("emmc_51", mt7988_emmc_51),
+ /* @GPIO(28,29) 2p5g_ext_mdio */
+ PINCTRL_PIN_GROUP("2p5g_ext_mdio", mt7988_2p5g_ext_mdio),
+ /* @GPIO(30,31) gbe_ext_mdio */
+ PINCTRL_PIN_GROUP("gbe_ext_mdio", mt7988_gbe_ext_mdio),
+ /* @GPIO(50,51,52,53,54) i2s */
+ PINCTRL_PIN_GROUP("i2s", mt7988_i2s),
+ /* @GPIO(50,51,52,53) pcm */
+ PINCTRL_PIN_GROUP("pcm", mt7988_pcm),
+ /* @GPIO(55,56) uart0 */
+ PINCTRL_PIN_GROUP("uart0", mt7988_uart0),
+ /* @GPIO(55,56) tops_uart0_2 */
+ PINCTRL_PIN_GROUP("tops_uart0_2", mt7988_tops_uart0_2),
+ /* @GPIO(50,51,52,53) uart2_2 */
+ PINCTRL_PIN_GROUP("uart2_2", mt7988_uart2_2),
+ /* @GPIO(50,51,52,53,54) wo0_jtag */
+ PINCTRL_PIN_GROUP("wo0_jtag", mt7988_wo0_jtag),
+ /* @GPIO(50,51,52,53,54) wo1_jtag */
+ PINCTRL_PIN_GROUP("wo1_jtag", mt7988_wo1_jtag),
+ /* @GPIO(50,51,52,53,54) wo2_jtag */
+ PINCTRL_PIN_GROUP("wo2_jtag", mt7988_wo2_jtag),
+ /* @GPIO(57) pwm0 */
+ PINCTRL_PIN_GROUP("pwm0", mt7988_pwm0),
+ /* @GPIO(58) pwm2_0 */
+ PINCTRL_PIN_GROUP("pwm2_0", mt7988_pwm2_0),
+ /* @GPIO(59) pwm3_0 */
+ PINCTRL_PIN_GROUP("pwm3_0", mt7988_pwm3_0),
+ /* @GPIO(60) pwm4_0 */
+ PINCTRL_PIN_GROUP("pwm4_0", mt7988_pwm4_0),
+ /* @GPIO(61) pwm5_0 */
+ PINCTRL_PIN_GROUP("pwm5_0", mt7988_pwm5_0),
+ /* @GPIO(58,59,60,61,62) jtag */
+ PINCTRL_PIN_GROUP("jtag", mt7988_jtag),
+ /* @GPIO(58,59,60,61,62) tops_jtag0_1 */
+ PINCTRL_PIN_GROUP("tops_jtag0_1", mt7988_tops_jtag0_1),
+ /* @GPIO(58,59,60,61) uart2_3 */
+ PINCTRL_PIN_GROUP("uart2_3", mt7988_uart2_3),
+ /* @GPIO(58,59,60,61) uart1_1 */
+ PINCTRL_PIN_GROUP("uart1_1", mt7988_uart1_1),
+ /* @GPIO(58,59,60,61) gbe_led1 */
+ PINCTRL_PIN_GROUP("gbe0_led1", mt7988_gbe0_led1),
+ PINCTRL_PIN_GROUP("gbe1_led1", mt7988_gbe1_led1),
+ PINCTRL_PIN_GROUP("gbe2_led1", mt7988_gbe2_led1),
+ PINCTRL_PIN_GROUP("gbe3_led1", mt7988_gbe3_led1),
+ /* @GPIO(62) pwm6_0 */
+ PINCTRL_PIN_GROUP("pwm6_0", mt7988_pwm6_0),
+ /* @GPIO(62) 2p5gbe_led1 */
+ PINCTRL_PIN_GROUP("2p5gbe_led1", mt7988_2p5gbe_led1),
+ /* @GPIO(64,65,66,67) gbe_led0 */
+ PINCTRL_PIN_GROUP("gbe0_led0", mt7988_gbe0_led0),
+ PINCTRL_PIN_GROUP("gbe1_led0", mt7988_gbe1_led0),
+ PINCTRL_PIN_GROUP("gbe2_led0", mt7988_gbe2_led0),
+ PINCTRL_PIN_GROUP("gbe3_led0", mt7988_gbe3_led0),
+ /* @GPIO(68) 2p5gbe_led0 */
+ PINCTRL_PIN_GROUP("2p5gbe_led0", mt7988_2p5gbe_led0),
+ /* @GPIO(63) drv_vbus_p1 */
+ PINCTRL_PIN_GROUP("drv_vbus_p1", mt7988_drv_vbus_p1),
+ /* @GPIO(63) pcie_clk_req_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_1", mt7988_pcie_clk_req_n2_1),
+ /* @GPIO(69, 70) mdc_mdio1 */
+ PINCTRL_PIN_GROUP("mdc_mdio1", mt7988_mdc_mdio1),
+ /* @GPIO(69, 70) i2c1_2 */
+ PINCTRL_PIN_GROUP("i2c1_2", mt7988_i2c1_2),
+ /* @GPIO(69) pwm6 */
+ PINCTRL_PIN_GROUP("pwm6", mt7988_pwm6),
+ /* @GPIO(70) pwm7 */
+ PINCTRL_PIN_GROUP("pwm7", mt7988_pwm7),
+ /* @GPIO(69,70) i2c2_0 */
+ PINCTRL_PIN_GROUP("i2c2_0", mt7988_i2c2_0),
+ /* @GPIO(71,72) i2c2_1 */
+ PINCTRL_PIN_GROUP("i2c2_1", mt7988_i2c2_1),
+ /* @GPIO(73) pcie_2l_1_pereset */
+ PINCTRL_PIN_GROUP("pcie_2l_1_pereset", mt7988_pcie_2l_1_pereset),
+ /* @GPIO(74) pcie_1l_0_pereset */
+ PINCTRL_PIN_GROUP("pcie_1l_0_pereset", mt7988_pcie_1l_0_pereset),
+ /* @GPIO(75) pcie_wake_n1_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n1_0", mt7988_pcie_wake_n1_0),
+ /* @GPIO(76) pcie_clk_req_n1 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n1", mt7988_pcie_clk_req_n1),
+ /* @GPIO(77) pcie_wake_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_0", mt7988_pcie_wake_n2_0),
+ /* @GPIO(78) pcie_clk_req_n2_0 */
+ PINCTRL_PIN_GROUP("pcie_clk_req_n2_0", mt7988_pcie_clk_req_n2_0),
+ /* @GPIO(79) drv_vbus */
+ PINCTRL_PIN_GROUP("drv_vbus", mt7988_drv_vbus),
+ /* @GPIO(79) pcie_wake_n2_1 */
+ PINCTRL_PIN_GROUP("pcie_wake_n2_1", mt7988_pcie_wake_n2_1),
+ /* @GPIO(80,81,82,83) uart1_2 */
+ PINCTRL_PIN_GROUP("uart1_2", mt7988_uart1_2),
+ /* @GPIO(80,81) uart1_2_lite */
+ PINCTRL_PIN_GROUP("uart1_2_lite", mt7988_uart1_2_lite),
+ /* @GPIO(80) pwm2 */
+ PINCTRL_PIN_GROUP("pwm2", mt7988_pwm2),
+ /* @GPIO(81) pwm3 */
+ PINCTRL_PIN_GROUP("pwm3", mt7988_pwm3),
+ /* @GPIO(82) pwm4 */
+ PINCTRL_PIN_GROUP("pwm4", mt7988_pwm4),
+ /* @GPIO(83) pwm5 */
+ PINCTRL_PIN_GROUP("pwm5", mt7988_pwm5),
+ /* @GPIO(80) net_wo0_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_0", mt7988_net_wo0_uart_txd_0),
+ /* @GPIO(81) net_wo1_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_0", mt7988_net_wo1_uart_txd_0),
+ /* @GPIO(82) net_wo2_uart_txd_0 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_0", mt7988_net_wo2_uart_txd_0),
+ /* @GPIO(80,81) tops_uart1_2 */
+ PINCTRL_PIN_GROUP("tops_uart1_2", mt7988_tops_uart1_2),
+ /* @GPIO(80) net_wo0_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_1", mt7988_net_wo0_uart_txd_1),
+ /* @GPIO(81) net_wo1_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo1_uart_txd_1", mt7988_net_wo1_uart_txd_1),
+ /* @GPIO(82) net_wo2_uart_txd_1 */
+ PINCTRL_PIN_GROUP("net_wo2_uart_txd_1", mt7988_net_wo2_uart_txd_1),
+};
+
+/* Join the groups that provide the same capability from the user's point of
+ * view, so that they can be selected together through the device tree.
+ */
+static const char * const mt7988_jtag_groups[] = {
+ "tops_jtag0_0", "wo0_jtag", "wo1_jtag",
+ "wo2_jtag", "jtag", "tops_jtag0_1",
+};
+static const char * const mt7988_int_usxgmii_groups[] = {
+ "int_usxgmii",
+};
+static const char * const mt7988_pwm_groups[] = {
+ "pwm0", "pwm1", "pwm2", "pwm2_0", "pwm3", "pwm3_0", "pwm4", "pwm4_0",
+ "pwm5", "pwm5_0", "pwm6", "pwm6_0", "pwm7", "pwm7_0",
+};
+static const char * const mt7988_dfd_groups[] = {
+ "dfd",
+};
+static const char * const mt7988_i2c_groups[] = {
+ "xfi_phy0_i2c0",
+ "xfi_phy1_i2c0",
+ "xfi_phy_pll_i2c0",
+ "xfi_phy_pll_i2c1",
+ "i2c0_0",
+ "i2c1_sfp",
+ "xfi_pextp_phy0_i2c",
+ "xfi_pextp_phy1_i2c",
+ "i2c0_1",
+ "u30_phy_i2c0",
+ "u32_phy_i2c0",
+ "xfi_phy0_i2c1",
+ "xfi_phy1_i2c1",
+ "xfi_phy_pll_i2c2",
+ "i2c1_0",
+ "u30_phy_i2c1",
+ "u32_phy_i2c1",
+ "xfi_phy_pll_i2c3",
+ "sgmii0_i2c",
+ "sgmii1_i2c",
+ "i2c1_2",
+ "i2c2_0",
+ "i2c2_1",
+};
+static const char * const mt7988_ethernet_groups[] = {
+ "mdc_mdio0",
+ "2p5g_ext_mdio",
+ "gbe_ext_mdio",
+ "mdc_mdio1",
+};
+static const char * const mt7988_pcie_groups[] = {
+ "pcie_wake_n0_0", "pcie_clk_req_n0_0", "pcie_wake_n3_0",
+ "pcie_clk_req_n3", "pcie_p0_phy_i2c", "pcie_p1_phy_i2c",
+ "pcie_p3_phy_i2c", "pcie_p2_phy_i2c", "ckm_phy_i2c",
+ "pcie_wake_n0_1", "pcie_wake_n3_1", "pcie_2l_0_pereset",
+ "pcie_1l_1_pereset", "pcie_clk_req_n2_1", "pcie_2l_1_pereset",
+ "pcie_1l_0_pereset", "pcie_wake_n1_0", "pcie_clk_req_n1",
+ "pcie_wake_n2_0", "pcie_clk_req_n2_0", "pcie_wake_n2_1",
+ "pcie_clk_req_n0_1"
+};
+static const char * const mt7988_pmic_groups[] = {
+ "pmic",
+};
+static const char * const mt7988_wdt_groups[] = {
+ "watchdog",
+};
+static const char * const mt7988_spi_groups[] = {
+ "spi0", "spi0_wp_hold", "spi1", "spi2", "spi2_wp_hold",
+};
+static const char * const mt7988_flash_groups[] = { "emmc_45", "sdcard", "snfi",
+ "emmc_51" };
+static const char * const mt7988_uart_groups[] = {
+ "uart2",
+ "tops_uart0_0",
+ "uart2_0",
+ "uart1_0",
+ "uart2_1",
+ "net_wo0_uart_txd_0",
+ "net_wo1_uart_txd_0",
+ "net_wo2_uart_txd_0",
+ "tops_uart1_0",
+ "ops_uart0_1",
+ "ops_uart1_1",
+ "uart0",
+ "tops_uart0_2",
+ "uart1_1",
+ "uart2_3",
+ "uart1_2",
+ "uart1_2_lite",
+ "tops_uart1_2",
+ "net_wo0_uart_txd_1",
+ "net_wo1_uart_txd_1",
+ "net_wo2_uart_txd_1",
+};
+static const char * const mt7988_udi_groups[] = {
+ "udi",
+};
+static const char * const mt7988_audio_groups[] = {
+ "i2s", "pcm",
+};
+static const char * const mt7988_led_groups[] = {
+ "gbe0_led1", "gbe1_led1", "gbe2_led1", "gbe3_led1", "2p5gbe_led1",
+ "gbe0_led0", "gbe1_led0", "gbe2_led0", "gbe3_led0", "2p5gbe_led0",
+ "wf5g_led0", "wf5g_led1",
+};
+static const char * const mt7988_usb_groups[] = {
+ "drv_vbus",
+ "drv_vbus_p1",
+};
+
+static const struct function_desc mt7988_functions[] = {
+ { { "audio", mt7988_audio_groups, ARRAY_SIZE(mt7988_audio_groups) },
+ NULL },
+ { { "jtag", mt7988_jtag_groups, ARRAY_SIZE(mt7988_jtag_groups) },
+ NULL },
+ { { "int_usxgmii", mt7988_int_usxgmii_groups,
+ ARRAY_SIZE(mt7988_int_usxgmii_groups) },
+ NULL },
+ { { "pwm", mt7988_pwm_groups, ARRAY_SIZE(mt7988_pwm_groups) }, NULL },
+ { { "dfd", mt7988_dfd_groups, ARRAY_SIZE(mt7988_dfd_groups) }, NULL },
+ { { "i2c", mt7988_i2c_groups, ARRAY_SIZE(mt7988_i2c_groups) }, NULL },
+ { { "eth", mt7988_ethernet_groups, ARRAY_SIZE(mt7988_ethernet_groups) },
+ NULL },
+ { { "pcie", mt7988_pcie_groups, ARRAY_SIZE(mt7988_pcie_groups) },
+ NULL },
+ { { "pmic", mt7988_pmic_groups, ARRAY_SIZE(mt7988_pmic_groups) },
+ NULL },
+ { { "watchdog", mt7988_wdt_groups, ARRAY_SIZE(mt7988_wdt_groups) },
+ NULL },
+ { { "spi", mt7988_spi_groups, ARRAY_SIZE(mt7988_spi_groups) }, NULL },
+ { { "flash", mt7988_flash_groups, ARRAY_SIZE(mt7988_flash_groups) },
+ NULL },
+ { { "uart", mt7988_uart_groups, ARRAY_SIZE(mt7988_uart_groups) },
+ NULL },
+ { { "udi", mt7988_udi_groups, ARRAY_SIZE(mt7988_udi_groups) }, NULL },
+ { { "usb", mt7988_usb_groups, ARRAY_SIZE(mt7988_usb_groups) }, NULL },
+ { { "led", mt7988_led_groups, ARRAY_SIZE(mt7988_led_groups) }, NULL },
+};
+
+static const struct mtk_eint_hw mt7988_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7988_pins),
+ .db_cnt = 16,
+};
+
+static const char * const mt7988_pinctrl_register_base_names[] = {
+ "gpio", "iocfg_tr", "iocfg_br",
+ "iocfg_rb", "iocfg_lb", "iocfg_tl",
+};
+
+static const struct mtk_pin_soc mt7988_data = {
+ .reg_cal = mt7988_reg_cals,
+ .pins = mt7988_pins,
+ .npins = ARRAY_SIZE(mt7988_pins),
+ .grps = mt7988_groups,
+ .ngrps = ARRAY_SIZE(mt7988_groups),
+ .funcs = mt7988_functions,
+ .nfuncs = ARRAY_SIZE(mt7988_functions),
+ .eint_hw = &mt7988_eint_hw,
+ .gpio_m = 0,
+ .ies_present = false,
+ .base_names = mt7988_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt7988_pinctrl_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set,
+ .bias_disable_get = mtk_pinconf_bias_disable_get,
+ .bias_set = mtk_pinconf_bias_set,
+ .bias_get = mtk_pinconf_bias_get,
+ .pull_type = mt7988_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt7988_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt7988-pinctrl" },
+ {}
+};
+
+static int mt7988_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_moore_pinctrl_probe(pdev, &mt7988_data);
+}
+
+static struct platform_driver mt7988_pinctrl_driver = {
+ .driver = {
+ .name = "mt7988-pinctrl",
+ .of_match_table = mt7988_pinctrl_of_match,
+ },
+ .probe = mt7988_pinctrl_probe,
+};
+
+static int __init mt7988_pinctrl_init(void)
+{
+ return platform_driver_register(&mt7988_pinctrl_driver);
+}
+arch_initcall(mt7988_pinctrl_init);
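The tables above are only reached through the generic pinctrl consumer API: a board device tree binds a state (typically "default") to one of the function/group pairs, e.g. function "pwm" with group "pwm0", which muxes pin 57 (PWMD0) to mode 1. A minimal consumer-side sketch, assuming a hypothetical client driver that is not part of this patch:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/pinctrl/consumer.h>

	/* Hypothetical client probe: the pinctrl core resolves the "default"
	 * state against mt7988_functions[]/mt7988_groups[] and calls back into
	 * the driver above to program the mode bits for the selected pins.
	 */
	static int example_client_probe(struct platform_device *pdev)
	{
		struct pinctrl *p;
		struct pinctrl_state *s;

		p = devm_pinctrl_get(&pdev->dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		return pinctrl_select_state(p, s);
	}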
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 54301fbba524..00e95682b9f8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1);
*/
static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
- u32 pullup, u32 arg)
+ u32 pullup, u32 arg, bool pd_only)
{
int err, pu, pd;
@@ -587,18 +587,16 @@ static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
pu = 0;
pd = 1;
} else {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
- if (err)
- goto out;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
+ if (!pd_only) {
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
+ if (err)
+ return err;
+ }
-out:
- return err;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
}
static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw,
@@ -737,7 +735,7 @@ static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
return err;
}
- return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable, false);
}
int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
@@ -758,8 +756,14 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
return 0;
}
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, true);
+ if (!err)
+ return err;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg, false);
if (!err)
return 0;
}
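For MT7988 this matters for pins 15-18, 71 and 72, whose mt7988_pull_type[] entry is MTK_PULL_PD_TYPE only: bias_set_combo() now tries the new PD-only setter before the PU/PD pair. A standalone sketch of that capability-ordered dispatch, using stub setters and placeholder bit values (only MTK_PULL_PD_TYPE = BIT(4) is visible in this series):

	#include <stdio.h>

	#define PULL_PU_PD	(1u << 1)	/* placeholder for MTK_PULL_PU_PD_TYPE */
	#define PULL_PD		(1u << 4)	/* mirrors the new MTK_PULL_PD_TYPE */

	/* Stubs standing in for the register writes done by the kernel helpers. */
	static int set_pd_only(unsigned int arg)
	{
		printf("write PD register only: %u\n", arg);
		return 0;
	}

	static int set_pu_pd(unsigned int pullup, unsigned int arg)
	{
		printf("write PU=%u, PD=%u\n", pullup, arg);
		return 0;
	}

	/* First capability bit whose setter succeeds wins, as in bias_set_combo(). */
	static int set_combo(unsigned int type, unsigned int pullup, unsigned int arg)
	{
		if ((type & PULL_PD) && !set_pd_only(arg))
			return 0;
		if ((type & PULL_PU_PD) && !set_pu_pd(pullup, arg))
			return 0;
		return -1;
	}

	int main(void)
	{
		return set_combo(PULL_PD, 0, 1);	/* e.g. MT7988 pin 15 */
	}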
@@ -878,6 +882,29 @@ out:
return err;
}
+static int mtk_pinconf_bias_get_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, pd;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
+
+ if (pd == 0) {
+ *pullup = 0;
+ *enable = MTK_DISABLE;
+ } else if (pd == 1) {
+ *pullup = 0;
+ *enable = MTK_ENABLE;
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
u32 *pullup, u32 *enable)
@@ -947,6 +974,12 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
return 0;
}
+ if (try_all_type & MTK_PULL_PD_TYPE) {
+ err = mtk_pinconf_bias_get_pd(hw, desc, pullup, enable);
+ if (!err)
+ return err;
+ }
+
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
if (!err)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 23688ca6d04e..9c271dc2b521 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -24,6 +24,7 @@
* turned on/off itself. But it can't be selected pull up/down
*/
#define MTK_PULL_RSEL_TYPE BIT(3)
+#define MTK_PULL_PD_TYPE BIT(4)
/* MTK_PULL_PU_PD_RSEL_TYPE is a type which is controlled by
* MTK_PULL_PU_PD_TYPE and MTK_PULL_RSEL_TYPE.
*/
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 4ce2e35a6373..8cd4ba5cf0bd 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -22,6 +22,7 @@
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mfd/abx500.h>
@@ -496,7 +497,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " %-9s", pull_up_down[pd]);
} else
- seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo");
+ seq_printf(s, " %-9s", str_hi_lo(chip->get(chip, offset)));
mode = abx500_get_mode(pctldev, chip, offset);
@@ -865,7 +866,7 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
pin, configs[i],
(param == PIN_CONFIG_OUTPUT) ? "output " : "input",
(param == PIN_CONFIG_OUTPUT) ?
- (argument ? "high" : "low") :
+ str_high_low(argument) :
(argument ? "pull up" : "pull down"));
/* on ABx500, there is no GPIO0, so adjust the offset */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f4f10c60c1d2..8940e04fcf4c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* Since we request GPIOs from ourself */
@@ -438,9 +439,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
* - Any spurious wake up event during switch sequence to be ignored and
* cleared
*/
-static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+static int nmk_gpio_glitch_slpm_init(unsigned int *slpm)
{
- int i;
+ int i, j, ret;
for (i = 0; i < NMK_MAX_BANKS; i++) {
struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
@@ -449,11 +450,21 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
if (!chip)
break;
- clk_enable(chip->clk);
+ ret = clk_enable(chip->clk);
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ chip = nmk_gpio_chips[j];
+ clk_disable(chip->clk);
+ }
+
+ return ret;
+ }
slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
writel(temp, chip->addr + NMK_GPIO_SLPC);
}
+
+ return 0;
}
static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
@@ -923,7 +934,9 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
slpm[nmk_chip->bank] &= ~BIT(bit);
}
- nmk_gpio_glitch_slpm_init(slpm);
+ ret = nmk_gpio_glitch_slpm_init(slpm);
+ if (ret)
+ goto out_pre_slpm_init;
}
for (i = 0; i < g->grp.npins; i++) {
@@ -940,7 +953,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
g->grp.pins[i], g->altsetting);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ goto out_glitch;
+
/*
* If the pin is switching to altfunc, and there was an
* interrupt installed on it which has been lazy disabled,
@@ -988,6 +1004,7 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
unsigned int bit;
+ int ret;
if (!range) {
dev_err(npct->dev, "invalid range\n");
@@ -1004,7 +1021,9 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
find_nmk_gpio_from_pin(pin, &bit);
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
/* There is no glitch when converting any pin to GPIO */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
clk_disable(nmk_chip->clk);
@@ -1058,6 +1077,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
+ int ret;
nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
if (!nmk_chip) {
@@ -1106,17 +1126,19 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
slpm_pull ? pullnames[pull] : "same",
slpm_output ? (output ? "output" : "input")
: "same",
- slpm_val ? (val ? "high" : "low") : "same");
+ slpm_val ? str_high_low(val) : "same");
}
dev_dbg(nmk_chip->chip.parent,
"pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n",
pin, cfg, pullnames[pull], slpmnames[slpm],
output ? "output " : "input",
- output ? (val ? "high" : "low") : "",
- lowemi ? "on" : "off");
+ output ? str_high_low(val) : "",
+ str_on_off(lowemi));
- clk_enable(nmk_chip->clk);
+ ret = clk_enable(nmk_chip->clk);
+ if (ret)
+ return ret;
if (gpiomode)
/* No glitch when going to GPIO mode */
__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0b13d7f17b32..42547f64453e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
- seq_printf(s, " (0x%x",
- pinconf_to_config_argument(config));
+ u32 val = pinconf_to_config_argument(config);
+
if (items[i].format)
- seq_printf(s, " %s)", items[i].format);
+ seq_printf(s, " (%u %s)", val, items[i].format);
else
- seq_puts(s, ")");
+ seq_printf(s, " (0x%x)", val);
}
}
}
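The user-visible effect of this hunk: when a dumped config item carries a unit string, its argument is now printed in decimal next to that unit instead of as a bare hex value; items without a unit keep the old hex form. A standalone sketch of the two output shapes (the item names and the "usec" unit are placeholders for whatever conf_items[] actually carries):

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 10000;

		/* item with a unit string: decimal plus the unit */
		printf("input debounce (%u %s)\n", val, "usec");
		/* item without a unit string: hex, as before */
		printf("output drive strength (0x%x)\n", val);
		return 0;
	}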
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index fff6d4209ad5..1d7fdcdec4c8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include "core.h"
@@ -458,7 +459,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (err)
dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
- on ? "enable" : "disable");
+ str_enable_disable(on));
return 0;
}
@@ -908,12 +909,13 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-static int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
int i;
+ u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
@@ -925,11 +927,11 @@ static int amd_gpio_suspend(struct device *dev)
gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
/* mask any interrupts not intended to be a wake source */
- if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
+ if (!(gpio_dev->saved_regs[i] & wake_mask)) {
writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
gpio_dev->base + pin * 4);
- pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
- pin);
+ pm_pr_dbg("Disabling GPIO #%d interrupt for %s.\n",
+ pin, is_suspend ? "suspend" : "hibernate");
}
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -938,6 +940,16 @@ static int amd_gpio_suspend(struct device *dev)
return 0;
}
+static int amd_gpio_suspend(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, true);
+}
+
+static int amd_gpio_hibernate(struct device *dev)
+{
+ return amd_gpio_suspend_hibernate_common(dev, false);
+}
+
static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
@@ -961,8 +973,12 @@ static int amd_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops amd_gpio_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
- amd_gpio_resume)
+ .suspend_late = amd_gpio_suspend,
+ .resume_early = amd_gpio_resume,
+ .freeze_late = amd_gpio_hibernate,
+ .thaw_early = amd_gpio_resume,
+ .poweroff_late = amd_gpio_hibernate,
+ .restore_early = amd_gpio_resume,
};
#endif
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 667be49c3f48..3a1e5bffaf6e 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -80,10 +80,9 @@
#define FUNCTION_MASK GENMASK(1, 0)
#define FUNCTION_INVALID GENMASK(7, 0)
-#define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \
- BIT(WAKE_CNTRL_OFF_S3) | \
- BIT(WAKE_CNTRL_OFF_S4) | \
- BIT(WAKECNTRL_Z_OFF))
+#define WAKE_SOURCE_SUSPEND (BIT(WAKE_CNTRL_OFF_S0I3) | \
+ BIT(WAKE_CNTRL_OFF_S3))
+#define WAKE_SOURCE_HIBERNATE BIT(WAKE_CNTRL_OFF_S4)
struct amd_function {
const char *name;
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 0d6c2027d4c1..d73004b4a45e 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -42,7 +42,7 @@
#define CY8C95X0_PORTSEL 0x18
/* Port settings, write PORTSEL first */
#define CY8C95X0_INTMASK 0x19
-#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_SELPWM 0x1A
#define CY8C95X0_INVERT 0x1B
#define CY8C95X0_DIRECTION 0x1C
/* Drive mode register change state on writing '1' */
@@ -328,14 +328,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
/*
- * Only 12 registers are present per port (see Table 6 in the
- * datasheet).
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
*/
- if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
- return true;
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -344,8 +344,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
{
- if (reg >= CY8C95X0_VIRTUAL)
- return true;
+ /*
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
+ */
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
@@ -353,6 +356,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
case CY8C95X0_DEVID:
return false;
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -365,8 +369,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
case CY8C95X0_INTMASK:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
- case CY8C95X0_PWMSEL:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
case CY8C95X0_DRV_PD:
@@ -395,7 +399,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
{
switch (reg) {
case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
@@ -466,7 +470,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
.max_register = 0, /* Updated at runtime */
.num_reg_defaults_raw = 0, /* Updated at runtime */
.use_single_read = true, /* Workaround for regcache bug */
+#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
+ .disable_locking = false,
+#else
.disable_locking = true,
+#endif
};
static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
@@ -789,7 +797,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DIRECTION;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT:
reg = CY8C95X0_OUTPUT;
@@ -868,7 +876,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DRV_PP_FAST;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
return cy8c95x0_pinmux_direction(chip, off, !arg);
@@ -1153,7 +1161,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
bitmap_zero(mask, MAX_LINE);
__set_bit(pin, mask);
- if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
seq_puts(s, "not available");
return;
}
@@ -1198,7 +1206,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
}
static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1347,7 +1355,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
ret = devm_request_threaded_irq(chip->dev, irq,
NULL, cy8c95x0_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (ret) {
dev_err(chip->dev, "failed to request irq %d\n", irq);
@@ -1438,15 +1446,15 @@ static int cy8c95x0_probe(struct i2c_client *client)
switch (chip->tpin) {
case 20:
strscpy(chip->name, cy8c95x0_id[0].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE - 1;
break;
case 40:
strscpy(chip->name, cy8c95x0_id[1].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE - 1;
break;
case 60:
strscpy(chip->name, cy8c95x0_id[2].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE - 1;
break;
default:
return -ENODEV;
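The range_max change is a plain off-by-one: a window of n_ports muxed register banks of MUXED_STRIDE bytes each, starting at CY8C95X0_VIRTUAL, ends at CY8C95X0_VIRTUAL + n_ports * MUXED_STRIDE - 1, so the old values declared one register beyond the window. A quick standalone check with placeholder numbers (the driver's real CY8C95X0_VIRTUAL and MUXED_STRIDE values are not visible in this hunk):

	#include <stdio.h>

	int main(void)
	{
		unsigned int base = 0x40, stride = 0x20, nports = 3;
		unsigned int last = base + nports * stride - 1;

		/* 3 * 0x20 = 96 registers => 0x40 .. 0x9f inclusive */
		printf("valid muxed registers: %#x .. %#x\n", base, last);
		return 0;
	}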
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 631612539af7..e9f61927858d 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -2237,7 +2238,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2245,7 +2246,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2259,7 +2260,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"pin group %s could not be %s: "
"probably a hardware limitation\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
dev_err(pmx->dev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
@@ -2267,7 +2268,7 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
dev_dbg(pmx->dev,
"padgroup %s %s\n",
gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ str_enabled_disabled(enabled));
}
}
@@ -2588,7 +2589,7 @@ static int gemini_pmx_probe(struct platform_device *pdev)
tmp = val;
for_each_set_bit(i, &tmp, PADS_MAXBIT) {
dev_dbg(dev, "pad group %s %s\n", gemini_padgroups[i],
- (val & BIT(i)) ? "enabled" : "disabled");
+ str_enabled_disabled(val & BIT(i)));
}
/* Check if flash pin is set */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 31703737731b..bc7ee54e062b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3699,7 +3699,7 @@ static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s", gpio_chip->label);
+ seq_puts(p, gpio_chip->label);
}
static const struct irq_chip ingenic_gpio_irqchip = {
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 61532a7a612a..329d54b11529 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1777,7 +1777,7 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct ocelot_match_data luton_desc = {
+static const struct ocelot_match_data luton_desc = {
.desc = {
.name = "luton-pinctrl",
.pins = luton_pins,
@@ -1788,7 +1788,7 @@ static struct ocelot_match_data luton_desc = {
},
};
-static struct ocelot_match_data serval_desc = {
+static const struct ocelot_match_data serval_desc = {
.desc = {
.name = "serval-pinctrl",
.pins = serval_pins,
@@ -1799,7 +1799,7 @@ static struct ocelot_match_data serval_desc = {
},
};
-static struct ocelot_match_data ocelot_desc = {
+static const struct ocelot_match_data ocelot_desc = {
.desc = {
.name = "ocelot-pinctrl",
.pins = ocelot_pins,
@@ -1810,7 +1810,7 @@ static struct ocelot_match_data ocelot_desc = {
},
};
-static struct ocelot_match_data jaguar2_desc = {
+static const struct ocelot_match_data jaguar2_desc = {
.desc = {
.name = "jaguar2-pinctrl",
.pins = jaguar2_pins,
@@ -1821,7 +1821,7 @@ static struct ocelot_match_data jaguar2_desc = {
},
};
-static struct ocelot_match_data servalt_desc = {
+static const struct ocelot_match_data servalt_desc = {
.desc = {
.name = "servalt-pinctrl",
.pins = servalt_pins,
@@ -1832,7 +1832,7 @@ static struct ocelot_match_data servalt_desc = {
},
};
-static struct ocelot_match_data sparx5_desc = {
+static const struct ocelot_match_data sparx5_desc = {
.desc = {
.name = "sparx5-pinctrl",
.pins = sparx5_pins,
@@ -1850,7 +1850,7 @@ static struct ocelot_match_data sparx5_desc = {
},
};
-static struct ocelot_match_data lan966x_desc = {
+static const struct ocelot_match_data lan966x_desc = {
.desc = {
.name = "lan966x-pinctrl",
.pins = lan966x_pins,
@@ -1867,7 +1867,7 @@ static struct ocelot_match_data lan966x_desc = {
},
};
-static struct ocelot_match_data lan969x_desc = {
+static const struct ocelot_match_data lan969x_desc = {
.desc = {
.name = "lan969x-pinctrl",
.pins = lan969x_pins,
@@ -2116,7 +2116,7 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_level_irqchip = {
+static const struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
@@ -2126,7 +2126,7 @@ static struct irq_chip ocelot_level_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
-static struct irq_chip ocelot_irqchip = {
+static const struct irq_chip ocelot_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
.irq_ack = ocelot_irq_ack,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 36d4eaf0ebd1..15145882950f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Pinctrl driver for Rockchip SoCs
- *
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
@@ -2003,6 +2003,151 @@ static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3562_DRV_BITS_PER_PIN 8
+#define RK3562_DRV_PINS_PER_REG 2
+#define RK3562_DRV_GPIO0_OFFSET 0x20070
+#define RK3562_DRV_GPIO1_OFFSET 0x200
+#define RK3562_DRV_GPIO2_OFFSET 0x240
+#define RK3562_DRV_GPIO3_OFFSET 0x10280
+#define RK3562_DRV_GPIO4_OFFSET 0x102C0
+
+static int rk3562_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_DRV_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_DRV_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_DRV_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_DRV_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_DRV_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ break;
+ }
+
+ *reg += ((pin_num / RK3562_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_DRV_PINS_PER_REG;
+ *bit *= RK3562_DRV_BITS_PER_PIN;
+
+ return 0;
+}
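The arithmetic in this helper packs two 8-bit drive-strength fields into each 32-bit register: the bank base plus (pin / 2) * 4 selects the word, and (pin % 2) * 8 selects the byte lane within it. A worked example as a standalone snippet (bank 1, pin 13, i.e. GPIO1_B5):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pin = 13;				/* GPIO1_B5 */
		unsigned int reg = 0x200 + (pin / 2) * 4;	/* 0x200 + 0x18 = 0x218 */
		unsigned int bit = (pin % 2) * 8;		/* field starts at bit 8 */

		printf("bank 1 pin %u: reg %#x, bit %u\n", pin, reg, bit);
		return 0;
	}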
+
+#define RK3562_PULL_BITS_PER_PIN 2
+#define RK3562_PULL_PINS_PER_REG 8
+#define RK3562_PULL_GPIO0_OFFSET 0x20020
+#define RK3562_PULL_GPIO1_OFFSET 0x80
+#define RK3562_PULL_GPIO2_OFFSET 0x90
+#define RK3562_PULL_GPIO3_OFFSET 0x100A0
+#define RK3562_PULL_GPIO4_OFFSET 0x100B0
+
+static int rk3562_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_PULL_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_PULL_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_PULL_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_PULL_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_PULL_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ break;
+ }
+
+ *reg += ((pin_num / RK3562_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_PULL_PINS_PER_REG;
+ *bit *= RK3562_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3562_SMT_BITS_PER_PIN 2
+#define RK3562_SMT_PINS_PER_REG 8
+#define RK3562_SMT_GPIO0_OFFSET 0x20030
+#define RK3562_SMT_GPIO1_OFFSET 0xC0
+#define RK3562_SMT_GPIO2_OFFSET 0xD0
+#define RK3562_SMT_GPIO3_OFFSET 0x100E0
+#define RK3562_SMT_GPIO4_OFFSET 0x100F0
+
+static int rk3562_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ switch (bank->bank_num) {
+ case 0:
+ *reg = RK3562_SMT_GPIO0_OFFSET;
+ break;
+
+ case 1:
+ *reg = RK3562_SMT_GPIO1_OFFSET;
+ break;
+
+ case 2:
+ *reg = RK3562_SMT_GPIO2_OFFSET;
+ break;
+
+ case 3:
+ *reg = RK3562_SMT_GPIO3_OFFSET;
+ break;
+
+ case 4:
+ *reg = RK3562_SMT_GPIO4_OFFSET;
+ break;
+
+ default:
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ break;
+ }
+
+ *reg += ((pin_num / RK3562_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3562_SMT_PINS_PER_REG;
+ *bit *= RK3562_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
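The three rk3562_calc_*_reg_and_bit() helpers added above all follow the same layout rule: each GPIO bank has a fixed base offset, pins are packed a fixed number per 32-bit register, and every pin owns a fixed-width field inside that register. A minimal standalone sketch of that arithmetic, reusing the drive-strength constants from the hunk above (the bank and pin chosen are arbitrary examples, not taken from any board):

#include <stdio.h>

#define RK3562_DRV_BITS_PER_PIN		8
#define RK3562_DRV_PINS_PER_REG		2
#define RK3562_DRV_GPIO1_OFFSET		0x200

int main(void)
{
	int pin_num = 5;	/* example: sixth pin of bank 1 (arbitrary choice) */
	int reg = RK3562_DRV_GPIO1_OFFSET;
	int bit;

	/* every RK3562_DRV_PINS_PER_REG pins move on to the next 32-bit register */
	reg += (pin_num / RK3562_DRV_PINS_PER_REG) * 4;
	/* position of this pin's field inside that register */
	bit = (pin_num % RK3562_DRV_PINS_PER_REG) * RK3562_DRV_BITS_PER_PIN;

	printf("gpio1-%d drive strength: GRF offset 0x%x, bits %d..%d\n",
	       pin_num, reg, bit, bit + RK3562_DRV_BITS_PER_PIN - 1);
	return 0;	/* prints: gpio1-5 drive strength: GRF offset 0x208, bits 8..15 */
}
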
#define RK3568_PULL_PMU_OFFSET 0x20
#define RK3568_PULL_GRF_OFFSET 0x80
#define RK3568_PULL_BITS_PER_PIN 2
@@ -2495,7 +2640,8 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3588_DRV_BITS_PER_PIN;
ret = strength;
goto config;
- } else if (ctrl->type == RK3568) {
+ } else if (ctrl->type == RK3562 ||
+ ctrl->type == RK3568) {
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
@@ -2639,6 +2785,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2699,6 +2846,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -2810,6 +2958,7 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
data >>= bit;
switch (ctrl->type) {
+ case RK3562:
case RK3568:
return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1);
default:
@@ -2839,6 +2988,7 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
/* enable the write to the equivalent lower bits */
switch (ctrl->type) {
+ case RK3562:
case RK3568:
data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);
rmask = data | (data >> 16);
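The write-enable construction in this hunk (and in the matching pull-setting path) follows the usual Rockchip GRF register convention: the upper 16 bits of the register act as per-bit write enables for the lower 16 bits, so a field can be updated without a read-modify-write cycle. A small sketch of how such a masked write value is composed, assuming a field of the given width at the given bit position (the helper name is illustrative, not something defined by the driver):

#include <stdint.h>

/*
 * Compose the 32-bit value written to a Rockchip GRF-style register:
 * write-enable bits for the field go into the upper half-word, the new
 * field value into the lower half-word.
 */
static uint32_t grf_masked_write(unsigned int bit, unsigned int width,
				 uint32_t value)
{
	uint32_t mask = (1u << width) - 1;

	return (mask << (bit + 16)) | ((value & mask) << bit);
}

/* Example: setting a 2-bit schmitt field at bit 6 to 1 yields 0x00c00040. */
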
@@ -2965,6 +3115,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3328:
case RK3368:
case RK3399:
+ case RK3562:
case RK3568:
case RK3576:
case RK3588:
@@ -4086,6 +4237,49 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3562_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS_OFFSET(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20000, 0x20008, 0x20010, 0x20018),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0, 0x08, 0x10, 0x18),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10040, 0x10048, 0x10050, 0x10058),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(4, 16, "gpio4",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0,
+ 0,
+ 0x10060, 0x10068, 0, 0),
+};
+
+static struct rockchip_pin_ctrl rk3562_pin_ctrl __maybe_unused = {
+ .pin_banks = rk3562_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3562_pin_banks),
+ .label = "RK3562-GPIO",
+ .type = RK3562,
+ .pull_calc_reg = rk3562_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3562_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3562_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3568_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
@@ -4210,6 +4404,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3562-pinctrl",
+ .data = &rk3562_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
{ .compatible = "rockchip,rk3576-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 6ebbb0a88ce7..87a20cec8e21 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2020-2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2020-2024 Rockchip Electronics Co., Ltd.
*
* Copyright (c) 2013 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
@@ -196,6 +196,7 @@ enum rockchip_pinctrl_type {
RK3328,
RK3368,
RK3399,
+ RK3562,
RK3568,
RK3576,
RK3588,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 521f6fef0b9f..aae01120dc52 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -380,7 +380,7 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "input %s ", str_high_low(val));
if (type)
seq_printf(s, "with internal pull-%s ",
- pupd ? "up" : "down");
+ str_up_down(pupd));
else
seq_printf(s, "%s ", pupd ? "floating" : "analog");
}
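This hunk, like several below (pinctrl-msm.c and the qcom spmi/ssbi GPIO and MPP drivers), swaps an open-coded ternary for the helpers from <linux/string_choices.h>. Those helpers are plain constant-string selectors; roughly, and with the caveat that the in-kernel definitions may differ in minor details, the two used in this series amount to:

#include <stdbool.h>

static inline const char *str_high_low(bool v)
{
	return v ? "high" : "low";
}

static inline const char *str_up_down(bool v)
{
	return v ? "up" : "down";
}
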
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 206226318e45..35f47660a56b 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -137,6 +137,12 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8917
+ tristate "Qualcomm 8917 pin controller driver"
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8917 platform.
+
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 9a23d41d801c..5c4100925cf9 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8917) += pinctrl-msm8917.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 796299cd2e4e..0d610b076da3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -233,7 +233,10 @@ enum ipq5424_functions {
msm_mux_sdc_clk,
msm_mux_sdc_cmd,
msm_mux_sdc_data,
- msm_mux_spi0,
+ msm_mux_spi0_clk,
+ msm_mux_spi0_cs,
+ msm_mux_spi0_miso,
+ msm_mux_spi0_mosi,
msm_mux_spi1,
msm_mux_spi10,
msm_mux_spi11,
@@ -297,8 +300,8 @@ static const char * const qspi_clk_groups[] = {
"gpio5",
};
-static const char * const spi0_groups[] = {
- "gpio6", "gpio7", "gpio8", "gpio9",
+static const char * const spi0_clk_groups[] = {
+ "gpio6",
};
static const char * const pwm1_groups[] = {
@@ -315,14 +318,26 @@ static const char * const qdss_tracedata_a_groups[] = {
"gpio38", "gpio39",
};
+static const char * const spi0_cs_groups[] = {
+ "gpio7",
+};
+
static const char * const cri_trng1_groups[] = {
"gpio7",
};
+static const char * const spi0_miso_groups[] = {
+ "gpio8",
+};
+
static const char * const cri_trng2_groups[] = {
"gpio8",
};
+static const char * const spi0_mosi_groups[] = {
+ "gpio9",
+};
+
static const char * const cri_trng3_groups[] = {
"gpio9",
};
@@ -680,7 +695,10 @@ static const struct pinfunction ipq5424_functions[] = {
MSM_PIN_FUNCTION(sdc_clk),
MSM_PIN_FUNCTION(sdc_cmd),
MSM_PIN_FUNCTION(sdc_data),
- MSM_PIN_FUNCTION(spi0),
+ MSM_PIN_FUNCTION(spi0_clk),
+ MSM_PIN_FUNCTION(spi0_cs),
+ MSM_PIN_FUNCTION(spi0_miso),
+ MSM_PIN_FUNCTION(spi0_mosi),
MSM_PIN_FUNCTION(spi1),
MSM_PIN_FUNCTION(spi10),
MSM_PIN_FUNCTION(spi11),
@@ -700,10 +718,10 @@ static const struct msm_pingroup ipq5424_groups[] = {
PINGROUP(3, sdc_data, qspi_data, pwm2, _, _, _, _, _, _),
PINGROUP(4, sdc_cmd, qspi_cs, _, _, _, _, _, _, _),
PINGROUP(5, sdc_clk, qspi_clk, _, _, _, _, _, _, _),
- PINGROUP(6, spi0, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
- PINGROUP(7, spi0, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
- PINGROUP(8, spi0, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
- PINGROUP(9, spi0, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(6, spi0_clk, pwm1, _, cri_trng0, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(7, spi0_cs, pwm1, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(8, spi0_miso, pwm1, wci_txd, wci_rxd, _, cri_trng2, qdss_tracedata_a, _, _),
+ PINGROUP(9, spi0_mosi, pwm1, _, cri_trng3, qdss_tracedata_a, _, _, _, _),
PINGROUP(10, uart0, pwm0, spi11, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(11, uart0, pwm0, spi1, _, wci_txd, wci_rxd, _, qdss_tracedata_a, _),
PINGROUP(12, uart0, pwm0, spi11, _, prng_rosc0, qdss_tracedata_a, _, _, _),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ec913c2e200f..47daa47153c9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -714,7 +715,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
}
seq_printf(s, " %-8s: %-3s", g->grp.name, is_out ? "out" : "in");
- seq_printf(s, " %-4s func%d", val ? "high" : "low", func);
+ seq_printf(s, " %-4s func%d", str_high_low(val), func);
seq_printf(s, " %dmA", msm_regval_to_drive(drive));
if (pctrl->soc->pull_no_keeper)
seq_printf(s, " %s", pulls_no_keeper[pull]);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
new file mode 100644
index 000000000000..cff137bb3b23
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8917_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "SDC1_CLK"),
+ PINCTRL_PIN(135, "SDC1_CMD"),
+ PINCTRL_PIN(136, "SDC1_DATA"),
+ PINCTRL_PIN(137, "SDC1_RCLK"),
+ PINCTRL_PIN(138, "SDC2_CLK"),
+ PINCTRL_PIN(139, "SDC2_CMD"),
+ PINCTRL_PIN(140, "SDC2_DATA"),
+ PINCTRL_PIN(141, "QDSD_CLK"),
+ PINCTRL_PIN(142, "QDSD_CMD"),
+ PINCTRL_PIN(143, "QDSD_DATA0"),
+ PINCTRL_PIN(144, "QDSD_DATA1"),
+ PINCTRL_PIN(145, "QDSD_DATA2"),
+ PINCTRL_PIN(146, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+
+static const unsigned int sdc1_clk_pins[] = { 134 };
+static const unsigned int sdc1_cmd_pins[] = { 135 };
+static const unsigned int sdc1_data_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc2_clk_pins[] = { 138 };
+static const unsigned int sdc2_cmd_pins[] = { 139 };
+static const unsigned int sdc2_data_pins[] = { 140 };
+static const unsigned int qdsd_clk_pins[] = { 141 };
+static const unsigned int qdsd_cmd_pins[] = { 142 };
+static const unsigned int qdsd_data0_pins[] = { 143 };
+static const unsigned int qdsd_data1_pins[] = { 144 };
+static const unsigned int qdsd_data2_pins[] = { 145 };
+static const unsigned int qdsd_data3_pins[] = { 146 };
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
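As the PINGROUP() macro above encodes, every MSM8917 TLMM GPIO owns a 0x1000-byte register window: the mux/pull/drive control register sits at the window base, the I/O register 4 bytes in, and the interrupt configuration and status registers at offsets 0x8 and 0xc. A quick standalone sketch of that layout for one pin (gpio42 is an arbitrary example, and the TLMM block's bus address is not part of this diff, so only offsets are shown):

#include <stdio.h>

#define TLMM_GPIO_STRIDE	0x1000	/* one register window per GPIO */

int main(void)
{
	unsigned int gpio = 42;	/* arbitrary example pin */
	unsigned int base = gpio * TLMM_GPIO_STRIDE;

	printf("gpio%u: ctl=0x%x io=0x%x intr_cfg=0x%x intr_status=0x%x\n",
	       gpio, base, base + 0x4, base + 0x8, base + 0xc);
	return 0;	/* gpio42: ctl=0x2a000 io=0x2a004 intr_cfg=0x2a008 intr_status=0x2a00c */
}
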
+enum msm8917_functions {
+ msm_mux_accel_int,
+ msm_mux_adsp_ext,
+ msm_mux_alsp_int,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac_to_gpio_native,
+ msm_mux_atest_gpsadc_dtest0_native,
+ msm_mux_atest_gpsadc_dtest1_native,
+ msm_mux_atest_tsens,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_audio_ref,
+ msm_mux_audio_reset,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp6_spi,
+ msm_mux_blsp8_spi,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_i2c7,
+ msm_mux_blsp_i2c8,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_spi8,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6,
+ msm_mux_blsp_uart7,
+ msm_mux_blsp_uart8,
+ msm_mux_cam0_ldo,
+ msm_mux_cam1_rst,
+ msm_mux_cam1_standby,
+ msm_mux_cam2_rst,
+ msm_mux_cam2_standby,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cdc_pdm0,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_codec_mad,
+ msm_mux_coex_uart,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi_cdc,
+ msm_mux_ebi_ch0,
+ msm_mux_ext_lpass,
+ msm_mux_forced_usb,
+ msm_mux_fp_gpio,
+ msm_mux_fp_int,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_gsm0_tx,
+ msm_mux_key_focus,
+ msm_mux_key_snapshot,
+ msm_mux_key_volp,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_m_voc,
+ msm_mux_mag_int,
+ msm_mux_mdp_vsync,
+ msm_mux_mipi_dsi0,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_pps_in_a,
+ msm_mux_nav_pps_in_b,
+ msm_mux_nav_tsync,
+ msm_mux_nfc_pwr,
+ msm_mux_ov_ldo,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_sensor_rst,
+ msm_mux_smb_int,
+ msm_mux_ssbi_wtr1,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_us_emitter,
+ msm_mux_us_euro,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan2,
+ msm_mux_webcam_rst,
+ msm_mux_webcam_standby,
+ msm_mux_wsa_io,
+ msm_mux_wsa_irq,
+ msm_mux__,
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio0", "gpio1", "gpio6", "gpio7", "gpio12", "gpio13", "gpio23",
+ "gpio42", "gpio43", "gpio44", "gpio47", "gpio66", "gpio86", "gpio87",
+ "gpio88", "gpio92",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133",
+};
+
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio1",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio2",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio2",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const pbs0_groups[] = {
+ "gpio8",
+};
+
+static const char * const pbs1_groups[] = {
+ "gpio9",
+};
+
+static const char * const pwr_modem_enabled_b_groups[] = {
+ "gpio9",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio10",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio4",
+};
+
+static const char * const atest_combodac_to_gpio_native_groups[] = {
+ "gpio4", "gpio12", "gpio13", "gpio20", "gpio21", "gpio28", "gpio29",
+ "gpio30", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44",
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio67", "gpio115",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio5",
+};
+
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio6",
+};
+
+static const char * const pbs2_groups[] = {
+ "gpio7",
+};
+
+static const char * const atest_gpsadc_dtest0_native_groups[] = {
+ "gpio7",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio11",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const blsp_uart4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio12", "gpio13", "gpio94", "gpio95",
+};
+
+static const char * const pwr_nav_enabled_b_groups[] = {
+ "gpio12",
+};
+
+static const char * const codec_mad_groups[] = {
+ "gpio13",
+};
+
+static const char * const pwr_crypto_enabled_b_groups[] = {
+ "gpio13",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio16",
+};
+
+static const char * const atest_bbrx1_groups[] = {
+ "gpio16",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_wlan0_groups[] = {
+ "gpio22",
+};
+
+static const char * const atest_bbrx0_groups[] = {
+ "gpio17",
+};
+
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_gpsadc_dtest1_native_groups[] = {
+ "gpio18",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio38", "gpio39",
+ "gpio40", "gpio50",
+};
+
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+
+static const char * const atest_wlan1_groups[] = {
+ "gpio23",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio25",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const pwr_modem_enabled_a_groups[] = {
+ "gpio29",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio33",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio34",
+};
+
+static const char * const cam1_standby_groups[] = {
+ "gpio35",
+};
+
+static const char * const pwr_nav_enabled_a_groups[] = {
+ "gpio35",
+};
+
+static const char * const cam1_rst_groups[] = {
+ "gpio36",
+};
+
+static const char * const pwr_crypto_enabled_a_groups[] = {
+ "gpio36",
+};
+
+static const char * const forced_usb_groups[] = {
+ "gpio37",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio37",
+};
+
+static const char * const cam2_rst_groups[] = {
+ "gpio38",
+};
+
+static const char * const webcam_standby_groups[] = {
+ "gpio39",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio39",
+};
+
+static const char * const webcam_rst_groups[] = {
+ "gpio40",
+};
+
+static const char * const ov_ldo_groups[] = {
+ "gpio41",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio41",
+};
+
+static const char * const accel_int_groups[] = {
+ "gpio42",
+};
+
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio42",
+};
+
+static const char * const alsp_int_groups[] = {
+ "gpio43",
+};
+
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio43",
+};
+
+static const char * const mag_int_groups[] = {
+ "gpio44",
+};
+
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio44",
+};
+
+static const char * const blsp6_spi_groups[] = {
+ "gpio47",
+};
+
+static const char * const fp_int_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const uim_batt_groups[] = {
+ "gpio49",
+};
+
+static const char * const cam2_standby_groups[] = {
+ "gpio50",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio51",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio52",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio53",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio54",
+};
+
+static const char * const uim2_data_groups[] = {
+ "gpio55",
+};
+
+static const char * const uim2_clk_groups[] = {
+ "gpio56",
+};
+
+static const char * const uim2_reset_groups[] = {
+ "gpio57",
+};
+
+static const char * const uim2_present_groups[] = {
+ "gpio58",
+};
+
+static const char * const sensor_rst_groups[] = {
+ "gpio59",
+};
+
+static const char * const mipi_dsi0_groups[] = {
+ "gpio60",
+};
+
+static const char * const smb_int_groups[] = {
+ "gpio61",
+};
+
+static const char * const cam0_ldo_groups[] = {
+ "gpio62",
+};
+
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio63",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio63",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio63", "gpio65",
+};
+
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio66", "gpio85", "gpio86", "gpio88", "gpio94", "gpio95",
+};
+
+static const char * const sdcard_det_groups[] = {
+ "gpio67",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio67",
+};
+
+static const char * const ebi_cdc_groups[] = {
+ "gpio67", "gpio69", "gpio118", "gpio119", "gpio120", "gpio123",
+};
+
+static const char * const audio_reset_groups[] = {
+ "gpio68",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio68",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio69",
+};
+
+static const char * const cdc_pdm0_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74",
+};
+
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio69",
+};
+
+static const char * const lpass_slimbus_groups[] = {
+ "gpio70",
+};
+
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio71",
+};
+
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio72",
+};
+
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+
+static const char * const wcss_bt_groups[] = {
+ "gpio75", "gpio83", "gpio84",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio75",
+};
+
+static const char * const ebi_ch0_groups[] = {
+ "gpio75",
+};
+
+static const char * const wcss_wlan2_groups[] = {
+ "gpio76",
+};
+
+static const char * const wcss_wlan1_groups[] = {
+ "gpio77",
+};
+
+static const char * const wcss_wlan0_groups[] = {
+ "gpio78",
+};
+
+static const char * const wcss_wlan_groups[] = {
+ "gpio79", "gpio80",
+};
+
+static const char * const wcss_fm_groups[] = {
+ "gpio81", "gpio82",
+};
+
+static const char * const ext_lpass_groups[] = {
+ "gpio81",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio82",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio83",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio84",
+};
+
+static const char * const blsp_spi7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const blsp_uart7_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio87",
+};
+
+static const char * const blsp_i2c7_groups[] = {
+ "gpio87", "gpio88",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio87",
+};
+
+static const char * const dmic0_clk_groups[] = {
+ "gpio89",
+};
+
+static const char * const dmic0_data_groups[] = {
+ "gpio90",
+};
+
+static const char * const key_volp_groups[] = {
+ "gpio91",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio91",
+};
+
+static const char * const us_emitter_groups[] = {
+ "gpio92",
+};
+
+static const char * const wsa_irq_groups[] = {
+ "gpio93",
+};
+
+static const char * const wsa_io_groups[] = {
+ "gpio94", "gpio95",
+};
+
+static const char * const blsp_spi8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_uart8_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const blsp_i2c8_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio98", "gpio99",
+};
+
+static const char * const nav_pps_in_a_groups[] = {
+ "gpio115",
+};
+
+static const char * const pa_indicator_groups[] = {
+ "gpio116",
+};
+
+static const char * const modem_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_tsync_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_in_b_groups[] = {
+ "gpio117",
+};
+
+static const char * const nav_pps_groups[] = {
+ "gpio117",
+};
+
+static const char * const gsm0_tx_groups[] = {
+ "gpio119",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio120",
+};
+
+static const char * const atest_tsens_groups[] = {
+ "gpio120",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio121", "gpio122",
+};
+
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio122", "gpio123",
+};
+
+static const char * const fp_gpio_groups[] = {
+ "gpio124",
+};
+
+static const char * const coex_uart_groups[] = {
+ "gpio124", "gpio127",
+};
+
+static const char * const key_snapshot_groups[] = {
+ "gpio127",
+};
+
+static const char * const key_focus_groups[] = {
+ "gpio128",
+};
+
+static const char * const nfc_pwr_groups[] = {
+ "gpio129",
+};
+
+static const char * const blsp8_spi_groups[] = {
+ "gpio130",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio132",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio133",
+};
+
+static const struct pinfunction msm8917_functions[] = {
+ MSM_PIN_FUNCTION(accel_int),
+ MSM_PIN_FUNCTION(adsp_ext),
+ MSM_PIN_FUNCTION(alsp_int),
+ MSM_PIN_FUNCTION(atest_bbrx0),
+ MSM_PIN_FUNCTION(atest_bbrx1),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_char0),
+ MSM_PIN_FUNCTION(atest_char1),
+ MSM_PIN_FUNCTION(atest_char2),
+ MSM_PIN_FUNCTION(atest_char3),
+ MSM_PIN_FUNCTION(atest_combodac_to_gpio_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest0_native),
+ MSM_PIN_FUNCTION(atest_gpsadc_dtest1_native),
+ MSM_PIN_FUNCTION(atest_tsens),
+ MSM_PIN_FUNCTION(atest_wlan0),
+ MSM_PIN_FUNCTION(atest_wlan1),
+ MSM_PIN_FUNCTION(audio_ref),
+ MSM_PIN_FUNCTION(audio_reset),
+ MSM_PIN_FUNCTION(bimc_dte0),
+ MSM_PIN_FUNCTION(bimc_dte1),
+ MSM_PIN_FUNCTION(blsp6_spi),
+ MSM_PIN_FUNCTION(blsp8_spi),
+ MSM_PIN_FUNCTION(blsp_i2c1),
+ MSM_PIN_FUNCTION(blsp_i2c2),
+ MSM_PIN_FUNCTION(blsp_i2c3),
+ MSM_PIN_FUNCTION(blsp_i2c4),
+ MSM_PIN_FUNCTION(blsp_i2c5),
+ MSM_PIN_FUNCTION(blsp_i2c6),
+ MSM_PIN_FUNCTION(blsp_i2c7),
+ MSM_PIN_FUNCTION(blsp_i2c8),
+ MSM_PIN_FUNCTION(blsp_spi1),
+ MSM_PIN_FUNCTION(blsp_spi2),
+ MSM_PIN_FUNCTION(blsp_spi3),
+ MSM_PIN_FUNCTION(blsp_spi4),
+ MSM_PIN_FUNCTION(blsp_spi5),
+ MSM_PIN_FUNCTION(blsp_spi6),
+ MSM_PIN_FUNCTION(blsp_spi7),
+ MSM_PIN_FUNCTION(blsp_spi8),
+ MSM_PIN_FUNCTION(blsp_uart1),
+ MSM_PIN_FUNCTION(blsp_uart2),
+ MSM_PIN_FUNCTION(blsp_uart3),
+ MSM_PIN_FUNCTION(blsp_uart4),
+ MSM_PIN_FUNCTION(blsp_uart5),
+ MSM_PIN_FUNCTION(blsp_uart6),
+ MSM_PIN_FUNCTION(blsp_uart7),
+ MSM_PIN_FUNCTION(blsp_uart8),
+ MSM_PIN_FUNCTION(cam0_ldo),
+ MSM_PIN_FUNCTION(cam1_rst),
+ MSM_PIN_FUNCTION(cam1_standby),
+ MSM_PIN_FUNCTION(cam2_rst),
+ MSM_PIN_FUNCTION(cam2_standby),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async),
+ MSM_PIN_FUNCTION(cci_i2c),
+ MSM_PIN_FUNCTION(cci_timer0),
+ MSM_PIN_FUNCTION(cci_timer1),
+ MSM_PIN_FUNCTION(cdc_pdm0),
+ MSM_PIN_FUNCTION(codec_int1),
+ MSM_PIN_FUNCTION(codec_int2),
+ MSM_PIN_FUNCTION(codec_mad),
+ MSM_PIN_FUNCTION(coex_uart),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(cri_trng0),
+ MSM_PIN_FUNCTION(cri_trng1),
+ MSM_PIN_FUNCTION(dbg_out),
+ MSM_PIN_FUNCTION(dmic0_clk),
+ MSM_PIN_FUNCTION(dmic0_data),
+ MSM_PIN_FUNCTION(ebi_cdc),
+ MSM_PIN_FUNCTION(ebi_ch0),
+ MSM_PIN_FUNCTION(ext_lpass),
+ MSM_PIN_FUNCTION(forced_usb),
+ MSM_PIN_FUNCTION(fp_gpio),
+ MSM_PIN_FUNCTION(fp_int),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp1_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp2_clk_b),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_a),
+ MSM_PIN_FUNCTION(gcc_gp3_clk_b),
+ MSM_PIN_FUNCTION(gcc_plltest),
+ MSM_PIN_FUNCTION(gcc_tlmm),
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(gsm0_tx),
+ MSM_PIN_FUNCTION(key_focus),
+ MSM_PIN_FUNCTION(key_snapshot),
+ MSM_PIN_FUNCTION(key_volp),
+ MSM_PIN_FUNCTION(ldo_en),
+ MSM_PIN_FUNCTION(ldo_update),
+ MSM_PIN_FUNCTION(lpass_slimbus),
+ MSM_PIN_FUNCTION(lpass_slimbus0),
+ MSM_PIN_FUNCTION(lpass_slimbus1),
+ MSM_PIN_FUNCTION(m_voc),
+ MSM_PIN_FUNCTION(mag_int),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(mipi_dsi0),
+ MSM_PIN_FUNCTION(modem_tsync),
+ MSM_PIN_FUNCTION(nav_pps),
+ MSM_PIN_FUNCTION(nav_pps_in_a),
+ MSM_PIN_FUNCTION(nav_pps_in_b),
+ MSM_PIN_FUNCTION(nav_tsync),
+ MSM_PIN_FUNCTION(nfc_pwr),
+ MSM_PIN_FUNCTION(ov_ldo),
+ MSM_PIN_FUNCTION(pa_indicator),
+ MSM_PIN_FUNCTION(pbs0),
+ MSM_PIN_FUNCTION(pbs1),
+ MSM_PIN_FUNCTION(pbs2),
+ MSM_PIN_FUNCTION(pri_mi2s),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(pri_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(pri_mi2s_ws),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_a),
+ MSM_PIN_FUNCTION(pwr_crypto_enabled_b),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_a),
+ MSM_PIN_FUNCTION(pwr_modem_enabled_b),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_a),
+ MSM_PIN_FUNCTION(pwr_nav_enabled_b),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_in_b1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_a1),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b0),
+ MSM_PIN_FUNCTION(qdss_cti_trig_out_b1),
+ MSM_PIN_FUNCTION(qdss_traceclk_a),
+ MSM_PIN_FUNCTION(qdss_traceclk_b),
+ MSM_PIN_FUNCTION(qdss_tracectl_a),
+ MSM_PIN_FUNCTION(qdss_tracectl_b),
+ MSM_PIN_FUNCTION(qdss_tracedata_a),
+ MSM_PIN_FUNCTION(qdss_tracedata_b),
+ MSM_PIN_FUNCTION(sd_write),
+ MSM_PIN_FUNCTION(sdcard_det),
+ MSM_PIN_FUNCTION(sec_mi2s),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_a),
+ MSM_PIN_FUNCTION(sec_mi2s_mclk_b),
+ MSM_PIN_FUNCTION(sensor_rst),
+ MSM_PIN_FUNCTION(smb_int),
+ MSM_PIN_FUNCTION(ssbi_wtr1),
+ MSM_PIN_FUNCTION(ts_resout),
+ MSM_PIN_FUNCTION(ts_sample),
+ MSM_PIN_FUNCTION(uim1_clk),
+ MSM_PIN_FUNCTION(uim1_data),
+ MSM_PIN_FUNCTION(uim1_present),
+ MSM_PIN_FUNCTION(uim1_reset),
+ MSM_PIN_FUNCTION(uim2_clk),
+ MSM_PIN_FUNCTION(uim2_data),
+ MSM_PIN_FUNCTION(uim2_present),
+ MSM_PIN_FUNCTION(uim2_reset),
+ MSM_PIN_FUNCTION(uim_batt),
+ MSM_PIN_FUNCTION(us_emitter),
+ MSM_PIN_FUNCTION(us_euro),
+ MSM_PIN_FUNCTION(wcss_bt),
+ MSM_PIN_FUNCTION(wcss_fm),
+ MSM_PIN_FUNCTION(wcss_wlan),
+ MSM_PIN_FUNCTION(wcss_wlan0),
+ MSM_PIN_FUNCTION(wcss_wlan1),
+ MSM_PIN_FUNCTION(wcss_wlan2),
+ MSM_PIN_FUNCTION(webcam_rst),
+ MSM_PIN_FUNCTION(webcam_standby),
+ MSM_PIN_FUNCTION(wsa_io),
+ MSM_PIN_FUNCTION(wsa_irq),
+};
+
+static const struct msm_pingroup msm8917_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, qdss_tracedata_b, _, _, _, _,
+ _, _),
+ PINGROUP(1, blsp_spi1, blsp_uart1, adsp_ext, _, _, _, _, _,
+ qdss_tracedata_b),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, prng_rosc, _, _, _,
+ _, _),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi2, blsp_uart2, ldo_update, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(5, blsp_spi2, blsp_uart2, ldo_en, _, _, _, _, _, _),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, gcc_gp1_clk_b,
+ qdss_tracedata_b, _, _, _, _),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, pbs2, _,
+ qdss_tracedata_b, _, atest_gpsadc_dtest0_native, _),
+ PINGROUP(8, blsp_spi3, blsp_uart3, pbs0, _, _, _, _, _, _),
+ PINGROUP(9, blsp_spi3, blsp_uart3, pbs1, pwr_modem_enabled_b, _, _,
+ _, _, _),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp2_clk_b, _, _,
+ _, _, _),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, gcc_gp3_clk_b, _, _,
+ _, _, _),
+ PINGROUP(12, blsp_spi4, blsp_uart4, sec_mi2s, pwr_nav_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(13, blsp_spi4, blsp_uart4, sec_mi2s, pwr_crypto_enabled_b, _,
+ _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(15, blsp_spi4, blsp_uart4, blsp_i2c4, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, blsp_uart5, _, _, _, _, qdss_traceclk_a,
+ _, atest_bbrx1),
+ PINGROUP(17, blsp_spi5, blsp_uart5, m_voc, qdss_cti_trig_in_a0, _,
+ atest_bbrx0, _, _, _),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracectl_a, _,
+ atest_gpsadc_dtest1_native, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, qdss_tracedata_a, _,
+ _, _, _, _),
+ PINGROUP(20, blsp_spi6, blsp_uart6, _, _, _, _, _, _,
+ qdss_tracectl_b),
+ PINGROUP(21, blsp_spi6, blsp_uart6, m_voc, _, _, _, _, _,
+ qdss_cti_trig_in_b0),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_traceclk_b, _,
+ atest_wlan0, _, _, _),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, qdss_tracedata_b, _,
+ atest_wlan1, _, _, _),
+ PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, pri_mi2s_mclk_a, sec_mi2s_mclk_a, _, _, _,
+ _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(28, cam_mclk, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(29, cci_i2c, pwr_modem_enabled_a, _, _, _, _, _,
+ qdss_tracedata_a, _),
+ PINGROUP(30, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(31, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(32, cci_i2c, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(33, cci_timer0, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(34, cci_timer1, _, _, _, _, _, _, _, qdss_tracedata_a),
+ PINGROUP(35, pwr_nav_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(36, pwr_crypto_enabled_a, _, _, _, _, _, _, _,
+ qdss_tracedata_a),
+ PINGROUP(37, _, _, _, _, _, qdss_cti_trig_out_b1, _, _, _),
+ PINGROUP(38, _, qdss_tracedata_a, _, _, _, _, _, _, _),
+ PINGROUP(39, cci_async, _, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(40, _, _, _, _, qdss_tracedata_a, _,
+ atest_combodac_to_gpio_native, _, _),
+ PINGROUP(41, sd_write, _, _, _, _, _, _, _,
+ atest_combodac_to_gpio_native),
+ PINGROUP(42, gcc_gp1_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(43, gcc_gp2_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(44, gcc_gp3_clk_a, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(45, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(46, _, _, atest_combodac_to_gpio_native, _, _, _, _, _,
+ _),
+ PINGROUP(47, blsp6_spi, _, qdss_tracedata_b, _,
+ atest_combodac_to_gpio_native, _, _, _, _),
+ PINGROUP(48, _, qdss_cti_trig_in_b1, _,
+ atest_combodac_to_gpio_native, _, _, _, _, _),
+ PINGROUP(49, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(50, qdss_tracedata_a, _, _, _, _, _, _, _, _),
+ PINGROUP(51, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(52, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim2_data, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim2_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim2_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(58, uim2_present, _, _, _, _, _, _, _, _),
+ PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ PINGROUP(60, _, _, _, _, _, _, _, _, _),
+ PINGROUP(61, _, _, _, _, _, _, _, _, _),
+ PINGROUP(62, _, _, _, _, _, _, _, _, _),
+ PINGROUP(63, atest_char3, dbg_out, bimc_dte0, _, _, _, _, _, _),
+ PINGROUP(64, _, _, _, _, _, _, _, _, _),
+ PINGROUP(65, bimc_dte0, _, _, _, _, _, _, _, _),
+ PINGROUP(66, sec_mi2s_mclk_b, pri_mi2s, _, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(67, atest_char1, ebi_cdc, _, atest_combodac_to_gpio_native,
+ _, _, _, _, _),
+ PINGROUP(68, atest_char0, _, _, _, _, _, _, _, _),
+ PINGROUP(69, audio_ref, cdc_pdm0, pri_mi2s_mclk_b, ebi_cdc, _, _, _,
+ _, _),
+ PINGROUP(70, lpass_slimbus, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(71, lpass_slimbus0, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(72, lpass_slimbus1, cdc_pdm0, _, _, _, _, _, _, _),
+ PINGROUP(73, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(74, cdc_pdm0, _, _, _, _, _, _, _, _),
+ PINGROUP(75, wcss_bt, atest_char2, _, ebi_ch0, _, _, _, _, _),
+ PINGROUP(76, wcss_wlan2, _, _, _, _, _, _, _, _),
+ PINGROUP(77, wcss_wlan1, _, _, _, _, _, _, _, _),
+ PINGROUP(78, wcss_wlan0, _, _, _, _, _, _, _, _),
+ PINGROUP(79, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(80, wcss_wlan, _, _, _, _, _, _, _, _),
+ PINGROUP(81, wcss_fm, ext_lpass, _, _, _, _, _, _, _),
+ PINGROUP(82, wcss_fm, cri_trng, _, _, _, _, _, _, _),
+ PINGROUP(83, wcss_bt, cri_trng1, _, _, _, _, _, _, _),
+ PINGROUP(84, wcss_bt, cri_trng0, _, _, _, _, _, _, _),
+ PINGROUP(85, pri_mi2s, blsp_spi7, blsp_uart7, _, _, _, _, _, _),
+ PINGROUP(86, pri_mi2s, blsp_spi7, blsp_uart7, qdss_tracedata_b, _, _,
+ _, _, _),
+ PINGROUP(87, pri_mi2s_ws, blsp_spi7, blsp_uart7, blsp_i2c7,
+ qdss_tracedata_b, gcc_tlmm, _, _, _),
+ PINGROUP(88, pri_mi2s, blsp_spi7, blsp_uart7, blsp_i2c7, _, _, _,
+ _, _),
+ PINGROUP(89, dmic0_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(90, dmic0_data, _, _, _, _, _, _, _, _),
+ PINGROUP(91, _, _, _, _, _, qdss_cti_trig_in_a1, _, _, _),
+ PINGROUP(92, _, _, _, _, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(93, _, _, _, _, _, _, _, _, _),
+ PINGROUP(94, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(95, wsa_io, sec_mi2s, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(96, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(97, blsp_spi8, blsp_uart8, _, _, _, _, _, _, _),
+ PINGROUP(98, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(99, blsp_spi8, blsp_uart8, blsp_i2c8, gcc_plltest, _, _, _,
+ _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ PINGROUP(115, _, _, nav_pps_in_a, _, atest_combodac_to_gpio_native,
+ _, _, _, _),
+ PINGROUP(116, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(117, _, modem_tsync, nav_tsync, nav_pps_in_b, nav_pps, _,
+ _, _, _),
+ PINGROUP(118, _, ebi_cdc, _, _, _, _, _, _, _),
+ PINGROUP(119, gsm0_tx, _, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(120, _, atest_char, ebi_cdc, _, atest_tsens, _, _, _, _),
+ PINGROUP(121, _, _, _, bimc_dte1, _, _, _, _, _),
+ PINGROUP(122, _, ssbi_wtr1, _, _, bimc_dte1, _, _, _, _),
+ PINGROUP(123, _, ssbi_wtr1, ebi_cdc, _, _, _, _, _, _),
+ PINGROUP(124, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ PINGROUP(127, coex_uart, _, _, _, _, _, _, _, _),
+ PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ PINGROUP(130, blsp8_spi, _, _, _, _, _, _, _, _),
+ PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ PINGROUP(132, qdss_cti_trig_out_a0, _, _, _, _, _, _, _, _),
+ PINGROUP(133, qdss_cti_trig_out_a1, _, _, _, _, _, _, _, _),
+ SDC_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
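Reading the table together with the PINGROUP() macro: slot 0 of every funcs[] array is plain GPIO, the named functions fill the following slots, and the "_" placeholder pads the rest. The shared pinctrl-msm core (not part of this diff) programs the mux field with the index of the selected slot, so for gpio0 the mapping is the one sketched below; the snippet only illustrates that indexing, under the assumption just stated:

#include <stdio.h>

/* Function slots for gpio0, copied from PINGROUP(0, ...) above. */
static const char * const gpio0_funcs[] = {
	"gpio", "blsp_spi1", "blsp_uart1", "qdss_tracedata_b",
	"_", "_", "_", "_", "_", "_",
};

int main(void)
{
	/* Assumption: the value written to the mux field is the slot index. */
	for (unsigned int i = 0; i < 10; i++)
		printf("mux value %u -> %s\n", i, gpio0_funcs[i]);
	return 0;
}
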
+
+static const struct msm_pinctrl_soc_data msm8917_pinctrl = {
+ .pins = msm8917_pins,
+ .npins = ARRAY_SIZE(msm8917_pins),
+ .functions = msm8917_functions,
+ .nfunctions = ARRAY_SIZE(msm8917_functions),
+ .groups = msm8917_groups,
+ .ngroups = ARRAY_SIZE(msm8917_groups),
+ .ngpios = 134,
+};
+
+static int msm8917_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8917_pinctrl);
+}
+
+static const struct of_device_id msm8917_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8917-pinctrl", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8917_pinctrl_of_match);
+
+static struct platform_driver msm8917_pinctrl_driver = {
+ .driver = {
+ .name = "msm8917-pinctrl",
+ .of_match_table = msm8917_pinctrl_of_match,
+ },
+ .probe = msm8917_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8917_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8917_pinctrl_driver);
+}
+arch_initcall(msm8917_pinctrl_init);
+
+static void __exit msm8917_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8917_pinctrl_driver);
+}
+module_exit(msm8917_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8917 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 0c806b8128b6..c8ce61066070 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -14,6 +14,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spmi.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -702,7 +703,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
else
seq_printf(s, " %-4s",
pad->output_enabled ? "out" : "in");
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
seq_printf(s, " %-7s", pmic_gpio_functions[function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %-27s", biases[pad->pullup]);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 84de584cf7eb..7b28c5fb2402 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -544,7 +545,7 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %d", pad->aout_level);
if (pad->has_pullup)
seq_printf(s, " %-8s", biases[pad->pullup]);
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pad->out_value));
if (pad->dtest)
seq_printf(s, " dtest%d", pad->dtest);
if (pad->paired)
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 2225dc49d477..82679417e25f 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -569,7 +570,7 @@ static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
seq_printf(s, " VIN%d", pin->power_source);
seq_printf(s, " %-27s", biases[pin->bias]);
seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
- seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+ seq_printf(s, " %-4s", str_high_low(pin->output_value));
seq_printf(s, " %-7s", strengths[pin->output_strength]);
if (pin->inverted)
seq_puts(s, " inverted");
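Note on the conversions in this hunk and in the pinctrl-spmi/ssbi hunks around it: open-coded "high"/"low" and similar ternaries are replaced by the helpers from <linux/string_choices.h>. A minimal sketch of the same pattern, using a hypothetical demo_dbg_show() helper that is not part of the patch:

	#include <linux/seq_file.h>
	#include <linux/string_choices.h>

	/* str_high_low()/str_enabled_disabled() return constant strings,
	 * so the existing "%-4s" style padding keeps working unchanged. */
	static void demo_dbg_show(struct seq_file *s, bool out_value, bool irq_on)
	{
		seq_printf(s, " %-4s", str_high_low(out_value));	/* "high" / "low" */
		seq_printf(s, " %-8s", str_enabled_disabled(irq_on));	/* "enabled" / "disabled" */
	}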
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 9b1039c08aa6..4841bbfe4864 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -576,8 +577,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_puts(s, "out ");
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -589,8 +589,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
if (pin->output) {
seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
@@ -605,8 +604,7 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
seq_printf(s, "dtest%d", pin->dtest);
} else {
if (!pin->paired) {
- seq_puts(s, pin->output_value ?
- "high" : "low");
+ seq_puts(s, str_high_low(pin->output_value));
} else {
seq_puts(s, pin->output_value ?
"inverted" : "follow");
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 7f3f41c7fe54..3c18d908b21e 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -41,6 +41,7 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A779H0 if ARCH_R8A779H0
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
+ select PINCTRL_RZG2L if ARCH_R9A09G047
select PINCTRL_RZG2L if ARCH_R9A09G057
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 1df9cec2873f..ce4a07a3df49 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -26,6 +26,8 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h>
+#include <dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h>
#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
#include "../core.h"
@@ -157,7 +159,7 @@
#define PWPR_REGWE_B BIT(5) /* OEN Register Write Enable, known only in RZ/V2H(P) */
#define PM_MASK 0x03
-#define PFC_MASK 0x07
+#define PFC_MASK 0x0f
#define IEN_MASK 0x01
#define IOLH_MASK 0x03
#define SR_MASK 0x01
@@ -381,13 +383,51 @@ static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
return 0;
}
+static const u64 r9a09g047_variable_pin_cfg[] = {
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PH, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 1, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 4, RZV2H_MPXED_PIN_FUNCS),
+};
+
static const u64 r9a09g057_variable_pin_cfg[] = {
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 0, RZV2H_MPXED_PIN_FUNCS),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
- RZG2L_VARIABLE_PIN_CFG_PACK(11, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZV2H_PB, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
};
#ifdef CONFIG_RISCV
@@ -1962,6 +2002,73 @@ static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x2a, RZG3S_MPXED_PIN_FUNCS(A)), /* P18 */
};
+static const char * const rzg3e_gpio_names[] = {
+ "P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
+ "P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
+ "P20", "P21", "P22", "P23", "P24", "P25", "P26", "P27",
+ "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37",
+ "P40", "P41", "P42", "P43", "P44", "P45", "P46", "P47",
+ "P50", "P51", "P52", "P53", "P54", "P55", "P56", "P57",
+ "P60", "P61", "P62", "P63", "P64", "P65", "P66", "P67",
+ "P70", "P71", "P72", "P73", "P74", "P75", "P76", "P77",
+ "P80", "P81", "P82", "P83", "P84", "P85", "P86", "P87",
+ "", "", "", "", "", "", "", "",
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+ "PC0", "PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7",
+ "PD0", "PD1", "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PE0", "PE1", "PE2", "PE3", "PE4", "PE5", "PE6", "PE7",
+ "PF0", "PF1", "PF2", "PF3", "PF4", "PF5", "PF6", "PF7",
+ "PG0", "PG1", "PG2", "PG3", "PG4", "PG5", "PG6", "PG7",
+ "PH0", "PH1", "PH2", "PH3", "PH4", "PH5", "PH6", "PH7",
+ "", "", "", "", "", "", "", "",
+ "PJ0", "PJ1", "PJ2", "PJ3", "PJ4", "PJ5", "PJ6", "PJ7",
+ "PK0", "PK1", "PK2", "PK3", "PK4", "PK5", "PK6", "PK7",
+ "PL0", "PL1", "PL2", "PL3", "PL4", "PL5", "PL6", "PL7",
+ "PM0", "PM1", "PM2", "PM3", "PM4", "PM5", "PM6", "PM7",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "PS0", "PS1", "PS2", "PS3", "PS4", "PS5", "PS6", "PS7",
+};
+
+static const u64 r9a09g047_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(8, 0x20, RZV2H_MPXED_PIN_FUNCS), /* P0 */
+ RZG2L_GPIO_PORT_PACK(8, 0x21, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P1 */
+ RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_COMMON_PIN_FUNCS(RZV2H) |
+ PIN_CFG_NOD), /* P2 */
+ RZG2L_GPIO_PORT_PACK(8, 0x23, RZV2H_MPXED_PIN_FUNCS), /* P3 */
+ RZG2L_GPIO_PORT_PACK(6, 0x24, RZV2H_MPXED_PIN_FUNCS), /* P4 */
+ RZG2L_GPIO_PORT_PACK(7, 0x25, RZV2H_MPXED_PIN_FUNCS), /* P5 */
+ RZG2L_GPIO_PORT_PACK(7, 0x26, RZV2H_MPXED_PIN_FUNCS), /* P6 */
+ RZG2L_GPIO_PORT_PACK(8, 0x27, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P7 */
+ RZG2L_GPIO_PORT_PACK(6, 0x28, RZV2H_MPXED_PIN_FUNCS), /* P8 */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2a), /* PA */
+ RZG2L_GPIO_PORT_PACK(8, 0x2b, RZV2H_MPXED_PIN_FUNCS), /* PB */
+ RZG2L_GPIO_PORT_PACK(3, 0x2c, RZV2H_MPXED_PIN_FUNCS), /* PC */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2d), /* PD */
+ RZG2L_GPIO_PORT_PACK(8, 0x2e, RZV2H_MPXED_PIN_FUNCS), /* PE */
+ RZG2L_GPIO_PORT_PACK(3, 0x2f, RZV2H_MPXED_PIN_FUNCS), /* PF */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x30), /* PG */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x31), /* PH */
+ 0x0,
+ RZG2L_GPIO_PORT_PACK_VARIABLE(5, 0x33), /* PJ */
+ RZG2L_GPIO_PORT_PACK(4, 0x34, RZV2H_MPXED_PIN_FUNCS), /* PK */
+ RZG2L_GPIO_PORT_PACK(8, 0x35, RZV2H_MPXED_PIN_FUNCS), /* PL */
+ RZG2L_GPIO_PORT_PACK(8, 0x36, RZV2H_MPXED_PIN_FUNCS), /* PM */
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ RZG2L_GPIO_PORT_PACK(4, 0x3c, RZV2H_MPXED_PIN_FUNCS), /* PS */
+};
+
static const char * const rzv2h_gpio_names[] = {
"P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
"P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
@@ -2085,6 +2192,8 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
PIN_CFG_SOFT_PS)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
+ { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x2, 0, PIN_CFG_IEN) },
+ { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x2, 1, PIN_CFG_IEN) },
{ "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0x6, 0, PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS) },
{ "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_IOLH_B | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_B | PIN_CFG_IEN |
@@ -2250,6 +2359,43 @@ static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
{ "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
};
+static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = {
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0PWEN", RZG2L_SINGLE_PIN_PACK(0x9, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0IOVS", RZG2L_SINGLE_PIN_PACK(0x9, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7,
+ (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_PUPD)) },
+};
+
static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
@@ -2760,6 +2906,9 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
BUILD_BUG_ON(ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
+ BUILD_BUG_ON(ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg3e_gpio_names));
+
BUILD_BUG_ON(ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzv2h_gpio_names));
@@ -3158,6 +3307,29 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
+static struct rzg2l_pinctrl_data r9a09g047_data = {
+ .port_pins = rzg3e_gpio_names,
+ .port_pin_configs = r9a09g047_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g047_gpio_configs),
+ .dedicated_pins = rzg3e_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(r9a09g047_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzg3e_dedicated_pins),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g047_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g047_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
+};
+
static struct rzg2l_pinctrl_data r9a09g057_data = {
.port_pins = rzv2h_gpio_names,
.port_pin_configs = r9a09g057_gpio_configs,
@@ -3195,6 +3367,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.data = &r9a08g045_data,
},
{
+ .compatible = "renesas,r9a09g047-pinctrl",
+ .data = &r9a09g047_data,
+ },
+ {
.compatible = "renesas,r9a09g057-pinctrl",
.data = &r9a09g057_data,
},
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b79c211c0374..42093bae8bb7 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -23,6 +23,7 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
@@ -442,7 +443,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
- pr_info("wake %s for irq %u (%s-%lu)\n", on ? "enabled" : "disabled",
+ pr_info("wake %s for irq %u (%s-%lu)\n", str_enabled_disabled(on),
irqd->irq, bank->name, irqd->hwirq);
if (!on)
@@ -636,7 +637,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (clk_enable(b->drvdata->pclk)) {
dev_err(b->gpio_chip.parent,
"unable to enable clock for pending IRQs\n");
- return;
+ goto out;
}
}
@@ -652,6 +653,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
if (eintd->nr_banks)
clk_disable(eintd->banks[0]->drvdata->pclk);
+out:
chained_irq_exit(chip, desc);
}
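The exynos_irq_demux_eint16_31() change above makes the clock-enable error path jump to a label so that chained_irq_exit() still runs. A minimal sketch of that balanced enter/exit pattern in a hypothetical demux handler; the demo_* names and the DEMO_PEND register are illustrative only, not from the patch:

	#include <linux/bitops.h>
	#include <linux/clk.h>
	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/irqchip/chained_irq.h>
	#include <linux/irqdomain.h>

	#define DEMO_PEND	0x0	/* hypothetical pending-status register */

	struct demo_eint {		/* hypothetical driver state */
		struct clk *pclk;
		void __iomem *base;
		struct irq_domain *domain;
	};

	static void demo_irq_demux(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		struct demo_eint *d = irq_desc_get_handler_data(desc);
		unsigned long pending;
		unsigned int bit;

		chained_irq_enter(chip, desc);

		if (clk_enable(d->pclk))
			goto out;	/* still balance chained_irq_enter() */

		pending = readl(d->base + DEMO_PEND);
		for_each_set_bit(bit, &pending, 16)
			generic_handle_domain_irq(d->domain, bit);

		clk_disable(d->pclk);
	out:
		chained_irq_exit(chip, desc);
	}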
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index bbedd980ec67..cfced7afd4ca 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1172,7 +1172,7 @@ static void samsung_banks_node_get(struct device *dev, struct samsung_pinctrl_dr
else
dev_warn(dev, "Missing node for bank %s - invalid DTB\n",
bank->name);
- /* child reference dropped in samsung_drop_banks_of_node() */
+ /* child reference dropped in samsung_banks_node_put() */
}
}
@@ -1272,7 +1272,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
ret = platform_get_irq_optional(pdev, 0);
if (ret < 0 && ret != -ENXIO)
- return ret;
+ goto err_put_banks;
if (ret > 0)
drvdata->irq = ret;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 5b7fa77c1184..cc0b4d1d7cff 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -22,6 +22,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@@ -86,7 +87,6 @@ struct stm32_pinctrl_group {
struct stm32_gpio_bank {
void __iomem *base;
- struct clk *clk;
struct reset_control *rstc;
spinlock_t lock;
struct gpio_chip gpio_chip;
@@ -108,6 +108,7 @@ struct stm32_pinctrl {
unsigned ngroups;
const char **grp_names;
struct stm32_gpio_bank *banks;
+ struct clk_bulk_data *clks;
unsigned nbanks;
const struct stm32_pinctrl_match_data *match_data;
struct irq_domain *domain;
@@ -1217,7 +1218,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
case 0:
val = stm32_pconf_get(bank, offset, true);
seq_printf(s, "- %s - %s",
- val ? "high" : "low",
+ str_high_low(val),
biasing[bias]);
break;
@@ -1227,7 +1228,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
speed = stm32_pconf_get_speed(bank, offset);
val = stm32_pconf_get(bank, offset, false);
seq_printf(s, "- %s - %s - %s - %s %s",
- val ? "high" : "low",
+ str_high_low(val),
drive ? "open drain" : "push pull",
biasing[bias],
speeds[speed], "speed");
@@ -1308,12 +1309,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
if (IS_ERR(bank->base))
return PTR_ERR(bank->base);
- err = clk_prepare_enable(bank->clk);
- if (err) {
- dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
- return err;
- }
-
bank->gpio_chip = stm32_gpio_template;
fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label);
@@ -1360,26 +1355,20 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->fwnode, &stm32_gpio_domain_ops,
bank);
- if (!bank->domain) {
- err = -ENODEV;
- goto err_clk;
- }
+ if (!bank->domain)
+ return -ENODEV;
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
- if (!names) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names)
+ return -ENOMEM;
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name) {
names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
- if (!names[i]) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!names[i])
+ return -ENOMEM;
} else {
names[i] = NULL;
}
@@ -1390,15 +1379,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
- goto err_clk;
+ return err;
}
dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
return 0;
-
-err_clk:
- clk_disable_unprepare(bank->clk);
- return err;
}
static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
@@ -1621,6 +1606,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (!pctl->banks)
return -ENOMEM;
+ pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks),
+ GFP_KERNEL);
+ if (!pctl->clks)
+ return -ENOMEM;
+
i = 0;
for_each_gpiochip_node(dev, child) {
struct stm32_gpio_bank *bank = &pctl->banks[i];
@@ -1632,24 +1622,27 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bank->clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(bank->clk)) {
+ pctl->clks[i].clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(pctl->clks[i].clk)) {
fwnode_handle_put(child);
- return dev_err_probe(dev, PTR_ERR(bank->clk),
+ return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk),
"failed to get clk\n");
}
+ pctl->clks[i].id = "pctl";
i++;
}
+ ret = clk_bulk_prepare_enable(banks, pctl->clks);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clk (%d)\n", ret);
+ return ret;
+ }
+
for_each_gpiochip_node(dev, child) {
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret) {
fwnode_handle_put(child);
-
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable_unprepare(pctl->banks[i].clk);
-
- return ret;
+ goto err_register;
}
pctl->nbanks++;
@@ -1658,6 +1651,15 @@ int stm32_pctl_probe(struct platform_device *pdev)
dev_info(dev, "Pinctrl STM32 initialized\n");
return 0;
+err_register:
+ for (i = 0; i < pctl->nbanks; i++) {
+ struct stm32_gpio_bank *bank = &pctl->banks[i];
+
+ gpiochip_remove(&bank->gpio_chip);
+ }
+
+ clk_bulk_disable_unprepare(banks, pctl->clks);
+ return ret;
}
static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
@@ -1726,10 +1728,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
- int i;
- for (i = 0; i < pctl->nbanks; i++)
- clk_disable(pctl->banks[i].clk);
+ clk_bulk_disable(pctl->nbanks, pctl->clks);
return 0;
}
@@ -1738,10 +1738,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
struct stm32_pinctrl_group *g = pctl->groups;
- int i;
+ int i, ret;
- for (i = 0; i < pctl->nbanks; i++)
- clk_enable(pctl->banks[i].clk);
+ ret = clk_bulk_enable(pctl->nbanks, pctl->clks);
+ if (ret)
+ return ret;
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
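The stm32 hunks above move from one struct clk per bank to a single clk_bulk_data array, so probe, suspend and resume can use the clk_bulk_*() helpers. A rough sketch of the same lifecycle under simpler assumptions: here the clocks are looked up on the device node itself via devm_clk_bulk_get(), unlike the per-child of_clk_get_by_name() in the patch, and all demo_* names are hypothetical:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	static const char * const demo_clk_names[] = { "bus", "gpio" };	/* hypothetical */

	struct demo_pinctrl {
		struct clk_bulk_data *clks;
		unsigned int nclks;
	};

	static int demo_probe(struct device *dev, struct demo_pinctrl *p)
	{
		unsigned int i, n = ARRAY_SIZE(demo_clk_names);
		int ret;

		p->clks = devm_kcalloc(dev, n, sizeof(*p->clks), GFP_KERNEL);
		if (!p->clks)
			return -ENOMEM;

		for (i = 0; i < n; i++)
			p->clks[i].id = demo_clk_names[i];

		ret = devm_clk_bulk_get(dev, n, p->clks);
		if (ret)
			return ret;

		ret = clk_bulk_prepare_enable(n, p->clks);
		if (ret)
			return ret;

		p->nclks = n;
		return 0;
	}

	static int demo_suspend(struct demo_pinctrl *p)
	{
		clk_bulk_disable(p->nclks, p->clks);	/* stays prepared across suspend */
		return 0;
	}

	static int demo_resume(struct demo_pinctrl *p)
	{
		return clk_bulk_enable(p->nclks, p->clks);
	}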
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index df90c75fb3c5..b97de80ae2f3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -256,72 +256,84 @@ static const struct sunxi_desc_pin a100_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3P */
SUNXI_FUNCTION(0x4, "dsi0"), /* DP3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D3N */
SUNXI_FUNCTION(0x4, "dsi0"), /* DM3 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0P */
SUNXI_FUNCTION(0x4, "spi1"), /* CS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D0N */
SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1P */
SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D1N */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2P */
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D2N */
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3P */
SUNXI_FUNCTION(0x4, "uart4"), /* TX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 18)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* D3N */
SUNXI_FUNCTION(0x4, "uart4"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 19)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index b7dbaf77b6db..1b2f2bd09662 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -237,12 +237,19 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_EC_TYPEC_ALTMODES
+ bool
+ help
+ Selectable symbol to enable altmodes.
+
config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
depends on CROS_USBPD_NOTIFY
depends on USB_ROLE_SWITCH
default MFD_CROS_EC_DEV
+ select CROS_EC_TYPEC_ALTMODES if TYPEC_DP_ALTMODE
+ select CROS_EC_TYPEC_ALTMODES if TYPEC_TBT_ALTMODE
help
If you say Y here, you get support for accessing Type C connector
information from the Chrome OS EC.
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index fb8335458a22..1a5a484563cc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -19,7 +19,11 @@ obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
obj-$(CONFIG_CROS_EC_UART) += cros_ec_uart.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
cros-ec-typec-objs := cros_ec_typec.o cros_typec_vdm.o
+ifneq ($(CONFIG_CROS_EC_TYPEC_ALTMODES),)
+ cros-ec-typec-objs += cros_typec_altmode.o
+endif
obj-$(CONFIG_CROS_EC_TYPEC) += cros-ec-typec.o
+
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index ae2f86296954..6ee182101bc9 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -18,6 +18,7 @@
#include "cros_ec_typec.h"
#include "cros_typec_vdm.h"
+#include "cros_typec_altmode.h"
#define DRV_NAME "cros-ec-typec"
@@ -290,30 +291,32 @@ static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
+ memset(&desc, 0, sizeof(desc));
desc.svid = USB_TYPEC_DP_SID;
desc.mode = USB_TYPEC_DP_MODE;
desc.vdo = DP_PORT_VDO;
- amode = typec_port_register_altmode(port->port, &desc);
+ amode = cros_typec_register_displayport(port, &desc,
+ typec->ap_driven_altmode);
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
- typec_altmode_set_drvdata(amode, port);
- amode->ops = &port_amode_ops;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
- * if it doesn't support it, so it's safe to register it unconditionally
- * here for now.
+ * if it doesn't support it, and by design it will not enter it
+ * automatically, so the |ap_driven_altmode| feature tells us whether
+ * we should register it.
*/
- memset(&desc, 0, sizeof(desc));
- desc.svid = USB_TYPEC_TBT_SID;
- desc.mode = TYPEC_ANY_MODE;
- amode = typec_port_register_altmode(port->port, &desc);
- if (IS_ERR(amode))
- return PTR_ERR(amode);
- port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
- typec_altmode_set_drvdata(amode, port);
- amode->ops = &port_amode_ops;
+ if (typec->ap_driven_altmode) {
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID;
+ desc.mode = TBT_MODE;
+ desc.inactive = true;
+ amode = cros_typec_register_thunderbolt(port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
+ }
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
@@ -576,6 +579,10 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
if (!ret)
ret = typec_mux_set(port->mux, &port->state);
+ if (!ret)
+ ret = cros_typec_displayport_status_update(port->state.alt,
+ port->state.data);
+
return ret;
}
@@ -619,6 +626,7 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
};
struct ec_params_usb_pd_mux_ack mux_ack;
enum typec_orientation orientation;
+ struct cros_typec_altmode_node *node;
int ret;
ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
@@ -677,6 +685,14 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
port->mux_flags);
}
+ /* Iterate all partner alt-modes and set the active alternate mode. */
+ list_for_each_entry(node, &port->partner_mode_list, list) {
+ typec_altmode_update_active(
+ node->amode,
+ port->state.alt &&
+ node->amode->svid == port->state.alt->svid);
+ }
+
mux_ack:
if (!typec->needs_mux_ack)
return ret;
@@ -1244,6 +1260,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+ typec->ap_driven_altmode = cros_ec_check_features(
+ ec_dev, EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY);
ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
diff --git a/drivers/platform/chrome/cros_ec_typec.h b/drivers/platform/chrome/cros_ec_typec.h
index deda180a646f..9fd5342bb0ad 100644
--- a/drivers/platform/chrome/cros_ec_typec.h
+++ b/drivers/platform/chrome/cros_ec_typec.h
@@ -39,6 +39,7 @@ struct cros_typec_data {
struct work_struct port_work;
bool typec_cmd_supported;
bool needs_mux_ack;
+ bool ap_driven_altmode;
};
/* Per port data. */
diff --git a/drivers/platform/chrome/cros_typec_altmode.c b/drivers/platform/chrome/cros_typec_altmode.c
new file mode 100644
index 000000000000..557340b53af0
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_altmode.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Alt-mode implementation on ChromeOS EC.
+ *
+ * Copyright 2024 Google LLC
+ * Author: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
+ */
+#include "cros_ec_typec.h"
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/pd_vdo.h>
+
+#include "cros_typec_altmode.h"
+
+struct cros_typec_altmode_data {
+ struct work_struct work;
+ struct cros_typec_port *port;
+ struct typec_altmode *alt;
+ bool ap_mode_entry;
+
+ struct mutex lock;
+ u32 header;
+ u32 *vdo_data;
+ u8 vdo_size;
+
+ u16 sid;
+ u8 mode;
+};
+
+struct cros_typec_dp_data {
+ struct cros_typec_altmode_data adata;
+ struct typec_displayport_data data;
+ bool configured;
+ bool pending_status_update;
+};
+
+static void cros_typec_altmode_work(struct work_struct *work)
+{
+ struct cros_typec_altmode_data *data =
+ container_of(work, struct cros_typec_altmode_data, work);
+
+ mutex_lock(&data->lock);
+
+ if (typec_altmode_vdm(data->alt, data->header, data->vdo_data,
+ data->vdo_size))
+ dev_err(&data->alt->dev, "VDM 0x%x failed\n", data->header);
+
+ data->header = 0;
+ data->vdo_data = NULL;
+ data->vdo_size = 0;
+
+ mutex_unlock(&data->lock);
+}
+
+static int cros_typec_altmode_enter(struct typec_altmode *alt, u32 *vdo)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+ struct ec_params_typec_control req = {
+ .port = adata->port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ };
+ int svdm_version;
+ int ret;
+
+ if (!adata->ap_mode_entry) {
+ dev_warn(&alt->dev,
+ "EC does not support AP driven mode entry\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (adata->sid == USB_TYPEC_DP_SID)
+ req.mode_to_enter = CROS_EC_ALTMODE_DP;
+ else if (adata->sid == USB_TYPEC_TBT_SID)
+ req.mode_to_enter = CROS_EC_ALTMODE_TBT;
+ else
+ return -EOPNOTSUPP;
+
+ ret = cros_ec_cmd(adata->port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ adata->header = VDO(adata->sid, 1, svdm_version, CMD_ENTER_MODE);
+ adata->header |= VDO_OPOS(adata->mode);
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = NULL;
+ adata->vdo_size = 1;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+ return ret;
+}
+
+static int cros_typec_altmode_exit(struct typec_altmode *alt)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+ struct ec_params_typec_control req = {
+ .port = adata->port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ };
+ int svdm_version;
+ int ret;
+
+ if (!adata->ap_mode_entry) {
+ dev_warn(&alt->dev,
+ "EC does not support AP driven mode exit\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = cros_ec_cmd(adata->port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+
+ if (ret < 0)
+ return ret;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ adata->header = VDO(adata->sid, 1, svdm_version, CMD_EXIT_MODE);
+ adata->header |= VDO_OPOS(adata->mode);
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = NULL;
+ adata->vdo_size = 1;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+ return ret;
+}
+
+static int cros_typec_displayport_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_dp_data *dp_data = typec_altmode_get_drvdata(alt);
+ struct cros_typec_altmode_data *adata = &dp_data->adata;
+
+ int cmd_type = PD_VDO_CMDT(header);
+ int cmd = PD_VDO_CMD(header);
+ int svdm_version;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ if (PD_VDO_SVDM_VER(header) < svdm_version) {
+ typec_partner_set_svdm_version(adata->port->partner,
+ PD_VDO_SVDM_VER(header));
+ svdm_version = PD_VDO_SVDM_VER(header);
+ }
+
+ adata->header = VDO(adata->sid, 1, svdm_version, cmd);
+ adata->header |= VDO_OPOS(adata->mode);
+
+ /*
+ * DP_CMD_CONFIGURE: We can't actually do anything with the
+ * provided VDO yet so just send back an ACK.
+ *
+ * DP_CMD_STATUS_UPDATE: We wait for Mux changes to send
+ * DPStatus Acks.
+ */
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp_data->data.conf = *data;
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ dp_data->configured = true;
+ schedule_work(&adata->work);
+ break;
+ case DP_CMD_STATUS_UPDATE:
+ dp_data->pending_status_update = true;
+ break;
+ default:
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ schedule_work(&adata->work);
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&adata->lock);
+ return 0;
+}
+
+static int cros_typec_thunderbolt_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+
+ int cmd_type = PD_VDO_CMDT(header);
+ int cmd = PD_VDO_CMD(header);
+ int svdm_version;
+
+ svdm_version = typec_altmode_get_svdm_version(alt);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ mutex_lock(&adata->lock);
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ if (PD_VDO_SVDM_VER(header) < svdm_version) {
+ typec_partner_set_svdm_version(adata->port->partner,
+ PD_VDO_SVDM_VER(header));
+ svdm_version = PD_VDO_SVDM_VER(header);
+ }
+
+ adata->header = VDO(adata->sid, 1, svdm_version, cmd);
+ adata->header |= VDO_OPOS(adata->mode);
+
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /* Don't respond to the enter mode vdm because it
+ * triggers mux configuration. This is handled directly
+ * by the cros_ec_typec driver so the Thunderbolt driver
+ * doesn't need to be involved.
+ */
+ break;
+ default:
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ schedule_work(&adata->work);
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&adata->lock);
+ return 0;
+}
+
+static int cros_typec_altmode_vdm(struct typec_altmode *alt, u32 header,
+ const u32 *data, int count)
+{
+ struct cros_typec_altmode_data *adata = typec_altmode_get_drvdata(alt);
+
+ if (!adata->ap_mode_entry)
+ return -EOPNOTSUPP;
+
+ if (adata->sid == USB_TYPEC_DP_SID)
+ return cros_typec_displayport_vdm(alt, header, data, count);
+
+ if (adata->sid == USB_TYPEC_TBT_SID)
+ return cros_typec_thunderbolt_vdm(alt, header, data, count);
+
+ return -EINVAL;
+}
+
+static const struct typec_altmode_ops cros_typec_altmode_ops = {
+ .enter = cros_typec_altmode_enter,
+ .exit = cros_typec_altmode_exit,
+ .vdm = cros_typec_altmode_vdm,
+};
+
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data)
+{
+ struct cros_typec_dp_data *dp_data =
+ typec_altmode_get_drvdata(altmode);
+ struct cros_typec_altmode_data *adata = &dp_data->adata;
+
+ if (!dp_data->pending_status_update) {
+ dev_dbg(&altmode->dev,
+ "Got DPStatus without a pending request\n");
+ return 0;
+ }
+
+ if (dp_data->configured && dp_data->data.conf != data->conf)
+ dev_dbg(&altmode->dev,
+ "DP Conf doesn't match. Requested 0x%04x, Actual 0x%04x\n",
+ dp_data->data.conf, data->conf);
+
+ mutex_lock(&adata->lock);
+
+ dp_data->data = *data;
+ dp_data->pending_status_update = false;
+ adata->header |= VDO_CMDT(CMDT_RSP_ACK);
+ adata->vdo_data = &dp_data->data.status;
+ adata->vdo_size = 2;
+ schedule_work(&adata->work);
+
+ mutex_unlock(&adata->lock);
+
+ return 0;
+}
+
+struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry)
+{
+ struct typec_altmode *alt;
+ struct cros_typec_dp_data *dp_data;
+ struct cros_typec_altmode_data *adata;
+
+ alt = typec_port_register_altmode(port->port, desc);
+ if (IS_ERR(alt))
+ return alt;
+
+ dp_data = devm_kzalloc(&alt->dev, sizeof(*dp_data), GFP_KERNEL);
+ if (!dp_data) {
+ typec_unregister_altmode(alt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ adata = &dp_data->adata;
+ INIT_WORK(&adata->work, cros_typec_altmode_work);
+ mutex_init(&adata->lock);
+ adata->alt = alt;
+ adata->port = port;
+ adata->ap_mode_entry = ap_mode_entry;
+ adata->sid = desc->svid;
+ adata->mode = desc->mode;
+
+ typec_altmode_set_ops(alt, &cros_typec_altmode_ops);
+ typec_altmode_set_drvdata(alt, adata);
+
+ return alt;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_TYPEC_TBT_ALTMODE)
+struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc)
+{
+ struct typec_altmode *alt;
+ struct cros_typec_altmode_data *adata;
+
+ alt = typec_port_register_altmode(port->port, desc);
+ if (IS_ERR(alt))
+ return alt;
+
+ adata = devm_kzalloc(&alt->dev, sizeof(*adata), GFP_KERNEL);
+ if (!adata) {
+ typec_unregister_altmode(alt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&adata->work, cros_typec_altmode_work);
+ adata->alt = alt;
+ adata->port = port;
+ adata->ap_mode_entry = true;
+ adata->sid = desc->svid;
+ adata->mode = desc->mode;
+
+ typec_altmode_set_ops(alt, &cros_typec_altmode_ops);
+ typec_altmode_set_drvdata(alt, adata);
+
+ return alt;
+}
+#endif
diff --git a/drivers/platform/chrome/cros_typec_altmode.h b/drivers/platform/chrome/cros_typec_altmode.h
new file mode 100644
index 000000000000..3f2aa95d065a
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_altmode.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __CROS_TYPEC_ALTMODE_H__
+#define __CROS_TYPEC_ALTMODE_H__
+
+#include <linux/kconfig.h>
+#include <linux/usb/typec.h>
+
+struct cros_typec_port;
+struct typec_altmode;
+struct typec_altmode_desc;
+struct typec_displayport_data;
+
+#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry);
+
+int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data);
+#else
+static inline struct typec_altmode *
+cros_typec_register_displayport(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc,
+ bool ap_mode_entry)
+{
+ return typec_port_register_altmode(port->port, desc);
+}
+
+static inline int cros_typec_displayport_status_update(struct typec_altmode *altmode,
+ struct typec_displayport_data *data)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_TYPEC_TBT_ALTMODE)
+struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc);
+#else
+static inline struct typec_altmode *
+cros_typec_register_thunderbolt(struct cros_typec_port *port,
+ struct typec_altmode_desc *desc)
+{
+ return typec_port_register_altmode(port->port, desc);
+}
+#endif
+
+#endif /* __CROS_TYPEC_ALTMODE_H__ */
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index c5b36837e694..9cae07348d5e 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -177,7 +177,7 @@ static ssize_t post_reset_wdog_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t post_reset_wdog_store(struct device *dev,
@@ -206,7 +206,7 @@ static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
if (action < 0)
return action;
- return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
}
static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
@@ -274,14 +274,14 @@ static ssize_t lifecycle_state_show(struct device *dev,
* due to using the test bits.
*/
if (test_state) {
- return sprintf(buf, "%s(test)\n",
+ return sysfs_emit(buf, "%s(test)\n",
mlxbf_bootctl_lifecycle_states[lc_state]);
} else if (use_dev_key &&
(lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
- return sprintf(buf, "Secured (development)\n");
+ return sysfs_emit(buf, "Secured (development)\n");
}
- return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+ return sysfs_emit(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
}
static ssize_t secure_boot_fuse_state_show(struct device *dev,
@@ -332,9 +332,9 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
else
status = valid ? "Invalid" : "Free";
}
- buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ buf_len += sysfs_emit_at(buf, buf_len, "%d:%s ", key, status);
}
- buf_len += sprintf(buf + buf_len, "\n");
+ buf_len += sysfs_emit_at(buf, buf_len, "\n");
return buf_len;
}
@@ -939,7 +939,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos,
size_t count)
{
@@ -971,9 +971,9 @@ static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
return p - buf;
}
-static struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
+static const struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
.attr = { .name = "bootfifo", .mode = 0400 },
- .read = mlxbf_bootctl_bootfifo_read,
+ .read_new = mlxbf_bootctl_bootfifo_read,
};
static bool mlxbf_bootctl_guid_match(const guid_t *guid,
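The mlxbf-bootctl hunks above convert sprintf() in sysfs show callbacks to sysfs_emit()/sysfs_emit_at(), which check that the buffer is the page sysfs handed in and cannot run past PAGE_SIZE. A minimal sketch of both forms, with hypothetical attributes that are not part of the patch:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t demo_value_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
	{
		int val = 42;	/* placeholder for a real readout */

		return sysfs_emit(buf, "%d\n", val);
	}

	/* For multi-part output, pass the running offset instead of buf + len. */
	static ssize_t demo_list_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		int i, len = 0;

		for (i = 0; i < 3; i++)
			len += sysfs_emit_at(buf, len, "%d ", i);
		len += sysfs_emit_at(buf, len, "\n");

		return len;
	}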
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 9d18dfca6a67..36a00692347d 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -88,6 +88,7 @@
#define MLXBF_PMC_CRSPACE_PERFMON_CTL(n) (n * MLXBF_PMC_CRSPACE_PERFMON_REG0_SZ)
#define MLXBF_PMC_CRSPACE_PERFMON_EN BIT(30)
#define MLXBF_PMC_CRSPACE_PERFMON_CLR BIT(28)
+#define MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0x4)
#define MLXBF_PMC_CRSPACE_PERFMON_VAL0(n) (MLXBF_PMC_CRSPACE_PERFMON_CTL(n) + 0xc)
/**
@@ -114,6 +115,7 @@ struct mlxbf_pmc_attribute {
* @attr_event: Attributes for "event" sysfs files
* @attr_event_list: Attributes for "event_list" sysfs files
* @attr_enable: Attributes for "enable" sysfs files
+ * @attr_count_clock: Attributes for "count_clock" sysfs files
* @block_attr: All attributes needed for the block
* @block_attr_grp: Attribute group for the block
*/
@@ -126,6 +128,7 @@ struct mlxbf_pmc_block_info {
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
struct mlxbf_pmc_attribute attr_enable;
+ struct mlxbf_pmc_attribute attr_count_clock;
struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
struct attribute_group block_attr_grp;
};
@@ -859,6 +862,37 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_miss_events[] = {
{75, "HISTOGRAM_HISTOGRAM_BIN9"},
};
+static const struct mlxbf_pmc_events mlxbf_pmc_clock_events[] = {
+ { 0x0, "FMON_CLK_LAST_COUNT_PLL_D1_INST0" },
+ { 0x4, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST0" },
+ { 0x8, "FMON_CLK_LAST_COUNT_PLL_D1_INST1" },
+ { 0xc, "REFERENCE_WINDOW_WIDTH_PLL_D1_INST1" },
+ { 0x10, "FMON_CLK_LAST_COUNT_PLL_G1" },
+ { 0x14, "REFERENCE_WINDOW_WIDTH_PLL_G1" },
+ { 0x18, "FMON_CLK_LAST_COUNT_PLL_W1" },
+ { 0x1c, "REFERENCE_WINDOW_WIDTH_PLL_W1" },
+ { 0x20, "FMON_CLK_LAST_COUNT_PLL_T1" },
+ { 0x24, "REFERENCE_WINDOW_WIDTH_PLL_T1" },
+ { 0x28, "FMON_CLK_LAST_COUNT_PLL_A0" },
+ { 0x2c, "REFERENCE_WINDOW_WIDTH_PLL_A0" },
+ { 0x30, "FMON_CLK_LAST_COUNT_PLL_C0" },
+ { 0x34, "REFERENCE_WINDOW_WIDTH_PLL_C0" },
+ { 0x38, "FMON_CLK_LAST_COUNT_PLL_N1" },
+ { 0x3c, "REFERENCE_WINDOW_WIDTH_PLL_N1" },
+ { 0x40, "FMON_CLK_LAST_COUNT_PLL_I1" },
+ { 0x44, "REFERENCE_WINDOW_WIDTH_PLL_I1" },
+ { 0x48, "FMON_CLK_LAST_COUNT_PLL_R1" },
+ { 0x4c, "REFERENCE_WINDOW_WIDTH_PLL_R1" },
+ { 0x50, "FMON_CLK_LAST_COUNT_PLL_P1" },
+ { 0x54, "REFERENCE_WINDOW_WIDTH_PLL_P1" },
+ { 0x58, "FMON_CLK_LAST_COUNT_REF_100_INST0" },
+ { 0x5c, "REFERENCE_WINDOW_WIDTH_REF_100_INST0" },
+ { 0x60, "FMON_CLK_LAST_COUNT_REF_100_INST1" },
+ { 0x64, "REFERENCE_WINDOW_WIDTH_REF_100_INST1" },
+ { 0x68, "FMON_CLK_LAST_COUNT_REF_156" },
+ { 0x6c, "REFERENCE_WINDOW_WIDTH_REF_156" },
+};
+
static struct mlxbf_pmc_context *pmc;
/* UUID used to probe ATF service. */
@@ -1032,6 +1066,9 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size
} else if (strstr(blk, "llt")) {
events = mlxbf_pmc_llt_events;
size = ARRAY_SIZE(mlxbf_pmc_llt_events);
+ } else if (strstr(blk, "clock_measure")) {
+ events = mlxbf_pmc_clock_events;
+ size = ARRAY_SIZE(mlxbf_pmc_clock_events);
} else {
events = NULL;
size = 0;
@@ -1168,7 +1205,7 @@ static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 e
/* Method to handle crspace counter programming */
static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
{
- void *addr;
+ void __iomem *addr;
u32 word;
int ret;
@@ -1192,7 +1229,7 @@ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num,
/* Method to clear crspace counter value */
static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
{
- void *addr;
+ void __iomem *addr;
addr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
@@ -1405,7 +1442,7 @@ static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *resul
static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
{
u32 word, evt;
- void *addr;
+ void __iomem *addr;
int ret;
addr = pmc->block[blk_num].mmio_base +
@@ -1466,14 +1503,15 @@ static int mlxbf_pmc_read_event(unsigned int blk_num, u32 cnt_num, bool is_l3, u
/* Method to read a register */
static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
{
- u32 ecc_out;
+ u32 reg;
- if (strstr(pmc->block_name[blk_num], "ecc")) {
+ if ((strstr(pmc->block_name[blk_num], "ecc")) ||
+ (strstr(pmc->block_name[blk_num], "clock_measure"))) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
- &ecc_out))
+ &reg))
return -EFAULT;
- *result = ecc_out;
+ *result = reg;
return 0;
}
@@ -1487,6 +1525,9 @@ static int mlxbf_pmc_read_reg(unsigned int blk_num, u32 offset, u64 *result)
/* Method to write to a register */
static int mlxbf_pmc_write_reg(unsigned int blk_num, u32 offset, u64 data)
{
+ if (strstr(pmc->block_name[blk_num], "clock_measure"))
+ return -EINVAL;
+
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_WRITE_REG_32, data);
@@ -1763,6 +1804,49 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
return count;
}
+/* Show function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+
+ blk_num = attr_count_clock->nr;
+
+ if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ &reg))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%u\n", reg);
+}
+
+/* Store function for "count_clock" sysfs files - only for crspace */
+static ssize_t mlxbf_pmc_count_clock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlxbf_pmc_attribute *attr_count_clock = container_of(
+ attr, struct mlxbf_pmc_attribute, dev_attr);
+ unsigned int blk_num;
+ u32 reg;
+ int err;
+
+ blk_num = attr_count_clock->nr;
+
+ err = kstrtouint(buf, 0, &reg);
+ if (err < 0)
+ return err;
+
+ mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
+ MLXBF_PMC_CRSPACE_PERFMON_COUNT_CLOCK(pmc->block[blk_num].counters),
+ MLXBF_PMC_WRITE_REG_32, reg);
+
+ return count;
+}
+
/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_num)
{
@@ -1801,6 +1885,21 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
attr = NULL;
}
+ if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
+ /* Program crspace counters to count clock cycles using "count_clock" sysfs */
+ attr = &pmc->block[blk_num].attr_count_clock;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = mlxbf_pmc_count_clock_show;
+ attr->dev_attr.store = mlxbf_pmc_count_clock_store;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "count_clock");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+
pmc->block[blk_num].attr_counter = devm_kcalloc(
dev, pmc->block[blk_num].counters,
sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
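Several mlxbf-pmc hunks above change crspace pointers from void * to void __iomem *, which is what lets sparse flag direct dereferences of MMIO. A small sketch of the accessor discipline the annotation enforces; the register offsets are made up for illustration:

	#include <linux/io.h>
	#include <linux/types.h>

	#define DEMO_CTL	0x0	/* hypothetical control register */
	#define DEMO_VAL	0x4	/* hypothetical counter value */

	static void demo_start_counter(void __iomem *base, u32 ctl)
	{
		writel(ctl, base + DEMO_CTL);	/* MMIO goes through writel()/readl() */
	}

	static u32 demo_read_counter(void __iomem *base)
	{
		return readl(base + DEMO_VAL);
	}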
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 6aa2a4650367..b347000e4329 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -232,7 +232,7 @@ static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
regval = !!(regval & data->mask);
}
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
}
#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 595276206baf..97fefe6c38d1 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -126,7 +126,7 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
mutex_unlock(&priv->io_lock);
- return sprintf(buf, "%u\n", regval);
+ return sysfs_emit(buf, "%u\n", regval);
access_error:
mutex_unlock(&priv->io_lock);
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 08db878f1d7d..0e479e35e66e 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -40,7 +40,7 @@ struct ssam_tmp_profile_info {
struct ssam_platform_profile_device {
struct ssam_device *sdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
bool has_fan;
};
@@ -154,14 +154,14 @@ static int convert_profile_to_ssam_fan(struct ssam_device *sdev, enum platform_p
}
}
-static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct ssam_platform_profile_device *tpd;
enum ssam_tmp_profile tp;
int status;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
status = ssam_tmp_profile_get(tpd->sdev, &tp);
if (status)
@@ -175,13 +175,13 @@ static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+static int ssam_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct ssam_platform_profile_device *tpd;
int tp;
- tpd = container_of(pprof, struct ssam_platform_profile_device, handler);
+ tpd = dev_get_drvdata(dev);
tp = convert_profile_to_ssam_tmp(tpd->sdev, profile);
if (tp < 0)
@@ -201,6 +201,22 @@ static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
return tp;
}
+static int ssam_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops ssam_platform_profile_ops = {
+ .probe = ssam_platform_profile_probe,
+ .profile_get = ssam_platform_profile_get,
+ .profile_set = ssam_platform_profile_set,
+};
+
static int surface_platform_profile_probe(struct ssam_device *sdev)
{
struct ssam_platform_profile_device *tpd;
@@ -210,23 +226,14 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
return -ENOMEM;
tpd->sdev = sdev;
-
- tpd->handler.profile_get = ssam_platform_profile_get;
- tpd->handler.profile_set = ssam_platform_profile_set;
+ ssam_device_set_drvdata(sdev, tpd);
tpd->has_fan = device_property_read_bool(&sdev->dev, "has_fan");
- set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+ tpd->ppdev = devm_platform_profile_register(&sdev->dev, "Surface Platform Profile",
+ tpd, &ssam_platform_profile_ops);
- return platform_profile_register(&tpd->handler);
-}
-
-static void surface_platform_profile_remove(struct ssam_device *sdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(tpd->ppdev);
}
static const struct ssam_device_id ssam_platform_profile_match[] = {
@@ -237,7 +244,6 @@ MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
static struct ssam_device_driver surface_platform_profile = {
.probe = surface_platform_profile_probe,
- .remove = surface_platform_profile_remove,
.match_table = ssam_platform_profile_match,
.driver = {
.name = "surface_platform_profile",
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d09baa3d3d90..69336bd778ee 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -30,7 +30,10 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
#include <linux/hwmon.h>
+#include <linux/units.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
@@ -67,10 +70,16 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+#define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23
-#define ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET 0x54
+#define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8)
-#define ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK GENMASK(20, 8)
+#define ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK GENMASK_ULL(7, 0)
+#define ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK GENMASK_ULL(15, 8)
+#define ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK GENMASK_ULL(23, 8)
+#define ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK GENMASK_ULL(39, 24)
/*
* Acer ACPI method GUIDs
@@ -95,12 +104,33 @@ enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
WMID_GAMING_TURBO_KEY_EVENT = 0x7,
+ WMID_AC_EVENT = 0x8,
};
enum acer_wmi_predator_v4_sys_info_command {
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x02,
- ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED = 0x0201,
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED = 0x0601,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS = 0x0000,
+ ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING = 0x0001,
+ ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x0002,
+};
+
+enum acer_wmi_predator_v4_sensor_id {
+ ACER_WMID_SENSOR_CPU_TEMPERATURE = 0x01,
+ ACER_WMID_SENSOR_CPU_FAN_SPEED = 0x02,
+ ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2 = 0x03,
+ ACER_WMID_SENSOR_GPU_FAN_SPEED = 0x06,
+ ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A,
+};
+
+enum acer_wmi_predator_v4_oc {
+ ACER_WMID_OC_NORMAL = 0x0000,
+ ACER_WMID_OC_TURBO = 0x0002,
+};
+
+enum acer_wmi_gaming_misc_setting {
+ ACER_WMID_MISC_SETTING_OC_1 = 0x0005,
+ ACER_WMID_MISC_SETTING_OC_2 = 0x0007,
+ ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES = 0x000A,
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE = 0x000B,
};
static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -246,7 +276,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_TURBO_LED BIT(8)
#define ACER_CAP_TURBO_FAN BIT(9)
#define ACER_CAP_PLATFORM_PROFILE BIT(10)
-#define ACER_CAP_FAN_SPEED_READ BIT(11)
+#define ACER_CAP_HWMON BIT(11)
/*
* Interface type flags
@@ -271,6 +301,7 @@ static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
static bool cycle_gaming_thermal_profile = true;
static bool predator_v4;
+static u64 supported_sensors;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -358,7 +389,7 @@ static void __init set_quirks(void)
if (quirks->predator_v4)
interface->capability |= ACER_CAP_PLATFORM_PROFILE |
- ACER_CAP_FAN_SPEED_READ;
+ ACER_CAP_HWMON;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -393,6 +424,20 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
.gpu_fans = 1,
};
+static struct quirk_entry quirk_acer_predator_ph16_72 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
+static struct quirk_entry quirk_acer_predator_pt14_51 = {
+ .turbo = 1,
+ .cpu_fans = 1,
+ .gpu_fans = 1,
+ .predator_v4 = 1,
+};
+
static struct quirk_entry quirk_acer_predator_v4 = {
.predator_v4 = 1,
};
@@ -566,6 +611,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Nitro AN515-58",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Nitro AN515-58"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH315-53",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -593,6 +647,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Predator PH16-72",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-72"),
+ },
+ .driver_data = &quirk_acer_predator_ph16_72,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH18-71",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -601,6 +664,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PT14-51",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PT14-51"),
+ },
+ .driver_data = &quirk_acer_predator_pt14_51,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -713,29 +785,24 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static bool platform_profile_support;
/*
* The profile used before turbo mode. This variable is needed for
* returning from turbo mode when the mode key is in toggle mode.
*/
-static int last_non_turbo_profile;
-
-enum acer_predator_v4_thermal_profile_ec {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x04,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x03,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x02,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x01,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x00,
-};
+static int last_non_turbo_profile = INT_MIN;
+
+/* The most performant supported profile */
+static int acer_predator_v4_max_perf;
-enum acer_predator_v4_thermal_profile_wmi {
- ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI = 0x060B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI = 0x050B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI = 0x040B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI = 0x0B,
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI = 0x010B,
+enum acer_predator_v4_thermal_profile {
+ ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x00,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x01,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x04,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x05,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x06,
};
/* Find which quirks are needed for a particular vendor/ model pair */
@@ -1448,6 +1515,45 @@ WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
return status;
}
+static int WMI_gaming_execute_u32_u64(u32 method_id, u32 in, u64 *out)
+{
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input = {
+ .length = sizeof(in),
+ .pointer = &in,
+ };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = result.pointer;
+ if (obj && out) {
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ *out = obj->integer.value;
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length < sizeof(*out))
+ ret = -ENOMSG;
+ else
+ *out = get_unaligned_le64(obj->buffer.pointer);
+
+ break;
+ default:
+ ret = -ENOMSG;
+ break;
+ }
+ }
+
+ kfree(obj);
+
+ return ret;
+}
+
static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
{
u32 method_id = 0;
@@ -1462,9 +1568,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
case ACER_CAP_TURBO_FAN:
method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
break;
- case ACER_CAP_TURBO_OC:
- method_id = ACER_WMID_SET_GAMING_MISC_SETTING_METHODID;
- break;
default:
return AE_BAD_PARAMETER;
}
@@ -1497,6 +1600,24 @@ static acpi_status WMID_gaming_get_u64(u64 *value, u32 cap)
return status;
}
+static int WMID_gaming_get_sys_info(u32 command, u64 *out)
+{
+ acpi_status status;
+ u64 result;
+
+ status = WMI_gaming_execute_u64(ACER_WMID_GET_GAMING_SYS_INFO_METHODID, command, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_PREDATOR_V4_RETURN_STATUS_BIT_MASK, result))
+ return -EIO;
+
+ *out = result;
+
+ return 0;
+}
+
static void WMID_gaming_set_fan_mode(u8 fan_mode)
{
/* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/
@@ -1518,6 +1639,48 @@ static void WMID_gaming_set_fan_mode(u8 fan_mode)
WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
}
+static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value)
+{
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_VALUE_MASK, value);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ return 0;
+}
+
+static int WMID_gaming_get_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 *value)
+{
+ u64 input = 0;
+ u64 result;
+ int ret;
+
+ input |= FIELD_PREP(ACER_GAMING_MISC_SETTING_INDEX_MASK, setting);
+
+ ret = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_MISC_SETTING_METHODID, input,
+ &result);
+ if (ret < 0)
+ return ret;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_MISC_SETTING_STATUS_MASK, result))
+ return -EIO;
+
+ *value = FIELD_GET(ACER_GAMING_MISC_SETTING_VALUE_MASK, result);
+
+ return 0;
+}
+
/*
* Generic Device (interface-independent)
*/
@@ -1744,26 +1907,6 @@ static int acer_gsensor_event(void)
return 0;
}
-static int acer_get_fan_speed(int fan)
-{
- if (quirks->predator_v4) {
- acpi_status status;
- u64 fanspeed;
-
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- fan == 0 ? ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED :
- ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED,
- &fanspeed);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- return FIELD_GET(ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK, fanspeed);
- }
- return -EOPNOTSUPP;
-}
-
/*
* Predator series turbo button
*/
@@ -1783,8 +1926,12 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x1);
/* Set OC to normal */
- WMID_gaming_set_u64(0x5, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x7, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_NORMAL);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_NORMAL);
+ }
} else {
/* Turn on turbo led */
WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
@@ -1793,22 +1940,25 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_fan_mode(0x2);
/* Set OC to turbo mode */
- WMID_gaming_set_u64(0x205, ACER_CAP_TURBO_OC);
- WMID_gaming_set_u64(0x207, ACER_CAP_TURBO_OC);
+ if (has_cap(ACER_CAP_TURBO_OC)) {
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_1,
+ ACER_WMID_OC_TURBO);
+ WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_OC_2,
+ ACER_WMID_OC_TURBO);
+ }
}
return turbo_led_state;
}
static int
-acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u8 tp;
int err;
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET, &tp);
-
- if (err < 0)
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &tp);
+ if (err)
return err;
switch (tp) {
@@ -1835,74 +1985,112 @@ acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
}
static int
-acer_predator_v4_platform_profile_set(struct platform_profile_handler *pprof,
+acer_predator_v4_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- int tp;
- acpi_status status;
+ int err, tp;
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
break;
case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
break;
case PLATFORM_PROFILE_QUIET:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
break;
case PLATFORM_PROFILE_LOW_POWER:
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
break;
default:
return -EOPNOTSUPP;
}
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
+ if (tp != acer_predator_v4_max_perf)
last_non_turbo_profile = tp;
return 0;
}
-static int acer_platform_profile_setup(void)
+static int
+acer_predator_v4_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ unsigned long supported_profiles;
+ int err;
+
+ err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES,
+ (u8 *)&supported_profiles);
+ if (err)
+ return err;
+
+ /* Iterate through supported profiles in order of increasing performance */
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_ECO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+
+ /* We only use this profile as a fallback option in case no prior
+ * profile is supported.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
+ }
+
+ if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO, &supported_profiles)) {
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+
+ /* We need to handle the hypothetical case where only the turbo profile
+ * is supported. In this case the turbo toggle will essentially be a
+ * no-op.
+ */
+ if (last_non_turbo_profile < 0)
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
+ }
+
+ return 0;
+}
+
+static const struct platform_profile_ops acer_predator_v4_platform_profile_ops = {
+ .probe = acer_predator_v4_platform_profile_probe,
+ .profile_get = acer_predator_v4_platform_profile_get,
+ .profile_set = acer_predator_v4_platform_profile_set,
+};
+
+static int acer_platform_profile_setup(struct platform_device *device)
{
if (quirks->predator_v4) {
- int err;
-
- platform_profile_handler.profile_get =
- acer_predator_v4_platform_profile_get;
- platform_profile_handler.profile_set =
- acer_predator_v4_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_QUIET,
- platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_LOW_POWER,
- platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(
+ &device->dev, "acer-wmi", NULL, &acer_predator_v4_platform_profile_ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
platform_profile_support = true;
-
- /* Set default non-turbo profile */
- last_non_turbo_profile =
- ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
}
return 0;
}
@@ -1910,83 +2098,41 @@ static int acer_platform_profile_setup(void)
static int acer_thermal_profile_change(void)
{
/*
- * This mode key can rotate each mode or toggle turbo mode.
- * On battery, only ECO and BALANCED mode are available.
+ * This mode key will either cycle through each mode or toggle the
+ * most performant profile.
*/
if (quirks->predator_v4) {
u8 current_tp;
- int tp, err;
- u64 on_AC;
- acpi_status status;
-
- err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET,
- &current_tp);
+ int err, tp;
- if (err < 0)
- return err;
+ if (cycle_gaming_thermal_profile) {
+ platform_profile_cycle();
+ } else {
+ /* Do nothing if no suitable platform profiles were found */
+ if (last_non_turbo_profile < 0)
+ return 0;
- /* Check power source */
- status = WMI_gaming_execute_u64(
- ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
- ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS, &on_AC);
+ err = WMID_gaming_get_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &current_tp);
+ if (err)
+ return err;
- if (ACPI_FAILURE(status))
- return -EIO;
-
- switch (current_tp) {
- case ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else
+ if (current_tp == acer_predator_v4_max_perf)
tp = last_non_turbo_profile;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- case ACER_PREDATOR_V4_THERMAL_PROFILE_ECO:
- if (!on_AC)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
- else if (cycle_gaming_thermal_profile)
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
else
- tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
- break;
- default:
- return -EOPNOTSUPP;
- }
+ tp = acer_predator_v4_max_perf;
- status = WMI_gaming_execute_u64(
- ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
-
- if (ACPI_FAILURE(status))
- return -EIO;
+ err = WMID_gaming_set_misc_setting(
+ ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
+ if (err)
+ return err;
- /* Store non-turbo profile for turbo mode toggle*/
- if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
- last_non_turbo_profile = tp;
+ /* Store last profile for toggle */
+ if (current_tp != acer_predator_v4_max_perf)
+ last_non_turbo_profile = current_tp;
- platform_profile_notify();
+ platform_profile_notify(platform_profile_device);
+ }
}
return 0;
@@ -2280,6 +2426,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
acer_thermal_profile_change();
break;
+ case WMID_AC_EVENT:
+ /* We ignore AC events here */
+ break;
default:
pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
@@ -2530,12 +2679,12 @@ static int acer_platform_probe(struct platform_device *device)
goto error_rfkill;
if (has_cap(ACER_CAP_PLATFORM_PROFILE)) {
- err = acer_platform_profile_setup();
+ err = acer_platform_profile_setup(device);
if (err)
goto error_platform_profile;
}
- if (has_cap(ACER_CAP_FAN_SPEED_READ)) {
+ if (has_cap(ACER_CAP_HWMON)) {
err = acer_wmi_hwmon_init();
if (err)
goto error_hwmon;
@@ -2544,8 +2693,6 @@ static int acer_platform_probe(struct platform_device *device)
return 0;
error_hwmon:
- if (platform_profile_support)
- platform_profile_remove();
error_platform_profile:
acer_rfkill_exit();
error_rfkill:
@@ -2566,9 +2713,6 @@ static void acer_platform_remove(struct platform_device *device)
acer_backlight_exit();
acer_rfkill_exit();
-
- if (platform_profile_support)
- platform_profile_remove();
}
#ifdef CONFIG_PM_SLEEP
@@ -2655,43 +2799,86 @@ static void __init create_debugfs(void)
&interface->debug.wmid_devices);
}
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_temp_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_TEMPERATURE,
+ [1] = ACER_WMID_SENSOR_GPU_TEMPERATURE,
+ [2] = ACER_WMID_SENSOR_EXTERNAL_TEMPERATURE_2,
+};
+
+static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_id[] = {
+ [0] = ACER_WMID_SENSOR_CPU_FAN_SPEED,
+ [1] = ACER_WMID_SENSOR_GPU_FAN_SPEED,
+};
+
static umode_t acer_wmi_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
{
+ enum acer_wmi_predator_v4_sensor_id sensor_id;
+ const u64 *supported_sensors = data;
+
switch (type) {
+ case hwmon_temp:
+ sensor_id = acer_wmi_temp_channel_to_sensor_id[channel];
+ break;
case hwmon_fan:
- if (acer_get_fan_speed(channel) >= 0)
- return 0444;
+ sensor_id = acer_wmi_fan_channel_to_sensor_id[channel];
break;
default:
return 0;
}
+ if (*supported_sensors & BIT(sensor_id - 1))
+ return 0444;
+
return 0;
}
static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
+ u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;
+ u64 result;
int ret;
switch (type) {
+ case hwmon_temp:
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_temp_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
+ if (ret < 0)
+ return ret;
+
+ result = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ *val = result * MILLIDEGREE_PER_DEGREE;
+ return 0;
case hwmon_fan:
- ret = acer_get_fan_speed(channel);
+ command |= FIELD_PREP(ACER_PREDATOR_V4_SENSOR_INDEX_BIT_MASK,
+ acer_wmi_fan_channel_to_sensor_id[channel]);
+
+ ret = WMID_gaming_get_sys_info(command, &result);
if (ret < 0)
return ret;
- *val = ret;
- break;
+
+ *val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
+ return 0;
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
- HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT), NULL
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT
+ ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT
+ ),
+ NULL
};
static const struct hwmon_ops acer_wmi_hwmon_ops = {
@@ -2708,9 +2895,20 @@ static int acer_wmi_hwmon_init(void)
{
struct device *dev = &acer_platform_device->dev;
struct device *hwmon;
+ u64 result;
+ int ret;
+
+ ret = WMID_gaming_get_sys_info(ACER_WMID_CMD_GET_PREDATOR_V4_SUPPORTED_SENSORS, &result);
+ if (ret < 0)
+ return ret;
+
+ /* Return early if no sensors are available */
+ supported_sensors = FIELD_GET(ACER_PREDATOR_V4_SUPPORTED_SENSORS_BIT_MASK, result);
+ if (!supported_sensors)
+ return 0;
hwmon = devm_hwmon_device_register_with_info(dev, "acer",
- &acer_platform_driver,
+ &supported_sensors,
&acer_wmi_hwmon_chip_info,
NULL);
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index e981d45e1c12..444b43be35a2 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -226,7 +226,7 @@ static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
}
static ssize_t hsmp_metric_tbl_acpi_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -285,19 +285,19 @@ static int init_acpi(struct device *dev)
return ret;
}
-static struct bin_attribute hsmp_metric_tbl_attr = {
+static const struct bin_attribute hsmp_metric_tbl_attr = {
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444},
- .read = hsmp_metric_tbl_acpi_read,
+ .read_new = hsmp_metric_tbl_acpi_read,
.size = sizeof(struct hsmp_metric_table),
};
-static struct bin_attribute *hsmp_attr_list[] = {
+static const struct bin_attribute *hsmp_attr_list[] = {
&hsmp_metric_tbl_attr,
NULL
};
-static struct attribute_group hsmp_attr_grp = {
- .bin_attrs = hsmp_attr_list,
+static const struct attribute_group hsmp_attr_grp = {
+ .bin_attrs_new = hsmp_attr_list,
.is_bin_visible = hsmp_is_sock_attr_visible,
};
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 227b4ad4a51a..03164e30b3a5 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -33,7 +33,13 @@
#define HSMP_WR true
#define HSMP_RD false
-#define DRIVER_VERSION "2.3"
+#define DRIVER_VERSION "2.4"
+
+/*
+ * When the same message number is used for both GET and SET operations,
+ * bit 31 indicates whether it is a SET or a GET operation.
+ */
+#define CHECK_GET_BIT BIT(31)
static struct hsmp_plat_device hsmp_pdev;
@@ -167,11 +173,28 @@ static int validate_message(struct hsmp_message *msg)
if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
return -ENOMSG;
- /* num_args and response_sz against the HSMP spec */
- if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
- msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ /*
+ * num_args passed by the user should match the num_args specified in
+ * the message description table.
+ */
+ if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args)
return -EINVAL;
+ /*
+ * Some older HSMP SET messages are updated to add GET in the same message.
+ * In these messages, GET returns the current value and SET also returns
+ * the successfully set value. To support GET and SET in the same message
+ * while maintaining backward compatibility for HSMP users,
+ * hsmp_msg_desc_table[] indicates only the maximum allowed response_sz.
+ */
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET) {
+ if (msg->response_sz > hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ } else {
+ /* only HSMP_SET or HSMP_GET messages go through this strict check */
+ if (msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+ }
return 0;
}
@@ -239,6 +262,18 @@ int hsmp_test(u16 sock_ind, u32 value)
}
EXPORT_SYMBOL_NS_GPL(hsmp_test, "AMD_HSMP");
+static bool is_get_msg(struct hsmp_message *msg)
+{
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_GET)
+ return true;
+
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET &&
+ (msg->args[0] & CHECK_GET_BIT))
+ return true;
+
+ return false;
+}
+
long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int __user *arguser = (int __user *)arg;
@@ -261,7 +296,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_WRONLY mode
* Execute only set/configure commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+ if (is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ:
@@ -269,7 +304,7 @@ long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
* Device is opened in O_RDONLY mode
* Execute only get/monitor commands
*/
- if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+ if (!is_get_msg(&msg))
return -EPERM;
break;
case FMODE_READ | FMODE_WRITE:
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index a61f815c9f80..02ca85762b68 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -59,7 +59,7 @@ static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct hsmp_socket *sock;
@@ -97,13 +97,13 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* is_bin_visible function is used to show / hide the necessary groups.
*/
#define HSMP_BIN_ATTR(index, _list) \
-static struct bin_attribute attr##index = { \
+static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
.private = (void *)index, \
- .read = hsmp_metric_tbl_plat_read, \
+ .read_new = hsmp_metric_tbl_plat_read, \
.size = sizeof(struct hsmp_metric_table), \
}; \
-static struct bin_attribute _list[] = { \
+static const struct bin_attribute _list[] = { \
&attr##index, \
NULL \
}
@@ -118,8 +118,8 @@ HSMP_BIN_ATTR(6, *sock6_attr_list);
HSMP_BIN_ATTR(7, *sock7_attr_list);
#define HSMP_BIN_ATTR_GRP(index, _list, _name) \
-static struct attribute_group sock##index##_attr_grp = { \
- .bin_attrs = _list, \
+static const struct attribute_group sock##index##_attr_grp = { \
+ .bin_attrs_new = _list, \
.is_bin_visible = hsmp_is_sock_attr_visible, \
.name = #_name, \
}
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index f1d9ab19d24c..255d94ddf999 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -4,6 +4,6 @@
# AMD Power Management Controller Driver
#
-amd-pmc-objs := pmc.o pmc-quirks.o
+amd-pmc-objs := pmc.o pmc-quirks.o mp1_stb.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c
new file mode 100644
index 000000000000..c005f00988f7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/mp1_stb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD MP1 Smart Trace Buffer (STB) Layer
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sanket Goswami <Sanket.Goswami@amd.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_nb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "pmc.h"
+
+/* STB Spill to DRAM Parameters */
+#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
+#define S2D_TELEMETRY_BYTES_MAX 0x100000U
+#define S2D_RSVD_RAM_SPACE 0x100000
+
+/* STB Registers */
+#define AMD_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_DUMMY_PC 0xC6000007
+
+/* STB Spill to DRAM Message Definition */
+#define STB_FORCE_FLUSH_DATA 0xCF
+#define FIFO_SIZE 4096
+
+/* STB S2D (Spill to DRAM) uses a different message port offset */
+#define AMD_S2D_REGISTER_MESSAGE 0xA20
+#define AMD_S2D_REGISTER_RESPONSE 0xA80
+#define AMD_S2D_REGISTER_ARGUMENT 0xA88
+
+/* STB S2D (Spill to DRAM) message port offset for 44h model */
+#define AMD_GNR_REGISTER_MESSAGE 0x524
+#define AMD_GNR_REGISTER_RESPONSE 0x570
+#define AMD_GNR_REGISTER_ARGUMENT 0xA40
+
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
+static bool dump_custom_stb;
+module_param(dump_custom_stb, bool, 0644);
+MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
+
+enum s2d_arg {
+ S2D_TELEMETRY_SIZE = 0x01,
+ S2D_PHYS_ADDR_LOW,
+ S2D_PHYS_ADDR_HIGH,
+ S2D_NUM_SAMPLES,
+ S2D_DRAM_SIZE,
+};
+
+struct amd_stb_v2_data {
+ size_t size;
+ u8 data[] __counted_by(size);
+};
+
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = amd_smn_write(0, AMD_STB_PMI_0, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ for (i = 0; i < FIFO_SIZE; i++) {
+ err = amd_smn_read(0, AMD_STB_PMI_0, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_stb_read(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open,
+ .read = amd_stb_debugfs_read,
+ .release = amd_stb_debugfs_release,
+};
+
+/* Enhanced STB Firmware Reporting Mechanism */
+static int amd_stb_handle_efr(struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ struct amd_stb_v2_data *stb_data_arr;
+ u32 fsize;
+
+ fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
+
+static int amd_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 fsize, num_samples, val, stb_rdptr_offset = 0;
+ struct amd_stb_v2_data *stb_data_arr;
+ int ret;
+
+ /* Write dummy postcode while reading the STB buffer */
+ ret = amd_stb_write(dev, AMD_PMC_STB_DUMMY_PC);
+ if (ret)
+ dev_err(dev->dev, "error writing to STB: %d\n", ret);
+
+ /* Spill to DRAM num_samples uses separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
+ if (ret)
+ dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
+
+ /*
+ * We have a custom stb size and the PMFW is supposed to give
+ * the enhanced dram size. Note that we land here only for the
+ * platforms that support enhanced dram size reporting.
+ */
+ if (dump_custom_stb)
+ return amd_stb_handle_efr(filp);
+
+ /* Get the num_samples to calculate the last push location */
+ ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->stb_arg.s2d_msg_id, true);
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+ if (ret) {
+ dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
+ return ret;
+ }
+
+ fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
+ stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
+ if (!stb_data_arr)
+ return -ENOMEM;
+
+ stb_data_arr->size = fsize;
+
+ /*
+ * Start capturing data from the last push location.
+ * This is for general cases, where the stb limits
+ * are meant for standard usage.
+ */
+ if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
+ /* First read the oldest data, starting one entry behind the last write, to the end of the ring buffer */
+ stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
+ fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
+
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
+ /* Then copy the newer samples from offset 0 up to the last write */
+ memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
+ } else {
+ memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
+ }
+
+ filp->private_data = stb_data_arr;
+
+ return 0;
+}
+
+static ssize_t amd_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amd_stb_v2_data *data = filp->private_data;
+
+ return simple_read_from_buffer(buf, size, pos, data->data, data->size);
+}
+
+static int amd_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_stb_debugfs_fops_v2 = {
+ .owner = THIS_MODULE,
+ .open = amd_stb_debugfs_open_v2,
+ .read = amd_stb_debugfs_read_v2,
+ .release = amd_stb_debugfs_release_v2,
+};
+
+static void amd_stb_update_args(struct amd_pmc_dev *dev)
+{
+ if (cpu_feature_enabled(X86_FEATURE_ZEN5))
+ switch (boot_cpu_data.x86_model) {
+ case 0x44:
+ dev->stb_arg.msg = AMD_GNR_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_GNR_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_GNR_REGISTER_RESPONSE;
+ return;
+ default:
+ break;
+ }
+
+ dev->stb_arg.msg = AMD_S2D_REGISTER_MESSAGE;
+ dev->stb_arg.arg = AMD_S2D_REGISTER_ARGUMENT;
+ dev->stb_arg.resp = AMD_S2D_REGISTER_RESPONSE;
+}
+
+static bool amd_is_stb_supported(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ if (boot_cpu_data.x86_model == 0x44)
+ dev->stb_arg.s2d_msg_id = 0x9B;
+ else
+ dev->stb_arg.s2d_msg_id = 0xBE;
+ break;
+ case AMD_CPU_ID_PS:
+ dev->stb_arg.s2d_msg_id = 0x85;
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+ if (boot_cpu_data.x86_model == 0x70)
+ dev->stb_arg.s2d_msg_id = 0xF1;
+ else
+ dev->stb_arg.s2d_msg_id = 0xDE;
+ break;
+ default:
+ return false;
+ }
+
+ amd_stb_update_args(dev);
+ return true;
+}
+
+int amd_stb_s2d_init(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 stb_phys_addr;
+ u32 size = 0;
+ int ret;
+
+ if (!enable_stb)
+ return 0;
+
+ if (amd_is_stb_supported(dev)) {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops_v2);
+ } else {
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_stb_debugfs_fops);
+ return 0;
+ }
+
+ /* Spill to DRAM feature uses separate SMU message port */
+ dev->msg_port = MSG_PORT_S2D;
+
+ amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->stb_arg.s2d_msg_id, true);
+ if (size != S2D_TELEMETRY_BYTES_MAX)
+ return -EIO;
+
+ /* Get DRAM size */
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->stb_arg.s2d_msg_id, true);
+ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->stb_arg.s2d_msg_id, true);
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->stb_arg.s2d_msg_id, true);
+
+ stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = MSG_PORT_PMC;
+
+ dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
+ if (!dev->stb_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 87b064e8ca5a..e6124498b195 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
@@ -41,24 +42,9 @@
#define AMD_PMC_SCRATCH_REG_1AH 0xF14
/* STB Registers */
-#define AMD_PMC_STB_PMI_0 0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
-#define AMD_PMC_STB_DUMMY_PC 0xC6000007
-
-/* STB S2D(Spill to DRAM) has different message port offset */
-#define AMD_S2D_REGISTER_MESSAGE 0xA20
-#define AMD_S2D_REGISTER_RESPONSE 0xA80
-#define AMD_S2D_REGISTER_ARGUMENT 0xA88
-
-/* STB Spill to DRAM Parameters */
-#define S2D_TELEMETRY_BYTES_MAX 0x100000U
-#define S2D_RSVD_RAM_SPACE 0x100000
-#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
-
-/* STB Spill to DRAM Message Definition */
-#define STB_FORCE_FLUSH_DATA 0xCF
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_MAPPING_SIZE 0x01000
@@ -98,7 +84,6 @@
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
-#define FIFO_SIZE 4096
enum amd_pmc_def {
MSG_TEST = 0x01,
@@ -106,24 +91,39 @@ enum amd_pmc_def {
MSG_OS_HINT_RN,
};
-enum s2d_arg {
- S2D_TELEMETRY_SIZE = 0x01,
- S2D_PHYS_ADDR_LOW,
- S2D_PHYS_ADDR_HIGH,
- S2D_NUM_SAMPLES,
- S2D_DRAM_SIZE,
-};
-
-struct amd_pmc_stb_v2_data {
- size_t size;
- u8 data[] __counted_by(size);
-};
-
struct amd_pmc_bit_map {
const char *name;
u32 bit_mask;
};
+static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"VDD_CCX", BIT(4)},
+ {"ACP", BIT(5)},
+ {"VCN_0", BIT(6)},
+ {"VCN_1", BIT(7)},
+ {"ISP", BIT(8)},
+ {"NBIO", BIT(9)},
+ {"DF", BIT(10)},
+ {"USB3_0", BIT(11)},
+ {"USB3_1", BIT(12)},
+ {"LAPIC", BIT(13)},
+ {"USB3_2", BIT(14)},
+ {"USB4_RT0", BIT(15)},
+ {"USB4_RT1", BIT(16)},
+ {"USB4_0", BIT(17)},
+ {"USB4_1", BIT(18)},
+ {"MPM", BIT(19)},
+ {"JPEG_0", BIT(20)},
+ {"JPEG_1", BIT(21)},
+ {"IPU", BIT(22)},
+ {"UMSCH", BIT(23)},
+ {"VPE", BIT(24)},
+};
+
static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"DISPLAY", BIT(0)},
{"CPU", BIT(1)},
@@ -147,25 +147,13 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"IPU", BIT(19)},
{"UMSCH", BIT(20)},
{"VPE", BIT(21)},
- {}
};
-static bool enable_stb;
-module_param(enable_stb, bool, 0644);
-MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
-
static bool disable_workarounds;
module_param(disable_workarounds, bool, 0644);
MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
-static bool dump_custom_stb;
-module_param(dump_custom_stb, bool, 0644);
-MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer");
-
static struct amd_pmc_dev pmc;
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -194,155 +182,6 @@ struct smu_metrics {
u64 timecondition_notmet_totaltime[32];
} __packed;
-static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 size = FIFO_SIZE * sizeof(u32);
- u32 *buf;
- int rc;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = amd_pmc_read_stb(dev, buf);
- if (rc) {
- kfree(buf);
- return rc;
- }
-
- filp->private_data = buf;
- return rc;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- if (!filp->private_data)
- return -EINVAL;
-
- return simple_read_from_buffer(buf, size, pos, filp->private_data,
- FIFO_SIZE * sizeof(u32));
-}
-
-static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open,
- .read = amd_pmc_stb_debugfs_read,
- .release = amd_pmc_stb_debugfs_release,
-};
-
-/* Enhanced STB Firmware Reporting Mechanism */
-static int amd_pmc_stb_handle_efr(struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- u32 fsize;
-
- fsize = dev->dram_size - S2D_RSVD_RAM_SPACE;
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
-{
- struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 fsize, num_samples, val, stb_rdptr_offset = 0;
- struct amd_pmc_stb_v2_data *stb_data_arr;
- int ret;
-
- /* Write dummy postcode while reading the STB buffer */
- ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
- if (ret)
- dev_err(dev->dev, "error writing to STB: %d\n", ret);
-
- /* Spill to DRAM num_samples uses separate SMU message port */
- dev->msg_port = 1;
-
- ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1);
- if (ret)
- dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret);
-
- /*
- * We have a custom stb size and the PMFW is supposed to give
- * the enhanced dram size. Note that we land here only for the
- * platforms that support enhanced dram size reporting.
- */
- if (dump_custom_stb)
- return amd_pmc_stb_handle_efr(filp);
-
- /* Get the num_samples to calculate the last push location */
- ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
- if (ret) {
- dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
- return ret;
- }
-
- fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX);
- stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL);
- if (!stb_data_arr)
- return -ENOMEM;
-
- stb_data_arr->size = fsize;
-
- /*
- * Start capturing data from the last push location.
- * This is for general cases, where the stb limits
- * are meant for standard usage.
- */
- if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
- /* First read oldest data starting 1 behind last write till end of ringbuffer */
- stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX;
- fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset;
-
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize);
- /* Second copy the newer samples from offset 0 - last write */
- memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset);
- } else {
- memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize);
- }
-
- filp->private_data = stb_data_arr;
-
- return 0;
-}
-
-static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
- loff_t *pos)
-{
- struct amd_pmc_stb_v2_data *data = filp->private_data;
-
- return simple_read_from_buffer(buf, size, pos, data->data, data->size);
-}
-
-static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
- .owner = THIS_MODULE,
- .open = amd_pmc_stb_debugfs_open_v2,
- .read = amd_pmc_stb_debugfs_read_v2,
- .release = amd_pmc_stb_debugfs_release_v2,
-};
-
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -351,18 +190,23 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
dev->num_ips = 12;
- dev->s2d_msg_id = 0xBE;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case AMD_CPU_ID_PS:
dev->num_ips = 21;
- dev->s2d_msg_id = 0x85;
+ dev->ips_ptr = soc15_ip_blk;
dev->smu_msg = 0x538;
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- dev->num_ips = 22;
- dev->s2d_msg_id = 0xDE;
+ if (boot_cpu_data.x86_model == 0x70) {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk_v2);
+ dev->ips_ptr = soc15_ip_blk_v2;
+ } else {
+ dev->num_ips = ARRAY_SIZE(soc15_ip_blk);
+ dev->ips_ptr = soc15_ip_blk;
+ }
dev->smu_msg = 0x938;
break;
}
@@ -530,8 +374,8 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
seq_puts(s, "\n=== Active time (in us) ===\n");
for (idx = 0 ; idx < dev->num_ips ; idx++) {
- if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
- seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ if (dev->ips_ptr[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", dev->ips_ptr[idx].name,
table.timecondition_notmet_lastcapture[idx]);
}
@@ -626,20 +470,6 @@ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
debugfs_remove_recursive(dev->dbgfs_dir);
}
-static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
-{
- switch (dev->cpu_id) {
- case AMD_CPU_ID_YC:
- case AMD_CPU_ID_CB:
- case AMD_CPU_ID_PS:
- case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
- case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
- return true;
- default:
- return false;
- }
-}
-
static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
@@ -649,14 +479,17 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&s0ix_stats_fops);
debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
&amd_pmc_idlemask_fops);
- /* Enable STB only when the module_param is set */
- if (enable_stb) {
- if (amd_pmc_is_stb_supported(dev))
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops_v2);
- else
- debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
- &amd_pmc_stb_debugfs_fops);
+}
+
+static char *amd_pmc_get_msg_port(struct amd_pmc_dev *dev)
+{
+ switch (dev->msg_port) {
+ case MSG_PORT_PMC:
+ return "PMC";
+ case MSG_PORT_S2D:
+ return "S2D";
+ default:
+ return "Invalid message port";
}
}
@@ -664,10 +497,10 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value, message, argument, response;
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -675,26 +508,26 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
}
value = amd_pmc_reg_read(dev, response);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, argument);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", amd_pmc_get_msg_port(dev), value);
value = amd_pmc_reg_read(dev, message);
- dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", amd_pmc_get_msg_port(dev), value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
int rc;
u32 val, message, argument, response;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (dev->msg_port) {
- message = AMD_S2D_REGISTER_MESSAGE;
- argument = AMD_S2D_REGISTER_ARGUMENT;
- response = AMD_S2D_REGISTER_RESPONSE;
+ if (dev->msg_port == MSG_PORT_S2D) {
+ message = dev->stb_arg.msg;
+ argument = dev->stb_arg.arg;
+ response = dev->stb_arg.resp;
} else {
message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
@@ -707,7 +540,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -725,7 +558,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -739,21 +572,19 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
case AMD_PMC_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
case AMD_PMC_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmc_dump_registers(dev);
return rc;
}
@@ -882,7 +713,7 @@ static void amd_pmc_s2idle_prepare(void)
return;
}
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -901,7 +732,7 @@ static void amd_pmc_s2idle_check(void)
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_CHECK);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
@@ -928,7 +759,7 @@ static void amd_pmc_s2idle_restore(void)
/* Let SMU know that we are looking for stats */
amd_pmc_dump_data(pdev);
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
@@ -982,74 +813,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
-static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
-{
- u32 phys_addr_low, phys_addr_hi;
- u64 stb_phys_addr;
- u32 size = 0;
- int ret;
-
- /* Spill to DRAM feature uses separate SMU message port */
- dev->msg_port = 1;
-
- amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
- if (size != S2D_TELEMETRY_BYTES_MAX)
- return -EIO;
-
- /* Get DRAM size */
- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
- if (ret || !dev->dram_size)
- dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
-
- /* Get STB DRAM address */
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
- amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);
-
- if (!phys_addr_hi && !phys_addr_low) {
- dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n");
- return -EINVAL;
- }
-
- stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
-
- /* Clear msg_port for other SMU operation */
- dev->msg_port = 0;
-
- dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
- if (!dev->stb_virt_addr)
- return -ENOMEM;
-
- return 0;
-}
-
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
-{
- int err;
-
- err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
- if (err) {
- dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
-
- return 0;
-}
-
-static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
-{
- int i, err;
-
- for (i = 0; i < FIFO_SIZE; i++) {
- err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
- if (err) {
- dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
- return pcibios_err_to_errno(err);
- }
- }
-
- return 0;
-}
-
static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
@@ -1107,12 +870,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
/* Get num of IP blocks within the SoC */
amd_pmc_get_ip_info(dev);
- if (enable_stb && amd_pmc_is_stb_supported(dev)) {
- err = amd_pmc_s2d_init(dev);
- if (err)
- goto err_pci_dev_put;
- }
-
platform_set_drvdata(pdev, dev);
if (IS_ENABLED(CONFIG_SUSPEND)) {
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
@@ -1123,6 +880,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
amd_pmc_dbgfs_register(dev);
+ err = amd_stb_s2d_init(dev);
+ if (err)
+ goto err_pci_dev_put;
+
if (IS_ENABLED(CONFIG_AMD_MP2_STB))
amd_mp2_stb_init(dev);
pm_report_max_hw_sleep(U64_MAX);
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f1166d15c856..f43f0253b0f5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,11 @@
#include <linux/types.h>
#include <linux/mutex.h>
+enum s2d_msg_port {
+ MSG_PORT_PMC,
+ MSG_PORT_S2D,
+};
+
struct amd_mp2_dev {
void __iomem *mmio;
void __iomem *vslbase;
@@ -25,24 +30,31 @@ struct amd_mp2_dev {
bool is_stb_data;
};
+struct stb_arg {
+ u32 s2d_msg_id;
+ u32 msg;
+ u32 arg;
+ u32 resp;
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
void __iomem *stb_virt_addr;
void __iomem *fch_virt_addr;
- bool msg_port;
u32 base_addr;
u32 cpu_id;
- u32 active_ips;
u32 dram_size;
+ u32 active_ips;
+ const struct amd_pmc_bit_map *ips_ptr;
u32 num_ips;
- u32 s2d_msg_id;
u32 smu_msg;
/* SMU version information */
u8 smu_program;
u8 major;
u8 minor;
u8 rev;
+ u8 msg_port;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
@@ -50,6 +62,7 @@ struct amd_pmc_dev {
struct quirk_entry *quirks;
bool disable_8042_wakeup;
struct amd_mp2_dev *mp2;
+ struct stb_arg stb_arg;
};
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
@@ -70,4 +83,9 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
+int amd_stb_s2d_init(struct amd_pmc_dev *dev);
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf);
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data);
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+
#endif /* PMC_H */
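The pmc.h hunk above replaces the old bool msg_port with a u8 backed by the new enum s2d_msg_port and gathers the Spill-to-DRAM state into struct stb_arg. A minimal, hypothetical sketch of a caller selecting the S2D message port through the enum instead of the old 0/1 literals (the real STB helpers now live in a file outside this excerpt; the stb_arg.s2d_msg_id field usage and the example_* name are assumptions):

static int example_query_s2d_size(struct amd_pmc_dev *dev, u32 *size)
{
	int ret;

	/* Spill-to-DRAM telemetry uses the dedicated SMU message port */
	dev->msg_port = MSG_PORT_S2D;
	ret = amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, size, dev->stb_arg.s2d_msg_id, true);
	/* Restore the default port for ordinary PMC messages */
	dev->msg_port = MSG_PORT_PMC;

	return ret;
}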
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 7d6079b02589..6b26e48ce8ad 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o pmf-quirks.o
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 1b9c7acf0ddf..dd5780a1d06e 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -321,17 +321,29 @@ int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req
req, sizeof(*req));
}
+static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req);
+ if (ret)
+ dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret);
+}
+
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
{
struct amd_pmf_dev *pmf_dev = data;
struct apmf_sbios_req req;
int ret;
- mutex_lock(&pmf_dev->update_mutex);
+ guard(mutex)(&pmf_dev->update_mutex);
ret = apmf_get_sbios_requests(pmf_dev, &req);
if (ret) {
dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
- goto out;
+ return;
}
if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
@@ -353,8 +365,6 @@ static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
if (pmf_dev->amt_enabled)
amd_pmf_update_2_cql(pmf_dev, req.cql_event);
}
-out:
- mutex_unlock(&pmf_dev->update_mutex);
}
static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
@@ -430,6 +440,15 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
apmf_event_handler(ahandle, 0, pmf_dev);
}
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler_v2, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler for custom BIOS inputs\n");
+ return -ENODEV;
+ }
+ }
+
return 0;
}
@@ -480,6 +499,9 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+
+ if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2)
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler_v2);
}
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 7f88f3121cf5..764cc1fe90ae 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -127,7 +127,8 @@ static void amd_pmf_get_metrics(struct work_struct *work)
ktime_t time_elapsed_ms;
int socket_power;
- mutex_lock(&dev->update_mutex);
+ guard(mutex)(&dev->update_mutex);
+
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
@@ -149,7 +150,6 @@ static void amd_pmf_get_metrics(struct work_struct *work)
dev->start_time = ktime_to_ms(ktime_get());
schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
- mutex_unlock(&dev->update_mutex);
}
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
@@ -181,7 +181,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
int rc;
u32 val;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
@@ -189,7 +189,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- goto out_unlock;
+ return rc;
}
/* Write zero to response register */
@@ -207,7 +207,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "SMU response timed out\n");
- goto out_unlock;
+ return rc;
}
switch (val) {
@@ -221,21 +221,19 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
case AMD_PMF_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
rc = -EBUSY;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_UNKNOWN:
dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
rc = -EINVAL;
- goto out_unlock;
+ break;
case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
case AMD_PMF_RESULT_FAILED:
default:
dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
rc = -EIO;
- goto out_unlock;
+ break;
}
-out_unlock:
- mutex_unlock(&dev->lock);
amd_pmf_dump_registers(dev);
return rc;
}
@@ -373,7 +371,6 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
power_supply_unreg_notifier(&dev->pwr_src_notifier);
- amd_pmf_deinit_sps(dev);
}
if (dev->smart_pc_enabled) {
@@ -456,7 +453,6 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
- amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
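The core.c hunks above convert amd_pmf_get_metrics(), apmf_event_handler() and amd_pmf_send_cmd() from explicit mutex_lock()/mutex_unlock() pairs with an out_unlock label to scope-based locking. A minimal sketch of the idiom, assuming the usual guard() machinery from <linux/cleanup.h> (pulled in via <linux/mutex.h>); the function name is illustrative:

static int example_locked_op(struct amd_pmf_dev *dev)
{
	/*
	 * The mutex is taken here and released automatically when the
	 * guard goes out of scope, so every early return is also an
	 * unlock and no goto label is needed.
	 */
	guard(mutex)(&dev->lock);

	if (!dev->regbase)
		return -ENODEV;

	return 0;
}

This is why the error paths in amd_pmf_send_cmd() can now simply return or break out of the switch instead of jumping to out_unlock.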
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
deleted file mode 100644
index 7cde5733b9ca..000000000000
--- a/drivers/platform/x86/amd/pmf/pmf-quirks.c
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * AMD Platform Management Framework Driver Quirks
- *
- * Copyright (c) 2024, Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Author: Mario Limonciello <mario.limonciello@amd.com>
- */
-
-#include <linux/dmi.h>
-
-#include "pmf.h"
-
-struct quirk_entry {
- u32 supported_func;
-};
-
-static struct quirk_entry quirk_no_sps_bug = {
- .supported_func = 0x4003,
-};
-
-static const struct dmi_system_id fwbug_list[] = {
- {
- .ident = "ROG Zephyrus G14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA403U"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ROG Ally X",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "RC72LA"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {
- .ident = "ASUS TUF Gaming A14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FA401W"),
- },
- .driver_data = &quirk_no_sps_bug,
- },
- {}
-};
-
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
-{
- const struct dmi_system_id *dmi_id;
- struct quirk_entry *quirks;
-
- dmi_id = dmi_first_match(fwbug_list);
- if (!dmi_id)
- return;
-
- quirks = dmi_id->driver_data;
- if (quirks->supported_func) {
- dev->supported_func = quirks->supported_func;
- pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
- dmi_id->ident);
- }
-}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index a79808fda1d8..41b2b91b8fdc 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -338,7 +338,7 @@ struct amd_pmf_dev {
struct mutex lock; /* protects the PMF interface */
u32 supported_func;
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile class device */
struct dentry *dbgfs_dir;
int hb_interval; /* SBIOS heartbeat interval */
struct delayed_work heart_beat;
@@ -370,6 +370,8 @@ struct amd_pmf_dev {
struct input_dev *pmf_idev;
size_t mtable_size;
struct resource *res;
+ struct apmf_sbios_req_v2 req; /* To get custom bios pending request */
+ struct mutex cb_mutex;
};
struct apmf_sps_prop_granular_v2 {
@@ -616,6 +618,30 @@ enum ta_slider {
TA_MAX,
};
+enum apmf_smartpc_custom_bios_inputs {
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT1,
+ APMF_SMARTPC_CUSTOM_BIOS_INPUT2,
+};
+
+enum apmf_preq_smartpc {
+ NOTIFY_CUSTOM_BIOS_INPUT1 = 5,
+ NOTIFY_CUSTOM_BIOS_INPUT2,
+};
+
+enum platform_type {
+ PTYPE_UNKNOWN = 0,
+ LID_CLOSE,
+ CLAMSHELL,
+ FLAT,
+ TENT,
+ STAND,
+ TABLET,
+ BOOK,
+ PRESENTATION,
+ PULL_FWD,
+ PTYPE_INVALID = 0xf,
+};
+
/* Command ids for TA communication */
enum ta_pmf_command {
TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE,
@@ -657,7 +683,8 @@ struct ta_pmf_condition_info {
u32 power_slider;
u32 lid_state;
bool user_present;
- u32 rsvd1[2];
+ u32 bios_input1;
+ u32 bios_input2;
u32 monitor_count;
u32 rsvd2[2];
u32 bat_design;
@@ -667,7 +694,9 @@ struct ta_pmf_condition_info {
u32 device_state;
u32 socket_power;
u32 skin_temperature;
- u32 rsvd3[5];
+ u32 rsvd3[2];
+ u32 platform_type;
+ u32 rsvd3_1[2];
u32 ambient_light;
u32 length;
u32 avg_c0residency;
@@ -751,7 +780,6 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table);
int amd_pmf_init_sps(struct amd_pmf_dev *dev);
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
bool is_pprof_balanced(struct amd_pmf_dev *pmf);
@@ -797,7 +825,4 @@ int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
-/* Quirk infrastructure */
-void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
-
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 06226eb0eab3..f34f3130c330 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -16,6 +16,46 @@
#include "pmf.h"
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *platform_type_as_str(u16 platform_type)
+{
+ switch (platform_type) {
+ case CLAMSHELL:
+ return "CLAMSHELL";
+ case FLAT:
+ return "FLAT";
+ case TENT:
+ return "TENT";
+ case STAND:
+ return "STAND";
+ case TABLET:
+ return "TABLET";
+ case BOOK:
+ return "BOOK";
+ case PRESENTATION:
+ return "PRESENTATION";
+ case PULL_FWD:
+ return "PULL_FWD";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *laptop_placement_as_str(u16 device_state)
+{
+ switch (device_state) {
+ case ON_TABLE:
+ return "ON_TABLE";
+ case ON_LAP_MOTION:
+ return "ON_LAP_MOTION";
+ case IN_BAG:
+ return "IN_BAG";
+ case OUT_OF_BAG:
+ return "OUT_OF_BAG";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static const char *ta_slider_as_str(unsigned int state)
{
switch (state) {
@@ -47,12 +87,38 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
+ dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type));
+ dev_dbg(dev->dev, "Laptop placement: %s\n",
+ laptop_placement_as_str(in->ev_info.device_state));
+ dev_dbg(dev->dev, "Custom BIOS input1: %u\n", in->ev_info.bios_input1);
+ dev_dbg(dev->dev, "Custom BIOS input2: %u\n", in->ev_info.bios_input2);
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
#endif
+static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev,
+ struct ta_pmf_enact_table *in)
+{
+ if (!pdev->req.pending_req)
+ return;
+
+ switch (pdev->req.pending_req) {
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT1):
+ in->ev_info.bios_input1 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT1];
+ break;
+ case BIT(NOTIFY_CUSTOM_BIOS_INPUT2):
+ in->ev_info.bios_input2 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT2];
+ break;
+ default:
+ dev_dbg(pdev->dev, "Invalid preq for BIOS input: 0x%x\n", pdev->req.pending_req);
+ }
+
+ /* Clear pending requests after handling */
+ memset(&pdev->req, 0, sizeof(pdev->req));
+}
+
static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
{
u16 max, avg = 0;
@@ -190,6 +256,14 @@ static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact
} else {
dev_dbg(dev->dev, "HPD is not enabled/detected\n");
}
+
+ /* Get SRA (Secondary Accelerometer) data */
+ if (!amd_get_sfh_info(&sfh_info, MT_SRA)) {
+ in->ev_info.platform_type = sfh_info.platform_type;
+ in->ev_info.device_state = sfh_info.laptop_placement;
+ } else {
+ dev_dbg(dev->dev, "SRA is not enabled/detected\n");
+ }
}
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
@@ -201,4 +275,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
amd_pmf_get_battery_info(dev, in);
amd_pmf_get_slider_info(dev, in);
amd_pmf_get_sensor_info(dev, in);
+ amd_pmf_get_custom_bios_inputs(dev, in);
}
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 92f7fb22277d..e6cf0b22dac3 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -282,10 +282,10 @@ bool is_pprof_balanced(struct amd_pmf_dev *pmf)
return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
}
-static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
*profile = pmf->current_profile;
return 0;
@@ -363,10 +363,10 @@ int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+static int amd_pmf_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ struct amd_pmf_dev *pmf = dev_get_drvdata(dev);
int ret = 0;
pmf->current_profile = profile;
@@ -387,10 +387,23 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
return 0;
}
-int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
{
- int err;
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops amd_pmf_profile_ops = {
+ .probe = amd_pmf_profile_probe,
+ .profile_get = amd_pmf_profile_get,
+ .profile_set = amd_pmf_profile_set,
+};
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
@@ -405,24 +418,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
amd_pmf_set_sps_power_limits(dev);
}
- dev->pprof.profile_get = amd_pmf_profile_get;
- dev->pprof.profile_set = amd_pmf_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
-
/* Create platform_profile structure and register */
- err = platform_profile_register(&dev->pprof);
- if (err)
- dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
- err);
+ dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev,
+ &amd_pmf_profile_ops);
+ if (IS_ERR(dev->ppdev))
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %ld\n",
+ PTR_ERR(dev->ppdev));
- return err;
-}
-
-void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(dev->ppdev);
}
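Here, and in asus-wmi and alienware-wmi below, the struct platform_profile_handler embedded in the driver data is replaced by a const struct platform_profile_ops plus a class device returned by devm_platform_profile_register(). A condensed sketch of the new shape with the same callback prototypes as the hunks above (the example_* names are illustrative):

static int example_profile_get(struct device *dev, enum platform_profile_option *profile)
{
	struct amd_pmf_dev *pmf = dev_get_drvdata(dev);	/* drvdata passed at registration */

	*profile = pmf->current_profile;
	return 0;
}

static int example_probe(void *drvdata, unsigned long *choices)
{
	/* Replaces the old set_bit(..., pprof.choices) calls */
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static const struct platform_profile_ops example_ops = {
	.probe		= example_probe,
	.profile_get	= example_profile_get,
	.profile_set	= amd_pmf_profile_set,	/* same prototype as the hunk above */
};

static int example_register(struct amd_pmf_dev *dev)
{
	/* devres-managed, so no platform_profile_remove() on teardown */
	dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev, &example_ops);
	return PTR_ERR_OR_ZERO(dev->ppdev);
}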
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a5933980ade3..3f8b2a324efd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -50,7 +50,8 @@ MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-do
static struct quirk_entry *quirks;
static bool atkbd_reports_vol_keys;
-static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port)
+static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended_e0;
static bool extended_e1;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8bd187e8b47f..38ef778e8c19 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -313,7 +313,7 @@ struct asus_wmi {
bool mid_fan_curve_available;
struct fan_curve_data custom_fan_curves[3];
- struct platform_profile_handler platform_profile_handler;
+ struct device *ppdev;
bool platform_profile_support;
// The RSOC controls the maximum charging percentage.
@@ -3782,7 +3782,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
* Ensure that platform_profile updates userspace with the change to ensure
* that platform_profile and throttle_thermal_policy_mode are in sync.
*/
- platform_profile_notify();
+ platform_profile_notify(asus->ppdev);
return count;
}
@@ -3793,13 +3793,13 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
static DEVICE_ATTR_RW(throttle_thermal_policy);
/* Platform profile ***********************************************************/
-static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
tp = asus->throttle_thermal_policy_mode;
switch (tp) {
@@ -3819,13 +3819,13 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int asus_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
struct asus_wmi *asus;
int tp;
- asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
+ asus = dev_get_drvdata(dev);
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
@@ -3845,6 +3845,21 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
return throttle_thermal_policy_write(asus);
}
+static int asus_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops asus_wmi_platform_profile_ops = {
+ .probe = asus_wmi_platform_profile_probe,
+ .profile_get = asus_wmi_platform_profile_get,
+ .profile_set = asus_wmi_platform_profile_set,
+};
+
static int platform_profile_setup(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
@@ -3869,22 +3884,11 @@ static int platform_profile_setup(struct asus_wmi *asus)
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
- asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
- asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED,
- asus->platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE,
- asus->platform_profile_handler.choices);
-
- err = platform_profile_register(&asus->platform_profile_handler);
- if (err == -EEXIST) {
- pr_warn("%s, a platform_profile handler is already registered\n", __func__);
- return 0;
- } else if (err) {
- pr_err("%s, failed at platform_profile_register: %d\n", __func__, err);
- return err;
+ asus->ppdev = devm_platform_profile_register(dev, "asus-wmi", asus,
+ &asus_wmi_platform_profile_ops);
+ if (IS_ERR(asus->ppdev)) {
+ dev_err(dev, "Failed to register a platform_profile class device\n");
+ return PTR_ERR(asus->ppdev);
}
asus->platform_profile_support = true;
@@ -4815,7 +4819,7 @@ static int asus_wmi_add(struct platform_device *pdev)
}
if (asus->driver->i8042_filter) {
- err = i8042_install_filter(asus->driver->i8042_filter);
+ err = i8042_install_filter(asus->driver->i8042_filter, NULL);
if (err)
pr_warn("Unable to install key filter - %d\n", err);
}
@@ -4842,8 +4846,6 @@ fail_input:
fail_sysfs:
fail_custom_fan_curve:
fail_platform_profile_setup:
- if (asus->platform_profile_support)
- platform_profile_remove();
fail_fan_boost_mode:
fail_platform:
kfree(asus);
@@ -4869,9 +4871,6 @@ static void asus_wmi_remove(struct platform_device *device)
throttle_thermal_policy_set_default(asus);
asus_wmi_battery_exit(asus);
- if (asus->platform_profile_support)
- platform_profile_remove();
-
kfree(asus);
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index d02f15fd3482..018dfde4025e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -73,8 +73,7 @@ struct asus_wmi_driver {
void (*key_filter) (struct asus_wmi_driver *driver, int *code,
unsigned int *value, bool *autorelease);
/* Optional standard i8042 filter */
- bool (*i8042_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
+ i8042_filter_t i8042_filter;
int (*probe) (struct platform_device *device);
void (*detect_quirks) (struct asus_wmi_driver *driver);
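The i8042 filter callbacks in asus-nb-wmi.c above and dell-laptop.c below gain a void *context parameter to match the new i8042_filter_t typedef, and i8042_install_filter() now takes the context to hand back on every byte. A minimal sketch of the new signature, assuming the usual convention that returning true consumes the byte (names are illustrative):

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *port, void *context)
{
	struct asus_wmi *asus = context;	/* per-driver state instead of a file-scope global */

	/* Inspect data/str against the driver state; true means "swallow this byte". */
	return false;
}

static int example_install(struct asus_wmi *asus)
{
	return i8042_install_filter(example_i8042_filter, asus);
}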
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 2dddafb3f7fa..d09060aedd3f 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -152,6 +152,7 @@ config DELL_SMBIOS_SMM
config DELL_SMO8800
tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
default m
+ depends on I2C
depends on ACPI || COMPILE_TEST
help
Say Y here if you want to support SMO88XX freefall devices
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index 79d60f1bf4c1..bb3cbd470a46 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -15,6 +15,7 @@ dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
dell-wmi-objs := dell-wmi-base.o
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 341d01d3e3e4..e252e0cf47ef 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -385,12 +385,6 @@ struct color_platform {
u8 red;
} __packed;
-struct platform_zone {
- u8 location;
- struct device_attribute *attr;
- struct color_platform colors;
-};
-
struct wmax_brightness_args {
u32 led_mask;
u32 percentage;
@@ -420,22 +414,9 @@ struct wmax_u32_args {
};
static struct platform_device *platform_device;
-static struct device_attribute *zone_dev_attrs;
-static struct attribute **zone_attrs;
-static struct platform_zone *zone_data;
-static struct platform_profile_handler pp_handler;
+static struct color_platform colors[4];
static enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
-static struct platform_driver platform_driver = {
- .driver = {
- .name = "alienware-wmi",
- }
-};
-
-static struct attribute_group zone_attribute_group = {
- .name = "rgb_zones",
-};
-
static u8 interface;
static u8 lighting_control_state;
static u8 global_brightness;
@@ -443,7 +424,7 @@ static u8 global_brightness;
/*
* Helpers used for zone control
*/
-static int parse_rgb(const char *buf, struct platform_zone *zone)
+static int parse_rgb(const char *buf, struct color_platform *colors)
{
long unsigned int rgb;
int ret;
@@ -463,28 +444,14 @@ static int parse_rgb(const char *buf, struct platform_zone *zone)
repackager.package = rgb & 0x0f0f0f0f;
pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
repackager.cp.red, repackager.cp.green, repackager.cp.blue);
- zone->colors = repackager.cp;
+ *colors = repackager.cp;
return 0;
}
-static struct platform_zone *match_zone(struct device_attribute *attr)
-{
- u8 zone;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- if ((struct device_attribute *)zone_data[zone].attr == attr) {
- pr_debug("alienware-wmi: matched zone location: %d\n",
- zone_data[zone].location);
- return &zone_data[zone];
- }
- }
- return NULL;
-}
-
/*
* Individual RGB zone control
*/
-static int alienware_update_led(struct platform_zone *zone)
+static int alienware_update_led(u8 location)
{
int method_id;
acpi_status status;
@@ -493,8 +460,8 @@ static int alienware_update_led(struct platform_zone *zone)
struct legacy_led_args legacy_args;
struct wmax_led_args wmax_basic_args;
if (interface == WMAX) {
- wmax_basic_args.led_mask = 1 << zone->location;
- wmax_basic_args.colors = zone->colors;
+ wmax_basic_args.led_mask = 1 << location;
+ wmax_basic_args.colors = colors[location];
wmax_basic_args.state = lighting_control_state;
guid = WMAX_CONTROL_GUID;
method_id = WMAX_METHOD_ZONE_CONTROL;
@@ -502,7 +469,7 @@ static int alienware_update_led(struct platform_zone *zone)
input.length = sizeof(wmax_basic_args);
input.pointer = &wmax_basic_args;
} else {
- legacy_args.colors = zone->colors;
+ legacy_args.colors = colors[location];
legacy_args.brightness = global_brightness;
legacy_args.state = 0;
if (lighting_control_state == LEGACY_BOOTING ||
@@ -511,7 +478,7 @@ static int alienware_update_led(struct platform_zone *zone)
legacy_args.state = lighting_control_state;
} else
guid = LEGACY_CONTROL_GUID;
- method_id = zone->location + 1;
+ method_id = location + 1;
input.length = sizeof(legacy_args);
input.pointer = &legacy_args;
@@ -525,35 +492,153 @@ static int alienware_update_led(struct platform_zone *zone)
}
static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf, u8 location)
{
- struct platform_zone *target_zone;
- target_zone = match_zone(attr);
- if (target_zone == NULL)
- return sprintf(buf, "red: -1, green: -1, blue: -1\n");
return sprintf(buf, "red: %d, green: %d, blue: %d\n",
- target_zone->colors.red,
- target_zone->colors.green, target_zone->colors.blue);
+ colors[location].red, colors[location].green,
+ colors[location].blue);
}
-static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count, u8 location)
{
- struct platform_zone *target_zone;
int ret;
- target_zone = match_zone(attr);
- if (target_zone == NULL) {
- pr_err("alienware-wmi: invalid target zone\n");
- return 1;
- }
- ret = parse_rgb(buf, target_zone);
+
+ ret = parse_rgb(buf, &colors[location]);
if (ret)
return ret;
- ret = alienware_update_led(target_zone);
+
+ ret = alienware_update_led(location);
+
return ret ? ret : count;
}
+static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 0);
+}
+
+static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 0);
+}
+
+static DEVICE_ATTR_RW(zone00);
+
+static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 1);
+}
+
+static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 1);
+}
+
+static DEVICE_ATTR_RW(zone01);
+
+static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 2);
+}
+
+static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 2);
+}
+
+static DEVICE_ATTR_RW(zone02);
+
+static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 3);
+}
+
+static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 3);
+}
+
+static DEVICE_ATTR_RW(zone03);
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t lighting_control_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (lighting_control_state == LEGACY_BOOTING)
+ return sysfs_emit(buf, "[booting] running suspend\n");
+ else if (lighting_control_state == LEGACY_SUSPEND)
+ return sysfs_emit(buf, "booting running [suspend]\n");
+
+ return sysfs_emit(buf, "booting [running] suspend\n");
+}
+
+static ssize_t lighting_control_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 val;
+
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+
+ lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ lighting_control_state);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(lighting_control_state);
+
+static umode_t zone_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (n < quirks->num_zones + 1)
+ return attr->mode;
+
+ return 0;
+}
+
+static bool zone_group_visible(struct kobject *kobj)
+{
+ return quirks->num_zones > 0;
+}
+DEFINE_SYSFS_GROUP_VISIBLE(zone);
+
+static struct attribute *zone_attrs[] = {
+ &dev_attr_lighting_control_state.attr,
+ &dev_attr_zone00.attr,
+ &dev_attr_zone01.attr,
+ &dev_attr_zone02.attr,
+ &dev_attr_zone03.attr,
+ NULL
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+ .is_visible = SYSFS_GROUP_VISIBLE(zone),
+ .attrs = zone_attrs,
+};
+
/*
* LED Brightness (Global)
*/
@@ -582,7 +667,7 @@ static void global_led_set(struct led_classdev *led_cdev,
if (interface == WMAX)
ret = wmax_brightness(brightness);
else
- ret = alienware_update_led(&zone_data[0]);
+ ret = alienware_update_led(0);
if (ret)
pr_err("LED brightness update failed\n");
}
@@ -598,46 +683,8 @@ static struct led_classdev global_led = {
.name = "alienware::global_brightness",
};
-/*
- * Lighting control state device attribute (Global)
- */
-static ssize_t show_control_state(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- if (lighting_control_state == LEGACY_BOOTING)
- return sysfs_emit(buf, "[booting] running suspend\n");
- else if (lighting_control_state == LEGACY_SUSPEND)
- return sysfs_emit(buf, "booting running [suspend]\n");
- return sysfs_emit(buf, "booting [running] suspend\n");
-}
-
-static ssize_t store_control_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- long unsigned int val;
- if (strcmp(buf, "booting\n") == 0)
- val = LEGACY_BOOTING;
- else if (strcmp(buf, "suspend\n") == 0)
- val = LEGACY_SUSPEND;
- else if (interface == LEGACY)
- val = LEGACY_RUNNING;
- else
- val = WMAX_RUNNING;
- lighting_control_state = val;
- pr_debug("alienware-wmi: updated control state to %d\n",
- lighting_control_state);
- return count;
-}
-
-static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
- store_control_state);
-
static int alienware_zone_init(struct platform_device *dev)
{
- u8 zone;
- char *name;
-
if (interface == WMAX) {
lighting_control_state = WMAX_RUNNING;
} else if (interface == LEGACY) {
@@ -646,68 +693,15 @@ static int alienware_zone_init(struct platform_device *dev)
global_led.max_brightness = 0x0F;
global_brightness = global_led.max_brightness;
- /*
- * - zone_dev_attrs num_zones + 1 is for individual zones and then
- * null terminated
- * - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
- * the lighting control + null terminated
- * - zone_data num_zones is for the distinct zones
- */
- zone_dev_attrs =
- kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
- GFP_KERNEL);
- if (!zone_dev_attrs)
- return -ENOMEM;
-
- zone_attrs =
- kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
- GFP_KERNEL);
- if (!zone_attrs)
- return -ENOMEM;
-
- zone_data =
- kcalloc(quirks->num_zones, sizeof(struct platform_zone),
- GFP_KERNEL);
- if (!zone_data)
- return -ENOMEM;
-
- for (zone = 0; zone < quirks->num_zones; zone++) {
- name = kasprintf(GFP_KERNEL, "zone%02hhX", zone);
- if (name == NULL)
- return 1;
- sysfs_attr_init(&zone_dev_attrs[zone].attr);
- zone_dev_attrs[zone].attr.name = name;
- zone_dev_attrs[zone].attr.mode = 0644;
- zone_dev_attrs[zone].show = zone_show;
- zone_dev_attrs[zone].store = zone_set;
- zone_data[zone].location = zone;
- zone_attrs[zone] = &zone_dev_attrs[zone].attr;
- zone_data[zone].attr = &zone_dev_attrs[zone];
- }
- zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
- zone_attribute_group.attrs = zone_attrs;
-
- led_classdev_register(&dev->dev, &global_led);
-
- return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+ return led_classdev_register(&dev->dev, &global_led);
}
static void alienware_zone_exit(struct platform_device *dev)
{
- u8 zone;
-
if (!quirks->num_zones)
return;
- sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
led_classdev_unregister(&global_led);
- if (zone_dev_attrs) {
- for (zone = 0; zone < quirks->num_zones; zone++)
- kfree(zone_dev_attrs[zone].attr.name);
- }
- kfree(zone_dev_attrs);
- kfree(zone_data);
- kfree(zone_attrs);
}
static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
@@ -742,14 +736,15 @@ static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
* The HDMI mux sysfs node indicates the status of the HDMI input mux.
* It can toggle between standard system GPU output and HDMI input.
*/
-static ssize_t show_hdmi_cable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_CABLE, &out_data);
@@ -763,14 +758,15 @@ static ssize_t show_hdmi_cable(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static ssize_t show_hdmi_source(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_HDMI_STATUS, &out_data);
@@ -785,12 +781,12 @@ static ssize_t show_hdmi_source(struct device *dev,
return sysfs_emit(buf, "input gpu [unknown]\n");
}
-static ssize_t toggle_hdmi_source(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
+
if (strcmp(buf, "gpu\n") == 0)
args.arg = 1;
else if (strcmp(buf, "input\n") == 0)
@@ -808,9 +804,14 @@ static ssize_t toggle_hdmi_source(struct device *dev,
return count;
}
-static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
-static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
- toggle_hdmi_source);
+static DEVICE_ATTR_RO(cable);
+static DEVICE_ATTR_RW(source);
+
+static bool hdmi_group_visible(struct kobject *kobj)
+{
+ return quirks->hdmi_mux;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
static struct attribute *hdmi_attrs[] = {
&dev_attr_cable.attr,
@@ -820,38 +821,24 @@ static struct attribute *hdmi_attrs[] = {
static const struct attribute_group hdmi_attribute_group = {
.name = "hdmi",
+ .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
.attrs = hdmi_attrs,
};
-static void remove_hdmi(struct platform_device *dev)
-{
- if (quirks->hdmi_mux > 0)
- sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
-}
-
-static int create_hdmi(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
- if (ret)
- remove_hdmi(dev);
- return ret;
-}
-
/*
* Alienware GFX amplifier support
* - Currently supports reading cable status
* - Leaving expansion room to possibly support dock/undock events later
*/
-static ssize_t show_amplifier_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status =
alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_AMPLIFIER_CABLE, &out_data);
@@ -865,7 +852,13 @@ static ssize_t show_amplifier_status(struct device *dev,
return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
-static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+static DEVICE_ATTR_RO(status);
+
+static bool amplifier_group_visible(struct kobject *kobj)
+{
+ return quirks->amplifier;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
static struct attribute *amplifier_attrs[] = {
&dev_attr_status.attr,
@@ -874,37 +867,23 @@ static struct attribute *amplifier_attrs[] = {
static const struct attribute_group amplifier_attribute_group = {
.name = "amplifier",
+ .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
.attrs = amplifier_attrs,
};
-static void remove_amplifier(struct platform_device *dev)
-{
- if (quirks->amplifier > 0)
- sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
-}
-
-static int create_amplifier(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
- if (ret)
- remove_amplifier(dev);
- return ret;
-}
-
/*
* Deep Sleep Control support
* - Modifies BIOS setting for deep sleep control allowing extra wakeup events
*/
-static ssize_t show_deepsleep_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- acpi_status status;
- u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
+ acpi_status status;
+ u32 out_data;
+
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_DEEP_SLEEP_STATUS, &out_data);
if (ACPI_SUCCESS(status)) {
@@ -919,12 +898,11 @@ static ssize_t show_deepsleep_status(struct device *dev,
return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
}
-static ssize_t toggle_deepsleep(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- acpi_status status;
struct wmax_basic_args args;
+ acpi_status status;
if (strcmp(buf, "disabled\n") == 0)
args.arg = 0;
@@ -943,7 +921,13 @@ static ssize_t toggle_deepsleep(struct device *dev,
return count;
}
-static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+static DEVICE_ATTR_RW(deepsleep);
+
+static bool deepsleep_group_visible(struct kobject *kobj)
+{
+ return quirks->deepslp;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
static struct attribute *deepsleep_attrs[] = {
&dev_attr_deepsleep.attr,
@@ -952,25 +936,10 @@ static struct attribute *deepsleep_attrs[] = {
static const struct attribute_group deepsleep_attribute_group = {
.name = "deepsleep",
+ .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
.attrs = deepsleep_attrs,
};
-static void remove_deepsleep(struct platform_device *dev)
-{
- if (quirks->deepslp > 0)
- sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
-}
-
-static int create_deepsleep(struct platform_device *dev)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
- if (ret)
- remove_deepsleep(dev);
- return ret;
-}
-
/*
* Thermal Profile control
* - Provides thermal profile control through the Platform Profile API
@@ -1000,13 +969,13 @@ static bool is_wmax_thermal_code(u32 code)
static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = arg,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_THERMAL_INFORMATION,
@@ -1023,13 +992,13 @@ static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
static int wmax_thermal_control(u8 profile)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = WMAX_OPERATION_ACTIVATE_PROFILE,
.arg1 = profile,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
u32 out_data;
status = alienware_wmax_command(&in_args, sizeof(in_args),
@@ -1047,13 +1016,13 @@ static int wmax_thermal_control(u8 profile)
static int wmax_game_shift_status(u8 operation, u32 *out_data)
{
- acpi_status status;
struct wmax_u32_args in_args = {
.operation = operation,
.arg1 = 0,
.arg2 = 0,
.arg3 = 0,
};
+ acpi_status status;
status = alienware_wmax_command(&in_args, sizeof(in_args),
WMAX_METHOD_GAME_SHIFT_STATUS,
@@ -1068,7 +1037,7 @@ static int wmax_game_shift_status(u8 operation, u32 *out_data)
return 0;
}
-static int thermal_profile_get(struct platform_profile_handler *pprof,
+static int thermal_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
u32 out_data;
@@ -1094,7 +1063,7 @@ static int thermal_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int thermal_profile_set(struct platform_profile_handler *pprof,
+static int thermal_profile_set(struct device *dev,
enum platform_profile_option profile)
{
if (quirks->gmode) {
@@ -1120,13 +1089,13 @@ static int thermal_profile_set(struct platform_profile_handler *pprof,
return wmax_thermal_control(supported_thermal_profiles[profile]);
}
-static int create_thermal_profile(void)
+static int thermal_profile_probe(void *drvdata, unsigned long *choices)
{
- u32 out_data;
+ enum platform_profile_option profile;
+ enum wmax_thermal_mode mode;
u8 sys_desc[4];
u32 first_mode;
- enum wmax_thermal_mode mode;
- enum platform_profile_option profile;
+ u32 out_data;
int ret;
ret = wmax_thermal_information(WMAX_OPERATION_SYS_DESCRIPTION,
@@ -1153,31 +1122,56 @@ static int create_thermal_profile(void)
profile = wmax_mode_to_platform_profile[mode];
supported_thermal_profiles[profile] = out_data;
- set_bit(profile, pp_handler.choices);
+ set_bit(profile, choices);
}
- if (bitmap_empty(pp_handler.choices, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
return -ENODEV;
if (quirks->gmode) {
supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
WMAX_THERMAL_MODE_GMODE;
- set_bit(PLATFORM_PROFILE_PERFORMANCE, pp_handler.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
- pp_handler.profile_get = thermal_profile_get;
- pp_handler.profile_set = thermal_profile_set;
-
- return platform_profile_register(&pp_handler);
+ return 0;
}
-static void remove_thermal_profile(void)
+static const struct platform_profile_ops awcc_platform_profile_ops = {
+ .probe = thermal_profile_probe,
+ .profile_get = thermal_profile_get,
+ .profile_set = thermal_profile_set,
+};
+
+static int create_thermal_profile(struct platform_device *platform_device)
{
- if (quirks->thermal)
- platform_profile_remove();
+ struct device *ppdev;
+
+ ppdev = devm_platform_profile_register(&platform_device->dev, "alienware-wmi",
+ NULL, &awcc_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(ppdev);
}
+/*
+ * Platform Driver
+ */
+static const struct attribute_group *alienfx_groups[] = {
+ &zone_attribute_group,
+ &hdmi_attribute_group,
+ &amplifier_attribute_group,
+ &deepsleep_attribute_group,
+ NULL
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .dev_groups = alienfx_groups,
+ },
+};
+
static int __init alienware_wmi_init(void)
{
int ret;
@@ -1217,26 +1211,8 @@ static int __init alienware_wmi_init(void)
if (ret)
goto fail_platform_device2;
- if (quirks->hdmi_mux > 0) {
- ret = create_hdmi(platform_device);
- if (ret)
- goto fail_prep_hdmi;
- }
-
- if (quirks->amplifier > 0) {
- ret = create_amplifier(platform_device);
- if (ret)
- goto fail_prep_amplifier;
- }
-
- if (quirks->deepslp > 0) {
- ret = create_deepsleep(platform_device);
- if (ret)
- goto fail_prep_deepsleep;
- }
-
if (quirks->thermal) {
- ret = create_thermal_profile();
+ ret = create_thermal_profile(platform_device);
if (ret)
goto fail_prep_thermal_profile;
}
@@ -1251,11 +1227,7 @@ static int __init alienware_wmi_init(void)
fail_prep_zones:
alienware_zone_exit(platform_device);
- remove_thermal_profile();
fail_prep_thermal_profile:
-fail_prep_deepsleep:
-fail_prep_amplifier:
-fail_prep_hdmi:
platform_device_del(platform_device);
fail_platform_device2:
platform_device_put(platform_device);
@@ -1269,13 +1241,9 @@ module_init(alienware_wmi_init);
static void __exit alienware_wmi_exit(void)
{
- if (platform_device) {
- alienware_zone_exit(platform_device);
- remove_hdmi(platform_device);
- remove_thermal_profile();
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
- }
+ alienware_zone_exit(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
}
module_exit(alienware_wmi_exit);
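The alienware-wmi rework above drops all of the hand-rolled sysfs_create_group()/sysfs_remove_group() plumbing: the four groups are listed in the platform_driver's .driver.dev_groups, so the driver core creates and removes them around bind and unbind, and each group gates its own visibility. A minimal sketch of that pattern, assuming the DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE()/SYSFS_GROUP_VISIBLE() helpers behave as used in the hunks (example_* names are illustrative):

static bool example_group_visible(struct kobject *kobj)
{
	return quirks->hdmi_mux;	/* group only appears when the quirk enables it */
}
DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(example);

static struct attribute *example_attrs[] = {
	&dev_attr_cable.attr,
	NULL
};

static const struct attribute_group example_attribute_group = {
	.name		= "example",
	.is_visible	= SYSFS_GROUP_VISIBLE(example),
	.attrs		= example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_attribute_group,
	NULL
};

/*
 * Hooked up as .driver.dev_groups = example_groups in the platform_driver,
 * which is why the explicit create and remove helpers could be deleted.
 */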
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 0aeb8149c16b..8149be25fa26 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -163,7 +163,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
}
static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -176,7 +176,7 @@ static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
ssize_t ret;
@@ -636,9 +636,9 @@ static struct notifier_block dcdbas_reboot_nb = {
.priority = INT_MIN
};
-static DCDBAS_BIN_ATTR_RW(smi_data);
+static const BIN_ATTR_ADMIN_RW(smi_data, 0);
-static struct bin_attribute *dcdbas_bin_attrs[] = {
+static const struct bin_attribute *const dcdbas_bin_attrs[] = {
&bin_attr_smi_data,
NULL
};
@@ -662,7 +662,7 @@ static struct attribute *dcdbas_dev_attrs[] = {
static const struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
- .bin_attrs = dcdbas_bin_attrs,
+ .bin_attrs_new = dcdbas_bin_attrs,
};
static int dcdbas_probe(struct platform_device *dev)
diff --git a/drivers/platform/x86/dell/dcdbas.h b/drivers/platform/x86/dell/dcdbas.h
index 942a23ddded0..a05d7f667586 100644
--- a/drivers/platform/x86/dell/dcdbas.h
+++ b/drivers/platform/x86/dell/dcdbas.h
@@ -56,14 +56,6 @@
#define DCDBAS_DEV_ATTR_WO(_name) \
DEVICE_ATTR(_name,0200,NULL,_name##_store);
-#define DCDBAS_BIN_ATTR_RW(_name) \
-struct bin_attribute bin_attr_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = 0600 }, \
- .read = _name##_read, \
- .write = _name##_write, \
-}
-
struct smi_cmd {
__u32 magic;
__u32 ebx;
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 5671bd0deee7..57748c3ea24f 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -103,15 +103,15 @@ static bool mute_led_registered;
struct battery_mode_info {
int token;
- const char *label;
+ enum power_supply_charge_type charge_type;
};
static const struct battery_mode_info battery_modes[] = {
- { BAT_PRI_AC_MODE_TOKEN, "Trickle" },
- { BAT_EXPRESS_MODE_TOKEN, "Fast" },
- { BAT_STANDARD_MODE_TOKEN, "Standard" },
- { BAT_ADAPTIVE_MODE_TOKEN, "Adaptive" },
- { BAT_CUSTOM_MODE_TOKEN, "Custom" },
+ { BAT_PRI_AC_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_TRICKLE },
+ { BAT_EXPRESS_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_FAST },
+ { BAT_STANDARD_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_STANDARD },
+ { BAT_ADAPTIVE_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE },
+ { BAT_CUSTOM_MODE_TOKEN, POWER_SUPPLY_CHARGE_TYPE_CUSTOM },
};
static u32 battery_supported_modes;
@@ -725,8 +725,8 @@ static void dell_update_rfkill(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -884,7 +884,7 @@ static int __init dell_setup_rfkill(void)
pr_warn("Unable to register dell rbtn notifier\n");
goto err_filter;
} else {
- ret = i8042_install_filter(dell_laptop_i8042_filter);
+ ret = i8042_install_filter(dell_laptop_i8042_filter, NULL);
if (ret) {
pr_warn("Unable to install key filter\n");
goto err_filter;
@@ -2261,46 +2261,42 @@ static ssize_t charge_types_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ssize_t count = 0;
+ enum power_supply_charge_type charge_type;
int i;
for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
- bool active;
+ charge_type = battery_modes[i].charge_type;
- if (!(battery_supported_modes & BIT(i)))
+ if (!(battery_supported_modes & BIT(charge_type)))
continue;
- active = dell_battery_mode_is_active(battery_modes[i].token);
- count += sysfs_emit_at(buf, count, active ? "[%s] " : "%s ",
- battery_modes[i].label);
- }
+ if (!dell_battery_mode_is_active(battery_modes[i].token))
+ continue;
- /* convert the last space to a newline */
- if (count > 0)
- count--;
- count += sysfs_emit_at(buf, count, "\n");
+ return power_supply_charge_types_show(dev, battery_supported_modes,
+ charge_type, buf);
+ }
- return count;
+ /* No active mode found */
+ return -EIO;
}
static ssize_t charge_types_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- bool matched = false;
- int err, i;
+ int charge_type, err, i;
- for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
- if (!(battery_supported_modes & BIT(i)))
- continue;
+ charge_type = power_supply_charge_types_parse(battery_supported_modes, buf);
+ if (charge_type < 0)
+ return charge_type;
- if (sysfs_streq(battery_modes[i].label, buf)) {
- matched = true;
+ for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
+ if (battery_modes[i].charge_type == charge_type)
break;
- }
}
- if (!matched)
- return -EINVAL;
+ if (i == ARRAY_SIZE(battery_modes))
+ return -ENOENT;
err = dell_battery_set_mode(battery_modes[i].token);
if (err)
@@ -2430,7 +2426,7 @@ static u32 __init battery_get_supported_modes(void)
for (i = 0; i < ARRAY_SIZE(battery_modes); i++) {
if (dell_smbios_find_token(battery_modes[i].token))
- modes |= BIT(i);
+ modes |= BIT(battery_modes[i].charge_type);
}
return modes;
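battery_supported_modes changes meaning here: it is now a bitmask keyed by enum power_supply_charge_type rather than by index into battery_modes[], which is what lets the generic power_supply_charge_types_show()/power_supply_charge_types_parse() helpers emit and parse the sysfs string. A small illustrative sketch of building such a mask (the function name is an assumption):

static u32 example_supported_modes(void)
{
	u32 modes = 0;

	/* Bits follow the enum values, so the generic helpers can map
	 * them straight onto the canonical charge-type names. */
	modes |= BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD);
	modes |= BIT(POWER_SUPPLY_CHARGE_TYPE_FAST);

	return modes;
}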
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
new file mode 100644
index 000000000000..efe26d667973
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lis3lv02d i2c-client instantiation for ACPI SMO88xx devices without I2C resources.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device/bus.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include "dell-smo8800-ids.h"
+
+#define LIS3_WHO_AM_I 0x0f
+
+#define DELL_LIS3LV02D_DMI_ENTRY(product_name, i2c_addr) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), \
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, product_name), \
+ }, \
+ .driver_data = (void *)(uintptr_t)(i2c_addr), \
+ }
+
+/*
+ * Accelerometer's I2C address is not specified in DMI nor ACPI,
+ * so it is needed to define mapping table based on DMI product names.
+ */
+static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
+ /*
+ * Dell platform team told us that these Latitude devices have
+ * ST microelectronics accelerometer at I2C address 0x29.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5250", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5450", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E5550", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6440 ATG", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6540", 0x29),
+ /*
+ * Additional individual entries were added after verification.
+ */
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude 5480", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d),
+ DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 7590", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("XPS 15 9550", 0x29),
+ { }
+};
+
+static u8 i2c_addr;
+static struct i2c_client *i2c_dev;
+static bool notifier_registered;
+
+static bool probe_i2c_addr;
+module_param(probe_i2c_addr, bool, 0444);
+MODULE_PARM_DESC(probe_i2c_addr, "Probe the i801 I2C bus for the accelerometer on models where the address is unknown, this may be dangerous.");
+
+static int detect_lis3lv02d(struct i2c_adapter *adap, unsigned short addr)
+{
+ union i2c_smbus_data smbus_data;
+ int err;
+
+ dev_info(&adap->dev, "Probing for lis3lv02d on address 0x%02x\n", addr);
+
+ err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, LIS3_WHO_AM_I,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (err < 0)
+ return 0; /* Not found */
+
+ /* valid who-am-i values are from drivers/misc/lis3lv02d/lis3lv02d.c */
+ switch (smbus_data.byte) {
+ case 0x32:
+ case 0x33:
+ case 0x3a:
+ case 0x3b:
+ break;
+ default:
+ dev_warn(&adap->dev, "Unknown who-am-i register value 0x%02x\n",
+ smbus_data.byte);
+ return 0; /* Not found */
+ }
+
+ dev_info(&adap->dev,
+ "Detected lis3lv02d on address 0x%02x, please report this upstream to platform-driver-x86@vger.kernel.org so that a quirk can be added\n",
+ addr);
+
+ return 1; /* Found */
+}
+
+static bool i2c_adapter_is_main_i801(struct i2c_adapter *adap)
+{
+ /*
+ * Only match the main I801 adapter and reject secondary adapters
+ * which names start with "SMBus I801 IDF adapter".
+ */
+ return strstarts(adap->name, "SMBus I801 adapter");
+}
+
+static int find_i801(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap, **adap_ret = data;
+
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ return 0;
+
+ if (!i2c_adapter_is_main_i801(adap))
+ return 0;
+
+ *adap_ret = i2c_get_adapter(adap->nr);
+ return 1;
+}
+
+static void instantiate_i2c_client(struct work_struct *work)
+{
+ struct i2c_board_info info = { };
+ struct i2c_adapter *adap = NULL;
+
+ if (i2c_dev)
+ return;
+
+ /*
+ * Use bus_for_each_dev() rather than i2c_for_each_dev() to avoid
+ * a deadlock when find_i801() calls i2c_get_adapter().
+ */
+ bus_for_each_dev(&i2c_bus_type, NULL, &adap, find_i801);
+ if (!adap)
+ return;
+
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+
+ if (i2c_addr) {
+ info.addr = i2c_addr;
+ i2c_dev = i2c_new_client_device(adap, &info);
+ } else {
+ /* First try address 0x29 (most used) and then try 0x1d */
+ static const unsigned short addr_list[] = { 0x29, 0x1d, I2C_CLIENT_END };
+
+ i2c_dev = i2c_new_scanned_device(adap, &info, addr_list, detect_lis3lv02d);
+ }
+
+ if (IS_ERR(i2c_dev)) {
+ dev_err(&adap->dev, "error %ld registering i2c_client\n", PTR_ERR(i2c_dev));
+ i2c_dev = NULL;
+ } else {
+ dev_dbg(&adap->dev, "registered lis3lv02d on address 0x%02x\n", info.addr);
+ }
+
+ i2c_put_adapter(adap);
+}
+static DECLARE_WORK(i2c_work, instantiate_i2c_client);
+
+static int i2c_bus_notify(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct i2c_client *client;
+ struct i2c_adapter *adap;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ break;
+
+ if (i2c_adapter_is_main_i801(adap))
+ queue_work(system_long_wq, &i2c_work);
+ break;
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ client = i2c_verify_client(dev);
+ if (!client)
+ break;
+
+ if (i2c_dev == client) {
+ dev_dbg(&client->adapter->dev, "lis3lv02d i2c_client removed\n");
+ i2c_dev = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+static struct notifier_block i2c_nb = { .notifier_call = i2c_bus_notify };
+
+static int __init match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+static int __init dell_lis3lv02d_init(void)
+{
+ const struct dmi_system_id *lis3lv02d_dmi_id;
+ struct device *dev;
+ int err;
+
+ /*
+ * First check for a matching platform_device. This protects against
+ * SMO88xx ACPI fwnodes that do have an I2C resource; those will
+ * already have an i2c_client instantiated (not a platform_device).
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, smo8800_ids, match_acpi_device_ids);
+ if (!dev) {
+ pr_debug("No SMO88xx platform-device found\n");
+ return 0;
+ }
+ put_device(dev);
+
+ lis3lv02d_dmi_id = dmi_first_match(lis3lv02d_devices);
+ if (!lis3lv02d_dmi_id && !probe_i2c_addr) {
+ pr_warn("accelerometer is present on SMBus but its address is unknown, skipping registration\n");
+ pr_info("Pass dell_lis3lv02d.probe_i2c_addr=1 on the kernel command line to probe, this may be dangerous!\n");
+ return 0;
+ }
+
+ if (lis3lv02d_dmi_id)
+ i2c_addr = (long)lis3lv02d_dmi_id->driver_data;
+
+ /*
+ * Register i2c-bus notifier + queue initial scan for lis3lv02d
+ * i2c_client instantiation.
+ */
+ err = bus_register_notifier(&i2c_bus_type, &i2c_nb);
+ if (err)
+ return err;
+
+ notifier_registered = true;
+
+ queue_work(system_long_wq, &i2c_work);
+ return 0;
+}
+module_init(dell_lis3lv02d_init);
+
+static void __exit dell_lis3lv02d_module_exit(void)
+{
+ if (!notifier_registered)
+ return;
+
+ bus_unregister_notifier(&i2c_bus_type, &i2c_nb);
+ cancel_work_sync(&i2c_work);
+ i2c_unregister_device(i2c_dev);
+}
+module_exit(dell_lis3lv02d_module_exit);
+
+MODULE_DESCRIPTION("lis3lv02d i2c-client instantiation for ACPI SMO88xx devices");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 972385ca1990..483240bb36e7 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -18,10 +18,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_profile.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dell-smbios.h"
+static struct platform_device *platform_device;
+static int supported_modes;
+
static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell Inc.",
@@ -105,8 +109,6 @@ MODULE_DEVICE_TABLE(dmi, dell_device_table);
#define DELL_ACC_SET_FIELD GENMASK(11, 8)
#define DELL_THERMAL_SUPPORTED GENMASK(3, 0)
-static struct platform_profile_handler *thermal_handler;
-
enum thermal_mode_bits {
DELL_BALANCED = BIT(0),
DELL_COOL_BOTTOM = BIT(1),
@@ -182,7 +184,7 @@ static int thermal_set_mode(enum thermal_mode_bits state)
return dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
}
-static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
switch (profile) {
@@ -199,7 +201,7 @@ static int thermal_platform_profile_set(struct platform_profile_handler *pprof,
}
}
-static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
+static int thermal_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int ret;
@@ -228,10 +230,30 @@ static int thermal_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (supported_modes & DELL_QUIET)
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ if (supported_modes & DELL_COOL_BOTTOM)
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ if (supported_modes & DELL_BALANCED)
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ if (supported_modes & DELL_PERFORMANCE)
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dell_pc_platform_profile_ops = {
+ .probe = thermal_platform_profile_probe,
+ .profile_get = thermal_platform_profile_get,
+ .profile_set = thermal_platform_profile_set,
+};
+
static int thermal_init(void)
{
+ struct device *ppdev;
int ret;
- int supported_modes;
/* If thermal commands are not supported, exit without error */
if (!dell_smbios_class_is_supported(CLASS_INFO))
@@ -244,37 +266,28 @@ static int thermal_init(void)
if (!supported_modes)
return 0;
- thermal_handler = kzalloc(sizeof(*thermal_handler), GFP_KERNEL);
- if (!thermal_handler)
- return -ENOMEM;
- thermal_handler->profile_get = thermal_platform_profile_get;
- thermal_handler->profile_set = thermal_platform_profile_set;
+ platform_device = platform_device_register_simple("dell-pc", PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(platform_device))
+ return PTR_ERR(platform_device);
- if (supported_modes & DELL_QUIET)
- set_bit(PLATFORM_PROFILE_QUIET, thermal_handler->choices);
- if (supported_modes & DELL_COOL_BOTTOM)
- set_bit(PLATFORM_PROFILE_COOL, thermal_handler->choices);
- if (supported_modes & DELL_BALANCED)
- set_bit(PLATFORM_PROFILE_BALANCED, thermal_handler->choices);
- if (supported_modes & DELL_PERFORMANCE)
- set_bit(PLATFORM_PROFILE_PERFORMANCE, thermal_handler->choices);
-
- /* Clean up if failed */
- ret = platform_profile_register(thermal_handler);
- if (ret) {
- kfree(thermal_handler);
- thermal_handler = NULL;
+ ppdev = devm_platform_profile_register(&platform_device->dev, "dell-pc",
+ NULL, &dell_pc_platform_profile_ops);
+ if (IS_ERR(ppdev)) {
+ ret = PTR_ERR(ppdev);
+ goto cleanup_platform_device;
}
+ return 0;
+
+cleanup_platform_device:
+ platform_device_unregister(platform_device);
+
return ret;
}
static void thermal_cleanup(void)
{
- if (thermal_handler) {
- platform_profile_remove();
- kfree(thermal_handler);
- }
+ platform_device_unregister(platform_device);
}
static int __init dell_init(void)
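
Design note on the dell-pc conversion above: supported_modes becomes file-scope so the new .probe callback can read it, since that callback only receives drvdata (NULL here) and the choices bitmap. A hedged sketch of the alternative, passing the value as drvdata instead of keeping a global (not what the patch does):

	/* Hypothetical alternative: hand supported_modes to the core as drvdata. */
	static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
	{
		long modes = (long)drvdata;	/* modes passed in at registration */

		if (modes & DELL_BALANCED)
			set_bit(PLATFORM_PROFILE_BALANCED, choices);
		if (modes & DELL_PERFORMANCE)
			set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
		/* remaining mode bits handled as in the patch */
		return 0;
	}

	/* ...and register with (void *)(long)supported_modes as the drvdata argument. */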
diff --git a/drivers/platform/x86/dell/dell-smo8800-ids.h b/drivers/platform/x86/dell/dell-smo8800-ids.h
new file mode 100644
index 000000000000..ec58e229ba7a
--- /dev/null
+++ b/drivers/platform/x86/dell/dell-smo8800-ids.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ACPI SMO88XX lis3lv02d freefall / accelerometer device-ids.
+ *
+ * Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
+ */
+#ifndef _DELL_SMO8800_IDS_H_
+#define _DELL_SMO8800_IDS_H_
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+static const struct acpi_device_id smo8800_ids[] = {
+ { "SMO8800" },
+ { "SMO8801" },
+ { "SMO8810" },
+ { "SMO8811" },
+ { "SMO8820" },
+ { "SMO8821" },
+ { "SMO8830" },
+ { "SMO8831" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smo8800_ids);
+
+#endif
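
The shared header lets both dell-smo8800 and the new dell-lis3lv02d helper key off the same ACPI IDs. A sketch of how the platform driver side typically binds to such a table (assumption: dell-smo8800 references it via acpi_match_table; that part of the driver struct is not shown in this series):

	static struct platform_driver smo8800_driver = {
		.probe  = smo8800_probe,
		.remove = smo8800_remove,
		.driver = {
			.name             = "smo8800",	/* illustrative name */
			.acpi_match_table = smo8800_ids,
		},
	};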
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 87fe03f23f24..8872f9b57fce 100644
--- a/drivers/platform/x86/dell/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -14,10 +14,10 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include "dell-smo8800-ids.h"
struct smo8800_device {
u32 irq; /* acpi device irq */
@@ -163,20 +163,6 @@ static void smo8800_remove(struct platform_device *device)
dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
}
-/* NOTE: Keep this list in sync with drivers/i2c/busses/i2c-i801.c */
-static const struct acpi_device_id smo8800_ids[] = {
- { "SMO8800", 0 },
- { "SMO8801", 0 },
- { "SMO8810", 0 },
- { "SMO8811", 0 },
- { "SMO8820", 0 },
- { "SMO8821", 0 },
- { "SMO8830", 0 },
- { "SMO8831", 0 },
- { "", 0 },
-};
-MODULE_DEVICE_TABLE(acpi, smo8800_ids);
-
static struct platform_driver smo8800_driver = {
.probe = smo8800_probe,
.remove = smo8800_remove,
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index bcc5c0f3bb4d..50002ef13d5d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -159,7 +159,7 @@ static int dell_uart_set_bl_power(struct dell_uart_backlight *dell_bl, int power
set_power[0] = DELL_SOF(SET_CMD_LEN);
set_power[1] = CMD_SET_BL_POWER;
- set_power[2] = (power == FB_BLANK_UNBLANK) ? 1 : 0;
+ set_power[2] = (power == BACKLIGHT_POWER_ON) ? 1 : 0;
set_power[3] = dell_uart_checksum(set_power, 3);
ret = dell_uart_bl_command(dell_bl, set_power, SET_CMD_LEN, resp, SET_RESP_LEN);
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 40ddc6eb7562..d00389b860e4 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = {
/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
static int reset_option = -1;
-static const struct class *fw_attr_class;
/**
@@ -541,15 +540,11 @@ static int __init sysman_init(void)
goto err_exit_bios_attr_pass_interface;
}
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_exit_bios_attr_pass_interface;
-
- wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(wmi_priv.class_dev)) {
ret = PTR_ERR(wmi_priv.class_dev);
- goto err_unregister_class;
+ goto err_exit_bios_attr_pass_interface;
}
wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
@@ -602,10 +597,7 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
-
-err_unregister_class:
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_exit_bios_attr_pass_interface:
exit_bios_attr_pass_interface();
@@ -619,8 +611,7 @@ err_exit_bios_attr_set_interface:
static void __exit sysman_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
exit_bios_attr_set_interface();
exit_bios_attr_pass_interface();
}
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 9f51e0fcab04..e30ca325938c 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -475,7 +475,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
}
static ssize_t data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -492,7 +492,7 @@ static ssize_t data_read(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return ret_count;
}
-static BIN_ATTR_RO(data, 0);
+static const BIN_ATTR_RO(data, 0);
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
@@ -530,7 +530,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
}
static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -540,7 +540,7 @@ static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
}
static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int rc = count;
@@ -597,10 +597,10 @@ static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RW(image_type, 0);
+static const BIN_ATTR_RW(image_type, 0);
static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int size = 0;
@@ -613,7 +613,7 @@ static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
}
static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
unsigned long temp;
@@ -626,9 +626,9 @@ static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return count;
}
-static BIN_ATTR_RW(packet_size, 0);
+static const BIN_ATTR_RW(packet_size, 0);
-static struct bin_attribute *rbu_bin_attrs[] = {
+static const struct bin_attribute *const rbu_bin_attrs[] = {
&bin_attr_data,
&bin_attr_image_type,
&bin_attr_packet_size,
@@ -636,7 +636,7 @@ static struct bin_attribute *rbu_bin_attrs[] = {
};
static const struct attribute_group rbu_group = {
- .bin_attrs = rbu_bin_attrs,
+ .bin_attrs_new = rbu_bin_attrs,
};
static int __init dcdrbu_init(void)
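
The dell_rbu hunks above all follow the sysfs transition to const binary attributes: callbacks take a const bin_attribute pointer and the group uses .bin_attrs_new. A condensed sketch of the constified pattern in one place (names are illustrative):

	static ssize_t example_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *bin_attr,
				    char *buf, loff_t pos, size_t count)
	{
		return 0;	/* nothing to read in this illustration */
	}
	static const BIN_ATTR_RO(example, 0);

	static const struct bin_attribute *const example_bin_attrs[] = {
		&bin_attr_example,
		NULL,
	};

	static const struct attribute_group example_group = {
		.bin_attrs_new = example_bin_attrs,
	};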
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index 182a07d8ae3d..736e96c186d9 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -2,51 +2,25 @@
/* Firmware attributes class helper module */
-#include <linux/mutex.h>
-#include <linux/device/class.h>
#include <linux/module.h>
#include "firmware_attributes_class.h"
-static DEFINE_MUTEX(fw_attr_lock);
-static int fw_attr_inuse;
-
-static const struct class firmware_attributes_class = {
+const struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
+EXPORT_SYMBOL_GPL(firmware_attributes_class);
-int fw_attributes_class_get(const struct class **fw_attr_class)
+static __init int fw_attributes_class_init(void)
{
- int err;
-
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) { /*first time class is being used*/
- err = class_register(&firmware_attributes_class);
- if (err) {
- mutex_unlock(&fw_attr_lock);
- return err;
- }
- }
- fw_attr_inuse++;
- *fw_attr_class = &firmware_attributes_class;
- mutex_unlock(&fw_attr_lock);
- return 0;
+ return class_register(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_get);
+module_init(fw_attributes_class_init);
-int fw_attributes_class_put(void)
+static __exit void fw_attributes_class_exit(void)
{
- mutex_lock(&fw_attr_lock);
- if (!fw_attr_inuse) {
- mutex_unlock(&fw_attr_lock);
- return -EINVAL;
- }
- fw_attr_inuse--;
- if (!fw_attr_inuse) /* No more consumers */
- class_unregister(&firmware_attributes_class);
- mutex_unlock(&fw_attr_lock);
- return 0;
+ class_unregister(&firmware_attributes_class);
}
-EXPORT_SYMBOL_GPL(fw_attributes_class_put);
+module_exit(fw_attributes_class_exit);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("Firmware attributes class helper module");
diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h
index 363c75f1ac1b..d27abe54fcf9 100644
--- a/drivers/platform/x86/firmware_attributes_class.h
+++ b/drivers/platform/x86/firmware_attributes_class.h
@@ -5,7 +5,8 @@
#ifndef FW_ATTR_CLASS_H
#define FW_ATTR_CLASS_H
-int fw_attributes_class_get(const struct class **fw_attr_class);
-int fw_attributes_class_put(void);
+#include <linux/device/class.h>
+
+extern const struct class firmware_attributes_class;
#endif /* FW_ATTR_CLASS_H */
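
With firmware_attributes_class registered once by its own module and exported directly, consumers no longer ref-count the class. A sketch of the resulting consumer pattern, with error unwinding trimmed for brevity:

	#include "firmware_attributes_class.h"

	/* in the consumer's init path */
	class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
				  NULL, "%s", DRIVER_NAME);
	if (IS_ERR(class_dev))
		return PTR_ERR(class_dev);

	/* in the consumer's exit path */
	device_destroy(&firmware_attributes_class, MKDEV(0, 0));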
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ae992ac1ab4a..a0eae24ca9e6 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -505,8 +505,8 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
return -ENOMEM;
fujitsu_bl = priv;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
pr_info("ACPI: %s [%s]\n",
@@ -891,8 +891,8 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
fext = device;
- strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ strscpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
/* kfifo */
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 2dc50152158a..0b277b7e37dd 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
-static const struct class *fw_attr_class;
-
ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -972,11 +970,7 @@ static int __init hp_init(void)
if (ret)
return ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- goto err_unregister_class;
-
- bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME);
if (IS_ERR(bioscfg_drv.class_dev)) {
ret = PTR_ERR(bioscfg_drv.class_dev);
@@ -1043,10 +1037,9 @@ err_release_attributes_data:
release_attributes_data();
err_destroy_classdev:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
err_unregister_class:
- fw_attributes_class_put();
hp_exit_attr_set_interface();
return ret;
@@ -1055,9 +1048,8 @@ err_unregister_class:
static void __exit hp_exit(void)
{
release_attributes_data();
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
- fw_attributes_class_put();
hp_exit_attr_set_interface();
}
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 20c55bab3b8c..db5fdee2109c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -45,6 +45,10 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
#define HP_OMEN_EC_THERMAL_PROFILE_TIMER_OFFSET 0x63
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define HP_FAN_SPEED_AUTOMATIC 0x00
+#define HP_POWER_LIMIT_DEFAULT 0x00
+#define HP_POWER_LIMIT_NO_CHANGE 0xFF
+
#define ACPI_AC_CLASS "ac_adapter"
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
@@ -83,11 +87,16 @@ static const char * const omen_timed_thermal_profile_boards[] = {
"8BAD", "8A42", "8A15"
};
-/* DMI Board names of Victus laptops */
+/* DMI Board names of Victus 16-d1xxx laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
+/* DMI Board names of Victus 16-s1000 laptops */
+static const char * const victus_s_thermal_profile_boards[] = {
+ "8C9C"
+};
+
enum hp_wmi_radio {
HPWMI_WIFI = 0x0,
HPWMI_BLUETOOTH = 0x1,
@@ -147,12 +156,32 @@ enum hp_wmi_commandtype {
HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
+struct victus_power_limits {
+ u8 pl1;
+ u8 pl2;
+ u8 pl4;
+ u8 cpu_gpu_concurrent_limit;
+};
+
+struct victus_gpu_power_modes {
+ u8 ctgp_enable;
+ u8 ppab_enable;
+ u8 dstate;
+ u8 gpu_slowdown_temp;
+};
+
enum hp_wmi_gm_commandtype {
- HPWMI_FAN_SPEED_GET_QUERY = 0x11,
- HPWMI_SET_PERFORMANCE_MODE = 0x1A,
- HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
- HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
- HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+ HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+ HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+ HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+ HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+ HPWMI_FAN_COUNT_GET_QUERY = 0x10,
+ HPWMI_GET_GPU_THERMAL_MODES_QUERY = 0x21,
+ HPWMI_SET_GPU_THERMAL_MODES_QUERY = 0x22,
+ HPWMI_SET_POWER_LIMITS_QUERY = 0x29,
+ HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY = 0x2D,
+ HPWMI_FAN_SPEED_SET_QUERY = 0x2E,
};
enum hp_wmi_command {
@@ -211,6 +240,11 @@ enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_QUIET = 0x03,
};
+enum hp_thermal_profile_victus_s {
+ HP_VICTUS_S_THERMAL_PROFILE_DEFAULT = 0x00,
+ HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE = 0x01,
+};
+
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
@@ -273,7 +307,7 @@ static DEFINE_MUTEX(active_platform_profile_lock);
static struct input_dev *hp_wmi_input_dev;
static struct input_dev *camera_shutter_input_dev;
static struct platform_device *hp_wmi_platform_dev;
-static struct platform_profile_handler platform_profile_handler;
+static struct device *platform_profile_device;
static struct notifier_block platform_power_source_nb;
static enum platform_profile_option active_platform_profile;
static bool platform_profile_support;
@@ -411,6 +445,26 @@ out_free:
return ret;
}
+/*
+ * Calling hp_wmi_get_fan_count_userdefine_trigger() also places and/or
+ * keeps the laptop in the user-defined thermal and fan state instead of
+ * a fallback state. After a 120 second timeout, however, the laptop
+ * goes back to its fallback state.
+ */
+static int hp_wmi_get_fan_count_userdefine_trigger(void)
+{
+ u8 fan_data[4] = {};
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_COUNT_GET_QUERY, HPWMI_GM,
+ &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[0]; /* The other bytes do not provide the fan count */
+}
+
static int hp_wmi_get_fan_speed(int fan)
{
u8 fsh, fsl;
@@ -429,6 +483,23 @@ static int hp_wmi_get_fan_speed(int fan)
return (fsh << 8) | fsl;
}
+static int hp_wmi_get_fan_speed_victus_s(int fan)
+{
+ u8 fan_data[128] = {};
+ int ret;
+
+ if (fan < 0 || fan >= sizeof(fan_data))
+ return -EINVAL;
+
+ ret = hp_wmi_perform_query(HPWMI_VICTUS_S_FAN_SPEED_GET_QUERY,
+ HPWMI_GM, &fan_data, sizeof(u8),
+ sizeof(fan_data));
+ if (ret != 0)
+ return -EINVAL;
+
+ return fan_data[fan] * 100;
+}
+
static int hp_wmi_read_int(int query)
{
int val = 0, ret;
@@ -557,6 +628,30 @@ static int hp_wmi_fan_speed_max_set(int enabled)
return enabled;
}
+static int hp_wmi_fan_speed_reset(void)
+{
+ u8 fan_speed[2] = { HP_FAN_SPEED_AUTOMATIC, HP_FAN_SPEED_AUTOMATIC };
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_SET_QUERY, HPWMI_GM,
+ &fan_speed, sizeof(fan_speed), 0);
+
+ return ret;
+}
+
+static int hp_wmi_fan_speed_max_reset(void)
+{
+ int ret;
+
+ ret = hp_wmi_fan_speed_max_set(0);
+ if (ret)
+ return ret;
+
+ /* Disabling max fan speed on Victus s1xxx laptops needs a 2nd step: */
+ ret = hp_wmi_fan_speed_reset();
+ return ret;
+}
+
static int hp_wmi_fan_speed_max_get(void)
{
int val = 0, ret;
@@ -1221,7 +1316,7 @@ static int platform_profile_omen_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+static int platform_profile_omen_get(struct device *dev,
enum platform_profile_option *profile)
{
/*
@@ -1318,7 +1413,7 @@ static int platform_profile_omen_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+static int platform_profile_omen_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1345,7 +1440,7 @@ static int thermal_profile_set(int thermal_profile)
sizeof(thermal_profile), 0);
}
-static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
int tp;
@@ -1374,7 +1469,7 @@ static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+static int hp_wmi_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int err, tp;
@@ -1440,11 +1535,11 @@ static int platform_profile_victus_get_ec(enum platform_profile_option *profile)
return 0;
}
-static int platform_profile_victus_get(struct platform_profile_handler *pprof,
+static int platform_profile_victus_get(struct device *dev,
enum platform_profile_option *profile)
{
/* Same behaviour as platform_profile_omen_get */
- return platform_profile_omen_get(pprof, profile);
+ return platform_profile_omen_get(dev, profile);
}
static int platform_profile_victus_set_ec(enum platform_profile_option profile)
@@ -1472,7 +1567,162 @@ static int platform_profile_victus_set_ec(enum platform_profile_option profile)
return 0;
}
-static int platform_profile_victus_set(struct platform_profile_handler *pprof,
+static bool is_victus_s_thermal_profile(void)
+{
+ const char *board_name;
+
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board_name)
+ return false;
+
+ return match_string(victus_s_thermal_profile_boards,
+ ARRAY_SIZE(victus_s_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static int victus_s_gpu_thermal_profile_get(bool *ctgp_enable,
+ bool *ppab_enable,
+ u8 *dstate,
+ u8 *gpu_slowdown_temp)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_GET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes),
+ sizeof(gpu_power_modes));
+ if (ret == 0) {
+ *ctgp_enable = gpu_power_modes.ctgp_enable ? true : false;
+ *ppab_enable = gpu_power_modes.ppab_enable ? true : false;
+ *dstate = gpu_power_modes.dstate;
+ *gpu_slowdown_temp = gpu_power_modes.gpu_slowdown_temp;
+ }
+
+ return ret;
+}
+
+static int victus_s_gpu_thermal_profile_set(bool ctgp_enable,
+ bool ppab_enable,
+ u8 dstate)
+{
+ struct victus_gpu_power_modes gpu_power_modes;
+ int ret;
+
+ bool current_ctgp_state, current_ppab_state;
+ u8 current_dstate, current_gpu_slowdown_temp;
+
+ /* Retrieve the GPU slowdown temperature so it can be kept unchanged */
+ ret = victus_s_gpu_thermal_profile_get(&current_ctgp_state,
+ &current_ppab_state,
+ &current_dstate,
+ &current_gpu_slowdown_temp);
+ if (ret < 0) {
+ pr_warn("GPU modes not updated, unable to get slowdown temp\n");
+ return ret;
+ }
+
+ gpu_power_modes.ctgp_enable = ctgp_enable ? 0x01 : 0x00;
+ gpu_power_modes.ppab_enable = ppab_enable ? 0x01 : 0x00;
+ gpu_power_modes.dstate = dstate;
+ gpu_power_modes.gpu_slowdown_temp = current_gpu_slowdown_temp;
+
+
+ ret = hp_wmi_perform_query(HPWMI_SET_GPU_THERMAL_MODES_QUERY, HPWMI_GM,
+ &gpu_power_modes, sizeof(gpu_power_modes), 0);
+
+ return ret;
+}
+
+/* Note: HP_POWER_LIMIT_DEFAULT can be used to restore default PL1 and PL2 */
+static int victus_s_set_cpu_pl1_pl2(u8 pl1, u8 pl2)
+{
+ struct victus_power_limits power_limits;
+ int ret;
+
+ /* We need to know both PL1 and PL2 values in order to check them */
+ if (pl1 == HP_POWER_LIMIT_NO_CHANGE || pl2 == HP_POWER_LIMIT_NO_CHANGE)
+ return -EINVAL;
+
+ /* PL2 is not supposed to be lower than PL1 */
+ if (pl2 < pl1)
+ return -EINVAL;
+
+ power_limits.pl1 = pl1;
+ power_limits.pl2 = pl2;
+ power_limits.pl4 = HP_POWER_LIMIT_NO_CHANGE;
+ power_limits.cpu_gpu_concurrent_limit = HP_POWER_LIMIT_NO_CHANGE;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_POWER_LIMITS_QUERY, HPWMI_GM,
+ &power_limits, sizeof(power_limits), 0);
+
+ return ret;
+}
+
+static int platform_profile_victus_s_set_ec(enum platform_profile_option profile)
+{
+ bool gpu_ctgp_enable, gpu_ppab_enable;
+ u8 gpu_dstate; /* Test shows 1 = 100%, 2 = 50%, 3 = 25%, 4 = 12.5% */
+ int err, tp;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_PERFORMANCE;
+ gpu_ctgp_enable = true;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = true;
+ gpu_dstate = 1;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ tp = HP_VICTUS_S_THERMAL_PROFILE_DEFAULT;
+ gpu_ctgp_enable = false;
+ gpu_ppab_enable = false;
+ gpu_dstate = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hp_wmi_get_fan_count_userdefine_trigger();
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0) {
+ pr_err("Failed to set platform profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ err = victus_s_gpu_thermal_profile_set(gpu_ctgp_enable,
+ gpu_ppab_enable,
+ gpu_dstate);
+ if (err < 0) {
+ pr_err("Failed to set GPU profile %d: %d\n", profile, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int platform_profile_victus_s_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ int err;
+
+ guard(mutex)(&active_platform_profile_lock);
+
+ err = platform_profile_victus_s_set_ec(profile);
+ if (err < 0)
+ return err;
+
+ active_platform_profile = profile;
+
+ return 0;
+}
+
+static int platform_profile_victus_set(struct device *dev,
enum platform_profile_option profile)
{
int err;
@@ -1488,6 +1738,26 @@ static int platform_profile_victus_set(struct platform_profile_handler *pprof,
return 0;
}
+static int hp_wmi_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ if (is_omen_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ } else if (is_victus_thermal_profile()) {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ } else if (is_victus_s_thermal_profile()) {
+ /* Adding an equivalent to HP Omen software ECO mode: */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ } else {
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_COOL, choices);
+ }
+
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
static int omen_powersource_event(struct notifier_block *nb,
unsigned long value,
void *data)
@@ -1545,6 +1815,39 @@ static int omen_powersource_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int victus_s_powersource_event(struct notifier_block *nb,
+ unsigned long value,
+ void *data)
+{
+ struct acpi_bus_event *event_entry = data;
+ int err;
+
+ if (strcmp(event_entry->device_class, ACPI_AC_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ pr_debug("Received power source device event\n");
+
+ /*
+ * Switching the power source to battery while Performance mode is active
+ * requires the CPU power limits to be re-applied manually. The same goes
+ * when switching to AC power while Performance mode is active. The other
+ * modes behave correctly without any manual action.
+ * Seen on HP 16-s1034nf (board 8C9C) with F.11 and F.13 BIOS versions.
+ */
+
+ if (active_platform_profile == PLATFORM_PROFILE_PERFORMANCE) {
+ pr_debug("Triggering CPU PL1/PL2 actualization\n");
+ err = victus_s_set_cpu_pl1_pl2(HP_POWER_LIMIT_DEFAULT,
+ HP_POWER_LIMIT_DEFAULT);
+ if (err)
+ pr_warn("Failed to actualize power limits: %d\n", err);
+
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
static int omen_register_powersource_event_handler(void)
{
int err;
@@ -1560,13 +1863,57 @@ static int omen_register_powersource_event_handler(void)
return 0;
}
+static int victus_s_register_powersource_event_handler(void)
+{
+ int err;
+
+ platform_power_source_nb.notifier_call = victus_s_powersource_event;
+ err = register_acpi_notifier(&platform_power_source_nb);
+ if (err < 0) {
+ pr_warn("Failed to install ACPI power source notify handler\n");
+ return err;
+ }
+
+ return 0;
+}
+
static inline void omen_unregister_powersource_event_handler(void)
{
unregister_acpi_notifier(&platform_power_source_nb);
}
-static int thermal_profile_setup(void)
+static inline void victus_s_unregister_powersource_event_handler(void)
{
+ unregister_acpi_notifier(&platform_power_source_nb);
+}
+
+static const struct platform_profile_ops platform_profile_omen_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_omen_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_victus_get,
+ .profile_set = platform_profile_victus_set,
+};
+
+static const struct platform_profile_ops platform_profile_victus_s_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = platform_profile_omen_get,
+ .profile_set = platform_profile_victus_s_set,
+};
+
+static const struct platform_profile_ops hp_wmi_platform_profile_ops = {
+ .probe = hp_wmi_platform_profile_probe,
+ .profile_get = hp_wmi_platform_profile_get,
+ .profile_set = hp_wmi_platform_profile_set,
+};
+
+static int thermal_profile_setup(struct platform_device *device)
+{
+ const struct platform_profile_ops *ops;
int err, tp;
if (is_omen_thermal_profile()) {
@@ -1582,10 +1929,7 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_omen_get;
- platform_profile_handler.profile_set = platform_profile_omen_set;
-
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &platform_profile_omen_ops;
} else if (is_victus_thermal_profile()) {
err = platform_profile_victus_get_ec(&active_platform_profile);
if (err < 0)
@@ -1599,10 +1943,19 @@ static int thermal_profile_setup(void)
if (err < 0)
return err;
- platform_profile_handler.profile_get = platform_profile_victus_get;
- platform_profile_handler.profile_set = platform_profile_victus_set;
+ ops = &platform_profile_victus_ops;
+ } else if (is_victus_s_thermal_profile()) {
+ /*
+ * Because the laptop's current thermal profile cannot be retrieved
+ * during this setup, we default it to Balanced.
+ */
+ active_platform_profile = PLATFORM_PROFILE_BALANCED;
+
+ err = platform_profile_victus_s_set_ec(active_platform_profile);
+ if (err < 0)
+ return err;
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
+ ops = &platform_profile_victus_s_ops;
} else {
tp = thermal_profile_get();
@@ -1617,20 +1970,15 @@ static int thermal_profile_setup(void)
if (err)
return err;
- platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
- platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+ ops = &hp_wmi_platform_profile_ops;
}
- set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
-
- err = platform_profile_register(&platform_profile_handler);
- if (err)
- return err;
+ platform_profile_device = devm_platform_profile_register(&device->dev, "hp-wmi",
+ NULL, ops);
+ if (IS_ERR(platform_profile_device))
+ return PTR_ERR(platform_profile_device);
+ pr_info("Registered as platform profile handler\n");
platform_profile_support = true;
return 0;
@@ -1663,7 +2011,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (err < 0)
return err;
- thermal_profile_setup();
+ thermal_profile_setup(device);
return 0;
}
@@ -1689,9 +2037,6 @@ static void __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
-
- if (platform_profile_support)
- platform_profile_remove();
}
static int hp_wmi_resume_handler(struct device *device)
@@ -1759,8 +2104,13 @@ static umode_t hp_wmi_hwmon_is_visible(const void *data,
case hwmon_pwm:
return 0644;
case hwmon_fan:
- if (hp_wmi_get_fan_speed(channel) >= 0)
- return 0444;
+ if (is_victus_s_thermal_profile()) {
+ if (hp_wmi_get_fan_speed_victus_s(channel) >= 0)
+ return 0444;
+ } else {
+ if (hp_wmi_get_fan_speed(channel) >= 0)
+ return 0444;
+ }
break;
default:
return 0;
@@ -1776,8 +2126,10 @@ static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_fan:
- ret = hp_wmi_get_fan_speed(channel);
-
+ if (is_victus_s_thermal_profile())
+ ret = hp_wmi_get_fan_speed_victus_s(channel);
+ else
+ ret = hp_wmi_get_fan_speed(channel);
if (ret < 0)
return ret;
*val = ret;
@@ -1810,11 +2162,17 @@ static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_pwm:
switch (val) {
case 0:
+ if (is_victus_s_thermal_profile())
+ hp_wmi_get_fan_count_userdefine_trigger();
/* 0 is no fan speed control (max), which is 1 for us */
return hp_wmi_fan_speed_max_set(1);
case 2:
/* 2 is automatic speed control, which is 0 for us */
- return hp_wmi_fan_speed_max_set(0);
+ if (is_victus_s_thermal_profile()) {
+ hp_wmi_get_fan_count_userdefine_trigger();
+ return hp_wmi_fan_speed_max_reset();
+ } else
+ return hp_wmi_fan_speed_max_set(0);
default:
/* we don't support manual fan speed control */
return -EINVAL;
@@ -1893,6 +2251,10 @@ static int __init hp_wmi_init(void)
err = omen_register_powersource_event_handler();
if (err)
goto err_unregister_device;
+ } else if (is_victus_s_thermal_profile()) {
+ err = victus_s_register_powersource_event_handler();
+ if (err)
+ goto err_unregister_device;
}
return 0;
@@ -1912,6 +2274,9 @@ static void __exit hp_wmi_exit(void)
if (is_omen_thermal_profile() || is_victus_thermal_profile())
omen_unregister_powersource_event_handler();
+ if (is_victus_s_thermal_profile())
+ victus_s_unregister_powersource_event_handler();
+
if (wmi_has_guid(HPWMI_EVENT_GUID))
hp_wmi_input_destroy();
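
A small worked example of the GPU dstate values documented in the Victus 16-s code above (illustration only, not part of the patch): the observed values scale GPU power as 1/2^(dstate - 1), i.e. 1 = 100 %, 2 = 50 %, 3 = 25 %, 4 = 12.5 %. Expressed in tenths of a percent to stay integer-only:

	static inline unsigned int victus_s_dstate_to_tenths_of_percent(u8 dstate)
	{
		return 1000 >> (dstate - 1);	/* 1000, 500, 250, 125 */
	}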
diff --git a/drivers/platform/x86/hp/hp_accel.c b/drivers/platform/x86/hp/hp_accel.c
index 39a6530f5072..10d5af18d639 100644
--- a/drivers/platform/x86/hp/hp_accel.c
+++ b/drivers/platform/x86/hp/hp_accel.c
@@ -267,7 +267,7 @@ static struct delayed_led_classdev hpled_led = {
};
static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -326,7 +326,7 @@ static int lis3lv02d_probe(struct platform_device *device)
/* filter to remove HPQ6000 accelerometer data
* from keyboard bus stream */
if (strstr(dev_name(&device->dev), "HPQ6000"))
- i8042_install_filter(hp_accel_i8042_filter);
+ i8042_install_filter(hp_accel_i8042_filter, NULL);
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
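
hp_accel is adapted here to the i8042 filter API that now carries a context pointer. A hedged sketch, assuming the context passed at install time is handed back to the filter on every byte, which would let a driver avoid file-scope state; the filter body below is placeholder logic:

	static bool example_filter(unsigned char data, unsigned char str,
				   struct serio *port, void *context)
	{
		/* hypothetical: per-instance state instead of file-scope globals */
		struct delayed_led_classdev *led = context;

		dev_dbg(&port->dev, "filter saw 0x%02x (led %p)\n", data, led);
		return false;	/* never swallow the byte in this illustration */
	}

	/* install with the driver's own data as context */
	i8042_install_filter(example_filter, &hpled_led);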
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index e980dd18e5f6..30bd366d7b58 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -142,7 +142,7 @@ enum {
struct ideapad_dytc_priv {
enum platform_profile_option current_profile;
- struct platform_profile_handler pprof;
+ struct device *ppdev; /* platform profile device */
struct mutex mutex; /* protects the DYTC interface */
struct ideapad_private *priv;
};
@@ -933,10 +933,10 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
*profile = dytc->current_profile;
return 0;
@@ -986,10 +986,10 @@ static int dytc_cql_command(struct ideapad_private *priv, unsigned long cmd,
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
+ struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);
struct ideapad_private *priv = dytc->priv;
unsigned long output;
int err;
@@ -1023,6 +1023,15 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
return -EINTR;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
static void dytc_profile_refresh(struct ideapad_private *priv)
{
enum platform_profile_option profile;
@@ -1041,7 +1050,7 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
if (profile != priv->dytc->current_profile) {
priv->dytc->current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(priv->dytc->ppdev);
}
}
@@ -1063,6 +1072,12 @@ static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
{}
};
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static int ideapad_dytc_profile_init(struct ideapad_private *priv)
{
int err, dytc_version;
@@ -1103,18 +1118,15 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
mutex_init(&priv->dytc->mutex);
priv->dytc->priv = priv;
- priv->dytc->pprof.profile_get = dytc_profile_get;
- priv->dytc->pprof.profile_set = dytc_profile_set;
-
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->dytc->pprof.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->dytc->pprof.choices);
/* Create platform_profile structure and register */
- err = platform_profile_register(&priv->dytc->pprof);
- if (err)
+ priv->dytc->ppdev = devm_platform_profile_register(&priv->platform_device->dev,
+ "ideapad-laptop", priv->dytc,
+ &dytc_profile_ops);
+ if (IS_ERR(priv->dytc->ppdev)) {
+ err = PTR_ERR(priv->dytc->ppdev);
goto pp_reg_failed;
+ }
/* Ensure initial values are correct */
dytc_profile_refresh(priv);
@@ -1134,7 +1146,6 @@ static void ideapad_dytc_profile_exit(struct ideapad_private *priv)
if (!priv->dytc)
return;
- platform_profile_remove();
mutex_destroy(&priv->dytc->mutex);
kfree(priv->dytc);
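
The ideapad conversion above shows the drvdata round trip of the new platform-profile API: the pointer passed as the drvdata argument of devm_platform_profile_register() (priv->dytc here) is what dev_get_drvdata() returns inside every callback, replacing the old container_of() on the handler. A condensed sketch of the callback side:

	static int dytc_profile_get(struct device *dev,
				    enum platform_profile_option *profile)
	{
		/* drvdata supplied at registration time comes back here */
		struct ideapad_dytc_priv *dytc = dev_get_drvdata(dev);

		*profile = dytc->current_profile;
		return 0;
	}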
diff --git a/drivers/platform/x86/inspur_platform_profile.c b/drivers/platform/x86/inspur_platform_profile.c
index 8440defa6788..e02f5a55a6c5 100644
--- a/drivers/platform/x86/inspur_platform_profile.c
+++ b/drivers/platform/x86/inspur_platform_profile.c
@@ -32,7 +32,7 @@ enum inspur_tmp_profile {
struct inspur_wmi_priv {
struct wmi_device *wdev;
- struct platform_profile_handler handler;
+ struct device *ppdev;
};
static int inspur_wmi_perform_query(struct wmi_device *wdev,
@@ -84,11 +84,10 @@ out_free:
* 0x0: No Error
* 0x1: Error
*/
-static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_set(struct device *dev,
enum platform_profile_option profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -132,11 +131,10 @@ static int inspur_platform_profile_set(struct platform_profile_handler *pprof,
* 0x1: Performance Mode
* 0x2: Power Saver Mode
*/
-static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
+static int inspur_platform_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
- struct inspur_wmi_priv *priv = container_of(pprof, struct inspur_wmi_priv,
- handler);
+ struct inspur_wmi_priv *priv = dev_get_drvdata(dev);
u8 ret_code[4] = {0, 0, 0, 0};
int ret;
@@ -166,6 +164,21 @@ static int inspur_platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int inspur_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops inspur_platform_profile_ops = {
+ .probe = inspur_platform_profile_probe,
+ .profile_get = inspur_platform_profile_get,
+ .profile_set = inspur_platform_profile_set,
+};
+
static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct inspur_wmi_priv *priv;
@@ -177,19 +190,10 @@ static int inspur_wmi_probe(struct wmi_device *wdev, const void *context)
priv->wdev = wdev;
dev_set_drvdata(&wdev->dev, priv);
- priv->handler.profile_get = inspur_platform_profile_get;
- priv->handler.profile_set = inspur_platform_profile_set;
-
- set_bit(PLATFORM_PROFILE_LOW_POWER, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, priv->handler.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, priv->handler.choices);
+ priv->ppdev = devm_platform_profile_register(&wdev->dev, "inspur-wmi", priv,
+ &inspur_platform_profile_ops);
- return platform_profile_register(&priv->handler);
-}
-
-static void inspur_wmi_remove(struct wmi_device *wdev)
-{
- platform_profile_remove();
+ return PTR_ERR_OR_ZERO(priv->ppdev);
}
static const struct wmi_device_id inspur_wmi_id_table[] = {
@@ -206,7 +210,6 @@ static struct wmi_driver inspur_wmi_driver = {
},
.id_table = inspur_wmi_id_table,
.probe = inspur_wmi_probe,
- .remove = inspur_wmi_remove,
.no_singleton = true,
};
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index eb698dcb9af9..19a2246f2770 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -83,6 +83,7 @@ config INTEL_BXTWC_PMIC_TMU
config INTEL_BYTCRC_PWRSRC
tristate "Intel Bay Trail Crystal Cove power source driver"
depends on INTEL_SOC_PMIC
+ depends on POWER_SUPPLY
help
This option adds a power source driver for Crystal Cove PMICs
on Intel Bay Trail devices.
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
index 3edc2a9dab38..68ac040082df 100644
--- a/drivers/platform/x86/intel/bytcrc_pwrsrc.c
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -8,13 +8,22 @@
* Copyright (C) 2013 Intel Corporation
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
+#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#define CRYSTALCOVE_PWRSRC_IRQ 0x03
#define CRYSTALCOVE_SPWRSRC_REG 0x1E
+#define CRYSTALCOVE_SPWRSRC_USB BIT(0)
+#define CRYSTALCOVE_SPWRSRC_DC BIT(1)
+#define CRYSTALCOVE_SPWRSRC_BATTERY BIT(2)
#define CRYSTALCOVE_RESETSRC0_REG 0x20
#define CRYSTALCOVE_RESETSRC1_REG 0x21
#define CRYSTALCOVE_WAKESRC_REG 0x22
@@ -22,6 +31,7 @@
struct crc_pwrsrc_data {
struct regmap *regmap;
struct dentry *debug_dentry;
+ struct power_supply *psy;
unsigned int resetsrc0;
unsigned int resetsrc1;
unsigned int wakesrc;
@@ -118,13 +128,60 @@ static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
return regmap_write(data->regmap, reg, *val);
}
+static irqreturn_t crc_pwrsrc_irq_handler(int irq, void *_data)
+{
+ struct crc_pwrsrc_data *data = _data;
+ unsigned int irq_mask;
+
+ if (regmap_read(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, &irq_mask))
+ return IRQ_NONE;
+
+ regmap_write(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, irq_mask);
+
+ power_supply_changed(data->psy);
+ return IRQ_HANDLED;
+}
+
+static int crc_pwrsrc_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct crc_pwrsrc_data *data = power_supply_get_drvdata(psy);
+ unsigned int pwrsrc;
+ int ret;
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &pwrsrc);
+ if (ret)
+ return ret;
+
+ val->intval = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB |
+ CRYSTALCOVE_SPWRSRC_DC));
+ return 0;
+}
+
+static const enum power_supply_property crc_pwrsrc_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc crc_pwrsrc_psy_desc = {
+ .name = "crystal_cove_pwrsrc",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = crc_pwrsrc_psy_props,
+ .num_properties = ARRAY_SIZE(crc_pwrsrc_psy_props),
+ .get_property = crc_pwrsrc_psy_get_property,
+};
+
static int crc_pwrsrc_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
struct crc_pwrsrc_data *data;
- int ret;
+ int irq, ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -149,6 +206,24 @@ static int crc_pwrsrc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (device_property_read_bool(dev->parent, "linux,register-pwrsrc-power_supply")) {
+ struct power_supply_config psy_cfg = { .drv_data = data };
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data->psy = devm_power_supply_register(dev, &crc_pwrsrc_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power-supply\n");
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ crc_pwrsrc_irq_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+ }
+
data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 5c3c0dfa1bf8..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -23,12 +23,14 @@
* IFS Image
* ---------
*
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
* family-model-stepping. IFS Images are not applicable for some test types.
* Wherever applicable the sysfs directory would provide a "current_batch" file
* (see below) for loading the image.
*
+ * .. [#f1] https://intel.com/InFieldScan
*
* IFS Image Loading
* -----------------
@@ -125,9 +127,6 @@
* 2) Hardware allows for some number of cores to be tested in parallel.
* The driver does not make use of this, it only tests one core at a time.
*
- * .. [#f1] https://github.com/intel/TBD
- *
- *
* Structural Based Functional Test at Field (SBAF):
* -------------------------------------------------
*
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 0cc80603a8a9..3b48cd7a4075 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -83,8 +83,12 @@ static void int0002_irq_ack(struct irq_data *data)
static void int0002_irq_unmask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
+ gpiochip_enable_irq(gc, hwirq);
+
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
@@ -92,11 +96,15 @@ static void int0002_irq_unmask(struct irq_data *data)
static void int0002_irq_mask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
@@ -140,12 +148,14 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_irqchip = {
+static const struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
.irq_unmask = int0002_irq_unmask,
.irq_set_wake = int0002_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
@@ -203,7 +213,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = &int0002_irqchip;
+ gpio_irq_chip_set_chip(girq, &int0002_irqchip);
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index b3a2578e06c1..1638be8fa71e 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -70,6 +70,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return -ENODEV;
}
+ dev_dbg(dev, "Sensor name %s\n", acpi_dev_name(sensor));
+
*name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(sensor));
if (!*name_ret)
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index d881b2cfcdfc..31015ebe20d8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -178,11 +178,11 @@ static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polar
* to create clocks and regulators via the usual frameworks.
*
* Return:
- * * 1 - To continue the loop
- * * 0 - When all resources found are handled properly.
- * * -EINVAL - If the resource is not a GPIO IO resource
- * * -ENODEV - If the resource has no corresponding _DSM entry
- * * -Other - Errors propagated from one of the sub-functions.
+ * * 1 - Continue the loop without adding a copy of the resource to
+ * * the list passed to acpi_dev_get_resources()
+ * * 0 - Continue the loop after adding a copy of the resource to
+ * * the list passed to acpi_dev_get_resources()
+ * * -errno - Error, break loop
*/
static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
void *data)
@@ -220,10 +220,10 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
int3472_get_func_and_polarity(type, &func, &polarity);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
- if (pin != agpio->pin_table[0])
- dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin,
- agpio->pin_table[0]);
+ /* Pin field is not really used under Windows and wraps around at 8 bits */
+ if (pin != (agpio->pin_table[0] & 0xff))
+ dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
+ func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
@@ -289,7 +289,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
if (ret < 0)
return dev_err_probe(int3472->dev, ret, err_msg);
- return ret;
+ /* Tell acpi_dev_get_resources() to not make a copy of the resource */
+ return 1;
}
static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
@@ -336,6 +337,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
struct int3472_cldb cldb;
int ret;
+ if (!adev)
+ return -ENODEV;
+
ret = skl_int3472_fill_cldb(adev, &cldb);
if (ret) {
dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
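
For context on the reworded Return: section above, a hedged sketch of an acpi_dev_get_resources() pre-processing callback using the same convention: returning 1 means the callback consumed the resource and no copy is added to the list, 0 lets the core add a copy, and a negative errno aborts the walk. All names below are placeholders:

static int example_preproc(struct acpi_resource *ares, void *data)
{
        if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
                return 0;       /* not handled here, keep a copy in the list */

        /* claim/translate the GPIO resource using 'data' here */

        return 1;               /* handled, do not copy into the list */
}

/* caller side, e.g. from a parse_crs()-style helper: */
LIST_HEAD(resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, example_preproc, priv);
if (ret < 0)
        return ret;
acpi_dev_free_resource_list(&resource_list);
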
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 1e107fd49f82..81ac4c691963 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
int ret;
int i;
+ if (!adev)
+ return -ENODEV;
+
n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
if (n_consumers < 0)
return n_consumers;
diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c
index 691d43c3592c..2b55347a5a93 100644
--- a/drivers/platform/x86/intel/plr_tpmi.c
+++ b/drivers/platform/x86/intel/plr_tpmi.c
@@ -262,7 +262,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia
struct resource *res;
struct tpmi_plr *plr;
void __iomem *base;
- char name[16];
+ char name[17];
int err;
plat_info = tpmi_get_platform_data(auxdev);
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 10f04b944117..1ee0fb5f8250 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -626,8 +626,8 @@ static u32 convert_ltr_scale(u32 val)
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
- u32 ltr_raw_data, scale, val;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
+ u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
unsigned int i, index, ltr_index = 0;
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 8ed54b7a3333..7233b654bbad 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT");
*/
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct intel_pmt_entry *entry = container_of(attr,
@@ -308,7 +308,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
entry->pmt_bin_attr.attr.name = ns->name;
entry->pmt_bin_attr.attr.mode = 0440;
entry->pmt_bin_attr.mmap = intel_pmt_mmap;
- entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.read_new = intel_pmt_read;
entry->pmt_bin_attr.size = entry->size;
ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index cd0ba84cc8e4..bafac8aa2baf 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -131,39 +131,6 @@ static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
}
/**
- * intel_punit_ipc_simple_command() - Simple IPC command
- * @cmd: IPC command code.
- * @para1: First 8bit parameter, set 0 if not used.
- * @para2: Second 8bit parameter, set 0 if not used.
- *
- * Send a IPC command to P-Unit when there is no data transaction
- *
- * Return: IPC error code or 0 on success.
- */
-int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
-{
- IPC_DEV *ipcdev = punit_ipcdev;
- IPC_TYPE type;
- u32 val;
- int ret;
-
- mutex_lock(&ipcdev->lock);
-
- reinit_completion(&ipcdev->cmd_complete);
- type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
-
- val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
- val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
- ipc_write_cmd(ipcdev, type, val);
- ret = intel_punit_ipc_check_status(ipcdev, type);
-
- mutex_unlock(&ipcdev->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(intel_punit_ipc_simple_command);
-
-/**
* intel_punit_ipc_command() - IPC command with data and pointers
* @cmd: IPC command code.
* @para1: First 8bit parameter, set 0 if not used.
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 33f33b1070fd..30d1c2caf984 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -398,8 +398,8 @@ free_payload:
}
static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -409,11 +409,11 @@ static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
-static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -423,7 +423,7 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
-static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
static ssize_t
certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
@@ -469,7 +469,7 @@ free_buffer:
static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -477,11 +477,11 @@ state_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -489,11 +489,11 @@ meter_certificate_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_current_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -502,11 +502,11 @@ meter_current_read(struct file *filp, struct kobject *kobj,
return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -528,9 +528,9 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
+static const BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
-static struct bin_attribute *sdsi_bin_attrs[] = {
+static const struct bin_attribute *const sdsi_bin_attrs[] = {
&bin_attr_registers,
&bin_attr_state_certificate,
&bin_attr_meter_certificate,
@@ -576,7 +576,7 @@ static struct attribute *sdsi_attrs[] = {
static const struct attribute_group sdsi_group = {
.attrs = sdsi_attrs,
- .bin_attrs = sdsi_bin_attrs,
+ .bin_attrs_new = sdsi_bin_attrs,
.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);
diff --git a/drivers/platform/x86/lenovo-wmi-camera.c b/drivers/platform/x86/lenovo-wmi-camera.c
index 0c0bedaf7407..eb60fb9a5b3f 100644
--- a/drivers/platform/x86/lenovo-wmi-camera.c
+++ b/drivers/platform/x86/lenovo-wmi-camera.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/wmi.h>
+#include <linux/cleanup.h>
#define WMI_LENOVO_CAMERABUTTON_EVENT_GUID "50C76F1F-D8E4-D895-0A3D-62F4EA400013"
@@ -26,10 +27,38 @@ enum {
SW_CAMERA_ON = 1,
};
+static int camera_shutter_input_setup(struct wmi_device *wdev, u8 camera_mode)
+{
+ struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ int err;
+
+ priv->idev = input_allocate_device();
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "Lenovo WMI Camera Button";
+ priv->idev->phys = "wmi/input0";
+ priv->idev->id.bustype = BUS_HOST;
+ priv->idev->dev.parent = &wdev->dev;
+
+ input_set_capability(priv->idev, EV_SW, SW_CAMERA_LENS_COVER);
+
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER,
+ camera_mode == SW_CAMERA_ON ? 0 : 1);
+ input_sync(priv->idev);
+
+ err = input_register_device(priv->idev);
+ if (err) {
+ input_free_device(priv->idev);
+ priv->idev = NULL;
+ }
+
+ return err;
+}
+
static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- unsigned int keycode;
u8 camera_mode;
if (obj->type != ACPI_TYPE_BUFFER) {
@@ -53,22 +82,24 @@ static void lenovo_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
return;
}
- mutex_lock(&priv->notify_lock);
+ guard(mutex)(&priv->notify_lock);
- keycode = camera_mode == SW_CAMERA_ON ?
- KEY_CAMERA_ACCESS_ENABLE : KEY_CAMERA_ACCESS_DISABLE;
- input_report_key(priv->idev, keycode, 1);
- input_sync(priv->idev);
- input_report_key(priv->idev, keycode, 0);
- input_sync(priv->idev);
+ if (!priv->idev) {
+ if (camera_shutter_input_setup(wdev, camera_mode))
+ dev_warn(&wdev->dev, "Failed to register input device\n");
+ return;
+ }
- mutex_unlock(&priv->notify_lock);
+ if (camera_mode == SW_CAMERA_ON)
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 0);
+ else
+ input_report_switch(priv->idev, SW_CAMERA_LENS_COVER, 1);
+ input_sync(priv->idev);
}
static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct lenovo_wmi_priv *priv;
- int ret;
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -76,21 +107,6 @@ static int lenovo_wmi_probe(struct wmi_device *wdev, const void *context)
dev_set_drvdata(&wdev->dev, priv);
- priv->idev = devm_input_allocate_device(&wdev->dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "Lenovo WMI Camera Button";
- priv->idev->phys = "wmi/input0";
- priv->idev->id.bustype = BUS_HOST;
- priv->idev->dev.parent = &wdev->dev;
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_ENABLE);
- input_set_capability(priv->idev, EV_KEY, KEY_CAMERA_ACCESS_DISABLE);
-
- ret = input_register_device(priv->idev);
- if (ret)
- return ret;
-
mutex_init(&priv->notify_lock);
return 0;
@@ -100,6 +116,9 @@ static void lenovo_wmi_remove(struct wmi_device *wdev)
{
struct lenovo_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ if (priv->idev)
+ input_unregister_device(priv->idev);
+
mutex_destroy(&priv->notify_lock);
}
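
The notify-handler change above relies on scope-based locking from <linux/cleanup.h>: guard(mutex) releases the lock on every exit path, which is why the explicit mutex_unlock() calls can be dropped. A minimal sketch with placeholder names:

static void example_notify(struct example_priv *priv)
{
        guard(mutex)(&priv->lock);

        if (!priv->idev)
                return;         /* lock is dropped here automatically */

        /* work done with priv->lock held */
}                               /* ...and dropped here on the normal path */
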
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index e5391a37014d..c4b150fa093f 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -806,8 +806,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
-static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
{
static bool extended;
@@ -996,7 +996,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
if (result)
goto fail_input;
- result = i8042_install_filter(msi_laptop_i8042_filter);
+ result = i8042_install_filter(msi_laptop_i8042_filter, NULL);
if (result) {
pr_err("Unable to install key filter\n");
goto fail_filter;
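
This hunk (and the matching ones in panasonic-laptop and toshiba_acpi below) tracks an i8042 API change that adds a context pointer: i8042_install_filter() now takes the pointer and hands it back to the filter on every event; these drivers simply pass NULL. A hedged sketch of a filter that does use the context, with illustrative names:

static bool example_i8042_filter(unsigned char data, unsigned char str,
                                 struct serio *port, void *context)
{
        struct example_priv *priv = context;

        /* return true to swallow the byte, false to pass it on to the kbd driver */
        return false;
}

/* at init time */
err = i8042_install_filter(example_i8042_filter, priv);
if (err)
        pr_err("Unable to install key filter\n");
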
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 22ca70eb8227..2987b4db6009 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -260,7 +260,7 @@ struct pcc_acpi {
* keypress events over the PS/2 kbd interface, filter these out.
*/
static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
static bool extended;
@@ -1100,7 +1100,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->platform = NULL;
}
- i8042_install_filter(panasonic_i8042_filter);
+ i8042_install_filter(panasonic_i8042_filter, NULL);
return 0;
out_platform:
diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c
index 8d540a1c8602..c332c7cdaff5 100644
--- a/drivers/platform/x86/quickstart.c
+++ b/drivers/platform/x86/quickstart.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
index bcf3a0c356ea..57eac75805e2 100644
--- a/drivers/platform/x86/serdev_helpers.h
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -22,32 +22,14 @@
#include <linux/string.h>
static inline struct device *
-get_serdev_controller(const char *serial_ctrl_hid,
- const char *serial_ctrl_uid,
- int serial_ctrl_port,
- const char *serdev_ctrl_name)
+get_serdev_controller_from_parent(struct device *ctrl_dev,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
{
- struct device *ctrl_dev, *child;
- struct acpi_device *ctrl_adev;
+ struct device *child;
char name[32];
int i;
- ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s serial-ctrl adev\n",
- serial_ctrl_hid, serial_ctrl_uid);
- return ERR_PTR(-ENODEV);
- }
-
- /* get_first_physical_node() returns a weak ref */
- ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s serial-ctrl physical node\n",
- serial_ctrl_hid, serial_ctrl_uid);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
- }
-
/* Walk host -> uart-ctrl -> port -> serdev-ctrl */
for (i = 0; i < 3; i++) {
switch (i) {
@@ -67,14 +49,40 @@ get_serdev_controller(const char *serial_ctrl_hid,
put_device(ctrl_dev);
if (!child) {
pr_err("error could not find '%s' device\n", name);
- ctrl_dev = ERR_PTR(-ENODEV);
- goto put_ctrl_adev;
+ return ERR_PTR(-ENODEV);
}
ctrl_dev = child;
}
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
return ctrl_dev;
}
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct acpi_device *adev;
+ struct device *parent;
+
+ adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ parent = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!parent) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* This puts our reference on parent and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(parent, serial_ctrl_port, serdev_ctrl_name);
+}
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index ed6b28505cd6..db030b0f176a 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -384,6 +384,17 @@ static const struct smi_node cs35l57_hda = {
.bus_type = SMI_AUTO_DETECT,
};
+static const struct smi_node tas2781_hda = {
+ .instances = {
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
@@ -396,6 +407,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "CSC3556", (unsigned long)&cs35l56_hda },
{ "CSC3557", (unsigned long)&cs35l57_hda },
{ "INT3515", (unsigned long)&int3515_data },
+ { "TXNW2781", (unsigned long)&tas2781_hda },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
{ "CLSA0101", (unsigned long)&cs35l41_hda },
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 38de0cb20d77..323316ac6783 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -194,7 +194,6 @@ static const char * const level_options[] = {
[TLMI_LEVEL_MASTER] = "master",
};
static struct think_lmi tlmi_priv;
-static const struct class *fw_attr_class;
static DEFINE_MUTEX(tlmi_mutex);
static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj)
@@ -1446,11 +1445,7 @@ static int tlmi_sysfs_init(void)
{
int i, ret;
- ret = fw_attributes_class_get(&fw_attr_class);
- if (ret)
- return ret;
-
- tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
NULL, "%s", "thinklmi");
if (IS_ERR(tlmi_priv.class_dev)) {
ret = PTR_ERR(tlmi_priv.class_dev);
@@ -1563,9 +1558,8 @@ static int tlmi_sysfs_init(void)
fail_create_attr:
tlmi_release_attr();
fail_device_created:
- device_destroy(fw_attr_class, MKDEV(0, 0));
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
fail_class_created:
- fw_attributes_class_put();
return ret;
}
@@ -1788,8 +1782,7 @@ fail_clear_attr:
static void tlmi_remove(struct wmi_device *wdev)
{
tlmi_release_attr();
- device_destroy(fw_attr_class, MKDEV(0, 0));
- fw_attributes_class_put();
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
}
static int tlmi_probe(struct wmi_device *wdev, const void *context)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2cfb2ac3f465..1fcb0f99695a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -963,6 +963,7 @@ static const struct proc_ops dispatch_proc_ops = {
static struct platform_device *tpacpi_pdev;
static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
+static struct device *tpacpi_pprof;
static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
@@ -3275,6 +3276,7 @@ static const struct key_entry keymap_lenovo[] __initconst = {
* scancodes to preserve uAPI compatibility, see tpacpi_input_send_key().
*/
{ KE_KEY, 0x131d, { KEY_VENDOR } }, /* System debug info, similar to old ThinkPad key */
+ { KE_KEY, 0x1320, { KEY_LINK_PHONE } },
{ KE_KEY, TP_HKEY_EV_TRACK_DOUBLETAP /* 0x8036 */, { KEY_PROG4 } },
{ KE_END }
};
@@ -10415,7 +10417,7 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
* dytc_profile_get: Function to register with platform_profile
* handler. Returns current platform profile.
*/
-static int dytc_profile_get(struct platform_profile_handler *pprof,
+static int dytc_profile_get(struct device *dev,
enum platform_profile_option *profile)
{
*profile = dytc_current_profile;
@@ -10490,7 +10492,7 @@ static int dytc_cql_command(int command, int *output)
* dytc_profile_set: Function to register with platform_profile
* handler. Sets current platform profile.
*/
-static int dytc_profile_set(struct platform_profile_handler *pprof,
+static int dytc_profile_set(struct device *dev,
enum platform_profile_option profile)
{
int perfmode;
@@ -10539,6 +10541,21 @@ unlock:
return err;
}
+static int dytc_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops dytc_profile_ops = {
+ .probe = dytc_profile_probe,
+ .profile_get = dytc_profile_get,
+ .profile_set = dytc_profile_set,
+};
+
static void dytc_profile_refresh(void)
{
enum platform_profile_option profile;
@@ -10567,24 +10584,14 @@ static void dytc_profile_refresh(void)
err = convert_dytc_to_profile(funcmode, perfmode, &profile);
if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
- platform_profile_notify();
+ platform_profile_notify(tpacpi_pprof);
}
}
-static struct platform_profile_handler dytc_profile = {
- .profile_get = dytc_profile_get,
- .profile_set = dytc_profile_set,
-};
-
static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
{
int err, output;
- /* Setup supported modes */
- set_bit(PLATFORM_PROFILE_LOW_POWER, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
- set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
-
err = dytc_command(DYTC_CMD_QUERY, &output);
if (err)
return err;
@@ -10639,12 +10646,13 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
"DYTC version %d: thermal mode available\n", dytc_version);
/* Create platform_profile structure and register */
- err = platform_profile_register(&dytc_profile);
+ tpacpi_pprof = devm_platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi",
+ NULL, &dytc_profile_ops);
/*
* If for some reason platform_profiles aren't enabled
* don't quit terminally.
*/
- if (err)
+ if (IS_ERR(tpacpi_pprof))
return -ENODEV;
/* Ensure initial values are correct */
@@ -10657,14 +10665,8 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
return 0;
}
-static void dytc_profile_exit(void)
-{
- platform_profile_remove();
-}
-
static struct ibm_struct dytc_profile_driver_data = {
.name = "dytc-profile",
- .exit = dytc_profile_exit,
};
/*************************************************************************
@@ -11681,7 +11683,7 @@ static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
if (strlen(val) > sizeof(ibms_init[i].param) - 1)
return -ENOSPC;
- strcpy(ibms_init[i].param, val);
+ strscpy(ibms_init[i].param, val);
return 0;
}
}
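
For reference, the DYTC hunks above move from the old platform_profile_handler to the ops-table API: supported choices are filled in a probe() callback, profile_get()/profile_set() take a struct device *, and registration is device-managed via devm_platform_profile_register(). A compact sketch under those assumptions, with placeholder names and trimmed error handling:

static int example_profile_probe(void *drvdata, unsigned long *choices)
{
        set_bit(PLATFORM_PROFILE_BALANCED, choices);
        set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
        return 0;
}

static int example_profile_get(struct device *dev, enum platform_profile_option *profile)
{
        *profile = PLATFORM_PROFILE_BALANCED;   /* query firmware here */
        return 0;
}

static int example_profile_set(struct device *dev, enum platform_profile_option profile)
{
        return 0;                               /* program firmware here */
}

static const struct platform_profile_ops example_profile_ops = {
        .probe          = example_profile_probe,
        .profile_get    = example_profile_get,
        .profile_set    = example_profile_set,
};

/* in probe: */
ppdev = devm_platform_profile_register(dev, "example-driver", drvdata, &example_profile_ops);
if (IS_ERR(ppdev))
        return PTR_ERR(ppdev);
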
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 78a5aac2dcfd..5ad3a7183d33 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2755,7 +2755,7 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
}
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+ struct serio *port, void *context)
{
if (str & I8042_STR_AUXDATA)
return false;
@@ -2915,7 +2915,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
- error = i8042_install_filter(toshiba_acpi_i8042_filter);
+ error = i8042_install_filter(toshiba_acpi_i8042_filter, NULL);
if (error) {
pr_err("Error installing key filter\n");
goto err_free_dev;
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index df6f0ae6e6c7..3e33da36da8a 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -20,66 +20,66 @@
#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
-struct bmof_priv {
- union acpi_object *bmofdata;
- struct bin_attribute bmof_bin_attr;
-};
-
-static ssize_t read_bmof(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t bmof_read(struct file *filp, struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- struct bmof_priv *priv = container_of(attr, struct bmof_priv, bmof_bin_attr);
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
- return memory_read_from_buffer(buf, count, &off, priv->bmofdata->buffer.pointer,
- priv->bmofdata->buffer.length);
+ return memory_read_from_buffer(buf, count, &off, obj->buffer.pointer, obj->buffer.length);
}
-static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+static const BIN_ATTR_ADMIN_RO(bmof, 0);
+
+static const struct bin_attribute * const bmof_attrs[] = {
+ &bin_attr_bmof,
+ NULL
+};
+
+static size_t bmof_bin_size(struct kobject *kobj, const struct bin_attribute *attr, int n)
{
- struct bmof_priv *priv;
- int ret;
+ struct device *dev = kobj_to_dev(kobj);
+ union acpi_object *obj = dev_get_drvdata(dev);
+
+ return obj->buffer.length;
+}
- priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+static const struct attribute_group bmof_group = {
+ .bin_size = bmof_bin_size,
+ .bin_attrs_new = bmof_attrs,
+};
+
+static const struct attribute_group *bmof_groups[] = {
+ &bmof_group,
+ NULL
+};
- dev_set_drvdata(&wdev->dev, priv);
+static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
+{
+ union acpi_object *obj;
- priv->bmofdata = wmidev_block_query(wdev, 0);
- if (!priv->bmofdata) {
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
dev_err(&wdev->dev, "failed to read Binary MOF\n");
return -EIO;
}
- if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
+ if (obj->type != ACPI_TYPE_BUFFER) {
dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
- ret = -EIO;
- goto err_free;
+ kfree(obj);
+ return -EIO;
}
- sysfs_bin_attr_init(&priv->bmof_bin_attr);
- priv->bmof_bin_attr.attr.name = "bmof";
- priv->bmof_bin_attr.attr.mode = 0400;
- priv->bmof_bin_attr.read = read_bmof;
- priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
-
- ret = device_create_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- if (ret)
- goto err_free;
+ dev_set_drvdata(&wdev->dev, obj);
return 0;
-
- err_free:
- kfree(priv->bmofdata);
- return ret;
}
static void wmi_bmof_remove(struct wmi_device *wdev)
{
- struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
+ union acpi_object *obj = dev_get_drvdata(&wdev->dev);
- device_remove_bin_file(&wdev->dev, &priv->bmof_bin_attr);
- kfree(priv->bmofdata);
+ kfree(obj);
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
@@ -90,6 +90,7 @@ static const struct wmi_device_id wmi_bmof_id_table[] = {
static struct wmi_driver wmi_bmof_driver = {
.driver = {
.name = "wmi-bmof",
+ .dev_groups = bmof_groups,
},
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
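
The wmi-bmof conversion above illustrates the static binary-attribute pattern: the attribute is declared const with size 0, the group's bin_size() callback reports the real size at access time, and the group is attached through the driver's dev_groups so probe()/remove() need no sysfs bookkeeping. A hedged sketch mirroring that wiring; struct example_blob and the other names are placeholders:

static ssize_t example_read(struct file *filp, struct kobject *kobj,
                            const struct bin_attribute *attr, char *buf,
                            loff_t off, size_t count)
{
        const struct example_blob *blob = dev_get_drvdata(kobj_to_dev(kobj));

        return memory_read_from_buffer(buf, count, &off, blob->data, blob->len);
}
static const BIN_ATTR_ADMIN_RO(example, 0);     /* real size comes from bin_size() */

static size_t example_bin_size(struct kobject *kobj,
                               const struct bin_attribute *attr, int n)
{
        const struct example_blob *blob = dev_get_drvdata(kobj_to_dev(kobj));

        return blob->len;
}

static const struct bin_attribute *const example_bin_attrs[] = {
        &bin_attr_example,
        NULL
};

static const struct attribute_group example_group = {
        .bin_size       = example_bin_size,
        .bin_attrs_new  = example_bin_attrs,
};
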
diff --git a/drivers/platform/x86/x86-android-tablets/Makefile b/drivers/platform/x86/x86-android-tablets/Makefile
index 41ece5a37137..313be30548bc 100644
--- a/drivers/platform/x86/x86-android-tablets/Makefile
+++ b/drivers/platform/x86/x86-android-tablets/Makefile
@@ -3,7 +3,7 @@
# X86 Android tablet support Makefile
#
+obj-$(CONFIG_X86_ANDROID_TABLETS) += vexia_atla10_ec.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
-
x86-android-tablets-y := core.o dmi.o shared-psy-info.o \
asus.o lenovo.o other.o
diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c
index 07fbeab2319a..7dde63b9943f 100644
--- a/drivers/platform/x86/x86-android-tablets/asus.c
+++ b/drivers/platform/x86/x86-android-tablets/asus.c
@@ -145,8 +145,8 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst =
static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
{
- .ctrl_hid = "80860F0A",
- .ctrl_uid = "2",
+ .ctrl.acpi.hid = "80860F0A",
+ .ctrl.acpi.uid = "2",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E3A",
},
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index 4218afcec0e9..2a9c47178505 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -157,7 +157,7 @@ static struct gpiod_lookup_table * const *gpiod_lookup_tables;
static const struct software_node *bat_swnode;
static void (*exit_handler)(void);
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_handle(const struct x86_i2c_client_info *client_info)
{
acpi_handle handle;
@@ -177,7 +177,7 @@ static __init int match_parent(struct device *dev, const void *data)
return dev->parent == data;
}
-static struct i2c_adapter *
+static __init struct i2c_adapter *
get_i2c_adap_by_pci_parent(const struct x86_i2c_client_info *client_info)
{
struct i2c_adapter *adap = NULL;
@@ -212,7 +212,7 @@ static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info
if (board_info.irq < 0)
return board_info.irq;
- if (dev_info->use_pci_devname)
+ if (dev_info->use_pci)
adap = get_i2c_adap_by_pci_parent(client_info);
else
adap = get_i2c_adap_by_handle(client_info);
@@ -271,15 +271,32 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
return 0;
}
-static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
+static __init struct device *
+get_serdev_controller_by_pci_parent(const struct x86_serdev_info *info)
{
+ struct pci_dev *pdev;
+
+ pdev = pci_get_domain_bus_and_slot(0, 0, info->ctrl.pci.devfn);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* This puts our reference on pdev and returns a ref on the ctrl */
+ return get_serdev_controller_from_parent(&pdev->dev, 0, info->ctrl_devname);
+}
+
+static __init int x86_instantiate_serdev(const struct x86_dev_info *dev_info, int idx)
+{
+ const struct x86_serdev_info *info = &dev_info->serdev_info[idx];
struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
- info->ctrl_devname);
+ if (dev_info->use_pci)
+ ctrl_dev = get_serdev_controller_by_pci_parent(info);
+ else
+ ctrl_dev = get_serdev_controller(info->ctrl.acpi.hid, info->ctrl.acpi.uid,
+ 0, info->ctrl_devname);
if (IS_ERR(ctrl_dev))
return PTR_ERR(ctrl_dev);
@@ -446,7 +463,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
serdev_count = dev_info->serdev_count;
for (i = 0; i < serdev_count; i++) {
- ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ ret = x86_instantiate_serdev(dev_info, i);
if (ret < 0) {
x86_android_tablet_remove(pdev);
return ret;
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index ae087f1471c1..1241a97cda39 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -178,8 +178,8 @@ static const struct platform_device_info lenovo_yb1_x90_pdevs[] __initconst = {
*/
static const struct x86_serdev_info lenovo_yb1_x90_serdevs[] __initconst = {
{
- .ctrl_hid = "8086228A",
- .ctrl_uid = "1",
+ .ctrl.acpi.hid = "8086228A",
+ .ctrl.acpi.uid = "1",
.ctrl_devname = "serial0",
.serdev_hid = "BCM2E1A",
},
@@ -601,7 +601,7 @@ static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_
.num_consumer_supplies = 1,
};
-struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+static struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
};
@@ -726,7 +726,7 @@ static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initcon
},
};
-const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
"bq24190_charger", /* For the Vbus regulator for lc824206xa */
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 735df818f76b..1d93d9edb23f 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -602,14 +602,14 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
* Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
-const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
+static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-const struct software_node vexia_edu_atla10_ulpmc_node = {
+static const struct software_node vexia_edu_atla10_ulpmc_node = {
.properties = vexia_edu_atla10_ulpmc_props,
};
@@ -715,6 +715,14 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
+static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+ {
+ .ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
+ .ctrl_devname = "serial0",
+ .serdev_hid = "OBDA8723",
+ },
+};
+
static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
@@ -755,9 +763,11 @@ static int __init vexia_edu_atla10_init(struct device *dev)
const struct x86_dev_info vexia_edu_atla10_info __initconst = {
.i2c_client_info = vexia_edu_atla10_i2c_clients,
.i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
+ .serdev_info = vexia_edu_atla10_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
.gpiod_lookup_tables = vexia_edu_atla10_gpios,
.init = vexia_edu_atla10_init,
- .use_pci_devname = true,
+ .use_pci = true,
};
/*
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
new file mode 100644
index 000000000000..5d02af1c5aaa
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * power_supply class (battery) driver for the I2C attached embedded controller
+ * found on Vexia EDU ATLA 10 (9V version) tablets.
+ *
+ * This is based on the ACPI Battery device in the DSDT which should work
+ * except that it expects the I2C controller to be enumerated as an ACPI
+ * device and the tablet's BIOS enumerates all LPSS devices as PCI devices
+ * (and changing the LPSS BIOS settings from PCI -> ACPI does not work).
+ *
+ * Copyright (c) 2024 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
+
+/* State field uses ACPI Battery spec status bits */
+#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
+#define ACPI_BATTERY_STATE_CHARGING BIT(1)
+
+#define ATLA10_EC_BATTERY_STATE_COMMAND 0x87
+#define ATLA10_EC_BATTERY_INFO_COMMAND 0x88
+
+/* From broken ACPI battery device in DSDT */
+#define ATLA10_EC_VOLTAGE_MIN_DESIGN_uV 3750000
+
+/* Update data every 5 seconds */
+#define UPDATE_INTERVAL_JIFFIES (5 * HZ)
+
+struct atla10_ec_battery_state {
+ u8 status; /* Using ACPI Battery spec status bits */
+ u8 capacity; /* Percent */
+ __le16 charge_now_mAh;
+ __le16 voltage_now_mV;
+ __le16 current_now_mA;
+ __le16 charge_full_mAh;
+ __le16 temp; /* centi degrees Celsius */
+} __packed;
+
+struct atla10_ec_battery_info {
+ __le16 charge_full_design_mAh;
+ __le16 voltage_now_mV; /* Should be design voltage, but is not ? */
+ __le16 charge_full_design2_mAh;
+} __packed;
+
+struct atla10_ec_data {
+ struct i2c_client *client;
+ struct power_supply *psy;
+ struct delayed_work work;
+ struct mutex update_lock;
+ struct atla10_ec_battery_info info;
+ struct atla10_ec_battery_state state;
+ bool valid; /* true if state is valid */
+ unsigned long last_update; /* In jiffies */
+};
+
+static int atla10_ec_cmd(struct atla10_ec_data *data, u8 cmd, u8 len, u8 *values)
+{
+ struct device *dev = &data->client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = i2c_smbus_read_block_data(data->client, cmd, buf);
+ if (ret != len) {
+ dev_err(dev, "I2C command 0x%02x error: %d\n", cmd, ret);
+ return -EIO;
+ }
+
+ memcpy(values, buf, len);
+ return 0;
+}
+
+static int atla10_ec_update(struct atla10_ec_data *data)
+{
+ int ret;
+
+ if (data->valid && time_before(jiffies, data->last_update + UPDATE_INTERVAL_JIFFIES))
+ return 0;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_STATE_COMMAND,
+ sizeof(data->state), (u8 *)&data->state);
+ if (ret)
+ return ret;
+
+ data->last_update = jiffies;
+ data->valid = true;
+ return 0;
+}
+
+static int atla10_ec_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+ int charge_now_mAh, charge_full_mAh, ret;
+
+ guard(mutex)(&data->update_lock);
+
+ ret = atla10_ec_update(data);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (data->state.status & ACPI_BATTERY_STATE_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->state.capacity == 100)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = data->state.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ /*
+ * The EC has a bug where it reports charge-full-design as
+ * charge-now when the battery is full. Clamp charge-now to
+ * charge-full to workaround this.
+ */
+ charge_now_mAh = le16_to_cpu(data->state.charge_now_mAh);
+ charge_full_mAh = le16_to_cpu(data->state.charge_full_mAh);
+ val->intval = min(charge_now_mAh, charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = le16_to_cpu(data->state.voltage_now_mV) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = le16_to_cpu(data->state.current_now_mA) * 1000;
+ /*
+ * Documentation/ABI/testing/sysfs-class-power specifies
+ * negative current for discharging.
+ */
+ if (data->state.status & ACPI_BATTERY_STATE_DISCHARGING)
+ val->intval = -val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = le16_to_cpu(data->state.charge_full_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = le16_to_cpu(data->state.temp) / 10;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = le16_to_cpu(data->info.charge_full_design_mAh) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = ATLA10_EC_VOLTAGE_MIN_DESIGN_uV;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void atla10_ec_external_power_changed_work(struct work_struct *work)
+{
+ struct atla10_ec_data *data = container_of(work, struct atla10_ec_data, work.work);
+
+ dev_dbg(&data->client->dev, "External power changed\n");
+ data->valid = false;
+ power_supply_changed(data->psy);
+}
+
+static void atla10_ec_external_power_changed(struct power_supply *psy)
+{
+ struct atla10_ec_data *data = power_supply_get_drvdata(psy);
+
+ /* After charger plug in/out wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &data->work, HZ / 2);
+}
+
+static const enum power_supply_property atla10_ec_psy_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static const struct power_supply_desc atla10_ec_psy_desc = {
+ .name = "atla10_ec_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = atla10_ec_psy_props,
+ .num_properties = ARRAY_SIZE(atla10_ec_psy_props),
+ .get_property = atla10_ec_psy_get_property,
+ .external_power_changed = atla10_ec_external_power_changed,
+};
+
+static int atla10_ec_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = { };
+ struct device *dev = &client->dev;
+ struct atla10_ec_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = data;
+ data->client = client;
+
+ ret = devm_mutex_init(dev, &data->update_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_delayed_work_autocancel(dev, &data->work,
+ atla10_ec_external_power_changed_work);
+ if (ret)
+ return ret;
+
+ ret = atla10_ec_cmd(data, ATLA10_EC_BATTERY_INFO_COMMAND,
+ sizeof(data->info), (u8 *)&data->info);
+ if (ret)
+ return ret;
+
+ data->psy = devm_power_supply_register(dev, &atla10_ec_psy_desc, &psy_cfg);
+ return PTR_ERR_OR_ZERO(data->psy);
+}
+
+static const struct i2c_device_id atla10_ec_id_table[] = {
+ { "vexia_atla10_ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atla10_ec_id_table);
+
+static struct i2c_driver atla10_ec_driver = {
+ .driver = {
+ .name = "vexia_atla10_ec",
+ },
+ .probe = atla10_ec_probe,
+ .id_table = atla10_ec_id_table,
+};
+module_i2c_driver(atla10_ec_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Battery driver for Vexia EDU ATLA 10 tablet EC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 0fc7e8cff672..63a38a0069ba 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -57,8 +57,15 @@ struct x86_spi_dev_info {
};
struct x86_serdev_info {
- const char *ctrl_hid;
- const char *ctrl_uid;
+ union {
+ struct {
+ const char *hid;
+ const char *uid;
+ } acpi;
+ struct {
+ unsigned int devfn;
+ } pci;
+ } ctrl;
const char *ctrl_devname;
/*
* ATM the serdev core only supports of or ACPI matching; and so far all
@@ -91,7 +98,7 @@ struct x86_dev_info {
int gpio_button_count;
int (*init)(struct device *dev);
void (*exit)(void);
- bool use_pci_devname;
+ bool use_pci;
};
int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index a7784a8bb5db..86b531e15b85 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -96,6 +96,14 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
continue;
}
+ /*
+ * Register the explicit power on request to the firmware so
+ * that it is tracked as used by the OSPM agent and not
+ * accidentally turned off without OSPM's knowledge.
+ */
+ if (state == SCMI_POWER_STATE_GENERIC_ON)
+ power_ops->state_set(ph, i, state);
+
scmi_pd->domain = i;
scmi_pd->ph = ph;
scmi_pd->name = power_ops->name_get(ph, i);
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 20a9efebbcb7..6c94137865c9 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -3180,6 +3180,8 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
if (!err)
genpd_state->residency_ns = 1000LL * residency;
+ of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
+
genpd_state->power_on_latency_ns = 1000LL * exit_latency;
genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
@@ -3458,7 +3460,10 @@ static int idle_states_show(struct seq_file *s, void *data)
seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
- idle_time += genpd->states[i].idle_time;
+ struct genpd_power_state *state = &genpd->states[i];
+ char state_name[15];
+
+ idle_time += state->idle_time;
if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
now = ktime_get_mono_fast_ns();
@@ -3468,9 +3473,13 @@ static int idle_states_show(struct seq_file *s, void *data)
}
}
+ if (!state->name)
+ snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
+
do_div(idle_time, NSEC_PER_MSEC);
- seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
- genpd->states[i].usage, genpd->states[i].rejected);
+ seq_printf(s, "%-14s %-14llu %-14llu %llu\n",
+ state->name ?: state_name, idle_time,
+ state->usage, state->rejected);
}
genpd_unlock(genpd);
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 9bdb80fd7210..958d34d4821b 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1437,6 +1437,7 @@ static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
.pm = &imx_pgc_domain_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
@@ -1549,6 +1550,7 @@ static struct platform_driver imx_gpc_driver = {
.driver = {
.name = "imx-gpcv2",
.of_match_table = imx_gpcv2_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = imx_gpcv2_probe,
};
diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index 23db85b7aa9e..912802b5215b 100644
--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
@@ -894,6 +894,7 @@ static struct platform_driver imx8m_blk_ctrl_driver = {
.name = "imx8m-blk-ctrl",
.pm = &imx8m_blk_ctrl_pm_ops,
.of_match_table = imx8m_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8m_blk_ctrl_driver);
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index 3668fe66b22c..34576be606e3 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -862,6 +862,7 @@ static struct platform_driver imx8mp_blk_ctrl_driver = {
.name = "imx8mp-blk-ctrl",
.pm = &imx8mp_blk_ctrl_pm_ops,
.of_match_table = imx8mp_blk_ctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(imx8mp_blk_ctrl_driver);
diff --git a/drivers/pmdomain/mediatek/Kconfig b/drivers/pmdomain/mediatek/Kconfig
index 21305c4f17fe..0e34a517ab7d 100644
--- a/drivers/pmdomain/mediatek/Kconfig
+++ b/drivers/pmdomain/mediatek/Kconfig
@@ -26,4 +26,16 @@ config MTK_SCPSYS_PM_DOMAINS
Control Processor System (SCPSYS) has several power management related
tasks in the system.
+config AIROHA_CPU_PM_DOMAIN
+ tristate "Airoha CPU power domain"
+ default ARCH_AIROHA
+ depends on HAVE_ARM_SMCCC
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable CPU power domain support for Airoha SoC.
+
+ CPU frequency and power are controlled by ATF with SMC commands to
+ set performance states.
+
endmenu
diff --git a/drivers/pmdomain/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile
index 8cde09e654b3..18ba92e3c418 100644
--- a/drivers/pmdomain/mediatek/Makefile
+++ b/drivers/pmdomain/mediatek/Makefile
@@ -1,3 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
+obj-$(CONFIG_AIROHA_CPU_PM_DOMAIN) += airoha-cpu-pmdomain.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_airoha-cpu-pmdomain.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
new file mode 100644
index 000000000000..0fd88d2f9ac2
--- /dev/null
+++ b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#define AIROHA_SIP_AVS_HANDLE 0x82000301
+#define AIROHA_AVS_OP_BASE 0xddddddd0
+#define AIROHA_AVS_OP_MASK GENMASK(1, 0)
+#define AIROHA_AVS_OP_FREQ_DYN_ADJ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x1))
+#define AIROHA_AVS_OP_GET_FREQ (AIROHA_AVS_OP_BASE | \
+ FIELD_PREP(AIROHA_AVS_OP_MASK, 0x2))
+
+struct airoha_cpu_pmdomain_priv {
+ struct clk_hw hw;
+ struct generic_pm_domain pd;
+};
+
+static long airoha_cpu_pmdomain_clk_round(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long airoha_cpu_pmdomain_clk_get(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_GET_FREQ,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ /* SMCCC returns freq in MHz */
+ return (int)(res.a0 * 1000 * 1000);
+}
+
+/* The Airoha CPU clk SMCCC interface is always enabled */
+static int airoha_cpu_pmdomain_clk_is_enabled(struct clk_hw *hw)
+{
+ return true;
+}
+
+static const struct clk_ops airoha_cpu_pmdomain_clk_ops = {
+ .recalc_rate = airoha_cpu_pmdomain_clk_get,
+ .is_enabled = airoha_cpu_pmdomain_clk_is_enabled,
+ .round_rate = airoha_cpu_pmdomain_clk_round,
+};
+
+static int airoha_cpu_pmdomain_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(AIROHA_SIP_AVS_HANDLE, AIROHA_AVS_OP_FREQ_DYN_ADJ,
+ 0, state, 0, 0, 0, 0, &res);
+
+ /* The SMC call signals success by clearing BIT(0) in the returned value */
+ return res.a0 & BIT(0) ? -EINVAL : 0;
+}
+
+static int airoha_cpu_pmdomain_probe(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv;
+ struct device *dev = &pdev->dev;
+ const struct clk_init_data init = {
+ .name = "cpu",
+ .ops = &airoha_cpu_pmdomain_clk_ops,
+ /* Clock with no set_rate, can't cache */
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ struct generic_pm_domain *pd;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init and register a get-only clk for Cpufreq */
+ priv->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ret;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &priv->hw);
+ if (ret)
+ return ret;
+
+ /* Init and register a PD for CPU */
+ pd = &priv->pd;
+ pd->name = "cpu_pd";
+ pd->flags = GENPD_FLAG_ALWAYS_ON;
+ pd->set_performance_state = airoha_cpu_pmdomain_set_performance_state;
+
+ ret = pm_genpd_init(pd, NULL, false);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, pd);
+ if (ret)
+ goto err_add_provider;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_add_provider:
+ pm_genpd_remove(pd);
+
+ return ret;
+}
+
+static void airoha_cpu_pmdomain_remove(struct platform_device *pdev)
+{
+ struct airoha_cpu_pmdomain_priv *priv = platform_get_drvdata(pdev);
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&priv->pd);
+}
+
+static const struct of_device_id airoha_cpu_pmdomain_of_match[] = {
+ { .compatible = "airoha,en7581-cpufreq" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, airoha_cpu_pmdomain_of_match);
+
+static struct platform_driver airoha_cpu_pmdomain_driver = {
+ .probe = airoha_cpu_pmdomain_probe,
+ .remove = airoha_cpu_pmdomain_remove,
+ .driver = {
+ .name = "airoha-cpu-pmdomain",
+ .of_match_table = airoha_cpu_pmdomain_of_match,
+ },
+};
+module_platform_driver(airoha_cpu_pmdomain_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPU PM domain driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 0e4bd749d067..82df7e44250b 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
@@ -51,6 +53,56 @@ struct ti_sci_pm_domain {
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+static inline bool ti_sci_pd_is_valid_constraint(s32 val)
+{
+ return val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void ti_sci_pd_set_lat_constraint(struct device *dev, s32 val)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ u16 val_ms;
+ int ret;
+
+ /* PM QoS latency unit is usecs, TI SCI uses msecs */
+ val_ms = val / USEC_PER_MSEC;
+ ret = ti_sci->ops.pm_ops.set_latency_constraint(ti_sci, val_ms, TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ dev_err(dev, "ti_sci_pd: set latency constraint failed: ret=%d\n",
+ ret);
+ else
+ dev_dbg(dev, "ti_sci_pd: ID:%d set latency constraint %d\n",
+ pd->idx, val);
+}
+#endif
+
+static inline void ti_sci_pd_set_wkup_constraint(struct device *dev)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(genpd);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+ int ret;
+
+ if (device_may_wakeup(dev)) {
+ /*
+ * If device can wakeup using IO daisy chain wakeups,
+ * we do not want to set a constraint.
+ */
+ if (dev->power.wakeirq) {
+ dev_dbg(dev, "%s: has wake IRQ, not setting constraints\n", __func__);
+ return;
+ }
+
+ ret = ti_sci->ops.pm_ops.set_device_constraint(ti_sci, pd->idx,
+ TISCI_MSG_CONSTRAINT_SET);
+ if (!ret)
+ dev_dbg(dev, "ti_sci_pd: ID:%d set device constraint.\n", pd->idx);
+ }
+}
+
/*
* ti_sci_pd_power_off(): genpd power down hook
* @domain: pointer to the powerdomain to power off
@@ -79,6 +131,28 @@ static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
+#ifdef CONFIG_PM_SLEEP
+static int ti_sci_pd_suspend(struct device *dev)
+{
+ int ret;
+ s32 val;
+
+ ret = pm_generic_suspend(dev);
+ if (ret)
+ return ret;
+
+ val = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (ti_sci_pd_is_valid_constraint(val))
+ ti_sci_pd_set_lat_constraint(dev, val);
+
+ ti_sci_pd_set_wkup_constraint(dev);
+
+ return 0;
+}
+#else
+#define ti_sci_pd_suspend NULL
+#endif
+
/*
* ti_sci_pd_xlate(): translation service for TI SCI genpds
* @genpdspec: DT identification data for the genpd
@@ -182,6 +256,13 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
pd->pd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pd->idx = args.args[0];
pd->parent = pd_provider;
+ /*
+ * If SCI constraint functions are present, then firmware
+ * supports the constraints API.
+ */
+ if (pd_provider->ti_sci->ops.pm_ops.set_device_constraint &&
+ pd_provider->ti_sci->ops.pm_ops.set_latency_constraint)
+ pd->pd.domain.ops.suspend = ti_sci_pd_suspend;
pm_genpd_init(&pd->pd, NULL, true);
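
To connect the new ti_sci_pd_suspend() hook with its producer side: the latency it forwards comes from the standard per-device PM QoS interface (read via dev_pm_qos_read_value(), set by consumer drivers or from userspace through power/pm_qos_resume_latency_us). A minimal, purely illustrative consumer-side sketch:

struct dev_pm_qos_request req;
int ret;

/* bound the device's resume latency to 2 ms; PM QoS units are microseconds */
ret = dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_RESUME_LATENCY, 2000);
if (ret < 0)
        return ret;

/* ... device in use ... */

dev_pm_qos_remove_request(&req);
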
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index f5fc33a8bf44..60bf0ca64cf3 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -26,7 +26,7 @@ config POWER_RESET_AT91_POWEROFF
config POWER_RESET_AT91_RESET
tristate "Atmel AT91 reset driver"
depends on ARCH_AT91
- default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
+ default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
help
This driver supports restart for Atmel AT91SAM9 and SAMA5
SoCs
@@ -34,7 +34,7 @@ config POWER_RESET_AT91_RESET
config POWER_RESET_AT91_SAMA5D2_SHDWC
tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
depends on ARCH_AT91
- default SOC_SAM9X60 || SOC_SAMA5
+ default SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5
help
This driver supports the alternate shutdown controller for some Atmel
SAMA5 SoCs. It is present for example on SAMA5D2 SoC.
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index bb26fa6fa67c..8075382cbc36 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -57,8 +57,6 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
SYS_OFF_PRIO_DEFAULT,
as3722_pm_power_off,
as3722_poweroff);
-
- return 0;
}
static struct platform_driver as3722_poweroff_driver = {
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index edb0df86aff4..c2801bd6384d 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -326,6 +326,7 @@ static const struct of_device_id at91_pmc_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sam9x60-pmc" },
{ .compatible = "microchip,sama7g5-pmc" },
+ { .compatible = "microchip,sam9x7-pmc" },
{ /* Sentinel. */ }
};
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 52cfeee2cb28..3eaae352ffb9 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -44,7 +44,13 @@ static int gpio_poweroff_do_poweroff(struct sys_off_data *data)
/* give it some time */
mdelay(gpio_poweroff->timeout_ms);
- WARN_ON(1);
+ /*
+ * If code reaches this point, it means that gpio-poweroff has failed
+ * to actually power off the system.
+ * Warn the user that the attempt to poweroff via gpio-poweroff
+ * has gone wrong.
+ */
+ WARN(1, "Failed to poweroff via gpio-poweroff mechanism\n");
return NOTIFY_DONE;
}
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index cfaa54ced0d0..d9268d150e1f 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -87,26 +87,16 @@ static int rsctrl_probe(struct platform_device *pdev)
return -ENODEV;
/* get regmaps */
- pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
+ pllctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-pll",
+ 1, &rspll_offset);
if (IS_ERR(pllctrl_regs))
return PTR_ERR(pllctrl_regs);
- devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &rsmux_offset);
if (IS_ERR(devctrl_regs))
return PTR_ERR(devctrl_regs);
- ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
- if (ret) {
- dev_err(dev, "couldn't read the reset pll offset!\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
- if (ret) {
- dev_err(dev, "couldn't read the rsmux offset!\n");
- return -EINVAL;
- }
-
/* set soft/hard reset */
val = of_property_read_bool(np, "ti,soft-reset");
val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;
diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
index b7938fbb24a5..edae1e843c51 100644
--- a/drivers/power/supply/88pm860x_battery.c
+++ b/drivers/power/supply/88pm860x_battery.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/power_supply.h>
+#include <linux/string_choices.h>
#include <linux/mfd/88pm860x.h>
#include <linux/delay.h>
@@ -503,8 +504,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG);
bat_remove = data & BAT_WU_LOG;
- dev_dbg(info->dev, "battery wake up? %s\n",
- bat_remove != 0 ? "yes" : "no");
+ dev_dbg(info->dev, "battery wake up? %s\n", str_yes_no(bat_remove));
/* restore SOC from RTC domain register */
if (bat_remove == 0) {
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 9f2eef6787f7..7b18358f194a 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -918,6 +918,15 @@ config FUEL_GAUGE_SC27XX
Say Y here to enable support for fuel gauge with SC27XX
PMIC chips.
+config FUEL_GAUGE_STC3117
+ tristate "STMicroelectronics STC3117 fuel gauge driver"
+ depends on CRC8
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to enable support for fuel gauge with STC3117
+ chip.
+
config CHARGER_UCS1002
tristate "Microchip UCS1002 USB Port Power Controller"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 59c4a9f40d28..b55cc48a4c86 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_CROS_PCHG) += cros_peripheral_charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
+obj-$(CONFIG_FUEL_GAUGE_STC3117) += stc3117_fuel_gauge.o
obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 37039e28fc4b..b00c84fbc33c 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -540,10 +540,9 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
return 0;
}
-static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_btemp_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_btemp *di;
union power_supply_propval ret;
@@ -617,7 +616,7 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- power_supply_for_each_device(psy, ab8500_btemp_get_ext_psy_data);
+ power_supply_for_each_psy(psy, ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 14e1b448bd39..7a8d1feb8e90 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -844,10 +844,9 @@ static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
}
}
-static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_chargalg_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_chargalg *di;
union power_supply_propval ret;
@@ -1231,7 +1230,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
int ret;
/* Collect data from all power_supply class devices */
- power_supply_for_each_device(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+ power_supply_for_each_psy(di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
ab8500_chargalg_end_of_charge(di);
ab8500_chargalg_check_temp(di);
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index cece8d6753ac..1042d37424f5 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1894,10 +1894,9 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
return ret;
}
-static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_charger_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_charger *di;
union power_supply_propval ret;
@@ -1961,7 +1960,7 @@ static void ab8500_charger_check_vbat_work(struct work_struct *work)
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, check_vbat_work.work);
- power_supply_for_each_device(&di->usb_chg, ab8500_charger_get_ext_psy_data);
+ power_supply_for_each_psy(&di->usb_chg, ab8500_charger_get_ext_psy_data);
/* First run old_vbat is 0. */
if (di->old_vbat == 0)
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 78871a2143de..9dd99722667a 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2174,10 +2174,9 @@ static int ab8500_fg_get_property(struct power_supply *psy,
return 0;
}
-static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_fg_get_ext_psy_data(struct power_supply *ext, void *data)
{
struct power_supply *psy;
- struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_fg *di;
struct power_supply_battery_info *bi;
@@ -2402,7 +2401,7 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- power_supply_for_each_device(psy, ab8500_fg_get_ext_psy_data);
+ power_supply_for_each_psy(psy, ab8500_fg_get_ext_psy_data);
}
/**
@@ -2575,7 +2574,7 @@ static ssize_t ab8505_powercut_flagtime_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2598,7 +2597,7 @@ static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2625,7 +2624,7 @@ static ssize_t ab8505_powercut_maxtime_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2649,7 +2648,7 @@ static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2676,7 +2675,7 @@ static ssize_t ab8505_powercut_restart_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2699,7 +2698,7 @@ static ssize_t ab8505_powercut_restart_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2727,7 +2726,7 @@ static ssize_t ab8505_powercut_timer_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2750,7 +2749,7 @@ static ssize_t ab8505_powercut_restart_counter_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2773,7 +2772,7 @@ static ssize_t ab8505_powercut_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2794,7 +2793,7 @@ static ssize_t ab8505_powercut_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2822,7 +2821,7 @@ static ssize_t ab8505_powercut_flag_read(struct device *dev,
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2845,7 +2844,7 @@ static ssize_t ab8505_powercut_debounce_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
@@ -2868,7 +2867,7 @@ static ssize_t ab8505_powercut_debounce_write(struct device *dev,
{
int ret;
int reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
if (kstrtoint(buf, 10, &reg_value))
@@ -2895,7 +2894,7 @@ static ssize_t ab8505_powercut_enable_status_read(struct device *dev,
{
int ret;
u8 reg_value;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
diff --git a/drivers/power/supply/apm_power.c b/drivers/power/supply/apm_power.c
index 8ef1b6f1f787..9236e0078578 100644
--- a/drivers/power/supply/apm_power.c
+++ b/drivers/power/supply/apm_power.c
@@ -42,11 +42,11 @@ struct find_bat_param {
int max_energy;
};
-static int __find_main_battery(struct device *dev, void *data)
+static int __find_main_battery(struct power_supply *psy, void *data)
{
struct find_bat_param *bp = (struct find_bat_param *)data;
- bp->bat = dev_get_drvdata(dev);
+ bp->bat = psy;
if (bp->bat->desc->use_for_apm) {
/* nice, we explicitly asked to report this battery. */
@@ -79,7 +79,7 @@ static void find_main_battery(void)
main_battery = NULL;
bp.main = main_battery;
- error = power_supply_for_each_device(&bp, __find_main_battery);
+ error = power_supply_for_each_psy(&bp, __find_main_battery);
if (error) {
main_battery = bp.main;
return;
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 25e28dac900d..22f6a3b71632 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -171,6 +171,7 @@ struct bq2415x_device {
char *name;
int autotimer; /* 1 - if driver automatically reset timer, 0 - not */
int automode; /* 1 - enabled, 0 - disabled; -1 - not supported */
+ int charge_status;
int id;
};
@@ -835,11 +836,13 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (!bq2415x_update_reported_mode(bq, prop.intval))
return NOTIFY_OK;
+ power_supply_changed(bq->charger);
+
/* if automode is not enabled do not tell about reported_mode */
if (bq->automode < 1)
return NOTIFY_OK;
- schedule_delayed_work(&bq->work, 0);
+ mod_delayed_work(system_wq, &bq->work, 0);
return NOTIFY_OK;
}
@@ -889,12 +892,19 @@ static void bq2415x_timer_work(struct work_struct *work)
int ret;
int error;
int boost;
+ int charge;
if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
sysfs_notify(&bq->charger->dev.kobj, NULL, "reported_mode");
bq2415x_set_mode(bq, bq->reported_mode);
}
+ charge = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (bq->charge_status != charge) {
+ power_supply_changed(bq->charger);
+ bq->charge_status = charge;
+ }
+
if (!bq->autotimer)
return;
@@ -1050,7 +1060,7 @@ static ssize_t bq2415x_sysfs_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
int ret;
@@ -1083,7 +1093,7 @@ static ssize_t bq2415x_sysfs_set_timer(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
int ret = 0;
@@ -1104,7 +1114,7 @@ static ssize_t bq2415x_sysfs_show_timer(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
if (bq->timer_error)
@@ -1128,7 +1138,7 @@ static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_mode mode;
int ret = 0;
@@ -1180,7 +1190,7 @@ static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
@@ -1217,7 +1227,7 @@ static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
if (bq->automode < 0)
@@ -1245,7 +1255,7 @@ static ssize_t bq2415x_sysfs_set_registers(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
unsigned int reg;
@@ -1280,7 +1290,7 @@ static ssize_t bq2415x_sysfs_show_registers(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
ssize_t ret = 0;
@@ -1298,7 +1308,7 @@ static ssize_t bq2415x_sysfs_set_limit(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
long val;
int ret;
@@ -1329,7 +1339,7 @@ static ssize_t bq2415x_sysfs_show_limit(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
int ret;
@@ -1357,7 +1367,7 @@ static ssize_t bq2415x_sysfs_set_enable(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
long val;
@@ -1392,7 +1402,7 @@ static ssize_t bq2415x_sysfs_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
enum bq2415x_command command;
int ret;
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index c47f32f152e6..b4ba01744368 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -152,6 +152,7 @@
#define BQ24296_REG_VPRS_PN_MASK (BIT(7) | BIT(6) | BIT(5))
#define BQ24296_REG_VPRS_PN_SHIFT 5
#define BQ24296_REG_VPRS_PN_24296 0x1
+#define BQ24296_REG_VPRS_PN_24297 0x3
#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
@@ -208,6 +209,7 @@ enum bq24190_chip {
BQ24192i,
BQ24196,
BQ24296,
+ BQ24297,
};
/*
@@ -422,7 +424,7 @@ static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
BQ24190_SYSFS_FIELD_RO(watchdog, CTTC, WATCHDOG),
BQ24190_SYSFS_FIELD_RW(en_timer, CTTC, EN_TIMER),
BQ24190_SYSFS_FIELD_RW(chg_timer, CTTC, CHG_TIMER),
- BQ24190_SYSFS_FIELD_RW(jeta_iset, CTTC, JEITA_ISET),
+ BQ24190_SYSFS_FIELD_RW(jeita_iset, CTTC, JEITA_ISET),
BQ24190_SYSFS_FIELD_RW(bat_comp, ICTRC, BAT_COMP),
BQ24190_SYSFS_FIELD_RW(vclamp, ICTRC, VCLAMP),
BQ24190_SYSFS_FIELD_RW(treg, ICTRC, TREG),
@@ -480,7 +482,7 @@ static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
static ssize_t bq24190_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24190_dev_info *bdi = power_supply_get_drvdata(psy);
struct bq24190_sysfs_field_info *info;
ssize_t count;
@@ -510,7 +512,7 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
static ssize_t bq24190_sysfs_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24190_dev_info *bdi = power_supply_get_drvdata(psy);
struct bq24190_sysfs_field_info *info;
int ret;
@@ -1319,6 +1321,7 @@ static int bq24190_charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
ret = bq24190_charger_get_charge_type(bdi, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -1399,6 +1402,7 @@ static int bq24190_charger_set_property(struct power_supply *psy,
ret = bq24190_charger_set_temp_alert_max(bdi, val);
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
ret = bq24190_charger_set_charge_type(bdi, val);
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
@@ -1427,6 +1431,7 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
@@ -1475,6 +1480,7 @@ static void bq24190_charger_external_power_changed(struct power_supply *psy)
static enum power_supply_property bq24190_charger_properties[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -1504,6 +1510,9 @@ static const struct power_supply_desc bq24190_charger_desc = {
.set_property = bq24190_charger_set_property,
.property_is_writeable = bq24190_charger_property_is_writeable,
.external_power_changed = bq24190_charger_external_power_changed,
+ .charge_types = BIT(POWER_SUPPLY_CHARGE_TYPE_NONE) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_TRICKLE) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_FAST),
};
/* Battery power supply property routines */
@@ -1897,6 +1906,7 @@ static int bq24296_check_chip(struct bq24190_dev_info *bdi)
switch (v) {
case BQ24296_REG_VPRS_PN_24296:
+ case BQ24296_REG_VPRS_PN_24297:
break;
default:
dev_err(bdi->dev, "Error unknown model: 0x%02x\n", v);
@@ -2033,6 +2043,17 @@ static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
.get_ntc_status = bq24296_charger_get_ntc_status,
.set_otg_vbus = bq24296_set_otg_vbus,
},
+ [BQ24297] = {
+ .ichg_array_size = BQ24296_CCC_ICHG_VALUES_LEN,
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24296_vbus_desc,
+#endif
+ .check_chip = bq24296_check_chip,
+ .set_chg_config = bq24296_battery_set_chg_config,
+ .ntc_fault_mask = BQ24296_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24296_charger_get_ntc_status,
+ .set_otg_vbus = bq24296_set_otg_vbus,
+ },
};
static int bq24190_probe(struct i2c_client *client)
@@ -2289,6 +2310,7 @@ static const struct i2c_device_id bq24190_i2c_ids[] = {
{ "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
{ "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
{ "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
+ { "bq24297", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24297] },
{ },
};
MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
@@ -2299,6 +2321,7 @@ static const struct of_device_id bq24190_of_match[] = {
{ .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
{ .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
{ .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
+ { .compatible = "ti,bq24297", .data = &bq24190_chip_info_tbl[BQ24297] },
{ },
};
MODULE_DEVICE_TABLE(of, bq24190_of_match);
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 801d0d2c5f2e..1416586f2459 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -759,7 +759,7 @@ static ssize_t bq24257_show_ovp_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
return sysfs_emit(buf, "%u\n", bq24257_vovp_map[bq->init_data.vovp]);
@@ -769,7 +769,7 @@ static ssize_t bq24257_show_in_dpm_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
return sysfs_emit(buf, "%u\n", bq24257_vindpm_map[bq->init_data.vindpm]);
@@ -779,7 +779,7 @@ static ssize_t bq24257_sysfs_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
int ret;
@@ -801,7 +801,7 @@ static ssize_t bq24257_sysfs_set_enable(struct device *dev,
const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
long val;
int ret;
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 40c5ac7a1118..90a5bccfc6b9 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -123,6 +123,7 @@ enum bq27xxx_reg_index {
BQ27XXX_DM_BLOCK, /* Data Block */
BQ27XXX_DM_DATA, /* Block Data */
BQ27XXX_DM_CKSUM, /* Block Data Checksum */
+ BQ27XXX_REG_SEDVF, /* End-of-discharge Voltage */
BQ27XXX_REG_MAX, /* sentinel */
};
@@ -159,6 +160,7 @@ static u8
[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SEDVF] = 0x77,
},
bq27010_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -184,6 +186,7 @@ static u8
[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SEDVF] = 0x77,
},
bq2750x_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -579,6 +582,7 @@ static enum power_supply_property bq27000_props[] = {
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
};
static enum power_supply_property bq27010_props[] = {
@@ -599,6 +603,7 @@ static enum power_supply_property bq27010_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
};
#define bq2750x_props bq27510g3_props
@@ -2039,6 +2044,36 @@ static int bq27xxx_battery_voltage(struct bq27xxx_device_info *di,
return 0;
}
+/*
+ * Return the design minimum battery Voltage in microvolts
+ * Or < 0 if something fails.
+ */
+static int bq27xxx_battery_read_dmin_volt(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int volt;
+
+ /* We only have to read design minimum voltage once */
+ if (di->voltage_min_design > 0) {
+ val->intval = di->voltage_min_design;
+ return 0;
+ }
+
+ volt = bq27xxx_read(di, BQ27XXX_REG_SEDVF, true);
+ if (volt < 0) {
+ dev_err(di->dev, "error reading design min voltage\n");
+ return volt;
+ }
+
+ /* SEDVF = Design EDVF / 8 - 256 */
+ val->intval = volt * 8000 + 2048000;
+
+ /* Save for later reads */
+ di->voltage_min_design = val->intval;
+
+ return 0;
+}
+
static int bq27xxx_simple_value(int value,
union power_supply_propval *val)
{
@@ -2119,8 +2154,10 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
* power_supply_battery_info visible in sysfs.
*/
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
return -EINVAL;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ ret = bq27xxx_battery_read_dmin_volt(di, val);
+ break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
ret = bq27xxx_battery_read_cyct(di, val);
break;
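
The new bq27xxx_battery_read_dmin_volt() helper above inverts the relation noted in its comment, SEDVF = Design EDVF / 8 - 256 (values in mV), so a raw register reading scales back as EDVF = (SEDVF + 256) * 8 mV, i.e. SEDVF * 8000 + 2048000 in microvolts. A minimal standalone sketch of that scaling, with a hypothetical function name, is:

#include <stdio.h>

/* Design end-of-discharge voltage in microvolts from a raw SEDVF reading. */
static int sedvf_to_microvolts(int sedvf)
{
	return sedvf * 8000 + 2048000;	/* (sedvf + 256) * 8 mV, in uV */
}

int main(void)
{
	/* e.g. SEDVF = 118 -> (118 + 256) * 8 mV = 2992 mV = 2992000 uV */
	printf("%d\n", sedvf_to_microvolts(118));
	return 0;
}
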
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index a69faef444c0..c49e0e4d02f7 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/of.h>
#include <linux/thermal.h>
@@ -1088,7 +1089,7 @@ static ssize_t charger_state_show(struct device *dev,
if (!charger->externally_control)
state = regulator_is_enabled(charger->consumer);
- return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(state));
}
static ssize_t charger_externally_control_show(struct device *dev,
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 7781b45a67a7..6625d539d9ae 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
@@ -515,7 +516,7 @@ static void cpcap_charger_vbus_work(struct work_struct *work)
out_err:
cpcap_charger_update_state(ddata, POWER_SUPPLY_STATUS_UNKNOWN);
dev_err(ddata->dev, "%s could not %s vbus: %i\n", __func__,
- ddata->vbus_enabled ? "enable" : "disable", error);
+ str_enable_disable(ddata->vbus_enabled), error);
}
static int cpcap_charger_set_vbus(struct phy_companion *comparator,
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
index 9b0a7500296b..02d5bdbe2e8d 100644
--- a/drivers/power/supply/cros_charge-control.c
+++ b/drivers/power/supply/cros_charge-control.c
@@ -20,13 +20,6 @@
BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE) | \
BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
-enum CROS_CHCTL_ATTR {
- CROS_CHCTL_ATTR_START_THRESHOLD,
- CROS_CHCTL_ATTR_END_THRESHOLD,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR,
- _CROS_CHCTL_ATTR_COUNT
-};
-
/*
* Semantics of data *returned* from the EC API and Linux sysfs differ
* slightly, also the v1 API can not return any data.
@@ -38,18 +31,13 @@ enum CROS_CHCTL_ATTR {
*/
struct cros_chctl_priv {
+ struct device *dev;
struct cros_ec_device *cros_ec;
struct acpi_battery_hook battery_hook;
struct power_supply *hooked_battery;
u8 cmd_version;
- /* The callbacks need to access this priv structure.
- * As neither the struct device nor power_supply are under the drivers
- * control, embed the attributes within priv to use with container_of().
- */
- struct device_attribute device_attrs[_CROS_CHCTL_ATTR_COUNT];
- struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
- struct attribute_group group;
+ const struct power_supply_ext *psy_ext;
struct mutex lock; /* protects fields below and cros_ec */
enum power_supply_charge_behaviour current_behaviour;
@@ -119,26 +107,39 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
return cros_chctl_send_charge_control_cmd(priv->cros_ec, priv->cmd_version, &req);
}
-static struct cros_chctl_priv *cros_chctl_attr_to_priv(struct attribute *attr,
- enum CROS_CHCTL_ATTR idx)
+static int cros_chctl_psy_ext_get_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
- struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
+ struct cros_chctl_priv *priv = data;
- return container_of(dev_attr, struct cros_chctl_priv, device_attrs[idx]);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ val->intval = priv->current_start_threshold;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ val->intval = priv->current_end_threshold;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ val->intval = priv->current_behaviour;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_priv *priv,
- int is_end_threshold, const char *buf, size_t count)
+static int cros_chctl_psy_ext_set_threshold(struct cros_chctl_priv *priv,
+ enum power_supply_property psp,
+ int val)
{
- int ret, val;
+ int ret;
- ret = kstrtoint(buf, 10, &val);
- if (ret < 0)
- return ret;
if (val < 0 || val > 100)
return -EINVAL;
- if (is_end_threshold) {
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD) {
/* Start threshold is not exposed, use fixed value */
if (priv->cmd_version == 2)
priv->current_start_threshold = val == 100 ? 0 : val;
@@ -158,93 +159,73 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
return ret;
}
- return count;
-}
-
-static ssize_t charge_control_start_threshold_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_START_THRESHOLD);
-
- guard(mutex)(&priv->lock);
- return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
+ return 0;
}
-static ssize_t charge_control_start_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_START_THRESHOLD);
-
- guard(mutex)(&priv->lock);
- return cros_chctl_store_threshold(dev, priv, 0, buf, count);
-}
-static ssize_t charge_control_end_threshold_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int cros_chctl_psy_ext_set_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_END_THRESHOLD);
+ struct cros_chctl_priv *priv = data;
+ int ret;
guard(mutex)(&priv->lock);
- return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
-}
-
-static ssize_t charge_control_end_threshold_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_END_THRESHOLD);
- guard(mutex)(&priv->lock);
- return cros_chctl_store_threshold(dev, priv, 1, buf, count);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return cros_chctl_psy_ext_set_threshold(priv, psp, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ priv->current_behaviour = val->intval;
+ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute *attr, char *buf)
+static int cros_chctl_psy_prop_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp)
{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
-
- guard(mutex)(&priv->lock);
- return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
- priv->current_behaviour, buf);
+ return true;
}
-static ssize_t charge_behaviour_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
- CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
- int ret;
-
- ret = power_supply_charge_behaviour_parse(EC_CHARGE_CONTROL_BEHAVIOURS, buf);
- if (ret < 0)
- return ret;
-
- guard(mutex)(&priv->lock);
- priv->current_behaviour = ret;
+#define DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(_name, ...) \
+ static const enum power_supply_property _name ## _props[] = { \
+ __VA_ARGS__, \
+ }; \
+ \
+ static const struct power_supply_ext _name = { \
+ .name = "cros-charge-control", \
+ .properties = _name ## _props, \
+ .num_properties = ARRAY_SIZE(_name ## _props), \
+ .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS, \
+ .get_property = cros_chctl_psy_ext_get_prop, \
+ .set_property = cros_chctl_psy_ext_set_prop, \
+ .property_is_writeable = cros_chctl_psy_prop_is_writeable, \
+ }
- ret = cros_chctl_configure_ec(priv);
- if (ret < 0)
- return ret;
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v1,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR
+);
- return count;
-}
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v2,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD
+);
-static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
-{
- struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
-
- if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3)
- return 0;
- else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2)
- return 0;
-
- return attr->mode;
-}
+DEFINE_CROS_CHCTL_POWER_SUPPLY_EXTENSION(cros_chctl_psy_ext_v3,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD
+);
static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
@@ -254,7 +235,7 @@ static int cros_chctl_add_battery(struct power_supply *battery, struct acpi_batt
return 0;
priv->hooked_battery = battery;
- return device_add_group(&battery->dev, &priv->group);
+ return power_supply_register_extension(battery, priv->psy_ext, priv->dev, priv);
}
static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
@@ -262,7 +243,7 @@ static int cros_chctl_remove_battery(struct power_supply *battery, struct acpi_b
struct cros_chctl_priv *priv = container_of(hook, struct cros_chctl_priv, battery_hook);
if (priv->hooked_battery == battery) {
- device_remove_group(&battery->dev, &priv->group);
+ power_supply_unregister_extension(battery, priv->psy_ext);
priv->hooked_battery = NULL;
}
@@ -288,7 +269,6 @@ static int cros_chctl_probe(struct platform_device *pdev)
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct cros_ec_device *cros_ec = ec_dev->ec_dev;
struct cros_chctl_priv *priv;
- size_t i;
int ret;
ret = cros_chctl_fwk_charge_control_versions(cros_ec);
@@ -321,19 +301,15 @@ static int cros_chctl_probe(struct platform_device *pdev)
dev_dbg(dev, "Command version: %u\n", (unsigned int)priv->cmd_version);
+ priv->dev = dev;
priv->cros_ec = cros_ec;
- priv->device_attrs[CROS_CHCTL_ATTR_START_THRESHOLD] =
- (struct device_attribute)__ATTR_RW(charge_control_start_threshold);
- priv->device_attrs[CROS_CHCTL_ATTR_END_THRESHOLD] =
- (struct device_attribute)__ATTR_RW(charge_control_end_threshold);
- priv->device_attrs[CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR] =
- (struct device_attribute)__ATTR_RW(charge_behaviour);
- for (i = 0; i < _CROS_CHCTL_ATTR_COUNT; i++) {
- sysfs_attr_init(&priv->device_attrs[i].attr);
- priv->attributes[i] = &priv->device_attrs[i].attr;
- }
- priv->group.is_visible = cros_chtl_attr_is_visible;
- priv->group.attrs = priv->attributes;
+
+ if (priv->cmd_version == 1)
+ priv->psy_ext = &cros_chctl_psy_ext_v1;
+ else if (priv->cmd_version == 2)
+ priv->psy_ext = &cros_chctl_psy_ext_v2;
+ else
+ priv->psy_ext = &cros_chctl_psy_ext_v3;
priv->battery_hook.name = dev_name(dev);
priv->battery_hook.add_battery = cros_chctl_add_battery;
diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index 34328f5d556e..ac2e319e9517 100644
--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/string_choices.h>
#include <linux/mfd/da903x.h>
#include <linux/debugfs.h>
@@ -138,7 +139,7 @@ static int bat_debug_show(struct seq_file *s, void *data)
{
struct da9030_charger *charger = s->private;
- seq_printf(s, "charger is %s\n", charger->is_on ? "on" : "off");
+ seq_printf(s, "charger is %s\n", str_on_off(charger->is_on));
if (charger->chdet) {
seq_printf(s, "iset = %dmA, vset = %dmV\n",
charger->mA, charger->mV);
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 7cf4ea06b500..83bdec5a2bda 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -195,22 +195,22 @@ static int w1_ds2760_recall_eeprom(struct device *dev, int addr)
}
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
return w1_ds2760_read(dev, buf, off, count);
}
-static BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
-static struct bin_attribute *w1_ds2760_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2760_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2760_group = {
- .bin_attrs = w1_ds2760_bin_attrs,
+ .bin_attrs_new = w1_ds2760_bin_attrs,
};
static const struct attribute_group *w1_ds2760_groups[] = {
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 1e7f297f6cb1..dd9ac7a32967 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -621,7 +621,7 @@ static ssize_t ds2780_set_pio_pin(struct device *dev,
static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -634,7 +634,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -654,19 +654,19 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2780_param_eeprom_bin_attr = {
+static const struct bin_attribute ds2780_param_eeprom_bin_attr = {
.attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2780_PARAM_EEPROM_SIZE,
- .read = ds2780_read_param_eeprom_bin,
- .write = ds2780_write_param_eeprom_bin,
+ .read_new = ds2780_read_param_eeprom_bin,
+ .write_new = ds2780_write_param_eeprom_bin,
};
static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -679,7 +679,7 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -699,14 +699,14 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2780_user_eeprom_bin_attr = {
+static const struct bin_attribute ds2780_user_eeprom_bin_attr = {
.attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2780_USER_EEPROM_SIZE,
- .read = ds2780_read_user_eeprom_bin,
- .write = ds2780_write_user_eeprom_bin,
+ .read_new = ds2780_read_user_eeprom_bin,
+ .write_new = ds2780_write_user_eeprom_bin,
};
static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled,
@@ -726,7 +726,7 @@ static struct attribute *ds2780_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *ds2780_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const ds2780_sysfs_bin_attrs[] = {
&ds2780_param_eeprom_bin_attr,
&ds2780_user_eeprom_bin_attr,
NULL
@@ -734,7 +734,7 @@ static struct bin_attribute *ds2780_sysfs_bin_attrs[] = {
static const struct attribute_group ds2780_sysfs_group = {
.attrs = ds2780_sysfs_attrs,
- .bin_attrs = ds2780_sysfs_bin_attrs,
+ .bin_attrs_new = ds2780_sysfs_bin_attrs,
};
static const struct attribute_group *ds2780_sysfs_groups[] = {
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index c4f8ccc687f9..8a1f1f9835e0 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -623,7 +623,7 @@ static ssize_t ds2781_set_pio_pin(struct device *dev,
static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -636,7 +636,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -656,19 +656,19 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2781_param_eeprom_bin_attr = {
+static const struct bin_attribute ds2781_param_eeprom_bin_attr = {
.attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2781_PARAM_EEPROM_SIZE,
- .read = ds2781_read_param_eeprom_bin,
- .write = ds2781_write_param_eeprom_bin,
+ .read_new = ds2781_read_param_eeprom_bin,
+ .write_new = ds2781_write_param_eeprom_bin,
};
static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -682,7 +682,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -702,14 +702,14 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
return count;
}
-static struct bin_attribute ds2781_user_eeprom_bin_attr = {
+static const struct bin_attribute ds2781_user_eeprom_bin_attr = {
.attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS2781_USER_EEPROM_SIZE,
- .read = ds2781_read_user_eeprom_bin,
- .write = ds2781_write_user_eeprom_bin,
+ .read_new = ds2781_read_user_eeprom_bin,
+ .write_new = ds2781_write_user_eeprom_bin,
};
static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2781_get_pmod_enabled,
@@ -729,7 +729,7 @@ static struct attribute *ds2781_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *ds2781_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const ds2781_sysfs_bin_attrs[] = {
&ds2781_param_eeprom_bin_attr,
&ds2781_user_eeprom_bin_attr,
NULL,
@@ -737,7 +737,7 @@ static struct bin_attribute *ds2781_sysfs_bin_attrs[] = {
static const struct attribute_group ds2781_sysfs_group = {
.attrs = ds2781_sysfs_attrs,
- .bin_attrs = ds2781_sysfs_bin_attrs,
+ .bin_attrs_new = ds2781_sysfs_bin_attrs,
};
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index 85aa9c465aa4..cae95d35d398 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -11,6 +11,7 @@
* UEvent sending added by Evgeny Romanov <romanov@neurosoft.ru>
*/
+#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -57,14 +58,12 @@ struct ds278x_info {
struct power_supply_desc battery_desc;
const struct ds278x_battery_ops *ops;
struct delayed_work bat_work;
- int id;
int rsns;
int capacity;
int status; /* State Of Charge */
};
-static DEFINE_IDR(battery_id);
-static DEFINE_MUTEX(battery_lock);
+static DEFINE_IDA(battery_id);
static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val)
{
@@ -312,21 +311,6 @@ static void ds278x_power_supply_init(struct power_supply_desc *battery)
battery->external_power_changed = NULL;
}
-static void ds278x_battery_remove(struct i2c_client *client)
-{
- struct ds278x_info *info = i2c_get_clientdata(client);
- int id = info->id;
-
- power_supply_unregister(info->battery);
- cancel_delayed_work_sync(&info->bat_work);
- kfree(info->battery_desc.name);
- kfree(info);
-
- mutex_lock(&battery_lock);
- idr_remove(&battery_id, id);
- mutex_unlock(&battery_lock);
-}
-
#ifdef CONFIG_PM_SLEEP
static int ds278x_suspend(struct device *dev)
@@ -368,6 +352,13 @@ static const struct ds278x_battery_ops ds278x_ops[] = {
}
};
+static void ds278x_free_ida(void *data)
+{
+ int num = (uintptr_t)data;
+
+ ida_free(&battery_id, num);
+}
+
static int ds278x_battery_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -387,32 +378,27 @@ static int ds278x_battery_probe(struct i2c_client *client)
}
/* Get an ID for this battery */
- mutex_lock(&battery_lock);
- ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&battery_lock);
- if (ret < 0)
- goto fail_id;
- num = ret;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ret = -ENOMEM;
- goto fail_info;
- }
+ num = ida_alloc(&battery_id, GFP_KERNEL);
+ if (num < 0)
+ return num;
+ ret = devm_add_action_or_reset(&client->dev, ds278x_free_ida, (void *)(uintptr_t)num);
+ if (ret)
+ return ret;
- info->battery_desc.name = kasprintf(GFP_KERNEL, "%s-%d",
- client->name, num);
- if (!info->battery_desc.name) {
- ret = -ENOMEM;
- goto fail_name;
- }
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->battery_desc.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "%s-%d", client->name, num);
+ if (!info->battery_desc.name)
+ return -ENOMEM;
if (id->driver_data == DS2786)
info->rsns = pdata->rsns;
i2c_set_clientdata(client, info);
info->client = client;
- info->id = num;
info->ops = &ds278x_ops[id->driver_data];
ds278x_power_supply_init(&info->battery_desc);
psy_cfg.drv_data = info;
@@ -420,30 +406,20 @@ static int ds278x_battery_probe(struct i2c_client *client)
info->capacity = 100;
info->status = POWER_SUPPLY_STATUS_FULL;
- INIT_DELAYED_WORK(&info->bat_work, ds278x_bat_work);
-
- info->battery = power_supply_register(&client->dev,
- &info->battery_desc, &psy_cfg);
+ info->battery = devm_power_supply_register(&client->dev,
+ &info->battery_desc,
+ &psy_cfg);
if (IS_ERR(info->battery)) {
dev_err(&client->dev, "failed to register battery\n");
- ret = PTR_ERR(info->battery);
- goto fail_register;
- } else {
- schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+ return PTR_ERR(info->battery);
}
- return 0;
+ ret = devm_delayed_work_autocancel(&client->dev, &info->bat_work, ds278x_bat_work);
+ if (ret)
+ return ret;
+ schedule_delayed_work(&info->bat_work, DS278x_DELAY);
-fail_register:
- kfree(info->battery_desc.name);
-fail_name:
- kfree(info);
-fail_info:
- mutex_lock(&battery_lock);
- idr_remove(&battery_id, num);
- mutex_unlock(&battery_lock);
-fail_id:
- return ret;
+ return 0;
}
static const struct i2c_device_id ds278x_id[] = {
@@ -459,7 +435,6 @@ static struct i2c_driver ds278x_battery_driver = {
.pm = &ds278x_battery_pm_ops,
},
.probe = ds278x_battery_probe,
- .remove = ds278x_battery_remove,
.id_table = ds278x_id,
};
module_i2c_driver(ds278x_battery_driver);
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 6139f736ecbe..46d18ce6a739 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -195,6 +195,8 @@ static int init_charge_current_limit(struct device *dev,
{
int i, len;
u32 cur_limit = U32_MAX;
+ bool set_def_limit;
+ u32 def_limit;
gpio_charger->current_limit_gpios = devm_gpiod_get_array_optional(dev,
"charge-current-limit", GPIOD_OUT_LOW);
@@ -228,6 +230,9 @@ static int init_charge_current_limit(struct device *dev,
if (len < 0)
return len;
+ set_def_limit = !device_property_read_u32(dev,
+ "charge-current-limit-default-microamp",
+ &def_limit);
for (i=0; i < gpio_charger->current_limit_map_size; i++) {
if (gpio_charger->current_limit_map[i].limit_ua > cur_limit) {
dev_err(dev, "charge-current-limit-mapping not sorted by current in descending order\n");
@@ -235,8 +240,16 @@ static int init_charge_current_limit(struct device *dev,
}
cur_limit = gpio_charger->current_limit_map[i].limit_ua;
+ if (set_def_limit && def_limit == cur_limit) {
+ set_charge_current_limit(gpio_charger, cur_limit);
+ return 0;
+ }
}
+ if (set_def_limit)
+ dev_warn(dev, "charge-current-limit-default-microamp %u not listed in charge-current-limit-mapping\n",
+ def_limit);
+
/* default to smallest current limitation for safety reasons */
len = gpio_charger->current_limit_map_size - 1;
set_charge_current_limit(gpio_charger,
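
A minimal standalone sketch of the default-limit selection the gpio-charger hunk above adds: walk the charge-current mapping (sorted by current, descending) and apply the DT-provided default only when it is actually listed, otherwise keep the existing fallback to the smallest, safest entry. The struct and table values below are invented for illustration and do not come from the driver:

#include <stdio.h>

struct limit_entry {
	unsigned int limit_ua;
};

static unsigned int pick_limit(const struct limit_entry *map, int n,
			       int have_default, unsigned int def_limit)
{
	int i;

	for (i = 0; i < n; i++)
		if (have_default && map[i].limit_ua == def_limit)
			return def_limit;	/* default is listed: use it */

	/* default missing or not provided: use the smallest (last) entry */
	return map[n - 1].limit_ua;
}

int main(void)
{
	const struct limit_entry map[] = { { 1500000 }, { 500000 }, { 100000 } };

	printf("%u\n", pick_limit(map, 3, 1, 500000));	/* -> 500000 */
	printf("%u\n", pick_limit(map, 3, 1, 250000));	/* -> 100000 */
	return 0;
}
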
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index 82263646ddc6..c448e0ac0dfa 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -7,76 +7,154 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
-#define IP5XXX_SYS_CTL0 0x01
-#define IP5XXX_SYS_CTL0_WLED_DET_EN BIT(4)
-#define IP5XXX_SYS_CTL0_WLED_EN BIT(3)
-#define IP5XXX_SYS_CTL0_BOOST_EN BIT(2)
-#define IP5XXX_SYS_CTL0_CHARGER_EN BIT(1)
-#define IP5XXX_SYS_CTL1 0x02
-#define IP5XXX_SYS_CTL1_LIGHT_SHDN_EN BIT(1)
-#define IP5XXX_SYS_CTL1_LOAD_PWRUP_EN BIT(0)
-#define IP5XXX_SYS_CTL2 0x0c
-#define IP5XXX_SYS_CTL2_LIGHT_SHDN_TH GENMASK(7, 3)
-#define IP5XXX_SYS_CTL3 0x03
-#define IP5XXX_SYS_CTL3_LONG_PRESS_TIME_SEL GENMASK(7, 6)
-#define IP5XXX_SYS_CTL3_BTN_SHDN_EN BIT(5)
-#define IP5XXX_SYS_CTL4 0x04
-#define IP5XXX_SYS_CTL4_SHDN_TIME_SEL GENMASK(7, 6)
-#define IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN BIT(5)
-#define IP5XXX_SYS_CTL5 0x07
-#define IP5XXX_SYS_CTL5_NTC_DIS BIT(6)
-#define IP5XXX_SYS_CTL5_WLED_MODE_SEL BIT(1)
-#define IP5XXX_SYS_CTL5_BTN_SHDN_SEL BIT(0)
-#define IP5XXX_CHG_CTL1 0x22
-#define IP5XXX_CHG_CTL1_BOOST_UVP_SEL GENMASK(3, 2)
-#define IP5XXX_CHG_CTL2 0x24
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL GENMASK(6, 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V (0x0 << 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V (0x1 << 5)
-#define IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V (0x2 << 5)
-#define IP5XXX_CHG_CTL2_CONST_VOLT_SEL GENMASK(2, 1)
-#define IP5XXX_CHG_CTL4 0x26
-#define IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN BIT(6)
-#define IP5XXX_CHG_CTL4A 0x25
-#define IP5XXX_CHG_CTL4A_CONST_CUR_SEL GENMASK(4, 0)
-#define IP5XXX_MFP_CTL0 0x51
-#define IP5XXX_MFP_CTL1 0x52
-#define IP5XXX_GPIO_CTL2 0x53
-#define IP5XXX_GPIO_CTL2A 0x54
-#define IP5XXX_GPIO_CTL3 0x55
-#define IP5XXX_READ0 0x71
-#define IP5XXX_READ0_CHG_STAT GENMASK(7, 5)
-#define IP5XXX_READ0_CHG_STAT_IDLE (0x0 << 5)
-#define IP5XXX_READ0_CHG_STAT_TRICKLE (0x1 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_VOLT (0x2 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_CUR (0x3 << 5)
-#define IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP (0x4 << 5)
-#define IP5XXX_READ0_CHG_STAT_FULL (0x5 << 5)
-#define IP5XXX_READ0_CHG_STAT_TIMEOUT (0x6 << 5)
-#define IP5XXX_READ0_CHG_OP BIT(4)
-#define IP5XXX_READ0_CHG_END BIT(3)
-#define IP5XXX_READ0_CONST_VOLT_TIMEOUT BIT(2)
-#define IP5XXX_READ0_CHG_TIMEOUT BIT(1)
-#define IP5XXX_READ0_TRICKLE_TIMEOUT BIT(0)
-#define IP5XXX_READ0_TIMEOUT GENMASK(2, 0)
-#define IP5XXX_READ1 0x72
-#define IP5XXX_READ1_WLED_PRESENT BIT(7)
-#define IP5XXX_READ1_LIGHT_LOAD BIT(6)
-#define IP5XXX_READ1_VIN_OVERVOLT BIT(5)
-#define IP5XXX_READ2 0x77
-#define IP5XXX_READ2_BTN_PRESS BIT(3)
-#define IP5XXX_READ2_BTN_LONG_PRESS BIT(1)
-#define IP5XXX_READ2_BTN_SHORT_PRESS BIT(0)
-#define IP5XXX_BATVADC_DAT0 0xa2
-#define IP5XXX_BATVADC_DAT1 0xa3
-#define IP5XXX_BATIADC_DAT0 0xa4
-#define IP5XXX_BATIADC_DAT1 0xa5
-#define IP5XXX_BATOCV_DAT0 0xa8
-#define IP5XXX_BATOCV_DAT1 0xa9
+#define IP5XXX_BAT_TYPE_4_2V 0x0
+#define IP5XXX_BAT_TYPE_4_3V 0x1
+#define IP5XXX_BAT_TYPE_4_35V 0x2
+#define IP5XXX_BAT_TYPE_4_4V 0x3
+#define IP5XXX_CHG_STAT_IDLE 0x0
+#define IP5XXX_CHG_STAT_TRICKLE 0x1
+#define IP5XXX_CHG_STAT_CONST_VOLT 0x2
+#define IP5XXX_CHG_STAT_CONST_CUR 0x3
+#define IP5XXX_CHG_STAT_CONST_VOLT_STOP 0x4
+#define IP5XXX_CHG_STAT_FULL 0x5
+#define IP5XXX_CHG_STAT_TIMEOUT 0x6
struct ip5xxx {
struct regmap *regmap;
bool initialized;
+ struct {
+ struct {
+ /* Charger enable */
+ struct regmap_field *enable;
+ /* Constant voltage value */
+ struct regmap_field *const_volt_sel;
+ /* Constant current value */
+ struct regmap_field *const_curr_sel;
+ /* Charger status */
+ struct regmap_field *status;
+ /* Charging ended flag */
+ struct regmap_field *chg_end;
+ /* Timeout flags (CV, charge, trickle) */
+ struct regmap_field *timeout;
+ /* Overvoltage limit */
+ struct regmap_field *vin_overvolt;
+ } charger;
+ struct {
+ /* Boost converter enable */
+ struct regmap_field *enable;
+ struct {
+ /* Light load shutdown enable */
+ struct regmap_field *enable;
+ /* Light load shutdown current limit */
+ struct regmap_field *i_limit;
+ } light_load_shutdown;
+ /* Automatic powerup on increased load */
+ struct regmap_field *load_powerup_en;
+ /* Automatic powerup on VIN pull-out */
+ struct regmap_field *vin_pullout_en;
+ /* Undervoltage limit */
+ struct regmap_field *undervolt_limit;
+ /* Light load status flag */
+ struct regmap_field *light_load_status;
+ } boost;
+ struct {
+ /* NTC disable */
+ struct regmap_field *ntc_dis;
+ /* Battery voltage type */
+ struct regmap_field *type;
+ /* Battery voltage autoset from Vset pin */
+ struct regmap_field *vset_en;
+ struct {
+ /* Battery measurement registers */
+ struct ip5xxx_battery_adc_regs {
+ struct regmap_field *low;
+ struct regmap_field *high;
+ } volt, curr, open_volt;
+ } adc;
+ } battery;
+ struct {
+ /* Double/long press shutdown enable */
+ struct regmap_field *shdn_enable;
+ /* WLED activation: double press or long press */
+ struct regmap_field *wled_mode;
+ /* Shutdown activation: double press or long press */
+ struct regmap_field *shdn_mode;
+ /* Long press time */
+ struct regmap_field *long_press_time;
+ /* Button pressed */
+ struct regmap_field *pressed;
+ /* Button long-pressed */
+ struct regmap_field *long_pressed;
+ /* Button short-pressed */
+ struct regmap_field *short_pressed;
+ } btn;
+ struct {
+ /* WLED enable */
+ struct regmap_field *enable;
+ /* WLED detect */
+ struct regmap_field *detect_en;
+ /* WLED present */
+ struct regmap_field *present;
+ } wled;
+ } regs;
+
+ /* Maximum supported battery voltage (via regs.battery.type) */
+ int vbat_max;
+ /* Scaling constants for regs.boost.undervolt_limit */
+ struct {
+ int setpoint;
+ int microvolts_per_bit;
+ } boost_undervolt;
+ /* Scaling constants for regs.charger.const_curr_sel */
+ struct {
+ int setpoint;
+ } const_curr;
+ /* Whether regs.charger.chg_end is inverted */
+ u8 chg_end_inverted;
+};
+
+#define REG_FIELD_UNSUPPORTED { .lsb = 1 }
+/* Register field layout. Unsupported fields are marked REG_FIELD_UNSUPPORTED ({ .lsb = 1 }) */
+struct ip5xxx_regfield_config {
+ const struct reg_field charger_enable;
+ const struct reg_field charger_const_volt_sel;
+ const struct reg_field charger_const_curr_sel;
+ const struct reg_field charger_status;
+ const struct reg_field charger_chg_end;
+ const struct reg_field charger_timeout;
+ const struct reg_field charger_vin_overvolt;
+ const struct reg_field boost_enable;
+ const struct reg_field boost_llshdn_enable;
+ const struct reg_field boost_llshdn_i_limit;
+ const struct reg_field boost_load_powerup_en;
+ const struct reg_field boost_vin_pullout_en;
+ const struct reg_field boost_undervolt_limit;
+ const struct reg_field boost_light_load_status;
+ const struct reg_field battery_ntc_dis;
+ const struct reg_field battery_type;
+ const struct reg_field battery_vset_en;
+ const struct reg_field battery_adc_volt_low;
+ const struct reg_field battery_adc_volt_high;
+ const struct reg_field battery_adc_curr_low;
+ const struct reg_field battery_adc_curr_high;
+ const struct reg_field battery_adc_ovolt_low;
+ const struct reg_field battery_adc_ovolt_high;
+ const struct reg_field btn_shdn_enable;
+ const struct reg_field btn_wled_mode;
+ const struct reg_field btn_shdn_mode;
+ const struct reg_field btn_long_press_time;
+ const struct reg_field btn_pressed;
+ const struct reg_field btn_long_pressed;
+ const struct reg_field btn_short_pressed;
+ const struct reg_field wled_enable;
+ const struct reg_field wled_detect_en;
+ const struct reg_field wled_present;
+
+ int vbat_max;
+ int boost_undervolt_setpoint;
+ int boost_undervolt_uv_per_bit;
+ int const_curr_setpoint;
+ u8 chg_end_inverted;
};
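/*
 * Illustrative note (not part of the patch): REG_FIELD_UNSUPPORTED leaves
 * .msb at 0 and sets .lsb to 1, so an entry's validity can be tested with a
 * plain lsb <= msb comparison, which is exactly what the ip5xxx_setup_reg()
 * macro further down does before allocating the regmap_field:
 *
 *	static inline bool ip5xxx_field_is_supported(const struct reg_field *f)
 *	{
 *		return f->lsb <= f->msb;	// hypothetical helper name
 *	}
 */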
/*
@@ -87,24 +165,30 @@ struct ip5xxx {
* 2) Attempt the initialization sequence on each subsequent register access
* until it succeeds.
*/
-static int ip5xxx_read(struct ip5xxx *ip5xxx, unsigned int reg,
+static int ip5xxx_read(struct ip5xxx *ip5xxx, struct regmap_field *field,
unsigned int *val)
{
int ret;
- ret = regmap_read(ip5xxx->regmap, reg, val);
+ if (!field)
+ return -EOPNOTSUPP;
+
+ ret = regmap_field_read(field, val);
if (ret)
ip5xxx->initialized = false;
return ret;
}
-static int ip5xxx_update_bits(struct ip5xxx *ip5xxx, unsigned int reg,
- unsigned int mask, unsigned int val)
+static int ip5xxx_write(struct ip5xxx *ip5xxx, struct regmap_field *field,
+ unsigned int val)
{
int ret;
- ret = regmap_update_bits(ip5xxx->regmap, reg, mask, val);
+ if (!field)
+ return -EOPNOTSUPP;
+
+ ret = regmap_field_write(field, val);
if (ret)
ip5xxx->initialized = false;
@@ -123,28 +207,26 @@ static int ip5xxx_initialize(struct power_supply *psy)
* Disable shutdown under light load.
* Enable power on when under load.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL1,
- IP5XXX_SYS_CTL1_LIGHT_SHDN_EN |
- IP5XXX_SYS_CTL1_LOAD_PWRUP_EN,
- IP5XXX_SYS_CTL1_LOAD_PWRUP_EN);
+ if (ip5xxx->regs.boost.light_load_shutdown.enable) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.light_load_shutdown.enable, 0);
+ if (ret)
+ return ret;
+ }
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.load_powerup_en, 1);
if (ret)
return ret;
/*
* Enable shutdown after a long button press (as configured below).
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL3,
- IP5XXX_SYS_CTL3_BTN_SHDN_EN,
- IP5XXX_SYS_CTL3_BTN_SHDN_EN);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.shdn_enable, 1);
if (ret)
return ret;
/*
* Power on automatically when VIN is removed.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL4,
- IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN,
- IP5XXX_SYS_CTL4_VIN_PULLOUT_BOOST_EN);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.boost.vin_pullout_en, 1);
if (ret)
return ret;
@@ -152,12 +234,15 @@ static int ip5xxx_initialize(struct power_supply *psy)
* Enable the NTC.
* Configure the button for two presses => LED, long press => shutdown.
*/
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL5,
- IP5XXX_SYS_CTL5_NTC_DIS |
- IP5XXX_SYS_CTL5_WLED_MODE_SEL |
- IP5XXX_SYS_CTL5_BTN_SHDN_SEL,
- IP5XXX_SYS_CTL5_WLED_MODE_SEL |
- IP5XXX_SYS_CTL5_BTN_SHDN_SEL);
+ if (ip5xxx->regs.battery.ntc_dis) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.ntc_dis, 0);
+ if (ret)
+ return ret;
+ }
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.wled_mode, 1);
+ if (ret)
+ return ret;
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.btn.shdn_mode, 1);
if (ret)
return ret;
@@ -186,24 +271,37 @@ static int ip5xxx_battery_get_status(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ if (!ip5xxx->regs.charger.status) {
+ // Fall back to the Charging Ended bit
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.chg_end, &rval);
+ if (ret)
+ return ret;
+
+ if (rval == ip5xxx->chg_end_inverted)
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+ }
+
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.status, &rval);
if (ret)
return ret;
- switch (rval & IP5XXX_READ0_CHG_STAT) {
- case IP5XXX_READ0_CHG_STAT_IDLE:
+ switch (rval) {
+ case IP5XXX_CHG_STAT_IDLE:
*val = POWER_SUPPLY_STATUS_DISCHARGING;
break;
- case IP5XXX_READ0_CHG_STAT_TRICKLE:
- case IP5XXX_READ0_CHG_STAT_CONST_CUR:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT:
+ case IP5XXX_CHG_STAT_TRICKLE:
+ case IP5XXX_CHG_STAT_CONST_CUR:
+ case IP5XXX_CHG_STAT_CONST_VOLT:
*val = POWER_SUPPLY_STATUS_CHARGING;
break;
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP:
- case IP5XXX_READ0_CHG_STAT_FULL:
+ case IP5XXX_CHG_STAT_CONST_VOLT_STOP:
+ case IP5XXX_CHG_STAT_FULL:
*val = POWER_SUPPLY_STATUS_FULL;
break;
- case IP5XXX_READ0_CHG_STAT_TIMEOUT:
+ case IP5XXX_CHG_STAT_TIMEOUT:
*val = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
default:
@@ -218,22 +316,22 @@ static int ip5xxx_battery_get_charge_type(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.status, &rval);
if (ret)
return ret;
- switch (rval & IP5XXX_READ0_CHG_STAT) {
- case IP5XXX_READ0_CHG_STAT_IDLE:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT_STOP:
- case IP5XXX_READ0_CHG_STAT_FULL:
- case IP5XXX_READ0_CHG_STAT_TIMEOUT:
+ switch (rval) {
+ case IP5XXX_CHG_STAT_IDLE:
+ case IP5XXX_CHG_STAT_CONST_VOLT_STOP:
+ case IP5XXX_CHG_STAT_FULL:
+ case IP5XXX_CHG_STAT_TIMEOUT:
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
break;
- case IP5XXX_READ0_CHG_STAT_TRICKLE:
+ case IP5XXX_CHG_STAT_TRICKLE:
*val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
break;
- case IP5XXX_READ0_CHG_STAT_CONST_CUR:
- case IP5XXX_READ0_CHG_STAT_CONST_VOLT:
+ case IP5XXX_CHG_STAT_CONST_CUR:
+ case IP5XXX_CHG_STAT_CONST_VOLT:
*val = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
break;
default:
@@ -248,11 +346,11 @@ static int ip5xxx_battery_get_health(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_READ0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.timeout, &rval);
if (ret)
return ret;
- if (rval & IP5XXX_READ0_TIMEOUT)
+ if (rval)
*val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
else
*val = POWER_SUPPLY_HEALTH_GOOD;
@@ -265,7 +363,7 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
unsigned int rval;
int ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL2, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.battery.type, &rval);
if (ret)
return ret;
@@ -273,16 +371,19 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
* It is not clear what this will return if
* IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN is not set...
*/
- switch (rval & IP5XXX_CHG_CTL2_BAT_TYPE_SEL) {
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V:
+ switch (rval) {
+ case IP5XXX_BAT_TYPE_4_2V:
*val = 4200000;
break;
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V:
+ case IP5XXX_BAT_TYPE_4_3V:
*val = 4300000;
break;
- case IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V:
+ case IP5XXX_BAT_TYPE_4_35V:
*val = 4350000;
break;
+ case IP5XXX_BAT_TYPE_4_4V:
+ *val = 4400000;
+ break;
default:
return -EINVAL;
}
@@ -291,16 +392,16 @@ static int ip5xxx_battery_get_voltage_max(struct ip5xxx *ip5xxx, int *val)
}
static int ip5xxx_battery_read_adc(struct ip5xxx *ip5xxx,
- u8 lo_reg, u8 hi_reg, int *val)
+ struct ip5xxx_battery_adc_regs *regs, int *val)
{
unsigned int hi, lo;
int ret;
- ret = ip5xxx_read(ip5xxx, lo_reg, &lo);
+ ret = ip5xxx_read(ip5xxx, regs->low, &lo);
if (ret)
return ret;
- ret = ip5xxx_read(ip5xxx, hi_reg, &hi);
+ ret = ip5xxx_read(ip5xxx, regs->high, &hi);
if (ret)
return ret;
@@ -335,33 +436,35 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
return ip5xxx_battery_get_voltage_max(ip5xxx, &val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATVADC_DAT0,
- IP5XXX_BATVADC_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.volt, &raw);
+ if (ret)
+ return ret;
val->intval = 2600000 + DIV_ROUND_CLOSEST(raw * 26855, 100);
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATOCV_DAT0,
- IP5XXX_BATOCV_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.open_volt, &raw);
+ if (ret)
+ return ret;
val->intval = 2600000 + DIV_ROUND_CLOSEST(raw * 26855, 100);
return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATIADC_DAT0,
- IP5XXX_BATIADC_DAT1, &raw);
+ ret = ip5xxx_battery_read_adc(ip5xxx, &ip5xxx->regs.battery.adc.curr, &raw);
+ if (ret)
+ return ret;
val->intval = DIV_ROUND_CLOSEST(raw * 149197, 200);
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL4A, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.const_curr_sel, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL4A_CONST_CUR_SEL;
- val->intval = 100000 * rval;
+ val->intval = ip5xxx->const_curr.setpoint + 100000 * rval;
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
@@ -373,12 +476,11 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
if (ret)
return ret;
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL2, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.charger.const_volt_sel, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL2_CONST_VOLT_SEL;
- val->intval = vmax + 14000 * (rval >> 1);
+ val->intval = vmax + 14000 * rval;
return 0;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
@@ -399,30 +501,36 @@ static int ip5xxx_battery_set_voltage_max(struct ip5xxx *ip5xxx, int val)
unsigned int rval;
int ret;
+ if (val > ip5xxx->vbat_max)
+ return -EINVAL;
+
switch (val) {
case 4200000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_2V;
+ rval = IP5XXX_BAT_TYPE_4_2V;
break;
case 4300000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_3V;
+ rval = IP5XXX_BAT_TYPE_4_3V;
break;
case 4350000:
- rval = IP5XXX_CHG_CTL2_BAT_TYPE_SEL_4_35V;
+ rval = IP5XXX_BAT_TYPE_4_35V;
+ break;
+ case 4400000:
+ rval = IP5XXX_BAT_TYPE_4_4V;
break;
default:
return -EINVAL;
}
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL2,
- IP5XXX_CHG_CTL2_BAT_TYPE_SEL, rval);
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.type, rval);
if (ret)
return ret;
- ret = ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL4,
- IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN,
- IP5XXX_CHG_CTL4_BAT_TYPE_SEL_EN);
- if (ret)
- return ret;
+ /* Don't try to auto-detect battery type, even if the IC could */
+ if (ip5xxx->regs.battery.vset_en) {
+ ret = ip5xxx_write(ip5xxx, ip5xxx->regs.battery.vset_en, 1);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -443,7 +551,7 @@ static int ip5xxx_battery_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
switch (val->intval) {
case POWER_SUPPLY_STATUS_CHARGING:
- rval = IP5XXX_SYS_CTL0_CHARGER_EN;
+ rval = 1;
break;
case POWER_SUPPLY_STATUS_DISCHARGING:
case POWER_SUPPLY_STATUS_NOT_CHARGING:
@@ -452,25 +560,22 @@ static int ip5xxx_battery_set_property(struct power_supply *psy,
default:
return -EINVAL;
}
- return ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL0,
- IP5XXX_SYS_CTL0_CHARGER_EN, rval);
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.enable, rval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
return ip5xxx_battery_set_voltage_max(ip5xxx, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- rval = val->intval / 100000;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL4A,
- IP5XXX_CHG_CTL4A_CONST_CUR_SEL, rval);
+ rval = (val->intval - ip5xxx->const_curr.setpoint) / 100000;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.const_curr_sel, rval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
ret = ip5xxx_battery_get_voltage_max(ip5xxx, &vmax);
if (ret)
return ret;
- rval = ((val->intval - vmax) / 14000) << 1;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL2,
- IP5XXX_CHG_CTL2_CONST_VOLT_SEL, rval);
+ rval = (val->intval - vmax) / 14000;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.charger.const_volt_sel, rval);
default:
return -EINVAL;
@@ -515,20 +620,20 @@ static int ip5xxx_boost_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- ret = ip5xxx_read(ip5xxx, IP5XXX_SYS_CTL0, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.boost.enable, &rval);
if (ret)
return ret;
- val->intval = !!(rval & IP5XXX_SYS_CTL0_BOOST_EN);
+ val->intval = !!rval;
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- ret = ip5xxx_read(ip5xxx, IP5XXX_CHG_CTL1, &rval);
+ ret = ip5xxx_read(ip5xxx, ip5xxx->regs.boost.undervolt_limit, &rval);
if (ret)
return ret;
- rval &= IP5XXX_CHG_CTL1_BOOST_UVP_SEL;
- val->intval = 4530000 + 100000 * (rval >> 2);
+ val->intval = ip5xxx->boost_undervolt.setpoint +
+ ip5xxx->boost_undervolt.microvolts_per_bit * rval;
return 0;
default:
@@ -550,14 +655,12 @@ static int ip5xxx_boost_set_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- rval = val->intval ? IP5XXX_SYS_CTL0_BOOST_EN : 0;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_SYS_CTL0,
- IP5XXX_SYS_CTL0_BOOST_EN, rval);
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.boost.enable, !!val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- rval = ((val->intval - 4530000) / 100000) << 2;
- return ip5xxx_update_bits(ip5xxx, IP5XXX_CHG_CTL1,
- IP5XXX_CHG_CTL1_BOOST_UVP_SEL, rval);
+ rval = (val->intval - ip5xxx->boost_undervolt.setpoint) /
+ ip5xxx->boost_undervolt.microvolts_per_bit;
+ return ip5xxx_write(ip5xxx, ip5xxx->regs.boost.undervolt_limit, rval);
default:
return -EINVAL;
@@ -583,13 +686,152 @@ static const struct power_supply_desc ip5xxx_boost_desc = {
static const struct regmap_config ip5xxx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = IP5XXX_BATOCV_DAT1,
+ .max_register = 0xa9,
+};
+
+static struct ip5xxx_regfield_config ip51xx_fields = {
+ .charger_enable = REG_FIELD(0x01, 1, 1),
+ .charger_const_volt_sel = REG_FIELD(0x24, 1, 2),
+ .charger_const_curr_sel = REG_FIELD(0x25, 0, 4),
+ .charger_status = REG_FIELD(0x71, 5, 7),
+ .charger_chg_end = REG_FIELD(0x71, 3, 3),
+ .charger_timeout = REG_FIELD(0x71, 0, 2),
+ .charger_vin_overvolt = REG_FIELD(0x72, 5, 5),
+ .boost_enable = REG_FIELD(0x01, 2, 2),
+ .boost_llshdn_enable = REG_FIELD(0x02, 1, 1),
+ .boost_llshdn_i_limit = REG_FIELD(0x0c, 3, 7),
+ .boost_load_powerup_en = REG_FIELD(0x02, 0, 0),
+ .boost_vin_pullout_en = REG_FIELD(0x04, 5, 5),
+ .boost_undervolt_limit = REG_FIELD(0x22, 2, 3),
+ .boost_light_load_status = REG_FIELD(0x72, 6, 6),
+ .battery_ntc_dis = REG_FIELD(0x07, 6, 6),
+ .battery_type = REG_FIELD(0x24, 5, 6),
+ .battery_vset_en = REG_FIELD(0x26, 6, 6),
+ .battery_adc_volt_low = REG_FIELD(0xa2, 0, 7),
+ .battery_adc_volt_high = REG_FIELD(0xa3, 0, 5),
+ .battery_adc_curr_low = REG_FIELD(0xa4, 0, 7),
+ .battery_adc_curr_high = REG_FIELD(0xa5, 0, 5),
+ .battery_adc_ovolt_low = REG_FIELD(0xa8, 0, 7),
+ .battery_adc_ovolt_high = REG_FIELD(0xa9, 0, 5),
+ .btn_shdn_enable = REG_FIELD(0x03, 5, 5),
+ .btn_wled_mode = REG_FIELD(0x07, 1, 1),
+ .btn_shdn_mode = REG_FIELD(0x07, 0, 0),
+ .btn_long_press_time = REG_FIELD(0x03, 6, 7),
+ .btn_pressed = REG_FIELD(0x77, 3, 3),
+ .btn_long_pressed = REG_FIELD(0x77, 1, 1),
+ .btn_short_pressed = REG_FIELD(0x77, 0, 0),
+ .wled_enable = REG_FIELD(0x01, 3, 3),
+ .wled_detect_en = REG_FIELD(0x01, 4, 4),
+ .wled_present = REG_FIELD(0x72, 7, 7),
+
+ .vbat_max = 4350000,
+ .boost_undervolt_setpoint = 4530000,
+ .boost_undervolt_uv_per_bit = 100000,
+};
+
+static struct ip5xxx_regfield_config ip5306_fields = {
+ .charger_enable = REG_FIELD(0x00, 4, 4),
+ .charger_const_volt_sel = REG_FIELD(0x22, 0, 1),
+ .charger_const_curr_sel = REG_FIELD(0x24, 0, 4),
+ .charger_status = REG_FIELD_UNSUPPORTED, // other bits...
+ .charger_chg_end = REG_FIELD(0x71, 3, 3),
+ .charger_timeout = REG_FIELD_UNSUPPORTED,
+ .charger_vin_overvolt = REG_FIELD_UNSUPPORTED,
+ .boost_enable = REG_FIELD(0x00, 5, 5),
+ .boost_llshdn_enable = REG_FIELD_UNSUPPORTED,
+ .boost_llshdn_i_limit = REG_FIELD_UNSUPPORTED,
+ .boost_load_powerup_en = REG_FIELD(0x00, 2, 2),
+ .boost_vin_pullout_en = REG_FIELD(0x01, 2, 2),
+ .boost_undervolt_limit = REG_FIELD(0x21, 2, 4),
+ .boost_light_load_status = REG_FIELD(0x72, 2, 2),
+ .battery_ntc_dis = REG_FIELD_UNSUPPORTED,
+ .battery_type = REG_FIELD(0x22, 2, 3),
+ .battery_vset_en = REG_FIELD_UNSUPPORTED,
+ .battery_adc_volt_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_volt_high = REG_FIELD_UNSUPPORTED,
+ .battery_adc_curr_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_curr_high = REG_FIELD_UNSUPPORTED,
+ .battery_adc_ovolt_low = REG_FIELD_UNSUPPORTED,
+ .battery_adc_ovolt_high = REG_FIELD_UNSUPPORTED,
+ .btn_shdn_enable = REG_FIELD(0x00, 0, 0),
+ .btn_wled_mode = REG_FIELD(0x01, 6, 6),
+ .btn_shdn_mode = REG_FIELD(0x01, 7, 7),
+ .btn_long_press_time = REG_FIELD(0x02, 4, 4), // +1s
+ .btn_pressed = REG_FIELD_UNSUPPORTED,
+ /* TODO: double press */
+ .btn_long_pressed = REG_FIELD(0x77, 1, 1),
+ .btn_short_pressed = REG_FIELD(0x77, 0, 0),
+ .wled_enable = REG_FIELD_UNSUPPORTED,
+ .wled_detect_en = REG_FIELD_UNSUPPORTED,
+ .wled_present = REG_FIELD_UNSUPPORTED,
+
+ .vbat_max = 4400000,
+ .boost_undervolt_setpoint = 4450000,
+ .boost_undervolt_uv_per_bit = 50000,
+ .const_curr_setpoint = 50000,
+ .chg_end_inverted = 1,
};
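/*
 * Worked example (illustrative, not part of the patch), using the scaling
 * constants above: the boost undervoltage limit is reported as
 * setpoint + code * microvolts_per_bit, so a register code of 3 means
 * 4.53 V + 3 * 100 mV = 4.83 V on the ip51xx parts but
 * 4.45 V + 3 * 50 mV = 4.60 V on the ip5306. Likewise the constant charge
 * current is const_curr_setpoint + code * 100 mA (a 50 mA offset on the
 * ip5306, none on the ip51xx), and chg_end_inverted = 1 records that the
 * ip5306 "charge end" bit reads 1 while charging rather than when charging
 * has finished.
 */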
+#define ip5xxx_setup_reg(_field, _reg) \
+ do { \
+ if (likely(cfg->_field.lsb <= cfg->_field.msb)) { \
+ struct regmap_field *_tmp = devm_regmap_field_alloc(dev, \
+ ip5xxx->regmap, cfg->_field); \
+ if (!IS_ERR(_tmp)) \
+ ip5xxx->regs._reg = _tmp; \
+ } \
+ } while (0)
+
+static void ip5xxx_setup_regs(struct device *dev, struct ip5xxx *ip5xxx,
+ const struct ip5xxx_regfield_config *cfg)
+{
+ ip5xxx_setup_reg(charger_enable, charger.enable);
+ ip5xxx_setup_reg(charger_const_volt_sel, charger.const_volt_sel);
+ ip5xxx_setup_reg(charger_const_curr_sel, charger.const_curr_sel);
+ ip5xxx_setup_reg(charger_status, charger.status);
+ ip5xxx_setup_reg(charger_chg_end, charger.chg_end);
+ ip5xxx_setup_reg(charger_timeout, charger.timeout);
+ ip5xxx_setup_reg(charger_vin_overvolt, charger.vin_overvolt);
+ ip5xxx_setup_reg(boost_enable, boost.enable);
+ ip5xxx_setup_reg(boost_llshdn_enable, boost.light_load_shutdown.enable);
+ ip5xxx_setup_reg(boost_llshdn_i_limit, boost.light_load_shutdown.i_limit);
+ ip5xxx_setup_reg(boost_load_powerup_en, boost.load_powerup_en);
+ ip5xxx_setup_reg(boost_vin_pullout_en, boost.vin_pullout_en);
+ ip5xxx_setup_reg(boost_undervolt_limit, boost.undervolt_limit);
+ ip5xxx_setup_reg(boost_light_load_status, boost.light_load_status);
+ ip5xxx_setup_reg(battery_ntc_dis, battery.ntc_dis);
+ ip5xxx_setup_reg(battery_type, battery.type);
+ ip5xxx_setup_reg(battery_vset_en, battery.vset_en);
+ ip5xxx_setup_reg(battery_adc_volt_low, battery.adc.volt.low);
+ ip5xxx_setup_reg(battery_adc_volt_high, battery.adc.volt.high);
+ ip5xxx_setup_reg(battery_adc_curr_low, battery.adc.curr.low);
+ ip5xxx_setup_reg(battery_adc_curr_high, battery.adc.curr.high);
+ ip5xxx_setup_reg(battery_adc_ovolt_low, battery.adc.open_volt.low);
+ ip5xxx_setup_reg(battery_adc_ovolt_high, battery.adc.open_volt.high);
+ ip5xxx_setup_reg(btn_shdn_enable, btn.shdn_enable);
+ ip5xxx_setup_reg(btn_wled_mode, btn.wled_mode);
+ ip5xxx_setup_reg(btn_shdn_mode, btn.shdn_mode);
+ ip5xxx_setup_reg(btn_long_press_time, btn.long_press_time);
+ ip5xxx_setup_reg(btn_pressed, btn.pressed);
+ ip5xxx_setup_reg(btn_long_pressed, btn.long_pressed);
+ ip5xxx_setup_reg(btn_short_pressed, btn.short_pressed);
+ ip5xxx_setup_reg(wled_enable, wled.enable);
+ ip5xxx_setup_reg(wled_detect_en, wled.detect_en);
+ ip5xxx_setup_reg(wled_present, wled.present);
+
+ ip5xxx->vbat_max = cfg->vbat_max;
+ ip5xxx->boost_undervolt.setpoint = cfg->boost_undervolt_setpoint;
+ ip5xxx->boost_undervolt.microvolts_per_bit = cfg->boost_undervolt_uv_per_bit;
+ ip5xxx->const_curr.setpoint = cfg->const_curr_setpoint;
+ ip5xxx->chg_end_inverted = cfg->chg_end_inverted;
+}
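/*
 * Note (not part of the patch): fields whose config entry is
 * REG_FIELD_UNSUPPORTED, or whose allocation fails, are simply left NULL
 * here; ip5xxx_read()/ip5xxx_write() then return -EOPNOTSUPP for them, so
 * callers can treat the feature as absent instead of touching a register
 * that does not exist on that chip.
 */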
+
static int ip5xxx_power_probe(struct i2c_client *client)
{
+ const struct ip5xxx_regfield_config *fields = &ip51xx_fields;
struct power_supply_config psy_cfg = {};
struct device *dev = &client->dev;
+ const struct of_device_id *of_id;
struct power_supply *psy;
struct ip5xxx *ip5xxx;
@@ -601,6 +843,11 @@ static int ip5xxx_power_probe(struct i2c_client *client)
if (IS_ERR(ip5xxx->regmap))
return PTR_ERR(ip5xxx->regmap);
+ of_id = i2c_of_match_device(dev->driver->of_match_table, client);
+ if (of_id)
+ fields = (const struct ip5xxx_regfield_config *)of_id->data;
+ ip5xxx_setup_regs(dev, ip5xxx, fields);
+
psy_cfg.of_node = dev->of_node;
psy_cfg.drv_data = ip5xxx;
@@ -616,10 +863,11 @@ static int ip5xxx_power_probe(struct i2c_client *client)
}
static const struct of_device_id ip5xxx_power_of_match[] = {
- { .compatible = "injoinic,ip5108" },
- { .compatible = "injoinic,ip5109" },
- { .compatible = "injoinic,ip5207" },
- { .compatible = "injoinic,ip5209" },
+ { .compatible = "injoinic,ip5108", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5109", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5207", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5209", .data = &ip51xx_fields },
+ { .compatible = "injoinic,ip5306", .data = &ip5306_fields },
{ }
};
MODULE_DEVICE_TABLE(of, ip5xxx_power_of_match);
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index 2e4bc74e1c4a..23eb426295db 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for Analog Devices (Linear Technology) LTC4162-L charger IC.
+ * Driver for Analog Devices (Linear Technology)
+ * LTC4162-L 35V/3.2A Multi-Cell Lithium-Ion Step-Down Battery Charger
+ * LTC4162-F 35V/3.2A Multi-Cell LiFePO4 Step-Down Battery Charger
+ * LTC4162-S 35V/3.2A Lead-Acid Step-Down Battery Charger
+ * LTC4015 35V/3.2A Multichemistry Buck Battery Charger Controller
* Copyright (C) 2020, Topic Embedded Products
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of.h>
@@ -47,6 +52,20 @@
#define LTC4162L_VBAT_FILT 0x47
#define LTC4162L_INPUT_UNDERVOLTAGE_DAC 0x4B
+#define LTC4162L_CHEM_MASK GENMASK(11, 8)
+
+enum ltc4162_chem {
+ ltc4162_lad,
+ ltc4162_l42,
+ ltc4162_l41,
+ ltc4162_l40,
+ ltc4162_fad,
+ ltc4162_ffs,
+ ltc4162_fst,
+ ltc4162_sst = 8,
+ ltc4162_sad,
+};
+
/* Enumeration as in datasheet. Individual bits are mutually exclusive. */
enum ltc4162l_state {
battery_detection = 2048,
@@ -75,10 +94,28 @@ enum ltc4162l_charge_status {
/* Magic number to write to ARM_SHIP_MODE register */
#define LTC4162L_ARM_SHIP_MODE_MAGIC 21325
+struct ltc4162l_info;
+
+struct ltc4162l_chip_info {
+ const char *name;
+ int (*get_vbat)(struct ltc4162l_info *info, unsigned int reg,
+ union power_supply_propval *val);
+ int (*get_vcharge)(struct ltc4162l_info *info, unsigned int reg,
+ union power_supply_propval *val);
+ int (*set_vcharge)(struct ltc4162l_info *info, unsigned int reg,
+ unsigned int value);
+ int (*get_die_temp)(struct ltc4162l_info *info,
+ union power_supply_propval *val);
+ unsigned int ibat_resolution_pv;
+ unsigned int vin_resolution_uv;
+ u8 telemetry_mask;
+};
+
struct ltc4162l_info {
struct i2c_client *client;
struct regmap *regmap;
struct power_supply *charger;
+ const struct ltc4162l_chip_info *chip_info;
u32 rsnsb; /* Series resistor that sets charge current, microOhm */
u32 rsnsi; /* Series resistor to measure input current, microOhm */
u8 cell_count; /* Number of connected cells, 0 while unknown */
@@ -108,6 +145,18 @@ static u8 ltc4162l_get_cell_count(struct ltc4162l_info *info)
return val;
};
+static u8 ltc4162l_get_chem_type(struct ltc4162l_info *info)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, LTC4162L_CHEM_CELLS_REG, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(LTC4162L_CHEM_MASK, val);
+};
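/*
 * Reference note (not part of the patch): per enum ltc4162_chem above,
 * chem codes 0-3 are the Li-Ion (-L) variants, 4-6 the LiFePO4 (-F)
 * variants and 8-9 the lead-acid (-S) variants. The LTC4015 reports its
 * lead-acid fixed/programmable settings as 0x7 and 0x8, which is why the
 * ltc4015 helpers below use "ltc4162_sst - 1 ... ltc4162_sad - 1" case
 * ranges.
 */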
+
/* Convert enum value to POWER_SUPPLY_STATUS value */
static int ltc4162l_state_decode(enum ltc4162l_state value)
{
@@ -223,25 +272,83 @@ static int ltc4162l_get_vbat(struct ltc4162l_info *info,
unsigned int reg,
union power_supply_propval *val)
{
- unsigned int regval;
+ unsigned int regval, chem_type;
int ret;
ret = regmap_read(info->regmap, reg, &regval);
if (ret)
return ret;
- /* cell_count × 192.4μV/LSB */
- regval *= 1924;
- regval *= ltc4162l_get_cell_count(info);
- regval /= 10;
- val->intval = regval;
+ /*
+ * cell_count × scaling factor
+ * The ltc4162-s reports a cell_count of 2 for each group of three
+ * physical (2V) cells, i.e. 2, 4, 6 or 8 for a 6V, 12V, 18V or 24V
+ * battery, so divide it by 2 to multiply the scale factor by 1, 2,
+ * 3 or 4 respectively.
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_fst:
+ regval *= 1924;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 10;
+ val->intval = regval;
- return 0;
+ return 0;
+ case ltc4162_sst ... ltc4162_sad:
+ regval *= 3848;
+ regval *= ltc4162l_get_cell_count(info) / 2;
+ regval /= 10;
+ val->intval = regval;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
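/*
 * Worked example (illustrative, not part of the patch): a 12V lead-acid
 * battery makes cell_count read 4, so each VBAT LSB corresponds to
 * 384.8 uV * (4 / 2) = 769.6 uV with the ltc4162-s scaling above.
 */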
+
+static int ltc4015_get_vbat(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
+{
+ unsigned int regval, chem_type;
+ int ret;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /*
+ * cell_count × scaling factor
+ * On the ltc4015, the lead-acid fixed and lead-acid programmable
+ * chemistries correspond to chem codes 0x7 and 0x8 respectively.
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_fst:
+ regval *= 192264;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 1000;
+ val->intval = regval;
+
+ return 0;
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ regval *= 128176;
+ regval *= ltc4162l_get_cell_count(info);
+ regval /= 1000;
+ val->intval = regval;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ltc4162l_get_ibat(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -249,9 +356,8 @@ static int ltc4162l_get_ibat(struct ltc4162l_info *info,
if (ret)
return ret;
- /* Signed 16-bit number, 1.466μV / RSNSB amperes/LSB. */
ret = (s16)(regval & 0xFFFF);
- val->intval = 100 * mult_frac(ret, 14660, (int)info->rsnsb);
+ val->intval = mult_frac(ret, chip_info->ibat_resolution_pv, info->rsnsb);
return 0;
}
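/*
 * Worked example (illustrative, not part of the patch): ibat_resolution_pv
 * is the LSB size in picovolts across the sense resistor, so with the
 * LTC4162's 1.466 uV/LSB and an assumed 10 mOhm RSNSB (rsnsb = 10000 uOhm)
 * one LSB is 1466000 / 10000 = 146 uA, matching the 1.466 uV / RSNSB figure
 * from the removed comment.
 */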
@@ -260,6 +366,7 @@ static int ltc4162l_get_ibat(struct ltc4162l_info *info,
static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -267,8 +374,7 @@ static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
if (ret)
return ret;
- /* 1.649mV/LSB */
- val->intval = regval * 1694;
+ val->intval = regval * chip_info->vin_resolution_uv;
return 0;
}
@@ -276,6 +382,7 @@ static int ltc4162l_get_input_voltage(struct ltc4162l_info *info,
static int ltc4162l_get_input_current(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -283,11 +390,9 @@ static int ltc4162l_get_input_current(struct ltc4162l_info *info,
if (ret)
return ret;
- /* Signed 16-bit number, 1.466μV / RSNSI amperes/LSB. */
ret = (s16)(regval & 0xFFFF);
- ret *= 14660;
+ ret *= chip_info->ibat_resolution_pv;
ret /= info->rsnsi;
- ret *= 100;
val->intval = ret;
@@ -305,7 +410,7 @@ static int ltc4162l_get_icharge(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only the lower 5 bits */
+ regval &= GENMASK(5, 0);
/* The charge current servo level: (icharge_dac + 1) × 1mV/RSNSB */
++regval;
@@ -336,7 +441,7 @@ static int ltc4162l_get_vcharge(struct ltc4162l_info *info,
unsigned int reg,
union power_supply_propval *val)
{
- unsigned int regval;
+ unsigned int regval, chem_type;
int ret;
u32 voltage;
@@ -344,41 +449,181 @@ static int ltc4162l_get_vcharge(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only the lower 5 bits */
+ regval &= GENMASK(5, 0);
/*
* charge voltage setting can be computed from
- * cell_count × (vcharge_setting × 12.5mV + 3.8125V)
- * where vcharge_setting ranges from 0 to 31 (4.2V max).
+ * cell_count × (vcharge_setting × a + b)
+ * where vcharge_setting ranges from 0 to c (d).
+ * For ltc4162l: a = 12.5mV, b = 3.8125V, c = 31, d = 4.2V max
+ * For ltc4162f: a = 12.5mV, b = 3.4125V, c = 31, d = 3.8V max
+ *
+ * For ltc4162s, the charge voltage setting can be computed from
+ * N × (vcharge_setting × 28.571mV + 6.0V)
+ * where N is 1, 2, 3, or 4 for a 6V, 12V, 18V, or 24V battery respectively,
+ * and vcharge_setting ranges from 0 to 31
*/
- voltage = 3812500 + (regval * 12500);
- voltage *= ltc4162l_get_cell_count(info);
- val->intval = voltage;
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ voltage = 3812500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
- return 0;
+ return 0;
+ case ltc4162_fad ... ltc4162_fst:
+ voltage = 3412500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_sst ... ltc4162_sad:
+ voltage = 6000000 + (regval * 28571);
+ voltage *= ltc4162l_get_cell_count(info) / 2;
+ val->intval = voltage;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
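/*
 * Worked example (illustrative, not part of the patch): a 2-cell ltc4162-l
 * pack with vcharge_setting = 31 yields
 * 2 * (3.8125 V + 31 * 12.5 mV) = 2 * 4.2 V = 8.4 V, the documented 4.2 V
 * per-cell maximum.
 */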
-static int ltc4162l_set_vcharge(struct ltc4162l_info *info,
- unsigned int reg,
- unsigned int value)
+static int ltc4015_get_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ union power_supply_propval *val)
{
- u8 cell_count = ltc4162l_get_cell_count(info);
+ unsigned int regval, chem_type;
+ int ret;
+ u32 voltage;
+
+ ret = regmap_read(info->regmap, reg, &regval);
+ if (ret)
+ return ret;
- if (!cell_count)
- return -EBUSY; /* Not available yet, try again later */
+ regval &= GENMASK(5, 0);
+ /*
+ * charge voltage setting can be computed from:
+ * cell_count × (vcharge_setting × a + b)
+ * where vcharge_setting ranges from 0 to c (d).
+ * Li-Ion: a = 1/80V, b = 3.8125V, c = 31, d = 4.2Vmax
+ * LiFePO4: a = 1/80V, b = 3.4125V, c = 31, d = 3.8Vmax
+ * Lead Acid: a = 1/105V, b = 2V, c = 35, d = 2.6Vmax
+ */
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ voltage = 3812500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_fad ... ltc4162_fst:
+ voltage = 3412500 + (regval * 12500);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ voltage = 2000000 + mult_frac(regval, 1000000, 105);
+ voltage *= ltc4162l_get_cell_count(info);
+ val->intval = voltage;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4162l_vcharge(unsigned int base_voltage,
+ unsigned int scale_factor,
+ unsigned int range,
+ unsigned int value,
+ u8 cell_count)
+{
value /= cell_count;
- if (value < 3812500)
+ if (value < base_voltage)
return -EINVAL;
- value -= 3812500;
- value /= 12500;
+ value -= base_voltage;
+ value /= scale_factor;
- if (value > 31)
+ if (value > range)
return -EINVAL;
- return regmap_write(info->regmap, reg, value);
+ return value;
+}
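/*
 * Worked example for the inverse path (illustrative, not part of the
 * patch): requesting 8.4 V on a 2-cell ltc4162-l gives
 * (8400000 / 2 - 3812500) / 12500 = 31, which is within the 0..31 range
 * and is written to the register as-is.
 */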
+
+static int ltc4162l_set_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ unsigned int chem_type;
+ u8 cell_count;
+
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(3812500, 12500, 31, value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ case ltc4162_fad ... ltc4162_fst:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(3412500, 12500, 31, value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ case ltc4162_sst ... ltc4162_sad:
+ cell_count = ltc4162l_get_cell_count(info) / 2;
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(6000000, 28571, 31, value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc4015_set_vcharge(struct ltc4162l_info *info,
+ unsigned int reg,
+ unsigned int value)
+{
+ unsigned int chem_type;
+ u8 cell_count;
+
+ chem_type = ltc4162l_get_chem_type(info);
+ switch (chem_type) {
+ case ltc4162_lad ... ltc4162_l40:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(3812500, 12500, 31, value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ case ltc4162_fad ... ltc4162_fst:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(3412500, 12500, 31, value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ case ltc4162_sst - 1 ... ltc4162_sad - 1:
+ cell_count = ltc4162l_get_cell_count(info);
+ if (!cell_count)
+ return -EBUSY;
+
+ value = ltc4162l_vcharge(2000000, 1000000 / 105, 35,
+ value, cell_count);
+ return regmap_write(info->regmap, reg, value);
+ default:
+ return -EINVAL;
+ }
}
static int ltc4162l_get_iin_limit_dac(struct ltc4162l_info *info,
@@ -391,7 +636,7 @@ static int ltc4162l_get_iin_limit_dac(struct ltc4162l_info *info,
if (ret)
return ret;
- regval &= BIT(6) - 1; /* Only 6 bits */
+ regval &= GENMASK(5, 0);
/* (iin_limit_dac + 1) × 500μV / RSNSI */
++regval;
@@ -437,9 +682,30 @@ static int ltc4162l_get_die_temp(struct ltc4162l_info *info,
return 0;
}
+static int ltc4015_get_die_temp(struct ltc4162l_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->regmap, LTC4162L_DIE_TEMPERATURE, &regval);
+ if (ret)
+ return ret;
+
+ /* die temperature = (die_temp - 12010) / 45.6 °C */
+ ret = (s16)(regval & 0xFFFF);
+ ret -= 12010;
+ ret *= 1000;
+ ret /= 456;
+ val->intval = ret;
+
+ return 0;
+}
+
static int ltc4162l_get_term_current(struct ltc4162l_info *info,
union power_supply_propval *val)
{
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
unsigned int regval;
int ret;
@@ -457,10 +723,9 @@ static int ltc4162l_get_term_current(struct ltc4162l_info *info,
if (ret)
return ret;
- /* 1.466μV / RSNSB amperes/LSB */
- regval *= 14660u;
+ regval *= chip_info->ibat_resolution_pv;
regval /= info->rsnsb;
- val->intval = 100 * regval;
+ val->intval = regval;
return 0;
}
@@ -534,10 +799,11 @@ static ssize_t vbat_show(struct device *dev,
{
struct power_supply *psy = to_power_supply(dev);
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
union power_supply_propval val;
int ret;
- ret = ltc4162l_get_vbat(info, LTC4162L_VBAT, &val);
+ ret = chip_info->get_vbat(info, LTC4162L_VBAT, &val);
if (ret)
return ret;
@@ -550,10 +816,11 @@ static ssize_t vbat_avg_show(struct device *dev,
{
struct power_supply *psy = to_power_supply(dev);
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
union power_supply_propval val;
int ret;
- ret = ltc4162l_get_vbat(info, LTC4162L_VBAT_FILT, &val);
+ ret = chip_info->get_vbat(info, LTC4162L_VBAT_FILT, &val);
if (ret)
return ret;
@@ -589,7 +856,8 @@ static ssize_t force_telemetry_show(struct device *dev,
if (ret)
return ret;
- return sysfs_emit(buf, "%u\n", regval & BIT(2) ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", regval &
+ info->chip_info->telemetry_mask ? 1 : 0);
}
static ssize_t force_telemetry_store(struct device *dev,
@@ -607,7 +875,8 @@ static ssize_t force_telemetry_store(struct device *dev,
return ret;
ret = regmap_update_bits(info->regmap, LTC4162L_CONFIG_BITS_REG,
- BIT(2), value ? BIT(2) : 0);
+ info->chip_info->telemetry_mask,
+ value ? info->chip_info->telemetry_mask : 0);
if (ret < 0)
return ret;
@@ -681,6 +950,7 @@ static int ltc4162l_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct ltc4162l_info *info = power_supply_get_drvdata(psy);
+ const struct ltc4162l_chip_info *chip_info = info->chip_info;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -702,15 +972,13 @@ static int ltc4162l_get_property(struct power_supply *psy,
return ltc4162l_get_icharge(info,
LTC4162L_CHARGE_CURRENT_SETTING, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- return ltc4162l_get_vcharge(info,
- LTC4162L_VCHARGE_DAC, val);
+ return chip_info->get_vcharge(info, LTC4162L_VCHARGE_DAC, val);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- return ltc4162l_get_vcharge(info,
- LTC4162L_VCHARGE_SETTING, val);
+ return chip_info->get_vcharge(info, LTC4162L_VCHARGE_SETTING, val);
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
return ltc4162l_get_iin_limit_dac(info, val);
case POWER_SUPPLY_PROP_TEMP:
- return ltc4162l_get_die_temp(info, val);
+ return chip_info->get_die_temp(info, val);
case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
return ltc4162l_get_term_current(info, val);
default:
@@ -772,7 +1040,6 @@ static enum power_supply_property ltc4162l_properties[] = {
};
static const struct power_supply_desc ltc4162l_desc = {
- .name = "ltc4162-l",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = ltc4162l_properties,
.num_properties = ARRAY_SIZE(ltc4162l_properties),
@@ -781,6 +1048,50 @@ static const struct power_supply_desc ltc4162l_desc = {
.property_is_writeable = ltc4162l_property_is_writeable,
};
+static const struct ltc4162l_chip_info ltc4162l_chip_info = {
+ .name = "ltc4162-l",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4162f_chip_info = {
+ .name = "ltc4162-f",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4162s_chip_info = {
+ .name = "ltc4162-s",
+ .get_vbat = ltc4162l_get_vbat,
+ .get_vcharge = ltc4162l_get_vcharge,
+ .set_vcharge = ltc4162l_set_vcharge,
+ .get_die_temp = ltc4162l_get_die_temp,
+ .ibat_resolution_pv = 1466000,
+ .vin_resolution_uv = 1649,
+ .telemetry_mask = BIT(2),
+};
+
+static const struct ltc4162l_chip_info ltc4015_chip_info = {
+ .name = "ltc4015",
+ .get_vbat = ltc4015_get_vbat,
+ .get_vcharge = ltc4015_get_vcharge,
+ .set_vcharge = ltc4015_set_vcharge,
+ .get_die_temp = ltc4015_get_die_temp,
+ .ibat_resolution_pv = 1464870,
+ .vin_resolution_uv = 1648,
+ .telemetry_mask = BIT(4),
+};
+
static bool ltc4162l_is_writeable_reg(struct device *dev, unsigned int reg)
{
/* all registers up to this one are writeable */
@@ -825,6 +1136,8 @@ static int ltc4162l_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ltc4162l_info *info;
struct power_supply_config ltc4162l_config = {};
+ struct power_supply_desc *desc;
+ const struct ltc4162l_chip_info *chip_info;
u32 value;
int ret;
@@ -839,6 +1152,12 @@ static int ltc4162l_probe(struct i2c_client *client)
info->client = client;
i2c_set_clientdata(client, info);
+ chip_info = i2c_get_match_data(client);
+ if (!chip_info)
+ return -ENODEV;
+
+ info->chip_info = chip_info;
+
info->regmap = devm_regmap_init_i2c(client, &ltc4162l_regmap_config);
if (IS_ERR(info->regmap)) {
dev_err(dev, "Failed to initialize register map\n");
@@ -870,8 +1189,15 @@ static int ltc4162l_probe(struct i2c_client *client)
ltc4162l_config.drv_data = info;
ltc4162l_config.attr_grp = ltc4162l_attr_groups;
- info->charger = devm_power_supply_register(dev, &ltc4162l_desc,
- &ltc4162l_config);
+ /* Duplicate the default descriptor to set name based on chip_info. */
+ desc = devm_kmemdup(dev, &ltc4162l_desc,
+ sizeof(struct power_supply_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->name = chip_info->name;
+
+ info->charger = devm_power_supply_register(dev, desc, &ltc4162l_config);
if (IS_ERR(info->charger)) {
dev_err(dev, "Failed to register charger\n");
return PTR_ERR(info->charger);
@@ -903,14 +1229,20 @@ static void ltc4162l_alert(struct i2c_client *client,
}
static const struct i2c_device_id ltc4162l_i2c_id_table[] = {
- { "ltc4162-l" },
+ { "ltc4015", (kernel_ulong_t)&ltc4015_chip_info },
+ { "ltc4162-f", (kernel_ulong_t)&ltc4162f_chip_info },
+ { "ltc4162-l", (kernel_ulong_t)&ltc4162l_chip_info },
+ { "ltc4162-s", (kernel_ulong_t)&ltc4162s_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4162l_i2c_id_table);
static const struct of_device_id ltc4162l_of_match[] __maybe_unused = {
- { .compatible = "lltc,ltc4162-l", },
- { },
+ { .compatible = "lltc,ltc4015", .data = &ltc4015_chip_info },
+ { .compatible = "lltc,ltc4162-f", .data = &ltc4162f_chip_info },
+ { .compatible = "lltc,ltc4162-l", .data = &ltc4162l_chip_info },
+ { .compatible = "lltc,ltc4162-s", .data = &ltc4162s_chip_info },
+ { }
};
MODULE_DEVICE_TABLE(of, ltc4162l_of_match);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 496c3e1f2ee6..655b3f25dbd7 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -16,6 +16,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
@@ -52,13 +53,14 @@
#define MAX17042_VMAX_TOLERANCE 50 /* 50 mV */
struct max17042_chip {
- struct i2c_client *client;
+ struct device *dev;
struct regmap *regmap;
struct power_supply *battery;
enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
struct work_struct work;
int init_complete;
+ int irq;
};
static enum power_supply_property max17042_battery_props[] = {
@@ -573,11 +575,11 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
int i;
if (memcmp(data1, data2, size)) {
- dev_err(&chip->client->dev, "%s compare failed\n", __func__);
+ dev_err(chip->dev, "%s compare failed\n", __func__);
for (i = 0; i < size; i++)
- dev_info(&chip->client->dev, "0x%x, 0x%x",
+ dev_info(chip->dev, "0x%x, 0x%x",
data1[i], data2[i]);
- dev_info(&chip->client->dev, "\n");
+ dev_info(chip->dev, "\n");
return -EINVAL;
}
return 0;
@@ -812,14 +814,14 @@ static int max17042_init_chip(struct max17042_chip *chip)
/* write cell characterization data */
ret = max17042_init_model(chip);
if (ret) {
- dev_err(&chip->client->dev, "%s init failed\n",
+ dev_err(chip->dev, "%s init failed\n",
__func__);
return -EIO;
}
ret = max17042_verify_model_lock(chip);
if (ret) {
- dev_err(&chip->client->dev, "%s lock verify failed\n",
+ dev_err(chip->dev, "%s lock verify failed\n",
__func__);
return -EIO;
}
@@ -875,7 +877,7 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
return IRQ_HANDLED;
if ((val & STATUS_SMN_BIT) || (val & STATUS_SMX_BIT)) {
- dev_dbg(&chip->client->dev, "SOC threshold INTR\n");
+ dev_dbg(chip->dev, "SOC threshold INTR\n");
max17042_set_soc_threshold(chip, 1);
}
@@ -907,7 +909,7 @@ static void max17042_init_worker(struct work_struct *work)
static struct max17042_platform_data *
max17042_get_of_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
@@ -949,7 +951,7 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
static struct max17042_platform_data *
max17042_get_default_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
@@ -990,7 +992,7 @@ max17042_get_default_pdata(struct max17042_chip *chip)
static struct max17042_platform_data *
max17042_get_pdata(struct max17042_chip *chip)
{
- struct device *dev = &chip->client->dev;
+ struct device *dev = chip->dev;
#ifdef CONFIG_OF
if (dev->of_node)
@@ -1003,6 +1005,7 @@ max17042_get_pdata(struct max17042_chip *chip)
}
static const struct regmap_config max17042_regmap_config = {
+ .name = "max17042",
.reg_bits = 8,
.val_bits = 16,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
@@ -1029,14 +1032,12 @@ static const struct power_supply_desc max17042_no_current_sense_psy_desc = {
.num_properties = ARRAY_SIZE(max17042_battery_props) - 2,
};
-static int max17042_probe(struct i2c_client *client)
+static int max17042_probe(struct i2c_client *client, struct device *dev, int irq,
+ enum max170xx_chip_type chip_type)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct i2c_adapter *adapter = client->adapter;
const struct power_supply_desc *max17042_desc = &max17042_psy_desc;
struct power_supply_config psy_cfg = {};
- const struct acpi_device_id *acpi_id = NULL;
- struct device *dev = &client->dev;
struct max17042_chip *chip;
int ret;
int i;
@@ -1045,33 +1046,25 @@ static int max17042_probe(struct i2c_client *client)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->client = client;
- if (id) {
- chip->chip_type = id->driver_data;
- } else {
- acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!acpi_id)
- return -ENODEV;
-
- chip->chip_type = acpi_id->driver_data;
- }
+ chip->dev = dev;
+ chip->chip_type = chip_type;
chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config);
if (IS_ERR(chip->regmap)) {
- dev_err(&client->dev, "Failed to initialize regmap\n");
+ dev_err(dev, "Failed to initialize regmap\n");
return -EINVAL;
}
chip->pdata = max17042_get_pdata(chip);
if (!chip->pdata) {
- dev_err(&client->dev, "no platform data provided\n");
+ dev_err(dev, "no platform data provided\n");
return -EINVAL;
}
- i2c_set_clientdata(client, chip);
+ dev_set_drvdata(dev, chip);
psy_cfg.drv_data = chip;
psy_cfg.of_node = dev->of_node;
@@ -1095,24 +1088,17 @@ static int max17042_probe(struct i2c_client *client)
regmap_write(chip->regmap, MAX17042_LearnCFG, 0x0007);
}
- chip->battery = devm_power_supply_register(&client->dev, max17042_desc,
+ chip->battery = devm_power_supply_register(dev, max17042_desc,
&psy_cfg);
if (IS_ERR(chip->battery)) {
- dev_err(&client->dev, "failed: power supply register\n");
+ dev_err(dev, "failed: power supply register\n");
return PTR_ERR(chip->battery);
}
- if (client->irq) {
- unsigned int flags = IRQF_ONESHOT;
-
- /*
- * On ACPI systems the IRQ may be handled by ACPI-event code,
- * so we need to share (if the ACPI code is willing to share).
- */
- if (acpi_id)
- flags |= IRQF_SHARED | IRQF_PROBE_SHARED;
+ if (irq) {
+ unsigned int flags = IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED;
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = devm_request_threaded_irq(dev, irq,
NULL,
max17042_thread_handler, flags,
chip->battery->desc->name,
@@ -1123,18 +1109,20 @@ static int max17042_probe(struct i2c_client *client)
CFG_ALRT_BIT_ENBL);
max17042_set_soc_threshold(chip, 1);
} else {
- client->irq = 0;
+ irq = 0;
if (ret != -EBUSY)
- dev_err(&client->dev, "Failed to get IRQ\n");
+ dev_err(dev, "Failed to get IRQ\n");
}
}
/* Not able to update the charge threshold when exceeded? -> disable */
- if (!client->irq)
+ if (!irq)
regmap_write(chip->regmap, MAX17042_SALRT_Th, 0xff00);
+ chip->irq = irq;
+
regmap_read(chip->regmap, MAX17042_STATUS, &val);
if (val & STATUS_POR_BIT) {
- ret = devm_work_autocancel(&client->dev, &chip->work,
+ ret = devm_work_autocancel(dev, &chip->work,
max17042_init_worker);
if (ret)
return ret;
@@ -1146,6 +1134,44 @@ static int max17042_probe(struct i2c_client *client)
return 0;
}
+static int max17042_i2c_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct acpi_device_id *acpi_id = NULL;
+ struct device *dev = &client->dev;
+ enum max170xx_chip_type chip_type;
+
+ if (id) {
+ chip_type = id->driver_data;
+ } else {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ chip_type = acpi_id->driver_data;
+ }
+
+ return max17042_probe(client, dev, client->irq, chip_type);
+}
+
+static int max17042_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct i2c_client *i2c;
+ const struct platform_device_id *id;
+ int irq;
+
+ i2c = to_i2c_client(pdev->dev.parent);
+ if (!i2c)
+ return -EINVAL;
+
+ dev->of_node = dev->parent->of_node;
+ id = platform_get_device_id(pdev);
+ irq = platform_get_irq(pdev, 0);
+
+ return max17042_probe(i2c, dev, irq, id->driver_data);
+}
+
#ifdef CONFIG_PM_SLEEP
static int max17042_suspend(struct device *dev)
{
@@ -1155,9 +1181,9 @@ static int max17042_suspend(struct device *dev)
* disable the irq and enable irq_wake
* capability to the interrupt line.
*/
- if (chip->client->irq) {
- disable_irq(chip->client->irq);
- enable_irq_wake(chip->client->irq);
+ if (chip->irq) {
+ disable_irq(chip->irq);
+ enable_irq_wake(chip->irq);
}
return 0;
@@ -1167,9 +1193,9 @@ static int max17042_resume(struct device *dev)
{
struct max17042_chip *chip = dev_get_drvdata(dev);
- if (chip->client->irq) {
- disable_irq_wake(chip->client->irq);
- enable_irq(chip->client->irq);
+ if (chip->irq) {
+ disable_irq_wake(chip->irq);
+ enable_irq(chip->irq);
/* re-program the SOC thresholds to 1% change */
max17042_set_soc_threshold(chip, 1);
}
@@ -1190,12 +1216,28 @@ MODULE_DEVICE_TABLE(acpi, max17042_acpi_match);
#endif
#ifdef CONFIG_OF
-static const struct of_device_id max17042_dt_match[] = {
- { .compatible = "maxim,max17042" },
- { .compatible = "maxim,max17047" },
- { .compatible = "maxim,max17050" },
- { .compatible = "maxim,max17055" },
- { .compatible = "maxim,max77849-battery" },
+/*
+ * The device may be instantiated through a parent MFD device, in which case
+ * device matching is done through platform_device_id.
+ *
+ * However, if the device's DT node contains a proper compatible and the
+ * driver is built as a module, then *module* matching will be done through
+ * DT aliases. This requires an of_device_id table. At the same time this
+ * will not change the actual *device* matching, so do not set .of_match_table.
+ */
+static const struct of_device_id max17042_dt_match[] __used = {
+ { .compatible = "maxim,max17042",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17042 },
+ { .compatible = "maxim,max17047",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
+ { .compatible = "maxim,max17050",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17050 },
+ { .compatible = "maxim,max17055",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17055 },
+ { .compatible = "maxim,max77705-battery",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
+ { .compatible = "maxim,max77849-battery",
+ .data = (void *) MAXIM_DEVICE_TYPE_MAX17047 },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1211,6 +1253,17 @@ static const struct i2c_device_id max17042_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
+static const struct platform_device_id max17042_platform_id[] = {
+ { "max17042", MAXIM_DEVICE_TYPE_MAX17042 },
+ { "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
+ { "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
+ { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
+ { "max77705-battery", MAXIM_DEVICE_TYPE_MAX17047 },
+ { "max77849-battery", MAXIM_DEVICE_TYPE_MAX17047 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max17042_platform_id);
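/*
 * Illustrative sketch (assumed, not from this patch): a parent MFD driver
 * would typically instantiate this platform device through an mfd_cell
 * whose name matches one of the platform_device_id entries above, e.g.:
 *
 *	static const struct mfd_cell max77705_cells[] = {
 *		{
 *			.name		= "max77705-battery",
 *			.of_compatible	= "maxim,max77705-battery",
 *		},
 *	};
 */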
+
static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
@@ -1218,10 +1271,44 @@ static struct i2c_driver max17042_i2c_driver = {
.of_match_table = of_match_ptr(max17042_dt_match),
.pm = &max17042_pm_ops,
},
- .probe = max17042_probe,
+ .probe = max17042_i2c_probe,
.id_table = max17042_id,
};
-module_i2c_driver(max17042_i2c_driver);
+
+static struct platform_driver max17042_platform_driver = {
+ .driver = {
+ .name = "max17042",
+ .acpi_match_table = ACPI_PTR(max17042_acpi_match),
+ .pm = &max17042_pm_ops,
+ },
+ .probe = max17042_platform_probe,
+ .id_table = max17042_platform_id,
+};
+
+static int __init max17042_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&max17042_platform_driver);
+ if (ret)
+ return ret;
+
+ ret = i2c_add_driver(&max17042_i2c_driver);
+ if (ret) {
+ platform_driver_unregister(&max17042_platform_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(max17042_init);
+
+static void __exit max17042_exit(void)
+{
+ i2c_del_driver(&max17042_i2c_driver);
+ platform_driver_unregister(&max17042_platform_driver);
+}
+module_exit(max17042_exit);
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
index 33105419e242..11580e414713 100644
--- a/drivers/power/supply/max1720x_battery.c
+++ b/drivers/power/supply/max1720x_battery.c
@@ -16,6 +16,11 @@
#include <linux/unaligned.h>
+/* SBS compliant registers */
+#define MAX172XX_TEMP1 0x34
+#define MAX172XX_INT_TEMP 0x35
+#define MAX172XX_TEMP2 0x3B
+
/* Nonvolatile registers */
#define MAX1720X_NXTABLE0 0x80
#define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */
@@ -29,6 +34,7 @@
#define MAX172XX_TEMP 0x08 /* Temperature */
#define MAX172XX_CURRENT 0x0A /* Actual current */
#define MAX172XX_AVG_CURRENT 0x0B /* Average current */
+#define MAX172XX_FULL_CAP 0x10 /* Calculated full capacity */
#define MAX172XX_TTE 0x11 /* Time to empty */
#define MAX172XX_AVG_TA 0x16 /* Average temperature */
#define MAX172XX_CYCLES 0x17
@@ -112,11 +118,15 @@ static const struct regmap_config max1720x_regmap_cfg = {
};
static const struct regmap_range max1720x_nvmem_allow[] = {
+ regmap_reg_range(MAX172XX_TEMP1, MAX172XX_INT_TEMP),
+ regmap_reg_range(MAX172XX_TEMP2, MAX172XX_TEMP2),
regmap_reg_range(MAX1720X_NXTABLE0, MAX1720X_NDEVICE_NAME4),
};
static const struct regmap_range max1720x_nvmem_deny[] = {
- regmap_reg_range(0x00, 0x7F),
+ regmap_reg_range(0x00, 0x33),
+ regmap_reg_range(0x36, 0x3A),
+ regmap_reg_range(0x3C, 0x7F),
regmap_reg_range(0xE0, 0xFF),
};
@@ -250,6 +260,7 @@ static const enum power_supply_property max1720x_battery_props[] = {
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
@@ -362,6 +373,10 @@ static int max1720x_battery_get_property(struct power_supply *psy,
ret = regmap_read(info->regmap, MAX172XX_AVG_CURRENT, &reg_val);
val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = regmap_read(info->regmap, MAX172XX_FULL_CAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
ret = regmap_read(info->regmap, MAX172XX_DEV_NAME, &reg_val);
reg_val = FIELD_GET(MAX172XX_DEV_NAME_TYPE_MASK, reg_val);
@@ -382,6 +397,54 @@ static int max1720x_battery_get_property(struct power_supply *psy,
return ret;
}
+static int max1720x_read_temp(struct device *dev, u8 reg, char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct max1720x_device_info *info = power_supply_get_drvdata(psy);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(info->regmap_nv, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The register reports temperature in 0.1 degree steps counted from
+ * absolute zero (-273 C or 0 K), so subtracting 2730 yields tenths of
+ * a degree Celsius.
+ */
+ return sysfs_emit(buf, "%d\n", val - 2730);
+}
+
+static ssize_t temp_ain1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_TEMP1, buf);
+}
+
+static ssize_t temp_ain2_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_TEMP2, buf);
+}
+
+static ssize_t temp_int_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return max1720x_read_temp(dev, MAX172XX_INT_TEMP, buf);
+}
+
+static DEVICE_ATTR_RO(temp_ain1);
+static DEVICE_ATTR_RO(temp_ain2);
+static DEVICE_ATTR_RO(temp_int);
+
+static struct attribute *max1720x_attrs[] = {
+ &dev_attr_temp_ain1.attr,
+ &dev_attr_temp_ain2.attr,
+ &dev_attr_temp_int.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(max1720x);
+
static
int max1720x_nvmem_reg_read(void *priv, unsigned int off, void *val, size_t len)
{
@@ -482,6 +545,7 @@ static int max1720x_probe(struct i2c_client *client)
psy_cfg.drv_data = info;
psy_cfg.fwnode = dev_fwnode(dev);
+ psy_cfg.attr_grp = max1720x_groups;
i2c_set_clientdata(client, info);
info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg);
if (IS_ERR(info->regmap))
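As a quick worked example of the temperature conversion above (raw value assumed for illustration): a register reading of 3031, in 0.1-degree steps counted from absolute zero, gives 3031 - 2730 = 301, so temp_ain1/temp_ain2/temp_int would emit "301", i.e. 30.1 degrees Celsius.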
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index 5bcfaeeda3db..4adf2acc2779 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -90,7 +90,7 @@ static int mm8013_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct mm8013_chip *chip = psy->drv_data;
+ struct mm8013_chip *chip = power_supply_get_drvdata(psy);
int ret = 0;
u32 regval;
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 9f60094a5599..849f63e89ba0 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -527,7 +527,7 @@ static enum power_supply_property olpc_xo15_bat_props[] = {
#define EEPROM_SIZE (EEPROM_END - EEPROM_START)
static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
uint8_t ec_byte;
int ret;
@@ -547,13 +547,13 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute olpc_bat_eeprom = {
+static const struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
},
.size = EEPROM_SIZE,
- .read = olpc_bat_eeprom_read,
+ .read_new = olpc_bat_eeprom_read,
};
/* Allow userspace to see the specific error value pulled from the EC */
@@ -584,15 +584,14 @@ static struct attribute *olpc_bat_sysfs_attrs[] = {
NULL
};
-static struct bin_attribute *olpc_bat_sysfs_bin_attrs[] = {
+static const struct bin_attribute *const olpc_bat_sysfs_bin_attrs[] = {
&olpc_bat_eeprom,
NULL
};
static const struct attribute_group olpc_bat_sysfs_group = {
.attrs = olpc_bat_sysfs_attrs,
- .bin_attrs = olpc_bat_sysfs_bin_attrs,
-
+ .bin_attrs_new = olpc_bat_sysfs_bin_attrs,
};
static const struct attribute_group *olpc_bat_sysfs_groups[] = {
diff --git a/drivers/power/supply/power_supply.h b/drivers/power/supply/power_supply.h
index 7434a6f24775..8f6a2d44b996 100644
--- a/drivers/power/supply/power_supply.h
+++ b/drivers/power/supply/power_supply.h
@@ -9,24 +9,55 @@
* Modified: 2004, Oct Szabolcs Gyurko
*/
+#include <linux/lockdep.h>
+
struct device;
struct device_type;
struct power_supply;
extern int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp);
+extern bool power_supply_has_property(struct power_supply *psy,
+ enum power_supply_property psp);
+extern bool power_supply_ext_has_property(const struct power_supply_ext *ext,
+ enum power_supply_property psp);
+
+struct power_supply_ext_registration {
+ struct list_head list_head;
+ const struct power_supply_ext *ext;
+ struct device *dev;
+ void *data;
+};
+
+/* Make sure that the macro is a single expression */
+#define power_supply_for_each_extension(pos, psy) \
+ if ( ({ lockdep_assert_held(&(psy)->extensions_sem); 0; }) ) \
+ ; \
+ else \
+ list_for_each_entry(pos, &(psy)->extensions, list_head)
#ifdef CONFIG_SYSFS
extern void __init power_supply_init_attrs(void);
extern int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env);
extern const struct attribute_group *power_supply_attr_groups[];
+extern int power_supply_sysfs_add_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev);
+extern void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext);
#else
static inline void power_supply_init_attrs(void) {}
#define power_supply_attr_groups NULL
#define power_supply_uevent NULL
+static inline int power_supply_sysfs_add_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev)
+{ return 0; }
+static inline void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext) {}
#endif /* CONFIG_SYSFS */
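A minimal usage sketch of the iterator macro above, assuming a caller that takes extensions_sem itself (the function name is made up; the pattern mirrors the helpers added to power_supply_core.c below):

static bool example_ext_provides(struct power_supply *psy,
				 enum power_supply_property psp)
{
	struct power_supply_ext_registration *reg;

	/*
	 * Readers hold extensions_sem; the lockdep_assert_held() baked into
	 * the macro turns a missing lock into a loud warning.
	 */
	guard(rwsem_read)(&psy->extensions_sem);

	power_supply_for_each_extension(reg, psy)
		if (power_supply_ext_has_property(reg->ext, psp))
			return true;

	return false;
}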
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 16085eff0084..d0bb52a7a036 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -66,21 +66,19 @@ static bool __power_supply_is_supplied_by(struct power_supply *supplier,
return false;
}
-static int __power_supply_changed_work(struct device *dev, void *data)
+static int __power_supply_changed_work(struct power_supply *pst, void *data)
{
struct power_supply *psy = data;
- struct power_supply *pst = dev_get_drvdata(dev);
- if (__power_supply_is_supplied_by(psy, pst)) {
- if (pst->desc->external_power_changed)
- pst->desc->external_power_changed(pst);
- }
+ if (__power_supply_is_supplied_by(psy, pst))
+ power_supply_external_power_changed(pst);
return 0;
}
static void power_supply_changed_work(struct work_struct *work)
{
+ int ret;
unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
@@ -88,6 +86,16 @@ static void power_supply_changed_work(struct work_struct *work)
dev_dbg(&psy->dev, "%s\n", __func__);
spin_lock_irqsave(&psy->changed_lock, flags);
+
+ if (unlikely(psy->update_groups)) {
+ psy->update_groups = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+ ret = sysfs_update_groups(&psy->dev.kobj, power_supply_dev_type.groups);
+ if (ret)
+ dev_warn(&psy->dev, "failed to update sysfs groups: %pe\n", ERR_PTR(ret));
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+
/*
* Check 'changed' here to avoid issues due to race between
* power_supply_changed() and this routine. In worst case
@@ -98,7 +106,7 @@ static void power_supply_changed_work(struct work_struct *work)
if (likely(psy->changed)) {
psy->changed = false;
spin_unlock_irqrestore(&psy->changed_lock, flags);
- power_supply_for_each_device(psy, __power_supply_changed_work);
+ power_supply_for_each_psy(psy, __power_supply_changed_work);
power_supply_update_leds(psy);
blocking_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
@@ -116,11 +124,29 @@ static void power_supply_changed_work(struct work_struct *work)
spin_unlock_irqrestore(&psy->changed_lock, flags);
}
-int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data))
+struct psy_for_each_psy_cb_data {
+ int (*fn)(struct power_supply *psy, void *data);
+ void *data;
+};
+
+static int psy_for_each_psy_cb(struct device *dev, void *data)
{
- return class_for_each_device(&power_supply_class, NULL, data, fn);
+ struct psy_for_each_psy_cb_data *cb_data = data;
+ struct power_supply *psy = dev_to_psy(dev);
+
+ return cb_data->fn(psy, cb_data->data);
}
-EXPORT_SYMBOL_GPL(power_supply_for_each_device);
+
+int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data))
+{
+ struct psy_for_each_psy_cb_data cb_data = {
+ .fn = fn,
+ .data = data,
+ };
+
+ return class_for_each_device(&power_supply_class, NULL, &cb_data, psy_for_each_psy_cb);
+}
+EXPORT_SYMBOL_GPL(power_supply_for_each_psy);
void power_supply_changed(struct power_supply *psy)
{
@@ -166,11 +192,10 @@ static void power_supply_deferred_register_work(struct work_struct *work)
}
#ifdef CONFIG_OF
-static int __power_supply_populate_supplied_from(struct device *dev,
+static int __power_supply_populate_supplied_from(struct power_supply *epsy,
void *data)
{
struct power_supply *psy = data;
- struct power_supply *epsy = dev_get_drvdata(dev);
struct device_node *np;
int i = 0;
@@ -197,20 +222,19 @@ static int power_supply_populate_supplied_from(struct power_supply *psy)
{
int error;
- error = power_supply_for_each_device(psy, __power_supply_populate_supplied_from);
+ error = power_supply_for_each_psy(psy, __power_supply_populate_supplied_from);
dev_dbg(&psy->dev, "%s %d\n", __func__, error);
return error;
}
-static int __power_supply_find_supply_from_node(struct device *dev,
+static int __power_supply_find_supply_from_node(struct power_supply *epsy,
void *data)
{
struct device_node *np = data;
- struct power_supply *epsy = dev_get_drvdata(dev);
- /* returning non-zero breaks out of power_supply_for_each_device loop */
+ /* returning non-zero breaks out of power_supply_for_each_psy loop */
if (epsy->of_node == np)
return 1;
@@ -222,16 +246,16 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
int error;
/*
- * power_supply_for_each_device() either returns its own errors or values
+ * power_supply_for_each_psy() either returns its own errors or values
* returned by __power_supply_find_supply_from_node().
*
* __power_supply_find_supply_from_node() will return 0 (no match)
* or 1 (match).
*
- * We return 0 if power_supply_for_each_device() returned 1, -EPROBE_DEFER if
+ * We return 0 if power_supply_for_each_psy() returned 1, -EPROBE_DEFER if
* it returned 0, or error as returned by it.
*/
- error = power_supply_for_each_device(supply_node, __power_supply_find_supply_from_node);
+ error = power_supply_for_each_psy(supply_node, __power_supply_find_supply_from_node);
return error ? (error == 1 ? 0 : error) : -EPROBE_DEFER;
}
@@ -316,10 +340,9 @@ struct psy_am_i_supplied_data {
unsigned int count;
};
-static int __power_supply_am_i_supplied(struct device *dev, void *_data)
+static int __power_supply_am_i_supplied(struct power_supply *epsy, void *_data)
{
union power_supply_propval ret = {0,};
- struct power_supply *epsy = dev_get_drvdata(dev);
struct psy_am_i_supplied_data *data = _data;
if (__power_supply_is_supplied_by(epsy, data->psy)) {
@@ -337,7 +360,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
struct psy_am_i_supplied_data data = { psy, 0 };
int error;
- error = power_supply_for_each_device(&data, __power_supply_am_i_supplied);
+ error = power_supply_for_each_psy(&data, __power_supply_am_i_supplied);
dev_dbg(&psy->dev, "%s count %u err %d\n", __func__, data.count, error);
@@ -348,10 +371,9 @@ int power_supply_am_i_supplied(struct power_supply *psy)
}
EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
-static int __power_supply_is_system_supplied(struct device *dev, void *data)
+static int __power_supply_is_system_supplied(struct power_supply *psy, void *data)
{
union power_supply_propval ret = {0,};
- struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
@@ -372,7 +394,7 @@ int power_supply_is_system_supplied(void)
int error;
unsigned int count = 0;
- error = power_supply_for_each_device(&count, __power_supply_is_system_supplied);
+ error = power_supply_for_each_psy(&count, __power_supply_is_system_supplied);
/*
* If no system scope power class device was found at all, most probably we
@@ -391,9 +413,8 @@ struct psy_get_supplier_prop_data {
union power_supply_propval *val;
};
-static int __power_supply_get_supplier_property(struct device *dev, void *_data)
+static int __power_supply_get_supplier_property(struct power_supply *epsy, void *_data)
{
- struct power_supply *epsy = dev_get_drvdata(dev);
struct psy_get_supplier_prop_data *data = _data;
if (__power_supply_is_supplied_by(epsy, data->psy))
@@ -418,7 +439,7 @@ int power_supply_get_property_from_supplier(struct power_supply *psy,
* This function is not intended for use with a supply with multiple
* suppliers, we simply pick the first supply to report the psp.
*/
- ret = power_supply_for_each_device(&data, __power_supply_get_supplier_property);
+ ret = power_supply_for_each_psy(&data, __power_supply_get_supplier_property);
if (ret < 0)
return ret;
if (ret == 0)
@@ -444,7 +465,7 @@ EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
return strcmp(psy->desc->name, name) == 0;
}
@@ -467,7 +488,7 @@ struct power_supply *power_supply_get_by_name(const char *name)
power_supply_match_device_by_name);
if (dev) {
- psy = dev_get_drvdata(dev);
+ psy = dev_to_psy(dev);
atomic_inc(&psy->use_cnt);
}
@@ -524,7 +545,7 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
of_node_put(power_supply_np);
if (dev) {
- psy = dev_get_drvdata(dev);
+ psy = dev_to_psy(dev);
atomic_inc(&psy->use_cnt);
}
@@ -1180,8 +1201,8 @@ bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
}
EXPORT_SYMBOL_GPL(power_supply_battery_bti_in_range);
-static bool psy_has_property(const struct power_supply_desc *psy_desc,
- enum power_supply_property psp)
+static bool psy_desc_has_property(const struct power_supply_desc *psy_desc,
+ enum power_supply_property psp)
{
bool found = false;
int i;
@@ -1196,17 +1217,57 @@ static bool psy_has_property(const struct power_supply_desc *psy_desc,
return found;
}
+bool power_supply_ext_has_property(const struct power_supply_ext *psy_ext,
+ enum power_supply_property psp)
+{
+ int i;
+
+ for (i = 0; i < psy_ext->num_properties; i++)
+ if (psy_ext->properties[i] == psp)
+ return true;
+
+ return false;
+}
+
+bool power_supply_has_property(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ struct power_supply_ext_registration *reg;
+
+ if (psy_desc_has_property(psy->desc, psp))
+ return true;
+
+ if (power_supply_battery_info_has_prop(psy->battery_info, psp))
+ return true;
+
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp))
+ return true;
+ }
+
+ return false;
+}
+
int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct power_supply_ext_registration *reg;
+
if (atomic_read(&psy->use_cnt) <= 0) {
if (!psy->initialized)
return -EAGAIN;
return -ENODEV;
}
- if (psy_has_property(psy->desc, psp))
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp))
+ return reg->ext->get_property(psy, reg->ext, reg->data, psp, val);
+ }
+ }
+
+ if (psy_desc_has_property(psy->desc, psp))
return psy->desc->get_property(psy, psp, val);
else if (power_supply_battery_info_has_prop(psy->battery_info, psp))
return power_supply_battery_info_get_prop(psy->battery_info, psp, val);
@@ -1219,7 +1280,24 @@ int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
- if (atomic_read(&psy->use_cnt) <= 0 || !psy->desc->set_property)
+ struct power_supply_ext_registration *reg;
+
+ if (atomic_read(&psy->use_cnt) <= 0)
+ return -ENODEV;
+
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (reg->ext->set_property)
+ return reg->ext->set_property(psy, reg->ext, reg->data,
+ psp, val);
+ else
+ return -ENODEV;
+ }
+ }
+ }
+
+ if (!psy->desc->set_property)
return -ENODEV;
return psy->desc->set_property(psy, psp, val);
@@ -1229,7 +1307,22 @@ EXPORT_SYMBOL_GPL(power_supply_set_property);
int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp);
+ struct power_supply_ext_registration *reg;
+
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (reg->ext->property_is_writeable)
+ return reg->ext->property_is_writeable(psy, reg->ext,
+ reg->data, psp);
+ else
+ return 0;
+ }
+ }
+
+ if (!psy->desc->property_is_writeable)
+ return 0;
+
+ return psy->desc->property_is_writeable(psy, psp);
}
void power_supply_external_power_changed(struct power_supply *psy)
@@ -1248,6 +1341,88 @@ int power_supply_powers(struct power_supply *psy, struct device *dev)
}
EXPORT_SYMBOL_GPL(power_supply_powers);
+static int power_supply_update_sysfs_and_hwmon(struct power_supply *psy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->update_groups = true;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+
+ power_supply_changed(psy);
+
+ power_supply_remove_hwmon_sysfs(psy);
+ return power_supply_add_hwmon_sysfs(psy);
+}
+
+int power_supply_register_extension(struct power_supply *psy, const struct power_supply_ext *ext,
+ struct device *dev, void *data)
+{
+ struct power_supply_ext_registration *reg;
+ size_t i;
+ int ret;
+
+ if (!psy || !dev || !ext || !ext->name || !ext->properties || !ext->num_properties)
+ return -EINVAL;
+
+ guard(rwsem_write)(&psy->extensions_sem);
+
+ power_supply_for_each_extension(reg, psy)
+ if (strcmp(ext->name, reg->ext->name) == 0)
+ return -EEXIST;
+
+ for (i = 0; i < ext->num_properties; i++)
+ if (power_supply_has_property(psy, ext->properties[i]))
+ return -EEXIST;
+
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->ext = ext;
+ reg->dev = dev;
+ reg->data = data;
+ list_add(&reg->list_head, &psy->extensions);
+
+ ret = power_supply_sysfs_add_extension(psy, ext, dev);
+ if (ret)
+ goto sysfs_add_failed;
+
+ ret = power_supply_update_sysfs_and_hwmon(psy);
+ if (ret)
+ goto sysfs_hwmon_failed;
+
+ return 0;
+
+sysfs_hwmon_failed:
+ power_supply_sysfs_remove_extension(psy, ext);
+sysfs_add_failed:
+ list_del(&reg->list_head);
+ kfree(reg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(power_supply_register_extension);
+
+void power_supply_unregister_extension(struct power_supply *psy, const struct power_supply_ext *ext)
+{
+ struct power_supply_ext_registration *reg;
+
+ guard(rwsem_write)(&psy->extensions_sem);
+
+ power_supply_for_each_extension(reg, psy) {
+ if (reg->ext == ext) {
+ list_del(&reg->list_head);
+ power_supply_sysfs_remove_extension(psy, ext);
+ kfree(reg);
+ power_supply_update_sysfs_and_hwmon(psy);
+ return;
+ }
+ }
+
+ dev_warn(&psy->dev, "Trying to unregister invalid extension\n");
+}
+EXPORT_SYMBOL_GPL(power_supply_unregister_extension);
+
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = to_power_supply(dev);
@@ -1300,7 +1475,7 @@ static int psy_register_thermal(struct power_supply *psy)
return 0;
/* Register battery zone device psy reports temperature */
- if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_TEMP)) {
+ if (psy_desc_has_property(psy->desc, POWER_SUPPLY_PROP_TEMP)) {
/* Prefer our hwmon device and avoid duplicates */
struct thermal_zone_params tzp = {
.no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
@@ -1402,6 +1577,9 @@ __power_supply_register(struct device *parent,
}
spin_lock_init(&psy->changed_lock);
+ init_rwsem(&psy->extensions_sem);
+ INIT_LIST_HEAD(&psy->extensions);
+
rc = device_add(dev);
if (rc)
goto device_add_failed;
@@ -1414,13 +1592,15 @@ __power_supply_register(struct device *parent,
if (rc)
goto register_thermal_failed;
- rc = power_supply_create_triggers(psy);
- if (rc)
- goto create_triggers_failed;
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ rc = power_supply_create_triggers(psy);
+ if (rc)
+ goto create_triggers_failed;
- rc = power_supply_add_hwmon_sysfs(psy);
- if (rc)
- goto add_hwmon_sysfs_failed;
+ rc = power_supply_add_hwmon_sysfs(psy);
+ if (rc)
+ goto add_hwmon_sysfs_failed;
+ }
/*
* Update use_cnt after any uevents (most notably from device_add()).
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index 01be04903d7d..95245e6a6baa 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -349,9 +349,28 @@ static const struct hwmon_chip_info power_supply_hwmon_chip_info = {
.info = power_supply_hwmon_info,
};
+static const enum power_supply_property power_supply_hwmon_props[] = {
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+ POWER_SUPPLY_PROP_TEMP_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
int power_supply_add_hwmon_sysfs(struct power_supply *psy)
{
- const struct power_supply_desc *desc = psy->desc;
struct power_supply_hwmon *psyhw;
struct device *dev = &psy->dev;
struct device *hwmon;
@@ -377,32 +396,11 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
goto error;
}
- for (i = 0; i < desc->num_properties; i++) {
- const enum power_supply_property prop = desc->properties[i];
-
- switch (prop) {
- case POWER_SUPPLY_PROP_CURRENT_AVG:
- case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- case POWER_SUPPLY_PROP_POWER_AVG:
- case POWER_SUPPLY_PROP_POWER_NOW:
- case POWER_SUPPLY_PROP_TEMP:
- case POWER_SUPPLY_PROP_TEMP_MAX:
- case POWER_SUPPLY_PROP_TEMP_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ for (i = 0; i < ARRAY_SIZE(power_supply_hwmon_props); i++) {
+ const enum power_supply_property prop = power_supply_hwmon_props[i];
+
+ if (power_supply_has_property(psy, prop))
set_bit(prop, psyhw->props);
- break;
- default:
- break;
- }
}
name = psy->desc->name;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 571de43fcca9..edb058c19c9c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -99,6 +99,7 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
[POWER_SUPPLY_HEALTH_OVERHEAT] = "Overheat",
[POWER_SUPPLY_HEALTH_DEAD] = "Dead",
[POWER_SUPPLY_HEALTH_OVERVOLTAGE] = "Over voltage",
+ [POWER_SUPPLY_HEALTH_UNDERVOLTAGE] = "Under voltage",
[POWER_SUPPLY_HEALTH_UNSPEC_FAILURE] = "Unspecified failure",
[POWER_SUPPLY_HEALTH_COLD] = "Cold",
[POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE] = "Watchdog timer expire",
@@ -182,6 +183,8 @@ static struct power_supply_attr power_supply_attrs[] __ro_after_init = {
POWER_SUPPLY_ATTR(CHARGE_CONTROL_START_THRESHOLD),
POWER_SUPPLY_ATTR(CHARGE_CONTROL_END_THRESHOLD),
POWER_SUPPLY_ENUM_ATTR(CHARGE_BEHAVIOUR),
+ /* Shares its enum value texts with the existing "charge_type" attribute (same name minus the trailing 's') */
+ _POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPES, POWER_SUPPLY_CHARGE_TYPE_TEXT),
POWER_SUPPLY_ATTR(INPUT_CURRENT_LIMIT),
POWER_SUPPLY_ATTR(INPUT_VOLTAGE_LIMIT),
POWER_SUPPLY_ATTR(INPUT_POWER_LIMIT),
@@ -237,23 +240,52 @@ static enum power_supply_property dev_attr_psp(struct device_attribute *attr)
return to_ps_attr(attr) - power_supply_attrs;
}
+static void power_supply_escape_spaces(const char *str, char *buf, size_t bufsize)
+{
+ strscpy(buf, str, bufsize);
+ strreplace(buf, ' ', '_');
+}
+
+static int power_supply_match_string(const char * const *array, size_t n, const char *s)
+{
+ int ret;
+
+ /* First try an exact match */
+ ret = __sysfs_match_string(array, n, s);
+ if (ret >= 0)
+ return ret;
+
+ /* Second round, try matching with spaces replaced by '_' */
+ for (size_t i = 0; i < n; i++) {
+ char buf[32];
+
+ power_supply_escape_spaces(array[i], buf, sizeof(buf));
+ if (sysfs_streq(buf, s))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static ssize_t power_supply_show_enum_with_available(
struct device *dev, const char * const labels[], int label_count,
unsigned int available_values, int value, char *buf)
{
bool match = false, available, active;
+ char escaped_label[32];
ssize_t count = 0;
int i;
for (i = 0; i < label_count; i++) {
available = available_values & BIT(i);
active = i == value;
+ power_supply_escape_spaces(labels[i], escaped_label, sizeof(escaped_label));
if (available && active) {
- count += sysfs_emit_at(buf, count, "[%s] ", labels[i]);
+ count += sysfs_emit_at(buf, count, "[%s] ", escaped_label);
match = true;
} else if (available) {
- count += sysfs_emit_at(buf, count, "%s ", labels[i]);
+ count += sysfs_emit_at(buf, count, "%s ", escaped_label);
}
}
@@ -268,11 +300,34 @@ static ssize_t power_supply_show_enum_with_available(
return count;
}
-static ssize_t power_supply_show_property(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
+static ssize_t power_supply_show_charge_behaviour(struct device *dev,
+ struct power_supply *psy,
+ union power_supply_propval *value,
+ char *buf)
+{
+ struct power_supply_ext_registration *reg;
+
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR))
+ return power_supply_charge_behaviour_show(dev,
+ reg->ext->charge_behaviours,
+ value->intval, buf);
+ }
+ }
+
+ return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+ value->intval, buf);
+}
+
+static ssize_t power_supply_format_property(struct device *dev,
+ bool uevent,
+ struct device_attribute *attr,
+ char *buf)
+{
ssize_t ret;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
const struct power_supply_attr *ps_attr = to_ps_attr(attr);
enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
@@ -287,7 +342,7 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg_ratelimited(dev,
"driver has no data for `%s' property\n",
attr->attr.name);
- else if (ret != -ENODEV && ret != -EAGAIN)
+ else if (ret != -ENODEV && ret != -EAGAIN && ret != -EINVAL)
dev_err_ratelimited(dev,
"driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
@@ -303,13 +358,21 @@ static ssize_t power_supply_show_property(struct device *dev,
psy->desc->usb_types, value.intval, buf);
break;
case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
- value.intval, buf);
+ if (uevent) /* no possible values in uevents */
+ goto default_format;
+ ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
+ if (uevent) /* no possible values in uevents */
+ goto default_format;
+ ret = power_supply_charge_types_show(dev, psy->desc->charge_types,
+ value.intval, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sysfs_emit(buf, "%s\n", value.strval);
break;
default:
+default_format:
if (ps_attr->text_values_len > 0 &&
value.intval < ps_attr->text_values_len && value.intval >= 0) {
ret = sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
@@ -321,19 +384,26 @@ static ssize_t power_supply_show_property(struct device *dev,
return ret;
}
+static ssize_t power_supply_show_property(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return power_supply_format_property(dev, false, attr, buf);
+}
+
static ssize_t power_supply_store_property(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count) {
ssize_t ret;
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
const struct power_supply_attr *ps_attr = to_ps_attr(attr);
enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
ret = -EINVAL;
if (ps_attr->text_values_len > 0) {
- ret = __sysfs_match_string(ps_attr->text_values,
- ps_attr->text_values_len, buf);
+ ret = power_supply_match_string(ps_attr->text_values,
+ ps_attr->text_values_len, buf);
}
/*
@@ -364,9 +434,8 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
int attrno)
{
struct device *dev = kobj_to_dev(kobj);
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
- int i;
if (!power_supply_attrs[attrno].prop_name)
return 0;
@@ -374,19 +443,13 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
if (attrno == POWER_SUPPLY_PROP_TYPE)
return mode;
- for (i = 0; i < psy->desc->num_properties; i++) {
- int property = psy->desc->properties[i];
-
- if (property == attrno) {
- if (power_supply_property_is_writeable(psy, property) > 0)
- mode |= S_IWUSR;
+ guard(rwsem_read)(&psy->extensions_sem);
- return mode;
- }
- }
-
- if (power_supply_battery_info_has_prop(psy->battery_info, attrno))
+ if (power_supply_has_property(psy, attrno)) {
+ if (power_supply_property_is_writeable(psy, attrno) > 0)
+ mode |= S_IWUSR;
return mode;
+ }
return 0;
}
@@ -396,8 +459,18 @@ static const struct attribute_group power_supply_attr_group = {
.is_visible = power_supply_attr_is_visible,
};
+static struct attribute *power_supply_extension_attrs[] = {
+ NULL
+};
+
+static const struct attribute_group power_supply_extension_group = {
+ .name = "extensions",
+ .attrs = power_supply_extension_attrs,
+};
+
const struct attribute_group *power_supply_attr_groups[] = {
&power_supply_attr_group,
+ &power_supply_extension_group,
NULL
};
@@ -437,8 +510,8 @@ static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env
pwr_attr = &power_supply_attrs[prop];
dev_attr = &pwr_attr->dev_attr;
- ret = power_supply_show_property((struct device *)dev, dev_attr, prop_buf);
- if (ret == -ENODEV || ret == -ENODATA) {
+ ret = power_supply_format_property((struct device *)dev, true, dev_attr, prop_buf);
+ if (ret == -ENODEV || ret == -ENODATA || ret == -EINVAL) {
/*
* When a battery is absent, we expect -ENODEV. Don't abort;
* send the uevent with at least the PRESENT=0 property
@@ -459,11 +532,7 @@ static int add_prop_uevent(const struct device *dev, struct kobj_uevent_env *env
int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- const struct power_supply *psy = dev_get_drvdata(dev);
- const enum power_supply_property *battery_props =
- power_supply_battery_info_properties;
- unsigned long psy_drv_properties[POWER_SUPPLY_ATTR_CNT /
- sizeof(unsigned long) + 1] = {0};
+ const struct power_supply *psy = dev_to_psy(dev);
int ret = 0, j;
char *prop_buf;
@@ -491,22 +560,8 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (ret)
goto out;
- for (j = 0; j < psy->desc->num_properties; j++) {
- set_bit(psy->desc->properties[j], psy_drv_properties);
- ret = add_prop_uevent(dev, env, psy->desc->properties[j],
- prop_buf);
- if (ret)
- goto out;
- }
-
- for (j = 0; j < power_supply_battery_info_properties_size; j++) {
- if (test_bit(battery_props[j], psy_drv_properties))
- continue;
- if (!power_supply_battery_info_has_prop(psy->battery_info,
- battery_props[j]))
- continue;
- ret = add_prop_uevent(dev, env, battery_props[j],
- prop_buf);
+ for (j = 0; j < POWER_SUPPLY_ATTR_CNT; j++) {
+ ret = add_prop_uevent(dev, env, j, prop_buf);
if (ret)
goto out;
}
@@ -542,3 +597,44 @@ int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const
return -EINVAL;
}
EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_parse);
+
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ return power_supply_show_enum_with_available(
+ dev, POWER_SUPPLY_CHARGE_TYPE_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_CHARGE_TYPE_TEXT),
+ available_types, current_type, buf);
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_types_show);
+
+int power_supply_charge_types_parse(unsigned int available_types, const char *buf)
+{
+ int i = power_supply_match_string(POWER_SUPPLY_CHARGE_TYPE_TEXT,
+ ARRAY_SIZE(POWER_SUPPLY_CHARGE_TYPE_TEXT),
+ buf);
+
+ if (i < 0)
+ return i;
+
+ if (available_types & BIT(i))
+ return i;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_types_parse);
+
+int power_supply_sysfs_add_extension(struct power_supply *psy, const struct power_supply_ext *ext,
+ struct device *dev)
+{
+ return sysfs_add_link_to_group(&psy->dev.kobj, power_supply_extension_group.name,
+ &dev->kobj, ext->name);
+}
+
+void power_supply_sysfs_remove_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext)
+{
+ sysfs_remove_link_from_group(&psy->dev.kobj, power_supply_extension_group.name, ext->name);
+}
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index a6c204c08232..6f3d0413b1c1 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -21,6 +21,7 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string_choices.h>
enum {
REG_MANUFACTURER_DATA,
@@ -320,8 +321,8 @@ static int sbs_update_presence(struct sbs_info *chip, bool is_present)
client->flags &= ~I2C_CLIENT_PEC;
}
- dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ?
- "enabled" : "disabled");
+ dev_dbg(&client->dev, "PEC: %s\n",
+ str_enabled_disabled(client->flags & I2C_CLIENT_PEC));
if (!chip->is_present && is_present && !chip->charger_broadcasts)
sbs_disable_charger_broadcasts(chip);
diff --git a/drivers/power/supply/stc3117_fuel_gauge.c b/drivers/power/supply/stc3117_fuel_gauge.c
new file mode 100644
index 000000000000..a1bc5970370a
--- /dev/null
+++ b/drivers/power/supply/stc3117_fuel_gauge.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stc3117_fuel_gauge.c - STMicroelectronics STC3117 Fuel Gauge Driver
+ *
+ * Copyright (c) 2024 Silicon Signals Pvt Ltd.
+ * Author: Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>
+ * Bhavin Sharma <bhavin.sharma@siliconsignals.io>
+ */
+
+#include <linux/crc8.h>
+#include <linux/devm-helpers.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+#define STC3117_ADDR_MODE 0x00
+#define STC3117_ADDR_CTRL 0x01
+#define STC3117_ADDR_SOC_L 0x02
+#define STC3117_ADDR_SOC_H 0x03
+#define STC3117_ADDR_COUNTER_L 0x04
+#define STC3117_ADDR_COUNTER_H 0x05
+#define STC3117_ADDR_CURRENT_L 0x06
+#define STC3117_ADDR_CURRENT_H 0x07
+#define STC3117_ADDR_VOLTAGE_L 0x08
+#define STC3117_ADDR_VOLTAGE_H 0x09
+#define STC3117_ADDR_TEMPERATURE 0x0A
+#define STC3117_ADDR_AVG_CURRENT_L 0x0B
+#define STC3117_ADDR_AVG_CURRENT_H 0x0C
+#define STC3117_ADDR_OCV_L 0x0D
+#define STC3117_ADDR_OCV_H 0x0E
+#define STC3117_ADDR_CC_CNF_L 0x0F
+#define STC3117_ADDR_CC_CNF_H 0x10
+#define STC3117_ADDR_VM_CNF_L 0x11
+#define STC3117_ADDR_VM_CNF_H 0x12
+#define STC3117_ADDR_ALARM_SOC 0x13
+#define STC3117_ADDR_ALARM_VOLTAGE 0x14
+#define STC3117_ADDR_ID 0x18
+#define STC3117_ADDR_CC_ADJ_L 0x1B
+#define STC3117_ADDR_CC_ADJ_H 0x1C
+#define STC3117_ADDR_VM_ADJ_L 0x1D
+#define STC3117_ADDR_VM_ADJ_H 0x1E
+#define STC3117_ADDR_RAM 0x20
+#define STC3117_ADDR_OCV_TABLE 0x30
+#define STC3117_ADDR_SOC_TABLE 0x30
+
+/* Bit mask definition */
+#define STC3117_ID 0x16
+#define STC3117_MIXED_MODE 0x00
+#define STC3117_VMODE BIT(0)
+#define STC3117_GG_RUN BIT(4)
+#define STC3117_CC_MODE BIT(5)
+#define STC3117_BATFAIL BIT(3)
+#define STC3117_PORDET BIT(4)
+#define STC3117_RAM_SIZE 16
+#define STC3117_OCV_TABLE_SIZE 16
+#define STC3117_RAM_TESTWORD 0x53A9
+#define STC3117_SOFT_RESET 0x11
+#define STC3117_NOMINAL_CAPACITY 2600
+
+#define VOLTAGE_LSB_VALUE 9011
+#define CURRENT_LSB_VALUE 24084
+#define APP_CUTOFF_VOLTAGE 2500
+#define MAX_HRSOC 51200
+#define MAX_SOC 1000
+#define CHG_MIN_CURRENT 200
+#define CHG_END_CURRENT 20
+#define APP_MIN_CURRENT (-5)
+#define BATTERY_FULL 95
+#define CRC8_POLYNOMIAL 0x07
+#define CRC8_INIT 0x00
+
+DECLARE_CRC8_TABLE(stc3117_crc_table);
+
+enum stc3117_state {
+ STC3117_INIT,
+ STC3117_RUNNING,
+ STC3117_POWERDN,
+};
+
+/* Default ocv curve Li-ion battery */
+static const int ocv_value[16] = {
+ 3400, 3582, 3669, 3676, 3699, 3737, 3757, 3774,
+ 3804, 3844, 3936, 3984, 4028, 4131, 4246, 4320
+};
+
+union stc3117_internal_ram {
+ u8 ram_bytes[STC3117_RAM_SIZE];
+ struct {
+ u16 testword; /* 0-1 Bytes */
+ u16 hrsoc; /* 2-3 Bytes */
+ u16 cc_cnf; /* 4-5 Bytes */
+ u16 vm_cnf; /* 6-7 Bytes */
+ u8 soc; /* 8 Byte */
+ u8 state; /* 9 Byte */
+ u8 unused[5]; /* 10-14 Bytes */
+ u8 crc; /* 15 Byte */
+ } reg;
+};
+
+struct stc3117_battery_info {
+ int voltage_min_mv;
+ int voltage_max_mv;
+ int battery_capacity_mah;
+ int sense_resistor;
+};
+
+struct stc3117_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct delayed_work update_work;
+ struct power_supply *battery;
+ union stc3117_internal_ram ram_data;
+ struct stc3117_battery_info battery_info;
+
+ u8 soc_tab[16];
+ int cc_cnf;
+ int vm_cnf;
+ int cc_adj;
+ int vm_adj;
+ int avg_current;
+ int avg_voltage;
+ int batt_current;
+ int voltage;
+ int temp;
+ int soc;
+ int ocv;
+ int hrsoc;
+ int presence;
+};
+
+static int stc3117_convert(int value, int factor)
+{
+ value = (value * factor) / 4096;
+ return value * 1000;
+}
+
+static int stc3117_get_battery_data(struct stc3117_data *data)
+{
+ u8 reg_list[16];
+ u8 data_adjust[4];
+ int value, mode;
+
+ regmap_bulk_read(data->regmap, STC3117_ADDR_MODE,
+ reg_list, sizeof(reg_list));
+
+ /* soc */
+ value = (reg_list[3] << 8) + reg_list[2];
+ data->hrsoc = value;
+ data->soc = (value * 10 + 256) / 512;
+
+ /* current in uA */
+ value = (reg_list[7] << 8) + reg_list[6];
+ data->batt_current = stc3117_convert(value,
+ CURRENT_LSB_VALUE / data->battery_info.sense_resistor);
+
+ /* voltage in uV */
+ value = (reg_list[9] << 8) + reg_list[8];
+ data->voltage = stc3117_convert(value, VOLTAGE_LSB_VALUE);
+
+ /* temp in 1/10 °C */
+ data->temp = reg_list[10] * 10;
+
+ /* Avg current in uA */
+ value = (reg_list[12] << 8) + reg_list[11];
+ regmap_read(data->regmap, STC3117_ADDR_MODE, &mode);
+ if (!(mode & STC3117_VMODE)) {
+ value = stc3117_convert(value,
+ CURRENT_LSB_VALUE / data->battery_info.sense_resistor);
+ value = value / 4;
+ } else {
+ value = stc3117_convert(value, 36 * STC3117_NOMINAL_CAPACITY);
+ }
+ data->avg_current = value;
+
+ /* ocv in uV */
+ value = (reg_list[14] << 8) + reg_list[13];
+ value = stc3117_convert(value, VOLTAGE_LSB_VALUE);
+ value = (value + 2) / 4;
+ data->ocv = value;
+
+ /* CC & VM adjustment counters */
+ regmap_bulk_read(data->regmap, STC3117_ADDR_CC_ADJ_L,
+ data_adjust, sizeof(data_adjust));
+ value = (data_adjust[1] << 8) + data_adjust[0];
+ data->cc_adj = value;
+
+ value = (data_adjust[3] << 8) + data_adjust[2];
+ data->vm_adj = value;
+
+ return 0;
+}
+
+static int ram_write(struct stc3117_data *data)
+{
+ int ret;
+
+ ret = regmap_bulk_write(data->regmap, STC3117_ADDR_RAM,
+ data->ram_data.ram_bytes, STC3117_RAM_SIZE);
+ if (ret)
+ return ret;
+
+ return 0;
+};
+
+static int ram_read(struct stc3117_data *data)
+{
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, STC3117_ADDR_RAM,
+ data->ram_data.ram_bytes, STC3117_RAM_SIZE);
+ if (ret)
+ return ret;
+
+ return 0;
+};
+
+static int stc3117_set_para(struct stc3117_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, STC3117_ADDR_MODE, STC3117_VMODE);
+
+ for (int i = 0; i < STC3117_OCV_TABLE_SIZE; i++)
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_TABLE + i,
+ ocv_value[i] * 100 / 55);
+ if (data->soc_tab[1] != 0)
+ ret |= regmap_bulk_write(data->regmap, STC3117_ADDR_SOC_TABLE,
+ data->soc_tab, STC3117_OCV_TABLE_SIZE);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CC_CNF_H,
+ (data->ram_data.reg.cc_cnf >> 8) & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CC_CNF_L,
+ data->ram_data.reg.cc_cnf & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_VM_CNF_H,
+ (data->ram_data.reg.vm_cnf >> 8) & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_VM_CNF_L,
+ data->ram_data.reg.vm_cnf & 0xFF);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CTRL, 0x03);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_MODE,
+ STC3117_MIXED_MODE | STC3117_GG_RUN);
+
+ return ret;
+};
+
+static int stc3117_init(struct stc3117_data *data)
+{
+ int id, ret;
+ int ctrl;
+ int ocv_m, ocv_l;
+
+ regmap_read(data->regmap, STC3117_ADDR_ID, &id);
+ if (id != STC3117_ID)
+ return -EINVAL;
+
+ data->cc_cnf = (data->battery_info.battery_capacity_mah *
+ data->battery_info.sense_resistor * 250 + 6194) / 12389;
+ data->vm_cnf = (data->battery_info.battery_capacity_mah
+ * 200 * 50 + 24444) / 48889;
+
+ /* Battery has not been removed */
+ data->presence = 1;
+
+ /* Read RAM data */
+ ret = ram_read(data);
+ if (ret)
+ return ret;
+
+ if (data->ram_data.reg.testword != STC3117_RAM_TESTWORD ||
+ (crc8(stc3117_crc_table, data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE, CRC8_INIT)) != 0) {
+ data->ram_data.reg.testword = STC3117_RAM_TESTWORD;
+ data->ram_data.reg.cc_cnf = data->cc_cnf;
+ data->ram_data.reg.vm_cnf = data->vm_cnf;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap, STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_read(data->regmap, STC3117_ADDR_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ if ((ctrl & STC3117_BATFAIL) != 0 ||
+ (ctrl & STC3117_PORDET) != 0) {
+ ret = regmap_read(data->regmap,
+ STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap,
+ STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap,
+ STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap,
+ STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ } else {
+ ret = stc3117_set_para(data);
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_H,
+ (data->ram_data.reg.hrsoc >> 8 & 0xFF));
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_L,
+ (data->ram_data.reg.hrsoc & 0xFF));
+ if (ret)
+ return ret;
+ }
+ }
+
+ data->ram_data.reg.state = STC3117_INIT;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+ ret = ram_write(data);
+ if (ret)
+ return ret;
+
+ return 0;
+};
+
+static int stc3117_task(struct stc3117_data *data)
+{
+ int id, mode, ret;
+ int count_l, count_m;
+ int ocv_l, ocv_m;
+
+ regmap_read(data->regmap, STC3117_ADDR_ID, &id);
+ if (id != STC3117_ID) {
+ data->presence = 0;
+ return -EINVAL;
+ }
+
+ stc3117_get_battery_data(data);
+
+ /* Read RAM data */
+ ret = ram_read(data);
+ if (ret)
+ return ret;
+
+ if (data->ram_data.reg.testword != STC3117_RAM_TESTWORD ||
+ (crc8(stc3117_crc_table, data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE, CRC8_INIT) != 0)) {
+ data->ram_data.reg.testword = STC3117_RAM_TESTWORD;
+ data->ram_data.reg.cc_cnf = data->cc_cnf;
+ data->ram_data.reg.vm_cnf = data->vm_cnf;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+ data->ram_data.reg.state = STC3117_INIT;
+ }
+
+ /* check battery presence status */
+ ret = regmap_read(data->regmap, STC3117_ADDR_CTRL, &mode);
+ if ((mode & STC3117_BATFAIL) != 0) {
+ data->presence = 0;
+ data->ram_data.reg.testword = 0;
+ data->ram_data.reg.state = STC3117_INIT;
+ ret = ram_write(data);
+ ret |= regmap_write(data->regmap, STC3117_ADDR_CTRL, STC3117_PORDET);
+ if (ret)
+ return ret;
+ }
+
+ data->presence = 1;
+
+ ret = regmap_read(data->regmap, STC3117_ADDR_MODE, &mode);
+ if (ret)
+ return ret;
+ if ((mode & STC3117_GG_RUN) == 0) {
+ if (data->ram_data.reg.state > STC3117_INIT) {
+ ret = stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_H,
+ (data->ram_data.reg.hrsoc >> 8 & 0xFF));
+ ret |= regmap_write(data->regmap, STC3117_ADDR_SOC_L,
+ (data->ram_data.reg.hrsoc & 0xFF));
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_read(data->regmap, STC3117_ADDR_OCV_H, &ocv_m);
+
+ ret |= regmap_read(data->regmap, STC3117_ADDR_OCV_L, &ocv_l);
+
+ ret |= stc3117_set_para(data);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_H, ocv_m);
+
+ ret |= regmap_write(data->regmap, STC3117_ADDR_OCV_L, ocv_l);
+ if (ret)
+ return ret;
+ }
+ data->ram_data.reg.state = STC3117_INIT;
+ }
+
+ regmap_read(data->regmap, STC3117_ADDR_COUNTER_L, &count_l);
+ regmap_read(data->regmap, STC3117_ADDR_COUNTER_H, &count_m);
+
+ count_m = (count_m << 8) + count_l;
+
+ /* INIT state: wait until batt_current and temperature values are available */
+ if (data->ram_data.reg.state == STC3117_INIT && count_m > 4) {
+ data->avg_voltage = data->voltage;
+ data->avg_current = data->batt_current;
+ data->ram_data.reg.state = STC3117_RUNNING;
+ }
+
+ if (data->ram_data.reg.state != STC3117_RUNNING) {
+ data->batt_current = -ENODATA;
+ data->temp = -ENODATA;
+ } else {
+ if (data->voltage < APP_CUTOFF_VOLTAGE)
+ data->soc = -ENODATA;
+
+ if (mode & STC3117_VMODE) {
+ data->avg_current = -ENODATA;
+ data->batt_current = -ENODATA;
+ }
+ }
+
+ data->ram_data.reg.hrsoc = data->hrsoc;
+ data->ram_data.reg.soc = (data->soc + 5) / 10;
+ data->ram_data.reg.crc = crc8(stc3117_crc_table,
+ data->ram_data.ram_bytes,
+ STC3117_RAM_SIZE - 1, CRC8_INIT);
+
+ ret = ram_write(data);
+ if (ret)
+ return ret;
+ return 0;
+};
+
+static void fuel_gauge_update_work(struct work_struct *work)
+{
+ struct stc3117_data *data =
+ container_of(work, struct stc3117_data, update_work.work);
+
+ stc3117_task(data);
+
+ /* Schedule the work to run again in 2 seconds */
+ schedule_delayed_work(&data->update_work, msecs_to_jiffies(2000));
+}
+
+static int stc3117_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct stc3117_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (data->soc > BATTERY_FULL)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (data->batt_current < 0)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->batt_current > 0)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = data->voltage;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = data->batt_current;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = data->ocv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = data->avg_current;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = data->soc;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = data->temp;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = data->presence;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property stc3117_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+static const struct power_supply_desc stc3117_battery_desc = {
+ .name = "stc3117-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = stc3117_get_property,
+ .properties = stc3117_battery_props,
+ .num_properties = ARRAY_SIZE(stc3117_battery_props),
+};
+
+static const struct regmap_config stc3117_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int stc3117_probe(struct i2c_client *client)
+{
+ struct stc3117_data *data;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply_battery_info *info;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->regmap = devm_regmap_init_i2c(client, &stc3117_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ psy_cfg.drv_data = data;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
+
+ crc8_populate_msb(stc3117_crc_table, CRC8_POLYNOMIAL);
+
+ data->battery = devm_power_supply_register(&client->dev,
+ &stc3117_battery_desc, &psy_cfg);
+ if (IS_ERR(data->battery))
+ return dev_err_probe(&client->dev, PTR_ERR(data->battery),
+ "failed to register battery\n");
+
+ ret = device_property_read_u32(&client->dev, "shunt-resistor-micro-ohms",
+ &data->battery_info.sense_resistor);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to get shunt-resistor-micro-ohms\n");
+ data->battery_info.sense_resistor = data->battery_info.sense_resistor / 1000;
+
+ ret = power_supply_get_battery_info(data->battery, &info);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to get battery information\n");
+
+ data->battery_info.battery_capacity_mah = info->charge_full_design_uah / 1000;
+ data->battery_info.voltage_min_mv = info->voltage_min_design_uv / 1000;
+ data->battery_info.voltage_max_mv = info->voltage_max_design_uv / 1000;
+
+ ret = stc3117_init(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to initialize of stc3117\n");
+
+ ret = devm_delayed_work_autocancel(&client->dev, &data->update_work,
+ fuel_gauge_update_work);
+ if (ret)
+ return ret;
+
+ schedule_delayed_work(&data->update_work, 0);
+
+ return 0;
+}
+
+static const struct i2c_device_id stc3117_id[] = {
+ { "stc3117", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, stc3117_id);
+
+static const struct of_device_id stc3117_of_match[] = {
+ { .compatible = "st,stc3117" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stc3117_of_match);
+
+static struct i2c_driver stc3117_i2c_driver = {
+ .driver = {
+ .name = "stc3117_i2c_driver",
+ .of_match_table = stc3117_of_match,
+ },
+ .probe = stc3117_probe,
+ .id_table = stc3117_id,
+};
+
+module_i2c_driver(stc3117_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>");
+MODULE_AUTHOR("Bhavin Sharma <bhavin.sharma@siliconsignals.io>");
+MODULE_DESCRIPTION("STC3117 Fuel Gauge Driver");
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index ebd1edde28f1..c759add4df49 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -667,7 +667,7 @@ out:
static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
int status;
@@ -681,7 +681,7 @@ static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, cha
static ssize_t alarm_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
- struct power_supply *psy = dev_get_drvdata(dev);
+ struct power_supply *psy = dev_to_psy(dev);
struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
unsigned long value;
int status;
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 442ceb7795e1..2a975a110f48 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -37,6 +37,7 @@ static int battery_charge_counter = -1000;
static int battery_current = -1600;
static enum power_supply_charge_behaviour battery_charge_behaviour =
POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+static bool battery_extension;
static bool module_initialized;
@@ -238,6 +239,87 @@ static const struct power_supply_config test_power_configs[] = {
},
};
+static int test_power_battery_extmanufacture_year = 1234;
+static int test_power_battery_exttemp_max = 1000;
+static const enum power_supply_property test_power_battery_extprops[] = {
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+};
+
+static int test_power_battery_extget_property(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ val->intval = test_power_battery_extmanufacture_year;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ val->intval = test_power_battery_exttemp_max;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int test_power_battery_extset_property(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ test_power_battery_extmanufacture_year = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ test_power_battery_exttemp_max = val->intval;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int test_power_battery_extproperty_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp)
+{
+ return true;
+}
+
+static const struct power_supply_ext test_power_battery_ext = {
+ .name = "test_power",
+ .properties = test_power_battery_extprops,
+ .num_properties = ARRAY_SIZE(test_power_battery_extprops),
+ .get_property = test_power_battery_extget_property,
+ .set_property = test_power_battery_extset_property,
+ .property_is_writeable = test_power_battery_extproperty_is_writeable,
+};
+
+static void test_power_configure_battery_extension(bool enable)
+{
+ struct power_supply *psy;
+
+ psy = test_power_supplies[TEST_BATTERY];
+
+ if (enable) {
+ if (power_supply_register_extension(psy, &test_power_battery_ext, &psy->dev,
+ NULL)) {
+ pr_err("registering battery extension failed\n");
+ return;
+ }
+ } else {
+ power_supply_unregister_extension(psy, &test_power_battery_ext);
+ }
+
+ battery_extension = enable;
+}
+
static int __init test_power_init(void)
{
int i;
@@ -258,6 +340,8 @@ static int __init test_power_init(void)
}
}
+ test_power_configure_battery_extension(true);
+
module_initialized = true;
return 0;
failed:
@@ -524,6 +608,26 @@ static int param_set_battery_current(const char *key,
#define param_get_battery_current param_get_int
+static int param_set_battery_extension(const char *key,
+ const struct kernel_param *kp)
+{
+ bool prev_battery_extension;
+ int ret;
+
+ prev_battery_extension = battery_extension;
+
+ ret = param_set_bool(key, kp);
+ if (ret)
+ return ret;
+
+ if (prev_battery_extension != battery_extension)
+ test_power_configure_battery_extension(battery_extension);
+
+ return 0;
+}
+
+#define param_get_battery_extension param_get_bool
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -574,6 +678,11 @@ static const struct kernel_param_ops param_ops_battery_current = {
.get = param_get_battery_current,
};
+static const struct kernel_param_ops param_ops_battery_extension = {
+ .set = param_set_battery_extension,
+ .get = param_get_battery_extension,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -584,6 +693,7 @@ static const struct kernel_param_ops param_ops_battery_current = {
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
#define param_check_battery_current(name, p) __param_check(name, p, void);
+#define param_check_battery_extension(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
@@ -621,6 +731,9 @@ MODULE_PARM_DESC(battery_charge_counter,
module_param(battery_current, battery_current, 0644);
MODULE_PARM_DESC(battery_current, "battery current (milliampere)");
+module_param(battery_extension, battery_extension, 0644);
+MODULE_PARM_DESC(battery_extension, "battery extension");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index ccc5c4d2e230..38e23bdd4603 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -287,7 +287,6 @@ out:
static enum power_supply_property ug3105_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -316,9 +315,6 @@ static int ug3105_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = chip->info->technology;
- break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
break;
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 52c32dcbf7d8..4112a0097338 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type(
dev_set_name(&control_type->dev, "%s", name);
result = device_register(&control_type->dev);
if (result) {
- if (control_type->allocated)
- kfree(control_type);
+ put_device(&control_type->dev);
return ERR_PTR(result);
}
idr_init(&control_type->idr);
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index ceaf65cc1f1d..0aea394d4e4d 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -6,6 +6,7 @@
pps_core-y := pps.o kapi.o sysfs.o
pps_core-$(CONFIG_NTP_PPS) += kc.o
obj-$(CONFIG_PPS) := pps_core.o
-obj-y += clients/ generators/
+obj-y += clients/
+obj-$(CONFIG_PPS_GENERATOR) += generators/
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 634c3b2f8c26..75c1bae30a7c 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -52,7 +52,9 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
info = data;
- rising_edge = gpiod_get_value(info->gpio_pin);
+ /* Small trick to bypass the check on edge's direction when capture_clear is unset */
+ rising_edge = info->capture_clear ?
+ gpiod_get_value(info->gpio_pin) : !info->assert_falling_edge;
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
pps_event(info->pps, &ts, PPS_CAPTUREASSERT, data);
@@ -60,6 +62,8 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
((rising_edge && info->assert_falling_edge) ||
(!rising_edge && !info->assert_falling_edge)))
pps_event(info->pps, &ts, PPS_CAPTURECLEAR, data);
+ else
+ dev_warn_ratelimited(&info->pps->dev, "IRQ did not trigger any PPS event\n");
return IRQ_HANDLED;
}
@@ -214,8 +218,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
- data->irq);
+ dev_dbg(&data->pps->dev, "Registered IRQ %d as PPS source\n",
+ data->irq);
return 0;
}
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index d33106bd7a29..2f465549b843 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -56,7 +56,7 @@ static struct pps_source_info pps_ktimer_info = {
static void __exit pps_ktimer_exit(void)
{
- dev_info(pps->dev, "ktimer PPS source unregistered\n");
+ dev_dbg(&pps->dev, "ktimer PPS source unregistered\n");
del_timer_sync(&ktimer);
pps_unregister_source(pps);
@@ -74,7 +74,7 @@ static int __init pps_ktimer_init(void)
timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
- dev_info(pps->dev, "ktimer PPS source registered\n");
+ dev_dbg(&pps->dev, "ktimer PPS source registered\n");
return 0;
}
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 443d6bae19d1..fa5660f3c4b7 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -32,7 +32,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, bool active)
pps_event(pps, &ts, active ? PPS_CAPTUREASSERT :
PPS_CAPTURECLEAR, NULL);
- dev_dbg(pps->dev, "PPS %s at %lu\n",
+ dev_dbg(&pps->dev, "PPS %s at %lu\n",
active ? "assert" : "clear", jiffies);
}
@@ -69,7 +69,7 @@ static int pps_tty_open(struct tty_struct *tty)
goto err_unregister;
}
- dev_info(pps->dev, "source \"%s\" added\n", info.path);
+ dev_dbg(&pps->dev, "source \"%s\" added\n", info.path);
return 0;
@@ -89,7 +89,7 @@ static void pps_tty_close(struct tty_struct *tty)
if (WARN_ON(!pps))
return;
- dev_info(pps->dev, "removed\n");
+ dev_info(&pps->dev, "removed\n");
pps_unregister_source(pps);
}
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index abaffb4e1c1c..24db06750297 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -81,7 +81,7 @@ static void parport_irq(void *handle)
/* check the signal (no signal means the pulse is lost this time) */
if (!signal_is_set(port)) {
local_irq_restore(flags);
- dev_err(dev->pps->dev, "lost the signal\n");
+ dev_err(&dev->pps->dev, "lost the signal\n");
goto out_assert;
}
@@ -98,7 +98,7 @@ static void parport_irq(void *handle)
/* timeout */
dev->cw_err++;
if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
- dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+ dev_err(&dev->pps->dev, "disabled clear edge capture after %d"
" timeouts\n", dev->cw_err);
dev->cw = 0;
dev->cw_err = 0;
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index d615e640fcad..cd94bf3bfaf2 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -3,7 +3,25 @@
# PPS generators configuration
#
-comment "PPS generators support"
+menuconfig PPS_GENERATOR
+ tristate "PPS generators support"
+ help
+ PPS generators are special hardware devices able to produce PPS
+ (Pulse Per Second) signals.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pps_gen_core.
+
+if PPS_GENERATOR
+
+config PPS_GENERATOR_DUMMY
+ tristate "Dummy PPS generator (Testing generator, use for debug)"
+ help
+ If you say yes here you get support for a PPS debugging generator
+ (which generates no PPS signal at all).
+
+ This driver can also be built as a module. If so, the module
+ will be called pps_gen-dummy.
config PPS_GENERATOR_PARPORT
tristate "Parallel port PPS signal generator"
@@ -12,3 +30,5 @@ config PPS_GENERATOR_PARPORT
If you say yes here you get support for a PPS signal generator which
utilizes STROBE pin of a parallel port to send PPS signals. It uses
parport abstraction layer and hrtimers to precisely control the signal.
+
+endif # PPS_GENERATOR
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index 2589fd0f2481..dc1aa5a4688b 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -3,6 +3,10 @@
# Makefile for PPS generators.
#
+pps_gen_core-y := pps_gen.o sysfs.o
+obj-$(CONFIG_PPS_GENERATOR) := pps_gen_core.o
+
+obj-$(CONFIG_PPS_GENERATOR_DUMMY) += pps_gen-dummy.o
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/generators/pps_gen-dummy.c b/drivers/pps/generators/pps_gen-dummy.c
new file mode 100644
index 000000000000..b284c200cbe5
--- /dev/null
+++ b/drivers/pps/generators/pps_gen-dummy.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS dummy generator
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/random.h>
+#include <linux/pps_gen_kernel.h>
+
+static struct pps_gen_device *pps_gen;
+static struct timer_list ktimer;
+
+static unsigned int get_random_delay(void)
+{
+ unsigned int delay = get_random_u8() & 0x0f;
+
+ return (delay + 1) * HZ;
+}
+
+/*
+ * The kernel timer
+ */
+
+static void pps_gen_ktimer_event(struct timer_list *unused)
+{
+ pps_gen_event(pps_gen, PPS_GEN_EVENT_MISSEDPULSE, NULL);
+}
+
+/*
+ * PPS Generator methods
+ */
+
+static int pps_gen_dummy_get_time(struct pps_gen_device *pps_gen,
+ struct timespec64 *time)
+{
+ struct system_time_snapshot snap;
+
+ ktime_get_snapshot(&snap);
+ *time = ktime_to_timespec64(snap.real);
+
+ return 0;
+}
+
+static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
+{
+ if (enable)
+ mod_timer(&ktimer, jiffies + get_random_delay());
+ else
+ del_timer_sync(&ktimer);
+
+ return 0;
+}
+
+/*
+ * The PPS info struct
+ */
+
+static struct pps_gen_source_info pps_gen_dummy_info = {
+ .use_system_clock = true,
+ .get_time = pps_gen_dummy_get_time,
+ .enable = pps_gen_dummy_enable,
+};
+
+/*
+ * Module stuff
+ */
+
+static void __exit pps_gen_dummy_exit(void)
+{
+ del_timer_sync(&ktimer);
+ pps_gen_unregister_source(pps_gen);
+}
+
+static int __init pps_gen_dummy_init(void)
+{
+ pps_gen = pps_gen_register_source(&pps_gen_dummy_info);
+ if (IS_ERR(pps_gen))
+ return PTR_ERR(pps_gen);
+
+ timer_setup(&ktimer, pps_gen_ktimer_event, 0);
+
+ return 0;
+}
+
+module_init(pps_gen_dummy_init);
+module_exit(pps_gen_dummy_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@enneenne.com>");
+MODULE_DESCRIPTION("LinuxPPS dummy generator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/pps_gen.c b/drivers/pps/generators/pps_gen.c
new file mode 100644
index 000000000000..ca592f1736f4
--- /dev/null
+++ b/drivers/pps/generators/pps_gen.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS generators core file
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/pps_gen_kernel.h>
+#include <linux/slab.h>
+
+/*
+ * Local variables
+ */
+
+static dev_t pps_gen_devt;
+static struct class *pps_gen_class;
+
+static DEFINE_IDA(pps_gen_ida);
+
+/*
+ * Char device methods
+ */
+
+static __poll_t pps_gen_cdev_poll(struct file *file, poll_table *wait)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ poll_wait(file, &pps_gen->queue, wait);
+ return EPOLLIN | EPOLLRDNORM;
+}
+
+static int pps_gen_cdev_fasync(int fd, struct file *file, int on)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ return fasync_helper(fd, file, on, &pps_gen->async_queue);
+}
+
+static long pps_gen_cdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+ void __user *uarg = (void __user *) arg;
+ unsigned int __user *uiuarg = (unsigned int __user *) arg;
+ unsigned int status;
+ int ret;
+
+ switch (cmd) {
+ case PPS_GEN_SETENABLE:
+ dev_dbg(pps_gen->dev, "PPS_GEN_SETENABLE\n");
+
+ ret = get_user(status, uiuarg);
+ if (ret)
+ return -EFAULT;
+
+ ret = pps_gen->info.enable(pps_gen, status);
+ if (ret)
+ return ret;
+ pps_gen->enabled = status;
+
+ break;
+
+ case PPS_GEN_USESYSTEMCLOCK:
+ dev_dbg(pps_gen->dev, "PPS_GEN_USESYSTEMCLOCK\n");
+
+ ret = put_user(pps_gen->info.use_system_clock, uiuarg);
+ if (ret)
+ return -EFAULT;
+
+ break;
+
+ case PPS_GEN_FETCHEVENT: {
+ struct pps_gen_event info;
+ unsigned int ev = pps_gen->last_ev;
+
+ dev_dbg(pps_gen->dev, "PPS_GEN_FETCHEVENT\n");
+
+ ret = wait_event_interruptible(pps_gen->queue,
+ ev != pps_gen->last_ev);
+ if (ret == -ERESTARTSYS) {
+ dev_dbg(pps_gen->dev, "pending signal caught\n");
+ return -EINTR;
+ }
+
+ spin_lock_irq(&pps_gen->lock);
+ info.sequence = pps_gen->sequence;
+ info.event = pps_gen->event;
+ spin_unlock_irq(&pps_gen->lock);
+
+ ret = copy_to_user(uarg, &info, sizeof(struct pps_gen_event));
+ if (ret)
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int pps_gen_cdev_open(struct inode *inode, struct file *file)
+{
+ struct pps_gen_device *pps_gen = container_of(inode->i_cdev,
+ struct pps_gen_device, cdev);
+
+ get_device(pps_gen->dev);
+ file->private_data = pps_gen;
+ return 0;
+}
+
+static int pps_gen_cdev_release(struct inode *inode, struct file *file)
+{
+ struct pps_gen_device *pps_gen = file->private_data;
+
+ put_device(pps_gen->dev);
+ return 0;
+}
+
+/*
+ * Char device stuff
+ */
+
+static const struct file_operations pps_gen_cdev_fops = {
+ .owner = THIS_MODULE,
+ .poll = pps_gen_cdev_poll,
+ .fasync = pps_gen_cdev_fasync,
+ .unlocked_ioctl = pps_gen_cdev_ioctl,
+ .open = pps_gen_cdev_open,
+ .release = pps_gen_cdev_release,
+};
+
+static void pps_gen_device_destruct(struct device *dev)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+
+ cdev_del(&pps_gen->cdev);
+
+ pr_debug("deallocating pps-gen%d\n", pps_gen->id);
+ ida_free(&pps_gen_ida, pps_gen->id);
+
+ kfree(dev);
+ kfree(pps_gen);
+}
+
+static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
+{
+ int err;
+ dev_t devt;
+
+ err = ida_alloc_max(&pps_gen_ida, PPS_GEN_MAX_SOURCES - 1, GFP_KERNEL);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ pr_err("too many PPS sources in the system\n");
+ err = -EBUSY;
+ }
+ return err;
+ }
+ pps_gen->id = err;
+
+ devt = MKDEV(MAJOR(pps_gen_devt), pps_gen->id);
+
+ cdev_init(&pps_gen->cdev, &pps_gen_cdev_fops);
+ pps_gen->cdev.owner = pps_gen->info.owner;
+
+ err = cdev_add(&pps_gen->cdev, devt, 1);
+ if (err) {
+ pr_err("failed to add char device %d:%d\n",
+ MAJOR(pps_gen_devt), pps_gen->id);
+ goto free_ida;
+ }
+ pps_gen->dev = device_create(pps_gen_class, pps_gen->info.parent, devt,
+ pps_gen, "pps-gen%d", pps_gen->id);
+ if (IS_ERR(pps_gen->dev)) {
+ err = PTR_ERR(pps_gen->dev);
+ goto del_cdev;
+ }
+ pps_gen->dev->release = pps_gen_device_destruct;
+ dev_set_drvdata(pps_gen->dev, pps_gen);
+
+ pr_debug("generator got cdev (%d:%d)\n",
+ MAJOR(pps_gen_devt), pps_gen->id);
+
+ return 0;
+
+del_cdev:
+ cdev_del(&pps_gen->cdev);
+free_ida:
+ ida_free(&pps_gen_ida, pps_gen->id);
+ return err;
+}
+
+static void pps_gen_unregister_cdev(struct pps_gen_device *pps_gen)
+{
+ pr_debug("unregistering pps-gen%d\n", pps_gen->id);
+ device_destroy(pps_gen_class, pps_gen->dev->devt);
+}
+
+/*
+ * Exported functions
+ */
+
+/**
+ * pps_gen_register_source() - add a PPS generator in the system
+ * @info: the PPS generator info struct
+ *
+ * This function is used to register a new PPS generator in the system.
+ * When it returns successfully the new generator is up and running, and
+ * it can be managed from userspace.
+ *
+ * Return: the PPS generator device in case of success, and ERR_PTR(errno)
+ * otherwise.
+ */
+struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
+{
+ struct pps_gen_device *pps_gen;
+ int err;
+
+ pps_gen = kzalloc(sizeof(struct pps_gen_device), GFP_KERNEL);
+ if (pps_gen == NULL) {
+ err = -ENOMEM;
+ goto pps_gen_register_source_exit;
+ }
+ pps_gen->info = *info;
+ pps_gen->enabled = false;
+
+ init_waitqueue_head(&pps_gen->queue);
+ spin_lock_init(&pps_gen->lock);
+
+ /* Create the char device */
+ err = pps_gen_register_cdev(pps_gen);
+ if (err < 0) {
+ pr_err("unable to create char device\n");
+ goto kfree_pps_gen;
+ }
+
+ return pps_gen;
+
+kfree_pps_gen:
+ kfree(pps_gen);
+
+pps_gen_register_source_exit:
+ pr_err("unable to register generator\n");
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(pps_gen_register_source);
+
+/**
+ * pps_gen_unregister_source() - remove a PPS generator from the system
+ * @pps_gen: the PPS generator device to be removed
+ *
+ * This function is used to deregister a PPS generator from the system. When
+ * called, it disables the generator so no pulses are generated anymore.
+ */
+void pps_gen_unregister_source(struct pps_gen_device *pps_gen)
+{
+ pps_gen_unregister_cdev(pps_gen);
+}
+EXPORT_SYMBOL(pps_gen_unregister_source);
+
+/**
+ * pps_gen_event() - register a PPS generator event into the system
+ * @pps_gen: the PPS generator device
+ * @event: the event type
+ * @data: user-defined pointer
+ *
+ * This function is used by each PPS generator in order to register a new
+ * PPS event into the system (it's usually called inside an IRQ handler).
+ */
+void pps_gen_event(struct pps_gen_device *pps_gen,
+ unsigned int event, void *data)
+{
+ unsigned long flags;
+
+ dev_dbg(pps_gen->dev, "PPS generator event %u\n", event);
+
+ spin_lock_irqsave(&pps_gen->lock, flags);
+
+ pps_gen->event = event;
+ pps_gen->sequence++;
+
+ pps_gen->last_ev++;
+ wake_up_interruptible_all(&pps_gen->queue);
+ kill_fasync(&pps_gen->async_queue, SIGIO, POLL_IN);
+
+ spin_unlock_irqrestore(&pps_gen->lock, flags);
+}
+EXPORT_SYMBOL(pps_gen_event);
+
+/*
+ * Module stuff
+ */
+
+static void __exit pps_gen_exit(void)
+{
+ class_destroy(pps_gen_class);
+ unregister_chrdev_region(pps_gen_devt, PPS_GEN_MAX_SOURCES);
+}
+
+static int __init pps_gen_init(void)
+{
+ int err;
+
+ pps_gen_class = class_create("pps-gen");
+ if (IS_ERR(pps_gen_class)) {
+ pr_err("failed to allocate class\n");
+ return PTR_ERR(pps_gen_class);
+ }
+ pps_gen_class->dev_groups = pps_gen_groups;
+
+ err = alloc_chrdev_region(&pps_gen_devt, 0,
+ PPS_GEN_MAX_SOURCES, "pps-gen");
+ if (err < 0) {
+ pr_err("failed to allocate char device region\n");
+ goto remove_class;
+ }
+
+ return 0;
+
+remove_class:
+ class_destroy(pps_gen_class);
+ return err;
+}
+
+subsys_initcall(pps_gen_init);
+module_exit(pps_gen_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@enneenne.com>");
+MODULE_DESCRIPTION("LinuxPPS generators support");
+MODULE_LICENSE("GPL");
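The kernel-doc above and the pps_gen_cdev_ioctl() handler define how a registered generator is driven from userspace through its /dev/pps-genN character device. A minimal userspace sketch, assuming the PPS_GEN_* ioctl definitions and struct pps_gen_event are exported through a uapi header (the <linux/pps_gen.h> name here is an assumption):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps_gen.h>	/* assumed uapi header exporting PPS_GEN_* and struct pps_gen_event */

int main(void)
{
	struct pps_gen_event ev;
	unsigned int enable = 1, use_sysclock;
	int fd = open("/dev/pps-gen0", O_RDWR);

	if (fd < 0)
		return 1;

	/* PPS_GEN_USESYSTEMCLOCK reports info.use_system_clock back to the caller */
	if (ioctl(fd, PPS_GEN_USESYSTEMCLOCK, &use_sysclock) == 0)
		printf("uses system clock: %u\n", use_sysclock);

	/* Start pulse generation, then block until the next generator event */
	if (ioctl(fd, PPS_GEN_SETENABLE, &enable) == 0 &&
	    ioctl(fd, PPS_GEN_FETCHEVENT, &ev) == 0)
		printf("event %u, sequence %u\n", ev.event, ev.sequence);

	close(fd);
	return 0;
}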
diff --git a/drivers/pps/generators/sysfs.c b/drivers/pps/generators/sysfs.c
new file mode 100644
index 000000000000..faf8b1c6d202
--- /dev/null
+++ b/drivers/pps/generators/sysfs.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PPS generators sysfs support
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/pps_gen_kernel.h>
+
+/*
+ * Attribute functions
+ */
+
+static ssize_t system_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", pps_gen->info.use_system_clock);
+}
+static DEVICE_ATTR_RO(system);
+
+static ssize_t time_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+ struct timespec64 time;
+ int ret;
+
+ ret = pps_gen->info.get_time(pps_gen, &time);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu %09lu\n", time.tv_sec, time.tv_nsec);
+}
+static DEVICE_ATTR_RO(time);
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
+ bool status;
+ int ret;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ ret = pps_gen->info.enable(pps_gen, status);
+ if (ret)
+ return ret;
+ pps_gen->enabled = status;
+
+ return count;
+}
+static DEVICE_ATTR_WO(enable);
+
+static struct attribute *pps_gen_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_time.attr,
+ &dev_attr_system.attr,
+ NULL,
+};
+
+static const struct attribute_group pps_gen_group = {
+ .attrs = pps_gen_attrs,
+};
+
+const struct attribute_group *pps_gen_groups[] = {
+ &pps_gen_group,
+ NULL,
+};
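The three attributes registered here (enable, time and system) end up under the generator's sysfs directory; with the "pps-gen" class created in pps_gen.c that is normally /sys/class/pps-gen/pps-genN/. A short sketch, with the path layout as an assumption, that enables a generator and reads back its current time:

#include <stdio.h>

/* Path assumes the conventional /sys/class/<class>/<dev>/ layout. */
#define GEN_SYSFS "/sys/class/pps-gen/pps-gen0/"

int main(void)
{
	char buf[64] = "";
	FILE *f;

	/* enable_store() forwards this value to the generator's info.enable() */
	f = fopen(GEN_SYSFS "enable", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* time_show() prints "<sec> <nsec>" via the generator's get_time() */
	f = fopen(GEN_SYSFS "time", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("generator time: %s", buf);
	if (f)
		fclose(f);

	return 0;
}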
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index d9d566f70ed1..92d1b62ea239 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -41,7 +41,7 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
static void pps_echo_client_default(struct pps_device *pps, int event,
void *data)
{
- dev_info(pps->dev, "echo %s %s\n",
+ dev_info(&pps->dev, "echo %s %s\n",
event & PPS_CAPTUREASSERT ? "assert" : "",
event & PPS_CAPTURECLEAR ? "clear" : "");
}
@@ -112,7 +112,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
goto kfree_pps;
}
- dev_info(pps->dev, "new PPS source %s\n", info->name);
+ dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
return pps;
@@ -166,7 +166,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
- dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
+ dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
timespec_to_pps_ktime(&ts_real, ts->ts_real);
@@ -188,7 +188,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* Save the time stamp */
pps->assert_tu = ts_real;
pps->assert_sequence++;
- dev_dbg(pps->dev, "capture assert seq #%u\n",
+ dev_dbg(&pps->dev, "capture assert seq #%u\n",
pps->assert_sequence);
captured = ~0;
@@ -202,7 +202,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
/* Save the time stamp */
pps->clear_tu = ts_real;
pps->clear_sequence++;
- dev_dbg(pps->dev, "capture clear seq #%u\n",
+ dev_dbg(&pps->dev, "capture clear seq #%u\n",
pps->clear_sequence);
captured = ~0;
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
index 50dc59af45be..fbd23295afd7 100644
--- a/drivers/pps/kc.c
+++ b/drivers/pps/kc.c
@@ -43,11 +43,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
pps_kc_hardpps_mode = 0;
pps_kc_hardpps_dev = NULL;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "unbound kernel"
+ dev_info(&pps->dev, "unbound kernel"
" consumer\n");
} else {
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_err(pps->dev, "selected kernel consumer"
+ dev_err(&pps->dev, "selected kernel consumer"
" is not bound\n");
return -EINVAL;
}
@@ -57,11 +57,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
pps_kc_hardpps_mode = bind_args->edge;
pps_kc_hardpps_dev = pps;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "bound kernel consumer: "
+ dev_info(&pps->dev, "bound kernel consumer: "
"edge=0x%x\n", bind_args->edge);
} else {
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_err(pps->dev, "another kernel consumer"
+ dev_err(&pps->dev, "another kernel consumer"
" is already bound\n");
return -EINVAL;
}
@@ -83,7 +83,7 @@ void pps_kc_remove(struct pps_device *pps)
pps_kc_hardpps_mode = 0;
pps_kc_hardpps_dev = NULL;
spin_unlock_irq(&pps_kc_hardpps_lock);
- dev_info(pps->dev, "unbound kernel consumer"
+ dev_info(&pps->dev, "unbound kernel consumer"
" on device removal\n");
} else
spin_unlock_irq(&pps_kc_hardpps_lock);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 25d47907db17..6a02245ea35f 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -25,7 +25,7 @@
* Local variables
*/
-static dev_t pps_devt;
+static int pps_major;
static struct class *pps_class;
static DEFINE_MUTEX(pps_idr_lock);
@@ -62,7 +62,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
else {
unsigned long ticks;
- dev_dbg(pps->dev, "timeout %lld.%09d\n",
+ dev_dbg(&pps->dev, "timeout %lld.%09d\n",
(long long) fdata->timeout.sec,
fdata->timeout.nsec);
ticks = fdata->timeout.sec * HZ;
@@ -80,7 +80,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
/* Check for pending signals */
if (err == -ERESTARTSYS) {
- dev_dbg(pps->dev, "pending signal caught\n");
+ dev_dbg(&pps->dev, "pending signal caught\n");
return -EINTR;
}
@@ -98,7 +98,7 @@ static long pps_cdev_ioctl(struct file *file,
switch (cmd) {
case PPS_GETPARAMS:
- dev_dbg(pps->dev, "PPS_GETPARAMS\n");
+ dev_dbg(&pps->dev, "PPS_GETPARAMS\n");
spin_lock_irq(&pps->lock);
@@ -114,7 +114,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_SETPARAMS:
- dev_dbg(pps->dev, "PPS_SETPARAMS\n");
+ dev_dbg(&pps->dev, "PPS_SETPARAMS\n");
/* Check the capabilities */
if (!capable(CAP_SYS_TIME))
@@ -124,14 +124,14 @@ static long pps_cdev_ioctl(struct file *file,
if (err)
return -EFAULT;
if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
- dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
+ dev_dbg(&pps->dev, "capture mode unspecified (%x)\n",
params.mode);
return -EINVAL;
}
/* Check for supported capabilities */
if ((params.mode & ~pps->info.mode) != 0) {
- dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
+ dev_dbg(&pps->dev, "unsupported capabilities (%x)\n",
params.mode);
return -EINVAL;
}
@@ -144,7 +144,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Restore the read only parameters */
if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
/* section 3.3 of RFC 2783 interpreted */
- dev_dbg(pps->dev, "time format unspecified (%x)\n",
+ dev_dbg(&pps->dev, "time format unspecified (%x)\n",
params.mode);
pps->params.mode |= PPS_TSFMT_TSPEC;
}
@@ -165,7 +165,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_GETCAP:
- dev_dbg(pps->dev, "PPS_GETCAP\n");
+ dev_dbg(&pps->dev, "PPS_GETCAP\n");
err = put_user(pps->info.mode, iuarg);
if (err)
@@ -176,7 +176,7 @@ static long pps_cdev_ioctl(struct file *file,
case PPS_FETCH: {
struct pps_fdata fdata;
- dev_dbg(pps->dev, "PPS_FETCH\n");
+ dev_dbg(&pps->dev, "PPS_FETCH\n");
err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
if (err)
@@ -206,7 +206,7 @@ static long pps_cdev_ioctl(struct file *file,
case PPS_KC_BIND: {
struct pps_bind_args bind_args;
- dev_dbg(pps->dev, "PPS_KC_BIND\n");
+ dev_dbg(&pps->dev, "PPS_KC_BIND\n");
/* Check the capabilities */
if (!capable(CAP_SYS_TIME))
@@ -218,7 +218,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Check for supported capabilities */
if ((bind_args.edge & ~pps->info.mode) != 0) {
- dev_err(pps->dev, "unsupported capabilities (%x)\n",
+ dev_err(&pps->dev, "unsupported capabilities (%x)\n",
bind_args.edge);
return -EINVAL;
}
@@ -227,7 +227,7 @@ static long pps_cdev_ioctl(struct file *file,
if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
bind_args.consumer != PPS_KC_HARDPPS) {
- dev_err(pps->dev, "invalid kernel consumer bind"
+ dev_err(&pps->dev, "invalid kernel consumer bind"
" parameters (%x)\n", bind_args.edge);
return -EINVAL;
}
@@ -259,7 +259,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
struct pps_fdata fdata;
int err;
- dev_dbg(pps->dev, "PPS_FETCH\n");
+ dev_dbg(&pps->dev, "PPS_FETCH\n");
err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
if (err)
@@ -296,20 +296,36 @@ static long pps_cdev_compat_ioctl(struct file *file,
#define pps_cdev_compat_ioctl NULL
#endif
+static struct pps_device *pps_idr_get(unsigned long id)
+{
+ struct pps_device *pps;
+
+ mutex_lock(&pps_idr_lock);
+ pps = idr_find(&pps_idr, id);
+ if (pps)
+ get_device(&pps->dev);
+
+ mutex_unlock(&pps_idr_lock);
+ return pps;
+}
+
static int pps_cdev_open(struct inode *inode, struct file *file)
{
- struct pps_device *pps = container_of(inode->i_cdev,
- struct pps_device, cdev);
+ struct pps_device *pps = pps_idr_get(iminor(inode));
+
+ if (!pps)
+ return -ENODEV;
+
file->private_data = pps;
- kobject_get(&pps->dev->kobj);
return 0;
}
static int pps_cdev_release(struct inode *inode, struct file *file)
{
- struct pps_device *pps = container_of(inode->i_cdev,
- struct pps_device, cdev);
- kobject_put(&pps->dev->kobj);
+ struct pps_device *pps = file->private_data;
+
+ WARN_ON(pps->id != iminor(inode));
+ put_device(&pps->dev);
return 0;
}
@@ -331,22 +347,13 @@ static void pps_device_destruct(struct device *dev)
{
struct pps_device *pps = dev_get_drvdata(dev);
- cdev_del(&pps->cdev);
-
- /* Now we can release the ID for re-use */
pr_debug("deallocating pps%d\n", pps->id);
- mutex_lock(&pps_idr_lock);
- idr_remove(&pps_idr, pps->id);
- mutex_unlock(&pps_idr_lock);
-
- kfree(dev);
kfree(pps);
}
int pps_register_cdev(struct pps_device *pps)
{
int err;
- dev_t devt;
mutex_lock(&pps_idr_lock);
/*
@@ -363,40 +370,29 @@ int pps_register_cdev(struct pps_device *pps)
goto out_unlock;
}
pps->id = err;
- mutex_unlock(&pps_idr_lock);
-
- devt = MKDEV(MAJOR(pps_devt), pps->id);
-
- cdev_init(&pps->cdev, &pps_cdev_fops);
- pps->cdev.owner = pps->info.owner;
- err = cdev_add(&pps->cdev, devt, 1);
- if (err) {
- pr_err("%s: failed to add char device %d:%d\n",
- pps->info.name, MAJOR(pps_devt), pps->id);
+ pps->dev.class = pps_class;
+ pps->dev.parent = pps->info.dev;
+ pps->dev.devt = MKDEV(pps_major, pps->id);
+ dev_set_drvdata(&pps->dev, pps);
+ dev_set_name(&pps->dev, "pps%d", pps->id);
+ err = device_register(&pps->dev);
+ if (err)
goto free_idr;
- }
- pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
- "pps%d", pps->id);
- if (IS_ERR(pps->dev)) {
- err = PTR_ERR(pps->dev);
- goto del_cdev;
- }
/* Override the release function with our own */
- pps->dev->release = pps_device_destruct;
+ pps->dev.release = pps_device_destruct;
- pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
- MAJOR(pps_devt), pps->id);
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
+ pps->id);
+ get_device(&pps->dev);
+ mutex_unlock(&pps_idr_lock);
return 0;
-del_cdev:
- cdev_del(&pps->cdev);
-
free_idr:
- mutex_lock(&pps_idr_lock);
idr_remove(&pps_idr, pps->id);
+ put_device(&pps->dev);
out_unlock:
mutex_unlock(&pps_idr_lock);
return err;
@@ -406,7 +402,13 @@ void pps_unregister_cdev(struct pps_device *pps)
{
pr_debug("unregistering pps%d\n", pps->id);
pps->lookup_cookie = NULL;
- device_destroy(pps_class, pps->dev->devt);
+ device_destroy(pps_class, pps->dev.devt);
+
+ /* Now we can release the ID for re-use */
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ put_device(&pps->dev);
+ mutex_unlock(&pps_idr_lock);
}
/*
@@ -426,6 +428,11 @@ void pps_unregister_cdev(struct pps_device *pps)
* so that it will not be used again, even if the pps device cannot
* be removed from the idr due to pending references holding the minor
* number in use.
+ *
+ * Since pps_idr holds a reference to the device, the returned
+ * pps_device is guaranteed to be valid until pps_unregister_cdev() is
+ * called on it. But after calling pps_unregister_cdev(), it may be
+ * freed at any time.
*/
struct pps_device *pps_lookup_dev(void const *cookie)
{
@@ -448,13 +455,11 @@ EXPORT_SYMBOL(pps_lookup_dev);
static void __exit pps_exit(void)
{
class_destroy(pps_class);
- unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
+ __unregister_chrdev(pps_major, 0, PPS_MAX_SOURCES, "pps");
}
static int __init pps_init(void)
{
- int err;
-
pps_class = class_create("pps");
if (IS_ERR(pps_class)) {
pr_err("failed to allocate class\n");
@@ -462,8 +467,9 @@ static int __init pps_init(void)
}
pps_class->dev_groups = pps_groups;
- err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
- if (err < 0) {
+ pps_major = __register_chrdev(0, 0, PPS_MAX_SOURCES, "pps",
+ &pps_cdev_fops);
+ if (pps_major < 0) {
pr_err("failed to allocate char device region\n");
goto remove_class;
}
@@ -476,8 +482,7 @@ static int __init pps_init(void)
remove_class:
class_destroy(pps_class);
-
- return err;
+ return pps_major;
}
subsys_initcall(pps_init);
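The comment added above pps_lookup_dev() spells out the new lifetime rule: the idr reference keeps the returned pps_device alive only until pps_unregister_cdev(). A caller that stores the pointer for longer is expected to pin the now-embedded struct device itself; a minimal sketch of that pattern (the example_* names are illustrative, not part of this patch):

#include <linux/pps_kernel.h>
#include <linux/device.h>
#include <linux/errno.h>

static struct pps_device *example_pps;	/* hypothetical long-lived reference */

static int example_attach(const void *cookie)
{
	struct pps_device *pps = pps_lookup_dev(cookie);

	if (!pps)
		return -ENODEV;

	/*
	 * pps_idr keeps the device alive only until pps_unregister_cdev(),
	 * so take our own reference while we hold on to the pointer.
	 */
	get_device(&pps->dev);
	example_pps = pps;
	return 0;
}

static void example_detach(void)
{
	if (example_pps) {
		put_device(&example_pps->dev);
		example_pps = NULL;
	}
}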
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index ea96a14d72d1..bf6468c56419 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
@@ -176,6 +177,9 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
struct timespec64 ts;
int enable, err = 0;
+ if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
+ arg = (unsigned long)compat_ptr(arg);
+
tsevq = pccontext->private_clkdata;
switch (cmd) {
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b932425ddc6a..35a5994bf64f 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -217,6 +217,11 @@ static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
return info->gettime64(info, ts);
}
+static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
static void ptp_aux_kworker(struct kthread_work *work)
{
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
@@ -294,6 +299,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->info->getcrosscycles = ptp->info->getcrosststamp;
}
+ if (!ptp->info->enable)
+ ptp->info->enable = ptp_enable;
+
if (ptp->info->do_aux_work) {
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 7f08c70d8123..b651087f426f 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4420,7 +4420,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
pps = pps_lookup_dev(bp->ptp);
if (pps)
- ptp_ocp_symlink(bp, pps->dev, "pps");
+ ptp_ocp_symlink(bp, &pps->dev, "pps");
ptp_ocp_debugfs_add_device(bp);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 675b252d9c8c..ccd54c089bab 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -242,6 +242,9 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip))
+ return -EOPNOTSUPP;
+
if (!pwm_wf_valid(wf))
return -EINVAL;
@@ -294,6 +297,9 @@ int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip) || !ops->read_waveform)
+ return -EOPNOTSUPP;
+
guard(pwmchip)(chip);
if (!chip->operational)
@@ -320,6 +326,9 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ if (!pwmchip_supports_waveform(chip))
+ return -EOPNOTSUPP;
+
if (!pwm_wf_valid(wf))
return -EINVAL;
@@ -592,7 +601,7 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
state->usage_power == pwm->state.usage_power)
return 0;
- if (ops->write_waveform) {
+ if (pwmchip_supports_waveform(chip)) {
struct pwm_waveform wf;
char wfhw[WFHWSIZE];
@@ -746,7 +755,7 @@ int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
if (!chip->operational)
return -ENODEV;
- if (ops->read_waveform) {
+ if (pwmchip_supports_waveform(chip) && ops->read_waveform) {
char wfhw[WFHWSIZE];
struct pwm_waveform wf;
@@ -1276,7 +1285,7 @@ static int pwm_export_child(struct device *pwmchip_dev, struct pwm_device *pwm)
return 0;
}
-static int pwm_unexport_match(struct device *pwm_dev, void *data)
+static int pwm_unexport_match(struct device *pwm_dev, const void *data)
{
return pwm_from_dev(pwm_dev) == data;
}
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index c1f2287b8e97..12821b4bbf97 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -327,7 +327,7 @@ static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *
* mchp_core_pwm_calc_period().
* The period is locked and we cannot change this, so we abort.
*/
- if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
+ if (hw_period_steps > MCHPCOREPWM_PERIOD_STEPS_MAX)
return -EINVAL;
prescale = hw_prescale;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6c0ef1182248..89578b91c468 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5033,7 +5033,7 @@ int _regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].supply, get_type);
if (IS_ERR(consumers[i].consumer)) {
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
- "Failed to get supply '%s'",
+ "Failed to get supply '%s'\n",
consumers[i].supply);
consumers[i].consumer = NULL;
goto err;
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index 97f5ce138548..c0f5f0a186a3 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -44,10 +44,35 @@ static const unsigned int tps6287x_voltage_range_sel[] = {
0x0, 0x1, 0x2, 0x3
};
+static const unsigned int tps6287x_voltage_range_prefix[] = {
+ 0x000, 0x100, 0x200, 0x300
+};
+
static const unsigned int tps6287x_ramp_table[] = {
10000, 5000, 1250, 500
};
+struct tps6287x_reg_data {
+ int range;
+};
+
+static int tps6287x_best_range(struct regulator_config *config, const struct regulator_desc *desc)
+{
+ const struct linear_range *r;
+ int i;
+
+ if (!config->init_data->constraints.apply_uV)
+ return -1;
+
+ for (i = 0; i < desc->n_linear_ranges; i++) {
+ r = &desc->linear_ranges[i];
+ if (r->min <= config->init_data->constraints.min_uV &&
+ config->init_data->constraints.max_uV <= linear_range_get_max_value(r))
+ return i;
+ }
+ return -1;
+}
+
static int tps6287x_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
unsigned int val;
@@ -91,6 +116,28 @@ static unsigned int tps6287x_of_map_mode(unsigned int mode)
}
}
+static int tps6287x_map_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct tps6287x_reg_data *data = (struct tps6287x_reg_data *)rdev->reg_data;
+ struct linear_range selected_range;
+ int selector, voltage;
+
+ if (!data || data->range == -1)
+ return regulator_map_voltage_pickable_linear_range(rdev, min_uV, max_uV);
+
+ selected_range = rdev->desc->linear_ranges[data->range];
+ selector = DIV_ROUND_UP(min_uV - selected_range.min, selected_range.step);
+ if (selector < selected_range.min_sel || selector > selected_range.max_sel)
+ return -EINVAL;
+
+ selector |= tps6287x_voltage_range_prefix[data->range];
+ voltage = rdev->desc->ops->list_voltage(rdev, selector);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return selector;
+}
+
static const struct regulator_ops tps6287x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -100,6 +147,7 @@ static const struct regulator_ops tps6287x_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
.set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
.list_voltage = regulator_list_voltage_pickable_linear_range,
+ .map_voltage = tps6287x_map_voltage,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
@@ -130,8 +178,14 @@ static int tps6287x_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_config config = {};
+ struct tps6287x_reg_data *reg_data;
struct regulator_dev *rdev;
+ reg_data = devm_kzalloc(dev, sizeof(struct tps6287x_reg_data), GFP_KERNEL);
+
+ if (!reg_data)
+ return -ENOMEM;
+
config.regmap = devm_regmap_init_i2c(i2c, &tps6287x_regmap_config);
if (IS_ERR(config.regmap)) {
dev_err(dev, "Failed to init i2c\n");
@@ -143,12 +197,15 @@ static int tps6287x_i2c_probe(struct i2c_client *i2c)
config.init_data = of_get_regulator_init_data(dev, dev->of_node,
&tps6287x_reg);
+ reg_data->range = tps6287x_best_range(&config, &tps6287x_reg);
+
rdev = devm_regulator_register(dev, &tps6287x_reg, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "Failed to register regulator\n");
return PTR_ERR(rdev);
}
+ rdev->reg_data = (void *)reg_data;
dev_dbg(dev, "Probed regulator\n");
return 0;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 6e54093d1732..7b41b4547fa8 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -335,25 +335,16 @@ static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- int ret;
if (!of_property_read_bool(np, "ti,syscon-dev")) {
dev_err(dev, "ti,syscon-dev property is absent\n");
return -EINVAL;
}
- ksproc->dev_ctrl =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
- if (IS_ERR(ksproc->dev_ctrl)) {
- ret = PTR_ERR(ksproc->dev_ctrl);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &ksproc->boot_offset)) {
- dev_err(dev, "couldn't read the boot register offset\n");
- return -EINVAL;
- }
+ ksproc->dev_ctrl = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &ksproc->boot_offset);
+ if (IS_ERR(ksproc->dev_ctrl))
+ return PTR_ERR(ksproc->dev_ctrl);
return 0;
}
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 0f4a7065d0bd..8206a1766481 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -1326,6 +1326,11 @@ static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_clus
return ret;
}
+static const struct of_device_id scp_core_match[] = {
+ { .compatible = "mediatek,scp-core" },
+ {}
+};
+
static int scp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1357,13 +1362,15 @@ static int scp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
mutex_init(&scp_cluster->cluster_lock);
- ret = devm_of_platform_populate(dev);
+ ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
ret = scp_cluster_init(pdev, scp_cluster);
- if (ret)
+ if (ret) {
+ of_platform_depopulate(dev);
return ret;
+ }
return 0;
}
@@ -1379,6 +1386,7 @@ static void scp_remove(struct platform_device *pdev)
rproc_del(scp->rproc);
scp_free(scp);
}
+ of_platform_depopulate(&pdev->dev);
mutex_destroy(&scp_cluster->cluster_lock);
}
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 9ae2e831456d..5f463937cbbf 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -37,6 +37,10 @@
#include <linux/platform_data/dmtimer-omap.h>
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
+
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
@@ -1133,7 +1137,6 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc = rproc->priv;
const struct omap_rproc_dev_data *data;
- int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
@@ -1149,10 +1152,8 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
oproc->boot_data->syscon =
syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
- if (IS_ERR(oproc->boot_data->syscon)) {
- ret = PTR_ERR(oproc->boot_data->syscon);
- return ret;
- }
+ if (IS_ERR(oproc->boot_data->syscon))
+ return PTR_ERR(oproc->boot_data->syscon);
if (of_property_read_u32_index(np, "ti,bootreg", 1,
&oproc->boot_data->boot_reg)) {
@@ -1323,6 +1324,19 @@ static int omap_rproc_probe(struct platform_device *pdev)
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ /*
+ * Throw away the ARM DMA mapping that we'll never use, so it doesn't
+ * interfere with the core rproc->domain and we get the right DMA ops.
+ */
+ if (pdev->dev.archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
+
+ arm_iommu_detach_device(&pdev->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
ret = omap_rproc_of_get_internal_memories(pdev, rproc);
if (ret)
return ret;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index eb66f78ec8b7..c2cf0d277729 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2486,6 +2486,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->dev.driver_data = rproc;
idr_init(&rproc->notifyids);
+ /* Assign a unique device index and name */
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+ if (rproc->index < 0) {
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+ goto put_device;
+ }
+
rproc->name = kstrdup_const(name, GFP_KERNEL);
if (!rproc->name)
goto put_device;
@@ -2496,13 +2503,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
if (rproc_alloc_ops(rproc, ops))
goto put_device;
- /* Assign a unique device index and name */
- rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
- if (rproc->index < 0) {
- dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
- goto put_device;
- }
-
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 5df99bae7131..e6566a9839dc 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -290,26 +290,23 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
if (ddata->config->sw_reset) {
ddata->sw_reset = devm_reset_control_get_exclusive(dev,
"sw_reset");
- if (IS_ERR(ddata->sw_reset)) {
- dev_err(dev, "Failed to get S/W Reset\n");
- return PTR_ERR(ddata->sw_reset);
- }
+ if (IS_ERR(ddata->sw_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->sw_reset),
+ "Failed to get S/W Reset\n");
}
if (ddata->config->pwr_reset) {
ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
"pwr_reset");
- if (IS_ERR(ddata->pwr_reset)) {
- dev_err(dev, "Failed to get Power Reset\n");
- return PTR_ERR(ddata->pwr_reset);
- }
+ if (IS_ERR(ddata->pwr_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->pwr_reset),
+ "Failed to get Power Reset\n");
}
ddata->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddata->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddata->clk);
- }
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(dev, PTR_ERR(ddata->clk),
+ "Failed to get clock\n");
err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
if (err) {
@@ -317,18 +314,11 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
return err;
}
- ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(ddata->boot_base)) {
- dev_err(dev, "Boot base not found\n");
- return PTR_ERR(ddata->boot_base);
- }
-
- err = of_property_read_u32_index(np, "st,syscfg", 1,
- &ddata->boot_offset);
- if (err) {
- dev_err(dev, "Boot offset not found\n");
- return -EINVAL;
- }
+ ddata->boot_base = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg",
+ 1, &ddata->boot_offset);
+ if (IS_ERR(ddata->boot_base))
+ return dev_err_probe(dev, PTR_ERR(ddata->boot_base),
+ "Boot base not found\n");
err = clk_prepare(ddata->clk);
if (err)
@@ -395,32 +385,32 @@ static int st_rproc_probe(struct platform_device *pdev)
*/
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_clk;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 6560b7954027..dbc513c5569c 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -955,6 +955,13 @@ out:
return ret;
}
+static void k3_r5_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
struct device *dev = kproc->dev;
@@ -985,27 +992,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
+ if (ret)
+ return ret;
+
num_rmems--;
- kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem) {
- ret = -ENOMEM;
- goto release_rmem;
- }
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem_np)
+ return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
of_node_put(rmem_np);
- if (!rmem) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem)
+ return -EINVAL;
kproc->rmem[i].bus_addr = rmem->base;
/*
@@ -1020,12 +1025,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
*/
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
- ret = -ENOMEM;
- goto unmap_rmem;
+ return -ENOMEM;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
@@ -1036,25 +1040,6 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
kproc->num_rmems = num_rmems;
return 0;
-
-unmap_rmem:
- for (i--; i >= 0; i--)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-release_rmem:
- of_reserved_mem_device_release(dev);
- return ret;
-}
-
-static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
-{
- int i;
-
- for (i = 0; i < kproc->num_rmems; i++)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-
- of_reserved_mem_device_release(kproc->dev);
}
/*
@@ -1281,10 +1266,10 @@ init_rmem:
goto out;
}
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
- dev_err(dev, "rproc_add failed, ret = %d\n", ret);
- goto err_add;
+ dev_err_probe(dev, ret, "rproc_add failed\n");
+ goto out;
}
/* create only one rproc in lockstep, single-cpu or
@@ -1312,7 +1297,7 @@ init_rmem:
dev_err(dev,
"Timed out waiting for %s core to power up!\n",
rproc->name);
- goto err_powerup;
+ goto out;
}
}
@@ -1328,10 +1313,6 @@ err_split:
}
}
-err_powerup:
- rproc_del(rproc);
-err_add:
- k3_r5_reserved_mem_exit(kproc);
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
@@ -1374,10 +1355,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
mbox_free_channel(kproc->mbox);
-
- rproc_del(rproc);
-
- k3_r5_reserved_mem_exit(kproc);
}
}
@@ -1510,6 +1487,13 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return 0;
}
+static void k3_r5_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1603,6 +1587,10 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1619,13 +1607,7 @@ err:
*/
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
- struct k3_r5_core *core = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- int ret;
-
- ret = ti_sci_proc_release(core->tsp);
- if (ret)
- dev_err(dev, "failed to release proc, ret = %d\n", ret);
platform_set_drvdata(pdev, NULL);
devres_release_group(dev, k3_r5_core_of_init);
diff --git a/drivers/reset/amlogic/reset-meson-aux.c b/drivers/reset/amlogic/reset-meson-aux.c
index 61ce515d92a2..33c06013439e 100644
--- a/drivers/reset/amlogic/reset-meson-aux.c
+++ b/drivers/reset/amlogic/reset-meson-aux.c
@@ -11,20 +11,20 @@
#include <linux/auxiliary_bus.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#include <linux/slab.h>
#include "reset-meson.h"
-#include <soc/amlogic/reset-meson-aux.h>
-static DEFINE_IDA(meson_rst_aux_ida);
-
-struct meson_reset_adev {
- struct auxiliary_device adev;
- struct regmap *map;
+static const struct meson_reset_param meson_a1_audio_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 32,
+ .level_offset = 0x28,
};
-#define to_meson_reset_adev(_adev) \
- container_of((_adev), struct meson_reset_adev, adev)
+static const struct meson_reset_param meson_a1_audio_vad_param = {
+ .reset_ops = &meson_reset_toggle_ops,
+ .reset_num = 6,
+ .level_offset = 0x8,
+};
static const struct meson_reset_param meson_g12a_audio_param = {
.reset_ops = &meson_reset_toggle_ops,
@@ -40,6 +40,12 @@ static const struct meson_reset_param meson_sm1_audio_param = {
static const struct auxiliary_device_id meson_reset_aux_ids[] = {
{
+ .name = "a1-audio-clkc.rst-a1",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_param,
+ }, {
+ .name = "a1-audio-clkc.rst-a1-vad",
+ .driver_data = (kernel_ulong_t)&meson_a1_audio_vad_param,
+ }, {
.name = "axg-audio-clkc.rst-g12a",
.driver_data = (kernel_ulong_t)&meson_g12a_audio_param,
}, {
@@ -54,10 +60,13 @@ static int meson_reset_aux_probe(struct auxiliary_device *adev,
{
const struct meson_reset_param *param =
(const struct meson_reset_param *)(id->driver_data);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
+ struct regmap *map;
+
+ map = dev_get_regmap(adev->dev.parent, NULL);
+ if (!map)
+ return -EINVAL;
- return meson_reset_controller_register(&adev->dev, raux->map, param);
+ return meson_reset_controller_register(&adev->dev, map, param);
}
static struct auxiliary_driver meson_reset_aux_driver = {
@@ -66,70 +75,6 @@ static struct auxiliary_driver meson_reset_aux_driver = {
};
module_auxiliary_driver(meson_reset_aux_driver);
-static void meson_rst_aux_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
- struct meson_reset_adev *raux =
- to_meson_reset_adev(adev);
-
- ida_free(&meson_rst_aux_ida, adev->id);
- kfree(raux);
-}
-
-static void meson_rst_aux_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-int devm_meson_rst_aux_register(struct device *dev,
- struct regmap *map,
- const char *adev_name)
-{
- struct meson_reset_adev *raux;
- struct auxiliary_device *adev;
- int ret;
-
- raux = kzalloc(sizeof(*raux), GFP_KERNEL);
- if (!raux)
- return -ENOMEM;
-
- ret = ida_alloc(&meson_rst_aux_ida, GFP_KERNEL);
- if (ret < 0)
- goto raux_free;
-
- raux->map = map;
-
- adev = &raux->adev;
- adev->id = ret;
- adev->name = adev_name;
- adev->dev.parent = dev;
- adev->dev.release = meson_rst_aux_release;
- device_set_of_node_from_dev(&adev->dev, dev);
-
- ret = auxiliary_device_init(adev);
- if (ret)
- goto ida_free;
-
- ret = __auxiliary_device_add(adev, dev->driver->name);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(dev, meson_rst_aux_unregister_adev,
- adev);
-
-ida_free:
- ida_free(&meson_rst_aux_ida, adev->id);
-raux_free:
- kfree(raux);
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_meson_rst_aux_register);
-
MODULE_DESCRIPTION("Amlogic Meson Reset Auxiliary driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 712c06c02696..207b64c0a2fe 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -377,9 +377,9 @@ EXPORT_SYMBOL(rpmsg_get_mtu);
* this is used to make sure we're not creating rpmsg devices for channels
* that already exist.
*/
-static int rpmsg_device_match(struct device *dev, void *data)
+static int rpmsg_device_match(struct device *dev, const void *data)
{
- struct rpmsg_channel_info *chinfo = data;
+ const struct rpmsg_channel_info *chinfo = data;
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a60bcc791a48..0bbbf778ecfa 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1316,7 +1316,7 @@ config RTC_DRV_SC27XX
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
If you say Y here you will get support for the RTC found on
spear
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 5c39cf252392..a3e52a5a708f 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -308,7 +308,7 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
/* remember whether this power up is caused by PMIC RTC or not */
info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
out_rtc:
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 814230d61842..964cd048fcdb 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -326,7 +326,7 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
#endif /* VRTC_CALIBRATION */
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c
index 2278b4c98a71..09d78c2cc691 100644
--- a/drivers/rtc/rtc-amlogic-a4.c
+++ b/drivers/rtc/rtc-amlogic-a4.c
@@ -361,7 +361,7 @@ static int aml_rtc_probe(struct platform_device *pdev)
"failed to get_enable rtc sys clk\n");
aml_rtc_init(rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_allocate_device(dev);
@@ -391,7 +391,7 @@ static int aml_rtc_probe(struct platform_device *pdev)
return 0;
err_clk:
clk_disable_unprepare(rtc->sys_clk);
- device_init_wakeup(dev, 0);
+ device_init_wakeup(dev, false);
return ret;
}
@@ -426,7 +426,7 @@ static void aml_rtc_remove(struct platform_device *pdev)
struct aml_rtc_data *rtc = dev_get_drvdata(&pdev->dev);
clk_disable_unprepare(rtc->sys_clk);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
static const struct aml_rtc_config a5_rtc_config = {
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 569c1054d6b0..713fa0d077cd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -527,7 +527,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
if (rtc->irq != -1)
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, rtc->rtc_dev->features);
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 0f21af27f4cf..9682d6457b7f 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -187,7 +187,7 @@ static int as3722_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc",
&as3722_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 9b3898b8de7c..f6b0102a843a 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -528,7 +528,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
* being wake-capable; if it didn't, do that here.
*/
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
if (at91_rtc_config->has_correction)
rtc->ops = &sama5d4_rtc_ops;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 15b21da2788f..38991cca5930 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -353,7 +353,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
/* platform setup code should have handled this; sigh */
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, rtc);
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index bf2a9a1fdea7..8634eea799ab 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -359,7 +359,7 @@ static void cdns_rtc_remove(struct platform_device *pdev)
struct cdns_rtc *crtc = platform_get_drvdata(pdev);
cdns_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(crtc->pclk);
clk_disable_unprepare(crtc->ref_clk);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 78f2ce12c75a..8172869bd3d7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -151,11 +151,6 @@ static inline int hpet_set_periodic_freq(unsigned long freq)
return 0;
}
-static inline int hpet_rtc_dropped_irq(void)
-{
- return 0;
-}
-
static inline int hpet_rtc_timer_init(void)
{
return 0;
@@ -864,7 +859,7 @@ static void acpi_cmos_wake_setup(struct device *dev)
dev_info(dev, "RTC can wake from S4\n");
/* RTC always wakes from S1/S2/S3, and often S4/STD */
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
}
static void cmos_check_acpi_rtc_status(struct device *dev,
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index afc8fcba8f88..568a89e79c11 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -295,7 +295,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
}
disable_irq(rtc->update_irq);
- err = device_init_wakeup(dev, 1);
+ err = device_init_wakeup(dev, true);
if (err) {
dev_err(dev, "wakeup initialization failed (%d)\n", err);
/* ignore error and continue without wakeup support */
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 60a48c3ba3ca..865c2e82c7a5 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -337,7 +337,7 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, true);
if (ret) {
dev_err(&pdev->dev, "failed to initialize wakeup\n");
return ret;
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 844168fcae1e..05adec6b77bf 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -288,7 +288,7 @@ static int da9055_rtc_probe(struct platform_device *pdev)
if (ret & DA9055_RTC_ALM_EN)
rtc->alarm_enable = 1;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&da9055_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index dd37b055693c..19c09c418746 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -508,7 +508,7 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
return ret;
if (ds3232->irq > 0)
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
ds3232_hwmon_register(dev, name);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 7b82e4a14b7a..f71a6bb77b2a 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -830,7 +830,7 @@ static int isl1208_setup_irq(struct i2c_client *client, int irq)
isl1208_driver.driver.name,
client);
if (!rc) {
- device_init_wakeup(&client->dev, 1);
+ device_init_wakeup(&client->dev, true);
enable_irq_wake(irq);
} else {
dev_err(&client->dev,
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index bafa7d1b9b88..44bba356268c 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -367,7 +367,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
ret = dev_pm_set_wake_irq(dev, irq);
if (ret)
diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
index 8d713e563d7c..97e5625c064c 100644
--- a/drivers/rtc/rtc-loongson.c
+++ b/drivers/rtc/rtc-loongson.c
@@ -114,6 +114,13 @@ static irqreturn_t loongson_rtc_isr(int irq, void *id)
struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
+
+ /*
+ * The TOY_MATCH0_REG should be cleared to 0 here,
+ * otherwise the interrupt cannot be cleared.
+ */
+ regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+
return IRQ_HANDLED;
}
@@ -131,11 +138,7 @@ static u32 loongson_rtc_handler(void *id)
writel(RTC_STS, priv->pm_base + PM1_STS_REG);
spin_unlock(&priv->lock);
- /*
- * The TOY_MATCH0_REG should be cleared 0 here,
- * otherwise the interrupt cannot be cleared.
- */
- return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+ return ACPI_INTERRUPT_HANDLED;
}
static int loongson_rtc_set_enabled(struct device *dev)
@@ -329,7 +332,7 @@ static int loongson_rtc_probe(struct platform_device *pdev)
alarm_irq);
priv->pm_base = regs - priv->config->pm_offset;
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
if (has_acpi_companion(dev))
acpi_install_fixed_event_handler(ACPI_EVENT_RTC,
@@ -360,7 +363,7 @@ static void loongson_rtc_remove(struct platform_device *pdev)
acpi_remove_fixed_event_handler(ACPI_EVENT_RTC,
loongson_rtc_handler);
- device_init_wakeup(dev, 0);
+ device_init_wakeup(dev, false);
loongson_rtc_alarm_irq_enable(dev, 0);
}
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c
index c0b8fbce1082..0793d70507f7 100644
--- a/drivers/rtc/rtc-lp8788.c
+++ b/drivers/rtc/rtc-lp8788.c
@@ -293,7 +293,7 @@ static int lp8788_rtc_probe(struct platform_device *pdev)
rtc->alarm = lp->pdata ? lp->pdata->alarm_sel : DEFAULT_ALARM_SEL;
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
rtc->rdev = devm_rtc_device_register(dev, "lp8788_rtc",
&lp8788_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 76ad7031a13d..74280bffe1b0 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -257,7 +257,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Can't request interrupt.\n");
rtc->irq = -1;
} else {
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
}
}
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index a8f4b645c09d..7bb044d2ac25 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -770,7 +770,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
goto err_rtc;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
&max77686_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 64bb8ac6ef62..6ce8afbeac68 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -270,7 +270,7 @@ static int max8925_rtc_probe(struct platform_device *pdev)
/* XXX - isn't this redundant? */
platform_set_drvdata(pdev, info);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8925-rtc",
&max8925_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 20e50d9fdf88..e7618d715bd8 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -473,7 +473,7 @@ static int max8997_rtc_probe(struct platform_device *pdev)
max8997_rtc_enable_wtsr(info, true);
max8997_rtc_enable_smpl(info, true);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8997-rtc",
&max8997_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 648fa362ec44..5849729f7d01 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -74,7 +74,7 @@ static int meson_vrtc_probe(struct platform_device *pdev)
if (IS_ERR(vrtc->io_alarm))
return PTR_ERR(vrtc->io_alarm);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, vrtc);
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 600328131603..b90f8337a7e6 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -303,7 +303,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
return PTR_ERR(rtc->regs);
}
- device_init_wakeup(&op->dev, 1);
+ device_init_wakeup(&op->dev, true);
platform_set_drvdata(op, rtc);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 152699219a2b..6979d225a78e 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -286,7 +286,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc_dev->ops = &mtk_rtc_ops;
rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 51029c536244..c27ad626d09f 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -264,7 +264,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
if (pdata->irq >= 0)
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, pdata->rtc->features);
@@ -287,7 +287,7 @@ static void __exit mv_rtc_remove(struct platform_device *pdev)
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq >= 0)
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index dbb935dbbd8a..608db97d450c 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -377,7 +377,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
}
if (pdata->irq >= 0) {
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 13c041bb79f1..570f27af4732 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -302,7 +302,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return pdata->irq;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c123778e2d9b..0f90065e352c 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -920,7 +920,7 @@ static void omap_rtc_remove(struct platform_device *pdev)
omap_rtc_power_off_rtc = NULL;
}
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
if (!IS_ERR(rtc->clk))
clk_disable_unprepare(rtc->clk);
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 7256a88b490c..aecada6bcf8b 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -287,7 +287,7 @@ static int palmas_rtc_probe(struct platform_device *pdev)
palmas_rtc->irq = platform_get_irq(pdev, 0);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&palmas_rtc_ops, THIS_MODULE);
if (IS_ERR(palmas_rtc->rtc)) {
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9c04c4e1a49c..31c7dca8f469 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -48,6 +49,7 @@
#define PCF2127_BIT_CTRL3_BLF BIT(2)
#define PCF2127_BIT_CTRL3_BF BIT(3)
#define PCF2127_BIT_CTRL3_BTSE BIT(4)
+#define PCF2127_CTRL3_PM GENMASK(7, 5)
/* Time and date registers */
#define PCF2127_REG_TIME_BASE 0x03
#define PCF2127_BIT_SC_OSF BIT(7)
@@ -331,6 +333,84 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
+static int pcf2127_param_get(struct device *dev, struct rtc_param *param)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ switch (param->param) {
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(PCF2127_CTRL3_PM, value);
+
+ if (value < 0x3)
+ param->uvalue = RTC_BSM_LEVEL;
+ else if (value < 0x6)
+ param->uvalue = RTC_BSM_DIRECT;
+ else
+ param->uvalue = RTC_BSM_DISABLED;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pcf2127_param_set(struct device *dev, struct rtc_param *param)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ u8 mode = 0;
+ u32 value;
+ int ret;
+
+ switch (param->param) {
+ case RTC_PARAM_BACKUP_SWITCH_MODE:
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(PCF2127_CTRL3_PM, value);
+
+ if (value > 5)
+ value -= 5;
+ else if (value > 2)
+ value -= 3;
+
+ switch (param->uvalue) {
+ case RTC_BSM_LEVEL:
+ break;
+ case RTC_BSM_DIRECT:
+ mode = 3;
+ break;
+ case RTC_BSM_DISABLED:
+ if (value == 0)
+ value = 1;
+ mode = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
+ PCF2127_CTRL3_PM,
+ FIELD_PREP(PCF2127_CTRL3_PM, mode + value));
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int pcf2127_rtc_ioctl(struct device *dev,
unsigned int cmd, unsigned long arg)
{
@@ -741,6 +821,8 @@ static const struct rtc_class_ops pcf2127_rtc_ops = {
.read_alarm = pcf2127_rtc_read_alarm,
.set_alarm = pcf2127_rtc_set_alarm,
.alarm_irq_enable = pcf2127_rtc_alarm_irq_enable,
+ .param_get = pcf2127_param_get,
+ .param_set = pcf2127_param_set,
};
/* sysfs interface */
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index fdbc07f14036..905986c61655 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -322,7 +322,16 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
static int pcf85063_nvmem_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
- return regmap_read(priv, PCF85063_REG_RAM, val);
+ unsigned int tmp;
+ int ret;
+
+ ret = regmap_read(priv, PCF85063_REG_RAM, &tmp);
+ if (ret < 0)
+ return ret;
+
+ *(u8 *)val = tmp;
+
+ return 0;
}
static int pcf85063_nvmem_write(void *priv, unsigned int offset,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index bed3c27e665f..2812da2c50c5 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -330,7 +330,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
pic32_rtc_enable(pdata, 1);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
pdata->rtc->ops = &pic32_rtcops;
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 2f32187ecc8d..b2518aea4218 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -503,7 +503,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_dd);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_dd->rtc))
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 34d8545c8e15..62ee6b8f9bcd 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -360,7 +360,7 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
return 0;
}
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index eecb49bab56a..8ba9cda74acf 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -245,7 +245,7 @@ static int rc5t583_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ricoh_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&rc5t583_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index 711f62eecd79..74d169102074 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -414,7 +414,7 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
} else {
/* enable wake */
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
enable_irq_wake(rtc->irq);
}
} else {
diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c
index d127933bfc8a..a056291d3887 100644
--- a/drivers/rtc/rtc-renesas-rtca3.c
+++ b/drivers/rtc/rtc-renesas-rtca3.c
@@ -768,7 +768,7 @@ static int rtca3_probe(struct platform_device *pdev)
if (ret)
return ret;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
priv->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(priv->rtc_dev))
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 2d9bcb3ce1e3..59b8e9a30fe6 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -418,7 +418,7 @@ static int rk808_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rk808_rtc->rtc))
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index c0ac3bdb2f42..58c957eb753d 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -456,7 +456,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
readw(info->base + S3C2410_RTCCON));
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc)) {
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index dad294a0ce2a..36acca5b2639 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -729,7 +729,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->irq, ret);
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
}
return devm_rtc_register_device(info->rtc_dev);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 13799b1abca1..1ad93648d69c 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -292,7 +292,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return sa1100_rtc_init(pdev, info);
}
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index ce7a2ddbbc16..2b83561d4d28 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -613,14 +613,14 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
rtc->rtc->ops = &sprd_rtc_ops;
rtc->rtc->range_min = 0;
rtc->rtc->range_max = 5662310399LL;
ret = devm_rtc_register_device(rtc->rtc);
if (ret) {
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
return ret;
}
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index a5df521876ba..9ea40f40188f 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -611,7 +611,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (ret)
goto err_unmap;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
err_unmap:
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 26eed927f8b3..959acff8faff 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -395,7 +395,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
goto err_disable_clock;
if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return 0;
@@ -411,7 +411,7 @@ static void spear_rtc_remove(struct platform_device *pdev)
spear_rtc_disable_interrupt(config);
clk_disable_unprepare(config->clk);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 9f1a019ec8af..a0564d443569 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1074,26 +1074,18 @@ static int stm32_rtc_probe(struct platform_device *pdev)
regs = &rtc->data->regs;
if (rtc->data->need_dbp) {
- rtc->dbp = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "st,syscfg");
+ unsigned int args[2];
+
+ rtc->dbp = syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "st,syscfg",
+ 2, args);
if (IS_ERR(rtc->dbp)) {
dev_err(&pdev->dev, "no st,syscfg\n");
return PTR_ERR(rtc->dbp);
}
- ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
- 1, &rtc->dbp_reg);
- if (ret) {
- dev_err(&pdev->dev, "can't read DBP register offset\n");
- return ret;
- }
-
- ret = of_property_read_u32_index(pdev->dev.of_node, "st,syscfg",
- 2, &rtc->dbp_mask);
- if (ret) {
- dev_err(&pdev->dev, "can't read DBP register mask\n");
- return ret;
- }
+ rtc->dbp_reg = args[0];
+ rtc->dbp_mask = args[1];
}
if (!rtc->data->has_pclk) {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index e681c1745866..e5e6013d080e 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -826,7 +826,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
clk_prepare_enable(chip->losc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index 9b1ce0e8ba27..519a06e728d6 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -269,7 +269,7 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
if (ret)
goto free_reset_assert;
- device_init_wakeup(&plat_dev->dev, 1);
+ device_init_wakeup(&plat_dev->dev, true);
dev_set_drvdata(&plat_dev->dev, sp_rtc);
sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev);
@@ -307,7 +307,7 @@ static void sp_rtc_remove(struct platform_device *plat_dev)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
- device_init_wakeup(&plat_dev->dev, 0);
+ device_init_wakeup(&plat_dev->dev, false);
reset_control_assert(sp_rtc->rstc);
clk_disable_unprepare(sp_rtc->rtcclk);
}
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 79a3102c8354..46788db89953 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -319,7 +319,7 @@ static int tegra_rtc_probe(struct platform_device *pdev)
writel(0xffffffff, info->base + TEGRA_RTC_REG_INTR_STATUS);
writel(0, info->base + TEGRA_RTC_REG_INTR_MASK);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
ret = devm_request_irq(&pdev->dev, info->irq, tegra_rtc_irq_handler,
IRQF_TRIGGER_HIGH, dev_name(&pdev->dev),
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 7e0d8fb26465..a68b8c884102 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -132,7 +132,7 @@ static int test_probe(struct platform_device *plat_dev)
break;
default:
rtd->rtc->ops = &test_rtc_ops;
- device_init_wakeup(&plat_dev->dev, 1);
+ device_init_wakeup(&plat_dev->dev, true);
}
timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0);
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index e796729fc817..54c8429b16bf 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -241,7 +241,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2ea1bbfbbc2a..284aa2f0392b 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -418,7 +418,7 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->irq = irq;
if (irq != -1) {
if (device_property_present(tps65910->dev, "wakeup-source"))
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
else
device_set_wakeup_capable(&pdev->dev, 1);
} else {
diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
index e69667634137..7c6246e3f029 100644
--- a/drivers/rtc/rtc-tps6594.c
+++ b/drivers/rtc/rtc-tps6594.c
@@ -37,7 +37,7 @@
#define MAX_OFFSET (277774)
// Number of ticks per hour
-#define TICKS_PER_HOUR (32768 * 3600)
+#define TICKS_PER_HOUR (32768 * 3600LL)
// Multiplier for ppb conversions
#define PPB_MULT NANO
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 794429182b34..e6106e67e1f4 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -572,7 +572,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, twl_rtc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 640833e21057..218316be942a 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -420,7 +420,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
if (ret & WM831X_RTC_ALM_ENA)
wm831x_rtc->alarm_enabled = 1;
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
wm831x_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(wm831x_rtc->rtc))
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 6797eb4d2e49..3bd60d067a5e 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -420,7 +420,7 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
}
}
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
&wm8350_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 0813ea1a03c2..6660b664e8dd 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -174,7 +174,7 @@ static int xgene_rtc_probe(struct platform_device *pdev)
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- ret = device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, true);
if (ret) {
clk_disable_unprepare(pdata->clk);
return ret;
@@ -197,7 +197,7 @@ static void xgene_rtc_remove(struct platform_device *pdev)
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
xgene_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(pdata->clk);
}
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index af1abb69d1e3..f39102b66eac 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -318,8 +318,8 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- /* Getting the rtc_clk info */
- xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+ /* Getting the rtc info */
+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc");
if (IS_ERR(xrtcdev->rtc_clk)) {
if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
dev_warn(&pdev->dev, "Device clock not found.\n");
@@ -337,7 +337,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xlnx_init_rtc(xrtcdev);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, true);
return devm_rtc_register_device(xrtcdev->rtc);
}
@@ -345,7 +345,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
static void xlnx_rtc_remove(struct platform_device *pdev)
{
xlnx_rtc_alarm_irq_enable(&pdev->dev, 0);
- device_init_wakeup(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, false);
}
static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fbffd451031f..45bd001206a2 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -245,7 +245,6 @@ static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
-static int sclp_init(void);
static void
__sclp_queue_read_req(void)
@@ -1251,8 +1250,7 @@ static struct platform_driver sclp_pdrv = {
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
-static int
-sclp_init(void)
+int sclp_init(void)
{
unsigned long flags;
int rc = 0;
@@ -1305,13 +1303,7 @@ fail_unlock:
static __init int sclp_initcall(void)
{
- int rc;
-
- rc = platform_driver_register(&sclp_pdrv);
- if (rc)
- return rc;
-
- return sclp_init();
+ return platform_driver_register(&sclp_pdrv);
}
arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 3dd50ac9c5b0..b2d93a6e36c4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -123,7 +123,7 @@ static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
*/
static struct vmlogrdr_priv_t sys_ser[] = {
- { .system_service = "*LOGREC ",
+ { .system_service = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' },
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
@@ -132,7 +132,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
- { .system_service = "*ACCOUNT",
+ { .system_service = { '*', 'A', 'C', 'C', 'O', 'U', 'N', 'T' },
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
@@ -141,7 +141,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
- { .system_service = "*SYMPTOM",
+ { .system_service = { '*', 'S', 'Y', 'M', 'P', 'T', 'O', 'M' },
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
@@ -356,7 +356,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
- logptr->system_service, connect_rc);
+ logptr->internal_name, connect_rc);
goto out_path;
}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index cba2d048a96b..4a0b3f19bd8e 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -128,7 +128,7 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
* Channel measurement related functions
*/
static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
@@ -142,11 +142,11 @@ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
sizeof(chp->cmg_chars));
}
-static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
+static const BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
static ssize_t measurement_chars_full_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
@@ -196,22 +196,22 @@ static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count,
}
static ssize_t measurement_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return chp_measurement_copy_block(buf, off, count, kobj, false);
}
-static BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry));
+static const BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry));
static ssize_t ext_measurement_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return chp_measurement_copy_block(buf, off, count, kobj, true);
}
-static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
+static const BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
-static struct bin_attribute *measurement_attrs[] = {
+static const struct bin_attribute *measurement_attrs[] = {
&bin_attr_measurement_chars,
&bin_attr_measurement_chars_full,
&bin_attr_measurement,
@@ -435,7 +435,7 @@ static ssize_t speed_bps_show(struct device *dev,
static DEVICE_ATTR_RO(speed_bps);
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
@@ -448,10 +448,10 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RO(util_string,
- sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+static const BIN_ATTR_RO(util_string,
+ sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
-static struct bin_attribute *chp_bin_attrs[] = {
+static const struct bin_attribute *const chp_bin_attrs[] = {
&bin_attr_util_string,
NULL,
};
@@ -468,9 +468,9 @@ static struct attribute *chp_attrs[] = {
&dev_attr_speed_bps.attr,
NULL,
};
-static struct attribute_group chp_attr_group = {
+static const struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
- .bin_attrs = chp_bin_attrs,
+ .bin_attrs_new = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
&chp_attr_group,
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d6516ab00437..1d50f463afe7 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -537,6 +537,11 @@ static void zfcp_fc_adisc_handler(void *data)
/* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ /*
+ * port ref comes from get_device() in zfcp_fc_test_link() and
+ * work item zfcp_fc_link_test_work() passes ref via
+ * zfcp_fc_adisc() to here, if zfcp_fc_adisc() could send ADISC
+ */
put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
@@ -603,7 +608,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
retval = zfcp_fc_adisc(port);
if (retval == 0)
- return;
+ return; /* port ref passed to zfcp_fc_adisc(), no put here */
/* send of ADISC was not possible */
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 22e82000334a..99d6b3f8692b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1218,7 +1218,7 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
/**
* zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
* @wka_port: pointer to zfcp WKA port to send CT/GS to
- * @ct: pointer to struct zfcp_send_ct with data for request
+ * @ct: pointer to struct zfcp_fsf_ct_els with data for CT request
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
* @timeout: timeout that hardware should use, and a later software timeout
*/
@@ -1316,7 +1316,7 @@ skip_fsfstatus:
* zfcp_fsf_send_els - initiate an ELS command (FC-FS)
* @adapter: pointer to zfcp adapter
* @d_id: N_Port_ID to send ELS to
- * @els: pointer to struct zfcp_send_els with data for the command
+ * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
* @timeout: timeout that hardware should use, and a later software timeout
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b2a8cd792266..b31f860af47b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -37,11 +37,11 @@ static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
-static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+static void zfcp_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- /* if previous slave_alloc returned early, there is nothing to do */
+ /* if previous sdev_init returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;
@@ -49,7 +49,8 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
put_device(&zfcp_sdev->port->dev);
}
-static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
+static int zfcp_scsi_sdev_configure(struct scsi_device *sdp,
+ struct queue_limits *lim)
{
if (sdp->tagged_supported)
scsi_change_queue_depth(sdp, default_depth);
@@ -110,7 +111,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
return ret;
}
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
+static int zfcp_scsi_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
@@ -427,9 +428,9 @@ static const struct scsi_host_template zfcp_scsi_host_template = {
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
+ .sdev_init = zfcp_scsi_sdev_init,
+ .sdev_configure = zfcp_scsi_sdev_configure,
+ .sdev_destroy = zfcp_scsi_sdev_destroy,
.change_queue_depth = scsi_change_queue_depth,
.host_reset = zfcp_scsi_sysfs_host_reset,
.proc_name = "zfcp",
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 304b81bb5f90..41e36af35488 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -284,7 +284,7 @@ static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
goto unlock_host_lock;
}
- /* port is about to be removed, so no more unit_add or slave_alloc */
+ /* port is about to be removed, so no more unit_add or sdev_init */
zfcp_sysfs_port_set_removing(port);
in_use = false;
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 60f2a04f0869..4ef2a635d34f 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -170,7 +170,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
write_unlock_irq(&port->unit_list_lock);
/*
* lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
- * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+ * due to zfcp_unit_scsi_scan() => zfcp_scsi_sdev_init()
*/
mutex_unlock(&zfcp_sysfs_port_units_mutex);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 6fb61c88ea11..883d4a12a172 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1968,13 +1968,14 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
} /* End twa_string_lookup() */
/* This function gets called when a disk is coming on-line */
-static int twa_slave_configure(struct scsi_device *sdev)
+static int twa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twa_slave_configure() */
+} /* End twa_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1984,7 +1985,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twa_slave_configure,
+ .sdev_configure = twa_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2260,7 +2261,7 @@ out_disable_device:
} /* End twa_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twa_pci_tbl[] = {
+static const struct pci_device_id twa_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index caa6713a62a4..e057ab9c7b90 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -96,7 +96,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
/* This function returns AENs through sysfs */
static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -116,18 +116,18 @@ static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_aen_read() */
/* aen_read sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_aen_read_attr = {
+static const struct bin_attribute twl_sysfs_aen_read_attr = {
.attr = {
.name = "3ware_aen_read",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_aen_read
+ .read_new = twl_sysfs_aen_read
};
/* This function returns driver compatibility info through sysfs */
static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -147,13 +147,13 @@ static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_compat_info() */
/* compat_info sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_compat_info_attr = {
+static const struct bin_attribute twl_sysfs_compat_info_attr = {
.attr = {
.name = "3ware_compat_info",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_compat_info
+ .read_new = twl_sysfs_compat_info
};
/* Show some statistics about the card */
@@ -1523,13 +1523,14 @@ static void twl_shutdown(struct pci_dev *pdev)
} /* End twl_shutdown() */
/* This function configures unit settings when a unit is coming on-line */
-static int twl_slave_configure(struct scsi_device *sdev)
+static int twl_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twl_slave_configure() */
+} /* End twl_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1539,7 +1540,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twl_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twl_slave_configure,
+ .sdev_configure = twl_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -1821,7 +1822,7 @@ out_disable_device:
} /* End twl_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twl_pci_tbl[] = {
+static const struct pci_device_id twl_pci_tbl[] = {
{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
{ }
};
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2c0fb6da0e60..89bd56f78ef9 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -172,7 +172,7 @@
Initialize queues correctly when loading with no valid units.
1.02.00.034 - Fix tw_decode_bits() to handle multiple errors.
Add support for user configurable cmd_per_lun.
- Add support for sht->slave_configure().
+ Add support for sht->sdev_configure().
1.02.00.035 - Improve tw_allocate_memory() memory allocation.
Fix tw_chrdev_ioctl() to sleep correctly.
1.02.00.036 - Increase character ioctl timeout to 60 seconds.
@@ -2221,13 +2221,13 @@ static void tw_shutdown(struct pci_dev *pdev)
} /* End tw_shutdown() */
/* This function gets called when a disk is coming online */
-static int tw_slave_configure(struct scsi_device *sdev)
+static int tw_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End tw_slave_configure() */
+} /* End tw_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -2237,7 +2237,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = tw_slave_configure,
+ .sdev_configure = tw_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2393,7 +2393,7 @@ static void tw_remove(struct pci_dev *pdev)
} /* End tw_remove() */
/* PCI Devices supported by this driver */
-static struct pci_device_id tw_pci_tbl[] = {
+static const struct pci_device_id tw_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 85439e976143..71b7ac027f48 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -158,9 +158,10 @@ STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
-STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
-STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
-STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_init(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_configure(struct scsi_device *SDpnt,
+ struct queue_limits *lim);
+STATIC void NCR_700_sdev_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
STATIC const struct attribute_group *NCR_700_dev_groups[];
@@ -330,9 +331,9 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
- tpnt->slave_configure = NCR_700_slave_configure;
- tpnt->slave_destroy = NCR_700_slave_destroy;
- tpnt->slave_alloc = NCR_700_slave_alloc;
+ tpnt->sdev_configure = NCR_700_sdev_configure;
+ tpnt->sdev_destroy = NCR_700_sdev_destroy;
+ tpnt->sdev_init = NCR_700_sdev_init;
tpnt->change_queue_depth = NCR_700_change_queue_depth;
if(tpnt->name == NULL)
@@ -2017,7 +2018,7 @@ NCR_700_set_offset(struct scsi_target *STp, int offset)
}
STATIC int
-NCR_700_slave_alloc(struct scsi_device *SDp)
+NCR_700_sdev_init(struct scsi_device *SDp)
{
SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
GFP_KERNEL);
@@ -2029,7 +2030,7 @@ NCR_700_slave_alloc(struct scsi_device *SDp)
}
STATIC int
-NCR_700_slave_configure(struct scsi_device *SDp)
+NCR_700_sdev_configure(struct scsi_device *SDp, struct queue_limits *lim)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
@@ -2052,7 +2053,7 @@ NCR_700_slave_configure(struct scsi_device *SDp)
}
STATIC void
-NCR_700_slave_destroy(struct scsi_device *SDp)
+NCR_700_sdev_destroy(struct scsi_device *SDp)
{
kfree(SDp->hostdata);
SDp->hostdata = NULL;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 2135a2b3e2d0..1f100270cd38 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2153,14 +2153,15 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
}
/*
- blogic_slaveconfig will actually set the queue depth on individual
+ blogic_sdev_configure will actually set the queue depth on individual
scsi devices as they are permanently added to the device chain. We
shamelessly rip off the SelectQueueDepths code to make this work mostly
like it used to. Since we don't get called once at the end of the scan
but instead get called for each device, we have to do things a bit
differently.
*/
-static int blogic_slaveconfig(struct scsi_device *dev)
+static int blogic_sdev_configure(struct scsi_device *dev,
+ struct queue_limits *lim)
{
struct blogic_adapter *adapter =
(struct blogic_adapter *) dev->host->hostdata;
@@ -3672,7 +3673,7 @@ static const struct scsi_host_template blogic_template = {
.name = "BusLogic",
.info = blogic_drvr_info,
.queuecommand = blogic_qcmd,
- .slave_configure = blogic_slaveconfig,
+ .sdev_configure = blogic_sdev_configure,
.bios_param = blogic_diskparam,
.eh_host_reset_handler = blogic_hostreset,
#if 0
@@ -3715,7 +3716,7 @@ static void __exit blogic_exit(void)
__setup("BusLogic=", blogic_setup);
#ifdef MODULE
-/*static struct pci_device_id blogic_pci_tbl[] = {
+/*static const struct pci_device_id blogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 7d1ec10f2430..61bf26d4fc10 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1274,7 +1274,8 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
static const char *blogic_drvr_info(struct Scsi_Host *);
static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
-static int blogic_slaveconfig(struct scsi_device *);
+static int blogic_sdev_configure(struct scsi_device *,
+ struct queue_limits *lim);
static void blogic_qcompleted_ccb(struct blogic_ccb *);
static irqreturn_t blogic_inthandler(int, void *);
static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b95147fb18b0..a8979f9e30ff 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1206,7 +1206,7 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_host_put(shost);
}
-static struct pci_device_id inia100_pci_tbl[] = {
+static const struct pci_device_id inia100_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68f4dbcfff49..91170a67cc91 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -377,15 +377,17 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
}
/**
- * aac_slave_configure - compute queue depths
+ * aac_sdev_configure - compute queue depths
* @sdev: SCSI device we are considering
+ * @lim: Request queue limits
*
* Selects queue depths for each target device based on the host adapter's
* total capacity and the queue depth supported by the target device.
* A queue depth of one automatically disables tagged queueing.
*/
-static int aac_slave_configure(struct scsi_device *sdev)
+static int aac_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
int chn, tid;
@@ -1487,7 +1489,7 @@ static const struct scsi_host_template aac_driver_template = {
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
.shost_groups = aac_host_groups,
- .slave_configure = aac_slave_configure,
+ .sdev_configure = aac_sdev_configure,
.change_queue_depth = aac_change_queue_depth,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index fd4fcb37863d..3a2c336307c0 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -4496,7 +4496,7 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5013,7 +5013,7 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5508,7 +5508,7 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -7219,7 +7219,7 @@ static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
}
static void
-advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
+advansys_narrow_sdev_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
{
ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;
@@ -7345,7 +7345,7 @@ static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
}
static void
-advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
+advansys_wide_sdev_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
{
AdvPortAddr iop_base = adv_dvc->iop_base;
unsigned short tidmask = 1 << sdev->id;
@@ -7391,16 +7391,17 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
* Set the number of commands to queue per device for the
* specified host adapter.
*/
-static int advansys_slave_configure(struct scsi_device *sdev)
+static int advansys_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct asc_board *boardp = shost_priv(sdev->host);
if (ASC_NARROW_BOARD(boardp))
- advansys_narrow_slave_configure(sdev,
- &boardp->dvc_var.asc_dvc_var);
+ advansys_narrow_sdev_configure(sdev,
+ &boardp->dvc_var.asc_dvc_var);
else
- advansys_wide_slave_configure(sdev,
- &boardp->dvc_var.adv_dvc_var);
+ advansys_wide_sdev_configure(sdev,
+ &boardp->dvc_var.adv_dvc_var);
return 0;
}
@@ -10612,7 +10613,7 @@ static const struct scsi_host_template advansys_template = {
.queuecommand = advansys_queuecommand,
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
- .slave_configure = advansys_slave_configure,
+ .sdev_configure = advansys_sdev_configure,
.cmd_size = sizeof(struct advansys_cmd),
};
@@ -11408,7 +11409,7 @@ static struct eisa_driver advansys_eisa_driver = {
};
/* PCI Devices supported by this driver */
-static struct pci_device_id advansys_pci_tbl[] = {
+static const struct pci_device_id advansys_pci_tbl[] = {
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4202059815a0..17dfc3c72110 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -672,7 +672,7 @@ ahd_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahd_linux_slave_alloc(struct scsi_device *sdev)
+ahd_linux_sdev_init(struct scsi_device *sdev)
{
struct ahd_softc *ahd =
*((struct ahd_softc **)sdev->host->hostdata);
@@ -701,7 +701,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahd_linux_slave_configure(struct scsi_device *sdev)
+ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -906,8 +906,8 @@ struct scsi_host_template aic79xx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahd_linux_slave_alloc,
- .slave_configure = ahd_linux_slave_configure,
+ .sdev_init = ahd_linux_sdev_init,
+ .sdev_configure = ahd_linux_sdev_configure,
.target_alloc = ahd_linux_target_alloc,
.target_destroy = ahd_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b0c4f2345321..cebf8c5d0caf 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -632,7 +632,7 @@ ahc_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahc_linux_slave_alloc(struct scsi_device *sdev)
+ahc_linux_sdev_init(struct scsi_device *sdev)
{
struct ahc_softc *ahc =
*((struct ahc_softc **)sdev->host->hostdata);
@@ -664,7 +664,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahc_linux_slave_configure(struct scsi_device *sdev)
+ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -791,8 +791,8 @@ struct scsi_host_template aic7xxx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahc_linux_slave_alloc,
- .slave_configure = ahc_linux_slave_configure,
+ .sdev_init = ahc_linux_sdev_init,
+ .sdev_configure = ahc_linux_sdev_configure,
.target_alloc = ahc_linux_target_alloc,
.target_destroy = ahc_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 65182ad9cdf8..b1c9ce477cbd 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -102,6 +102,7 @@ static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
static int is_location_address(symbol_t *symbol);
+int yylex();
void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 8c0479865f04..5c7350eb5b5c 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -61,6 +61,7 @@
static symbol_t *macro_symbol;
static void add_macro_arg(const char *argtext, int position);
+int mmlex();
void mmerror(const char *string);
%}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index c78d4f68eea5..fc7e6c58148d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -64,6 +64,9 @@ static char *string_buf_ptr;
static int parren_count;
static int quote_count;
static char buf[255];
+void mm_switch_to_buffer(YY_BUFFER_STATE);
+void mmparse();
+void mm_delete_buffer(YY_BUFFER_STATE);
%}
PATH ([/]*[-A-Za-z0-9_.])+
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index fbb29dbb1e50..003e61831e33 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -513,7 +513,7 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_host_put(esp->host);
}
-static struct pci_device_id am53c974_pci_tbl[] = {
+static const struct pci_device_id am53c974_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ }
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index baeb5e795690..8e3d4799ce93 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -60,7 +60,7 @@
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -107,7 +107,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -155,7 +155,7 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static const struct bin_attribute arcmsr_sysfs_message_read_attr = {
.mode = S_IRUSR ,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .read = arcmsr_sysfs_iop_message_read,
+ .read_new = arcmsr_sysfs_iop_message_read,
};
static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
@@ -203,7 +203,7 @@ static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
.mode = S_IWUSR,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .write = arcmsr_sysfs_iop_message_write,
+ .write_new = arcmsr_sysfs_iop_message_write,
};
static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
@@ -212,7 +212,7 @@ static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.mode = S_IWUSR,
},
.size = 1,
- .write = arcmsr_sysfs_iop_message_clear,
+ .write_new = arcmsr_sysfs_iop_message_clear,
};
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
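The arcmsr sysfs hunks above illustrate another API move in this series: binary attribute callbacks now take a const struct bin_attribute and are wired up through the read_new/write_new members. A minimal sketch with hypothetical names:

#include <linux/sysfs.h>
#include <linux/stat.h>

/* hypothetical read callback using the const bin_attribute prototype */
static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *bin,
			    char *buf, loff_t off, size_t count)
{
	return 0;	/* fill buf and return the number of bytes copied */
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = S_IRUSR },
	.size = 0,
	.read_new = example_read,
};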
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 35860c61468b..221a520e8a9b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -143,7 +143,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
-static int arcmsr_slave_config(struct scsi_device *sdev);
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -160,7 +161,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
- .slave_configure = arcmsr_slave_config,
+ .sdev_configure = arcmsr_sdev_configure,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
@@ -171,7 +172,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.no_write_same = 1,
};
-static struct pci_device_id arcmsr_device_id_table[] = {
+static const struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
@@ -1044,7 +1045,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
- pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
add_timer(&pacb->refresh_timer);
}
@@ -3344,7 +3345,8 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static int arcmsr_slave_config(struct scsi_device *sdev)
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
unsigned int dev_timeout;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 928151ec927a..401242912855 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1743,7 +1743,7 @@ static const struct scsi_host_template atp870u_template = {
.max_sectors = ATP870U_MAX_SECTORS,
};
-static struct pci_device_id atp870u_id_table[] = {
+static const struct pci_device_id atp870u_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 66fb701401de..a719a18f0fbc 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -25,7 +25,7 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
-static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static int bfad_im_sdev_init(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
@@ -404,10 +404,10 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
}
/*
- * Scsi_Host template entry slave_destroy.
+ * Scsi_Host template entry sdev_destroy.
*/
static void
-bfad_im_slave_destroy(struct scsi_device *sdev)
+bfad_im_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
return;
@@ -783,7 +783,7 @@ bfad_thread_workq(struct bfad_s *bfad)
* Return non-zero if fails.
*/
static int
-bfad_im_slave_configure(struct scsi_device *sdev)
+bfad_im_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
return 0;
@@ -800,9 +800,9 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -823,9 +823,9 @@ struct scsi_host_template bfad_im_vport_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -915,7 +915,7 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
- * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Function is invoked from the SCSI Host Template sdev_init() entry point.
* Has the logic to query the LUN Mask database to check if this LUN needs to
* be made visible to the SCSI mid-layer or not.
*
@@ -946,10 +946,10 @@ bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
}
/*
- * Scsi_Host template entry slave_alloc
+ * Scsi_Host template entry sdev_init
*/
static int
-bfad_im_slave_alloc(struct scsi_device *sdev)
+bfad_im_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct bfad_itnim_data_s *itnim_data;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f49783b89d04..5ac20c93637c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2610,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2fc_percpu, cpu);
- thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
- (void *)p, cpu_to_node(cpu),
- "bnx2fc_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
+ (void *)p, cpu, "bnx2fc_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@@ -2652,7 +2649,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
return 0;
}
-static int bnx2fc_slave_configure(struct scsi_device *sdev)
+static int bnx2fc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (!bnx2fc_queue_depth)
return 0;
@@ -2951,7 +2949,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -2959,7 +2957,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.dma_boundary = 0x7fff,
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
- .slave_configure = bnx2fc_slave_configure,
+ .sdev_configure = bnx2fc_sdev_configure,
.shost_groups = bnx2fc_host_groups,
.cmd_size = sizeof(struct bnx2fc_priv),
};
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 872ad37e2a6e..cecc3a026762 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -415,14 +415,11 @@ static int bnx2i_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2i_percpu, cpu);
- thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "bnx2i_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p,
+ cpu, "bnx2i_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
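bnx2fc and bnx2i make the same conversion here: kthread_create_on_node() followed by an explicit kthread_bind() is replaced by kthread_create_on_cpu(), which binds the thread itself. A minimal sketch of the resulting CPU-online callback, with hypothetical names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int example_io_thread(void *data);	/* hypothetical per-CPU worker */

static int example_cpu_online(unsigned int cpu)
{
	struct task_struct *thread;

	thread = kthread_create_on_cpu(example_io_thread, NULL, cpu,
				       "example_thread/%d");
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* no separate kthread_bind(): kthread_create_on_cpu() already
	 * bound the thread to @cpu */
	wake_up_process(thread);
	return 0;
}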
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 8329f0cab4e7..34bde6650fae 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -800,7 +800,7 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
rn = req->rnode;
/*
* FW says remote device is lost, but rnode
- * doesnt reflect it.
+ * doesn't reflect it.
*/
if (csio_scsi_itnexus_loss_error(req->wr_status) &&
csio_is_rnode_ready(rn)) {
@@ -2224,7 +2224,7 @@ fail:
}
static int
-csio_slave_alloc(struct scsi_device *sdev)
+csio_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2237,14 +2237,14 @@ csio_slave_alloc(struct scsi_device *sdev)
}
static int
-csio_slave_configure(struct scsi_device *sdev)
+csio_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, csio_lun_qdepth);
return 0;
}
static void
-csio_slave_destroy(struct scsi_device *sdev)
+csio_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -2276,9 +2276,9 @@ struct scsi_host_template csio_fcoe_shost_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
@@ -2295,9 +2295,9 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 62806f5e32e6..ae626e389c8b 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3177,7 +3177,7 @@ static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
/*
* PCI device binding table
*/
-static struct pci_device_id cxlflash_pci_table[] = {
+static const struct pci_device_id cxlflash_pci_table[] = {
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index b375509d1470..97631f48e19d 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
*
* This routine is the release handler for the fops registered with
* the CXL services on an initial attach for a context. It is called
- * when a close (explicity by the user or as part of a process tear
+ * when a close (explicitly by the user or as part of a process tear
* down) is performed on the adapter file descriptor returned to the
* user. The user should be aware that explicitly performing a close
* considered catastrophic and subsequent usage of the superpipe API
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index d108a86e196e..e71de2419758 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3715,13 +3715,13 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
/**
- * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
+ * dc395x_sdev_init - Called by the scsi mid layer to tell us about a new
* scsi device that we need to deal with. We allocate a new device and then
* insert that device into the adapters device list.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static int dc395x_slave_alloc(struct scsi_device *scsi_device)
+static int dc395x_sdev_init(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb;
@@ -3736,12 +3736,12 @@ static int dc395x_slave_alloc(struct scsi_device *scsi_device)
/**
- * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
+ * dc395x_sdev_destroy - Called by the scsi mid layer to tell us about a
* device that is going away.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static void dc395x_slave_destroy(struct scsi_device *scsi_device)
+static void dc395x_sdev_destroy(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
@@ -4547,8 +4547,8 @@ static const struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .slave_alloc = dc395x_slave_alloc,
- .slave_destroy = dc395x_slave_destroy,
+ .sdev_init = dc395x_sdev_init,
+ .sdev_destroy = dc395x_sdev_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
.this_id = 7,
.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
@@ -4668,7 +4668,7 @@ static void dc395x_remove_one(struct pci_dev *dev)
}
-static struct pci_device_id dc395x_pci_table[] = {
+static const struct pci_device_id dc395x_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_TEKRAM,
.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index dfb091d34363..d6d091b2f3c7 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -127,7 +127,7 @@ static void dmx3191d_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id dmx3191d_pci_tbl[] = {
+static const struct pci_device_id dmx3191d_pci_tbl[] = {
{PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ }
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 55d2301bfd7d..8469c156ab33 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -470,7 +470,7 @@ out:
return rc;
}
-static struct pci_device_id efct_pci_table[] = {
+static const struct pci_device_id efct_pci_table[] = {
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
{} /* terminate list */
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 1e2d7c63a8e3..c48275d53aef 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -1411,11 +1411,11 @@ static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
}
/* sysfs handlers */
-extern struct bin_attribute bin_attr_fw;
-extern struct bin_attribute bin_attr_fs;
-extern struct bin_attribute bin_attr_vda;
-extern struct bin_attribute bin_attr_hw;
-extern struct bin_attribute bin_attr_live_nvram;
-extern struct bin_attribute bin_attr_default_nvram;
+extern const struct bin_attribute bin_attr_fw;
+extern const struct bin_attribute bin_attr_fs;
+extern const struct bin_attribute bin_attr_vda;
+extern const struct bin_attribute bin_attr_hw;
+extern const struct bin_attribute bin_attr_live_nvram;
+extern const struct bin_attribute bin_attr_default_nvram;
#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f700a16cd885..44871746944a 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -66,7 +66,7 @@ static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
}
static ssize_t read_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -75,7 +75,7 @@ static ssize_t read_fw(struct file *file, struct kobject *kobj,
}
static ssize_t write_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -84,7 +84,7 @@ static ssize_t write_fw(struct file *file, struct kobject *kobj,
}
static ssize_t read_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -93,7 +93,7 @@ static ssize_t read_fs(struct file *file, struct kobject *kobj,
}
static ssize_t write_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -109,7 +109,7 @@ static ssize_t write_fs(struct file *file, struct kobject *kobj,
}
static ssize_t read_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -118,7 +118,7 @@ static ssize_t read_vda(struct file *file, struct kobject *kobj,
}
static ssize_t write_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -127,7 +127,7 @@ static ssize_t write_vda(struct file *file, struct kobject *kobj,
}
static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -138,7 +138,7 @@ static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -158,7 +158,7 @@ static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -169,7 +169,7 @@ static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -187,7 +187,7 @@ static ssize_t read_hw(struct file *file, struct kobject *kobj,
}
static ssize_t write_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -211,12 +211,12 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
}
#define ESAS2R_RW_BIN_ATTR(_name) \
- struct bin_attribute bin_attr_ ## _name = { \
+ const struct bin_attribute bin_attr_ ## _name = { \
.attr = \
{ .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
.size = 0, \
- .read = read_ ## _name, \
- .write = write_ ## _name }
+ .read_new = read_ ## _name, \
+ .write_new = write_ ## _name }
ESAS2R_RW_BIN_ATTR(fw);
ESAS2R_RW_BIN_ATTR(fs);
@@ -224,10 +224,10 @@ ESAS2R_RW_BIN_ATTR(vda);
ESAS2R_RW_BIN_ATTR(hw);
ESAS2R_RW_BIN_ATTR(live_nvram);
-struct bin_attribute bin_attr_default_nvram = {
+const struct bin_attribute bin_attr_default_nvram = {
.attr = { .name = "default_nvram", .mode = S_IRUGO },
.size = 0,
- .read = read_default_nvram,
+ .read_new = read_default_nvram,
.write = NULL
};
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 0175d2282b45..802718ffad84 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2261,7 +2261,7 @@ static void esp_init_swstate(struct esp *esp)
INIT_LIST_HEAD(&esp->active_cmds);
INIT_LIST_HEAD(&esp->esp_cmd_pool);
- /* Start with a clear state, domain validation (via ->slave_configure,
+ /* Start with a clear state, domain validation (via ->sdev_configure,
* spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
* commands.
*/
@@ -2441,7 +2441,7 @@ static void esp_target_destroy(struct scsi_target *starget)
tp->starget = NULL;
}
-static int esp_slave_alloc(struct scsi_device *dev)
+static int esp_sdev_init(struct scsi_device *dev)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2463,7 +2463,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
return 0;
}
-static int esp_slave_configure(struct scsi_device *dev)
+static int esp_sdev_configure(struct scsi_device *dev, struct queue_limits *lim)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2479,7 +2479,7 @@ static int esp_slave_configure(struct scsi_device *dev)
return 0;
}
-static void esp_slave_destroy(struct scsi_device *dev)
+static void esp_sdev_destroy(struct scsi_device *dev)
{
struct esp_lun_data *lp = dev->hostdata;
@@ -2667,9 +2667,9 @@ const struct scsi_host_template scsi_esp_template = {
.queuecommand = esp_queuecommand,
.target_alloc = esp_target_alloc,
.target_destroy = esp_target_destroy,
- .slave_alloc = esp_slave_alloc,
- .slave_configure = esp_slave_configure,
- .slave_destroy = esp_slave_destroy,
+ .sdev_init = esp_sdev_init,
+ .sdev_configure = esp_sdev_configure,
+ .sdev_destroy = esp_sdev_destroy,
.eh_abort_handler = esp_eh_abort_handler,
.eh_bus_reset_handler = esp_eh_bus_reset_handler,
.eh_host_reset_handler = esp_eh_host_reset_handler,
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 00cd7c0ccc76..7bb0b69bff24 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -80,7 +80,7 @@
/* ESP config register 4 read-write */
#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */
-#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
+#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */
#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */
#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 39aec710660c..038e38578676 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -269,7 +269,7 @@ static const struct scsi_host_template fcoe_shost_template = {
.eh_abort_handler = fc_eh_abort,
.eh_device_reset_handler = fc_eh_device_reset,
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c
index 3e05ce7b89e5..c15b2ce76e9f 100644
--- a/drivers/scsi/fdomain_pci.c
+++ b/drivers/scsi/fdomain_pci.c
@@ -47,7 +47,7 @@ static void fdomain_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id fdomain_pci_table[] = {
+static const struct pci_device_id fdomain_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
{}
};
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 6214a6b2e96d..c025e875009e 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -2,11 +2,13 @@
obj-$(CONFIG_FCOE_FNIC) += fnic.o
fnic-y := \
+ fip.o\
fnic_attrs.o \
fnic_isr.o \
fnic_main.o \
fnic_res.o \
fnic_fcs.o \
+ fdls_disc.o \
fnic_scsi.o \
fnic_trace.o \
fnic_debugfs.o \
@@ -15,4 +17,5 @@ fnic-y := \
vnic_intr.o \
vnic_rq.o \
vnic_wq_copy.o \
- vnic_wq.o
+ vnic_wq.o \
+ fnic_pci_subsys_devid.o
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
new file mode 100644
index 000000000000..11211c469583
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -0,0 +1,4997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/workqueue.h>
+#include "fnic.h"
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_transport_fc.h>
+#include <linux/utsname.h>
+
+#define FC_FC4_TYPE_SCSI 0x08
+#define PORT_SPEED_BIT_8 8
+#define PORT_SPEED_BIT_9 9
+#define PORT_SPEED_BIT_14 14
+#define PORT_SPEED_BIT_15 15
+
+/* FNIC FDMI Register HBA Macros */
+#define FNIC_FDMI_NUM_PORTS 1
+#define FNIC_FDMI_NUM_HBA_ATTRS 9
+#define FNIC_FDMI_TYPE_NODE_NAME 0X1
+#define FNIC_FDMI_TYPE_MANUFACTURER 0X2
+#define FNIC_FDMI_MANUFACTURER "Cisco Systems"
+#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3
+#define FNIC_FDMI_TYPE_MODEL 0X4
+#define FNIC_FDMI_TYPE_MODEL_DES 0X5
+#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card"
+#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6
+#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7
+#define FNIC_FDMI_TYPE_ROM_VERSION 0X8
+#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9
+#define FNIC_FDMI_NN_LEN 8
+#define FNIC_FDMI_MANU_LEN 20
+#define FNIC_FDMI_SERIAL_LEN 16
+#define FNIC_FDMI_MODEL_LEN 12
+#define FNIC_FDMI_MODEL_DES_LEN 56
+#define FNIC_FDMI_HW_VER_LEN 16
+#define FNIC_FDMI_DR_VER_LEN 28
+#define FNIC_FDMI_ROM_VER_LEN 8
+#define FNIC_FDMI_FW_VER_LEN 16
+
+/* FNIC FDMI Register PA Macros */
+#define FNIC_FDMI_TYPE_FC4_TYPES 0X1
+#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2
+#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3
+#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4
+#define FNIC_FDMI_TYPE_OS_NAME 0X5
+#define FNIC_FDMI_TYPE_HOST_NAME 0X6
+#define FNIC_FDMI_NUM_PORT_ATTRS 6
+#define FNIC_FDMI_FC4_LEN 32
+#define FNIC_FDMI_SUPP_SPEED_LEN 4
+#define FNIC_FDMI_CUR_SPEED_LEN 4
+#define FNIC_FDMI_MFS_LEN 4
+#define FNIC_FDMI_MFS 0x800
+#define FNIC_FDMI_OS_NAME_LEN 16
+#define FNIC_FDMI_HN_LEN 24
+
+#define FDLS_FDMI_PLOGI_PENDING 0x1
+#define FDLS_FDMI_REG_HBA_PENDING 0x2
+#define FDLS_FDMI_RPA_PENDING 0x4
+#define FDLS_FDMI_ABORT_PENDING 0x8
+#define FDLS_FDMI_MAX_RETRY 3
+
+#define RETRIES_EXHAUSTED(iport) \
+ (iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY)
+
+#define FNIC_TPORT_MAX_NEXUS_RESTART (8)
+
+#define SCHEDULE_OXID_FREE_RETRY_TIME (300)
+
+/* Private Functions */
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport);
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport);
+static void fdls_send_rpn_id(struct fnic_iport_s *iport);
+static void fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ void *rx_frame);
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport);
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport);
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid,
+ uint64_t wwpn);
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport);
+static void fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout);
+static void fdls_tport_timer_callback(struct timer_list *t);
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport);
+static void fdls_start_fabric_timer(struct fnic_iport_s *iport,
+ int timeout);
+static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport);
+
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *frame = NULL;
+
+ frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame");
+ return NULL;
+ }
+
+ memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ);
+ return frame;
+}
+
+/**
+ * fdls_alloc_oxid - Allocate an oxid from the bitmap based oxid pool
+ * @iport: Handle to iport instance
+ * @oxid_frame_type: Type of frame to allocate
+ * @active_oxid: the oxid which is in use
+ *
+ * Called with fnic lock held
+ */
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ int idx;
+ uint16_t oxid;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ /*
+ * Allocate next available oxid from bitmap
+ */
+ idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx);
+ if (idx == FNIC_OXID_POOL_SZ) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Alloc oxid: all oxid slots are busy iport state:%d\n",
+ iport->state);
+ return FNIC_UNASSIGNED_OXID;
+ }
+
+ WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap));
+ oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */
+
+ oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type);
+ *active_oxid = oxid;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "alloc oxid: 0x%x, iport state: %d\n",
+ oxid, iport->state);
+ return oxid;
+}
+
+/**
+ * fdls_free_oxid_idx - Free the oxid using the idx
+ * @iport: Handle to iport instance
+ * @oxid_idx: The index to free
+ *
+ * Free the oxid immediately and make it available for new requests
+ * Called with fnic lock held
+ */
+static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "free oxid idx: 0x%x\n", oxid_idx);
+
+ WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap));
+}
+
+/**
+ * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work
+ * @work: Handle to work_struct
+ *
+ * Scheduled when an oxid is to be freed later
+ * After freeing expired oxid(s), the handler schedules
+ * another callback with the remaining time
+ * of next unexpired entry in the reclaim list.
+ */
+void fdls_reclaim_oxid_handler(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, oxid_reclaim_work.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry, *next;
+ unsigned long delay_j, cur_jiffies;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reclaim oxid callback\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ /* Though the work was scheduled for one entry,
+ * walk through and free the expired entries which might have been scheduled
+ * at around the same time as the first entry
+ */
+ list_for_each_entry_safe(reclaim_entry, next,
+ &(oxid_pool->oxid_reclaim_list), links) {
+
+ /* The list is always maintained in the order of expiry time */
+ cur_jiffies = jiffies;
+ if (time_before(cur_jiffies, reclaim_entry->expires))
+ break;
+
+ list_del(&reclaim_entry->links);
+ fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx);
+ kfree(reclaim_entry);
+ }
+
+ /* schedule to free up the next entry */
+ if (!list_empty(&oxid_pool->oxid_reclaim_list)) {
+ reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list,
+ struct reclaim_entry_s, links);
+
+ delay_j = reclaim_entry->expires - cur_jiffies;
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Scheduling next callback at:%ld jiffies\n", delay_j);
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+}
+
+/**
+ * fdls_free_oxid - Helper function to free the oxid
+ * @iport: Handle to iport instance
+ * @oxid: oxid to free
+ * @active_oxid: the oxid which is in use
+ *
+ * Called with fnic lock held
+ */
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid)
+{
+ fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid));
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+}
+
+/**
+ * fdls_schedule_oxid_free - Schedule oxid to be freed later
+ * @iport: Handle to iport instance
+ * @active_oxid: the oxid which is in use
+ *
+ * Gets called in a rare case scenario when both a command
+ * (fdls or target discovery) timed out and the following ABTS
+ * timed out as well, without a link change.
+ *
+ * Called with fnic lock held
+ */
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ int oxid_idx = FNIC_OXID_IDX(*active_oxid);
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid: 0x%x\n", *active_oxid);
+
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+
+ reclaim_entry = (struct reclaim_entry_s *)
+ kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC);
+
+ if (!reclaim_entry) {
+ FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for reclaim struct for oxid idx: %d\n",
+ oxid_idx);
+
+ /* Retry the scheduling */
+ WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free));
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0);
+ return;
+ }
+
+ reclaim_entry->oxid_idx = oxid_idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+}
+
+/**
+ * fdls_schedule_oxid_free_retry_work - Thread to schedule the
+ * oxid to be freed later
+ *
+ * @work: Handle to the work struct
+ */
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, schedule_oxid_free_retry.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ int idx;
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid idx: %d\n", idx);
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ reclaim_entry = (struct reclaim_entry_s *)
+ kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ if (!reclaim_entry) {
+ FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n",
+ idx);
+
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
+ msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
+
+ if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) {
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ } else {
+ /* unlikely scenario, free the allocated memory and continue */
+ kfree(reclaim_entry);
+ }
+}
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+}
+
+static bool fdls_is_oxid_fabric_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_fdmi_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_tgt_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void fdls_reset_oxid_pool(struct fnic_iport_s *iport)
+{
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ oxid_pool->next_idx = 0;
+}
+
+void fnic_del_fabric_timer_sync(struct fnic *fnic)
+{
+ fnic->iport.fabric.del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ del_timer_sync(&fnic->iport.fabric.retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->iport.fabric.del_timer_inprogress = 0;
+}
+
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport)
+{
+ tport->del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ del_timer_sync(&tport->retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ tport->del_timer_inprogress = 0;
+}
+
+static void
+fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ iport->fabric.timer_pending = 0;
+ }
+
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ iport->fabric.retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov));
+ iport->fabric.timer_pending = 1;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric timer is %d ", timeout);
+}
+
+static void
+fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED))
+ tport->retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&tport->retry_timer, round_jiffies(fabric_tov));
+ tport->timer_pending = 1;
+}
+
+void fdls_init_plogi_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_flogi *pplogi;
+ uint8_t s_id[3];
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pplogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_PLOGI,
+ .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_features = cpu_to_be16(FC_SP_FT_CIRO),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ),
+ .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS),
+ .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO),
+ .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)},
+ .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ),
+ .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800),
+ .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF),
+ .fl_cssp[2].cp_open_seq = 1}
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size);
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pplogi->fchdr, s_id);
+}
+
+static void fdls_init_els_acc_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint8_t s_id[3];
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_acc = (struct fc_std_els_acc_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP,
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .acc.la_cmd = ELS_LS_ACC,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id);
+ FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_els_rjt_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_rjt_rsp *pels_rjt;
+
+ pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_rjt = (struct fc_std_els_rjt_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .rej.er_cmd = ELS_LS_RJT,
+ };
+
+ FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_logo_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_logo *plogo;
+ uint8_t s_id[3];
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *plogo = (struct fc_std_logo) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}},
+ .els.fl_cmd = ELS_LOGO,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(plogo->fchdr, s_id);
+ memcpy(plogo->els.fl_n_port_id, s_id, 3);
+
+ FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn,
+ iport->wwpn);
+}
+
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_frame_header *pfabric_abts;
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pfabric_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_s_id = {0x00, 0x00, 0x00},
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */
+ };
+}
+
+static void
+fdls_send_rscn_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *rscn_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RSCN response");
+ return;
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(rscn_fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RSCN response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_send_logo_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *req_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *plogo_resp;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send LOGO response");
+ return;
+ }
+
+ plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(req_fchdr);
+ FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send LOGO response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+void
+fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *ptport_abts;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send tport ABTS");
+ return;
+ }
+
+ ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *ptport_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */
+ };
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_S_ID(*ptport_abts, s_id);
+ FNIC_STD_SET_D_ID(*ptport_abts, d_id);
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tport abts: tport->state: %d ",
+ iport->fcid, tport->state);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
+static void fdls_send_fabric_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *pfabric_abts;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric ABTS");
+ return;
+ }
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(s_id, iport->fcid);
+
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_LOGO:
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_FABRIC_FLOGI:
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_FABRIC_PLOGI:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_RPN_ID:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_SCR:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_FCTRL);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_GPN_FT:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+ default:
+ return;
+ }
+
+ oxid = iport->active_oxid_fabric_req;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, oxid);
+
+ iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+ iport->fabric.timer_pending = 1;
+}
+
+static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *pfabric_abts;
+ unsigned long fdmi_tov;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI ABTS");
+ return;
+ }
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+
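+ /*
+ * Send an ABTS for whichever FDMI exchange is still outstanding:
+ * either the FDMI PLOGI, or the RHBA and/or RPA registrations.
+ */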
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ oxid = iport->active_oxid_fdmi_plogi;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
+ oxid = iport->active_oxid_fdmi_rhba;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
+ oxid = iport->active_oxid_fdmi_rpa;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ }
+
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING;
+}
+
+static void fdls_send_fabric_flogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pflogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pflogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els.fl_cmd = ELS_FLOGI,
+ .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)},
+ .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size);
+ FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov);
+ FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_sent);
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fabric_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send PLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ u64 fdmi_tov;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI PLOGI");
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI,
+ &iport->active_oxid_fdmi_plogi);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING;
+}
+
+static void fdls_send_rpn_id(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rpn_id *prpn_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rpn_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RPN_ID");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpn_id = (struct fc_std_rpn_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid);
+
+ FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid);
+ FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RPN_ID",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_scr(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_scr *pscr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_scr);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send SCR");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pscr = (struct fc_std_scr) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .scr = {.scr_cmd = ELS_SCR,
+ .scr_reg_func = ELS_SCRF_FULL}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pscr->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send SCR",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pscr->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_scr_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
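+/*
+ * Send GPN_FT to the name server and move the fabric state machine to the
+ * caller-supplied fdls_state.
+ */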
+static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state)
+{
+ uint8_t *frame;
+ struct fc_std_gpn_ft *pgpn_ft;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_gpn_ft);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send GPN FT");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pgpn_ft = (struct fc_std_gpn_ft) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)},
+ .gpn_ft.fn_fc4_type = 0x08
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send GPN FT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+ fdls_set_state((&iport->fabric), fdls_state);
+}
+
+static void
+fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_adisc *padisc;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT ADISC");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ memcpy(padisc->els.adisc_port_id, s_id, 3);
+ FNIC_STD_SET_S_ID(padisc->fchdr, s_id);
+ FNIC_STD_SET_D_ID(padisc->fchdr, d_id);
+
+ FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ);
+ FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT ADISC",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(padisc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn,
+ iport->wwnn);
+
+ padisc->els.adisc_cmd = ELS_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send ADISC to tgt fcid: 0x%x",
+ iport->fcid, tport->fcid);
+
+ atomic64_inc(&iport->iport_stats.tport_adisc_sent);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
+
+bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ struct fnic_tport_event_s *tport_del_evt;
+ struct fnic *fnic = iport->fnic;
+
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING)
+ || (tport->state == FDLS_TGT_STATE_OFFLINE)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: tport state is offlining/offline\n",
+ tport->fcid);
+ return false;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ /*
+ * By setting this flag, the tport will not be seen in a look-up
+ * in an RSCN. Even if we move to a multithreaded model, this tport
+ * will be destroyed and a new RSCN will have to create a new one.
+ */
+ tport->flags |= FNIC_FDLS_TPORT_TERMINATING;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
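+ /* Drop fnic_lock while resetting outstanding exchanges for this remote port */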
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_rport_exch_reset(iport->fnic, tport->fcid);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) {
+ tport_del_evt =
+ kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport fcid: 0x%0x\n",
+ tport->fcid);
+ return false;
+ }
+ tport_del_evt->event = TGT_EV_RPORT_DEL;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%x not reg with scsi_transport. Freeing locally",
+ tport->fcid);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+ return true;
+}
+
+static void
+fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PLOGI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_plogi_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
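+/*
+ * Return the receive data field size to use for this login: the smaller
+ * of the common service parameter B2B RDF size and the class-3 service
+ * parameter RDF size advertised in the PLOGI accept.
+ */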
+static uint16_t
+fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport,
+ struct fc_std_flogi *plogi_rsp)
+{
+ uint16_t b2b_rdf_size =
+ be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els));
+ uint16_t spc3_rdf_size =
+ be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x",
+ b2b_rdf_size, spc3_rdf_size);
+
+ return min(b2b_rdf_size, spc3_rdf_size);
+}
+
+static void fdls_send_register_fc4_types(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rft_id *prft_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rft_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFT");
+ return;
+ }
+
+ prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prft_id = (struct fc_std_rft_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prft_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ prft_id->rft_id.fr_fts.ff_type_map[0] =
+ cpu_to_be32(1 << FC_TYPE_FCP);
+
+ prft_id->rft_id.fr_fts.ff_type_map[1] =
+ cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW));
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_register_fc4_features(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rff_id *prff_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rff_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFF");
+ return;
+ }
+
+ prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prff_id = (struct fc_std_rff_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)},
+ .rff_id.fr_feat = 0x2,
+ .rff_id.fr_type = FC_TYPE_FCP
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prff_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFF",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ prff_id->rff_id.fr_type = FC_TYPE_FCP;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void
+fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_prli *pprli;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_prli);
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PRLI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pprli = (struct fc_std_els_prli) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els_prli = {.prli_cmd = ELS_PRLI,
+ .prli_spp_len = 16,
+ .prli_len = cpu_to_be16(0x14)},
+ .sp = {.spp_type = 0x08, .spp_flags = 0x0020,
+ .spp_params = cpu_to_be32(0xA2)}
+ };
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT PRLI to 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+
+ FNIC_STD_SET_OX_ID(pprli->fchdr, oxid);
+ FNIC_STD_SET_S_ID(pprli->fchdr, s_id);
+ FNIC_STD_SET_D_ID(pprli->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_prli_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
+/**
+ * fdls_send_fabric_logo - Send a fabric LOGO to the FCF
+ * @iport: Handle to fnic iport
+ *
+ * This function does not change or check the fabric state.
+ * It is the caller's responsibility to set the appropriate iport fabric
+ * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO.
+ * This is currently assumed to be called with the fnic lock held.
+ */
+void fdls_send_fabric_logo(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+/**
+ * fdls_tgt_logout - Send a LOGO to the remote port
+ * @iport: Handle to fnic iport
+ * @tport: Handle to remote port
+ *
+ * This function does not change or check the fabric/tport state.
+ * It is the caller's responsibility to set the appropriate tport/fabric
+ * state when this is called. Normally that is fdls_tgt_state_plogo.
+ * This can also be used to send a LOGO to the name server process,
+ * not just to target processes.
+ */
+void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send tgt LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ atomic64_inc(&iport->iport_stats.tport_logo_sent);
+}
+
+static void fdls_tgt_discovery_start(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Starting FDLS target discovery", iport->fcid);
+
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
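+ /* Stop if the link bounced or the iport left the READY state */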
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ break;
+ }
+ /* If we marked the tport as deleted due to GPN_FT,
+ * we should not send ADISC anymore.
+ */
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING) ||
+ (tport->state == FDLS_TGT_STATE_OFFLINE))
+ continue;
+
+ /* For tports which have received RSCN */
+ if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) {
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC);
+ tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC;
+ fdls_send_tgt_adisc(iport, tport);
+ continue;
+ }
+ if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) {
+ /* Not a new port, skip */
+ continue;
+ }
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ fdls_send_tgt_plogi(iport, tport);
+ }
+ fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY);
+}
+
+/*
+ * Function to restart the IT nexus if we received any out of
+ * sequence PLOGI/PRLI response from the target.
+ * The memory for the new tport structure is allocated
+ * inside fdls_create_tport and added to the iport's tport list.
+ * This will get freed later during tport_offline/linkdown
+ * or module unload. The new_tport pointer will go out of scope
+ * safely since the memory it points to will be freed later.
+ */
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = tport->iport;
+ struct fnic_tport_s *new_tport = NULL;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int nexus_restart_count;
+ struct fnic *fnic = iport->fnic;
+ bool retval = true;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x state: %d restart_count: %d",
+ tport->fcid, tport->state, tport->nexus_restart_count);
+
+ fcid = tport->fcid;
+ wwpn = tport->wwpn;
+ nexus_restart_count = tport->nexus_restart_count;
+
+ retval = fdls_delete_tport(iport, tport);
+ if (retval != true) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Error deleting tport: 0x%x", fcid);
+ return;
+ }
+
+ if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded nexus restart retries tport: 0x%x",
+ fcid);
+ return;
+ }
+
+ /*
+ * Allocate memory for the new tport and add it to
+ * iport's tport list.
+ * This memory will be freed during tport_offline/linkdown
+ * or module unload. The pointer new_tport is safe to go
+ * out of scope when this function returns, since the memory
+ * it is pointing to is guaranteed to be freed later
+ * as mentioned above.
+ */
+ new_tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!new_tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error creating new tport: 0x%x", fcid);
+ return;
+ }
+
+ new_tport->nexus_restart_count = nexus_restart_count + 1;
+ fdls_send_tgt_plogi(iport, new_tport);
+ fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI);
+}
+
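+/* Look up a tport by FC_ID, skipping ports that are being torn down */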
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->fcid == fcid)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid, uint64_t wwpn)
+{
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn);
+
+ tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Memory allocation failure while creating tport: 0x%x\n",
+ fcid);
+ return NULL;
+ }
+
+ tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ;
+ tport->r_a_tov = FC_DEF_R_A_TOV;
+ tport->e_d_tov = FC_DEF_E_D_TOV;
+ tport->fcid = fcid;
+ tport->wwpn = wwpn;
+ tport->iport = iport;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Need to setup tport timer callback");
+
+ timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added tport 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT);
+ list_add_tail(&tport->links, &iport->tport_list);
+ atomic_set(&tport->in_flight, 0);
+ return tport;
+}
+
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->wwpn == wwpn)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
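+/*
+ * Append one FDMI attribute entry (type, length, value) at offset *off
+ * within the attribute block and advance *off past the entry.
+ */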
+static void
+fnic_fdmi_attr_set(void *attr_start, u16 type, u16 len,
+ void *data, u32 *off)
+{
+ u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *)
+ ((u8 *)attr_start + *off);
+
+ put_unaligned_be16(type, &fdmi_attr->type);
+ put_unaligned_be16(size, &fdmi_attr->len);
+ memcpy(fdmi_attr->value, data, len);
+ *off += size;
+}
+
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rhba *prhba;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ int err;
+ struct fnic *fnic = iport->fnic;
+ struct vnic_devcmd_fw_info *fw_info = NULL;
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RHBA");
+ return;
+ }
+
+ prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prhba = (struct fc_std_fdmi_rhba) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RHBA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prhba->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA,
+ &iport->active_oxid_fdmi_rhba);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RHBA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prhba->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport);
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS,
+ &prhba->rhba.hba_attrs.numattrs);
+
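+ /* Attributes are appended back to back; attr_off_bytes tracks their total length */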
+ fdmi_attr = prhba->rhba.hba_attrs.attr;
+ attr_off_bytes = 0;
+
+ put_unaligned_be64(iport->wwnn, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME,
+ FNIC_FDMI_NN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "NN set, off=%d", attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER,
+ FNIC_FDMI_MANU_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFG set <%s>, off=%d", data, attr_off_bytes);
+
+ err = vnic_dev_fw_info(fnic->vdev, &fw_info);
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_serial_number,
+ FNIC_FDMI_SERIAL_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER,
+ FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SERIAL set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
+ fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
+ strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
+ data[FNIC_FDMI_MODEL_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
+ data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES,
+ FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION,
+ FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "HW_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION,
+ FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "DRV_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION,
+ FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ROM_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION,
+ FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW_VER set <%s>, off=%d", data, attr_off_bytes);
+ }
+
+ len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING;
+}
+
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rpa *prpa;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ struct fnic *fnic = iport->fnic;
+ u32 port_speed_bm;
+ u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 tmp_data[16], data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RPA");
+ return;
+ }
+
+ prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpa = (struct fc_std_fdmi_rpa) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RPA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpa->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA,
+ &iport->active_oxid_fdmi_rpa);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RPA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prpa->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS,
+ &prpa->rpa.hba_attrs.numattrs);
+
+ /* MDS does not support GIGE speed.
+ * Bit-shift the standard definitions from scsi_transport_fc.h to
+ * match the FC spec.
+ */
+ switch (port_speed) {
+ case DCEM_PORTSPEED_10G:
+ case DCEM_PORTSPEED_20G:
+ /* There is no bit for 20G */
+ port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14;
+ break;
+ case DCEM_PORTSPEED_25G:
+ port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9;
+ break;
+ case DCEM_PORTSPEED_100G:
+ port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8;
+ break;
+ default:
+ port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15;
+ break;
+ }
+ attr_off_bytes = 0;
+
+ fdmi_attr = prpa->rpa.hba_attrs.attr;
+
+ put_unaligned_be64(iport->wwnn, data);
+
+ memset(data, 0, FNIC_FDMI_FC4_LEN);
+ data[2] = 1;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES,
+ FNIC_FDMI_FC4_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS,
+ FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED,
+ FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(FNIC_FDMI_MFS, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE,
+ FNIC_FDMI_MFS_LEN, data, &attr_off_bytes);
+
+ snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
+ fnic->host->host_no);
+ strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
+ data[FNIC_FDMI_OS_NAME_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
+ FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "OS name set <%s>, off=%d", data, attr_off_bytes);
+
+ sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename);
+ strscpy_pad(data, fc_host_system_hostname(fnic->host),
+ FNIC_FDMI_HN_LEN);
+ data[FNIC_FDMI_HN_LEN - 1] = 0;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
+ FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Host name set <%s>, off=%d", data, attr_off_bytes);
+
+ len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING;
+}
+
+void fdls_fabric_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d",
+ iport->fabric.timer_pending, iport->fabric.state,
+ iport->fabric.retry_counter, iport->max_flogi_retries);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (!iport->fabric.timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->fabric.del_timer_inprogress) {
+ iport->fabric.del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric_del_timer inprogress(%d). Skip timer cb",
+ iport->fabric.del_timer_inprogress);
+ return;
+ }
+
+ iport->fabric.timer_pending = 0;
+
+ /* The fabric state indicates which frames have timed out, and we retry */
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_FLOGI:
+ /* FLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_flogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_flogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* FLOGI has timed out after 2*e_d_tov; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_flogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_flogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ }
+ break;
+ case FDLS_STATE_FABRIC_PLOGI:
+ /* PLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_plogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* PLOGI has timed out after 2*e_d_tov; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_plogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_plogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ }
+ break;
+ case FDLS_STATE_RPN_ID:
+ /* RPN_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_rpn_id(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RPN has timed out. Send abts */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_SCR:
+ /* SCR received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_scr(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* scr has timed out. Send abts */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport);
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ /* RFT_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_types(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* RFT_ID timed out; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ /* RFF_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_features(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RFF_ID has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_RSCN_GPN_FT:
+ case FDLS_STATE_SEND_GPNFT:
+ case FDLS_STATE_GPN_FT:
+ /* GPN_FT received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* gpn_ft has timed out. Send abts */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timeout for fabric GPN_FT. Check name server: %p",
+ iport);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fdls_fdmi_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ if (!iport->fabric.fdmi_pending) {
+ /* timer expired after fdmi responses received. */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* if not abort pending, send an abort */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ fdls_send_fdmi_abts(iport);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* An ABTS is pending for an outstanding FDMI request, which means
+ * the FDMI ABTS has timed out.
+ * Schedule the OXID to be freed after 2*r_a_tov and proceed.
+ */
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba);
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ iport->fabric.fdmi_pending = 0;
+ /* If max retries are not exhausted, start over from FDMI PLOGI */
+ if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
+ iport->fabric.fdmi_retry++;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "retry fdmi timer %d", iport->fabric.fdmi_retry);
+ fdls_send_fdmi_plogi(iport);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_tport_event_s *tport_del_evt;
+
+ tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport event fcid: 0x%x",
+ tport->fcid);
+ return;
+ }
+ tport_del_evt->event = TGT_EV_TPORT_DELETE;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
+static void fdls_tport_timer_callback(struct timer_list *t)
+{
+ struct fnic_tport_s *tport = from_timer(tport, t, retry_timer);
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!tport->timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (tport->del_timer_inprogress) {
+ tport->del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport_del_timer inprogress. Skip timer cb tport fcid: 0x%x\n",
+ tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d",
+ tport->fcid, tport->timer_pending, tport->state,
+ tport->retry_counter);
+
+ tport->timer_pending = 0;
+ oxid = tport->active_oxid;
+
+ /* We retry plogi/prli/adisc frames depending on the tport state */
+ switch (tport->state) {
+ case FDLS_TGT_STATE_PLOGI:
+ /* PLOGI frame received an LS_RJT with busy; we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_plogi(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PLOGI frame has timed out; send ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else if (tport->retry_counter < iport->max_plogi_retries) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ } else {
+ /* exceeded plogi retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ case FDLS_TGT_STATE_PRLI:
+ /* PRLI received an LS_RJT with busy; hence we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_prli(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PRLI has timed out; send ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else {
+ /* ABTS has timed out for prli, we go back to PLOGI */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ }
+ break;
+ case FDLS_TGT_STATE_ADISC:
+ /* ADISC timed out send an ABTS */
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ fdls_send_tport_abts(iport, tport);
+ } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ } else {
+ /* exceeded retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state);
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_flogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI);
+ iport->fabric.flags = 0;
+}
+
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_plogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI);
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) {
+ /* we can do FDMI at the same time */
+ iport->fabric.fdmi_retry = 0;
+ timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback,
+ 0);
+ fdls_send_fdmi_plogi(iport);
+ iport->flags |= FNIC_FDMI_ACTIVE;
+ }
+}
+
+static void
+fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint16_t oxid;
+ struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Tgt ADISC response tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->state != FDLS_TGT_STATE_ADISC)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping this ADISC response");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport state: %d tport state: %d Is abort issued on PRLI? %d",
+ iport->state, tport->state,
+ (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED));
+ return;
+ }
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame from target: 0x%x",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery");
+ return;
+ }
+
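+ /* Response matches the active exchange; release the OXID and act on the ELS reply */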
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (adisc_rsp->els.adisc_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts);
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n",
+ tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+ tport->retry_counter = 0;
+ frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn);
+ frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn);
+ if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC accepted from target: 0x%x. Target logged in",
+ tgt_fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error mismatch frame: ADISC");
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry ADISC again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ }
+ break;
+ }
+}
+
+static void
+fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ uint16_t max_payload_size;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS processing target PLOGI response: tgt_fcid: 0x%x",
+ tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport state: %d tport state: %d",
+ iport->state, tport->state);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response from target: 0x%x. Dropping frame",
+ tgt_fcid);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI accepted by target: 0x%x", tgt_fcid);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+ /* Retry PLOGI again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ return;
+
+ default:
+ atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI not accepted from target fcid: 0x%x",
+ tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PLOGI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+
+ tport->timer_pending = 0;
+ tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els));
+ tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els));
+
+ /* Learn the Service Params */
+
+ /* Max frame size - choose the lowest */
+ max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp);
+ tport->max_payload_size =
+ min(max_payload_size, iport->max_payload_size);
+
+ if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: tport max frame size below spec bounds: %d",
+ tport->max_payload_size);
+ tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MAX frame size: %u iport max_payload_size: %d tport mfs: %d",
+ max_payload_size, iport->max_payload_size,
+ tport->max_payload_size);
+
+ tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp);
+
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI);
+ fdls_send_tgt_prli(iport, tport);
+}
+
+static void
+fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_tport_event_s *tport_add_evt;
+ struct fnic *fnic = iport->fnic;
+ bool mismatched_tgt = false;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process tgt PRLI response: 0x%x", tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ /* Handle or just drop? */
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x",
+ iport->state, tport->state, tport->fcid);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PRLI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI rsp recvd in wrong state. Drop frame. Restarting nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping PRLI response from target: 0x%x ",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (prli_rsp->els_prli.prli_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI accepted from target: 0x%x", tgt_fcid);
+
+ if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "mismatched target zoned with FC SCSI initiator: 0x%x",
+ tgt_fcid);
+ mismatched_tgt = true;
+ }
+ if (mismatched_tgt) {
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry PRLI again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ default:
+ atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI not accepted from target: 0x%x", tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PRLI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+
+ /* Learn Service Params */
+ tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params);
+ tport->retry_counter = 0;
+
+ if (tport->fcp_csp & FCP_SPPF_RETRY)
+ tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY;
+
+ /* Check if the device supports the target mode function */
+ if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remote port(0x%x): no target support. Deleting it\n",
+ tgt_fcid);
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+
+ /* Inform the driver about new target added */
+ tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_add_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport event memory allocation failure: 0x%0x\n",
+ tport->fcid);
+ return;
+ }
+ tport_add_evt->event = TGT_EV_RPORT_ADD;
+ tport_add_evt->arg1 = (void *) tport;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x add tport event fcid: 0x%x\n",
+ tport->fcid, iport->fcid);
+ list_add_tail(&tport_add_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
+
+static void
+fdls_process_rff_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_set_state((&iport->fabric), FDLS_STATE_SCR);
+ fdls_send_scr(iport);
+ break;
+ case FC_FS_RJT:
+ reason_code = rff_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_rft_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_features(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rft_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d",
+ iport->fcid, reason_code,
+ rft_rsp->fc_std_ct_hdr.ct_explan);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_rpn_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_types(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID returned REJ BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID ELS_LS_RJT. Halting discovery %p", iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+fdls_process_scr_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process SCR response: 0x%04x",
+ (uint32_t) scr_rsp->scr.scr_cmd);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_SCR) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (scr_rsp->scr.scr_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
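+ /* SCR accepted; kick off target discovery with GPN_FT */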
+ fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+
+ default:
+ atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects);
+ break;
+ }
+}
+
+static void
+fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fc_gpn_ft_rsp_iu *gpn_ft_tgt;
+ struct fnic_tport_s *tport, *next;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int rem_len = len;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process GPN_FT tgt list", iport->fcid);
+
+ gpn_ft_tgt =
+ (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr +
+ sizeof(struct fc_frame_header)
+ + sizeof(struct fc_ct_hdr));
+ len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr);
+
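+ /* Walk the GPN_FT entries; each IU carries the control byte, FCID and WWPN of one port */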
+ while (rem_len > 0) {
+
+ fcid = ntoh24(gpn_ft_tgt->fcid);
+ wwpn = be64_to_cpu(gpn_ft_tgt->wwpn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl);
+
+ if (fcid == iport->fcid) {
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ continue;
+ }
+
+ tport = fnic_find_tport_by_wwpn(iport, wwpn);
+ if (!tport) {
+ /*
+ * New port registered with the switch or first time query
+ */
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+ /*
+ * Check if this is an existing tport (matched by wwpn)
+ * whose fcid has changed; if so, remove it and
+ * create a new one
+ */
+ if (tport->fcid != fcid) {
+ fdls_delete_tport(iport, tport);
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+
+ /*
+ * If this GPN_FT rsp follows an RSCN, mark the tports that
+ * match the new GPN_FT list; any tport not found in
+ * GPN_FT will be deleted later.
+ */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT)
+ tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ }
+ if (rem_len <= 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d",
+ len, rem_len);
+}
+
+ /* remove those ports which were not listed in GPN_FT */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+
+ if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove port: 0x%x not found in GPN_FT list",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ } else {
+ tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+ }
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ }
+ }
+}
+
+static void
+fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ int count = 0;
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process GPN_FT response: iport state: %d len: %d",
+ iport->state, len);
+
+ /*
+ * A GPN_FT response can arrive in one of these states:
+ * FDLS_STATE_GPN_FT: GPN_FT sent after SCR, during fabric
+ * discovery (FNIC_IPORT_STATE_FABRIC_DISC)
+ * FDLS_STATE_RSCN_GPN_FT: GPN_FT sent in response to an RSCN
+ * FDLS_STATE_SEND_GPNFT: GPN_FT sent after deleting a target,
+ * e.g. after receiving a target LOGO
+ * FDLS_STATE_TGT_DISCOVERY: target discovery from a previous
+ * GPN_FT response is still in progress when a new response arrives
+ */
+ if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC)
+ && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT))
+ || ((iport->state == FNIC_IPORT_STATE_READY)
+ && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT)
+ || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT)
+ || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.",
+ fdls_get_state(fdls), iport->state);
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
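+ /* Any GPN_FT response that reaches this point moves the iport to READY before parsing */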
+ iport->state = FNIC_IPORT_STATE_READY;
+ rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr));
+
+ switch (rsp) {
+
+ case FC_FS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP accept", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_process_gpn_ft_tgt_list(iport, fchdr, len);
+
+ /*
+ * The iport state can change only if a link down event happened.
+ * We don't need to undo fdls_process_gpn_ft_tgt_list; that will
+ * be taken care of on the next link up event.
+ */
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Halting target discovery: fab st: %d iport st: %d ",
+ fdls_get_state(fdls), iport->state);
+ break;
+ }
+ fdls_tgt_discovery_start(iport);
+ break;
+
+ case FC_FS_RJT:
+ reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code);
+
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine",
+ iport->fcid);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP reject", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ /*
+ * If GPN_FT is rejected (LS_RJT), delete
+ * all existing tports
+ */
+ count = 0;
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Remove port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ count++;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Removed (0x%x) ports", count);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * fdls_process_fabric_logo_rsp - Handle a FLOGO response from the FCF
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame
+ */
+static void
+fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogo_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response. Fabric not in LOGO state. Dropping! %p",
+ iport);
+ return;
+ }
+
+ iport->fabric.state = FDLS_STATE_FLOGO_DONE;
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGO response not accepted or rejected: 0x%x",
+ flogo_rsp->els.fl_cmd);
+ }
+}
+
+static void
+fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, void *rx_frame)
+{
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr;
+ uint8_t *fcid;
+ uint16_t rdf_size;
+ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing FLOGI response", iport->fcid);
+
+ if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fcid = FNIC_STD_GET_D_ID(fchdr);
+ iport->fcid = ntoh24(fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI response accepted", iport->fcid);
+
+ /* Learn the Service Params */
+ rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els));
+ if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE)
+ && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN))
+ iport->max_payload_size = min(rdf_size,
+ iport->max_payload_size);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "max_payload_size from fabric: %u set: %d", rdf_size,
+ iport->max_payload_size);
+
+ iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els));
+ iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els));
+
+ if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC)
+ iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "From fabric: R_A_TOV: %d E_D_TOV: %d",
+ iport->r_a_tov, iport->e_d_tov);
+
+ fc_host_fabric_name(iport->fnic->host) =
+ get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els));
+ fc_host_port_id(iport->fnic->host) = iport->fcid;
+
+ fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI registration failed", iport->fcid);
+ break;
+ }
+
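+ /* Build the FPMA: FC-MAP prefix 0E:FC:00 followed by the granted FC_ID */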
+ memcpy(&fcmac[3], fcid, 3);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
+ vnic_dev_add_addr(iport->fnic->vdev, fcmac);
+
+ if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) {
+ fnic_fdls_start_plogi(iport);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received. Starting PLOGI");
+ } else {
+ /* From FDLS_STATE_FABRIC_FLOGI the fabric can only go to
+ * FDLS_STATE_LINKDOWN, hence we don't have to worry about
+ * undoing fnic_fdls_register_portid and vnic_dev_add_addr
+ */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects);
+ if (fabric->retry_counter < iport->max_flogi_retries) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry Flogi again from the timer routine. */
+ fabric->flags |= FNIC_FDLS_RETRY_FRAME;
+
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fabric->timer_pending = 0;
+ fabric->retry_counter = 0;
+ }
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response not accepted: 0x%x",
+ flogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects);
+ break;
+ }
+}
+
+static void
+fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric PLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(&iport->fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x fabric PLOGI response: Accepted\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID);
+ fdls_send_rpn_id(iport);
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery",
+ iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ return;
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response not accepted: 0x%x",
+ plogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects);
+ break;
+ }
+}
+
+static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ u64 fdmi_tov;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fdmi_plogi != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi);
+ return;
+ }
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+
+ if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Sending fdmi registration for port 0x%x\n",
+ iport->fcid);
+
+ fdls_fdmi_register_hba(iport);
+ fdls_fdmi_register_pa(iport);
+ fdmi_tov = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&iport->fabric.fdmi_timer,
+ round_jiffies(fdmi_tov));
+ break;
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x",
+ els_rjt->rej.er_reason);
+
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.fdmi_retry < 7)) {
+ iport->fabric.fdmi_retry++;
+ fdls_send_fdmi_plogi(iport);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ int rsp_type)
+{
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ if (!iport->fabric.fdmi_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received FDMI ack while not waiting: 0x%x\n",
+ FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if ((iport->active_oxid_fdmi_rhba != oxid) &&
+ (iport->active_oxid_fdmi_rpa != oxid)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n",
+ oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa);
+ return;
+ }
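+ /* Clear the pending bit and free the OXID for whichever registration (RHBA or RPA) this ack answers */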
+ if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ } else {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Received FDMI registration ack\n",
+ iport->fcid);
+
+ if (!iport->fabric.fdmi_pending) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling FDMI timer\n",
+ iport->fcid);
+ }
+}
+
+static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr));
+
+ if (s_id != FC_FID_MGMT_SERV) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ switch (FNIC_FRAME_TYPE(oxid)) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ break;
+ }
+
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
+ fdls_send_fdmi_plogi(iport);
+}
+
+static void
+fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint32_t fabric_state = iport->fabric.state;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+ uint16_t oxid;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr;
+
+ if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI)
+ || (s_id == FC_FID_FCTRL))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ return;
+ }
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x",
+ fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id));
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x",
+ fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr),
+ ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan);
+ }
+
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ /* currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT */
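+ /* Resend the aborted fabric request if retries remain, otherwise fall back per frame type */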
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (iport->fabric.retry_counter < iport->max_flogi_retries)
+ fdls_send_fabric_flogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY)
+ fdls_send_fabric_logo(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (iport->fabric.retry_counter < iport->max_plogi_retries)
+ fdls_send_fabric_plogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_rpn_id(iport);
+ else
+ /* go back to fabric Plogi */
+ fnic_fdls_start_plogi(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_scr(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_types(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_features(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT)
+ fdls_send_gpn_ft(iport, fabric_state);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN FT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ break;
+ default:
+ /*
+ * We should not be here since we already validated rx oxid with
+ * our active_oxid_fabric_req
+ */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid OXID/active oxid 0x%x\n", oxid);
+ WARN_ON(true);
+ return;
+ }
+}
+
+static void
+fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_abts_ba_acc *pba_acc;
+ uint32_t nport_id;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_abts_ba_acc);
+
+ nport_id = ntoh24(fchdr->fh_s_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abort from SID 0x%8x", nport_id);
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+ if (tport) {
+ if (tport->active_oxid == oxid) {
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ }
+ }
+
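+ /* Respond to the ABTS with a BA_ACC built on a newly allocated frame */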
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate frame to send response for ABTS req",
+ iport->fcid);
+ return;
+ }
+
+ pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pba_acc = (struct fc_std_abts_ba_acc) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC,
+ .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}},
+ .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)}
+ };
+
+ FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id);
+ FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr));
+
+ pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr));
+ pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr));
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send BA ACC with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_unsupported_els_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pls_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS with illegal frame bits 0x%x\n",
+ d_id);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS request in iport state: %d",
+ iport->state);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to unsupported ELS request");
+ return;
+ }
+
+ pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process unsupported ELS request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support this ELS request, send a reject */
+ pls_rsp->rej.er_reason = 0x0B;
+ pls_rsp->rej.er_explan = 0x0;
+ pls_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_rls_acc *prls_acc_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rls_acc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process RLS request %d", iport->fnic->fnic_num);
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RLS req in iport state: %d. Dropping the frame.",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RLS accept");
+ return;
+ }
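+ /* Build the RLS LS_ACC: echo the originator's OX_ID and report the link failure count in the LESB */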
+ prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID);
+
+ FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS);
+
+ prls_acc_rsp->els.rls_cmd = ELS_LS_ACC;
+ prls_acc_rsp->els.rls_lesb.lesb_link_fail =
+ cpu_to_be32(iport->fnic->link_down_cnt);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr,
+ uint32_t len)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint16_t oxid;
+ uint8_t *fc_payload;
+ uint8_t type;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping ELS frame type: 0x%x in iport state: %d",
+ type, iport->state);
+ return;
+ }
+ switch (type) {
+ case ELS_ECHO:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for ECHO request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ case ELS_RRQ:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for RRQ request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for 0x%x ELS frame\n", type);
+ break;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ELS response for 0x%x",
+ type);
+ return;
+ }
+
+ if (type == ELS_ECHO) {
+ /* Brocade sends a longer payload, copy the whole frame back */
+ memcpy(frame, fchdr, len);
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ if (type == ELS_ECHO)
+ frame_size += len;
+ else
+ frame_size += sizeof(struct fc_std_els_acc_rsp);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic_tport_s *tport;
+ uint32_t tport_state;
+ struct fc_std_abts_ba_acc *ba_acc;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr;
+
+ tport = fnic_find_tport_by_fcid(iport, s_id);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp with invalid SID: 0x%x", s_id);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n", tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp in iport state(%d). Dropping.",
+ iport->state);
+ return;
+ }
+ tport->timer_pending = 0;
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+ tport_state = tport->state;
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ /* Check which outstanding request this abort rsp is for */
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC",
+ be16_to_cpu(ba_acc->acc.ba_ox_id),
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ",
+ tport->fcid, tport_state);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation:0x%x ",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < iport->max_plogi_retries)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Received tgt PRLI abts response BA_ACC",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_prli(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport); /* go back to plogi */
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS response for unknown frame %p", iport);
+ break;
+ }
+
+}
+
+static void
+fdls_process_plogi_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pplogi_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI with illegal frame bits. Dropping frame from 0x%x",
+ d_id);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI request in iport state: %d Dropping frame",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to PLOGI request");
+ return;
+ }
+
+ pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process PLOGI request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support PLOGI request, send a reject */
+ pplogi_rsp->rej.er_reason = 0x0B;
+ pplogi_rsp->rej.er_explan = 0x0;
+ pplogi_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_logo *logo = (struct fc_std_logo *)fchdr;
+ uint32_t nport_id;
+ uint64_t nport_name;
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ nport_id = ntoh24(logo->els.fl_n_port_id);
+ nport_name = be64_to_cpu(logo->els.fl_n_port_wwn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process LOGO request from fcid: 0x%x", nport_id);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping LOGO req from 0x%x in iport state: %d",
+ nport_id, iport->state);
+ return;
+ }
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ if (!tport) {
+ /* We are not logged in with the nport, log and drop... */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO from an nport not logged in: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->fcid != nport_id) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO with invalid target port fcid: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ /* got a LOGO in response to an ADISC to a target that has logged out */
+ if (tport->state == FDLS_TGT_STATE_ADISC) {
+ tport->retry_counter = 0;
+ oxid = tport->active_oxid;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ return;
+ }
+ } else {
+ fdls_delete_tport(iport, tport);
+ }
+ if (iport->state == FNIC_IPORT_STATE_READY) {
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) &&
+ (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ }
+}
+
+static void
+fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_rscn *rscn;
+ struct fc_els_rscn_page *rscn_port = NULL;
+ int num_ports;
+ struct fnic_tport_s *tport, *next;
+ uint32_t nport_id;
+ uint8_t fcid[3];
+ int newports = 0;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ int rscn_type = NOT_PC_RSCN;
+ uint32_t sid = ntoh24(fchdr->fh_s_id);
+ unsigned long reset_fnic_list_lock_flags = 0;
+ uint16_t rscn_payload_len;
+
+ atomic64_inc(&iport->iport_stats.num_rscns);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN %p", iport);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS RSCN received in state(%d). Dropping",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ rscn = (struct fc_std_rscn *)fchdr;
+ rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen);
+
+ /* frame validation */
+ if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8)
+ || (rscn_payload_len > 1024)
+ || (rscn->els.rscn_page_len != 4)) {
+ num_ports = 0;
+ if ((rscn_payload_len == 0xFFFF)
+ && (sid == FC_FID_FCTRL)) {
+ rscn_type = PC_RSCN;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x",
+ sid, rscn_payload_len);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN payload_len: 0x%x page_len: 0x%x",
+ rscn_payload_len, rscn->els.rscn_page_len);
+ /* if this happens then we need to send ADISC to all the tports. */
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ } /* end else */
+ } else {
+ num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len;
+ rscn_port = (struct fc_els_rscn_page *)(rscn + 1);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN received for num_ports: %d payload_len: %d page_len: %d ",
+ num_ports, rscn_payload_len, rscn->els.rscn_page_len);
+
+ /*
+ * An RSCN has at least one Port_ID page, but may not have any port_id
+ * in it. If no port_id is specified in the Port_ID page, we send
+ * ADISC to all the tports.
+ */
+
+ while (num_ports) {
+
+ memcpy(fcid, rscn_port->rscn_fid, 3);
+
+ nport_id = ntoh24(fcid);
+ rscn_port++;
+ num_ports--;
+ /* if this happens then we need to send ADISC to all the tports. */
+ if (nport_id == 0) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ break;
+ }
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN port id list: 0x%x", nport_id);
+
+ if (!tport) {
+ newports++;
+ continue;
+ }
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ }
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON &&
+ rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) {
+
+ if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCRSCN handling already in progress. Skip host reset: %d",
+ iport->fnic->fnic_num);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing PCRSCN. Queuing fnic for host reset: %d",
+ iport->fnic->fnic_num);
+ fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+ list_add_tail(&fnic->links, &reset_fnic_list);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ queue_work(reset_fnic_work_queue, &reset_fnic_work);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN sending GPN_FT: newports: %d", newports);
+ fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT);
+ fdls_send_rscn_resp(iport, fchdr);
+ }
+}
+
+void fnic_fdls_disc_start(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+
+ fc_host_fabric_name(iport->fnic->host) = 0;
+ fc_host_post_event(iport->fnic->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
+
+ if (!iport->usefip) {
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+ fnic_fdls_start_flogi(iport);
+ } else
+ fnic_fdls_start_plogi(iport);
+}
+
+static void
+fdls_process_adisc_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_els_adisc *padisc_acc;
+ struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint8_t *rjt_frame;
+ uint8_t *acc_frame;
+ struct fc_std_els_rjt_rsp *prjts_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+ uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process ADISC request %d", iport->fnic->fnic_num);
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport for fcid: 0x%x not found. Dropping ADISC req.",
+ tgt_fcid);
+ return;
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping ADISC req from fcid: 0x%x in iport state: %d",
+ tgt_fcid, iport->state);
+ return;
+ }
+
+ frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn);
+ frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn);
+
+ if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) {
+ /* send reject */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx",
+ tgt_fcid, frame_wwpn, frame_wwnn);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "local tport wwpn: 0x%llx wwnn: 0x%llx. Sending RJT",
+ tport->wwpn, tport->wwnn);
+
+ rjt_frame = fdls_alloc_frame(iport);
+ if (rjt_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate rjt_frame to send response to ADISC request");
+ return;
+ }
+
+ prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(rjt_frame, iport);
+
+ prjts_rsp->rej.er_reason = 0x03; /* logical error */
+ prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */
+ prjts_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size);
+ return;
+ }
+
+ acc_frame = fdls_alloc_frame(iport);
+ if (acc_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ADISC accept");
+ return;
+ }
+
+ padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id);
+
+ FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ padisc_acc->els.adisc_cmd = ELS_LS_ACC;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn,
+ iport->wwnn);
+ memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3);
+
+ fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size);
+}
+
+/*
+ * Performs validation for all FCoE frames and returns the frame type
+ */
+int
+fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t type;
+ uint8_t *fc_payload;
+ uint16_t oxid;
+ uint32_t s_id;
+ uint32_t d_id;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ int oxid_frame_type;
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ /* some common validation */
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
+ }
+ }
+
+ /* BLS ABTS response */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
+ || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) {
+ if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS invalid frame. Dropping frame");
+ return -1;
+
+ }
+ if (fdls_is_oxid_fabric_req(oxid)) {
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+ return FNIC_FABRIC_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_fdmi_req(oxid)) {
+ return FNIC_FDMI_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_tgt_req(oxid)) {
+ return FNIC_TPORT_BLS_ABTS_RSP;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+
+ /* BLS ABTS Req */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS)
+ && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Receiving Abort Request from s_id: 0x%x", s_id);
+ return FNIC_BLS_ABTS_REQ;
+ }
+
+ /* unsolicited requests frames */
+ if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) {
+ switch (type) {
+ case ELS_LOGO:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received LOGO invalid frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_ELS_LOGO_REQ;
+ case ELS_RSCN:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN invalid FCTL. Dropping frame");
+ return -1;
+ }
+ if (s_id != FC_FID_FCTRL)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.",
+ fchdr->fh_f_ctl[0], fchdr->fh_type, s_id);
+ return FNIC_ELS_RSCN_REQ;
+ case ELS_PLOGI:
+ return FNIC_ELS_PLOGI_REQ;
+ case ELS_ECHO:
+ return FNIC_ELS_ECHO_REQ;
+ case ELS_ADISC:
+ return FNIC_ELS_ADISC;
+ case ELS_RLS:
+ return FNIC_ELS_RLS;
+ case ELS_RRQ:
+ return FNIC_ELS_RRQ;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unsupported frame (type:0x%02x) from fcid: 0x%x",
+ type, s_id);
+ return FNIC_ELS_UNSUPPORTED_REQ;
+ }
+ }
+
+ /* solicited response from fabric or target */
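+ /* Locally allocated OXIDs encode the original request type (see fdls_alloc_oxid() and FNIC_FRAME_TYPE()) */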
+ oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid);
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FLOGI)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_FLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_DIR_SERV)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_PLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FCTRL)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_SCR_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RPN_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFF_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_GPN_FT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ return FNIC_FABRIC_LOGO_RSP;
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ return FNIC_FDMI_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ return FNIC_FDMI_REG_HBA_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ return FNIC_FDMI_RPA_RSP;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ return FNIC_TPORT_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ return FNIC_TPORT_PRLI_RSP;
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ return FNIC_TPORT_ADISC_RSP;
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping Unknown frame in tport solicited exchange range type: 0x%x.",
+ fchdr->fh_type);
+ return -1;
+ }
+ return FNIC_TPORT_LOGO_RSP;
+ default:
+ /* Drop the Rx frame and log/stats it */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Solicited response: unknown OXID: 0x%x", oxid);
+ return -1;
+ }
+
+ return -1;
+}
+
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset)
+{
+ struct fc_frame_header *fchdr;
+ uint32_t s_id = 0;
+ uint32_t d_id = 0;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset);
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming");
+
+ frame_type =
+ fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+ /* if we are in fabric LOGO, drop everything except the LOGO response */
+ if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO &&
+ frame_type != FNIC_FABRIC_LOGO_RSP)
+ return;
+
+ switch (frame_type) {
+ case FNIC_FABRIC_FLOGI_RSP:
+ fdls_process_flogi_rsp(iport, fchdr, rx_frame);
+ break;
+ case FNIC_FABRIC_PLOGI_RSP:
+ fdls_process_fabric_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_PLOGI_RSP:
+ fdls_process_fdmi_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RPN_RSP:
+ fdls_process_rpn_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFT_RSP:
+ fdls_process_rft_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFF_RSP:
+ fdls_process_rff_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_SCR_RSP:
+ fdls_process_scr_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_GPN_FT_RSP:
+ fdls_process_gpn_ft_rsp(iport, fchdr, len);
+ break;
+ case FNIC_TPORT_PLOGI_RSP:
+ fdls_process_tgt_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_PRLI_RSP:
+ fdls_process_tgt_prli_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_ADISC_RSP:
+ fdls_process_tgt_adisc_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_BLS_ABTS_RSP:
+ fdls_process_tgt_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_LOGO_RSP:
+ /* Logo response from tgt which we have deleted */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Logo response from tgt: 0x%x",
+ ntoh24(fchdr->fh_s_id));
+ break;
+ case FNIC_FABRIC_LOGO_RSP:
+ fdls_process_fabric_logo_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_BLS_ABTS_RSP:
+ fdls_process_fabric_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_BLS_ABTS_RSP:
+ fdls_process_fdmi_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_BLS_ABTS_REQ:
+ fdls_process_abts_req(iport, fchdr);
+ break;
+ case FNIC_ELS_UNSUPPORTED_REQ:
+ fdls_process_unsupported_els_req(iport, fchdr);
+ break;
+ case FNIC_ELS_PLOGI_REQ:
+ fdls_process_plogi_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RSCN_REQ:
+ fdls_process_rscn(iport, fchdr);
+ break;
+ case FNIC_ELS_LOGO_REQ:
+ fdls_process_logo_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RRQ:
+ case FNIC_ELS_ECHO_REQ:
+ fdls_process_els_req(iport, fchdr, len);
+ break;
+ case FNIC_ELS_ADISC:
+ fdls_process_adisc_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RLS:
+ fdls_process_rls_req(iport, fchdr);
+ break;
+ case FNIC_FDMI_REG_HBA_RSP:
+ case FNIC_FDMI_RPA_RSP:
+ fdls_process_fdmi_reg_ack(iport, fchdr, frame_type);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "s_id: 0x%x d_did: 0x%x", s_id, d_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown FCoE frame of len: %d. Dropping frame", len);
+ break;
+ }
+}
+
+void fnic_fdls_disc_init(struct fnic_iport_s *iport)
+{
+ fdls_reset_oxid_pool(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_INIT);
+}
+
+void fnic_fdls_link_down(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing link down", iport->fcid);
+
+ fdls_set_state((&iport->fabric), FDLS_STATE_LINKDOWN);
+ iport->fabric.flags = 0;
+
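+ /* fnic_scsi_fcpio_reset() is called without fnic_lock held; drop and re-acquire it around the call */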
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing rport: 0x%x", tport->fcid);
+ fdls_delete_tport(iport, tport);
+ }
+
+ if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) {
+ del_timer_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS finish processing link down", iport->fcid);
+}
diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h
new file mode 100644
index 000000000000..012f43afd083
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_fc.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FDLS_FC_H_
+#define _FDLS_FC_H_
+
+/* This file contains the declarations for FC fabric services
+ * and target discovery
+ *
+ * Request and Response for
+ * 1. FLOGI
+ * 2. PLOGI to Fabric Controller
+ * 3. GPN_ID, GPN_FT
+ * 4. RSCN
+ * 5. PLOGI to Target
+ * 6. PRLI to Target
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_els.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_ns.h>
+#include <uapi/scsi/fc/fc_gs.h>
+#include <uapi/linux/if_ether.h>
+#include <scsi/fc/fc_ms.h>
+#include <linux/minmax.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#define FDLS_MIN_FRAMES (32)
+#define FDLS_MIN_FRAME_ELEM (4)
+#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002
+#define FNIC_FCP_SP_TARGET 0x00000010
+#define FNIC_FCP_SP_INITIATOR 0x00000020
+#define FNIC_FCP_SP_CONF_CMPL 0x00000080
+#define FNIC_FCP_SP_RETRY 0x00000100
+
+#define FNIC_FC_CONCUR_SEQS (0xFF)
+#define FNIC_FC_RO_INFO (0x1F)
+
+/* Little Endian */
+#define FNIC_UNASSIGNED_OXID (0xffff)
+#define FNIC_UNASSIGNED_RXID (0xffff)
+#define FNIC_ELS_REQ_FCTL (0x000029)
+#define FNIC_ELS_REP_FCTL (0x000099)
+
+#define FNIC_FCP_RSP_FCTL (0x000099)
+#define FNIC_REQ_ABTS_FCTL (0x000009)
+
+#define FNIC_FC_PH_VER_HI (0x20)
+#define FNIC_FC_PH_VER_LO (0x20)
+#define FNIC_FC_PH_VER (0x2020)
+#define FNIC_FC_B2B_CREDIT (0x0A)
+#define FNIC_FC_B2B_RDF_SZ (0x0800)
+
+#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data)
+#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov)
+#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov)
+#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features))
+#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn)
+#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn)
+
+#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \
+ (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size))
+#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \
+ (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov))
+#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \
+ (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov))
+
+#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3)
+#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3)
+#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid))
+#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid))
+
+#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl)
+#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type)
+#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \
+ put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl))
+
+#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr)
+#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr)
+#define FNIC_STD_SET_PORT_ID(__req, __portid) \
+ memcpy(__req.fr_fid.fp_fid, __portid, 3)
+#define FNIC_STD_SET_PORT_NAME(_req, _pName) \
+ (put_unaligned_be64(_pName, &_req.fr_wwn))
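+
+/*
+ * Typical usage when building a reply (see fdls_process_adisc_req and
+ * fdls_process_plogi_req): swap the request's S_ID/D_ID and echo its
+ * OX_ID, e.g. (with "rsp" standing for the reply frame being built):
+ *   FNIC_STD_SET_S_ID(rsp->fchdr, fchdr->fh_d_id);
+ *   FNIC_STD_SET_D_ID(rsp->fchdr, fchdr->fh_s_id);
+ *   FNIC_STD_SET_OX_ID(rsp->fchdr, FNIC_STD_GET_OX_ID(fchdr));
+ */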
+
+#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id))
+#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id))
+#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id)
+#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id)
+#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type)
+#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl)
+#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl)
+
+#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd))
+
+#define FNIC_FCOE_MAX_FRAME_SZ (2048)
+#define FNIC_FCOE_MIN_FRAME_SZ (280)
+#define FNIC_FC_MAX_PAYLOAD_LEN (2048)
+#define FNIC_MIN_DATA_FIELD_SIZE (256)
+
+#define FNIC_FC_EDTOV_NSEC (0x400)
+#define FNIC_NSEC_TO_MSEC (0x1000000)
+#define FCP_PRLI_FUNC_TARGET (0x0010)
+
+#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21)
+#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98)
+#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99)
+#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29)
+#define FNIC_FC_R_CTL_FC4_SCTL (0x03)
+#define FNIC_FC_CS_CTL (0x00)
+
+#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ)
+#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA)
+#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT)
+#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT)
+#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL)
+#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS)
+#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS)
+#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT)
+#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL)
+
+#define FNIC_FC_C3_RDF (0xfff)
+#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \
+ (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \
+ (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF)))
+#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \
+ (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \
+ (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff)))
+
+/* FLOGI/PLOGI struct */
+struct fc_std_flogi {
+ struct fc_frame_header fchdr;
+ struct fc_els_flogi els;
+} __packed;
+
+struct fc_std_els_acc_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_acc acc;
+} __packed;
+
+struct fc_std_els_rjt_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_rjt rej;
+} __packed;
+
+struct fc_std_els_adisc {
+ struct fc_frame_header fchdr;
+ struct fc_els_adisc els;
+} __packed;
+
+struct fc_std_rls_acc {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls_resp els;
+} __packed;
+
+struct fc_std_abts_ba_acc {
+ struct fc_frame_header fchdr;
+ struct fc_ba_acc acc;
+} __packed;
+
+struct fc_std_abts_ba_rjt {
+ struct fc_frame_header fchdr;
+ struct fc_ba_rjt rjt;
+} __packed;
+
+struct fc_std_els_prli {
+ struct fc_frame_header fchdr;
+ struct fc_els_prli els_prli;
+ struct fc_els_spp sp;
+} __packed;
+
+struct fc_std_rpn_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rn_id rpn_id;
+} __packed;
+
+struct fc_std_fdmi_rhba {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rhba rhba;
+} __packed;
+
+struct fc_std_fdmi_rpa {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rpa rpa;
+} __packed;
+
+struct fc_std_rft_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rft_id rft_id;
+} __packed;
+
+struct fc_std_rff_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rff_id rff_id;
+} __packed;
+
+struct fc_std_gpn_ft {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_gid_ft gpn_ft;
+} __packed;
+
+/* Accept CT_IU for GPN_FT */
+struct fc_gpn_ft_rsp_iu {
+ uint8_t ctrl;
+ uint8_t fcid[3];
+ uint32_t rsvd;
+ __be64 wwpn;
+} __packed;
+
+struct fc_std_rls {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls els;
+} __packed;
+
+struct fc_std_scr {
+ struct fc_frame_header fchdr;
+ struct fc_els_scr scr;
+} __packed;
+
+struct fc_std_rscn {
+ struct fc_frame_header fchdr;
+ struct fc_els_rscn els;
+} __packed;
+
+struct fc_std_logo {
+ struct fc_frame_header fchdr;
+ struct fc_els_logo els;
+} __packed;
+
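+/*
+ * FC frames are built at buffer + FNIC_ETH_FCOE_HDRS_OFFSET, leaving room
+ * at the front of the buffer for the Ethernet and FCoE encapsulation headers.
+ */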
+#define FNIC_ETH_FCOE_HDRS_OFFSET \
+ (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr))
+
+#endif /* _FDLS_FC_H_ */
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
new file mode 100644
index 000000000000..7bb85949033f
--- /dev/null
+++ b/drivers/scsi/fnic/fip.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#include "fnic.h"
+#include "fip.h"
+#include <linux/etherdevice.h>
+
+#define FIP_FNIC_RESET_WAIT_COUNT 15
+
+/**
+ * fnic_fcoe_reset_vlans - Free up the list of discovered vlans
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan, *next;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (!list_empty(&fnic->vlan_list)) {
+ list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) {
+ list_del(&vlan->list);
+ kfree(vlan);
+ }
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reset vlan complete\n");
+}
+
+/**
+ * fnic_fcoe_send_vlan_req - Send FIP VLAN request to the all-FCFs multicast MAC
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u64 vlan_tov;
+ struct fip_vlan_req *pvlan_req;
+ uint16_t frame_size = sizeof(struct fip_vlan_req);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send VLAN req");
+ return;
+ }
+
+ fnic_fcoe_reset_vlans(fnic);
+
+ fnic->set_vlan(fnic, 0);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "set vlan done\n");
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0],
+ iport->hwmac[1], iport->hwmac[2], iport->hwmac[3],
+ iport->hwmac[4], iport->hwmac[5]);
+
+ pvlan_req = (struct fip_vlan_req *) frame;
+ *pvlan_req = (struct fip_vlan_req) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_VLAN),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC,
+ .fip_dlen = 2}}
+ };
+
+ memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Send VLAN req\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov));
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fip timer set\n");
+}
+
+/**
+ * fnic_fcoe_process_vlan_resp - Processes the vlan response from one FCF and
+ * populates VLAN list.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received FIP frame
+ *
+ * Will wait for responses from multiple FCFs until timeout.
+ */
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u16 vid;
+ int num_vlan = 0;
+ int cur_desc, desc_len;
+ struct fcoe_vlan *vlan;
+ struct fip_vlan_desc *vlan_desc;
+ unsigned long flags;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p got vlan resp\n", fnic);
+
+ desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "desc_len %d\n", desc_len);
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+
+ cur_desc = 0;
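+ /* walk the descriptor list; FIP descriptor lengths (fip_dl_len, fip_dlen) are in 4-byte words */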
+ while (desc_len > 0) {
+ vlan_desc =
+ (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc)
+ + cur_desc * 4);
+
+ if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) {
+ if (vlan_desc->fd_desc.fip_dlen != 1) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor length(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dlen);
+
+ }
+ num_vlan++;
+ vid = be16_to_cpu(vlan_desc->fd_vlan);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "process_vlan_resp: FIP VLAN %d\n", vid);
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+
+ if (!vlan) {
+ /* retry from timer */
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Mem Alloc failure\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ goto out;
+ }
+ vlan->vid = vid & 0x0fff;
+ vlan->state = FIP_VLAN_AVAIL;
+ list_add_tail(&vlan->list, &fnic->vlan_list);
+ break;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor type(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dtype);
+ /*
+ * Note: received a type=2 descriptor here, i.e. FIP
+ * MAC Address Descriptor
+ */
+ cur_desc += vlan_desc->fd_desc.fip_dlen;
+ desc_len -= vlan_desc->fd_desc.fip_dlen;
+ }
+
+ /* any VLAN descriptors present ? */
+ if (num_vlan == 0) {
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p No VLAN descriptors in FIP VLAN response\n",
+ fnic);
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+ out:
+ return;
+}
+
+/**
+ * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u64 fcs_tov;
+ struct fip_discovery *pdisc_sol;
+ uint16_t frame_size = sizeof(struct fip_discovery);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FCF discovery");
+ return;
+ }
+
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+
+ pdisc_sol = (struct fip_discovery *) frame;
+ *pdisc_sol = (struct fip_discovery) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC),
+ .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}},
+ .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1},
+ .fd_size = cpu_to_be16(FCOE_MAX_SIZE)}
+ };
+
+ memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0xFF;
+
+ FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Start FCF discovery\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED;
+
+ fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov));
+}
+
+/**
+ * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * FCF advertisements can be:
+ * solicited - Sent in response to a discover-FCF FIP request.
+ * Store the information of the FCF with the highest priority.
+ * Wait until timeout in case of multiple FCFs.
+ *
+ * unsolicited - Sent periodically by the FCF as a keep alive.
+ * If FLOGI is in progress or completed and the advertisement is
+ * received from our selected FCF, refresh the keep alive timer.
+ */
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph;
+ u64 fcs_ka_tov;
+ u64 tov;
+ int fka_has_changed;
+
+ switch (iport->fip.state) {
+ case FDLS_FIP_FCF_DISCOVERY_STARTED:
+ if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p Solicited adv\n", fnic);
+
+ if ((disc_adv->prio_desc.fd_pri <
+ iport->selected_fcf.fcf_priority)
+ && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FCF Available\n", fnic);
+ memcpy(iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority =
+ disc_adv->prio_desc.fd_pri;
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "adv time %d",
+ iport->selected_fcf.fka_adv_period);
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
+ }
+ }
+ break;
+ case FDLS_FIP_FLOGI_STARTED:
+ case FDLS_FIP_FLOGI_COMPLETE:
+ if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) {
+ /* same fcf */
+ if (memcmp
+ (iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) {
+ if (iport->selected_fcf.fka_adv_period !=
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) {
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO,
+ fnic->host,
+ fnic->fnic_num,
+ "change fka to %d",
+ iport->selected_fcf.fka_adv_period);
+ }
+
+ fka_has_changed =
+ (iport->selected_fcf.ka_disabled == 1)
+ && ((disc_adv->fka_adv_desc.fd_flags & 1) ==
+ 0);
+
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period ==
+ 0))) {
+
+ fcs_ka_tov = jiffies + 3 * msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcs_ka_tov));
+ } else {
+ if (timer_pending(&fnic->fcs_ka_timer))
+ del_timer_sync(&fnic->fcs_ka_timer);
+ }
+
+ if (fka_has_changed) {
+ if (iport->selected_fcf.fka_adv_period != 0) {
+ tov = jiffies + msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ } /* end switch */
+}
+
+/**
+ * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_flogi(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi *pflogi_req;
+ u64 flogi_tov;
+ uint16_t oxid;
+ uint16_t frame_size = sizeof(struct fip_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FIP FLOGI");
+ return;
+ }
+
+ pflogi_req = (struct fip_flogi *) frame;
+ *pflogi_req = (struct fip_flogi) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_LS),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .flogi_desc = {
+ .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen = 36},
+ .flogi = {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_FLOGI,
+ .fl_csp = {
+ .sp_hi_ver =
+ FNIC_FC_PH_VER_HI,
+ .sp_lo_ver =
+ FNIC_FC_PH_VER_LO,
+ .sp_bb_cred =
+ cpu_to_be16
+ (FNIC_FC_B2B_CREDIT),
+ .sp_bb_data =
+ cpu_to_be16
+ (FNIC_FC_B2B_RDF_SZ)},
+ .fl_cssp[2].cp_class =
+ cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ },
+ }
+ },
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
+
+ memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ if (iport->usefip)
+ memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate OXID to send FIP FLOGI");
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid);
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn,
+ iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP start FLOGI\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ iport->fip.flogi_retry++;
+
+ iport->fip.state = FDLS_FIP_FLOGI_STARTED;
+ flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov));
+}
+
+/**
+ * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * If successful save assigned fc_id and MAC, program firmware
+ * and start fdls discovery, else restart vlan discovery.
+ */
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph;
+ int desc_len;
+ uint32_t s_id;
+ int frame_type;
+ uint16_t oxid;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p FIP FLOGI rsp\n", fnic);
+ desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len);
+ if (desc_len != 38) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid Descriptor List len (%x). Dropping frame\n",
+ desc_len);
+ return;
+ }
+
+ if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7)
+ && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36))
+ || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2)
+ && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame invalid type and len mix\n");
+ return;
+ }
+
+ frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ if ((fchdr->fh_f_ctl[0] != 0x98)
+ || (fchdr->fh_r_ctl != 0x23)
+ || (s_id != FC_FID_FLOGI)
+ || (frame_type != FNIC_FABRIC_FLOGI_RSP)
+ || (fchdr->fh_type != 0x01)) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n",
+ s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl,
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
+ if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p rsp for pending FLOGI\n", fnic);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+ del_timer_sync(&fnic->retry_fip_timer);
+
+ if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN)
+ && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FLOGI success\n", fnic);
+ memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN);
+ iport->fcid =
+ ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id);
+
+ iport->r_a_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov);
+ iport->e_d_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov);
+ memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+ vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, NULL)
+ != 0) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p flogi registration failed\n",
+ fnic);
+ return;
+ }
+
+ iport->fip.state = FDLS_FIP_FLOGI_COMPLETE;
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "iport->state:%d\n",
+ iport->state);
+ fnic_fdls_disc_start(iport);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0))) {
+ u64 tov;
+
+ tov = jiffies + msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+
+ }
+ } else {
+ /*
+ * If the FLOGI is rejected, clear all
+ * FCFs and restart from scratch
+ */
+ atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects);
+ /* start FCoE VLAN discovery */
+ fnic_fcoe_send_vlan_req(fnic);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+ }
+ }
+}
+
+/**
+ * fnic_common_fip_cleanup - Clean up FCF info and timers in case of
+ * link down/CVL
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_common_fip_cleanup(struct fnic *fnic)
+{
+
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ if (!iport->usefip)
+ return;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fip cleanup\n", fnic);
+
+ iport->fip.state = FDLS_FIP_INIT;
+
+ del_timer_sync(&fnic->retry_fip_timer);
+ del_timer_sync(&fnic->fcs_ka_timer);
+ del_timer_sync(&fnic->enode_ka_timer);
+ del_timer_sync(&fnic->vn_ka_timer);
+
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+
+ memset(iport->fpma, 0, ETH_ALEN);
+ iport->fcid = 0;
+ iport->r_a_tov = 0;
+ iport->e_d_tov = 0;
+ memset(fnic->iport.fcfmac, 0, ETH_ALEN);
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0;
+ iport->selected_fcf.fka_adv_period = 0;
+ iport->selected_fcf.ka_disabled = 0;
+
+ fnic_fcoe_reset_vlans(fnic);
+}
+
+/**
+ * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * Verify that the CVL is received from our current FCF for our assigned MAC,
+ * then clean up and restart VLAN discovery.
+ */
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph;
+ int i;
+ int found = false;
+ int max_count = 0;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p clear virtual link handler\n", fnic);
+
+ if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2)
+ && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2))
+ || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4)
+ && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid mix: ft %x fl %x ndt %x ndl %x",
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dtype,
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dlen,
+ cvl_msg->name_desc.fd_desc.fip_dtype,
+ cvl_msg->name_desc.fd_desc.fip_dlen);
+ }
+
+ if (memcmp
+ (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN)
+ == 0) {
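+ /* fip_dl_len is in 4-byte words: 5 for the FCF MAC and name descriptors, then 5 per VN_Port descriptor */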
+ for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) {
+ if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11)
+ && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid type and len mix type: %d len: %d\n",
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype,
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen);
+ }
+ if (memcmp
+ (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac,
+ ETH_ALEN) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return;
+ fnic_common_fip_cleanup(fnic);
+
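+ /* serialize with an in-progress fnic reset; wait (bounded by FIP_FNIC_RESET_WAIT_COUNT) before handling the link event */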
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ max_count++;
+ if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rthr waited too long. Skipping handle link event %p\n",
+ fnic);
+ return;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait %p",
+ fnic);
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
+ fnic_fdls_link_down(iport);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic_fcoe_send_vlan_req(fnic);
+ }
+}
+
+/**
+ * fdls_fip_recv_frame - Demultiplexer for FIP frames
+ * @fnic: Handle to fnic driver instance
+ * @frame: Received ethernet frame
+ */
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame)
+{
+ struct ethhdr *eth = (struct ethhdr *)frame;
+ struct fip_header *fiph;
+ u16 op;
+ u8 sub;
+ int len = 2048;
+
+ if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) {
+ fiph = (struct fip_header *)(eth + 1);
+ op = be16_to_cpu(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming");
+
+ if (op == FIP_OP_DISC && sub == FIP_SC_REP)
+ fnic_fcoe_fip_discovery_resp(fnic, fiph);
+ else if (op == FIP_OP_VLAN && sub == FIP_SC_REP)
+ fnic_fcoe_process_vlan_resp(fnic, fiph);
+ else if (op == FIP_OP_CTRL && sub == FIP_SC_REP)
+ fnic_fcoe_process_cvl(fnic, fiph);
+ else if (op == FIP_OP_LS && sub == FIP_SC_REP)
+ fnic_fcoe_process_flogi_resp(fnic, fiph);
+
+ /* Return true if the frame was a FIP frame */
+ return true;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Not a FIP Frame");
+ return false;
+}
+
+void fnic_work_on_fip_timer(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP timeout\n");
+
+ if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) {
+ fnic_vlan_discovery_timeout(fnic);
+ } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) {
+ u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FCF Discovery timeout\n");
+ if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) {
+
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ fnic_scsi_fcpio_reset(iport->fnic);
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+
+ fnic_fcoe_start_flogi(fnic);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0))) {
+ u64 fcf_tov;
+
+ fcf_tov = jiffies + 3 * msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcf_tov));
+ }
+ } else {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "FCF Discovery timeout\n");
+ fnic_vlan_discovery_timeout(fnic);
+ }
+ } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI timeout\n");
+ if (iport->fip.flogi_retry < fnic->config.flogi_retries)
+ fnic_fcoe_start_flogi(fnic);
+ else
+ fnic_vlan_discovery_timeout(fnic);
+ }
+}
+
+/**
+ * fnic_handle_fip_timer - Timeout handler for FIP discover phase.
+ * @t: Handle to the timer list
+ *
+ * Based on the current state, start next phase or restart discovery.
+ */
+void fnic_handle_fip_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, retry_fip_timer);
+
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
+
+/**
+ * fnic_handle_enode_ka_timer - FIP node keep alive.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_enode_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, enode_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_enode_ka *penode_ka;
+ u64 enode_ka_tov;
+ uint16_t frame_size = sizeof(struct fip_enode_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if ((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0)) {
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send enode ka");
+ return;
+ }
+
+ penode_ka = (struct fip_enode_ka *) frame;
+ *penode_ka = (struct fip_enode_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
+
+ memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle enode KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ enode_ka_tov = jiffies
+ + msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov));
+}
+
+/**
+ * fnic_handle_vn_ka_timer - FIP virtual port keep alive.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_vn_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, vn_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_vn_port_ka *pvn_port_ka;
+ u64 vn_ka_tov;
+ uint8_t fcid[3];
+ uint16_t frame_size = sizeof(struct fip_vn_port_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if ((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0)) {
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send vn ka");
+ return;
+ }
+
+ pvn_port_ka = (struct fip_vn_port_ka *) frame;
+ *pvn_port_ka = (struct fip_vn_port_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}}
+ };
+
+ memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN);
+ memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN);
+ hton24(fcid, iport->fcid);
+ memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3);
+ FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle vnport KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov));
+}
+
+/**
+ * fnic_vlan_discovery_timeout - Handle vlan discovery timeout
+ * @fnic: Handle to fnic driver instance
+ *
+ * End of the VLAN discovery or FCF discovery time window.
+ * Select the next available VLAN (or restart VLAN discovery if none are
+ * left) and start FCF discovery on it.
+ */
+void fnic_vlan_discovery_timeout(struct fnic *fnic)
+{
+ struct fcoe_vlan *vlan;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (!iport->usefip)
+ return;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (list_empty(&fnic->vlan_list)) {
+ /* no vlans available, try again */
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+
+ vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list);
+
+ if (vlan->state == FIP_VLAN_SENT) {
+ if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+ /*
+ * no response on this vlan, remove from the list.
+ * Try the next vlan
+ */
+ list_del(&vlan->list);
+ kfree(vlan);
+ vlan = NULL;
+ if (list_empty(&fnic->vlan_list)) {
+ /* we exhausted all vlans, restart vlan disc */
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+ /* check the next vlan */
+ vlan =
+ list_first_entry(&fnic->vlan_list, struct fcoe_vlan,
+ list);
+
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+
+ }
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
+
+ } else {
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ }
+ vlan->sol_count++;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_start_fcf_discovery(fnic);
+}
+
+/**
+ * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer.
+ * @work: the work queue to be serviced
+ *
+ * Finish handling fcs_ka_timer in process context.
+ * Clean up, bring the link down, and restart all FIP discovery.
+ */
+void fnic_work_on_fcs_ka_timer(struct work_struct *work)
+{
+ struct fnic
+ *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs ka timeout\n", fnic);
+
+ fnic_common_fip_cleanup(fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_fdls_link_down(iport);
+ iport->state = FNIC_IPORT_STATE_FIP;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ fnic_fcoe_send_vlan_req(fnic);
+}
+
+/**
+ * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer.
+ * @t: Handle to the timer list
+ *
+ * No keep alives received from FCF. Clean up, bring the link down
+ * and restart all the FIP discovery.
+ */
+void fnic_handle_fcs_ka_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer);
+
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h
new file mode 100644
index 000000000000..79fee7628870
--- /dev/null
+++ b/drivers/scsi/fnic/fip.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#ifndef _FIP_H_
+#define _FIP_H_
+
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fip.h>
+
+/* Drop the cast from the standard definition */
+#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
+#define FCOE_MAX_SIZE 0x082E
+
+#define FCOE_CTLR_FIPVLAN_TOV (3*1000)
+#define FCOE_CTLR_FCS_TOV (3*1000)
+#define FCOE_CTLR_MAX_SOL (5*1000)
+
+#define FIP_DISC_SOL_LEN (6)
+#define FIP_VLAN_REQ_LEN (2)
+#define FIP_ENODE_KA_LEN (2)
+#define FIP_VN_KA_LEN (7)
+#define FIP_FLOGI_LEN (38)
+
+enum fdls_vlan_state {
+ FIP_VLAN_AVAIL,
+ FIP_VLAN_SENT
+};
+
+enum fdls_fip_state {
+ FDLS_FIP_INIT,
+ FDLS_FIP_VLAN_DISCOVERY_STARTED,
+ FDLS_FIP_FCF_DISCOVERY_STARTED,
+ FDLS_FIP_FLOGI_STARTED,
+ FDLS_FIP_FLOGI_COMPLETE,
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+ struct list_head list;
+ uint16_t vid; /* vlan ID */
+ uint16_t sol_count; /* no. of sols sent */
+ uint16_t state; /* state */
+};
+
+struct fip_vlan_req {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_vlan_notif {
+ struct fip_header fip;
+ struct fip_vlan_desc vlans_desc[];
+} __packed;
+
+struct fip_vn_port_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_vn_desc vn_port_desc;
+} __packed;
+
+struct fip_enode_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_cvl {
+ struct fip_header fip;
+ struct fip_mac_desc fcf_mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_vn_desc vn_ports_desc[];
+} __packed;
+
+struct fip_flogi_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi_rsp_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_flogi_desc flogi_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_flogi_rsp {
+ struct fip_header fip;
+ struct fip_flogi_rsp_desc rsp_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_discovery {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_size_desc fcoe_desc;
+} __packed;
+
+struct fip_disc_adv {
+ struct fip_header fip;
+ struct fip_pri_desc prio_desc;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_fab_desc fabric_desc;
+ struct fip_fka_desc fka_adv_desc;
+} __packed;
+
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_work_on_fip_timer(struct work_struct *work);
+void fnic_work_on_fcs_ka_timer(struct work_struct *work);
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic);
+void fnic_fcoe_start_flogi(struct fnic *fnic);
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph);
+void fnic_vlan_discovery_timeout(struct fnic *fnic);
+
+extern struct workqueue_struct *fnic_fip_queue;
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx)
+{
+ struct fip_header *fiph = (struct fip_header *)(eth + 1);
+ u16 op = be16_to_cpu(fiph->fip_op);
+ u8 sub = fiph->fip_subcode;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)",
+ pfx, op, sub, len);
+
+ fnic_debug_dump(fnic, (uint8_t *)eth, len);
+}
+
+#else /* FNIC_DEBUG */
+
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx) {}
+#endif /* FNIC_DEBUG */
+
+#endif /* _FIP_H_ */
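(Side note, not part of the patch: the FIP_*_LEN constants above count 4-byte FIP descriptor words that follow the FIP header, so they can be cross-checked against the frame structs declared in this header. A minimal sketch, assuming the standard fip_header/fip_mac_desc/fip_vn_desc layouts and FIP_BPW from <scsi/fc/fc_fip.h> and static_assert from <linux/build_bug.h>:)

/* Illustrative compile-time checks only -- not part of the patch. */
#include <linux/build_bug.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_fip.h>
#include "fip.h"

/* ENode KA payload: one 8-byte MAC descriptor = 2 FIP words */
static_assert(sizeof(struct fip_enode_ka) ==
	      sizeof(struct ethhdr) + sizeof(struct fip_header) +
	      FIP_ENODE_KA_LEN * FIP_BPW);

/* VN_Port KA payload: MAC descriptor (2 words) + VN_Port descriptor (5 words) */
static_assert(sizeof(struct fip_vn_port_ka) ==
	      sizeof(struct ethhdr) + sizeof(struct fip_header) +
	      FIP_VN_KA_LEN * FIP_BPW);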
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce73f08ee889..6c5f6046b1f5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -10,8 +10,10 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <scsi/libfc.h>
-#include <scsi/libfcoe.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
@@ -24,13 +26,15 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_scsi.h"
+#include "fnic_fdls.h"
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.7.0.0"
+#define DRV_VERSION "1.8.0.0"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
+#define FABRIC_LOGO_MAX_RETRY 3
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
@@ -38,6 +42,7 @@
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
+#define LUN0_DELAY_TIME 9
/*
* Tag bits used for special requests.
@@ -75,6 +80,77 @@
#define FNIC_DEV_RST_TERM_DONE BIT(20)
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */
+#define FNIC_FCOE_MAX_CMD_LEN 16
+/* Retry supported by rport (returned by PRLI service parameters) */
+#define FNIC_FC_RP_FLAGS_RETRY 0x1
+
+/* Cisco vendor id */
+#define PCI_VENDOR_ID_CISCO 0x1137
+#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */
+
+/* sereno pcie switch */
+#define PCI_DEVICE_ID_CISCO_SERENO 0x004e
+#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */
+#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */
+#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */
+
+/* Sereno */
+#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */
+#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */
+#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */
+#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */
+#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */
+#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */
+
+/* Cruz */
+#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */
+/* Cruz MountTian SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b
+#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */
+/* Cruz MountTian2 SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157
+#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */
+
+/* Bodega */
+/* VIC 1457 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218
+#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */
+/* VIC 1487 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a
+#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */
+/* VIC 1440 Mezz mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215
+#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */
+#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */
+#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */
+#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */
+#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */
+
+/* Beverly */
+#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */
+#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */
+#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */
+#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */
+#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */
+#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */
+#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */
+#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */
+#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */
+
+struct fnic_pcie_device {
+ u32 device;
+ u8 *desc;
+ u32 subsystem_device;
+ u8 *subsys_desc;
+};
+
/*
* fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
@@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define fnic_clear_state_flags(fnicp, st_flags) \
__fnic_set_state_flags(fnicp, st_flags, 1)
+enum reset_states {
+ NOT_IN_PROGRESS = 0,
+ IN_PROGRESS,
+ RESET_ERROR
+};
+
+enum rscn_type {
+ NOT_PC_RSCN = 0,
+ PC_RSCN
+};
+
+enum pc_rscn_handling_status {
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0,
+ PC_RSCN_HANDLING_IN_PROGRESS
+};
+
+enum pc_rscn_handling_feature {
+ PC_RSCN_HANDLING_FEATURE_OFF = 0,
+ PC_RSCN_HANDLING_FEATURE_ON
+};
+
+extern unsigned int fnic_fdmi_support;
extern unsigned int fnic_log_level;
extern unsigned int io_completions;
+extern struct workqueue_struct *fnic_event_queue;
+
+extern unsigned int pc_rscn_handling_feature_flag;
+extern spinlock_t reset_fnic_list_lock;
+extern struct list_head reset_fnic_list;
+extern struct workqueue_struct *reset_fnic_work_queue;
+extern struct work_struct reset_fnic_work;
+
#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
@@ -155,6 +261,12 @@ do { \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
+#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
+
#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, \
@@ -213,12 +325,26 @@ enum fnic_state {
struct mempool;
+enum fnic_role_e {
+ FNIC_ROLE_FCP_INITIATOR = 0,
+};
+
enum fnic_evt {
FNIC_EVT_START_VLAN_DISC = 1,
FNIC_EVT_START_FCF_DISC = 2,
FNIC_EVT_MAX,
};
+struct fnic_frame_list {
+ /*
+ * Link to frame lists
+ */
+ struct list_head links;
+ void *fp;
+ int frame_len;
+ int rx_ethhdr_stripped;
+};
+
struct fnic_event {
struct list_head list;
struct fnic *fnic;
@@ -235,8 +361,9 @@ struct fnic_cpy_wq {
/* Per-instance private data structure */
struct fnic {
int fnic_num;
- struct fc_lport *lport;
- struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
+ enum fnic_role_e role;
+ struct fnic_iport_s iport;
+ struct Scsi_Host *host;
struct vnic_dev_bar bar0;
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
@@ -255,6 +382,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct completion reset_completion_wait;
struct mutex sgreset_mutex;
spinlock_t sgreset_lock; /* lock for sgreset */
struct scsi_cmnd *sgreset_sc;
@@ -268,25 +396,27 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
- u32 link_events:1; /* set when we get any link event*/
-
- struct completion *remove_wait; /* device remove thread blocks */
+ struct completion *fw_reset_done;
+ u32 reset_in_progress;
atomic_t in_flight; /* io counter */
bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
+ unsigned long lock_flags;
u16 vlan_id; /* VLAN tag including priority */
u8 data_src_addr[ETH_ALEN];
u64 fcp_input_bytes; /* internal statistic */
u64 fcp_output_bytes; /* internal statistic */
u32 link_down_cnt;
+ u32 soft_reset_count;
int link_status;
struct list_head list;
+ struct list_head links;
struct pci_dev *pdev;
struct vnic_fc_config config;
struct vnic_dev *vdev;
@@ -306,19 +436,29 @@ struct fnic {
struct work_struct link_work;
struct work_struct frame_work;
struct work_struct flush_work;
- struct sk_buff_head frame_queue;
- struct sk_buff_head tx_queue;
+ struct list_head frame_queue;
+ struct list_head tx_queue;
+ mempool_t *frame_pool;
+ mempool_t *frame_elem_pool;
+ struct work_struct tport_work;
+ struct list_head tport_event_list;
+
+ char subsys_desc[14];
+ int subsys_desc_len;
+ int pc_rscn_handling_status;
/*** FIP related data members -- start ***/
void (*set_vlan)(struct fnic *, u16 vlan);
struct work_struct fip_frame_work;
- struct sk_buff_head fip_frame_queue;
+ struct work_struct fip_timer_work;
+ struct list_head fip_frame_queue;
struct timer_list fip_timer;
- struct list_head vlans;
spinlock_t vlans_lock;
-
- struct work_struct event_work;
- struct list_head evlist;
+ struct timer_list retry_fip_timer;
+ struct timer_list fcs_ka_timer;
+ struct timer_list enode_ka_timer;
+ struct timer_list vn_ka_timer;
+ struct list_head vlan_list;
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
@@ -341,11 +481,6 @@ struct fnic {
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
};
-static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
-{
- return container_of(fip, struct fnic, ctlr);
-}
-
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern const struct attribute_group *fnic_host_groups[];
@@ -356,29 +491,29 @@ int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
-int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
+void fnic_tport_event_handler(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
void fnic_handle_event(struct work_struct *work);
+void fdls_reclaim_oxid_handler(struct work_struct *work);
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
void fnic_flush_tx(struct work_struct *work);
-void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
-void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-void fnic_update_mac(struct fc_lport *, u8 *new);
void fnic_update_mac_locked(struct fnic *, u8 *new);
int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
-int fnic_host_reset(struct scsi_cmnd *);
-int fnic_reset(struct Scsi_Host *);
-void fnic_scsi_cleanup(struct fc_lport *);
-void fnic_scsi_abort_io(struct fc_lport *);
-void fnic_empty_scsi_cleanup(struct fc_lport *);
-void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
+int fnic_host_reset(struct Scsi_Host *shost);
+void fnic_reset(struct Scsi_Host *shost);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
+void fnic_get_host_port_state(struct Scsi_Host *shost);
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
-
+int fnic_stats_debugfs_init(struct fnic *fnic);
+void fnic_stats_debugfs_remove(struct fnic *fnic);
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_reset_work_handler(struct work_struct *work);
void fnic_handle_fip_event(struct fnic *fnic);
void fnic_fcoe_reset_vlans(struct fnic *fnic);
-void fnic_fcoe_evlist_free(struct fnic *fnic);
-extern void fnic_handle_fip_timer(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct timer_list *t);
static inline int
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
@@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
+void fnic_free_txq(struct list_head *head);
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc);
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
+void fnic_delete_fcp_tports(struct fnic *fnic);
+void fnic_flush_tport_event_list(struct fnic *fnic);
+int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
+unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
+ struct scsi_device *device);
+unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
+ struct scsi_device *device);
+void fnic_scsi_unload(struct fnic *fnic);
+void fnic_scsi_unload_cleanup(struct fnic *fnic);
+int fnic_get_debug_info(struct stats_debug_info *info,
+ struct fnic *fnic);
+
+struct fnic_scsi_iter_data {
+ struct fnic *fnic;
+ void *data1;
+ void *data2;
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2);
+};
+
+static inline bool
+fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
+{
+ struct fnic_scsi_iter_data *iter = iter_data;
+
+ return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
+}
+
+static inline void
+fnic_scsi_io_iter(struct fnic *fnic,
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2),
+ void *data1, void *data2)
+{
+ struct fnic_scsi_iter_data iter_data = {
+ .fn = fn,
+ .fnic = fnic,
+ .data1 = data1,
+ .data2 = data2,
+ };
+ scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
+}
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i = i+8) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
+ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
+ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
+ }
+}
+
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ int len, char *pfx)
+{
+ uint32_t s_id, d_id;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
+ pfx, s_id, d_id, fchdr->fh_type,
+ FNIC_STD_GET_OX_ID(fchdr), len);
+
+ fnic_debug_dump(fnic, (uint8_t *)fchdr, len);
+
+}
+#else /* FNIC_DEBUG */
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ uint32_t len, char *pfx) {}
+#endif /* FNIC_DEBUG */
#endif /* _FNIC_H_ */
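(Side note, not part of the patch: the fnic_scsi_io_iter() helper added above wraps scsi_host_busy_iter() so callers only supply a (fnic, sc, data1, data2) callback. A minimal usage sketch with hypothetical names, showing how a per-LUN counter in the style of fnic_count_lun_ioreqs() could be built on it:)

/* Hypothetical example only -- not part of the patch. */
static bool fnic_count_lun_cb(struct fnic *fnic, struct scsi_cmnd *sc,
			      void *data1, void *data2)
{
	struct scsi_device *sdev = data1;
	unsigned int *count = data2;

	if (sc->device == sdev)
		(*count)++;

	return true;	/* returning true keeps scsi_host_busy_iter() going */
}

static unsigned int fnic_count_lun_example(struct fnic *fnic,
					   struct scsi_device *sdev)
{
	unsigned int count = 0;

	fnic_scsi_io_iter(fnic, fnic_count_lun_cb, sdev, &count);
	return count;
}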
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index 0c5e57c7e322..705718f0809b 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -11,8 +11,8 @@
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
@@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev,
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
- return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n",
+ ((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
+ (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
+ "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 2619a2d4f5f1..5767862ae42f 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -7,6 +7,9 @@
#include <linux/vmalloc.h>
#include "fnic.h"
+extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer,
+ struct fnic *fnic);
+
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
@@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode,
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+ debug->buffer_len += fnic_get_debug_info(debug, fnic);
file->private_data = debug;
@@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
-void fnic_stats_debugfs_init(struct fnic *fnic)
+int fnic_stats_debugfs_init(struct fnic *fnic)
{
char name[16];
- snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+ snprintf(name, sizeof(name), "host%d", fnic->host->host_no);
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
-
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
-
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
+ return 0;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index a08293b2ad9f..1e8cd64f9a5c 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -14,701 +14,379 @@
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
-#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
-#include <scsi/libfc.h>
+#include <linux/etherdevice.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
-#include "fnic_fip.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
+#include "fip.h"
+
+#define MAX_RESET_WAIT_COUNT 64
-static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
-static void fnic_set_eth_mode(struct fnic *);
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
+static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;
-void fnic_handle_link(struct work_struct *work)
+/*
+ * Internal Functions
+ * This function will initialize the src_mac address to be
+ * used in outgoing frames
+ */
+static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
+ uint8_t *src_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, link_work);
- unsigned long flags;
- int old_link_status;
- u32 old_link_down_cnt;
- u64 old_port_speed, new_port_speed;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- fnic->link_events = 1; /* less work to just set everytime*/
-
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- old_link_down_cnt = fnic->link_down_cnt;
- old_link_status = fnic->link_status;
- old_port_speed = atomic64_read(
- &fnic->fnic_stats.misc_stats.current_port_speed);
-
- fnic->link_status = vnic_dev_link_status(fnic->vdev);
- fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
-
- new_port_speed = vnic_dev_port_speed(fnic->vdev);
- atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
- new_port_speed);
- if (old_port_speed != new_port_speed)
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Current vnic speed set to: %llu\n",
- new_port_speed);
-
- switch (vnic_dev_port_speed(fnic->vdev)) {
- case DCEM_PORTSPEED_10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
- break;
- case DCEM_PORTSPEED_20G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
- break;
- case DCEM_PORTSPEED_25G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
- break;
- case DCEM_PORTSPEED_40G:
- case DCEM_PORTSPEED_4x10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
- break;
- case DCEM_PORTSPEED_100G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
- break;
- default:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
- break;
- }
-
- if (old_link_status == fnic->link_status) {
- if (!fnic->link_status) {
- /* DOWN -> DOWN */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN->DOWN",
- strlen("Link Status: DOWN->DOWN"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->down\n");
- } else {
- if (old_link_down_cnt != fnic->link_down_cnt) {
- /* UP -> DOWN -> UP */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status:UP_DOWN_UP",
- strlen("Link_Status:UP_DOWN_UP")
- );
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "link down\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status: UP_DOWN_UP_VLAN",
- strlen(
- "Link Status: UP_DOWN_UP_VLAN")
- );
- fnic_fcoe_send_vlan_req(fnic);
- return;
- }
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down->up: Link up\n");
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_UP",
- strlen("Link Status: UP_UP"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->up\n");
- }
- }
- } else if (fnic->link_status) {
- /* DOWN -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
- strlen("Link Status: DOWN_UP_VLAN"));
- fnic_fcoe_send_vlan_req(fnic);
-
- return;
- }
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->up: Link up\n");
- fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> DOWN */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down: Link down\n");
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_DOWN",
- strlen("Link Status: UP_DOWN"));
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "deleting fip-timer during link-down\n");
- del_timer_sync(&fnic->fip_timer);
- }
- fcoe_ctlr_link_down(&fnic->ctlr);
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ src_mac[0], src_mac[1], src_mac[2], src_mac[3],
+ src_mac[4], src_mac[5]);
+ memcpy(fnic->iport.fpma, src_mac, 6);
}
/*
- * This function passes incoming fabric frames to libFC
+ * This function will initialize the dst_mac address to be
+ * used in outgoing frames
*/
-void fnic_handle_frame(struct work_struct *work)
+static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
+ uint8_t *dst_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, frame_work);
- struct fc_lport *lp = fnic->lport;
- unsigned long flags;
- struct sk_buff *skb;
- struct fc_frame *fp;
-
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
+ dst_mac[4], dst_mac[5]);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
- return;
- }
- fp = (struct fc_frame *)skb;
-
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- fc_exch_recv(lp, fp);
- }
+ memcpy(fnic->iport.fcfmac, dst_mac, 6);
}
-void fnic_fcoe_evlist_free(struct fnic *fnic)
+void fnic_get_host_port_state(struct Scsi_Host *shost)
{
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+ struct fnic_iport_s *iport = &fnic->iport;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- list_del(&fevt->list);
- kfree(fevt);
- }
+ if (!fnic->link_status)
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else if (iport->state == FNIC_IPORT_STATE_READY)
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_event(struct work_struct *work)
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
- struct fnic *fnic = container_of(work, struct fnic, event_work);
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
- unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- if (fnic->stop_rx_link_events) {
- list_del(&fevt->list);
- kfree(fevt);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- list_del(&fevt->list);
- switch (fevt->event) {
- case FNIC_EVT_START_VLAN_DISC:
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (linkup) {
+ if (iport->usefip) {
+ iport->state = FNIC_IPORT_STATE_FIP;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
fnic_fcoe_send_vlan_req(fnic);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- break;
- case FNIC_EVT_START_FCF_DISC:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Start FCF Discovery\n");
- fnic_fcoe_start_fcf_disc(fnic);
- break;
- default:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unknown event 0x%x\n", fevt->event);
- break;
+ } else {
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport->state: %d", iport->state);
+ fnic_fdls_disc_start(iport);
}
- kfree(fevt);
+ } else {
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+ fnic_common_fip_cleanup(fnic);
+ fnic_fdls_link_down(iport);
+
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-/**
- * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
- * @fip: The FCoE controller that received the frame
- * @skb: The received FIP frame
- *
- * Returns non-zero if the frame is rejected with unsupported cmd with
- * insufficient resource els explanation.
+
+/*
+ * The FPMA can be taken from the ethhdr (dst_mac) or the FLOGI response,
+ * or derived from the FC_MAP and FCID combination (e.g. FC_MAP 0x0EFC00
+ * and FCID 0x010203 give FPMA 0e:fc:00:01:02:03). These should all be
+ * the same; revisit this if there is any possibility they differ.
*/
-static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
- struct sk_buff *skb)
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid)
{
- struct fc_lport *lport = fip->lp;
- struct fip_header *fiph;
- struct fc_frame_header *fh = NULL;
- struct fip_desc *desc;
- struct fip_encaps *els;
- u16 op;
- u8 els_op;
- u8 sub;
-
- size_t rlen;
- size_t dlen = 0;
-
- if (skb_linearize(skb))
- return 0;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
+ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
- if (skb->len < sizeof(*fiph))
- return 0;
+ memcpy(&fcmac[3], fcid, 3);
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ ethhdr->h_dest[0], ethhdr->h_dest[1],
+ ethhdr->h_dest[2], ethhdr->h_dest[3],
+ ethhdr->h_dest[4], ethhdr->h_dest[5]);
- if (op != FIP_OP_LS)
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
- if (sub != FIP_SC_REP)
- return 0;
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- if (rlen + sizeof(*fiph) > skb->len)
- return 0;
-
- desc = (struct fip_desc *)(fiph + 1);
- dlen = desc->fip_dlen * FIP_BPW;
+ fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
+ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
+}
- if (desc->fip_dtype == FIP_DT_FLOGI) {
+void fnic_fdls_init(struct fnic *fnic, int usefip)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
- if (dlen < sizeof(*els) + sizeof(*fh) + 1)
- return 0;
+ /* Initialize iPort structure */
+ iport->state = FNIC_IPORT_STATE_INIT;
+ iport->fnic = fnic;
+ iport->usefip = usefip;
- els = (struct fip_encaps *)desc;
- fh = (struct fc_frame_header *)(els + 1);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
+ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);
- if (!fh)
- return 0;
+ INIT_LIST_HEAD(&iport->tport_list);
+ INIT_LIST_HEAD(&iport->tport_list_pending_del);
- /*
- * ELS command code, reason and explanation should be = Reject,
- * unsupported command and insufficient resource
- */
- els_op = *(u8 *)(fh + 1);
- if (els_op == ELS_LS_RJT) {
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Rejected by Switch\n");
- return 1;
- }
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Accepted by Switch\n");
- }
- return 0;
+ fnic_fdls_disc_init(iport);
}
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+void fnic_handle_link(struct work_struct *work)
{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- struct sk_buff *skb;
- char *eth_fr;
- struct fip_vlan *vlan;
- u64 vlan_tov;
+ struct fnic *fnic = container_of(work, struct fnic, link_work);
+ int old_link_status;
+ u32 old_link_down_cnt;
+ int max_count = 0;
- fnic_fcoe_reset_vlans(fnic);
- fnic->set_vlan(fnic, 0);
+ if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Interrupt mode is not MSI\n");
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Sending VLAN request...\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- skb = dev_alloc_skb(sizeof(struct fip_vlan));
- if (!skb)
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Stop link rx events\n");
return;
-
- eth_fr = (char *)skb->data;
- vlan = (struct fip_vlan *)eth_fr;
-
- memset(vlan, 0, sizeof(*vlan));
- memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
- memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
- vlan->eth.h_proto = htons(ETH_P_FIP);
-
- vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
- vlan->fip.fip_op = htons(FIP_OP_VLAN);
- vlan->fip.fip_subcode = FIP_SC_VL_REQ;
- vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
-
- vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
- vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
- memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
-
- vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
- vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
- put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
- atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
-
- skb_put(skb, sizeof(*vlan));
- skb->protocol = htons(ETH_P_FIP);
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- fip->send(fip, skb);
-
- /* set a timer so that we can retry if there no response */
- vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
- mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
-}
-
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
-{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fip_header *fiph;
- struct fip_desc *desc;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u16 vid;
- size_t rlen;
- size_t dlen;
- struct fcoe_vlan *vlan;
- u64 sol_time;
- unsigned long flags;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response...\n");
-
- fiph = (struct fip_header *) skb->data;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
- ntohs(fiph->fip_op), fiph->fip_subcode);
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- fnic_fcoe_reset_vlans(fnic);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- desc = (struct fip_desc *)(fiph + 1);
- while (rlen > 0) {
- dlen = desc->fip_dlen * FIP_BPW;
- switch (desc->fip_dtype) {
- case FIP_DT_VLAN:
- vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
- shost_printk(KERN_INFO, fnic->lport->host,
- "process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan) {
- /* retry from timer */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- goto out;
- }
- vlan->vid = vid & 0x0fff;
- vlan->state = FIP_VLAN_AVAIL;
- list_add_tail(&vlan->list, &fnic->vlans);
- break;
- }
- desc = (struct fip_desc *)((char *)desc + dlen);
- rlen -= dlen;
}
- /* any VLAN descriptors present ? */
- if (list_empty(&fnic->vlans)) {
- /* retry from timer */
- atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "No VLAN descriptors in FIP VLAN response\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- goto out;
+ /* Do not process if the fnic is already in transitional state */
+ if ((fnic->state != FNIC_IN_ETH_MODE)
+ && (fnic->state != FNIC_IN_FC_MODE)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic in transitional state: %d. link up: %d ignored",
+ fnic->state, vnic_dev_link_status(fnic->vdev));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Current link status: %d iport state: %d\n",
+ fnic->link_status, fnic->iport.state);
+ return;
}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count++;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(fip);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-out:
- return;
-}
-
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
-{
- unsigned long flags;
- struct fcoe_vlan *vlan;
- u64 sol_time;
-
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count = 1;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(&fnic->ctlr);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-}
-
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
-{
- unsigned long flags;
- struct fcoe_vlan *fvlan;
+ old_link_down_cnt = fnic->link_down_cnt;
+ old_link_status = fnic->link_status;
+ fnic->link_status = vnic_dev_link_status(fnic->vdev);
+ fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait\n");
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "waiting for reset completion\n");
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "woken up from reset completion wait\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ max_count++;
+ if (max_count >= MAX_RESET_WAIT_COUNT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rstth waited for too long. Skipping handle link event\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
}
-
- fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- if (fvlan->state == FIP_VLAN_USED) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset in progress\n");
+ fnic->reset_in_progress = IN_PROGRESS;
+
+ if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
+ (fnic->link_status != old_link_status)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old link status: %d link status: %d\n",
+ old_link_status, (int) fnic->link_status);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old down count %d down count: %d\n",
+ old_link_down_cnt, (int) fnic->link_down_cnt);
}
- if (fvlan->state == FIP_VLAN_SENT) {
- fvlan->state = FIP_VLAN_USED;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ if (old_link_status == fnic->link_status) {
+ if (!fnic->link_status) {
+ /* DOWN -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->down\n");
+ } else {
+ if (old_link_down_cnt != fnic->link_down_cnt) {
+ /* UP -> DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->up\n");
+ }
+ }
+ } else if (fnic->link_status) {
+ /* DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
-}
-static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
-{
- struct fnic_event *fevt;
- unsigned long flags;
-
- fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
- if (!fevt)
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
- fevt->fnic = fnic;
- fevt->event = ev;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- list_add_tail(&fevt->list, &fnic->evlist);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- schedule_work(&fnic->event_work);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset completion\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+void fnic_handle_frame(struct work_struct *work)
{
- struct fip_header *fiph;
- int ret = 1;
- u16 op;
- u8 sub;
+ struct fnic *fnic = container_of(work, struct fnic, frame_work);
+ struct fnic_frame_list *cur_frame, *next;
+ int fchdr_offset = 0;
- if (!skb || !(skb->data))
- return -1;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
+ if (fnic->stop_rx_link_events) {
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
+ return;
+ }
- if (skb_linearize(skb))
- goto drop;
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Cannot process frame in transitional state\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ list_del(&cur_frame->links);
- if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
- goto drop;
+ /* Frames from FCP_RQ will have ethhdrs stripped off */
+ fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET;
- if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
- goto drop;
+ fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
+ cur_frame->frame_len, fchdr_offset);
- if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
- if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
- goto drop;
- /* pass it on to fcoe */
- ret = 1;
- } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
- /* set the vlan as used */
- fnic_fcoe_process_vlan_resp(fnic, skb);
- ret = 0;
- } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
- /* received CVL request, restart vlan disc */
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- /* pass it on to fcoe */
- ret = 1;
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
-drop:
- return ret;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
void fnic_handle_fip_frame(struct work_struct *work)
{
+ struct fnic_frame_list *cur_frame, *next;
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned long flags;
- struct sk_buff *skb;
- struct ethhdr *eh;
- while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing FIP frame\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
+ links) {
if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
return;
}
+
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->fip_frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic->state != FNIC_IN_ETH_MODE) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_FIP)) {
- skb_pull(skb, sizeof(*eh));
- if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
- dev_kfree_skb(skb);
- continue;
- }
- /*
- * If there's FLOGI rejects - clear all
- * fcf's & restart from scratch
- */
- if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
- atomic64_inc(
- &fnic_stats->vlan_stats.flogi_rejects);
- shost_printk(KERN_INFO, fnic->lport->host,
- "Trigger a Link down - VLAN Disc\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- /* start FCoE VLAN discovery */
- fnic_fcoe_send_vlan_req(fnic);
- dev_kfree_skb(skb);
- continue;
- }
- fcoe_ctlr_recv(&fnic->ctlr, skb);
- continue;
+
+ list_del(&cur_frame->links);
+
+ if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
- * @skb: Ethernet Frame.
+ * @fp: Ethernet Frame.
*/
-static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
{
- struct fc_frame *fp;
struct ethhdr *eh;
- struct fcoe_hdr *fcoe_hdr;
- struct fcoe_crc_eof *ft;
+ struct fnic_frame_list *fip_fr_elem;
+ unsigned long flags;
- /*
- * Undo VLAN encapsulation if present.
- */
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_8021Q)) {
- memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = skb_pull(skb, VLAN_HLEN);
- skb_reset_mac_header(skb);
- }
- if (eh->h_proto == htons(ETH_P_FIP)) {
- if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
- printk(KERN_ERR "Dropped FIP frame, as firmware "
- "uses non-FIP mode, Enable FIP "
- "using UCSM\n");
- goto drop;
- }
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- skb_queue_tail(&fnic->fip_frame_queue, skb);
+ eh = (struct ethhdr *) fp;
+ if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
+ fip_fr_elem = (struct fnic_frame_list *)
+ kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC);
+ if (!fip_fr_elem)
+ return 0;
+ fip_fr_elem->fp = fp;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
- return 1; /* let caller know packet was used */
- }
- if (eh->h_proto != htons(ETH_P_FCOE))
- goto drop;
- skb_set_network_header(skb, sizeof(*eh));
- skb_pull(skb, sizeof(*eh));
-
- fcoe_hdr = (struct fcoe_hdr *)skb->data;
- if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
- goto drop;
-
- fp = (struct fc_frame *)skb;
- fc_frame_init(fp);
- fr_sof(fp) = fcoe_hdr->fcoe_sof;
- skb_pull(skb, sizeof(struct fcoe_hdr));
- skb_reset_transport_header(skb);
-
- ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
- fr_eof(fp) = ft->fcoe_eof;
- skb_trim(skb, skb->len - sizeof(*ft));
- return 0;
-drop:
- dev_kfree_skb_irq(skb);
- return -1;
+ return 1; /* let caller know packet was used */
+ } else
+ return 0;
}
/**
@@ -720,206 +398,147 @@ drop:
*/
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
- u8 *ctl = fnic->ctlr.ctl_src_addr;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u8 *ctl = iport->hwmac;
u8 *data = fnic->data_src_addr;
if (is_zero_ether_addr(new))
new = ctl;
if (ether_addr_equal(data, new))
return;
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "update_mac %pM\n", new);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Update MAC: %u\n", *new);
+
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
+
memcpy(data, new, ETH_ALEN);
if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
-/**
- * fnic_update_mac() - set data MAC address and filters.
- * @lport: local port.
- * @new: newly-assigned FCoE MAC address.
- */
-void fnic_update_mac(struct fc_lport *lport, u8 *new)
-{
- struct fnic *fnic = lport_priv(lport);
-
- spin_lock_irq(&fnic->fnic_lock);
- fnic_update_mac_locked(fnic, new);
- spin_unlock_irq(&fnic->fnic_lock);
-}
-
-/**
- * fnic_set_port_id() - set the port_ID after successful FLOGI.
- * @lport: local port.
- * @port_id: assigned FC_ID.
- * @fp: received frame containing the FLOGI accept or NULL.
- *
- * This is called from libfc when a new FC_ID has been assigned.
- * This causes us to reset the firmware to FC_MODE and setup the new MAC
- * address and FC_ID.
- *
- * It is also called with FC_ID 0 when we're logged off.
- *
- * If the FC_ID is due to point-to-point, fp may be NULL.
- */
-void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
-{
- struct fnic *fnic = lport_priv(lport);
- u8 *mac;
- int ret;
-
- FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
- "set port_id 0x%x fp 0x%p\n",
- port_id, fp);
-
- /*
- * If we're clearing the FC_ID, change to use the ctl_src_addr.
- * Set ethernet mode to send FLOGI.
- */
- if (!port_id) {
- fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
- fnic_set_eth_mode(fnic);
- return;
- }
-
- if (fp) {
- mac = fr_cb(fp)->granted_mac;
- if (is_zero_ether_addr(mac)) {
- /* non-FIP - FLOGI already accepted - ignore return */
- fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
- }
- fnic_update_mac(lport, mac);
- }
-
- /* Change state to reflect transition to FC mode */
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
- fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
- else {
- FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "Unexpected fnic state: %s processing FLOGI response",
- fnic_state_to_str(fnic->state));
- spin_unlock_irq(&fnic->fnic_lock);
- return;
- }
- spin_unlock_irq(&fnic->fnic_lock);
-
- /*
- * Send FLOGI registration to firmware to set up FC mode.
- * The new address will be set up when registration completes.
- */
- ret = fnic_flogi_reg_handler(fnic, port_id);
-
- if (ret < 0) {
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
- fnic->state = FNIC_IN_ETH_MODE;
- spin_unlock_irq(&fnic->fnic_lock);
- }
-}
-
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
- struct fc_frame *fp;
+ uint8_t *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned int ethhdr_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe = 0, fcoe_sof, fcoe_eof;
- u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+ u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
u8 fcs_ok = 1, packet_error = 0;
- u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+ u16 q_number, completed_index, vlan;
u32 rss_hash;
+ u16 checksum;
+ u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 fcoe = 0, fcoe_sof, fcoe_eof;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
+ u16 enet_bytes_written = 0;
+ u32 bytes_written = 0;
unsigned long flags;
+ struct fnic_frame_list *frame_elem = NULL;
+ struct ethhdr *eh;
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- skb = buf->os_buf;
- fp = (struct fc_frame *)skb;
+ DMA_FROM_DEVICE);
+ fp = (uint8_t *) buf->os_buf;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
- cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
- &tmpl, &fcp_bytes_written, &sof, &eof,
- &ingress_port, &packet_error,
- &fcoe_enc_error, &fcs_ok, &vlan_stripped,
- &vlan);
- skb_trim(skb, fcp_bytes_written);
- fr_sof(fp) = sof;
- fr_eof(fp) = eof;
-
+ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index, &eop, &sop,
+ &fcoe_fnic_crc_ok, &exchange_id, &tmpl,
+ &fcp_bytes_written, &sof, &eof, &ingress_port,
+ &packet_error, &fcoe_enc_error, &fcs_ok,
+ &vlan_stripped, &vlan);
+ ethhdr_stripped = 1;
+ bytes_written = fcp_bytes_written;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop,
- &rss_type, &csum_not_calc, &rss_hash,
- &bytes_written, &packet_error,
- &vlan_stripped, &vlan, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok,
- &fcoe_enc_error, &fcoe_eof,
- &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4,
- &ipv4_fragment, &fcs_ok);
- skb_trim(skb, bytes_written);
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &enet_bytes_written,
+ &packet_error, &vlan_stripped, &vlan,
+ &checksum, &fcoe_sof, &fcoe_fnic_crc_ok,
+ &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok,
+ &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
+ &ipv4_fragment, &fcs_ok);
+
+ ethhdr_stripped = 0;
+ bytes_written = enet_bytes_written;
+
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcs error. dropping packet.\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs error. Dropping packet.\n", fnic);
goto drop;
}
- if (fnic_import_rq_eth_pkt(fnic, skb))
- return;
+ eh = (struct ethhdr *) fp;
+ if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {
+ if (fnic_import_rq_eth_pkt(fnic, fp))
+ return;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping h_proto 0x%x",
+ be16_to_cpu(eh->h_proto));
+ goto drop;
+ }
} else {
- /* wrong CQ type*/
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic rq_cmpl wrong cq type x%x\n", type);
+ /* wrong CQ type */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
- if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic rq_cmpl fcoe x%x fcsok x%x"
- " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
- " x%x\n",
- fcoe, fcs_ok, packet_error,
- fcoe_fc_crc_ok, fcoe_enc_error);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
+ fcoe, fcs_ok, packet_error,
+ fcoe_fnic_crc_ok, fcoe_enc_error);
goto drop;
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic->stop_rx_link_events: %d\n",
+ fnic->stop_rx_link_events);
goto drop;
}
- fr_dev(fp) = fnic->lport;
+
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
- (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ goto drop;
}
+ frame_elem->fp = fp;
+ frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
+ frame_elem->frame_len = bytes_written;
- skb_queue_tail(&fnic->frame_queue, skb);
- queue_work(fnic_event_queue, &fnic->frame_work);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&frame_elem->links, &fnic->frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ queue_work(fnic_event_queue, &fnic->frame_work);
return;
+
drop:
- dev_kfree_skb_irq(skb);
+ kfree(fp);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
@@ -945,10 +564,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
- if (cur_work_done) {
+ if (cur_work_done && fnic->stop_rx_link_events != 1) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"fnic_alloc_rq_frame can't alloc"
" frame\n");
}
@@ -966,218 +585,179 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
+ void *buf;
u16 len;
dma_addr_t pa;
- int r;
+ int ret;
- len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
- skb = dev_alloc_skb(len);
- if (!skb) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unable to allocate RQ sk_buff\n");
+ len = FNIC_FRAME_HT_ROOM;
+ buf = kmalloc(len, GFP_ATOMIC);
+ if (!buf) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unable to allocate RQ buffer of size: %d\n", len);
return -ENOMEM;
}
- skb_reset_mac_header(skb);
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- skb_put(skb, len);
- pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+
+ pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- r = -ENOMEM;
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
- goto free_skb;
+ ret = -ENOMEM;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCI mapping failed with error %d\n", ret);
+ goto free_buf;
}
- fnic_queue_rq_desc(rq, skb, pa, len);
+ fnic_queue_rq_desc(rq, buf, pa, len);
return 0;
-
-free_skb:
- kfree_skb(skb);
- return r;
+free_buf:
+ kfree(buf);
+ return ret;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
+ void *rq_buf = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(rq_buf);
buf->os_buf = NULL;
}
-/**
- * fnic_eth_send() - Send Ethernet frame.
- * @fip: fcoe_ctlr instance.
- * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
- */
-void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
-{
- struct fnic *fnic = fnic_from_ctlr(fip);
- struct vnic_wq *wq = &fnic->wq[0];
- dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- unsigned long flags;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr = (struct ethhdr *)skb_mac_header(skb);
- vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
- memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- } else {
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- }
-
- pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- printk(KERN_ERR "DMA mapping failed\n");
- goto free_skb;
- }
-
- spin_lock_irqsave(&fnic->wq_lock[0], flags);
- if (!vnic_wq_desc_avail(wq))
- goto irq_restore;
-
- fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1);
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- return;
-
-irq_restore:
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
-free_skb:
- kfree_skb(skb);
-}
-
/*
* Send FC frame.
*/
-static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
struct vnic_wq *wq = &fnic->wq[0];
- struct sk_buff *skb;
dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- struct fcoe_hdr *fcoe_hdr;
- struct fc_frame_header *fh;
- u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
- fh = fc_frame_header_get(fp);
- skb = fp_skb(fp);
+ pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
- fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
- return 0;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
- vlan_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr = (struct ethhdr *)vlan_hdr;
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
- } else {
- eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
- eth_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr->h_proto = htons(ETH_P_FCOE);
- fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
- }
-
- if (fnic->ctlr.map_dest)
- fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
- else
- memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
- memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
-
- tot_len = skb->len;
- BUG_ON(tot_len % 4);
-
- memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
- fcoe_hdr->fcoe_sof = fr_sof(fp);
- if (FC_FCOE_VER)
- FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
-
- pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- ret = -ENOMEM;
- printk(KERN_ERR "DMA map failed with error %d\n", ret);
- goto free_skb_on_err;
- }
-
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
- (char *)eth_hdr, tot_len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ if ((fnic_fc_trace_set_data(fnic->fnic_num,
+ FNIC_FC_SEND | 0x80, (char *) frame,
+ frame_len)) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic ctlr frame trace error");
}
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "vnic work queue descriptor is not available");
ret = -1;
- goto irq_restore;
+ goto fnic_send_frame_end;
}
- fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1, 1, 1);
+ /* hw inserts cos value */
+ fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
+ 0, fnic->vlan_id, 1, 1, 1);
-irq_restore:
+fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-
-free_skb_on_err:
- if (ret)
- dev_kfree_skb_any(fp_skb(fp));
-
return ret;
}
-/*
- * fnic_send
- * Routine to send a raw frame
+/**
+ * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE
+ * info. This interface is used only in the non-fast path (login, fabric
+ * registrations, etc.).
+ *
+ * @fnic: fnic instance
+ * @frame: frame structure with FC payload filled in
+ * @frame_size: length of the frame to be sent
+ * @srcmac: source mac address
+ * @dstmac: destination mac address
+ *
+ * Called with the fnic lock held.
*/
-int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+static int
+fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
+ uint8_t *srcmac, uint8_t *dstmac)
{
- struct fnic *fnic = lport_priv(lp);
- unsigned long flags;
+ struct ethhdr *pethhdr;
+ struct fcoe_hdr *pfcoe_hdr;
+ struct fnic_frame_list *frame_elem;
+ int len = frame_size;
+ int ret;
+ struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
+ FNIC_ETH_FCOE_HDRS_OFFSET);
- if (fnic->in_remove) {
- dev_kfree_skb(fp_skb(fp));
- return -1;
- }
+ pethhdr = (struct ethhdr *) frame;
+ pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
+ memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
+ memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);
+
+ pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
+ pfcoe_hdr->fcoe_sof = FC_SOF_I3;
/*
* Queue frame if in a transitional state.
* This occurs while registering the Port_ID / MAC address after FLOGI.
*/
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic->state != FNIC_IN_FC_MODE)
+ && (fnic->state != FNIC_IN_ETH_MODE)) {
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ return -ENOMEM;
+ }
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
+ ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ frame_elem->fp = frame;
+ frame_elem->frame_len = len;
+ list_add_tail(&frame_elem->links, &fnic->tx_queue);
return 0;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return fnic_send_frame(fnic, fp);
+ fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");
+
+ ret = fnic_send_frame(fnic, frame, len);
+ return ret;
+}
+
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *dstmac, *srcmac;
+
+ /* If module unload is in-progress, don't send */
+ if (fnic->in_remove)
+ return;
+
+ if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
+ srcmac = iport->fpma;
+ dstmac = iport->fcfmac;
+ } else {
+ srcmac = iport->hwmac;
+ dstmac = FCOE_ALL_FCF_MAC;
+ }
+
+ fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
+}
+
+int
+fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+
+ if (fnic->in_remove)
+ return -1;
+
+ fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
+ return fnic_send_frame(fnic, frame, frame_size);
}
/**
@@ -1193,64 +773,87 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
void fnic_flush_tx(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, flush_work);
- struct sk_buff *skb;
struct fc_frame *fp;
+ struct fnic_frame_list *cur_frame, *next;
- while ((skb = skb_dequeue(&fnic->tx_queue))) {
- fp = (struct fc_frame *)skb;
- fnic_send_frame(fnic, fp);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flush queued frames");
+
+ list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
+ fp = cur_frame->fp;
+ list_del(&cur_frame->links);
+ fnic_send_frame(fnic, fp, cur_frame->frame_len);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
}
-/**
- * fnic_set_eth_mode() - put fnic into ethernet mode.
- * @fnic: fnic device
- *
- * Called without fnic lock held.
- */
-static void fnic_set_eth_mode(struct fnic *fnic)
+int
+fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp)
{
- unsigned long flags;
- enum fnic_state old_state;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr;
int ret;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-again:
- old_state = fnic->state;
- switch (old_state) {
- case FNIC_IN_FC_MODE:
- case FNIC_IN_ETH_TRANS_FC_MODE:
- default:
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id,
+ fp, fnic->state);
- ret = fnic_fw_reset_handler(fnic);
+ if (fp) {
+ ethhdr = (struct ethhdr *) fp;
+ vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
+ }
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
- goto again;
- if (ret)
- fnic->state = old_state;
- break;
-
- case FNIC_IN_FC_TRANS_ETH_MODE:
- case FNIC_IN_ETH_MODE:
- break;
+ /* Change state to reflect transition to FC mode */
+ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
+ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unexpected fnic state while processing FLOGI response\n");
+ return -1;
+ }
+
+ /*
+ * Send FLOGI registration to firmware to set up FC mode.
+ * The new address will be set up when registration completes.
+ */
+ ret = fnic_flogi_reg_handler(fnic, port_id);
+ if (ret < 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration error ret: %d fnic state: %d\n",
+ ret, fnic->state);
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+ fnic->state = FNIC_IN_ETH_MODE;
+
+ return -1;
+ }
+ iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration success\n");
+ return 0;
+}
+
+void fnic_free_txq(struct list_head *head)
+{
+ struct fnic_frame_list *cur_frame, *next;
+
+ list_for_each_entry_safe(cur_frame, next, head, links) {
+ list_del(&cur_frame->links);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
- struct sk_buff *skb = buf->os_buf;
- struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(fp_skb(fp));
+ mempool_free(buf->os_buf, fnic->frame_pool);
buf->os_buf = NULL;
}
@@ -1288,119 +891,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(buf->os_buf);
buf->os_buf = NULL;
}
-void fnic_fcoe_reset_vlans(struct fnic *fnic)
+void
+fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
+ unsigned long flags)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fc_rport *rport;
+ struct fc_rport_identifiers ids;
+ struct rport_dd_data_s *rdd_data;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding rport fcid: 0x%x", tport->fcid);
+
+ ids.node_name = tport->wwnn;
+ ids.port_name = tport->wwpn;
+ ids.port_id = tport->fcid;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ rport = fc_remote_port_add(fnic->host, 0, &ids);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!rport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to add rport for tport: 0x%x", tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added rport fcid: 0x%x", tport->fcid);
+
+ /* Mimic these assignments in queuecommand to avoid timing issues */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+}
+
+void
+fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags)
+{
+ struct fnic *fnic = iport->fnic;
+ struct rport_dd_data_s *rdd_data;
+
+ struct fc_rport *rport;
+
+ if (!tport)
+ return;
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
+ rport = tport->rport;
+
+ if (rport) {
+ /* tport resource release will be done
+ * after fnic_terminate_rport_io()
+ */
+ tport->flags |= FNIC_FDLS_TPORT_DELETED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ /* Interface to scsi_fc_transport */
+ fc_remote_port_delete(rport);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
+ tport->fcid);
+
+ /*
+ * the dd_data is allocated by fc transport
+ * of size dd_fcrport_size
+ */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = NULL;
+ rdd_data->iport = NULL;
+ list_del(&tport->links);
+ kfree(tport);
+ } else {
+ fnic_del_tport_timer_sync(fnic, tport);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+}
+
+void fnic_delete_fcp_tports(struct fnic *fnic)
{
+ struct fnic_tport_s *tport, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fcoe_vlan *next;
- /*
- * indicate a link down to fcoe so that all fcf's are free'd
- * might not be required since we did this before sending vlan
- * discovery request
- */
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (!list_empty(&fnic->vlans)) {
- list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
- list_del(&vlan->list);
- kfree(vlan);
- }
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing fcp rport fcid: 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ fnic_del_tport_timer_sync(fnic, tport);
+ fnic_fdls_remove_tport(&fnic->iport, tport, flags);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_fip_timer(struct fnic *fnic)
+/**
+ * fnic_tport_event_handler() - Handler for remote port events
+ * in the tport_event_queue.
+ *
+ * @work: Handle to the remote port being dequeued
+ */
+void fnic_tport_event_handler(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, tport_work);
+ struct fnic_tport_event_s *cur_evt, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u64 sol_time;
+ struct fnic_tport_s *tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ tport = cur_evt->arg1;
+ switch (cur_evt->event) {
+ case TGT_EV_RPORT_ADD:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Add rport event");
+ if (tport->state == FDLS_TGT_STATE_READY) {
+ fnic_fdls_add_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Target not ready. Add rport event dropped: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_RPORT_DEL:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove rport event");
+ if (tport->state == FDLS_TGT_STATE_OFFLINING) {
+ fnic_fdls_remove_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "remove rport event dropped tport fcid: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_TPORT_DELETE:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Delete tport event");
+ fdls_delete_tport(tport->iport, tport);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown tport event");
+ break;
+ }
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
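The enqueue side of tport_event_list is not shown in this hunk; below is a minimal sketch under the assumption that posting an event uses the same list, lock and work item as the handler above. The helper name and the choice of workqueue are illustrative, not driver code.

/* Illustrative only: post an event for fnic_tport_event_handler().
 * Assumes the caller already holds fnic->fnic_lock.
 */
static int example_post_tport_event(struct fnic *fnic, enum fnic_tgt_msg_id event,
				    struct fnic_tport_s *tport)
{
	struct fnic_tport_event_s *evt;

	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
	if (!evt)
		return -ENOMEM;

	evt->event = event;
	evt->arg1 = tport;
	list_add_tail(&evt->links, &fnic->tport_event_list);
	queue_work(fnic_event_queue, &fnic->tport_work);
	return 0;
}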
- if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
- return;
+void fnic_flush_tport_event_list(struct fnic *fnic)
+{
+ struct fnic_tport_event_s *cur_evt, *next;
+ unsigned long flags;
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* no vlans available, try again */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan %d state %d sol_count %d\n",
- vlan->vid, vlan->state, vlan->sol_count);
- switch (vlan->state) {
- case FIP_VLAN_USED:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FIP VLAN is selected for FC transaction\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- break;
- case FIP_VLAN_FAILED:
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* if all vlans are in failed state, restart vlan disc */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- break;
- case FIP_VLAN_SENT:
- if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
- /*
- * no response on this vlan, remove from the list.
- * Try the next vlan
- */
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Dequeue this VLAN ID %d from list\n",
- vlan->vid);
- list_del(&vlan->list);
- kfree(vlan);
- vlan = NULL;
- if (list_empty(&fnic->vlans)) {
- /* we exhausted all vlans, restart vlan disc */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan list empty, "
- "trigger vlan disc\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
- }
- /* check the next vlan */
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
- list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- }
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
- vlan->sol_count++;
- sol_time = jiffies + msecs_to_jiffies
- (FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
- break;
+void fnic_reset_work_handler(struct work_struct *work)
+{
+ struct fnic *cur_fnic, *next_fnic;
+ unsigned long reset_fnic_list_lock_flags;
+ int host_reset_ret_code;
+
+ /*
+ * This is a single worker thread, per fnic module (not per fnic).
+ * All the fnics that need to be reset have been serialized via the
+ * reset fnic list.
+ */
+ spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
+ list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
+ list_del(&cur_fnic->links);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
+ cur_fnic->fnic_num);
+ host_reset_ret_code = fnic_host_reset(cur_fnic->host);
+ dev_err(&cur_fnic->pdev->dev,
+ "fnic: <%d>: returned from host reset with status: %d\n",
+ cur_fnic->fnic_num, host_reset_ret_code);
+
+ spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+ cur_fnic->pc_rscn_handling_status =
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS;
+ spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
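The producer side of the reset list is outside this hunk; here is a minimal sketch, assuming the module-wide reset_fnic_list, reset_fnic_list_lock, reset_fnic_work and reset_fnic_work_queue declared in fnic_main.c further below. The helper name is hypothetical.

/* Illustrative only: hand a fnic to the module-wide reset worker. */
static void example_schedule_fnic_reset(struct fnic *fnic)
{
	unsigned long flags;

	spin_lock_irqsave(&reset_fnic_list_lock, flags);
	list_add_tail(&fnic->links, &reset_fnic_list);
	spin_unlock_irqrestore(&reset_fnic_list_lock, flags);

	queue_work(reset_fnic_work_queue, &reset_fnic_work);
}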
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
new file mode 100644
index 000000000000..8e610b65ad57
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FNIC_FDLS_H_
+#define _FNIC_FDLS_H_
+
+#include "fnic_stats.h"
+#include "fdls_fc.h"
+
+/* FDLS - Fabric discovery and login services
+ * -> VLAN discovery
+ * -> retry every retry delay seconds until it succeeds.
+ * <- List of VLANs
+ *
+ * -> Solicitation
+ * <- Solicitation response (Advertisement)
+ *
+ * -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV)
+ * <- FLOGI response
+ *
+ * -> FCF keep alive
+ * <- FCF keep alive
+ *
+ * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
+ *
+ * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- SCR response
+ * -> Retry SCR - Number of retries 2
+ *
+ * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
+ * -> Retry on BUSY until it succeeds
+ * -> 2 retries on timeout
+ *
+ * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2)
+ * -> Ignore if both retries fail.
+ *
+ * Session establishment with targets
+ * For each PWWN
+ * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI. Num retries using vnic.cfg
+ *
+ * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PRLI response
+ * -> Retry PRLI. Num retries using vnic.cfg
+ *
+ */
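As a rough illustration of the sequence above, the sketch below shows how an initiator-side state machine might advance on each successful response, using fdls_set_state()/fdls_get_state() and enum fnic_fdls_state_e defined later in this header. The helper is hypothetical; the real transitions live in fdls_disc.c and may differ.

/* Illustrative only: advance the fabric login sequence on a good response. */
static void example_fdls_advance(struct fnic_fdls_fabric_s *fabric)
{
	switch (fdls_get_state(fabric)) {
	case FDLS_STATE_FABRIC_FLOGI:		/* FLOGI accept received */
		fdls_set_state(fabric, FDLS_STATE_FABRIC_PLOGI);
		break;
	case FDLS_STATE_FABRIC_PLOGI:		/* PLOGI to the name server (FFFFFC) done */
		fdls_set_state(fabric, FDLS_STATE_RPN_ID);
		break;
	case FDLS_STATE_RPN_ID:			/* RPN_ID done, register FC-4 types */
		fdls_set_state(fabric, FDLS_STATE_REGISTER_FC4_TYPES);
		break;
	case FDLS_STATE_REGISTER_FC4_TYPES:	/* RFT_ID done, register FC-4 features */
		fdls_set_state(fabric, FDLS_STATE_REGISTER_FC4_FEATURES);
		break;
	case FDLS_STATE_REGISTER_FC4_FEATURES:	/* name-server registration done */
		fdls_set_state(fabric, FDLS_STATE_SCR);
		break;
	case FDLS_STATE_SCR:			/* SCR accepted, discover targets */
		fdls_set_state(fabric, FDLS_STATE_GPN_FT);
		break;
	default:
		break;
	}
}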
+
+#define FDLS_RETRY_COUNT 2
+
+/*
+ * OXID encoding:
+ * bits 0-8: oxid idx - allocated from pool
+ * bits 9-13: oxid frame code from fnic_oxid_frame_type_e
+ * bits 14-15: all zeros
+ */
+#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */
+#define FNIC_OXID_ENCODE(idx, frame_type) (frame_type | idx)
+#define FNIC_FRAME_MASK 0xFE00
+#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK)
+#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1))
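A worked example of this encoding, using FNIC_FRAME_TYPE_FABRIC_FLOGI (0x1000) from the frame-type codes defined further down; the values are purely illustrative.

/* Illustrative only: with FNIC_OXID_POOL_SZ = 512, the low 9 bits hold the index. */
u16 oxid  = FNIC_OXID_ENCODE(5, FNIC_FRAME_TYPE_FABRIC_FLOGI);	/* 0x1000 | 5      = 0x1005 */
u16 ftype = FNIC_FRAME_TYPE(oxid);				/* 0x1005 & 0xFE00 = 0x1000 */
u16 idx   = FNIC_OXID_IDX(oxid);				/* 0x1005 & 0x01FF = 5      */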
+
+#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */
+
+#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1
+#define FNIC_FDLS_FPMA_LEARNT 0x2
+
+/* tport flags */
+#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1
+#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2
+#define FNIC_FDLS_TPORT_SEND_ADISC 0x4
+#define FNIC_FDLS_RETRY_FRAME 0x8
+#define FNIC_FDLS_TPORT_BUSY 0x10
+#define FNIC_FDLS_TPORT_TERMINATING 0x20
+#define FNIC_FDLS_TPORT_DELETED 0x40
+#define FNIC_FDLS_SCSI_REGISTERED 0x200
+
+/* Retry supported by rport(returned by prli service parameters) */
+#define FDLS_FC_RP_FLAGS_RETRY 0x1
+
+#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state)
+#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state)
+
+#define FNIC_FDMI_ACTIVE 0x8
+#define FNIC_FIRST_LINK_UP 0x2
+
+#define fdls_set_tport_state(_tport, _state) (_tport->state = _state)
+#define fdls_get_tport_state(_tport) (_tport->state)
+
+#define FNIC_PORTSPEED_10GBIT 1
+#define FNIC_FRAME_HT_ROOM (2148)
+#define FNIC_FCOE_FRAME_MAXSZ (2112)
+
+
+#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000
+#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200
+#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400
+#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600
+#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800
+#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00
+#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00
+#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00
+#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000
+#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200
+#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400
+#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600
+#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800
+#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00
+#define FNIC_FRAME_TYPE_TGT_LOGO 0x2C00
+
+struct fnic_fip_fcf_s {
+ uint16_t vlan_id;
+ uint8_t fcf_mac[6];
+ uint8_t fcf_priority;
+ uint32_t fka_adv_period;
+ uint8_t ka_disabled;
+};
+
+enum fnic_fdls_state_e {
+ FDLS_STATE_INIT = 0,
+ FDLS_STATE_LINKDOWN,
+ FDLS_STATE_FABRIC_LOGO,
+ FDLS_STATE_FLOGO_DONE,
+ FDLS_STATE_FABRIC_FLOGI,
+ FDLS_STATE_FABRIC_PLOGI,
+ FDLS_STATE_RPN_ID,
+ FDLS_STATE_REGISTER_FC4_TYPES,
+ FDLS_STATE_REGISTER_FC4_FEATURES,
+ FDLS_STATE_SCR,
+ FDLS_STATE_GPN_FT,
+ FDLS_STATE_TGT_DISCOVERY,
+ FDLS_STATE_RSCN_GPN_FT,
+ FDLS_STATE_SEND_GPNFT
+};
+
+struct fnic_fdls_fabric_s {
+ enum fnic_fdls_state_e state;
+ uint32_t flags;
+ struct list_head tport_list; /* List of discovered tports */
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int del_fdmi_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ int fdmi_retry;
+ struct timer_list fdmi_timer;
+ int fdmi_pending;
+};
+
+struct fnic_fdls_fip_s {
+ uint32_t state;
+ uint32_t flogi_retry;
+};
+
+/* Message to tport_event_handler */
+enum fnic_tgt_msg_id {
+ TGT_EV_NONE = 0,
+ TGT_EV_RPORT_ADD,
+ TGT_EV_RPORT_DEL,
+ TGT_EV_TPORT_DELETE,
+ TGT_EV_REMOVE
+};
+
+struct fnic_tport_event_s {
+ struct list_head links;
+ enum fnic_tgt_msg_id event;
+ void *arg1;
+};
+
+enum fdls_tgt_state_e {
+ FDLS_TGT_STATE_INIT = 0,
+ FDLS_TGT_STATE_PLOGI,
+ FDLS_TGT_STATE_PRLI,
+ FDLS_TGT_STATE_READY,
+ FDLS_TGT_STATE_LOGO_RECEIVED,
+ FDLS_TGT_STATE_ADISC,
+ FDL_TGT_STATE_PLOGO,
+ FDLS_TGT_STATE_OFFLINING,
+ FDLS_TGT_STATE_OFFLINE
+};
+
+struct fnic_tport_s {
+ struct list_head links; /* To link the tports */
+ enum fdls_tgt_state_e state;
+ uint32_t flags;
+ uint32_t fcid;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t active_oxid;
+ uint16_t tgt_flags;
+ atomic_t in_flight; /* io counter */
+ uint16_t max_payload_size;
+ uint16_t r_a_tov;
+ uint16_t e_d_tov;
+ uint16_t lun0_delay;
+ int max_concur_seqs;
+ uint32_t fcp_csp;
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ unsigned int num_pending_cmds;
+ int nexus_restart_count;
+ int exch_reset_in_progress;
+ void *iport;
+ struct work_struct tport_del_work;
+ struct completion *tport_del_done;
+ struct fc_rport *rport;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+/* OXID pool related structures */
+struct reclaim_entry_s {
+ struct list_head links;
+ /* oxid that needs to be freed after 2*r_a_tov */
+ uint16_t oxid_idx;
+ /* expiry time in jiffies; reclaim the oxid once this time has passed */
+ unsigned long expires;
+ unsigned long *bitmap;
+};
+
+/* used for allocating oxids for fabric and fdmi requests */
+struct fnic_oxid_pool_s {
+ DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
+ int sz; /* size of the pool or block */
+ int next_idx; /* used for cycling through the oxid pool */
+
+ /* retry schedule free */
+ DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
+ struct delayed_work schedule_oxid_free_retry;
+
+ /* List of oxids that need to be freed and reclaimed.
+ * This list is shared by all the oxid pools
+ */
+ struct list_head oxid_reclaim_list;
+ /* Work associated with reclaim list */
+ struct delayed_work oxid_reclaim_work;
+};
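A hedged sketch of taking an index out of this pool with the standard bitmap primitives. The driver's real fdls_alloc_oxid() (declared at the end of this header) is not part of this hunk and likely also handles the next_idx round-robin, locking and the reclaim list.

/* Illustrative only: claim a free index and return an encoded OXID. */
static u16 example_oxid_alloc(struct fnic_oxid_pool_s *pool, int frame_type)
{
	int idx;

	idx = find_first_zero_bit(pool->bitmap, FNIC_OXID_POOL_SZ);
	if (idx >= FNIC_OXID_POOL_SZ)
		return 0xFFFF;			/* illustrative "no oxid" sentinel */
	set_bit(idx, pool->bitmap);
	return FNIC_OXID_ENCODE(idx, frame_type);
}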
+
+/* iport */
+enum fnic_iport_state_e {
+ FNIC_IPORT_STATE_INIT = 0,
+ FNIC_IPORT_STATE_LINK_WAIT,
+ FNIC_IPORT_STATE_FIP,
+ FNIC_IPORT_STATE_FABRIC_DISC,
+ FNIC_IPORT_STATE_READY
+};
+
+struct fnic_iport_s {
+ enum fnic_iport_state_e state;
+ struct fnic *fnic;
+ uint64_t boot_time;
+ uint32_t flags;
+ int usefip;
+ uint8_t hwmac[6]; /* HW MAC Addr */
+ uint8_t fpma[6]; /* Fabric Provided MA */
+ uint8_t fcfmac[6]; /* MAC addr of Fabric */
+ uint16_t vlan_id;
+ uint32_t fcid;
+
+ /* oxid pool */
+ struct fnic_oxid_pool_s oxid_pool;
+
+ /*
+ * Fabric requests are serialized; only one request is outstanding
+ * at a time. Track its oxid so an abort can be sent if needed.
+ */
+ uint16_t active_oxid_fabric_req;
+ /* fdmi only */
+ uint16_t active_oxid_fdmi_plogi;
+ uint16_t active_oxid_fdmi_rhba;
+ uint16_t active_oxid_fdmi_rpa;
+
+ struct fnic_fip_fcf_s selected_fcf;
+ struct fnic_fdls_fip_s fip;
+ struct fnic_fdls_fabric_s fabric;
+ struct list_head tport_list;
+ struct list_head tport_list_pending_del;
+ /* list of tports for which we are yet to send PLOGO */
+ struct list_head inprocess_tport_list;
+ struct list_head deleted_tport_list;
+ struct work_struct tport_event_work;
+ uint32_t e_d_tov; /* msec */
+ uint32_t r_a_tov; /* msec */
+ uint32_t link_supported_speeds;
+ uint32_t max_flogi_retries;
+ uint32_t max_plogi_retries;
+ uint32_t plogi_timeout;
+ uint32_t service_params;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t max_payload_size;
+ spinlock_t deleted_tport_lst_lock;
+ struct completion *flogi_reg_done;
+ struct fnic_iport_stats iport_stats;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+struct rport_dd_data_s {
+ struct fnic_tport_s *tport;
+ struct fnic_iport_s *iport;
+};
+
+enum fnic_recv_frame_type_e {
+ FNIC_FABRIC_FLOGI_RSP = 1,
+ FNIC_FABRIC_PLOGI_RSP,
+ FNIC_FABRIC_RPN_RSP,
+ FNIC_FABRIC_RFT_RSP,
+ FNIC_FABRIC_RFF_RSP,
+ FNIC_FABRIC_SCR_RSP,
+ FNIC_FABRIC_GPN_FT_RSP,
+ FNIC_FABRIC_BLS_ABTS_RSP,
+ FNIC_FDMI_PLOGI_RSP,
+ FNIC_FDMI_REG_HBA_RSP,
+ FNIC_FDMI_RPA_RSP,
+ FNIC_FDMI_BLS_ABTS_RSP,
+ FNIC_FABRIC_LOGO_RSP,
+
+ /* responses to target requests */
+ FNIC_TPORT_PLOGI_RSP,
+ FNIC_TPORT_PRLI_RSP,
+ FNIC_TPORT_ADISC_RSP,
+ FNIC_TPORT_BLS_ABTS_RSP,
+ FNIC_TPORT_LOGO_RSP,
+
+ /* unsolicited requests */
+ FNIC_BLS_ABTS_REQ,
+ FNIC_ELS_PLOGI_REQ,
+ FNIC_ELS_RSCN_REQ,
+ FNIC_ELS_LOGO_REQ,
+ FNIC_ELS_ECHO_REQ,
+ FNIC_ELS_ADISC,
+ FNIC_ELS_RLS,
+ FNIC_ELS_RRQ,
+ FNIC_ELS_UNSUPPORTED_REQ,
+};
+
+enum fnic_port_speeds {
+ DCEM_PORTSPEED_NONE = 0,
+ DCEM_PORTSPEED_1G = 1000,
+ DCEM_PORTSPEED_2G = 2000,
+ DCEM_PORTSPEED_4G = 4000,
+ DCEM_PORTSPEED_8G = 8000,
+ DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_16G = 16000,
+ DCEM_PORTSPEED_20G = 20000,
+ DCEM_PORTSPEED_25G = 25000,
+ DCEM_PORTSPEED_32G = 32000,
+ DCEM_PORTSPEED_40G = 40000,
+ DCEM_PORTSPEED_4x10G = 41000,
+ DCEM_PORTSPEED_50G = 50000,
+ DCEM_PORTSPEED_64G = 64000,
+ DCEM_PORTSPEED_100G = 100000,
+ DCEM_PORTSPEED_128G = 128000,
+};
+
+/* Function Declarations */
+/* fdls_disc.c */
+void fnic_fdls_disc_init(struct fnic_iport_s *iport);
+void fnic_fdls_disc_start(struct fnic_iport_s *iport);
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset);
+void fnic_fdls_link_down(struct fnic_iport_s *iport);
+int fdls_init_frame_pool(struct fnic_iport_s *iport);
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid);
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid);
+void fdls_tgt_logout(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fnic_del_fabric_timer_sync(struct fnic *fnic);
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport);
+void fdls_send_fabric_logo(struct fnic_iport_s *iport);
+int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr);
+void fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+bool fdls_delete_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fdls_fdmi_timer_callback(struct timer_list *t);
+
+/* fnic_fcs.c */
+void fnic_fdls_init(struct fnic *fnic, int usefip);
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size);
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+int fnic_send_fip_frame(struct fnic_iport_s *iport,
+ void *frame, int frame_size);
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid);
+void fnic_fdls_add_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags);
+void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport,
+ unsigned long flags);
+
+/* fip.c */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_common_fip_cleanup(struct fnic *fnic);
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
+void fnic_handle_fcs_ka_timer(struct timer_list *t);
+void fnic_handle_enode_ka_timer(struct timer_list *t);
+void fnic_handle_vn_ka_timer(struct timer_list *t);
+void fnic_handle_fip_timer(struct timer_list *t);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+
+/* fnic_scsi.c */
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
+int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp);
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid);
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn);
+
+#endif /* _FNIC_FDLS_H_ */
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
deleted file mode 100644
index 79f53029737b..000000000000
--- a/drivers/scsi/fnic/fnic_fip.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- */
-
-#ifndef _FNIC_FIP_H_
-#define _FNIC_FIP_H_
-
-
-#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
-#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
-#define FCOE_CTLR_MAX_SOL 8
-
-#define FINC_MAX_FLOGI_REJECTS 8
-
-struct vlan {
- __be16 vid;
- __be16 type;
-};
-
-/*
- * VLAN entry.
- */
-struct fcoe_vlan {
- struct list_head list;
- u16 vid; /* vlan ID */
- u16 sol_count; /* no. of sols sent */
- u16 state; /* state */
-};
-
-enum fip_vlan_state {
- FIP_VLAN_AVAIL = 0, /* don't do anything */
- FIP_VLAN_SENT = 1, /* sent */
- FIP_VLAN_USED = 2, /* succeed */
- FIP_VLAN_FAILED = 3, /* failed to response */
-};
-
-struct fip_vlan {
- struct ethhdr eth;
- struct fip_header fip;
- struct {
- struct fip_mac_desc mac;
- struct fip_wwn_desc wwnn;
- } desc;
-};
-
-#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 5895ead20e14..0d974e040ab7 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -7,6 +7,7 @@
#define _FNIC_IO_H_
#include <scsi/fc/fc_fcp.h>
+#include "fnic_fdls.h"
#define FNIC_DFLT_SG_DESC_CNT 32
#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
@@ -41,6 +42,8 @@ enum fnic_ioreq_state {
};
struct fnic_io_req {
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct host_sg_desc *sgl_list; /* sgl list */
void *sgl_list_alloc; /* sgl list address used for free */
dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
@@ -55,15 +58,4 @@ struct fnic_io_req {
unsigned int tag;
struct scsi_cmnd *sc; /* midlayer's cmd pointer */
};
-
-enum fnic_port_speeds {
- DCEM_PORTSPEED_NONE = 0,
- DCEM_PORTSPEED_1G = 1000,
- DCEM_PORTSPEED_10G = 10000,
- DCEM_PORTSPEED_20G = 20000,
- DCEM_PORTSPEED_25G = 25000,
- DCEM_PORTSPEED_40G = 40000,
- DCEM_PORTSPEED_4x10G = 41000,
- DCEM_PORTSPEED_100G = 100000,
-};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index ff85441c6cea..7ed50b11afa6 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -7,7 +7,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <scsi/libfc.h>
+#include <scsi/scsi_transport_fc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
@@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"request_irq failed with error: %d\n",
err);
fnic_free_intr(fnic);
@@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
n, m, o);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
fnic->rq_count, fnic->raw_wq_count,
fnic->wq_copy_count, fnic->cq_count);
@@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"allocated %d MSI-X vectors\n",
vec_count);
if (vec_count > 0) {
if (vec_count < vecs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"interrupts number mismatch: vec_count: %d vecs: %d\n",
vec_count, vecs);
if (vec_count < min_irqs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"no interrupts for copy wq\n");
return 1;
}
@@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->wq_copy_count = vec_count - n - m - 1;
fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
if (fnic->cq_count != vec_count - 1) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"CQ count: %d does not match MSI-X vector count: %d\n",
fnic->cq_count, vec_count);
fnic->cq_count = vec_count - 1;
@@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->intr_count = vec_count;
fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
fnic->rq_count,
fnic->raw_wq_count, fnic->copy_wq_base);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"wq_copy_count: %d wq_count: %d cq_count: %d\n",
fnic->wq_copy_count,
fnic->wq_count, fnic->cq_count);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"intr_count: %d err_intr_offset: %u",
fnic->intr_count,
fnic->err_intr_offset);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic using MSI-X\n");
return 0;
}
@@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
@@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->cq_count = 3;
fnic->intr_count = 3;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 1cb517f731f4..0b20ac8c3f46 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -21,15 +21,15 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
-#include "fnic_fip.h"
#include "fnic.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@@ -38,12 +38,18 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
+static struct kmem_cache *fdls_frame_cache;
+static struct kmem_cache *fdls_frame_elem_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
static DEFINE_IDA(fnic_ida);
+struct work_struct reset_fnic_work;
+LIST_HEAD(reset_fnic_list);
+DEFINE_SPINLOCK(reset_fnic_list_lock);
+
/* Supported devices by fnic module */
-static struct pci_device_id fnic_id_table[] = {
+static const struct pci_device_id fnic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
{ 0, }
};
@@ -59,6 +65,14 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_fdmi_support = 1;
+module_param(fnic_fdmi_support, int, 0644);
+MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support");
+
+static unsigned int fnic_tgt_id_binding = 1;
+module_param(fnic_tgt_id_binding, uint, 0644);
+MODULE_PARM_DESC(fnic_tgt_id_binding,
+ "Target ID binding (0 for none. 1 for binding by WWPN (default))");
unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
@@ -78,15 +92,15 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
-static struct libfc_function_template fnic_transport_template = {
- .frame_send = fnic_send,
- .lport_set_port_id = fnic_set_port_id,
- .fcp_abort_io = fnic_empty_scsi_cleanup,
- .fcp_cleanup = fnic_empty_scsi_cleanup,
- .exch_mgr_reset = fnic_exch_mgr_reset
-};
+unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON;
+module_param(pc_rscn_handling_feature_flag, uint, 0644);
+MODULE_PARM_DESC(pc_rscn_handling_feature_flag,
+ "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))");
-static int fnic_slave_alloc(struct scsi_device *sdev)
+struct workqueue_struct *reset_fnic_work_queue;
+struct workqueue_struct *fnic_fip_queue;
+
+static int fnic_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -104,8 +118,8 @@ static const struct scsi_host_template fnic_host_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
- .eh_host_reset_handler = fnic_host_reset,
- .slave_alloc = fnic_slave_alloc,
+ .eh_host_reset_handler = fnic_eh_host_reset_handler,
+ .sdev_init = fnic_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -145,7 +159,7 @@ static struct fc_function_template fnic_fc_functions = {
.get_host_speed = fnic_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
- .get_host_port_state = fc_get_host_port_state,
+ .get_host_port_state = fnic_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.show_rport_maxframe_size = 1,
@@ -156,54 +170,88 @@ static struct fc_function_template fnic_fc_functions = {
.show_starget_port_id = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
- .issue_fc_host_lip = fnic_reset,
+ .issue_fc_host_lip = fnic_issue_fc_host_lip,
.get_fc_host_stats = fnic_get_stats,
.reset_fc_host_stats = fnic_reset_host_stats,
- .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .dd_fcrport_size = sizeof(struct rport_dd_data_s),
.terminate_rport_io = fnic_terminate_rport_io,
- .bsg_request = fc_lport_bsg_request,
+ .bsg_request = NULL,
};
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "port_speed: %d Mbps", port_speed);
+ atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed);
/* Add in other values as they get defined in fw */
switch (port_speed) {
+ case DCEM_PORTSPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case DCEM_PORTSPEED_2G:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case DCEM_PORTSPEED_4G:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case DCEM_PORTSPEED_8G:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_16G:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
case DCEM_PORTSPEED_20G:
fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
+ case DCEM_PORTSPEED_32G:
+ fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+ break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
+ case DCEM_PORTSPEED_50G:
+ fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
+ break;
+ case DCEM_PORTSPEED_64G:
+ fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
+ break;
case DCEM_PORTSPEED_100G:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
+ case DCEM_PORTSPEED_128G:
+ fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
+ break;
default:
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown FC speed: %d Mbps", port_speed);
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
+/* Placeholder function */
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
- struct fc_host_statistics *stats = &lp->host_stats;
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
+ struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats;
struct vnic_stats *vs;
unsigned long flags;
- if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
+ if (time_before
+ (jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
return stats;
fnic->stats_time = jiffies;
@@ -212,24 +260,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic: Get vnic stats failed"
- " 0x%x", ret);
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic: Get vnic stats failed: 0x%x", ret);
return stats;
}
vs = fnic->stats;
stats->tx_frames = vs->tx.tx_unicast_frames_ok;
- stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
+ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
stats->rx_frames = vs->rx.rx_unicast_frames_ok;
- stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
+ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
stats->seconds_since_last_reset =
- (jiffies - fnic->stats_reset_time) / HZ;
+ (jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
-
return stats;
}
@@ -310,8 +356,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host,
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct fc_host_statistics *stats;
unsigned long flags;
@@ -324,7 +369,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic: Reset vnic stats failed"
" 0x%x", ret);
return;
@@ -343,25 +388,19 @@ void fnic_log_q_error(struct fnic *fnic)
for (i = 0; i < fnic->raw_wq_count; i++) {
error_status = ioread32(&fnic->wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "WQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->rq_count; i++) {
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "RQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "CWQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status);
}
}
@@ -395,8 +434,7 @@ static int fnic_notify_set(struct fnic *fnic)
err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
break;
default:
- shost_printk(KERN_ERR, fnic->lport->host,
- "Interrupt mode should be set up"
+ dev_err(&fnic->pdev->dev, "Interrupt mode should be set up"
" before devcmd notify set %d\n",
vnic_dev_get_intr_mode(fnic->vdev));
err = -1;
@@ -415,13 +453,6 @@ static void fnic_notify_timer(struct timer_list *t)
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(struct timer_list *t)
-{
- struct fnic *fnic = from_timer(fnic, t, fip_timer);
-
- fnic_handle_fip_timer(fnic);
-}
-
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -521,6 +552,8 @@ static int fnic_cleanup(struct fnic *fnic)
vnic_intr_clean(&fnic->intr[i]);
mempool_destroy(fnic->io_req_pool);
+ mempool_destroy(fnic->frame_pool);
+ mempool_destroy(fnic->frame_elem_pool);
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
mempool_destroy(fnic->io_sgl_pool[i]);
@@ -533,25 +566,36 @@ static void fnic_iounmap(struct fnic *fnic)
iounmap(fnic->bar0.vaddr);
}
-/**
- * fnic_get_mac() - get assigned data MAC address for FIP code.
- * @lport: local port.
- */
-static u8 *fnic_get_mac(struct fc_lport *lport)
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- struct fnic *fnic = lport_priv(lport);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
- return fnic->data_src_addr;
+static void fnic_scsi_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->host;
+
+ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+ host->host_no);
+
+ host->transportt = fnic_fc_transport;
}
-static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+static void fnic_free_ioreq_tables_mq(struct fnic *fnic)
{
- vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ int hwq;
+
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
static int fnic_scsi_drv_init(struct fnic *fnic)
{
- struct Scsi_Host *host = fnic->lport->host;
+ struct Scsi_Host *host = fnic->host;
+ int err;
+ struct pci_dev *pdev = fnic->pdev;
+ struct fnic_iport_s *iport = &fnic->iport;
+ int hwq;
/* Configure maximum outstanding IO reqs*/
if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
@@ -562,40 +606,92 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
fnic->fnic_max_tag_id = host->can_queue;
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;
- shost_printk(KERN_INFO, host,
- "fnic: can_queue: %d max_lun: %llu",
+ dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu",
host->can_queue, host->max_lun);
- shost_printk(KERN_INFO, host,
- "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
host->max_id, host->max_cmd_len, host->nr_hw_queues);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
+ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
+ fnic->sw_copy_wq[hwq].io_req_table =
+ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
+ sizeof(struct fnic_io_req *), GFP_KERNEL);
+
+ if (!fnic->sw_copy_wq[hwq].io_req_table) {
+ fnic_free_ioreq_tables_mq(fnic);
+ return -ENOMEM;
+ }
+ }
+
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
+
+ fnic_scsi_init(fnic);
+
+ err = scsi_add_host(fnic->host, &pdev->dev);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n");
+ return err;
+ }
+ fc_host_maxframe_size(fnic->host) = iport->max_payload_size;
+ fc_host_dev_loss_tmo(fnic->host) =
+ fnic->config.port_down_timeout / 1000;
+ sprintf(fc_host_symbolic_name(fnic->host),
+ DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(fnic->host) = iport->wwnn;
+ fc_host_port_name(fnic->host) = iport->wwpn;
+ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(fnic->host), 0,
+ sizeof(fc_host_supported_fc4s(fnic->host)));
+ fc_host_supported_fc4s(fnic->host)[2] = 1;
+ fc_host_supported_fc4s(fnic->host)[7] = 1;
+ fc_host_supported_speeds(fnic->host) = 0;
+ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT;
+
+ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data);
+ if (fnic->host->shost_data != NULL) {
+ if (fnic_tgt_id_binding == 0) {
+ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE;
+ } else {
+ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN;
+ }
+ }
+
+ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+ if (!fnic->io_req_pool) {
+ scsi_remove_host(fnic->host);
+ return -ENOMEM;
+ }
+
return 0;
}
void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
{
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct pci_dev *l_pdev = fnic->pdev;
int intr_mode = fnic->config.intr_mode;
struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"intr_mode is not msix\n");
return;
}
- FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"qmap->nr_queues: %d\n", qmap->nr_queues);
if (l_pdev == NULL) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"l_pdev is null\n");
return;
}
@@ -605,60 +701,65 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct Scsi_Host *host;
- struct fc_lport *lp;
+ struct Scsi_Host *host = NULL;
struct fnic *fnic;
mempool_t *pool;
+ struct fnic_iport_s *iport;
int err = 0;
int fnic_id = 0;
int i;
unsigned long flags;
- int hwq;
+ char *desc, *subsys_desc;
+ int len;
/*
- * Allocate SCSI Host and set up association between host,
- * local port, and fnic
+ * Allocate fnic
*/
- lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
- if (!lp) {
- printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
+ fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL);
+ if (!fnic) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_fnic_alloc;
}
- host = lp->host;
- fnic = lport_priv(lp);
+ iport = &fnic->iport;
fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
if (fnic_id < 0) {
- pr_err("Unable to alloc fnic ID\n");
+ dev_err(&pdev->dev, "Unable to alloc fnic ID\n");
err = fnic_id;
goto err_out_ida_alloc;
}
- fnic->lport = lp;
- fnic->ctlr.lp = lp;
- fnic->link_events = 0;
- fnic->pdev = pdev;
-
- snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
- host->host_no);
- host->transportt = fnic_fc_transport;
+ fnic->pdev = pdev;
fnic->fnic_num = fnic_id;
- fnic_stats_debugfs_init(fnic);
+
+ /* Find model name from PCIe subsys ID */
+ if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) {
+ dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc);
+
+ /* Update FDMI model */
+ fnic->subsys_desc_len = strlen(subsys_desc);
+ len = ARRAY_SIZE(fnic->subsys_desc);
+ if (fnic->subsys_desc_len > len)
+ fnic->subsys_desc_len = len;
+ memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len);
+ dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc);
+ } else {
+ fnic->subsys_desc_len = 0;
+ dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown",
+ pdev->subsystem_device);
+ }
err = pci_enable_device(pdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI device, aborting.\n");
- goto err_out_free_hba;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n");
+ goto err_out_pci_enable_device;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI resources, aborting\n");
- goto err_out_disable_device;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n");
+ goto err_out_pci_request_regions;
}
pci_set_master(pdev);
@@ -671,19 +772,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "No usable DMA configuration "
+ dev_err(&fnic->pdev->dev, "No usable DMA configuration "
"aborting\n");
- goto err_out_release_regions;
+ goto err_out_set_dma_mask;
}
}
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "BAR0 not memory-map'able, aborting.\n");
+ dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_map_bar;
}
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
@@ -691,61 +790,79 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->bar0.len = pci_resource_len(pdev, 0);
if (!fnic->bar0.vaddr) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot memory-map BAR0 res hdr, "
+ dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, "
"aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_fnic_map_bar;
}
fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
if (!fnic->vdev) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC registration failed, "
+ dev_err(&fnic->pdev->dev, "vNIC registration failed, "
"aborting.\n");
err = -ENODEV;
- goto err_out_iounmap;
+ goto err_out_dev_register;
}
err = vnic_dev_cmd_init(fnic->vdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_cmd_init() returns %d, aborting\n",
+ dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n",
err);
- goto err_out_vnic_unregister;
+ goto err_out_dev_cmd_init;
}
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev open failed, aborting.\n");
- goto err_out_dev_cmd_deinit;
+ dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n");
+ goto err_out_dev_open;
}
err = vnic_dev_init(fnic->vdev, 0);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_init;
}
- err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+ err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC get MAC addr failed \n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n");
+ goto err_out_dev_mac_addr;
}
/* set data_src for point-to-point mode and to keep it non-zero */
- memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
+ memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN);
/* Get vNIC configuration */
err = fnic_get_vnic_config(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Get vNIC configuration failed, "
+ dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_get_config;
+ }
+
+ switch (fnic->config.flags & 0xff0) {
+ case VFCF_FC_INITIATOR:
+ {
+ host =
+ scsi_host_alloc(&fnic_host_template,
+ sizeof(struct fnic *));
+ if (!host) {
+ dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n");
+ err = -ENOMEM;
+ goto err_out_scsi_host_alloc;
+ }
+ *((struct fnic **) shost_priv(host)) = fnic;
+
+ fnic->host = host;
+ fnic->role = FNIC_ROLE_FCP_INITIATOR;
+ dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n",
+ fnic->fnic_num);
+ }
+ break;
+ default:
+ dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num);
+ err = -EINVAL;
+ goto err_out_fnic_role;
}
/* Setup PCI resources */
@@ -755,29 +872,18 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fnic_set_intr_mode(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to set intr mode, "
+ dev_err(&fnic->pdev->dev, "Failed to set intr mode, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_set_intr_mode;
}
err = fnic_alloc_vnic_resources(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc vNIC resources, "
+ dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, "
"aborting.\n");
- goto err_out_clear_intr;
+ goto err_out_fnic_alloc_vnic_res;
}
-
- fnic_scsi_drv_init(fnic);
-
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
- fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
- fnic->sw_copy_wq[hwq].io_req_table =
- kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
- sizeof(struct fnic_io_req *), GFP_KERNEL);
- }
- shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
/* initialize all fnic locks */
@@ -793,50 +899,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->fw_ack_index[i] = -1;
}
- err = -ENOMEM;
- fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
- if (!fnic->io_req_pool)
- goto err_out_free_resources;
-
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
- if (!pool)
- goto err_out_free_ioreq_pool;
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_free_resources;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
- if (!pool)
+ if (!pool) {
+ err = -ENOMEM;
goto err_out_free_dflt_pool;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_pool;
+ }
+ fnic->frame_pool = pool;
+
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM,
+ fdls_frame_elem_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_elem_pool;
+ }
+ fnic->frame_elem_pool = pool;
+
/* setup vlan config, hw inserts vlan header */
fnic->vlan_hw_insert = 1;
fnic->vlan_id = 0;
- /* Initialize the FIP fcoe_ctrl struct */
- fnic->ctlr.send = fnic_eth_send;
- fnic->ctlr.update_mac = fnic_update_mac;
- fnic->ctlr.get_src_addr = fnic_get_mac;
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware supports FIP\n");
+ dev_info(&fnic->pdev->dev, "firmware supports FIP\n");
/* enable directed and multicast */
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
- vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
- fnic->set_vlan = fnic_set_vlan;
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
+ vnic_dev_add_addr(fnic->vdev, iport->hwmac);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
- INIT_WORK(&fnic->event_work, fnic_handle_event);
- skb_queue_head_init(&fnic->fip_frame_queue);
- INIT_LIST_HEAD(&fnic->evlist);
- INIT_LIST_HEAD(&fnic->vlans);
+ INIT_LIST_HEAD(&fnic->fip_frame_queue);
+ INIT_LIST_HEAD(&fnic->vlan_list);
+ timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0);
+ timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0);
+ timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0);
+ timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0);
+ fnic->set_vlan = fnic_set_vlan;
} else {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware uses non-FIP mode\n");
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
- fnic->ctlr.state = FIP_ST_NON_FIP;
+ dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n");
}
fnic->state = FNIC_IN_FC_MODE;
@@ -849,9 +961,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification buffer area */
err = fnic_notify_set(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc notify buffer, aborting.\n");
- goto err_out_free_max_pool;
+ dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_fnic_notify_set;
}
/* Setup notify timer when using MSI interrupts */
@@ -862,13 +973,62 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_alloc_rq_frame can't alloc "
+ dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc "
"frame\n");
- goto err_out_rq_buf;
+ goto err_out_alloc_rq_buf;
}
}
+ init_completion(&fnic->reset_completion_wait);
+
+ /* Start local port initialization */
+ iport->max_flogi_retries = fnic->config.flogi_retries;
+ iport->max_plogi_retries = fnic->config.plogi_retries;
+ iport->plogi_timeout = fnic->config.plogi_timeout;
+ iport->service_params =
+ (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS |
+ FNIC_FCP_SP_CONF_CMPL);
+ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+ iport->service_params |= FNIC_FCP_SP_RETRY;
+
+ iport->boot_time = jiffies;
+ iport->e_d_tov = fnic->config.ed_tov;
+ iport->r_a_tov = fnic->config.ra_tov;
+ iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT;
+ iport->wwpn = fnic->config.port_wwn;
+ iport->wwnn = fnic->config.node_wwn;
+
+ iport->max_payload_size = fnic->config.maxdatafieldsize;
+
+ if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) ||
+ (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) ||
+ ((iport->max_payload_size % 4) != 0)) {
+ iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ }
+
+ iport->flags |= FNIC_FIRST_LINK_UP;
+
+ timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback,
+ 0);
+
+ fnic->stats_reset_time = jiffies;
+
+ INIT_WORK(&fnic->link_work, fnic_handle_link);
+ INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ INIT_WORK(&fnic->tport_work, fnic_tport_event_handler);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
+
+ INIT_LIST_HEAD(&fnic->frame_queue);
+ INIT_LIST_HEAD(&fnic->tx_queue);
+ INIT_LIST_HEAD(&fnic->tport_event_list);
+
+ INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry,
+ fdls_schedule_oxid_free_retry_work);
+
+ /* Initialize the oxid reclaim list and work struct */
+ INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list);
+ INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler);
+
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
@@ -879,180 +1039,131 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);
- err = fnic_request_intr(fnic);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to request irq.\n");
- goto err_out_request_intr;
- }
+ vnic_dev_enable(fnic->vdev);
- /*
- * Initialization done with PCI system, hardware, firmware.
- * Add host to SCSI
- */
- err = scsi_add_host(lp->host, &pdev->dev);
+ err = fnic_request_intr(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic: scsi_add_host failed...exiting\n");
- goto err_out_scsi_add_host;
+ dev_err(&fnic->pdev->dev, "Unable to request irq.\n");
+ goto err_out_fnic_request_intr;
}
+ fnic_notify_timer_start(fnic);
- /* Start local port initiatialization */
-
- lp->link_up = 0;
-
- lp->max_retry_count = fnic->config.flogi_retries;
- lp->max_rport_retry_count = fnic->config.plogi_retries;
- lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
- FCP_SPPF_CONF_COMPL);
- if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
- lp->service_params |= FCP_SPPF_RETRY;
-
- lp->boot_time = jiffies;
- lp->e_d_tov = fnic->config.ed_tov;
- lp->r_a_tov = fnic->config.ra_tov;
- lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
- fc_set_wwnn(lp, fnic->config.node_wwn);
- fc_set_wwpn(lp, fnic->config.port_wwn);
-
- fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
-
- if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
- FCPIO_HOST_EXCH_RANGE_END, NULL)) {
- err = -ENOMEM;
- goto err_out_fc_exch_mgr_alloc;
- }
-
- fc_lport_init_stats(lp);
- fnic->stats_reset_time = jiffies;
+ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE));
- fc_lport_config(lp);
+ err = fnic_scsi_drv_init(fnic);
+ if (err)
+ goto err_out_scsi_drv_init;
- if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
- sizeof(struct fc_frame_header))) {
- err = -EINVAL;
- goto err_out_free_exch_mgr;
+ err = fnic_stats_debugfs_init(fnic);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n");
+ goto err_out_free_stats_debugfs;
}
- fc_host_maxframe_size(lp->host) = lp->mfs;
- fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
- sprintf(fc_host_symbolic_name(lp->host),
- DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_unmask(&fnic->intr[i]);
spin_lock_irqsave(&fnic_list_lock, flags);
list_add_tail(&fnic->list, &fnic_list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- INIT_WORK(&fnic->link_work, fnic_handle_link);
- INIT_WORK(&fnic->frame_work, fnic_handle_frame);
- INIT_WORK(&fnic->flush_work, fnic_flush_tx);
- skb_queue_head_init(&fnic->frame_queue);
- skb_queue_head_init(&fnic->tx_queue);
-
- fc_fabric_login(lp);
-
- vnic_dev_enable(fnic->vdev);
-
- for (i = 0; i < fnic->intr_count; i++)
- vnic_intr_unmask(&fnic->intr[i]);
-
- fnic_notify_timer_start(fnic);
-
return 0;
-err_out_free_exch_mgr:
- fc_exch_mgr_free(lp);
-err_out_fc_exch_mgr_alloc:
- fc_remove_host(lp->host);
- scsi_remove_host(lp->host);
-err_out_scsi_add_host:
+err_out_free_stats_debugfs:
+ fnic_stats_debugfs_remove(fnic);
+ fnic_free_ioreq_tables_mq(fnic);
+ scsi_remove_host(fnic->host);
+err_out_scsi_drv_init:
fnic_free_intr(fnic);
-err_out_request_intr:
- for (i = 0; i < fnic->rq_count; i++)
+err_out_fnic_request_intr:
+err_out_alloc_rq_buf:
+ for (i = 0; i < fnic->rq_count; i++) {
+ if (ioread32(&fnic->rq[i].ctrl->enable))
+ vnic_rq_disable(&fnic->rq[i]);
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
-err_out_rq_buf:
+ }
vnic_dev_notify_unset(fnic->vdev);
-err_out_free_max_pool:
+err_out_fnic_notify_set:
+ mempool_destroy(fnic->frame_elem_pool);
+err_out_fdls_frame_elem_pool:
+ mempool_destroy(fnic->frame_pool);
+err_out_fdls_frame_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
-err_out_free_ioreq_pool:
- mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
fnic_free_vnic_resources(fnic);
-err_out_clear_intr:
+err_out_fnic_alloc_vnic_res:
fnic_clear_intr_mode(fnic);
-err_out_dev_close:
+err_out_fnic_set_intr_mode:
+ scsi_host_put(fnic->host);
+err_out_fnic_role:
+err_out_scsi_host_alloc:
+err_out_fnic_get_config:
+err_out_dev_mac_addr:
+err_out_dev_init:
vnic_dev_close(fnic->vdev);
-err_out_dev_cmd_deinit:
-err_out_vnic_unregister:
+err_out_dev_open:
+err_out_dev_cmd_init:
vnic_dev_unregister(fnic->vdev);
-err_out_iounmap:
+err_out_dev_register:
fnic_iounmap(fnic);
-err_out_release_regions:
+err_out_fnic_map_bar:
+err_out_map_bar:
+err_out_set_dma_mask:
pci_release_regions(pdev);
-err_out_disable_device:
+err_out_pci_request_regions:
pci_disable_device(pdev);
-err_out_free_hba:
- fnic_stats_debugfs_remove(fnic);
+err_out_pci_enable_device:
ida_free(&fnic_ida, fnic->fnic_num);
err_out_ida_alloc:
- scsi_host_put(lp->host);
-err_out:
+ kfree(fnic);
+err_out_fnic_alloc:
return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
- struct fc_lport *lp = fnic->lport;
unsigned long flags;
- int hwq;
/*
- * Mark state so that the workqueue thread stops forwarding
- * received frames and link events to the local port. ISR and
- * other threads that can queue work items will also stop
- * creating work items on the fnic workqueue
+ * Sometimes when probe() fails and does not exit with an error code,
+ * remove() gets called with 'drvdata' not set. Avoid a crash by
+ * adding a defensive check.
*/
+ if (!fnic)
+ return;
+
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->stop_rx_link_events = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- del_timer_sync(&fnic->notify_timer);
-
/*
* Flush the fnic event queue. After this call, there should
* be no event queued for this fnic device in the workqueue
*/
flush_workqueue(fnic_event_queue);
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+
+ fnic_scsi_unload(fnic);
+
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ del_timer_sync(&fnic->notify_timer);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- del_timer_sync(&fnic->fip_timer);
- skb_queue_purge(&fnic->fip_frame_queue);
+ del_timer_sync(&fnic->retry_fip_timer);
+ del_timer_sync(&fnic->fcs_ka_timer);
+ del_timer_sync(&fnic->enode_ka_timer);
+ del_timer_sync(&fnic->vn_ka_timer);
+
+ fnic_free_txq(&fnic->fip_frame_queue);
fnic_fcoe_reset_vlans(fnic);
- fnic_fcoe_evlist_free(fnic);
}
- /*
- * Log off the fabric. This stops all remote ports, dns port,
- * logs off the fabric. This flushes all rport, disc, lport work
- * before returning
- */
- fc_fabric_logoff(fnic->lport);
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->in_remove = 1;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0))
+ del_timer_sync(&fnic->iport.fabric.fdmi_timer);
- fcoe_ctlr_destroy(&fnic->ctlr);
- fc_lport_destroy(lp);
fnic_stats_debugfs_remove(fnic);
/*
@@ -1062,18 +1173,13 @@ static void fnic_remove(struct pci_dev *pdev)
*/
fnic_cleanup(fnic);
- BUG_ON(!skb_queue_empty(&fnic->frame_queue));
- BUG_ON(!skb_queue_empty(&fnic->tx_queue));
-
spin_lock_irqsave(&fnic_list_lock, flags);
list_del(&fnic->list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- fc_remove_host(fnic->lport->host);
- scsi_remove_host(fnic->lport->host);
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
- fc_exch_mgr_free(fnic->lport);
+ fnic_free_txq(&fnic->frame_queue);
+ fnic_free_txq(&fnic->tx_queue);
+
vnic_dev_notify_unset(fnic->vdev);
fnic_free_intr(fnic);
fnic_free_vnic_resources(fnic);
@@ -1083,8 +1189,11 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
ida_free(&fnic_ida, fnic->fnic_num);
- scsi_host_put(lp->host);
+ fnic_scsi_unload_cleanup(fnic);
+ scsi_host_put(fnic->host);
+ kfree(fnic);
}
static struct pci_driver fnic_driver = {
@@ -1160,6 +1269,24 @@ static int __init fnic_init_module(void)
goto err_create_fnic_ioreq_slab;
}
+ fdls_frame_cache = kmem_cache_create("fdls_frames",
+ FNIC_FCOE_FRAME_MAXSZ,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_cache) {
+ pr_err("fnic fdls frame cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache;
+ }
+
+ fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem",
+ sizeof(struct fnic_frame_list),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_elem_cache) {
+ pr_err("fnic fdls frame elem cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache_elem;
+ }
+
fnic_event_queue =
alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
if (!fnic_event_queue) {
@@ -1176,6 +1303,19 @@ static int __init fnic_init_module(void)
goto err_create_fip_workq;
}
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) {
+ reset_fnic_work_queue =
+ create_singlethread_workqueue("reset_fnic_work_queue");
+ if (!reset_fnic_work_queue) {
+ pr_err("reset fnic work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_reset_fnic_workq;
+ }
+ spin_lock_init(&reset_fnic_list_lock);
+ INIT_LIST_HEAD(&reset_fnic_list);
+ INIT_WORK(&reset_fnic_work, fnic_reset_work_handler);
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -1196,8 +1336,15 @@ err_pci_register:
err_fc_transport:
destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+err_create_reset_fnic_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
+ kmem_cache_destroy(fdls_frame_elem_cache);
+err_create_fdls_frame_cache_elem:
+ kmem_cache_destroy(fdls_frame_cache);
+err_create_fdls_frame_cache:
kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
@@ -1214,11 +1361,18 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
- if (fnic_fip_queue)
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+
+ if (fnic_fip_queue) {
+ flush_workqueue(fnic_fip_queue);
destroy_workqueue(fnic_fip_queue);
+ }
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
+ kmem_cache_destroy(fdls_frame_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
fnic_fc_trace_free();
diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
new file mode 100644
index 000000000000..36a2c1268422
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/if_ether.h>
+#include "fnic.h"
+
+static struct fnic_pcie_device fnic_pcie_device_table[] = {
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA,
+ "VIC 1280"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI,
+ "VIC 1240"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE,
+ "VIC 1285"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE,
+ "VIC 1227T"},
+
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA,
+ "VIC 1340"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW,
+ "VIC 1380"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE,
+ "VIC 1385"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT,
+ "VIC 1387"},
+
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY,
+ "VIC 1457"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE,
+ "VIC 1485"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA,
+ "VIC 1495"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT,
+ "VIC 1497"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE,
+ "VIC 1467"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON,
+ "VIC 1477"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"},
+
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN,
+ "VIC 15420"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW,
+ "VIC 15411"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU,
+ "VIC 15238"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA,
+ "VIC 15422"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH,
+ "VIC 15230"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA,
+ "VIC 15427"},
+
+ {0,}
+};
+
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc)
+{
+ unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC;
+ int max = ARRAY_SIZE(fnic_pcie_device_table);
+ struct fnic_pcie_device *t = fnic_pcie_device_table;
+ int index = 0;
+
+ if (pdev->device != device)
+ return 1;
+
+ while (t->device != 0) {
+ if (memcmp
+ ((char *) &pdev->subsystem_device,
+ (char *) &t->subsystem_device, sizeof(short)) == 0)
+ break;
+ t++;
+ index++;
+ }
+
+ if (index >= max - 1) {
+ *desc = NULL;
+ *subsys_desc = NULL;
+ return 1;
+ }
+
+ *desc = fnic_pcie_device_table[index].desc;
+ *subsys_desc = fnic_pcie_device_table[index].subsys_desc;
+ return 0;
+}
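/*
 * Illustration only, not part of the patch: a minimal sketch of how a caller
 * is expected to consume fnic_get_desc_by_devid(), mirroring the lookup done
 * in fnic_probe() above. The function name below is hypothetical.
 */
static void example_log_vic_model(struct pci_dev *pdev)
{
	char *desc, *subsys_desc;

	if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0)
		dev_info(&pdev->dev, "Model: %s\n", subsys_desc);
	else
		dev_info(&pdev->dev, "Model: Unknown subsys_id: 0x%04x\n",
			 pdev->subsystem_device);
}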
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 33dd27f6f24e..763475587b7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic)
offsetof(struct vnic_fc_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
- shost_printk(KERN_ERR, fnic->lport->host, \
- "Error getting %s, %d\n", #m, \
- err); \
+ dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \
return err; \
} \
} while (0);
@@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic)
GET_CONFIG(intr_mode);
GET_CONFIG(wq_copy_count);
+ if ((c->flags & (VFCF_FC_INITIATOR)) == 0) {
+ dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n");
+ c->flags |= VFCF_FC_INITIATOR;
+ }
+
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
@@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC MAC addr %pM "
- "wq/wq_copy/rq %d/%d/%d\n",
- fnic->ctlr.ctl_src_addr,
+ dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n",
+ fnic->data_src_addr,
c->wq_enet_desc_count, c->wq_copy_desc_count,
c->rq_desc_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC node wwn %llx port wwn %llx\n",
+ dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n",
c->node_wwn, c->port_wwn);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC ed_tov %d ra_tov %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n",
c->ed_tov, c->ra_tov);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC mtu %d intr timer %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n",
c->maxdatafieldsize, c->intr_timer);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flags 0x%x luns per tgt %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n",
c->flags, c->luns_per_tgt);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flogi_retries %d flogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n",
c->flogi_retries, c->flogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC plogi retries %d plogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n",
c->plogi_retries, c->plogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC io throttle count %d link dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n",
c->io_throttle_count, c->link_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC port dn io retries %d port dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC wq_copy_count: %d\n", c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC intr mode: %d\n", c->intr_mode);
+ dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode);
return 0;
}
@@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic)
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_count: %d\n", fnic->wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources rq_count: %d\n", fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources cq_count: %d\n", fnic->cq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources intr_count: %d\n", fnic->intr_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count);
}
void fnic_free_vnic_resources(struct fnic *fnic)
@@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
- shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+ dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n",
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
+ dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d",
fnic->wq_count, fnic->wq_copy_count,
fnic->raw_wq_count, fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
+ dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n",
fnic->cq_count, fnic->intr_count,
fnic->config.wq_copy_desc_count);
@@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to hook legacy pba resource\n");
+ dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
@@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
/* init the stats memory by making the first call here */
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_stats_dump failed - x%x\n", err);
+ dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err);
goto err_out_cleanup;
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2ba61dba4569..7133b254cbe4 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -23,11 +23,13 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
const char *fnic_state_str[] = {
[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
@@ -65,6 +67,18 @@ static const char *fcpio_status_str[] = {
[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
};
+enum terminate_io_return {
+ TERM_SUCCESS = 0,
+ TERM_NO_SC = 1,
+ TERM_IO_REQ_NOT_FOUND,
+ TERM_ANOTHER_PORT,
+ TERM_GSTATE,
+ TERM_IO_BLOCKED,
+ TERM_OUT_OF_WQ_DESC,
+ TERM_TIMED_OUT,
+ TERM_MISC,
+};
+
const char *fnic_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
@@ -90,8 +104,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
return fcpio_status_str[status];
}
-static void fnic_cleanup_io(struct fnic *fnic);
-
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
@@ -114,6 +126,65 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
+static bool
+fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ u32 *portid = data1;
+ unsigned int *count = data2;
+ struct fnic_io_req *io_req = fnic_priv(sc)->io_req;
+
+ if (!io_req || (*portid && (io_req->port_id != *portid)))
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
+ &portid, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "portid = 0x%x count = %u\n", portid, count);
+ return count;
+}
+
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic)
+{
+ return fnic_count_ioreqs(fnic, 0);
+}
+
+static bool
+fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ struct scsi_device *scsi_device = data1;
+ unsigned int *count = data2;
+
+ if (sc->device != scsi_device || !fnic_priv(sc)->io_req)
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int
+fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter,
+ scsi_device, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "lun = %p count = %u\n", scsi_device, count);
+ return count;
+}
+
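/*
 * Illustration only, not part of the patch: the counting helpers above share
 * one shape -- a per-command callback passed to fnic_scsi_io_iter() along
 * with two opaque cookies. A hypothetical callback that counts outstanding
 * commands carrying a given fnic_priv() flag would look like this.
 */
static bool example_count_flagged_iter(struct fnic *fnic, struct scsi_cmnd *sc,
				       void *data1, void *data2)
{
	unsigned int *flag = data1;
	unsigned int *count = data2;

	if (fnic_priv(sc)->flags & *flag)
		*count += 1;

	return true;	/* continue iterating over every outstanding command */
}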
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
{
@@ -179,12 +250,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
int ret = 0;
unsigned long flags;
+ unsigned int ioreq_count;
/* indicate fwreset to io path */
fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
-
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+ ioreq_count = fnic_count_all_ioreqs(fnic);
/* wait for io cmpl */
while (atomic_read(&fnic->in_flight))
@@ -198,6 +268,8 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ioreq_count: %u\n", ioreq_count);
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -211,11 +283,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Issued fw reset\n");
} else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Failed to issue fw reset\n");
}
@@ -231,10 +303,10 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
enum fcpio_flogi_reg_format_type format;
- struct fc_lport *lp = fnic->lport;
u8 gw_mac[ETH_ALEN];
int ret = 0;
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
@@ -246,28 +318,23 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
goto flogi_reg_ioreq_end;
}
- if (fnic->ctlr.map_dest) {
- eth_broadcast_addr(gw_mac);
- format = FCPIO_FLOGI_REG_DEF_DEST;
- } else {
- memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
- format = FCPIO_FLOGI_REG_GW_DEST;
- }
+ memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN);
+ format = FCPIO_FLOGI_REG_GW_DEST;
- if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
fc_id, gw_mac,
- fnic->data_src_addr,
- lp->r_a_tov, lp->e_d_tov);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
- fc_id, fnic->data_src_addr, gw_mac);
+ fnic->iport.fpma,
+ iport->r_a_tov, iport->e_d_tov);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n",
+ fc_id, fnic->iport.fpma, gw_mac);
} else {
fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
format, fc_id, gw_mac);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n",
- fc_id, fnic->ctlr.map_dest, gw_mac);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI reg issued fcid 0x%x dest %p\n",
+ fc_id, gw_mac);
}
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
@@ -295,13 +362,17 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned int i;
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,7 +413,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -356,7 +427,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
exch_flags = 0;
if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
- (rp->flags & FC_RP_FLAGS_RETRY))
+ (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
@@ -371,8 +442,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
sc->cmnd, sc->cmd_len,
scsi_bufflen(sc),
fc_lun.scsi_lun, io_req->port_id,
- rport->maxframe_size, rp->r_a_tov,
- rp->e_d_tov);
+ tport->max_payload_size,
+ tport->r_a_tov, tport->e_d_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -388,10 +459,10 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
struct request *const rq = scsi_cmd_to_rq(sc);
uint32_t mqtag = 0;
void (*done)(struct scsi_cmnd *) = scsi_done;
- struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host));
+ struct fnic_iport_s *iport = NULL;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret = 1;
@@ -400,32 +471,14 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
unsigned long flags = 0;
unsigned long ptr;
int io_lock_acquired = 0;
- struct fc_rport_libfc_priv *rp;
uint16_t hwq = 0;
-
- mqtag = blk_mq_unique_tag(rq);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ struct fnic_tport_s *tport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ uint16_t lun0_delay = 0;
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
done(sc);
@@ -434,50 +487,96 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
ret = fc_remote_port_chkready(rport);
if (ret) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"rport is not ready\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- rp = rport->dd_data;
- if (!rp || rp->rp_state == RPORT_ST_DELETE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x removed, returning DID_NO_CONNECT\n",
- rport->port_id);
+ mqtag = blk_mq_unique_tag(rq);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as iport state: %d\n",
+ iport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (rp->rp_state != RPORT_ST_READY) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
- rport->port_id, rp->rp_state);
+ /* fc_remote_port_add() may have added the tport to
+ * fc_transport but dd_data not yet set
+ */
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport || (rdd_data->iport != iport)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "dd_data not yet set in SCSI for rport portid: 0x%x\n",
+ rport->port_id);
+ tport = fnic_find_tport_by_fcid(iport, rport->port_id);
+ if (!tport) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
+ rport->port_id);
+ sc->result = DID_BUS_BUSY << 16;
+ done(sc);
+ return 0;
+ }
+
+ /* Re-assign same params as in fnic_fdls_add_tport */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes =
+ FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+ /* the dd_data is allocated by fctransport of size dd_fcrport_size */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+ }
- sc->result = DID_IMM_RETRY << 16;
+ if ((tport->state != FDLS_TGT_STATE_READY)
+ && (tport->state != FDLS_TGT_STATE_ADISC)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as tport state: %d\n",
+ tport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "state not ready: %d/link not up: %d Returning HOST_BUSY\n",
- lp->state, lp->link_up);
return SCSI_MLQUEUE_HOST_BUSY;
}
- atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->state_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (!tport->lun0_delay) {
+ lun0_delay = 1;
+ tport->lun0_delay++;
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
@@ -499,6 +598,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
goto out;
}
+ io_req->tport = tport;
/* Determine the type of scatter/gather list we need */
io_req->sgl_cnt = sg_count;
io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
@@ -575,6 +675,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
@@ -602,6 +703,14 @@ out:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
+
+ if (lun0_delay) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "LUN0 delay\n");
+ mdelay(LUN0_DELAY_TIME);
+ }
+
return ret;
}
@@ -625,7 +734,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */
- fnic_cleanup_io(fnic);
+ fnic_cleanup_io(fnic, SCSI_NO_TAG);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
@@ -637,44 +746,37 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
/* Check status of reset completion */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"reset cmpl success\n");
/* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"reset failed with header status: %s\n",
fnic_fcpio_status_to_str(hdr_status));
- /*
- * Unable to change to eth mode, cannot send out flogi
- * Change state to fc mode, so that subsequent Flogi
- * requests from libFC will cause more attempts to
- * reset the firmware. Free the cached flogi
- */
fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Unexpected state while processing reset completion: %s\n",
fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
- /* Thread removing device blocks till firmware reset is complete */
- if (fnic->remove_wait)
- complete(fnic->remove_wait);
+ if (fnic->fw_reset_done)
+ complete(fnic->fw_reset_done);
/*
* If fnic is being removed, or fw reset failed
* free the flogi frame. Else, send it out
*/
- if (fnic->remove_wait || ret) {
+ if (ret) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- skb_queue_purge(&fnic->tx_queue);
+ fnic_free_txq(&fnic->tx_queue);
goto reset_cmpl_handler_end;
}
@@ -710,19 +812,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
/* Check flogi registration completion status */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "flog reg succeeded\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FLOGI reg succeeded\n");
fnic->state = FNIC_IN_FC_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic flogi reg :failed %s\n",
+ fnic->host, fnic->fnic_num,
+ "fnic flogi reg failed: %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_ETH_MODE;
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Unexpected fnic state %s while"
" processing flogi reg completion\n",
fnic_state_to_str(fnic->state));
@@ -795,7 +897,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
- fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
ox_id_tag[4], ox_id_tag[5]);
}
@@ -833,36 +935,36 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
hwq = blk_mq_unique_tag_to_hwq(mqtag);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s icmnd completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag >= fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Out of range tag\n",
fnic_fcpio_status_to_str(hdr_status));
return;
}
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, desc);
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
- fnic->lport->host->host_no, id,
+ fnic->host->host_no, id,
((u64)icmnd_cmpl->_resvd0[1] << 16 |
(u64)icmnd_cmpl->_resvd0[0]),
((u64)hdr_status << 16 |
@@ -885,7 +987,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc);
@@ -912,7 +1014,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if(FCPIO_ABORTED == hdr_status)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
@@ -943,6 +1045,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "xfer_len: %llu\n", xfer_len);
break;
case FCPIO_TIMEOUT: /* request was timed out */
@@ -1004,7 +1109,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
- shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
}
@@ -1024,13 +1129,13 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
- fnic->lport->host_stats.fcp_input_requests++;
+ fnic_stats->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
- fnic->lport->host_stats.fcp_output_requests++;
+ fnic_stats->host_stats.fcp_output_requests++;
fnic->fcp_output_bytes += xfer_len;
} else
- fnic->lport->host_stats.fcp_control_requests++;
+ fnic_stats->host_stats.fcp_control_requests++;
/* Call SCSI completion function to complete the IO */
scsi_done(sc);
@@ -1097,27 +1202,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s ITMF completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag > fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
} else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
@@ -1133,14 +1238,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req)
sc = io_req->sc;
} else {
- sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
}
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), tag);
return;
@@ -1152,7 +1257,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), tag, sc);
@@ -1163,7 +1268,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
/* Abort and terminate completion of device reset req */
/* REVISIT : Add asserts about various flags */
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1175,7 +1280,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
- shost_printk(KERN_DEBUG, fnic->lport->host,
+ shost_printk(KERN_DEBUG, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1190,7 +1295,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_ITMF_REJECTED:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK));
break;
@@ -1225,7 +1330,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
@@ -1238,11 +1343,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req->abts_done) {
complete(io_req->abts_done);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
hwq, mqtag, tag);
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1273,7 +1378,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1285,7 +1390,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1298,7 +1403,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1307,7 +1412,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1316,7 +1421,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
__func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1371,7 +1476,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
default:
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"firmware completion type %d\n",
desc->hdr.type);
break;
@@ -1414,8 +1519,8 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic *fnic = data;
struct fnic_io_req *io_req;
- unsigned long flags = 0;
unsigned long start_time = 0;
+ unsigned long flags;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
uint16_t hwq = 0;
int tag;
@@ -1432,14 +1537,14 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
hwq, mqtag, tag, fnic_priv(sc)->flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
- !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
@@ -1449,6 +1554,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
@@ -1458,19 +1564,19 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->io_req = NULL;
io_req->sc = NULL;
+ start_time = io_req->start_time;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
- start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
mqtag, tag, sc, (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
@@ -1479,23 +1585,60 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&fnic_stats->io_stats.io_completions);
FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, tag, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- fnic_flags_and_state(sc));
-
+ sc->device->host->host_no, tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64) sc->cmnd[0] << 32 |
+ (u64) sc->cmnd[2] << 24 |
+ (u64) sc->cmnd[3] << 16 |
+ (u64) sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)->state));
+
+ /* Complete the command to SCSI */
scsi_done(sc);
-
return true;
}
-static void fnic_cleanup_io(struct fnic *fnic)
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
- scsi_host_busy_iter(fnic->lport->host,
- fnic_cleanup_io_iter, fnic);
+ unsigned int io_count = 0;
+ unsigned long flags;
+ struct fnic_io_req *io_req = NULL;
+ struct scsi_cmnd *sc = NULL;
+
+ io_count = fnic_count_all_ioreqs(fnic);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ scsi_host_busy_iter(fnic->host,
+ fnic_cleanup_io_iter, fnic);
+
+ /* with sg3utils device reset, SC needs to be retrieved from ioreq */
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id];
+ if (io_req) {
+ sc = io_req->sc;
+ if (sc) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
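+ /* Wait for any remaining ioreqs to drain before returning */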
+ while ((io_count = fnic_count_all_ioreqs(fnic))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ schedule_timeout(msecs_to_jiffies(100));
+ }
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1516,7 +1659,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
if (id >= fnic->fnic_max_tag_id)
return;
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
if (!sc)
return;
@@ -1545,7 +1688,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
wq_copy_cleanup_scsi_cmd:
sc->result = DID_NO_CONNECT << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
@@ -1567,10 +1710,13 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
+ struct fnic_tport_s *tport = io_req->tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 1;
} else
@@ -1585,7 +1731,8 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ atomic_dec(&tport->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
@@ -1619,20 +1766,24 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
struct fnic *fnic = iter_data->fnic;
int abt_tag = 0;
struct fnic_io_req *io_req;
- unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
uint16_t hwq = 0;
+ unsigned long flags;
abt_tag = blk_mq_unique_tag(rq);
hwq = blk_mq_unique_tag_to_hwq(abt_tag);
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+ if (!sc) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq);
+ return true;
+ }
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
-
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
@@ -1640,7 +1791,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
hwq, abt_tag, fnic_priv(sc)->flags);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1655,37 +1806,40 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
+
if (io_req->abts_done) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_rport_exch_reset: io_req->abts_done is set "
- "state is %s\n",
+ shost_printk(KERN_ERR, fnic->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "rport_exch_reset "
- "IO not yet issued %p tag 0x%x flags "
- "%x state %d\n",
- sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
+ shost_printk(KERN_ERR, fnic->host,
+ "rport_exch_reset IO not yet issued %p abt_tag 0x%x\n",
+ sc, abt_tag);
+ shost_printk(KERN_ERR, fnic->host,
+ "flags %x state %d\n", fnic_priv(sc)->flags,
+ fnic_priv(sc)->state);
}
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "dev reset sc 0x%p\n", sc);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
- BUG_ON(io_req->abts_done);
-
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc);
+ WARN_ON_ONCE(io_req->abts_done);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_rport_reset_exch: Issuing abts\n");
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- /* Now queue the abort command to firmware */
+ /* Queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
@@ -1698,7 +1852,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
* lun reset
*/
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
hwq, abt_tag, fnic_priv(sc)->flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
@@ -1714,11 +1868,14 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
+
return true;
}
-static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
+ unsigned int io_count = 0;
+ unsigned long flags;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct fnic_rport_abort_io_iter_data iter_data = {
.fnic = fnic,
@@ -1726,53 +1883,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
.term_cnt = 0,
};
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset called portid 0x%06x\n",
- port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic rport exchange reset for tport: 0x%06x\n",
+ port_id);
if (fnic->in_remove)
return;
- scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
+ io_count = fnic_count_ioreqs(fnic, port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n",
+ port_id, io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ /* Bump in_flight counter to hold off fnic_fw_reset_handler. */
+ atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter,
&iter_data);
+
if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
+ atomic_dec(&fnic->in_flight);
+
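+ /* Wait for all outstanding I/Os to this rport to drain */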
+ while ((io_count = fnic_count_ioreqs(fnic, port_id)))
+ schedule_timeout(msecs_to_jiffies(1000));
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "rport: 0x%x remaining portid-io-count: %d\n",
+ port_id, io_count);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
- struct fc_rport_libfc_priv *rdata;
- struct fc_lport *lport;
- struct fnic *fnic;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_iport_s *iport = NULL;
+ struct fnic *fnic = NULL;
if (!rport) {
- printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ pr_err("rport is NULL\n");
return;
}
- rdata = rport->dd_data;
- if (!rdata) {
- printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
- return;
+ rdd_data = rport->dd_data;
+ if (rdd_data) {
+ tport = rdd_data->tport;
+ if (!tport) {
+ pr_err(
+ "term rport io called after tport is deleted. Returning 0x%8x\n",
+ rport->port_id);
+ } else {
+ pr_err(
+ "term rport io called after tport is set 0x%8x\n",
+ rport->port_id);
+ pr_err(
+ "tport may be rediscovered\n");
+
+ iport = (struct fnic_iport_s *) tport->iport;
+ fnic = iport->fnic;
+ fnic_rport_exch_reset(fnic, rport->port_id);
+ }
}
- lport = rdata->local_port;
+}
- if (!lport) {
- printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
- return;
- }
- fnic = lport_priv(lport);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
- rport->port_name, rport->node_name, rport,
- rport->port_id);
+/*
+ * FCP-SCSI specific handling for module unload
+ */
+void fnic_scsi_unload(struct fnic *fnic)
+{
+ unsigned long flags;
- if (fnic->in_remove)
- return;
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events to the local port. ISR and
+ * other threads that can queue work items will also stop
+ * creating work items on the fnic workqueue
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
+ fnic_scsi_fcpio_reset(fnic);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->in_remove = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fnic_flush_tport_event_list(fnic);
+ fnic_delete_fcp_tports(fnic);
+}
- fnic_rport_exch_reset(fnic, rport->port_id);
+void fnic_scsi_unload_cleanup(struct fnic *fnic)
+{
+ int hwq = 0;
+
+ fc_remove_host(fnic->host);
+ scsi_remove_host(fnic->host);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
/*
@@ -1783,10 +2002,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct request *const rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
+ struct rport_dd_data_s *rdd_data;
unsigned long flags;
unsigned long start_time = 0;
int ret = SUCCESS;
@@ -1806,11 +2027,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
-
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
+
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
@@ -1821,7 +2042,44 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Abort cmd called after tport delete! rport fcid: 0x%x",
+ rport->port_id);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n",
+ sc->device->lun, hwq, mqtag,
+ sc->cmnd[0], fnic_priv(sc)->flags);
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x",
+ rport->port_id, sc->device->lun, hwq, mqtag);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Op: 0x%x flags: 0x%x\n",
+ sc->cmnd[0],
+ fnic_priv(sc)->flags);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
ret = FAILED;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_abort_cmd_end;
@@ -1843,6 +2101,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
+ ret = FAILED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_abort_cmd_end;
}
@@ -1870,7 +2129,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
else
atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
sc->cmnd[0], abt_issued_time);
/*
@@ -1893,7 +2152,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
else {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
}
@@ -1961,7 +2220,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Issuing host reset due to out of order IO\n");
ret = FAILED;
@@ -2009,7 +2268,7 @@ fnic_abort_cmd_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2027,6 +2286,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
unsigned long flags;
uint16_t hwq = 0;
uint32_t tag = 0;
+ struct fnic_tport_s *tport = io_req->tport;
tag = io_req->tag;
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -2037,8 +2297,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
FNIC_FLAGS_IO_BLOCKED))) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return FAILED;
- } else
+ } else {
atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
@@ -2047,7 +2309,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (!vnic_wq_copy_desc_avail(wq)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
@@ -2072,6 +2334,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
}
@@ -2114,7 +2377,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2124,14 +2387,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
(!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst not pending sc 0x%p\n", sc);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
old_ioreq_state = fnic_priv(sc)->state;
@@ -2147,7 +2410,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
BUG_ON(io_req->abts_done);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst sc 0x%p\n", sc);
}
@@ -2169,7 +2432,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
iter_data->ret = FAILED;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
hwq, abt_tag);
return false;
@@ -2248,7 +2511,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
iter_data.lr_sc = lr_sc;
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_pending_aborts_iter, &iter_data);
if (iter_data.ret == FAILED) {
ret = iter_data.ret;
@@ -2261,7 +2524,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
ret = 1;
clean_pending_aborts_end:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"exit status: %d\n", ret);
return ret;
}
@@ -2274,11 +2537,11 @@ clean_pending_aborts_end:
int fnic_device_reset(struct scsi_cmnd *sc)
{
struct request *rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
+ int count = 0;
int ret = FAILED;
unsigned long flags;
unsigned long start_time = 0;
@@ -2289,31 +2552,63 @@ int fnic_device_reset(struct scsi_cmnd *sc)
DECLARE_COMPLETION_ONSTACK(tm_done);
bool new_sc = 0;
uint16_t hwq = 0;
+ struct fnic_iport_s *iport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_tport_s *tport;
+ u32 old_soft_reset_count;
+ u32 old_link_down_cnt;
+ int exit_dr = 0;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
+ iport = &fnic->iport;
- fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
- reset_stats = &fnic->fnic_stats.reset_stats;
+ reset_stats = &fnic_stats->reset_stats;
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
rport->port_id, sc->device->lun, hwq, mqtag,
fnic_priv(sc)->flags);
- if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
+ rport->port_id, sc->device->lun);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_device_reset_end;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/* Check if remote port up */
if (fc_remote_port_chkready(rport)) {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
goto fnic_device_reset_end;
}
@@ -2352,6 +2647,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->port_id = rport->port_id;
io_req->tag = mqtag;
fnic_priv(sc)->io_req = io_req;
+ io_req->tport = tport;
io_req->sc = sc;
if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
@@ -2366,7 +2662,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -2383,6 +2679,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
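+ /* Snapshot link-down and soft-reset counters so a link event or host reset during the wait can be detected */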
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ old_link_down_cnt = iport->fnic->link_down_cnt;
+ old_soft_reset_count = fnic->soft_reset_count;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
/*
* Wait on the local completion for LUN reset. The io_req may be
* freed while we wait since we hold no lock.
@@ -2390,14 +2691,39 @@ int fnic_device_reset(struct scsi_cmnd *sc)
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ /*
+ * Wake up can be due to the following reasons:
+ * 1) The device reset completed from target.
+ * 2) Device reset timed out.
+ * 3) A link-down/host_reset may have happened in between.
+ * 4) The device reset was aborted and io_req->dr_done was called.
+ */
+
+ exit_dr = 0;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if ((old_link_down_cnt != fnic->link_down_cnt) ||
+ (fnic->reset_in_progress) ||
+ (fnic->soft_reset_count != old_soft_reset_count) ||
+ (iport->state != FNIC_IPORT_STATE_READY))
+ exit_dr = 1;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
goto fnic_device_reset_end;
}
+
+ if (exit_dr) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Host reset called for fnic. Exit device reset\n");
+ io_req->dr_done = NULL;
+ goto fnic_device_reset_clean;
+ }
io_req->dr_done = NULL;
status = fnic_priv(sc)->lr_status;
@@ -2408,53 +2734,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Device reset timed out\n");
fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
int_to_scsilun(sc->device->lun, &fc_lun);
- /*
- * Issue abort and terminate on device reset request.
- * If q'ing of terminate fails, retry it after a delay.
- */
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- break;
- }
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_queue_abort_io_req(fnic,
- mqtag | FNIC_TAG_DEV_RST,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req, hwq)) {
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
- } else {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
- fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
- io_req->abts_done = &tm_done;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
- mqtag, sc);
- break;
- }
- }
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
- break;
- } else {
- io_req = fnic_priv(sc)->io_req;
- io_req->abts_done = NULL;
- goto fnic_device_reset_clean;
- }
- }
+ goto fnic_device_reset_clean;
} else {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
@@ -2463,7 +2747,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status != FCPIO_SUCCESS) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
+ fnic->host, fnic->fnic_num,
"Device reset completed - failed\n");
io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
@@ -2479,9 +2763,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Device reset failed"
- " since could not abort all IOs\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Device reset failed: Cannot abort all IOs\n");
goto fnic_device_reset_clean;
}
@@ -2507,6 +2790,15 @@ fnic_device_reset_clean:
mempool_free(io_req, fnic->io_req_pool);
}
+ /*
+ * If a link event is seen while the LUN reset is issued, we need
+ * to complete the LUN reset here
+ */
+ if (!new_sc) {
+ sc->result = DID_RESET << 16;
+ scsi_done(sc);
+ }
+
fnic_device_reset_end:
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
@@ -2520,7 +2812,18 @@ fnic_device_reset_end:
mutex_unlock(&fnic->sgreset_mutex);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
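+ /* Give outstanding I/Os on this LUN a couple of seconds to drain; otherwise fail the reset */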
+ while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
+ if (count >= 2) {
+ ret = FAILED;
+ break;
+ }
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Cannot clean up all IOs for the LUN\n");
+ schedule_timeout(msecs_to_jiffies(1000));
+ count++;
+ }
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2531,67 +2834,78 @@ fnic_device_reset_end:
return ret;
}
-/* Clean up all IOs, clean up libFC local port */
-int fnic_reset(struct Scsi_Host *shost)
+static void fnic_post_flogo_linkflap(struct fnic *fnic)
+{
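+ /* Report link down to FDLS, then link up again if the physical link is still up */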
+ unsigned long flags;
+
+ fnic_fdls_link_status_change(fnic, 0);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fdls_link_status_change(fnic, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/* Logout from all the targets and simulate link flap */
+void fnic_reset(struct Scsi_Host *shost)
{
- struct fc_lport *lp;
struct fnic *fnic;
- int ret = 0;
struct reset_stats *reset_stats;
- lp = shost_priv(shost);
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(shost));
reset_stats = &fnic->fnic_stats.reset_stats;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Issuing fnic reset\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fnic reset\n");
atomic64_inc(&reset_stats->fnic_resets);
+ fnic_post_flogo_linkflap(fnic);
- /*
- * Reset local port, this will clean up libFC exchanges,
- * reset remote port sessions, and if link is up, begin flogi
- */
- ret = fc_lport_reset(lp);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Returning from fnic reset");
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Returning from fnic reset with: %s\n",
- (ret == 0) ? "SUCCESS" : "FAILED");
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+}
- if (ret == 0)
- atomic64_inc(&reset_stats->fnic_reset_completions);
- else
- atomic64_inc(&reset_stats->fnic_reset_failures);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ int ret = 0;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FC host lip issued");
+ ret = fnic_host_reset(shost);
return ret;
}
-/*
- * SCSI Error handling calls driver's eh_host_reset if all prior
- * error handling levels return FAILED. If host reset completes
- * successfully, and if link is up, then Fabric login begins.
- *
- * Host Reset is the highest level of error recovery. If this fails, then
- * host is offlined by SCSI.
- *
- */
-int fnic_host_reset(struct scsi_cmnd *sc)
+int fnic_host_reset(struct Scsi_Host *shost)
{
- int ret;
+ int ret = SUCCESS;
unsigned long wait_host_tmo;
- struct Scsi_Host *shost = sc->device->host;
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (!fnic->internal_reset_inprogress) {
- fnic->internal_reset_inprogress = true;
+ if (fnic->reset_in_progress == NOT_IN_PROGRESS) {
+ fnic->reset_in_progress = IN_PROGRESS;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "host reset in progress skipping another host reset\n");
- return SUCCESS;
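+ /* Another reset is already running: wait for it, then re-check before starting ours */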
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(10000));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Firmware reset in progress. Skipping another host reset\n");
+ return SUCCESS;
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -2600,140 +2914,34 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up
*/
- ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
- if (ret == SUCCESS) {
+ fnic_reset(shost);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic->soft_reset_count++;
+
+ /* wait till the link is up */
+ if (fnic->link_status) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
while (time_before(jiffies, wait_host_tmo)) {
- if ((lp->state == LPORT_ST_READY) &&
- (lp->link_up)) {
+ if (iport->state != FNIC_IPORT_STATE_READY
+ && fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ ssleep(1);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ } else {
ret = SUCCESS;
break;
}
- ssleep(1);
}
}
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = false;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return ret;
-}
-
-/*
- * This fxn is called from libFC when host is removed
- */
-void fnic_scsi_abort_io(struct fc_lport *lp)
-{
- int err = 0;
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
- DECLARE_COMPLETION_ONSTACK(remove_wait);
-
- /* Issue firmware reset for fnic, wait for reset to complete */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
- fnic->link_events) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
-
- fnic->remove_wait = &remove_wait;
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- err = fnic_fw_reset_handler(fnic);
- if (err) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- fnic->remove_wait = NULL;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- /* Wait for firmware reset to complete */
- wait_for_completion_timeout(&remove_wait,
- msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->remove_wait = NULL;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_scsi_abort_io %s\n",
- (fnic->state == FNIC_IN_ETH_MODE) ?
- "SUCCESS" : "FAILED");
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-}
-
-/*
- * This fxn called from libFC to clean up driver IO state on link down
- */
-void fnic_scsi_cleanup(struct fc_lport *lp)
-{
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
-
- /* issue fw reset */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- if (fnic_fw_reset_handler(fnic)) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- }
-
-}
-
-void fnic_empty_scsi_cleanup(struct fc_lport *lp)
-{
-}
-
-void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
-{
- struct fnic *fnic = lport_priv(lp);
-
- /* Non-zero sid, nothing to do */
- if (sid)
- goto call_fc_exch_mgr_reset;
-
- if (did) {
- fnic_rport_exch_reset(fnic, did);
- goto call_fc_exch_mgr_reset;
- }
-
- /*
- * sid = 0, did = 0
- * link down or device being removed
- */
- if (!fnic->in_remove)
- fnic_scsi_cleanup(lp);
- else
- fnic_scsi_abort_io(lp);
-
- /* call libFC exch mgr reset to reset its exchanges */
-call_fc_exch_mgr_reset:
- fc_exch_mgr_reset(lp, sid, did);
-
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "host reset return status: %d\n", ret);
+ return ret;
}
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
@@ -2771,7 +2979,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
hwq, tag,
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2804,8 +3012,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
}
/* walk again to check, if IOs are still pending in fw */
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_abts_pending_iter, &iter_data);
return iter_data.ret;
}
+
+/*
+ * SCSI error handling calls the driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If the host reset completes
+ * successfully, and if the link is up, then fabric login begins.
+ *
+ * Host reset is the highest level of error recovery. If this fails,
+ * the host is offlined by SCSI.
+ */
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
+{
+ int ret = 0;
+ struct Scsi_Host *shost = sc->device->host;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "SCSI error handling: fnic host reset");
+
+ ret = fnic_host_reset(shost);
+ return ret;
+}
+
+
+void fnic_scsi_fcpio_reset(struct fnic *fnic)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic_iport_s *iport = &fnic->iport;
+ DECLARE_COMPLETION_ONSTACK(fw_reset_done);
+ int time_remain;
+
+ /* issue fw reset */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* A fw reset is already in progress; nothing more to do here */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic is in unexpected state: %d for fw_reset\n",
+ fnic->state);
+ return;
+ }
+
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+
+ fnic_update_mac_locked(fnic, iport->hwmac);
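+ /* Completed from fnic_fcpio_fw_reset_cmpl_handler() when the firmware acks the reset */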
+ fnic->fw_reset_done = &fw_reset_done;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fw reset\n");
+ if (fnic_fw_reset_handler(fnic)) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Waiting for fw completion\n");
+ time_remain = wait_for_completion_timeout(&fw_reset_done,
+ msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Woken up from fw reset completion wait\n");
+ if (time_remain == 0) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW reset completion timed out after %d ms\n",
+ FNIC_FW_RESET_TIMEOUT);
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
+ }
+ }
+ fnic->fw_reset_done = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9d7f98c452dd..8ddd20401a59 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -3,6 +3,7 @@
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
#define FNIC_MQ_MAX_QUEUES 64
+#include <scsi/scsi_transport_fc.h>
struct stats_timestamps {
struct timespec64 last_reset_time;
@@ -63,6 +64,7 @@ struct reset_stats {
atomic64_t fw_resets;
atomic64_t fw_reset_completions;
atomic64_t fw_reset_failures;
+ atomic64_t fw_reset_timeouts;
atomic64_t fnic_resets;
atomic64_t fnic_reset_completions;
atomic64_t fnic_reset_failures;
@@ -102,10 +104,51 @@ struct misc_stats {
atomic64_t no_icmnd_itmf_cmpls;
atomic64_t check_condition;
atomic64_t queue_fulls;
- atomic64_t rport_not_ready;
+ atomic64_t tport_not_ready;
+ atomic64_t iport_not_ready;
atomic64_t frame_errors;
atomic64_t current_port_speed;
atomic64_t intx_dummy;
+ atomic64_t port_speed_in_mbps;
+};
+
+struct fnic_iport_stats {
+ atomic64_t num_linkdn;
+ atomic64_t num_linkup;
+ atomic64_t link_failure_count;
+ atomic64_t num_rscns;
+ atomic64_t rscn_redisc;
+ atomic64_t rscn_not_redisc;
+ atomic64_t frame_err;
+ atomic64_t num_rnid;
+ atomic64_t fabric_flogi_sent;
+ atomic64_t fabric_flogi_ls_accepts;
+ atomic64_t fabric_flogi_ls_rejects;
+ atomic64_t fabric_flogi_misc_rejects;
+ atomic64_t fabric_plogi_sent;
+ atomic64_t fabric_plogi_ls_accepts;
+ atomic64_t fabric_plogi_ls_rejects;
+ atomic64_t fabric_plogi_misc_rejects;
+ atomic64_t fabric_scr_sent;
+ atomic64_t fabric_scr_ls_accepts;
+ atomic64_t fabric_scr_ls_rejects;
+ atomic64_t fabric_scr_misc_rejects;
+ atomic64_t fabric_logo_sent;
+ atomic64_t tport_alive;
+ atomic64_t tport_plogi_sent;
+ atomic64_t tport_plogi_ls_accepts;
+ atomic64_t tport_plogi_ls_rejects;
+ atomic64_t tport_plogi_misc_rejects;
+ atomic64_t tport_prli_sent;
+ atomic64_t tport_prli_ls_accepts;
+ atomic64_t tport_prli_ls_rejects;
+ atomic64_t tport_prli_misc_rejects;
+ atomic64_t tport_adisc_sent;
+ atomic64_t tport_adisc_ls_accepts;
+ atomic64_t tport_adisc_ls_rejects;
+ atomic64_t tport_logo_sent;
+ atomic64_t unsupported_frames_ls_rejects;
+ atomic64_t unsupported_frames_dropped;
};
struct fnic_stats {
@@ -116,6 +159,7 @@ struct fnic_stats {
struct reset_stats reset_stats;
struct fw_stats fw_stats;
struct vlan_stats vlan_stats;
+ struct fc_host_statistics host_stats;
struct misc_stats misc_stats;
};
@@ -127,6 +171,5 @@ struct stats_debug_info {
};
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-void fnic_stats_debugfs_init(struct fnic *);
-void fnic_stats_debugfs_remove(struct fnic *);
+const char *fnic_role_to_str(unsigned int role);
#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index aaa4ea02fb7c..cdc6b12b1ec2 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -8,6 +8,7 @@
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
@@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+static const char * const fnic_role_str[] = {
+ [FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
+};
+
+const char *fnic_role_to_str(unsigned int role)
+{
+ if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
+ return "Unknown";
+
+ return fnic_role_str[role];
+}
/*
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
@@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Check Conditions encountered: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
- "Number of receive frame errors: %lld\n",
+ "Number of receive frame errors: %lld\n"
+ "Port speed (in Mbps): %lld\n",
(u64)stats->misc_stats.last_isr_time,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
@@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
(u64)atomic64_read(&stats->misc_stats.check_condition),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
- (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
- (u64)atomic64_read(&stats->misc_stats.frame_errors));
-
- len += scnprintf(debug->debug_buffer + len, buf_size - len,
- "Firmware reported port speed: %llu\n",
- (u64)atomic64_read(
- &stats->misc_stats.current_port_speed));
+ (u64)atomic64_read(&stats->misc_stats.tport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors),
+ (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
return len;
}
+int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ int buf_size = info->buf_size;
+ int len = info->buffer_len;
+ struct fnic_tport_s *tport, *next;
+ unsigned long flags;
+
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\t Debug Info\n"
+ "------------------------------------------\n");
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "fnic Name:%s number:%d Role:%s State:%s\n",
+ fnic->name, fnic->fnic_num,
+ fnic_role_to_str(fnic->role),
+ fnic_state_to_str(fnic->state));
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
+ iport->state, iport->flags, iport->vlan_id, iport->fcid);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "usefip:%d fip_state:%d fip_flogi_retry:%d\n",
+ iport->usefip, iport->fip.state, iport->fip.flogi_retry);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fpma %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->fpma[5], iport->fpma[4], iport->fpma[3],
+ iport->fpma[2], iport->fpma[1], iport->fpma[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
+ iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
+ iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
+ iport->fabric.state, iport->fabric.flags,
+ iport->fabric.retry_counter, iport->e_d_tov,
+ iport->r_a_tov);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
+ tport->fcid, tport->state, tport->flags,
+ atomic_read(&tport->in_flight),
+ tport->retry_counter);
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return len;
+}
+
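
The debugfs dump helpers above build their output by appending into one fixed-size buffer and feeding the running length back in as the next offset. A minimal sketch of that pattern with scnprintf(), whose return value is clamped to the bytes actually stored, so the offset can never run past the buffer (generic names, not fnic code):

#include <linux/kernel.h>	/* scnprintf() */

static int demo_fill_report(char *buf, size_t size)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "line one\n");
	len += scnprintf(buf + len, size - len, "value: %d\n", 42);
	return len;		/* bytes used, always <= size - 1 */
}
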
/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
@@ -485,8 +548,7 @@ int fnic_trace_buf_init(void)
}
fnic_trace_entries.page_offset =
- vmalloc(array_size(fnic_max_trace_entries,
- sizeof(unsigned long)));
+ vcalloc(fnic_max_trace_entries, sizeof(unsigned long));
if (!fnic_trace_entries.page_offset) {
printk(KERN_ERR PFX "Failed to allocate memory for"
" page_offset\n");
@@ -497,8 +559,6 @@ int fnic_trace_buf_init(void)
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
- memset((void *)fnic_trace_entries.page_offset, 0,
- (fnic_max_trace_entries * sizeof(unsigned long)));
fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
fnic_buf_head = fnic_trace_buf_p;
@@ -559,8 +619,7 @@ int fnic_fc_trace_init(void)
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
FC_TRC_SIZE_BYTES;
fnic_fc_ctlr_trace_buf_p =
- (unsigned long)vmalloc(array_size(PAGE_SIZE,
- fnic_fc_trace_max_pages));
+ (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
if (!fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Failed to allocate memory for "
"FC Control Trace Buf\n");
@@ -568,13 +627,9 @@ int fnic_fc_trace_init(void)
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
- fnic_fc_trace_max_pages * PAGE_SIZE);
-
/* Allocate memory for page offset */
fc_trace_entries.page_offset =
- vmalloc(array_size(fc_trace_max_entries,
- sizeof(unsigned long)));
+ vcalloc(fc_trace_max_entries, sizeof(unsigned long));
if (!fc_trace_entries.page_offset) {
pr_err("fnic:Failed to allocate memory for page_offset\n");
if (fnic_fc_ctlr_trace_buf_p) {
@@ -585,8 +640,6 @@ int fnic_fc_trace_init(void)
err = -ENOMEM;
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fc_trace_entries.page_offset, 0,
- (fc_trace_max_entries * sizeof(unsigned long)));
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
@@ -688,7 +741,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
*/
if (frame_type == FNIC_FC_RECV) {
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
- sizeof(struct fcoe_hdr);
+ sizeof(struct fcoe_hdr);
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
/* Copy the rest of data frame */
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
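
The trace-buffer hunks above swap open-coded vmalloc(array_size(...)) plus memset() for vcalloc(), which does the overflow-checked sizing and the zeroing in one call. A side-by-side sketch under that assumption (the element count is made up; both results are released with vfree()):

#include <linux/vmalloc.h>
#include <linux/overflow.h>	/* array_size() */
#include <linux/string.h>

#define DEMO_NENTRIES	1024

static unsigned long *demo_alloc_old(void)
{
	unsigned long *p = vmalloc(array_size(DEMO_NENTRIES, sizeof(*p)));

	if (p)
		memset(p, 0, DEMO_NENTRIES * sizeof(*p));
	return p;
}

static unsigned long *demo_alloc_new(void)
{
	/* Single call: overflow-checked multiply plus zero-filled memory. */
	return vcalloc(DEMO_NENTRIES, sizeof(unsigned long));
}
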
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 4101447bb8eb..2d438d722d0b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -643,9 +643,8 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern void hisi_sas_remove(struct platform_device *pdev);
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
-extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern int hisi_sas_sdev_init(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 53cb15f6714b..da4a2ed8ee86 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -805,13 +805,13 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
-int hisi_sas_slave_alloc(struct scsi_device *sdev)
+int hisi_sas_sdev_init(struct scsi_device *sdev)
{
struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_sas_device *sas_dev = ddev->lldd_dev;
int rc;
- rc = sas_slave_alloc(sdev);
+ rc = sas_sdev_init(sdev);
if (rc)
return rc;
@@ -821,7 +821,7 @@ int hisi_sas_slave_alloc(struct scsi_device *sdev)
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_init);
static int hisi_sas_dev_found(struct domain_device *device)
{
@@ -868,11 +868,10 @@ err_out:
return rc;
}
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_device_configure(sdev, lim);
+ int ret = sas_sdev_configure(sdev, lim);
if (ret)
return ret;
@@ -881,7 +880,7 @@ int hisi_sas_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_device_configure);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c3e571be2222..bb78e53c66e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1753,11 +1753,11 @@ static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v1_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1a62b5d15eca..71cd5b4450c2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3585,11 +3585,11 @@ static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v2_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v2_hw_groups,
.sdev_groups = sdev_groups_v2_hw,
.host_reset = hisi_sas_host_reset,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 35501d0aa655..48b95d9a7927 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2908,12 +2908,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
}
static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
-static int device_configure_v3_hw(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sdev_configure_v3_hw(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
struct hisi_hba *hisi_hba = shost_priv(shost);
- int ret = hisi_sas_device_configure(sdev, lim);
+ int ret = hisi_sas_sdev_configure(sdev, lim);
struct device *dev = hisi_hba->dev;
if (ret)
@@ -3336,13 +3336,13 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
static const struct scsi_host_template sht_v3_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = device_configure_v3_hw,
+ .sdev_configure = sdev_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
.tag_alloc_policy_rr = true,
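
The hisi_sas hunks above (and the hpsa, ibmvfc, ipr and libsas ones that follow) track the scsi_host_template hook rename: slave_alloc/slave_configure/slave_destroy become sdev_init/sdev_configure/sdev_destroy, and sdev_configure() is handed the request queue_limits to adjust instead of poking the queue directly. A minimal template sketch assuming those renamed hooks, with invented demo_* names:

#include <linux/blkdev.h>	/* struct queue_limits */
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int demo_sdev_init(struct scsi_device *sdev)
{
	/* Per-device setup that used to live in .slave_alloc. */
	return 0;
}

static int demo_sdev_configure(struct scsi_device *sdev,
			       struct queue_limits *lim)
{
	/* Tune limits here rather than calling blk_queue_*() helpers. */
	lim->max_hw_sectors = 1024;
	return 0;
}

static void demo_sdev_destroy(struct scsi_device *sdev)
{
	/* Undo whatever .sdev_init set up. */
}

static const struct scsi_host_template demo_sht = {
	.name		= "demo",
	.sdev_init	= demo_sdev_init,
	.sdev_configure	= demo_sdev_configure,
	.sdev_destroy	= demo_sdev_destroy,
};
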
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0c49414c1f35..84d8de07b7ae 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -283,9 +283,10 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
-static int hpsa_slave_alloc(struct scsi_device *sdev);
-static int hpsa_slave_configure(struct scsi_device *sdev);
-static void hpsa_slave_destroy(struct scsi_device *sdev);
+static int hpsa_sdev_init(struct scsi_device *sdev);
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
+static void hpsa_sdev_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
@@ -978,9 +979,9 @@ static const struct scsi_host_template hpsa_driver_template = {
.this_id = -1,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_configure = hpsa_slave_configure,
- .slave_destroy = hpsa_slave_destroy,
+ .sdev_init = hpsa_sdev_init,
+ .sdev_configure = hpsa_sdev_configure,
+ .sdev_destroy = hpsa_sdev_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
@@ -2107,7 +2108,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
return NULL;
}
-static int hpsa_slave_alloc(struct scsi_device *sdev)
+static int hpsa_sdev_init(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd = NULL;
unsigned long flags;
@@ -2142,7 +2143,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
/* configure scsi device based on internal per-device structure */
#define CTLR_TIMEOUT (120 * HZ)
-static int hpsa_slave_configure(struct scsi_device *sdev)
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct hpsa_scsi_dev_t *sd;
int queue_depth;
@@ -2173,7 +2175,7 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void hpsa_slave_destroy(struct scsi_device *sdev)
+static void hpsa_sdev_destroy(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *hdev = NULL;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e889f268601b..21f1d9871a33 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1151,8 +1151,8 @@ static struct attribute *hptiop_host_attrs[] = {
ATTRIBUTE_GROUPS(hptiop_host);
-static int hptiop_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int hptiop_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (sdev->type == TYPE_TAPE)
lim->max_hw_sectors = 8192;
@@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = driver_name,
.shost_groups = hptiop_host_groups,
- .device_configure = hptiop_device_configure,
+ .sdev_configure = hptiop_sdev_configure,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
.cmd_size = sizeof(struct hpt_cmd_priv),
@@ -1634,7 +1634,7 @@ static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
.host_phy_flag = cpu_to_le64(1),
};
-static struct pci_device_id hptiop_id_table[] = {
+static const struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index e66c3ef74267..773ec2f31bc4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3393,7 +3393,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
/**
- * ibmvfc_slave_alloc - Setup the device's task set value
+ * ibmvfc_sdev_init - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
@@ -3402,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
-static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+static int ibmvfc_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -3441,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
}
/**
- * ibmvfc_slave_configure - Configure the device
+ * ibmvfc_sdev_configure - Configure the device
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
@@ -3450,7 +3451,8 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
* Returns:
* 0
**/
-static int ibmvfc_slave_configure(struct scsi_device *sdev)
+static int ibmvfc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
@@ -3639,7 +3641,7 @@ static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3662,13 +3664,13 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ibmvfc_trace_attr = {
+static const struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ibmvfc_read_trace,
+ .read_new = ibmvfc_read_trace,
};
#endif
@@ -3696,8 +3698,8 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
- .slave_alloc = ibmvfc_slave_alloc,
- .slave_configure = ibmvfc_slave_configure,
+ .sdev_init = ibmvfc_sdev_init,
+ .sdev_configure = ibmvfc_sdev_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
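
The trace-attribute conversion above is part of the move to const struct bin_attribute: the callback takes a const attribute pointer and is wired up through .read_new while the old and new callback sets coexist. A self-contained sketch of a read-only binary attribute in that style (the name and payload are invented):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
			      const struct bin_attribute *attr,
			      char *buf, loff_t off, size_t count)
{
	static const char blob[] = "example payload\n";
	size_t len = sizeof(blob) - 1;

	/* Clip the request to the available bytes at this offset. */
	if (off >= len)
		return 0;
	if (count > len - off)
		count = len - off;
	memcpy(buf, blob + off, count);
	return count;
}

static const struct bin_attribute demo_blob_attr = {
	.attr		= { .name = "blob", .mode = 0444 },
	.size		= 0,
	.read_new	= demo_blob_read,
};
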
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 71f3e9563520..16a1aac11911 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1860,14 +1860,16 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
/**
- * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
+ * ibmvscsi_sdev_configure: Set the "allow_restart" flag for each disk.
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also as is required by the documentation for
* struct scsi_host_template.
*/
-static int ibmvscsi_slave_configure(struct scsi_device *sdev)
+static int ibmvscsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long lock_flags = 0;
@@ -2091,7 +2093,7 @@ static struct scsi_host_template driver_template = {
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
- .slave_configure = ibmvscsi_slave_configure,
+ .sdev_configure = ibmvscsi_sdev_configure,
.change_queue_depth = ibmvscsi_change_queue_depth,
.host_reset = ibmvscsi_host_reset,
.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 625fd547ee60..8648bd965287 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2941,7 +2941,7 @@ static void initio_remove_one(struct pci_dev *pdev)
MODULE_LICENSE("GPL");
-static struct pci_device_id initio_pci_tbl[] = {
+static const struct pci_device_id initio_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 31cf2d31cceb..3bfafd43e42a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3366,7 +3366,7 @@ static void ipr_worker_thread(struct work_struct *work)
* number of bytes printed to buffer
**/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3383,13 +3383,13 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
return ret;
}
-static struct bin_attribute ipr_trace_attr = {
+static const struct bin_attribute ipr_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ipr_read_trace,
+ .read_new = ipr_read_trace,
};
#endif
@@ -4087,7 +4087,7 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
};
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4111,7 +4111,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
}
static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4134,14 +4134,14 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_ioa_async_err_log = {
+static const struct bin_attribute ipr_ioa_async_err_log = {
.attr = {
.name = "async_err_log",
.mode = S_IRUGO | S_IWUSR,
},
.size = 0,
- .read = ipr_read_async_err_log,
- .write = ipr_next_async_err_log
+ .read_new = ipr_read_async_err_log,
+ .write_new = ipr_next_async_err_log
};
static struct attribute *ipr_ioa_attrs[] = {
@@ -4172,7 +4172,7 @@ ATTRIBUTE_GROUPS(ipr_ioa);
* number of bytes printed to buffer
**/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4361,7 +4361,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* number of bytes printed to buffer
**/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4385,14 +4385,14 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_dump_attr = {
+static const struct bin_attribute ipr_dump_attr = {
.attr = {
.name = "dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = ipr_read_dump,
- .write = ipr_write_dump
+ .read_new = ipr_read_dump,
+ .write_new = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
@@ -4745,13 +4745,13 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
}
/**
- * ipr_slave_destroy - Unconfigure a SCSI device
+ * ipr_sdev_destroy - Unconfigure a SCSI device
* @sdev: scsi device struct
*
* Return value:
* nothing
**/
-static void ipr_slave_destroy(struct scsi_device *sdev)
+static void ipr_sdev_destroy(struct scsi_device *sdev)
{
struct ipr_resource_entry *res;
struct ipr_ioa_cfg *ioa_cfg;
@@ -4769,7 +4769,7 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
}
/**
- * ipr_device_configure - Configure a SCSI device
+ * ipr_sdev_configure - Configure a SCSI device
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -4778,8 +4778,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
* Return value:
* 0 on success
**/
-static int ipr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ipr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -4815,7 +4815,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
}
/**
- * ipr_slave_alloc - Prepare for commands to a device.
+ * ipr_sdev_init - Prepare for commands to a device.
* @sdev: scsi device struct
*
* This function saves a pointer to the resource entry
@@ -4826,7 +4826,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
* Return value:
* 0 on success / -ENXIO if device does not exist
**/
-static int ipr_slave_alloc(struct scsi_device *sdev)
+static int ipr_sdev_init(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -6398,9 +6398,9 @@ static const struct scsi_host_template driver_template = {
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
- .slave_alloc = ipr_slave_alloc,
- .device_configure = ipr_device_configure,
- .slave_destroy = ipr_slave_destroy,
+ .sdev_init = ipr_sdev_init,
+ .sdev_configure = ipr_sdev_configure,
+ .sdev_destroy = ipr_sdev_destroy,
.scan_finished = ipr_scan_finished,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
@@ -9844,7 +9844,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
}
}
-static struct pci_device_id ipr_pci_table[] = {
+static const struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 10cf5775a939..cce6c6b409ad 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -364,7 +364,7 @@ static struct scsi_host_template ips_driver_template = {
.proc_name = "ips",
.show_info = ips_show_info,
.write_info = ips_write_info,
- .slave_configure = ips_slave_configure,
+ .sdev_configure = ips_sdev_configure,
.bios_param = ips_biosparam,
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
@@ -1166,7 +1166,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/****************************************************************************/
/* */
-/* Routine Name: ips_slave_configure */
+/* Routine Name: ips_sdev_configure */
/* */
/* Routine Description: */
/* */
@@ -1174,7 +1174,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/* */
/****************************************************************************/
static int
-ips_slave_configure(struct scsi_device * SDptr)
+ips_sdev_configure(struct scsi_device *SDptr, struct queue_limits *lim)
{
ips_ha_t *ha;
int min;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 65edf000e447..8ac932ec4444 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -400,7 +400,8 @@
*/
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
- static int ips_slave_configure(struct scsi_device *SDptr);
+ static int ips_sdev_configure(struct scsi_device *SDptr,
+ struct queue_limits *lim);
/*
* Raid Command Formats
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 866950a02965..287e1ba8ddd7 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -422,21 +422,6 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
}
}
-enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- if (state != SCI_DEV_RESETTING) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- }
-
- sci_change_state(sm, SCI_DEV_READY);
- return SCI_SUCCESS;
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
@@ -1694,20 +1679,6 @@ enum sci_status sci_remote_device_abort_requests_pending_abort(
return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev)
-{
- unsigned long flags;
- enum sci_status status;
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset_complete(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- return status;
-}
-
void isci_dev_set_hang_detection_timeout(
struct isci_remote_device *idev,
u32 timeout)
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 3ad681c4c20a..27ae45332704 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -175,19 +175,6 @@ enum sci_status sci_remote_device_reset(
struct isci_remote_device *idev);
/**
- * sci_remote_device_reset_complete() - This method informs the device object
- * that the reset operation is complete and the device can resume operation
- * again.
- * @remote_device: This parameter specifies the device which is to be informed
- * of the reset complete operation.
- *
- * An indication that the device is resuming operation. SCI_SUCCESS the device
- * is resuming operation.
- */
-enum sci_status sci_remote_device_reset_complete(
- struct isci_remote_device *idev);
-
-/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
@@ -364,10 +351,6 @@ enum sci_status isci_remote_device_reset(
struct isci_host *ihost,
struct isci_remote_device *idev);
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev);
-
enum sci_status isci_remote_device_suspend_terminate(
struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c708e1059638..e81f60985193 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1057,8 +1057,8 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int iscsi_sw_tcp_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
struct iscsi_session *session = tcp_sw_host->session;
@@ -1083,7 +1083,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
- .device_configure = iscsi_sw_tcp_device_configure,
+ .sdev_configure = iscsi_sw_tcp_sdev_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 80be3a936d92..fd1ef06655cb 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2222,13 +2222,13 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
EXPORT_SYMBOL(fc_eh_host_reset);
/**
- * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
+ * fc_sdev_init() - Configure the queue depth of a Scsi_Host
* @sdev: The SCSI device that identifies the SCSI host
*
* Configures queue depth based on host's cmd_per_len. If not set
* then we use the libfc default.
*/
-int fc_slave_alloc(struct scsi_device *sdev)
+int fc_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2238,7 +2238,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
return 0;
}
-EXPORT_SYMBOL(fc_slave_alloc);
+EXPORT_SYMBOL(fc_sdev_init);
/**
* fc_fcp_destroy() - Tear down the FCP layer for a given local port
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da11d32840e2..55ce7892f217 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -804,15 +804,14 @@ EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
-int sas_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
- ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
+ ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap);
return 0;
}
@@ -830,7 +829,7 @@ int sas_device_configure(struct scsi_device *scsi_dev,
return 0;
}
-EXPORT_SYMBOL_GPL(sas_device_configure);
+EXPORT_SYMBOL_GPL(sas_sdev_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -1194,14 +1193,14 @@ void sas_task_abort(struct sas_task *task)
}
EXPORT_SYMBOL_GPL(sas_task_abort);
-int sas_slave_alloc(struct scsi_device *sdev)
+int sas_sdev_init(struct scsi_device *sdev)
{
if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
return -ENXIO;
return 0;
}
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
+EXPORT_SYMBOL_GPL(sas_sdev_init);
void sas_target_destroy(struct scsi_target *starget)
{
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 39b504164ecc..0d0213bba35d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -6185,7 +6185,7 @@ const struct attribute_group *lpfc_vport_groups[] = {
**/
static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6244,7 +6244,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6280,14 +6280,14 @@ sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_ctlreg_attr = {
+static const struct bin_attribute sysfs_ctlreg_attr = {
.attr = {
.name = "ctlreg",
.mode = S_IRUSR | S_IWUSR,
},
.size = 256,
- .read = sysfs_ctlreg_read,
- .write = sysfs_ctlreg_write,
+ .read_new = sysfs_ctlreg_read,
+ .write_new = sysfs_ctlreg_write,
};
/**
@@ -6308,7 +6308,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
@@ -6332,20 +6332,20 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
}
-static struct bin_attribute sysfs_mbox_attr = {
+static const struct bin_attribute sysfs_mbox_attr = {
.attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
.size = MAILBOX_SYSFS_MAX,
- .read = sysfs_mbox_read,
- .write = sysfs_mbox_write,
+ .read_new = sysfs_mbox_read,
+ .write_new = sysfs_mbox_write,
};
/**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 1c6b024160da..c8f8496bbdf8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -120,6 +120,16 @@ enum ELX_LOOPBACK_CMD {
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+/* For non-embedded read object command */
+#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128}
+#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192}
+static const struct lpfc_read_object_cmd_scheme {
+ u32 min_hbd_cnt;
+ u32 max_hbd_cnt;
+ u32 cmd_size;
+ u32 payload_word_offset;
+} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1};
+
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;
uint32_t size;
@@ -3539,6 +3549,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
}
/**
+ * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded
+ * READ_OBJECT_V0 mailbox commands
+ * @phba: pointer to lpfc_hba data struct
+ * @pmb_buf: pointer to mailbox buffer
+ * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region
+ * @job: pointer to bsg_job struct
+ * @bsg_reply: pointer to bsg_reply struct
+ *
+ * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies
+ * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job
+ * structure for passing back to application layer.
+ *
+ * Return codes
+ * 0 - successful
+ * -EINVAL - invalid HBD_CNT
+ * -ENODEV - pointer to bsg_job struct is NULL
+ **/
+static int
+lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf,
+ struct lpfc_sli_config_mbox *sli_cfg_mbx,
+ struct bsg_job *job,
+ struct fc_bsg_reply *bsg_reply)
+{
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ u32 hbd_cnt;
+ u32 dma_buf_len;
+ u8 i = 0;
+ size_t extra_bytes;
+ off_t skip = 0;
+
+ if (!job) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2496 NULL job\n");
+ return -ENODEV;
+ }
+
+ if (!bsg_reply) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2498 NULL bsg_reply\n");
+ return -ENODEV;
+ }
+
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
+
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+
+ /* Calculate where the read object's read data payload is located based
+ * on HBD count scheme.
+ */
+ if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) {
+ skip = rd_obj_scheme[0].payload_word_offset * 4;
+ } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) {
+ skip = rd_obj_scheme[1].payload_word_offset * 4;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2497 bad hbd_count 0x%08x\n",
+ hbd_cnt);
+ return -EINVAL;
+ }
+
+ /* Copy SLI_CONFIG command and READ_OBJECT response first */
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, skip);
+
+ /* Copy data from hbds */
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ list) {
+ dma_buf_len = emb0_subsys->hbd[i].buf_len;
+
+ /* Use sg_copy_buffer to specify a skip offset */
+ extra_bytes = sg_copy_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ curr_dmabuf->virt,
+ dma_buf_len, skip, false);
+
+ bsg_reply->reply_payload_rcv_len += extra_bytes;
+
+ skip += extra_bytes;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2499 copied hbd[%d] "
+ "0x%zx bytes\n",
+ i, extra_bytes);
+ i++;
+ }
+
+ return 0;
+}
+
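
The handler above copies the mailbox header first and then streams each DMA buffer into the bsg reply scatterlist, advancing a skip offset after every copy. A reduced sketch of that gather pattern, with generic buffers standing in for the lpfc structures:

#include <linux/scatterlist.h>
#include <linux/types.h>

struct demo_buf {
	void	*virt;
	u32	len;
};

/* Copy a header followed by several side buffers into one scatterlist,
 * moving the destination offset forward after each copy.
 */
static size_t demo_gather(struct scatterlist *sgl, unsigned int nents,
			  const void *hdr, size_t hdr_len,
			  const struct demo_buf *bufs, int nbufs)
{
	size_t copied, skip;
	int i;

	copied = sg_copy_from_buffer(sgl, nents, hdr, hdr_len);
	skip = copied;

	for (i = 0; i < nbufs; i++) {
		/* to_buffer=false: copy from bufs[i].virt into the sglist at 'skip' */
		copied += sg_copy_buffer(sgl, nents, bufs[i].virt,
					 bufs[i].len, skip, false);
		skip = copied;
	}
	return copied;
}
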
+/**
* lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
@@ -3551,10 +3658,10 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
+ struct fc_bsg_reply *bsg_reply = NULL;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
- uint32_t size;
+ u32 size, opcode;
int rc = 0;
struct lpfc_dmabuf *dmabuf;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -3591,6 +3698,24 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ if (job) {
+ rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf,
+ sli_cfg_mbx,
+ job,
+ bsg_reply);
+ bsg_reply->result = rc;
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
}
/* Complete the job if the job is still active */
@@ -3604,12 +3729,14 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* result for successful */
bsg_reply->result = 0;
+done:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
- phba->mbox_ext_buf_ctx.mboxType, size);
+ phba->mbox_ext_buf_ctx.mboxType,
+ job->reply_payload.payload_len);
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType,
@@ -3819,14 +3946,16 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
{
struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ struct list_head *ext_dmabuf_list;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
- uint32_t ext_buf_cnt, ext_buf_index;
+ u32 ext_buf_cnt, ext_buf_index, hbd_cnt;
struct lpfc_dmabuf *ext_dmabuf = NULL;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
- uint8_t *pmbx;
+ u8 *pmbx, opcode;
int rc, i;
mbox_req =
@@ -3836,8 +3965,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
- &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ &emb0_subsys->sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2945 Handled SLI_CONFIG(mse) rd, "
@@ -3847,6 +3977,57 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
rc = -ERANGE;
goto job_error;
}
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2449 SLI_CONFIG(mse) rd non-embedded "
+ "hbd count = %d\n",
+ hbd_cnt);
+
+ ext_dmabuf_list =
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list;
+
+ /* Allocate hbds */
+ for (i = 0; i < hbd_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ ext_dmabuf_list);
+ }
+
+ /* Fill out the physical memory addresses for the
+ * hbds
+ */
+ i = 0;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ ext_dmabuf_list, list) {
+ emb0_subsys->hbd[i].pa_hi =
+ putPaddrHigh(curr_dmabuf->phys);
+ emb0_subsys->hbd[i].pa_lo =
+ putPaddrLow(curr_dmabuf->phys);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2495 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, "
+ "addrLo:x%x\n", i,
+ emb0_subsys->hbd[i].buf_len,
+ emb0_subsys->hbd[i].pa_hi,
+ emb0_subsys->hbd[i].pa_lo);
+ i++;
+ }
+ break;
+ default:
+ break;
+ }
+
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2941 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
@@ -4223,6 +4404,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
case COMN_OPCODE_GET_PROFILE_CONFIG:
case COMN_OPCODE_SET_FEATURES:
+ case COMN_OPCODE_READ_OBJECT:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
@@ -4665,8 +4847,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
- if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
- job->request_payload.payload_len > BSG_MBOX_SIZE) {
+ if (job->request_payload.payload_len > BSG_MBOX_SIZE) {
rc = -ERANGE;
goto job_done;
}
@@ -4737,6 +4918,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
+ /* non-embedded SLI_CONFIG requests already parsed, check others */
+ if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2729 Cmd x%x (x%x/x%x) request has "
+ "out-of-range reply payload length x%x\n",
+ pmb->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, pmboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, pmboxq),
+ job->reply_payload.payload_len);
+ rc = -ERANGE;
+ goto job_done;
+ }
+
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 3c04ca2d7455..27e7a033b53d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -239,12 +239,27 @@ struct lpfc_sli_config_emb0_subsys {
uint32_t timeout; /* comn_set_feature timeout */
uint32_t request_length; /* comn_set_feature request len */
uint32_t version; /* comn_set_feature version */
- uint32_t csf_feature; /* comn_set_feature feature */
+ uint32_t word68; /* comn_set_feature feature */
+#define lpfc_emb0_subcmnd_csf_feat_SHIFT 0
+#define lpfc_emb0_subcmnd_csf_feat_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_csf_feat_WORD word68
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_MASK 0x00ffffff
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_WORD word68
uint32_t word69; /* comn_set_feature parameter len */
uint32_t word70; /* comn_set_feature parameter val0 */
#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0
#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3
#define lpfc_emb0_subcmnd_csf_p0_WORD word70
+ uint32_t reserved71[25];
+ uint32_t word96; /* rd_obj hbd_count */
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_WORD word96
+#define LPFC_EMB0_MAX_RD_OBJ_HBD_CNT 31
+ struct lpfc_sli_config_hbd hbd[LPFC_EMB0_MAX_RD_OBJ_HBD_CNT];
+ uint32_t word190;
+ uint32_t word191;
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 30891ad17e2a..12c67cdd7c19 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1646,14 +1646,12 @@ out:
/* If the caller wanted a synchronous DA_ID completion, signal the
* wait obj and clear flag to reset the vport.
*/
- if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+ if (test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags)) {
if (ndlp->da_id_waitq)
wake_up(ndlp->da_id_waitq);
}
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3e173b5d00e0..3d47dc7458d1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -85,13 +85,13 @@ enum lpfc_fc4_xpt_flags {
NLP_XPT_HAS_HH = 0x10
};
-enum lpfc_nlp_save_flags {
+enum lpfc_nlp_save_flags { /* mask bits */
/* devloss occurred during recovery */
- NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ NLP_IN_RECOV_POST_DEV_LOSS,
/* wait for outstanding LOGO to cmpl */
- NLP_WAIT_FOR_LOGO = 0x2,
+ NLP_WAIT_FOR_LOGO,
/* wait for outstanding DA_ID to finish */
- NLP_WAIT_FOR_DA_ID = 0x4
+ NLP_WAIT_FOR_DA_ID
};
struct lpfc_nodelist {
@@ -154,7 +154,7 @@ struct lpfc_nodelist {
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
- enum lpfc_nlp_save_flags save_flags;
+ unsigned long save_flags;
enum lpfc_fc4_xpt_flags fc4_xpt_flags;
@@ -208,7 +208,6 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
- NLP_TARGET_REMOVE = 28, /* Target remove in process */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
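
The save_flags change above turns OR-able mask values into bit numbers on an unsigned long, which is why the callers elsewhere in this patch can drop the ndlp->lock around flag updates and use the atomic bitops directly. A tiny sketch of that usage with generic names:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_save_bits {			/* bit numbers, not masks */
	DEMO_IN_RECOVERY,
	DEMO_WAIT_FOR_LOGO,
};

struct demo_node {
	unsigned long save_flags;
};

static void demo_mark_recovery(struct demo_node *n)
{
	set_bit(DEMO_IN_RECOVERY, &n->save_flags);	/* atomic, no spinlock needed */
}

static bool demo_finish_recovery(struct demo_node *n)
{
	/* Test and clear in one atomic step, as the lpfc_hbadisc.c hunk does. */
	return test_and_clear_bit(DEMO_IN_RECOVERY, &n->save_flags);
}
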
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 37f0a930d469..1d7db49a8fe4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2988,12 +2988,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
wake_up_waiter = 1;
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- }
- spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
@@ -3035,19 +3031,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
- if (skip_recovery)
- goto out;
-
- /* The driver sets this flag for an NPIV instance that doesn't want to
- * log into the remote port.
- */
- if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) {
- clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- goto out_rsrc_free;
- }
-
out:
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
@@ -3091,7 +3074,7 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
-out_rsrc_free:
+
/* Driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -4583,6 +4566,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int link_reset = 0, rc;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 rsn_code_exp = 0;
/* Note: cmd_dmabuf may be 0 for internal driver abort
@@ -4798,11 +4782,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case LSRJT_LOGICAL_BSY:
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
if ((cmd == ELS_CMD_PLOGI) ||
(cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = 48;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (cmd == ELS_CMD_PLOGI &&
+ rsn_code_exp == LSEXP_AUTH_REQ)
+ maxretry = 8;
} else if (cmd == ELS_CMD_FDISC) {
/* FDISC retry policy */
maxretry = 48;
@@ -4831,6 +4826,20 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0820 FLOGI (x%x). "
"BBCredit Not Supported\n",
stat.un.lsRjtError);
+ } else if (cmd == ELS_CMD_PLOGI) {
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (rsn_code_exp == LSEXP_AUTH_REQ) {
+ delay = 1000;
+ retry = 1;
+ maxretry = 8;
+ }
}
break;
@@ -10411,8 +10420,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
-
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -11498,15 +11505,13 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) {
/* Wake up lpfc_vport_delete if waiting...*/
if (ndlp->logo_waitq)
wake_up(ndlp->logo_waitq);
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
/* Safe to release resources now. */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4036a9838bb5..36e66df36a18 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -414,12 +414,7 @@ void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
- unsigned long iflags;
-
- spin_lock_irqsave(&ndlp->lock, iflags);
- if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
- ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -427,9 +422,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
"port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
ndlp->nlp_flag, vport->port_state);
- return;
}
- spin_unlock_irqrestore(&ndlp->lock, iflags);
}
/**
@@ -546,9 +539,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags);
return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index d5c15742f7f2..32298285ea5e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -724,6 +724,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_AUTH_REQ 0x48
#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 26e1313ebb21..2dedb273b091 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1907,22 +1907,22 @@ struct lpfc_mbx_query_fw_config {
uint32_t asic_revision;
uint32_t physical_port;
uint32_t function_mode;
-#define LPFC_FCOE_INI_MODE 0x00000040
-#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_FC_INI_MODE 0x00000040
+#define LPFC_FC_TGT_MODE 0x00000080
#define LPFC_DUA_MODE 0x00000800
- uint32_t ulp0_mode;
-#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
-#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
- uint32_t ulp0_nap_words[12];
- uint32_t ulp1_mode;
- uint32_t ulp1_nap_words[12];
+ uint32_t oper_mode;
+ uint32_t rsvd9[2];
+ uint32_t wqid_base;
+ uint32_t wqid_tot;
+ uint32_t rqid_base;
+ uint32_t rqid_tot;
+ uint32_t rsvd15[19];
uint32_t function_capabilities;
uint32_t cqid_base;
uint32_t cqid_tot;
uint32_t eqid_base;
uint32_t eqid_tot;
- uint32_t ulp0_nap2_words[2];
- uint32_t ulp1_nap2_words[2];
+ uint32_t rsvd39[4];
} rsp;
};
@@ -3778,25 +3778,22 @@ struct lpfc_mbx_get_prof_cfg {
struct lpfc_controller_attribute {
uint32_t version_string[8];
uint32_t manufacturer_name[8];
- uint32_t supported_modes;
+ uint32_t rsvd16;
uint32_t word17;
-#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
-#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
-#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
-#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
#define lpfc_cntl_attr_flash_id_SHIFT 16
#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
#define lpfc_cntl_attr_flash_id_WORD word17
- uint32_t mbx_da_struct_ver;
- uint32_t ep_fw_da_struct_ver;
+#define lpfc_cntl_attr_boot_enable_SHIFT 24
+#define lpfc_cntl_attr_boot_enable_MASK 0x00000001
+#define lpfc_cntl_attr_boot_enable_WORD word17
+ uint32_t rsvd18[2];
uint32_t ncsi_ver_str[3];
- uint32_t dflt_ext_timeout;
+ uint32_t rsvd23;
uint32_t model_number[8];
uint32_t description[16];
uint32_t serial_number[8];
- uint32_t ip_ver_str[8];
+ uint32_t ipl_name[5];
+ uint32_t rsvd61[3];
uint32_t fw_ver_str[8];
uint32_t bios_ver_str[8];
uint32_t redboot_ver_str[8];
@@ -3804,53 +3801,31 @@ struct lpfc_controller_attribute {
uint32_t flash_fw_ver_str[8];
uint32_t functionality;
uint32_t word105;
-#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
-#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
-#define lpfc_cntl_attr_max_cbd_len_WORD word105
#define lpfc_cntl_attr_asic_rev_SHIFT 16
#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
#define lpfc_cntl_attr_asic_rev_WORD word105
-#define lpfc_cntl_attr_gen_guid0_SHIFT 24
-#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid0_WORD word105
- uint32_t gen_guid1_12[3];
+ uint32_t rsvd106[3];
uint32_t word109;
-#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
-#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
-#define lpfc_cntl_attr_gen_guid13_14_WORD word109
-#define lpfc_cntl_attr_gen_guid15_SHIFT 16
-#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid15_WORD word109
#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
#define lpfc_cntl_attr_hba_port_cnt_WORD word109
- uint32_t word110;
-#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
-#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
-#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
-#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
-#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
-#define lpfc_cntl_attr_multi_func_dev_WORD word110
+ uint32_t rsvd110;
uint32_t word111;
-#define lpfc_cntl_attr_cache_valid_SHIFT 0
-#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
-#define lpfc_cntl_attr_cache_valid_WORD word111
#define lpfc_cntl_attr_hba_status_SHIFT 8
#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
#define lpfc_cntl_attr_hba_status_WORD word111
-#define lpfc_cntl_attr_max_domain_SHIFT 16
-#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
-#define lpfc_cntl_attr_max_domain_WORD word111
#define lpfc_cntl_attr_lnk_numb_SHIFT 24
#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
#define lpfc_cntl_attr_lnk_numb_WORD word111
#define lpfc_cntl_attr_lnk_type_SHIFT 30
#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
#define lpfc_cntl_attr_lnk_type_WORD word111
- uint32_t fw_post_status;
- uint32_t hba_mtu[8];
+ uint32_t rsvd112[9];
uint32_t word121;
- uint32_t reserved1[3];
+#define lpfc_cntl_attr_asic_gen_SHIFT 8
+#define lpfc_cntl_attr_asic_gen_MASK 0x000000ff
+#define lpfc_cntl_attr_asic_gen_WORD word121
+ uint32_t rsvd122[3];
uint32_t word125;
#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
@@ -3875,15 +3850,7 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
#define lpfc_cntl_attr_pci_fnc_num_WORD word127
-#define lpfc_cntl_attr_inf_type_SHIFT 24
-#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
-#define lpfc_cntl_attr_inf_type_WORD word127
- uint32_t unique_id[2];
- uint32_t word130;
-#define lpfc_cntl_attr_num_netfil_SHIFT 0
-#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
-#define lpfc_cntl_attr_num_netfil_WORD word130
- uint32_t reserved2[4];
+ uint32_t rsvd128[7];
};
struct lpfc_mbx_get_cntl_attributes {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7f57397d91a9..bcadf11414c8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -598,7 +598,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
jiffies + msecs_to_jiffies(1000 * timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
return;
}
@@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
@@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (mbx_action == LPFC_MBX_NO_WAIT)
return;
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
@@ -3847,8 +3847,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* Otherwise, let dev_loss take care of
* the node.
*/
- if (!(ndlp->save_flags &
- NLP_IN_RECOV_POST_DEV_LOSS) &&
+ if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS,
+ &ndlp->save_flags) &&
!(ndlp->fc4_xpt_flags &
(NVME_XPT_REGD | SCSI_XPT_REGD)))
lpfc_disc_state_machine
@@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(30 * 1000)) {
+ if (time >= secs_to_jiffies(30)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(15 * 1000) &&
+ if (time >= secs_to_jiffies(15) &&
phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0465 Link down longer than 15 "
@@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
if (!atomic_read(&vport->fc_map_cnt) &&
- time < msecs_to_jiffies(2 * 1000))
+ time < secs_to_jiffies(2))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t)
lpfc_worker_wake_up(phba);
/* restart the timer for the next iteration */
- mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
- LPFC_VMID_TIMER));
+ mod_timer(&phba->inactive_vmid_poll,
+ jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
}
/**
@@ -11109,14 +11109,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.fw_func_mode =
mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
- phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
- phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
phba->sli4_hba.physical_port =
mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
- "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
- phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+ "3251 QUERY_FW_CFG: func_mode:x%x\n",
+ phba->sli4_hba.fw_func_mode);
mempool_free(mboxq, phba->mbox_mem_pool);
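The timer conversions in this file are mechanical: every msecs_to_jiffies(1000 * n) becomes secs_to_jiffies(n), which states the whole-second interval directly and skips the millisecond multiply. A hedged sketch of the equivalence (illustrative; the helper's exact kernel definition may differ):

	/* Both expressions yield the same jiffies count for a whole-second
	 * interval; secs_to_jiffies() is roughly n * HZ without the detour
	 * through milliseconds.
	 */
	unsigned long old_tmo = msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL);
	unsigned long new_tmo = secs_to_jiffies(LPFC_HB_MBOX_INTERVAL);

	mod_timer(&phba->hb_tmofunc, jiffies + new_tmo);	/* old_tmo == new_tmo */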
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e98f1c2b2220..fb6dbcb86c09 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2524,8 +2524,10 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
/* addr mode is bit wise inverted value of fcf addr_mode */
- bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
- (~phba->fcf.addr_mode) & 0x3);
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+ (~phba->fcf.addr_mode) & 0x3);
+ }
} else {
/* This is ONLY for NVMET MRQ == 1 */
if (phba->cfg_nvmet_mrq != 1)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4d88cfe71cae..a596b80d03d4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -64,9 +64,6 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
- /* First, we MUST have a RPI registered */
- if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag))
- return 0;
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
@@ -735,6 +732,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ADISC *ap;
uint32_t *lp;
uint32_t cmd;
+ int rc;
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
@@ -759,21 +757,29 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* resume the RPI before the ACC goes out.
*/
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
- GFP_KERNEL);
- if (elsiocb) {
- /* Save info from cmd IOCB used in rsp */
- memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
- sizeof(struct lpfc_iocbq));
-
- /* Save the ELS cmd */
- elsiocb->drvrTimeout = cmd;
-
- if (lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi,
- elsiocb))
- kfree(elsiocb);
- goto out;
+ /* Don't resume an unregistered RPI - unnecessary
+ * mailbox. Just send the ACC when the RPI is not
+ * registered.
+ */
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL);
+ if (elsiocb) {
+ /* Save info from cmd IOCB used in
+ * rsp
+ */
+ memcpy(elsiocb, cmdiocb,
+ sizeof(*elsiocb));
+
+ elsiocb->drvrTimeout = cmd;
+
+ rc = lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb);
+ if (rc)
+ kfree(elsiocb);
+
+ goto out;
+ }
}
}
@@ -815,7 +821,6 @@ out:
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
@@ -906,7 +911,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_printf_vlog(vport, KERN_INFO,
@@ -1332,7 +1337,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
@@ -1936,7 +1941,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
@@ -2255,11 +2260,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_DISCOVERY | LOG_NODE,
+ "6228 Sending LOGO, determined nlp_type "
+ "0x%x nlp_flag x%lx refcnt %u\n",
+ ndlp->nlp_type, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
lpfc_issue_els_logo(vport, ndlp, 0);
-
- ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -2743,7 +2750,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 43dc1da4a156..b1adb9f59097 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
* wait. Print a message if a 10 second wait expires and renew the
* wait. This is unexpected.
*/
- wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO);
while (true) {
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 905026a4782c..055ed632c14d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5136,6 +5136,12 @@ lpfc_info(struct Scsi_Host *host)
goto buffer_done;
}
+ /* Support for BSG ioctls */
+ scnprintf(tmp, sizeof(tmp), " BSG");
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+
/* PCI resettable */
if (!lpfc_check_pci_resettable(phba)) {
scnprintf(tmp, sizeof(tmp), " PCI resettable");
@@ -6120,31 +6126,28 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
/* Issue LOGO, if no LOGO is outstanding */
spin_lock_irqsave(&pnode->lock, flags);
- if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags) &&
!pnode->logo_waitq) {
pnode->logo_waitq = &waitq;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
- pnode->save_flags |= NLP_WAIT_FOR_LOGO;
spin_unlock_irqrestore(&pnode->lock, flags);
+ set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
+ set_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags);
lpfc_unreg_rpi(vport, pnode);
wait_event_timeout(waitq,
- (!(pnode->save_flags &
- NLP_WAIT_FOR_LOGO)),
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags),
msecs_to_jiffies(dev_loss_tmo *
1000));
- if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags))
lpfc_printf_vlog(vport, KERN_ERR, logit,
"0725 SCSI layer TGTRST "
"failed & LOGO TMO (%d, %llu) "
"return x%x\n",
tgt_id, lun_id, status);
- spin_lock_irqsave(&pnode->lock, flags);
- pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
- } else {
- spin_lock_irqsave(&pnode->lock, flags);
- }
+ spin_lock_irqsave(&pnode->lock, flags);
pnode->logo_waitq = NULL;
spin_unlock_irqrestore(&pnode->lock, flags);
status = SUCCESS;
@@ -6226,7 +6229,7 @@ error:
}
/**
- * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * lpfc_sdev_init - scsi_host_template sdev_init entry point
* @sdev: Pointer to scsi_device.
*
* This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
@@ -6239,7 +6242,7 @@ error:
* 0 - Success
**/
static int
-lpfc_slave_alloc(struct scsi_device *sdev)
+lpfc_sdev_init(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6342,8 +6345,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
/**
- * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * lpfc_sdev_configure - scsi_host_template sdev_configure entry point
* @sdev: Pointer to scsi_device.
+ * @lim: Request queue limits.
*
* This routine configures the following items
* - Tag command queuing support for @sdev if supported.
@@ -6353,7 +6357,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
* 0 - Success
**/
static int
-lpfc_slave_configure(struct scsi_device *sdev)
+lpfc_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6371,13 +6375,13 @@ lpfc_slave_configure(struct scsi_device *sdev)
}
/**
- * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * lpfc_sdev_destroy - sdev_destroy entry point of SHT data structure
* @sdev: Pointer to scsi_device.
*
* This routine sets the @sdev hostdata field to null.
**/
static void
-lpfc_slave_destroy(struct scsi_device *sdev)
+lpfc_sdev_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6422,7 +6426,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
{
struct lpfc_device_data *lun_info;
- int memory_flags;
+ gfp_t memory_flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!(phba->cfg_fof))
@@ -6737,7 +6741,13 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_slave(struct scsi_device *sdev)
+lpfc_init_no_sdev(struct scsi_device *sdev)
+{
+ return -ENODEV;
+}
+
+static int
+lpfc_config_no_sdev(struct scsi_device *sdev, struct queue_limits *lim)
{
return -ENODEV;
}
@@ -6748,8 +6758,8 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .slave_alloc = lpfc_no_slave,
- .slave_configure = lpfc_no_slave,
+ .sdev_init = lpfc_init_no_sdev,
+ .sdev_configure = lpfc_config_no_sdev,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = 1,
@@ -6772,9 +6782,9 @@ struct scsi_host_template lpfc_template = {
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
@@ -6799,9 +6809,9 @@ struct scsi_host_template lpfc_vport_template = {
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = NULL,
.eh_host_reset_handler = NULL,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
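The slave_alloc/slave_configure/slave_destroy renames above track the SCSI midlayer's updated scsi_host_template callback names; sdev_configure also gains a struct queue_limits argument. A minimal sketch of a template wired to the new hooks (hypothetical demo_* names, not from this patch):

	static int demo_sdev_init(struct scsi_device *sdev)
	{
		/* per-LUN allocation that used to live in .slave_alloc */
		return 0;
	}

	static int demo_sdev_configure(struct scsi_device *sdev,
				       struct queue_limits *lim)
	{
		/* per-LUN setup that used to live in .slave_configure;
		 * queue limits can now be adjusted through *lim. */
		return 0;
	}

	static void demo_sdev_destroy(struct scsi_device *sdev)
	{
		kfree(sdev->hostdata);
		sdev->hostdata = NULL;
	}

	static const struct scsi_host_template demo_template = {
		.sdev_init	= demo_sdev_init,
		.sdev_configure	= demo_sdev_configure,
		.sdev_destroy	= demo_sdev_destroy,
	};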
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 874644b31a3e..3fd9723cd271 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
lpfc_sli_mbox_sys_flush(phba);
return;
}
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
/* Disable softirqs, including timers from obtaining phba->hbalock */
local_bh_disable();
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c1e9ec0243ba..9be3da91c923 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -865,8 +865,6 @@ struct lpfc_sli4_hba {
struct lpfc_name wwpn;
uint32_t fw_func_mode; /* FW function protocol mode */
- uint32_t ulp0_mode; /* ULP0 protocol mode */
- uint32_t ulp1_mode; /* ULP1 protocol mode */
/* Optimized Access Storage specific queues/structures */
uint64_t oas_next_lun;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 61fe1220f8ad..c35f7225058e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.6"
+#define LPFC_DRIVER_VERSION "14.4.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index cc3e4736f2fe..14dbfe954e42 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
mod_timer(&vport->phba->inactive_vmid_poll,
jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ secs_to_jiffies(LPFC_VMID_TIMER));
vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9e0e35763377..3d70cc517573 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -492,21 +492,22 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
spin_lock_irq(&ndlp->lock);
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags) &&
!ndlp->logo_waitq) {
ndlp->logo_waitq = &waitq;
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
spin_unlock_irq(&ndlp->lock);
rc = lpfc_issue_els_npiv_logo(vport, ndlp);
if (!rc) {
wait_event_timeout(waitq,
- (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
goto logo_cmpl;
/* LOGO wait failed. Correct status. */
rc = -EINTR;
@@ -516,9 +517,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Error - clean up node flags. */
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
logo_cmpl:
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
@@ -696,19 +695,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = &waitq;
- ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ set_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
if (!rc) {
wait_event_timeout(waitq,
- !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+ !test_bit(NLP_WAIT_FOR_DA_ID,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
"1829 DA_ID issue status %d. "
- "SFlag x%x NState x%x, NFlag x%lx "
+ "SFlag x%lx NState x%x, NFlag x%lx "
"Rpi x%x\n",
rc, ndlp->save_flags, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
@@ -718,8 +718,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
*/
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = NULL;
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
}
issue_logo:
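The LOGO and DA_ID paths above keep the same wait/wake protocol after the bitop conversion: the issuer sets the wait bit and sleeps on an on-stack wait queue, and the completion path clears the bit and wakes the waiter. A condensed sketch of the two halves (illustrative; in the canonical ordering the condition bit is cleared before the wake-up so the waiter's re-check sees it):

	/* Issuer */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	ndlp->logo_waitq = &waitq;
	set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
	/* ... issue the LOGO ELS ... */
	wait_event_timeout(waitq,
			   !test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags),
			   msecs_to_jiffies(phba->fc_ratov * 2000));

	/* Completion */
	if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) {
		clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
		if (ndlp->logo_waitq)
			wake_up(ndlp->logo_waitq);
	}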
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 38976f94453e..adab151663dd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4551,7 +4551,7 @@ megaraid_shutdown(struct pci_dev *pdev)
__megaraid_shutdown(adapter);
}
-static struct pci_device_id megaraid_pci_tbl[] = {
+static const struct pci_device_id megaraid_pci_tbl[] = {
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
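Several ID tables in this series gain const: the PCI core only ever reads them, so they can live in read-only data. Sketch of the shape (entries mirror the hunk above):

	static const struct pci_device_id demo_pci_tbl[] = {
		{ PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
		  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, demo_pci_tbl);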
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index bc867da650b6..60cc3372991f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -199,7 +199,7 @@ MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
* PCI table for all supported controllers.
*/
-static struct pci_device_id pci_id_table_g[] = {
+static const struct pci_device_id pci_id_table_g[] = {
{
PCI_VENDOR_ID_DELL,
PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
@@ -621,7 +621,7 @@ megaraid_io_attach(adapter_t *adapter)
host = scsi_host_alloc(&megaraid_template_g, 8);
if (!host) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid mbox: scsi_register failed\n"));
+ "megaraid mbox: scsi_host_alloc failed\n"));
return -1;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 49abd7dd75a7..d85f990aec88 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -146,7 +146,7 @@ megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
*/
-static struct pci_device_id megasas_pci_table[] = {
+static const struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
/* xscale IOP */
@@ -2067,8 +2067,8 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
}
-static int megasas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int megasas_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
u16 pd_index = 0;
struct megasas_instance *instance;
@@ -2108,7 +2108,7 @@ static int megasas_device_configure(struct scsi_device *sdev,
return 0;
}
-static int megasas_slave_alloc(struct scsi_device *sdev)
+static int megasas_sdev_init(struct scsi_device *sdev)
{
u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
@@ -2153,7 +2153,7 @@ scan_target:
return 0;
}
-static void megasas_slave_destroy(struct scsi_device *sdev)
+static void megasas_sdev_destroy(struct scsi_device *sdev)
{
u16 ld_tgt_id;
struct megasas_instance *instance;
@@ -3509,9 +3509,9 @@ static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
- .device_configure = megasas_device_configure,
- .slave_alloc = megasas_slave_alloc,
- .slave_destroy = megasas_slave_destroy,
+ .sdev_configure = megasas_sdev_configure,
+ .sdev_init = megasas_sdev_init,
+ .sdev_destroy = megasas_sdev_destroy,
.queuecommand = megasas_queue_command,
.eh_target_reset_handler = megasas_reset_target,
.eh_abort_handler = megasas_task_abort,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 10b8e4dc64f8..7589f48aebc8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2951,6 +2951,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
.max_hw_sectors = MPI3MR_MAX_APP_XFER_SECTORS,
.max_segments = MPI3MR_MAX_APP_XFER_SEGMENTS,
};
+ struct request_queue *q;
device_initialize(bsg_dev);
@@ -2966,14 +2967,17 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
return;
}
- mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
mpi3mr_bsg_request, NULL, 0);
- if (IS_ERR(mrioc->bsg_queue)) {
+ if (IS_ERR(q)) {
ioc_err(mrioc, "%s: bsg registration failed\n",
dev_name(bsg_dev));
device_del(bsg_dev);
put_device(bsg_dev);
+ return;
}
+
+ mrioc->bsg_queue = q;
}
/**
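The mpi3mr_bsg_init() fix above keeps mrioc->bsg_queue NULL unless registration succeeds: the queue is built into a local variable and only committed after the IS_ERR() check, and the error path now returns after tearing down the bsg device. The pattern in isolation (condensed from the hunk):

	struct request_queue *q;

	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
			    mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(q)) {
		/* leave mrioc->bsg_queue NULL so later users can test it
		 * safely, then undo the device setup. */
		device_del(bsg_dev);
		put_device(bsg_dev);
		return;
	}
	mrioc->bsg_queue = q;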
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1e8735538b23..b9a51d3f2024 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4465,14 +4465,14 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
}
/**
- * mpi3mr_slave_destroy - Slave destroy callback handler
+ * mpi3mr_sdev_destroy - Slave destroy callback handler
* @sdev: SCSI device reference
*
* Cleanup and free per device(lun) private data.
*
* Return: Nothing.
*/
-static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -4552,7 +4552,7 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
}
/**
- * mpi3mr_device_configure - Slave configure callback handler
+ * mpi3mr_sdev_configure - Slave configure callback handler
* @sdev: SCSI device reference
* @lim: queue limits
*
@@ -4561,8 +4561,8 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
*
* Return: 0 always.
*/
-static int mpi3mr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int mpi3mr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -4599,14 +4599,14 @@ static int mpi3mr_device_configure(struct scsi_device *sdev,
}
/**
- * mpi3mr_slave_alloc -Slave alloc callback handler
+ * mpi3mr_sdev_init -Slave alloc callback handler
* @sdev: SCSI device reference
*
* Allocate per device(lun) private data and initialize it.
*
* Return: 0 on success -ENOMEM on memory allocation failure.
*/
-static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+static int mpi3mr_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -5062,10 +5062,10 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.proc_name = MPI3MR_DRIVER_NAME,
.queuecommand = mpi3mr_qcmd,
.target_alloc = mpi3mr_target_alloc,
- .slave_alloc = mpi3mr_slave_alloc,
- .device_configure = mpi3mr_device_configure,
+ .sdev_init = mpi3mr_sdev_init,
+ .sdev_configure = mpi3mr_sdev_configure,
.target_destroy = mpi3mr_target_destroy,
- .slave_destroy = mpi3mr_slave_destroy,
+ .sdev_destroy = mpi3mr_sdev_destroy,
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 16ac2267c71e..dc43cfa83088 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5627,10 +5627,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
- pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ pr_err("%s: overriding NVDATA EEDPTagMode setting from 0 to 1\n",
ioc->name);
- ioc->manu_pg11.EEDPTagMode &= ~0x3;
- ioc->manu_pg11.EEDPTagMode |= 0x1;
+ ioc->manu_pg11.EEDPTagMode = 0x1;
mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
&ioc->manu_pg11);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9599d7a50028..a456e5ec74d8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2025,14 +2025,14 @@ scsih_target_destroy(struct scsi_target *starget)
}
/**
- * scsih_slave_alloc - device add routine
+ * scsih_sdev_init - device add routine
* @sdev: scsi device struct
*
* Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_alloc(struct scsi_device *sdev)
+scsih_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
@@ -2107,11 +2107,11 @@ scsih_slave_alloc(struct scsi_device *sdev)
}
/**
- * scsih_slave_destroy - device destroy routine
+ * scsih_sdev_destroy - device destroy routine
* @sdev: scsi device struct
*/
static void
-scsih_slave_destroy(struct scsi_device *sdev)
+scsih_sdev_destroy(struct scsi_device *sdev)
{
struct MPT3SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
@@ -2496,7 +2496,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}
/**
- * scsih_device_configure - device configure routine.
+ * scsih_sdev_configure - device configure routine.
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -2504,7 +2504,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* the device is ignored.
*/
static int
-scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
+scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -11904,10 +11904,10 @@ static const struct scsi_host_template mpt2sas_driver_template = {
.proc_name = MPT2SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
@@ -11942,10 +11942,10 @@ static const struct scsi_host_template mpt3sas_driver_template = {
.proc_name = MPT3SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 020037cbf0d9..2c72da6b8cf0 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -609,7 +609,7 @@ static void mvs_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id mvs_pci_table[] = {
+static const struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index d9d366ec17dc..96549e7f5705 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2000,7 +2000,8 @@ static struct mvumi_instance_template mvumi_instance_9580 = {
.reset_host = mvumi_reset_host_9580,
};
-static int mvumi_slave_configure(struct scsi_device *sdev)
+static int mvumi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct mvumi_hba *mhba;
unsigned char bitcount = sizeof(unsigned char) * 8;
@@ -2172,7 +2173,7 @@ static const struct scsi_host_template mvumi_template = {
.module = THIS_MODULE,
.name = "Marvell Storage Controller",
- .slave_configure = mvumi_slave_configure,
+ .sdev_configure = mvumi_sdev_configure,
.queuecommand = mvumi_queue_command,
.eh_timed_out = mvumi_timed_out,
.eh_host_reset_handler = mvumi_host_reset,
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index a7e64b867c8e..dc4bd422b601 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1619,7 +1619,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost,
return myrb_pthru_queuecommand(shost, scmd);
}
-static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+static int myrb_ldev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_ldev_info *ldev_info;
@@ -1627,8 +1627,6 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
enum raid_level level;
ldev_info = cb->ldev_info_buf + ldev_num;
- if (!ldev_info)
- return -ENXIO;
sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!sdev->hostdata)
@@ -1665,7 +1663,7 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+static int myrb_pdev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_pdev_state *pdev_info;
@@ -1701,7 +1699,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_slave_alloc(struct scsi_device *sdev)
+static int myrb_sdev_init(struct scsi_device *sdev)
{
if (sdev->channel > myrb_logical_channel(sdev->host))
return -ENXIO;
@@ -1710,12 +1708,13 @@ static int myrb_slave_alloc(struct scsi_device *sdev)
return -ENXIO;
if (sdev->channel == myrb_logical_channel(sdev->host))
- return myrb_ldev_slave_alloc(sdev);
+ return myrb_ldev_sdev_init(sdev);
- return myrb_pdev_slave_alloc(sdev);
+ return myrb_pdev_sdev_init(sdev);
}
-static int myrb_slave_configure(struct scsi_device *sdev)
+static int myrb_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrb_ldev_info *ldev_info;
@@ -1741,7 +1740,7 @@ static int myrb_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrb_slave_destroy(struct scsi_device *sdev)
+static void myrb_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -2208,9 +2207,9 @@ static const struct scsi_host_template myrb_template = {
.proc_name = "myrb",
.queuecommand = myrb_queuecommand,
.eh_host_reset_handler = myrb_host_reset,
- .slave_alloc = myrb_slave_alloc,
- .slave_configure = myrb_slave_configure,
- .slave_destroy = myrb_slave_destroy,
+ .sdev_init = myrb_sdev_init,
+ .sdev_configure = myrb_sdev_configure,
+ .sdev_destroy = myrb_sdev_destroy,
.bios_param = myrb_biosparam,
.cmd_size = sizeof(struct myrb_cmdblk),
.shost_groups = myrb_shost_groups,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 1469d0c54e45..95af3bb03834 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1786,7 +1786,7 @@ static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
return ldev_num;
}
-static int myrs_slave_alloc(struct scsi_device *sdev)
+static int myrs_sdev_init(struct scsi_device *sdev)
{
struct myrs_hba *cs = shost_priv(sdev->host);
unsigned char status;
@@ -1882,7 +1882,8 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrs_slave_configure(struct scsi_device *sdev)
+static int myrs_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info;
@@ -1910,7 +1911,7 @@ static int myrs_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrs_slave_destroy(struct scsi_device *sdev)
+static void myrs_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -1921,9 +1922,9 @@ static const struct scsi_host_template myrs_template = {
.proc_name = "myrs",
.queuecommand = myrs_queuecommand,
.eh_host_reset_handler = myrs_host_reset,
- .slave_alloc = myrs_slave_alloc,
- .slave_configure = myrs_slave_configure,
- .slave_destroy = myrs_slave_destroy,
+ .sdev_init = myrs_sdev_init,
+ .sdev_configure = myrs_sdev_configure,
+ .sdev_destroy = myrs_sdev_destroy,
.cmd_size = sizeof(struct myrs_cmdblk),
.shost_groups = myrs_shost_groups,
.sdev_groups = myrs_sdev_groups,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 35869b4f9329..14ac81ec0aa0 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7786,7 +7786,7 @@ static void __init ncr_getclock (struct ncb *np, int mult)
/*===================== LINUX ENTRY POINTS SECTION ==========================*/
-static int ncr53c8xx_slave_alloc(struct scsi_device *device)
+static int ncr53c8xx_sdev_init(struct scsi_device *device)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -7796,7 +7796,8 @@ static int ncr53c8xx_slave_alloc(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_slave_configure(struct scsi_device *device)
+static int ncr53c8xx_sdev_configure(struct scsi_device *device,
+ struct queue_limits *lim)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -8093,8 +8094,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
tpnt->shost_groups = ncr53c8xx_host_groups;
tpnt->queuecommand = ncr53c8xx_queue_command;
- tpnt->slave_configure = ncr53c8xx_slave_configure;
- tpnt->slave_alloc = ncr53c8xx_slave_alloc;
+ tpnt->sdev_configure = ncr53c8xx_sdev_configure;
+ tpnt->sdev_init = ncr53c8xx_sdev_init;
tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset;
tpnt->can_queue = SCSI_NCR_CAN_QUEUE;
tpnt->this_id = 7;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b7987019686e..abc4ce9eae74 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -66,7 +66,7 @@ static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
-static struct pci_device_id nsp32_pci_table[] = {
+static const struct pci_device_id nsp32_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 7871e29a820a..4e19d61dffbb 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -90,7 +90,7 @@ enum port_type {
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
-#define PM8001_RESERVE_SLOT 8
+#define PM8001_RESERVE_SLOT 128
#define PM8001_SECTOR_SIZE 512
#define PM8001_PAGE_SIZE_4K 4096
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dec1e2d380f1..42a4eeac24c9 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3472,12 +3472,13 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status, tag, scp);
switch (status) {
case IO_SUCCESS:
- pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
+ pm8001_dbg(pm8001_ha, FAIL, "ABORT IO_SUCCESS for tag %#x\n",
+ tag);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
- pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
+ pm8001_dbg(pm8001_ha, FAIL, "IO_NOT_VALID for tag %#x\n", tag);
ts->resp = TMF_RESP_FUNC_FAILED;
break;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 2a7822fd613e..599410bcdfea 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
return -EIO;
}
time_remaining = wait_for_completion_timeout(&completion,
- msecs_to_jiffies(60*1000)); // 1 min
+ secs_to_jiffies(60)); // 1 min
if (!time_remaining) {
kfree(payload.func_specific);
pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
@@ -1435,7 +1435,7 @@ err_out_disable:
/* update of pci device, vendor id and driver data with
* unique value for each of the controller
*/
-static struct pci_device_id pm8001_pci_table[] = {
+static const struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
{ PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d80cffd25a6e..183ce00aa671 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -101,6 +101,63 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
return 0;
}
+static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
+ int *ata_tag, bool *task_aborted)
+{
+ unsigned long flags;
+ struct ata_queued_cmd *qc = NULL;
+
+ *ata_op = 0;
+ *ata_tag = -1;
+ *task_aborted = false;
+
+ if (!task)
+ return;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
+ *task_aborted = true;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (task->task_proto == SAS_PROTOCOL_STP) {
+ // sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
+ // This only works for scsi + libsas + libata users.
+ qc = task->uldd_task;
+ if (qc) {
+ *ata_op = qc->tf.command;
+ *ata_tag = qc->tag;
+ }
+ }
+}
+
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *target_pm8001_dev)
+{
+ int i = 0, ata_op = 0, ata_tag = -1;
+ struct pm8001_ccb_info *ccb = NULL;
+ struct sas_task *task = NULL;
+ struct pm8001_device *pm8001_dev = NULL;
+ bool task_aborted;
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+ pm8001_dev = ccb->device;
+ if (target_pm8001_dev && pm8001_dev &&
+ target_pm8001_dev != pm8001_dev)
+ continue;
+ task = ccb->task;
+ pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
+ ccb->ccb_tag,
+ (pm8001_dev ? pm8001_dev->device_id : 0),
+ task, task_aborted,
+ ata_op, ata_tag);
+ }
+}
+
/**
* pm8001_mem_alloc - allocate memory for pm8001.
* @pdev: pci device.
@@ -374,23 +431,6 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
- /* Find the local port id that's attached to this device */
-static int sas_find_local_port_id(struct domain_device *dev)
-{
- struct domain_device *pdev = dev->parent;
-
- /* Directly attached device */
- if (!pdev)
- return dev->port->id;
- while (pdev) {
- struct domain_device *pdev_p = pdev->parent;
- if (!pdev_p)
- return pdev->port->id;
- pdev = pdev->parent;
- }
- return 0;
-}
-
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
@@ -463,10 +503,10 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
if (!internal_abort &&
- (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
if (sas_protocol_ata(task_proto)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index d3bd8683f344..315f6a7523f0 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -786,6 +786,8 @@ static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
}
void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index e65951dd2024..5b373c53c036 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2246,7 +2246,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 param;
u32 status;
u32 tag;
- int i, j;
+ int i, j, ata_tag = -1;
u8 sata_addr_low[4];
u32 temp_sata_addr_low, temp_sata_addr_hi;
u8 sata_addr_hi[4];
@@ -2256,6 +2256,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
unsigned long flags;
+ struct ata_queued_cmd *qc;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
@@ -2267,8 +2268,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dev = ccb->device;
if (t) {
- if (t->dev && (t->dev->lldd_dev))
+ if (t->dev && (t->dev->lldd_dev)) {
pm8001_dev = t->dev->lldd_dev;
+ qc = t->uldd_task;
+ ata_tag = qc ? qc->tag : -1;
+ }
} else {
pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n",
ccb->ccb_tag);
@@ -2276,16 +2280,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
return;
}
-
if (pm8001_dev && unlikely(!t->lldd_task || !t->dev))
return;
ts = &t->task_status;
-
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
- "IO failed device_id %u status 0x%x tag %d\n",
- pm8001_dev->device_id, status, tag);
+ "IO failed status %#x pm80xx tag %#x ata tag %d\n",
+ status, tag, ata_tag);
}
/* Print sas address of IO failed device */
@@ -2667,13 +2669,19 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+ /* tag value is invalid with this event */
+ pm8001_dbg(pm8001_ha, FAIL, "NCQ ERROR for device %#x tag %#x\n",
+ dev_id, tag);
+
/* find device using device id */
pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
/* send read log extension by aborting the link - libata does what we want */
- if (pm8001_dev)
+ if (pm8001_dev) {
+ pm80xx_show_pending_commands(pm8001_ha, pm8001_dev);
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_XFER_ERROR_ABORTED_NCQ_MODE);
+ }
return;
}
@@ -3336,10 +3344,11 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_id =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, INIT,
- "phy start resp status:0x%x, phyid:0x%x\n",
- status, phy_id);
+ "phy start resp status:0x%x, phyid:0x%x, tag 0x%x\n",
+ status, phy_id, tag);
if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
@@ -3348,6 +3357,8 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
complete(phy->enable_completion);
phy->enable_completion = NULL;
}
+
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3628,8 +3639,10 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phyid =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
- pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
- phyid, status);
+ u32 tag = le32_to_cpu(pPayload->tag);
+
+ pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x tag 0x%x\n", phyid,
+ status, tag);
if (status == PHY_STOP_SUCCESS ||
status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
@@ -3637,6 +3650,7 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->sas_phy.linkrate = SAS_PHY_DISABLED;
}
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3655,10 +3669,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, MSG,
- "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
- status, err_qlfr_pgcd);
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x tag 0x%x\n",
+ status, err_qlfr_pgcd, tag);
pm8001_tag_free(pm8001_ha, tag);
-
return 0;
}
@@ -4632,9 +4645,16 @@ static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTART;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
@@ -4670,9 +4690,16 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTOP;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
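The pm80xx phy start/stop changes pair a real tag allocation in the request path with a matching pm8001_tag_free() in the response handler, instead of hard-coding tag 0x01. Sketch of the request side (condensed; error handling as in the hunk):

	u32 tag;
	int ret;

	ret = pm8001_tag_alloc(pm8001_ha, &tag);
	if (ret)
		return ret;		/* no tag, no request */

	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	payload.phy_id = cpu_to_le32(phy_id);
	/* ... build and post the command; the completion handler
	 * (mpi_phy_start_resp/mpi_phy_stop_resp) frees the tag. */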
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4c5881917d76..3ba53916fd86 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -113,7 +113,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
/*
* PCI device ids supported by pmcraid driver
*/
-static struct pci_device_id pmcraid_pci_table[] = {
+static const struct pci_device_id pmcraid_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
@@ -125,7 +125,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
/**
- * pmcraid_slave_alloc - Prepare for commands to a device
+ * pmcraid_sdev_init - Prepare for commands to a device
* @scsi_dev: scsi device struct
*
* This function is called by mid-layer prior to sending any command to the new
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
* Return value:
* 0 on success / -ENXIO if device does not exist
*/
-static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+static int pmcraid_sdev_init(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *temp, *res = NULL;
struct pmcraid_instance *pinstance;
@@ -197,7 +197,7 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
}
/**
- * pmcraid_device_configure - Configures a SCSI device
+ * pmcraid_sdev_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
* @lim: queue limits
*
@@ -210,8 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
* Return value:
* 0 on success
*/
-static int pmcraid_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+static int pmcraid_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
@@ -248,17 +248,17 @@ static int pmcraid_device_configure(struct scsi_device *scsi_dev,
}
/**
- * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ * pmcraid_sdev_destroy - Unconfigure a SCSI device before removing it
*
* @scsi_dev: scsi device struct
*
* This is called by mid-layer before removing a device. Pointer assignments
- * done in pmcraid_slave_alloc will be reset to NULL here.
+ * done in pmcraid_sdev_init will be reset to NULL here.
*
* Return value
* none
*/
-static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+static void pmcraid_sdev_destroy(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res;
@@ -3668,9 +3668,9 @@ static const struct scsi_host_template pmcraid_host_template = {
.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
- .slave_alloc = pmcraid_slave_alloc,
- .device_configure = pmcraid_device_configure,
- .slave_destroy = pmcraid_slave_destroy,
+ .sdev_init = pmcraid_sdev_init,
+ .sdev_configure = pmcraid_sdev_configure,
+ .sdev_destroy = pmcraid_sdev_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 90495a832f34..92fe5c5c5bb0 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -61,7 +61,8 @@ enum lv1_atapi_in_out {
};
-static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
+static int ps3rom_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct ps3rom_private *priv = shost_priv(scsi_dev->host);
struct ps3_storage_device *dev = priv->dev;
@@ -325,7 +326,7 @@ done:
static const struct scsi_host_template ps3rom_host_template = {
.name = DEVICE_NAME,
- .slave_configure = ps3rom_slave_configure,
+ .sdev_configure = ps3rom_sdev_configure,
.queuecommand = ps3rom_queuecommand,
.can_queue = 1,
.this_id = 7,
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 8d8c760eee43..769da92ee20d 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -104,7 +104,7 @@ void qedf_capture_grc_dump(struct qedf_ctx *qedf)
static ssize_t
qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
ssize_t ret = 0;
@@ -124,7 +124,7 @@ qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
static ssize_t
qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct fc_lport *lport = NULL;
@@ -160,14 +160,14 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_grcdump_attr = {
+static const struct bin_attribute sysfs_grcdump_attr = {
.attr = {
.name = "grcdump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qedf_sysfs_read_grcdump,
- .write = qedf_sysfs_write_grcdump,
+ .read_new = qedf_sysfs_read_grcdump,
+ .write_new = qedf_sysfs_write_grcdump,
};
static struct sysfs_bin_attrs bin_file_entries[] = {
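The qedf sysfs changes follow the generic bin_attribute const-ification: handlers now take a const struct bin_attribute *, and const attributes are wired up through the .read_new/.write_new members. A minimal sketch with a hypothetical attribute (not from the patch):

#include <linux/sysfs.h>

static ssize_t foo_blob_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	/* copy up to @count bytes at offset @off into @buf, return bytes copied */
	return 0;
}

static const struct bin_attribute foo_blob_attr = {
	.attr = { .name = "blob", .mode = 0400 },
	.size = 0,
	.read_new = foo_blob_read,
};

Registration is unchanged; sysfs_create_bin_file() already takes a const pointer.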
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 5ec2b817c694..eeb6c841dacb 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -100,7 +100,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d2f47dc31dbf..436bd29d5eba 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -982,7 +982,8 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
return SUCCESS;
}
-static int qedf_slave_configure(struct scsi_device *sdev)
+static int qedf_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (qedf_queue_depth) {
scsi_change_queue_depth(sdev, qedf_queue_depth);
@@ -1003,7 +1004,7 @@ static const struct scsi_host_template qedf_host_template = {
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
.eh_host_reset_handler = qedf_eh_host_reset,
- .slave_configure = qedf_slave_configure,
+ .sdev_configure = qedf_sdev_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index fdda12ef13b0..5a1ec4542183 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -91,7 +91,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
int qedi_create_sysfs_attr(struct Scsi_Host *shost,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 628d59dda20c..c9539897048a 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1961,13 +1961,11 @@ static int qedi_cpu_online(unsigned int cpu)
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct task_struct *thread;
- thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "qedi_thread/%d", cpu);
+ thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p,
+ cpu, "qedi_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@@ -2869,7 +2867,7 @@ static void qedi_remove(struct pci_dev *pdev)
__qedi_remove(pdev, QEDI_MODE_NORMAL);
}
-static struct pci_device_id qedi_pci_tbl[] = {
+static const struct pci_device_id qedi_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
{ 0 },
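The qedi hotplug change above relies on kthread_create_on_cpu() allocating the thread on the CPU's node and binding it, which is why the explicit kthread_bind() call is dropped. A standalone sketch of that pattern (hypothetical names):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int foo_io_thread(void *data)
{
	/* per-CPU worker loop would live here */
	return 0;
}

static int foo_cpu_online(unsigned int cpu)
{
	struct task_struct *t;

	/* allocates on cpu_to_node(cpu) and binds to @cpu in one call;
	 * the format string needs exactly one conversion for the CPU number */
	t = kthread_create_on_cpu(foo_io_thread, NULL, cpu, "foo_io/%u");
	if (IS_ERR(t))
		return PTR_ERR(t);
	wake_up_process(t);
	return 0;
}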
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8958547ac111..47d74f881948 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -501,7 +501,7 @@ struct qla_boards {
};
/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
-static struct pci_device_id qla1280_pci_tbl[] = {
+static const struct pci_device_id qla1280_pci_tbl[] = {
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
@@ -1159,7 +1159,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
/**************************************************************************
- * qla1280_slave_configure
+ * qla1280_sdev_configure
*
* Description:
* Determines the queue depth for a given device. There are two ways
@@ -1170,7 +1170,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
* default queue depth (dependent on the number of hardware SCBs).
**************************************************************************/
static int
-qla1280_slave_configure(struct scsi_device *device)
+qla1280_sdev_configure(struct scsi_device *device, struct queue_limits *lim)
{
struct scsi_qla_host *ha;
int default_depth = 3;
@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(upper_32_bits(dma_handle)),
cpu_to_le32(lower_32_bits(dma_handle)),
- cpu_to_le32(sg_dma_len(sg_next(s))));
+ cpu_to_le32(sg_dma_len(s)));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
@@ -4121,7 +4121,7 @@ static const struct scsi_host_template qla1280_driver_template = {
.proc_name = "qla1280",
.name = "Qlogic ISP 1280/12160",
.info = qla1280_info,
- .slave_configure = qla1280_slave_configure,
+ .sdev_configure = qla1280_sdev_configure,
.queuecommand = qla1280_queuecommand,
.eh_abort_handler = qla1280_eh_abort,
.eh_device_reset_handler= qla1280_eh_device_reset,
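The qla1280 hunk above fixes an off-by-one-entry bug: the loop cursor is already advanced, so the segment length must come from the current entry, not from sg_next(). A small sketch of the intended iteration (hypothetical helper):

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void foo_dump_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);

		/* sg_dma_len(sg), not sg_dma_len(sg_next(sg)) */
		pr_debug("seg %d: dma %pad len 0x%x\n", i, &addr, sg_dma_len(sg));
	}
}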
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e6ece30c4348..dcb0c2af1fa7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -17,7 +17,7 @@ static int qla24xx_vport_disable(struct fc_vport *, bool);
static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -58,7 +58,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -168,19 +168,19 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_fw_dump,
- .write = qla2x00_sysfs_write_fw_dump,
+ .read_new = qla2x00_sysfs_read_fw_dump,
+ .write_new = qla2x00_sysfs_write_fw_dump,
};
static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -220,7 +220,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -282,19 +282,19 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_nvram_attr = {
+static const struct bin_attribute sysfs_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUSR | S_IWUSR,
},
.size = 512,
- .read = qla2x00_sysfs_read_nvram,
- .write = qla2x00_sysfs_write_nvram,
+ .read_new = qla2x00_sysfs_read_nvram,
+ .write_new = qla2x00_sysfs_write_nvram,
};
static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -318,7 +318,7 @@ out:
static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -344,19 +344,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_optrom_attr = {
+static const struct bin_attribute sysfs_optrom_attr = {
.attr = {
.name = "optrom",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_optrom,
- .write = qla2x00_sysfs_write_optrom,
+ .read_new = qla2x00_sysfs_read_optrom,
+ .write_new = qla2x00_sysfs_write_optrom,
};
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -529,18 +529,18 @@ out:
return rval;
}
-static struct bin_attribute sysfs_optrom_ctl_attr = {
+static const struct bin_attribute sysfs_optrom_ctl_attr = {
.attr = {
.name = "optrom_ctl",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_optrom_ctl,
+ .write_new = qla2x00_sysfs_write_optrom_ctl,
};
static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -587,7 +587,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -642,19 +642,19 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_vpd_attr = {
+static const struct bin_attribute sysfs_vpd_attr = {
.attr = {
.name = "vpd",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_vpd,
- .write = qla2x00_sysfs_write_vpd,
+ .read_new = qla2x00_sysfs_read_vpd,
+ .write_new = qla2x00_sysfs_write_vpd,
};
static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -679,18 +679,18 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_sfp_attr = {
+static const struct bin_attribute sysfs_sfp_attr = {
.attr = {
.name = "sfp",
.mode = S_IRUSR | S_IWUSR,
},
.size = SFP_DEV_SIZE,
- .read = qla2x00_sysfs_read_sfp,
+ .read_new = qla2x00_sysfs_read_sfp,
};
static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -823,19 +823,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_reset_attr = {
+static const struct bin_attribute sysfs_reset_attr = {
.attr = {
.name = "reset",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_reset,
+ .write_new = qla2x00_sysfs_write_reset,
};
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -866,18 +866,18 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_issue_logo_attr = {
+static const struct bin_attribute sysfs_issue_logo_attr = {
.attr = {
.name = "issue_logo",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_issue_logo,
+ .write_new = qla2x00_issue_logo,
};
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -929,18 +929,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_xgmac_stats_attr = {
+static const struct bin_attribute sysfs_xgmac_stats_attr = {
.attr = {
.name = "xgmac_stats",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_xgmac_stats,
+ .read_new = qla2x00_sysfs_read_xgmac_stats,
};
static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -987,18 +987,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_dcbx_tlv_attr = {
+static const struct bin_attribute sysfs_dcbx_tlv_attr = {
.attr = {
.name = "dcbx_tlv",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_dcbx_tlv,
+ .read_new = qla2x00_sysfs_read_dcbx_tlv,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
int type;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 15066c112817..cb95b7b12051 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4098,6 +4098,8 @@ struct qla_hw_data {
uint32_t npiv_supported :1;
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
+ uint32_t user_enabled_fce :1;
+ uint32_t fce_dump_buf_alloced :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a1545dad0c0c..08273520c777 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
mutex_lock(&ha->fce_mutex);
- seq_puts(s, "FCE Trace Buffer\n");
- seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
- seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
- seq_puts(s, "FCE Enable Registers\n");
- seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
- ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
- ha->fce_mb[5], ha->fce_mb[6]);
-
- fce = (uint32_t *) ha->fce;
- fce_start = (unsigned long long) ha->fce_dma;
- for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
- if (cnt % 8 == 0)
- seq_printf(s, "\n%llx: ",
- (unsigned long long)((cnt * 4) + fce_start));
- else
- seq_putc(s, ' ');
- seq_printf(s, "%08x", *fce++);
- }
+ if (ha->flags.user_enabled_fce) {
+ seq_puts(s, "FCE Trace Buffer\n");
+ seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+ seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
+ seq_puts(s, "FCE Enable Registers\n");
+ seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+ ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+ ha->fce_mb[5], ha->fce_mb[6]);
+
+ fce = (uint32_t *)ha->fce;
+ fce_start = (unsigned long long)ha->fce_dma;
+ for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+ if (cnt % 8 == 0)
+ seq_printf(s, "\n%llx: ",
+ (unsigned long long)((cnt * 4) + fce_start));
+ else
+ seq_putc(s, ' ');
+ seq_printf(s, "%08x", *fce++);
+ }
- seq_puts(s, "\nEnd\n");
+ seq_puts(s, "\nEnd\n");
+ } else {
+ seq_puts(s, "FCE Trace is currently not enabled\n");
+ seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
+ }
mutex_unlock(&ha->fce_mutex);
@@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
struct qla_hw_data *ha = vha->hw;
int rval;
- if (ha->flags.fce_enabled)
+ if (ha->flags.fce_enabled || !ha->fce)
goto out;
mutex_lock(&ha->fce_mutex);
@@ -488,11 +493,88 @@ out:
return single_release(inode, file);
}
+static ssize_t
+qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ char *buf;
+ int rc = 0;
+ unsigned long enable;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0xd034,
+ "this adapter does not support FCE.");
+ return -EINVAL;
+ }
+
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf)) {
+ ql_dbg(ql_dbg_user, vha, 0xd037,
+ "fail to copy user buffer.");
+ return PTR_ERR(buf);
+ }
+
+ rc = kstrtoul(buf, 0, &enable);
+ if (rc)
+ goto out_free;
+ rc = count;
+
+ mutex_lock(&ha->fce_mutex);
+
+ if (enable) {
+ if (ha->flags.user_enabled_fce) {
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+ ha->flags.user_enabled_fce = 1;
+ if (!ha->fce) {
+ rc = qla2x00_alloc_fce_trace(vha);
+ if (rc) {
+ ha->flags.user_enabled_fce = 0;
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+
+ /* adjust fw dump buffer to take this feature into account */
+ if (!ha->flags.fce_dump_buf_alloced)
+ qla2x00_alloc_fw_dump(vha);
+ }
+
+ if (!ha->flags.fce_enabled)
+ qla_enable_fce_trace(vha);
+
+ ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE.\n");
+ } else {
+ if (!ha->flags.user_enabled_fce) {
+ mutex_unlock(&ha->fce_mutex);
+ goto out_free;
+ }
+ ha->flags.user_enabled_fce = 0;
+ if (ha->flags.fce_enabled) {
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
+ ha->flags.fce_enabled = 0;
+ }
+
+ qla2x00_free_fce_trace(ha);
+ /* no need to re-adjust fw dump buffer */
+
+ ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE.\n");
+ }
+
+ mutex_unlock(&ha->fce_mutex);
+out_free:
+ kfree(buf);
+ return rc;
+}
+
static const struct file_operations dfs_fce_ops = {
.open = qla2x00_dfs_fce_open,
.read = seq_read,
.llseek = seq_lseek,
.release = qla2x00_dfs_fce_release,
+ .write = qla2x00_dfs_fce_write,
};
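For context, the fce node follows the common pattern of a seq_file-backed debugfs file that also accepts writes. A hedged generic sketch (hypothetical names; the real handler is qla2x00_dfs_fce_write() shown above):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_dfs_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "current state\n");
	return 0;
}

static int foo_dfs_open(struct inode *inode, struct file *file)
{
	/* the seq_file's ->private becomes inode->i_private, as in the fce code */
	return single_open(file, foo_dfs_show, inode->i_private);
}

static ssize_t foo_dfs_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *pos)
{
	/* parse ubuf (e.g. with kstrtobool_from_user()) and apply the new state */
	return count;
}

static const struct file_operations foo_dfs_ops = {
	.open    = foo_dfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.write   = foo_dfs_write,
};

The node would then be created with debugfs_create_file("state", 0600, parent, data, &foo_dfs_ops).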
static int
@@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
goto out;
- if (!ha->fce)
- goto out;
if (qla2x00_dfs_root)
goto create_dir;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index cededfda9d0e..e556f57c91af 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -11,6 +11,9 @@
/*
* Global Function Prototypes in qla_init.c source file.
*/
+int qla2x00_alloc_fce_trace(scsi_qla_host_t *);
+void qla2x00_free_fce_trace(struct qla_hw_data *ha);
+void qla_enable_fce_trace(scsi_qla_host_t *);
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 31fc6a0eca3e..79cdfec2bca3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2681,7 +2681,7 @@ exit:
return rval;
}
-static void qla_enable_fce_trace(scsi_qla_host_t *vha)
+void qla_enable_fce_trace(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
@@ -3717,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-static void
-qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
if (!IS_FWI2_CAPABLE(ha))
- return;
+ return -EINVAL;
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- return;
+ return -EINVAL;
if (ha->fce) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
"%s: FCE Mem is already allocated.\n",
__func__);
- return;
+ return -EIO;
}
/* Allocate memory for Fibre Channel Event Buffer. */
@@ -3745,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
FCE_SIZE / 1024);
- return;
+ return -ENOMEM;
}
ql_dbg(ql_dbg_init, vha, 0x00c0,
@@ -3754,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
ha->fce_dma = tc_dma;
ha->fce = tc;
ha->fce_bufs = FCE_NUM_BUFFERS;
+ return 0;
+}
+
+void qla2x00_free_fce_trace(struct qla_hw_data *ha)
+{
+ if (!ha->fce)
+ return;
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma);
+ ha->fce = NULL;
+ ha->fce_dma = 0;
}
static void
@@ -3844,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- qla2x00_alloc_fce_trace(vha);
- if (ha->fce)
+ if (ha->fce) {
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ ha->flags.fce_dump_buf_alloced = 1;
+ }
qla2x00_alloc_eft_trace(vha);
if (ha->eft)
eft_size = EFT_SIZE;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 31535beaaa16..6b9b8218b512 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1933,7 +1933,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
}
static int
-qla2xxx_slave_alloc(struct scsi_device *sdev)
+qla2xxx_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -1946,7 +1946,7 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
}
static int
-qla2xxx_slave_configure(struct scsi_device *sdev)
+qla2xxx_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
@@ -1956,7 +1956,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
}
static void
-qla2xxx_slave_destroy(struct scsi_device *sdev)
+qla2xxx_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -8087,10 +8087,10 @@ struct scsi_host_template qla2xxx_driver_template = {
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
.eh_host_reset_handler = qla2xxx_eh_host_reset,
- .slave_configure = qla2xxx_slave_configure,
+ .sdev_configure = qla2xxx_sdev_configure,
- .slave_alloc = qla2xxx_slave_alloc,
- .slave_destroy = qla2xxx_slave_destroy,
+ .sdev_init = qla2xxx_sdev_init,
+ .sdev_destroy = qla2xxx_sdev_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
@@ -8116,7 +8116,7 @@ static const struct pci_error_handlers qla2xxx_err_handler = {
.reset_done = qla_pci_reset_done,
};
-static struct pci_device_id qla2xxx_pci_tbl[] = {
+static const struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index abfa6ef60480..e3f85d6ea0db 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -10,7 +10,7 @@
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -28,7 +28,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -104,19 +104,19 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla4_8xxx_sysfs_read_fw_dump,
- .write = qla4_8xxx_sysfs_write_fw_dump,
+ .read_new = qla4_8xxx_sysfs_read_fw_dump,
+ .write_new = qla4_8xxx_sysfs_write_fw_dump,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr },
{ NULL },
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index d91f54a6e752..6b0e6b4cd8af 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -160,7 +160,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
-static int qla4xxx_slave_alloc(struct scsi_device *device);
+static int qla4xxx_sdev_init(struct scsi_device *device);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
@@ -234,7 +234,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.eh_host_reset_handler = qla4xxx_eh_host_reset,
.eh_timed_out = qla4xxx_eh_cmd_timed_out,
- .slave_alloc = qla4xxx_slave_alloc,
+ .sdev_init = qla4xxx_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
@@ -7189,7 +7189,8 @@ exit_new_nt_list:
* 1: if flashnode entry is non-persistent
* 0: if flashnode entry is persistent
**/
-static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev,
+ const void *data)
{
struct iscsi_bus_flash_session *fnode_sess;
@@ -9052,7 +9053,7 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
}
}
-static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+static int qla4xxx_sdev_init(struct scsi_device *sdev)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -9846,7 +9847,7 @@ static const struct pci_error_handlers qla4xxx_err_handler = {
.resume = qla4xxx_pci_resume,
};
-static struct pci_device_id qla4xxx_pci_tbl[] = {
+static const struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4010,
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 74866b9f2b14..c9984ef57f26 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -975,7 +975,8 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
-static int qlogicpti_slave_configure(struct scsi_device *sdev)
+static int qlogicpti_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct qlogicpti *qpti = shost_priv(sdev->host);
int tgt = sdev->id;
@@ -1292,7 +1293,7 @@ static const struct scsi_host_template qpti_template = {
.name = "qlogicpti",
.info = qlogicpti_info,
.queuecommand = qlogicpti_queuecommand,
- .slave_configure = qlogicpti_slave_configure,
+ .sdev_configure = qlogicpti_sdev_configure,
.eh_abort_handler = qlogicpti_abort,
.eh_host_reset_handler = qlogicpti_reset,
.can_queue = QLOGICPTI_REQ_QUEUE_LEN,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 680ba180a672..5ceaa4665e5d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5879,23 +5879,24 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
return open_devip;
}
-static int scsi_debug_slave_alloc(struct scsi_device *sdp)
+static int scsi_debug_sdev_init(struct scsi_device *sdp)
{
if (sdebug_verbose)
- pr_info("slave_alloc <%u %u %u %llu>\n",
+ pr_info("sdev_init <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
return 0;
}
-static int scsi_debug_slave_configure(struct scsi_device *sdp)
+static int scsi_debug_sdev_configure(struct scsi_device *sdp,
+ struct queue_limits *lim)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct dentry *dentry;
if (sdebug_verbose)
- pr_info("slave_configure <%u %u %u %llu>\n",
+ pr_info("sdev_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
@@ -5927,14 +5928,14 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
return 0;
}
-static void scsi_debug_slave_destroy(struct scsi_device *sdp)
+static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct sdebug_err_inject *err;
if (sdebug_verbose)
- pr_info("slave_destroy <%u %u %u %llu>\n",
+ pr_info("sdev_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (!devip)
@@ -8706,15 +8707,15 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
return 0;
}
-static struct scsi_host_template sdebug_driver_template = {
+static const struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
.proc_name = sdebug_proc_name,
.name = "SCSI DEBUG",
.info = scsi_debug_info,
- .slave_alloc = scsi_debug_slave_alloc,
- .slave_configure = scsi_debug_slave_configure,
- .slave_destroy = scsi_debug_slave_destroy,
+ .sdev_init = scsi_debug_sdev_init,
+ .sdev_configure = scsi_debug_sdev_configure,
+ .sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
@@ -8732,6 +8733,7 @@ static struct scsi_host_template sdebug_driver_template = {
.max_sectors = -1U,
.max_segment_size = -1U,
.module = THIS_MODULE,
+ .skip_settle_delay = 1,
.track_queue_depth = 1,
.cmd_size = sizeof(struct sdebug_scsi_cmd),
.init_cmd_priv = sdebug_init_cmd_priv,
@@ -8748,17 +8750,17 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = dev_to_sdebug_host(dev);
- sdebug_driver_template.can_queue = sdebug_max_queue;
- sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
- if (!sdebug_clustering)
- sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
-
hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
return error;
}
+ hpnt->can_queue = sdebug_max_queue;
+ hpnt->cmd_per_lun = sdebug_max_queue;
+ if (!sdebug_clustering)
+ hpnt->dma_boundary = PAGE_SIZE - 1;
+
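With sdebug_driver_template now const, the per-instance tunables move onto the allocated Scsi_Host, as the hunk above shows. A hedged sketch of that probe-time pattern (foo_template is the hypothetical template sketched earlier; foo_max_queue is illustrative):

#include <scsi/scsi_host.h>

static unsigned int foo_max_queue = 192;	/* illustrative default */

static int foo_probe(struct device *dev)
{
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&foo_template, 0);
	if (!shost)
		return -ENOMEM;

	/* per-instance values are set on the host, not in the const template */
	shost->can_queue = foo_max_queue;
	shost->cmd_per_lun = foo_max_queue;
	shost->dma_boundary = PAGE_SIZE - 1;

	return scsi_add_host(shost, dev);
}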
if (submit_queues > nr_cpu_ids) {
pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
my_name, submit_queues, nr_cpu_ids);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 10154d78e336..815e7d63f3e2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2363,14 +2363,14 @@ int scsi_error_handler(void *data)
return 0;
}
-/*
- * Function: scsi_report_bus_reset()
+/**
+ * scsi_report_bus_reset() - report bus reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a bus reset on the bus being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a bus reset on the bus being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed.
+ * @shost: Host in question
+ * @channel: channel on which reset was observed.
*
* Returns: Nothing
*
@@ -2395,15 +2395,15 @@ void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
}
EXPORT_SYMBOL(scsi_report_bus_reset);
-/*
- * Function: scsi_report_device_reset()
+/**
+ * scsi_report_device_reset() - report device reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a device reset on the device being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a device reset on the device being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed
- * target - target on which reset was observed
+ * @shost: Host in question
+ * @channel: channel on which reset was observed
+ * @target: target on which reset was observed
*
* Returns: Nothing
*
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 6f6c5973c3ea..2fa45556e1ea 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -37,8 +37,10 @@
* @host: host to identify
* @buffer: userspace buffer for identification
*
- * Return an identifying string at @buffer, if @buffer is non-NULL, filling
- * to the length stored at * (int *) @buffer.
+ * Return:
+ * * if successful, %1 and an identifying string at @buffer, if @buffer
+ * is non-NULL, filling to the length stored at * (int *) @buffer.
+ * * <0 error code on failure.
*/
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
@@ -121,6 +123,16 @@ out:
return result;
}
+/**
+ * scsi_set_medium_removal() - send command to allow or prevent medium removal
+ * @sdev: target scsi device
+ * @state: removal state to set (prevent or allow)
+ *
+ * Returns:
+ * * %0 if @sdev is not removable or not lockable or successful.
+ * * non-%0 is a SCSI result code if > 0 or kernel error code if < 0.
+ * * Sets @sdev->locked to the new state on success.
+ */
int scsi_set_medium_removal(struct scsi_device *sdev, char state)
{
char scsi_cmd[MAX_COMMAND_SIZE];
@@ -242,11 +254,15 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data)
NORMAL_RETRIES);
}
-/*
- * Check if the given command is allowed.
+/**
+ * scsi_cmd_allowed() - Check if the given command is allowed.
+ * @cmd: SCSI command to check
+ * @open_for_write: is the file / block device opened for writing?
*
* Only a subset of commands are allowed for unprivileged users. Commands used
* to format the media, update the firmware, etc. are not permitted.
+ *
+ * Return: %true if the cmd is allowed, otherwise %false.
*/
bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write)
{
@@ -859,6 +875,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write,
* Description: The scsi_ioctl() function differs from most ioctls in that it
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
+ *
+ * Return: varies depending on the @cmd
*/
int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
void __user *arg)
@@ -941,8 +959,15 @@ int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
}
EXPORT_SYMBOL(scsi_ioctl);
-/*
+/**
+ * scsi_ioctl_block_when_processing_errors - prevent commands from being queued
+ * @sdev: target scsi device
+ * @cmd: which ioctl is it
+ * @ndelay: no delay (non-blocking)
+ *
* We can process a reset even when a device isn't fully operable.
+ *
+ * Return: %0 on success, <0 error code.
*/
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd,
bool ndelay)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4411426a7894..be0890e4e706 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,6 +184,10 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+/**
+ * scsi_failures_reset_retries - reset all failures to zero
+ * @failures: &struct scsi_failures with specific failure modes set
+ */
void scsi_failures_reset_retries(struct scsi_failures *failures)
{
struct scsi_failure *failure;
@@ -868,13 +872,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
- case 0x24: /* depopulation in progress */
- case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
action = ACTION_DELAYED_REPREP;
break;
+ /*
+ * Depopulation might take many hours,
+ * thus it is not worthwhile to retry.
+ */
+ case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
+ fallthrough;
default:
action = ACTION_FAIL;
break;
@@ -1217,6 +1226,15 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+/**
+ * scsi_alloc_request - allocate a block request and partially
+ * initialize its &scsi_cmnd
+ * @q: the device's request queue
+ * @opf: the request operation code
+ * @flags: block layer allocation flags
+ *
+ * Return: &struct request pointer on success or %NULL on failure
+ */
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
@@ -2723,6 +2741,7 @@ int
scsi_device_quiesce(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
+ unsigned int memflags;
int err;
/*
@@ -2737,7 +2756,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
blk_set_pm_only(q);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
/*
* Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
@@ -2745,7 +2764,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
* was called. See also https://lwn.net/Articles/573497/.
*/
synchronize_rcu();
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
@@ -3367,14 +3386,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
-/*
+/**
* scsi_vpd_tpg_id - return a target port group identifier
* @sdev: SCSI device
+ * @rel_id: pointer to return relative target port in if not %NULL
*
* Returns the Target Port Group identifier from the information
- * froom VPD page 0x83 of the device.
+ * from VPD page 0x83 of the device.
+ * Optionally sets @rel_id to the relative target port on success.
*
- * Returns the identifier or error on failure.
+ * Return: the identifier or error on failure.
*/
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
index 99834426a100..ae8af0e0047a 100644
--- a/drivers/scsi/scsi_lib_test.c
+++ b/drivers/scsi/scsi_lib_test.c
@@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test)
};
int i;
+ /* Success */
+ sc.result = 0;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
+ /* Command failed but caller did not pass in a failures array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
/* Match end of array */
scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 042329b74c6e..96d7e1a9a7c7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -220,6 +220,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int new_shift = sbitmap_calculate_shift(depth);
bool need_alloc = !sdev->budget_map.map;
bool need_free = false;
+ unsigned int memflags;
int ret;
struct sbitmap sb_backup;
@@ -227,7 +228,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
/*
* realloc if new shift is calculated, which is caused by setting
- * up one new default queue depth after calling ->device_configure
+ * up one new default queue depth after calling ->sdev_configure
*/
if (!need_alloc && new_shift != sdev->budget_map.shift)
need_alloc = need_free = true;
@@ -240,12 +241,12 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* and here disk isn't added yet, so freezing is pretty fast
*/
if (need_free) {
- blk_mq_freeze_queue(sdev->request_queue);
+ memflags = blk_mq_freeze_queue(sdev->request_queue);
sb_backup = sdev->budget_map;
}
ret = sbitmap_init_node(&sdev->budget_map,
scsi_device_max_queue_depth(sdev),
- new_shift, GFP_KERNEL,
+ new_shift, GFP_NOIO,
sdev->request_queue->node, false, true);
if (!ret)
sbitmap_resize(&sdev->budget_map, depth);
@@ -256,7 +257,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
else
sbitmap_free(&sb_backup);
ret = 0;
- blk_mq_unfreeze_queue(sdev->request_queue);
+ blk_mq_unfreeze_queue(sdev->request_queue, memflags);
}
return ret;
}
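The budget-map rework above depends on the updated freeze API: blk_mq_freeze_queue() now returns a cookie that must be handed back to blk_mq_unfreeze_queue(). A minimal sketch of the pairing (hypothetical helper):

#include <linux/blk-mq.h>

static void foo_update_queue_state(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);
	/* ... reallocate or retune per-device resources while frozen ... */
	blk_mq_unfreeze_queue(q, memflags);
}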
@@ -265,7 +266,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
* @lun: which lun
- * @hostdata: usually NULL and set by ->slave_alloc instead
+ * @hostdata: usually NULL and set by ->sdev_init instead
*
* Description:
* Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -312,11 +313,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_gendev.parent = get_device(&starget->dev);
sdev->sdev_target = starget;
- /* usually NULL and set by ->slave_alloc instead */
+ /* usually NULL and set by ->sdev_init instead */
sdev->hostdata = hostdata;
/* if the device needs this changing, it may do so in the
- * slave_configure function */
+ * sdev_configure function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
/*
@@ -363,8 +364,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_sysfs_device_initialize(sdev);
- if (shost->hostt->slave_alloc) {
- ret = shost->hostt->slave_alloc(sdev);
+ if (shost->hostt->sdev_init) {
+ ret = shost->hostt->sdev_init(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -1074,10 +1075,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_MAX_1024)
lim.max_hw_sectors = 1024;
- if (hostt->device_configure)
- ret = hostt->device_configure(sdev, &lim);
- else if (hostt->slave_configure)
- ret = hostt->slave_configure(sdev);
+ if (hostt->sdev_configure)
+ ret = hostt->sdev_configure(sdev, &lim);
if (ret) {
queue_limits_cancel_update(sdev->request_queue);
/*
@@ -1097,12 +1096,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
/*
- * The queue_depth is often changed in ->device_configure.
+ * The queue_depth is often changed in ->sdev_configure.
*
* Set up budget map again since memory consumption of the map depends
* on actual queue depth.
*/
- if (hostt->device_configure || hostt->slave_configure)
+ if (hostt->sdev_configure)
scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
if (sdev->scsi_level >= SCSI_3)
@@ -1636,6 +1635,24 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
EXPORT_SYMBOL(__scsi_add_device);
+/**
+ * scsi_add_device - creates a new SCSI (LU) instance
+ * @host: the &Scsi_Host instance where the device is located
+ * @channel: target channel number (rarely other than %0)
+ * @target: target id number
+ * @lun: LUN of target device
+ *
+ * Probe for a specific LUN and add it if found.
+ *
+ * Notes: This call is usually performed internally during a SCSI
+ * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
+ * should only be called if the HBA becomes aware of a new SCSI
+ * device (LU) after scsi_scan_host() has completed. If successful
+ * this call can lead to sdev_init() and sdev_configure() callbacks
+ * into the LLD.
+ *
+ * Return: %0 on success or negative error code on failure
+ */
int scsi_add_device(struct Scsi_Host *host, uint channel,
uint target, u64 lun)
{
@@ -2027,6 +2044,8 @@ static void do_scan_async(void *_data, async_cookie_t c)
/**
* scsi_scan_host - scan the given adapter
* @shost: adapter to scan
+ *
+ * Notes: Should be called after scsi_add_host()
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
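The new kernel-doc above spells out when an LLD should call scsi_add_device() itself. A hedged sketch of that hot-add path (hypothetical function):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void foo_handle_hot_added_lun(struct Scsi_Host *shost, uint channel,
				     uint id, u64 lun)
{
	/* only for LUs discovered after scsi_scan_host(); on success this may
	 * call back into ->sdev_init() and ->sdev_configure() */
	if (scsi_add_device(shost, channel, id, lun))
		shost_printk(KERN_WARNING, shost,
			     "failed to add %u:%u:%llu\n", channel, id, lun);
}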
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 093774d77534..be4aef0f4f99 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -12,7 +12,7 @@
#include "scsi_priv.h"
-static struct ctl_table scsi_table[] = {
+static const struct ctl_table scsi_table[] = {
{ .procname = "logging_level",
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f3a1ecb42128..d772258e29ad 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -898,7 +898,7 @@ static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
#define sdev_vpd_pg_attr(_page) \
static ssize_t \
show_vpd_##_page(struct file *filp, struct kobject *kobj, \
- struct bin_attribute *bin_attr, \
+ const struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
@@ -914,10 +914,10 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
rcu_read_unlock(); \
return ret; \
} \
-static struct bin_attribute dev_attr_vpd_##_page = { \
+static const struct bin_attribute dev_attr_vpd_##_page = { \
.attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
.size = 0, \
- .read = show_vpd_##_page, \
+ .read_new = show_vpd_##_page, \
};
sdev_vpd_pg_attr(pg83);
@@ -930,7 +930,7 @@ sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -943,13 +943,13 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
sdev->inquiry_len);
}
-static struct bin_attribute dev_attr_inquiry = {
+static const struct bin_attribute dev_attr_inquiry = {
.attr = {
.name = "inquiry",
.mode = S_IRUGO,
},
.size = 0,
- .read = show_inquiry,
+ .read_new = show_inquiry,
};
static ssize_t
@@ -1348,7 +1348,7 @@ static struct attribute *scsi_sdev_attrs[] = {
NULL
};
-static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+static const struct bin_attribute *const scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
@@ -1362,7 +1362,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
};
static struct attribute_group scsi_sdev_attr_group = {
.attrs = scsi_sdev_attrs,
- .bin_attrs = scsi_sdev_bin_attrs,
+ .bin_attrs_new = scsi_sdev_bin_attrs,
.is_visible = scsi_sdev_attr_is_visible,
.is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
@@ -1513,8 +1513,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->slave_destroy)
- sdev->host->hostt->slave_destroy(sdev);
+ if (sdev->host->hostt->sdev_destroy)
+ sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
/*
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b47f91c5b97..9c347c64c315 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1324,7 +1324,7 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
* 1 on success
* 0 on failure
*/
-static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data)
{
return dev->bus == &iscsi_flashnode_bus;
}
@@ -1335,7 +1335,7 @@ static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
return 0;
}
-static int flashnode_match_index(struct device *dev, void *data)
+static int flashnode_match_index(struct device *dev, const void *data)
{
struct iscsi_bus_flash_session *fnode_sess = NULL;
int ret = 0;
@@ -1344,7 +1344,7 @@ static int flashnode_match_index(struct device *dev, void *data)
goto exit_match_index;
fnode_sess = iscsi_dev_to_flash_session(dev);
- ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+ ret = (fnode_sess->target_id == *((const int *)data)) ? 1 : 0;
exit_match_index:
return ret;
@@ -1389,8 +1389,8 @@ iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
* %NULL on failure
*/
struct device *
-iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
- int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
+ device_match_t fn)
{
return device_find_child(&shost->shost_gendev, data, fn);
}
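The transport change above tracks the driver-core switch of device_find_child() to device_match_t, whose data pointer is const. A small sketch of a conforming match callback (hypothetical names):

#include <linux/device.h>

static int foo_match_id(struct device *dev, const void *data)
{
	const u32 *wanted = data;

	return dev->id == *wanted;
}

static struct device *foo_find_by_id(struct device *parent, u32 id)
{
	/* device_find_child() takes a reference; the caller must put_device() it */
	return device_find_child(parent, &id, foo_match_id);
}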
@@ -2122,33 +2122,6 @@ destroy_wq:
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
-/**
- * iscsi_create_session - create iscsi class session
- * @shost: scsi host
- * @transport: iscsi transport
- * @dd_size: private driver data size
- * @target_id: which target
- *
- * This can be called from a LLD or iscsi_transport.
- */
-struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
- int dd_size, unsigned int target_id)
-{
- struct iscsi_cls_session *session;
-
- session = iscsi_alloc_session(shost, transport, dd_size);
- if (!session)
- return NULL;
-
- if (iscsi_add_session(session, target_id)) {
- iscsi_free_session(session);
- return NULL;
- }
- return session;
-}
-EXPORT_SYMBOL_GPL(iscsi_create_session);
-
static void iscsi_conn_release(struct device *dev)
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4e33f1661e4c..351b028ef893 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -888,7 +888,8 @@ static void sas_port_delete_link(struct sas_port *port,
sysfs_remove_link(&phy->dev.kobj, "port");
}
-/** sas_port_alloc - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc - allocate and initialize a SAS port structure
*
* @parent: parent device
* @port_id: port number
@@ -897,7 +898,7 @@ static void sas_port_delete_link(struct sas_port *port,
* below the device specified by @parent which must be either a Scsi_Host
* or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc(struct device *parent, int port_id)
{
@@ -932,7 +933,8 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
}
EXPORT_SYMBOL(sas_port_alloc);
-/** sas_port_alloc_num - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc_num - allocate and initialize a SAS port structure
*
* @parent: parent device
*
@@ -942,7 +944,7 @@ EXPORT_SYMBOL(sas_port_alloc);
* the device tree below the device specified by @parent which must be
* either a Scsi_Host or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc_num(struct device *parent)
{
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 64852e6df3e3..fe47850a8258 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -985,7 +985,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
}
-/** spi_dv_device - Do Domain Validation on the device
+/**
+ * spi_dv_device - Do Domain Validation on the device
* @sdev: scsi device to validate
*
* Performs the domain validation on the given device in the
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 94127868bedf..effb7e768165 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1639,7 +1639,7 @@ MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
-static struct ctl_table sg_sysctls[] = {
+static const struct ctl_table sg_sysctls[] = {
{
.procname = "sg-big-buff",
.data = &sg_big_buff,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 04fb24d77e9b..0da7be40c925 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6489,7 +6489,7 @@ out:
return SUCCESS;
}
-static int pqi_slave_alloc(struct scsi_device *sdev)
+static int pqi_sdev_init(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
unsigned long flags;
@@ -6557,7 +6557,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
-static int pqi_slave_configure(struct scsi_device *sdev)
+static int pqi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
int rc = 0;
struct pqi_scsi_dev *device;
@@ -6573,7 +6574,7 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
-static void pqi_slave_destroy(struct scsi_device *sdev)
+static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
@@ -7548,9 +7549,9 @@ static const struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
- .slave_alloc = pqi_slave_alloc,
- .slave_configure = pqi_slave_configure,
- .slave_destroy = pqi_slave_destroy,
+ .sdev_init = pqi_sdev_init,
+ .sdev_configure = pqi_sdev_configure,
+ .sdev_destroy = pqi_sdev_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 9be3f0193145..1c24517e4e65 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -21,7 +21,7 @@
#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
/* Supported devices by snic module */
-static struct pci_device_id snic_id_table[] = {
+static const struct pci_device_id snic_id_table[] = {
{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
{ 0, } /* end of table */
};
@@ -42,11 +42,11 @@ module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
/*
- * snic_slave_alloc : callback function to SCSI Mid Layer, called on
+ * snic_sdev_init : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_alloc(struct scsi_device *sdev)
+snic_sdev_init(struct scsi_device *sdev)
{
struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
@@ -57,11 +57,11 @@ snic_slave_alloc(struct scsi_device *sdev)
}
/*
- * snic_slave_configure : callback function to SCSI Mid Layer, called on
+ * snic_sdev_configure : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_configure(struct scsi_device *sdev)
+snic_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct snic *snic = shost_priv(sdev->host);
u32 qdepth = 0, max_ios = 0;
@@ -107,8 +107,8 @@ static const struct scsi_host_template snic_host_template = {
.eh_abort_handler = snic_abort_cmd,
.eh_device_reset_handler = snic_device_reset,
.eh_host_reset_handler = snic_host_reset,
- .slave_alloc = snic_slave_alloc,
- .slave_configure = snic_slave_configure,
+ .sdev_init = snic_sdev_init,
+ .sdev_configure = snic_sdev_configure,
.change_queue_depth = snic_change_queue_depth,
.this_id = -1,
.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e8ef27d7ef61..ebbd50ec0cda 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1030,6 +1030,11 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
break;
}
+ if (STp->first_tur) {
+ /* Don't set pos_unknown right after device recognition */
+ STp->pos_unknown = 0;
+ STp->first_tur = 0;
+ }
if (SRpnt != NULL)
st_release_request(SRpnt);
@@ -4328,6 +4333,7 @@ static int st_probe(struct device *dev)
blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
tpnt->long_timeout = ST_LONG_TIMEOUT;
tpnt->try_dio = try_direct_io;
+ tpnt->first_tur = 1;
for (i = 0; i < ST_NBR_MODES; i++) {
STm = &(tpnt->modes[i]);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 7a68eaba7e81..1aaaf5369a40 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -170,6 +170,7 @@ struct scsi_tape {
unsigned char rew_at_close; /* rewind necessary at close */
unsigned char inited;
unsigned char cleaning_req; /* cleaning requested? */
+ unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
int min_block;
int max_block;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 0e81125df8c7..63ed7f9aaa93 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -584,7 +584,7 @@ static void return_abnormal_state(struct st_hba *hba, int status)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static int
-stex_slave_config(struct scsi_device *sdev)
+stex_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
@@ -1481,14 +1481,14 @@ static const struct scsi_host_template driver_template = {
.proc_name = DRV_NAME,
.bios_param = stex_biosparam,
.queuecommand = stex_queuecommand,
- .slave_configure = stex_slave_config,
+ .sdev_configure = stex_sdev_configure,
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
.dma_boundary = PAGE_SIZE - 1,
};
-static struct pci_device_id stex_pci_tbl[] = {
+static const struct pci_device_id stex_pci_tbl[] = {
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d0b55c1fa908..a8614e54544e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -171,6 +171,12 @@ do { \
dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
} while (0)
+#define storvsc_log_ratelimited(dev, level, fmt, ...) \
+do { \
+ if (do_logging(level)) \
+ dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__); \
+} while (0)
+
struct vmscsi_request {
u16 length;
u8 srb_status;
@@ -917,14 +923,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
/*
* Allocate state to manage the sub-channels.
- * We allocate an array based on the numbers of possible CPUs
- * (Hyper-V does not support cpu online/offline).
- * This Array will be sparseley populated with unique
- * channels - primary + sub-channels.
- * We will however populate all the slots to evenly distribute
- * the load.
+ * We allocate an array based on the number of CPU ids. This array
+ * is initially sparsely populated for the CPUs assigned to channels:
+ * primary + sub-channels. As I/Os are initiated by different CPUs,
+ * the slots for all online CPUs are populated to evenly distribute
+ * the load across all channels.
*/
- stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
+ stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *),
GFP_KERNEL);
if (stor_device->stor_chns == NULL)
return -ENOMEM;
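The rewritten comment describes an array sized by CPU id that is filled lazily from the I/O path; reduced to the bare pattern (a sketch, not storvsc internals; pick_channel() is a hypothetical helper):

#include <linux/slab.h>
#include <linux/smp.h>

void **chns = kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
if (!chns)
        return -ENOMEM;

/* later, on the I/O path: bind the issuing CPU to a channel on first use */
int cpu = raw_smp_processor_id();
if (!chns[cpu])
        chns[cpu] = pick_channel(cpu);  /* hypothetical helper */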
@@ -1177,7 +1182,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
- storvsc_log(device, loglevel,
+ storvsc_log_ratelimited(device, loglevel,
"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
@@ -1579,7 +1584,8 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
return 0;
}
-static int storvsc_device_configure(struct scsi_device *sdevice)
+static int storvsc_sdev_configure(struct scsi_device *sdevice,
+ struct queue_limits *lim)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1794,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload->range.len = 0;
payload_sz = 0;
if (scsi_sg_count(scmnd)) {
@@ -1880,8 +1887,8 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
- .slave_alloc = storvsc_device_alloc,
- .slave_configure = storvsc_device_configure,
+ .sdev_init = storvsc_device_alloc,
+ .sdev_configure = storvsc_sdev_configure,
.cmd_per_lun = 2048,
.this_id = -1,
/* Ensure there are no gaps in presented sgls */
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index a2560cc807d3..212d89d0d23e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -765,7 +765,7 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
}
}
-static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
+static int sym53c8xx_sdev_init(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -825,7 +825,8 @@ out:
/*
* Linux entry point for device queue sizing.
*/
-static int sym53c8xx_slave_configure(struct scsi_device *sdev)
+static int sym53c8xx_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -861,14 +862,14 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+static void sym53c8xx_sdev_destroy(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
- /* if slave_alloc returned before allocating a sym_lcb, return */
+ /* if sdev_init returned before allocating a sym_lcb, return */
if (!lp)
return;
@@ -1684,9 +1685,9 @@ static const struct scsi_host_template sym2_template = {
.info = sym53c8xx_info,
.cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
- .slave_alloc = sym53c8xx_slave_alloc,
- .slave_configure = sym53c8xx_slave_configure,
- .slave_destroy = sym53c8xx_slave_destroy,
+ .sdev_init = sym53c8xx_sdev_init,
+ .sdev_configure = sym53c8xx_sdev_configure,
+ .sdev_destroy = sym53c8xx_sdev_destroy,
.eh_abort_handler = sym53c8xx_eh_abort_handler,
.eh_target_reset_handler = sym53c8xx_eh_target_reset_handler,
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
@@ -2030,7 +2031,7 @@ static struct spi_function_template sym2_transport_functions = {
.get_signalling = sym2_get_signalling,
};
-static struct pci_device_id sym2_id_table[] = {
+static const struct pci_device_id sym2_id_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 60be1a0c6183..21ce3e940192 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -800,7 +800,7 @@ static const struct scsi_host_template virtscsi_host_template = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
+ .sdev_init = virtscsi_device_alloc,
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9ec55ddc1204..924025305753 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -735,7 +735,8 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}
-static int scsifront_sdev_configure(struct scsi_device *sdev)
+static int scsifront_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
@@ -776,8 +777,8 @@ static const struct scsi_host_template scsifront_sht = {
.queuecommand = scsifront_queuecommand,
.eh_abort_handler = scsifront_eh_abort_handler,
.eh_device_reset_handler = scsifront_dev_reset_handler,
- .slave_configure = scsifront_sdev_configure,
- .slave_destroy = scsifront_sdev_destroy,
+ .sdev_configure = scsifront_sdev_configure,
+ .sdev_destroy = scsifront_sdev_destroy,
.cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
.can_queue = VSCSIIF_MAX_REQS,
.this_id = -1,
@@ -1074,8 +1075,8 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
continue;
/*
- * Front device state path, used in slave_configure called
- * on successfull scsi_add_device, and in slave_destroy called
+ * Front device state path, used in sdev_configure called
+ * on successful scsi_add_device, and in sdev_destroy called
* on remove of a device.
*/
snprintf(info->dev_state_path, sizeof(info->dev_state_path),
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 65e5515f7555..005fa2ef100f 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -328,7 +328,8 @@ void slim_report_absent(struct slim_device *sbdev)
}
EXPORT_SYMBOL_GPL(slim_report_absent);
-static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
+static bool slim_eaddr_equal(const struct slim_eaddr *a,
+ const struct slim_eaddr *b)
{
return (a->manf_id == b->manf_id &&
a->prod_code == b->prod_code &&
@@ -336,9 +337,9 @@ static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
a->instance == b->instance);
}
-static int slim_match_dev(struct device *dev, void *data)
+static int slim_match_dev(struct device *dev, const void *data)
{
- struct slim_eaddr *e_addr = data;
+ const struct slim_eaddr *e_addr = data;
struct slim_device *sbdev = to_slim_device(dev);
return slim_eaddr_equal(&sbdev->e_addr, e_addr);
@@ -384,21 +385,13 @@ struct slim_device *slim_get_device(struct slim_controller *ctrl,
}
EXPORT_SYMBOL_GPL(slim_get_device);
-static int of_slim_match_dev(struct device *dev, void *data)
-{
- struct device_node *np = data;
- struct slim_device *sbdev = to_slim_device(dev);
-
- return (sbdev->dev.of_node == np);
-}
-
static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
struct device_node *np)
{
struct slim_device *sbdev;
struct device *dev;
- dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
+ dev = device_find_child(ctrl->dev, np, device_match_of_node);
if (dev) {
sbdev = to_slim_device(dev);
return sbdev;
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 242570a5e565..e7aa9bd4b44b 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -13,8 +13,8 @@
*
* @ctrl: Controller handle
* @reply: Reply received from the device
- * @len: Length of the reply
* @tid: Transaction ID received with which framework can associate reply.
+ * @len: Length of the reply
*
* Called by controller to inform framework about the response received.
* This helps in making the API asynchronous, and controller-driver doesn't need
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 2a42b28931c9..298b542dd1c0 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -399,7 +399,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
static int __init atmel_soc_device_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
if (!of_match_node(at91_soc_allowed_list, np))
return 0;
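The __free(device_node) annotation comes from the scope-based cleanup helpers; a minimal sketch of the idiom, assuming <linux/cleanup.h> and <linux/of.h>:

static int example_probe_root(void)     /* illustrative only */
{
        struct device_node *np __free(device_node) = of_find_node_by_path("/");

        if (!np)
                return -ENODEV;

        /* np is of_node_put() automatically when it goes out of scope */
        return 0;
}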
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 3ad321ca608a..ca6a5fa1618f 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -3,4 +3,4 @@ ifeq ($(CONFIG_ARM),y)
obj-$(CONFIG_ARCH_MXC) += soc-imx.o
endif
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_SOC_IMX9) += imx93-src.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o soc-imx9.o
diff --git a/drivers/soc/imx/soc-imx9.c b/drivers/soc/imx/soc-imx9.c
new file mode 100644
index 000000000000..b46d22cf0212
--- /dev/null
+++ b/drivers/soc/imx/soc-imx9.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+#define SOC_ID(x) (((x) & 0xFFFF) >> 8)
+#define SOC_REV_MAJOR(x) ((((x) >> 28) & 0xF) - 0x9)
+#define SOC_REV_MINOR(x) (((x) >> 24) & 0xF)
+
+static int imx9_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *attr;
+ struct arm_smccc_res res;
+ struct soc_device *sdev;
+ u32 soc_id, rev_major, rev_minor;
+ u64 uid127_64, uid63_0;
+ int err;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ err = of_property_read_string(of_root, "model", &attr->machine);
+ if (err) {
+ pr_err("%s: missing model property: %d\n", __func__, err);
+ goto attr;
+ }
+
+ attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
+
+ /*
+ * Retrieve the soc id, rev & uid info:
+ * res.a1[31:16]: soc revision;
+ * res.a1[15:0]: soc id;
+ * res.a2: uid[127:64];
+ * res.a3: uid[63:0];
+ */
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_err("%s: SMC failed: 0x%lx\n", __func__, res.a0);
+ err = -EINVAL;
+ goto family;
+ }
+
+ soc_id = SOC_ID(res.a1);
+ rev_major = SOC_REV_MAJOR(res.a1);
+ rev_minor = SOC_REV_MINOR(res.a1);
+
+ attr->soc_id = kasprintf(GFP_KERNEL, "i.MX%2x", soc_id);
+ attr->revision = kasprintf(GFP_KERNEL, "%d.%d", rev_major, rev_minor);
+
+ uid127_64 = res.a2;
+ uid63_0 = res.a3;
+ attr->serial_number = kasprintf(GFP_KERNEL, "%016llx%016llx", uid127_64, uid63_0);
+
+ sdev = soc_device_register(attr);
+ if (IS_ERR(sdev)) {
+ err = PTR_ERR(sdev);
+ pr_err("%s failed to register SoC as a device: %d\n", __func__, err);
+ goto serial_number;
+ }
+
+ return 0;
+
+serial_number:
+ kfree(attr->serial_number);
+ kfree(attr->revision);
+ kfree(attr->soc_id);
+family:
+ kfree(attr->family);
+attr:
+ kfree(attr);
+ return err;
+}
+
+static __maybe_unused const struct of_device_id imx9_soc_match[] = {
+ { .compatible = "fsl,imx93", },
+ { .compatible = "fsl,imx95", },
+ { }
+};
+
+#define IMX_SOC_DRIVER "imx9-soc"
+
+static struct platform_driver imx9_soc_driver = {
+ .probe = imx9_soc_probe,
+ .driver = {
+ .name = IMX_SOC_DRIVER,
+ },
+};
+
+static int __init imx9_soc_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ /* No match means it is not an i.MX 9 series SoC, do nothing. */
+ if (!of_match_node(imx9_soc_match, of_root))
+ return 0;
+
+ ret = platform_driver_register(&imx9_soc_driver);
+ if (ret) {
+ pr_err("failed to register imx9_soc platform driver: %d\n", ret);
+ return ret;
+ }
+
+ pdev = platform_device_register_simple(IMX_SOC_DRIVER, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("failed to register imx9_soc platform device: %ld\n", PTR_ERR(pdev));
+ platform_driver_unregister(&imx9_soc_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(imx9_soc_init);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX9 SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index d08bfc8ef7be..104a5f9bfd26 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -69,14 +69,11 @@ static int litex_check_csr_access(void __iomem *reg_addr)
struct litex_soc_ctrl_device {
void __iomem *base;
- struct notifier_block reset_nb;
};
-static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int litex_reset_handler(struct sys_off_data *data)
{
- struct litex_soc_ctrl_device *soc_ctrl_dev =
- container_of(this, struct litex_soc_ctrl_device, reset_nb);
+ struct litex_soc_ctrl_device *soc_ctrl_dev = data->cb_data;
litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
return NOTIFY_DONE;
@@ -105,11 +102,9 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, soc_ctrl_dev);
-
- soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
- soc_ctrl_dev->reset_nb.priority = 128;
- error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ error = devm_register_restart_handler(&pdev->dev,
+ litex_reset_handler,
+ soc_ctrl_dev);
if (error) {
dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
error);
@@ -118,20 +113,12 @@ static int litex_soc_ctrl_probe(struct platform_device *pdev)
return 0;
}
-static void litex_soc_ctrl_remove(struct platform_device *pdev)
-{
- struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
-
- unregister_restart_handler(&soc_ctrl_dev->reset_nb);
-}
-
static struct platform_driver litex_soc_ctrl_driver = {
.driver = {
.name = "litex-soc-controller",
.of_match_table = litex_soc_ctrl_of_match,
},
.probe = litex_soc_ctrl_probe,
- .remove = litex_soc_ctrl_remove,
};
module_platform_driver(litex_soc_ctrl_driver);
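devm_register_restart_handler() takes a sys_off-style callback plus a cookie and is released with the device, which is why the notifier block and the remove() callback can be dropped; the converted shape, reduced to essentials (foo_* names are invented):

#include <linux/notifier.h>
#include <linux/reboot.h>

static int foo_reset_handler(struct sys_off_data *data)
{
        struct foo_priv *priv = data->cb_data;

        /* trigger the reset through priv */
        return NOTIFY_DONE;
}

/* in probe(): lifetime is tied to the device, no explicit unregister */
err = devm_register_restart_handler(&pdev->dev, foo_reset_handler, priv);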
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 0a05ee87a0fc..455221e8de24 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -524,23 +524,5 @@ int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_eoc);
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
-{
- struct cmdq_instruction inst = { {0} };
- int err;
-
- /* insert EOC and generate IRQ for each command iteration */
- err = cmdq_pkt_eoc(pkt);
- if (err < 0)
- return err;
-
- /* JUMP to end */
- inst.op = CMDQ_CODE_JUMP;
- inst.value = CMDQ_JUMP_PASS >>
- cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
- return cmdq_pkt_append_command(pkt, inst);
-}
-EXPORT_SYMBOL(cmdq_pkt_finalize);
-
MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index 2a1adcb87d4e..f54c966138b5 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -273,23 +273,31 @@ static int mtk_devapc_probe(struct platform_device *pdev)
return -EINVAL;
devapc_irq = irq_of_parse_and_map(node, 0);
- if (!devapc_irq)
- return -EINVAL;
+ if (!devapc_irq) {
+ ret = -EINVAL;
+ goto err;
+ }
ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
- if (IS_ERR(ctx->infra_clk))
- return -EINVAL;
+ if (IS_ERR(ctx->infra_clk)) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
IRQF_TRIGGER_NONE, "devapc", ctx);
if (ret)
- return ret;
+ goto err;
platform_set_drvdata(pdev, ctx);
start_devapc(ctx);
return 0;
+
+err:
+ iounmap(ctx->infra_base);
+ return ret;
}
static void mtk_devapc_remove(struct platform_device *pdev)
@@ -297,6 +305,7 @@ static void mtk_devapc_remove(struct platform_device *pdev)
struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
stop_devapc(ctx);
+ iounmap(ctx->infra_base);
}
static struct platform_driver mtk_devapc_driver = {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 74b9121240f8..58e63cf0036b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -139,7 +139,7 @@ config QCOM_RAMP_CTRL
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select QCOM_SCM
help
The Qualcomm remote filesystem memory driver is used for allocating
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 32c3bc887cef..56823b6a2fac 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -142,6 +142,7 @@ struct qcom_llcc_config {
bool skip_llcc_cfg;
bool no_edac;
bool irq_configured;
+ bool no_broadcast_register;
};
struct qcom_sct_config {
@@ -154,6 +155,38 @@ enum llcc_reg_offset {
LLCC_COMMON_STATUS0,
};
+static const struct llcc_slice_config ipq5424_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 768,
+ .priority = 1,
+ .bonus_ways = 0xFFFF,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ .alloc_oneway_en = true,
+ .ovcap_en = true,
+ .ovcap_prio = true,
+ .vict_prio = true,
+ },
+ {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 256,
+ .priority = 2,
+ .fixed_size = true,
+ .bonus_ways = 0xF000,
+ .retain_on_pc = true,
+ .activate_on_init = true,
+ .write_scid_cacheable_en = true,
+ .stale_en = true,
+ .stale_cap_en = true,
+ },
+};
+
static const struct llcc_slice_config sa8775p_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3004,6 +3037,7 @@ static const struct llcc_slice_config x1e80100_data[] = {
.fixed_size = true,
.bonus_ways = 0xfff,
.cache_mode = 0,
+ .activate_on_init = true,
}, {
.usecase_id = LLCC_CAMEXP0,
.slice_id = 4,
@@ -3185,6 +3219,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = {
},
};
+static const struct qcom_llcc_config ipq5424_cfg[] = {
+ {
+ .sct_data = ipq5424_data,
+ .size = ARRAY_SIZE(ipq5424_data),
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .no_broadcast_register = true,
+ },
+};
+
static const struct qcom_llcc_config sa8775p_cfg[] = {
{
.sct_data = sa8775p_data,
@@ -3360,6 +3404,11 @@ static const struct qcom_sct_config qdu1000_cfgs = {
.num_config = ARRAY_SIZE(qdu1000_cfg),
};
+static const struct qcom_sct_config ipq5424_cfgs = {
+ .llcc_config = ipq5424_cfg,
+ .num_config = ARRAY_SIZE(ipq5424_cfg),
+};
+
static const struct qcom_sct_config sa8775p_cfgs = {
.llcc_config = sa8775p_cfg,
.num_config = ARRAY_SIZE(sa8775p_cfg),
@@ -3957,8 +4006,12 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
if (IS_ERR(drv_data->bcast_regmap)) {
- ret = PTR_ERR(drv_data->bcast_regmap);
- goto err;
+ if (cfg->no_broadcast_register) {
+ drv_data->bcast_regmap = regmap;
+ } else {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
}
/* Extract version of the IP */
@@ -4029,6 +4082,7 @@ err:
}
static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
{ .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
{ .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index caf3f63d940e..052c292eeda6 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -100,15 +101,13 @@ void pmic_glink_client_register(struct pmic_glink_client *client)
struct pmic_glink *pg = client->pg;
unsigned long flags;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
spin_lock_irqsave(&pg->client_lock, flags);
list_add(&client->node, &pg->clients);
client->pdr_notify(client->priv, pg->client_state);
spin_unlock_irqrestore(&pg->client_lock, flags);
- mutex_unlock(&pg->state_lock);
-
}
EXPORT_SYMBOL_GPL(pmic_glink_client_register);
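guard(mutex)(...) from <linux/cleanup.h> releases the lock on every exit path, which is what makes the early returns in the hunks below safe; the basic shape is (sketch, struct foo is invented):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int foo_do_work(struct foo *foo)
{
        guard(mutex)(&foo->lock);       /* unlocked automatically on return */

        if (!foo->ready)
                return -ENODEV;         /* no explicit mutex_unlock() needed */

        foo->counter++;
        return 0;
}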
@@ -119,26 +118,25 @@ int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
unsigned long start;
int ret;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
if (!pg->ept) {
- ret = -ECONNRESET;
- } else {
- start = jiffies;
- for (;;) {
- ret = rpmsg_send(pg->ept, data, len);
- if (ret != -EAGAIN)
- break;
-
- if (timeout_reached) {
- ret = -ETIMEDOUT;
- break;
- }
-
- usleep_range(1000, 5000);
- timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ return -ECONNRESET;
+ }
+
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
}
+
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
}
- mutex_unlock(&pg->state_lock);
return ret;
}
@@ -227,51 +225,42 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
{
struct pmic_glink *pg = priv;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->pdr_state = state;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
}
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg = __pmic_glink;
- int ret = 0;
- mutex_lock(&__pmic_glink_lock);
- if (!pg) {
- ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
- goto out_unlock;
- }
+ guard(mutex)(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ return dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
dev_set_drvdata(&rpdev->dev, pg);
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = rpdev->ept;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
- return ret;
+ return 0;
}
static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct pmic_glink *pg;
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
if (!pg)
- goto out_unlock;
+ return;
- mutex_lock(&pg->state_lock);
+ guard(mutex)(&pg->state_lock);
pg->ept = NULL;
pmic_glink_state_notify_clients(pg);
- mutex_unlock(&pg->state_lock);
-out_unlock:
- mutex_unlock(&__pmic_glink_lock);
}
static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
@@ -378,9 +367,8 @@ static void pmic_glink_remove(struct platform_device *pdev)
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
- mutex_lock(&__pmic_glink_lock);
+ guard(mutex)(&__pmic_glink_lock);
__pmic_glink = NULL;
- mutex_unlock(&__pmic_glink_lock);
}
static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 463b1c528831..bd06ce161804 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -5,6 +5,7 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -114,7 +115,7 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
* The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
* one ack at a time.
*/
- mutex_lock(&altmode->lock);
+ guard(mutex)(&altmode->lock);
req.hdr.owner = cpu_to_le32(altmode->owner_id);
req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
@@ -125,18 +126,16 @@ static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cm
ret = pmic_glink_send(altmode->client, &req, sizeof(req));
if (ret) {
dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
- goto out_unlock;
+ return ret;
}
left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
if (!left) {
dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
- ret = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
-out_unlock:
- mutex_unlock(&altmode->lock);
- return ret;
+ return 0;
}
static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 6e30f08761aa..154ca5beb471 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -553,6 +553,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7225", .data = sm6350_domains, },
{ .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
{ .compatible = "qcom,sm8250", .data = sm8250_domains, },
@@ -561,6 +562,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
{ .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
+ { .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
{},
};
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 33603b8fd8f3..1b32469f2789 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -125,7 +125,7 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct class rmtfs_class = {
+static const struct class rmtfs_class = {
.name = "rmtfs",
};
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d9bfac6c54fb..cc5be8019b6a 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -112,7 +112,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (args.args_count != 1) {
dev_err(dev, "invalid #qcom,smem-state-cells\n");
- return ERR_PTR(-EINVAL);
+ state = ERR_PTR(-EINVAL);
+ goto put;
}
state = of_node_to_state(args.np);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4783ab1adb8d..a3e88ced328a 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -365,7 +365,7 @@ static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
- seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
+ seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
}
static struct irq_chip smp2p_irq_chip = {
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 62fadfe44a09..18d7f1be9093 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -451,6 +451,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QCS9100) },
{ qcom_board_id(QCS8300) },
{ qcom_board_id(QCS8275) },
+ { qcom_board_id(QCS9075) },
{ qcom_board_id(QCS615) },
};
@@ -796,7 +797,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
if (!qs->attr.soc_id || !qs->attr.revision)
return -ENOMEM;
- if (offsetof(struct socinfo, serial_num) <= item_size) {
+ if (offsetofend(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
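offsetofend(T, m) is offsetof(T, m) plus sizeof_field(T, m), so the corrected test only reads serial_num when the whole field fits inside the SMEM item; a one-line sketch of the intent:

/* safe only when the item extends past the end of serial_num */
if (offsetofend(struct socinfo, serial_num) <= item_size)
        serial = le32_to_cpu(info->serial_num);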
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 9f7fe02310b9..6d2e135eed89 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -345,6 +345,11 @@ config ARCH_R9A09G011
help
This enables support for the Renesas RZ/V2M SoC.
+config ARCH_R9A09G047
+ bool "ARM64 Platform support for RZ/G3E"
+ help
+ This enables support for the Renesas RZ/G3E SoC variants.
+
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d8c53cec7f37..dd5256e5aae1 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -126,7 +126,7 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
if (ret)
return ret;
}
- return ret;
+ return 0;
}
static bool tensor_is_atomic(unsigned int reg)
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
index 84ab46c9d9f5..6215c6a84fbe 100644
--- a/drivers/soc/tegra/cbb/tegra-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -69,19 +69,12 @@ static int tegra_cbb_err_show(struct seq_file *file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(tegra_cbb_err);
-static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+static void tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
{
static struct dentry *root;
- if (!root) {
+ if (!root)
root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
- if (IS_ERR_OR_NULL(root)) {
- pr_err("%s(): could not create debugfs node\n", __func__);
- return PTR_ERR(root);
- }
- }
-
- return 0;
}
void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
@@ -148,13 +141,8 @@ int tegra_cbb_register(struct tegra_cbb *cbb)
{
int ret;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- ret = tegra_cbb_err_debugfs_init(cbb);
- if (ret) {
- dev_err(cbb->dev, "failed to create debugfs\n");
- return ret;
- }
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_cbb_err_debugfs_init(cbb);
/* register interrupt handler for errors due to different initiators */
ret = cbb->ops->interrupt_enable(cbb);
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index 5cf0e8c34164..c74629af9bb5 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -277,7 +277,7 @@ static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234
* which timed out.
* a) Get block number from the index of set bit in
* <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
- * b) Get address of register repective to block number i.e.
+ * b) Get address of register respective to block number i.e.
* <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
* c) Read the register in above step to get client_id which
* timed out as per the set bits.
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index eb14e5ff5a0a..e24ab5f7d2bf 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -647,15 +647,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
};
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
- { .start = 0x01c, .end = 0x0c8 },
- { .start = 0x12c, .end = 0x184 },
+ { .start = 0x01c, .end = 0x064 },
+ { .start = 0x084, .end = 0x0a0 },
+ { .start = 0x0a4, .end = 0x0c8 },
+ { .start = 0x12c, .end = 0x164 },
+ { .start = 0x16c, .end = 0x184 },
{ .start = 0x190, .end = 0x198 },
{ .start = 0x1a0, .end = 0x204 },
- { .start = 0x21c, .end = 0x250 },
- { .start = 0x25c, .end = 0x2f0 },
+ { .start = 0x21c, .end = 0x2f0 },
{ .start = 0x310, .end = 0x3d8 },
- { .start = 0x400, .end = 0x4f0 },
- { .start = 0x4f8, .end = 0x7e8 },
+ { .start = 0x400, .end = 0x420 },
+ { .start = 0x444, .end = 0x490 },
+ { .start = 0x4bc, .end = 0x4f0 },
+ { .start = 0x4f8, .end = 0x54c },
+ { .start = 0x57c, .end = 0x7e8 },
{ .start = 0x8d0, .end = 0x8d8 },
{ .start = 0xacc, .end = 0xf00 }
};
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 96a7f9709720..5a54b10daf77 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -384,7 +384,7 @@ static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
return slave_stat;
}
-static int amd_sdw_compute_params(struct sdw_bus *bus)
+static int amd_sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct sdw_transport_data t_data = {0};
struct sdw_master_runtime *m_rt;
@@ -410,7 +410,7 @@ static int amd_sdw_compute_params(struct sdw_bus *bus)
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
port_bo, port_bo >> 8, hstart, hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -1190,6 +1190,7 @@ static int __maybe_unused amd_resume_runtime(struct device *dev)
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
return amd_sdw_clock_stop_exit(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
if (val) {
val |= AMD_SDW_CLK_RESUME_REQ;
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index d1dc62c34f1c..9b295fc9acd5 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -813,6 +813,16 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
}
EXPORT_SYMBOL(sdw_extract_slave_id);
+bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
+{
+ /*
+ * Dynamic scaling is a defined by SDCA. However, some devices expose the class ID but
+ * can't support dynamic scaling. We might need a quirk to handle such devices.
+ */
+ return slave->id.class_id;
+}
+EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
+
static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
@@ -1276,23 +1286,12 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
return ret;
}
-static int sdw_slave_set_frequency(struct sdw_slave *slave)
+int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
{
u32 mclk_freq = slave->bus->prop.mclk_freq;
u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
unsigned int scale;
u8 scale_index;
- u8 base;
- int ret;
-
- /*
- * frequency base and scale registers are required for SDCA
- * devices. They may also be used for 1.2+/non-SDCA devices.
- * Driver can set the property, we will need a DisCo property
- * to discover this case from platform firmware.
- */
- if (!slave->id.class_id && !slave->prop.clock_reg_supported)
- return 0;
if (!mclk_freq) {
dev_err(&slave->dev,
@@ -1311,19 +1310,19 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
*/
if (!(19200000 % mclk_freq)) {
mclk_freq = 19200000;
- base = SDW_SCP_BASE_CLOCK_19200000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_19200000_HZ;
} else if (!(22579200 % mclk_freq)) {
mclk_freq = 22579200;
- base = SDW_SCP_BASE_CLOCK_22579200_HZ;
+ *base = SDW_SCP_BASE_CLOCK_22579200_HZ;
} else if (!(24576000 % mclk_freq)) {
mclk_freq = 24576000;
- base = SDW_SCP_BASE_CLOCK_24576000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_24576000_HZ;
} else if (!(32000000 % mclk_freq)) {
mclk_freq = 32000000;
- base = SDW_SCP_BASE_CLOCK_32000000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_32000000_HZ;
} else if (!(96000000 % mclk_freq)) {
mclk_freq = 24000000;
- base = SDW_SCP_BASE_CLOCK_24000000_HZ;
+ *base = SDW_SCP_BASE_CLOCK_24000000_HZ;
} else {
dev_err(&slave->dev,
"Unsupported clock base, mclk %d\n",
@@ -1354,6 +1353,34 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
}
scale_index++;
+ dev_dbg(&slave->dev,
+ "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
+ *base, scale_index, mclk_freq, curr_freq);
+
+ return scale_index;
+}
+EXPORT_SYMBOL(sdw_slave_get_scale_index);
+
+static int sdw_slave_set_frequency(struct sdw_slave *slave)
+{
+ int scale_index;
+ u8 base;
+ int ret;
+
+ /*
+ * frequency base and scale registers are required for SDCA
+ * devices. They may also be used for 1.2+/non-SDCA devices.
+ * The driver can set the property directly; for now there is no
+ * DisCo property to discover support for the scaling registers
+ * from platform firmware.
+ */
+ if (!slave->id.class_id && !slave->prop.clock_reg_supported)
+ return 0;
+
+ scale_index = sdw_slave_get_scale_index(slave, &base);
+ if (scale_index < 0)
+ return scale_index;
+
ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
if (ret < 0) {
dev_err(&slave->dev,
@@ -1373,10 +1400,6 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
dev_err(&slave->dev,
"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
- dev_dbg(&slave->dev,
- "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
- base, scale_index, mclk_freq, curr_freq);
-
return ret;
}
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index fda6b24ac2da..fc990171b3f7 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -90,6 +90,7 @@ int sdw_find_col_index(int col);
* @transport_params: Transport parameters
* @port_params: Port parameters
* @port_node: List node for Master or Slave port_list
+ * @lane: Which lane is used
*
* SoundWire spec has no mention of ports for Master interface but the
* concept is logically extended.
@@ -100,6 +101,7 @@ struct sdw_port_runtime {
struct sdw_transport_params transport_params;
struct sdw_port_params port_params;
struct list_head port_node;
+ unsigned int lane;
};
/**
@@ -149,6 +151,7 @@ struct sdw_transport_data {
int hstop;
int block_offset;
int sub_block_offset;
+ unsigned int lane;
};
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 77dc094075e1..e98d5db81b1c 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -167,9 +167,6 @@ static int sdw_drv_remove(struct device *dev)
slave->probed = false;
- if (slave->prop.use_domain_irq)
- sdw_irq_dispose_mapping(slave);
-
mutex_unlock(&slave->sdw_dev_lock);
if (drv->remove)
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index b9316207c3ab..59965f43c2fb 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -18,6 +18,7 @@
struct sdw_group_params {
unsigned int rate;
+ unsigned int lane;
int full_bw;
int payload_bw;
int hwidth;
@@ -27,6 +28,7 @@ struct sdw_group {
unsigned int count;
unsigned int max_size;
unsigned int *rates;
+ unsigned int *lanes;
};
void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
@@ -48,6 +50,9 @@ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
slave_total_ch = 0;
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ if (p_rt->lane != t_data->lane)
+ continue;
+
ch = hweight32(p_rt->ch_mask);
sdw_fill_xport_params(&p_rt->transport_params,
@@ -56,7 +61,7 @@ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
sample_int, port_bo, port_bo >> 8,
t_data->hstart,
t_data->hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -105,11 +110,13 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
t_data.hstart = hstart;
list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ if (p_rt->lane != params->lane)
+ continue;
sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
false, SDW_BLK_GRP_CNT_1, sample_int,
*port_bo, (*port_bo) >> 8, hstart, hstop,
- SDW_BLK_PKG_PER_PORT, 0x0);
+ SDW_BLK_PKG_PER_PORT, p_rt->lane);
sdw_fill_port_params(&p_rt->port_params,
p_rt->num, bps,
@@ -131,6 +138,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
(*port_bo) += bps * ch;
}
+ t_data.lane = params->lane;
sdw_compute_slave_ports(m_rt, &t_data);
}
@@ -138,69 +146,107 @@ static void _sdw_compute_port_params(struct sdw_bus *bus,
struct sdw_group_params *params, int count)
{
struct sdw_master_runtime *m_rt;
- int hstop = bus->params.col - 1;
- int port_bo, i;
+ int port_bo, i, l;
+ int hstop;
/* Run loop for all groups to compute transport parameters */
- for (i = 0; i < count; i++) {
- port_bo = 1;
+ for (l = 0; l < SDW_MAX_LANES; l++) {
+ if (l > 0 && !bus->lane_used_bandwidth[l])
+ continue;
+ /* reset hstop for each lane */
+ hstop = bus->params.col - 1;
+ for (i = 0; i < count; i++) {
+ if (params[i].lane != l)
+ continue;
+ port_bo = 1;
- list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
- }
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
+ }
- hstop = hstop - params[i].hwidth;
+ hstop = hstop - params[i].hwidth;
+ }
}
}
static int sdw_compute_group_params(struct sdw_bus *bus,
+ struct sdw_stream_runtime *stream,
struct sdw_group_params *params,
- int *rates, int count)
+ struct sdw_group *group)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
int sel_col = bus->params.col;
unsigned int rate, bps, ch;
- int i, column_needed = 0;
+ int i, l, column_needed;
/* Calculate bandwidth per group */
- for (i = 0; i < count; i++) {
- params[i].rate = rates[i];
+ for (i = 0; i < group->count; i++) {
+ params[i].rate = group->rates[i];
+ params[i].lane = group->lanes[i];
params[i].full_bw = bus->params.curr_dr_freq / params[i].rate;
}
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
- rate = m_rt->stream->params.rate;
- bps = m_rt->stream->params.bps;
- ch = m_rt->ch_count;
+ if (m_rt->stream == stream) {
+ /* Only add the runtime of the stream currently being prepared */
+ if (stream->state != SDW_STREAM_CONFIGURED)
+ continue;
+ } else {
+ /*
+ * Include runtimes with running (ENABLED state) and paused (DISABLED state)
+ * streams
+ */
+ if (m_rt->stream->state != SDW_STREAM_ENABLED &&
+ m_rt->stream->state != SDW_STREAM_DISABLED)
+ continue;
+ }
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ rate = m_rt->stream->params.rate;
+ bps = m_rt->stream->params.bps;
+ ch = hweight32(p_rt->ch_mask);
- for (i = 0; i < count; i++) {
- if (rate == params[i].rate)
- params[i].payload_bw += bps * ch;
+ for (i = 0; i < group->count; i++) {
+ if (rate == params[i].rate && p_rt->lane == params[i].lane)
+ params[i].payload_bw += bps * ch;
+ }
}
}
- for (i = 0; i < count; i++) {
- params[i].hwidth = (sel_col *
- params[i].payload_bw + params[i].full_bw - 1) /
- params[i].full_bw;
+ for (l = 0; l < SDW_MAX_LANES; l++) {
+ if (l > 0 && !bus->lane_used_bandwidth[l])
+ continue;
+ /* reset column_needed for each lane */
+ column_needed = 0;
+ for (i = 0; i < group->count; i++) {
+ if (params[i].lane != l)
+ continue;
+
+ params[i].hwidth = (sel_col * params[i].payload_bw +
+ params[i].full_bw - 1) / params[i].full_bw;
- column_needed += params[i].hwidth;
+ column_needed += params[i].hwidth;
+ /* There is no control column for lane 1 and above */
+ if (column_needed > sel_col)
+ return -EINVAL;
+ /* Column 0 is control column on lane 0 */
+ if (params[i].lane == 0 && column_needed > sel_col - 1)
+ return -EINVAL;
+ }
}
- if (column_needed > sel_col - 1)
- return -EINVAL;
return 0;
}
static int sdw_add_element_group_count(struct sdw_group *group,
- unsigned int rate)
+ unsigned int rate, unsigned int lane)
{
int num = group->count;
int i;
for (i = 0; i <= num; i++) {
- if (rate == group->rates[i])
+ if (rate == group->rates[i] && lane == group->lanes[i])
break;
if (i != num)
@@ -208,6 +254,7 @@ static int sdw_add_element_group_count(struct sdw_group *group,
if (group->count >= group->max_size) {
unsigned int *rates;
+ unsigned int *lanes;
group->max_size += 1;
rates = krealloc(group->rates,
@@ -215,10 +262,20 @@ static int sdw_add_element_group_count(struct sdw_group *group,
GFP_KERNEL);
if (!rates)
return -ENOMEM;
+
group->rates = rates;
+
+ lanes = krealloc(group->lanes,
+ (sizeof(int) * group->max_size),
+ GFP_KERNEL);
+ if (!lanes)
+ return -ENOMEM;
+
+ group->lanes = lanes;
}
- group->rates[group->count++] = rate;
+ group->rates[group->count] = rate;
+ group->lanes[group->count++] = lane;
}
return 0;
@@ -228,6 +285,7 @@ static int sdw_get_group_count(struct sdw_bus *bus,
struct sdw_group *group)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
unsigned int rate;
int ret = 0;
@@ -237,17 +295,32 @@ static int sdw_get_group_count(struct sdw_bus *bus,
if (!group->rates)
return -ENOMEM;
+ group->lanes = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
+ if (!group->lanes) {
+ kfree(group->rates);
+ group->rates = NULL;
+ return -ENOMEM;
+ }
+
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ if (m_rt->stream->state == SDW_STREAM_DEPREPARED)
+ continue;
+
rate = m_rt->stream->params.rate;
if (m_rt == list_first_entry(&bus->m_rt_list,
struct sdw_master_runtime,
bus_node)) {
group->rates[group->count++] = rate;
-
- } else {
- ret = sdw_add_element_group_count(group, rate);
+ }
+ /*
+ * Different ports could use different lanes; add a group element
+ * even if m_rt is the first entry.
+ */
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ ret = sdw_add_element_group_count(group, rate, p_rt->lane);
if (ret < 0) {
kfree(group->rates);
+ kfree(group->lanes);
return ret;
}
}
@@ -260,8 +333,9 @@ static int sdw_get_group_count(struct sdw_bus *bus,
* sdw_compute_port_params: Compute transport and port parameters
*
* @bus: SDW Bus instance
+ * @stream: Soundwire stream
*/
-static int sdw_compute_port_params(struct sdw_bus *bus)
+static int sdw_compute_port_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct sdw_group_params *params = NULL;
struct sdw_group group;
@@ -281,8 +355,7 @@ static int sdw_compute_port_params(struct sdw_bus *bus)
}
/* Compute transport parameters for grouped streams */
- ret = sdw_compute_group_params(bus, params,
- &group.rates[0], group.count);
+ ret = sdw_compute_group_params(bus, stream, params, &group);
if (ret < 0)
goto free_params;
@@ -292,6 +365,7 @@ free_params:
kfree(params);
out:
kfree(group.rates);
+ kfree(group.lanes);
return ret;
}
@@ -299,7 +373,6 @@ out:
static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
{
struct sdw_master_prop *prop = &bus->prop;
- int frame_int, frame_freq;
int r, c;
for (c = 0; c < SDW_FRAME_COLS; c++) {
@@ -308,11 +381,8 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
sdw_cols[c] != prop->default_col)
continue;
- frame_int = sdw_rows[r] * sdw_cols[c];
- frame_freq = clk_freq / frame_int;
-
- if ((clk_freq - (frame_freq * SDW_FRAME_CTRL_BITS)) <
- bus->params.bandwidth)
+ if (clk_freq * (sdw_cols[c] - 1) <
+ bus->params.bandwidth * sdw_cols[c])
continue;
bus->params.row = sdw_rows[r];
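One way to read the rewritten test (my interpretation, not wording from the patch): a column count is kept only when

        clk_freq * (sdw_cols[c] - 1) / sdw_cols[c] >= bus->params.bandwidth

i.e. one full column per frame is conservatively set aside for frame control, and the remaining (cols - 1)/cols share of the double-rate clock must still cover the requested bandwidth; unlike the old check, this no longer depends on the row count.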
@@ -324,6 +394,95 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
return -EINVAL;
}
+static bool is_clock_scaling_supported(struct sdw_bus *bus)
+{
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node)
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node)
+ if (!is_clock_scaling_supported_by_slave(s_rt->slave))
+ return false;
+
+ return true;
+}
+
+/**
+ * is_lane_connected_to_all_peripherals: Check if the given manager lane connects to all peripherals
+ * So that all peripherals can use the manager lane.
+ *
+ * @m_rt: Manager runtime
+ * @lane: Lane number
+ */
+static bool is_lane_connected_to_all_peripherals(struct sdw_master_runtime *m_rt, unsigned int lane)
+{
+ struct sdw_slave_prop *slave_prop;
+ struct sdw_slave_runtime *s_rt;
+ int i;
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave_prop = &s_rt->slave->prop;
+ for (i = 1; i < SDW_MAX_LANES; i++) {
+ if (slave_prop->lane_maps[i] == lane) {
+ dev_dbg(&s_rt->slave->dev,
+ "M lane %d is connected to P lane %d\n",
+ lane, i);
+ break;
+ }
+ }
+ if (i == SDW_MAX_LANES) {
+ dev_dbg(&s_rt->slave->dev, "M lane %d is not connected\n", lane);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int get_manager_lane(struct sdw_bus *bus, struct sdw_master_runtime *m_rt,
+ struct sdw_slave_runtime *s_rt, unsigned int curr_dr_freq)
+{
+ struct sdw_slave_prop *slave_prop = &s_rt->slave->prop;
+ struct sdw_port_runtime *m_p_rt;
+ unsigned int required_bandwidth;
+ int m_lane;
+ int l;
+
+ for (l = 1; l < SDW_MAX_LANES; l++) {
+ if (!slave_prop->lane_maps[l])
+ continue;
+
+ required_bandwidth = 0;
+ list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
+ required_bandwidth += m_rt->stream->params.rate *
+ hweight32(m_p_rt->ch_mask) *
+ m_rt->stream->params.bps;
+ }
+ if (required_bandwidth <=
+ curr_dr_freq - bus->lane_used_bandwidth[l]) {
+ /* Check if m_lane is connected to all Peripherals */
+ if (!is_lane_connected_to_all_peripherals(m_rt,
+ slave_prop->lane_maps[l])) {
+ dev_dbg(bus->dev,
+ "Not all Peripherals are connected to M lane %d\n",
+ slave_prop->lane_maps[l]);
+ continue;
+ }
+ m_lane = slave_prop->lane_maps[l];
+ dev_dbg(&s_rt->slave->dev, "M lane %d is used\n", m_lane);
+ bus->lane_used_bandwidth[l] += required_bandwidth;
+ /*
+ * Use non-zero manager lane, subtract the lane 0
+ * bandwidth that is already calculated
+ */
+ bus->params.bandwidth -= required_bandwidth;
+ return m_lane;
+ }
+ }
+
+ /* No available multi lane found, only lane 0 can be used */
+ return 0;
+}
+
/**
* sdw_compute_bus_params: Compute bus parameters
*
@@ -331,10 +490,16 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
*/
static int sdw_compute_bus_params(struct sdw_bus *bus)
{
- unsigned int curr_dr_freq = 0;
struct sdw_master_prop *mstr_prop = &bus->prop;
- int i, clk_values, ret;
+ struct sdw_slave_prop *slave_prop;
+ struct sdw_port_runtime *m_p_rt;
+ struct sdw_port_runtime *s_p_rt;
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ unsigned int curr_dr_freq = 0;
+ int i, l, clk_values, ret;
bool is_gear = false;
+ int m_lane = 0;
u32 *clk_buf;
if (mstr_prop->num_clk_gears) {
@@ -349,6 +514,10 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
clk_buf = NULL;
}
+ /* If dynamic scaling is not supported, don't try higher freq */
+ if (!is_clock_scaling_supported(bus))
+ clk_values = 1;
+
for (i = 0; i < clk_values; i++) {
if (!clk_buf)
curr_dr_freq = bus->params.max_dr_freq;
@@ -357,10 +526,26 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
(bus->params.max_dr_freq >> clk_buf[i]) :
clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;
- if (curr_dr_freq <= bus->params.bandwidth)
- continue;
+ if (curr_dr_freq * (mstr_prop->default_col - 1) >=
+ bus->params.bandwidth * mstr_prop->default_col)
+ break;
- break;
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ /*
+ * Get the first s_rt, which is used to find an available lane. There is
+ * no need to check all Peripherals because multi-lane can't be used if
+ * no lane is available for the first Peripheral.
+ */
+ s_rt = list_first_entry(&m_rt->slave_rt_list,
+ struct sdw_slave_runtime, m_rt_node);
+
+ /*
+ * Find an available Manager lane that is connected to the first Peripheral.
+ */
+ m_lane = get_manager_lane(bus, m_rt, s_rt, curr_dr_freq);
+ if (m_lane > 0)
+ goto out;
+ }
/*
* TODO: Check all the Slave(s) port(s) audio modes and find
@@ -374,6 +559,38 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
__func__, bus->params.bandwidth);
return -EINVAL;
}
+out:
+ /* multilane can be used */
+ if (m_lane > 0) {
+ /* Set Peripheral lanes */
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ slave_prop = &s_rt->slave->prop;
+ for (l = 1; l < SDW_MAX_LANES; l++) {
+ if (slave_prop->lane_maps[l] == m_lane) {
+ list_for_each_entry(s_p_rt, &s_rt->port_list, port_node) {
+ s_p_rt->lane = l;
+ dev_dbg(&s_rt->slave->dev,
+ "Set P lane %d for port %d\n",
+ l, s_p_rt->num);
+ }
+ break;
+ }
+ }
+ }
+ /*
+ * Set Manager lanes. Configure the last m_rt in bus->m_rt_list only since
+ * we don't want to touch other m_rts that are already working.
+ */
+ list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
+ m_p_rt->lane = m_lane;
+ }
+ }
+
+ if (!mstr_prop->default_frame_rate || !mstr_prop->default_row)
+ return -EINVAL;
+
+ mstr_prop->default_col = curr_dr_freq / mstr_prop->default_frame_rate /
+ mstr_prop->default_row;
ret = sdw_select_row_col(bus, curr_dr_freq);
if (ret < 0) {
@@ -390,8 +607,9 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
* sdw_compute_params: Compute bus, transport and port parameters
*
* @bus: SDW Bus instance
+ * @stream: Soundwire stream
*/
-int sdw_compute_params(struct sdw_bus *bus)
+int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
int ret;
@@ -401,7 +619,7 @@ int sdw_compute_params(struct sdw_bus *bus)
return ret;
/* Compute transport and port params */
- ret = sdw_compute_port_params(bus);
+ ret = sdw_compute_port_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
return ret;
diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c
index 0c08cebb1235..c237e6d0766b 100644
--- a/drivers/soundwire/irq.c
+++ b/drivers/soundwire/irq.c
@@ -46,14 +46,18 @@ void sdw_irq_delete(struct sdw_bus *bus)
irq_domain_remove(bus->domain);
}
+static void sdw_irq_dispose_mapping(void *data)
+{
+ struct sdw_slave *slave = data;
+
+ irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+}
+
void sdw_irq_create_mapping(struct sdw_slave *slave)
{
slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
if (!slave->irq)
dev_warn(&slave->dev, "Failed to map IRQ\n");
-}
-void sdw_irq_dispose_mapping(struct sdw_slave *slave)
-{
- irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+ devm_add_action_or_reset(&slave->dev, sdw_irq_dispose_mapping, slave);
}
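devm_add_action_or_reset() runs the callback on driver detach, or immediately if registration fails, which is what lets the explicit sdw_irq_dispose_mapping() call disappear from the remove path; the general idiom (foo_* names are invented):

#include <linux/device.h>

static void foo_cleanup(void *data)
{
        struct foo_priv *priv = data;

        /* undo whatever was set up before this call */
}

ret = devm_add_action_or_reset(dev, foo_cleanup, priv);
if (ret)
        return ret;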
diff --git a/drivers/soundwire/irq.h b/drivers/soundwire/irq.h
index 58a58046d92b..86e2318409da 100644
--- a/drivers/soundwire/irq.h
+++ b/drivers/soundwire/irq.h
@@ -16,7 +16,6 @@ int sdw_irq_create(struct sdw_bus *bus,
struct fwnode_handle *fwnode);
void sdw_irq_delete(struct sdw_bus *bus);
void sdw_irq_create_mapping(struct sdw_slave *slave);
-void sdw_irq_dispose_mapping(struct sdw_slave *slave);
#else /* CONFIG_IRQ_DOMAIN */
@@ -34,10 +33,6 @@ static inline void sdw_irq_create_mapping(struct sdw_slave *slave)
{
}
-static inline void sdw_irq_dispose_mapping(struct sdw_slave *slave)
-{
-}
-
#endif /* CONFIG_IRQ_DOMAIN */
#endif /* __SDW_IRQ_H */
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 9d59f486edbe..65afb28ef8fa 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -366,6 +366,44 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
return 0;
}
+/*
+ * In the MIPI DisCo spec for SoundWire, lane mapping for a slave device is done with
+ * mipi-sdw-lane-x-mapping properties, where x is 1..7, and the values for those
+ * properties are mipi-sdw-manager-lane-x or mipi-sdw-peripheral-link-y, where x
+ * is an integer between 1 and 7 if the lane is connected to a manager lane, and y is a
+ * character between A and E if the lane is connected to another peripheral lane.
+ */
+int sdw_slave_read_lane_mapping(struct sdw_slave *slave)
+{
+ struct sdw_slave_prop *prop = &slave->prop;
+ struct device *dev = &slave->dev;
+ char prop_name[30];
+ const char *prop_val;
+ size_t len;
+ int ret, i;
+ u8 lane;
+
+ for (i = 0; i < SDW_MAX_LANES; i++) {
+ snprintf(prop_name, sizeof(prop_name), "mipi-sdw-lane-%d-mapping", i);
+ ret = device_property_read_string(dev, prop_name, &prop_val);
+ if (ret)
+ continue;
+
+ len = strlen(prop_val);
+ if (len < 1)
+ return -EINVAL;
+
+ /* The last character is enough to identify the connection */
+ ret = kstrtou8(&prop_val[len - 1], 10, &lane);
+ if (ret)
+ return ret;
+ if (in_range(lane, 1, SDW_MAX_LANES - 1))
+ prop->lane_maps[i] = lane;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sdw_slave_read_lane_mapping);
+
/**
* sdw_slave_read_prop() - Read Slave properties
* @slave: SDW Slave
@@ -486,6 +524,6 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
prop->sink_ports, "sink");
- return 0;
+ return sdw_slave_read_lane_mapping(slave);
}
EXPORT_SYMBOL(sdw_slave_read_prop);
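
A small illustration of how the trailing character of a lane-mapping value resolves to a lane number, under the format described in the comment above (last_char_to_lane is a hypothetical helper, not part of the patch): values ending in a digit such as "mipi-sdw-manager-lane-3" parse to 3, while peripheral-link values ending in a letter make kstrtou8() return an error.

#include <linux/kstrtox.h>
#include <linux/string.h>
#include <linux/types.h>

static int last_char_to_lane(const char *prop_val, u8 *lane)
{
	size_t len = strlen(prop_val);

	if (!len)
		return -EINVAL;
	/* "mipi-sdw-manager-lane-3" -> 3; a trailing letter is rejected by kstrtou8() */
	return kstrtou8(&prop_val[len - 1], 10, lane);
}
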
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index e00c5ac496a6..0f45e3404756 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1072,7 +1072,7 @@ static const struct sdw_master_ops qcom_swrm_ops = {
.pre_bank_switch = qcom_swrm_pre_bank_switch,
};
-static int qcom_swrm_compute_params(struct sdw_bus *bus)
+static int qcom_swrm_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
struct sdw_master_runtime *m_rt;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 7aa4900dcf31..e9df503332bb 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -629,8 +629,44 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
static int sdw_program_params(struct sdw_bus *bus, bool prepare)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_slave *slave;
int ret = 0;
+ u32 addr1;
+
+ /* Check if all Peripherals comply with SDCA */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num_sticky)
+ continue;
+ if (!is_clock_scaling_supported_by_slave(slave)) {
+ dev_dbg(&slave->dev, "The Peripheral doesn't comply with SDCA\n");
+ goto manager_runtime;
+ }
+ }
+
+ if (bus->params.next_bank)
+ addr1 = SDW_SCP_BUSCLOCK_SCALE_B1;
+ else
+ addr1 = SDW_SCP_BUSCLOCK_SCALE_B0;
+
+ /* Program SDW_SCP_BUSCLOCK_SCALE if all Peripherals comply with SDCA */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ int scale_index;
+ u8 base;
+
+ if (!slave->dev_num_sticky)
+ continue;
+ scale_index = sdw_slave_get_scale_index(slave, &base);
+ if (scale_index < 0)
+ return scale_index;
+
+ ret = sdw_write_no_pm(slave, addr1, scale_index);
+ if (ret < 0) {
+ dev_err(&slave->dev, "SDW_SCP_BUSCLOCK_SCALE register write failed\n");
+ return ret;
+ }
+ }
+manager_runtime:
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
/*
@@ -1383,7 +1419,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
/* Compute params */
if (bus->compute_params) {
- ret = bus->compute_params(bus);
+ ret = bus->compute_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute params failed: %d\n",
ret);
@@ -1642,9 +1678,19 @@ EXPORT_SYMBOL(sdw_disable_stream);
static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
{
struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned int multi_lane_bandwidth;
+ unsigned int bandwidth;
struct sdw_bus *bus;
+ int state = stream->state;
int ret = 0;
+ /*
+ * first mark the state as DEPREPARED so that it is not taken into account
+ * for bit allocation
+ */
+ stream->state = SDW_STREAM_DEPREPARED;
+
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
bus = m_rt->bus;
/* De-prepare port(s) */
@@ -1652,19 +1698,34 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
if (ret < 0) {
dev_err(bus->dev,
"De-prepare port(s) failed: %d\n", ret);
+ stream->state = state;
return ret;
}
+ multi_lane_bandwidth = 0;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ if (!p_rt->lane)
+ continue;
+
+ bandwidth = m_rt->stream->params.rate * hweight32(p_rt->ch_mask) *
+ m_rt->stream->params.bps;
+ multi_lane_bandwidth += bandwidth;
+ bus->lane_used_bandwidth[p_rt->lane] -= bandwidth;
+ if (!bus->lane_used_bandwidth[p_rt->lane])
+ p_rt->lane = 0;
+ }
/* TODO: Update this during Device-Device support */
- bus->params.bandwidth -= m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ bandwidth = m_rt->stream->params.rate * m_rt->ch_count * m_rt->stream->params.bps;
+ bus->params.bandwidth -= bandwidth - multi_lane_bandwidth;
/* Compute params */
if (bus->compute_params) {
- ret = bus->compute_params(bus);
+ ret = bus->compute_params(bus, stream);
if (ret < 0) {
dev_err(bus->dev, "Compute params failed: %d\n",
ret);
+ stream->state = state;
return ret;
}
}
@@ -1673,11 +1734,11 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
+ stream->state = state;
return ret;
}
}
- stream->state = SDW_STREAM_DEPREPARED;
return do_bank_switch(stream);
}
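
For reference, a worked sketch (hypothetical numbers, helper name illustrative) of the per-port bandwidth term that the de-prepare path above subtracts for multi-lane ports:

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned int port_bandwidth(unsigned int rate, u32 ch_mask, unsigned int bps)
{
	/* e.g. 48000 Hz * 2 channels (ch_mask = 0x3) * 24 bits = 2304000 bits/s */
	return rate * hweight32(ch_mask) * bps;
}
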
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index add6247d3481..29c616e2c408 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1561,10 +1561,15 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
- if (IS_ERR(mcspi->ref_clk))
- mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
- else
+ if (IS_ERR(mcspi->ref_clk)) {
+ status = PTR_ERR(mcspi->ref_clk);
+ dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
+ goto free_ctlr;
+ }
+ if (mcspi->ref_clk)
mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
+ else
+ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
ctlr->max_speed_hz = mcspi->ref_clk_hz;
ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
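
A minimal sketch of the optional-clock pattern the probe change above adopts (MY_DEFAULT_HZ and my_get_rate are illustrative, not from the driver): devm_clk_get_optional_enabled() returns an error pointer on genuine failures but NULL when no clock is specified, so only the NULL case should fall back to a default rate.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

#define MY_DEFAULT_HZ	48000000

static int my_get_rate(struct device *dev, unsigned long *hz)
{
	struct clk *clk = devm_clk_get_optional_enabled(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);				/* genuine error: propagate */
	*hz = clk ? clk_get_rate(clk) : MY_DEFAULT_HZ;		/* NULL: no clock given */
	return 0;
}
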
diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
index 3cafdf22c909..122140b97579 100644
--- a/drivers/spmi/hisi-spmi-controller.c
+++ b/drivers/spmi/hisi-spmi-controller.c
@@ -300,9 +300,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
spin_lock_init(&spmi_controller->lock);
- ctrl->dev.parent = pdev->dev.parent;
- ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
-
/* Callbacks */
ctrl->read_cmd = spmi_read_cmd;
ctrl->write_cmd = spmi_write_cmd;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index fb0101da1485..3cf8d9bd4566 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -517,7 +517,7 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
if (!sdev)
continue;
- sdev->dev.of_node = node;
+ device_set_node(&sdev->dev, of_fwnode_handle(node));
sdev->usid = (u8)reg[0];
err = spmi_device_add(sdev);
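
The one-line change above swaps a direct of_node assignment for device_set_node(); a brief sketch of that call (my_adopt_of_node is a hypothetical wrapper), whose point is that the device's fwnode and of_node are set consistently in one place.

#include <linux/device.h>
#include <linux/of.h>

static void my_adopt_of_node(struct device *dev, struct device_node *node)
{
	/* Sets dev->fwnode (and thus dev->of_node) together, instead of open-coding the latter. */
	device_set_node(dev, of_fwnode_handle(node));
}
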
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index f6db2933ebba..6736b09b2f45 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "fbtft.h"
@@ -162,7 +163,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
+ __func__, str_true_false(on));
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 8e2334fe5c9b..3f4f95b7fe34 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -30,11 +30,8 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
unsigned short event_status;
int i, num_fifo_bytes;
//hardware doesn't support checking for end-of-string character when using fifo
- if (tms_priv->eos_flags & REOS) {
- //pr_info("ag-rd: using tms9914 read for REOS %x EOS %x\n",tms_priv->eos_flags,
- // tms_priv->eos);
+ if (tms_priv->eos_flags & REOS)
return tms9914_read(board, tms_priv, buffer, length, end, bytes_read);
- }
clear_bit(DEV_CLEAR_BN, &tms_priv->state);
@@ -811,15 +808,15 @@ void agilent_82350b_detach(gpib_board_t *board)
if (a_priv->gpib_base) {
tms9914_board_reset(tms_priv);
if (a_priv->misc_base)
- iounmap((void *)a_priv->misc_base);
+ iounmap(a_priv->misc_base);
if (a_priv->borg_base)
- iounmap((void *)a_priv->borg_base);
+ iounmap(a_priv->borg_base);
if (a_priv->sram_base)
- iounmap((void *)a_priv->sram_base);
+ iounmap(a_priv->sram_base);
if (a_priv->gpib_base)
- iounmap((void *)a_priv->gpib_base);
+ iounmap(a_priv->gpib_base);
if (a_priv->plx_base)
- iounmap((void *)a_priv->plx_base);
+ iounmap(a_priv->plx_base);
pci_release_regions(a_priv->pci_device);
}
if (a_priv->pci_device)
@@ -828,58 +825,58 @@ void agilent_82350b_detach(gpib_board_t *board)
agilent_82350b_free_private(board);
}
-gpib_interface_t agilent_82350b_unaccel_interface = {
-name: "agilent_82350b_unaccel",
-attach : agilent_82350b_unaccel_attach,
-detach : agilent_82350b_detach,
-read : agilent_82350b_read,
-write : agilent_82350b_write,
-command : agilent_82350b_command,
-request_system_control : agilent_82350b_request_system_control,
-take_control : agilent_82350b_take_control,
-go_to_standby : agilent_82350b_go_to_standby,
-interface_clear : agilent_82350b_interface_clear,
-remote_enable : agilent_82350b_remote_enable,
-enable_eos : agilent_82350b_enable_eos,
-disable_eos : agilent_82350b_disable_eos,
-parallel_poll : agilent_82350b_parallel_poll,
-parallel_poll_configure : agilent_82350b_parallel_poll_configure,
-parallel_poll_response : agilent_82350b_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82350b_line_status,
-update_status : agilent_82350b_update_status,
-primary_address : agilent_82350b_primary_address,
-secondary_address : agilent_82350b_secondary_address,
-serial_poll_response : agilent_82350b_serial_poll_response,
-t1_delay : agilent_82350b_t1_delay,
-return_to_local : agilent_82350b_return_to_local,
+static gpib_interface_t agilent_82350b_unaccel_interface = {
+ .name = "agilent_82350b_unaccel",
+ .attach = agilent_82350b_unaccel_attach,
+ .detach = agilent_82350b_detach,
+ .read = agilent_82350b_read,
+ .write = agilent_82350b_write,
+ .command = agilent_82350b_command,
+ .request_system_control = agilent_82350b_request_system_control,
+ .take_control = agilent_82350b_take_control,
+ .go_to_standby = agilent_82350b_go_to_standby,
+ .interface_clear = agilent_82350b_interface_clear,
+ .remote_enable = agilent_82350b_remote_enable,
+ .enable_eos = agilent_82350b_enable_eos,
+ .disable_eos = agilent_82350b_disable_eos,
+ .parallel_poll = agilent_82350b_parallel_poll,
+ .parallel_poll_configure = agilent_82350b_parallel_poll_configure,
+ .parallel_poll_response = agilent_82350b_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82350b_line_status,
+ .update_status = agilent_82350b_update_status,
+ .primary_address = agilent_82350b_primary_address,
+ .secondary_address = agilent_82350b_secondary_address,
+ .serial_poll_response = agilent_82350b_serial_poll_response,
+ .t1_delay = agilent_82350b_t1_delay,
+ .return_to_local = agilent_82350b_return_to_local,
};
-gpib_interface_t agilent_82350b_interface = {
-name: "agilent_82350b",
-attach : agilent_82350b_accel_attach,
-detach : agilent_82350b_detach,
-read : agilent_82350b_accel_read,
-write : agilent_82350b_accel_write,
-command : agilent_82350b_command,
-request_system_control : agilent_82350b_request_system_control,
-take_control : agilent_82350b_take_control,
-go_to_standby : agilent_82350b_go_to_standby,
-interface_clear : agilent_82350b_interface_clear,
-remote_enable : agilent_82350b_remote_enable,
-enable_eos : agilent_82350b_enable_eos,
-disable_eos : agilent_82350b_disable_eos,
-parallel_poll : agilent_82350b_parallel_poll,
-parallel_poll_configure : agilent_82350b_parallel_poll_configure,
-parallel_poll_response : agilent_82350b_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82350b_line_status,
-update_status : agilent_82350b_update_status,
-primary_address : agilent_82350b_primary_address,
-secondary_address : agilent_82350b_secondary_address,
-serial_poll_response : agilent_82350b_serial_poll_response,
-t1_delay : agilent_82350b_t1_delay,
-return_to_local : agilent_82350b_return_to_local,
+static gpib_interface_t agilent_82350b_interface = {
+ .name = "agilent_82350b",
+ .attach = agilent_82350b_accel_attach,
+ .detach = agilent_82350b_detach,
+ .read = agilent_82350b_accel_read,
+ .write = agilent_82350b_accel_write,
+ .command = agilent_82350b_command,
+ .request_system_control = agilent_82350b_request_system_control,
+ .take_control = agilent_82350b_take_control,
+ .go_to_standby = agilent_82350b_go_to_standby,
+ .interface_clear = agilent_82350b_interface_clear,
+ .remote_enable = agilent_82350b_remote_enable,
+ .enable_eos = agilent_82350b_enable_eos,
+ .disable_eos = agilent_82350b_disable_eos,
+ .parallel_poll = agilent_82350b_parallel_poll,
+ .parallel_poll_configure = agilent_82350b_parallel_poll_configure,
+ .parallel_poll_response = agilent_82350b_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82350b_line_status,
+ .update_status = agilent_82350b_update_status,
+ .primary_address = agilent_82350b_primary_address,
+ .secondary_address = agilent_82350b_secondary_address,
+ .serial_poll_response = agilent_82350b_serial_poll_response,
+ .t1_delay = agilent_82350b_t1_delay,
+ .return_to_local = agilent_82350b_return_to_local,
};
static int agilent_82350b_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -910,13 +907,30 @@ static int __init agilent_82350b_init_module(void)
result = pci_register_driver(&agilent_82350b_pci_driver);
if (result) {
- pr_err("agilent_82350b: pci_driver_register failed!\n");
+ pr_err("agilent_82350b: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
+ result = gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
+ if (result) {
+ pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
+
return 0;
+
+err_interface:
+ gpib_unregister_driver(&agilent_82350b_unaccel_interface);
+err_unaccel:
+ pci_unregister_driver(&agilent_82350b_pci_driver);
+
+ return result;
}
static void __exit agilent_82350b_exit_module(void)
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
index 30683d67d170..32b322113c10 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
@@ -45,11 +45,11 @@ enum board_model {
struct agilent_82350b_priv {
struct tms9914_priv tms9914_priv;
struct pci_dev *pci_device;
- void *plx_base; //82350a only
- void *gpib_base;
- void *sram_base;
- void *misc_base;
- void *borg_base;
+ void __iomem *plx_base; //82350a only
+ void __iomem *gpib_base;
+ void __iomem *sram_base;
+ void __iomem *misc_base;
+ void __iomem *borg_base;
int irq;
unsigned short card_mode_bits;
unsigned short event_status_bits;
@@ -60,8 +60,6 @@ struct agilent_82350b_priv {
// driver name
extern const char *driver_name;
-// interfaces
-extern gpib_interface_t agilent_82350b_interface;
// init functions
int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config);
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
index bf05fb4a736b..69f0e490d401 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
@@ -19,7 +19,7 @@ MODULE_DESCRIPTION("GPIB driver for Agilent 82357A/B usb adapters");
#define MAX_NUM_82357A_INTERFACES 128
static struct usb_interface *agilent_82357a_driver_interfaces[MAX_NUM_82357A_INTERFACES];
-DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
+static DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask);
@@ -1146,25 +1146,6 @@ setup_exit:
return retval;
}
-#ifdef RESET_USB_CONFIG
-static int agilent_82357a_reset_usb_configuration(gpib_board_t *board)
-{
- struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
- struct usb_device *usb_dev;
- int retval;
-
- if (!a_priv->bus_interface)
- return -ENODEV;
- usb_dev = interface_to_usbdev(a_priv->bus_interface);
- retval = usb_reset_configuration(usb_dev);
- if (retval)
- dev_err(&usb_dev->dev, "%s: usb_reset_configuration() returned %i\n",
- __func__, retval);
- return retval;
-}
-#endif
-
static void agilent_82357a_cleanup_urbs(struct agilent_82357a_priv *a_priv)
{
if (a_priv && a_priv->bus_interface) {
@@ -1175,15 +1156,23 @@ static void agilent_82357a_cleanup_urbs(struct agilent_82357a_priv *a_priv)
}
};
+static void agilent_82357a_release_urbs(struct agilent_82357a_priv *a_priv)
+{
+ if (a_priv) {
+ usb_free_urb(a_priv->interrupt_urb);
+ a_priv->interrupt_urb = NULL;
+ kfree(a_priv->interrupt_buffer);
+ }
+}
+
static int agilent_82357a_allocate_private(gpib_board_t *board)
{
struct agilent_82357a_priv *a_priv;
- board->private_data = kmalloc(sizeof(struct agilent_82357a_priv), GFP_KERNEL);
+ board->private_data = kzalloc(sizeof(struct agilent_82357a_priv), GFP_KERNEL);
if (!board->private_data)
return -ENOMEM;
a_priv = board->private_data;
- memset(a_priv, 0, sizeof(struct agilent_82357a_priv));
mutex_init(&a_priv->bulk_transfer_lock);
mutex_init(&a_priv->bulk_alloc_lock);
mutex_init(&a_priv->control_alloc_lock);
@@ -1191,11 +1180,11 @@ static int agilent_82357a_allocate_private(gpib_board_t *board)
return 0;
}
-static void agilent_82357a_free_private(struct agilent_82357a_priv *a_priv)
+static void agilent_82357a_free_private(gpib_board_t *board)
{
- usb_free_urb(a_priv->interrupt_urb);
- kfree(a_priv->interrupt_buffer);
- kfree(a_priv);
+ kfree(board->private_data);
+ board->private_data = NULL;
+
}
static int agilent_82357a_init(gpib_board_t *board)
@@ -1342,16 +1331,14 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
a_priv->bus_interface = agilent_82357a_driver_interfaces[i];
usb_set_intfdata(agilent_82357a_driver_interfaces[i], board);
usb_dev = interface_to_usbdev(a_priv->bus_interface);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
break;
}
}
if (i == MAX_NUM_82357A_INTERFACES) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- pr_err("No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
- return -ENODEV;
+ dev_err(board->gpib_dev,
+ "No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
+ retval = -ENODEV;
+ goto attach_fail;
}
product_id = le16_to_cpu(interface_to_usbdev(a_priv->bus_interface)->descriptor.idProduct);
switch (product_id) {
@@ -1365,20 +1352,13 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
break;
default:
dev_err(&usb_dev->dev, "bug, unhandled product_id in switch?\n");
- return -EIO;
- }
-#ifdef RESET_USB_CONFIG
- retval = agilent_82357a_reset_usb_configuration(board);
- if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
+ retval = -EIO;
+ goto attach_fail;
}
-#endif
+
retval = agilent_82357a_setup_urbs(board);
- if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
- }
+ if (retval < 0)
+ goto attach_fail;
timer_setup(&a_priv->bulk_timer, agilent_82357a_timeout_handler, 0);
@@ -1387,11 +1367,19 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
retval = agilent_82357a_init(board);
if (retval < 0) {
- mutex_unlock(&agilent_82357a_hotplug_lock);
- return retval;
+ agilent_82357a_cleanup_urbs(a_priv);
+ agilent_82357a_release_urbs(a_priv);
+ goto attach_fail;
}
- dev_info(&usb_dev->dev, "%s: attached\n", __func__);
+ dev_info(&usb_dev->dev,
+ "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ mutex_unlock(&agilent_82357a_hotplug_lock);
+ return retval;
+
+attach_fail:
+ agilent_82357a_free_private(board);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
}
@@ -1441,12 +1429,10 @@ static int agilent_82357a_go_idle(gpib_board_t *board)
static void agilent_82357a_detach(gpib_board_t *board)
{
struct agilent_82357a_priv *a_priv;
- struct usb_device *usb_dev;
mutex_lock(&agilent_82357a_hotplug_lock);
a_priv = board->private_data;
- usb_dev = interface_to_usbdev(a_priv->bus_interface);
if (a_priv) {
if (a_priv->bus_interface) {
agilent_82357a_go_idle(board);
@@ -1456,40 +1442,41 @@ static void agilent_82357a_detach(gpib_board_t *board)
mutex_lock(&a_priv->bulk_alloc_lock);
mutex_lock(&a_priv->interrupt_alloc_lock);
agilent_82357a_cleanup_urbs(a_priv);
- agilent_82357a_free_private(a_priv);
+ agilent_82357a_release_urbs(a_priv);
+ agilent_82357a_free_private(board);
}
- dev_info(&usb_dev->dev, "%s: detached\n", __func__);
+ dev_info(board->gpib_dev, "%s: detached\n", __func__);
mutex_unlock(&agilent_82357a_hotplug_lock);
}
-gpib_interface_t agilent_82357a_gpib_interface = {
-name: "agilent_82357a",
-attach : agilent_82357a_attach,
-detach : agilent_82357a_detach,
-read : agilent_82357a_read,
-write : agilent_82357a_write,
-command : agilent_82357a_command,
-take_control : agilent_82357a_take_control,
-go_to_standby : agilent_82357a_go_to_standby,
-request_system_control : agilent_82357a_request_system_control,
-interface_clear : agilent_82357a_interface_clear,
-remote_enable : agilent_82357a_remote_enable,
-enable_eos : agilent_82357a_enable_eos,
-disable_eos : agilent_82357a_disable_eos,
-parallel_poll : agilent_82357a_parallel_poll,
-parallel_poll_configure : agilent_82357a_parallel_poll_configure,
-parallel_poll_response : agilent_82357a_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : agilent_82357a_line_status,
-update_status : agilent_82357a_update_status,
-primary_address : agilent_82357a_primary_address,
-secondary_address : agilent_82357a_secondary_address,
-serial_poll_response : agilent_82357a_serial_poll_response,
-serial_poll_status : agilent_82357a_serial_poll_status,
-t1_delay : agilent_82357a_t1_delay,
-return_to_local : agilent_82357a_return_to_local,
-no_7_bit_eos : 1,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t agilent_82357a_gpib_interface = {
+ .name = "agilent_82357a",
+ .attach = agilent_82357a_attach,
+ .detach = agilent_82357a_detach,
+ .read = agilent_82357a_read,
+ .write = agilent_82357a_write,
+ .command = agilent_82357a_command,
+ .take_control = agilent_82357a_take_control,
+ .go_to_standby = agilent_82357a_go_to_standby,
+ .request_system_control = agilent_82357a_request_system_control,
+ .interface_clear = agilent_82357a_interface_clear,
+ .remote_enable = agilent_82357a_remote_enable,
+ .enable_eos = agilent_82357a_enable_eos,
+ .disable_eos = agilent_82357a_disable_eos,
+ .parallel_poll = agilent_82357a_parallel_poll,
+ .parallel_poll_configure = agilent_82357a_parallel_poll_configure,
+ .parallel_poll_response = agilent_82357a_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = agilent_82357a_line_status,
+ .update_status = agilent_82357a_update_status,
+ .primary_address = agilent_82357a_primary_address,
+ .secondary_address = agilent_82357a_secondary_address,
+ .serial_poll_response = agilent_82357a_serial_poll_response,
+ .serial_poll_status = agilent_82357a_serial_poll_status,
+ .t1_delay = agilent_82357a_t1_delay,
+ .return_to_local = agilent_82357a_return_to_local,
+ .no_7_bit_eos = 1,
+ .skip_check_for_command_acceptors = 1
};
// Table with the USB-devices: just now only testing IDs
@@ -1691,12 +1678,24 @@ static struct usb_driver agilent_82357a_bus_driver = {
static int __init agilent_82357a_init_module(void)
{
int i;
+ int ret;
pr_info("agilent_82357a_gpib driver loading");
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i)
agilent_82357a_driver_interfaces[i] = NULL;
- usb_register(&agilent_82357a_bus_driver);
- gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
+
+ ret = usb_register(&agilent_82357a_bus_driver);
+ if (ret) {
+ pr_err("agilent_82357a: usb_register failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("agilent_82357a: gpib_register_driver failed: error = %d\n", ret);
+ usb_deregister(&agilent_82357a_bus_driver);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 59e41c97f518..4d22f647a453 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -683,170 +683,170 @@ void cb7210_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-gpib_interface_t cb_pci_unaccel_interface = {
-name: "cbi_pci_unaccel",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_unaccel_interface = {
+ .name = "cbi_pci_unaccel",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pci_accel_interface = {
-name: "cbi_pci_accel",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_accel_interface = {
+ .name = "cbi_pci_accel",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pci_interface = {
-name: "cbi_pci",
-attach : cb_pci_attach,
-detach : cb_pci_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pci_interface = {
+ .name = "cbi_pci",
+ .attach = cb_pci_attach,
+ .detach = cb_pci_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_unaccel_interface = {
-name: "cbi_isa_unaccel",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_unaccel_interface = {
+ .name = "cbi_isa_unaccel",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_interface = {
-name: "cbi_isa",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_interface = {
+ .name = "cbi_isa",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_isa_accel_interface = {
-name: "cbi_isa_accel",
-attach : cb_isa_attach,
-detach : cb_isa_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_isa_accel_interface = {
+ .name = "cbi_isa_accel",
+ .attach = cb_isa_attach,
+ .detach = cb_isa_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
static int cb7210_allocate_private(gpib_board_t *board)
@@ -1040,8 +1040,8 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region(config->ibbase, cb7210_iosize, "cb7210") == 0) {
- pr_err("gpib: ioports starting at 0x%u are already in use\n", config->ibbase);
+ if (!request_region(config->ibbase, cb7210_iosize, "cb7210")) {
+ pr_err("gpib: ioports starting at 0x%x are already in use\n", config->ibbase);
return -EIO;
}
nec_priv->iobase = config->ibbase;
@@ -1351,100 +1351,94 @@ static struct pcmcia_driver cb_gpib_cs_driver = {
.resume = cb_gpib_resume,
};
-int cb_pcmcia_init_module(void)
-{
- pcmcia_register_driver(&cb_gpib_cs_driver);
- return 0;
-}
-
void cb_pcmcia_cleanup_module(void)
{
DEBUG(0, "cb_gpib_cs: unloading\n");
pcmcia_unregister_driver(&cb_gpib_cs_driver);
}
-gpib_interface_t cb_pcmcia_unaccel_interface = {
-name: "cbi_pcmcia_unaccel",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_read,
-write : cb7210_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_unaccel_interface = {
+ .name = "cbi_pcmcia_unaccel",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_read,
+ .write = cb7210_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pcmcia_interface = {
-name: "cbi_pcmcia",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_interface = {
+ .name = "cbi_pcmcia",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
-gpib_interface_t cb_pcmcia_accel_interface = {
-name: "cbi_pcmcia_accel",
-attach : cb_pcmcia_attach,
-detach : cb_pcmcia_detach,
-read : cb7210_accel_read,
-write : cb7210_accel_write,
-command : cb7210_command,
-take_control : cb7210_take_control,
-go_to_standby : cb7210_go_to_standby,
-request_system_control : cb7210_request_system_control,
-interface_clear : cb7210_interface_clear,
-remote_enable : cb7210_remote_enable,
-enable_eos : cb7210_enable_eos,
-disable_eos : cb7210_disable_eos,
-parallel_poll : cb7210_parallel_poll,
-parallel_poll_configure : cb7210_parallel_poll_configure,
-parallel_poll_response : cb7210_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : cb7210_line_status,
-update_status : cb7210_update_status,
-primary_address : cb7210_primary_address,
-secondary_address : cb7210_secondary_address,
-serial_poll_response : cb7210_serial_poll_response,
-serial_poll_status : cb7210_serial_poll_status,
-t1_delay : cb7210_t1_delay,
-return_to_local : cb7210_return_to_local,
+static gpib_interface_t cb_pcmcia_accel_interface = {
+ .name = "cbi_pcmcia_accel",
+ .attach = cb_pcmcia_attach,
+ .detach = cb_pcmcia_detach,
+ .read = cb7210_accel_read,
+ .write = cb7210_accel_write,
+ .command = cb7210_command,
+ .take_control = cb7210_take_control,
+ .go_to_standby = cb7210_go_to_standby,
+ .request_system_control = cb7210_request_system_control,
+ .interface_clear = cb7210_interface_clear,
+ .remote_enable = cb7210_remote_enable,
+ .enable_eos = cb7210_enable_eos,
+ .disable_eos = cb7210_disable_eos,
+ .parallel_poll = cb7210_parallel_poll,
+ .parallel_poll_configure = cb7210_parallel_poll_configure,
+ .parallel_poll_response = cb7210_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = cb7210_line_status,
+ .update_status = cb7210_update_status,
+ .primary_address = cb7210_primary_address,
+ .secondary_address = cb7210_secondary_address,
+ .serial_poll_response = cb7210_serial_poll_response,
+ .serial_poll_status = cb7210_serial_poll_status,
+ .t1_delay = cb7210_t1_delay,
+ .return_to_local = cb7210_return_to_local,
};
int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
@@ -1465,8 +1459,8 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "cb7210") == 0) {
+ if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
+ "cb7210")) {
pr_err("gpib: ioports starting at 0x%lx are already in use\n",
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
@@ -1506,32 +1500,102 @@ void cb_pcmcia_detach(gpib_board_t *board)
static int __init cb7210_init_module(void)
{
- int err = 0;
- int result;
+ int ret;
+
+ ret = pci_register_driver(&cb7210_pci_driver);
+ if (ret) {
+ pr_err("cb7210: pci_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&cb_pci_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci;
+ }
+
+ ret = gpib_register_driver(&cb_isa_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa;
+ }
- result = pci_register_driver(&cb7210_pci_driver);
- if (result) {
- pr_err("cb7210: pci_driver_register failed!\n");
- return result;
+ ret = gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_accel;
}
- gpib_register_driver(&cb_pci_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_interface, THIS_MODULE);
- gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
-
-#ifdef GPIB__PCMCIA
- gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
- gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
- err += cb_pcmcia_init_module();
+ ret = gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_unaccel;
+ }
+
+ ret = gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa_accel;
+ }
+
+ ret = gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa_unaccel;
+ }
+
+#ifdef GPIB_PCMCIA
+ ret = gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia;
+ }
+
+ ret = gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_accel;
+ }
+
+ ret = gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_unaccel;
+ }
+
+ ret = pcmcia_register_driver(&cb_gpib_cs_driver);
+ if (ret) {
+ pr_err("cb7210: pcmcia_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_driver;
+ }
#endif
- if (err)
- return -1;
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
+err_pcmcia_unaccel:
+ gpib_unregister_driver(&cb_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&cb_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&cb_isa_unaccel_interface);
+err_isa_unaccel:
+ gpib_unregister_driver(&cb_isa_accel_interface);
+err_isa_accel:
+ gpib_unregister_driver(&cb_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&cb_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&cb_isa_interface);
+err_isa:
+ gpib_unregister_driver(&cb_pci_interface);
+err_pci:
+ pci_unregister_driver(&cb7210_pci_driver);
+
+ return ret;
}
static void __exit cb7210_exit_module(void)
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index c17cb22585f7..d56cd905cc8c 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -36,11 +36,6 @@ struct cb7210_priv {
unsigned in_fifo_half_full : 1;
};
-// interfaces
-extern gpib_interface_t cb_pcmcia_interface;
-extern gpib_interface_t cb_pcmcia_accel_interface;
-extern gpib_interface_t cb_pcmcia_unaccel_interface;
-
// interrupt service routines
irqreturn_t cb_pci_interrupt(int irq, void *arg);
irqreturn_t cb7210_interrupt(int irq, void *arg);
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/staging/gpib/cec/cec.h
index 352cf83d8328..040ca70ed708 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/staging/gpib/cec/cec.h
@@ -16,10 +16,6 @@ struct cec_priv {
unsigned int irq;
};
-// interfaces
-extern gpib_interface_t cec_pci_interface;
-extern gpib_interface_t cec_pcmcia_interface;
-
// interface functions
int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index 9c00a874468c..d056cd1d6b3e 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -182,32 +182,32 @@ void cec_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t cec_pci_interface = {
-name: "cec_pci",
-attach : cec_pci_attach,
-detach : cec_pci_detach,
-read : cec_read,
-write : cec_write,
-command : cec_command,
-take_control : cec_take_control,
-go_to_standby : cec_go_to_standby,
-request_system_control : cec_request_system_control,
-interface_clear : cec_interface_clear,
-remote_enable : cec_remote_enable,
-enable_eos : cec_enable_eos,
-disable_eos : cec_disable_eos,
-parallel_poll : cec_parallel_poll,
-parallel_poll_configure : cec_parallel_poll_configure,
-parallel_poll_response : cec_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL, //XXX
-update_status : cec_update_status,
-primary_address : cec_primary_address,
-secondary_address : cec_secondary_address,
-serial_poll_response : cec_serial_poll_response,
-serial_poll_status : cec_serial_poll_status,
-t1_delay : cec_t1_delay,
-return_to_local : cec_return_to_local,
+static gpib_interface_t cec_pci_interface = {
+ .name = "cec_pci",
+ .attach = cec_pci_attach,
+ .detach = cec_pci_detach,
+ .read = cec_read,
+ .write = cec_write,
+ .command = cec_command,
+ .take_control = cec_take_control,
+ .go_to_standby = cec_go_to_standby,
+ .request_system_control = cec_request_system_control,
+ .interface_clear = cec_interface_clear,
+ .remote_enable = cec_remote_enable,
+ .enable_eos = cec_enable_eos,
+ .disable_eos = cec_disable_eos,
+ .parallel_poll = cec_parallel_poll,
+ .parallel_poll_configure = cec_parallel_poll_configure,
+ .parallel_poll_response = cec_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = cec_update_status,
+ .primary_address = cec_primary_address,
+ .secondary_address = cec_secondary_address,
+ .serial_poll_response = cec_serial_poll_response,
+ .serial_poll_status = cec_serial_poll_status,
+ .t1_delay = cec_t1_delay,
+ .return_to_local = cec_return_to_local,
};
static int cec_allocate_private(gpib_board_t *board)
@@ -365,11 +365,15 @@ static int __init cec_init_module(void)
result = pci_register_driver(&cec_pci_driver);
if (result) {
- pr_err("cec_gpib: pci_driver_register failed!\n");
+ pr_err("cec_gpib: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&cec_pci_interface, THIS_MODULE);
+ result = gpib_register_driver(&cec_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("cec_gpib: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
return 0;
}
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 0962729d7dfe..4901e660242e 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -835,7 +835,7 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return -EBUSY;
}
- retval = copy_from_user(&cmd, (void *)arg, sizeof(board_type_ioctl_t));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(board_type_ioctl_t));
if (retval)
return retval;
@@ -879,7 +879,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
unsigned long arg)
{
read_write_ioctl_t read_cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int end_flag = 0;
int retval;
@@ -887,7 +887,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
gpib_descriptor_t *desc;
size_t nbytes;
- retval = copy_from_user(&read_cmd, (void *)arg, sizeof(read_cmd));
+ retval = copy_from_user(&read_cmd, (void __user *)arg, sizeof(read_cmd));
if (retval)
return -EFAULT;
@@ -901,7 +901,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (WARN_ON_ONCE(sizeof(userbuf) > sizeof(read_cmd.buffer_ptr)))
return -EFAULT;
- userbuf = (u8 *)(unsigned long)read_cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)read_cmd.buffer_ptr;
userbuf += read_cmd.completed_transfer_count;
remain = read_cmd.requested_transfer_count - read_cmd.completed_transfer_count;
@@ -939,7 +939,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (remain == 0 || end_flag)
read_ret = 0;
if (retval == 0)
- retval = copy_to_user((void *)arg, &read_cmd, sizeof(read_cmd));
+ retval = copy_to_user((void __user *)arg, &read_cmd, sizeof(read_cmd));
atomic_set(&desc->io_in_progress, 0);
@@ -954,7 +954,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
gpib_board_t *board, unsigned long arg)
{
read_write_ioctl_t cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int retval;
int fault = 0;
@@ -962,7 +962,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
size_t bytes_written;
int no_clear_io_in_prog;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -973,7 +973,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
if (!desc)
return -EINVAL;
- userbuf = (u8 *)(unsigned long)cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)cmd.buffer_ptr;
userbuf += cmd.completed_transfer_count;
no_clear_io_in_prog = cmd.end;
@@ -1016,7 +1016,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
cmd.completed_transfer_count = cmd.requested_transfer_count - remain;
if (fault == 0)
- fault = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ fault = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
/*
* no_clear_io_in_prog (cmd.end) is true when io_in_progress should
@@ -1038,13 +1038,13 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
unsigned long arg)
{
read_write_ioctl_t write_cmd;
- u8 *userbuf;
+ u8 __user *userbuf;
unsigned long remain;
int retval = 0;
int fault;
gpib_descriptor_t *desc;
- fault = copy_from_user(&write_cmd, (void *)arg, sizeof(write_cmd));
+ fault = copy_from_user(&write_cmd, (void __user *)arg, sizeof(write_cmd));
if (fault)
return -EFAULT;
@@ -1055,7 +1055,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (!desc)
return -EINVAL;
- userbuf = (u8 *)(unsigned long)write_cmd.buffer_ptr;
+ userbuf = (u8 __user *)(unsigned long)write_cmd.buffer_ptr;
userbuf += write_cmd.completed_transfer_count;
remain = write_cmd.requested_transfer_count - write_cmd.completed_transfer_count;
@@ -1094,7 +1094,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (remain == 0)
retval = 0;
if (fault == 0)
- fault = copy_to_user((void *)arg, &write_cmd, sizeof(write_cmd));
+ fault = copy_to_user((void __user *)arg, &write_cmd, sizeof(write_cmd));
atomic_set(&desc->io_in_progress, 0);
@@ -1111,7 +1111,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
spoll_bytes_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1121,7 +1121,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
else
cmd.num_bytes = num_status_bytes(device);
- retval = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ retval = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1231,7 +1231,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
gpib_file_private_t *file_priv = filep->private_data;
int i;
- retval = copy_from_user(&open_dev_cmd, (void *)arg, sizeof(open_dev_cmd));
+ retval = copy_from_user(&open_dev_cmd, (void __user *)arg, sizeof(open_dev_cmd));
if (retval)
return -EFAULT;
@@ -1267,7 +1267,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
atomic_set(&board->stuck_srq, 0);
open_dev_cmd.handle = i;
- retval = copy_to_user((void *)arg, &open_dev_cmd, sizeof(open_dev_cmd));
+ retval = copy_to_user((void __user *)arg, &open_dev_cmd, sizeof(open_dev_cmd));
if (retval)
return -EFAULT;
@@ -1280,7 +1280,7 @@ static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned lon
gpib_file_private_t *file_priv = filep->private_data;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1308,7 +1308,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
dev_dbg(board->gpib_dev, "pid %i, entering %s()\n", current->pid, __func__);
- retval = copy_from_user(&serial_cmd, (void *)arg, sizeof(serial_cmd));
+ retval = copy_from_user(&serial_cmd, (void __user *)arg, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1317,7 +1317,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &serial_cmd, sizeof(serial_cmd));
+ retval = copy_to_user((void __user *)arg, &serial_cmd, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1331,7 +1331,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&wait_cmd, (void *)arg, sizeof(wait_cmd));
+ retval = copy_from_user(&wait_cmd, (void __user *)arg, sizeof(wait_cmd));
if (retval)
return -EFAULT;
@@ -1344,7 +1344,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &wait_cmd, sizeof(wait_cmd));
+ retval = copy_to_user((void __user *)arg, &wait_cmd, sizeof(wait_cmd));
if (retval)
return -EFAULT;
@@ -1360,7 +1360,7 @@ static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &poll_byte, sizeof(poll_byte));
+ retval = copy_to_user((void __user *)arg, &poll_byte, sizeof(poll_byte));
if (retval)
return -EFAULT;
@@ -1371,14 +1371,14 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
{
online_ioctl_t online_cmd;
int retval;
- void *init_data = NULL;
+ void __user *init_data = NULL;
board->config.init_data = NULL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&online_cmd, (void *)arg, sizeof(online_cmd));
+ retval = copy_from_user(&online_cmd, (void __user *)arg, sizeof(online_cmd));
if (retval)
return -EFAULT;
if (online_cmd.init_data_length > 0) {
@@ -1387,7 +1387,7 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
return -ENOMEM;
if (WARN_ON_ONCE(sizeof(init_data) > sizeof(online_cmd.init_data_ptr)))
return -EFAULT;
- init_data = (void *)(unsigned long)(online_cmd.init_data_ptr);
+ init_data = (void __user *)(unsigned long)(online_cmd.init_data_ptr);
retval = copy_from_user(board->config.init_data, init_data,
online_cmd.init_data_length);
if (retval) {
@@ -1416,7 +1416,7 @@ static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
int enable;
int retval;
- retval = copy_from_user(&enable, (void *)arg, sizeof(enable));
+ retval = copy_from_user(&enable, (void __user *)arg, sizeof(enable));
if (retval)
return -EFAULT;
@@ -1428,7 +1428,7 @@ static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
int synchronous;
int retval;
- retval = copy_from_user(&synchronous, (void *)arg, sizeof(synchronous));
+ retval = copy_from_user(&synchronous, (void __user *)arg, sizeof(synchronous));
if (retval)
return -EFAULT;
@@ -1444,7 +1444,7 @@ static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
if (retval < 0)
return retval;
- retval = copy_to_user((void *)arg, &lines, sizeof(lines));
+ retval = copy_to_user((void __user *)arg, &lines, sizeof(lines));
if (retval)
return -EFAULT;
@@ -1458,7 +1458,7 @@ static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1494,7 +1494,7 @@ static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1527,7 +1527,7 @@ static int eos_ioctl(gpib_board_t *board, unsigned long arg)
eos_ioctl_t eos_cmd;
int retval;
- retval = copy_from_user(&eos_cmd, (void *)arg, sizeof(eos_cmd));
+ retval = copy_from_user(&eos_cmd, (void __user *)arg, sizeof(eos_cmd));
if (retval)
return -EFAULT;
@@ -1539,7 +1539,7 @@ static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
u8 status_byte;
int retval;
- retval = copy_from_user(&status_byte, (void *)arg, sizeof(status_byte));
+ retval = copy_from_user(&status_byte, (void __user *)arg, sizeof(status_byte));
if (retval)
return -EFAULT;
@@ -1551,7 +1551,8 @@ static int request_service2_ioctl(gpib_board_t *board, unsigned long arg)
request_service2_t request_service2_cmd;
int retval;
- retval = copy_from_user(&request_service2_cmd, (void *)arg, sizeof(request_service2_t));
+ retval = copy_from_user(&request_service2_cmd, (void __user *)arg,
+ sizeof(request_service2_t));
if (retval)
return -EFAULT;
@@ -1567,7 +1568,7 @@ static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&base_addr, (void *)arg, sizeof(base_addr));
+ retval = copy_from_user(&base_addr, (void __user *)arg, sizeof(base_addr));
if (retval)
return -EFAULT;
@@ -1586,7 +1587,7 @@ static int irq_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&irq, (void *)arg, sizeof(irq));
+ retval = copy_from_user(&irq, (void __user *)arg, sizeof(irq));
if (retval)
return -EFAULT;
@@ -1603,7 +1604,7 @@ static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&dma_channel, (void *)arg, sizeof(dma_channel));
+ retval = copy_from_user(&dma_channel, (void __user *)arg, sizeof(dma_channel));
if (retval)
return -EFAULT;
@@ -1619,7 +1620,7 @@ static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
int retval;
gpib_descriptor_t *desc;
- retval = copy_from_user(&enable, (void *)arg, sizeof(enable));
+ retval = copy_from_user(&enable, (void __user *)arg, sizeof(enable));
if (retval)
return -EFAULT;
@@ -1654,7 +1655,7 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
{
int retval, lock_mutex;
- retval = copy_from_user(&lock_mutex, (void *)arg, sizeof(lock_mutex));
+ retval = copy_from_user(&lock_mutex, (void __user *)arg, sizeof(lock_mutex));
if (retval)
return -EFAULT;
@@ -1698,7 +1699,7 @@ static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
unsigned int timeout;
int retval;
- retval = copy_from_user(&timeout, (void *)arg, sizeof(timeout));
+ retval = copy_from_user(&timeout, (void __user *)arg, sizeof(timeout));
if (retval)
return -EFAULT;
@@ -1713,7 +1714,7 @@ static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
ppoll_config_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1739,7 +1740,7 @@ static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
local_ppoll_mode_ioctl_t cmd;
int retval;
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1759,7 +1760,7 @@ static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
int retval;
cmd = board->local_ppoll_mode;
- retval = copy_to_user((void *)arg, &cmd, sizeof(cmd));
+ retval = copy_to_user((void __user *)arg, &cmd, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -1773,7 +1774,7 @@ static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
status = board->interface->serial_poll_status(board);
- retval = copy_to_user((void *)arg, &status, sizeof(status));
+ retval = copy_to_user((void __user *)arg, &status, sizeof(status));
if (retval)
return -EFAULT;
@@ -1796,7 +1797,7 @@ static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
info.t1_delay = board->t1_nano_sec;
info.ist = board->ist;
info.no_7_bit_eos = board->interface->no_7_bit_eos;
- retval = copy_to_user((void *)arg, &info, sizeof(info));
+ retval = copy_to_user((void __user *)arg, &info, sizeof(info));
if (retval)
return -EFAULT;
@@ -1808,7 +1809,7 @@ static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg)
unsigned int usec_duration;
int retval;
- retval = copy_from_user(&usec_duration, (void *)arg, sizeof(usec_duration));
+ retval = copy_from_user(&usec_duration, (void __user *)arg, sizeof(usec_duration));
if (retval)
return -EFAULT;
@@ -1823,7 +1824,7 @@ static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- retval = copy_from_user(&selection, (void *)arg, sizeof(selection));
+ retval = copy_from_user(&selection, (void __user *)arg, sizeof(selection));
if (retval)
return -EFAULT;
@@ -1845,7 +1846,7 @@ static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long a
if (!selection)
return -ENOMEM;
- retval = copy_from_user(selection, (void *)arg, sizeof(select_device_path_ioctl_t));
+ retval = copy_from_user(selection, (void __user *)arg, sizeof(select_device_path_ioctl_t));
if (retval) {
vfree(selection);
return -EFAULT;
@@ -1979,7 +1980,7 @@ static int event_ioctl(gpib_board_t *board, unsigned long arg)
user_event = event;
- retval = copy_to_user((void *)arg, &user_event, sizeof(user_event));
+ retval = copy_to_user((void __user *)arg, &user_event, sizeof(user_event));
if (retval)
return -EFAULT;
@@ -1991,7 +1992,7 @@ static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
rsc_ioctl_t request_control;
int retval;
- retval = copy_from_user(&request_control, (void *)arg, sizeof(request_control));
+ retval = copy_from_user(&request_control, (void __user *)arg, sizeof(request_control));
if (retval)
return -EFAULT;
@@ -2011,7 +2012,7 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
return -EIO;
}
- retval = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+ retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
return -EFAULT;
@@ -2022,13 +2023,13 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-const struct file_operations ib_fops = {
-owner: THIS_MODULE,
-llseek : NULL,
-unlocked_ioctl : &ibioctl,
-compat_ioctl : &ibioctl,
-open : &ibopen,
-release : &ibclose,
+static const struct file_operations ib_fops = {
+ .owner = THIS_MODULE,
+ .llseek = NULL,
+ .unlocked_ioctl = &ibioctl,
+ .compat_ioctl = &ibioctl,
+ .open = &ibopen,
+ .release = &ibclose,
};
gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
@@ -2044,18 +2045,19 @@ void init_gpib_descriptor(gpib_descriptor_t *desc)
atomic_set(&desc->io_in_progress, 0);
}
-void gpib_register_driver(gpib_interface_t *interface, struct module *provider_module)
+int gpib_register_driver(gpib_interface_t *interface, struct module *provider_module)
{
struct gpib_interface_list_struct *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
- return;
+ return -ENOMEM;
entry->interface = interface;
entry->module = provider_module;
list_add(&entry->list, &registered_drivers);
- pr_info("gpib: registered %s interface\n", interface->name);
+
+ return 0;
}
EXPORT_SYMBOL(gpib_register_driver);
@@ -2195,7 +2197,7 @@ static int __init gpib_common_init_module(void)
return PTR_ERR(gpib_class);
}
for (i = 0; i < GPIB_MAX_NUM_BOARDS; ++i)
- board_array[i].gpib_dev = device_create(gpib_class, 0,
+ board_array[i].gpib_dev = device_create(gpib_class, NULL,
MKDEV(GPIB_CODE, i), NULL, "gpib%i", i);
return 0;
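Background on the __user conversions in the hunks above: __user is a sparse address-space annotation (from the kernel's compiler headers). It does not change the generated code, but running the checker (make C=1 or C=2) flags direct dereferences of user pointers and casts that silently drop the annotation. Below is a minimal sketch of the pattern the ioctl handlers move to; struct demo_cmd, demo_ioctl() and clearing the buffer are invented for illustration and are not part of this driver.

#include <linux/uaccess.h>
#include <linux/types.h>

/* Illustration only: demo_cmd and demo_ioctl are invented names. */
struct demo_cmd {
	__u64 buffer_ptr;	/* user buffer address carried as an integer */
	__u32 count;
};

static int demo_ioctl(unsigned long arg)
{
	struct demo_cmd cmd;
	u8 __user *ubuf;

	/* arg is a user-space address, so cast it with __user before copying */
	if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd)))
		return -EFAULT;

	/* a pointer carried inside the struct keeps the annotation too */
	ubuf = (u8 __user *)(unsigned long)cmd.buffer_ptr;

	/* example access: always go through an accessor, never *ubuf */
	if (clear_user(ubuf, cmd.count))
		return -EFAULT;

	if (copy_to_user((void __user *)arg, &cmd, sizeof(cmd)))
		return -EFAULT;
	return 0;
}

Carrying the user buffer address as an integer field and casting it back through unsigned long, as the hunks above do with buffer_ptr, keeps the structure layout independent of user-space pointer width while preserving the __user annotation on the kernel side.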
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/staging/gpib/common/iblib.c
index db1911cc1b26..5f6fa135f505 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/staging/gpib/common/iblib.c
@@ -695,7 +695,7 @@ int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
/* make sure we only clear status bits that we are reporting */
if (*status & clear_mask || set_mask)
- general_ibstatus(board, status_queue, *status & clear_mask, set_mask, 0);
+ general_ibstatus(board, status_queue, *status & clear_mask, set_mask, NULL);
return 0;
}
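A note on the initializer changes that run through ib_fops above and the gpib_interface_t tables in the file diffs below: the old "name: value" form is an obsolete GCC extension, while ".name = value" is the standard C99 designated-initializer syntax that kernel style expects. A small sketch with an invented structure (demo_ops and demo_open are not types or functions from these drivers):

/* Illustration only: demo_ops and demo_open are invented names. */
struct demo_ops {
	const char *name;
	int (*open)(void);
	int (*close)(void);
};

static int demo_open(void)
{
	return 0;
}

/*
 * Old, GCC-specific form (what the drivers used):
 *	static struct demo_ops ops = { name: "demo", open: demo_open };
 * Standard C99 designated initializers (what the patch converts to):
 */
static struct demo_ops ops = {
	.name = "demo",
	.open = demo_open,
	/* .close is implicitly initialized to NULL */
};

Members that are not named are zero-initialized, which is why fields left out of the larger interface tables default safely to NULL.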
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 5e59d38beb35..0304c5de4ccd 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -720,31 +720,31 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-gpib_interface_t fluke_unaccel_interface = {
-name: "fluke_unaccel",
-attach : fluke_attach_holdoff_all,
-detach : fluke_detach,
-read : fluke_read,
-write : fluke_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_unaccel_interface = {
+ .name = "fluke_unaccel",
+ .attach = fluke_attach_holdoff_all,
+ .detach = fluke_detach,
+ .read = fluke_read,
+ .write = fluke_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
/* fluke_hybrid uses dma for writes but not for reads. Added
@@ -755,58 +755,58 @@ return_to_local : fluke_return_to_local,
* register just as the dma controller is also doing a read.
*/
-gpib_interface_t fluke_hybrid_interface = {
-name: "fluke_hybrid",
-attach : fluke_attach_holdoff_all,
-detach : fluke_detach,
-read : fluke_read,
-write : fluke_accel_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_hybrid_interface = {
+ .name = "fluke_hybrid",
+ .attach = fluke_attach_holdoff_all,
+ .detach = fluke_detach,
+ .read = fluke_read,
+ .write = fluke_accel_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
-gpib_interface_t fluke_interface = {
-name: "fluke",
-attach : fluke_attach_holdoff_end,
-detach : fluke_detach,
-read : fluke_accel_read,
-write : fluke_accel_write,
-command : fluke_command,
-take_control : fluke_take_control,
-go_to_standby : fluke_go_to_standby,
-request_system_control : fluke_request_system_control,
-interface_clear : fluke_interface_clear,
-remote_enable : fluke_remote_enable,
-enable_eos : fluke_enable_eos,
-disable_eos : fluke_disable_eos,
-parallel_poll : fluke_parallel_poll,
-parallel_poll_configure : fluke_parallel_poll_configure,
-parallel_poll_response : fluke_parallel_poll_response,
-line_status : fluke_line_status,
-update_status : fluke_update_status,
-primary_address : fluke_primary_address,
-secondary_address : fluke_secondary_address,
-serial_poll_response : fluke_serial_poll_response,
-serial_poll_status : fluke_serial_poll_status,
-t1_delay : fluke_t1_delay,
-return_to_local : fluke_return_to_local,
+static gpib_interface_t fluke_interface = {
+ .name = "fluke",
+ .attach = fluke_attach_holdoff_end,
+ .detach = fluke_detach,
+ .read = fluke_accel_read,
+ .write = fluke_accel_write,
+ .command = fluke_command,
+ .take_control = fluke_take_control,
+ .go_to_standby = fluke_go_to_standby,
+ .request_system_control = fluke_request_system_control,
+ .interface_clear = fluke_interface_clear,
+ .remote_enable = fluke_remote_enable,
+ .enable_eos = fluke_enable_eos,
+ .disable_eos = fluke_disable_eos,
+ .parallel_poll = fluke_parallel_poll,
+ .parallel_poll_configure = fluke_parallel_poll_configure,
+ .parallel_poll_response = fluke_parallel_poll_response,
+ .line_status = fluke_line_status,
+ .update_status = fluke_update_status,
+ .primary_address = fluke_primary_address,
+ .secondary_address = fluke_secondary_address,
+ .serial_poll_response = fluke_serial_poll_response,
+ .serial_poll_status = fluke_serial_poll_status,
+ .t1_delay = fluke_t1_delay,
+ .return_to_local = fluke_return_to_local,
};
irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
@@ -1155,16 +1155,38 @@ static int __init fluke_init_module(void)
result = platform_driver_register(&fluke_gpib_platform_driver);
if (result) {
- pr_err("fluke_gpib: platform_driver_register failed!\n");
+ pr_err("fluke_gpib: platform_driver_register failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
- gpib_register_driver(&fluke_interface, THIS_MODULE);
+ result = gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_hybrid;
+ }
+
+ result = gpib_register_driver(&fluke_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
- pr_info("fluke_gpib\n");
return 0;
+
+err_interface:
+ gpib_unregister_driver(&fluke_hybrid_interface);
+err_hybrid:
+ gpib_unregister_driver(&fluke_unaccel_interface);
+err_unaccel:
+ platform_driver_unregister(&fluke_gpib_platform_driver);
+
+ return result;
}
static void __exit fluke_exit_module(void)
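The fluke_init_module() hunk above (and the fmh_gpib one further down) shows the usual kernel unwind pattern once gpib_register_driver() can fail: check each registration and, on failure, undo the steps that already succeeded in reverse order through a goto ladder. A condensed, self-contained sketch; register_a(), register_b(), register_c() and their unregister counterparts are invented stand-ins for the real platform/pci/gpib registration calls:

#include <linux/init.h>

/* Illustration only: invented stand-ins for the real registration calls. */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return 0; }
static void unregister_a(void) { }
static void unregister_b(void) { }

static int __init demo_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_b;

	ret = register_c();
	if (ret)
		goto err_c;

	return 0;

err_c:
	unregister_b();
err_b:
	unregister_a();
	return ret;
}

Each label is named after the step that failed and unwinds only the steps that had already succeeded, so the init function leaves the system exactly as it found it on every error path.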
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/staging/gpib/eastwood/fluke_gpib.h
index 4e2144d45270..3e4348196b42 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.h
@@ -21,7 +21,7 @@ struct fluke_priv {
struct dma_chan *dma_channel;
u8 *dma_buffer;
int dma_buffer_size;
- void *write_transfer_counter;
+ void __iomem *write_transfer_counter;
};
// cb7210 specific registers and bits
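The write_transfer_counter change above, and the matching fifo_base change in fmh_gpib.h below, add the __iomem annotation that sparse expects on pointers returned by ioremap(). Like __user, it is purely a static-checking marker: such pointers are accessed through readb()/writeb() and friends rather than dereferenced, and are handed back to iounmap() without a cast (the ines_gpib.c hunk further down drops exactly such a cast). A self-contained sketch; the physical base address, size and register offset are made up:

#include <linux/io.h>

static void demo_mmio(void)
{
	void __iomem *regs;

	regs = ioremap(0x40000000, 0x100);	/* made-up base and size */
	if (!regs)
		return;

	writeb(0xff, regs + 0xf0);	/* use accessors, never *regs */
	(void)readb(regs + 0xf0);

	iounmap(regs);		/* no cast needed: the type is already __iomem */
}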
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 0662b20a45e7..f950e7cdd8f8 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -1040,116 +1040,116 @@ static int fmh_gpib_fifo_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
return retval;
}
-gpib_interface_t fmh_gpib_unaccel_interface = {
-name: "fmh_gpib_unaccel",
-attach : fmh_gpib_attach_holdoff_all,
-detach : fmh_gpib_detach,
-read : fmh_gpib_read,
-write : fmh_gpib_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_unaccel_interface = {
+ .name = "fmh_gpib_unaccel",
+ .attach = fmh_gpib_attach_holdoff_all,
+ .detach = fmh_gpib_detach,
+ .read = fmh_gpib_read,
+ .write = fmh_gpib_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_interface = {
-name: "fmh_gpib",
-attach : fmh_gpib_attach_holdoff_end,
-detach : fmh_gpib_detach,
-read : fmh_gpib_accel_read,
-write : fmh_gpib_accel_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_interface = {
+ .name = "fmh_gpib",
+ .attach = fmh_gpib_attach_holdoff_end,
+ .detach = fmh_gpib_detach,
+ .read = fmh_gpib_accel_read,
+ .write = fmh_gpib_accel_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_pci_interface = {
-name: "fmh_gpib_pci",
-attach : fmh_gpib_pci_attach_holdoff_end,
-detach : fmh_gpib_pci_detach,
-read : fmh_gpib_fifo_read,
-write : fmh_gpib_fifo_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_pci_interface = {
+ .name = "fmh_gpib_pci",
+ .attach = fmh_gpib_pci_attach_holdoff_end,
+ .detach = fmh_gpib_pci_detach,
+ .read = fmh_gpib_fifo_read,
+ .write = fmh_gpib_fifo_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
-gpib_interface_t fmh_gpib_pci_unaccel_interface = {
-name: "fmh_gpib_pci_unaccel",
-attach : fmh_gpib_pci_attach_holdoff_all,
-detach : fmh_gpib_pci_detach,
-read : fmh_gpib_read,
-write : fmh_gpib_write,
-command : fmh_gpib_command,
-take_control : fmh_gpib_take_control,
-go_to_standby : fmh_gpib_go_to_standby,
-request_system_control : fmh_gpib_request_system_control,
-interface_clear : fmh_gpib_interface_clear,
-remote_enable : fmh_gpib_remote_enable,
-enable_eos : fmh_gpib_enable_eos,
-disable_eos : fmh_gpib_disable_eos,
-parallel_poll : fmh_gpib_parallel_poll,
-parallel_poll_configure : fmh_gpib_parallel_poll_configure,
-parallel_poll_response : fmh_gpib_parallel_poll_response,
-local_parallel_poll_mode : fmh_gpib_local_parallel_poll_mode,
-line_status : fmh_gpib_line_status,
-update_status : fmh_gpib_update_status,
-primary_address : fmh_gpib_primary_address,
-secondary_address : fmh_gpib_secondary_address,
-serial_poll_response2 : fmh_gpib_serial_poll_response2,
-serial_poll_status : fmh_gpib_serial_poll_status,
-t1_delay : fmh_gpib_t1_delay,
-return_to_local : fmh_gpib_return_to_local,
+static gpib_interface_t fmh_gpib_pci_unaccel_interface = {
+ .name = "fmh_gpib_pci_unaccel",
+ .attach = fmh_gpib_pci_attach_holdoff_all,
+ .detach = fmh_gpib_pci_detach,
+ .read = fmh_gpib_read,
+ .write = fmh_gpib_write,
+ .command = fmh_gpib_command,
+ .take_control = fmh_gpib_take_control,
+ .go_to_standby = fmh_gpib_go_to_standby,
+ .request_system_control = fmh_gpib_request_system_control,
+ .interface_clear = fmh_gpib_interface_clear,
+ .remote_enable = fmh_gpib_remote_enable,
+ .enable_eos = fmh_gpib_enable_eos,
+ .disable_eos = fmh_gpib_disable_eos,
+ .parallel_poll = fmh_gpib_parallel_poll,
+ .parallel_poll_configure = fmh_gpib_parallel_poll_configure,
+ .parallel_poll_response = fmh_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = fmh_gpib_local_parallel_poll_mode,
+ .line_status = fmh_gpib_line_status,
+ .update_status = fmh_gpib_update_status,
+ .primary_address = fmh_gpib_primary_address,
+ .secondary_address = fmh_gpib_secondary_address,
+ .serial_poll_response2 = fmh_gpib_serial_poll_response2,
+ .serial_poll_status = fmh_gpib_serial_poll_status,
+ .t1_delay = fmh_gpib_t1_delay,
+ .return_to_local = fmh_gpib_return_to_local,
};
irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
@@ -1692,23 +1692,54 @@ static int __init fmh_gpib_init_module(void)
result = platform_driver_register(&fmh_gpib_platform_driver);
if (result) {
- pr_err("fmh_gpib: platform_driver_register failed!\n");
+ pr_err("fmh_gpib: platform_driver_register failed: error = %d\n", result);
return result;
}
result = pci_register_driver(&fmh_gpib_pci_driver);
if (result) {
- pr_err("fmh_gpib: pci_driver_register failed!\n");
- return result;
+ pr_err("fmh_gpib: pci_register_driver failed: error = %d\n", result);
+ goto err_pci_driver;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_unaccel;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_interface;
+ }
+
+ result = gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci_unaccel;
}
- gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
+ result = gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci;
+ }
- pr_info("fmh_gpib\n");
return 0;
+
+err_pci:
+ gpib_unregister_driver(&fmh_gpib_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&fmh_gpib_interface);
+err_interface:
+ gpib_unregister_driver(&fmh_gpib_unaccel_interface);
+err_unaccel:
+ pci_unregister_driver(&fmh_gpib_pci_driver);
+err_pci_driver:
+ platform_driver_unregister(&fmh_gpib_platform_driver);
+
+ return result;
}
static void __exit fmh_gpib_exit_module(void)
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
index 60b1bd6d3c15..de6fd2164414 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
@@ -33,7 +33,7 @@ struct fmh_priv {
u8 *dma_buffer;
int dma_buffer_size;
int dma_burst_length;
- void *fifo_base;
+ void __iomem *fifo_base;
unsigned supports_fifo_interrupts : 1;
};
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index 23550502e012..828c99ea613f 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -147,7 +147,7 @@ DEFINE_LED_TRIGGER(ledtrig_gpib);
led_trigger_event(ledtrig_gpib, LED_OFF); } \
while (0)
-struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
+static struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
#define D01 all_descriptors[0]
#define D02 all_descriptors[1]
@@ -175,7 +175,7 @@ struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
/* YOGA dapter uses a global enable for the buffer chips, re-using the TE pin */
#define YOGA_ENABLE TE
-int gpios_vector[] = {
+static int gpios_vector[] = {
D01_pin_nr,
D02_pin_nr,
D03_pin_nr,
@@ -265,7 +265,7 @@ static struct gpiod_lookup_table gpib_gpio_table_0 = {
static struct gpiod_lookup_table *lookup_tables[] = {
&gpib_gpio_table_0,
&gpib_gpio_table_1,
- 0
+ NULL
};
/* struct which defines private_data for gpio driver */
@@ -1119,7 +1119,7 @@ static void release_gpios(void)
for (j = 0 ; j < NUM_PINS ; j++) {
if (all_descriptors[j]) {
gpiod_put(all_descriptors[j]);
- all_descriptors[j] = 0;
+ all_descriptors[j] = NULL;
}
}
}
@@ -1312,36 +1312,41 @@ bb_attach_out:
return retval;
}
-gpib_interface_t bb_interface = {
-name: NAME,
-attach : bb_attach,
-detach : bb_detach,
-read : bb_read,
-write : bb_write,
-command : bb_command,
-take_control : bb_take_control,
-go_to_standby : bb_go_to_standby,
-request_system_control : bb_request_system_control,
-interface_clear : bb_interface_clear,
-remote_enable : bb_remote_enable,
-enable_eos : bb_enable_eos,
-disable_eos : bb_disable_eos,
-parallel_poll : bb_parallel_poll,
-parallel_poll_configure : bb_parallel_poll_configure,
-parallel_poll_response : bb_parallel_poll_response,
-line_status : bb_line_status,
-update_status : bb_update_status,
-primary_address : bb_primary_address,
-secondary_address : bb_secondary_address,
-serial_poll_response : bb_serial_poll_response,
-serial_poll_status : bb_serial_poll_status,
-t1_delay : bb_t1_delay,
-return_to_local : bb_return_to_local,
+static gpib_interface_t bb_interface = {
+ .name = NAME,
+ .attach = bb_attach,
+ .detach = bb_detach,
+ .read = bb_read,
+ .write = bb_write,
+ .command = bb_command,
+ .take_control = bb_take_control,
+ .go_to_standby = bb_go_to_standby,
+ .request_system_control = bb_request_system_control,
+ .interface_clear = bb_interface_clear,
+ .remote_enable = bb_remote_enable,
+ .enable_eos = bb_enable_eos,
+ .disable_eos = bb_disable_eos,
+ .parallel_poll = bb_parallel_poll,
+ .parallel_poll_configure = bb_parallel_poll_configure,
+ .parallel_poll_response = bb_parallel_poll_response,
+ .line_status = bb_line_status,
+ .update_status = bb_update_status,
+ .primary_address = bb_primary_address,
+ .secondary_address = bb_secondary_address,
+ .serial_poll_response = bb_serial_poll_response,
+ .serial_poll_status = bb_serial_poll_status,
+ .t1_delay = bb_t1_delay,
+ .return_to_local = bb_return_to_local,
};
static int __init bb_init_module(void)
{
- gpib_register_driver(&bb_interface, THIS_MODULE);
+ int result = gpib_register_driver(&bb_interface, THIS_MODULE);
+
+ if (result) {
+ pr_err("gpib_bitbang: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
dbg_printk(0, "module loaded with pin map \"%s\"%s\n",
pin_map, (sn7516x_used) ? " and SN7516x driver support" : "");
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index ea78143c7ab6..700d1ba029d2 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -173,32 +173,32 @@ void hp82335_return_to_local(gpib_board_t *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-gpib_interface_t hp82335_interface = {
-name: "hp82335",
-attach : hp82335_attach,
-detach : hp82335_detach,
-read : hp82335_read,
-write : hp82335_write,
-command : hp82335_command,
-request_system_control : hp82335_request_system_control,
-take_control : hp82335_take_control,
-go_to_standby : hp82335_go_to_standby,
-interface_clear : hp82335_interface_clear,
-remote_enable : hp82335_remote_enable,
-enable_eos : hp82335_enable_eos,
-disable_eos : hp82335_disable_eos,
-parallel_poll : hp82335_parallel_poll,
-parallel_poll_configure : hp82335_parallel_poll_configure,
-parallel_poll_response : hp82335_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp82335_line_status,
-update_status : hp82335_update_status,
-primary_address : hp82335_primary_address,
-secondary_address : hp82335_secondary_address,
-serial_poll_response : hp82335_serial_poll_response,
-serial_poll_status : hp82335_serial_poll_status,
-t1_delay : hp82335_t1_delay,
-return_to_local : hp82335_return_to_local,
+static gpib_interface_t hp82335_interface = {
+ .name = "hp82335",
+ .attach = hp82335_attach,
+ .detach = hp82335_detach,
+ .read = hp82335_read,
+ .write = hp82335_write,
+ .command = hp82335_command,
+ .request_system_control = hp82335_request_system_control,
+ .take_control = hp82335_take_control,
+ .go_to_standby = hp82335_go_to_standby,
+ .interface_clear = hp82335_interface_clear,
+ .remote_enable = hp82335_remote_enable,
+ .enable_eos = hp82335_enable_eos,
+ .disable_eos = hp82335_disable_eos,
+ .parallel_poll = hp82335_parallel_poll,
+ .parallel_poll_configure = hp82335_parallel_poll_configure,
+ .parallel_poll_response = hp82335_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp82335_line_status,
+ .update_status = hp82335_update_status,
+ .primary_address = hp82335_primary_address,
+ .secondary_address = hp82335_secondary_address,
+ .serial_poll_response = hp82335_serial_poll_response,
+ .serial_poll_status = hp82335_serial_poll_status,
+ .t1_delay = hp82335_t1_delay,
+ .return_to_local = hp82335_return_to_local,
};
int hp82335_allocate_private(gpib_board_t *board)
@@ -326,7 +326,13 @@ void hp82335_detach(gpib_board_t *board)
static int __init hp82335_init_module(void)
{
- gpib_register_driver(&hp82335_interface, THIS_MODULE);
+ int result = gpib_register_driver(&hp82335_interface, THIS_MODULE);
+
+ if (result) {
+ pr_err("hp82335: gpib_register_driver failed: error = %d\n", result);
+ return result;
+ }
+
return 0;
}
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/staging/gpib/hp_82335/hp82335.h
index 5e5297af731a..4b185d7c5188 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/staging/gpib/hp_82335/hp82335.h
@@ -17,9 +17,6 @@ struct hp82335_priv {
unsigned long raw_iobase;
};
-// interfaces
-extern gpib_interface_t hp82335_interface;
-
// interface functions
int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length,
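The header hunk above pairs with making hp82335_interface static in hp82335.c: once the only definition is file-local, the extern declaration in the header has no remaining users and is removed (hp_82341.h and ines.h below get the same treatment). Schematically, with an invented demo_iface_t type rather than the real gpib_interface_t:

/* Illustration only: demo_iface_t and demo_interface are invented names. */
typedef struct { const char *name; } demo_iface_t;

/*
 * Before: a header exposed the symbol to every translation unit:
 *	extern demo_iface_t demo_interface;
 * After: the extern declaration is deleted and the definition becomes
 * file-local, so the symbol no longer leaks into the global namespace.
 */
static demo_iface_t demo_interface = {
	.name = "demo",
};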
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 71d481e88bd9..0ddae295912f 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -402,59 +402,59 @@ void hp_82341_return_to_local(gpib_board_t *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-gpib_interface_t hp_82341_unaccel_interface = {
-name: "hp_82341_unaccel",
-attach : hp_82341_attach,
-detach : hp_82341_detach,
-read : hp_82341_read,
-write : hp_82341_write,
-command : hp_82341_command,
-request_system_control : hp_82341_request_system_control,
-take_control : hp_82341_take_control,
-go_to_standby : hp_82341_go_to_standby,
-interface_clear : hp_82341_interface_clear,
-remote_enable : hp_82341_remote_enable,
-enable_eos : hp_82341_enable_eos,
-disable_eos : hp_82341_disable_eos,
-parallel_poll : hp_82341_parallel_poll,
-parallel_poll_configure : hp_82341_parallel_poll_configure,
-parallel_poll_response : hp_82341_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp_82341_line_status,
-update_status : hp_82341_update_status,
-primary_address : hp_82341_primary_address,
-secondary_address : hp_82341_secondary_address,
-serial_poll_response : hp_82341_serial_poll_response,
-serial_poll_status : hp_82341_serial_poll_status,
-t1_delay : hp_82341_t1_delay,
-return_to_local : hp_82341_return_to_local,
+static gpib_interface_t hp_82341_unaccel_interface = {
+ .name = "hp_82341_unaccel",
+ .attach = hp_82341_attach,
+ .detach = hp_82341_detach,
+ .read = hp_82341_read,
+ .write = hp_82341_write,
+ .command = hp_82341_command,
+ .request_system_control = hp_82341_request_system_control,
+ .take_control = hp_82341_take_control,
+ .go_to_standby = hp_82341_go_to_standby,
+ .interface_clear = hp_82341_interface_clear,
+ .remote_enable = hp_82341_remote_enable,
+ .enable_eos = hp_82341_enable_eos,
+ .disable_eos = hp_82341_disable_eos,
+ .parallel_poll = hp_82341_parallel_poll,
+ .parallel_poll_configure = hp_82341_parallel_poll_configure,
+ .parallel_poll_response = hp_82341_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp_82341_line_status,
+ .update_status = hp_82341_update_status,
+ .primary_address = hp_82341_primary_address,
+ .secondary_address = hp_82341_secondary_address,
+ .serial_poll_response = hp_82341_serial_poll_response,
+ .serial_poll_status = hp_82341_serial_poll_status,
+ .t1_delay = hp_82341_t1_delay,
+ .return_to_local = hp_82341_return_to_local,
};
-gpib_interface_t hp_82341_interface = {
-name: "hp_82341",
-attach : hp_82341_attach,
-detach : hp_82341_detach,
-read : hp_82341_accel_read,
-write : hp_82341_accel_write,
-command : hp_82341_command,
-request_system_control : hp_82341_request_system_control,
-take_control : hp_82341_take_control,
-go_to_standby : hp_82341_go_to_standby,
-interface_clear : hp_82341_interface_clear,
-remote_enable : hp_82341_remote_enable,
-enable_eos : hp_82341_enable_eos,
-disable_eos : hp_82341_disable_eos,
-parallel_poll : hp_82341_parallel_poll,
-parallel_poll_configure : hp_82341_parallel_poll_configure,
-parallel_poll_response : hp_82341_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : hp_82341_line_status,
-update_status : hp_82341_update_status,
-primary_address : hp_82341_primary_address,
-secondary_address : hp_82341_secondary_address,
-serial_poll_response : hp_82341_serial_poll_response,
-t1_delay : hp_82341_t1_delay,
-return_to_local : hp_82341_return_to_local,
+static gpib_interface_t hp_82341_interface = {
+ .name = "hp_82341",
+ .attach = hp_82341_attach,
+ .detach = hp_82341_detach,
+ .read = hp_82341_accel_read,
+ .write = hp_82341_accel_write,
+ .command = hp_82341_command,
+ .request_system_control = hp_82341_request_system_control,
+ .take_control = hp_82341_take_control,
+ .go_to_standby = hp_82341_go_to_standby,
+ .interface_clear = hp_82341_interface_clear,
+ .remote_enable = hp_82341_remote_enable,
+ .enable_eos = hp_82341_enable_eos,
+ .disable_eos = hp_82341_disable_eos,
+ .parallel_poll = hp_82341_parallel_poll,
+ .parallel_poll_configure = hp_82341_parallel_poll_configure,
+ .parallel_poll_response = hp_82341_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = hp_82341_line_status,
+ .update_status = hp_82341_update_status,
+ .primary_address = hp_82341_primary_address,
+ .secondary_address = hp_82341_secondary_address,
+ .serial_poll_response = hp_82341_serial_poll_response,
+ .t1_delay = hp_82341_t1_delay,
+ .return_to_local = hp_82341_return_to_local,
};
int hp_82341_allocate_private(gpib_board_t *board)
@@ -807,8 +807,21 @@ MODULE_DEVICE_TABLE(pnp, hp_82341_pnp_table);
static int __init hp_82341_init_module(void)
{
- gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&hp_82341_interface, THIS_MODULE);
+ int ret;
+
+ ret = gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&hp_82341_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ gpib_unregister_driver(&hp_82341_unaccel_interface);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/staging/gpib/hp_82341/hp_82341.h
index 7c391860b399..0065ebd9747c 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/staging/gpib/hp_82341/hp_82341.h
@@ -26,8 +26,6 @@ struct hp_82341_priv {
enum hp_82341_hardware_version hw_version;
};
-// interfaces
-extern gpib_interface_t hp_82341_interface;
// interface functions
int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
diff --git a/drivers/staging/gpib/include/amcc5920.h b/drivers/staging/gpib/include/amcc5920.h
index 766b3799223f..7a88bd282feb 100644
--- a/drivers/staging/gpib/include/amcc5920.h
+++ b/drivers/staging/gpib/include/amcc5920.h
@@ -22,7 +22,7 @@ static const int bits_per_region = 8;
static inline uint32_t amcc_wait_state_bits(unsigned int region, unsigned int num_wait_states)
{
- return (num_wait_states & 0x7) << (-region * bits_per_region);
+ return (num_wait_states & 0x7) << (--region * bits_per_region);
};
enum amcc_prefetch_bits {
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index b97da577ba33..d35fdd391f7e 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-void gpib_register_driver(gpib_interface_t *interface, struct module *mod);
+int gpib_register_driver(gpib_interface_t *interface, struct module *mod);
void gpib_unregister_driver(gpib_interface_t *interface);
struct pci_dev *gpib_pci_get_device(const gpib_board_config_t *config, unsigned int vendor_id,
unsigned int device_id, struct pci_dev *from);
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index eed038fd3f28..3918737fa21a 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -35,13 +35,6 @@ struct ines_priv {
u8 extend_mode_bits;
};
-// interfaces
-extern gpib_interface_t ines_pci_interface;
-extern gpib_interface_t ines_pci_accel_interface;
-extern gpib_interface_t ines_pcmcia_interface;
-extern gpib_interface_t ines_pcmcia_accel_interface;
-extern gpib_interface_t ines_pcmcia_unaccel_interface;
-
// interface functions
int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length,
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index e18455ba842f..22a05a287bce 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -357,38 +357,38 @@ struct ines_pci_id {
enum ines_pci_chip pci_chip_type;
};
-struct ines_pci_id pci_ids[] = {
- {vendor_id: PCI_VENDOR_ID_PLX,
- device_id : PCI_DEVICE_ID_PLX_9050,
- subsystem_vendor_id : PCI_VENDOR_ID_PLX,
- subsystem_device_id : PCI_SUBDEVICE_ID_INES_GPIB,
- gpib_region : 2,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_PLX9050,
+static struct ines_pci_id pci_ids[] = {
+ {.vendor_id = PCI_VENDOR_ID_PLX,
+ .device_id = PCI_DEVICE_ID_PLX_9050,
+ .subsystem_vendor_id = PCI_VENDOR_ID_PLX,
+ .subsystem_device_id = PCI_SUBDEVICE_ID_INES_GPIB,
+ .gpib_region = 2,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_PLX9050,
},
- {vendor_id: PCI_VENDOR_ID_AMCC,
- device_id : PCI_DEVICE_ID_INES_GPIB_AMCC,
- subsystem_vendor_id : PCI_VENDOR_ID_AMCC,
- subsystem_device_id : PCI_SUBDEVICE_ID_INES_GPIB,
- gpib_region : 1,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_AMCC5920,
+ {.vendor_id = PCI_VENDOR_ID_AMCC,
+ .device_id = PCI_DEVICE_ID_INES_GPIB_AMCC,
+ .subsystem_vendor_id = PCI_VENDOR_ID_AMCC,
+ .subsystem_device_id = PCI_SUBDEVICE_ID_INES_GPIB,
+ .gpib_region = 1,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_AMCC5920,
},
- {vendor_id: PCI_VENDOR_ID_INES_QUICKLOGIC,
- device_id : PCI_DEVICE_ID_INES_GPIB_QL5030,
- subsystem_vendor_id : PCI_VENDOR_ID_INES_QUICKLOGIC,
- subsystem_device_id : PCI_DEVICE_ID_INES_GPIB_QL5030,
- gpib_region : 1,
- io_offset : 1,
- pci_chip_type : PCI_CHIP_QUICKLOGIC5030,
+ {.vendor_id = PCI_VENDOR_ID_INES_QUICKLOGIC,
+ .device_id = PCI_DEVICE_ID_INES_GPIB_QL5030,
+ .subsystem_vendor_id = PCI_VENDOR_ID_INES_QUICKLOGIC,
+ .subsystem_device_id = PCI_DEVICE_ID_INES_GPIB_QL5030,
+ .gpib_region = 1,
+ .io_offset = 1,
+ .pci_chip_type = PCI_CHIP_QUICKLOGIC5030,
},
- {vendor_id: PCI_VENDOR_ID_QUANCOM,
- device_id : PCI_DEVICE_ID_QUANCOM_GPIB,
- subsystem_vendor_id : -1,
- subsystem_device_id : -1,
- gpib_region : 0,
- io_offset : 4,
- pci_chip_type : PCI_CHIP_QUANCOM,
+ {.vendor_id = PCI_VENDOR_ID_QUANCOM,
+ .device_id = PCI_DEVICE_ID_QUANCOM_GPIB,
+ .subsystem_vendor_id = -1,
+ .subsystem_device_id = -1,
+ .gpib_region = 0,
+ .io_offset = 4,
+ .pci_chip_type = PCI_CHIP_QUANCOM,
},
};
@@ -540,116 +540,116 @@ void ines_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t ines_pci_unaccel_interface = {
-name: "ines_pci_unaccel",
-attach : ines_pci_attach,
-detach : ines_pci_detach,
-read : ines_read,
-write : ines_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_unaccel_interface = {
+ .name = "ines_pci_unaccel",
+ .attach = ines_pci_attach,
+ .detach = ines_pci_detach,
+ .read = ines_read,
+ .write = ines_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pci_interface = {
-name: "ines_pci",
-attach : ines_pci_accel_attach,
-detach : ines_pci_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_interface = {
+ .name = "ines_pci",
+ .attach = ines_pci_accel_attach,
+ .detach = ines_pci_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pci_accel_interface = {
-name: "ines_pci_accel",
-attach : ines_pci_accel_attach,
-detach : ines_pci_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pci_accel_interface = {
+ .name = "ines_pci_accel",
+ .attach = ines_pci_accel_attach,
+ .detach = ines_pci_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_isa_interface = {
-name: "ines_isa",
-attach : ines_isa_attach,
-detach : ines_isa_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_isa_interface = {
+ .name = "ines_isa",
+ .attach = ines_isa_attach,
+ .detach = ines_isa_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
static int ines_allocate_private(gpib_board_t *board)
@@ -1122,7 +1122,7 @@ static int ines_gpib_config(struct pcmcia_device *link)
{
struct local_info *dev;
int retval;
- void *virt;
+ void __iomem *virt;
dev = link->priv;
DEBUG(0, "%s(0x%p)\n", __func__, link);
@@ -1156,7 +1156,7 @@ static int ines_gpib_config(struct pcmcia_device *link)
}
virt = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
writeb((link->resource[2]->start >> 2) & 0xff, virt + 0xf0); // IOWindow base
- iounmap((void *)virt);
+ iounmap(virt);
/*
* This actually configures the PCMCIA socket -- setting up
@@ -1227,100 +1227,94 @@ static struct pcmcia_driver ines_gpib_cs_driver = {
.resume = ines_gpib_resume,
};
-int ines_pcmcia_init_module(void)
-{
- pcmcia_register_driver(&ines_gpib_cs_driver);
- return 0;
-}
-
void ines_pcmcia_cleanup_module(void)
{
DEBUG(0, "ines_cs: unloading\n");
pcmcia_unregister_driver(&ines_gpib_cs_driver);
}
-gpib_interface_t ines_pcmcia_unaccel_interface = {
-name: "ines_pcmcia_unaccel",
-attach : ines_pcmcia_attach,
-detach : ines_pcmcia_detach,
-read : ines_read,
-write : ines_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_unaccel_interface = {
+ .name = "ines_pcmcia_unaccel",
+ .attach = ines_pcmcia_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_read,
+ .write = ines_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pcmcia_accel_interface = {
-name: "ines_pcmcia_accel",
-attach : ines_pcmcia_accel_attach,
-detach : ines_pcmcia_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_accel_interface = {
+ .name = "ines_pcmcia_accel",
+ .attach = ines_pcmcia_accel_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
-gpib_interface_t ines_pcmcia_interface = {
-name: "ines_pcmcia",
-attach : ines_pcmcia_accel_attach,
-detach : ines_pcmcia_detach,
-read : ines_accel_read,
-write : ines_accel_write,
-command : ines_command,
-take_control : ines_take_control,
-go_to_standby : ines_go_to_standby,
-request_system_control : ines_request_system_control,
-interface_clear : ines_interface_clear,
-remote_enable : ines_remote_enable,
-enable_eos : ines_enable_eos,
-disable_eos : ines_disable_eos,
-parallel_poll : ines_parallel_poll,
-parallel_poll_configure : ines_parallel_poll_configure,
-parallel_poll_response : ines_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ines_line_status,
-update_status : ines_update_status,
-primary_address : ines_primary_address,
-secondary_address : ines_secondary_address,
-serial_poll_response : ines_serial_poll_response,
-serial_poll_status : ines_serial_poll_status,
-t1_delay : ines_t1_delay,
-return_to_local : ines_return_to_local,
+static gpib_interface_t ines_pcmcia_interface = {
+ .name = "ines_pcmcia",
+ .attach = ines_pcmcia_accel_attach,
+ .detach = ines_pcmcia_detach,
+ .read = ines_accel_read,
+ .write = ines_accel_write,
+ .command = ines_command,
+ .take_control = ines_take_control,
+ .go_to_standby = ines_go_to_standby,
+ .request_system_control = ines_request_system_control,
+ .interface_clear = ines_interface_clear,
+ .remote_enable = ines_remote_enable,
+ .enable_eos = ines_enable_eos,
+ .disable_eos = ines_disable_eos,
+ .parallel_poll = ines_parallel_poll,
+ .parallel_poll_configure = ines_parallel_poll_configure,
+ .parallel_poll_response = ines_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ines_line_status,
+ .update_status = ines_update_status,
+ .primary_address = ines_primary_address,
+ .secondary_address = ines_secondary_address,
+ .serial_poll_response = ines_serial_poll_response,
+ .serial_poll_status = ines_serial_poll_status,
+ .t1_delay = ines_t1_delay,
+ .return_to_local = ines_return_to_local,
};
irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
@@ -1348,8 +1342,8 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (request_region(curr_dev->resource[0]->start,
- resource_size(curr_dev->resource[0]), "ines_gpib") == 0) {
+ if (!request_region(curr_dev->resource[0]->start,
+ resource_size(curr_dev->resource[0]), "ines_gpib")) {
pr_err("ines_gpib: ioports at 0x%lx already in use\n",
(unsigned long)(curr_dev->resource[0]->start));
return -1;
@@ -1420,28 +1414,86 @@ void ines_pcmcia_detach(gpib_board_t *board)
static int __init ines_init_module(void)
{
- int err = 0;
+ int ret;
+
+ ret = pci_register_driver(&ines_pci_driver);
+ if (ret) {
+ pr_err("ines_gpib: pci_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&ines_pci_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci;
+ }
- err = pci_register_driver(&ines_pci_driver);
- if (err) {
- pr_err("ines_gpib: pci_driver_register failed!\n");
- return err;
+ ret = gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_unaccel;
+ }
+
+ ret = gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pci_accel;
+ }
+
+ ret = gpib_register_driver(&ines_isa_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_isa;
}
- gpib_register_driver(&ines_pci_interface, THIS_MODULE);
- gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
- gpib_register_driver(&ines_isa_interface, THIS_MODULE);
#ifdef GPIB_PCMCIA
- gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
- gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
- err += ines_pcmcia_init_module();
+ ret = gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia;
+ }
+
+ ret = gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_unaccel;
+ }
+
+ ret = gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_accel;
+ }
+
+ ret = pcmcia_register_driver(&ines_gpib_cs_driver);
+ if (ret) {
+ pr_err("ines_gpib: pcmcia_register_driver failed: error = %d\n", ret);
+ goto err_pcmcia_driver;
+ }
#endif
- if (err)
- return -1;
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&ines_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&ines_pcmcia_unaccel_interface);
+err_pcmcia_unaccel:
+ gpib_unregister_driver(&ines_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&ines_isa_interface);
+err_isa:
+ gpib_unregister_driver(&ines_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&ines_pci_unaccel_interface);
+err_pci_unaccel:
+ gpib_unregister_driver(&ines_pci_interface);
+err_pci:
+ pci_unregister_driver(&ines_pci_driver);
+
+ return ret;
}
static void __exit ines_exit_module(void)
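For reference, the error unwind added to ines_init_module() above follows the standard kernel idiom: register each resource in order and, on failure, jump to a label that undoes only what has already succeeded, in reverse order. A minimal sketch of that idiom with hypothetical register_a()/register_b()/register_c() stubs (not the real gpib_register_driver() calls):

#include <linux/init.h>
#include <linux/module.h>

/* Stand-ins for real registration calls; they always succeed here. */
static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return 0; }
static void unregister_b(void) { }
static int register_c(void) { return 0; }

static int __init example_init(void)
{
        int ret;

        ret = register_a();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = register_b();
        if (ret)
                goto err_b;             /* undo only what succeeded */

        ret = register_c();
        if (ret)
                goto err_c;

        return 0;

err_c:
        unregister_b();
err_b:
        unregister_a();
        return ret;
}

Each label names the step that failed, so the unwind list reads in the exact reverse of the registration order.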
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 93a05c792816..85322af62c23 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -10,7 +10,6 @@
/* base module includes */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -25,7 +24,6 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
-#include <linux/uaccess.h>
#include <linux/usb.h>
#include "gpibP.h"
@@ -1133,33 +1131,33 @@ static unsigned int usb_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec
* *** module dispatch table and init/exit functions ***
*/
-gpib_interface_t usb_gpib_interface = {
-name: NAME,
-attach : usb_gpib_attach,
-detach : usb_gpib_detach,
-read : usb_gpib_read,
-write : usb_gpib_write,
-command : usb_gpib_command,
-take_control : usb_gpib_take_control,
-go_to_standby : usb_gpib_go_to_standby,
-request_system_control : usb_gpib_request_system_control,
-interface_clear : usb_gpib_interface_clear,
-remote_enable : usb_gpib_remote_enable,
-enable_eos : usb_gpib_enable_eos,
-disable_eos : usb_gpib_disable_eos,
-parallel_poll : usb_gpib_parallel_poll,
-parallel_poll_configure : usb_gpib_parallel_poll_configure,
-parallel_poll_response : usb_gpib_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : usb_gpib_line_status,
-update_status : usb_gpib_update_status,
-primary_address : usb_gpib_primary_address,
-secondary_address : usb_gpib_secondary_address,
-serial_poll_response : usb_gpib_serial_poll_response,
-serial_poll_status : usb_gpib_serial_poll_status,
-t1_delay : usb_gpib_t1_delay,
-return_to_local : usb_gpib_return_to_local,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t usb_gpib_interface = {
+ .name = NAME,
+ .attach = usb_gpib_attach,
+ .detach = usb_gpib_detach,
+ .read = usb_gpib_read,
+ .write = usb_gpib_write,
+ .command = usb_gpib_command,
+ .take_control = usb_gpib_take_control,
+ .go_to_standby = usb_gpib_go_to_standby,
+ .request_system_control = usb_gpib_request_system_control,
+ .interface_clear = usb_gpib_interface_clear,
+ .remote_enable = usb_gpib_remote_enable,
+ .enable_eos = usb_gpib_enable_eos,
+ .disable_eos = usb_gpib_disable_eos,
+ .parallel_poll = usb_gpib_parallel_poll,
+ .parallel_poll_configure = usb_gpib_parallel_poll_configure,
+ .parallel_poll_response = usb_gpib_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = usb_gpib_line_status,
+ .update_status = usb_gpib_update_status,
+ .primary_address = usb_gpib_primary_address,
+ .secondary_address = usb_gpib_secondary_address,
+ .serial_poll_response = usb_gpib_serial_poll_response,
+ .serial_poll_status = usb_gpib_serial_poll_status,
+ .t1_delay = usb_gpib_t1_delay,
+ .return_to_local = usb_gpib_return_to_local,
+ .skip_check_for_command_acceptors = 1
};
/*
@@ -1181,7 +1179,11 @@ static int usb_gpib_init_module(struct usb_interface *interface)
return rv;
if (!assigned_usb_minors) {
- gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
+ rv = gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
+ if (rv) {
+ pr_err("lpvo_usb_gpib: gpib_register_driver failed: error = %d\n", rv);
+ goto exit;
+ }
} else {
/* check if minor is already registered - maybe useless, but if
* it happens the code is inconsistent somewhere
@@ -1878,7 +1880,7 @@ static int skel_release(struct inode *inode, struct file *file)
* user space access to read function
*/
-static ssize_t skel_read(struct file *file, char *buffer, size_t count,
+static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_skel *dev;
@@ -1909,7 +1911,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
* user space access to write function
*/
-static ssize_t skel_write(struct file *file, const char *user_buffer,
+static ssize_t skel_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos)
{
struct usb_skel *dev;
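The skel_read()/skel_write() hunks above only add the __user address-space annotation, which lets sparse verify that user-space pointers are copied with copy_to_user()/copy_from_user() rather than dereferenced directly. A minimal sketch of how such a buffer is consumed (hypothetical example_read(), not the driver's actual bulk-transfer logic):

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical read handler: the __user buffer is only ever copied to. */
static ssize_t example_read(struct file *file, char __user *buffer,
                            size_t count, loff_t *ppos)
{
        static const char msg[] = "hello\n";
        size_t len = sizeof(msg) - 1;

        if (*ppos >= len)
                return 0;                       /* EOF */
        if (count > len - *ppos)
                count = len - *ppos;

        /* copy_to_user() returns the number of bytes it could not copy. */
        if (copy_to_user(buffer, msg + *ppos, count))
                return -EFAULT;

        *ppos += count;
        return count;
}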
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index b7b6fb1be379..d0656dc520f5 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -85,7 +85,7 @@ static void ni_usb_bulk_complete(struct urb *urb)
// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
// urb->status, urb->error_count, urb->actual_length);
- up(&context->complete);
+ complete(&context->complete);
}
static void ni_usb_timeout_handler(struct timer_list *t)
@@ -94,7 +94,7 @@ static void ni_usb_timeout_handler(struct timer_list *t)
struct ni_usb_urb_ctx *context = &ni_priv->context;
context->timed_out = 1;
- up(&context->complete);
+ complete(&context->complete);
};
// I'm using nonblocking loosely here, it only means -EAGAIN can be returned in certain cases
@@ -124,7 +124,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
}
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_pipe = usb_sndbulkpipe(usb_dev, ni_priv->bulk_out_endpoint);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(ni_priv->bulk_urb, usb_dev, out_pipe, data, data_length,
&ni_usb_bulk_complete, context);
@@ -143,7 +143,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
return retval;
}
mutex_unlock(&ni_priv->bulk_transfer_lock);
- down(&context->complete); // wait for ni_usb_bulk_complete
+ wait_for_completion(&context->complete); // wait for ni_usb_bulk_complete
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
@@ -210,7 +210,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
in_pipe = usb_rcvbulkpipe(usb_dev, ni_priv->bulk_in_endpoint);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(ni_priv->bulk_urb, usb_dev, in_pipe, data, data_length,
&ni_usb_bulk_complete, context);
@@ -231,7 +231,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
mutex_unlock(&ni_priv->bulk_transfer_lock);
if (interruptible) {
- if (down_interruptible(&context->complete)) {
+ if (wait_for_completion_interruptible(&context->complete)) {
/* If we got interrupted by a signal while
* waiting for the usb gpib to respond, we
* should send a stop command so it will
@@ -243,10 +243,10 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
/* now do an uninterruptible wait, it shouldn't take long
* for the board to respond now.
*/
- down(&context->complete);
+ wait_for_completion(&context->complete);
}
} else {
- down(&context->complete);
+ wait_for_completion(&context->complete);
}
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
@@ -783,8 +783,10 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
- if (!in_data)
+ if (!in_data) {
+ mutex_unlock(&ni_priv->addressed_transfer_lock);
return -ENOMEM;
+ }
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &usb_bytes_read,
ni_usb_timeout_msecs(board->usec_timeout), 1);
@@ -2351,33 +2353,33 @@ static void ni_usb_detach(gpib_board_t *board)
mutex_unlock(&ni_usb_hotplug_lock);
}
-gpib_interface_t ni_usb_gpib_interface = {
-name: "ni_usb_b",
-attach : ni_usb_attach,
-detach : ni_usb_detach,
-read : ni_usb_read,
-write : ni_usb_write,
-command : ni_usb_command,
-take_control : ni_usb_take_control,
-go_to_standby : ni_usb_go_to_standby,
-request_system_control : ni_usb_request_system_control,
-interface_clear : ni_usb_interface_clear,
-remote_enable : ni_usb_remote_enable,
-enable_eos : ni_usb_enable_eos,
-disable_eos : ni_usb_disable_eos,
-parallel_poll : ni_usb_parallel_poll,
-parallel_poll_configure : ni_usb_parallel_poll_configure,
-parallel_poll_response : ni_usb_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : ni_usb_line_status,
-update_status : ni_usb_update_status,
-primary_address : ni_usb_primary_address,
-secondary_address : ni_usb_secondary_address,
-serial_poll_response : ni_usb_serial_poll_response,
-serial_poll_status : ni_usb_serial_poll_status,
-t1_delay : ni_usb_t1_delay,
-return_to_local : ni_usb_return_to_local,
-skip_check_for_command_acceptors : 1
+static gpib_interface_t ni_usb_gpib_interface = {
+ .name = "ni_usb_b",
+ .attach = ni_usb_attach,
+ .detach = ni_usb_detach,
+ .read = ni_usb_read,
+ .write = ni_usb_write,
+ .command = ni_usb_command,
+ .take_control = ni_usb_take_control,
+ .go_to_standby = ni_usb_go_to_standby,
+ .request_system_control = ni_usb_request_system_control,
+ .interface_clear = ni_usb_interface_clear,
+ .remote_enable = ni_usb_remote_enable,
+ .enable_eos = ni_usb_enable_eos,
+ .disable_eos = ni_usb_disable_eos,
+ .parallel_poll = ni_usb_parallel_poll,
+ .parallel_poll_configure = ni_usb_parallel_poll_configure,
+ .parallel_poll_response = ni_usb_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = ni_usb_line_status,
+ .update_status = ni_usb_update_status,
+ .primary_address = ni_usb_primary_address,
+ .secondary_address = ni_usb_secondary_address,
+ .serial_poll_response = ni_usb_serial_poll_response,
+ .serial_poll_status = ni_usb_serial_poll_status,
+ .t1_delay = ni_usb_t1_delay,
+ .return_to_local = ni_usb_return_to_local,
+ .skip_check_for_command_acceptors = 1
};
// Table with the USB-devices: just now only testing IDs
@@ -2619,12 +2621,23 @@ static struct usb_driver ni_usb_bus_driver = {
static int __init ni_usb_init_module(void)
{
int i;
+ int ret;
pr_info("ni_usb_gpib driver loading\n");
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++)
ni_usb_driver_interfaces[i] = NULL;
- usb_register(&ni_usb_bus_driver);
- gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
+
+ ret = usb_register(&ni_usb_bus_driver);
+ if (ret) {
+ pr_err("ni_usb_gpib: usb_register failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("ni_usb_gpib: gpib_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
index 9b21dfa0f3f6..4b297db09a9b 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
@@ -56,7 +56,7 @@ enum hs_plus_endpoint_addresses {
};
struct ni_usb_urb_ctx {
- struct semaphore complete;
+ struct completion complete;
unsigned timed_out : 1;
};
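The ni_usb changes above swap a semaphore that was only used for signalling for a struct completion, the primitive intended for one-shot "wait until this event happens" synchronization. A minimal sketch of the pattern (hypothetical example_ctx, not the driver's ni_usb_urb_ctx or its URB handling):

#include <linux/completion.h>
#include <linux/errno.h>

struct example_ctx {
        struct completion done;
};

/* Callback side, e.g. a URB completion or timer handler. */
static void example_event_happened(struct example_ctx *ctx)
{
        complete(&ctx->done);                   /* wake one waiter */
}

/* Submitting side. */
static int example_wait(struct example_ctx *ctx)
{
        init_completion(&ctx->done);            /* must happen before any complete() */

        /* ... submit the asynchronous operation here ... */

        /* The interruptible variant returns nonzero if a signal arrived first. */
        if (wait_for_completion_interruptible(&ctx->done))
                return -ERESTARTSYS;
        return 0;
}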
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index d22af25125b1..c0b07cb63d9a 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -238,116 +238,116 @@ static void pc2_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t pc2_interface = {
-name: "pcII",
-attach : pc2_attach,
-detach : pc2_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2_interface = {
+ .name = "pcII",
+ .attach = pc2_attach,
+ .detach = pc2_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2a_interface = {
-name: "pcIIa",
-attach : pc2a_attach,
-detach : pc2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2a_interface = {
+ .name = "pcIIa",
+ .attach = pc2a_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2a_cb7210_interface = {
-name: "pcIIa_cb7210",
-attach : pc2a_cb7210_attach,
-detach : pc2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL, //XXX
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2a_cb7210_interface = {
+ .name = "pcIIa_cb7210",
+ .attach = pc2a_cb7210_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
-gpib_interface_t pc2_2a_interface = {
-name: "pcII_IIa",
-attach : pc2_2a_attach,
-detach : pc2_2a_detach,
-read : pc2_read,
-write : pc2_write,
-command : pc2_command,
-take_control : pc2_take_control,
-go_to_standby : pc2_go_to_standby,
-request_system_control : pc2_request_system_control,
-interface_clear : pc2_interface_clear,
-remote_enable : pc2_remote_enable,
-enable_eos : pc2_enable_eos,
-disable_eos : pc2_disable_eos,
-parallel_poll : pc2_parallel_poll,
-parallel_poll_configure : pc2_parallel_poll_configure,
-parallel_poll_response : pc2_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : pc2_update_status,
-primary_address : pc2_primary_address,
-secondary_address : pc2_secondary_address,
-serial_poll_response : pc2_serial_poll_response,
-serial_poll_status : pc2_serial_poll_status,
-t1_delay : pc2_t1_delay,
-return_to_local : pc2_return_to_local,
+static gpib_interface_t pc2_2a_interface = {
+ .name = "pcII_IIa",
+ .attach = pc2_2a_attach,
+ .detach = pc2_2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
};
static int allocate_private(gpib_board_t *board)
@@ -426,7 +426,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2_reg_offset;
- if (request_region(config->ibbase, pc2_iosize, "pc2") == 0) {
+ if (!request_region(config->ibbase, pc2_iosize, "pc2")) {
pr_err("gpib: ioports are already in use\n");
return -1;
}
@@ -635,12 +635,42 @@ void pc2_2a_detach(gpib_board_t *board)
static int __init pc2_init_module(void)
{
- gpib_register_driver(&pc2_interface, THIS_MODULE);
- gpib_register_driver(&pc2a_interface, THIS_MODULE);
- gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
- gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
+ int ret;
+
+ ret = gpib_register_driver(&pc2_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ return ret;
+ }
+
+ ret = gpib_register_driver(&pc2a_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pc2a;
+ }
+
+ ret = gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_cb7210;
+ }
+
+ ret = gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
+ if (ret) {
+ pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ goto err_pc2_2a;
+ }
return 0;
+
+err_pc2_2a:
+ gpib_unregister_driver(&pc2a_cb7210_interface);
+err_cb7210:
+ gpib_unregister_driver(&pc2a_interface);
+err_pc2a:
+ gpib_unregister_driver(&pc2_interface);
+
+ return ret;
}
static void __exit pc2_exit_module(void)
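Every interface-table hunk in this series applies the same two changes: the GCC-only "field: value" initializer syntax becomes standard C99 designated initializers, and the tables gain static linkage because nothing outside their own file references them. A minimal sketch of the before and after forms, using a hypothetical example_ops struct rather than the real gpib_interface_t:

/* Hypothetical ops table, only to illustrate the syntax change. */
struct example_ops {
        const char *name;
        int (*attach)(void);
        void (*detach)(void);
};

static int example_attach(void) { return 0; }
static void example_detach(void) { }

/*
 * Old form (GCC extension, removed throughout this series):
 *
 *      struct example_ops old_style = {
 *      name:   "example",
 *      attach: example_attach,
 *      detach: example_detach,
 *      };
 */

/* New form: C99 designated initializers with static linkage. */
static const struct example_ops example_table = {
        .name   = "example",
        .attach = example_attach,
        .detach = example_detach,
};

Designated initializers also leave unnamed members zero-initialized, so entries such as local_parallel_poll_mode can simply remain NULL.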
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index 4bd352967616..ea64dde46bcb 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Hardware driver for NI Mite PCI interface chip,
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/staging/gpib/tnt4882/mite.h
index edb873435b51..522d6b56cb7d 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/staging/gpib/tnt4882/mite.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hardware driver for NI Mite PCI interface chip
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index ce91c3eb768c..b39ab2abe495 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -116,7 +116,7 @@ static inline void tnt_paged_writeb(struct tnt4882_priv *priv, unsigned int valu
/* readb/writeb wrappers */
static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- void *address = priv->nec7210_priv.mmiobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
unsigned short retval;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -154,7 +154,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, unsigned long offset)
{
- void *address = priv->nec7210_priv.mmiobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
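Like the ioremap()/iounmap() fix in ines_gpib_config() earlier, the tnt_readb()/tnt_writeb() hunks above only add the __iomem annotation so sparse can check that MMIO pointers are accessed through readb()/writeb() and released with iounmap(), never dereferenced or cast away. A minimal sketch of that usage (hypothetical example_mmio_touch() with an assumed register offset):

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical helper: map a register window, poke one byte, unmap. */
static int example_mmio_touch(phys_addr_t base, resource_size_t size)
{
        void __iomem *regs;

        regs = ioremap(base, size);
        if (!regs)
                return -ENOMEM;

        /* Accessors take the __iomem pointer; it is never dereferenced. */
        writeb(0xff, regs + 0xf0);              /* offset chosen for illustration only */
        (void)readb(regs + 0xf0);

        iounmap(regs);                          /* no cast needed once the type is right */
        return 0;
}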
@@ -890,285 +890,285 @@ void tnt4882_return_to_local(gpib_board_t *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-gpib_interface_t ni_pci_interface = {
-name: "ni_pci",
-attach : ni_pci_attach,
-detach : ni_pci_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pci_interface = {
+ .name = "ni_pci",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_pci_accel_interface = {
-name: "ni_pci_accel",
-attach : ni_pci_attach,
-detach : ni_pci_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pci_accel_interface = {
+ .name = "ni_pci_accel",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_isa_interface = {
-name: "ni_isa",
-attach : ni_tnt_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_isa_interface = {
+ .name = "ni_isa",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nat4882_isa_interface = {
-name: "ni_nat4882_isa",
-attach : ni_nat4882_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_read,
-write : tnt4882_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nat4882_isa_interface = {
+ .name = "ni_nat4882_isa",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nec_isa_interface = {
-name: "ni_nec_isa",
-attach : ni_nec_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_read,
-write : tnt4882_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nec_isa_interface = {
+ .name = "ni_nec_isa",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_isa_accel_interface = {
-name: "ni_isa_accel",
-attach : ni_tnt_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_isa_accel_interface = {
+ .name = "ni_isa_accel",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nat4882_isa_accel_interface = {
-name: "ni_nat4882_isa_accel",
-attach : ni_nat4882_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response2 : tnt4882_serial_poll_response2,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nat4882_isa_accel_interface = {
+ .name = "ni_nat4882_isa_accel",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_nec_isa_accel_interface = {
-name: "ni_nec_isa_accel",
-attach : ni_nec_isa_attach,
-detach : ni_isa_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command_unaccel,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : NULL,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_nec_isa_accel_interface = {
+ .name = "ni_nec_isa_accel",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
#ifdef GPIB_PCMCIA
-gpib_interface_t ni_pcmcia_interface = {
-name: "ni_pcmcia",
-attach : ni_pcmcia_attach,
-detach : ni_pcmcia_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pcmcia_interface = {
+ .name = "ni_pcmcia",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
-gpib_interface_t ni_pcmcia_accel_interface = {
-name: "ni_pcmcia_accel",
-attach : ni_pcmcia_attach,
-detach : ni_pcmcia_detach,
-read : tnt4882_accel_read,
-write : tnt4882_accel_write,
-command : tnt4882_command,
-take_control : tnt4882_take_control,
-go_to_standby : tnt4882_go_to_standby,
-request_system_control : tnt4882_request_system_control,
-interface_clear : tnt4882_interface_clear,
-remote_enable : tnt4882_remote_enable,
-enable_eos : tnt4882_enable_eos,
-disable_eos : tnt4882_disable_eos,
-parallel_poll : tnt4882_parallel_poll,
-parallel_poll_configure : tnt4882_parallel_poll_configure,
-parallel_poll_response : tnt4882_parallel_poll_response,
-local_parallel_poll_mode : NULL, // XXX
-line_status : tnt4882_line_status,
-update_status : tnt4882_update_status,
-primary_address : tnt4882_primary_address,
-secondary_address : tnt4882_secondary_address,
-serial_poll_response : tnt4882_serial_poll_response,
-serial_poll_status : tnt4882_serial_poll_status,
-t1_delay : tnt4882_t1_delay,
-return_to_local : tnt4882_return_to_local,
+static gpib_interface_t ni_pcmcia_accel_interface = {
+ .name = "ni_pcmcia_accel",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
};
#endif
@@ -1516,29 +1516,109 @@ static int __init tnt4882_init_module(void)
result = pci_register_driver(&tnt4882_pci_driver);
if (result) {
- pr_err("tnt4882: pci_driver_register failed!\n");
+ pr_err("tnt4882_gpib: pci_register_driver failed: error = %d\n", result);
return result;
}
- gpib_register_driver(&ni_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
- gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
- gpib_register_driver(&ni_pci_interface, THIS_MODULE);
- gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
+ result = gpib_register_driver(&ni_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_isa;
+ }
+
+ result = gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nat4882_isa;
+ }
+
+ result = gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nat4882_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nec_isa;
+ }
+
+ result = gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_nec_isa_accel;
+ }
+
+ result = gpib_register_driver(&ni_pci_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci;
+ }
+
+ result = gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pci_accel;
+ }
+
#ifdef GPIB_PCMCIA
- gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
- gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
- if (init_ni_gpib_cs() < 0)
- return -1;
+ result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pcmcia;
+ }
+
+ result = gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
+ if (result) {
+ pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ goto err_pcmcia_accel;
+ }
+
+ result = init_ni_gpib_cs();
+ if (result) {
+ pr_err("tnt4882_gpib: pcmcia_register_driver failed: error = %d\n", result);
+ goto err_pcmcia_driver;
+ }
#endif
mite_init();
mite_list_devices();
return 0;
+
+#ifdef GPIB_PCMCIA
+err_pcmcia_driver:
+ gpib_unregister_driver(&ni_pcmcia_accel_interface);
+err_pcmcia_accel:
+ gpib_unregister_driver(&ni_pcmcia_interface);
+err_pcmcia:
+#endif
+ gpib_unregister_driver(&ni_pci_accel_interface);
+err_pci_accel:
+ gpib_unregister_driver(&ni_pci_interface);
+err_pci:
+ gpib_unregister_driver(&ni_nec_isa_accel_interface);
+err_nec_isa_accel:
+ gpib_unregister_driver(&ni_nec_isa_interface);
+err_nec_isa:
+ gpib_unregister_driver(&ni_nat4882_isa_accel_interface);
+err_nat4882_isa_accel:
+ gpib_unregister_driver(&ni_nat4882_isa_interface);
+err_nat4882_isa:
+ gpib_unregister_driver(&ni_isa_accel_interface);
+err_isa_accel:
+ gpib_unregister_driver(&ni_isa_interface);
+err_isa:
+ pci_unregister_driver(&tnt4882_pci_driver);
+
+ return result;
}
static void __exit tnt4882_exit_module(void)
@@ -1816,8 +1896,8 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->offset = atgpib_reg_offset;
DEBUG(0, "ioport1 window attributes: 0x%lx\n", curr_dev->resource[0]->flags);
- if (request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "tnt4882") == 0) {
+ if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
+ "tnt4882")) {
pr_err("gpib: ioports starting at 0x%lx are already in use\n",
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
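For context, the tnt4882 init-path rework above follows the usual register-then-unwind idiom: each successful registration gets a matching unregister label in reverse order, so a failure part-way through leaves nothing registered. A minimal sketch of the pattern with generic, hypothetical helper names (not the driver's own functions):

/* Hypothetical registration helpers standing in for the pci/gpib calls above. */
int register_a(void), register_b(void), register_c(void);
void unregister_a(void), unregister_b(void);

static int example_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_b;

	ret = register_c();
	if (ret)
		goto err_c;

	return 0;		/* everything registered */

err_c:
	unregister_b();		/* undo in reverse order of registration */
err_b:
	unregister_a();
	return ret;
}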
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index ca71023df447..5d80ace41d8e 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1128,18 +1128,7 @@ done:
static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
- const struct gb_camera_debugfs_entry *entry =
- &gb_camera_debugfs_entries[i];
-
- if (!strcmp(file->f_path.dentry->d_iname, entry->name)) {
- file->private_data = (void *)entry;
- break;
- }
- }
-
+ file->private_data = (void *)debugfs_get_aux(file);
return 0;
}
@@ -1175,8 +1164,8 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
gcam->debugfs.buffers[i].length = 0;
- debugfs_create_file(entry->name, entry->mask,
- gcam->debugfs.root, gcam,
+ debugfs_create_file_aux(entry->name, entry->mask,
+ gcam->debugfs.root, gcam, entry,
&gb_camera_debugfs_ops);
}
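For reference, the greybus change above pairs debugfs_create_file_aux() with debugfs_get_aux(): the per-file pointer supplied at creation time is handed back in the open handler, so the old name-matching loop is unnecessary. A minimal sketch mirroring that usage (the entry struct and names here are illustrative, not from the driver):

#include <linux/debugfs.h>

struct my_entry {
	const char *name;
	umode_t mode;
};

static int my_open(struct inode *inode, struct file *file)
{
	/* Recover the aux pointer registered for this file. */
	file->private_data = (void *)debugfs_get_aux(file);
	return 0;
}

static void my_register(struct dentry *root, void *drvdata,
			const struct my_entry *entry,
			const struct file_operations *fops)
{
	/* drvdata becomes inode->i_private; entry is stored as the aux pointer. */
	debugfs_create_file_aux(entry->name, entry->mode, root, drvdata,
				entry, fops);
}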
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 07ed33464d71..224ca8d42721 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -624,10 +624,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
int ret;
- ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
+ ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
if (ret != bo->pgnr) {
free_pages_bulk_array(ret, bo->pages);
- dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
+ dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
return -ENOMEM;
}
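For context, alloc_pages_bulk() (the renamed alloc_pages_bulk_array()) fills an array of struct page pointers and returns how many pages it actually allocated, which may be fewer than requested. A minimal sketch of the call pattern, with the partial-failure cleanup simplified to plain __free_page() (illustrative only):

#include <linux/gfp.h>
#include <linux/mm.h>

static int fill_page_array(struct page **pages, unsigned int nr)
{
	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
	int got;

	got = alloc_pages_bulk(gfp, nr, pages);
	if (got != nr) {
		/* Release whatever was allocated and report failure. */
		while (got--)
			__free_page(pages[got]);
		return -ENOMEM;
	}
	return 0;
}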
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 118bff988bc7..bb28daa4d713 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -54,22 +54,18 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
break;
ret = imx_media_of_add_csi(imxmd, csi_np);
+ of_node_put(csi_np);
if (ret) {
/* unavailable or already added is not an error */
if (ret == -ENODEV || ret == -EEXIST) {
- of_node_put(csi_np);
continue;
}
/* other error, can't continue */
- goto err_out;
+ return ret;
}
}
return 0;
-
-err_out:
- of_node_put(csi_np);
- return ret;
}
EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
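For context, the imx-media-of rework above drops the node reference immediately after the node is used, so every exit path (skip, error return, normal completion) is covered without a separate unwind label. A generic sketch of that pattern; the property name and handle_port() helper are illustrative, not taken from the driver:

#include <linux/of.h>

int handle_port(struct device_node *port);	/* hypothetical consumer */

static int add_all_ports(struct device_node *np)
{
	struct device_node *port;
	int i, ret;

	for (i = 0; ; i++) {
		port = of_parse_phandle(np, "ports", i);	/* takes a reference */
		if (!port)
			break;

		ret = handle_port(port);
		of_node_put(port);	/* drop the reference on every path */
		if (ret && ret != -ENODEV && ret != -EEXIST)
			return ret;
	}
	return 0;
}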
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index ede02e8c891c..0751b2e04895 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -418,7 +418,6 @@ static int max96712_probe(struct i2c_client *client)
priv->info = of_device_get_match_data(&client->dev);
priv->client = client;
- i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
if (IS_ERR(priv->regmap))
@@ -448,7 +447,8 @@ static int max96712_probe(struct i2c_client *client)
static void max96712_remove(struct i2c_client *client)
{
- struct max96712_priv *priv = i2c_get_clientdata(client);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd);
v4l2_async_unregister_subdev(&priv->sd);
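For context, the max96712 change assumes the subdev registration path (e.g. v4l2_i2c_subdev_init()) already stores the struct v4l2_subdev pointer as the i2c client data, so the driver-private structure is recovered with container_of() instead of a second i2c_set_clientdata() call. A minimal sketch of that pattern with illustrative struct names:

#include <linux/i2c.h>
#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

struct my_priv {
	struct v4l2_subdev sd;
	/* ... driver state ... */
};

static void my_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct my_priv *priv = container_of(sd, struct my_priv, sd);

	v4l2_async_unregister_subdev(&priv->sd);
	/* ... remaining teardown ... */
}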
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index ba200ee669f3..8560b84a3146 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -48,7 +48,6 @@ r8723bs-y = \
hal/HalHWImg8723B_RF.o \
hal/HalPhyRf_8723B.o \
os_dep/ioctl_cfg80211.o \
- os_dep/ioctl_linux.o \
os_dep/mlme_linux.o \
os_dep/osdep_service.o \
os_dep/os_intfs.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index fcda9db6ebb5..79d543d88278 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -141,12 +141,12 @@ int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapt
*/
int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj)
{
- int ret = false;
- int value = atomic_inc_return(&dvobj->continual_io_error);
- if (value > MAX_CONTINUAL_IO_ERR)
- ret = true;
+ int error_count = atomic_inc_return(&dvobj->continual_io_error);
- return ret;
+ if (error_count > MAX_CONTINUAL_IO_ERR)
+ return true;
+
+ return false;
}
/*
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 317f3db19397..952ce6dd5af9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -4959,7 +4959,6 @@ void _linked_info_dump(struct adapter *padapter)
rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
}
}
- rtw_hal_set_def_var(padapter, HAL_DEF_DBG_RX_INFO_DUMP, NULL);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 699cff7b0ac9..297c93d65315 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1467,7 +1467,8 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
- pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->pdata = pxmitbuf->phead;
+ pxmitbuf->ptail = pxmitbuf->phead;
pxmitbuf->agg_num = 1;
if (pxmitbuf->sctx)
@@ -1526,7 +1527,8 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
- pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->pdata = pxmitbuf->phead;
+ pxmitbuf->ptail = pxmitbuf->phead;
pxmitbuf->agg_num = 0;
pxmitbuf->pg_num = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 95fb38283c58..b41ec89932af 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -682,14 +682,6 @@ u8 SetHalDefVar(
u8 bResult = _SUCCESS;
switch (variable) {
- case HAL_DEF_DBG_RX_INFO_DUMP:
-
- if (odm->bLinked) {
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- rtw_dump_raw_rssi_info(adapter);
- #endif
- }
- break;
case HW_DEF_ODM_DBG_FLAG:
ODM_CmnInfoUpdate(odm, ODM_CMNINFO_DBG_COMP, *((u64 *)value));
break;
@@ -879,53 +871,6 @@ void rtw_hal_check_rxfifo_full(struct adapter *adapter)
}
}
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-void rtw_dump_raw_rssi_info(struct adapter *padapter)
-{
- u8 isCCKrate, rf_path;
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
-
- isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
-
- if (isCCKrate)
- psample_pkt_rssi->mimo_signal_strength[0] = psample_pkt_rssi->pwdball;
-
- for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- if (!isCCKrate) {
- netdev_dbg(padapter->pnetdev, ", rx_ofdm_pwr:%d(dBm), rx_ofdm_snr:%d(dB)\n",
- psample_pkt_rssi->ofdm_pwr[rf_path],
- psample_pkt_rssi->ofdm_snr[rf_path]);
- }
- }
-}
-
-void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
-{
- u8 isCCKrate, rf_path;
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
-
- struct odm_phy_info *pPhyInfo = (PODM_PHY_INFO_T)(&pattrib->phy_info);
- struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
-
- psample_pkt_rssi->data_rate = pattrib->data_rate;
- isCCKrate = pattrib->data_rate <= DESC_RATE11M;
-
- psample_pkt_rssi->pwdball = pPhyInfo->rx_pwd_ba11;
- psample_pkt_rssi->pwr_all = pPhyInfo->recv_signal_power;
-
- for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
- psample_pkt_rssi->mimo_signal_strength[rf_path] = pPhyInfo->rx_mimo_signal_strength[rf_path];
- psample_pkt_rssi->mimo_signal_quality[rf_path] = pPhyInfo->rx_mimo_signal_quality[rf_path];
- if (!isCCKrate) {
- psample_pkt_rssi->ofdm_pwr[rf_path] = pPhyInfo->RxPwr[rf_path];
- psample_pkt_rssi->ofdm_snr[rf_path] = pPhyInfo->RxSNR[rf_path];
- }
- }
-}
-#endif
-
static u32 Array_kfreemap[] = {
0xf8, 0xe,
0xf6, 0xc,
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
index 717faebf8aca..db3d7d72bffa 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
@@ -67,8 +67,4 @@ void rtl8723b_process_phy_info(struct adapter *padapter, void *prframe)
/* Check EVM */
/* */
process_link_qual(padapter, precvframe);
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- rtw_store_phy_info(padapter, prframe);
- #endif
-
}
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 4db93484725f..258a74076dd9 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -149,11 +149,6 @@ bool eqNByte(u8 *str1, u8 *str2, u32 num);
bool GetU1ByteIntegerFromStringInDecimal(char *str, u8 *in);
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe);
-void rtw_dump_raw_rssi_info(struct adapter *padapter);
-#endif
-
#define HWSET_MAX_SIZE 512
void rtw_bb_rf_gain_offset(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 282e141616b0..85de862823c2 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -144,7 +144,6 @@ enum hal_def_variable {
HAL_DEF_PCI_AMD_L1_SUPPORT,
HAL_DEF_PCI_ASPM_OSC, /* Support for ASPM OSC, added by Roger, 2013.03.27. */
HAL_DEF_MACID_SLEEP, /* Support for MACID sleep */
- HAL_DEF_DBG_RX_INFO_DUMP,
};
enum hal_odm_variable {
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 215ece612f71..73199be78139 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -47,8 +47,6 @@ u32 rtw_start_drv_threads(struct adapter *padapter);
void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
-int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
void rtw_unregister_netdevs(struct dvobj_priv *dvobj);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 18dd1464e0c2..aa9f9d5ecd01 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -89,21 +89,6 @@ struct phy_info {
u8 btCoexPwrAdjust;
};
-#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
-struct rx_raw_rssi {
- u8 data_rate;
- u8 pwdball;
- s8 pwr_all;
-
- u8 mimo_signal_strength[4];/* in 0~100 index */
- u8 mimo_signal_quality[4];
-
- s8 ofdm_pwr[4];
- u8 ofdm_snr[4];
-
-};
-#endif
-
struct rx_pkt_attrib {
u16 pkt_len;
u8 physt;
@@ -221,9 +206,6 @@ struct recv_priv {
u8 signal_strength;
u8 signal_qual;
s8 rssi; /* translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength); */
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- struct rx_raw_rssi raw_rssi_info;
- #endif
/* s8 rxpwdb; */
s16 noise;
/* int RxSNRdB[2]; */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
deleted file mode 100644
index 793b051536f3..000000000000
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <drv_types.h>
-#include <rtw_mp.h>
-#include <hal_btcoex.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
-
-static int wpa_set_auth_algs(struct net_device *dev, u32 value)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- int ret = 0;
-
- if ((value & IW_AUTH_ALG_SHARED_KEY) && (value & IW_AUTH_ALG_OPEN_SYSTEM)) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
- } else if (value & IW_AUTH_ALG_SHARED_KEY) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
-
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
- } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
- /* padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; */
- if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- }
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
-{
- int ret = 0;
- u8 max_idx;
- u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
- param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
- param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0)
- max_idx = WEP_KEYS - 1;
- else
- max_idx = BIP_MAX_KEYID;
-
- if (param->u.crypt.idx > max_idx) {
- netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
- ret = -EINVAL;
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
-
- wep_key_idx = param->u.crypt.idx;
- wep_key_len = param->u.crypt.key_len;
-
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- /* Allocate a full structure to avoid potentially running off the end. */
- pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
- if (!pwep) {
- ret = -ENOMEM;
- goto exit;
- }
-
- pwep->key_length = wep_key_len;
- pwep->length = wep_total_len;
-
- if (wep_key_len == 13) {
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- }
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- pwep->key_index = wep_key_idx;
- pwep->key_index |= 0x80000000;
-
- memcpy(pwep->key_material, param->u.crypt.key, pwep->key_length);
-
- if (param->u.crypt.set_tx) {
- if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
- ret = -EOPNOTSUPP;
- } else {
- /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
- /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to fw/cam */
-
- if (wep_key_idx >= WEP_KEYS) {
- ret = -EOPNOTSUPP;
- goto exit;
- }
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
- rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
- }
-
- goto exit;
- }
-
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
- struct sta_info *psta, *pbcmc_sta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) { /* sta mode */
- psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta) {
- /* Jeff: don't disable ieee8021x_blocked while clearing key */
- if (strcmp(param->u.crypt.alg, "none") != 0)
- psta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
- psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- }
-
- if (param->u.crypt.set_tx == 1) { /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
-
- padapter->securitypriv.busetkipkey = false;
- /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
- }
-
- rtw_setstakey_cmd(padapter, psta, true, true);
- } else { /* group key */
- if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) {
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- /* only TKIP group key need to install this */
- if (param->u.crypt.key_len > 16) {
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
- }
- padapter->securitypriv.binstallGrpkey = true;
-
- padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
-
- rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
- } else if (strcmp(param->u.crypt.alg, "BIP") == 0) {
- /* save the IGTK key, length 16 bytes */
- memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- padapter->securitypriv.dot11wBIPKeyid = param->u.crypt.idx;
- padapter->securitypriv.binstallBIPkey = true;
- }
- }
- }
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- /* Jeff: don't disable ieee8021x_blocked while clearing key */
- if (strcmp(param->u.crypt.alg, "none") != 0)
- pbcmc_sta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
- pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- }
- }
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- /* adhoc mode */
- }
- }
-
-exit:
-
- kfree(pwep);
- return ret;
-}
-
-static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
-{
- u8 *buf = NULL;
- int group_cipher = 0, pairwise_cipher = 0;
- int ret = 0;
- u8 null_addr[] = {0, 0, 0, 0, 0, 0};
-
- if (ielen > MAX_WPA_IE_LEN || !pie) {
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (!pie)
- return ret;
- else
- return -EINVAL;
- }
-
- if (ielen) {
- buf = rtw_zmalloc(ielen);
- if (!buf) {
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(buf, pie, ielen);
-
- if (ielen < RSN_HEADER_LEN) {
- ret = -1;
- goto exit;
- }
-
- if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- if (group_cipher == 0)
- group_cipher = WPA_CIPHER_NONE;
- if (pairwise_cipher == 0)
- pairwise_cipher = WPA_CIPHER_NONE;
-
- switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- {/* set wps_ie */
- u16 cnt = 0;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- while (cnt < ielen) {
- eid = buf[cnt];
-
- if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
- padapter->securitypriv.wps_ie_len = ((buf[cnt + 1] + 2) < MAX_WPS_IE_LEN) ? (buf[cnt + 1] + 2) : MAX_WPS_IE_LEN;
-
- memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
-
- set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
-
- cnt += buf[cnt + 1] + 2;
-
- break;
- } else {
- cnt += buf[cnt + 1] + 2; /* goto next */
- }
- }
- }
- }
-
- /* TKIP and AES disallow multicast packets until installing group key */
- if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
- /* WPS open need to enable multicast */
- /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true) */
- rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);
-
-exit:
-
- kfree(buf);
-
- return ret;
-}
-
-static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
- uint ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- switch (name) {
- case IEEE_PARAM_WPA_ENABLED:
-
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; /* 802.1x */
-
- /* ret = ieee80211_wpa_enable(ieee, value); */
-
- switch ((value) & 0xff) {
- case 1: /* WPA */
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case 2: /* WPA2 */
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- }
-
- break;
-
- case IEEE_PARAM_TKIP_COUNTERMEASURES:
- /* ieee->tkip_countermeasures =value; */
- break;
-
- case IEEE_PARAM_DROP_UNENCRYPTED:
- {
- /* HACK:
- *
- * wpa_supplicant calls set_wpa_enabled when the driver
- * is loaded and unloaded, regardless of if WPA is being
- * used. No other calls are made which can be used to
- * determine if encryption will be used or not prior to
- * association being expected. If encryption is not being
- * used, drop_unencrypted is set to false, else true -- we
- * can use this to determine if the CAP_PRIVACY_ON bit should
- * be set.
- */
- break;
- }
- case IEEE_PARAM_PRIVACY_INVOKED:
-
- /* ieee->privacy_invoked =value; */
-
- break;
-
- case IEEE_PARAM_AUTH_ALGS:
-
- ret = wpa_set_auth_algs(dev, value);
-
- break;
-
- case IEEE_PARAM_IEEE_802_1X:
-
- /* ieee->ieee802_1x =value; */
-
- break;
-
- case IEEE_PARAM_WPAX_SELECT:
-
- /* added for WPA2 mixed mode */
- /*
- spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
- ieee->wpax_type_set = 1;
- ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
- */
-
- break;
-
- default:
-
- ret = -EOPNOTSUPP;
-
- break;
- }
-
- return ret;
-}
-
-static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- switch (command) {
- case IEEE_MLME_STA_DEAUTH:
-
- if (!rtw_set_802_11_disassociate(padapter))
- ret = -1;
-
- break;
-
- case IEEE_MLME_STA_DISASSOC:
-
- if (!rtw_set_802_11_disassociate(padapter))
- ret = -1;
-
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
-{
- struct ieee_param *param;
- uint ret = 0;
-
- /* down(&ieee->wx_sem); */
-
- if (!p->pointer || p->length != sizeof(struct ieee_param))
- return -EINVAL;
-
- param = rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
-
- switch (param->cmd) {
- case IEEE_CMD_SET_WPA_PARAM:
- ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
- break;
-
- case IEEE_CMD_SET_WPA_IE:
- /* ret = wpa_set_wpa_ie(dev, param, p->length); */
- ret = rtw_set_wpa_ie(rtw_netdev_priv(dev), (char *)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
- break;
-
- case IEEE_CMD_SET_ENCRYPTION:
- ret = wpa_set_encryption(dev, param, p->length);
- break;
-
- case IEEE_CMD_MLME:
- ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- /* up(&ieee->wx_sem); */
- return ret;
-}
-
-static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
-{
- int ret = 0;
- u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct sta_info *psta = NULL, *pbcmc_sta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
- char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
- char *grpkey = psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- /* sizeof(struct ieee_param) = 64 bytes; */
- /* if (param_len != (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) */
- if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (!psta)
- /* ret = -EINVAL; */
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "none") == 0 && !psta) {
- /* todo:clear default encryption keys */
-
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
- psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
-
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0 && !psta) {
- wep_key_idx = param->u.crypt.idx;
- wep_key_len = param->u.crypt.key_len;
-
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- /* Allocate a full structure to avoid potentially running off the end. */
- pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
- if (!pwep)
- goto exit;
-
- pwep->key_length = wep_key_len;
- pwep->length = wep_total_len;
- }
-
- pwep->key_index = wep_key_idx;
-
- memcpy(pwep->key_material, param->u.crypt.key, pwep->key_length);
-
- if (param->u.crypt.set_tx) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
- psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
- psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
-
- if (pwep->key_length == 13) {
- psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- }
-
- psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
-
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
-
- rtw_ap_set_wep_key(padapter, pwep->key_material, pwep->key_length, wep_key_idx, 1);
- } else {
- /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
- /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to cam */
-
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
-
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
-
- rtw_ap_set_wep_key(padapter, pwep->key_material, pwep->key_length, wep_key_idx, 0);
- }
-
- goto exit;
- }
-
- if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
- if (param->u.crypt.set_tx == 1) {
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
-
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- /* set mic key */
- memcpy(txkey, &param->u.crypt.key[16], 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _AES_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- } else {
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- }
-
- psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
-
- psecuritypriv->binstallGrpkey = true;
-
- psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
-
- rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- pbcmc_sta->ieee8021x_blocked = false;
- pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
- }
- }
-
- goto exit;
- }
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- if (param->u.crypt.set_tx == 1) {
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- psta->dot118021XPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psta->dot118021XPrivacy = _WEP104_;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psta->dot118021XPrivacy = _TKIP_;
-
- /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psta->dot118021XPrivacy = _AES_;
- } else {
- psta->dot118021XPrivacy = _NO_PRIVACY_;
- }
-
- rtw_ap_set_pairwise_key(padapter, psta);
-
- psta->ieee8021x_blocked = false;
-
- } else { /* group key??? */
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
- if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- /* set mic key */
- memcpy(txkey, &param->u.crypt.key[16], 8);
- memcpy(rxkey, &param->u.crypt.key[24], 8);
-
- psecuritypriv->busetkipkey = true;
-
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- psecuritypriv->dot118021XGrpPrivacy = _AES_;
-
- memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- } else {
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- }
-
- psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
-
- psecuritypriv->binstallGrpkey = true;
-
- psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
-
- rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
-
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta) {
- pbcmc_sta->ieee8021x_blocked = false;
- pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
- }
- }
- }
- }
-
-exit:
- kfree(pwep);
-
- return ret;
-}
-
-static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- unsigned char *pbuf = param->u.bcn_ie.buf;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
-
- if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
- pstapriv->max_num_sta = NUM_STA;
-
- if (rtw_check_beacon_data(padapter, pbuf, (len - 12 - 2)) == _SUCCESS)/* 12 = param header, 2:no packed */
- ret = 0;
- else
- ret = -EINVAL;
-
- return ret;
-}
-
-static void rtw_hostapd_sta_flush(struct net_device *dev)
-{
- /* _irqL irqL; */
- /* struct list_head *phead, *plist; */
- /* struct sta_info *psta = NULL; */
- struct adapter *padapter = rtw_netdev_priv(dev);
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
-
- flush_all_cam_entry(padapter); /* clear CAM */
-
- rtw_sta_flush(padapter);
-}
-
-static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
-/*
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta)
- {
- rtw_free_stainfo(padapter, psta);
-
- psta = NULL;
- }
-*/
- /* psta = rtw_alloc_stainfo(pstapriv, param->sta_addr); */
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- int flags = param->u.add_sta.flags;
-
- psta->aid = param->u.add_sta.aid;/* aid = 1~2007 */
-
- memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
-
- /* check wmm cap. */
- if (WLAN_STA_WME & flags)
- psta->qos_option = 1;
- else
- psta->qos_option = 0;
-
- if (pmlmepriv->qospriv.qos_option == 0)
- psta->qos_option = 0;
-
- /* chec 802.11n ht cap. */
- if (WLAN_STA_HT & flags) {
- psta->htpriv.ht_option = true;
- psta->qos_option = 1;
- memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct ieee80211_ht_cap));
- } else {
- psta->htpriv.ht_option = false;
- }
-
- if (!pmlmepriv->htpriv.ht_option)
- psta->htpriv.ht_option = false;
-
- update_sta_info_apmode(padapter, psta);
-
- } else {
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- u8 updated = false;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (list_empty(&psta->asoc_list) == false) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update(padapter, updated);
-
- psta = NULL;
- }
-
- return ret;
-}
-
-static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
- struct sta_data *psta_data = (struct sta_data *)param_ex->data;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
- param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
- param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
- if (psta) {
- psta_data->aid = (u16)psta->aid;
- psta_data->capability = psta->capability;
- psta_data->flags = psta->flags;
-
-/*
- nonerp_set : BIT(0)
- no_short_slot_time_set : BIT(1)
- no_short_preamble_set : BIT(2)
- no_ht_gf_set : BIT(3)
- no_ht_set : BIT(4)
- ht_20mhz_set : BIT(5)
-*/
-
- psta_data->sta_set = ((psta->nonerp_set) |
- (psta->no_short_slot_time_set << 1) |
- (psta->no_short_preamble_set << 2) |
- (psta->no_ht_gf_set << 3) |
- (psta->no_ht_set << 4) |
- (psta->ht_20mhz_set << 5));
-
- psta_data->tx_supp_rates_len = psta->bssratelen;
- memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
- memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct ieee80211_ht_cap));
- psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
- psta_data->rx_bytes = psta->sta_stats.rx_bytes;
- psta_data->rx_drops = psta->sta_stats.rx_drops;
-
- psta_data->tx_pkts = psta->sta_stats.tx_pkts;
- psta_data->tx_bytes = psta->sta_stats.tx_bytes;
- psta_data->tx_drops = psta->sta_stats.tx_drops;
-
- } else {
- ret = -1;
- }
-
- return ret;
-}
-
-static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
-{
- int ret = 0;
- struct sta_info *psta = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (psta) {
- if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC)) {
- int wpa_ie_len;
- int copy_len;
-
- wpa_ie_len = psta->wpa_ie[1];
-
- copy_len = ((wpa_ie_len + 2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)) : (wpa_ie_len + 2);
-
- param->u.wpa_ie.len = copy_len;
-
- memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
- }
- } else {
- ret = -1;
- }
-
- return ret;
-}
-
-static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
-
- kfree(pmlmepriv->wps_beacon_ie);
- pmlmepriv->wps_beacon_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_beacon_ie_len = ie_len;
- if (!pmlmepriv->wps_beacon_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
-
- update_beacon(padapter, WLAN_EID_VENDOR_SPECIFIC, wps_oui, true);
-
- pmlmeext->bstart_bss = true;
- }
-
- return ret;
-}
-
-static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
-
- kfree(pmlmepriv->wps_probe_resp_ie);
- pmlmepriv->wps_probe_resp_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_probe_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_probe_resp_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
- }
-
- return ret;
-}
-
-static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ie_len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
-
- kfree(pmlmepriv->wps_assoc_resp_ie);
- pmlmepriv->wps_assoc_resp_ie = NULL;
-
- if (ie_len > 0) {
- pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
- pmlmepriv->wps_assoc_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_assoc_resp_ie)
- return -EINVAL;
-
- memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
- }
-
- return ret;
-}
-
-static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *adapter = rtw_netdev_priv(dev);
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
- struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *mlmeinfo = &mlmeext->mlmext_info;
- int ie_len;
- u8 *ssid_ie;
- char ssid[NDIS_802_11_LENGTH_SSID + 1];
- signed int ssid_len;
- u8 ignore_broadcast_ssid;
-
- if (check_fwstate(mlmepriv, WIFI_AP_STATE) != true)
- return -EPERM;
-
- if (param->u.bcn_ie.reserved[0] != 0xea)
- return -EINVAL;
-
- mlmeinfo->hidden_ssid_mode = ignore_broadcast_ssid = param->u.bcn_ie.reserved[1];
-
- ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
- ssid_ie = rtw_get_ie(param->u.bcn_ie.buf, WLAN_EID_SSID, &ssid_len, ie_len);
-
- if (ssid_ie && ssid_len > 0 && ssid_len <= NDIS_802_11_LENGTH_SSID) {
- struct wlan_bssid_ex *pbss_network = &mlmepriv->cur_network.network;
- struct wlan_bssid_ex *pbss_network_ext = &mlmeinfo->network;
-
- memcpy(ssid, ssid_ie + 2, ssid_len);
- ssid[ssid_len] = 0x0;
-
- memcpy(pbss_network->ssid.ssid, (void *)ssid, ssid_len);
- pbss_network->ssid.ssid_length = ssid_len;
- memcpy(pbss_network_ext->ssid.ssid, (void *)ssid, ssid_len);
- pbss_network_ext->ssid.ssid_length = ssid_len;
- }
-
- return ret;
-}
-
-static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- rtw_acl_remove_sta(padapter, param->sta_addr);
- return 0;
-}
-
-static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- return -EINVAL;
- }
-
- return rtw_acl_add_sta(padapter, param->sta_addr);
-}
-
-static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- rtw_set_macaddr_acl(padapter, param->u.mlme.command);
-
- return ret;
-}
-
-static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
-{
- struct ieee_param *param;
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- /*
- * this function is expect to call in master mode, which allows no power saving
- * so, we just check hw_init_completed
- */
-
- if (!padapter->hw_init_completed)
- return -EPERM;
-
- if (!p->pointer || p->length != sizeof(*param))
- return -EINVAL;
-
- param = rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
-
- switch (param->cmd) {
- case RTL871X_HOSTAPD_FLUSH:
-
- rtw_hostapd_sta_flush(dev);
-
- break;
-
- case RTL871X_HOSTAPD_ADD_STA:
-
- ret = rtw_add_sta(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_REMOVE_STA:
-
- ret = rtw_del_sta(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_SET_BEACON:
-
- ret = rtw_set_beacon(dev, param, p->length);
-
- break;
-
- case RTL871X_SET_ENCRYPTION:
-
- ret = rtw_set_encryption(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_GET_WPAIE_STA:
-
- ret = rtw_get_sta_wpaie(dev, param);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_BEACON:
-
- ret = rtw_set_wps_beacon(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP:
-
- ret = rtw_set_wps_probe_resp(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP:
-
- ret = rtw_set_wps_assoc_resp(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_HIDDEN_SSID:
-
- ret = rtw_set_hidden_ssid(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_GET_INFO_STA:
-
- ret = rtw_ioctl_get_sta_data(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_SET_MACADDR_ACL:
-
- ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_ACL_ADD_STA:
-
- ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
-
- break;
-
- case RTL871X_HOSTAPD_ACL_REMOVE_STA:
-
- ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
-
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
- return ret;
-}
-
-/* copy from net/wireless/wext.c end */
-
-int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *)rq;
- int ret = 0;
-
- switch (cmd) {
- case RTL_IOCTL_WPA_SUPPLICANT:
- ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
- break;
- case RTL_IOCTL_HOSTAPD:
- ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 4e1917c05402..738a601c55bb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -407,7 +407,6 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_select_queue = rtw_select_queue,
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
- .ndo_do_ioctl = rtw_ioctl,
};
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index dc0d715ed970..0dbe76ee5570 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance
if (wait) {
if (!wait_for_completion_timeout(&instance->msg_avail_comp,
- msecs_to_jiffies(10 * 1000))) {
+ secs_to_jiffies(10))) {
dev_err(instance->dev,
"vchi message timeout, msg=%d\n", m->type);
return -ETIMEDOUT;
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 31a44025e08f..733594dde9ae 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -761,8 +761,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap(
- image->bus_resource.start, size);
+ image->kern_base = ioremap(image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 922b207bc69d..70d76f3dd693 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -2,9 +2,9 @@
config ISCSI_TARGET
tristate "SCSI Target Mode Stack"
depends on INET
+ select CRC32
select CRYPTO
- select CRYPTO_CRC32C
- select CRYPTO_CRC32C_INTEL if X86
+ select CRYPTO_HASH
help
Say M to enable the SCSI target mode stack. A SCSI target mode stack
is software that makes local storage available over a storage network
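For context, the iscsi_target.c conversion in the hunks below replaces the crypto_ahash CRC32C path with the library helper from <linux/crc32c.h>; the iSCSI digest convention seeds the CRC with all-ones and inverts the final value. A minimal sketch of that usage (function and buffer names are illustrative):

#include <linux/crc32c.h>

static u32 iscsi_style_crc(const void *buf, u32 len,
			   const void *pad, u32 pad_len)
{
	u32 crc = ~0;		/* iSCSI CRC32C starts from all-ones */

	crc = crc32c(crc, buf, len);
	if (pad_len)
		crc = crc32c(crc, pad, pad_len);

	return ~crc;		/* final inversion per the iSCSI digest rules */
}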
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6002283cbeba..1244ef3aa86c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -8,7 +8,7 @@
*
******************************************************************************/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -490,8 +490,8 @@ void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
}
EXPORT_SYMBOL(iscsit_aborted_task);
-static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, const void *, void *);
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
static int
@@ -510,9 +510,7 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
+ *header_digest = iscsit_crc_buf(hdr, ISCSI_HDR_LEN, 0, NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -537,11 +535,9 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- data_buf, data_buf_len,
- padding, &cmd->pad_bytes,
- &cmd->data_crc);
-
+ cmd->data_crc = iscsit_crc_buf(data_buf, data_buf_len,
+ padding,
+ &cmd->pad_bytes);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -566,8 +562,8 @@ iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
-static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
- u32, u32, u32, u8 *);
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes);
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
const struct iscsi_datain *datain)
@@ -584,10 +580,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL,
- header_digest);
-
+ *header_digest = iscsit_crc_buf(cmd->pdu, ISCSI_HDR_LEN, 0,
+ NULL);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -614,12 +608,8 @@ iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
- cmd, datain->offset,
- datain->length,
- cmd->padding,
- cmd->pad_bytes);
-
+ cmd->data_crc = iscsit_crc_sglist(cmd, datain->length,
+ cmd->padding, cmd->pad_bytes);
iov[iov_count].iov_base = &cmd->data_crc;
iov[iov_count++].iov_len = ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -1404,77 +1394,45 @@ iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}
-static u32 iscsit_do_crypto_hash_sg(
- struct ahash_request *hash,
- struct iscsit_cmd *cmd,
- u32 data_offset,
- u32 data_length,
- u32 padding,
- u8 *pad_bytes)
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes)
{
- u32 data_crc;
- struct scatterlist *sg;
- unsigned int page_off;
-
- crypto_ahash_init(hash);
-
- sg = cmd->first_data_sg;
- page_off = cmd->first_data_sg_off;
-
- if (data_length && page_off) {
- struct scatterlist first_sg;
- u32 len = min_t(u32, data_length, sg->length - page_off);
-
- sg_init_table(&first_sg, 1);
- sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
-
- ahash_request_set_crypt(hash, &first_sg, NULL, len);
- crypto_ahash_update(hash);
-
- data_length -= len;
- sg = sg_next(sg);
- }
+ struct scatterlist *sg = cmd->first_data_sg;
+ unsigned int page_off = cmd->first_data_sg_off;
+ u32 crc = ~0;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, sg->length);
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ const void *virt;
- ahash_request_set_crypt(hash, sg, NULL, cur_len);
- crypto_ahash_update(hash);
+ virt = kmap_local_page(sg_page(sg)) + sg->offset + page_off;
+ crc = crc32c(crc, virt, cur_len);
+ kunmap_local(virt);
- data_length -= cur_len;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
- }
-
- if (padding) {
- struct scatterlist pad_sg;
- sg_init_one(&pad_sg, pad_bytes, padding);
- ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
- padding);
- crypto_ahash_finup(hash);
- } else {
- ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
- crypto_ahash_final(hash);
+ page_off = 0;
+ data_length -= cur_len;
}
- return data_crc;
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
+
+ return ~crc;
}
-static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
- const void *buf, u32 payload_length, u32 padding,
- const void *pad_bytes, void *data_crc)
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes)
{
- struct scatterlist sg[2];
+ u32 crc = ~0;
- sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(sg, buf, payload_length);
- if (padding)
- sg_set_buf(sg + 1, pad_bytes, padding);
+ crc = crc32c(crc, buf, payload_length);
- ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
- crypto_ahash_digest(hash);
+ return ~crc;
}
int
@@ -1662,11 +1620,8 @@ iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- be32_to_cpu(hdr->offset),
- payload_length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, payload_length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
@@ -1925,10 +1880,8 @@ static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cm
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
- payload_length, padding,
- cmd->pad_bytes, &data_crc);
-
+ data_crc = iscsit_crc_buf(ping_data, payload_length,
+ padding, cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
" 0x%08x does not match computed 0x%08x\n",
@@ -2328,10 +2281,7 @@ iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, rx_size, 0, NULL,
- &data_crc);
-
+ data_crc = iscsit_crc_buf(text_in, rx_size, 0, NULL);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
" 0x%08x does not match computed"
@@ -2688,10 +2638,8 @@ static int iscsit_handle_immediate_data(
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
- cmd->write_data_done, length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ImmediateData CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n", checksum,
@@ -4116,10 +4064,8 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
- ISCSI_HDR_LEN, 0, NULL,
- &checksum);
-
+ checksum = iscsit_crc_buf(buffer, ISCSI_HDR_LEN, 0,
+ NULL);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
" received 0x%08x, computed 0x%08x\n",
@@ -4406,15 +4352,6 @@ int iscsit_close_connection(
*/
iscsit_check_conn_usage_count(conn);
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->sock)
sock_release(conn->sock);
@@ -4727,21 +4664,6 @@ int iscsit_logout_post_handler(
}
EXPORT_SYMBOL(iscsit_logout_post_handler);
-void iscsit_fail_session(struct iscsit_session *sess)
-{
- struct iscsit_conn *conn;
-
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
- conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
- }
- spin_unlock_bh(&sess->conn_lock);
-
- pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
- sess->session_state = TARG_SESS_STATE_FAILED;
-}
-
void iscsit_stop_session(
struct iscsit_session *sess,
int session_sleep,
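Throughout this file the ahash-based digest helpers are replaced by direct crc32c() calls; a minimal sketch (not part of the patch, using only <linux/crc32c.h>) of the seed-and-invert convention that yields the iSCSI CRC32C digest, mirroring iscsit_crc_buf() above:

#include <linux/crc32c.h>

/* Sketch: CRC32C digest over a buffer plus optional pad bytes. The CRC is
 * seeded with ~0 and bit-inverted at the end, exactly as iscsit_crc_buf()
 * and iscsit_crc_sglist() do in the patch. */
static u32 example_crc32c_digest(const void *buf, u32 len,
				 u32 padding, const void *pad_bytes)
{
	u32 crc = ~0;

	crc = crc32c(crc, buf, len);
	if (padding)
		crc = crc32c(crc, pad_bytes, padding);
	return ~crc;
}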
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 873411e95ed2..f4addae2aae4 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -40,7 +40,6 @@ extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsit_conn *);
extern int iscsit_close_session(struct iscsit_session *, bool can_sleep);
-extern void iscsit_fail_session(struct iscsit_session *);
extern void iscsit_stop_session(struct iscsit_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 18e88d2ea5fd..56d78af7cec7 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -25,54 +25,6 @@
/*
* FIXME: Does RData SNACK apply here as well?
*/
-void iscsit_create_conn_recovery_datain_values(
- struct iscsit_cmd *cmd,
- __be32 exp_data_sn)
-{
- u32 data_sn = 0;
- struct iscsit_conn *conn = cmd->conn;
-
- cmd->next_burst_len = 0;
- cmd->read_data_done = 0;
-
- while (be32_to_cpu(exp_data_sn) > data_sn) {
- if ((cmd->next_burst_len +
- conn->conn_ops->MaxRecvDataSegmentLength) <
- conn->sess->sess_ops->MaxBurstLength) {
- cmd->read_data_done +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- cmd->next_burst_len +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- } else {
- cmd->read_data_done +=
- (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len);
- cmd->next_burst_len = 0;
- }
- data_sn++;
- }
-}
-
-void iscsit_create_conn_recovery_dataout_values(
- struct iscsit_cmd *cmd)
-{
- u32 write_data_done = 0;
- struct iscsit_conn *conn = cmd->conn;
-
- cmd->data_sn = 0;
- cmd->next_burst_len = 0;
-
- while (cmd->write_data_done > write_data_done) {
- if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
- cmd->write_data_done)
- write_data_done += conn->sess->sess_ops->MaxBurstLength;
- else
- break;
- }
-
- cmd->write_data_done = write_data_done;
-}
-
static int iscsit_attach_active_connection_recovery_entry(
struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 6655e4bcf893..9064c74eef7a 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -9,8 +9,6 @@ struct iscsit_conn;
struct iscsi_conn_recovery;
struct iscsit_session;
-extern void iscsit_create_conn_recovery_datain_values(struct iscsit_cmd *, __be32);
-extern void iscsit_create_conn_recovery_dataout_values(struct iscsit_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsit_session *, u16);
extern void iscsit_free_connection_recovery_entries(struct iscsit_session *);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 90b870f234f0..c2ac9a99ebbb 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -8,7 +8,6 @@
*
******************************************************************************/
-#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
@@ -71,46 +70,6 @@ out_login:
return NULL;
}
-/*
- * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
- * per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
- */
-int iscsi_login_setup_crypto(struct iscsit_conn *conn)
-{
- struct crypto_ahash *tfm;
-
- /*
- * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
- * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
- * to software 1x8 byte slicing from crc32c.ko
- */
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- pr_err("crypto_alloc_ahash() failed\n");
- return -ENOMEM;
- }
-
- conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_rx_hash) {
- pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
-
- conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!conn->conn_tx_hash) {
- pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
- ahash_request_free(conn->conn_rx_hash);
- conn->conn_rx_hash = NULL;
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
-
- return 0;
-}
-
static int iscsi_login_check_initiator_version(
struct iscsit_conn *conn,
u8 version_max,
@@ -1165,15 +1124,6 @@ old_sess_out:
iscsit_dec_session_usage_count(conn->sess);
}
- ahash_request_free(conn->conn_tx_hash);
- if (conn->conn_rx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
- ahash_request_free(conn->conn_rx_hash);
- crypto_free_ahash(tfm);
- }
-
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index e8760735486b..03c7d695d58f 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -9,7 +9,6 @@ struct iscsi_login;
struct iscsi_np;
struct sockaddr_storage;
-extern int iscsi_login_setup_crypto(struct iscsit_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index fa3fb5f4e6bc..16e3ded98c32 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1194,14 +1194,7 @@ int iscsi_target_locate_portal(
goto get_target;
sess->sess_ops->SessionType = 1;
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
@@ -1258,17 +1251,7 @@ get_target:
}
conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
- iscsit_put_tiqn_for_login(tiqn);
- conn->tpg = NULL;
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the struct iscsi_portal_group to
* process login attempt.
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 5b90c22ee3dc..1d4e1788e073 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -67,54 +67,6 @@ int iscsi_login_tx_data(
return 0;
}
-void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
-{
- pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
- "CRC32C" : "None");
- pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
- "CRC32C" : "None");
- pr_debug("MaxRecvDataSegmentLength: %u\n",
- conn_ops->MaxRecvDataSegmentLength);
-}
-
-void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
-{
- pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
- pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
- pr_debug("TargetName: %s\n", sess_ops->TargetName);
- pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
- pr_debug("TargetPortalGroupTag: %hu\n",
- sess_ops->TargetPortalGroupTag);
- pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
- pr_debug("InitialR2T: %s\n",
- (sess_ops->InitialR2T) ? "Yes" : "No");
- pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
- "Yes" : "No");
- pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
- pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
- pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
- pr_debug("DefaultTime2Retain: %hu\n",
- sess_ops->DefaultTime2Retain);
- pr_debug("MaxOutstandingR2T: %hu\n",
- sess_ops->MaxOutstandingR2T);
- pr_debug("DataPDUInOrder: %s\n",
- (sess_ops->DataPDUInOrder) ? "Yes" : "No");
- pr_debug("DataSequenceInOrder: %s\n",
- (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
- pr_debug("ErrorRecoveryLevel: %hu\n",
- sess_ops->ErrorRecoveryLevel);
- pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
- "Discovery" : "Normal");
-}
-
-void iscsi_print_params(struct iscsi_param_list *param_list)
-{
- struct iscsi_param *param;
-
- list_for_each_entry(param, &param_list->param_list, p_list)
- pr_debug("%s: %s\n", param->name, param->value);
-}
-
static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
char *name, char *value, u8 phase, u8 scope, u8 sender,
u16 type_range, u8 use)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 00fbbebb8c75..c672a971fcb7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -32,9 +32,6 @@ struct iscsi_sess_ops;
extern int iscsi_login_rx_data(struct iscsit_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsit_conn *, char *, char *, int);
-extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
-extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
-extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index f7bac98fd4fe..bf06cfdfb012 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -200,11 +200,6 @@ static void iscsit_clear_tpg_np_login_threads(
spin_unlock(&tpg->tpg_np_lock);
}
-void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
-{
- iscsi_print_params(tpg->param_list);
-}
-
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index d44d09f2dde9..1155b7b3164a 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -18,7 +18,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
int);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 91a75a4a7cc1..ed2dadb21f75 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -333,50 +333,6 @@ int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
-int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
-{
- struct iscsit_conn *conn = cmd->conn;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct iscsi_data *hdr = (struct iscsi_data *) buf;
- u32 payload_length = ntoh24(hdr->dlength);
-
- if (conn->sess->sess_ops->InitialR2T) {
- pr_err("Received unexpected unsolicited data"
- " while InitialR2T=Yes, protocol error.\n");
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
- return -1;
- }
-
- if ((cmd->first_burst_len + payload_length) >
- conn->sess->sess_ops->FirstBurstLength) {
- pr_err("Total %u bytes exceeds FirstBurstLength: %u"
- " for this Unsolicited DataOut Burst.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
-
- if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
- return 0;
-
- if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
- ((cmd->first_burst_len + payload_length) !=
- conn->sess->sess_ops->FirstBurstLength)) {
- pr_err("Unsolicited non-immediate data received %u"
- " does not equal FirstBurstLength: %u, and does"
- " not equal ExpXferLen %u.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
- return 0;
-}
-
struct iscsit_cmd *iscsit_find_cmd_from_itt(
struct iscsit_conn *conn,
itt_t init_task_tag)
@@ -1252,20 +1208,6 @@ int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_det
return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}
-void iscsit_print_session_params(struct iscsit_session *sess)
-{
- struct iscsit_conn *conn;
-
- pr_debug("-----------------------------[Session Params for"
- " SID: %u]-----------------------------\n", sess->sid);
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
- iscsi_dump_conn_ops(conn->conn_ops);
- spin_unlock_bh(&sess->conn_lock);
-
- iscsi_dump_sess_ops(sess->sess_ops);
-}
-
int rx_data(
struct iscsit_conn *conn,
struct kvec *iov,
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 336da4fb0a77..7ae48a8a5cbf 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -23,7 +23,6 @@ extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsit_cmd *, u32);
extern int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char * ,__be32 cmdsn);
-extern int iscsit_check_unsolicited_dataout(struct iscsit_cmd *, unsigned char *);
extern struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *,
itt_t, u32);
extern struct iscsit_cmd *iscsit_find_cmd_from_ttt(struct iscsit_conn *, u32);
@@ -61,7 +60,6 @@ extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_stru
extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
-extern void iscsit_print_session_params(struct iscsit_session *);
extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index c42cbde8a31b..210648a0092e 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -117,9 +117,9 @@ static ssize_t target_stat_tgt_status_show(struct config_item *item,
char *page)
{
if (to_stat_tgt_dev(item)->export_count)
- return snprintf(page, PAGE_SIZE, "activated");
+ return snprintf(page, PAGE_SIZE, "activated\n");
else
- return snprintf(page, PAGE_SIZE, "deactivated");
+ return snprintf(page, PAGE_SIZE, "deactivated\n");
}
static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 3e33cf2af73b..f0c3ac1103bb 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1272,8 +1272,9 @@ static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
&res.smccc);
if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
+ pr_info("revision %lu.%lu (%0*lx)", res.result.major,
+ res.result.minor, (int)sizeof(res.result.build_id) * 2,
+ res.result.build_id);
else
pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}
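The width argument now tracks the size of build_id; a short illustration (not from the patch, variable names hypothetical, assuming build_id is an unsigned long) of the computed field width:

	/* sizeof(unsigned long) is 8 on 64-bit kernels, so the build id is
	 * zero-padded to 16 hex digits there (and to 8 on 32-bit), instead of
	 * always a minimum of 8 as with "%08lx". */
	int width = (int)sizeof(unsigned long) * 2;
	pr_info("revision %lu.%lu (%0*lx)", major, minor, width, build_id);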
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 4bdb2d45e0bf..dc1f456736dc 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -70,6 +70,9 @@ struct tb_ctl {
#define tb_ctl_dbg(ctl, format, arg...) \
dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+#define tb_ctl_dbg_once(ctl, format, arg...) \
+ dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
+
static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
@@ -265,7 +268,7 @@ static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
return res;
}
-static void tb_cfg_print_error(struct tb_ctl *ctl,
+static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
@@ -279,8 +282,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
- tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
- res->response_route, res->response_port);
+ tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
+ res->response_route, res->response_port, space);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
@@ -1072,7 +1075,7 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
return -ENODEV;
- tb_cfg_print_error(ctl, res);
+ tb_cfg_print_error(ctl, space, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index bf930a191472..7e08ca8f0895 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -140,5 +140,4 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
-
#endif
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index a1d0d8a33f20..f8328ca7e22e 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -168,6 +168,13 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
+ * For Path configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset in_hop_id value\n
+ * v[0] v[1] v[2] v[3]
+ *
* For Counter configuration space:
* Short format is: offset\n
* v[0]
@@ -191,14 +198,33 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+/*
+ * Path registers need to be written in double word pairs and both must be
+ * read before being written. This writes one double word in path config space
+ * following the spec flow.
+ */
+static int path_write_one(struct tb_port *port, u32 val, u32 offset)
+{
+ u32 index = offset % PATH_LEN;
+ u32 offs = offset - index;
+ u32 data[PATH_LEN];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+ if (ret)
+ return ret;
+ data[index] = val;
+ return tb_port_write(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+}
+
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+ enum tb_cfg_space space, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
+ int long_fmt_len, ret = 0;
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
- int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
@@ -214,12 +240,21 @@ static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ if (space == TB_CFG_HOPS)
+ long_fmt_len = 4;
+ else
+ long_fmt_len = 5;
+
line = buf;
- while (parse_line(&line, &offset, &val, 2, 5)) {
- if (port)
- ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
- else
+ while (parse_line(&line, &offset, &val, 2, long_fmt_len)) {
+ if (port) {
+ if (space == TB_CFG_HOPS)
+ ret = path_write_one(port, val, offset);
+ else
+ ret = tb_port_write(port, &val, space, offset, 1);
+ } else {
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ }
if (ret)
break;
}
@@ -240,7 +275,16 @@ static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
- return regs_write(port->sw, port, user_buf, count, ppos);
+ return regs_write(port->sw, port, TB_CFG_PORT, user_buf, count, ppos);
+}
+
+static ssize_t path_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, TB_CFG_HOPS, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
@@ -249,7 +293,7 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
- return regs_write(sw, NULL, user_buf, count, ppos);
+ return regs_write(sw, NULL, TB_CFG_SWITCH, user_buf, count, ppos);
}
static bool parse_sb_line(char **line, u8 *reg, u8 *data, size_t data_size,
@@ -401,6 +445,7 @@ out:
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
+#define path_write NULL
#define switch_regs_write NULL
#define port_sb_regs_write NULL
#define retimer_sb_regs_write NULL
@@ -2243,7 +2288,7 @@ out_rpm_put:
return ret;
}
-DEBUGFS_ATTR_RO(path);
+DEBUGFS_ATTR_RW(path);
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
@@ -2368,6 +2413,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
+ if (sw->drom)
+ debugfs_create_blob("drom", 0400, debugfs_dir, &sw->drom_blob);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
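path_write_one() above rounds the offset down to the start of its double-word pair before the read-modify-write; a small worked example (not from the patch, assuming PATH_LEN is 2):

	/* Example: offset = 5, PATH_LEN = 2
	 *   index = 5 % 2 = 1    -> second word of the pair
	 *   offs  = 5 - 1 = 4    -> the pair starts at offset 4
	 * Words 4 and 5 are read, word 5 is replaced with val, and both words
	 * are written back, since path registers must be written as pairs. */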
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index eb241b270f79..9c1d65d26553 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -435,6 +435,29 @@ static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
return 0;
}
+static int tb_switch_drom_alloc(struct tb_switch *sw, size_t size)
+{
+ sw->drom = kzalloc(size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = sw->drom;
+ sw->drom_blob.size = size;
+#endif
+ return 0;
+}
+
+static void tb_switch_drom_free(struct tb_switch *sw)
+{
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = NULL;
+ sw->drom_blob.size = 0;
+#endif
+ kfree(sw->drom);
+ sw->drom = NULL;
+}
+
/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
@@ -447,9 +470,9 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
- sw->drom = kmalloc(len, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ res = tb_switch_drom_alloc(sw, len);
+ if (res)
+ return res;
res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
len);
@@ -464,8 +487,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
+ tb_switch_drom_free(sw);
return -EINVAL;
}
@@ -491,13 +513,15 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
- if (ret)
- goto err_free;
+ if (ret) {
+ tb_switch_drom_free(sw);
+ return ret;
+ }
/*
* Read UID from the minimal DROM because the one in NVM is just
@@ -505,11 +529,6 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
-
-err_free:
- kfree(sw->drom);
- sw->drom = NULL;
- return ret;
}
static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
@@ -522,15 +541,13 @@ static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
- if (ret) {
- kfree(sw->drom);
- sw->drom = NULL;
- }
+ if (ret)
+ tb_switch_drom_free(sw);
return ret;
}
@@ -552,19 +569,14 @@ static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
return -EIO;
}
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
if (ret)
- goto err;
-
- return 0;
+ tb_switch_drom_free(sw);
-err:
- kfree(sw->drom);
- sw->drom = NULL;
return ret;
}
@@ -646,9 +658,7 @@ static int tb_drom_parse(struct tb_switch *sw, u16 size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
-
+ tb_switch_drom_free(sw);
return ret;
}
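tb_switch_drom_alloc() now keeps the DROM buffer and the debugfs blob wrapper in sync, which is what lets debugfs_create_blob() expose it read-only in debugfs.c above; a minimal sketch (not from the patch, names illustrative) of that generic pattern:

#include <linux/debugfs.h>

/* Sketch: expose an already-allocated kernel buffer read-only via debugfs.
 * example_blob and example_dir are illustrative placeholders. */
static struct debugfs_blob_wrapper example_blob;

static void example_expose_buffer(struct dentry *example_dir,
				  void *buf, size_t size)
{
	example_blob.data = buf;
	example_blob.size = size;
	debugfs_create_blob("drom", 0400, example_dir, &example_blob);
}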
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index f760e54cd9bd..e1a5f6e3d0b6 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -581,10 +581,10 @@ int tb_path_activate(struct tb_path *path)
}
}
path->activated = true;
- tb_dbg(path->tb, "path activation complete\n");
+ tb_dbg(path->tb, "%s path activation complete\n", path->name);
return 0;
err:
- tb_WARN(path->tb, "path activation failed\n");
+ tb_WARN(path->tb, "%s path activation failed\n", path->name);
return res;
}
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index eeb64433ebbc..1f25529fe05d 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -472,7 +472,7 @@ struct tb_retimer_lookup {
u8 index;
};
-static int retimer_match(struct device *dev, void *data)
+static int retimer_match(struct device *dev, const void *data)
{
const struct tb_retimer_lookup *lookup = data;
struct tb_retimer *rt = tb_to_retimer(dev);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index a7c6919fbf97..390abcfe7188 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -20,6 +20,12 @@
#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
+ * How many times a bandwidth allocation request from the graphics driver
+ * is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES 3
+
+/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
* direction. This is 40G - 10% guard band bandwidth.
*/
@@ -69,14 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
}
struct tb_hotplug_event {
- struct work_struct work;
+ struct delayed_work work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
+ int retry;
};
+static void tb_scan_port(struct tb_port *port);
static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
@@ -90,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
ev->route = route;
ev->port = port;
ev->unplug = unplug;
- INIT_WORK(&ev->work, tb_handle_hotplug);
- queue_work(tb->wq, &ev->work);
+ INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+ queue_delayed_work(tb->wq, &ev->work, 0);
}
/* enumeration & hot plug handling */
@@ -961,7 +973,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim:
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -1238,8 +1250,6 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
tb_switch_configure_link(sw);
}
-static void tb_scan_port(struct tb_port *port);
-
/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
@@ -1727,7 +1737,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
break;
}
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/*
@@ -1864,12 +1874,76 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
+{
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb *tb = data;
+
+ mutex_lock(&tb->lock);
+ if (tb_tunnel_is_active(tunnel)) {
+ int consumed_up, consumed_down, ret;
+
+ tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
+
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+ &consumed_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to read consumed bandwidth, tearing down\n");
+ tb_deactivate_and_free_tunnel(tunnel);
+ } else {
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the
+ * consumption exceeds the threshold.
+ */
+ tb_configure_asym(tb, in, out, consumed_up,
+ consumed_down);
+ /*
+ * Update the domain with the new bandwidth
+ * estimation.
+ */
+ tb_recalc_estimated_bandwidth(tb);
+ /*
+ * If a DP tunnel exists, change the host
+ * router's first-depth children's TMU mode to
+ * HiFi for CL0s to work.
+ */
+ tb_increase_tmu_accuracy(tunnel);
+ }
+ } else {
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * This tunnel failed to establish. This means DPRX
+ * negotiation most likely did not complete, which
+ * happens either because there is no graphics driver
+ * loaded or not all DP cables were connected to the
+ * discrete router.
+ *
+ * In both cases we remove the DP IN adapter from the
+ * available resources as it is not usable. This will
+ * also tear down the tunnel and try to re-use the
+ * released DP OUT.
+ *
+ * It will be added back only if there is hotplug for
+ * the DP IN again.
+ */
+ tb_tunnel_warn(tunnel, "not active, tearing down\n");
+ tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
+ }
+ mutex_unlock(&tb->lock);
+
+ tb_domain_put(tb);
+}
+
+static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
@@ -1921,47 +1995,29 @@ static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
- available_down);
+ available_down, tb_dp_tunnel_active,
+ tb_domain_get(tb));
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim_usb;
}
- if (tb_tunnel_activate(tunnel)) {
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+ ret = tb_tunnel_activate(tunnel);
+ if (ret && ret != -EINPROGRESS) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ list_del(&tunnel->list);
goto err_free;
}
- /* If fail reading tunnel's consumed bandwidth, tear it down */
- ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
- if (ret)
- goto err_deactivate;
-
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
-
- tb_reclaim_usb3_bandwidth(tb, in, out);
- /*
- * Transition the links to asymmetric if the consumption exceeds
- * the threshold.
- */
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
- /* Update the domain with the new bandwidth estimation */
- tb_recalc_estimated_bandwidth(tb);
-
- /*
- * In case of DP tunnel exists, change host router's 1st children
- * TMU mode to HiFi for CL0s to work.
- */
- tb_increase_tmu_accuracy(tunnel);
- return true;
+ return;
-err_deactivate:
- tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_domain_put(tb);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
@@ -1971,8 +2027,6 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
-
- return false;
}
static void tb_tunnel_dp(struct tb *tb)
@@ -2090,17 +2144,18 @@ static void tb_switch_exit_redrive(struct tb_switch *sw)
}
}
-static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason)
{
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
if (tb_port_is_dpin(port)) {
- tb_port_dbg(port, "DP IN resource unavailable\n");
+ tb_port_dbg(port, "DP IN resource unavailable: %s\n", reason);
in = port;
out = NULL;
} else {
- tb_port_dbg(port, "DP OUT resource unavailable\n");
+ tb_port_dbg(port, "DP OUT resource unavailable: %s\n", reason);
in = NULL;
out = port;
}
@@ -2182,7 +2237,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return 0;
}
@@ -2212,7 +2267,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"PCIe tunnel activation failed, aborting\n");
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return -EIO;
}
@@ -2271,7 +2326,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
@@ -2334,7 +2389,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
*/
static void tb_handle_hotplug(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
@@ -2406,7 +2461,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
- tb_dp_resource_unavailable(tb, port);
+ tb_dp_resource_unavailable(tb, port, "adapter unplug");
} else if (!port->port) {
tb_sw_dbg(sw, "xHCI disconnect request\n");
tb_switch_xhci_disconnect(sw);
@@ -2639,7 +2694,7 @@ fail:
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
int requested_bw, requested_up, requested_down, ret;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
@@ -2666,7 +2721,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
goto put_sw;
}
- tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
@@ -2719,12 +2774,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
- if (ret == -ENOBUFS)
+ if (ret == -ENOBUFS) {
tb_tunnel_warn(tunnel,
"not enough bandwidth available\n");
- else
+ } else if (ret == -ENOTCONN) {
+ tb_tunnel_dbg(tunnel, "not active yet\n");
+ /*
+ * We got a bandwidth allocation request but the
+ * tunnel is not yet active. This means that
+ * tb_dp_tunnel_active() has not been called for
+ * this tunnel yet. Allow it some time and retry
+ * this request a couple of times.
+ */
+ if (ev->retry < TB_BW_ALLOC_RETRIES) {
+ tb_tunnel_dbg(tunnel,
+ "retrying bandwidth allocation request\n");
+ tb_queue_dp_bandwidth_request(tb, ev->route,
+ ev->port,
+ ev->retry + 1,
+ msecs_to_jiffies(50));
+ } else {
+ tb_tunnel_dbg(tunnel,
+ "run out of retries, failing the request");
+ }
+ } else {
tb_tunnel_warn(tunnel,
"failed to change bandwidth allocation\n");
+ }
} else {
tb_tunnel_dbg(tunnel,
"bandwidth allocation changed to %d/%d Mb/s\n",
@@ -2745,7 +2821,8 @@ unlock:
kfree(ev);
}
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay)
{
struct tb_hotplug_event *ev;
@@ -2756,8 +2833,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
ev->tb = tb;
ev->route = route;
ev->port = port;
- INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
- queue_work(tb->wq, &ev->work);
+ ev->retry = retry;
+ INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_delayed_work(tb->wq, &ev->work, delay);
}
static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2777,7 +2855,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
- tb_queue_dp_bandwidth_request(tb, route, error->port);
+ tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
break;
default:
@@ -2832,7 +2910,7 @@ static void tb_stop(struct tb *tb)
*/
if (tb_tunnel_is_dma(tunnel))
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
@@ -3028,7 +3106,7 @@ static int tb_resume_noirq(struct tb *tb)
if (tb_tunnel_is_usb3(tunnel))
usb3_delay = 500;
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/* Re-create our tunnels now */
@@ -3039,7 +3117,7 @@ static int tb_resume_noirq(struct tb *tb)
/* Only need to do it once */
usb3_delay = 0;
}
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
}
if (!list_empty(&tcm->tunnel_list)) {
/*
@@ -3149,7 +3227,7 @@ static int tb_runtime_resume(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
tb_switch_enter_redrive(tb->root_switch);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
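The hotplug and bandwidth-request events above now use delayed work so a request that arrives before the DP tunnel is active can be retried; a condensed sketch (not from the patch, the example_* names are placeholders) of the retry-with-delay pattern:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_event {
	struct delayed_work work;
	int retry;
};

static bool example_resource_ready(void);	/* placeholder for the real check */

static void example_handler(struct work_struct *work)
{
	struct example_event *ev = container_of(work, typeof(*ev), work.work);

	if (!example_resource_ready() && ev->retry < 3) {
		/* Not ready yet: back off 50 ms and try again. */
		ev->retry++;
		schedule_delayed_work(&ev->work, msecs_to_jiffies(50));
		return;
	}
	/* ... handle the request ... */
	kfree(ev);
}

static void example_queue(struct example_event *ev)
{
	INIT_DELAYED_WORK(&ev->work, example_handler);
	schedule_delayed_work(&ev->work, 0);
}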
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ddbf0cd78377..b54147a1ba87 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,6 +9,7 @@
#ifndef TB_H_
#define TB_H_
+#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
@@ -160,6 +161,7 @@ struct tb_switch_tmu {
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
* @clx: CLx states on the upstream link of the router
+ * @drom_blob: DROM debugfs blob wrapper
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -212,6 +214,9 @@ struct tb_switch {
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
unsigned int clx;
+#ifdef CONFIG_DEBUG_FS
+ struct debugfs_blob_wrapper drom_blob;
+#endif
};
/**
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 9475c6698c7d..1f4318249c22 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1382,8 +1382,8 @@ static void tb_test_tunnel_pcie(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_dp(struct kunit *test)
@@ -1406,7 +1406,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1421,7 +1421,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_chain(struct kunit *test)
@@ -1452,7 +1452,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1467,7 +1467,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
@@ -1502,7 +1502,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1517,7 +1517,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
@@ -1567,7 +1567,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1597,7 +1597,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
@@ -1637,7 +1637,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
- tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
+ tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
@@ -1645,7 +1645,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
- tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
+ tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
@@ -1653,7 +1653,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
- tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
+ tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
@@ -1661,8 +1661,8 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
@@ -1716,8 +1716,8 @@ static void tb_test_tunnel_usb3(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_port_on_path(struct kunit *test)
@@ -1750,7 +1750,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1783,7 +1783,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
port = &dev4->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
- tb_tunnel_free(dp_tunnel);
+ tb_tunnel_put(dp_tunnel);
}
static void tb_test_tunnel_dma(struct kunit *test)
@@ -1826,7 +1826,7 @@ static void tb_test_tunnel_dma(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
@@ -1863,7 +1863,7 @@ static void tb_test_tunnel_dma_rx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
@@ -1900,7 +1900,7 @@ static void tb_test_tunnel_dma_tx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
@@ -1966,7 +1966,7 @@ static void tb_test_tunnel_dma_chain(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
@@ -1993,7 +1993,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2005,7 +2005,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2017,7 +2017,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
@@ -2050,7 +2050,7 @@ static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
@@ -2083,7 +2083,7 @@ static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_pcie(struct kunit *test)
@@ -2116,7 +2116,7 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
@@ -2166,7 +2166,7 @@ static void tb_test_credit_alloc_without_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test)
@@ -2182,7 +2182,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2210,7 +2210,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
@@ -2243,7 +2243,7 @@ static void tb_test_credit_alloc_usb3(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
@@ -2279,7 +2279,7 @@ static void tb_test_credit_alloc_dma(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
@@ -2356,7 +2356,7 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
* Release the first DMA tunnel. That should make 14 buffers
* available for the next tunnel.
*/
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel1);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
@@ -2375,8 +2375,8 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel3);
- tb_tunnel_free(tunnel2);
+ tb_tunnel_put(tunnel3);
+ tb_tunnel_put(tunnel2);
}
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
@@ -2418,7 +2418,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2455,7 +2455,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2595,12 +2595,12 @@ static void tb_test_credit_alloc_all(struct kunit *test)
dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
- tb_tunnel_free(dma_tunnel2);
- tb_tunnel_free(dma_tunnel1);
- tb_tunnel_free(usb3_tunnel);
- tb_tunnel_free(dp_tunnel2);
- tb_tunnel_free(dp_tunnel1);
- tb_tunnel_free(pcie_tunnel);
+ tb_tunnel_put(dma_tunnel2);
+ tb_tunnel_put(dma_tunnel1);
+ tb_tunnel_put(usb3_tunnel);
+ tb_tunnel_put(dp_tunnel2);
+ tb_tunnel_put(dp_tunnel1);
+ tb_tunnel_put(pcie_tunnel);
}
static const u32 root_directory[] = {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 41cf6378ad25..8229a6fbda5a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+/*
+ * According to the VESA spec, the DPRX negotiation shall complete in 5
+ * seconds after the tunnel is established. Since at least i915 can runtime
+ * suspend if there is nothing connected, and polls for new connections
+ * every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT 12000
+#define TB_DPRX_WAIT_TIMEOUT 25
+#define TB_DPRX_POLL_DELAY 50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+ "DPRX capability read timeout in ms, -1 waits forever (default: "
+ __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -82,6 +100,9 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -155,7 +176,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
- tb_tunnel_free(tunnel);
+ kfree(tunnel);
return NULL;
}
@@ -163,10 +184,42 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
+ kref_init(&tunnel->kref);
return tunnel;
}
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_get(&tunnel->kref);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+ struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ int i;
+
+ if (tunnel->destroy)
+ tunnel->destroy(tunnel);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_put(&tunnel->kref, tb_tunnel_destroy);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
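With tb_tunnel_free() gone, code in tunnel.c that hands a tunnel to deferred work is expected to pin it with a reference first, which is exactly what the DPRX polling work below does. A minimal illustrative sketch of that pattern (the example_* names are hypothetical and not part of the patch; tb_tunnel_get() is file-local, so this only applies inside tunnel.c):

	struct example_tunnel_work {
		struct work_struct work;
		struct tb_tunnel *tunnel;
	};

	static void example_tunnel_work_fn(struct work_struct *work)
	{
		struct example_tunnel_work *tw =
			container_of(work, struct example_tunnel_work, work);

		/* tw->tunnel cannot be released while we hold the reference */
		tb_tunnel_dbg(tw->tunnel, "deferred work ran\n");

		tb_tunnel_put(tw->tunnel);	/* drop the reference taken below */
		kfree(tw);
	}

	static int example_queue_tunnel_work(struct tb_tunnel *tunnel)
	{
		struct example_tunnel_work *tw;

		tw = kzalloc(sizeof(*tw), GFP_KERNEL);
		if (!tw)
			return -ENOMEM;

		tb_tunnel_get(tunnel);	/* keep the tunnel alive until the work runs */
		tw->tunnel = tunnel;
		INIT_WORK(&tw->work, example_tunnel_work_fn);
		queue_work(tunnel->tb->wq, &tw->work);
		return 0;
	}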
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
@@ -355,7 +408,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -404,7 +457,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -851,7 +904,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
return 0;
}
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
@@ -877,7 +930,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
@@ -889,6 +942,90 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
}
}
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+ return timeout_msec >= 0 ?
+ ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+ ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set for
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE)
+ return 0;
+
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+ struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+ struct tb *tb = tunnel->tb;
+
+ if (!tunnel->dprx_canceled) {
+ mutex_lock(&tb->lock);
+ if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+ tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+ if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+ queue_delayed_work(tb->wq, &tunnel->dprx_work,
+ msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+ mutex_unlock(&tb->lock);
+ return;
+ }
+ } else {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ }
+ mutex_unlock(&tb->lock);
+ }
+
+ if (tunnel->callback)
+ tunnel->callback(tunnel, tunnel->callback_data);
+}
+
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+ /*
+ * Bump up the reference to keep the tunnel around. It will be
+ * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+ */
+ tb_tunnel_get(tunnel);
+
+ if (tunnel->callback) {
+ tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+ queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+ return -EINPROGRESS;
+ }
+
+ return tb_dp_is_usb4(tunnel->src_port->sw) ?
+ tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+}
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -910,6 +1047,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
+ tb_dp_dprx_stop(tunnel);
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,10 +1058,13 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
if (ret)
return ret;
- if (tb_port_is_dpout(tunnel->dst_port))
- return tb_dp_port_enable(tunnel->dst_port, active);
+ if (tb_port_is_dpout(tunnel->dst_port)) {
+ ret = tb_dp_port_enable(tunnel->dst_port, active);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return active ? tb_dp_dprx_start(tunnel) : 0;
}
/**
@@ -1076,35 +1217,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
- struct tb_port *in = tunnel->src_port;
-
- /*
- * Wait for DPRX done. Normally it should be already set for
- * active tunnel.
- */
- do {
- u32 val;
- int ret;
-
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
- return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- tb_tunnel_dbg(tunnel, "DPRX read done\n");
- return 0;
- }
- usleep_range(100, 150);
- } while (ktime_before(ktime_get(), timeout));
-
- tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
- return -ETIMEDOUT;
-}
-
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
@@ -1168,32 +1280,39 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int ret;
if (tb_dp_is_usb4(sw)) {
- /*
- * On USB4 routers check if the bandwidth allocation
- * mode is enabled first and then read the bandwidth
- * through those registers.
- */
- ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
- if (ret < 0) {
- if (ret != -EOPNOTSUPP)
+ ret = tb_dp_wait_dprx(tunnel, 0);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ /*
+ * While we wait for DPRX to complete, the
+ * tunnel consumes as much bandwidth as it
+ * was reserved initially.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ } else {
+ /*
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
+ */
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ } else if (!ret) {
+ return 0;
+ }
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+ if (ret)
return ret;
- } else if (!ret) {
- return 0;
}
- /*
- * Then see if the DPRX negotiation is ready and if yes
- * return that bandwidth (it may be smaller than the
- * reduced one). According to VESA spec, the DPRX
- * negotiation shall compete in 5 seconds after tunnel
- * established. We give it 100ms extra just in case.
- */
- ret = tb_dp_wait_dprx(tunnel, 5100);
- if (ret)
- return ret;
- ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
- if (ret)
- return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1365,9 +1484,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1424,7 +1543,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1439,15 +1558,24 @@ err_free:
* %0 if no available bandwidth.
* @max_down: Maximum available downstream bandwidth for the DP tunnel.
* %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ * fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
*
* Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * It can call tb_tunnel_is_active() to check whether the activation was
+ * successful (%false means the activation failed).
+ * The @callback is called without @tb->lock held.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down)
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
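A hedged sketch of how a connection manager might use the new callback arguments together with tb_tunnel_activate() now being able to return -EINPROGRESS; my_dp_tunnel_active() and my_tunnel_dp() are made-up names used only for illustration, not part of this series:

	static void my_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
	{
		/* Called without tb->lock held once activation has finished */
		if (tb_tunnel_is_active(tunnel))
			tb_tunnel_dbg(tunnel, "DP tunnel fully active\n");
		else
			tb_tunnel_warn(tunnel, "DP tunnel activation failed\n");
	}

	static int my_tunnel_dp(struct tb *tb, struct tb_port *in,
				struct tb_port *out)
	{
		struct tb_tunnel *tunnel;
		int ret;

		tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, 0, 0,
					    my_dp_tunnel_active, NULL);
		if (!tunnel)
			return -ENOMEM;

		ret = tb_tunnel_activate(tunnel);
		if (ret && ret != -EINPROGRESS) {
			tb_tunnel_put(tunnel);
			return ret;
		}

		/*
		 * -EINPROGRESS means the DPRX capabilities read is still in
		 * progress; my_dp_tunnel_active() runs once it completes or
		 * times out.
		 */
		return 0;
	}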
@@ -1461,9 +1589,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1472,6 +1600,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
+ tunnel->callback = callback;
+ tunnel->callback_data = callback_data;
+ INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
paths = tunnel->paths;
pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1500,7 +1631,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1620,7 +1751,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
}
}
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
{
struct tb_path_hop *hop;
@@ -1628,14 +1759,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
tb_dma_release_credits(hop);
}
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
- tb_dma_deinit_path(tunnel->paths[i]);
+ tb_dma_destroy_path(tunnel->paths[i]);
}
}
@@ -1681,7 +1812,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
- tunnel->deinit = tb_dma_deinit;
+ tunnel->destroy = tb_dma_destroy;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
@@ -1712,7 +1843,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1793,7 +1924,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
return min(up_max_rate, down_max_rate);
}
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
@@ -2024,7 +2155,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2038,7 +2169,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -2094,7 +2225,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
@@ -2103,7 +2234,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
@@ -2113,7 +2244,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
tunnel->allocated_up = min(max_rate, max_up);
tunnel->allocated_down = min(max_rate, max_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2125,31 +2256,6 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
/**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
- int i;
-
- if (!tunnel)
- return;
-
- if (tunnel->deinit)
- tunnel->deinit(tunnel);
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i])
- tb_path_free(tunnel->paths[i]);
- }
-
- kfree(tunnel->paths);
- kfree(tunnel);
-}
-
-/**
* tb_tunnel_is_invalid - check whether an activated path is still valid
* @tunnel: Tunnel to check
*/
@@ -2167,12 +2273,15 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
}
/**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
*
- * Return: 0 on success and negative errno in case if failure
+ * Return: 0 on success and negative errno in case of failure.
+ * Specifically returns %-EINPROGRESS if the tunnel activation is still
+ * in progress (DP tunnels return this while the DPRX capabilities read
+ * is still pending).
*/
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int res, i;
@@ -2189,8 +2298,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
}
}
- if (tunnel->init) {
- res = tunnel->init(tunnel);
+ tunnel->state = TB_TUNNEL_ACTIVATING;
+
+ if (tunnel->pre_activate) {
+ res = tunnel->pre_activate(tunnel);
if (res)
return res;
}
@@ -2203,10 +2314,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
- if (res)
+ if (res) {
+ if (res == -EINPROGRESS)
+ return res;
goto err;
+ }
}
+ tunnel->state = TB_TUNNEL_ACTIVE;
return 0;
err:
@@ -2216,27 +2331,6 @@ err:
}
/**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i]->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
- }
-
- return tb_tunnel_restart(tunnel);
-}
-
-/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
@@ -2253,6 +2347,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
+
+ if (tunnel->post_deactivate)
+ tunnel->post_deactivate(tunnel);
+
+ tunnel->state = TB_TUNNEL_INACTIVE;
}
/**
@@ -2279,18 +2378,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
return false;
}
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Has tb_tunnel_activate() been called for the tunnel? */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (!tunnel->paths[i])
- return false;
- if (!tunnel->paths[i]->activated)
- return false;
- }
-
- return true;
+ return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}
/**
@@ -2307,7 +2398,7 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2328,7 +2419,7 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2351,7 +2442,7 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->alloc_bandwidth)
return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
@@ -2376,26 +2467,27 @@ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
{
int up_bw = 0, down_bw = 0;
- if (!tb_tunnel_is_active(tunnel))
- goto out;
-
- if (tunnel->consumed_bandwidth) {
+ /*
+ * Here we need to distinguish a tunnel that is not active from
+ * tunnels that are either fully active or whose activation has
+ * started. The latter applies to DP tunnels, where we must report
+ * the consumed bandwidth as the maximum we gave the tunnel until the
+ * DPRX capabilities read is done by the graphics driver.
+ */
+ if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
-
- tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
}
-out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
return 0;
}
@@ -2411,7 +2503,7 @@ out:
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
- return 0;
+ return -ENOTCONN;
if (tunnel->release_unused_bandwidth) {
int ret;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 1a27ccd08b86..7f6d3a18a41e 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -19,16 +19,33 @@ enum tb_tunnel_type {
};
/**
+ * enum tb_tunnel_state - State of a tunnel
+ * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() has not been called for the tunnel
+ * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel
+ * @TB_TUNNEL_ACTIVE: The tunnel is fully active
+ */
+enum tb_tunnel_state {
+ TB_TUNNEL_INACTIVE,
+ TB_TUNNEL_ACTIVATING,
+ TB_TUNNEL_ACTIVE,
+};
+
+/**
* struct tb_tunnel - Tunnel between two ports
+ * @kref: Reference count
* @tb: Pointer to the domain
* @src_port: Source port of the tunnel
* @dst_port: Destination port of the tunnel. For discovered incomplete
* tunnels may be %NULL or null adapter port instead.
* @paths: All paths required by the tunnel
* @npaths: Number of paths in @paths
- * @init: Optional tunnel specific initialization
- * @deinit: Optional tunnel specific de-initialization
+ * @pre_activate: Optional tunnel specific initialization called before
+ * activation. Can touch hardware.
* @activate: Optional tunnel specific activation/deactivation
+ * @post_deactivate: Optional tunnel specific de-initialization called
+ * after deactivation. Can touch hardware.
+ * @destroy: Optional tunnel specific callback called when the tunnel
+ * memory is being released. Should not touch hardware.
* @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
* @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
* @alloc_bandwidth: Change tunnel bandwidth allocation
@@ -37,6 +54,7 @@ enum tb_tunnel_type {
* @reclaim_available_bandwidth: Reclaim back available bandwidth
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @state: Current state of the tunnel
* @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
* Only set if the bandwidth needs to be limited.
* @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
@@ -45,16 +63,23 @@ enum tb_tunnel_type {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_canceled: Was DPRX capabilities read poll canceled
+ * @dprx_timeout: If set, DPRX capabilities read poll work will time out after this passes
+ * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
+ * @callback: Optional callback called when DP tunnel is fully activated
+ * @callback_data: Optional data for @callback
*/
struct tb_tunnel {
+ struct kref kref;
struct tb *tb;
struct tb_port *src_port;
struct tb_port *dst_port;
struct tb_path **paths;
size_t npaths;
- int (*init)(struct tb_tunnel *tunnel);
- void (*deinit)(struct tb_tunnel *tunnel);
+ int (*pre_activate)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ void (*post_deactivate)(struct tb_tunnel *tunnel);
+ void (*destroy)(struct tb_tunnel *tunnel);
int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
@@ -69,11 +94,17 @@ struct tb_tunnel {
int *available_down);
struct list_head list;
enum tb_tunnel_type type;
+ enum tb_tunnel_state state;
int max_up;
int max_down;
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_canceled;
+ ktime_t dprx_timeout;
+ struct delayed_work dprx_work;
+ void (*callback)(struct tb_tunnel *tunnel, void *data);
+ void *callback_data;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -86,7 +117,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down);
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
@@ -99,10 +132,24 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
-void tb_tunnel_free(struct tb_tunnel *tunnel);
+void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
-int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+
+/**
+ * tb_tunnel_is_active() - Is tunnel fully activated
+ * @tunnel: Tunnel to check
+ *
+ * Returns %true if @tunnel is fully activated. For tunnels other than
+ * DP this is the case once tb_tunnel_activate() returns
+ * successfully. However, for DP tunnels this returns %true only once the
+ * DPRX capabilities read has completed successfully.
+ */
+static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ return tunnel->state == TB_TUNNEL_ACTIVE;
+}
+
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 11a50c86a1e4..b0630e6d9472 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1026,7 +1026,7 @@ static int remove_missing_service(struct device *dev, void *data)
return 0;
}
-static int find_service(struct device *dev, void *data)
+static int find_service(struct device *dev, const void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index afbf7738c7c4..58b28be63c79 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -1154,7 +1154,7 @@ static char kgdbfdc_rbuf[4];
/* write buffer to allow compaction */
static unsigned int kgdbfdc_wbuflen;
-static char kgdbfdc_wbuf[4];
+static u8 kgdbfdc_wbuf[4];
static void __iomem *kgdbfdc_setup(void)
{
@@ -1215,7 +1215,7 @@ static int kgdbfdc_read_char(void)
/* push an FDC word from write buffer to TX FIFO */
static void kgdbfdc_push_one(void)
{
- const char *bufs[1] = { kgdbfdc_wbuf };
+ const u8 *bufs[1] = { kgdbfdc_wbuf };
struct fdc_word word;
void __iomem *regs;
unsigned int i;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 252849910588..363afe11974f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2224,7 +2224,7 @@ static int gsm_dlci_negotiate(struct gsm_dlci *dlci)
*
* Some control dlci can stay in ADM mode with other dlci working just
* fine. In that case we can just keep the control dlci open after the
- * DLCI_OPENING retries time out.
+ * DLCI_OPENING receives DM.
*/
static void gsm_dlci_t1(struct timer_list *t)
@@ -2243,16 +2243,19 @@ static void gsm_dlci_t1(struct timer_list *t)
}
break;
case DLCI_OPENING:
- if (dlci->retries) {
- dlci->retries--;
- gsm_command(dlci->gsm, dlci->addr, SABM|PF);
- mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (!dlci->addr && gsm->control == (DM | PF)) {
if (debug & DBG_ERRORS)
- pr_info("DLCI %d opening in ADM mode.\n",
- dlci->addr);
+ pr_info("DLCI 0 opening in ADM mode.\n");
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
+ } else if (dlci->retries) {
+ if (!dlci->addr || !gsm->dlci[0] ||
+ gsm->dlci[0]->state != DLCI_OPENING) {
+ dlci->retries--;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ }
+
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else {
gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
@@ -2308,7 +2311,9 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
dlci->retries = gsm->n2;
if (!need_pn) {
dlci->state = DLCI_OPENING;
- gsm_command(gsm, dlci->addr, SABM|PF);
+ if (!dlci->addr || !gsm->dlci[0] ||
+ gsm->dlci[0]->state != DLCI_OPENING)
+ gsm_command(gsm, dlci->addr, SABM|PF);
} else {
/* Configure DLCI before setup */
dlci->state = DLCI_CONFIGURE;
@@ -4251,7 +4256,7 @@ static const struct tty_port_operations gsm_port_ops = {
static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct gsm_mux *gsm;
- struct gsm_dlci *dlci;
+ struct gsm_dlci *dlci, *dlci0;
unsigned int line = tty->index;
unsigned int mux = mux_line_to_num(line);
bool alloc = false;
@@ -4274,10 +4279,20 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
perspective as we don't have to worry about this
if DLCI0 is lost */
mutex_lock(&gsm->mutex);
- if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
+
+ dlci0 = gsm->dlci[0];
+ if (dlci0 && dlci0->state != DLCI_OPEN) {
mutex_unlock(&gsm->mutex);
- return -EL2NSYNC;
+
+ if (dlci0->state == DLCI_OPENING)
+ wait_event(gsm->event, dlci0->state != DLCI_OPENING);
+
+ if (dlci0->state != DLCI_OPEN)
+ return -EL2NSYNC;
+
+ mutex_lock(&gsm->mutex);
}
+
dlci = gsm->dlci[line];
if (dlci == NULL) {
alloc = true;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index df08f13052ff..8bb1a01fef2a 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -798,7 +798,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
/* We refuse fsnotify events on ptmx, since it's a shared resource */
- filp->f_mode |= FMODE_NONOTIFY;
+ file_set_fsnotify_mode(filp, FMODE_NONOTIFY);
retval = tty_alloc_file(filp);
if (retval)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index e5310c65cf52..11e05aa014e5 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -231,8 +231,8 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p);
int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485);
-void serial8250_em485_start_tx(struct uart_8250_port *p);
-void serial8250_em485_stop_tx(struct uart_8250_port *p);
+void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier);
+void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier);
void serial8250_em485_destroy(struct uart_8250_port *p);
extern struct serial_rs485 serial8250_em485_supported;
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index fdb53b54e99e..0609582a62f7 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -46,7 +46,7 @@ struct bcm2835aux_data {
u32 cntl;
};
-static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
+static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up, bool toggle_ier)
{
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev);
@@ -65,7 +65,7 @@ static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
serial8250_out_MCR(up, UART_MCR_RTS);
}
-static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up)
+static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up, bool toggle_ier)
{
if (up->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
serial8250_out_MCR(up, 0);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 68baf75bdadc..6f676bb37ac3 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,6 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
uart_port_lock_irqsave(port, &flags);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
- up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
uart_port_unlock_irqrestore(port, flags);
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 9eb9aa766811..c2b75e3f106d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -365,7 +365,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
if (up->port.rs485.flags & SER_RS485_ENABLED &&
up->port.rs485_config == serial8250_em485_config)
- serial8250_em485_stop_tx(up);
+ serial8250_em485_stop_tx(up, true);
}
/*
@@ -412,7 +412,13 @@ static void omap_8250_set_termios(struct uart_port *port,
*/
uart_update_timeout(port, termios->c_cflag, baud);
- up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ /*
+ * Specify which conditions may be considered for error
+ * handling and the ignoring of characters. The actual
+ * ignoring of characters only occurs if the bit is set
+ * in @ignore_status_mask as well.
+ */
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | PARMRK))
@@ -838,7 +844,6 @@ static void omap_8250_unthrottle(struct uart_port *port)
if (up->dma)
up->dma->rx_dma(up);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
- port->read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
uart_port_unlock_irqrestore(port, flags);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3c3f7c926afb..df4d0d832e54 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -64,23 +64,17 @@
#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
-#define PCI_VENDOR_ID_WCH 0x4348
-#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253
-#define PCI_DEVICE_ID_WCH_CH353_4S 0x3453
-#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
-#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
-#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
-#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
+
+#define PCI_DEVICE_ID_WCHCN_CH352_2S 0x3253
+#define PCI_DEVICE_ID_WCHCN_CH355_4S 0x7173
+
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
-#define PCIE_VENDOR_ID_WCH 0x1c00
-#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
-#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
-#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
-#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
+#define PCI_DEVICE_ID_WCHIC_CH384_4S 0x3470
+#define PCI_DEVICE_ID_WCHIC_CH384_8S 0x3853
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
@@ -2817,80 +2811,80 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
},
/* WCH CH353 1S1P card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_1S1P,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_1S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 2S1P card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_2S1P,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 4S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_4S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH353 2S1PF card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH353_2S1PF,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH352 2S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH352_2S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH352_2S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
/* WCH CH355 4S card (16550 clone) */
{
- .vendor = PCI_VENDOR_ID_WCH,
- .device = PCI_DEVICE_ID_WCH_CH355_4S,
+ .vendor = PCI_VENDOR_ID_WCHCN,
+ .device = PCI_DEVICE_ID_WCHCN_CH355_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch355_setup,
},
/* WCH CH382 2S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH382_2S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH382_2S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH382 2S1P card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH382_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH384 4S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH384_4S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH384_4S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
/* WCH CH384 8S card (16850 clone) */
{
- .vendor = PCIE_VENDOR_ID_WCH,
- .device = PCIE_DEVICE_ID_WCH_CH384_8S,
+ .vendor = PCI_VENDOR_ID_WCHIC,
+ .device = PCI_DEVICE_ID_WCHIC_CH384_8S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_wch_ch38x_init,
@@ -3967,11 +3961,11 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
/* WCH CH353 2S1P */
- { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHCN, 0x7053), REPORT_CONFIG(PARPORT_SERIAL), },
/* WCH CH353 1S1P */
- { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHCN, 0x5053), REPORT_CONFIG(PARPORT_SERIAL), },
/* WCH CH382 2S1P */
- { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+ { PCI_VDEVICE(WCHIC, 0x3250), REPORT_CONFIG(PARPORT_SERIAL), },
/* Intel platforms with MID UART */
{ PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
@@ -6044,27 +6038,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
* WCH CH353 series devices: The 2S1P is handled by parport_serial
* so not listed here.
*/
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_4_115200 },
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
+ { PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH355_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_4_115200 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch382_2 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
- { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S,
+ { PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_8S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_8 },
/*
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 838f181f929b..e9c51d4e447d 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -78,6 +78,12 @@
#define UART_TX_BYTE_FIFO 0x00
#define UART_FIFO_CTL 0x02
+#define UART_MODEM_CTL_REG 0x04
+#define UART_MODEM_CTL_RTS_SET BIT(1)
+
+#define UART_LINE_STAT_REG 0x05
+#define UART_LINE_XMIT_CHECK_MASK GENMASK(6, 5)
+
#define UART_ACTV_REG 0x11
#define UART_BLOCK_SET_ACTIVE BIT(0)
@@ -94,6 +100,7 @@
#define UART_BIT_SAMPLE_CNT_16 16
#define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8)
#define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8)
+#define FRAC_DIV_TX_END_POINT_MASK GENMASK(23, 20)
#define UART_WAKE_REG 0x8C
#define UART_WAKE_MASK_REG 0x90
@@ -134,6 +141,11 @@
#define UART_BST_STAT_LSR_FRAME_ERR 0x8000000
#define UART_BST_STAT_LSR_THRE 0x20000000
+#define GET_MODEM_CTL_RTS_STATUS(reg) ((reg) & UART_MODEM_CTL_RTS_SET)
+#define GET_RTS_PIN_STATUS(val) (((val) & TIOCM_RTS) >> 1)
+#define RTS_TOGGLE_STATUS_MASK(val, reg) (GET_MODEM_CTL_RTS_STATUS(reg) \
+ != GET_RTS_PIN_STATUS(val))
+
struct pci1xxxx_8250 {
unsigned int nr;
u8 dev_rev;
@@ -254,6 +266,47 @@ static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
port->membase + UART_BAUD_CLK_DIVISOR_REG);
}
+static void pci1xxxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ u32 fract_div_cfg_reg;
+ u32 line_stat_reg;
+ u32 modem_ctl_reg;
+ u32 adcl_cfg_reg;
+
+ adcl_cfg_reg = readl(port->membase + ADCL_CFG_REG);
+
+ /* HW is responsible in ADCL_EN case */
+ if ((adcl_cfg_reg & (ADCL_CFG_EN | ADCL_CFG_PIN_SEL)))
+ return;
+
+ modem_ctl_reg = readl(port->membase + UART_MODEM_CTL_REG);
+
+ serial8250_do_set_mctrl(port, mctrl);
+
+ if (RTS_TOGGLE_STATUS_MASK(mctrl, modem_ctl_reg)) {
+ line_stat_reg = readl(port->membase + UART_LINE_STAT_REG);
+ if (line_stat_reg & UART_LINE_XMIT_CHECK_MASK) {
+ fract_div_cfg_reg = readl(port->membase +
+ FRAC_DIV_CFG_REG);
+
+ writel((fract_div_cfg_reg &
+ ~(FRAC_DIV_TX_END_POINT_MASK)),
+ port->membase + FRAC_DIV_CFG_REG);
+
+ /* Enable ADC and set the nRTS pin */
+ writel((adcl_cfg_reg | (ADCL_CFG_EN |
+ ADCL_CFG_PIN_SEL)),
+ port->membase + ADCL_CFG_REG);
+
+ /* Revert to the original settings */
+ writel(adcl_cfg_reg, port->membase + ADCL_CFG_REG);
+
+ writel(fract_div_cfg_reg, port->membase +
+ FRAC_DIV_CFG_REG);
+ }
+ }
+}
+
static int pci1xxxx_rs485_config(struct uart_port *port,
struct ktermios *termios,
struct serial_rs485 *rs485)
@@ -631,9 +684,14 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
port->port.rs485_config = pci1xxxx_rs485_config;
port->port.rs485_supported = pci1xxxx_rs485_supported;
- /* From C0 rev Burst operation is supported */
+ /*
+ * C0 and later revisions support Burst operation.
+ * The RTS workaround in set_mctrl() is applicable only to B0.
+ */
if (rev >= 0xC0)
port->port.handle_irq = pci1xxxx_handle_irq;
+ else if (rev == 0xB0)
+ port->port.set_mctrl = pci1xxxx_set_mctrl;
ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
if (ret < 0)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 649e74e9b52f..d7976a21cca9 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -578,7 +578,7 @@ static int serial8250_em485_init(struct uart_8250_port *p)
deassert_rts:
if (p->em485->tx_stopped)
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
return 0;
}
@@ -1390,7 +1390,6 @@ static void serial8250_stop_rx(struct uart_port *port)
serial8250_rpm_get(up);
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
serial_port_out(port, UART_IER, up->ier);
serial8250_rpm_put(up);
@@ -1399,10 +1398,11 @@ static void serial8250_stop_rx(struct uart_port *port)
/**
* serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
* @p: uart 8250 port
+ * @toggle_ier: true to allow enabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to stop rs485 transmission.
*/
-void serial8250_em485_stop_tx(struct uart_8250_port *p)
+void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(p);
@@ -1423,8 +1423,10 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_and_reinit_fifos(p);
- p->ier |= UART_IER_RLSI | UART_IER_RDI;
- serial_port_out(&p->port, UART_IER, p->ier);
+ if (toggle_ier) {
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+ }
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
@@ -1439,7 +1441,7 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
serial8250_rpm_get(p);
uart_port_lock_irqsave(&p->port, &flags);
if (em485->active_timer == &em485->stop_tx_timer) {
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
@@ -1471,7 +1473,7 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
em485->active_timer = &em485->stop_tx_timer;
hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
} else {
- p->rs485_stop_tx(p);
+ p->rs485_stop_tx(p, true);
em485->active_timer = NULL;
em485->tx_stopped = true;
}
@@ -1560,6 +1562,7 @@ static inline void __start_tx(struct uart_port *port)
/**
* serial8250_em485_start_tx() - generic ->rs485_start_tx() callback
* @up: uart 8250 port
+ * @toggle_ier: true to allow disabling receive interrupts
*
* Generic callback usable by 8250 uart drivers to start rs485 transmission.
* Assumes that setting the RTS bit in the MCR register means RTS is high.
@@ -1567,11 +1570,11 @@ static inline void __start_tx(struct uart_port *port)
* stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the
* UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.)
*/
-void serial8250_em485_start_tx(struct uart_8250_port *up)
+void serial8250_em485_start_tx(struct uart_8250_port *up, bool toggle_ier)
{
unsigned char mcr = serial8250_in_MCR(up);
- if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX) && toggle_ier)
serial8250_stop_rx(&up->port);
if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
@@ -1605,7 +1608,7 @@ static bool start_tx_rs485(struct uart_port *port)
if (em485->tx_stopped) {
em485->tx_stopped = false;
- up->rs485_start_tx(up);
+ up->rs485_start_tx(up, true);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
@@ -1931,7 +1934,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
*/
if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
(port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
- !(port->read_status_mask & UART_LSR_DR))
+ !(up->ier & (UART_IER_RLSI | UART_IER_RDI)))
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
@@ -2079,11 +2082,20 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
serial8250_rpm_put(up);
}
-static void wait_for_lsr(struct uart_8250_port *up, int bits)
+/* Returns true if @bits were set, false on timeout */
+static bool wait_for_lsr(struct uart_8250_port *up, int bits)
{
- unsigned int status, tmout = 10000;
+ unsigned int status, tmout;
+
+ /*
+ * Wait for a character to be sent. Fall back to a safe default
+ * timeout value if @frame_time is not available.
+ */
+ if (up->port.frame_time)
+ tmout = up->port.frame_time * 2 / NSEC_PER_USEC;
+ else
+ tmout = 10000;
- /* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
status = serial_lsr_in(up);
@@ -2094,11 +2106,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
udelay(1);
touch_nmi_watchdog();
}
+
+ return (tmout != 0);
}
-/*
- * Wait for transmitter & holding register to empty
- */
+/* Wait for transmitter and holding register to empty with timeout */
static void wait_for_xmitr(struct uart_8250_port *up, int bits)
{
unsigned int tmout;
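For scale: port->frame_time is stored in nanoseconds, so at 115200 baud with 8N1 framing one character takes roughly 87 us and the new timeout of two character times works out to about 174 us, compared with the previous fixed 10 ms; slower baud rates automatically get a proportionally longer window.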
@@ -2786,7 +2798,13 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
*/
uart_update_timeout(port, termios->c_cflag, baud);
- port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ /*
+ * Specify which conditions may be considered for error
+ * handling and the ignoring of characters. The actual
+ * ignoring of characters only occurs if the bit is set
+ * in @ignore_status_mask as well.
+ */
+ port->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
@@ -3250,7 +3268,7 @@ void serial8250_init_port(struct uart_8250_port *up)
port->ops = &serial8250_pops;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
- up->cur_iotype = 0xFF;
+ up->cur_iotype = UPIO_UNKNOWN;
}
EXPORT_SYMBOL_GPL(serial8250_init_port);
@@ -3285,10 +3303,15 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
{
+ serial_port_out(port, UART_TX, ch);
+}
+
+static void serial8250_console_wait_putchar(struct uart_port *port, unsigned char ch)
+{
struct uart_8250_port *up = up_to_u8250p(port);
wait_for_xmitr(up, UART_LSR_THRE);
- serial_port_out(port, UART_TX, ch);
+ serial8250_console_putchar(port, ch);
}
/*
@@ -3317,6 +3340,16 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
+static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ if (wait_for_lsr(up, UART_LSR_THRE))
+ return;
+ }
+}
+
/*
* Print a string to the serial port using the device FIFO
*
@@ -3326,24 +3359,34 @@ static void serial8250_console_restore(struct uart_8250_port *up)
static void serial8250_console_fifo_write(struct uart_8250_port *up,
const char *s, unsigned int count)
{
- int i;
const char *end = s + count;
unsigned int fifosize = up->tx_loadsz;
+ struct uart_port *port = &up->port;
+ unsigned int tx_count = 0;
bool cr_sent = false;
+ unsigned int i;
while (s != end) {
- wait_for_lsr(up, UART_LSR_THRE);
+ /* Allow timeout for each byte of a possibly full FIFO */
+ fifo_wait_for_lsr(up, fifosize);
for (i = 0; i < fifosize && s != end; ++i) {
if (*s == '\n' && !cr_sent) {
- serial_out(up, UART_TX, '\r');
+ serial8250_console_putchar(port, '\r');
cr_sent = true;
} else {
- serial_out(up, UART_TX, *s++);
+ serial8250_console_putchar(port, *s++);
cr_sent = false;
}
}
+ tx_count = i;
}
+
+ /*
+ * Allow a timeout for each byte written since the caller will only wait
+ * for UART_LSR_BOTH_EMPTY using the timeout of a single character.
+ */
+ fifo_wait_for_lsr(up, tx_count);
}
/*
@@ -3385,7 +3428,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (em485) {
if (em485->tx_stopped)
- up->rs485_start_tx(up);
+ up->rs485_start_tx(up, false);
mdelay(port->rs485.delay_rts_before_send);
}
@@ -3412,7 +3455,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (likely(use_fifo))
serial8250_console_fifo_write(up, s, count);
else
- uart_console_write(port, s, count, serial8250_console_putchar);
+ uart_console_write(port, s, count, serial8250_console_wait_putchar);
/*
* Finally, wait for transmitter to become empty
@@ -3423,7 +3466,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
if (em485->tx_stopped)
- up->rs485_stop_tx(up);
+ up->rs485_stop_tx(up, false);
}
serial_port_out(port, UART_IER, ier);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 45f0f779fbf9..976dae3bb1bb 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -128,7 +128,7 @@ config SERIAL_SB1250_DUART_CONSOLE
config SERIAL_ATMEL
bool "AT91 on-chip serial port support"
depends on COMMON_CLK
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
select MFD_AT91_USART
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index b9c3c3bed0c1..d47a62d1c9f7 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -24,8 +24,6 @@
#include <linux/io.h>
#include <linux/altera_jtaguart.h>
-#define DRV_NAME "altera_jtaguart"
-
/*
* Altera JTAG UART register definitions according to the Altera JTAG UART
* datasheet: https://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
@@ -173,7 +171,7 @@ static int altera_jtaguart_startup(struct uart_port *port)
int ret;
ret = request_irq(port->irq, altera_jtaguart_interrupt, 0,
- DRV_NAME, port);
+ dev_name(port->dev), port);
if (ret) {
dev_err(port->dev, "unable to attach Altera JTAG UART %d interrupt vector=%d\n",
port->line, port->irq);
@@ -365,7 +363,7 @@ OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup);
static struct uart_driver altera_jtaguart_driver = {
.owner = THIS_MODULE,
- .driver_name = "altera_jtaguart",
+ .driver_name = KBUILD_MODNAME,
.dev_name = "ttyJ",
.major = ALTERA_JTAGUART_MAJOR,
.minor = ALTERA_JTAGUART_MINOR,
@@ -451,7 +449,7 @@ static struct platform_driver altera_jtaguart_platform_driver = {
.probe = altera_jtaguart_probe,
.remove = altera_jtaguart_remove,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(altera_jtaguart_match),
},
};
@@ -481,4 +479,4 @@ module_exit(altera_jtaguart_exit);
MODULE_DESCRIPTION("Altera JTAG UART driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index c94655453c33..1759137121cc 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -24,7 +24,6 @@
#include <linux/io.h>
#include <linux/altera_uart.h>
-#define DRV_NAME "altera_uart"
#define SERIAL_ALTERA_MAJOR 204
#define SERIAL_ALTERA_MINOR 213
@@ -518,7 +517,7 @@ OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup);
*/
static struct uart_driver altera_uart_driver = {
.owner = THIS_MODULE,
- .driver_name = DRV_NAME,
+ .driver_name = KBUILD_MODNAME,
.dev_name = "ttyAL",
.major = SERIAL_ALTERA_MAJOR,
.minor = SERIAL_ALTERA_MINOR,
@@ -619,7 +618,7 @@ static struct platform_driver altera_uart_platform_driver = {
.probe = altera_uart_probe,
.remove = altera_uart_remove,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(altera_uart_match),
},
};
@@ -649,5 +648,5 @@ module_exit(altera_uart_exit);
MODULE_DESCRIPTION("Altera UART driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR);
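
The two Altera conversions above drop a hand-maintained DRV_NAME string in favour of KBUILD_MODNAME, which the build system defines to the object's module name. A minimal sketch (not part of the patches) of the resulting pattern; my_probe and my_platform_driver are illustrative names only:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/* dev_name(&pdev->dev) is preferred over a fixed string for IRQ names */
	return 0;
}

static struct platform_driver my_platform_driver = {
	.probe = my_probe,
	.driver = {
		/* KBUILD_MODNAME tracks the module name automatically, so the
		 * driver name can never drift from the module it ships in. */
		.name = KBUILD_MODNAME,
	},
};
module_platform_driver(my_platform_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
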
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 69b7a3e1e418..04212c823a91 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -248,6 +248,13 @@ struct pl011_dmatx_data {
bool queued;
};
+enum pl011_rs485_tx_state {
+ OFF,
+ WAIT_AFTER_RTS,
+ SEND,
+ WAIT_AFTER_SEND,
+};
+
/*
* We wrap our port structure around the generic uart_port.
*/
@@ -261,8 +268,10 @@ struct uart_amba_port {
unsigned int fifosize; /* vendor-specific */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
- bool rs485_tx_started;
- unsigned int rs485_tx_drain_interval; /* usecs */
+ ktime_t rs485_tx_drain_interval; /* nano */
+ enum pl011_rs485_tx_state rs485_tx_state;
+ struct hrtimer trigger_start_tx;
+ struct hrtimer trigger_stop_tx;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
unsigned int dmacr; /* dma control reg */
@@ -1260,30 +1269,31 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
- /*
- * To be on the safe side only time out after twice as many iterations
- * as fifo size.
- */
- const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
struct uart_port *port = &uap->port;
- int i = 0;
u32 cr;
- /* Wait until hardware tx queue is empty */
- while (!pl011_tx_empty(port)) {
- if (i > MAX_TX_DRAIN_ITERS) {
- dev_warn(port->dev,
- "timeout while draining hardware tx queue\n");
- break;
- }
+ if (uap->rs485_tx_state == SEND)
+ uap->rs485_tx_state = WAIT_AFTER_SEND;
- udelay(uap->rs485_tx_drain_interval);
- i++;
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ /* Schedule hrtimer if tx queue not empty */
+ if (!pl011_tx_empty(port)) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ uap->rs485_tx_drain_interval,
+ HRTIMER_MODE_REL);
+ return;
+ }
+ if (port->rs485.delay_rts_after_send > 0) {
+ hrtimer_start(&uap->trigger_stop_tx,
+ ms_to_ktime(port->rs485.delay_rts_after_send),
+ HRTIMER_MODE_REL);
+ return;
+ }
+ /* Continue without any delay */
+ } else if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ hrtimer_try_to_cancel(&uap->trigger_start_tx);
}
- if (port->rs485.delay_rts_after_send)
- mdelay(port->rs485.delay_rts_after_send);
-
cr = pl011_read(uap, REG_CR);
if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
@@ -1296,7 +1306,7 @@ static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
cr |= UART011_CR_RXE;
pl011_write(cr, uap, REG_CR);
- uap->rs485_tx_started = false;
+ uap->rs485_tx_state = OFF;
}
static void pl011_stop_tx(struct uart_port *port)
@@ -1304,11 +1314,18 @@ static void pl011_stop_tx(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ pl011_rs485_tx_stop(uap);
+ return;
+ }
+
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
pl011_dma_tx_stop(uap);
- if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ uap->rs485_tx_state != OFF)
pl011_rs485_tx_stop(uap);
}
@@ -1328,10 +1345,19 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap)
struct uart_port *port = &uap->port;
u32 cr;
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS) {
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND) {
+ hrtimer_try_to_cancel(&uap->trigger_stop_tx);
+ uap->rs485_tx_state = SEND;
+ return;
+ }
+ /* uap->rs485_tx_state == OFF */
/* Enable transmitter */
cr = pl011_read(uap, REG_CR);
cr |= UART011_CR_TXE;
-
/* Disable receiver if half-duplex */
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
cr &= ~UART011_CR_RXE;
@@ -1343,10 +1369,14 @@ static void pl011_rs485_tx_start(struct uart_amba_port *uap)
pl011_write(cr, uap, REG_CR);
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
+ if (port->rs485.delay_rts_before_send > 0) {
+ uap->rs485_tx_state = WAIT_AFTER_RTS;
+ hrtimer_start(&uap->trigger_start_tx,
+ ms_to_ktime(port->rs485.delay_rts_before_send),
+ HRTIMER_MODE_REL);
+ } else {
+ uap->rs485_tx_state = SEND;
+ }
}
static void pl011_start_tx(struct uart_port *port)
@@ -1355,13 +1385,44 @@ static void pl011_start_tx(struct uart_port *port)
container_of(port, struct uart_amba_port, port);
if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
+ uap->rs485_tx_state != SEND) {
pl011_rs485_tx_start(uap);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ return;
+ }
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
+static enum hrtimer_restart pl011_trigger_start_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_start_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_RTS)
+ pl011_start_tx(&uap->port);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart pl011_trigger_stop_tx(struct hrtimer *t)
+{
+ struct uart_amba_port *uap =
+ container_of(t, struct uart_amba_port, trigger_stop_tx);
+ unsigned long flags;
+
+ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->rs485_tx_state == WAIT_AFTER_SEND)
+ pl011_rs485_tx_stop(uap);
+ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return HRTIMER_NORESTART;
+}
+
static void pl011_stop_rx(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1953,7 +2014,7 @@ static void pl011_shutdown(struct uart_port *port)
pl011_dma_shutdown(uap);
- if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
+ if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF))
pl011_rs485_tx_stop(uap);
free_irq(uap->port.irq, uap);
@@ -2098,7 +2159,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* with the given baud rate. We use this as the poll interval when we
* wait for the tx queue to empty.
*/
- uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
+ uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));
pl011_setup_status_masks(port, termios);
@@ -2807,6 +2868,11 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
}
}
+ hrtimer_init(&uap->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&uap->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ uap->trigger_start_tx.function = pl011_trigger_start_tx;
+ uap->trigger_stop_tx.function = pl011_trigger_stop_tx;
+
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
return ret;
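
The pl011 change above replaces mdelay()-based RS485 turnaround waits with one-shot hrtimers driven by a small tx state machine. A minimal sketch of that hrtimer pattern in isolation, assuming the same hrtimer_init/hrtimer_start API the patch uses; struct my_port and the callback names are illustrative, not part of the driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/container_of.h>

struct my_port {
	struct hrtimer trigger;		/* fires once the RS485 delay elapses */
	bool delay_pending;
};

static enum hrtimer_restart my_trigger_fn(struct hrtimer *t)
{
	struct my_port *p = container_of(t, struct my_port, trigger);

	/* Callback runs in hard IRQ context: a real driver takes the port
	 * lock here before advancing its rs485_tx_state machine. */
	p->delay_pending = false;
	return HRTIMER_NORESTART;	/* one-shot, no re-arm */
}

static void my_port_init(struct my_port *p)
{
	hrtimer_init(&p->trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	p->trigger.function = my_trigger_fn;
}

static void my_port_wait(struct my_port *p, unsigned int delay_ms)
{
	/* Relative one-shot timer replacing a mdelay(delay_ms) busy-wait */
	p->delay_pending = true;
	hrtimer_start(&p->trigger, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
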
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 0cf05ac18993..f44f9d20a974 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1727,26 +1727,16 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
/* DMA/PDC usage specification */
if (of_property_read_bool(np, "atmel,use-dma-rx")) {
- if (of_property_read_bool(np, "dmas")) {
- atmel_port->use_dma_rx = true;
- atmel_port->use_pdc_rx = false;
- } else {
- atmel_port->use_dma_rx = false;
- atmel_port->use_pdc_rx = true;
- }
+ atmel_port->use_dma_rx = of_property_present(np, "dmas");
+ atmel_port->use_pdc_rx = !atmel_port->use_dma_rx;
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = false;
}
if (of_property_read_bool(np, "atmel,use-dma-tx")) {
- if (of_property_read_bool(np, "dmas")) {
- atmel_port->use_dma_tx = true;
- atmel_port->use_pdc_tx = false;
- } else {
- atmel_port->use_dma_tx = false;
- atmel_port->use_pdc_tx = true;
- }
+ atmel_port->use_dma_tx = of_property_present(np, "dmas");
+ atmel_port->use_pdc_tx = !atmel_port->use_dma_tx;
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = false;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 57b0632a3db6..c91b9d9818cd 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -245,7 +245,7 @@
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
-#define UART_NR 8
+#define UART_NR 12
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
@@ -1965,6 +1965,11 @@ static void lpuart32_shutdown(struct uart_port *port)
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);
+ /* flush Rx/Tx FIFO */
+ temp = lpuart32_read(port, UARTFIFO);
+ temp |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(port, temp, UARTFIFO);
+
uart_port_unlock_irqrestore(port, flags);
lpuart_dma_shutdown(sport);
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index e93850f6447a..2833708e369f 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -27,18 +27,6 @@
#include <linux/kgdb.h>
#include <linux/kdb.h>
-static int kgdb_nmi_knock = 1;
-module_param_named(knock, kgdb_nmi_knock, int, 0600);
-MODULE_PARM_DESC(knock, "if set to 1 (default), the special '$3#33' command " \
- "must be used to enter the debugger; when set to 0, " \
- "hitting return key is enough to enter the debugger; " \
- "when set to -1, the debugger is entered immediately " \
- "upon NMI");
-
-static char *kgdb_nmi_magic = "$3#33";
-module_param_named(magic, kgdb_nmi_magic, charp, 0600);
-MODULE_PARM_DESC(magic, "magic sequence to enter NMI debugger (default $3#33)");
-
static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0);
static int kgdb_nmi_console_setup(struct console *co, char *options)
@@ -95,95 +83,6 @@ struct kgdb_nmi_tty_priv {
static struct tty_port *kgdb_nmi_port;
-static void kgdb_tty_recv(int ch)
-{
- struct kgdb_nmi_tty_priv *priv;
- char c = ch;
-
- if (!kgdb_nmi_port || ch < 0)
- return;
- /*
- * Can't use port->tty->driver_data as tty might be not there. Timer
- * will check for tty and will get the ref, but here we don't have to
- * do that, and actually, we can't: we're in NMI context, no locks are
- * possible.
- */
- priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port);
- kfifo_in(&priv->fifo, &c, 1);
-}
-
-static int kgdb_nmi_poll_one_knock(void)
-{
- static int n;
- int c;
- const char *magic = kgdb_nmi_magic;
- size_t m = strlen(magic);
- bool printch = false;
-
- c = dbg_io_ops->read_char();
- if (c == NO_POLL_CHAR)
- return c;
-
- if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) {
- return 1;
- } else if (c == magic[n]) {
- n = (n + 1) % m;
- if (!n)
- return 1;
- printch = true;
- } else {
- n = 0;
- }
-
- if (atomic_read(&kgdb_nmi_num_readers)) {
- kgdb_tty_recv(c);
- return 0;
- }
-
- if (printch) {
- kdb_printf("%c", c);
- return 0;
- }
-
- kdb_printf("\r%s %s to enter the debugger> %*s",
- kgdb_nmi_knock ? "Type" : "Hit",
- kgdb_nmi_knock ? magic : "<return>", (int)m, "");
- while (m--)
- kdb_printf("\b");
- return 0;
-}
-
-/**
- * kgdb_nmi_poll_knock - Check if it is time to enter the debugger
- *
- * "Serial ports are often noisy, especially when muxed over another port (we
- * often use serial over the headset connector). Noise on the async command
- * line just causes characters that are ignored, on a command line that blocked
- * execution noise would be catastrophic." -- Colin Cross
- *
- * So, this function implements KGDB/KDB knocking on the serial line: we won't
- * enter the debugger until we receive a known magic phrase (which is actually
- * "$3#33", known as "escape to KDB" command. There is also a relaxed variant
- * of knocking, i.e. just pressing the return key is enough to enter the
- * debugger. And if knocking is disabled, the function always returns 1.
- */
-bool kgdb_nmi_poll_knock(void)
-{
- if (kgdb_nmi_knock < 0)
- return true;
-
- while (1) {
- int ret;
-
- ret = kgdb_nmi_poll_one_knock();
- if (ret == NO_POLL_CHAR)
- return false;
- else if (ret == 1)
- break;
- }
- return true;
-}
-
/*
* The tasklet is cheap, it does not cause wakeups when reschedules itself,
* instead it waits for the next tick.
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index f55aa353aed9..2204cc3e3b07 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1621,7 +1621,7 @@ mpc52xx_console_setup(struct console *co, char *options)
(void *)port->mapbase, port->membase,
port->irq, port->uartclk);
- /* Setup the port parameters accoding to options */
+ /* Setup the port parameters according to options */
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a3093e09309f..7b51cdc274fd 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -314,6 +314,7 @@
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_GPIOS_PER_BANK 4
+#define SC16IS7XX_POLL_PERIOD_MS 10
#define SC16IS7XX_RECONF_MD BIT(0)
#define SC16IS7XX_RECONF_IER BIT(1)
#define SC16IS7XX_RECONF_RS485 BIT(2)
@@ -348,6 +349,8 @@ struct sc16is7xx_port {
u8 mctrl_mask;
struct kthread_worker kworker;
struct task_struct *kworker_task;
+ struct kthread_delayed_work poll_work;
+ bool polling;
struct sc16is7xx_one p[];
};
@@ -861,6 +864,18 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void sc16is7xx_poll_proc(struct kthread_work *ws)
+{
+ struct sc16is7xx_port *s = container_of(ws, struct sc16is7xx_port, poll_work.work);
+
+ /* Reuse standard IRQ handler. Interrupt ID is unused in this context. */
+ sc16is7xx_irq(0, s);
+
+ /* Setup delay based on SC16IS7XX_POLL_PERIOD_MS */
+ kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+ msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+}
+
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
@@ -1149,6 +1164,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
static int sc16is7xx_startup(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
unsigned long flags;
@@ -1211,6 +1227,10 @@ static int sc16is7xx_startup(struct uart_port *port)
sc16is7xx_enable_ms(port);
uart_port_unlock_irqrestore(port, flags);
+ if (s->polling)
+ kthread_queue_delayed_work(&s->kworker, &s->poll_work,
+ msecs_to_jiffies(SC16IS7XX_POLL_PERIOD_MS));
+
return 0;
}
@@ -1232,6 +1252,9 @@ static void sc16is7xx_shutdown(struct uart_port *port)
sc16is7xx_power(port, 0);
+ if (s->polling)
+ kthread_cancel_delayed_work_sync(&s->poll_work);
+
kthread_flush_worker(&s->kworker);
}
@@ -1538,6 +1561,11 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
+ s->polling = (irq <= 0);
+ if (s->polling)
+ dev_dbg(dev,
+ "No interrupt pin definition, falling back to polling mode\n");
+
s->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
@@ -1665,6 +1693,12 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
goto out_ports;
#endif
+ if (s->polling) {
+ /* Initialize kernel thread for polling */
+ kthread_init_delayed_work(&s->poll_work, sc16is7xx_poll_proc);
+ return 0;
+ }
+
/*
* Setup interrupt. We first try to acquire the IRQ line as level IRQ.
* If that succeeds, we can allow sharing the interrupt as well.
@@ -1724,6 +1758,9 @@ void sc16is7xx_remove(struct device *dev)
sc16is7xx_power(&s->p[i].port, 0);
}
+ if (s->polling)
+ kthread_cancel_delayed_work_sync(&s->poll_work);
+
kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
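
The sc16is7xx polling mode above is built on a self-rearming kthread delayed work that reuses the IRQ handler. A stripped-down sketch of that pattern, assuming a 10 ms period; my_dev, my_poll and my_service are placeholder names, not driver symbols:

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/err.h>

#define MY_POLL_PERIOD_MS	10

struct my_dev {
	struct kthread_worker kworker;
	struct task_struct *kworker_task;
	struct kthread_delayed_work poll_work;
};

static void my_service(struct my_dev *d)
{
	/* same work the IRQ handler would do */
}

static void my_poll(struct kthread_work *ws)
{
	struct my_dev *d = container_of(ws, struct my_dev, poll_work.work);

	my_service(d);

	/* Re-arm: the work keeps queueing itself until it is cancelled. */
	kthread_queue_delayed_work(&d->kworker, &d->poll_work,
				   msecs_to_jiffies(MY_POLL_PERIOD_MS));
}

static int my_poll_start(struct my_dev *d)
{
	kthread_init_worker(&d->kworker);
	d->kworker_task = kthread_run(kthread_worker_fn, &d->kworker, "my-poll");
	if (IS_ERR(d->kworker_task))
		return PTR_ERR(d->kworker_task);

	kthread_init_delayed_work(&d->poll_work, my_poll);
	kthread_queue_delayed_work(&d->kworker, &d->poll_work,
				   msecs_to_jiffies(MY_POLL_PERIOD_MS));
	return 0;
}

static void my_poll_stop(struct my_dev *d)
{
	kthread_cancel_delayed_work_sync(&d->poll_work);
	kthread_stop(d->kworker_task);
}
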
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 74fa02b23772..92f7e752f862 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -790,7 +790,6 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
- int ret = -ENODEV;
/* Initialize structure in case we error out later to prevent any stack info leakage. */
*retinfo = (struct serial_struct){};
@@ -799,10 +798,10 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
* Ensure the state we copy is consistent and no hardware changes
* occur as we go
*/
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return -ENODEV;
retinfo->type = uport->type;
retinfo->line = uport->line;
@@ -823,10 +822,7 @@ static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
retinfo->iomem_reg_shift = uport->regshift;
retinfo->iomem_base = (void *)(unsigned long)uport->mapbase;
- ret = 0;
-out:
- mutex_unlock(&port->mutex);
- return ret;
+ return 0;
}
static int uart_get_info_user(struct tty_struct *tty,
@@ -838,6 +834,61 @@ static int uart_get_info_user(struct tty_struct *tty,
return uart_get_info(port, ss) < 0 ? -EIO : 0;
}
+static int uart_change_port(struct uart_port *uport,
+ const struct serial_struct *new_info,
+ unsigned long new_port)
+{
+ unsigned long old_iobase, old_mapbase;
+ unsigned int old_type, old_iotype, old_hub6, old_shift;
+ int retval;
+
+ old_iobase = uport->iobase;
+ old_mapbase = uport->mapbase;
+ old_type = uport->type;
+ old_hub6 = uport->hub6;
+ old_iotype = uport->iotype;
+ old_shift = uport->regshift;
+
+ if (old_type != PORT_UNKNOWN && uport->ops->release_port)
+ uport->ops->release_port(uport);
+
+ uport->iobase = new_port;
+ uport->type = new_info->type;
+ uport->hub6 = new_info->hub6;
+ uport->iotype = new_info->io_type;
+ uport->regshift = new_info->iomem_reg_shift;
+ uport->mapbase = (unsigned long)new_info->iomem_base;
+
+ if (uport->type == PORT_UNKNOWN || !uport->ops->request_port)
+ return 0;
+
+ retval = uport->ops->request_port(uport);
+ if (retval == 0)
+ return 0; /* succeeded => done */
+
+ /*
+ * If we fail to request resources for the new port, try to restore the
+ * old settings.
+ */
+ uport->iobase = old_iobase;
+ uport->type = old_type;
+ uport->hub6 = old_hub6;
+ uport->iotype = old_iotype;
+ uport->regshift = old_shift;
+ uport->mapbase = old_mapbase;
+
+ if (old_type == PORT_UNKNOWN)
+ return retval;
+
+ retval = uport->ops->request_port(uport);
+ /* If we failed to restore the old settings, we fail like this. */
+ if (retval)
+ uport->type = PORT_UNKNOWN;
+
+ /* We failed anyway. */
+ return -EBUSY;
+}
+
static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
struct uart_state *state,
struct serial_struct *new_info)
@@ -847,7 +898,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
- int retval = 0;
+ int retval;
if (!uport)
return -EIO;
@@ -886,13 +937,10 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
if (!(uport->flags & UPF_FIXED_PORT)) {
unsigned int uartclk = new_info->baud_base * 16;
/* check needs to be done here before other settings made */
- if (uartclk == 0) {
- retval = -EINVAL;
- goto exit;
- }
+ if (uartclk == 0)
+ return -EINVAL;
}
if (!capable(CAP_SYS_ADMIN)) {
- retval = -EPERM;
if (change_irq || change_port ||
(new_info->baud_base != uport->uartclk / 16) ||
(close_delay != port->close_delay) ||
@@ -900,7 +948,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
(new_info->xmit_fifo_size &&
new_info->xmit_fifo_size != uport->fifosize) ||
(((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
- goto exit;
+ return -EPERM;
uport->flags = ((uport->flags & ~UPF_USR_MASK) |
(new_flags & UPF_USR_MASK));
uport->custom_divisor = new_info->custom_divisor;
@@ -910,30 +958,24 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
if (change_irq || change_port) {
retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
if (retval)
- goto exit;
+ return retval;
}
- /*
- * Ask the low level driver to verify the settings.
- */
- if (uport->ops->verify_port)
+ /* Ask the low level driver to verify the settings. */
+ if (uport->ops->verify_port) {
retval = uport->ops->verify_port(uport, new_info);
+ if (retval)
+ return retval;
+ }
if ((new_info->irq >= irq_get_nr_irqs()) || (new_info->irq < 0) ||
(new_info->baud_base < 9600))
- retval = -EINVAL;
-
- if (retval)
- goto exit;
+ return -EINVAL;
if (change_port || change_irq) {
- retval = -EBUSY;
-
- /*
- * Make sure that we are the sole user of this port.
- */
+ /* Make sure that we are the sole user of this port. */
if (tty_port_users(port) > 1)
- goto exit;
+ return -EBUSY;
/*
* We need to shutdown the serial port at the old
@@ -943,69 +985,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
if (change_port) {
- unsigned long old_iobase, old_mapbase;
- unsigned int old_type, old_iotype, old_hub6, old_shift;
-
- old_iobase = uport->iobase;
- old_mapbase = uport->mapbase;
- old_type = uport->type;
- old_hub6 = uport->hub6;
- old_iotype = uport->iotype;
- old_shift = uport->regshift;
-
- /*
- * Free and release old regions
- */
- if (old_type != PORT_UNKNOWN && uport->ops->release_port)
- uport->ops->release_port(uport);
-
- uport->iobase = new_port;
- uport->type = new_info->type;
- uport->hub6 = new_info->hub6;
- uport->iotype = new_info->io_type;
- uport->regshift = new_info->iomem_reg_shift;
- uport->mapbase = (unsigned long)new_info->iomem_base;
-
- /*
- * Claim and map the new regions
- */
- if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
- retval = uport->ops->request_port(uport);
- } else {
- /* Always success - Jean II */
- retval = 0;
- }
-
- /*
- * If we fail to request resources for the
- * new port, try to restore the old settings.
- */
- if (retval) {
- uport->iobase = old_iobase;
- uport->type = old_type;
- uport->hub6 = old_hub6;
- uport->iotype = old_iotype;
- uport->regshift = old_shift;
- uport->mapbase = old_mapbase;
-
- if (old_type != PORT_UNKNOWN) {
- retval = uport->ops->request_port(uport);
- /*
- * If we failed to restore the old settings,
- * we fail like this.
- */
- if (retval)
- uport->type = PORT_UNKNOWN;
-
- /*
- * We failed anyway.
- */
- retval = -EBUSY;
- }
-
- /* Added to return the correct error -Ram Gupta */
- goto exit;
- }
+ retval = uart_change_port(uport, new_info, new_port);
+ if (retval)
+ return retval;
}
if (change_irq)
@@ -1021,9 +1003,9 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
uport->fifosize = new_info->xmit_fifo_size;
check_and_exit:
- retval = 0;
if (uport->type == PORT_UNKNOWN)
- goto exit;
+ return 0;
+
if (tty_port_initialized(port)) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
@@ -1039,15 +1021,17 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
uart_change_line_settings(tty, state, NULL);
}
- } else {
- retval = uart_startup(tty, state, true);
- if (retval == 0)
- tty_port_set_initialized(port, true);
- if (retval > 0)
- retval = 0;
+
+ return 0;
}
- exit:
- return retval;
+
+ retval = uart_startup(tty, state, true);
+ if (retval < 0)
+ return retval;
+ if (retval == 0)
+ tty_port_set_initialized(port, true);
+
+ return 0;
}
static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
@@ -2365,9 +2349,9 @@ struct uart_match {
struct uart_driver *driver;
};
-static int serial_match_port(struct device *dev, void *data)
+static int serial_match_port(struct device *dev, const void *data)
{
- struct uart_match *match = data;
+ const struct uart_match *match = data;
struct tty_driver *tty_drv = match->driver->tty_driver;
dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) +
match->port->line;
@@ -3061,26 +3045,25 @@ static ssize_t console_store(struct device *dev,
if (ret)
return ret;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
uport = uart_port_check(state);
- if (uport) {
- oldconsole = uart_console_registered(uport);
- if (oldconsole && !newconsole) {
- ret = unregister_console(uport->cons);
- } else if (!oldconsole && newconsole) {
- if (uart_console(uport)) {
- uport->console_reinit = 1;
- register_console(uport->cons);
- } else {
- ret = -ENOENT;
- }
- }
- } else {
- ret = -ENXIO;
+ if (!uport)
+ return -ENXIO;
+
+ oldconsole = uart_console_registered(uport);
+ if (oldconsole && !newconsole) {
+ ret = unregister_console(uport->cons);
+ if (ret < 0)
+ return ret;
+ } else if (!oldconsole && newconsole) {
+ if (!uart_console(uport))
+ return -ENOENT;
+
+ uport->console_reinit = 1;
+ register_console(uport->cons);
}
- mutex_unlock(&port->mutex);
- return ret < 0 ? ret : count;
+ return count;
}
static DEVICE_ATTR_RO(uartclk);
@@ -3136,7 +3119,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
{
struct uart_state *state;
struct tty_port *port;
- int ret = 0;
struct device *tty_dev;
int num_groups;
@@ -3146,11 +3128,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
state = drv->state + uport->line;
port = &state->port;
- mutex_lock(&port->mutex);
- if (state->uart_port) {
- ret = -EINVAL;
- goto out;
- }
+ guard(mutex)(&port->mutex);
+ if (state->uart_port)
+ return -EINVAL;
/* Link the port to the driver state table and vice versa */
atomic_set(&state->refcount, 1);
@@ -3170,10 +3150,8 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->minor = drv->tty_driver->minor_start + uport->line;
uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
drv->tty_driver->name_base + uport->line);
- if (!uport->name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!uport->name)
+ return -ENOMEM;
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
@@ -3189,10 +3167,9 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups),
GFP_KERNEL);
- if (!uport->tty_groups) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!uport->tty_groups)
+ return -ENOMEM;
+
uport->tty_groups[0] = &tty_dev_attr_group;
if (uport->attr_group)
uport->tty_groups[1] = uport->attr_group;
@@ -3215,10 +3192,7 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
uport->line);
}
- out:
- mutex_unlock(&port->mutex);
-
- return ret;
+ return 0;
}
/**
@@ -3384,7 +3358,7 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
struct serial_ctrl_device *ctrl_dev, *new_ctrl_dev = NULL;
int ret;
- mutex_lock(&port_mutex);
+ guard(mutex)(&port_mutex);
/*
* Prevent serial_port_runtime_resume() from trying to use the port
@@ -3396,10 +3370,8 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
ctrl_dev = serial_core_ctrl_find(drv, port->dev, port->ctrl_id);
if (!ctrl_dev) {
new_ctrl_dev = serial_core_ctrl_device_add(port);
- if (IS_ERR(new_ctrl_dev)) {
- ret = PTR_ERR(new_ctrl_dev);
- goto err_unlock;
- }
+ if (IS_ERR(new_ctrl_dev))
+ return PTR_ERR(new_ctrl_dev);
ctrl_dev = new_ctrl_dev;
}
@@ -3420,8 +3392,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
if (ret)
goto err_unregister_port_dev;
- mutex_unlock(&port_mutex);
-
return 0;
err_unregister_port_dev:
@@ -3430,9 +3400,6 @@ err_unregister_port_dev:
err_unregister_ctrl_dev:
serial_base_ctrl_device_remove(new_ctrl_dev);
-err_unlock:
- mutex_unlock(&port_mutex);
-
return ret;
}
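
Much of the serial_core rework above, and the ufshcd rework further down with scoped_guard(spinlock_irqsave, ...), replaces lock/goto-unlock pairs with the cleanup.h guards so that every early return drops the lock automatically. A minimal sketch of the two forms, with my_lock, my_read and my_write as placeholder names:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_lock);
static int my_value = -1;

static int my_read(int *out)
{
	/* Whole-function scope: unlocked automatically on any return path. */
	guard(mutex)(&my_lock);

	if (my_value < 0)
		return -ENODEV;		/* no goto/unlock boilerplate */

	*out = my_value;
	return 0;
}

static void my_write(int v)
{
	/* Block scope: the lock is held only for the braced region. */
	scoped_guard(mutex, &my_lock) {
		my_value = v;
	}
	/* lock already released here */
}
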
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 924b803af440..b1ea48f38248 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -165,6 +165,8 @@ struct sci_port {
static struct sci_port sci_ports[SCI_NPORTS];
static unsigned long sci_ports_in_use;
static struct uart_driver sci_uart_driver;
+static bool sci_uart_earlycon;
+static bool sci_uart_earlycon_dev_probing;
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
@@ -3056,10 +3058,6 @@ static int sci_init_single(struct platform_device *dev,
ret = sci_init_clocks(sci_port, &dev->dev);
if (ret < 0)
return ret;
-
- port->dev = &dev->dev;
-
- pm_runtime_enable(&dev->dev);
}
port->type = p->type;
@@ -3086,11 +3084,6 @@ static int sci_init_single(struct platform_device *dev,
return 0;
}
-static void sci_cleanup_single(struct sci_port *port)
-{
- pm_runtime_disable(port->port.dev);
-}
-
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
static void serial_console_putchar(struct uart_port *port, unsigned char ch)
@@ -3260,8 +3253,6 @@ static void sci_remove(struct platform_device *dev)
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
- sci_cleanup_single(port);
-
if (port->port.fifosize > 1)
device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
@@ -3396,7 +3387,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
static int sci_probe_single(struct platform_device *dev,
unsigned int index,
struct plat_sci_port *p,
- struct sci_port *sciport)
+ struct sci_port *sciport,
+ struct resource *sci_res)
{
int ret;
@@ -3425,6 +3417,11 @@ static int sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
+ sciport->port.dev = &dev->dev;
+ ret = devm_pm_runtime_enable(&dev->dev);
+ if (ret)
+ return ret;
+
sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
if (IS_ERR(sciport->gpios))
return PTR_ERR(sciport->gpios);
@@ -3438,18 +3435,37 @@ static int sci_probe_single(struct platform_device *dev,
sciport->port.flags |= UPF_HARD_FLOW;
}
- ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
- if (ret) {
- sci_cleanup_single(sciport);
- return ret;
+ if (sci_uart_earlycon && sci_ports[0].port.mapbase == sci_res->start) {
+ /*
+ * In case:
+ * - this is the earlycon port (mapped on index 0 in sci_ports[]) and
+ * - it now maps to an alias other than zero and
+ * - the earlycon is still alive (e.g., "earlycon keep_bootcon" is
+ * available in bootargs)
+ *
+ * we need to avoid disabling clocks and PM domains through the runtime
+ * PM APIs called in __device_attach(). For this, increment the runtime
+ * PM reference counter (the clocks and PM domains were already enabled
+ * by the bootloader). Otherwise the earlycon may access the HW when it
+ * has no clocks enabled leading to failures (infinite loop in
+ * sci_poll_put_char()).
+ */
+ pm_runtime_get_noresume(&dev->dev);
+
+ /*
+ * Skip cleaning up sci_ports[0] in early_console_exit(); this
+ * port is the same as the earlycon one.
+ */
+ sci_uart_earlycon_dev_probing = true;
}
- return 0;
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
static int sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p;
+ struct resource *res;
struct sci_port *sp;
unsigned int dev_id;
int ret;
@@ -3479,9 +3495,29 @@ static int sci_probe(struct platform_device *dev)
}
sp = &sci_ports[dev_id];
+
+ /*
+ * In case:
+ * - the probed port alias is zero (as the one used by earlycon), and
+ * - the earlycon is still active (e.g., "earlycon keep_bootcon" in
+ * bootargs)
+ *
+ * defer the probe of this serial port. This is a debug scenario and the user
+ * must be aware of it.
+ *
+ * Except when the probed port is the same as the earlycon port.
+ */
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start)
+ return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n");
+
platform_set_drvdata(dev, sp);
- ret = sci_probe_single(dev, dev_id, p, sp);
+ ret = sci_probe_single(dev, dev_id, p, sp, res);
if (ret)
return ret;
@@ -3562,7 +3598,23 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
-static struct plat_sci_port port_cfg __initdata;
+static struct plat_sci_port port_cfg;
+
+static int early_console_exit(struct console *co)
+{
+ struct sci_port *sci_port = &sci_ports[0];
+
+ /*
+ * Clean the slot used by earlycon. A new SCI device might
+ * map to this slot.
+ */
+ if (!sci_uart_earlycon_dev_probing) {
+ memset(sci_port, 0, sizeof(*sci_port));
+ sci_uart_earlycon = false;
+ }
+
+ return 0;
+}
static int __init early_console_setup(struct earlycon_device *device,
int type)
@@ -3571,15 +3623,18 @@ static int __init early_console_setup(struct earlycon_device *device,
return -ENODEV;
device->port.type = type;
- memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
+ sci_ports[0].port = device->port;
port_cfg.type = type;
sci_ports[0].cfg = &port_cfg;
sci_ports[0].params = sci_probe_regmap(&port_cfg);
+ sci_uart_earlycon = true;
port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
sci_serial_out(&sci_ports[0].port, SCSCR,
SCSCR_RE | SCSCR_TE | port_cfg.scscr);
device->con->write = serial_console_write;
+ device->con->exit = early_console_exit;
+
return 0;
}
static int __init sci_early_console_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index beb151be4d32..92ec51870d1d 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -287,7 +287,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
continue;
}
- if (uart_handle_sysrq_char(port, data))
+ if (uart_prepare_sysrq_char(port, data))
continue;
if (is_rxbs_support) {
@@ -495,7 +495,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
!(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -1380,9 +1380,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int imr, ctrl;
int locked = 1;
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
+ if (oops_in_progress)
locked = uart_port_trylock_irqsave(port, &flags);
else
uart_port_lock_irqsave(port, &flags);
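
The xilinx_uartps change above switches to the deferred-sysrq helpers, so the sysrq character is only recorded under the port lock and handle_sysrq() runs after the lock is dropped. A minimal sketch of that ISR shape; my_rx_ready and my_rx_byte are hypothetical register helpers standing in for the real MMIO accessors:

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty_flip.h>

/* Hypothetical FIFO helpers, not real driver functions. */
static bool my_rx_ready(struct uart_port *port) { return false; }
static u8 my_rx_byte(struct uart_port *port) { return 0; }

static irqreturn_t my_uart_isr(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;

	uart_port_lock(port);

	while (my_rx_ready(port)) {
		u8 ch = my_rx_byte(port);

		/* Only queues the sysrq char; nothing is handled under the lock. */
		if (uart_prepare_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);

	/* Drops the port lock, then runs handle_sysrq() if a char was queued. */
	uart_unlock_and_check_sysrq(port);
	return IRQ_HANDLED;
}
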
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dcb1769c3625..449dbd216460 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2622,14 +2622,13 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
- char comm[TASK_COMM_LEN];
int flags;
flags = ss->flags & ASYNC_DEPRECATED;
if (flags)
pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
- __func__, get_task_comm(comm, current), flags);
+ __func__, current->comm, flags);
if (!tty->ops->set_serial)
return -ENOTTY;
@@ -3618,7 +3617,7 @@ void console_sysfs_notify(void)
sysfs_notify(&consdev->kobj, NULL, "active");
}
-static struct ctl_table tty_table[] = {
+static const struct ctl_table tty_table[] = {
{
.procname = "legacy_tiocsti",
.data = &tty_legacy_tiocsti,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 564341f1a74f..0bd6544e30a6 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -192,6 +192,20 @@ int set_selection_user(const struct tiocl_selection __user *sel,
if (copy_from_user(&v, sel, sizeof(*sel)))
return -EFAULT;
+ /*
+ * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
+ * use without CAP_SYS_ADMIN as they do not modify the selection.
+ */
+ switch (v.sel_mode) {
+ case TIOCL_SELCLEAR:
+ case TIOCL_SELPOINTER:
+ case TIOCL_SELMOUSEREPORT:
+ break;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ }
+
return set_selection_kernel(&v, tty);
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 96842ce817af..be5564ed8c01 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3345,8 +3345,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type) {
case TIOCL_SETSEL:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return set_selection_user(param, tty);
case TIOCL_PASTESEL:
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 796e37a1d859..3438269a5440 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -1439,6 +1439,7 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_dev_info *dev_info = &hba->dev_info;
struct scsi_device *sdev;
+ unsigned int memflags;
unsigned int rtt;
int ret;
@@ -1458,14 +1459,16 @@ static ssize_t max_number_of_rtt_store(struct device *dev,
ufshcd_rpm_get_sync(hba);
+ memflags = memalloc_noio_save();
shost_for_each_device(sdev, hba->host)
- blk_mq_freeze_queue(sdev->request_queue);
+ blk_mq_freeze_queue_nomemsave(sdev->request_queue);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
shost_for_each_device(sdev, hba->host)
- blk_mq_unfreeze_queue(sdev->request_queue);
+ blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
+ memalloc_noio_restore(memflags);
ufshcd_rpm_put_sync(hba);
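
The ufs-sysfs hunk above wraps the queue freeze in a memalloc_noio scope, so any allocation made while the queues are frozen is implicitly GFP_NOIO and cannot recurse into the device being frozen. A minimal sketch of that scoping, with my_reconfigure as an illustrative name:

#include <linux/sched/mm.h>

static void my_reconfigure(void)
{
	unsigned int memflags;

	/* Every allocation between save and restore behaves as GFP_NOIO. */
	memflags = memalloc_noio_save();

	/* ... freeze the request queues and rewrite the device attribute ... */

	memalloc_noio_restore(memflags);
}
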
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 6c09d97ae006..8d4ad0a3f2cf 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -216,6 +216,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
return;
bsg_remove_queue(hba->bsg_queue);
+ hba->bsg_queue = NULL;
device_del(bsg_dev);
put_device(bsg_dev);
@@ -257,6 +258,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
+ device_del(bsg_dev);
goto out;
}
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index a714dad82cd1..694ff7578fc1 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -17,20 +17,14 @@ static const struct ufs_crypto_alg_entry {
},
};
-static int ufshcd_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot)
+static void ufshcd_program_key(struct ufs_hba *hba,
+ const union ufs_crypto_cfg_entry *cfg, int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
- int err = 0;
ufshcd_hold(hba);
- if (hba->vops && hba->vops->program_key) {
- err = hba->vops->program_key(hba, cfg, slot);
- goto out;
- }
-
/* Ensure that CFGE is cleared before programming the key */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
for (i = 0; i < 16; i++) {
@@ -43,17 +37,14 @@ static int ufshcd_program_key(struct ufs_hba *hba,
/* Dword 16 must be written last */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
-out:
ufshcd_release(hba);
- return err;
}
static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
@@ -61,7 +52,6 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
int i;
int cap_idx = -1;
union ufs_crypto_cfg_entry cfg = {};
- int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
@@ -89,25 +79,25 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
memcpy(cfg.crypto_key, key->raw, key->size);
}
- err = ufshcd_program_key(hba, &cfg, slot);
+ ufshcd_program_key(hba, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
- return err;
+ return 0;
}
static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
union ufs_crypto_cfg_entry cfg = {};
- return ufshcd_program_key(hba, &cfg, slot);
+ ufshcd_program_key(hba, &cfg, slot);
+ return 0;
}
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3094f3c89e82..1893a7ad9531 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -1447,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
@@ -1465,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1487,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1508,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1569,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1577,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1613,7 +1610,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1677,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1697,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1791,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
+ spin_lock_init(&hba->clk_scaling.lock);
+
hba->clk_scaling.workq = alloc_ordered_workqueue(
"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
@@ -1811,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1858,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1874,11 +1870,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
@@ -1907,17 +1903,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1925,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1957,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work)
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1977,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2013,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2032,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2064,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2072,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2082,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
@@ -2154,19 +2149,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2182,18 +2175,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2201,7 +2193,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2418,12 +2409,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
else
hba->lsdb_sup = true;
- if (!hba->mcq_sup)
- return 0;
-
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
- hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
- hba->mcq_capabilities);
return 0;
}
@@ -3118,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -4812,20 +4803,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
- unsigned long flags;
int err;
- /*
- * Obtain the host lock to prevent that the controller is disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_disable_irq(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
+ ufshcd_enable_irq(hba);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -5195,12 +5180,12 @@ set_qdepth:
}
/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * ufshcd_sdev_init - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: success.
*/
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
+static int ufshcd_sdev_init(struct scsi_device *sdev)
{
struct ufs_hba *hba;
@@ -5243,14 +5228,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_device_configure - adjust SCSI device configurations
+ * ufshcd_sdev_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
* @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ufshcd_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
@@ -5281,10 +5266,10 @@ static int ufshcd_device_configure(struct scsi_device *sdev,
}
/**
- * ufshcd_slave_destroy - remove SCSI device configurations
+ * ufshcd_sdev_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
+static void ufshcd_sdev_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
@@ -5994,24 +5979,6 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
-}
-
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6232,7 +6199,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
ufs_debugfs_exception_event(hba, status);
}
@@ -8133,31 +8100,6 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
-static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u32 ext_ufs_feature;
- u32 ext_iid_en = 0;
- int err;
-
- /* Only UFS-4.0 and above may support EXT_IID */
- if (dev_info->wspecversion < 0x400)
- goto out;
-
- ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
- goto out;
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
- if (err)
- dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
-
-out:
- dev_info->b_ext_iid_en = ext_iid_en;
-}
-
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8259,7 +8201,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -8351,9 +8295,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
- if (hba->ext_iid_sup)
- ufshcd_ext_iid_probe(hba, desc_buf);
-
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8967,9 +8908,9 @@ static const struct scsi_host_template ufshcd_driver_template = {
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
- .slave_alloc = ufshcd_slave_alloc,
- .device_configure = ufshcd_device_configure,
- .slave_destroy = ufshcd_slave_destroy,
+ .sdev_init = ufshcd_sdev_init,
+ .sdev_configure = ufshcd_sdev_configure,
+ .sdev_destroy = ufshcd_sdev_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
@@ -9155,7 +9096,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9205,12 +9145,11 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
@@ -10293,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10321,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10347,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to add ufshcd dealloc action\n");
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
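
devm_add_action_or_reset() registers a release callback that devres runs automatically when hba->dev is unbound, which is why the explicit ufshcd_dealloc_host() calls disappear from the PCI and platform glue further down. A minimal sketch of the pattern, with illustrative names that are not part of the patch:

#include <linux/device.h>
#include <linux/slab.h>

static void example_release(void *data)
{
	kfree(data);	/* runs automatically on driver unbind or on probe failure */
}

static int example_probe_step(struct device *dev)
{
	void *buf = kzalloc(64, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	/* on registration failure the action runs immediately ("or_reset"), freeing buf */
	err = devm_add_action_or_reset(dev, example_release, buf);
	if (err)
		return err;

	return 0;
}
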
@@ -10475,6 +10425,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 91e94fe990b4..23b9f6efa047 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -112,11 +112,18 @@ static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
qcom_ice_enable(host->ice);
}
+static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */
+
static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
struct ufs_hba *hba = host->hba;
+ struct blk_crypto_profile *profile = &hba->crypto_profile;
struct device *dev = hba->dev;
struct qcom_ice *ice;
+ union ufs_crypto_capabilities caps;
+ union ufs_crypto_cap_entry cap;
+ int err;
+ int i;
ice = of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
@@ -128,8 +135,38 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
return PTR_ERR_OR_ZERO(ice);
host->ice = ice;
- hba->caps |= UFSHCD_CAP_CRYPTO;
+ /* Initialize the blk_crypto_profile */
+
+ caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
+
+ /* The number of keyslots supported is (CFGC+1) */
+ err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
+ if (err)
+ return err;
+
+ profile->ll_ops = ufs_qcom_crypto_ops;
+ profile->max_dun_bytes_supported = 8;
+ profile->dev = dev;
+
+ /*
+ * Currently this driver only supports AES-256-XTS. All known versions
+ * of ICE support it, but to be safe make sure it is really declared in
+ * the crypto capability registers. The crypto capability registers
+ * also give the supported data unit size(s).
+ */
+ for (i = 0; i < caps.num_crypto_cap; i++) {
+ cap.reg_val = cpu_to_le32(ufshcd_readl(hba,
+ REG_UFS_CRYPTOCAP +
+ i * sizeof(__le32)));
+ if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS &&
+ cap.key_size == UFS_CRYPTO_KEY_SIZE_256)
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
+ cap.sdus_mask * 512;
+ }
+
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE;
return 0;
}
@@ -149,34 +186,49 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
return 0;
}
-static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg,
- int slot)
+static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- union ufs_crypto_cap_entry cap;
- bool config_enable =
- cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
+ int err;
/* Only AES-256-XTS has been tested so far. */
- cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
- cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
+ if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
return -EOPNOTSUPP;
- if (config_enable)
- return qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
- else
- return qcom_ice_evict_key(host->ice, slot);
+ ufshcd_hold(hba);
+ err = qcom_ice_program_key(host->ice,
+ QCOM_ICE_CRYPTO_ALG_AES_XTS,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256,
+ key->raw,
+ key->crypto_cfg.data_unit_size / 512,
+ slot);
+ ufshcd_release(hba);
+ return err;
}
-#else
+static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot)
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ ufshcd_hold(hba);
+ err = qcom_ice_evict_key(host->ice, slot);
+ ufshcd_release(hba);
+ return err;
+}
-#define ufs_qcom_ice_program_key NULL
+static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
+ .keyslot_program = ufs_qcom_ice_keyslot_program,
+ .keyslot_evict = ufs_qcom_ice_keyslot_evict,
+};
+
+#else
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -1826,7 +1878,6 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
- .program_key = ufs_qcom_ice_program_key,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
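
With UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE set, the block layer now calls back into the driver through blk_crypto_ll_ops instead of the removed ->program_key() vop, and ufs_hba_from_crypto_profile() recovers the hba because the profile is embedded in the hba structure. A hedged sketch of how such a handler is typically wired up; everything named example_* is illustrative and the container layout is only assumed:

#include <linux/blk-crypto-profile.h>
#include <linux/container_of.h>
#include <linux/errno.h>

struct example_ctlr {				/* illustrative controller, not from the patch */
	struct blk_crypto_profile crypto_profile;
	/* ... hardware keyslot state ... */
};

/* recover the controller from the profile the block layer hands back */
static struct example_ctlr *example_from_profile(struct blk_crypto_profile *profile)
{
	return container_of(profile, struct example_ctlr, crypto_profile);
}

static int example_keyslot_program(struct blk_crypto_profile *profile,
				   const struct blk_crypto_key *key,
				   unsigned int slot)
{
	struct example_ctlr *ctlr = example_from_profile(profile);

	if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
		return -EOPNOTSUPP;

	/* program key->raw into ctlr's hardware keyslot 'slot' here */
	return ctlr ? 0 : -ENODEV;
}

static const struct blk_crypto_ll_ops example_crypto_ops = {
	.keyslot_program = example_keyslot_program,
	/* .keyslot_evict would be wired up the same way */
};
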
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index ea39c5d5b8cf..9cfcaad23cf9 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
}
/**
@@ -605,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 505572d4fa87..ffe5d1d2b215 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
err = ufshcd_alloc_host(dev, &hba);
if (err) {
dev_err(dev, "Allocation failed\n");
- goto out;
+ return err;
}
hba->vops = vops;
@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
if (err) {
dev_err(dev, "%s: clock parse failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_parse_regulator_info(hba);
if (err) {
dev_err(dev, "%s: regulator init failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
ufshcd_init_lanes_per_dir(hba);
@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_parse_operating_points(hba);
if (err) {
dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
err);
- goto dealloc_host;
+ return err;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 004a549c6c7d..d93ed4e86a17 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -565,7 +565,7 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
- ret = -EIO;
+ ret = EPOLLERR;
mutex_unlock(&idev->info_lock);
if (ret)
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index c70dd81bfc61..31aa75110ba5 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -167,6 +167,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
}
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
+ if (!uioinfo->name)
+ return -ENOMEM;
uioinfo->version = "devicetree";
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 3976360d0096..1b19b5647495 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -296,51 +296,51 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
- pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
- if (pdata->recv_buf == NULL) {
- ret = -ENOMEM;
- goto fail_free_ring;
+ if (channel->device_id == HV_NIC) {
+ pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
+ if (!pdata->recv_buf) {
+ ret = -ENOMEM;
+ goto fail_free_ring;
+ }
+
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
+ RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+ if (ret) {
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
+ goto fail_close;
+ }
+
+ /* put Global Physical Address Label in name */
+ snprintf(pdata->recv_name, sizeof(pdata->recv_name),
+ "recv:%u", pdata->recv_gpadl.gpadl_handle);
+ pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
+ pdata->info.mem[RECV_BUF_MAP].addr = (uintptr_t)pdata->recv_buf;
+ pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
+ pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+
+ pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
+ if (!pdata->send_buf) {
+ ret = -ENOMEM;
+ goto fail_close;
+ }
+
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
+ SEND_BUFFER_SIZE, &pdata->send_gpadl);
+ if (ret) {
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
+ goto fail_close;
+ }
+
+ snprintf(pdata->send_name, sizeof(pdata->send_name),
+ "send:%u", pdata->send_gpadl.gpadl_handle);
+ pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
+ pdata->info.mem[SEND_BUF_MAP].addr = (uintptr_t)pdata->send_buf;
+ pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
+ pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
}
- ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
- RECV_BUFFER_SIZE, &pdata->recv_gpadl);
- if (ret) {
- if (!pdata->recv_gpadl.decrypted)
- vfree(pdata->recv_buf);
- goto fail_close;
- }
-
- /* put Global Physical Address Label in name */
- snprintf(pdata->recv_name, sizeof(pdata->recv_name),
- "recv:%u", pdata->recv_gpadl.gpadl_handle);
- pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
- pdata->info.mem[RECV_BUF_MAP].addr
- = (uintptr_t)pdata->recv_buf;
- pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
- pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
- pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
- if (pdata->send_buf == NULL) {
- ret = -ENOMEM;
- goto fail_close;
- }
-
- ret = vmbus_establish_gpadl(channel, pdata->send_buf,
- SEND_BUFFER_SIZE, &pdata->send_gpadl);
- if (ret) {
- if (!pdata->send_gpadl.decrypted)
- vfree(pdata->send_buf);
- goto fail_close;
- }
-
- snprintf(pdata->send_name, sizeof(pdata->send_name),
- "send:%u", pdata->send_gpadl.gpadl_handle);
- pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
- pdata->info.mem[SEND_BUF_MAP].addr
- = (uintptr_t)pdata->send_buf;
- pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
- pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
pdata->info.priv = pdata;
pdata->device = dev;
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 4a3f0f958256..97edf767ecee 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>
@@ -1671,12 +1672,12 @@ static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
"SupDir IN: %s, OUT: %s\n",
pep->name, 1024,
- (pep->endpoint.caps.type_control) ? "yes" : "no",
- (pep->endpoint.caps.type_int) ? "yes" : "no",
- (pep->endpoint.caps.type_bulk) ? "yes" : "no",
- (pep->endpoint.caps.type_iso) ? "yes" : "no",
- (pep->endpoint.caps.dir_in) ? "yes" : "no",
- (pep->endpoint.caps.dir_out) ? "yes" : "no");
+ str_yes_no(pep->endpoint.caps.type_control),
+ str_yes_no(pep->endpoint.caps.type_int),
+ str_yes_no(pep->endpoint.caps.type_bulk),
+ str_yes_no(pep->endpoint.caps.type_iso),
+ str_yes_no(pep->endpoint.caps.dir_in),
+ str_yes_no(pep->endpoint.caps.dir_out));
INIT_LIST_HEAD(&pep->pending_list);
}
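
str_yes_no() and the related helpers used throughout these conversions come from <linux/string_choices.h>; each maps a boolean (or a count, for str_plural()) to a fixed string, so the former ternary expressions collapse into a single call. A small illustrative use, with a made-up message and values:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void example_report(bool enabled, int nports)
{
	/* "enabled"/"disabled" and the trailing "s" are chosen by the helpers */
	pr_info("link is %s, %d port%s detected\n",
		str_enabled_disabled(enabled), nports, str_plural(nports));
}
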
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 465e9267b49c..98980a23e1c2 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -529,9 +529,7 @@ int cdns_resume(struct cdns *cdns)
int ret = 0;
if (cdns_power_is_lost(cdns)) {
- if (cdns->role_sw) {
- cdns->role = cdns_role_get(cdns->role_sw);
- } else {
+ if (!cdns->role_sw) {
real_role = cdns_hw_role_state_machine(cdns);
if (real_role != cdns->role) {
ret = cdns_hw_role_switch(cdns);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 0cce19208370..ced6076a8248 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
@@ -56,7 +57,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
if (ret) {
dev_err(dev,
"Failed to %s vbus regulator, ret=%d\n",
- enable ? "enable" : "disable", ret);
+ str_enable_disable(enable), ret);
return ret;
}
priv->enabled = enable;
@@ -256,8 +257,14 @@ static int ci_ehci_hub_control(
struct device *dev = hcd->self.controller;
struct ci_hdrc *ci = dev_get_drvdata(dev);
- port_index = wIndex & 0xff;
- port_index -= (port_index > 0);
+ /*
+ * Avoid out-of-bounds values while calculating the port index
+ * from wIndex. The compiler doesn't like pointers to invalid
+ * addresses, even if they are never used.
+ */
+ port_index = (wIndex - 1) & 0xff;
+ if (port_index >= HCS_N_PORTS_MAX)
+ port_index = 0;
status_reg = &ehci->regs->port_status[port_index];
spin_lock_irqsave(&ehci->lock, flags);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index ff1a941fd2ed..e2527faa6592 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -87,7 +87,7 @@
/* Get two-int array: [0]=vendor ID, [1]=product ID: */
#define LPIOC_GET_VID_PID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_VID_PID, len)
/* Perform class specific soft reset */
-#define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0);
+#define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0)
/*
* A DEVICE_ID string may include the printer's serial number.
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 871cf199b6bf..fc0845f681be 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -41,6 +41,12 @@ const char *usb_ep_type_string(int ep_type)
}
EXPORT_SYMBOL_GPL(usb_ep_type_string);
+/**
+ * usb_otg_state_string() - returns human readable name of OTG state.
+ * @state: the OTG state to return the human readable name of. If it's not
+ * any of the states defined in usb_otg_state enum, 'UNDEFINED' will be
+ * returned.
+ */
const char *usb_otg_state_string(enum usb_otg_state state)
{
static const char *const names[] = {
@@ -179,6 +185,14 @@ static const char *const usb_dr_modes[] = {
[USB_DR_MODE_OTG] = "otg",
};
+/**
+ * usb_get_dr_mode_from_string() - Get dual role mode for given string
+ * @str: String to find the corresponding dual role mode for
+ *
+ * This function performs a lookup for the given string and returns the
+ * corresponding enum usb_dr_mode. If no match for the string could be found,
+ * 'USB_DR_MODE_UNKNOWN' is returned.
+ */
static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
{
int ret;
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index c84b4a700084..aa710b50791b 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
+#include <linux/string_choices.h>
#include <linux/usb/role.h>
#define USB_GPIO_DEB_MS 20 /* ms */
@@ -111,7 +112,7 @@ static void usb_conn_detect_cable(struct work_struct *work)
if (info->vbus)
dev_dbg(info->dev, "vbus regulator is %s\n",
- regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
+ str_enabled_disabled(regulator_is_enabled(info->vbus)));
power_supply_changed(info->charger);
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25a00f974934..f7bf8d1de3ad 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -9,6 +9,7 @@
#include <linux/usb/quirks.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/device.h>
#include <asm/byteorder.h>
#include "usb.h"
@@ -18,12 +19,6 @@
#define USB_MAXCONFIG 8 /* Arbitrary limit */
-
-static inline const char *plural(int n)
-{
- return (n == 1 ? "" : "s");
-}
-
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
@@ -484,7 +479,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "endpoint");
+ n, str_plural(n), "endpoint");
return retval;
skip_to_next_endpoint_or_interface_descriptor:
@@ -563,7 +558,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
alt->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "interface");
+ n, str_plural(n), "interface");
buffer += i;
size -= i;
@@ -605,7 +600,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
dev_notice(ddev, "config %d interface %d altsetting %d has %d "
"endpoint descriptor%s, different from the interface "
"descriptor's value: %d\n",
- cfgno, inum, asnum, n, plural(n), num_ep_orig);
+ cfgno, inum, asnum, n, str_plural(n), num_ep_orig);
return buffer - buffer0;
skip_to_next_interface_descriptor:
@@ -664,7 +659,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if (size2 < sizeof(struct usb_descriptor_header)) {
dev_notice(ddev, "config %d descriptor has %d excess "
"byte%s, ignoring\n",
- cfgno, size2, plural(size2));
+ cfgno, size2, str_plural(size2));
break;
}
@@ -754,7 +749,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if (n != nintf)
dev_notice(ddev, "config %d has %d interface%s, different from "
"the descriptor's value: %d\n",
- cfgno, n, plural(n), nintf_orig);
+ cfgno, n, str_plural(n), nintf_orig);
else if (n == 0)
dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
config->desc.bNumInterfaces = nintf = n;
@@ -798,7 +793,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
config->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- n, plural(n), "configuration");
+ n, str_plural(n), "configuration");
buffer += i;
size -= i;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f203fdbfb6f6..460d4dde5994 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1086,15 +1086,14 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
pr_info("%s: registered new interface driver %s\n",
usbcore_name, new_driver->name);
-out:
- return retval;
+ return 0;
out_newid:
driver_unregister(&new_driver->driver);
-
+out:
pr_err("%s: error %d registering interface driver %s\n",
usbcore_name, retval, new_driver->name);
- goto out;
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_register_driver);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index b134bff5c3fe..9c6ae5e1198b 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -21,14 +21,10 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/string_choices.h>
#include <uapi/linux/usb/audio.h>
#include "usb.h"
-static inline const char *plural(int n)
-{
- return (n == 1 ? "" : "s");
-}
-
static int is_rndis(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_COMM
@@ -194,18 +190,18 @@ int usb_choose_configuration(struct usb_device *udev)
if (insufficient_power > 0)
dev_info(&udev->dev, "rejected %d configuration%s "
"due to insufficient available bus power\n",
- insufficient_power, plural(insufficient_power));
+ insufficient_power, str_plural(insufficient_power));
if (best) {
i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
- i, num_configs, plural(num_configs));
+ i, num_configs, str_plural(num_configs));
} else {
i = -1;
dev_warn(&udev->dev,
"no configuration chosen from %d choice%s\n",
- num_configs, plural(num_configs));
+ num_configs, str_plural(num_configs));
}
return i;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a08f3f228e6d..56b534f59907 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -422,7 +422,12 @@ static int suspend_common(struct device *dev, pm_message_t msg)
bool do_wakeup;
int retval;
- do_wakeup = PMSG_IS_AUTO(msg) ? true : device_may_wakeup(dev);
+ if (PMSG_IS_AUTO(msg))
+ do_wakeup = true;
+ else if (PMSG_NO_WAKEUP(msg))
+ do_wakeup = false;
+ else
+ do_wakeup = device_may_wakeup(dev);
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
@@ -521,6 +526,11 @@ static int hcd_pci_suspend(struct device *dev)
return suspend_common(dev, PMSG_SUSPEND);
}
+static int hcd_pci_freeze(struct device *dev)
+{
+ return suspend_common(dev, PMSG_FREEZE);
+}
+
static int hcd_pci_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -590,6 +600,7 @@ static int hcd_pci_restore(struct device *dev)
#else
#define hcd_pci_suspend NULL
+#define hcd_pci_freeze NULL
#define hcd_pci_suspend_noirq NULL
#define hcd_pci_poweroff_late NULL
#define hcd_pci_resume_noirq NULL
@@ -624,7 +635,7 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = hcd_pci_suspend,
+ .freeze = hcd_pci_freeze,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
.thaw = hcd_pci_resume,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0b2490347b9f..a75cf1f6d741 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -415,7 +415,7 @@ ascii2desc(char const *s, u8 *buf, unsigned len)
static unsigned
rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
- char buf[100];
+ char buf[160];
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21ac9b464696..c3f839637cb5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
@@ -1496,7 +1497,7 @@ static int hub_configure(struct usb_hub *hub,
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
- (maxchild == 1) ? "" : "s");
+ str_plural(maxchild));
hub->ports = kcalloc(maxchild, sizeof(struct usb_port *), GFP_KERNEL);
if (!hub->ports) {
@@ -4139,14 +4140,14 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
break;
default:
dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
if (udev->state != USB_STATE_CONFIGURED) {
dev_dbg(&udev->dev, "%s: Can't %s %s state "
"for unconfigured device.\n",
- __func__, enable ? "enable" : "disable",
+ __func__, str_enable_disable(enable),
usb3_lpm_names[state]);
return 0;
}
@@ -4172,8 +4173,7 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
}
if (ret < 0) {
dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
- enable ? "Enable" : "Disable",
- usb3_lpm_names[state]);
+ str_enable_disable(enable), usb3_lpm_names[state]);
return -EBUSY;
}
return 0;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index e857e532b35a..f54198171b6a 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -9,6 +9,7 @@
#include <linux/kstrtox.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
@@ -25,7 +26,7 @@ static ssize_t early_stop_show(struct device *dev,
{
struct usb_port *port_dev = to_usb_port(dev);
- return sysfs_emit(buf, "%s\n", port_dev->early_stop ? "yes" : "no");
+ return sysfs_emit(buf, "%s\n", str_yes_no(port_dev->early_stop));
}
static ssize_t early_stop_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 13171454f959..67732c791c93 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,6 +394,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* TOSHIBA TransMemory-Mx */
+ { USB_DEVICE(0x0930, 0x1408), .driver_info = USB_QUIRK_NO_LPM },
+
/* NVIDIA Jetson devices in Force Recovery mode */
{ USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index b4cba23831ac..23f3cb1989f4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -854,7 +854,7 @@ static const struct attribute_group dev_string_attr_grp = {
static ssize_t
descriptors_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -890,11 +890,11 @@ descriptors_read(struct file *filp, struct kobject *kobj,
}
return count - nleft;
}
-static BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
+static const BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
static ssize_t
bos_descriptors_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -913,12 +913,12 @@ bos_descriptors_read(struct file *filp, struct kobject *kobj,
}
return n;
}
-static BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
+static const BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
/* When modifying this list, be sure to modify dev_bin_attrs_are_visible()
* accordingly.
*/
-static struct bin_attribute *dev_bin_attrs[] = {
+static const struct bin_attribute *const dev_bin_attrs[] = {
&bin_attr_descriptors,
&bin_attr_bos_descriptors,
NULL
@@ -944,7 +944,7 @@ static umode_t dev_bin_attrs_are_visible(struct kobject *kobj,
}
static const struct attribute_group dev_bin_attr_grp = {
- .bin_attrs = dev_bin_attrs,
+ .bin_attrs_new = dev_bin_attrs,
.is_bin_visible = dev_bin_attrs_are_visible,
};
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f219c82e9619..dfa1b5fe48dc 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1479,6 +1479,26 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
}
+ /*
+ * STAR 9001346572: This issue affects DWC_usb31 versions 1.80a and
+ * prior. When an active endpoint not currently cached in the host
+ * controller is chosen to be cached to the same index as an endpoint
+ * receiving NAKs, the endpoint receiving NAKs enters continuous
+ * retry mode. This prevents it from being evicted from the host
+ * controller cache, blocking the new endpoint from being cached and
+ * serviced.
+ *
+ * To resolve this, for controller versions 1.70a and 1.80a, set the
+ * GUCTL3 bit[16] (USB2.0 Internal Retry Disable) to 1. This bit
+ * disables the USB2.0 internal retry feature. The GUCTL3[16] register
+ * function is available only from version 1.70a.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC31, 170A, 180A)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
+
return 0;
err_power_off_phy:
@@ -1664,8 +1684,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 tx_thr_num_pkt_prd = 0;
u8 tx_max_burst_prd = 0;
u8 tx_fifo_resize_max_num;
- const char *usb_psy_name;
- int ret;
/* default to highest possible threshold */
lpm_nyet_threshold = 0xf;
@@ -1700,13 +1718,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
- ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
- if (ret >= 0) {
- dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
- if (!dwc->usb_psy)
- dev_err(dev, "couldn't get usb power supply\n");
- }
-
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -2109,6 +2120,23 @@ static int dwc3_get_num_ports(struct dwc3 *dwc)
return 0;
}
+static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
+{
+ struct power_supply *usb_psy;
+ const char *usb_psy_name;
+ int ret;
+
+ ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
+ if (ret < 0)
+ return NULL;
+
+ usb_psy = power_supply_get_by_name(usb_psy_name);
+ if (!usb_psy)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return usb_psy;
+}
+
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2165,6 +2193,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_software_properties(dwc);
+ dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
+ if (IS_ERR(dwc->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
+
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset)) {
ret = PTR_ERR(dwc->reset);
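
Returning ERR_PTR(-EPROBE_DEFER) when the named power supply is not yet registered lets dwc3_probe() bail out through dev_err_probe(), which records the deferral reason without flooding the log and simply passes the error code through. A rough sketch of the same pattern; the clock resource and its name are illustrative, not part of the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "example");	/* illustrative resource */

	if (IS_ERR(clk))
		/* logs real errors, stays quiet for -EPROBE_DEFER, returns the code */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "couldn't get example clock\n");

	return 0;
}
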
@@ -2589,12 +2621,15 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto out;
ret = dwc3_resume_common(dwc, PMSG_RESUME);
if (ret)
pm_runtime_set_suspended(dev);
+out:
pm_runtime_enable(dev);
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index f11570c8ffd0..ac7c730f81ac 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -425,6 +425,7 @@
/* Global User Control Register 3 */
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
+#define DWC3_GUCTL3_USB20_RETRY_DISABLE BIT(16)
/* Device Configuration Register */
#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */
@@ -742,6 +743,7 @@ struct dwc3_event_buffer {
*/
struct dwc3_ep {
struct usb_ep endpoint;
+ struct delayed_work nostream_work;
struct list_head cancelled_list;
struct list_head pending_list;
struct list_head started_list;
@@ -764,7 +766,7 @@ struct dwc3_ep {
#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
-#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
+#define DWC3_EP_STREAM_PRIMED BIT(10)
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
#define DWC3_EP_TXFIFO_RESIZED BIT(12)
#define DWC3_EP_DELAY_STOP BIT(13)
@@ -957,7 +959,6 @@ struct dwc3_request {
struct usb_request request;
struct list_head list;
struct dwc3_ep *dep;
- struct scatterlist *sg;
struct scatterlist *start_sg;
unsigned int num_pending_sgs;
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 7d43da5f2897..c158364bc03e 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -108,6 +108,9 @@
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+#define USBSS_DEBUG_CFG_OFF 0x0
+#define USBSS_DEBUG_CFG_DISABLED 0x7
+
struct dwc3_am62 {
struct device *dev;
void __iomem *usbss;
@@ -117,6 +120,7 @@ struct dwc3_am62 {
unsigned int offset;
unsigned int vbus_divider;
u32 wakeup_stat;
+ void __iomem *phy_regs;
};
static const int dwc3_ti_rate_table[] = { /* in KHZ */
@@ -166,6 +170,7 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
if (ret)
return ret;
+ of_node_put(args.np);
am62->offset = args.args[0];
/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
@@ -184,15 +189,47 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
return 0;
}
+static int dwc3_ti_init(struct dwc3_am62 *am62)
+{
+ int ret;
+ u32 reg;
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(am62);
+ if (ret)
+ return ret;
+
+ /* Workaround Errata i2409 */
+ if (am62->phy_regs) {
+ reg = readl(am62->phy_regs + USB_PHY_PLL_REG12);
+ reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
+ writel(reg, am62->phy_regs + USB_PHY_PLL_REG12);
+ }
+
+ /* VBUS divider select */
+ reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
+ if (am62->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
+
+ clk_prepare_enable(am62->usb2_refclk);
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
+
+ return 0;
+}
+
static int dwc3_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_am62 *am62;
unsigned long rate;
- void __iomem *phy;
int i, ret;
- u32 reg;
am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
if (!am62)
@@ -228,29 +265,17 @@ static int dwc3_ti_probe(struct platform_device *pdev)
am62->rate_code = i;
- /* Read the syscon property and set the rate code */
- ret = phy_syscon_pll_refclk(am62);
- if (ret)
- return ret;
-
- /* Workaround Errata i2409 */
- phy = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(phy)) {
+ am62->phy_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(am62->phy_regs)) {
dev_err(dev, "can't map PHY IOMEM resource. Won't apply i2409 fix.\n");
- phy = NULL;
- } else {
- reg = readl(phy + USB_PHY_PLL_REG12);
- reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
- writel(reg, phy + USB_PHY_PLL_REG12);
+ am62->phy_regs = NULL;
}
- /* VBUS divider select */
am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
- reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
- if (am62->vbus_divider)
- reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
- dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
+ ret = dwc3_ti_init(am62);
+ if (ret)
+ return ret;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -258,7 +283,6 @@ static int dwc3_ti_probe(struct platform_device *pdev)
* Don't ignore its dependencies with its children
*/
pm_suspend_ignore_children(dev, false);
- clk_prepare_enable(am62->usb2_refclk);
pm_runtime_get_noresume(dev);
ret = of_platform_populate(node, NULL, NULL, dev);
@@ -267,11 +291,6 @@ static int dwc3_ti_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- /* Set mode valid bit to indicate role is valid */
- reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
- reg |= USBSS_MODE_VALID;
- dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
-
/* Device has capability to wakeup system from sleep */
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
@@ -339,6 +358,9 @@ static int dwc3_ti_suspend_common(struct device *dev)
dwc3_ti_writel(am62, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR);
}
+ /* just to track if module resets on suspend */
+ dwc3_ti_writel(am62, USBSS_DEBUG_CFG, USBSS_DEBUG_CFG_DISABLED);
+
clk_disable_unprepare(am62->usb2_refclk);
return 0;
@@ -349,7 +371,14 @@ static int dwc3_ti_resume_common(struct device *dev)
struct dwc3_am62 *am62 = dev_get_drvdata(dev);
u32 reg;
- clk_prepare_enable(am62->usb2_refclk);
+ reg = dwc3_ti_readl(am62, USBSS_DEBUG_CFG);
+ if (reg != USBSS_DEBUG_CFG_DISABLED) {
+ /* lost power/context */
+ dwc3_ti_init(am62);
+ } else {
+ dwc3_ti_writel(am62, USBSS_DEBUG_CFG, USBSS_DEBUG_CFG_OFF);
+ clk_prepare_enable(am62->usb2_refclk);
+ }
if (device_may_wakeup(dev)) {
/* Clear wakeup config enable bits */
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index b261c46124c6..fe74d11bb629 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -457,7 +457,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct device *dev = &pdev->dev;
- struct regulator *vbus_reg = NULL;
+ struct regulator *vbus_reg;
int ret;
int irq;
@@ -483,12 +483,11 @@ static int dwc3_omap_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- if (of_property_read_bool(node, "vbus-supply")) {
- vbus_reg = devm_regulator_get(dev, "vbus");
- if (IS_ERR(vbus_reg)) {
- dev_err(dev, "vbus init failed\n");
- return PTR_ERR(vbus_reg);
- }
+ vbus_reg = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(vbus_reg)) {
+ if (PTR_ERR(vbus_reg) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vbus_reg), "vbus init failed\n");
+ vbus_reg = NULL;
}
omap->dev = dev;
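
devm_regulator_get_optional() returns ERR_PTR(-ENODEV) when the supply is simply not described, so the probe can treat that case as "no VBUS regulator" and only fail on real errors, replacing the earlier of_property_read_bool() check. A sketch of the general shape, with an illustrative wrapper function:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_vbus(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "vbus");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -ENODEV) {
			*out = NULL;		/* supply not described: run without it */
			return 0;
		}
		return PTR_ERR(reg);		/* deferral or genuine failure */
	}

	*out = reg;
	return 0;
}
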
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index e16c3237180e..ef7c43008946 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -309,7 +309,6 @@ static void st_dwc3_remove(struct platform_device *pdev)
reset_control_assert(dwc3_data->rstc_rst);
}
-#ifdef CONFIG_PM_SLEEP
static int st_dwc3_suspend(struct device *dev)
{
struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
@@ -343,9 +342,8 @@ static int st_dwc3_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
static const struct of_device_id st_dwc3_match[] = {
{ .compatible = "st,stih407-dwc3" },
@@ -360,7 +358,7 @@ static struct platform_driver st_dwc3_driver = {
.driver = {
.name = "usb-st-dwc3",
.of_match_table = st_dwc3_match,
- .pm = &st_dwc3_dev_pm_ops,
+ .pm = pm_sleep_ptr(&st_dwc3_dev_pm_ops),
},
};
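
DEFINE_SIMPLE_DEV_PM_OPS() together with pm_sleep_ptr() replaces the old #ifdef CONFIG_PM_SLEEP guards: the ops structure and callbacks are always compiled, so they cannot bit-rot, and pm_sleep_ptr() resolves to NULL when sleep support is disabled so the unused code can be discarded. A minimal sketch with made-up callbacks:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* NULL when CONFIG_PM_SLEEP is off; the callbacks become dead code */
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};
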
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 31a654c6f15b..d27af65eb08a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -996,8 +996,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
/*
* All stream eps will reinitiate stream on NoStream
- * rejection until we can determine that the host can
- * prime after the first transfer.
+ * rejection.
*
* However, if the controller is capable of
* TXF_FLUSH_BYPASS, then IN direction endpoints will
@@ -2740,6 +2739,8 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
__dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
+
return ret;
}
@@ -3298,6 +3299,50 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
return dwc3_alloc_trb_pool(dep);
}
+#define nostream_work_to_dep(w) (container_of(to_delayed_work(w), struct dwc3_ep, nostream_work))
+static void dwc3_nostream_work(struct work_struct *work)
+{
+ struct dwc3_ep *dep = nostream_work_to_dep(work);
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->flags & DWC3_EP_STREAM_PRIMED)
+ goto out;
+
+ if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
+ (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
+ goto out;
+ /*
+ * If the host rejects a stream due to no active stream, by the
+ * USB and xHCI spec, the endpoint will be put back to idle
+ * state. When the host is ready (buffer added/updated), it will
+ * prime the endpoint to inform the usb device controller. This
+ * triggers the device controller to issue ERDY to restart the
+ * stream. However, some hosts don't follow this and keep the
+ * endpoint in the idle state. No prime will come despite host
+ * streams are updated, and the device controller will not be
+ * triggered to generate ERDY to move the next stream data. To
+ * workaround this and maintain compatibility with various
+ * hosts, force to reinitiate the stream until the host is ready
+ * instead of waiting for the host to prime the endpoint.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
+ unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
+
+ dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
+ } else {
+ dep->flags |= DWC3_EP_DELAY_START;
+ dwc3_stop_active_transfer(dep, true, true);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+out:
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
struct dwc3_ep *dep;
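
The NoStream handling now runs from a delayed work item instead of directly in the event handler: the event side only queues or cancels the work, and the work callback re-checks the endpoint flags under the lock roughly 100 ms later. The basic delayed-work lifecycle that this relies on, shown with illustrative names:

#include <linux/container_of.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_ep {
	struct delayed_work retry_work;	/* illustrative, mirrors nostream_work */
};

static void example_retry_fn(struct work_struct *work)
{
	struct example_ep *ep = container_of(to_delayed_work(work),
					     struct example_ep, retry_work);
	/* re-check endpoint state here; runs ~100 ms after being queued */
	(void)ep;
}

static void example_setup(struct example_ep *ep)
{
	INIT_DELAYED_WORK(&ep->retry_work, example_retry_fn);
}

static void example_event(struct example_ep *ep, bool primed)
{
	if (primed)
		cancel_delayed_work(&ep->retry_work);	/* host caught up: drop the retry */
	else
		queue_delayed_work(system_wq, &ep->retry_work,
				   msecs_to_jiffies(100));
}
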
@@ -3343,6 +3388,7 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
INIT_LIST_HEAD(&dep->pending_list);
INIT_LIST_HEAD(&dep->started_list);
INIT_LIST_HEAD(&dep->cancelled_list);
+ INIT_DELAYED_WORK(&dep->nostream_work, dwc3_nostream_work);
dwc3_debugfs_create_endpoint_dir(dep);
@@ -3742,66 +3788,27 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
- struct dwc3 *dwc = dep->dwc;
-
if (event->status == DEPEVT_STREAMEVT_FOUND) {
- dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
- goto out;
+ cancel_delayed_work(&dep->nostream_work);
+ dep->flags |= DWC3_EP_STREAM_PRIMED;
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+ return;
}
/* Note: NoStream rejection event param value is 0 and not 0xFFFF */
switch (event->parameters) {
case DEPEVT_STREAM_PRIME:
- /*
- * If the host can properly transition the endpoint state from
- * idle to prime after a NoStream rejection, there's no need to
- * force restarting the endpoint to reinitiate the stream. To
- * simplify the check, assume the host follows the USB spec if
- * it primed the endpoint more than once.
- */
- if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
- if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
- dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
- else
- dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
- }
-
+ cancel_delayed_work(&dep->nostream_work);
+ dep->flags |= DWC3_EP_STREAM_PRIMED;
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
break;
case DEPEVT_STREAM_NOSTREAM:
- if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
- !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
- (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
- !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
- break;
-
- /*
- * If the host rejects a stream due to no active stream, by the
- * USB and xHCI spec, the endpoint will be put back to idle
- * state. When the host is ready (buffer added/updated), it will
- * prime the endpoint to inform the usb device controller. This
- * triggers the device controller to issue ERDY to restart the
- * stream. However, some hosts don't follow this and keep the
- * endpoint in the idle state. No prime will come despite host
- * streams are updated, and the device controller will not be
- * triggered to generate ERDY to move the next stream data. To
- * workaround this and maintain compatibility with various
- * hosts, force to reinitiate the stream until the host is ready
- * instead of waiting for the host to prime the endpoint.
- */
- if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
- unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
-
- dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
- } else {
- dep->flags |= DWC3_EP_DELAY_START;
- dwc3_stop_active_transfer(dep, true, true);
- return;
- }
+ dep->flags &= ~DWC3_EP_STREAM_PRIMED;
+ if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM)
+ queue_delayed_work(system_wq, &dep->nostream_work,
+ msecs_to_jiffies(100));
break;
}
-
-out:
- dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index 49f25a70b32e..7fb4d4715e9f 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
@@ -119,8 +120,8 @@ void fotg210_vbus(struct fotg210 *fotg, bool enable)
ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
if (ret)
dev_err(fotg->dev, "failed to %s VBUS\n",
- enable ? "enable" : "disable");
- dev_info(fotg->dev, "%s: %s VBUS\n", __func__, enable ? "enable" : "disable");
+ str_enable_disable(enable));
+ dev_info(fotg->dev, "%s: %s VBUS\n", __func__, str_enable_disable(enable));
}
static int fotg210_probe(struct platform_device *pdev)
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 6cb7771e8a69..80841de845b0 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
+#include <linux/string_choices.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
@@ -387,8 +388,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->wLength = 0;
req->length = sizeof *event;
- DBG(cdev, "notify connect %s\n",
- ecm->is_open ? "true" : "false");
+ DBG(cdev, "notify connect %s\n", str_true_false(ecm->is_open));
ecm->notify_state = ECM_NOTIFY_SPEED;
break;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 8e761249d672..f60576a65ca6 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
+#include <linux/string_choices.h>
#include <linux/usb/cdc.h>
@@ -558,7 +559,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
req->length = sizeof *event;
DBG(cdev, "notify connect %s\n",
- ncm->is_open ? "true" : "false");
+ str_true_false(ncm->is_open));
ncm->notify_state = NCM_NOTIFY_NONE;
break;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 15bb3aa12aa8..5a2e1237f85c 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
@@ -50,7 +51,7 @@ static int bot_enqueue_cmd_cbw(struct f_uas *fu)
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+ ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
@@ -62,10 +63,11 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct f_uas *fu = cmd->fu;
transport_generic_free_cmd(&cmd->se_cmd, 0);
- if (req->status < 0) {
- pr_err("ERR %s(%d)\n", __func__, __LINE__);
+ if (req->status == -ESHUTDOWN)
return;
- }
+
+ if (req->status < 0)
+ pr_err("ERR %s(%d)\n", __func__, __LINE__);
/* CSW completed, wait for next CBW */
bot_enqueue_cmd_cbw(fu);
@@ -136,7 +138,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
}
req->complete = bot_err_compl;
req->context = cmd;
- req->buf = fu->cmd.buf;
+ req->buf = fu->cmd[0].buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
@@ -196,6 +198,11 @@ static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
if (req->status < 0)
pr_err("ERR %s(%d)\n", __func__, __LINE__);
+ if (req->status == -ESHUTDOWN) {
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ return;
+ }
+
bot_send_status(cmd, true);
}
@@ -244,11 +251,8 @@ static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
- init_completion(&cmd->write_complete);
cmd->fu = fu;
if (!cmd->data_len) {
@@ -256,22 +260,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
@@ -279,8 +267,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
if (ret)
pr_err("%s(%d)\n", __func__, __LINE__);
- wait_for_completion(&cmd->write_complete);
- target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -292,14 +278,31 @@ static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
struct f_uas *fu = req->context;
int ret;
+ if (req->status == -ESHUTDOWN)
+ return;
+
fu->flags &= ~USBG_BOT_CMD_PEND;
- if (req->status < 0)
+ if (req->status < 0) {
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+
+ dev_err(&gadget->dev, "BOT command req err (%d)\n", req->status);
+ bot_enqueue_cmd_cbw(fu);
return;
+ }
ret = bot_submit_command(fu, req->buf, req->actual);
- if (ret)
+ if (ret) {
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
+ if (!(fu->flags & USBG_BOT_WEDGED))
+ usb_ep_set_wedge(fu->ep_in);
+
+ fu->flags |= USBG_BOT_WEDGED;
+ bot_enqueue_cmd_cbw(fu);
+ } else if (fu->flags & USBG_BOT_WEDGED) {
+ fu->flags &= ~USBG_BOT_WEDGED;
+ usb_ep_clear_halt(fu->ep_in);
+ }
}
static int bot_prepare_reqs(struct f_uas *fu)
@@ -314,8 +317,8 @@ static int bot_prepare_reqs(struct f_uas *fu)
if (!fu->bot_req_out)
goto err_out;
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
+ fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+ if (!fu->cmd[0].req)
goto err_cmd;
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
@@ -327,27 +330,27 @@ static int bot_prepare_reqs(struct f_uas *fu)
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
+ fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+ if (!fu->cmd[0].buf)
goto err_buf;
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
+ fu->cmd[0].req->complete = bot_cmd_complete;
+ fu->cmd[0].req->buf = fu->cmd[0].buf;
+ fu->cmd[0].req->length = fu->ep_out->maxpacket;
+ fu->cmd[0].req->context = fu;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
+ kfree(fu->cmd[0].buf);
+ fu->cmd[0].buf = NULL;
err_buf:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
+ fu->cmd[0].req = NULL;
err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
@@ -372,16 +375,16 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
+ usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
- kfree(fu->cmd.buf);
+ kfree(fu->cmd[0].buf);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
+ fu->cmd[0].req = NULL;
fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
+ fu->cmd[0].buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
@@ -441,14 +444,10 @@ static int usbg_bot_setup(struct usb_function *f,
pr_err("No LUNs configured?\n");
return -EINVAL;
}
- /*
- * If 4 LUNs are present we return 3 i.e. LUN 0..3 can be
- * accessed. The upper limit is 0xf
- */
luns--;
- if (luns > 0xf) {
+ if (luns > US_BULK_MAX_LUN_LIMIT) {
pr_info_once("Limiting the number of luns to 16\n");
- luns = 0xf;
+ luns = US_BULK_MAX_LUN_LIMIT;
}
ret_lun = cdev->req->buf;
*ret_lun = luns;
@@ -457,6 +456,11 @@ static int usbg_bot_setup(struct usb_function *f,
case US_BULK_RESET_REQUEST:
/* XXX maybe we should remove previous requests for IN + OUT */
+ if (fu->flags & USBG_BOT_WEDGED) {
+ fu->flags &= ~USBG_BOT_WEDGED;
+ usb_ep_clear_halt(fu->ep_in);
+ }
+
bot_enqueue_cmd_cbw(fu);
return 0;
}
@@ -465,6 +469,45 @@ static int usbg_bot_setup(struct usb_function *f,
/* Start uas.c code */
+static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
+{
+ switch (code) {
+ case TMR_FUNCTION_FAILED:
+ return RC_TMF_FAILED;
+ case TMR_FUNCTION_COMPLETE:
+ case TMR_TASK_DOES_NOT_EXIST:
+ return RC_TMF_COMPLETE;
+ case TMR_LUN_DOES_NOT_EXIST:
+ return RC_INCORRECT_LUN;
+ case TMR_FUNCTION_REJECTED:
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ default:
+ return RC_TMF_NOT_SUPPORTED;
+ }
+}
+
+static unsigned char uasp_to_tcm_func(int code)
+{
+ switch (code) {
+ case TMF_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case TMF_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case TMF_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case TMF_LOGICAL_UNIT_RESET:
+ return TMR_LUN_RESET;
+ case TMF_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ case TMF_I_T_NEXUS_RESET:
+ case TMF_QUERY_TASK:
+ case TMF_QUERY_TASK_SET:
+ case TMF_QUERY_ASYNC_EVENT:
+ default:
+ return TMR_UNKNOWN;
+ }
+}
+
static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
{
/* We have either all three allocated or none */
@@ -482,10 +525,14 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
static void uasp_free_cmdreq(struct f_uas *fu)
{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
+ int i;
+
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
+ kfree(fu->cmd[i].buf);
+ fu->cmd[i].req = NULL;
+ fu->cmd[i].buf = NULL;
+ }
}
static void uasp_cleanup_old_alt(struct f_uas *fu)
@@ -500,7 +547,7 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
usb_ep_disable(fu->ep_status);
usb_ep_disable(fu->ep_cmd);
- for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
+ for (i = 0; i < USBG_NUM_CMDS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
uasp_free_cmdreq(fu);
}
@@ -512,7 +559,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
if (!gadget->sg_supported) {
cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
@@ -532,6 +579,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
}
stream->req_in->is_last = 1;
+ stream->req_in->stream_id = cmd->tag;
stream->req_in->complete = uasp_status_data_cmpl;
stream->req_in->length = se_cmd->data_length;
stream->req_in->context = cmd;
@@ -544,7 +592,7 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct sense_iu *iu = &cmd->sense_iu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
cmd->state = UASP_QUEUE_COMMAND;
iu->iu_id = IU_ID_STATUS;
@@ -556,20 +604,76 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
iu->status = se_cmd->scsi_status;
stream->req_status->is_last = 1;
+ stream->req_status->stream_id = cmd->tag;
stream->req_status->context = cmd;
stream->req_status->length = se_cmd->scsi_sense_length + 16;
stream->req_status->buf = iu;
stream->req_status->complete = uasp_status_data_cmpl;
}
+static void uasp_prepare_response(struct usbg_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct response_iu *rsp_iu = &cmd->response_iu;
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
+
+ cmd->state = UASP_QUEUE_COMMAND;
+ rsp_iu->iu_id = IU_ID_RESPONSE;
+ rsp_iu->tag = cpu_to_be16(cmd->tag);
+
+ if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
+ rsp_iu->response_code = cmd->tmr_rsp;
+ else
+ rsp_iu->response_code =
+ tcm_to_uasp_response(se_cmd->se_tmr_req->response);
+
+ /*
+ * The UASP driver must support all the task management functions listed
+ * in Table 20 of UAS-r04. To remain compliant while indicating that the
+ * TMR did not go through, report RC_TMF_FAILED instead of
+ * RC_TMF_NOT_SUPPORTED and print a warning to the user.
+ */
+ switch (cmd->tmr_func) {
+ case TMF_ABORT_TASK:
+ case TMF_ABORT_TASK_SET:
+ case TMF_CLEAR_TASK_SET:
+ case TMF_LOGICAL_UNIT_RESET:
+ case TMF_CLEAR_ACA:
+ case TMF_I_T_NEXUS_RESET:
+ case TMF_QUERY_TASK:
+ case TMF_QUERY_TASK_SET:
+ case TMF_QUERY_ASYNC_EVENT:
+ if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_warn(&gadget->dev, "TMF function %d not supported\n",
+ cmd->tmr_func);
+ rsp_iu->response_code = RC_TMF_FAILED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ stream->req_status->is_last = 1;
+ stream->req_status->stream_id = cmd->tag;
+ stream->req_status->context = cmd;
+ stream->req_status->length = sizeof(struct response_iu);
+ stream->req_status->buf = rsp_iu;
+ stream->req_status->complete = uasp_status_data_cmpl;
+}
+
+static void usbg_release_cmd(struct se_cmd *se_cmd);
+static int uasp_send_tm_response(struct usbg_cmd *cmd);
+
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
- struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
int ret;
- if (req->status < 0)
+ if (req->status == -ESHUTDOWN)
goto cleanup;
switch (cmd->state) {
@@ -600,8 +704,37 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
break;
case UASP_QUEUE_COMMAND:
- transport_generic_free_cmd(&cmd->se_cmd, 0);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ /*
+ * Overlapped command detected and cancelled.
+ * So send overlapped attempted status.
+ */
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
+ req->status == -ECONNRESET) {
+ uasp_send_tm_response(cmd);
+ return;
+ }
+
+ hash_del(&stream->node);
+
+ /*
+ * If no command was submitted to the target core here, just free the
+ * bitmap index. This is for the cases where f_tcm handles
+ * status response instead of the target core.
+ */
+ if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
+ cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
+ struct se_session *se_sess;
+
+ se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
+ cmd->se_cmd.map_tag,
+ cmd->se_cmd.map_cpu);
+ } else {
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+
+ usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
+ complete(&stream->cmd_completion);
break;
default:
@@ -610,27 +743,38 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
return;
cleanup:
+ hash_del(&stream->node);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int uasp_send_status_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
struct sense_iu *iu = &cmd->sense_iu;
iu->tag = cpu_to_be16(cmd->tag);
- stream->req_status->complete = uasp_status_data_cmpl;
- stream->req_status->context = cmd;
cmd->fu = fu;
uasp_prepare_status(cmd);
return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
}
+static int uasp_send_tm_response(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
+ struct response_iu *iu = &cmd->response_iu;
+
+ iu->tag = cpu_to_be16(cmd->tag);
+ cmd->fu = fu;
+ uasp_prepare_response(cmd);
+ return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
+}
+
static int uasp_send_read_response(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
struct sense_iu *iu = &cmd->sense_iu;
int ret;
@@ -674,11 +818,10 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct uas_stream *stream = cmd->stream;
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
struct sense_iu *iu = &cmd->sense_iu;
int ret;
- init_completion(&cmd->write_complete);
cmd->fu = fu;
iu->tag = cpu_to_be16(cmd->tag);
@@ -710,36 +853,31 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
pr_err("%s(%d)\n", __func__, __LINE__);
}
- wait_for_completion(&cmd->write_complete);
- target_execute_cmd(se_cmd);
cleanup:
return ret;
}
-static int usbg_submit_command(struct f_uas *, void *, unsigned int);
+static int usbg_submit_command(struct f_uas *, struct usb_request *);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
- int ret;
- if (req->status < 0)
+ if (req->status == -ESHUTDOWN)
return;
- ret = usbg_submit_command(fu, req->buf, req->actual);
- /*
- * Once we tune for performance enqueue the command req here again so
- * we can receive a second command while we processing this one. Pay
- * attention to properly sync STAUS endpoint with DATA IN + OUT so you
- * don't break HS.
- */
- if (!ret)
+ if (req->status < 0) {
+ usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ }
+
+ usbg_submit_command(fu, req);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
{
+ init_completion(&stream->cmd_completion);
+
stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!stream->req_in)
goto out;
@@ -764,66 +902,48 @@ out:
return -ENOMEM;
}
-static int uasp_alloc_cmd(struct f_uas *fu)
+static int uasp_alloc_cmd(struct f_uas *fu, int i)
{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
+ fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
+ if (!fu->cmd[i].req)
goto err;
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
+ fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
+ if (!fu->cmd[i].buf)
goto err_buf;
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
+ fu->cmd[i].req->complete = uasp_cmd_complete;
+ fu->cmd[i].req->buf = fu->cmd[i].buf;
+ fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
+ fu->cmd[i].req->context = fu;
return 0;
err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+ usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
err:
return -ENOMEM;
}
-static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
-{
- int i;
-
- for (i = 0; i < max_streams; i++) {
- struct uas_stream *s = &fu->stream[i];
-
- s->req_in->stream_id = i + 1;
- s->req_out->stream_id = i + 1;
- s->req_status->stream_id = i + 1;
- }
-}
-
static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
- int max_streams;
- if (fu->flags & USBG_USE_STREAMS)
- max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
- max_streams = 1;
-
- for (i = 0; i < max_streams; i++) {
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
if (ret)
goto err_cleanup;
}
- ret = uasp_alloc_cmd(fu);
- if (ret)
- goto err_free_stream;
- uasp_setup_stream_res(fu, max_streams);
+ for (i = 0; i < USBG_NUM_CMDS; i++) {
+ ret = uasp_alloc_cmd(fu, i);
+ if (ret)
+ goto err_free_stream;
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
+ ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+ }
return 0;
@@ -914,6 +1034,8 @@ static int get_cmd_dir(const unsigned char *cdb)
case READ_TOC:
case READ_FORMAT_CAPACITIES:
case REQUEST_SENSE:
+ case ATA_12:
+ case ATA_16:
ret = DMA_FROM_DEVICE;
break;
@@ -957,7 +1079,18 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (req->status < 0) {
+ cmd->state = UASP_QUEUE_COMMAND;
+
+ if (req->status == -ESHUTDOWN) {
+ struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
+
+ hash_del(&stream->node);
+ target_put_sess_cmd(se_cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ return;
+ }
+
+ if (req->status) {
pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
goto cleanup;
}
@@ -969,11 +1102,22 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
se_cmd->data_length);
}
- complete(&cmd->write_complete);
+ cmd->flags |= USBG_CMD_PENDING_DATA_WRITE;
+ queue_work(cmd->fu->tpg->workqueue, &cmd->work);
return;
cleanup:
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ target_put_sess_cmd(se_cmd);
+
+ /* Command was aborted due to overlapped tag */
+ if (cmd->state == UASP_QUEUE_COMMAND &&
+ cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
+ uasp_send_tm_response(cmd);
+ return;
+ }
+
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD, 0);
}
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -995,9 +1139,12 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
}
req->is_last = 1;
+ req->stream_id = cmd->tag;
req->complete = usbg_data_write_cmpl;
req->length = se_cmd->data_length;
req->context = cmd;
+
+ cmd->state = UASP_SEND_STATUS;
return 0;
}
@@ -1037,36 +1184,153 @@ static int usbg_send_read_response(struct se_cmd *se_cmd)
return uasp_send_read_response(cmd);
}
-static void usbg_cmd_work(struct work_struct *work)
+static void usbg_aborted_task(struct se_cmd *se_cmd);
+
+static void usbg_submit_tmr(struct usbg_cmd *cmd)
+{
+ struct se_session *se_sess;
+ struct se_cmd *se_cmd;
+ int flags = TARGET_SCF_ACK_KREF;
+
+ se_cmd = &cmd->se_cmd;
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+
+ target_submit_tmr(se_cmd, se_sess,
+ cmd->response_iu.add_response_info,
+ cmd->unpacked_lun, NULL, uasp_to_tcm_func(cmd->tmr_func),
+ GFP_ATOMIC, cmd->tag, flags);
+}
+
+static void usbg_submit_cmd(struct usbg_cmd *cmd)
{
- struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
+ /*
+ * Note: each command will spawn its own process, and each stage of the
+ * command is processed sequentially. Should this no longer be the case,
+ * locking is needed.
+ */
+ if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
+ target_execute_cmd(&cmd->se_cmd);
+ cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
+ return;
+ }
+
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- __target_init_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun, NULL);
+ if (dir < 0)
goto out;
- }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
cmd->sense_iu.sense, cmd->unpacked_lun, 0,
cmd->prio_attr, dir, flags);
+
return;
out:
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun, NULL);
transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+}
+
+static void usbg_cmd_work(struct work_struct *work)
+{
+ struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+
+ /*
+ * Failure is detected by f_tcm here. Skip submitting the command to the
+ * target core if we already know the failing response and send the usb
+ * response to the host directly.
+ */
+ if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
+ goto skip;
+
+ if (cmd->tmr_func)
+ usbg_submit_tmr(cmd);
+ else
+ usbg_submit_cmd(cmd);
+
+ return;
+
+skip:
+ if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
+ struct f_uas *fu = cmd->fu;
+ struct se_session *se_sess;
+ struct uas_stream *stream = NULL;
+ struct hlist_node *tmp;
+ struct usbg_cmd *active_cmd = NULL;
+
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+
+ hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
+ int i = stream - &fu->stream[0];
+
+ active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
+ if (active_cmd->tag == cmd->tag)
+ break;
+ }
+
+ /* Sanity check */
+ if (!stream || (active_cmd && active_cmd->tag != cmd->tag)) {
+ usbg_submit_command(cmd->fu, cmd->req);
+ return;
+ }
+
+ reinit_completion(&stream->cmd_completion);
+
+ /*
+ * A UASP command consists of the command, data, and status
+ * stages, each operating sequentially from different endpoints.
+ *
+ * Each USB endpoint operates independently, and depending on
+ * hardware implementation, a completion callback for a transfer
+ * from one endpoint may not reflect the order of completion on
+ * the wire. This is particularly true for devices with
+ * endpoints that have independent interrupts and event buffers.
+ *
+ * The driver must still detect misbehaving hosts and respond
+ * with an overlap status. To reduce false overlap failures,
+ * allow the active and matching stream ID a brief 1ms to
+ * complete before responding with an overlap command failure.
+ * Overlap failure should be rare.
+ */
+ wait_for_completion_timeout(&stream->cmd_completion, msecs_to_jiffies(1));
+
+ /* If the previous stream is completed, retry the command. */
+ if (!hash_hashed(&stream->node)) {
+ usbg_submit_command(cmd->fu, cmd->req);
+ return;
+ }
+
+ /*
+ * The command isn't submitted to the target core, so we're safe
+ * to remove the bitmap index from the session tag pool.
+ */
+ sbitmap_queue_clear(&se_sess->sess_tag_pool,
+ cmd->se_cmd.map_tag,
+ cmd->se_cmd.map_cpu);
+
+ /*
+ * Overlap command tag detected. Cancel any pending transfer of
+ * the command submitted to target core.
+ */
+ active_cmd->tmr_rsp = RC_OVERLAPPED_TAG;
+ usbg_aborted_task(&active_cmd->se_cmd);
+
+ /* Send the response after the transfer is aborted. */
+ return;
+ }
+
+ uasp_send_tm_response(cmd);
}
static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
@@ -1084,6 +1348,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.map_cpu = cpu;
+ cmd->se_cmd.cpuid = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
@@ -1092,50 +1357,82 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
static void usbg_release_cmd(struct se_cmd *);
-static int usbg_submit_command(struct f_uas *fu,
- void *cmdbuf, unsigned int len)
+static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
{
- struct command_iu *cmd_iu = cmdbuf;
+ struct iu *iu = req->buf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
+ struct uas_stream *stream;
+ struct hlist_node *tmp;
+ struct command_iu *cmd_iu;
u32 cmd_len;
u16 scsi_tag;
- if (cmd_iu->iu_id != IU_ID_COMMAND) {
- pr_err("Unsupported type %d\n", cmd_iu->iu_id);
- return -EINVAL;
- }
-
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
return -EINVAL;
}
- cmd_len = (cmd_iu->len & ~0x3) + 16;
- if (cmd_len > USBG_MAX_CMD)
- return -EINVAL;
-
- scsi_tag = be16_to_cpup(&cmd_iu->tag);
+ scsi_tag = be16_to_cpup(&iu->tag);
cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
- memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
- if (fu->flags & USBG_USE_STREAMS) {
- if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
- goto err;
- if (!cmd->tag)
- cmd->stream = &fu->stream[0];
- else
- cmd->stream = &fu->stream[cmd->tag - 1];
- } else {
- cmd->stream = &fu->stream[0];
+ cmd->req = req;
+ cmd->fu = fu;
+ cmd->tag = scsi_tag;
+ cmd->se_cmd.tag = scsi_tag;
+ cmd->tmr_func = 0;
+ cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
+ cmd->flags = 0;
+
+ cmd_iu = (struct command_iu *)iu;
+
+ /* Command and Task Management IUs share the same LUN offset */
+ cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
+
+ if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
+ cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
+ goto skip;
+ }
+
+ hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
+ struct usbg_cmd *active_cmd;
+ struct se_session *se_sess;
+ int i = stream - &fu->stream[0];
+
+ se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
+ active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
+
+ if (active_cmd->tag == scsi_tag) {
+ cmd->tmr_rsp = RC_OVERLAPPED_TAG;
+ goto skip;
+ }
}
+ stream = &fu->stream[cmd->se_cmd.map_tag];
+ hash_add(fu->stream_hash, &stream->node, scsi_tag);
+
+ if (iu->iu_id == IU_ID_TASK_MGMT) {
+ struct task_mgmt_iu *tm_iu;
+
+ tm_iu = (struct task_mgmt_iu *)iu;
+ cmd->tmr_func = tm_iu->function;
+ goto skip;
+ }
+
+ cmd_len = (cmd_iu->len & ~0x3) + 16;
+ if (cmd_len > USBG_MAX_CMD) {
+ target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
+ hash_del(&stream->node);
+ return -EINVAL;
+ }
+ memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
+
switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
cmd->prio_attr = TCM_HEAD_TAG;
@@ -1155,15 +1452,11 @@ static int usbg_submit_command(struct f_uas *fu,
break;
}
- cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
-
+skip:
INIT_WORK(&cmd->work, usbg_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
return 0;
-err:
- usbg_release_cmd(&cmd->se_cmd);
- return -EINVAL;
}
static void bot_cmd_work(struct work_struct *work)
@@ -1172,30 +1465,40 @@ static void bot_cmd_work(struct work_struct *work)
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
+ int flags = TARGET_SCF_ACK_KREF;
int dir;
+ /*
+ * Note: each command will spawn its own process, and each stage of the
+ * command is processed sequentially. Should this no longer be the case,
+ * locking is needed.
+ */
+ if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
+ target_execute_cmd(&cmd->se_cmd);
+ cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
+ return;
+ }
+
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- __target_init_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun, NULL);
+ if (dir < 0)
goto out;
- }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0);
+ cmd->data_len, cmd->prio_attr, dir, flags);
return;
out:
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun, NULL);
transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ TCM_UNSUPPORTED_SCSI_OPCODE, 0);
}
static int bot_submit_command(struct f_uas *fu,
@@ -1239,6 +1542,7 @@ static int bot_submit_command(struct f_uas *fu,
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
+ cmd->flags = 0;
INIT_WORK(&cmd->work, bot_cmd_work);
queue_work(tpg->workqueue, &cmd->work);
@@ -1275,16 +1579,38 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
se_cmd);
struct se_session *se_sess = se_cmd->se_sess;
+ cmd->tag = 0;
kfree(cmd->data_buf);
target_free_tag(se_sess, se_cmd);
}
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
+
+ uasp_send_tm_response(cmd);
}
static void usbg_aborted_task(struct se_cmd *se_cmd)
{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
+ int ret = 0;
+
+ if (stream->req_out->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
+ else if (stream->req_in->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
+ else if (stream->req_status->status == -EINPROGRESS)
+ ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
+
+ if (ret)
+ dev_err(&gadget->dev, "Failed to abort cmd tag %d, (%d)\n",
+ cmd->tag, ret);
+
+ cmd->state = UASP_QUEUE_COMMAND;
}
static const char *usbg_check_wwn(const char *name)
@@ -1355,7 +1681,8 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
goto unref_dep;
mutex_init(&tpg->tpg_mutex);
atomic_set(&tpg->tpg_port_count, 0);
- tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
+ tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
if (!tpg->workqueue)
goto free_tpg;
@@ -1746,7 +2073,7 @@ static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
.bLength = sizeof(uasp_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
+ .bMaxBurst = 15,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
.wBytesPerInterval = 0,
};
@@ -1754,7 +2081,7 @@ static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
.bLength = sizeof(bot_bi_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
+ .bMaxBurst = 15,
};
static struct usb_endpoint_descriptor uasp_bo_desc = {
@@ -1789,12 +2116,14 @@ static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
.bLength = sizeof(uasp_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 15,
.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};
static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
.bLength = sizeof(bot_bo_ep_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 15,
};
static struct usb_endpoint_descriptor uasp_status_desc = {
@@ -1971,43 +2300,39 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
bot_intf_desc.bInterfaceNumber = iface;
uasp_intf_desc.bInterfaceNumber = iface;
fu->iface = iface;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
- &uasp_bi_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
if (!ep)
goto ep_fail;
fu->ep_in = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
- &uasp_bo_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
if (!ep)
goto ep_fail;
fu->ep_out = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
- &uasp_status_in_ep_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
if (!ep)
goto ep_fail;
fu->ep_status = ep;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
- &uasp_cmd_comp_desc);
+ ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
if (!ep)
goto ep_fail;
fu->ep_cmd = ep;
/* Assume endpoint addresses are the same for both speeds */
- uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+ uasp_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
+ uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
uasp_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+ uasp_fs_status_desc.bEndpointAddress;
+ uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
- uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
- uasp_fs_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+ uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
+ uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
+ uasp_ss_status_desc.bEndpointAddress =
+ uasp_fs_status_desc.bEndpointAddress;
+ uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
uasp_hs_function_desc, uasp_ss_function_desc,
@@ -2051,9 +2376,14 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
static int tcm_get_alt(struct usb_function *f, unsigned intf)
{
- if (intf == bot_intf_desc.bInterfaceNumber)
+ struct f_uas *fu = to_f_uas(f);
+
+ if (fu->iface != intf)
+ return -EOPNOTSUPP;
+
+ if (fu->flags & USBG_IS_BOT)
return USB_G_ALT_INT_BBB;
- if (intf == uasp_intf_desc.bInterfaceNumber)
+ else if (fu->flags & USBG_IS_UAS)
return USB_G_ALT_INT_UAS;
return -EOPNOTSUPP;
@@ -2063,6 +2393,9 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
+ if (fu->iface != intf)
+ return -EOPNOTSUPP;
+
if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
struct guas_setup_wq *work;
@@ -2271,6 +2604,8 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
fu->tpg = tpg_instances[i].tpg;
+
+ hash_init(fu->stream_hash);
mutex_unlock(&tpg_instances_lock);
return &fu->function;
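The f_tcm changes above detect overlapped command tags by hashing each in-flight stream and comparing an incoming tag against the active commands before a new command claims a slot. Below is a minimal userspace sketch of that claim/overlap decision only; the slot count, structure, and function names are illustrative assumptions and not the kernel API or the driver's data structures.

/*
 * Userspace sketch (not kernel code) of the overlapped-tag check:
 * before a new command claims a slot, its tag is compared against
 * every in-flight command; a match is reported as an overlap instead
 * of being queued a second time.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 33            /* illustrative stand-in for USBG_NUM_CMDS */

struct slot {
	bool in_use;
	unsigned short tag;     /* stand-in for the UASP command tag */
};

static struct slot slots[NUM_SLOTS];

/* Returns a slot index on success, -1 if the tag overlaps an active command. */
static int claim_slot(unsigned short tag)
{
	int i, free_idx = -1;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (slots[i].in_use && slots[i].tag == tag)
			return -1;              /* overlapped tag */
		if (!slots[i].in_use && free_idx < 0)
			free_idx = i;
	}
	if (free_idx >= 0) {
		slots[free_idx].in_use = true;
		slots[free_idx].tag = tag;
	}
	return free_idx;
}

int main(void)
{
	printf("tag 5 -> slot %d\n", claim_slot(5));   /* claims a slot */
	printf("tag 5 -> slot %d\n", claim_slot(5));   /* -1: overlap   */
	return 0;
}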
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index ced5d2b09234..11ac785d5eee 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -131,7 +131,7 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
#define FSG_BUFLEN ((u32)16384)
/* Maximal number of LUNs supported in mass storage function */
-#define FSG_MAX_LUNS 16
+#define FSG_MAX_LUNS (US_BULK_MAX_LUN_LIMIT + 1)
enum fsg_buffer_state {
BUF_STATE_SENDING = -2,
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index 3cd565794ad7..009974d81d66 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -4,6 +4,7 @@
#include <linux/kref.h>
/* #include <linux/usb/uas.h> */
+#include <linux/hashtable.h>
#include <linux/usb/composite.h>
#include <linux/usb/uas.h>
#include <linux/usb/storage.h>
@@ -13,9 +14,11 @@
#define USBG_NAMELEN 32
#define fuas_to_gadget(f) (f->function.config->cdev->gadget)
-#define UASP_SS_EP_COMP_LOG_STREAMS 4
+#define UASP_SS_EP_COMP_LOG_STREAMS 5
#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
+#define USBG_NUM_CMDS (UASP_SS_EP_COMP_NUM_STREAMS + 1)
+
enum {
USB_G_STR_INT_UAS = 0,
USB_G_STR_INT_BBB,
@@ -24,7 +27,7 @@ enum {
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1
-#define USB_G_DEFAULT_SESSION_TAGS 128
+#define USB_G_DEFAULT_SESSION_TAGS USBG_NUM_CMDS
struct tcm_usbg_nexus {
struct se_session *tvn_se_sess;
@@ -72,15 +75,23 @@ struct usbg_cmd {
struct se_cmd se_cmd;
void *data_buf; /* used if no sg support available */
struct f_uas *fu;
- struct completion write_complete;
struct kref ref;
+ struct usb_request *req;
+
+ u32 flags;
+#define USBG_CMD_PENDING_DATA_WRITE BIT(0)
+
/* UAS only */
u16 tag;
u16 prio_attr;
struct sense_iu sense_iu;
+ struct response_iu response_iu;
enum uas_state state;
- struct uas_stream *stream;
+
+ int tmr_func;
+ int tmr_rsp;
+#define RC_RESPONSE_UNKNOWN 0xff
/* BOT only */
__le32 bot_tag;
@@ -93,6 +104,9 @@ struct uas_stream {
struct usb_request *req_in;
struct usb_request *req_out;
struct usb_request *req_status;
+
+ struct completion cmd_completion;
+ struct hlist_node node;
};
struct usbg_cdb {
@@ -116,15 +130,17 @@ struct f_uas {
#define USBG_USE_STREAMS (1 << 2)
#define USBG_IS_BOT (1 << 3)
#define USBG_BOT_CMD_PEND (1 << 4)
+#define USBG_BOT_WEDGED (1 << 5)
- struct usbg_cdb cmd;
+ struct usbg_cdb cmd[USBG_NUM_CMDS];
struct usb_ep *ep_in;
struct usb_ep *ep_out;
/* UAS */
struct usb_ep *ep_status;
struct usb_ep *ep_cmd;
- struct uas_stream stream[UASP_SS_EP_COMP_NUM_STREAMS];
+ struct uas_stream stream[USBG_NUM_CMDS];
+ DECLARE_HASHTABLE(stream_hash, UASP_SS_EP_COMP_LOG_STREAMS);
/* BOT */
struct bot_status bot_status;
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index bc143a86c2dd..36fff45e8c9b 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -21,6 +21,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/console.h>
@@ -1420,10 +1421,6 @@ void gserial_disconnect(struct gserial *gser)
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
- /* disable endpoints, aborting down any active I/O */
- usb_ep_disable(gser->out);
- usb_ep_disable(gser->in);
-
port->port_usb = NULL;
gser->ioport = NULL;
if (port->port.count > 0) {
@@ -1435,6 +1432,10 @@ void gserial_disconnect(struct gserial *gser)
spin_unlock(&port->port_lock);
spin_unlock_irqrestore(&serial_port_lock, flags);
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(gser->out);
+ usb_ep_disable(gser->in);
+
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
if (port->port.count == 0)
@@ -1545,7 +1546,7 @@ static int __init userial_init(void)
pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
MAX_U_SERIAL_PORTS,
- (MAX_U_SERIAL_PORTS == 1) ? "" : "s");
+ str_plural(MAX_U_SERIAL_PORTS));
return status;
fail:
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9c7381661016..b6a30d88a800 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -20,6 +20,7 @@
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/aio.h>
@@ -1182,7 +1183,7 @@ ep0_fasync (int f, struct file *fd, int on)
{
struct dev_data *dev = fd->private_data;
// caller must F_SETOWN before signal delivery happens
- VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
+ VDEBUG(dev, "%s %s\n", __func__, str_on_off(on));
return fasync_helper (f, fd, on, &dev->fasync);
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index a63e4af60a56..02fe1a08d575 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -22,6 +22,7 @@
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/bcd.h>
#include <linux/version.h>
@@ -219,7 +220,7 @@ static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
ep->vhub->wakeup_en = is_set;
EPDBG(ep, "Hub remote wakeup %s\n",
- is_set ? "enabled" : "disabled");
+ str_enabled_disabled(is_set));
return std_req_complete;
}
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index e3af4ec3794e..aa4c61094dc6 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -131,7 +132,7 @@ static void proc_ep_show(struct seq_file *s, struct at91_ep *ep)
seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n",
csr,
(csr & 0x07ff0000) >> 16,
- (csr & (1 << 15)) ? "enabled" : "disabled",
+ str_enabled_disabled(csr & (1 << 15)),
(csr & (1 << 11)) ? "DATA1" : "DATA0",
types[(csr & 0x700) >> 8],
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index 62fce42ef2da..7e69944ef18a 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
@@ -2233,12 +2234,12 @@ static int cdns2_init_eps(struct cdns2_device *pdev)
dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
"BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
pep->name,
- (pep->endpoint.caps.type_control) ? "yes" : "no",
- (pep->endpoint.caps.type_int) ? "yes" : "no",
- (pep->endpoint.caps.type_bulk) ? "yes" : "no",
- (pep->endpoint.caps.type_iso) ? "yes" : "no",
- (pep->endpoint.caps.dir_in) ? "yes" : "no",
- (pep->endpoint.caps.dir_out) ? "yes" : "no");
+ str_yes_no(pep->endpoint.caps.type_control),
+ str_yes_no(pep->endpoint.caps.type_int),
+ str_yes_no(pep->endpoint.caps.type_bulk),
+ str_yes_no(pep->endpoint.caps.type_iso),
+ str_yes_no(pep->endpoint.caps.dir_in),
+ str_yes_no(pep->endpoint.caps.dir_out));
INIT_LIST_HEAD(&pep->pending_list);
INIT_LIST_HEAD(&pep->deferred_list);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a7e8fa45776b..bda08c5ba7c0 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
@@ -625,7 +626,7 @@ static int dummy_enable(struct usb_ep *_ep,
desc->bEndpointAddress & 0x0f,
(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(desc)),
- max, ep->stream_en ? "enabled" : "disabled");
+ max, str_enabled_disabled(ep->stream_en));
/* at this point real hardware should be NAKing transfers
* to that endpoint, until a buffer is queued to it.
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 8b7f7f961774..4dea8bc30cf6 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1181,7 +1182,7 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- dev_vdbg(&gadget->dev, "VBUS %s\n", is_active ? "on" : "off");
+ dev_vdbg(&gadget->dev, "VBUS %s\n", str_on_off(is_active));
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 698463bf697b..8902abe3ca76 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
@@ -1252,7 +1253,7 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct omap_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- VDBG("VBUS %s\n", is_active ? "on" : "off");
+ VDBG("VBUS %s\n", str_on_off(is_active));
udc->vbus_active = (is_active != 0);
if (cpu_is_omap15xx()) {
/* "software" detect, ignored if !VBUS_MODE_1510 */
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index f9a55d4f189f..897f53601b5b 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -20,6 +20,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
@@ -1083,7 +1084,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
is_first_req = list_empty(&ep->queue);
ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
- _req, is_first_req ? "yes" : "no",
+ _req, str_yes_no(is_first_req),
_req->length, _req->buf);
if (!ep->enabled) {
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index a6c20facf945..fce800ba4c61 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -2756,7 +2757,7 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
if (!HCS_PPC(oxu->hcs_params))
return;
- oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+ oxu_dbg(oxu, "...power%s ports...\n", str_up_down(is_on));
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
if (is_on)
oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 036f5fd6d159..fa2e4badd288 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -48,6 +48,7 @@
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -98,7 +99,7 @@ static void port_power(struct sl811 *sl811, int is_on)
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
dev_dbg(hcd->self.controller, "power %s\n",
- is_on ? "on" : "off");
+ str_on_off(is_on));
sl811->board->port_power(hcd->self.controller, is_on);
}
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
index 9e94cebf4a56..f6b9a00a0ab9 100644
--- a/drivers/usb/host/xhci-caps.h
+++ b/drivers/usb/host/xhci-caps.h
@@ -83,3 +83,9 @@
#define HCC2_CIC(p) ((p) & (1 << 5))
/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
#define HCC2_ETC(p) ((p) & (1 << 6))
+/* true: HC support Extended TBC TRB Status Capability */
+#define HCC2_ETC_TSC(p) ((p) & (1 << 7))
+/* true: HC support Get/Set Extended Property Capability */
+#define HCC2_GSC(p) ((p) & (1 << 8))
+/* true: HC support Virtualization Based Trusted I/O Capability */
+#define HCC2_VTC(p) ((p) & (1 << 9))
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 227e513867dd..fd7895b24367 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -957,7 +957,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
/* set fast poll rate if there are pending data transfers */
if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
!list_empty(&dbc->eps[BULK_IN].list_pending))
- poll_interval = 1;
+ poll_interval = 0;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index d719c16ea30b..60ed753c85bb 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -110,15 +110,74 @@ static void dbc_start_rx(struct dbc_port *port)
}
}
+/*
+ * Queue received data to tty buffer and push it.
+ *
+ * Returns nr of remaining bytes that didn't fit the tty buffer, i.e. 0 if all
+ * bytes successfully moved. In case of error returns negative errno.
+ * Call with lock held
+ */
+static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
+{
+ char *packet = req->buf;
+ unsigned int n, size = req->actual;
+ int count;
+
+ if (!req->actual)
+ return 0;
+
+ /* if n_read is set then request was partially moved to tty buffer */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(&port->port, packet, size);
+ if (count)
+ tty_flip_buffer_push(&port->port);
+ if (count != size) {
+ port->n_read += count;
+ return size - count;
+ }
+
+ port->n_read = 0;
+ return 0;
+}
+
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
+ struct tty_struct *tty;
+ int untransferred;
+
+ tty = port->port.tty;
spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Only defer copying data to the tty buffer in case:
+ * - !list_empty(&port->read_queue), there is older pending data
+ * - tty is throttled
+ * - failed to copy all data to buffer, defer remaining part
+ */
+
+ if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred == 0) {
+ list_add_tail(&req->list_pool, &port->read_pool);
+ if (req->status != -ESHUTDOWN)
+ dbc_start_rx(port);
+ goto out;
+ }
+ }
+
+ /* defer moving data from req to tty buffer to a tasklet */
list_add_tail(&req->list_pool, &port->read_queue);
tasklet_schedule(&port->push);
+out:
spin_unlock_irqrestore(&port->port_lock, flags);
}
@@ -331,10 +390,10 @@ static void dbc_rx_push(struct tasklet_struct *t)
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
- bool do_push = false;
bool disconnect = false;
struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
+ int untransferred;
spin_lock_irqsave(&port->port_lock, flags);
tty = port->port.tty;
@@ -356,42 +415,15 @@ static void dbc_rx_push(struct tasklet_struct *t)
break;
}
- if (req->actual) {
- char *packet = req->buf;
- unsigned int n, size = req->actual;
- int count;
-
- n = port->n_read;
- if (n) {
- packet += n;
- size -= n;
- }
-
- count = tty_insert_flip_string(&port->port, packet,
- size);
- if (count)
- do_push = true;
- if (count != size) {
- port->n_read += count;
- break;
- }
- port->n_read = 0;
- }
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred > 0)
+ break;
list_move_tail(&req->list_pool, &port->read_pool);
}
- if (do_push)
- tty_flip_buffer_push(&port->port);
-
- if (!list_empty(queue) && tty) {
- if (!tty_throttled(tty)) {
- if (do_push)
- tasklet_schedule(&port->push);
- else
- pr_warn("ttyDBC0: RX not scheduled?\n");
- }
- }
+ if (!list_empty(queue))
+ tasklet_schedule(&port->push);
if (!disconnect)
dbc_start_rx(port);
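The dbc_rx_push_buffer() helper added above keeps a per-port n_read offset so a request that only partially fits the tty flip buffer can be resumed on the next attempt. The following is a small userspace sketch of that partial-copy bookkeeping under stated assumptions: a fixed-size destination array stands in for the flip buffer and all names are hypothetical; it is not DbC code, only the resume logic.

/*
 * Userspace sketch of the n_read bookkeeping: copy as much of the
 * request as fits, remember the offset for the next attempt, and
 * report how many bytes are left over.
 */
#include <stdio.h>
#include <string.h>

struct port_sim {
	char dst[8];            /* stand-in for the tty flip buffer */
	size_t dst_used;
	size_t n_read;          /* bytes of the current request already copied */
};

static size_t push_buffer(struct port_sim *p, const char *req, size_t len)
{
	const char *src = req + p->n_read;
	size_t want = len - p->n_read;
	size_t room = sizeof(p->dst) - p->dst_used;
	size_t count = want < room ? want : room;

	memcpy(p->dst + p->dst_used, src, count);
	p->dst_used += count;

	if (count != want) {
		p->n_read += count;     /* request only partially consumed */
		return want - count;
	}
	p->n_read = 0;                  /* request fully consumed */
	return 0;
}

int main(void)
{
	struct port_sim p = { .dst_used = 0, .n_read = 0 };
	const char req[] = "0123456789";

	printf("left after first push: %zu\n", push_buffer(&p, req, 10));  /* 2 */
	p.dst_used = 0;                 /* pretend the tty drained the buffer */
	printf("left after second push: %zu\n", push_buffer(&p, req, 10)); /* 0 */
	return 0;
}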
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f0c1b96e208..1f5ef174abea 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -232,16 +232,7 @@ static struct xhci_file_map ring_files[] = {
static int xhci_ring_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
- f_map = &ring_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -318,16 +309,7 @@ static struct xhci_file_map context_files[] = {
static int xhci_context_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(context_files); i++) {
- f_map = &context_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -410,7 +392,8 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
int i;
for (i = 0; i < nentries; i++)
- debugfs_create_file(files[i].name, 0444, parent, data, fops);
+ debugfs_create_file_aux(files[i].name, 0444, parent,
+ data, &files[i], fops);
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index db109b570c5c..d85ffa9ffaa7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -330,6 +330,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) {
+ xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+ xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
@@ -567,6 +569,7 @@ EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
static const struct acpi_device_id usb_xhci_acpi_match[] = {
/* XHCI-compliant USB Controller */
{ "PNP0D10", },
+ { "PNP0D15", },
{ }
};
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 09b05a62375e..965bffce301e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -55,6 +55,7 @@
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -422,7 +423,8 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- xhci_mod_cmd_timer(xhci);
+ if (cur_cmd)
+ xhci_mod_cmd_timer(xhci);
xhci_ring_cmd_db(xhci);
}
}
@@ -1649,12 +1651,13 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
NEC_FW_MINOR(le32_to_cpu(event->status)));
}
-static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 comp_code, u32 comp_param)
{
list_del(&cmd->cmd_list);
if (cmd->completion) {
- cmd->status = status;
+ cmd->status = comp_code;
+ cmd->comp_param = comp_param;
complete(cmd->completion);
} else {
kfree(cmd);
@@ -1666,7 +1669,7 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
struct xhci_command *cur_cmd, *tmp_cmd;
xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
- xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED, 0);
}
void xhci_handle_command_timeout(struct work_struct *work)
@@ -1751,6 +1754,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ u32 status = le32_to_cpu(event->status);
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
@@ -1879,7 +1883,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
}
event_handled:
- xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code, COMP_PARAM(status));
inc_deq(xhci, xhci->cmd_ring);
}
@@ -3438,8 +3442,8 @@ static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
if (xhci_interval != ep_interval) {
dev_dbg_ratelimited(&urb->dev->dev,
"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
- ep_interval, ep_interval == 1 ? "" : "s",
- xhci_interval, xhci_interval == 1 ? "" : "s");
+ ep_interval, str_plural(ep_interval),
+ xhci_interval, str_plural(xhci_interval));
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 06ae193ec874..22dc86fb5254 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -26,6 +26,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#include <linux/usb/role.h>
@@ -724,7 +725,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
if (err < 0) {
dev_err(dev,
"failed to %s LFPS detection on USB3#%u: %d\n",
- enable ? "enable" : "disable", port, err);
+ str_enable_disable(enable), port, err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
@@ -1349,7 +1350,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
u32 status;
int ret;
- dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? "on" : "off");
+ dev_dbg(tegra->dev, "host mode %s\n", str_on_off(tegra->host_mode));
mutex_lock(&tegra->lock);
@@ -1667,7 +1668,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ if (!of_property_present(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5ebde8cae4fc..45653114ccd7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
@@ -4523,7 +4524,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
- enable ? "enable" : "disable", port_num + 1);
+ str_enable_disable(enable), port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4914f0a10cff..8c164340a2c3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -529,6 +529,7 @@ struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
+ u32 comp_param;
int slot_id;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
@@ -959,6 +960,9 @@ struct xhci_event_cmd {
__le32 flags;
};
+/* status bitmasks */
+#define COMP_PARAM(p) ((p) & 0xffffff) /* Command Completion Parameter */
+
/* Address device - disable SetAddress */
#define TRB_BSR (1<<9)
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f758241d9d3..934ec5310fb9 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -322,7 +322,7 @@ static inline void mts_urb_abort(struct mts_desc* desc) {
usb_kill_urb( desc->urb );
}
-static int mts_slave_alloc (struct scsi_device *s)
+static int mts_sdev_init (struct scsi_device *s)
{
s->inquiry_len = 0x24;
return 0;
@@ -626,7 +626,7 @@ static const struct scsi_host_template mts_scsi_host_template = {
.this_id = -1,
.emulated = 1,
.dma_alignment = 511,
- .slave_alloc = mts_slave_alloc,
+ .sdev_init = mts_sdev_init,
.max_sectors= 256, /* 128 K */
};
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index f0de99858353..c003049bafbf 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include "mtu3.h"
@@ -256,16 +257,7 @@ static const struct mtu3_file_map mtu3_ep_files[] = {
static int mtu3_ep_open(struct inode *inode, struct file *file)
{
- const char *file_name = file_dentry(file)->d_iname;
- const struct mtu3_file_map *f_map;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
- f_map = &mtu3_ep_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct mtu3_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -288,17 +280,8 @@ static const struct debugfs_reg32 mtu3_prb_regs[] = {
static int mtu3_probe_show(struct seq_file *sf, void *unused)
{
- const char *file_name = file_dentry(sf->file)->d_iname;
struct mtu3 *mtu = sf->private;
- const struct debugfs_reg32 *regs;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
- regs = &mtu3_prb_regs[i];
-
- if (strcmp(regs->name, file_name) == 0)
- break;
- }
+ const struct debugfs_reg32 *regs = debugfs_get_aux(sf->file);
seq_printf(sf, "0x%04x - 0x%08x\n", (u32)regs->offset,
mtu3_readl(mtu->ippc_base, (u32)regs->offset));
@@ -314,13 +297,11 @@ static int mtu3_probe_open(struct inode *inode, struct file *file)
static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- const char *file_name = file_dentry(file)->d_iname;
struct seq_file *sf = file->private_data;
struct mtu3 *mtu = sf->private;
- const struct debugfs_reg32 *regs;
+ const struct debugfs_reg32 *regs = debugfs_get_aux(file);
char buf[32];
u32 val;
- int i;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -328,12 +309,6 @@ static ssize_t mtu3_probe_write(struct file *file, const char __user *ubuf,
if (kstrtou32(buf, 0, &val))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
- regs = &mtu3_prb_regs[i];
-
- if (strcmp(regs->name, file_name) == 0)
- break;
- }
mtu3_writel(mtu->ippc_base, (u32)regs->offset, val);
return count;
@@ -358,8 +333,8 @@ static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
for (i = 0; i < ARRAY_SIZE(mtu3_prb_regs); i++) {
regs = &mtu3_prb_regs[i];
- debugfs_create_file(regs->name, 0644, dir_prb,
- mtu, &mtu3_probe_fops);
+ debugfs_create_file_aux(regs->name, 0644, dir_prb,
+ mtu, regs, &mtu3_probe_fops);
}
mtu3_debugfs_regset(mtu, mtu->ippc_base, mtu3_prb_regs,
@@ -379,8 +354,8 @@ static void mtu3_debugfs_create_ep_dir(struct mtu3_ep *mep,
for (i = 0; i < ARRAY_SIZE(mtu3_ep_files); i++) {
files = &mtu3_ep_files[i];
- debugfs_create_file(files->name, 0444, dir_ep,
- mep, &mtu3_ep_fops);
+ debugfs_create_file_aux(files->name, 0444, dir_ep,
+ mep, files, &mtu3_ep_fops);
}
}
@@ -479,7 +454,7 @@ static int ssusb_vbus_show(struct seq_file *sf, void *unused)
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
seq_printf(sf, "vbus state: %s\n(echo on/off)\n",
- regulator_is_enabled(otg_sx->vbus) ? "on" : "off");
+ str_on_off(regulator_is_enabled(otg_sx->vbus)));
return 0;
}
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 8191b7ed3852..ffa5b9401dad 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
@@ -109,7 +110,7 @@ int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
if (!vbus)
return 0;
- dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+ dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, str_on_off(is_on));
if (is_on) {
ret = regulator_enable(vbus);
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index ad0eeac4332d..bf73fbc29976 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -7,6 +7,7 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/string_choices.h>
#include "mtu3.h"
#include "mtu3_trace.h"
@@ -490,7 +491,7 @@ static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
unsigned long flags;
dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
- is_on ? "on" : "off", mtu->is_active ? "" : "in");
+ str_on_off(is_on), mtu->is_active ? "" : "in");
pm_runtime_get_sync(mtu->dev);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index f772aa272bea..26fd71a5f9b2 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
@@ -306,7 +307,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
}
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
- drvvbus ? "on" : "off",
+ str_on_off(drvvbus),
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 03b1154a6014..7f349f5e781d 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -72,6 +72,7 @@
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
@@ -1937,7 +1938,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
pm_runtime_put_sync(dev);
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
- vbus ? "on" : "off", val);
+ str_on_off(vbus), val);
}
static DEVICE_ATTR_RW(vbus);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 2542239ec64e..f877faf5a930 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -24,6 +24,7 @@
#include <linux/usb/usb_phy_generic.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/sizes.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -378,7 +379,7 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
/* NOTE: this must complete power-on within 100 ms. */
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
- drvvbus ? "on" : "off",
+ str_on_off(drvvbus),
usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c6076df0d50c..6869c58367f2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -1606,7 +1607,7 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
musb_dbg(musb, "gadget D+ pullup %s",
- is_on ? "on" : "off");
+ str_on_off(is_on));
musb_writeb(musb->mregs, MUSB_POWER, power);
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 732ba981e607..6b4481a867c5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
@@ -1028,7 +1029,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+ urb->actual_length);
musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
fifo_count,
- (fifo_count == 1) ? "" : "s",
+ str_plural(fifo_count),
fifo_dest);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 42c42e193232..40ac68e52cee 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -529,7 +530,7 @@ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on)
if (!otg->gadget || !otg->gadget->dev.parent)
return -ENODEV;
- VDBG("gadget %s\n", on ? "on" : "off");
+ VDBG("gadget %s\n", str_on_off(on));
dev = otg->gadget->dev.parent;
if (on) {
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index a7a102f2e163..30d6c8840a5e 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
@@ -217,7 +218,7 @@ static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
if (!otg->gadget)
return;
- dev_info(mvotg->phy.dev, "gadget %s\n", on ? "on" : "off");
+ dev_info(mvotg->phy.dev, "gadget %s\n", str_on_off(on));
if (on)
usb_gadget_vbus_connect(otg->gadget);
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index ae7bf3ff89ee..88607d0edb01 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -18,6 +18,7 @@
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include <linux/usb/otg.h>
#include <linux/mfd/retu.h>
#include <linux/usb/gadget.h>
@@ -63,7 +64,7 @@ static ssize_t vbus_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
- return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
+ return sprintf(buf, "%s\n", str_on_off(tu->vbus_state));
}
static DEVICE_ATTR_RO(vbus);
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 1ce134505cee..e1435bc59662 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -346,13 +346,6 @@ static void devm_usb_phy_release2(struct device *dev, void *_res)
usb_put_phy(res->phy);
}
-static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
-{
- struct usb_phy **phy = res;
-
- return *phy == match_data;
-}
-
static void usb_charger_init(struct usb_phy *usb_phy)
{
usb_phy->chg_type = UNKNOWN_TYPE;
@@ -615,25 +608,6 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
EXPORT_SYMBOL_GPL(devm_usb_get_phy_by_phandle);
/**
- * devm_usb_put_phy - release the USB PHY
- * @dev: device that wants to release this phy
- * @phy: the phy returned by devm_usb_get_phy()
- *
- * destroys the devres associated with this phy and invokes usb_put_phy
- * to release the phy.
- *
- * For use by USB host and peripheral drivers.
- */
-void devm_usb_put_phy(struct device *dev, struct usb_phy *phy)
-{
- int r;
-
- r = devres_release(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
- dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
-}
-EXPORT_SYMBOL_GPL(devm_usb_put_phy);
-
-/**
* usb_put_phy - release the USB PHY
* @x: the phy returned by usb_get_phy()
*
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d10e4c4848a0..7cc36f84821f 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -63,6 +63,7 @@
#define CH341_REG_DIVISOR 0x13
#define CH341_REG_LCR 0x18
#define CH341_REG_LCR2 0x25
+#define CH341_REG_FLOW_CTL 0x27
#define CH341_NBREAK_BITS 0x01
@@ -77,6 +78,9 @@
#define CH341_LCR_CS6 0x01
#define CH341_LCR_CS5 0x00
+#define CH341_FLOW_CTL_NONE 0x00
+#define CH341_FLOW_CTL_RTSCTS 0x01
+
#define CH341_QUIRK_LIMITED_PRESCALER BIT(0)
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
@@ -478,6 +482,28 @@ err_kill_interrupt_urb:
return r;
}
+static void ch341_set_flow_control(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
+{
+ u16 flow_ctl;
+ int r;
+
+ if (C_CRTSCTS(tty))
+ flow_ctl = CH341_FLOW_CTL_RTSCTS;
+ else
+ flow_ctl = CH341_FLOW_CTL_NONE;
+
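+ /*
+ * A WRITE_REG request takes two register/value pairs packed into
+ * wValue/wIndex; here the same flow-control register and value are
+ * simply written twice.
+ */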
+ r = ch341_control_out(port->serial->dev,
+ CH341_REQ_WRITE_REG,
+ (CH341_REG_FLOW_CTL << 8) | CH341_REG_FLOW_CTL,
+ (flow_ctl << 8) | flow_ctl);
+ if (r < 0 && old_termios) {
+ tty->termios.c_cflag &= ~CRTSCTS;
+ tty->termios.c_cflag |= (old_termios->c_cflag & CRTSCTS);
+ }
+}
+
/* Old_termios contains the original termios settings and
* tty->termios contains the new setting to be used.
*/
@@ -546,6 +572,8 @@ static void ch341_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->mcr);
+
+ ch341_set_flow_control(tty, port, old_termios);
}
/*
@@ -632,13 +660,12 @@ restore:
static int ch341_break_ctl(struct tty_struct *tty, int break_state)
{
- const uint16_t ch341_break_reg =
- ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
+ const u16 ch341_break_reg = (CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
struct ch341_private *priv = usb_get_serial_port_data(port);
+ u16 reg_contents;
+ u8 break_reg[2];
int r;
- uint16_t reg_contents;
- uint8_t break_reg[2];
if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK)
return ch341_simulate_break(tty, break_state);
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a317bdbd00ad..72fe83a6c978 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -503,7 +503,7 @@ static void qt2_process_read_urb(struct urb *urb)
newport = *(ch + 3);
- if (newport > serial->num_ports) {
+ if (newport >= serial->num_ports) {
dev_err(&port->dev,
"%s - port change to invalid port: %i\n",
__func__, newport);
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index d17b60a644ef..4be1d617d63d 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -3,8 +3,7 @@
# USB Storage driver configuration
#
-comment "NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may"
-comment "also be needed; see USB_STORAGE Help for more info"
+comment "NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; see USB_STORAGE Help for more info"
config USB_STORAGE
tristate "USB Mass Storage support"
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dc98ceecb724..d2f476e48d0c 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -64,7 +64,7 @@ static const char* host_info(struct Scsi_Host *host)
return us->scsi_name;
}
-static int slave_alloc (struct scsi_device *sdev)
+static int sdev_init (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
@@ -88,7 +88,7 @@ static int slave_alloc (struct scsi_device *sdev)
return 0;
}
-static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
+static int sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct us_data *us = host_to_us(sdev->host);
struct device *dev = us->pusb_dev->bus->sysdev;
@@ -127,7 +127,7 @@ static int device_configure(struct scsi_device *sdev, struct queue_limits *lim)
lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT);
/*
- * We can't put these settings in slave_alloc() because that gets
+ * We can't put these settings in sdev_init() because that gets
* called before the device type is known. Consequently these
* settings can't be overridden via the scsi devinfo mechanism.
*/
@@ -634,8 +634,8 @@ static const struct scsi_host_template usb_stor_host_template = {
/* unknown initiator id */
.this_id = -1,
- .slave_alloc = slave_alloc,
- .device_configure = device_configure,
+ .sdev_init = sdev_init,
+ .sdev_configure = sdev_configure,
.target_alloc = target_alloc,
/* lots of sg segments can be handled */
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 087c706bb315..c33cbf177e6f 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cdrom.h>
#include <scsi/scsi.h>
@@ -651,8 +652,7 @@ static int usbat_hp8200e_rw_block_test(struct us_data *us,
return USB_STOR_TRANSPORT_FAILED;
usb_stor_dbg(us, "Redoing %s\n",
- direction == DMA_TO_DEVICE
- ? "write" : "read");
+ str_write_read(direction == DMA_TO_DEVICE));
} else if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 9d767f6bf722..e6bc8ecaecbb 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1087,13 +1087,9 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
- /*
- * If we have a successful request, return the result if valid. The
- * CBW LUN field is 4 bits wide, so the value reported by the device
- * should fit into that.
- */
+ /* If we have a successful request, return the result if valid. */
if (result > 0) {
- if (us->iobuf[0] < 16) {
+ if (us->iobuf[0] <= US_BULK_MAX_LUN_LIMIT) {
return us->iobuf[0];
} else {
dev_info(&us->pusb_intf->dev,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index f9ad90ce7af4..4ed0dc19afe0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -817,7 +817,7 @@ static int uas_target_alloc(struct scsi_target *starget)
return 0;
}
-static int uas_slave_alloc(struct scsi_device *sdev)
+static int uas_sdev_init(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
@@ -832,8 +832,8 @@ static int uas_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int uas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int uas_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct uas_dev_info *devinfo = sdev->hostdata;
@@ -905,8 +905,8 @@ static const struct scsi_host_template uas_host_template = {
.name = "uas",
.queuecommand = uas_queuecommand,
.target_alloc = uas_target_alloc,
- .slave_alloc = uas_slave_alloc,
- .device_configure = uas_device_configure,
+ .sdev_init = uas_sdev_init,
+ .sdev_configure = uas_sdev_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 1a6b5e872b0d..7867fa7c405d 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -23,4 +23,13 @@ config TYPEC_NVIDIA_ALTMODE
To compile this driver as a module, choose M here: the
module will be called typec_nvidia.
+config TYPEC_TBT_ALTMODE
+ tristate "Thunderbolt3 Alternate Mode driver"
+ help
+ Select this option if you have Thunderbolt3 hardware on your
+ system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called typec_thunderbolt.
+
endmenu
diff --git a/drivers/usb/typec/altmodes/Makefile b/drivers/usb/typec/altmodes/Makefile
index 45717548b396..508a68351bd2 100644
--- a/drivers/usb/typec/altmodes/Makefile
+++ b/drivers/usb/typec/altmodes/Makefile
@@ -4,3 +4,5 @@ obj-$(CONFIG_TYPEC_DP_ALTMODE) += typec_displayport.o
typec_displayport-y := displayport.o
obj-$(CONFIG_TYPEC_NVIDIA_ALTMODE) += typec_nvidia.o
typec_nvidia-y := nvidia.o
+obj-$(CONFIG_TYPEC_TBT_ALTMODE) += typec_thunderbolt.o
+typec_thunderbolt-y := thunderbolt.o
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 2f03190a9873..ac84a6d64c2f 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -252,7 +252,7 @@ static void dp_altmode_work(struct work_struct *work)
case DP_STATE_ENTER:
ret = typec_altmode_enter(dp->alt, NULL);
if (ret && ret != -EBUSY)
- dev_err(&dp->alt->dev, "failed to enter mode\n");
+ dev_err(&dp->alt->dev, "failed to enter mode: %d\n", ret);
break;
case DP_STATE_ENTER_PRIME:
ret = typec_cable_altmode_enter(dp->alt, TYPEC_PLUG_SOP_P, NULL);
@@ -791,7 +791,7 @@ void dp_altmode_remove(struct typec_altmode *alt)
EXPORT_SYMBOL_GPL(dp_altmode_remove);
static const struct typec_device_id dp_typec_id[] = {
- { USB_TYPEC_DP_SID, USB_TYPEC_DP_MODE },
+ { USB_TYPEC_DP_SID },
{ },
};
MODULE_DEVICE_TABLE(typec, dp_typec_id);
diff --git a/drivers/usb/typec/altmodes/nvidia.c b/drivers/usb/typec/altmodes/nvidia.c
index fe70b36f078f..2b77d931e494 100644
--- a/drivers/usb/typec/altmodes/nvidia.c
+++ b/drivers/usb/typec/altmodes/nvidia.c
@@ -24,7 +24,7 @@ static void nvidia_altmode_remove(struct typec_altmode *alt)
}
static const struct typec_device_id nvidia_typec_id[] = {
- { USB_TYPEC_NVIDIA_VLINK_SID, TYPEC_ANY_MODE },
+ { USB_TYPEC_NVIDIA_VLINK_SID },
{ },
};
MODULE_DEVICE_TABLE(typec, nvidia_typec_id);
diff --git a/drivers/usb/typec/altmodes/thunderbolt.c b/drivers/usb/typec/altmodes/thunderbolt.c
new file mode 100644
index 000000000000..1b475b1d98e7
--- /dev/null
+++ b/drivers/usb/typec/altmodes/thunderbolt.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Type-C Thunderbolt3 Alternate Mode driver
+ *
+ * Copyright (C) 2019 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_tbt.h>
+
+enum tbt_state {
+ TBT_STATE_IDLE,
+ TBT_STATE_SOP_P_ENTER,
+ TBT_STATE_SOP_PP_ENTER,
+ TBT_STATE_ENTER,
+ TBT_STATE_EXIT,
+ TBT_STATE_SOP_PP_EXIT,
+ TBT_STATE_SOP_P_EXIT
+};
+
+struct tbt_altmode {
+ enum tbt_state state;
+ struct typec_cable *cable;
+ struct typec_altmode *alt;
+ struct typec_altmode *plug[2];
+ u32 enter_vdo;
+
+ struct work_struct work;
+ struct mutex lock; /* device lock */
+};
+
+static bool tbt_ready(struct typec_altmode *alt);
+
+static int tbt_enter_mode(struct tbt_altmode *tbt)
+{
+ struct typec_altmode *plug = tbt->plug[TYPEC_PLUG_SOP_P];
+ u32 vdo;
+
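+ /*
+ * Build the Enter Mode VDO: keep the vendor- and Intel-specific bits
+ * from the partner's mode VDO, then add the cable parameters learned
+ * from the SOP' plug, or fall back to a USB3 passive cable speed.
+ */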
+ vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1);
+ vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0;
+ vdo |= TBT_MODE;
+
+ if (plug) {
+ if (typec_cable_is_active(tbt->cable))
+ vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo));
+ vdo |= plug->vdo & TBT_CABLE_ROUNDED;
+ vdo |= plug->vdo & TBT_CABLE_OPTICAL;
+ vdo |= plug->vdo & TBT_CABLE_RETIMER;
+ vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING;
+ } else {
+ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE);
+ }
+
+ tbt->enter_vdo = vdo;
+ return typec_altmode_enter(tbt->alt, &vdo);
+}
+
+static void tbt_altmode_work(struct work_struct *work)
+{
+ struct tbt_altmode *tbt = container_of(work, struct tbt_altmode, work);
+ int ret;
+
+ mutex_lock(&tbt->lock);
+
+ switch (tbt->state) {
+ case TBT_STATE_SOP_P_ENTER:
+ ret = typec_cable_altmode_enter(tbt->alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret) {
+ dev_dbg(&tbt->plug[TYPEC_PLUG_SOP_P]->dev,
+ "failed to enter mode (%d)\n", ret);
+ goto disable_plugs;
+ }
+ break;
+ case TBT_STATE_SOP_PP_ENTER:
+ ret = typec_cable_altmode_enter(tbt->alt, TYPEC_PLUG_SOP_PP, NULL);
+ if (ret) {
+ dev_dbg(&tbt->plug[TYPEC_PLUG_SOP_PP]->dev,
+ "failed to enter mode (%d)\n", ret);
+ goto disable_plugs;
+ }
+ break;
+ case TBT_STATE_ENTER:
+ ret = tbt_enter_mode(tbt);
+ if (ret)
+ dev_dbg(&tbt->alt->dev, "failed to enter mode (%d)\n",
+ ret);
+ break;
+ case TBT_STATE_EXIT:
+ typec_altmode_exit(tbt->alt);
+ break;
+ case TBT_STATE_SOP_PP_EXIT:
+ typec_cable_altmode_exit(tbt->alt, TYPEC_PLUG_SOP_PP);
+ break;
+ case TBT_STATE_SOP_P_EXIT:
+ typec_cable_altmode_exit(tbt->alt, TYPEC_PLUG_SOP_P);
+ break;
+ default:
+ break;
+ }
+
+ tbt->state = TBT_STATE_IDLE;
+
+ mutex_unlock(&tbt->lock);
+ return;
+
+disable_plugs:
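+ /* Cable altmode entry failed: drop the plug handles and enter the port altmode directly. */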
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+
+ tbt->plug[i] = NULL;
+ }
+
+ tbt->state = TBT_STATE_ENTER;
+ schedule_work(&tbt->work);
+ mutex_unlock(&tbt->lock);
+}
+
+/*
+ * If SOP' is available, enter that first (which will trigger a VDM response
+ * that will enter SOP" if available and then the port). If entering SOP' fails,
+ * stop attempting to enter either cable altmode (probably not supported) and
+ * directly enter the port altmode.
+ */
+static int tbt_enter_modes_ordered(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int ret = 0;
+
+ lockdep_assert_held(&tbt->lock);
+
+ if (!tbt_ready(tbt->alt))
+ return -ENODEV;
+
+ if (tbt->plug[TYPEC_PLUG_SOP_P]) {
+ ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret < 0) {
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+
+ tbt->plug[i] = NULL;
+ }
+ } else {
+ return ret;
+ }
+ }
+
+ return tbt_enter_mode(tbt);
+}
+
+static int tbt_cable_altmode_vdm(struct typec_altmode *alt,
+ enum typec_plug_index sop, const u32 hdr,
+ const u32 *vdo, int count)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+
+ mutex_lock(&tbt->lock);
+
+ if (tbt->state != TBT_STATE_IDLE) {
+ mutex_unlock(&tbt->lock);
+ return -EBUSY;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /*
+ * Following the order described in USB Type-C Spec
+ * R2.0 Section 6.7.3: SOP', SOP", then port.
+ */
+ if (sop == TYPEC_PLUG_SOP_P) {
+ if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_ENTER;
+ else
+ tbt->state = TBT_STATE_ENTER;
+ } else if (sop == TYPEC_PLUG_SOP_PP)
+ tbt->state = TBT_STATE_ENTER;
+
+ break;
+ case CMD_EXIT_MODE:
+ /* Exit in opposite order: Port, SOP", then SOP'. */
+ if (sop == TYPEC_PLUG_SOP_PP)
+ tbt->state = TBT_STATE_SOP_P_EXIT;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (tbt->state != TBT_STATE_IDLE)
+ schedule_work(&tbt->work);
+
+ mutex_unlock(&tbt->lock);
+ return 0;
+}
+
+static int tbt_altmode_vdm(struct typec_altmode *alt,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ struct typec_thunderbolt_data data;
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+
+ mutex_lock(&tbt->lock);
+
+ if (tbt->state != TBT_STATE_IDLE) {
+ mutex_unlock(&tbt->lock);
+ return -EBUSY;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ /* Port altmode is last to enter and first to exit. */
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ memset(&data, 0, sizeof(data));
+
+ data.device_mode = tbt->alt->vdo;
+ data.enter_vdo = tbt->enter_vdo;
+ if (tbt->plug[TYPEC_PLUG_SOP_P])
+ data.cable_mode = tbt->plug[TYPEC_PLUG_SOP_P]->vdo;
+
+ typec_altmode_notify(alt, TYPEC_STATE_MODAL, &data);
+ break;
+ case CMD_EXIT_MODE:
+ if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_EXIT;
+ else if (tbt->plug[TYPEC_PLUG_SOP_P])
+ tbt->state = TBT_STATE_SOP_P_EXIT;
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ dev_warn(&alt->dev, "Enter Mode refused\n");
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (tbt->state != TBT_STATE_IDLE)
+ schedule_work(&tbt->work);
+
+ mutex_unlock(&tbt->lock);
+
+ return 0;
+}
+
+static int tbt_altmode_activate(struct typec_altmode *alt, int activate)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ int ret;
+
+ mutex_lock(&tbt->lock);
+
+ if (activate)
+ ret = tbt_enter_modes_ordered(alt);
+ else
+ ret = typec_altmode_exit(alt);
+
+ mutex_unlock(&tbt->lock);
+
+ return ret;
+}
+
+static const struct typec_altmode_ops tbt_altmode_ops = {
+ .vdm = tbt_altmode_vdm,
+ .activate = tbt_altmode_activate
+};
+
+static const struct typec_cable_ops tbt_cable_ops = {
+ .vdm = tbt_cable_altmode_vdm,
+};
+
+static int tbt_altmode_probe(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt;
+
+ tbt = devm_kzalloc(&alt->dev, sizeof(*tbt), GFP_KERNEL);
+ if (!tbt)
+ return -ENOMEM;
+
+ INIT_WORK(&tbt->work, tbt_altmode_work);
+ mutex_init(&tbt->lock);
+ tbt->alt = alt;
+
+ alt->desc = "Thunderbolt3";
+ typec_altmode_set_drvdata(alt, tbt);
+ typec_altmode_set_ops(alt, &tbt_altmode_ops);
+
+ if (tbt_ready(alt)) {
+ if (tbt->plug[TYPEC_PLUG_SOP_P])
+ tbt->state = TBT_STATE_SOP_P_ENTER;
+ else if (tbt->plug[TYPEC_PLUG_SOP_PP])
+ tbt->state = TBT_STATE_SOP_PP_ENTER;
+ else
+ tbt->state = TBT_STATE_ENTER;
+ schedule_work(&tbt->work);
+ }
+
+ return 0;
+}
+
+static void tbt_altmode_remove(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+
+ for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ if (tbt->plug[i])
+ typec_altmode_put_plug(tbt->plug[i]);
+ }
+
+ if (tbt->cable)
+ typec_cable_put(tbt->cable);
+}
+
+static bool tbt_ready(struct typec_altmode *alt)
+{
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ struct typec_altmode *plug;
+
+ if (tbt->cable)
+ return true;
+
+ /* Thunderbolt 3 requires a cable with eMarker */
+ tbt->cable = typec_cable_get(typec_altmode2port(tbt->alt));
+ if (!tbt->cable)
+ return false;
+
+ /* We accept systems without SOP' or SOP''. This means the port altmode
+ * driver will be responsible for properly ordering entry/exit.
+ */
+ for (int i = 0; i < TYPEC_PLUG_SOP_PP + 1; i++) {
+ plug = typec_altmode_get_plug(tbt->alt, i);
+ if (IS_ERR(plug))
+ continue;
+
+ if (!plug || plug->svid != USB_TYPEC_TBT_SID)
+ break;
+
+ plug->desc = "Thunderbolt3";
+ plug->cable_ops = &tbt_cable_ops;
+ typec_altmode_set_drvdata(plug, tbt);
+
+ tbt->plug[i] = plug;
+ }
+
+ return true;
+}
+
+static const struct typec_device_id tbt_typec_id[] = {
+ { USB_TYPEC_TBT_SID },
+ { }
+};
+MODULE_DEVICE_TABLE(typec, tbt_typec_id);
+
+static struct typec_altmode_driver tbt_altmode_driver = {
+ .id_table = tbt_typec_id,
+ .probe = tbt_altmode_probe,
+ .remove = tbt_altmode_remove,
+ .driver = {
+ .name = "typec-thunderbolt",
+ }
+};
+module_typec_altmode_driver(tbt_altmode_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Thunderbolt3 USB Type-C Alternate Mode");
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index aa879253d3b8..ae90688d23e4 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -454,8 +454,7 @@ static int typec_match(struct device *dev, const struct device_driver *driver)
const struct typec_device_id *id;
for (id = drv->id_table; id->svid; id++)
- if (id->svid == altmode->svid &&
- (id->mode == TYPEC_ANY_MODE || id->mode == altmode->mode))
+ if (id->svid == altmode->svid)
return 1;
return 0;
}
@@ -470,8 +469,7 @@ static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (add_uevent_var(env, "MODE=%u", altmode->mode))
return -ENOMEM;
- return add_uevent_var(env, "MODALIAS=typec:id%04Xm%02X",
- altmode->svid, altmode->mode);
+ return add_uevent_var(env, "MODALIAS=typec:id%04X", altmode->svid);
}
static int typec_altmode_create_links(struct altmode *alt)
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 4b3047e055a3..9c76c3d0c6cf 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
@@ -229,21 +230,21 @@ static const char * const usb_modes[] = {
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
-static int altmode_match(struct device *dev, void *data)
+static int altmode_match(struct device *dev, const void *data)
{
struct typec_altmode *adev = to_typec_altmode(dev);
- struct typec_device_id *id = data;
+ const struct typec_device_id *id = data;
if (!is_typec_altmode(dev))
return 0;
- return ((adev->svid == id->svid) && (adev->mode == id->mode));
+ return (adev->svid == id->svid);
}
static void typec_altmode_set_partner(struct altmode *altmode)
{
struct typec_altmode *adev = &altmode->adev;
- struct typec_device_id id = { adev->svid, adev->mode, };
+ struct typec_device_id id = { adev->svid };
struct typec_port *port = typec_altmode2port(adev);
struct altmode *partner;
struct device *dev;
@@ -361,7 +362,7 @@ active_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct typec_altmode *alt = to_typec_altmode(dev);
- return sprintf(buf, "%s\n", alt->active ? "yes" : "no");
+ return sprintf(buf, "%s\n", str_yes_no(alt->active));
}
static ssize_t active_store(struct device *dev, struct device_attribute *attr,
@@ -458,7 +459,8 @@ static umode_t typec_altmode_attr_is_visible(struct kobject *kobj,
struct typec_altmode *adev = to_typec_altmode(kobj_to_dev(kobj));
if (attr == &dev_attr_active.attr)
- if (!adev->ops || !adev->ops->activate)
+ if (!is_typec_port(adev->dev.parent) &&
+ (!adev->ops || !adev->ops->activate))
return 0444;
return attr->mode;
@@ -563,7 +565,7 @@ typec_register_altmode(struct device *parent,
if (is_port) {
alt->attrs[3] = &dev_attr_supported_roles.attr;
- alt->adev.active = true; /* Enabled by default */
+ alt->adev.active = !desc->inactive; /* Enabled by default */
}
sprintf(alt->group_name, "mode%d", desc->mode);
@@ -706,7 +708,7 @@ static ssize_t supports_usb_power_delivery_show(struct device *dev,
{
struct typec_partner *p = to_typec_partner(dev);
- return sprintf(buf, "%s\n", p->usb_pd ? "yes" : "no");
+ return sprintf(buf, "%s\n", str_yes_no(p->usb_pd));
}
static DEVICE_ATTR_RO(supports_usb_power_delivery);
@@ -1282,11 +1284,6 @@ const struct device_type typec_cable_dev_type = {
.release = typec_cable_release,
};
-static int cable_match(struct device *dev, void *data)
-{
- return is_typec_cable(dev);
-}
-
/**
* typec_cable_get - Get a reference to the USB Type-C cable
* @port: The USB Type-C Port the cable is connected to
@@ -1298,7 +1295,8 @@ struct typec_cable *typec_cable_get(struct typec_port *port)
{
struct device *dev;
- dev = device_find_child(&port->dev, NULL, cable_match);
+ dev = device_find_child(&port->dev, &typec_cable_dev_type,
+ device_match_type);
if (!dev)
return NULL;
@@ -1858,7 +1856,7 @@ static ssize_t vconn_source_show(struct device *dev,
struct typec_port *port = to_typec_port(dev);
return sprintf(buf, "%s\n",
- port->vconn_role == TYPEC_SOURCE ? "yes" : "no");
+ str_yes_no(port->vconn_role == TYPEC_SOURCE));
}
static DEVICE_ATTR_RW(vconn_source);
@@ -2028,16 +2026,12 @@ const struct device_type typec_port_dev_type = {
/* --------------------------------------- */
/* Driver callbacks to report role updates */
-static int partner_match(struct device *dev, void *data)
-{
- return is_typec_partner(dev);
-}
-
static struct typec_partner *typec_get_partner(struct typec_port *port)
{
struct device *dev;
- dev = device_find_child(&port->dev, NULL, partner_match);
+ dev = device_find_child(&port->dev, &typec_partner_dev_type,
+ device_match_type);
if (!dev)
return NULL;
@@ -2170,7 +2164,9 @@ void typec_set_pwr_opmode(struct typec_port *port,
sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
- partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ partner_dev = device_find_child(&port->dev,
+ &typec_partner_dev_type,
+ device_match_type);
if (partner_dev) {
struct typec_partner *partner = to_typec_partner(partner_dev);
@@ -2334,7 +2330,9 @@ int typec_get_negotiated_svdm_version(struct typec_port *port)
enum usb_pd_svdm_ver svdm_version;
struct device *partner_dev;
- partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ partner_dev = device_find_child(&port->dev,
+ &typec_partner_dev_type,
+ device_match_type);
if (!partner_dev)
return -ENODEV;
@@ -2361,7 +2359,8 @@ int typec_get_cable_svdm_version(struct typec_port *port)
enum usb_pd_svdm_ver svdm_version;
struct device *cable_dev;
- cable_dev = device_find_child(&port->dev, NULL, cable_match);
+ cable_dev = device_find_child(&port->dev, &typec_cable_dev_type,
+ device_match_type);
if (!cable_dev)
return -ENODEV;
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index fb1242e82ffd..3ecc688dda82 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -16,10 +16,17 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
+#define HD3SS3220_REG_CN_STAT 0x08
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
#define HD3SS3220_REG_GEN_CTRL 0x0A
#define HD3SS3220_REG_DEV_REV 0xA0
+/* Register HD3SS3220_REG_CN_STAT */
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_DEFAULT 0x00
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_MID BIT(6)
+#define HD3SS3220_REG_CN_STAT_CURRENT_MODE_HIGH BIT(7)
+
/* Register HD3SS3220_REG_CN_STAT_CTRL*/
#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
@@ -28,10 +35,16 @@
#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
/* Register HD3SS3220_REG_GEN_CTRL*/
+#define HD3SS3220_REG_GEN_CTRL_DISABLE_TERM BIT(0)
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_MASK (BIT(5) | BIT(4))
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DFP BIT(5)
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_UFP BIT(4)
+#define HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DRP (BIT(5) | BIT(4))
struct hd3ss3220 {
struct device *dev;
@@ -43,8 +56,96 @@ struct hd3ss3220 {
bool poll;
};
-static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+static int hd3ss3220_set_power_opmode(struct hd3ss3220 *hd3ss3220, int power_opmode)
+{
+ int current_mode;
+
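+ /* Map the Type-C power operation mode onto the advertised CURRENT_MODE field */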
+ switch (power_opmode) {
+ case TYPEC_PWR_MODE_USB:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_DEFAULT;
+ break;
+ case TYPEC_PWR_MODE_1_5A:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_MID;
+ break;
+ case TYPEC_PWR_MODE_3_0A:
+ current_mode = HD3SS3220_REG_CN_STAT_CURRENT_MODE_HIGH;
+ break;
+ case TYPEC_PWR_MODE_PD: /* Power delivery not supported */
+ default:
+ dev_err(hd3ss3220->dev, "bad power operation mode: %d\n", power_opmode);
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT,
+ HD3SS3220_REG_CN_STAT_CURRENT_MODE_MASK,
+ current_mode);
+}
+
+static int hd3ss3220_set_port_type(struct hd3ss3220 *hd3ss3220, int type)
+{
+ int mode_select, err;
+
+ switch (type) {
+ case TYPEC_PORT_SRC:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DFP;
+ break;
+ case TYPEC_PORT_SNK:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_UFP;
+ break;
+ case TYPEC_PORT_DRP:
+ mode_select = HD3SS3220_REG_GEN_CTRL_MODE_SELECT_DRP;
+ break;
+ default:
+ dev_err(hd3ss3220->dev, "bad port type: %d\n", type);
+ return -EINVAL;
+ }
+
+ /* Disable termination before changing MODE_SELECT as required by datasheet */
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM);
+ if (err < 0) {
+ dev_err(hd3ss3220->dev, "Failed to disable port for mode change: %d\n", err);
+ return err;
+ }
+
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_MODE_SELECT_MASK,
+ mode_select);
+ if (err < 0) {
+ dev_err(hd3ss3220->dev, "Failed to change mode: %d\n", err);
+ regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM, 0);
+ return err;
+ }
+
+ err = regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_DISABLE_TERM, 0);
+ if (err < 0)
+ dev_err(hd3ss3220->dev, "Failed to re-enable port after mode change: %d\n", err);
+
+ return err;
+}
+
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int prefer_role)
{
+ int src_pref;
+
+ switch (prefer_role) {
+ case TYPEC_NO_PREFERRED_ROLE:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT;
+ break;
+ case TYPEC_SINK:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ break;
+ case TYPEC_SOURCE:
+ src_pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ break;
+ default:
+ dev_err(hd3ss3220->dev, "bad role preference: %d\n", prefer_role);
+ return -EINVAL;
+ }
+
return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
src_pref);
@@ -76,31 +177,23 @@ static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
return attached_state;
}
-static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+static int hd3ss3220_try_role(struct typec_port *port, int role)
{
struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
- enum usb_role role_val;
- int pref, ret = 0;
- if (role == TYPEC_HOST) {
- role_val = USB_ROLE_HOST;
- pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
- } else {
- role_val = USB_ROLE_DEVICE;
- pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
- }
-
- ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
- usleep_range(10, 100);
+ return hd3ss3220_set_source_pref(hd3ss3220, role);
+}
- usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
- typec_set_data_role(hd3ss3220->port, role);
+static int hd3ss3220_port_type_set(struct typec_port *port, enum typec_port_type type)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
- return ret;
+ return hd3ss3220_set_port_type(hd3ss3220, type);
}
static const struct typec_operations hd3ss3220_ops = {
- .dr_set = hd3ss3220_dr_set
+ .try_role = hd3ss3220_try_role,
+ .port_type_set = hd3ss3220_port_type_set,
};
static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
@@ -108,9 +201,6 @@ static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
- if (role_state == USB_ROLE_NONE)
- hd3ss3220_set_source_pref(hd3ss3220,
- HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
switch (role_state) {
case USB_ROLE_HOST:
@@ -162,6 +252,67 @@ static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
return hd3ss3220_irq(hd3ss3220);
}
+static int hd3ss3220_configure_power_opmode(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector)
+{
+ /*
+ * Supported power operation mode can be configured through device tree
+ */
+ const char *cap_str;
+ int ret, power_opmode;
+
+ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
+ if (ret)
+ return 0;
+
+ power_opmode = typec_find_pwr_opmode(cap_str);
+ return hd3ss3220_set_power_opmode(hd3ss3220, power_opmode);
+}
+
+static int hd3ss3220_configure_port_type(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector,
+ struct typec_capability *cap)
+{
+ /*
+ * Port type can be configured through device tree
+ */
+ const char *cap_str;
+ int ret;
+
+ ret = fwnode_property_read_string(connector, "power-role", &cap_str);
+ if (ret)
+ return 0;
+
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+
+ cap->type = ret;
+ return hd3ss3220_set_port_type(hd3ss3220, cap->type);
+}
+
+static int hd3ss3220_configure_source_pref(struct hd3ss3220 *hd3ss3220,
+ struct fwnode_handle *connector,
+ struct typec_capability *cap)
+{
+ /*
+ * Preferred role can be configured through device tree
+ */
+ const char *cap_str;
+ int ret;
+
+ ret = fwnode_property_read_string(connector, "try-power-role", &cap_str);
+ if (ret)
+ return 0;
+
+ ret = typec_find_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+
+ cap->prefer_role = ret;
+ return hd3ss3220_set_source_pref(hd3ss3220, cap->prefer_role);
+}
+
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
@@ -188,8 +339,6 @@ static int hd3ss3220_probe(struct i2c_client *client)
if (IS_ERR(hd3ss3220->regmap))
return PTR_ERR(hd3ss3220->regmap);
- hd3ss3220_set_source_pref(hd3ss3220,
- HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
/* For backward compatibility check the connector child node first */
connector = device_get_named_child_node(hd3ss3220->dev, "connector");
if (connector) {
@@ -217,12 +366,24 @@ static int hd3ss3220_probe(struct i2c_client *client)
typec_cap.ops = &hd3ss3220_ops;
typec_cap.fwnode = connector;
+ ret = hd3ss3220_configure_source_pref(hd3ss3220, connector, &typec_cap);
+ if (ret < 0)
+ goto err_put_role;
+
+ ret = hd3ss3220_configure_port_type(hd3ss3220, connector, &typec_cap);
+ if (ret < 0)
+ goto err_put_role;
+
hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(hd3ss3220->port)) {
ret = PTR_ERR(hd3ss3220->port);
goto err_put_role;
}
+ ret = hd3ss3220_configure_power_opmode(hd3ss3220, connector);
+ if (ret < 0)
+ goto err_unreg_port;
+
hd3ss3220_set_role(hd3ss3220);
ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
if (ret < 0)
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 5dfe95754394..65dda9183e6f 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -718,7 +718,7 @@ DEFINE_SHOW_ATTRIBUTE(port_iom_status);
static void pmc_mux_port_debugfs_init(struct pmc_usb_port *port)
{
struct dentry *debugfs_dir;
- char name[6];
+ char name[8];
snprintf(name, sizeof(name), "port%d", port->usb3_port - 1);
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index e2fe479e16ad..f15c63d3a8f4 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/typec.h>
@@ -733,7 +734,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
mutex_lock(&chip->lock);
if (chip->vconn_on == on) {
- fusb302_log(chip, "vconn is already %s", on ? "On" : "Off");
+ fusb302_log(chip, "vconn is already %s", str_on_off(on));
goto done;
}
if (on) {
@@ -746,7 +747,7 @@ static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
if (ret < 0)
goto done;
chip->vconn_on = on;
- fusb302_log(chip, "vconn := %s", on ? "On" : "Off");
+ fusb302_log(chip, "vconn := %s", str_on_off(on));
done:
mutex_unlock(&chip->lock);
@@ -761,7 +762,7 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
mutex_lock(&chip->lock);
if (chip->vbus_on == on) {
- fusb302_log(chip, "vbus is already %s", on ? "On" : "Off");
+ fusb302_log(chip, "vbus is already %s", str_on_off(on));
} else {
if (on)
ret = regulator_enable(chip->vbus);
@@ -769,15 +770,14 @@ static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
ret = regulator_disable(chip->vbus);
if (ret < 0) {
fusb302_log(chip, "cannot %s vbus regulator, ret=%d",
- on ? "enable" : "disable", ret);
+ str_enable_disable(on), ret);
goto done;
}
chip->vbus_on = on;
- fusb302_log(chip, "vbus := %s", on ? "On" : "Off");
+ fusb302_log(chip, "vbus := %s", str_on_off(on));
}
if (chip->charge_on == charge)
- fusb302_log(chip, "charge is already %s",
- charge ? "On" : "Off");
+ fusb302_log(chip, "charge is already %s", str_on_off(charge));
else
chip->charge_on = charge;
@@ -854,16 +854,16 @@ static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
ret = fusb302_pd_set_auto_goodcrc(chip, on);
if (ret < 0) {
fusb302_log(chip, "cannot turn %s auto GCRC, ret=%d",
- on ? "on" : "off", ret);
+ str_on_off(on), ret);
goto done;
}
ret = fusb302_pd_set_interrupts(chip, on);
if (ret < 0) {
fusb302_log(chip, "cannot turn %s pd interrupts, ret=%d",
- on ? "on" : "off", ret);
+ str_on_off(on), ret);
goto done;
}
- fusb302_log(chip, "pd := %s", on ? "on" : "off");
+ fusb302_log(chip, "pd := %s", str_on_off(on));
done:
mutex_unlock(&chip->lock);
@@ -1531,7 +1531,7 @@ static void fusb302_irq_work(struct work_struct *work)
if (interrupt & FUSB_REG_INTERRUPT_VBUSOK) {
vbus_present = !!(status0 & FUSB_REG_STATUS0_VBUSOK);
fusb302_log(chip, "IRQ: VBUS_OK, vbus=%s",
- vbus_present ? "On" : "Off");
+ str_on_off(vbus_present));
if (vbus_present != chip->vbus_present) {
chip->vbus_present = vbus_present;
tcpm_vbus_change(chip->tcpm_port);
@@ -1562,7 +1562,7 @@ static void fusb302_irq_work(struct work_struct *work)
if ((interrupt & FUSB_REG_INTERRUPT_COMP_CHNG) && intr_comp_chng) {
comp_result = !!(status0 & FUSB_REG_STATUS0_COMP);
fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
- comp_result ? "true" : "false");
+ str_true_false(comp_result));
if (comp_result) {
/* cc level > Rd_threshold, detach */
chip->cc1 = TYPEC_CC_OPEN;
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 726423684bae..18303b34594b 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
@@ -418,7 +419,7 @@ static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
- dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
+ dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", str_on_off(on));
return ret;
}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
index df79059cda67..8fac171778da 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
@@ -38,7 +39,7 @@ static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
struct device *dev = tcpm->dev;
- dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
+ dev_dbg(dev, "set_pd_rx: %s\n", str_on_off(on));
return 0;
}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index c37dede62e12..4fc83dcfae64 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
@@ -562,7 +563,8 @@ done:
spin_unlock_irqrestore(&pmic_typec_port->lock, flags);
dev_dbg(dev, "set_vconn: orientation %d control 0x%08x state %s cc %s vconn %s\n",
- orientation, value, on ? "on" : "off", misc_to_vconn(misc), misc_to_cc(misc));
+ orientation, value, str_on_off(on), misc_to_vconn(misc),
+ misc_to_cc(misc));
return ret;
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 24a6a4354df8..19ab6647af70 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -27,6 +27,7 @@
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
+#define VPPS_SHUTDOWN_MIN_PERCENT 85
struct tcpci {
struct device *dev;
@@ -282,7 +283,7 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
if (cc2 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
- else
+ else if (cc2 >= TYPEC_CC_RP_DEF)
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
} else {
reg &= ~TCPC_ROLE_CTRL_CC1;
@@ -290,7 +291,7 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
if (cc1 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP);
- else
+ else if (cc1 >= TYPEC_CC_RP_DEF)
reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
}
}
@@ -366,7 +367,8 @@ static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
}
static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
- bool pps_active, u32 requested_vbus_voltage_mv)
+ bool pps_active, u32 requested_vbus_voltage_mv,
+ u32 apdo_min_voltage_mv)
{
struct tcpci *tcpci = tcpc_to_tcpci(dev);
unsigned int pwr_ctrl, threshold = 0;
@@ -388,9 +390,12 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
} else if (mode == TYPEC_PWR_MODE_PD) {
if (pps_active)
- threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
- VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
- VSINKDISCONNECT_PD_MIN_PERCENT / 100;
+ /*
+ * To prevent disconnect when the source is in Current Limit Mode,
+ * set the threshold to the lowest possible voltage, vPpsShutdown (min).
+ */
+ threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
+ VSINKPD_MIN_IR_DROP_MV;
else
threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
index 1479f961772d..ed822f438a09 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6370.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -11,7 +11,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 95c0c63119ac..47be450d2be3 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/usb.h>
#include <linux/usb/pd.h>
#include <linux/usb/pd_ado.h>
@@ -185,7 +186,8 @@
S(UNSTRUCTURED_VDMS), \
S(STRUCTURED_VDMS), \
S(COUNTRY_INFO), \
- S(COUNTRY_CODES)
+ S(COUNTRY_CODES), \
+ S(REVISION_INFORMATION)
#define GENERATE_ENUM(e) e
#define GENERATE_STRING(s) #s
@@ -225,6 +227,7 @@ enum pd_msg_request {
PD_MSG_CTRL_NOT_SUPP,
PD_MSG_DATA_SINK_CAP,
PD_MSG_DATA_SOURCE_CAP,
+ PD_MSG_DATA_REV,
};
enum adev_actions {
@@ -310,6 +313,13 @@ struct pd_data {
unsigned int operating_snk_mw;
};
+struct pd_revision_info {
+ u8 rev_major;
+ u8 rev_minor;
+ u8 ver_major;
+ u8 ver_minor;
+};
+
/*
* @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
* @ps_src_wait_off_time: Deadline (in ms) for tPSSourceOff timer
@@ -567,6 +577,9 @@ struct tcpm_port {
/* Timer deadline values configured at runtime */
struct pd_timings timings;
+
+ /* Indicates maximum (revision, version) supported */
+ struct pd_revision_info pd_rev;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -880,8 +893,8 @@ static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
if (port->tcpc->enable_auto_vbus_discharge) {
ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
- tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
- ret);
+ tcpm_log_force(port, "%s vbus discharge ret:%d",
+ str_enable_disable(enable), ret);
if (!ret)
port->auto_vbus_discharge_enabled = enable;
}
@@ -1234,6 +1247,24 @@ static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_rol
}
}
+static int tcpm_pd_send_revision(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ u32 rmdo;
+
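+ /*
+ * Reply to Get_Revision with a single Revision Message Data Object
+ * (RMDO) carrying the highest PD revision/version this port supports.
+ */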
+ memset(&msg, 0, sizeof(msg));
+ rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
+ port->pd_rev.ver_major, port->pd_rev.ver_minor);
+ msg.payload[0] = cpu_to_le32(rmdo);
+ msg.header = PD_HEADER_LE(PD_DATA_REVISION,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 1);
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
struct pd_message msg;
@@ -2943,10 +2974,12 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
return 0;
ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
- requested_vbus_voltage);
+ requested_vbus_voltage,
+ port->pps_data.min_volt);
tcpm_log_force(port,
- "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
- mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
+ "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
+ mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
+ port->pps_data.min_volt, ret);
return ret;
}
@@ -3537,6 +3570,17 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
break;
+ case PD_CTRL_GET_REVISION:
+ if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
+ REVISION_INFORMATION);
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ break;
default:
tcpm_pd_handle_msg(port,
port->negotiated_rev < PD_REV30 ?
@@ -3781,6 +3825,14 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
tcpm_ams_finish(port);
}
break;
+ case PD_MSG_DATA_REV:
+ ret = tcpm_pd_send_revision(port);
+ if (ret)
+ tcpm_log(port,
+ "Unable to send revision msg, ret=%d",
+ ret);
+ tcpm_ams_finish(port);
+ break;
default:
break;
}
@@ -4390,7 +4442,7 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port)
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
{
- tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
+ tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
if (port->tcpc->set_partner_usb_comm_capable)
port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
@@ -4772,7 +4824,7 @@ static void run_state_machine(struct tcpm_port *port)
port->caps_count = 0;
port->pd_capable = true;
tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
- PD_T_SEND_SOURCE_CAP);
+ PD_T_SENDER_RESPONSE);
}
break;
case SRC_SEND_CAPABILITIES_TIMEOUT:
@@ -7036,7 +7088,9 @@ static void tcpm_port_unregister_pd(struct tcpm_port *port)
static int tcpm_port_register_pd(struct tcpm_port *port)
{
- struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
+ u16 pd_revision = port->typec_caps.pd_revision;
+ u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
+ struct usb_power_delivery_desc desc = { pd_revision, pd_version };
struct usb_power_delivery_capabilities *cap;
int ret, i;
@@ -7331,6 +7385,29 @@ static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fw
return 0;
}
+static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
+{
+ int ret;
+ u8 val[4];
+
+ ret = fwnode_property_count_u8(fwnode, "pd-revision");
+ if (!ret || ret != 4) {
+ tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
+ return;
+ }
+
+ ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
+ if (ret) {
+ tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
+ return;
+ }
+
+ port->pd_rev.rev_major = val[0];
+ port->pd_rev.rev_minor = val[1];
+ port->pd_rev.ver_major = val[2];
+ port->pd_rev.ver_minor = val[3];
+}
+
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
TCPM_PSY_OFFLINE = 0,
@@ -7669,11 +7746,18 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_destroy_wq;
tcpm_fw_get_timings(port, tcpc->fwnode);
+ tcpm_fw_get_pd_revision(port, tcpc->fwnode);
port->try_role = port->typec_caps.prefer_role;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
- port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
+
+ if (port->pd_rev.rev_major)
+ port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
+ port->pd_rev.rev_minor;
+ else
+ port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
+
port->typec_caps.svdm_version = SVDM_VER_2_0;
port->typec_caps.driver_data = port;
port->typec_caps.ops = &tcpm_ops;
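A standalone sketch of the "pd-revision" packing used in the tcpm.c hunks above: the four property bytes become the 16-bit revision and version values consumed by the Type-C class code. The property bytes here are hypothetical example values, not anything taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical "pd-revision" = { rev_major, rev_minor, ver_major, ver_minor } */
	uint8_t val[4] = { 0x03, 0x01, 0x01, 0x08 };
	uint16_t pd_revision = (uint16_t)(val[0] << 8 | val[1]);	/* 0x0301 */
	uint16_t pd_version  = (uint16_t)(val[2] << 8 | val[3]);	/* 0x0108 */

	printf("pd_revision=0x%04x pd_version=0x%04x\n", pd_revision, pd_version);
	return 0;
}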
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 680e1b87b152..75559601fe8f 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -69,6 +69,19 @@ config UCSI_PMIC_GLINK
To compile the driver as a module, choose M here: the module will be
called ucsi_glink.
+config CROS_EC_UCSI
+ tristate "UCSI Driver for ChromeOS EC"
+ depends on MFD_CROS_EC_DEV
+ depends on CROS_USBPD_NOTIFY
+ depends on !EXTCON_TCSS_CROS_EC
+ default MFD_CROS_EC_DEV
+ help
+ This driver enables UCSI support for a ChromeOS EC. The EC is
+ expected to implement a PPM.
+
+ To compile the driver as a module, choose M here: the module
+ will be called cros_ec_ucsi.
+
config UCSI_LENOVO_YOGA_C630
tristate "UCSI Interface Driver for Lenovo Yoga C630"
depends on EC_LENOVO_YOGA_C630
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index aed41d23887b..be98a879104d 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
obj-$(CONFIG_UCSI_STM32G0) += ucsi_stm32g0.o
obj-$(CONFIG_UCSI_PMIC_GLINK) += ucsi_glink.o
+obj-$(CONFIG_CROS_EC_UCSI) += cros_ec_ucsi.o
obj-$(CONFIG_UCSI_LENOVO_YOGA_C630) += ucsi_yoga_c630.o
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
new file mode 100644
index 000000000000..c605c8616726
--- /dev/null
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for ChromeOS EC
+ *
+ * Copyright 2024 Google LLC.
+ */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "ucsi.h"
+
+/*
+ * Maximum size in bytes of a UCSI message between AP and EC
+ */
+#define MAX_EC_DATA_SIZE 256
+
+/*
+ * Maximum time in milliseconds the cros_ec_ucsi driver
+ * will wait for a response to a command or an ack.
+ */
+#define WRITE_TMO_MS 5000
+
+/* Number of times to attempt recovery from a write timeout before giving up. */
+#define WRITE_TMO_CTR_MAX 5
+
+struct cros_ucsi_data {
+ struct device *dev;
+ struct ucsi *ucsi;
+
+ struct cros_ec_device *ec;
+ struct notifier_block nb;
+ struct work_struct work;
+ struct delayed_work write_tmo;
+ int tmo_counter;
+
+ struct completion complete;
+ unsigned long flags;
+};
+
+static int cros_ucsi_read(struct ucsi *ucsi, unsigned int offset, void *val,
+ size_t val_len)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ struct ec_params_ucsi_ppm_get req = {
+ .offset = offset,
+ .size = val_len,
+ };
+ int ret;
+
+ if (val_len > MAX_EC_DATA_SIZE) {
+ dev_err(udata->dev, "Can't read %zu bytes. Too big.\n", val_len);
+ return -EINVAL;
+ }
+
+ ret = cros_ec_cmd(udata->ec, 0, EC_CMD_UCSI_PPM_GET,
+ &req, sizeof(req), val, val_len);
+ if (ret < 0) {
+ dev_warn(udata->dev, "Failed to send EC message UCSI_PPM_GET: error=%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int cros_ucsi_read_version(struct ucsi *ucsi, u16 *version)
+{
+ return cros_ucsi_read(ucsi, UCSI_VERSION, version, sizeof(*version));
+}
+
+static int cros_ucsi_read_cci(struct ucsi *ucsi, u32 *cci)
+{
+ return cros_ucsi_read(ucsi, UCSI_CCI, cci, sizeof(*cci));
+}
+
+static int cros_ucsi_read_message_in(struct ucsi *ucsi, void *val,
+ size_t val_len)
+{
+ return cros_ucsi_read(ucsi, UCSI_MESSAGE_IN, val, val_len);
+}
+
+static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ u8 ec_buf[sizeof(struct ec_params_ucsi_ppm_set) + sizeof(cmd)];
+ struct ec_params_ucsi_ppm_set *req = (struct ec_params_ucsi_ppm_set *) ec_buf;
+ int ret;
+
+ req->offset = UCSI_CONTROL;
+ memcpy(req->data, &cmd, sizeof(cmd));
+ ret = cros_ec_cmd(udata->ec, 0, EC_CMD_UCSI_PPM_SET,
+ req, sizeof(ec_buf), NULL, 0);
+ if (ret < 0) {
+ dev_warn(udata->dev, "Failed to send EC message UCSI_PPM_SET: error=%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+{
+ struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_sync_control_common(ucsi, cmd);
+ switch (ret) {
+ case -EBUSY:
+ /* EC may return -EBUSY if CCI.busy is set.
+ * Convert this to a timeout.
+ */
+ case -ETIMEDOUT:
+ /* Schedule a recovery attempt when we time out
+ * or tried to send a command while still busy.
+ */
+ cancel_delayed_work_sync(&udata->write_tmo);
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ break;
+ case 0:
+ /* Successful write. Cancel any pending recovery work. */
+ cancel_delayed_work_sync(&udata->write_tmo);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ucsi_operations cros_ucsi_ops = {
+ .read_version = cros_ucsi_read_version,
+ .read_cci = cros_ucsi_read_cci,
+ .read_message_in = cros_ucsi_read_message_in,
+ .async_control = cros_ucsi_async_control,
+ .sync_control = cros_ucsi_sync_control,
+};
+
+static void cros_ucsi_work(struct work_struct *work)
+{
+ struct cros_ucsi_data *udata = container_of(work, struct cros_ucsi_data, work);
+ u32 cci;
+
+ if (cros_ucsi_read_cci(udata->ucsi, &cci))
+ return;
+
+ ucsi_notify_common(udata->ucsi, cci);
+}
+
+static void cros_ucsi_write_timeout(struct work_struct *work)
+{
+ struct cros_ucsi_data *udata =
+ container_of(work, struct cros_ucsi_data, write_tmo.work);
+ u32 cci;
+ u64 cmd;
+
+ if (cros_ucsi_read(udata->ucsi, UCSI_CCI, &cci, sizeof(cci))) {
+ dev_err(udata->dev,
+ "Reading CCI failed; no write timeout recovery possible.\n");
+ return;
+ }
+
+ if (cci & UCSI_CCI_BUSY) {
+ udata->tmo_counter++;
+
+ if (udata->tmo_counter <= WRITE_TMO_CTR_MAX)
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ else
+ dev_err(udata->dev,
+ "PPM unresponsive - too many write timeouts.\n");
+
+ return;
+ }
+
+ /* No longer busy means we can reset our timeout counter. */
+ udata->tmo_counter = 0;
+
+ /* Need to ack previous command which may have timed out. */
+ if (cci & UCSI_CCI_COMMAND_COMPLETE) {
+ cmd = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+ cros_ucsi_async_control(udata->ucsi, cmd);
+
+ /* Check again after a few seconds that the system has
+ * recovered to make sure our async write above was successful.
+ */
+ schedule_delayed_work(&udata->write_tmo,
+ msecs_to_jiffies(WRITE_TMO_MS));
+ return;
+ }
+
+ /* We recovered from a previous timeout. Treat this as a recovery from
+ * suspend and call resume.
+ */
+ ucsi_resume(udata->ucsi);
+}
+
+static int cros_ucsi_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_ucsi_data *udata = container_of(nb, struct cros_ucsi_data, nb);
+
+ if (!(host_event & PD_EVENT_PPM))
+ return NOTIFY_OK;
+
+ dev_dbg(udata->dev, "UCSI notification received\n");
+ flush_work(&udata->work);
+ schedule_work(&udata->work);
+
+ return NOTIFY_OK;
+}
+
+static void cros_ucsi_destroy(struct cros_ucsi_data *udata)
+{
+ cros_usbpd_unregister_notify(&udata->nb);
+ cancel_delayed_work_sync(&udata->write_tmo);
+ cancel_work_sync(&udata->work);
+ ucsi_destroy(udata->ucsi);
+}
+
+static int cros_ucsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_data = dev_get_drvdata(dev->parent);
+ struct cros_ucsi_data *udata;
+ int ret;
+
+ udata = devm_kzalloc(dev, sizeof(*udata), GFP_KERNEL);
+ if (!udata)
+ return -ENOMEM;
+
+ udata->dev = dev;
+
+ udata->ec = ec_data->ec_dev;
+ if (!udata->ec)
+ return dev_err_probe(dev, -ENODEV, "couldn't find parent EC device\n");
+
+ platform_set_drvdata(pdev, udata);
+
+ INIT_WORK(&udata->work, cros_ucsi_work);
+ INIT_DELAYED_WORK(&udata->write_tmo, cros_ucsi_write_timeout);
+ init_completion(&udata->complete);
+
+ udata->ucsi = ucsi_create(dev, &cros_ucsi_ops);
+ if (IS_ERR(udata->ucsi))
+ return dev_err_probe(dev, PTR_ERR(udata->ucsi), "failed to allocate UCSI instance\n");
+
+ ucsi_set_drvdata(udata->ucsi, udata);
+
+ udata->nb.notifier_call = cros_ucsi_event;
+ ret = cros_usbpd_register_notify(&udata->nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register notifier\n");
+ ucsi_destroy(udata->ucsi);
+ return ret;
+ }
+
+ ret = ucsi_register(udata->ucsi);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register UCSI\n");
+ cros_ucsi_destroy(udata);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cros_ucsi_remove(struct platform_device *dev)
+{
+ struct cros_ucsi_data *udata = platform_get_drvdata(dev);
+
+ ucsi_unregister(udata->ucsi);
+ cros_ucsi_destroy(udata);
+}
+
+static int __maybe_unused cros_ucsi_suspend(struct device *dev)
+{
+ struct cros_ucsi_data *udata = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&udata->write_tmo);
+ cancel_work_sync(&udata->work);
+
+ return 0;
+}
+
+static void __maybe_unused cros_ucsi_complete(struct device *dev)
+{
+ struct cros_ucsi_data *udata = dev_get_drvdata(dev);
+
+ ucsi_resume(udata->ucsi);
+}
+
+/*
+ * The UCSI protocol is also used on ChromeOS platforms which rely on
+ * the cros_ec_lpc.c driver for communication with the embedded
+ * controller (EC). On such platforms communication with the EC is not
+ * available until the .complete() callback of the cros_ec_lpc driver
+ * has executed. For this reason we delay ucsi_resume() until the
+ * .complete() stage; otherwise the UCSI SET_NOTIFICATION_ENABLE command
+ * would fail and we would not receive any UCSI notifications from the
+ * EC, where the PPM is implemented.
+ */
+static const struct dev_pm_ops cros_ucsi_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = cros_ucsi_suspend,
+ .complete = cros_ucsi_complete,
+#endif
+};
+
+static const struct platform_device_id cros_ucsi_id[] = {
+ { KBUILD_MODNAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ucsi_id);
+
+static struct platform_driver cros_ucsi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &cros_ucsi_pm_ops,
+ },
+ .id_table = cros_ucsi_id,
+ .probe = cros_ucsi_probe,
+ .remove = cros_ucsi_remove,
+};
+
+module_platform_driver(cros_ucsi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UCSI driver for ChromeOS EC");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 5ff369c24a2f..82735eb34f0e 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -30,6 +30,7 @@ struct dentry;
#define UCSIv2_MESSAGE_OUT 272
/* UCSI versions */
+#define UCSI_VERSION_1_0 0x0100
#define UCSI_VERSION_1_1 0x0110
#define UCSI_VERSION_1_2 0x0120
#define UCSI_VERSION_2_0 0x0200
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index f3a5e24ea84d..4cae85c0dc12 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -71,7 +71,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
return yoga_c630_ec_ucsi_write(uec->ec, (u8*)&command);
}
-const struct ucsi_operations yoga_c630_ucsi_ops = {
+static const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 6338d818bc8b..9aa30ef76f3b 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -269,7 +269,7 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
return 0;
}
- usbip_dbg_stub_rx("seqnum %d is not pending\n",
+ usbip_dbg_stub_rx("seqnum %u is not pending\n",
pdu->u.cmd_unlink.seqnum);
/*
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b1c2f6781cb3..7eb2e074012a 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -201,7 +201,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
pdu_header.base.seqnum);
if (priv->sgl) {
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index b03e5021c25b..e70fba9f55d6 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "usbip_common.h"
#include "vhci.h"
@@ -675,7 +676,7 @@ static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
spin_lock_irqsave(&vdev->priv_lock, flags);
- priv->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
+ priv->seqnum = (u32)atomic_inc_return(&vhci_hcd->seqnum);
if (priv->seqnum == 0xffff)
dev_info(&urb->dev->dev, "seqnum max\n");
@@ -1161,12 +1162,8 @@ static int vhci_setup(struct usb_hcd *hcd)
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
}
- /*
- * Support SG.
- * sg_tablesize is an arbitrary value to alleviate memory pressure
- * on the host.
- */
- hcd->self.sg_tablesize = 32;
+ /* accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
hcd->self.no_sg_constraint = 1;
return 0;
@@ -1453,7 +1450,7 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
if (connected > 0) {
dev_info(&pdev->dev,
"We have %d active connection%s. Do not suspend.\n",
- connected, (connected == 1 ? "" : "s"));
+ connected, str_plural(connected));
ret = -EBUSY;
} else {
dev_info(&pdev->dev, "suspend vhci_hcd");
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 7f2d1c241559..a75f4a898a41 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -66,7 +66,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
- pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+ pr_err("cannot find a urb of seqnum %u max seqnum %u\n",
pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
@@ -162,10 +162,10 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
* already received the result of its submit result and gave
* back the URB.
*/
- pr_info("the urb (seqnum %d) was already given back\n",
+ pr_info("the urb (seqnum %u) was already given back\n",
pdu->base.seqnum);
} else {
- usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 907a43a00896..2aae3edfc813 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -67,7 +67,7 @@ out:
* Exposes device descriptor from the gadget driver.
*/
static ssize_t dev_desc_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *out,
+ const struct bin_attribute *attr, char *out,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -88,7 +88,7 @@ unlock:
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
-static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
+static const BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
static ssize_t usbip_sockfd_store(struct device *dev,
struct device_attribute *attr,
@@ -252,14 +252,14 @@ static struct attribute *dev_attrs[] = {
NULL,
};
-static struct bin_attribute *dev_bin_attrs[] = {
+static const struct bin_attribute *const dev_bin_attrs[] = {
&bin_attr_dev_desc,
NULL,
};
static const struct attribute_group vudc_attr_group = {
.attrs = dev_attrs,
- .bin_attrs = dev_bin_attrs,
+ .bin_attrs_new = dev_bin_attrs,
};
const struct attribute_group *vudc_groups[] = {
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 3ccb17c3e840..30c11bf9f4e7 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -107,7 +107,7 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
- usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa.h b/drivers/vdpa/octeon_ep/octep_vdpa.h
index 046710ec4d42..53b020b019f7 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa.h
+++ b/drivers/vdpa/octeon_ep/octep_vdpa.h
@@ -8,6 +8,7 @@
#include <linux/pci_regs.h>
#include <linux/vdpa.h>
#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/virtio_crypto.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_blk.h>
#include <uapi/linux/virtio_config.h>
@@ -29,12 +30,12 @@
#define OCTEP_EPF_RINFO(x) (0x000209f0 | ((x) << 25))
#define OCTEP_VF_MBOX_DATA(x) (0x00010210 | ((x) << 17))
#define OCTEP_PF_MBOX_DATA(x) (0x00022000 | ((x) << 4))
-
-#define OCTEP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
-#define OCTEP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0x7F)
+#define OCTEP_VF_IN_CTRL(x) (0x00010000 | ((x) << 17))
+#define OCTEP_VF_IN_CTRL_RPVF(val) (((val) >> 48) & 0xF)
#define OCTEP_FW_READY_SIGNATURE0 0xFEEDFEED
#define OCTEP_FW_READY_SIGNATURE1 0x3355ffaa
+#define OCTEP_MAX_CB_INTR 8
enum octep_vdpa_dev_status {
OCTEP_VDPA_DEV_STATUS_INVALID,
@@ -48,9 +49,26 @@ enum octep_vdpa_dev_status {
struct octep_vring_info {
struct vdpa_callback cb;
void __iomem *notify_addr;
- u32 __iomem *cb_notify_addr;
+ void __iomem *cb_notify_addr;
phys_addr_t notify_pa;
- char msix_name[256];
+};
+
+enum octep_pci_vndr_cfg_type {
+ OCTEP_PCI_VNDR_CFG_TYPE_VIRTIO_ID,
+ OCTEP_PCI_VNDR_CFG_TYPE_MAX,
+};
+
+struct octep_pci_vndr_data {
+ struct virtio_pci_vndr_data hdr;
+ u8 id;
+ u8 bar;
+ union {
+ u64 data;
+ struct {
+ u32 offset;
+ u32 length;
+ };
+ };
};
struct octep_hw {
@@ -68,7 +86,9 @@ struct octep_hw {
u64 features;
u16 nr_vring;
u32 config_size;
- int irq;
+ int nb_irqs;
+ int *irqs;
+ u8 dev_id;
};
u8 octep_hw_get_status(struct octep_hw *oct_hw);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
index 1d4767b33315..74240101c505 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2024 Marvell. */
#include <linux/iopoll.h>
+#include <linux/build_bug.h>
#include "octep_vdpa.h"
@@ -358,7 +359,14 @@ u16 octep_get_vq_size(struct octep_hw *oct_hw)
static u32 octep_get_config_size(struct octep_hw *oct_hw)
{
- return sizeof(struct virtio_net_config);
+ switch (oct_hw->dev_id) {
+ case VIRTIO_ID_NET:
+ return sizeof(struct virtio_net_config);
+ case VIRTIO_ID_CRYPTO:
+ return sizeof(struct virtio_crypto_config);
+ default:
+ return 0;
+ }
}
static void __iomem *octep_get_cap_addr(struct octep_hw *oct_hw, struct virtio_pci_cap *cap)
@@ -416,8 +424,25 @@ static int octep_pci_signature_verify(struct octep_hw *oct_hw)
return 0;
}
+static void octep_vndr_data_process(struct octep_hw *oct_hw,
+ struct octep_pci_vndr_data *vndr_data)
+{
+ BUILD_BUG_ON(sizeof(struct octep_pci_vndr_data) % 4 != 0);
+
+ switch (vndr_data->id) {
+ case OCTEP_PCI_VNDR_CFG_TYPE_VIRTIO_ID:
+ oct_hw->dev_id = (u8)vndr_data->data;
+ break;
+ default:
+ dev_err(&oct_hw->pdev->dev, "Invalid vendor data id %u\n",
+ vndr_data->id);
+ break;
+ }
+}
+
int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
{
+ struct octep_pci_vndr_data vndr_data;
struct octep_mbox __iomem *mbox;
struct device *dev = &pdev->dev;
struct virtio_pci_cap cap;
@@ -466,6 +491,15 @@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
case VIRTIO_PCI_CAP_ISR_CFG:
oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
break;
+ case VIRTIO_PCI_CAP_VENDOR_CFG:
+ octep_pci_caps_read(oct_hw, &vndr_data, sizeof(vndr_data), pos);
+ if (vndr_data.hdr.vendor_id != PCI_VENDOR_ID_CAVIUM) {
+ dev_err(dev, "Invalid vendor data\n");
+ return -EINVAL;
+ }
+
+ octep_vndr_data_process(oct_hw, &vndr_data);
+ break;
}
pos = cap.cap_next;
@@ -495,8 +529,6 @@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
if (!oct_hw->vqs)
return -ENOMEM;
- oct_hw->irq = -1;
-
dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features);
dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring);
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
index cd55b1aac151..f3d4dda4e04c 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
@@ -49,58 +49,89 @@ static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
struct octep_hw *oct_hw = data;
int i;
- for (i = 0; i < oct_hw->nr_vring; i++) {
- if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
- /* Acknowledge the per queue notification to the device */
- iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
- oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ /* Each device has multiple interrupts (nb_irqs) shared among rings
+ * (nr_vring). Device interrupts are mapped to the rings in a
+ * round-robin fashion.
+ *
+ * For example, if nb_irqs = 8 and nr_vring = 64:
+ * 0 -> 0, 8, 16, 24, 32, 40, 48, 56;
+ * 1 -> 1, 9, 17, 25, 33, 41, 49, 57;
+ * ...
+ * 7 -> 7, 15, 23, 31, 39, 47, 55, 63;
+ */
+
+ for (i = irq - oct_hw->irqs[0]; i < oct_hw->nr_vring; i += oct_hw->nb_irqs) {
+ if (ioread8(oct_hw->vqs[i].cb_notify_addr)) {
+ /* Acknowledge the per ring notification to the device */
+ iowrite8(0, oct_hw->vqs[i].cb_notify_addr);
+
+ if (likely(oct_hw->vqs[i].cb.callback))
+ oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+ break;
}
}
+ /* Check for config interrupt. Config uses the first interrupt */
+ if (unlikely(irq == oct_hw->irqs[0] && ioread8(oct_hw->isr))) {
+ iowrite8(0, oct_hw->isr);
+
+ if (oct_hw->config_cb.callback)
+ oct_hw->config_cb.callback(oct_hw->config_cb.private);
+ }
+
return IRQ_HANDLED;
}
static void octep_free_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
+ int irq;
- if (oct_hw->irq != -1) {
- devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
- oct_hw->irq = -1;
+ if (!oct_hw->irqs)
+ return;
+
+ for (irq = 0; irq < oct_hw->nb_irqs; irq++) {
+ if (!oct_hw->irqs[irq])
+ break;
+
+ devm_free_irq(&pdev->dev, oct_hw->irqs[irq], oct_hw);
}
+
pci_free_irq_vectors(pdev);
+ devm_kfree(&pdev->dev, oct_hw->irqs);
+ oct_hw->irqs = NULL;
}
static int octep_request_irqs(struct octep_hw *oct_hw)
{
struct pci_dev *pdev = oct_hw->pdev;
- int ret, irq;
+ int ret, irq, idx;
- /* Currently HW device provisions one IRQ per VF, hence
- * allocate one IRQ for all virtqueues call interface.
- */
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ oct_hw->irqs = devm_kcalloc(&pdev->dev, oct_hw->nb_irqs, sizeof(int), GFP_KERNEL);
+ if (!oct_hw->irqs)
+ return -ENOMEM;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, oct_hw->nb_irqs, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to alloc msix vector");
return ret;
}
- snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
- OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));
-
- irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
- oct_hw->vqs->msix_name, oct_hw);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register interrupt handler\n");
- goto free_irq_vec;
+ for (idx = 0; idx < oct_hw->nb_irqs; idx++) {
+ irq = pci_irq_vector(pdev, idx);
+ ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
+ dev_name(&pdev->dev), oct_hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register interrupt handler\n");
+ goto free_irqs;
+ }
+ oct_hw->irqs[idx] = irq;
}
- oct_hw->irq = irq;
return 0;
-free_irq_vec:
- pci_free_irq_vectors(pdev);
+free_irqs:
+ octep_free_irqs(oct_hw);
return ret;
}
@@ -271,7 +302,9 @@ static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
- return VIRTIO_ID_NET;
+ struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
+
+ return oct_hw->dev_id;
}
static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
@@ -559,6 +592,7 @@ static void octep_vdpa_setup_task(struct work_struct *work)
struct device *dev = &pdev->dev;
struct octep_hw *oct_hw;
unsigned long timeout;
+ u64 val;
int ret;
oct_hw = &mgmt_dev->oct_hw;
@@ -590,6 +624,13 @@ static void octep_vdpa_setup_task(struct work_struct *work)
if (ret)
return;
+ val = readq(oct_hw->base[OCTEP_HW_MBOX_BAR] + OCTEP_VF_IN_CTRL(0));
+ oct_hw->nb_irqs = OCTEP_VF_IN_CTRL_RPVF(val);
+ if (!oct_hw->nb_irqs || oct_hw->nb_irqs > OCTEP_MAX_CB_INTR) {
+ dev_err(dev, "Invalid number of interrupts %d\n", oct_hw->nb_irqs);
+ goto unmap_region;
+ }
+
ret = octep_hw_caps_read(oct_hw, pdev);
if (ret < 0)
goto unmap_region;
@@ -768,12 +809,6 @@ static int octep_vdpa_pf_setup(struct octep_pf *octpf)
return -EINVAL;
}
- if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
- val &= ~GENMASK_ULL(35, 32);
- val |= BIT_ULL(32);
- writeq(val, addr + OCTEP_EPF_RINFO(0));
- }
-
len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);
octpf->vf_stride = len / totalvfs;
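A standalone sketch of the round-robin IRQ-to-ring mapping described in the octep_vdpa_intr_handler() comment above, using the same example values from that comment (nb_irqs = 8, nr_vring = 64): IRQ index k services rings k, k + nb_irqs, k + 2 * nb_irqs, and so on.

#include <stdio.h>

int main(void)
{
	int nb_irqs = 8, nr_vring = 64;	/* example values from the handler comment */
	int k, ring;

	for (k = 0; k < nb_irqs; k++) {
		printf("irq %d ->", k);
		for (ring = k; ring < nr_vring; ring += nb_irqs)
			printf(" %d", ring);
		printf("\n");
	}
	return 0;
}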
diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c
index c8b74980dbd1..55ec51c17ab3 100644
--- a/drivers/vdpa/solidrun/snet_main.c
+++ b/drivers/vdpa/solidrun/snet_main.c
@@ -556,36 +556,38 @@ static const struct vdpa_config_ops snet_config_ops = {
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
char *name;
- int ret, i, mask = 0;
+ unsigned short i;
+ bool bars_found = false;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
/* We don't know which BAR will be used to communicate..
* We will map every bar with len > 0.
*
* Later, we will discover the BAR and unmap all other BARs.
*/
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i))
- mask |= (1 << i);
- }
+ void __iomem *io;
- /* No BAR can be used.. */
- if (!mask) {
- SNET_ERR(pdev, "Failed to find a PCI BAR\n");
- return -ENODEV;
- }
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
- if (!name)
- return -ENOMEM;
+ io = pcim_iomap_region(pdev, i, name);
+ if (IS_ERR(io)) {
+ SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
+ return PTR_ERR(io);
+ }
- ret = pcim_iomap_regions(pdev, mask, name);
- if (ret) {
- SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
- return ret;
+ psnet->bars[i] = io;
+ bars_found = true;
}
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (mask & (1 << i))
- psnet->bars[i] = pcim_iomap_table(pdev)[i];
+ /* No BAR can be used.. */
+ if (!bars_found) {
+ SNET_ERR(pdev, "Failed to find a PCI BAR\n");
+ return -ENODEV;
}
return 0;
@@ -594,20 +596,20 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
char *name;
- int ret;
+ void __iomem *io;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev));
if (!name)
return -ENOMEM;
/* Request and map BAR */
- ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
- if (ret) {
+ io = pcim_iomap_region(pdev, snet->psnet->cfg.vf_bar, name);
+ if (IS_ERR(io)) {
SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
- return ret;
+ return PTR_ERR(io);
}
- snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];
+ snet->bar = io;
return 0;
}
@@ -656,15 +658,12 @@ static int psnet_detect_bar(struct psnet *psnet, u32 off)
static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
- int i, mask = 0;
+ unsigned short i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (psnet->bars[i] && i != psnet->barno)
- mask |= (1 << i);
+ pcim_iounmap_region(pdev, i);
}
-
- if (mask)
- pcim_iounmap_regions(pdev, mask);
}
/* Read SNET config from PCI BAR */
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 16380764275e..8787407f75b0 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -367,6 +367,14 @@ static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}
+static void vp_vdpa_kick_vq_with_data(struct vdpa_device *vdpa, u32 data)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ u16 qid = data & 0xFFFF;
+
+ vp_iowrite32(data, vp_vdpa->vring[qid].notify);
+}
+
static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -472,6 +480,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.get_vq_size = vp_vdpa_get_vq_size,
.set_vq_address = vp_vdpa_set_vq_address,
.kick_vq = vp_vdpa_kick_vq,
+ .kick_vq_with_data = vp_vdpa_kick_vq_with_data,
.get_generation = vp_vdpa_get_generation,
.get_device_id = vp_vdpa_get_device_id,
.get_vendor_id = vp_vdpa_get_vendor_id,
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index ed4737de4528..f2e686f8f1ef 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -76,7 +76,7 @@ int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
if (ret)
return ret;
- ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
+ ret = class_compat_create_link(mdev_bus_compat_class, dev);
if (ret)
dev_warn(dev, "Failed to create compatibility class link\n");
@@ -98,7 +98,7 @@ void mdev_unregister_parent(struct mdev_parent *parent)
dev_info(parent->dev, "MDEV: Unregistering\n");
down_write(&parent->unreg_sem);
- class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
+ class_compat_remove_link(mdev_bus_compat_class, parent->dev);
device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
up_write(&parent->unreg_sem);
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index eb7387ee6ebd..11eda6b207f1 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
buf->dma_dir, 0);
}
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
@@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
return -ENOMEM;
do {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+ page_list);
if (!filled) {
ret = -ENOMEM;
goto err;
@@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
{
int i;
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for (i = 0; i < recv_buf->npages; i++)
__free_page(recv_buf->page_list[i]);
@@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
return -ENOMEM;
for (;;) {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
- npages - done,
- recv_buf->page_list + done);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
+ npages - done,
+ recv_buf->page_list + done);
if (!filled)
goto err;
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index a467085038f0..e5ac39c4cc6b 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -5,6 +5,8 @@
#include <linux/sizes.h>
#include <linux/vfio_pci_core.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
/*
* The device memory usable to the workloads running in the VM is cached
@@ -17,12 +19,21 @@
#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
-/* Memory size expected as non cached and reserved by the VM driver */
-#define RESMEM_SIZE SZ_1G
-
/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
#define MEMBLK_SIZE SZ_512M
+#define DVSEC_BITMAP_OFFSET 0xA
+#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0)
+
+#define GPU_CAP_DVSEC_REGISTER 3
+
+#define C2C_LINK_BAR0_OFFSET 0x1498
+#define HBM_TRAINING_BAR0_OFFSET 0x200BC
+#define STATUS_READY 0xFF
+
+#define POLL_QUANTUM_MS 1000
+#define POLL_TIMEOUT_MS (30 * 1000)
+
/*
* The state of the two device memory region - resmem and usemem - is
* saved as struct mem_region.
@@ -46,6 +57,7 @@ struct nvgrace_gpu_pci_core_device {
struct mem_region resmem;
/* Lock to control device memory kernel mapping */
struct mutex remap_lock;
+ bool has_mig_hw_bug;
};
static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
@@ -66,7 +78,7 @@ nvgrace_gpu_memregion(int index,
if (index == USEMEM_REGION_INDEX)
return &nvdev->usemem;
- if (index == RESMEM_REGION_INDEX)
+ if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX)
return &nvdev->resmem;
return NULL;
@@ -751,40 +763,67 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
u64 memphys, u64 memlength)
{
int ret = 0;
+ u64 resmem_size = 0;
/*
- * The VM GPU device driver needs a non-cacheable region to support
- * the MIG feature. Since the device memory is mapped as NORMAL cached,
- * carve out a region from the end with a different NORMAL_NC
- * property (called as reserved memory and represented as resmem). This
- * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while
- * exposing the rest (termed as usable memory and represented using usemem)
- * as cacheable 64b BAR (region 4 and 5).
+ * On Grace Hopper systems, the VM GPU device driver needs a non-cacheable
+ * region to support the MIG feature owing to a hardware bug. Since the
+ * device memory is mapped as NORMAL cached, carve out a region from the end
+ * with a different NORMAL_NC property (called reserved memory and
+ * represented as resmem). This region is then exposed as a 64b BAR
+ * (regions 2 and 3) to the VM, while exposing the rest (termed usable
+ * memory and represented using usemem) as a cacheable 64b BAR (regions 4 and 5).
*
* devmem (memlength)
* |-------------------------------------------------|
* | |
* usemem.memphys resmem.memphys
+ *
+ * This hardware bug is fixed on the Grace Blackwell platforms and the
+ * presence of the bug can be determined through nvdev->has_mig_hw_bug.
+ * Thus on systems with the hardware fix, there is no need to partition
+ * the GPU device memory and the entire memory is usable and mapped as
+ * NORMAL cached (i.e. resmem size is 0).
*/
+ if (nvdev->has_mig_hw_bug)
+ resmem_size = SZ_1G;
+
nvdev->usemem.memphys = memphys;
/*
* The device memory exposed to the VM is added to the kernel by the
- * VM driver module in chunks of memory block size. Only the usable
- * memory (usemem) is added to the kernel for usage by the VM
- * workloads. Make the usable memory size memblock aligned.
+ * VM driver module in chunks of memory block size. Note that only the
+ * usable memory (usemem) is added to the kernel for usage by the VM
+ * workloads.
*/
- if (check_sub_overflow(memlength, RESMEM_SIZE,
+ if (check_sub_overflow(memlength, resmem_size,
&nvdev->usemem.memlength)) {
ret = -EOVERFLOW;
goto done;
}
/*
- * The USEMEM part of the device memory has to be MEMBLK_SIZE
- * aligned. This is a hardwired ABI value between the GPU FW and
- * VFIO driver. The VM device driver is also aware of it and make
- * use of the value for its calculation to determine USEMEM size.
+ * The usemem region is exposed as a 64b BAR composed of regions 4 and 5.
+ * Calculate and save the BAR size for the region.
+ */
+ nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
+
+ /*
+ * If the hardware has the fix for MIG, there is no requirement
+ * for splitting the device memory to create RESMEM. The entire
+ * device memory is usable and will be USEMEM. Return here in
+ * that case.
+ */
+ if (!nvdev->has_mig_hw_bug)
+ goto done;
+
+ /*
+ * When the device memory is split to work around the MIG bug on
+ * Grace Hopper, the USEMEM part of the device memory has to be
+ * MEMBLK_SIZE aligned. This is a hardwired ABI value between the
+ * GPU FW and VFIO driver. The VM device driver is also aware of it
+ * and makes use of the value in its calculation to determine USEMEM
+ * size. Note that the device memory may not be 512M aligned.
*/
nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
MEMBLK_SIZE);
@@ -803,15 +842,93 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
}
/*
- * The memory regions are exposed as BARs. Calculate and save
- * the BAR size for them.
+ * The resmem region is exposed as a 64b BAR composed of region 2 and 3
+ * for Grace Hopper. Calculate and save the BAR size for the region.
*/
- nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
done:
return ret;
}
+static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
+{
+ int pcie_dvsec;
+ u16 dvsec_ctrl16;
+
+ pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
+ GPU_CAP_DVSEC_REGISTER);
+
+ if (pcie_dvsec) {
+ pci_read_config_word(pdev,
+ pcie_dvsec + DVSEC_BITMAP_OFFSET,
+ &dvsec_ctrl16);
+
+ if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * To reduce the system bootup time, the HBM training has
+ * been moved out of the UEFI on the Grace-Blackwell systems.
+ *
+ * The onus of checking whether the HBM training has completed
+ * thus falls on the module. The HBM training status can be
+ * determined from a BAR0 register.
+ *
+ * Similarly, another BAR0 register exposes the status of the
+ * CPU-GPU chip-to-chip (C2C) cache coherent interconnect.
+ *
+ * Poll these registers for up to 30s. If the HBM training is
+ * not complete or if the C2C link is not ready, fail the probe.
+ *
+ * While the wait is not required on Grace Hopper systems, it
+ * is beneficial to make the check to ensure the device is in an
+ * expected state.
+ *
+ * Ensure that the BAR0 region is enabled before accessing the
+ * registers.
+ */
+static int nvgrace_gpu_wait_device_ready(struct pci_dev *pdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);
+ void __iomem *io;
+ int ret = -ETIME;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_selected_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (ret)
+ goto request_region_exit;
+
+ io = pci_iomap(pdev, 0, 0);
+ if (!io) {
+ ret = -ENOMEM;
+ goto iomap_exit;
+ }
+
+ do {
+ if ((ioread32(io + C2C_LINK_BAR0_OFFSET) == STATUS_READY) &&
+ (ioread32(io + HBM_TRAINING_BAR0_OFFSET) == STATUS_READY)) {
+ ret = 0;
+ goto reg_check_exit;
+ }
+ msleep(POLL_QUANTUM_MS);
+ } while (!time_after(jiffies, timeout));
+
+ ret = -ETIME;
+
+reg_check_exit:
+ pci_iounmap(pdev, io);
+iomap_exit:
+ pci_release_selected_regions(pdev, 1 << 0);
+request_region_exit:
+ pci_disable_device(pdev);
+ return ret;
+}
+
static int nvgrace_gpu_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -820,6 +937,10 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
u64 memphys, memlength;
int ret;
+ ret = nvgrace_gpu_wait_device_ready(pdev);
+ if (ret)
+ return ret;
+
ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
if (!ret)
ops = &nvgrace_gpu_pci_ops;
@@ -832,6 +953,8 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
dev_set_drvdata(&pdev->dev, &nvdev->core_device);
if (ops == &nvgrace_gpu_pci_ops) {
+ nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);
+
/*
* Device memory properties are identified in the host ACPI
* table. Set the nvgrace_gpu_pci_core_device structure.
@@ -868,6 +991,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = {
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) },
/* GH200 SKU */
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) },
+ /* GB200 SKU */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2941) },
{}
};
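A standalone sketch of the usemem sizing arithmetic from nvgrace_gpu_init_nvdev_struct() above, for the Grace Hopper (has_mig_hw_bug) case: subtract the 1G resmem carve-out, align usemem down to MEMBLK_SIZE, and round the BAR size up to a power of two. The device memory size below is a hypothetical example.

#include <stdio.h>
#include <stdint.h>

#define SZ_512M		(512ULL << 20)
#define SZ_1G		(1024ULL << 20)
#define MEMBLK_SIZE	SZ_512M

int main(void)
{
	/* Hypothetical device memory size; 1G carve-out and 512M alignment
	 * follow the hunks above. */
	uint64_t memlength = (96ULL << 30) + (200ULL << 20);
	uint64_t resmem_size = SZ_1G;
	uint64_t usemem, bar;

	usemem = memlength - resmem_size;
	usemem -= usemem % MEMBLK_SIZE;		/* round_down(usemem, MEMBLK_SIZE) */

	for (bar = 1; bar < usemem; bar <<= 1)	/* roundup_pow_of_two() */
		;

	printf("usemem %llu MiB, usemem BAR %llu MiB\n",
	       (unsigned long long)(usemem >> 20),
	       (unsigned long long)(bar >> 20));
	return 0;
}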
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index ea2745c1ac5e..94142581c98c 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -511,13 +511,13 @@ static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
*vbar &= cpu_to_le32((u32)mask);
- } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW) {
- mask = ~(0x20000 - 1);
+ } else if (pdev->rom && pdev->romlen) {
+ mask = ~(roundup_pow_of_two(pdev->romlen) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
*vbar &= cpu_to_le32((u32)mask);
- } else
+ } else {
*vbar = 0;
+ }
vdev->bardirty = false;
}
@@ -1389,11 +1389,12 @@ static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epo
switch (ecap) {
case PCI_EXT_CAP_ID_VNDR:
- ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
+ ret = pci_read_config_dword(pdev, epos + PCI_VNDR_HEADER,
+ &dword);
if (ret)
return pcibios_err_to_errno(ret);
- return dword >> PCI_VSEC_HDR_LEN_SHIFT;
+ return PCI_VNDR_HEADER_LEN(dword);
case PCI_EXT_CAP_ID_VC:
case PCI_EXT_CAP_ID_VC9:
case PCI_EXT_CAP_ID_MFVC:
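A standalone sketch of the virtual ROM BAR mask computed in vfio_bar_fixup() above: the BAR size is the ROM length rounded up to a power of two, and the writable-bit mask clears the size bits while keeping the enable bit. The ROM length below is a hypothetical example.

#include <stdio.h>
#include <stdint.h>

#define PCI_ROM_ADDRESS_ENABLE	0x01

int main(void)
{
	uint32_t romlen = 0x18000;	/* hypothetical 96K shadow ROM */
	uint32_t size, mask;

	for (size = 1; size < romlen; size <<= 1)	/* roundup_pow_of_two() */
		;

	mask = ~(size - 1) | PCI_ROM_ADDRESS_ENABLE;
	printf("ROM BAR size %u KiB, write mask 0x%08x\n", size >> 10, mask);
	return 0;
}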
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1a4ed5a357d3..586e49efb81b 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1054,31 +1054,27 @@ static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
+ info.size = 0;
- /* Report the BAR size, not the ROM size */
- info.size = pci_resource_len(pdev, info.index);
- if (!info.size) {
- /* Shadow ROMs appear as PCI option ROMs */
- if (pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW)
- info.size = 0x20000;
- else
- break;
- }
-
- /*
- * Is it really there? Enable memory decode for implicit access
- * in pci_map_rom().
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- io = pci_map_rom(pdev, &size);
- if (io) {
+ if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
+ /*
+ * Check ROM content is valid. Need to enable memory
+ * decode for ROM access in pci_map_rom().
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ io = pci_map_rom(pdev, &size);
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ /* Report the BAR size, not the ROM size. */
+ info.size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ pci_unmap_rom(pdev, io);
+ }
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ } else if (pdev->rom && pdev->romlen) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
- pci_unmap_rom(pdev, io);
- } else {
- info.size = 0;
+ /* Report BAR size as power of two. */
+ info.size = roundup_pow_of_two(pdev->romlen);
}
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
break;
}
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 66b72c289284..6192788c8ba3 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "vfio_pci_priv.h"
@@ -61,9 +62,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_iowrite##size);
VFIO_IOWRITE(8)
VFIO_IOWRITE(16)
VFIO_IOWRITE(32)
-#ifdef iowrite64
VFIO_IOWRITE(64)
-#endif
#define VFIO_IOREAD(size) \
int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
@@ -89,9 +88,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_ioread##size);
VFIO_IOREAD(8)
VFIO_IOREAD(16)
VFIO_IOREAD(32)
-#ifdef ioread64
VFIO_IOREAD(64)
-#endif
#define VFIO_IORDWR(size) \
static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\
@@ -127,9 +124,7 @@ static int vfio_pci_iordwr##size(struct vfio_pci_core_device *vdev,\
VFIO_IORDWR(8)
VFIO_IORDWR(16)
VFIO_IORDWR(32)
-#if defined(ioread64) && defined(iowrite64)
VFIO_IORDWR(64)
-#endif
/*
* Read or write from an __iomem region (MMIO or I/O port) with an excluded
@@ -155,7 +150,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
else
fillable = 0;
-#if defined(ioread64) && defined(iowrite64)
if (fillable >= 8 && !(off % 8)) {
ret = vfio_pci_iordwr64(vdev, iswrite, test_mem,
io, buf, off, &filled);
@@ -163,7 +157,6 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
return ret;
} else
-#endif
if (fillable >= 4 && !(off % 4)) {
ret = vfio_pci_iordwr32(vdev, iswrite, test_mem,
io, buf, off, &filled);
@@ -244,9 +237,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
if (pci_resource_start(pdev, bar))
end = pci_resource_len(pdev, bar);
- else if (bar == PCI_ROM_RESOURCE &&
- pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
- end = 0x20000;
+ else if (bar == PCI_ROM_RESOURCE && pdev->rom && pdev->romlen)
+ end = roundup_pow_of_two(pdev->romlen);
else
return -EINVAL;
@@ -261,11 +253,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
* excluded range at the end of the actual ROM. This makes
* filling large ROM BARs much faster.
*/
- io = pci_map_rom(pdev, &x_start);
- if (!io) {
- done = -ENOMEM;
- goto out;
+ if (pci_resource_start(pdev, bar)) {
+ io = pci_map_rom(pdev, &x_start);
+ } else {
+ io = ioremap(pdev->rom, pdev->romlen);
+ x_start = pdev->romlen;
}
+ if (!io)
+ return -ENOMEM;
x_end = end;
} else {
int ret = vfio_pci_core_setup_barmap(vdev, bar);
@@ -288,8 +283,13 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
if (done >= 0)
*ppos += done;
- if (bar == PCI_ROM_RESOURCE)
- pci_unmap_rom(pdev, io);
+ if (bar == PCI_ROM_RESOURCE) {
+ if (pci_resource_start(pdev, bar))
+ pci_unmap_rom(pdev, io);
+ else
+ iounmap(io);
+ }
+
out:
return done;
}
@@ -381,12 +381,10 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
vfio_pci_core_iowrite32(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
-#ifdef iowrite64
case 8:
vfio_pci_core_iowrite64(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
-#endif
}
}
@@ -440,10 +438,8 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
pos >= vdev->msix_offset + vdev->msix_size))
return -EINVAL;
-#ifndef iowrite64
if (count == 8)
return -EINVAL;
-#endif
ret = vfio_pci_core_setup_barmap(vdev, bar);
if (ret)
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ee54f4c17857..ba92bb4e9af9 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf,
return -ENOMEM;
do {
- filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
+ page_list);
if (!filled) {
ret = -ENOMEM;
goto err;
@@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf)
{
struct sg_page_iter sg_iter;
- /* Undo alloc_pages_bulk_array() */
+ /* Undo alloc_pages_bulk() */
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e53757d1d095..3bf1043cd795 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -388,6 +388,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
{
unsigned int done = 0;
+ if (off >= reg->size)
+ return -EINVAL;
+
+ count = min_t(size_t, count, reg->size - off);
+
if (!reg->ioaddr) {
reg->ioaddr =
ioremap(reg->addr, reg->size);
@@ -467,6 +472,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
{
unsigned int done = 0;
+ if (off >= reg->size)
+ return -EINVAL;
+
+ count = min_t(size_t, count, reg->size - off);
+
if (!reg->ioaddr) {
reg->ioaddr =
ioremap(reg->addr, reg->size);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9ad37c012189..b9b9e9d40951 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1107,6 +1107,7 @@ static void handle_rx(struct vhost_net *net)
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
bool busyloop_intr = false;
+ bool set_num_buffers;
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
@@ -1129,6 +1130,8 @@ static void handle_rx(struct vhost_net *net)
vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
+ set_num_buffers = mergeable ||
+ vhost_has_feature(vq, VIRTIO_F_VERSION_1);
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
@@ -1205,7 +1208,7 @@ static void handle_rx(struct vhost_net *net)
/* TODO: Should check and handle checksum. */
num_buffers = cpu_to_vhost16(vq, headcount);
- if (likely(mergeable) &&
+ if (likely(set_num_buffers) &&
copy_to_iter(&num_buffers, sizeof num_buffers,
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 36bfb6deb8ab..d866608da8d1 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2199,7 +2199,7 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2211,7 +2211,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2227,7 +2227,7 @@ static const struct bin_attribute edid1_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid1,
+ .read_new = radeon_show_edid1,
};
static const struct bin_attribute edid2_attr = {
@@ -2236,7 +2236,7 @@ static const struct bin_attribute edid2_attr = {
.mode = 0444,
},
.size = EDID_LENGTH,
- .read = radeon_show_edid2,
+ .read_new = radeon_show_edid2,
};
static int radeonfb_pci_register(struct pci_dev *pdev,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 20517448487e..0e1bd3dba255 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -275,7 +275,7 @@ static const struct fb_ops efifb_ops = {
.fb_setcolreg = efifb_setcolreg,
};
-static int efifb_setup(struct screen_info *si, char *options)
+static void efifb_setup(struct screen_info *si, char *options)
{
char *this_opt;
@@ -299,8 +299,6 @@ static int efifb_setup(struct screen_info *si, char *options)
use_bgrt = false;
}
}
-
- return 0;
}
static inline bool fb_base_is_valid(struct screen_info *si)
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
index f85817635a8c..0da23c57e475 100644
--- a/drivers/video/fbdev/omap/lcd_dma.c
+++ b/drivers/video/fbdev/omap/lcd_dma.c
@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
spin_lock_init(&lcd_dma.lock);
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
- "LCD DMA", NULL);
+ r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
+ IRQF_ONESHOT, "LCD DMA", NULL);
if (r != 0)
pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index c3329c8b4c16..ccb96a5be07e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -3933,18 +3933,13 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- if (np && of_property_read_bool(np, "syscon-pol")) {
- dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (np && of_property_present(np, "syscon-pol")) {
+ dispc.syscon_pol = syscon_regmap_lookup_by_phandle_args(np, "syscon-pol",
+ 1, &dispc.syscon_pol_offset);
if (IS_ERR(dispc.syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
return PTR_ERR(dispc.syscon_pol);
}
-
- if (of_property_read_u32_index(np, "syscon-pol", 1,
- &dispc.syscon_pol_offset)) {
- dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
- return -EINVAL;
- }
}
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index c04cbe0ef173..7c636db79882 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -36,6 +36,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
np = of_get_next_parent(np);
}
+ of_node_put(np);
return NULL;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index b33f62c5cb22..bb7fe54dd019 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -567,23 +567,6 @@ static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
-{
- void __iomem *base = core->base;
-
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
-
- return 0;
-}
-
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
index 192c9b6e2f7b..493857374a15 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h
@@ -283,7 +283,6 @@ struct csc_table {
int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 935cd8413ed5..4715dcb59811 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2123,11 +2123,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
{
struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
- int brightness = bdev->props.brightness;
-
- if (bdev->props.power != BACKLIGHT_POWER_ON ||
- bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(bdev);
ch->bl_brightness = brightness;
return ch->cfg->bl_info.set_brightness(brightness);
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 86ecbb2d86db..7734377b2d87 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -27,6 +27,7 @@
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
@@ -1712,8 +1713,8 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
BUG();
}
- dev_info(info->dev, "fb %s %sabled at start\n",
- fbname, enable ? "en" : "dis");
+ dev_info(info->dev, "fb %s %s at start\n",
+ fbname, str_enabled_disabled(enable));
/* check to see if our routing allows this */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 71ac9e36f67c..acadf0eb450c 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1416,7 +1416,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
static ssize_t edid_show(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1438,7 +1438,7 @@ static ssize_t edid_show(
static ssize_t edid_store(
struct file *filp,
- struct kobject *kobj, struct bin_attribute *a,
+ struct kobject *kobj, const struct bin_attribute *a,
char *src, loff_t src_off, size_t src_size) {
struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1482,8 +1482,8 @@ static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0666,
.size = EDID_LENGTH,
- .read = edid_show,
- .write = edid_store
+ .read_new = edid_show,
+ .write_new = edid_store
};
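The pattern above recurs throughout this series: sysfs binary attributes become const and are wired up through the .read_new/.write_new members, whose callbacks take a const struct bin_attribute. A minimal sketch of the resulting shape for a hypothetical "foo" driver (the driver, its data and its attribute name are illustrative, not from the patch; memory_read_from_buffer() is the stock helper from linux/fs.h):

#include <linux/fs.h>
#include <linux/sysfs.h>

static const u8 foo_blob[128];	/* example data exposed to userspace */

static ssize_t foo_blob_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *attr,
			     char *buf, loff_t off, size_t count)
{
	/* copy at most 'count' bytes of the blob starting at 'off' */
	return memory_read_from_buffer(buf, count, &off, foo_blob,
				       sizeof(foo_blob));
}

static const struct bin_attribute foo_blob_attr = {
	.attr		= { .name = "blob", .mode = 0444 },
	.size		= sizeof(foo_blob),
	.read_new	= foo_blob_read,
};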
static const struct device_attribute fb_device_attrs[] = {
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index fce0f5db7ba3..eedab14c7d51 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -185,9 +185,10 @@ static inline void setindex(int index)
/* Check if the video mode is supported by the driver */
static inline int check_mode_supported(const struct screen_info *si)
{
+ unsigned int type = screen_info_video_type(si);
+
/* only EGA and VGA in 16 color graphic mode are supported */
- if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
- si->orig_video_isVGA != VIDEO_TYPE_VGAC)
+ if (type != VIDEO_TYPE_EGAC && type != VIDEO_TYPE_VGAC)
return -ENODEV;
if (si->orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
@@ -1338,7 +1339,7 @@ static int vga16fb_probe(struct platform_device *dev)
printk(KERN_INFO "vga16fb: mapped to 0x%p\n", info->screen_base);
par = info->par;
- par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
+ par->isVGA = screen_info_video_type(si) == VIDEO_TYPE_VGAC;
par->palette_blanked = 0;
par->vesa_blanked = 0;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index ba301f3f4951..45b42f14a750 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -895,34 +895,6 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
}
/**
- * hdmi_infoframe_check() - check a HDMI infoframe
- * @frame: HDMI infoframe
- *
- * Validates that the infoframe is consistent and updates derived fields
- * (eg. length) based on other fields.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int
-hdmi_infoframe_check(union hdmi_infoframe *frame)
-{
- switch (frame->any.type) {
- case HDMI_INFOFRAME_TYPE_AVI:
- return hdmi_avi_infoframe_check(&frame->avi);
- case HDMI_INFOFRAME_TYPE_SPD:
- return hdmi_spd_infoframe_check(&frame->spd);
- case HDMI_INFOFRAME_TYPE_AUDIO:
- return hdmi_audio_infoframe_check(&frame->audio);
- case HDMI_INFOFRAME_TYPE_VENDOR:
- return hdmi_vendor_any_infoframe_check(&frame->vendor);
- default:
- WARN(1, "Bad infoframe type %d\n", frame->any.type);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(hdmi_infoframe_check);
-
-/**
* hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b10ed9f5b543..ba37665188b5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -546,29 +546,7 @@ void unregister_virtio_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
-#ifdef CONFIG_PM_SLEEP
-int virtio_device_freeze(struct virtio_device *dev)
-{
- struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- int ret;
-
- virtio_config_core_disable(dev);
-
- dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
-
- if (drv && drv->freeze) {
- ret = drv->freeze(dev);
- if (ret) {
- virtio_config_core_enable(dev);
- return ret;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(virtio_device_freeze);
-
-int virtio_device_restore(struct virtio_device *dev)
+static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
@@ -599,8 +577,14 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (drv->restore) {
- ret = drv->restore(dev);
+ if (restore) {
+ if (drv->restore) {
+ ret = drv->restore(dev);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = drv->reset_done(dev);
if (ret)
goto err;
}
@@ -617,9 +601,69 @@ err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
}
+
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_freeze);
+
+int virtio_device_restore(struct virtio_device *dev)
+{
+ return virtio_device_restore_priv(dev, true);
+}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
+int virtio_device_reset_prepare(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
+
+ if (!drv || !drv->reset_prepare)
+ return -EOPNOTSUPP;
+
+ virtio_config_core_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+ ret = drv->reset_prepare(dev);
+ if (ret) {
+ virtio_config_core_enable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);
+
+int virtio_device_reset_done(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ if (!drv || !drv->reset_done)
+ return -EOPNOTSUPP;
+
+ return virtio_device_restore_priv(dev, false);
+}
+EXPORT_SYMBOL_GPL(virtio_device_reset_done);
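Only drivers that provide the new callbacks participate; both helpers return -EOPNOTSUPP otherwise. A rough sketch of how a driver could opt in, with a hypothetical driver name and empty callback bodies (not part of this patch):

static int foo_reset_prepare(struct virtio_device *vdev)
{
	/* quiesce the device: stop processing and tear down virtqueues */
	return 0;
}

static int foo_reset_done(struct virtio_device *vdev)
{
	/* re-create virtqueues and resume operation */
	return 0;
}

static struct virtio_driver foo_virtio_driver = {
	.driver.name	= "foo-virtio",
	.reset_prepare	= foo_reset_prepare,
	.reset_done	= foo_reset_done,
};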
+
static int virtio_init(void)
{
if (bus_register(&virtio_bus) != 0)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index b36d2803674e..89da052f4f68 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -251,7 +251,7 @@ static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
for (num_pfns = 0; num_pfns < num;
num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_alloc();
+ page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index b0b871441578..8a294b9cbcf6 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -133,6 +133,8 @@ struct virtio_mem {
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
+ /* Usable region size in bytes. */
+ uint64_t usable_region_size;
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
@@ -2368,7 +2370,7 @@ static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
const struct range pluggable_range = mhp_get_pluggable_range(true);
- uint64_t new_plugged_size, usable_region_size, end_addr;
+ uint64_t new_plugged_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
@@ -2378,8 +2380,8 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
/* calculate the last usable memory block id */
virtio_cread_le(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
- end_addr = min(vm->addr + usable_region_size - 1,
+ usable_region_size, &vm->usable_region_size);
+ end_addr = min(vm->addr + vm->usable_region_size - 1,
pluggable_range.end);
if (vm->in_sbm) {
@@ -2648,6 +2650,7 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
if (rc)
goto out_unreg_pm;
+ virtio_device_ready(vm->vdev);
return 0;
out_unreg_pm:
unregister_pm_notifier(&vm->pm_notifier);
@@ -2725,13 +2728,103 @@ static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
mutex_unlock(&vm->hotplug_mutex);
return is_ram;
}
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+static int virtio_mem_vmcore_add_device_ram(struct virtio_mem *vm,
+ struct list_head *list, uint64_t start, uint64_t end)
+{
+ int rc;
+
+ rc = vmcore_alloc_add_range(list, start, end - start);
+ if (rc)
+ dev_err(&vm->vdev->dev,
+ "Error adding device RAM range: %d\n", rc);
+ return rc;
+}
+
+static int virtio_mem_vmcore_get_device_ram(struct vmcore_cb *cb,
+ struct list_head *list)
+{
+ struct virtio_mem *vm = container_of(cb, struct virtio_mem,
+ vmcore_cb);
+ const uint64_t device_start = vm->addr;
+ const uint64_t device_end = vm->addr + vm->usable_region_size;
+ uint64_t chunk_size, cur_start, cur_end, plugged_range_start = 0;
+ LIST_HEAD(tmp_list);
+ int rc;
+
+ if (!vm->plugged_size)
+ return 0;
+
+ /* Process memory sections, unless the device block size is bigger. */
+ chunk_size = max_t(uint64_t, PFN_PHYS(PAGES_PER_SECTION),
+ vm->device_block_size);
+
+ mutex_lock(&vm->hotplug_mutex);
+
+ /*
+ * We process larger chunks and indicate the complete chunk if any
+ * block in there is plugged. This reduces the number of pfn_is_ram()
+ * callbacks and mimics what is effectively being done when the old
+ * kernel would add complete memory sections/blocks to the elfcore hdr.
+ */
+ cur_start = device_start;
+ for (cur_start = device_start; cur_start < device_end; cur_start = cur_end) {
+ cur_end = ALIGN_DOWN(cur_start + chunk_size, chunk_size);
+ cur_end = min_t(uint64_t, cur_end, device_end);
+
+ rc = virtio_mem_send_state_request(vm, cur_start,
+ cur_end - cur_start);
+
+ if (rc < 0) {
+ dev_err(&vm->vdev->dev,
+ "Error querying block states: %d\n", rc);
+ goto out;
+ } else if (rc != VIRTIO_MEM_STATE_UNPLUGGED) {
+ /* Merge ranges with plugged memory. */
+ if (!plugged_range_start)
+ plugged_range_start = cur_start;
+ continue;
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start) {
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+ if (rc)
+ goto out;
+ plugged_range_start = 0;
+ }
+ }
+
+ /* Flush any plugged range. */
+ if (plugged_range_start)
+ rc = virtio_mem_vmcore_add_device_ram(vm, &tmp_list,
+ plugged_range_start,
+ cur_start);
+out:
+ mutex_unlock(&vm->hotplug_mutex);
+ if (rc < 0) {
+ vmcore_free_ranges(&tmp_list);
+ return rc;
+ }
+ list_splice_tail(&tmp_list, list);
+ return 0;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
#endif /* CONFIG_PROC_VMCORE */
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
+ /* We must be prepared to receive a callback immediately. */
+ virtio_device_ready(vm->vdev);
#ifdef CONFIG_PROC_VMCORE
dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+ vm->vmcore_cb.get_device_ram = virtio_mem_vmcore_get_device_ram;
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
register_vmcore_cb(&vm->vmcore_cb);
return 0;
#else /* CONFIG_PROC_VMCORE */
@@ -2760,6 +2853,8 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, usable_region_size,
+ &vm->usable_region_size);
/* Determine the nid for the device based on the lowest address. */
if (vm->nid == NUMA_NO_NODE)
@@ -2870,8 +2965,6 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
- virtio_device_ready(vdev);
-
/* trigger a config update to start processing the requested_size */
if (!vm->in_kdump) {
atomic_set(&vm->config_changed, 1);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 88074451dd61..d6d79af44569 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -794,6 +794,46 @@ static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
return num_vfs;
}
+static void virtio_pci_reset_prepare(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret = 0;
+
+ ret = virtio_device_reset_prepare(&vp_dev->vdev);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset prepare failure: %d",
+ ret);
+ return;
+ }
+
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void virtio_pci_reset_done(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ if (pci_is_enabled(pci_dev))
+ return;
+
+ ret = pci_enable_device(pci_dev);
+ if (!ret) {
+ pci_set_master(pci_dev);
+ ret = virtio_device_reset_done(&vp_dev->vdev);
+ }
+
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(&pci_dev->dev, "Reset done failure: %d", ret);
+}
+
+static const struct pci_error_handlers virtio_pci_err_handler = {
+ .reset_prepare = virtio_pci_reset_prepare,
+ .reset_done = virtio_pci_reset_done,
+};
+
static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
@@ -803,6 +843,7 @@ static struct pci_driver virtio_pci_driver = {
.driver.pm = &virtio_pci_pm_ops,
#endif
.sriov_configure = virtio_pci_sriov_configure,
+ .err_handler = &virtio_pci_err_handler,
};
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index a2ecbb863c57..e2a568c9a43a 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -7,7 +7,7 @@
* It is an I2C to 1-wire bridge.
* There are two variations: -100 and -800, which have 1 or 8 1-wire ports.
* The complete datasheet can be obtained from MAXIM's website at:
- * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/4382
+ * https://www.analog.com/en/products/ds2482-100.html
*/
#include <linux/module.h>
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#include <linux/w1.h>
@@ -445,17 +446,20 @@ static int ds2482_probe(struct i2c_client *client)
int err = -ENODEV;
int temp1;
int idx;
+ int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
- data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct ds2482_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(&client->dev, "vcc");
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to enable regulator\n");
data->client = client;
i2c_set_clientdata(client, data);
@@ -463,7 +467,7 @@ static int ds2482_probe(struct i2c_client *client)
/* Reset the device (sets the read_ptr to status) */
if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) {
dev_warn(&client->dev, "DS2482 reset failed.\n");
- goto exit_free;
+ return err;
}
/* Sleep at least 525ns to allow the reset to complete */
@@ -474,7 +478,7 @@ static int ds2482_probe(struct i2c_client *client)
if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) {
dev_warn(&client->dev, "DS2482 reset status "
"0x%02X - not a DS2482\n", temp1);
- goto exit_free;
+ return err;
}
/* Detect the 8-port version */
@@ -516,9 +520,6 @@ exit_w1_remove:
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -532,9 +533,6 @@ static void ds2482_remove(struct i2c_client *client)
if (data->w1_ch[idx].pdev != NULL)
w1_remove_master_device(&data->w1_ch[idx].w1_bm);
}
-
- /* Free the memory */
- kfree(data);
}
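With the allocation and the supply handled by devm_*, the error labels and kfree() calls above become unnecessary; cleanup happens automatically when the driver is unbound. A condensed sketch of the same probe idiom for a hypothetical I2C driver (names are illustrative):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

struct foo_data {
	struct i2c_client *client;
};

static int foo_probe(struct i2c_client *client)
{
	struct foo_data *data;
	int ret;

	/* freed automatically when the driver is unbound */
	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* enabled now, disabled and released automatically on unbind */
	ret = devm_regulator_get_enable(&client->dev, "vcc");
	if (ret)
		return dev_err_probe(&client->dev, ret,
				     "Failed to enable regulator\n");

	data->client = client;
	i2c_set_clientdata(client, data);
	return 0;
}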
/*
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 2f5926859b8b..1cae9b243ff8 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -24,7 +24,7 @@
static ssize_t w1_f12_read_state(
struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
@@ -61,7 +61,7 @@ static ssize_t w1_f12_read_state(
static ssize_t w1_f12_write_output(
struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -95,14 +95,14 @@ static ssize_t w1_f12_write_output(
}
#define NB_SYSFS_BIN_FILES 2
-static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+static const struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
{
.attr = {
.name = "state",
.mode = 0444,
},
.size = 1,
- .read = w1_f12_read_state,
+ .read_new = w1_f12_read_state,
},
{
.attr = {
@@ -110,7 +110,7 @@ static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
.mode = 0664,
},
.size = 1,
- .write = w1_f12_write_output,
+ .write_new = w1_f12_write_output,
}
};
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 56f822a1dfdb..beccd2912d2a 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -65,8 +65,8 @@ static int _read_reg(struct w1_slave *sl, u8 address, unsigned char *buf)
}
static ssize_t state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -77,7 +77,7 @@ static ssize_t state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t output_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
@@ -90,7 +90,7 @@ static ssize_t output_read(struct file *filp, struct kobject *kobj,
}
static ssize_t activity_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
@@ -103,8 +103,8 @@ static ssize_t activity_read(struct file *filp, struct kobject *kobj,
}
static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -117,7 +117,7 @@ static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
static ssize_t cond_search_polarity_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
@@ -127,8 +127,8 @@ static ssize_t cond_search_polarity_read(struct file *filp,
}
static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
@@ -160,7 +160,7 @@ static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
#endif
static ssize_t output_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -210,7 +210,7 @@ out:
* Writing to the activity file resets the activity latches.
*/
static ssize_t activity_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -240,8 +240,8 @@ error:
}
static ssize_t status_control_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[4];
@@ -310,14 +310,14 @@ out:
return res;
}
-static BIN_ATTR_RO(state, 1);
-static BIN_ATTR_RW(output, 1);
-static BIN_ATTR_RW(activity, 1);
-static BIN_ATTR_RO(cond_search_mask, 1);
-static BIN_ATTR_RO(cond_search_polarity, 1);
-static BIN_ATTR_RW(status_control, 1);
+static const BIN_ATTR_RO(state, 1);
+static const BIN_ATTR_RW(output, 1);
+static const BIN_ATTR_RW(activity, 1);
+static const BIN_ATTR_RO(cond_search_mask, 1);
+static const BIN_ATTR_RO(cond_search_polarity, 1);
+static const BIN_ATTR_RW(status_control, 1);
-static struct bin_attribute *w1_f29_bin_attrs[] = {
+static const struct bin_attribute *const w1_f29_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
&bin_attr_activity,
@@ -328,7 +328,7 @@ static struct bin_attribute *w1_f29_bin_attrs[] = {
};
static const struct attribute_group w1_f29_group = {
- .bin_attrs = w1_f29_bin_attrs,
+ .bin_attrs_new = w1_f29_bin_attrs,
};
static const struct attribute_group *w1_f29_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index 739009806467..5fa46017ca7c 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -25,8 +25,8 @@
#define W1_F3A_INVALID_PIO_STATE 0xFF
static ssize_t state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F3A_RETRIES;
@@ -78,10 +78,10 @@ out:
return bytes_read;
}
-static BIN_ATTR_RO(state, 1);
+static const BIN_ATTR_RO(state, 1);
static ssize_t output_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -128,16 +128,16 @@ out:
return bytes_written;
}
-static BIN_ATTR(output, 0664, NULL, output_write, 1);
+static const BIN_ATTR(output, 0664, NULL, output_write, 1);
-static struct bin_attribute *w1_f3a_bin_attrs[] = {
+static const struct bin_attribute *const w1_f3a_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
NULL,
};
static const struct attribute_group w1_f3a_group = {
- .bin_attrs = w1_f3a_bin_attrs,
+ .bin_attrs_new = w1_f3a_bin_attrs,
};
static const struct attribute_group *w1_f3a_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
index 0ea7d779d17a..ff56e2e68e58 100644
--- a/drivers/w1/slaves/w1_ds2430.c
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -95,7 +95,7 @@ static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -202,7 +202,7 @@ retry:
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -263,15 +263,15 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
-static struct bin_attribute *w1_f14_bin_attrs[] = {
+static const struct bin_attribute *const w1_f14_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f14_group = {
- .bin_attrs = w1_f14_bin_attrs,
+ .bin_attrs_new = w1_f14_bin_attrs,
};
static const struct attribute_group *w1_f14_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 6856b1c29e17..27b390fb59da 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -95,7 +95,7 @@ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -201,7 +201,7 @@ retry:
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -262,15 +262,15 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
-static struct bin_attribute *w1_f2d_bin_attrs[] = {
+static const struct bin_attribute *const w1_f2d_bin_attrs[] = {
&bin_attr_eeprom,
NULL,
};
static const struct attribute_group w1_f2d_group = {
- .bin_attrs = w1_f2d_bin_attrs,
+ .bin_attrs_new = w1_f2d_bin_attrs,
};
static const struct attribute_group *w1_f2d_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 250b7f7ec429..22331d840ec1 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -110,7 +110,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -224,7 +224,7 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -274,27 +274,27 @@ out_up:
return count;
}
-static struct bin_attribute bin_attr_f23_eeprom = {
+static const struct bin_attribute bin_attr_f23_eeprom = {
.attr = { .name = "eeprom", .mode = 0644 },
- .read = eeprom_read,
- .write = eeprom_write,
+ .read_new = eeprom_read,
+ .write_new = eeprom_write,
.size = W1_EEPROM_DS2433_SIZE,
};
-static struct bin_attribute bin_attr_f43_eeprom = {
+static const struct bin_attribute bin_attr_f43_eeprom = {
.attr = { .name = "eeprom", .mode = 0644 },
- .read = eeprom_read,
- .write = eeprom_write,
+ .read_new = eeprom_read,
+ .write_new = eeprom_write,
.size = W1_EEPROM_DS28EC20_SIZE,
};
-static struct bin_attribute *w1_f23_bin_attributes[] = {
+static const struct bin_attribute *const w1_f23_bin_attributes[] = {
&bin_attr_f23_eeprom,
NULL,
};
static const struct attribute_group w1_f23_group = {
- .bin_attrs = w1_f23_bin_attributes,
+ .bin_attrs_new = w1_f23_bin_attributes,
};
static const struct attribute_group *w1_f23_groups[] = {
@@ -302,13 +302,13 @@ static const struct attribute_group *w1_f23_groups[] = {
NULL,
};
-static struct bin_attribute *w1_f43_bin_attributes[] = {
+static const struct bin_attribute *const w1_f43_bin_attributes[] = {
&bin_attr_f43_eeprom,
NULL,
};
static const struct attribute_group w1_f43_group = {
- .bin_attrs = w1_f43_bin_attributes,
+ .bin_attrs_new = w1_f43_bin_attributes,
};
static const struct attribute_group *w1_f43_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index e008c27b3db9..630a6db5045e 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -288,7 +288,7 @@ static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
}
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -310,7 +310,7 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
}
static ssize_t iad_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -331,7 +331,7 @@ static ssize_t iad_read(struct file *filp, struct kobject *kobj,
}
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -361,7 +361,7 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
}
static ssize_t page1_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -391,7 +391,7 @@ static ssize_t page1_read(struct file *filp, struct kobject *kobj,
}
static ssize_t offset_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -410,7 +410,7 @@ static ssize_t offset_write(struct file *filp, struct kobject *kobj,
}
static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -431,7 +431,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vad_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -452,7 +452,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -472,15 +472,15 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR_RW(iad, 0);
-static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
-static BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
-static BIN_ATTR_WO(offset, 2);
-static BIN_ATTR_RO(temperature, 0/* real length varies */);
-static BIN_ATTR_RO(vad, 0/* real length varies */);
-static BIN_ATTR_RO(vdd, 0/* real length varies */);
+static const BIN_ATTR_RW(iad, 0);
+static const BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
+static const BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
+static const BIN_ATTR_WO(offset, 2);
+static const BIN_ATTR_RO(temperature, 0/* real length varies */);
+static const BIN_ATTR_RO(vad, 0/* real length varies */);
+static const BIN_ATTR_RO(vdd, 0/* real length varies */);
-static struct bin_attribute *w1_ds2438_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2438_bin_attrs[] = {
&bin_attr_iad,
&bin_attr_page0,
&bin_attr_page1,
@@ -492,7 +492,7 @@ static struct bin_attribute *w1_ds2438_bin_attrs[] = {
};
static const struct attribute_group w1_ds2438_group = {
- .bin_attrs = w1_ds2438_bin_attrs,
+ .bin_attrs_new = w1_ds2438_bin_attrs,
};
static const struct attribute_group *w1_ds2438_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 3cde1bb1886b..ba7beb7b01f9 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -87,7 +87,7 @@ int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -95,15 +95,15 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
return w1_ds2780_io(dev, buf, off, count, 0);
}
-static BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
-static struct bin_attribute *w1_ds2780_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2780_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2780_group = {
- .bin_attrs = w1_ds2780_bin_attrs,
+ .bin_attrs_new = w1_ds2780_bin_attrs,
};
static const struct attribute_group *w1_ds2780_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index e418484b4a49..acd04ee96e81 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -84,7 +84,7 @@ int w1_ds2781_eeprom_cmd(struct device *dev, int addr, int cmd)
EXPORT_SYMBOL(w1_ds2781_eeprom_cmd);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -92,15 +92,15 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
return w1_ds2781_io(dev, buf, off, count, 0);
}
-static BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
+static const BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
-static struct bin_attribute *w1_ds2781_bin_attrs[] = {
+static const struct bin_attribute *const w1_ds2781_bin_attrs[] = {
&bin_attr_w1_slave,
NULL,
};
static const struct attribute_group w1_ds2781_group = {
- .bin_attrs = w1_ds2781_bin_attrs,
+ .bin_attrs_new = w1_ds2781_bin_attrs,
};
static const struct attribute_group *w1_ds2781_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
index 4c1a2c515317..6ee895640d4a 100644
--- a/drivers/w1/slaves/w1_ds2805.c
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -92,7 +92,7 @@ static int w1_f0d_readblock(struct w1_slave *sl, int off, int count, char *buf)
}
static ssize_t w1_f0d_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -200,7 +200,7 @@ retry:
}
static ssize_t w1_f0d_write_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -261,14 +261,14 @@ out_up:
return count;
}
-static struct bin_attribute w1_f0d_bin_attr = {
+static const struct bin_attribute w1_f0d_bin_attr = {
.attr = {
.name = "eeprom",
.mode = 0644,
},
.size = W1_F0D_EEPROM_SIZE,
- .read = w1_f0d_read_bin,
- .write = w1_f0d_write_bin,
+ .read_new = w1_f0d_read_bin,
+ .write_new = w1_f0d_write_bin,
};
static int w1_f0d_add_slave(struct w1_slave *sl)
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 2854b8b9e93f..d99ffadbe29b 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -112,7 +112,7 @@ static int w1_f1C_read(struct w1_slave *sl, int addr, int len, char *data)
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -223,7 +223,7 @@ static int w1_f1C_write(struct w1_slave *sl, int addr, int len, const u8 *data)
}
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
@@ -276,10 +276,10 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+static const BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
static ssize_t pio_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
+ const struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
@@ -298,8 +298,8 @@ static ssize_t pio_read(struct file *filp, struct kobject *kobj,
}
static ssize_t pio_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -337,7 +337,7 @@ static ssize_t pio_write(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RW(pio, 1);
+static const BIN_ATTR_RW(pio, 1);
static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -363,7 +363,7 @@ static struct attribute *w1_f1C_attrs[] = {
NULL,
};
-static struct bin_attribute *w1_f1C_bin_attrs[] = {
+static const struct bin_attribute *const w1_f1C_bin_attrs[] = {
&bin_attr_eeprom,
&bin_attr_pio,
NULL,
@@ -371,7 +371,7 @@ static struct bin_attribute *w1_f1C_bin_attrs[] = {
static const struct attribute_group w1_f1C_group = {
.attrs = w1_f1C_attrs,
- .bin_attrs = w1_f1C_bin_attrs,
+ .bin_attrs_new = w1_f1C_bin_attrs,
};
static const struct attribute_group *w1_f1C_groups[] = {
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
index 52261b54d842..5738cbce1a37 100644
--- a/drivers/w1/slaves/w1_ds28e17.c
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -583,7 +583,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
return result;
/* Return current speed value. */
- return sprintf(buf, "%d\n", result);
+ return sysfs_emit(buf, "%d\n", result);
}
static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
@@ -633,7 +633,7 @@ static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
struct w1_f19_data *data = sl->family_data;
/* Return current stretch value. */
- return sprintf(buf, "%d\n", data->stretch);
+ return sysfs_emit(buf, "%d\n", data->stretch);
}
static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index d82e86d3ddf6..29f200bbab41 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -111,7 +111,7 @@ ATTRIBUTE_GROUPS(w1_slave);
/* Default family */
static ssize_t rw_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
+ const struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -130,8 +130,8 @@ out_up:
}
static ssize_t rw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -141,15 +141,15 @@ static ssize_t rw_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_RW(rw, PAGE_SIZE);
+static const BIN_ATTR_RW(rw, PAGE_SIZE);
-static struct bin_attribute *w1_slave_bin_attrs[] = {
+static const struct bin_attribute *const w1_slave_bin_attrs[] = {
&bin_attr_rw,
NULL,
};
static const struct attribute_group w1_slave_default_group = {
- .bin_attrs = w1_slave_bin_attrs,
+ .bin_attrs_new = w1_slave_bin_attrs,
};
static const struct attribute_group *w1_slave_default_groups[] = {
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index d708c091bf1b..77039f2f0be5 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -135,7 +135,11 @@ static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
}
static const struct watchdog_info da9052_wdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_CARDRESET |
+ WDIOF_OVERHEAT |
+ WDIOF_POWERUNDER,
.identity = "DA9052 Watchdog",
};
@@ -169,6 +173,13 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
+ if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR)
+ da9052_wdt->bootstatus |= WDIOF_CARDRESET;
+ if (da9052->fault_log & DA9052_FAULTLOG_TEMPOVER)
+ da9052_wdt->bootstatus |= WDIOF_OVERHEAT;
+ if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT)
+ da9052_wdt->bootstatus |= WDIOF_POWERUNDER;
+
ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
DA9052_CONTROLD_TWDSCALE, 0);
if (ret < 0) {
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 33835c0b06de..d3ced783a5f4 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -25,7 +25,6 @@ static bool nowayout = WATCHDOG_NOWAYOUT;
/**
* struct max77620_variant - Data specific to a chip variant
- * @wdt_info: watchdog descriptor
* @reg_onoff_cnfg2: ONOFF_CNFG2 register offset
* @reg_cnfg_glbl2: CNFG_GLBL2 register offset
* @reg_cnfg_glbl3: CNFG_GLBL3 register offset
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 58c9445c0f88..d1f9ce4100a8 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -273,7 +273,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdd->status);
time_left_ms = rti_wdt_get_timeleft_ms(wdd);
- heartbeat_ms = readl(wdt->base + RTIDWDPRLD);
+ /* AM62x TRM: texp = (RTIDWDPRLD + 1) * (2^13) / RTICLK1 */
+ heartbeat_ms = readl(wdt->base + RTIDWDPRLD) + 1;
heartbeat_ms <<= WDT_PRELOAD_SHIFT;
heartbeat_ms *= 1000;
do_div(heartbeat_ms, wdt->freq);
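A worked instance of that formula, assuming an illustrative preload of 11 and a 32768 Hz RTICLK1 (example numbers, not taken from the driver):

static unsigned int rti_wdt_example_heartbeat_ms(void)
{
	u64 ms = 11 + 1;	/* RTIDWDPRLD + 1 */

	ms <<= 13;		/* * 2^13 -> 98304 clock cycles */
	ms *= 1000;		/* scale to milliseconds */
	do_div(ms, 32768);	/* / RTICLK1 -> 3000, i.e. a 3 s expiry */
	return ms;
}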
@@ -301,6 +302,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "No memory address assigned to the region.\n");
goto err_iomap;
diff --git a/drivers/watchdog/rzv2h_wdt.c b/drivers/watchdog/rzv2h_wdt.c
index 1d1b17312747..8defd0241213 100644
--- a/drivers/watchdog/rzv2h_wdt.c
+++ b/drivers/watchdog/rzv2h_wdt.c
@@ -217,24 +217,24 @@ static int rzv2h_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->pclk = devm_clk_get_prepared(&pdev->dev, "pclk");
+ priv->pclk = devm_clk_get_prepared(dev, "pclk");
if (IS_ERR(priv->pclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
+ return dev_err_probe(dev, PTR_ERR(priv->pclk), "no pclk");
- priv->oscclk = devm_clk_get_prepared(&pdev->dev, "oscclk");
+ priv->oscclk = devm_clk_get_prepared(dev, "oscclk");
if (IS_ERR(priv->oscclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->oscclk), "no oscclk");
+ return dev_err_probe(dev, PTR_ERR(priv->oscclk), "no oscclk");
- priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ priv->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(priv->rstc))
- return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get cpg reset");
priv->wdev.max_hw_heartbeat_ms = (MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256) /
clk_get_rate(priv->oscclk);
dev_dbg(dev, "max hw timeout of %dms\n", priv->wdev.max_hw_heartbeat_ms);
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
@@ -251,7 +251,7 @@ static int rzv2h_wdt_probe(struct platform_device *pdev)
if (ret)
dev_warn(dev, "Specified timeout invalid, using default");
- return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+ return devm_watchdog_register_device(dev, &priv->wdev);
}
static const struct of_device_id rzv2h_wdt_ids[] = {
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 109e2e37e8f0..c2125f204a13 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -62,7 +62,6 @@
* @clk: (optional) clock structure of wdt
* @rate: (optional) clock rate when provided via properties
* @adev: amba device structure of wdt
- * @status: current status of wdt
* @load_val: load value to be set for current timeout
*/
struct sp805_wdt {
@@ -128,7 +127,7 @@ static unsigned int wdt_timeleft(struct watchdog_device *wdd)
/*If the interrupt is inactive then time left is WDTValue + WDTLoad. */
if (!(readl_relaxed(wdt->base + WDTRIS) & INT_MASK))
- load += wdt->load_val + 1;
+ load += (u64)wdt->load_val + 1;
spin_unlock(&wdt->lock);
return div_u64(load, wdt->rate);
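The (u64) cast matters because load_val is a 32-bit quantity: with a preload of 0xffffffff, the old 32-bit addition wrapped to zero before reaching the 64-bit accumulator. A standalone illustration, assuming the usual 32-bit unsigned int:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t load_val = 0xffffffffu;
	uint64_t wrong = load_val + 1;			/* 32-bit add wraps to 0 */
	uint64_t right = (uint64_t)load_val + 1;	/* 4294967296 */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}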
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 528395133b4f..163f7f1d70f1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -84,7 +84,7 @@ module_param(balloon_boot_timeout, uint, 0444);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;
-static struct ctl_table balloon_table[] = {
+static const struct ctl_table balloon_table[] = {
{
.procname = "hotplug_unpopulated",
.data = &xen_hotplug_unpopulated,
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index c63f317e3df3..093ad4a08672 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -105,7 +105,7 @@ static ssize_t online_show(struct device *dev,
return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}
-static ssize_t __ref online_store(struct device *dev,
+static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index b72ee9379d77..4926d4badc57 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -341,6 +341,7 @@ int pvcalls_front_socket(struct socket *sock)
pvcalls_exit();
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_socket);
static void free_active_ring(struct sock_mapping *map)
{
@@ -486,6 +487,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_connect);
static int __write_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
@@ -581,6 +583,7 @@ again:
pvcalls_exit_sock(sock);
return tot_sent;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_sendmsg);
static int __read_ring(struct pvcalls_data_intf *intf,
struct pvcalls_data *data,
@@ -666,6 +669,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_recvmsg);
int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
@@ -719,6 +723,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
pvcalls_exit_sock(sock);
return 0;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_bind);
int pvcalls_front_listen(struct socket *sock, int backlog)
{
@@ -768,8 +773,10 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_listen);
-int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
+int pvcalls_front_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
@@ -788,7 +795,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
}
- nonblock = flags & SOCK_NONBLOCK;
+ nonblock = arg->flags & SOCK_NONBLOCK;
/*
* Backend only supports 1 inflight accept request, will return
* errors for the others
@@ -904,6 +911,7 @@ received:
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_accept);
static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
@@ -1004,6 +1012,7 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
pvcalls_exit_sock(sock);
return ret;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_poll);
int pvcalls_front_release(struct socket *sock)
{
@@ -1087,6 +1096,7 @@ int pvcalls_front_release(struct socket *sock)
pvcalls_exit();
return 0;
}
+EXPORT_SYMBOL_GPL(pvcalls_front_release);
static const struct xenbus_device_id pvcalls_front_ids[] = {
{ "pvcalls" },
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index f694ad77379f..881ef14660bc 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -12,7 +12,7 @@ int pvcalls_front_bind(struct socket *sock,
int pvcalls_front_listen(struct socket *sock, int backlog);
int pvcalls_front_accept(struct socket *sock,
struct socket *newsock,
- int flags);
+ struct proto_accept_arg *arg);
int pvcalls_front_sendmsg(struct socket *sock,
struct msghdr *msg,
size_t len);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 698c43dd5dc8..f28bc763847a 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -202,7 +202,7 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
return inode->i_sb->s_fs_info;
}
-static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(const struct dentry *dentry)
{
return dentry->d_sb->s_fs_info;
}
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 01338d4c2d9e..5061f192eafd 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -61,7 +61,7 @@ static void v9fs_dentry_release(struct dentry *dentry)
p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
}
-static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct p9_fid *fid;
struct inode *inode;
@@ -99,14 +99,36 @@ out_valid:
return 1;
}
+static int v9fs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
+{
+ return __v9fs_lookup_revalidate(dentry, flags);
+}
+
+static bool v9fs_dentry_unalias_trylock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ return down_write_trylock(&v9ses->rename_sem);
+}
+
+static void v9fs_dentry_unalias_unlock(const struct dentry *dentry)
+{
+ struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+ up_write(&v9ses->rename_sem);
+}
+
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
- .d_weak_revalidate = v9fs_lookup_revalidate,
+ .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
const struct dentry_operations v9fs_dentry_operations = {
.d_delete = always_delete_dentry,
.d_release = v9fs_dentry_release,
+ .d_unalias_trylock = v9fs_dentry_unalias_trylock,
+ .d_unalias_unlock = v9fs_dentry_unalias_unlock,
};
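The same two changes show up across the VFS in this series: ->d_revalidate() now receives the parent directory inode and the name being validated, and the new ->d_unalias_trylock()/->d_unalias_unlock() hooks let d_splice_alias() bracket its alias takeover with a filesystem-provided lock (here v9fs's rename_sem). A minimal sketch for a hypothetical filesystem, with illustrative types and names:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/rwsem.h>

struct foo_sb_info {
	struct rw_semaphore rename_sem;
};

static int foo_d_revalidate(struct inode *dir, const struct qstr *name,
			    struct dentry *dentry, unsigned int flags)
{
	/* check 'name' against 'dir' directly; no dget_parent() needed */
	return 1;
}

static bool foo_d_unalias_trylock(const struct dentry *dentry)
{
	struct foo_sb_info *sbi = dentry->d_sb->s_fs_info;

	return down_write_trylock(&sbi->rename_sem);
}

static void foo_d_unalias_unlock(const struct dentry *dentry)
{
	struct foo_sb_info *sbi = dentry->d_sb->s_fs_info;

	up_write(&sbi->rename_sem);
}

static const struct dentry_operations foo_dentry_ops = {
	.d_revalidate		= foo_d_revalidate,
	.d_unalias_trylock	= foo_d_unalias_trylock,
	.d_unalias_unlock	= foo_d_unalias_unlock,
};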
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index a843c36fc471..02cbf38e1a77 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -23,7 +23,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, struct dir_context *ctx);
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
+static int afs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
static void afs_d_iput(struct dentry *dentry, struct inode *inode);
static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen,
@@ -597,19 +598,19 @@ static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* Do a lookup of a single name in a directory
* - just returns the FID the dentry name maps to if found
*/
-static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
+static int afs_do_lookup_one(struct inode *dir, const struct qstr *name,
struct afs_fid *fid,
afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
.ctx.actor = afs_lookup_one_filldir,
- .name = dentry->d_name,
+ .name = *name,
.fid.vid = as->volume->vid
};
int ret;
- _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+ _enter("{%lu},{%.*s},", dir->i_ino, name->len, name->name);
/* search the directory */
ret = afs_dir_iterate(dir, &cookie.ctx, NULL, _dir_version);
@@ -1023,21 +1024,12 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Check the validity of a dentry under RCU conditions.
*/
-static int afs_d_revalidate_rcu(struct dentry *dentry)
+static int afs_d_revalidate_rcu(struct afs_vnode *dvnode, struct dentry *dentry)
{
- struct afs_vnode *dvnode;
- struct dentry *parent;
- struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
- /* Check the parent directory is still valid first. */
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- dvnode = AFS_FS_I(dir);
if (test_bit(AFS_VNODE_DELETED, &dvnode->flags))
return -ECHILD;
@@ -1065,11 +1057,11 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
-static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int afs_d_revalidate(struct inode *parent_dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct afs_vnode *vnode, *dir;
+ struct afs_vnode *vnode, *dir = AFS_FS_I(parent_dir);
struct afs_fid fid;
- struct dentry *parent;
struct inode *inode;
struct key *key;
afs_dataversion_t dir_version, invalid_before;
@@ -1077,7 +1069,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
int ret;
if (flags & LOOKUP_RCU)
- return afs_d_revalidate_rcu(dentry);
+ return afs_d_revalidate_rcu(dir, dentry);
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
@@ -1092,14 +1084,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- /* Hold the parent dentry so we can peer at it */
- parent = dget_parent(dentry);
- dir = AFS_FS_I(d_inode(parent));
-
/* validate the parent directory */
ret = afs_validate(dir, key);
if (ret == -ERESTARTSYS) {
- dput(parent);
key_put(key);
return ret;
}
@@ -1127,7 +1114,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, &dir_version);
+ ret = afs_do_lookup_one(&dir->netfs.inode, name, &fid, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1171,22 +1158,19 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
default:
- _debug("failed to iterate dir %pd: %d",
- parent, ret);
+ _debug("failed to iterate parent %pd2: %d", dentry, ret);
goto not_found;
}
out_valid:
dentry->d_fsdata = (void *)(unsigned long)dir_version;
out_valid_noupdate:
- dput(parent);
key_put(key);
_leave(" = 1 [valid]");
return 1;
not_found:
_debug("dropping dentry %pd2", dentry);
- dput(parent);
key_put(key);
_leave(" = 0 [bad]");
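The afs hunks above switch ->d_revalidate to the new calling convention: the VFS passes the parent directory inode and the name being validated directly, so the filesystem no longer needs the dget_parent()/dput() dance or a separate RCU parent lookup. The following stand-alone C sketch (illustration only, with hypothetical types and names, not the kernel API or part of the patch) shows the shape of such a callback once the parent and the looked-up name arrive as arguments:

#include <stdio.h>
#include <string.h>

struct inode  { unsigned long i_ino; };
struct qstr   { const char *name; unsigned int len; };
struct dentry { struct qstr d_name; int valid; };

/* new-style revalidate: parent dir and looked-up name are parameters */
static int revalidate(const struct inode *dir, const struct qstr *name,
                      const struct dentry *dentry)
{
        (void)dir;   /* a real filesystem would validate the parent first */
        return dentry->valid &&
               name->len == dentry->d_name.len &&
               memcmp(name->name, dentry->d_name.name, name->len) == 0;
}

int main(void)
{
        struct inode dir = { 2 };
        struct dentry file = { { "passwd", 6 }, 1 };
        struct qstr looked_up = { "passwd", 6 };

        printf("valid=%d\n", revalidate(&dir, &looked_up, &file));
        return 0;
}

The same conversion appears again in the ceph hunks later in this patch.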
diff --git a/fs/aio.c b/fs/aio.c
index 50671640b588..7b976b564cfc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -224,7 +224,7 @@ static unsigned long aio_nr; /* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
-static struct ctl_table aio_sysctls[] = {
+static const struct ctl_table aio_sysctls[] = {
{
.procname = "aio-nr",
.data = &aio_nr,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 42bd1cb7c9cd..583ac81669c2 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -60,14 +60,14 @@ static struct inode *anon_inode_make_secure_inode(
const struct inode *context_inode)
{
struct inode *inode;
- const struct qstr qname = QSTR_INIT(name, strlen(name));
int error;
inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
if (IS_ERR(inode))
return inode;
inode->i_flags &= ~S_PRIVATE;
- error = security_inode_init_security_anon(inode, &qname, context_inode);
+ error = security_inode_init_security_anon(inode, &QSTR(name),
+ context_inode);
if (error) {
iput(inode);
return ERR_PTR(error);
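The anon_inodes hunk drops the separately declared qstr in favour of building it inline with QSTR(). A small user-space sketch of the idea — a compound-literal helper so the temporary name wrapper needs no named local; the helper below is an assumption for illustration, not the kernel's exact definition:

#include <stdio.h>
#include <string.h>

struct qstr { const char *name; unsigned int len; };

/* compound-literal helper, similar in spirit to the kernel's QSTR() */
#define QSTR(n) ((struct qstr){ .name = (n), .len = (unsigned int)strlen(n) })

static void init_security(const struct qstr *qname)
{
        printf("labelled as \"%.*s\"\n", (int)qname->len, qname->name);
}

int main(void)
{
        init_security(&QSTR("anon_inode:demo"));   /* no named local needed */
        return 0;
}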
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index 464b927e4fff..85eea7a4dea3 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -15,6 +15,7 @@ config BCACHEFS_FS
select ZLIB_INFLATE
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
+ select CRYPTO
select CRYPTO_SHA256
select CRYPTO_CHACHA20
select CRYPTO_POLY1305
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index fc2ef33b67b3..3ea809990ef1 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1803,7 +1803,6 @@ struct discard_buckets_state {
u64 open;
u64 need_journal_commit;
u64 discarded;
- u64 need_journal_commit_this_dev;
};
static int bch2_discard_one_bucket(struct btree_trans *trans,
@@ -1827,11 +1826,11 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
goto out;
}
- if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
- c->journal.flushed_seq_ondisk,
- pos.inode, pos.offset)) {
- s->need_journal_commit++;
- s->need_journal_commit_this_dev++;
+ u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
+ pos.inode, pos.offset);
+ if (seq_ready > c->journal.flushed_seq_ondisk) {
+ if (seq_ready > c->journal.flushing_seq)
+ s->need_journal_commit++;
goto out;
}
@@ -1865,23 +1864,24 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
discard_locked = true;
}
- if (!bkey_eq(*discard_pos_done, iter.pos) &&
- ca->mi.discard && !c->opts.nochanges) {
- /*
- * This works without any other locks because this is the only
- * thread that removes items from the need_discard tree
- */
- bch2_trans_unlock_long(trans);
- blkdev_issue_discard(ca->disk_sb.bdev,
- k.k->p.offset * ca->mi.bucket_size,
- ca->mi.bucket_size,
- GFP_KERNEL);
- *discard_pos_done = iter.pos;
+ if (!bkey_eq(*discard_pos_done, iter.pos)) {
s->discarded++;
+ *discard_pos_done = iter.pos;
- ret = bch2_trans_relock_notrace(trans);
- if (ret)
- goto out;
+ if (ca->mi.discard && !c->opts.nochanges) {
+ /*
+ * This works without any other locks because this is the only
+ * thread that removes items from the need_discard tree
+ */
+ bch2_trans_unlock_long(trans);
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ k.k->p.offset * ca->mi.bucket_size,
+ ca->mi.bucket_size,
+ GFP_KERNEL);
+ ret = bch2_trans_relock_notrace(trans);
+ if (ret)
+ goto out;
+ }
}
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
@@ -1929,6 +1929,9 @@ static void bch2_do_discards_work(struct work_struct *work)
POS(ca->dev_idx, U64_MAX), 0, k,
bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false)));
+ if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal))
+ bch2_journal_flush_async(&c->journal, NULL);
+
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
@@ -2024,7 +2027,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
break;
}
- trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
+ trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
bch2_trans_put(trans);
percpu_ref_put(&ca->io_ref);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 6df41c331a52..5a781fb4c794 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -205,8 +205,12 @@ static inline bool may_alloc_bucket(struct bch_fs *c,
return false;
}
- if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
- c->journal.flushed_seq_ondisk, bucket.inode, bucket.offset)) {
+ u64 journal_seq_ready =
+ bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
+ bucket.inode, bucket.offset);
+ if (journal_seq_ready > c->journal.flushed_seq_ondisk) {
+ if (journal_seq_ready > c->journal.flushing_seq)
+ s->need_journal_commit++;
s->skipped_need_journal_commit++;
return false;
}
@@ -570,7 +574,7 @@ alloc:
? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
- if (s.skipped_need_journal_commit * 2 > avail)
+ if (s.need_journal_commit * 2 > avail)
bch2_journal_flush_async(&c->journal, NULL);
if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index 9bbb28e90b93..4aa8ee026cb8 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -18,6 +18,7 @@ struct bucket_alloc_state {
u64 buckets_seen;
u64 skipped_open;
u64 skipped_need_journal_commit;
+ u64 need_journal_commit;
u64 skipped_nocow;
u64 skipped_nouse;
u64 skipped_mi_btree_bitmap;
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 672ca2c1d37d..ca755e8d1a37 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -24,7 +24,10 @@ do { \
} while (0)
const char * const bch2_btree_node_flags[] = {
-#define x(f) #f,
+ "typebit",
+ "typebit",
+ "typebit",
+#define x(f) [BTREE_NODE_##f] = #f,
BTREE_FLAGS()
#undef x
NULL
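The btree_cache.c hunk rebuilds bch2_btree_node_flags[] with designated initializers so each name lands at its BTREE_NODE_* index, with the three low type bits covered by "typebit" placeholders. A self-contained sketch of the x-macro plus designated-initializer pattern (hypothetical flag names, illustration only, not part of the patch):

#include <stdio.h>

/* hypothetical flag list in the x-macro style used above */
#define FLAGS() x(dirty) x(noevict) x(write_idx)

enum flag {
        FLAG_typebit0,
        FLAG_typebit1,
        FLAG_typebit2,
#define x(f) FLAG_##f,
        FLAGS()
#undef x
};

static const char * const flag_names[] = {
        "typebit",
        "typebit",
        "typebit",
#define x(f) [FLAG_##f] = #f,   /* designated index keeps name and enum in sync */
        FLAGS()
#undef x
        NULL
};

int main(void)
{
        for (int i = 0; flag_names[i]; i++)
                printf("%d: %s\n", i, flag_names[i]);
        return 0;
}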
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 367231ab1980..5988219c6908 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2239,8 +2239,6 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
if (unlikely(ret))
return bkey_s_c_err(ret);
- btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
-
k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (!k.k)
return k;
@@ -2251,6 +2249,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
iter->k = u;
k.k = &iter->k;
+ btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
return k;
}
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 3b62296c3100..1821f40c161a 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -291,8 +291,10 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
struct btree_path *ck_path,
unsigned flags)
{
- if (flags & BTREE_ITER_cached_nofill)
+ if (flags & BTREE_ITER_cached_nofill) {
+ ck_path->l[0].b = NULL;
return 0;
+ }
struct bch_fs *c = trans->c;
struct btree_iter iter;
@@ -746,7 +748,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
rcu_read_unlock();
mutex_lock(&bc->table.mutex);
mutex_unlock(&bc->table.mutex);
- rcu_read_lock();
continue;
}
for (i = 0; i < tbl->size; i++)
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 6b79b672e0b1..2760dd9569ed 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -348,7 +348,7 @@ static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
unsigned flags)
{
return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
- trans->journal_u64s, flags);
+ trans->journal_u64s, flags, trans);
}
#define JSET_ENTRY_LOG_U64s 4
diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c
index f9fb150eda70..c8a488e6b7b8 100644
--- a/fs/bcachefs/buckets_waiting_for_journal.c
+++ b/fs/bcachefs/buckets_waiting_for_journal.c
@@ -22,23 +22,21 @@ static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_
memset(t->d, 0, sizeof(t->d[0]) << t->bits);
}
-bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
- u64 flushed_seq,
- unsigned dev, u64 bucket)
+u64 bch2_bucket_journal_seq_ready(struct buckets_waiting_for_journal *b,
+ unsigned dev, u64 bucket)
{
struct buckets_waiting_for_journal_table *t;
u64 dev_bucket = (u64) dev << 56 | bucket;
- bool ret = false;
- unsigned i;
+ u64 ret = 0;
mutex_lock(&b->lock);
t = b->t;
- for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);
if (h->dev_bucket == dev_bucket) {
- ret = h->journal_seq > flushed_seq;
+ ret = h->journal_seq;
break;
}
}
diff --git a/fs/bcachefs/buckets_waiting_for_journal.h b/fs/bcachefs/buckets_waiting_for_journal.h
index d2ae19cbe18c..365619ca44c8 100644
--- a/fs/bcachefs/buckets_waiting_for_journal.h
+++ b/fs/bcachefs/buckets_waiting_for_journal.h
@@ -4,8 +4,8 @@
#include "buckets_waiting_for_journal_types.h"
-bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
- u64, unsigned, u64);
+u64 bch2_bucket_journal_seq_ready(struct buckets_waiting_for_journal *,
+ unsigned, u64);
int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
u64, unsigned, u64, u64);
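Returning the bucket's pending journal sequence instead of a yes/no answer lets callers make a three-way decision: the bucket is usable, a flush covering it is already in flight, or a new journal flush is actually needed — and only the last case bumps need_journal_commit. A simplified stand-alone model of that decision, with made-up sequence numbers and structures (not the bcachefs types):

#include <stdio.h>
#include <stdint.h>

/* simplified model: hypothetical values, not the bcachefs structures */
struct journal { uint64_t flushed_seq_ondisk, flushing_seq; };

/* returns the journal seq the bucket is waiting on, or 0 if none */
static uint64_t bucket_journal_seq_ready(uint64_t bucket_seq)
{
        return bucket_seq;
}

int main(void)
{
        struct journal j = { .flushed_seq_ondisk = 90, .flushing_seq = 100 };
        uint64_t seqs[] = { 80, 95, 120 };

        for (int i = 0; i < 3; i++) {
                uint64_t ready = bucket_journal_seq_ready(seqs[i]);

                if (ready <= j.flushed_seq_ondisk)
                        printf("seq %llu: bucket usable\n", (unsigned long long)ready);
                else if (ready <= j.flushing_seq)
                        printf("seq %llu: wait, flush already in flight\n", (unsigned long long)ready);
                else
                        printf("seq %llu: needs a journal flush\n", (unsigned long long)ready);
        }
        return 0;
}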
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index f99ff1819597..114bf2f3879f 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -4,6 +4,7 @@
#include "compress.h"
#include "error.h"
#include "extents.h"
+#include "io_write.h"
#include "opts.h"
#include "super-io.h"
@@ -254,11 +255,14 @@ err:
goto out;
}
-int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
- struct bch_extent_crc_unpacked *crc)
+int bch2_bio_uncompress_inplace(struct bch_write_op *op,
+ struct bio *bio)
{
+ struct bch_fs *c = op->c;
+ struct bch_extent_crc_unpacked *crc = &op->crc;
struct bbuf data = { NULL };
size_t dst_len = crc->uncompressed_size << 9;
+ int ret = 0;
/* bio must own its pages: */
BUG_ON(!bio->bi_vcnt);
@@ -266,17 +270,26 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
crc->compressed_size << 9 > c->opts.encoded_extent_max) {
- bch_err(c, "error rewriting existing data: extent too big");
+ struct printbuf buf = PRINTBUF;
+ bch2_write_op_error(&buf, op);
+ prt_printf(&buf, "error rewriting existing data: extent too big");
+ bch_err_ratelimited(c, "%s", buf.buf);
+ printbuf_exit(&buf);
return -EIO;
}
data = __bounce_alloc(c, dst_len, WRITE);
if (__bio_uncompress(c, bio, data.b, *crc)) {
- if (!c->opts.no_data_io)
- bch_err(c, "error rewriting existing data: decompression error");
- bio_unmap_or_unbounce(c, data);
- return -EIO;
+ if (!c->opts.no_data_io) {
+ struct printbuf buf = PRINTBUF;
+ bch2_write_op_error(&buf, op);
+ prt_printf(&buf, "error rewriting existing data: decompression error");
+ bch_err_ratelimited(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+ ret = -EIO;
+ goto err;
}
/*
@@ -293,9 +306,9 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
crc->uncompressed_size = crc->live_size;
crc->offset = 0;
crc->csum = (struct bch_csum) { 0, 0 };
-
+err:
bio_unmap_or_unbounce(c, data);
- return 0;
+ return ret;
}
int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index 607fd5e232c9..bec2f05bfd52 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -47,8 +47,8 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
}
-int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
- struct bch_extent_crc_unpacked *);
+struct bch_write_op;
+int bch2_bio_uncompress_inplace(struct bch_write_op *, struct bio *);
int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
struct bvec_iter, struct bch_extent_crc_unpacked);
unsigned bch2_bio_compress(struct bch_fs *, struct bio *, size_t *,
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 585214931e05..337494facac6 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -91,15 +91,28 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc
return true;
}
-static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
+static noinline void trace_move_extent_finish2(struct data_update *u,
+ struct bkey_i *new,
+ struct bkey_i *insert)
{
- if (trace_move_extent_finish_enabled()) {
- struct printbuf buf = PRINTBUF;
+ struct bch_fs *c = u->op.c;
+ struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
- trace_move_extent_finish(c, buf.buf);
- printbuf_exit(&buf);
- }
+ prt_newline(&buf);
+
+ bch2_data_update_to_text(&buf, u);
+ prt_newline(&buf);
+
+ prt_str_indented(&buf, "new replicas:\t");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
+ prt_newline(&buf);
+
+ prt_str_indented(&buf, "insert:\t");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+ prt_newline(&buf);
+
+ trace_move_extent_finish(c, buf.buf);
+ printbuf_exit(&buf);
}
static void trace_move_extent_fail2(struct data_update *m,
@@ -372,7 +385,8 @@ restart_drop_extra_replicas:
bch2_btree_iter_set_pos(&iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
- trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
+ if (trace_move_extent_finish_enabled())
+ trace_move_extent_finish2(m, &new->k_i, insert);
}
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -525,34 +539,38 @@ void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
struct data_update_opts *data_opts)
{
printbuf_tabstop_push(out, 20);
- prt_str(out, "rewrite ptrs:\t");
+
+ prt_str_indented(out, "rewrite ptrs:\t");
bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
prt_newline(out);
- prt_str(out, "kill ptrs:\t");
+ prt_str_indented(out, "kill ptrs:\t");
bch2_prt_u64_base2(out, data_opts->kill_ptrs);
prt_newline(out);
- prt_str(out, "target:\t");
+ prt_str_indented(out, "target:\t");
bch2_target_to_text(out, c, data_opts->target);
prt_newline(out);
- prt_str(out, "compression:\t");
+ prt_str_indented(out, "compression:\t");
bch2_compression_opt_to_text(out, io_opts->background_compression);
prt_newline(out);
- prt_str(out, "opts.replicas:\t");
+ prt_str_indented(out, "opts.replicas:\t");
prt_u64(out, io_opts->data_replicas);
+ prt_newline(out);
- prt_str(out, "extra replicas:\t");
+ prt_str_indented(out, "extra replicas:\t");
prt_u64(out, data_opts->extra_replicas);
}
void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
- bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
- prt_newline(out);
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
+ prt_newline(out);
+
+ prt_str_indented(out, "old key:\t");
+ bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
}
int bch2_extent_drop_ptrs(struct btree_trans *trans,
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index b5de52a50d10..55333e82d1fe 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -20,6 +20,7 @@
#include "extents.h"
#include "fsck.h"
#include "inode.h"
+#include "journal_reclaim.h"
#include "super.h"
#include <linux/console.h>
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 8fcf7c8e5ede..53a421ff136d 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -450,7 +450,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *
return ret;
struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
- struct qstr name = (struct qstr) QSTR(name_buf);
+ struct qstr name = QSTR(name_buf);
inode->bi_dir = lostfound.bi_inum;
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index d2e134528f0e..428b9be6af34 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -285,12 +285,14 @@ void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
struct bch_inode_unpacked *);
int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *);
+#include "rebalance.h"
+
static inline struct bch_extent_rebalance
bch2_inode_rebalance_opts_get(struct bch_fs *c, struct bch_inode_unpacked *inode)
{
struct bch_io_opts io_opts;
bch2_inode_opts_get(&io_opts, c, inode);
- return io_opts_to_rebalance_opts(&io_opts);
+ return io_opts_to_rebalance_opts(c, &io_opts);
}
int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 3e71860f66b9..dd508d93e9fc 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -406,7 +406,7 @@ static void __bch2_write_op_error(struct printbuf *out, struct bch_write_op *op,
op->flags & BCH_WRITE_MOVE ? "(internal move)" : "");
}
-static void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op)
+void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op)
{
__bch2_write_op_error(out, op, op->pos.offset);
}
@@ -873,7 +873,7 @@ static enum prep_encoded_ret {
if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
return PREP_ENCODED_CHECKSUM_ERR;
- if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
+ if (bch2_bio_uncompress_inplace(op, bio))
return PREP_ENCODED_ERR;
}
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
index 5400ce94ee57..b4626013abc8 100644
--- a/fs/bcachefs/io_write.h
+++ b/fs/bcachefs/io_write.h
@@ -20,6 +20,8 @@ static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
enum bch_data_type, const struct bkey_i *, bool);
+void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op);
+
#define BCH_WRITE_FLAGS() \
x(ALLOC_NOWAIT) \
x(CACHED) \
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 2cd20114b74b..24c294d4634e 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -113,11 +113,10 @@ journal_seq_to_buf(struct journal *j, u64 seq)
static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(p->list); i++)
- INIT_LIST_HEAD(&p->list[i]);
- INIT_LIST_HEAD(&p->flushed);
+ for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
+ INIT_LIST_HEAD(&p->unflushed[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
+ INIT_LIST_HEAD(&p->flushed[i]);
atomic_set(&p->count, count);
p->devs.nr = 0;
}
@@ -320,6 +319,16 @@ void bch2_journal_halt(struct journal *j)
spin_unlock(&j->lock);
}
+void bch2_journal_halt_locked(struct journal *j)
+{
+ lockdep_assert_held(&j->lock);
+
+ __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
+ if (!j->err_seq)
+ j->err_seq = journal_cur_seq(j);
+ journal_wake(j);
+}
+
static bool journal_entry_want_write(struct journal *j)
{
bool ret = !journal_entry_is_open(j) ||
@@ -382,9 +391,12 @@ static int journal_entry_open(struct journal *j)
if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
return JOURNAL_ERR_max_in_flight;
- if (bch2_fs_fatal_err_on(journal_cur_seq(j) >= JOURNAL_SEQ_MAX,
- c, "cannot start: journal seq overflow"))
+ if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
+ bch_err(c, "cannot start: journal seq overflow");
+ if (bch2_fs_emergency_read_only_locked(c))
+ bch_err(c, "fatal error - emergency read only");
return JOURNAL_ERR_insufficient_devices; /* -EROFS */
+ }
BUG_ON(!j->cur_entry_sectors);
@@ -601,6 +613,16 @@ out:
: -BCH_ERR_journal_res_get_blocked;
}
+static unsigned max_dev_latency(struct bch_fs *c)
+{
+ u64 nsecs = 0;
+
+ for_each_rw_member(c, ca)
+ nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);
+
+ return nsecs_to_jiffies(nsecs);
+}
+
/*
* Essentially the entry function to the journaling code. When bcachefs is doing
* a btree insert, it calls this function to get the current journal write.
@@ -612,17 +634,31 @@ out:
* btree node write locks.
*/
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
- unsigned flags)
+ unsigned flags,
+ struct btree_trans *trans)
{
int ret;
if (closure_wait_event_timeout(&j->async_wait,
(ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
(flags & JOURNAL_RES_GET_NONBLOCK),
- HZ * 10))
+ HZ))
return ret;
+ if (trans)
+ bch2_trans_unlock_long(trans);
+
struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);
+
+ remaining_wait = max(0, remaining_wait - HZ);
+
+ if (closure_wait_event_timeout(&j->async_wait,
+ (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+ (flags & JOURNAL_RES_GET_NONBLOCK),
+ remaining_wait))
+ return ret;
+
struct printbuf buf = PRINTBUF;
bch2_journal_debug_to_text(&buf, j);
bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
@@ -727,7 +763,7 @@ recheck_need_open:
* livelock:
*/
sched_annotate_sleep();
- ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
+ ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
if (ret)
return ret;
@@ -760,6 +796,7 @@ recheck_need_open:
}
buf->must_flush = true;
+ j->flushing_seq = max(j->flushing_seq, seq);
if (parent && !closure_wait(&buf->wait, parent))
BUG();
@@ -848,7 +885,7 @@ out:
static int __bch2_journal_meta(struct journal *j)
{
struct journal_res res = {};
- int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
+ int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
if (ret)
return ret;
@@ -1602,54 +1639,3 @@ void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
__bch2_journal_debug_to_text(out, j);
spin_unlock(&j->lock);
}
-
-bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
-{
- struct journal_entry_pin_list *pin_list;
- struct journal_entry_pin *pin;
-
- spin_lock(&j->lock);
- if (!test_bit(JOURNAL_running, &j->flags)) {
- spin_unlock(&j->lock);
- return true;
- }
-
- *seq = max(*seq, j->pin.front);
-
- if (*seq >= j->pin.back) {
- spin_unlock(&j->lock);
- return true;
- }
-
- out->atomic++;
-
- pin_list = journal_seq_pin(j, *seq);
-
- prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
- printbuf_indent_add(out, 2);
-
- for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
- list_for_each_entry(pin, &pin_list->list[i], list)
- prt_printf(out, "\t%px %ps\n", pin, pin->flush);
-
- if (!list_empty(&pin_list->flushed))
- prt_printf(out, "flushed:\n");
-
- list_for_each_entry(pin, &pin_list->flushed, list)
- prt_printf(out, "\t%px %ps\n", pin, pin->flush);
-
- printbuf_indent_sub(out, 2);
-
- --out->atomic;
- spin_unlock(&j->lock);
-
- return false;
-}
-
-void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
-{
- u64 seq = 0;
-
- while (!bch2_journal_seq_pins_to_text(out, j, &seq))
- seq++;
-}
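The journal_res_get slowpath above now waits in two stages: a short wait (HZ) with the btree transaction still locked, then, after dropping those locks, the remainder of a budget scaled to twice the worst recent device write latency with a ten-second floor. A tiny sketch of just that timeout arithmetic (HZ value assumed for illustration; not part of the patch):

#include <stdio.h>

#define HZ 250   /* assumed tick rate for the sketch */

static int max(int a, int b) { return a > b ? a : b; }

/* mirrors the two-stage timeout calculation in the hunk above */
static int remaining_wait(int max_dev_latency_jiffies)
{
        int total = max(max_dev_latency_jiffies * 2, HZ * 10);

        return max(0, total - HZ);   /* first stage already waited HZ */
}

int main(void)
{
        printf("fast device: %d jiffies\n", remaining_wait(50));      /* 10s floor */
        printf("slow device: %d jiffies\n", remaining_wait(8 * HZ));  /* latency-driven */
        return 0;
}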
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index cb0df0663946..107f7f901cd9 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -312,7 +312,7 @@ static inline void bch2_journal_res_put(struct journal *j,
}
int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
- unsigned);
+ unsigned, struct btree_trans *);
/* First bits for BCH_WATERMARK: */
enum journal_res_flags {
@@ -368,7 +368,8 @@ static inline int journal_res_get_fast(struct journal *j,
}
static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
- unsigned u64s, unsigned flags)
+ unsigned u64s, unsigned flags,
+ struct btree_trans *trans)
{
int ret;
@@ -380,7 +381,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re
if (journal_res_get_fast(j, res, flags))
goto out;
- ret = bch2_journal_res_get_slowpath(j, res, flags);
+ ret = bch2_journal_res_get_slowpath(j, res, flags, trans);
if (ret)
return ret;
out:
@@ -408,6 +409,7 @@ bool bch2_journal_noflush_seq(struct journal *, u64, u64);
int bch2_journal_meta(struct journal *);
void bch2_journal_halt(struct journal *);
+void bch2_journal_halt_locked(struct journal *);
static inline int bch2_journal_error(struct journal *j)
{
@@ -429,8 +431,6 @@ struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *, u
void __bch2_journal_debug_to_text(struct printbuf *, struct journal *);
void bch2_journal_debug_to_text(struct printbuf *, struct journal *);
-void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
-bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
unsigned nr);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 7f2efe85a805..11c39e0c34f4 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -17,6 +17,7 @@
#include "sb-clean.h"
#include "trace.h"
+#include <linux/ioprio.h>
#include <linux/string_choices.h>
void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
@@ -1763,6 +1764,7 @@ static CLOSURE_CALLBACK(journal_write_submit)
bio->bi_iter.bi_sector = ptr->offset;
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);
BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
ca->prev_journal_sector = bio->bi_iter.bi_sector;
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 3c8242606da7..6a9cefb635d6 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -327,8 +327,10 @@ void bch2_journal_reclaim_fast(struct journal *j)
popped = true;
}
- if (popped)
+ if (popped) {
bch2_journal_space_available(j);
+ __closure_wake_up(&j->reclaim_flush_wait);
+ }
}
bool __bch2_journal_pin_put(struct journal *j, u64 seq)
@@ -362,6 +364,9 @@ static inline bool __journal_pin_drop(struct journal *j,
pin->seq = 0;
list_del_init(&pin->list);
+ if (j->reclaim_flush_wait.list.first)
+ __closure_wake_up(&j->reclaim_flush_wait);
+
/*
* Unpinning a journal entry may make journal_next_bucket() succeed, if
* writing a new last_seq will now make another bucket available:
@@ -383,11 +388,11 @@ static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
if (fn == bch2_btree_node_flush0 ||
fn == bch2_btree_node_flush1)
- return JOURNAL_PIN_btree;
+ return JOURNAL_PIN_TYPE_btree;
else if (fn == bch2_btree_key_cache_journal_flush)
- return JOURNAL_PIN_key_cache;
+ return JOURNAL_PIN_TYPE_key_cache;
else
- return JOURNAL_PIN_other;
+ return JOURNAL_PIN_TYPE_other;
}
static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
@@ -406,7 +411,12 @@ static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
atomic_inc(&pin_list->count);
pin->seq = seq;
pin->flush = flush_fn;
- list_add(&pin->list, &pin_list->list[type]);
+
+ if (list_empty(&pin_list->unflushed[type]) &&
+ j->reclaim_flush_wait.list.first)
+ __closure_wake_up(&j->reclaim_flush_wait);
+
+ list_add(&pin->list, &pin_list->unflushed[type]);
}
void bch2_journal_pin_copy(struct journal *j,
@@ -499,16 +509,15 @@ journal_get_next_pin(struct journal *j,
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *ret = NULL;
- unsigned i;
fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
if (*seq > seq_to_flush && !allowed_above_seq)
break;
- for (i = 0; i < JOURNAL_PIN_NR; i++)
- if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
- ((1U << i) & allowed_above_seq)) {
- ret = list_first_entry_or_null(&pin_list->list[i],
+ for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
+ if (((BIT(i) & allowed_below_seq) && *seq <= seq_to_flush) ||
+ (BIT(i) & allowed_above_seq)) {
+ ret = list_first_entry_or_null(&pin_list->unflushed[i],
struct journal_entry_pin, list);
if (ret)
return ret;
@@ -544,8 +553,8 @@ static size_t journal_flush_pins(struct journal *j,
}
if (min_key_cache) {
- allowed_above |= 1U << JOURNAL_PIN_key_cache;
- allowed_below |= 1U << JOURNAL_PIN_key_cache;
+ allowed_above |= BIT(JOURNAL_PIN_TYPE_key_cache);
+ allowed_below |= BIT(JOURNAL_PIN_TYPE_key_cache);
}
cond_resched();
@@ -553,7 +562,9 @@ static size_t journal_flush_pins(struct journal *j,
j->last_flushed = jiffies;
spin_lock(&j->lock);
- pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
+ pin = journal_get_next_pin(j, seq_to_flush,
+ allowed_below,
+ allowed_above, &seq);
if (pin) {
BUG_ON(j->flush_in_progress);
j->flush_in_progress = pin;
@@ -576,7 +587,7 @@ static size_t journal_flush_pins(struct journal *j,
spin_lock(&j->lock);
/* Pin might have been dropped or rearmed: */
if (likely(!err && !j->flush_in_progress_dropped))
- list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
+ list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(flush_fn)]);
j->flush_in_progress = NULL;
j->flush_in_progress_dropped = false;
spin_unlock(&j->lock);
@@ -816,10 +827,41 @@ int bch2_journal_reclaim_start(struct journal *j)
return 0;
}
+static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush,
+ unsigned types)
+{
+ struct journal_entry_pin_list *pin_list;
+ u64 seq;
+
+ spin_lock(&j->lock);
+ fifo_for_each_entry_ptr(pin_list, &j->pin, seq) {
+ if (seq > seq_to_flush)
+ break;
+
+ for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
+ if ((BIT(i) & types) &&
+ (!list_empty(&pin_list->unflushed[i]) ||
+ !list_empty(&pin_list->flushed[i]))) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+ }
+ spin_unlock(&j->lock);
+
+ return false;
+}
+
+static bool journal_flush_pins_or_still_flushing(struct journal *j, u64 seq_to_flush,
+ unsigned types)
+{
+ return journal_flush_pins(j, seq_to_flush, types, 0, 0, 0) ||
+ journal_pins_still_flushing(j, seq_to_flush, types);
+}
+
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
bool *did_work)
{
- int ret;
+ int ret = 0;
ret = bch2_journal_error(j);
if (ret)
@@ -827,12 +869,18 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
mutex_lock(&j->reclaim_lock);
- if (journal_flush_pins(j, seq_to_flush,
- (1U << JOURNAL_PIN_key_cache)|
- (1U << JOURNAL_PIN_other), 0, 0, 0) ||
- journal_flush_pins(j, seq_to_flush,
- (1U << JOURNAL_PIN_btree), 0, 0, 0))
+ if (journal_flush_pins_or_still_flushing(j, seq_to_flush,
+ BIT(JOURNAL_PIN_TYPE_key_cache)|
+ BIT(JOURNAL_PIN_TYPE_other))) {
+ *did_work = true;
+ goto unlock;
+ }
+
+ if (journal_flush_pins_or_still_flushing(j, seq_to_flush,
+ BIT(JOURNAL_PIN_TYPE_btree))) {
*did_work = true;
+ goto unlock;
+ }
if (seq_to_flush > journal_cur_seq(j))
bch2_journal_entry_close(j);
@@ -847,6 +895,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
!fifo_used(&j->pin);
spin_unlock(&j->lock);
+unlock:
mutex_unlock(&j->reclaim_lock);
return ret;
@@ -860,7 +909,7 @@ bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
if (!test_bit(JOURNAL_running, &j->flags))
return false;
- closure_wait_event(&j->async_wait,
+ closure_wait_event(&j->reclaim_flush_wait,
journal_flush_done(j, seq_to_flush, &did_work));
return did_work;
@@ -926,3 +975,54 @@ err:
return ret;
}
+
+bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
+{
+ struct journal_entry_pin_list *pin_list;
+ struct journal_entry_pin *pin;
+
+ spin_lock(&j->lock);
+ if (!test_bit(JOURNAL_running, &j->flags)) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
+ *seq = max(*seq, j->pin.front);
+
+ if (*seq >= j->pin.back) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
+ out->atomic++;
+
+ pin_list = journal_seq_pin(j, *seq);
+
+ prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "unflushed:\n");
+ for (unsigned i = 0; i < ARRAY_SIZE(pin_list->unflushed); i++)
+ list_for_each_entry(pin, &pin_list->unflushed[i], list)
+ prt_printf(out, "\t%px %ps\n", pin, pin->flush);
+
+ prt_printf(out, "flushed:\n");
+ for (unsigned i = 0; i < ARRAY_SIZE(pin_list->flushed); i++)
+ list_for_each_entry(pin, &pin_list->flushed[i], list)
+ prt_printf(out, "\t%px %ps\n", pin, pin->flush);
+
+ printbuf_indent_sub(out, 2);
+
+ --out->atomic;
+ spin_unlock(&j->lock);
+
+ return false;
+}
+
+void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
+{
+ u64 seq = 0;
+
+ while (!bch2_journal_seq_pins_to_text(out, j, &seq))
+ seq++;
+}
diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h
index ec84c3345281..0a73d7134e1c 100644
--- a/fs/bcachefs/journal_reclaim.h
+++ b/fs/bcachefs/journal_reclaim.h
@@ -78,4 +78,7 @@ static inline bool bch2_journal_flush_all_pins(struct journal *j)
int bch2_journal_flush_device_pins(struct journal *, int);
+void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
+bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
+
#endif /* _BCACHEFS_JOURNAL_RECLAIM_H */
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index e9bd716fbb71..a198a81d7478 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -53,15 +53,15 @@ struct journal_buf {
*/
enum journal_pin_type {
- JOURNAL_PIN_btree,
- JOURNAL_PIN_key_cache,
- JOURNAL_PIN_other,
- JOURNAL_PIN_NR,
+ JOURNAL_PIN_TYPE_btree,
+ JOURNAL_PIN_TYPE_key_cache,
+ JOURNAL_PIN_TYPE_other,
+ JOURNAL_PIN_TYPE_NR,
};
struct journal_entry_pin_list {
- struct list_head list[JOURNAL_PIN_NR];
- struct list_head flushed;
+ struct list_head unflushed[JOURNAL_PIN_TYPE_NR];
+ struct list_head flushed[JOURNAL_PIN_TYPE_NR];
atomic_t count;
struct bch_devs_list devs;
};
@@ -226,6 +226,7 @@ struct journal {
/* Used when waiting because the journal was full */
wait_queue_head_t wait;
struct closure_waitlist async_wait;
+ struct closure_waitlist reclaim_flush_wait;
struct delayed_work write_work;
struct workqueue_struct *wq;
@@ -236,6 +237,7 @@ struct journal {
/* seq, last_seq from the most recent journal entry successfully written */
u64 seq_ondisk;
u64 flushed_seq_ondisk;
+ u64 flushing_seq;
u64 last_seq_ondisk;
u64 err_seq;
u64 last_empty_seq;
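Splitting each pin list into per-type unflushed[] and flushed[] arrays lets journal reclaim ask, per pin type, whether anything is still pending or still being flushed, instead of lumping every flushed pin together. A reduced model of that check, with counters standing in for the kernel's list_heads (illustration only):

#include <stdio.h>
#include <stdbool.h>

enum pin_type { PIN_btree, PIN_key_cache, PIN_other, PIN_NR };

/* simplified pin list: per-type counts instead of list_heads */
struct pin_list {
        int unflushed[PIN_NR];
        int flushed[PIN_NR];   /* flushed but not yet reclaimed */
};

/* flushing is only "done" once neither list has entries of the given types */
static bool still_flushing(const struct pin_list *p, unsigned types)
{
        for (int i = 0; i < PIN_NR; i++)
                if ((types & (1U << i)) && (p->unflushed[i] || p->flushed[i]))
                        return true;
        return false;
}

int main(void)
{
        struct pin_list p = { .unflushed = { 0, 0, 0 }, .flushed = { 2, 0, 0 } };

        printf("btree pins still flushing: %d\n",
               still_flushing(&p, 1U << PIN_btree));
        return 0;
}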
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 85c361e78ba5..21805509ab9e 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -215,7 +215,8 @@ static int bch2_copygc(struct moving_context *ctxt,
};
move_buckets buckets = { 0 };
struct move_bucket_in_flight *f;
- u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+ u64 sectors_seen = atomic64_read(&ctxt->stats->sectors_seen);
+ u64 sectors_moved = atomic64_read(&ctxt->stats->sectors_moved);
int ret = 0;
ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
@@ -245,7 +246,6 @@ static int bch2_copygc(struct moving_context *ctxt,
*did_work = true;
}
err:
- darray_exit(&buckets);
/* no entries in LRU btree found, or got to end: */
if (bch2_err_matches(ret, ENOENT))
@@ -254,8 +254,11 @@ err:
if (ret < 0 && !bch2_err_matches(ret, EROFS))
bch_err_msg(c, ret, "from bch2_move_data()");
- moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
- trace_and_count(c, copygc, c, moved, 0, 0, 0);
+ sectors_seen = atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen;
+ sectors_moved = atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved;
+ trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved);
+
+ darray_exit(&buckets);
return ret;
}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index e763d52e0f38..9d397fc2a1f0 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -476,13 +476,13 @@ enum fsck_err_opts {
NULL, "Enable nocow mode: enables runtime locking in\n"\
"data move path needed if nocow will ever be in use\n")\
x(copygc_enabled, u8, \
- OPT_FS|OPT_MOUNT, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
NULL, "Enable copygc: disable for debugging, or to\n"\
"quiet the system when doing performance testing\n")\
x(rebalance_enabled, u8, \
- OPT_FS|OPT_MOUNT, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
NULL, "Enable rebalance: disable for debugging, or to\n"\
@@ -659,18 +659,4 @@ static inline void bch2_io_opts_fixups(struct bch_io_opts *opts)
struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
bool bch2_opt_is_inode_opt(enum bch_opt_id);
-/* rebalance opts: */
-
-static inline struct bch_extent_rebalance io_opts_to_rebalance_opts(struct bch_io_opts *opts)
-{
- return (struct bch_extent_rebalance) {
- .type = BIT(BCH_EXTENT_ENTRY_rebalance),
-#define x(_name) \
- ._name = opts->_name, \
- ._name##_from_inode = opts->_name##_from_inode,
- BCH_REBALANCE_OPTS()
-#undef x
- };
-};
-
#endif /* _BCACHEFS_OPTS_H */
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 4adc74cd3f70..d0a1f5cd5c2b 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -121,12 +121,10 @@ u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
}
}
incompressible:
- if (opts->background_target &&
- bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target)) {
+ if (opts->background_target)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
sectors += p.crc.compressed_size;
- }
return sectors;
}
@@ -140,7 +138,7 @@ static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opt
const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);
if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
- struct bch_extent_rebalance new = io_opts_to_rebalance_opts(opts);
+ struct bch_extent_rebalance new = io_opts_to_rebalance_opts(c, opts);
return old == NULL || memcmp(old, &new, sizeof(new));
} else {
return old != NULL;
@@ -163,7 +161,7 @@ int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
k.k->u64s += sizeof(*old) / sizeof(u64);
}
- *old = io_opts_to_rebalance_opts(opts);
+ *old = io_opts_to_rebalance_opts(c, opts);
} else {
if (old)
extent_entry_drop(k, (union bch_extent_entry *) old);
diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h
index 0a0821ab895d..62a3859d3823 100644
--- a/fs/bcachefs/rebalance.h
+++ b/fs/bcachefs/rebalance.h
@@ -4,8 +4,28 @@
#include "compress.h"
#include "disk_groups.h"
+#include "opts.h"
#include "rebalance_types.h"
+static inline struct bch_extent_rebalance io_opts_to_rebalance_opts(struct bch_fs *c,
+ struct bch_io_opts *opts)
+{
+ struct bch_extent_rebalance r = {
+ .type = BIT(BCH_EXTENT_ENTRY_rebalance),
+#define x(_name) \
+ ._name = opts->_name, \
+ ._name##_from_inode = opts->_name##_from_inode,
+ BCH_REBALANCE_OPTS()
+#undef x
+ };
+
+ if (r.background_target &&
+ !bch2_target_accepts_data(c, BCH_DATA_user, r.background_target))
+ r.background_target = 0;
+
+ return r;
+};
+
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c);
int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_i *);
int bch2_get_update_rebalance_opts(struct btree_trans *,
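Moving io_opts_to_rebalance_opts() into rebalance.h and handing it the filesystem pointer lets the helper itself zero out a background_target that no longer accepts user data, so every caller — extent marking and the needs-rebalance accounting above — sees the same sanitized options. A stand-alone sketch of that idea with hypothetical stand-in structures (not the bcachefs types):

#include <stdio.h>
#include <stdbool.h>

struct fs { bool target_accepts_data; };
struct io_opts { unsigned background_target; };
struct rebalance_opts { unsigned background_target; };

static struct rebalance_opts
io_opts_to_rebalance_opts(const struct fs *c, const struct io_opts *opts)
{
        struct rebalance_opts r = { .background_target = opts->background_target };

        /* drop the target here so every caller sees the same sanitized value */
        if (r.background_target && !c->target_accepts_data)
                r.background_target = 0;
        return r;
}

int main(void)
{
        struct fs c = { .target_accepts_data = false };
        struct io_opts o = { .background_target = 3 };

        printf("target=%u\n", io_opts_to_rebalance_opts(&c, &o).background_target);
        return 0;
}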
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 98825437381c..71c786cdb192 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -32,7 +32,6 @@
#include <linux/sort.h>
#include <linux/stat.h>
-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
{
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index 0b4fe899209b..ea0a18364751 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -57,7 +57,7 @@ enum bch_fsck_flags {
x(bset_wrong_sector_offset, 44, 0) \
x(bset_empty, 45, 0) \
x(bset_bad_seq, 46, 0) \
- x(bset_blacklisted_journal_seq, 47, 0) \
+ x(bset_blacklisted_journal_seq, 47, FSCK_AUTOFIX) \
x(first_bset_blacklisted_journal_seq, 48, FSCK_AUTOFIX) \
x(btree_node_bad_btree, 49, 0) \
x(btree_node_bad_level, 50, 0) \
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index 8c2c5539de2e..d78451c2a0c6 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -31,11 +31,11 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir
}
}
-static int fsck_rename_dirent(struct btree_trans *trans,
- struct snapshots_seen *s,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct bkey_s_c_dirent old)
+static noinline int fsck_rename_dirent(struct btree_trans *trans,
+ struct snapshots_seen *s,
+ const struct bch_hash_desc desc,
+ struct bch_hash_info *hash_info,
+ struct bkey_s_c_dirent old)
{
struct qstr old_name = bch2_dirent_get_name(old);
struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, bkey_bytes(old.k) + 32);
@@ -71,11 +71,11 @@ static int fsck_rename_dirent(struct btree_trans *trans,
return bch2_fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i);
}
-static int hash_pick_winner(struct btree_trans *trans,
- const struct bch_hash_desc desc,
- struct bch_hash_info *hash_info,
- struct bkey_s_c k1,
- struct bkey_s_c k2)
+static noinline int hash_pick_winner(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ struct bch_hash_info *hash_info,
+ struct bkey_s_c k1,
+ struct bkey_s_c k2)
{
if (bkey_val_bytes(k1.k) == bkey_val_bytes(k2.k) &&
!memcmp(k1.v, k2.v, bkey_val_bytes(k1.k)))
@@ -142,8 +142,8 @@ fsck_err:
* All versions of the same inode in different snapshots must have the same hash
* seed/type: verify that the hash info we're using matches the root
*/
-static int check_inode_hash_info_matches_root(struct btree_trans *trans, u64 inum,
- struct bch_hash_info *hash_info)
+static noinline int check_inode_hash_info_matches_root(struct btree_trans *trans, u64 inum,
+ struct bch_hash_info *hash_info)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index e3d0475232e5..b7b96283c316 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -428,7 +428,7 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
bch2_bkey_get_iter_typed(trans, &snapshot_iter,
BTREE_ID_snapshots, POS(0, snapid),
0, snapshot);
- ret = bkey_err(subvol);
+ ret = bkey_err(snapshot);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
"missing snapshot %u", snapid);
if (ret)
@@ -440,6 +440,11 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
bch2_bkey_get_iter_typed(trans, &snapshot_tree_iter,
BTREE_ID_snapshot_trees, POS(0, treeid),
0, snapshot_tree);
+ ret = bkey_err(snapshot_tree);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
+ "missing snapshot tree %u", treeid);
+ if (ret)
+ goto err;
if (le32_to_cpu(snapshot_tree.v->master_subvol) == subvolid) {
struct bkey_i_snapshot_tree *snapshot_tree_mut =
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index d97ea7bd1171..6d97d412fed9 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -411,6 +411,17 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
return ret;
}
+bool bch2_fs_emergency_read_only_locked(struct bch_fs *c)
+{
+ bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
+
+ bch2_journal_halt_locked(&c->journal);
+ bch2_fs_read_only_async(c);
+
+ wake_up(&bch2_read_only_wait);
+ return ret;
+}
+
static int bch2_fs_read_write_late(struct bch_fs *c)
{
int ret;
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index fa6d52216510..04f8287eff5c 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -29,6 +29,7 @@ int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);
struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *);
bool bch2_fs_emergency_read_only(struct bch_fs *);
+bool bch2_fs_emergency_read_only_locked(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
int bch2_fs_read_write(struct bch_fs *);
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 9d40b7d4ea29..c1b51009edf6 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -727,7 +727,7 @@ DEFINE_EVENT(fs_str, bucket_alloc_fail,
TP_ARGS(c, str)
);
-TRACE_EVENT(discard_buckets,
+DECLARE_EVENT_CLASS(discard_buckets_class,
TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
u64 need_journal_commit, u64 discarded, const char *err),
TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
@@ -759,6 +759,18 @@ TRACE_EVENT(discard_buckets,
__entry->err)
);
+DEFINE_EVENT(discard_buckets_class, discard_buckets,
+ TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
+ u64 need_journal_commit, u64 discarded, const char *err),
+ TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
+);
+
+DEFINE_EVENT(discard_buckets_class, discard_buckets_fast,
+ TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
+ u64 need_journal_commit, u64 discarded, const char *err),
+ TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
+);
+
TRACE_EVENT(bucket_invalidate,
TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
TP_ARGS(c, dev, bucket, sectors),
@@ -902,32 +914,30 @@ TRACE_EVENT(evacuate_bucket,
TRACE_EVENT(copygc,
TP_PROTO(struct bch_fs *c,
- u64 sectors_moved, u64 sectors_not_moved,
- u64 buckets_moved, u64 buckets_not_moved),
- TP_ARGS(c,
- sectors_moved, sectors_not_moved,
- buckets_moved, buckets_not_moved),
+ u64 buckets,
+ u64 sectors_seen,
+ u64 sectors_moved),
+ TP_ARGS(c, buckets, sectors_seen, sectors_moved),
TP_STRUCT__entry(
__field(dev_t, dev )
+ __field(u64, buckets )
+ __field(u64, sectors_seen )
__field(u64, sectors_moved )
- __field(u64, sectors_not_moved )
- __field(u64, buckets_moved )
- __field(u64, buckets_not_moved )
),
TP_fast_assign(
__entry->dev = c->dev;
+ __entry->buckets = buckets;
+ __entry->sectors_seen = sectors_seen;
__entry->sectors_moved = sectors_moved;
- __entry->sectors_not_moved = sectors_not_moved;
- __entry->buckets_moved = buckets_moved;
- __entry->buckets_not_moved = buckets_moved;
),
- TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
+ TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->sectors_moved, __entry->sectors_not_moved,
- __entry->buckets_moved, __entry->buckets_not_moved)
+ __entry->buckets,
+ __entry->sectors_seen,
+ __entry->sectors_moved)
);
TRACE_EVENT(copygc_wait,
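Turning discard_buckets into an event class with two DEFINE_EVENT users gives the fast path its own tracepoint name while sharing one field/format definition. The pattern can be mimicked in plain C; the macros below are a user-space analogue for illustration only, not the kernel's tracing infrastructure:

#include <stdio.h>

/* one shared body, many named entry points */
#define DECLARE_EVENT_CLASS(cls, args, body) \
        static void cls##_impl args body

#define DEFINE_EVENT(cls, name, args, call) \
        static void trace_##name args { cls##_impl call; }

DECLARE_EVENT_CLASS(discard_class,
        (const char *who, unsigned long seen, unsigned long discarded),
        { printf("%s: seen=%lu discarded=%lu\n", who, seen, discarded); })

DEFINE_EVENT(discard_class, discard_buckets,
        (unsigned long seen, unsigned long discarded),
        ("discard_buckets", seen, discarded))
DEFINE_EVENT(discard_class, discard_buckets_fast,
        (unsigned long seen, unsigned long discarded),
        ("discard_buckets_fast", seen, discarded))

int main(void)
{
        trace_discard_buckets(10, 7);
        trace_discard_buckets_fast(3, 3);
        return 0;
}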
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 1a1720116071..e7c3541b38f3 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -670,8 +670,6 @@ static inline int cmp_le32(__le32 l, __le32 r)
#include <linux/uuid.h>
-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-
static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
return l.len == r.len && !memcmp(l.name, r.name, l.len);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 106f0e8af177..8054f44d39cf 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1257,7 +1257,7 @@ out_free_interp:
}
reloc_func_desc = interp_load_addr;
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
kfree(interp_elf_ex);
@@ -1354,7 +1354,7 @@ out_free_dentry:
kfree(interp_elf_ex);
kfree(interp_elf_phdata);
out_free_file:
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_ph:
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index f1a7c4875c4a..c13ee8180b17 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -394,7 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
}
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
interpreter = NULL;
}
@@ -467,7 +467,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
error:
if (interpreter) {
- allow_write_access(interpreter);
+ exe_file_allow_write_access(interpreter);
fput(interpreter);
}
kfree(interpreter_name);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 92071ca0655f..3dc5a35dd19b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1496,6 +1496,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (!p->skip_locking) {
btrfs_unlock_up_safe(p, parent_level + 1);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
@@ -1539,6 +1540,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (!p->skip_locking) {
ASSERT(ret == -EAGAIN);
+ btrfs_maybe_reset_lockdep_class(root, tmp);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d9f856358704..a2cac9d0a1a9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -629,7 +629,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
for (allocated = 0; allocated < nr_pages;) {
unsigned int last = allocated;
- allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+ allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
if (unlikely(allocated == last)) {
/* No progress, fail and do cleanup. */
for (int i = 0; i < allocated; i++) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fe2c810335ff..a9322601ab5c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -10173,7 +10173,6 @@ out_unlock_mmap:
*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
sis->max = bsi.nr_pages;
sis->pages = bsi.nr_pages - 1;
- sis->highest_bit = bsi.nr_pages - 1;
return bsi.nr_extents;
}
#else
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ae98269a5e3a..6c18bad53cd3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2544,6 +2544,15 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
goto out;
}
+ /*
+ * Don't allow defrag on pre-content watched files, as it could
+ * populate the page cache with 0's via readahead.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (argp) {
if (copy_from_user(&range, argp, sizeof(range))) {
ret = -EFAULT;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 30eceaf829a7..4aca7475fd82 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1229,6 +1229,18 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
*/
if (WARN_ON_ONCE(len >= ordered->num_bytes))
return ERR_PTR(-EINVAL);
+ /*
+ * If our ordered extent had an error there's no point in continuing.
+ * The error may have come from a transaction abort done either by this
+ * task or some other concurrent task, and the transaction abort path
+ * iterates over all existing ordered extents and sets the flag
+ * BTRFS_ORDERED_IOERR on them.
+ */
+ if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
+ const int fs_error = BTRFS_FS_ERROR(fs_info);
+
+ return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
+ }
/* We cannot split partially completed ordered extents. */
if (ordered->bytes_left) {
ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index b90fabe302e6..f9d3766c809b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1880,11 +1880,7 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
* Commit current transaction to make sure all the rfer/excl numbers
* get updated.
*/
- trans = btrfs_start_transaction(fs_info->quota_root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- ret = btrfs_commit_transaction(trans);
+ ret = btrfs_commit_current_transaction(fs_info->quota_root);
if (ret < 0)
return ret;
@@ -1897,8 +1893,11 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
/*
* It's squota and the subvolume still has numbers needed for future
* accounting, in this case we can not delete it. Just skip it.
+ *
+ * Or the qgroup is already removed by a qgroup rescan. For both cases we're
+ * safe to ignore them.
*/
- if (ret == -EBUSY)
+ if (ret == -EBUSY || ret == -ENOENT)
ret = 0;
return ret;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f809c3200c21..dc4fee519ca6 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -961,7 +961,7 @@ static int btrfs_fill_super(struct super_block *sb,
#endif
sb->s_xattr = btrfs_xattr_handlers;
sb->s_time_gran = 1;
- sb->s_iflags |= SB_I_CGROUPWB;
+ sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
err = super_setup_bdi(sb);
if (err) {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 15312013f2a3..aca83a98b75a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -274,8 +274,10 @@ loop:
cur_trans = fs_info->running_transaction;
if (cur_trans) {
if (TRANS_ABORTED(cur_trans)) {
+ const int abort_error = cur_trans->aborted;
+
spin_unlock(&fs_info->trans_lock);
- return cur_trans->aborted;
+ return abort_error;
}
if (btrfs_blocked_trans_types[cur_trans->state] & type) {
spin_unlock(&fs_info->trans_lock);
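The transaction.c hunk copies cur_trans->aborted into a local before dropping trans_lock, so the error code is read while the lock still guarantees the transaction object is stable. A toy user-space illustration of the snapshot-before-unlock pattern, with a pthread mutex standing in for the spinlock (illustration only):

#include <pthread.h>
#include <stdio.h>

struct txn { int aborted; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct txn *running_txn;

static int join_transaction(void)
{
        pthread_mutex_lock(&lock);
        if (running_txn && running_txn->aborted) {
                int err = running_txn->aborted;   /* snapshot before unlock */

                pthread_mutex_unlock(&lock);
                return err;                       /* running_txn not touched here */
        }
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        struct txn t = { .aborted = -5 };

        running_txn = &t;
        printf("join: %d\n", join_transaction());
        return 0;
}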
diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c
index 1715d5ca2b2d..e341ade47dd8 100644
--- a/fs/cachefiles/error_inject.c
+++ b/fs/cachefiles/error_inject.c
@@ -11,7 +11,7 @@
unsigned int cachefiles_error_injection_state;
static struct ctl_table_header *cachefiles_sysctl;
-static struct ctl_table cachefiles_sysctls[] = {
+static const struct ctl_table cachefiles_sysctls[] = {
{
.procname = "error_injection",
.data = &cachefiles_error_injection_state,
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fdf9dc15eafa..fdd404fc8112 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -412,7 +412,7 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- char name[100];
+ char name[NAME_MAX];
doutc(fsc->client, "begin\n");
fsc->debugfs_congestion_kb =
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0bf388e07a02..62e99e65250d 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1940,29 +1940,19 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
/*
* Check if cached dentry can be trusted.
*/
-static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ceph_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
int valid = 0;
- struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *inode;
- valid = fscrypt_d_revalidate(dentry, flags);
+ valid = fscrypt_d_revalidate(dir, name, dentry, flags);
if (valid <= 0)
return valid;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- inode = d_inode_rcu(dentry);
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- inode = d_inode(dentry);
- }
+ inode = d_inode_rcu(dentry);
doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
dentry, dentry, inode, ceph_dentry(dentry)->offset,
@@ -2008,6 +1998,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
req->r_parent = dir;
ihold(dir);
+ req->r_dname = name;
+
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
@@ -2038,9 +2030,6 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
if (!valid)
ceph_dir_clear_complete(dir);
-
- if (!(flags & LOOKUP_RCU))
- dput(parent);
return valid;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 785fe489ef4b..54b3421501e9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2621,6 +2621,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
struct inode *dir = req->r_parent;
struct dentry *dentry = req->r_dentry;
+ const struct qstr *name = req->r_dname;
u8 *cryptbuf = NULL;
u32 len = 0;
int ret = 0;
@@ -2641,8 +2642,10 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
if (!fscrypt_has_encryption_key(dir))
goto success;
- if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
- &len)) {
+ if (!name)
+ name = &dentry->d_name;
+
+ if (!fscrypt_fname_encrypted_size(dir, name->len, NAME_MAX, &len)) {
WARN_ON_ONCE(1);
return ERR_PTR(-ENAMETOOLONG);
}
@@ -2657,7 +2660,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
if (!cryptbuf)
return ERR_PTR(-ENOMEM);
- ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
+ ret = fscrypt_fname_encrypt(dir, name, cryptbuf, len);
if (ret) {
kfree(cryptbuf);
return ERR_PTR(ret);
@@ -2945,12 +2948,12 @@ static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{
bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
- struct ceph_mds_request_head_old *ohead;
+ struct ceph_mds_request_head *head;
if (legacy)
return (struct ceph_mds_request_head_legacy *)p;
- ohead = (struct ceph_mds_request_head_old *)p;
- return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
+ head = (struct ceph_mds_request_head *)p;
+ return (struct ceph_mds_request_head_legacy *)&head->oldest_client_tid;
}
/*
@@ -3020,7 +3023,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
if (legacy)
len = sizeof(struct ceph_mds_request_head_legacy);
else if (request_head_version == 1)
- len = sizeof(struct ceph_mds_request_head_old);
+ len = offsetofend(struct ceph_mds_request_head, args);
else if (request_head_version == 2)
len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
else
@@ -3104,11 +3107,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.version = cpu_to_le16(3);
p = msg->front.iov_base + sizeof(*lhead);
} else if (request_head_version == 1) {
- struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
- ohead->version = cpu_to_le16(1);
- p = msg->front.iov_base + sizeof(*ohead);
+ nhead->version = cpu_to_le16(1);
+ p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, args);
} else if (request_head_version == 2) {
struct ceph_mds_request_head *nhead = msg->front.iov_base;
@@ -3265,7 +3268,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
* so we limit to retry at most 256 times.
*/
if (req->r_attempts) {
- old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
+ old_max_retry = sizeof_field(struct ceph_mds_request_head,
num_retry);
old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
if ((old_version && req->r_attempts >= old_max_retry) ||
@@ -5690,18 +5693,18 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
*
* All the other cases --> mismatch
*/
+ bool path_matched = true;
char *first = strstr(_tpath, auth->match.path);
- if (first != _tpath) {
- if (free_tpath)
- kfree(_tpath);
- return 0;
+ if (first != _tpath ||
+ (tlen > len && _tpath[len] != '/')) {
+ path_matched = false;
}
- if (tlen > len && _tpath[len] != '/') {
- if (free_tpath)
- kfree(_tpath);
+ if (free_tpath)
+ kfree(_tpath);
+
+ if (!path_matched)
return 0;
- }
}
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 38bb7e0d2d79..7c9fee9e80d4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -299,6 +299,8 @@ struct ceph_mds_request {
struct inode *r_target_inode; /* resulting inode */
struct inode *r_new_inode; /* new inode (for creates) */
+ const struct qstr *r_dname; /* stable name (for ->d_revalidate) */
+
#define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED (2) /* call was aborted */
#define CEPH_MDS_R_GOT_UNSAFE (3) /* got an unsafe reply */
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 06ee397e0c3a..d90eda19bcc4 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
if (IS_ERR(in)) {
doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
PTR_ERR(in));
- qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
+ qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */
} else {
qri->timeout = 0;
qri->inode = in;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 4e552ba7bd43..a3e2dfeedfbf 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -445,7 +445,8 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
}
/* called when a cache lookup succeeds */
-static int coda_dentry_revalidate(struct dentry *de, unsigned int flags)
+static int coda_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *de, unsigned int flags)
{
struct inode *inode;
struct coda_inode_info *cii;
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 9f2d5743e2c8..0df46f09b6cc 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *fs_table_header;
-static struct ctl_table coda_table[] = {
+static const struct ctl_table coda_table[] = {
{
.procname = "timeout",
.data = &coda_timeout,
diff --git a/fs/coredump.c b/fs/coredump.c
index d48edb37bc35..591700e1b2ce 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -995,7 +995,7 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write,
static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
-static struct ctl_table coredump_sysctls[] = {
+static const struct ctl_table coredump_sysctls[] = {
{
.procname = "core_uses_pid",
.data = &core_uses_pid,
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 0ad52fbe51c9..010f9c0a4c2f 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -574,11 +574,10 @@ EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *dir;
int err;
- int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
@@ -591,30 +590,21 @@ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
/*
* No-key name; valid if the directory's key is still unavailable.
*
- * Although fscrypt forbids rename() on no-key names, we still must use
- * dget_parent() here rather than use ->d_parent directly. That's
- * because a corrupted fs image may contain directory hard links, which
- * the VFS handles by moving the directory's dentry tree in the dcache
- * each time ->lookup() finds the directory and it already has a dentry
- * elsewhere. Thus ->d_parent can be changing, and we must safely grab
- * a reference to some ->d_parent to prevent it from being freed.
+ * Note in RCU mode we have to bail if we get here -
+ * fscrypt_get_encryption_info() may block.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
- dir = dget_parent(dentry);
/*
* Pass allow_unsupported=true, so that files with an unsupported
* encryption policy can be deleted.
*/
- err = fscrypt_get_encryption_info(d_inode(dir), true);
- valid = !fscrypt_has_encryption_key(d_inode(dir));
- dput(dir);
-
+ err = fscrypt_get_encryption_info(dir, true);
if (err < 0)
return err;
- return valid;
+ return !fscrypt_has_encryption_key(dir);
}
EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
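The ceph, coda, ecryptfs and fscrypt hunks in this patch all move ->d_revalidate() to a prototype that receives the parent directory inode and the name being validated directly, so implementations no longer have to grab and drop a reference on ->d_parent. A minimal sketch of a callback under that prototype, for a made-up filesystem with a deliberately trivial policy:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/namei.h>

static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
				  struct dentry *dentry, unsigned int flags)
{
	/*
	 * dir and name are supplied by the VFS and stable for the duration
	 * of the call; no dget_parent()/dput() dance is needed any more.
	 */
	if (flags & LOOKUP_RCU)
		return -ECHILD;	/* we might block; fall back to ref-walk */

	/* made-up policy: trust positive dentries, re-look-up negatives */
	return d_really_is_positive(dentry) ? 1 : 0;
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_revalidate	= examplefs_d_revalidate,
};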
diff --git a/fs/dcache.c b/fs/dcache.c
index 1a01d7a6a7a9..e3634916ffb9 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -192,7 +192,7 @@ static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table fs_dcache_sysctls[] = {
+static const struct ctl_table fs_dcache_sysctls[] = {
{
.procname = "dentry-state",
.data = &dentry_stat,
@@ -295,12 +295,16 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
return dentry_string_cmp(cs, ct, tcount);
}
+/*
+ * long names are allocated separately from dentry and never modified.
+ * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
+ * for the reason why ->count and ->head can't be combined into a union.
+ * dentry_string_cmp() relies upon ->name[] being word-aligned.
+ */
struct external_name {
- union {
- atomic_t count;
- struct rcu_head head;
- } u;
- unsigned char name[];
+ atomic_t count;
+ struct rcu_head head;
+ unsigned char name[] __aligned(sizeof(unsigned long));
};
static inline struct external_name *external_name(struct dentry *dentry)
@@ -324,31 +328,45 @@ static void __d_free_external(struct rcu_head *head)
static inline int dname_external(const struct dentry *dentry)
{
- return dentry->d_name.name != dentry->d_iname;
+ return dentry->d_name.name != dentry->d_shortname.string;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- name->name = dentry->d_name;
- if (unlikely(dname_external(dentry))) {
- atomic_inc(&external_name(dentry)->u.count);
+ unsigned seq;
+ const unsigned char *s;
+
+ rcu_read_lock();
+retry:
+ seq = read_seqcount_begin(&dentry->d_seq);
+ s = READ_ONCE(dentry->d_name.name);
+ name->name.hash_len = dentry->d_name.hash_len;
+ name->name.name = name->inline_name.string;
+ if (likely(s == dentry->d_shortname.string)) {
+ name->inline_name = dentry->d_shortname;
} else {
- memcpy(name->inline_name, dentry->d_iname,
- dentry->d_name.len + 1);
- name->name.name = name->inline_name;
+ struct external_name *p;
+ p = container_of(s, struct external_name, name[0]);
+ // get a valid reference
+ if (unlikely(!atomic_inc_not_zero(&p->count)))
+ goto retry;
+ name->name.name = s;
}
- spin_unlock(&dentry->d_lock);
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ release_dentry_name_snapshot(name);
+ goto retry;
+ }
+ rcu_read_unlock();
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
void release_dentry_name_snapshot(struct name_snapshot *name)
{
- if (unlikely(name->name.name != name->inline_name)) {
+ if (unlikely(name->name.name != name->inline_name.string)) {
struct external_name *p;
p = container_of(name->name.name, struct external_name, name[0]);
- if (unlikely(atomic_dec_and_test(&p->u.count)))
- kfree_rcu(p, u.head);
+ if (unlikely(atomic_dec_and_test(&p->count)))
+ kfree_rcu(p, head);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
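take_dentry_name_snapshot() above now builds its copy locklessly under d_seq instead of taking d_lock, and the snapshot stays valid until released; the ecryptfs hunk later in this patch relies on that to hand a stable name to the lower filesystem's ->d_revalidate(). A minimal usage sketch (the caller is invented):

#include <linux/dcache.h>
#include <linux/printk.h>

static void example_use_stable_name(struct dentry *dentry)
{
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	/* snap.name remains valid even if dentry is renamed concurrently */
	pr_info("example: operating on '%s'\n", (const char *)snap.name.name);
	release_dentry_name_snapshot(&snap);
}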
@@ -386,7 +404,7 @@ static void dentry_free(struct dentry *dentry)
WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
if (unlikely(dname_external(dentry))) {
struct external_name *p = external_name(dentry);
- if (likely(atomic_dec_and_test(&p->u.count))) {
+ if (likely(atomic_dec_and_test(&p->count))) {
call_rcu(&dentry->d_u.d_rcu, __d_free_external);
return;
}
@@ -1654,10 +1672,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
* will still always have a NUL at the end, even if we might
* be overwriting an internal NUL character
*/
- dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
+ dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
if (unlikely(!name)) {
name = &slash_name;
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
struct external_name *p = kmalloc(size + name->len,
@@ -1667,10 +1685,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
- atomic_set(&p->u.count, 1);
+ atomic_set(&p->count, 1);
dname = p->name;
} else {
- dname = dentry->d_iname;
+ dname = dentry->d_shortname.string;
}
dentry->d_name.len = name->len;
@@ -1682,7 +1700,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
dentry->d_flags = 0;
- lockref_init(&dentry->d_lockref, 1);
+ lockref_init(&dentry->d_lockref);
seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
@@ -2728,10 +2746,9 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
* dentry:internal, target:external. Steal target's
* storage and make target internal.
*/
- memcpy(target->d_iname, dentry->d_name.name,
- dentry->d_name.len + 1);
dentry->d_name.name = target->d_name.name;
- target->d_name.name = target->d_iname;
+ target->d_shortname = dentry->d_shortname;
+ target->d_name.name = target->d_shortname.string;
}
} else {
if (unlikely(dname_external(dentry))) {
@@ -2739,20 +2756,16 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
* dentry:external, target:internal. Give dentry's
* storage to target and make dentry internal
*/
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
target->d_name.name = dentry->d_name.name;
- dentry->d_name.name = dentry->d_iname;
+ dentry->d_shortname = target->d_shortname;
+ dentry->d_name.name = dentry->d_shortname.string;
} else {
/*
* Both are internal.
*/
- unsigned int i;
- BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
- swap(((long *) &dentry->d_iname)[i],
- ((long *) &target->d_iname)[i]);
- }
+ for (int i = 0; i < DNAME_INLINE_WORDS; i++)
+ swap(dentry->d_shortname.words[i],
+ target->d_shortname.words[i]);
}
}
swap(dentry->d_name.hash_len, target->d_name.hash_len);
@@ -2764,16 +2777,15 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
if (unlikely(dname_external(dentry)))
old_name = external_name(dentry);
if (unlikely(dname_external(target))) {
- atomic_inc(&external_name(target)->u.count);
+ atomic_inc(&external_name(target)->count);
dentry->d_name = target->d_name;
} else {
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- dentry->d_name.name = dentry->d_iname;
+ dentry->d_shortname = target->d_shortname;
+ dentry->d_name.name = dentry->d_shortname.string;
dentry->d_name.hash_len = target->d_name.hash_len;
}
- if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
- kfree_rcu(old_name, u.head);
+ if (old_name && likely(atomic_dec_and_test(&old_name->count)))
+ kfree_rcu(old_name, head);
}
/*
@@ -2954,7 +2966,12 @@ static int __d_unalias(struct dentry *dentry, struct dentry *alias)
goto out_err;
m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
+ if (alias->d_op && alias->d_op->d_unalias_trylock &&
+ !alias->d_op->d_unalias_trylock(alias))
+ goto out_err;
__d_move(alias, dentry, false);
+ if (alias->d_op && alias->d_op->d_unalias_unlock)
+ alias->d_op->d_unalias_unlock(alias);
ret = 0;
out_err:
if (m2)
@@ -3102,12 +3119,12 @@ void d_mark_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
- BUG_ON(dentry->d_name.name != dentry->d_iname ||
+ BUG_ON(dname_external(dentry) ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
spin_lock(&dentry->d_parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
+ dentry->d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
@@ -3195,7 +3212,7 @@ static void __init dcache_init(void)
*/
dentry_cache = KMEM_CACHE_USERCOPY(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
- d_iname);
+ d_shortname.string);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 16e198a26339..69e9ddcb113d 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -47,11 +47,17 @@ const struct file_operations debugfs_noop_file_operations = {
#define F_DENTRY(filp) ((filp)->f_path.dentry)
+const void *debugfs_get_aux(const struct file *file)
+{
+ return DEBUGFS_I(file_inode(file))->aux;
+}
+EXPORT_SYMBOL_GPL(debugfs_get_aux);
+
const struct file_operations *debugfs_real_fops(const struct file *filp)
{
struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
+ if (!fsd) {
/*
* Urgh, we've been called w/o a protecting
* debugfs_file_get().
@@ -84,9 +90,12 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode)
return -EINVAL;
d_fsd = READ_ONCE(dentry->d_fsdata);
- if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ if (d_fsd) {
fsd = d_fsd;
} else {
+ struct inode *inode = dentry->d_inode;
+ unsigned int methods = 0;
+
if (WARN_ON(mode == DBGFS_GET_ALREADY))
return -EINVAL;
@@ -95,23 +104,41 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode)
return -ENOMEM;
if (mode == DBGFS_GET_SHORT) {
+ const struct debugfs_short_fops *ops;
+ ops = fsd->short_fops = DEBUGFS_I(inode)->short_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
fsd->real_fops = NULL;
- fsd->short_fops = (void *)((unsigned long)d_fsd &
- ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
} else {
- fsd->real_fops = (void *)((unsigned long)d_fsd &
- ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ const struct file_operations *ops;
+ ops = fsd->real_fops = DEBUGFS_I(inode)->real_fops;
+ if (ops->llseek)
+ methods |= HAS_LSEEK;
+ if (ops->read)
+ methods |= HAS_READ;
+ if (ops->write)
+ methods |= HAS_WRITE;
+ if (ops->unlocked_ioctl)
+ methods |= HAS_IOCTL;
+ if (ops->poll)
+ methods |= HAS_POLL;
fsd->short_fops = NULL;
}
+ fsd->methods = methods;
refcount_set(&fsd->active_users, 1);
init_completion(&fsd->active_users_drained);
INIT_LIST_HEAD(&fsd->cancellations);
mutex_init(&fsd->cancellations_mtx);
- if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+ d_fsd = cmpxchg(&dentry->d_fsdata, NULL, fsd);
+ if (d_fsd) {
mutex_destroy(&fsd->cancellations_mtx);
kfree(fsd);
- fsd = READ_ONCE(dentry->d_fsdata);
+ fsd = d_fsd;
}
}
@@ -208,8 +235,7 @@ void debugfs_enter_cancellation(struct file *file,
return;
fsd = READ_ONCE(dentry->d_fsdata);
- if (WARN_ON(!fsd ||
- ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ if (WARN_ON(!fsd))
return;
mutex_lock(&fsd->cancellations_mtx);
@@ -240,8 +266,7 @@ void debugfs_leave_cancellation(struct file *file,
return;
fsd = READ_ONCE(dentry->d_fsdata);
- if (WARN_ON(!fsd ||
- ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ if (WARN_ON(!fsd))
return;
mutex_lock(&fsd->cancellations_mtx);
@@ -322,13 +347,16 @@ const struct file_operations debugfs_open_proxy_file_operations = {
#define PROTO(args...) args
#define ARGS(args...) args
-#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \
+#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \
static ret_type full_proxy_ ## name(proto) \
{ \
- struct dentry *dentry = F_DENTRY(filp); \
+ struct dentry *dentry = F_DENTRY(filp); \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
const struct file_operations *real_fops; \
ret_type r; \
\
+ if (!(fsd->methods & bit)) \
+ return ret; \
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
@@ -338,17 +366,18 @@ static ret_type full_proxy_ ## name(proto) \
return r; \
}
-#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args) \
+#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args, bit, ret) \
static ret_type full_proxy_ ## name(proto) \
{ \
struct dentry *dentry = F_DENTRY(filp); \
- struct debugfs_fsdata *fsd; \
+ struct debugfs_fsdata *fsd = dentry->d_fsdata; \
ret_type r; \
\
+ if (!(fsd->methods & bit)) \
+ return ret; \
r = debugfs_file_get(dentry); \
if (unlikely(r)) \
return r; \
- fsd = dentry->d_fsdata; \
if (fsd->real_fops) \
r = fsd->real_fops->name(args); \
else \
@@ -359,29 +388,32 @@ static ret_type full_proxy_ ## name(proto) \
FULL_PROXY_FUNC_BOTH(llseek, loff_t, filp,
PROTO(struct file *filp, loff_t offset, int whence),
- ARGS(filp, offset, whence));
+ ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE);
FULL_PROXY_FUNC_BOTH(read, ssize_t, filp,
PROTO(struct file *filp, char __user *buf, size_t size,
loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+ ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL);
FULL_PROXY_FUNC_BOTH(write, ssize_t, filp,
PROTO(struct file *filp, const char __user *buf,
size_t size, loff_t *ppos),
- ARGS(filp, buf, size, ppos));
+ ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL);
FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
- ARGS(filp, cmd, arg));
+ ARGS(filp, cmd, arg), HAS_IOCTL, -ENOTTY);
static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
+ struct debugfs_fsdata *fsd = dentry->d_fsdata;
__poll_t r = 0;
const struct file_operations *real_fops;
+ if (!(fsd->methods & HAS_POLL))
+ return DEFAULT_POLLMASK;
if (debugfs_file_get(dentry))
return EPOLLHUP;
@@ -393,9 +425,7 @@ static __poll_t full_proxy_poll(struct file *filp,
static int full_proxy_release(struct inode *inode, struct file *filp)
{
- const struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = debugfs_real_fops(filp);
- const struct file_operations *proxy_fops = filp->f_op;
int r = 0;
/*
@@ -404,49 +434,21 @@ static int full_proxy_release(struct inode *inode, struct file *filp)
* not to leak any resources. Releasers must not assume that
* ->i_private is still being meaningful here.
*/
- if (real_fops && real_fops->release)
+ if (real_fops->release)
r = real_fops->release(inode, filp);
- replace_fops(filp, d_inode(dentry)->i_fop);
- kfree(proxy_fops);
fops_put(real_fops);
return r;
}
-static void __full_proxy_fops_init(struct file_operations *proxy_fops,
- struct debugfs_fsdata *fsd)
-{
- proxy_fops->release = full_proxy_release;
-
- if ((fsd->real_fops && fsd->real_fops->llseek) ||
- (fsd->short_fops && fsd->short_fops->llseek))
- proxy_fops->llseek = full_proxy_llseek;
-
- if ((fsd->real_fops && fsd->real_fops->read) ||
- (fsd->short_fops && fsd->short_fops->read))
- proxy_fops->read = full_proxy_read;
-
- if ((fsd->real_fops && fsd->real_fops->write) ||
- (fsd->short_fops && fsd->short_fops->write))
- proxy_fops->write = full_proxy_write;
-
- if (fsd->real_fops && fsd->real_fops->poll)
- proxy_fops->poll = full_proxy_poll;
-
- if (fsd->real_fops && fsd->real_fops->unlocked_ioctl)
- proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl;
-}
-
-static int full_proxy_open(struct inode *inode, struct file *filp,
- enum dbgfs_get_mode mode)
+static int full_proxy_open_regular(struct inode *inode, struct file *filp)
{
struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops;
- struct file_operations *proxy_fops = NULL;
struct debugfs_fsdata *fsd;
int r;
- r = __debugfs_file_get(dentry, mode);
+ r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR);
if (r)
return r == -EIO ? -ENOENT : r;
@@ -456,7 +458,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp,
if (r)
goto out;
- if (real_fops && !fops_get(real_fops)) {
+ if (!fops_get(real_fops)) {
#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING) {
@@ -472,55 +474,52 @@ static int full_proxy_open(struct inode *inode, struct file *filp,
goto out;
}
- proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL);
- if (!proxy_fops) {
- r = -ENOMEM;
- goto free_proxy;
- }
- __full_proxy_fops_init(proxy_fops, fsd);
- replace_fops(filp, proxy_fops);
-
- if (!real_fops || real_fops->open) {
- if (real_fops)
- r = real_fops->open(inode, filp);
- else
- r = simple_open(inode, filp);
+ if (real_fops->open) {
+ r = real_fops->open(inode, filp);
if (r) {
- replace_fops(filp, d_inode(dentry)->i_fop);
- goto free_proxy;
- } else if (filp->f_op != proxy_fops) {
+ fops_put(real_fops);
+ } else if (filp->f_op != &debugfs_full_proxy_file_operations) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
- goto free_proxy;
+ fops_put(real_fops);
}
}
-
- goto out;
-free_proxy:
- kfree(proxy_fops);
- fops_put(real_fops);
out:
debugfs_file_put(dentry);
return r;
}
-static int full_proxy_open_regular(struct inode *inode, struct file *filp)
-{
- return full_proxy_open(inode, filp, DBGFS_GET_REGULAR);
-}
-
const struct file_operations debugfs_full_proxy_file_operations = {
.open = full_proxy_open_regular,
+ .release = full_proxy_release,
+ .llseek = full_proxy_llseek,
+ .read = full_proxy_read,
+ .write = full_proxy_write,
+ .poll = full_proxy_poll,
+ .unlocked_ioctl = full_proxy_unlocked_ioctl
};
static int full_proxy_open_short(struct inode *inode, struct file *filp)
{
- return full_proxy_open(inode, filp, DBGFS_GET_SHORT);
+ struct dentry *dentry = F_DENTRY(filp);
+ int r;
+
+ r = __debugfs_file_get(dentry, DBGFS_GET_SHORT);
+ if (r)
+ return r == -EIO ? -ENOENT : r;
+ r = debugfs_locked_down(inode, filp, NULL);
+ if (!r)
+ r = simple_open(inode, filp);
+ debugfs_file_put(dentry);
+ return r;
}
const struct file_operations debugfs_full_short_proxy_file_operations = {
.open = full_proxy_open_short,
+ .llseek = full_proxy_llseek,
+ .read = full_proxy_read,
+ .write = full_proxy_write,
};
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e752009de929..75715d8877ee 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -208,16 +208,34 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+static struct kmem_cache *debugfs_inode_cachep __ro_after_init;
+
+static void init_once(void *foo)
+{
+ struct debugfs_inode_info *info = foo;
+ inode_init_once(&info->vfs_inode);
+}
+
+static struct inode *debugfs_alloc_inode(struct super_block *sb)
+{
+ struct debugfs_inode_info *info;
+ info = alloc_inode_sb(sb, debugfs_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+ return &info->vfs_inode;
+}
+
static void debugfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
- free_inode_nonrcu(inode);
+ kmem_cache_free(debugfs_inode_cachep, DEBUGFS_I(inode));
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.show_options = debugfs_show_options,
+ .alloc_inode = debugfs_alloc_inode,
.free_inode = debugfs_free_inode,
};
@@ -225,23 +243,18 @@ static void debugfs_release_dentry(struct dentry *dentry)
{
struct debugfs_fsdata *fsd = dentry->d_fsdata;
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
- return;
-
- /* check it wasn't a dir (no fsdata) or automount (no real_fops) */
- if (fsd && (fsd->real_fops || fsd->short_fops)) {
+ if (fsd) {
WARN_ON(!list_empty(&fsd->cancellations));
mutex_destroy(&fsd->cancellations_mtx);
}
-
kfree(fsd);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
- struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
+ struct inode *inode = path->dentry->d_inode;
- return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
+ return DEBUGFS_I(inode)->automount(path->dentry, inode->i_private);
}
static const struct dentry_operations debugfs_dops = {
@@ -411,6 +424,7 @@ static struct dentry *end_creating(struct dentry *dentry)
static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *proxy_fops,
const void *real_fops)
{
@@ -441,9 +455,11 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
inode->i_private = data;
inode->i_op = &debugfs_file_inode_operations;
+ if (!real_fops)
+ proxy_fops = &debugfs_noop_file_operations;
inode->i_fop = proxy_fops;
- dentry->d_fsdata = (void *)((unsigned long)real_fops |
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ DEBUGFS_I(inode)->raw = real_fops;
+ DEBUGFS_I(inode)->aux = aux;
d_instantiate(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
@@ -452,30 +468,22 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *fops)
{
- if (WARN_ON((unsigned long)fops &
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
- return ERR_PTR(-EINVAL);
-
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_full);
struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct debugfs_short_fops *fops)
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops)
{
- if (WARN_ON((unsigned long)fops &
- DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
- return ERR_PTR(-EINVAL);
-
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_full_short_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, aux,
+ &debugfs_full_short_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_short);
@@ -512,9 +520,8 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
const struct file_operations *fops)
{
- return __debugfs_create_file(name, mode, parent, data,
- fops ? &debugfs_open_proxy_file_operations :
- &debugfs_noop_file_operations,
+ return __debugfs_create_file(name, mode, parent, data, NULL,
+ &debugfs_open_proxy_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
@@ -624,23 +631,13 @@ struct dentry *debugfs_create_automount(const char *name,
void *data)
{
struct dentry *dentry = start_creating(name, parent);
- struct debugfs_fsdata *fsd;
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
- fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
- if (!fsd) {
- failed_creating(dentry);
- return ERR_PTR(-ENOMEM);
- }
-
- fsd->automount = f;
-
if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
failed_creating(dentry);
- kfree(fsd);
return ERR_PTR(-EPERM);
}
@@ -648,14 +645,13 @@ struct dentry *debugfs_create_automount(const char *name,
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
- kfree(fsd);
return failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
- dentry->d_fsdata = fsd;
+ DEBUGFS_I(inode)->automount = f;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
@@ -730,7 +726,7 @@ static void __debugfs_file_removed(struct dentry *dentry)
*/
smp_mb();
fsd = READ_ONCE(dentry->d_fsdata);
- if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+ if (!fsd)
return;
/* if this was the last reference, we're done */
@@ -834,76 +830,70 @@ void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
/**
- * debugfs_rename - rename a file/directory in the debugfs filesystem
- * @old_dir: a pointer to the parent dentry for the renamed object. This
- * should be a directory dentry.
- * @old_dentry: dentry of an object to be renamed.
- * @new_dir: a pointer to the parent dentry where the object should be
- * moved. This should be a directory dentry.
- * @new_name: a pointer to a string containing the target name.
+ * debugfs_change_name - rename a file/directory in the debugfs filesystem
+ * @dentry: dentry of an object to be renamed.
+ * @fmt: format for new name
*
* This function renames a file/directory in debugfs. The target must not
* exist for rename to succeed.
*
- * This function will return a pointer to old_dentry (which is updated to
- * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR)
- * will be returned.
+ * This function will return 0 on success and -E... on failure.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name)
+int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...)
{
- int error;
- struct dentry *dentry = NULL, *trap;
+ int error = 0;
+ const char *new_name;
struct name_snapshot old_name;
+ struct dentry *parent, *target;
+ struct inode *dir;
+ va_list ap;
- if (IS_ERR(old_dir))
- return old_dir;
- if (IS_ERR(new_dir))
- return new_dir;
- if (IS_ERR_OR_NULL(old_dentry))
- return old_dentry;
-
- trap = lock_rename(new_dir, old_dir);
- /* Source or destination directories don't exist? */
- if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
- goto exit;
- /* Source does not exist, cyclic rename, or mountpoint? */
- if (d_really_is_negative(old_dentry) || old_dentry == trap ||
- d_mountpoint(old_dentry))
- goto exit;
- dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
- /* Lookup failed, cyclic rename or target exists? */
- if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
- goto exit;
-
- take_dentry_name_snapshot(&old_name, old_dentry);
-
- error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry,
- d_inode(new_dir), dentry, 0);
- if (error) {
- release_dentry_name_snapshot(&old_name);
- goto exit;
+ if (IS_ERR_OR_NULL(dentry))
+ return 0;
+
+ va_start(ap, fmt);
+ new_name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!new_name)
+ return -ENOMEM;
+
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ inode_lock(dir);
+
+ take_dentry_name_snapshot(&old_name, dentry);
+
+ if (WARN_ON_ONCE(dentry->d_parent != parent)) {
+ error = -EINVAL;
+ goto out;
}
- d_move(old_dentry, dentry);
- fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
- d_is_dir(old_dentry),
- NULL, old_dentry);
+ if (strcmp(old_name.name.name, new_name) == 0)
+ goto out;
+ target = lookup_one_len(new_name, parent, strlen(new_name));
+ if (IS_ERR(target)) {
+ error = PTR_ERR(target);
+ goto out;
+ }
+ if (d_really_is_positive(target)) {
+ dput(target);
+ error = -EINVAL;
+ goto out;
+ }
+ simple_rename_timestamp(dir, dentry, dir, target);
+ d_move(dentry, target);
+ dput(target);
+ fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry);
+out:
release_dentry_name_snapshot(&old_name);
- unlock_rename(new_dir, old_dir);
- dput(dentry);
- return old_dentry;
-exit:
- if (dentry && !IS_ERR(dentry))
- dput(dentry);
- unlock_rename(new_dir, old_dir);
- if (IS_ERR(dentry))
- return dentry;
- return ERR_PTR(-EINVAL);
+ inode_unlock(dir);
+ dput(parent);
+ kfree_const(new_name);
+ return error;
}
-EXPORT_SYMBOL_GPL(debugfs_rename);
+EXPORT_SYMBOL_GPL(debugfs_change_name);
/**
* debugfs_initialized - Tells whether debugfs has been registered
@@ -939,12 +929,22 @@ static int __init debugfs_init(void)
if (retval)
return retval;
- retval = register_filesystem(&debug_fs_type);
- if (retval)
+ debugfs_inode_cachep = kmem_cache_create("debugfs_inode_cache",
+ sizeof(struct debugfs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
+ init_once);
+ if (debugfs_inode_cachep == NULL) {
sysfs_remove_mount_point(kernel_kobj, "debug");
- else
- debugfs_registered = true;
+ return -ENOMEM;
+ }
- return retval;
+ retval = register_filesystem(&debug_fs_type);
+ if (retval) { // Really not going to happen
+ sysfs_remove_mount_point(kernel_kobj, "debug");
+ kmem_cache_destroy(debugfs_inode_cachep);
+ return retval;
+ }
+ debugfs_registered = true;
+ return 0;
}
core_initcall(debugfs_init);
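The debugfs changes above add an opaque aux pointer that is stored in the new debugfs_inode_info at creation time (the aux argument threaded through __debugfs_create_file()) and read back by file code through debugfs_get_aux(). A minimal sketch of a read op consuming it, assuming the aux value was attached when the file was created (consumer names are made up):

#include <linux/debugfs.h>
#include <linux/fs.h>

struct example_blob {
	const char *text;
	size_t len;
};

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* aux was supplied when the debugfs file was created */
	const struct example_blob *blob = debugfs_get_aux(file);

	return simple_read_from_buffer(buf, count, ppos, blob->text, blob->len);
}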
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index bbae4a228ef4..93483fe84425 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -11,6 +11,22 @@
struct file_operations;
+struct debugfs_inode_info {
+ struct inode vfs_inode;
+ union {
+ const void *raw;
+ const struct file_operations *real_fops;
+ const struct debugfs_short_fops *short_fops;
+ debugfs_automount_t automount;
+ };
+ const void *aux;
+};
+
+static inline struct debugfs_inode_info *DEBUGFS_I(struct inode *inode)
+{
+ return container_of(inode, struct debugfs_inode_info, vfs_inode);
+}
+
/* declared over in file.c */
extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
@@ -20,29 +36,25 @@ extern const struct file_operations debugfs_full_short_proxy_file_operations;
struct debugfs_fsdata {
const struct file_operations *real_fops;
const struct debugfs_short_fops *short_fops;
- union {
- /* automount_fn is used when real_fops is NULL */
- debugfs_automount_t automount;
- struct {
- refcount_t active_users;
- struct completion active_users_drained;
-
- /* protect cancellations */
- struct mutex cancellations_mtx;
- struct list_head cancellations;
- };
+ struct {
+ refcount_t active_users;
+ struct completion active_users_drained;
+
+ /* protect cancellations */
+ struct mutex cancellations_mtx;
+ struct list_head cancellations;
+ unsigned int methods;
};
};
-/*
- * A dentry's ->d_fsdata either points to the real fops or to a
- * dynamically allocated debugfs_fsdata instance.
- * In order to distinguish between these two cases, a real fops
- * pointer gets its lowest bit set.
- */
-#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
+enum {
+ HAS_READ = 1,
+ HAS_WRITE = 2,
+ HAS_LSEEK = 4,
+ HAS_POLL = 8,
+ HAS_IOCTL = 16
+};
-/* Access BITS */
#define DEBUGFS_ALLOW_API BIT(0)
#define DEBUGFS_ALLOW_MOUNT BIT(1)
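The proxy rework above replaces the per-open, heap-built proxy fops with a single static proxy plus a methods bitmask (the HAS_* values) computed once when the fsdata is set up; each proxy entry point then returns an appropriate error immediately if the underlying fops never provided that method. A standalone sketch of that precomputed-capability idea, deliberately simplified and not the debugfs code:

#include <linux/errno.h>

enum { EX_HAS_READ = 1, EX_HAS_WRITE = 2 };

struct ex_ops {
	int (*read)(void *buf);
	int (*write)(const void *buf);
};

struct ex_proxy {
	const struct ex_ops *ops;
	unsigned int methods;		/* computed once at setup time */
};

static void ex_proxy_init(struct ex_proxy *p, const struct ex_ops *ops)
{
	p->ops = ops;
	p->methods = 0;
	if (ops->read)
		p->methods |= EX_HAS_READ;
	if (ops->write)
		p->methods |= EX_HAS_WRITE;
}

static int ex_proxy_read(struct ex_proxy *p, void *buf)
{
	if (!(p->methods & EX_HAS_READ))
		return -EINVAL;		/* cheap flag test, no NULL deref */
	return p->ops->read(buf);
}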
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index b20e565b9c5e..1096ff8562fa 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -45,7 +45,7 @@ static int pty_limit_min;
static int pty_limit_max = INT_MAX;
static atomic_t pty_count = ATOMIC_INIT(0);
-static struct ctl_table pty_table[] = {
+static const struct ctl_table pty_table[] = {
{
.procname = "max",
.maxlen = sizeof(int),
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index acaa0825e9bb..1dfd5b81d831 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -17,7 +17,9 @@
/**
* ecryptfs_d_revalidate - revalidate an ecryptfs dentry
- * @dentry: The ecryptfs dentry
+ * @dir: inode of expected parent
+ * @name: expected name
+ * @dentry: dentry to revalidate
* @flags: lookup flags
*
* Called when the VFS needs to revalidate a dentry. This
@@ -28,7 +30,8 @@
* Returns 1 if valid, 0 otherwise.
*
*/
-static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int ecryptfs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc = 1;
@@ -36,8 +39,15 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
- if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
- rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+ if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ struct inode *lower_dir = ecryptfs_inode_to_lower(dir);
+ struct name_snapshot n;
+
+ take_dentry_name_snapshot(&n, lower_dentry);
+ rc = lower_dentry->d_op->d_revalidate(lower_dir, &n.name,
+ lower_dentry, flags);
+ release_dentry_name_snapshot(&n);
+ }
if (d_really_is_positive(dentry)) {
struct inode *inode = d_inode(dentry);
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 23c51d62f902..cb1b6d0c3454 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -36,28 +36,41 @@ static ssize_t efivarfs_file_write(struct file *file,
if (IS_ERR(data))
return PTR_ERR(data);
+ inode_lock(inode);
+ if (var->removed) {
+ /*
+ * file got removed; don't allow a set. Caused by an
+ * unsuccessful create or successful delete write
+ * racing with us.
+ */
+ bytes = -EIO;
+ goto out;
+ }
+
bytes = efivar_entry_set_get_size(var, attributes, &datasize,
data, &set);
- if (!set && bytes) {
+ if (!set) {
if (bytes == -ENOENT)
bytes = -EIO;
goto out;
}
if (bytes == -ENOENT) {
- drop_nlink(inode);
- d_delete(file->f_path.dentry);
- dput(file->f_path.dentry);
+ /*
+ * zero size signals to release that the write deleted
+ * the variable
+ */
+ i_size_write(inode, 0);
} else {
- inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- inode_unlock(inode);
}
bytes = count;
out:
+ inode_unlock(inode);
+
kfree(data);
return bytes;
@@ -106,8 +119,36 @@ out_free:
return size;
}
+static int efivarfs_file_release(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *var = inode->i_private;
+
+ inode_lock(inode);
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
+ inode_unlock(inode);
+
+ if (var->removed)
+ simple_recursive_removal(file->f_path.dentry, NULL);
+
+ return 0;
+}
+
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ struct efivar_entry *entry = inode->i_private;
+
+ file->private_data = entry;
+
+ inode_lock(inode);
+ entry->open_count++;
+ inode_unlock(inode);
+
+ return 0;
+}
+
const struct file_operations efivarfs_file_operations = {
- .open = simple_open,
- .read = efivarfs_file_read,
- .write = efivarfs_file_write,
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .release = efivarfs_file_release,
};
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index ec23da8405ff..98a7299a9ee9 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -77,39 +77,34 @@ static bool efivarfs_valid_name(const char *str, int len)
static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- struct efivarfs_fs_info *info = dir->i_sb->s_fs_info;
struct inode *inode = NULL;
struct efivar_entry *var;
int namelen, i = 0, err = 0;
bool is_removable = false;
+ efi_guid_t vendor;
if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
- var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var)
- return -ENOMEM;
-
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &vendor);
if (err)
- goto out;
- if (guid_equal(&var->var.VendorGuid, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) {
- err = -EPERM;
- goto out;
- }
+ return err;
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return -EPERM;
- if (efivar_variable_is_removable(var->var.VendorGuid,
+ if (efivar_variable_is_removable(vendor,
dentry->d_name.name, namelen))
is_removable = true;
inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
- if (!inode) {
- err = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
+ var = efivar_entry(inode);
+
+ var->var.VendorGuid = vendor;
for (i = 0; i < namelen; i++)
var->var.VariableName[i] = dentry->d_name.name[i];
@@ -117,21 +112,11 @@ static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir,
var->var.VariableName[i] = '\0';
inode->i_private = var;
- kmemleak_ignore(var);
-
- err = efivar_entry_add(var, &info->efivarfs_list);
- if (err)
- goto out;
d_instantiate(dentry, inode);
dget(dentry);
-out:
- if (err) {
- kfree(var);
- if (inode)
- iput(inode);
- }
- return err;
+
+ return 0;
}
static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -187,7 +172,24 @@ efivarfs_fileattr_set(struct mnt_idmap *idmap,
return 0;
}
+/* copy of simple_setattr except that it doesn't do i_size updates */
+static int efivarfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int error;
+
+ error = setattr_prepare(idmap, dentry, iattr);
+ if (error)
+ return error;
+
+ setattr_copy(idmap, inode, iattr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
static const struct inode_operations efivarfs_file_inode_operations = {
.fileattr_get = efivarfs_fileattr_get,
.fileattr_set = efivarfs_fileattr_set,
+ .setattr = efivarfs_setattr,
};
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index 74f0602a9e01..ac6a1dd0a6a5 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -6,7 +6,6 @@
#ifndef EFIVAR_FS_INTERNAL_H
#define EFIVAR_FS_INTERNAL_H
-#include <linux/list.h>
#include <linux/efi.h>
struct efivarfs_mount_opts {
@@ -16,30 +15,31 @@ struct efivarfs_mount_opts {
struct efivarfs_fs_info {
struct efivarfs_mount_opts mount_opts;
- struct list_head efivarfs_list;
struct super_block *sb;
struct notifier_block nb;
+ struct notifier_block pm_nb;
};
struct efi_variable {
efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
- __u32 Attributes;
};
struct efivar_entry {
struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
+ struct inode vfs_inode;
+ unsigned long open_count;
+ bool removed;
};
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
- struct list_head *),
- void *data, struct list_head *head);
+static inline struct efivar_entry *efivar_entry(struct inode *inode)
+{
+ return container_of(inode, struct efivar_entry, vfs_inode);
+}
+
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check);
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void efivar_entry_remove(struct efivar_entry *entry);
int efivar_entry_delete(struct efivar_entry *entry);
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
@@ -50,13 +50,14 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size);
bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
size_t len);
+char *efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor);
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data);
extern const struct file_operations efivarfs_file_operations;
extern const struct inode_operations efivarfs_dir_inode_operations;
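Both debugfs and efivarfs in this patch move per-file state into a structure that embeds the VFS inode, allocated from ->alloc_inode() and recovered with container_of(), instead of keeping a separate list hung off i_private. A minimal sketch of that embedded-inode pattern with generic names, mirroring the kzalloc approach the efivarfs hunk below uses rather than a dedicated slab cache:

#include <linux/fs.h>
#include <linux/slab.h>

struct examplefs_inode {
	unsigned long state;		/* filesystem-private per-inode data */
	struct inode vfs_inode;		/* embedded VFS inode */
};

static inline struct examplefs_inode *EXAMPLEFS_I(struct inode *inode)
{
	return container_of(inode, struct examplefs_inode, vfs_inode);
}

static struct inode *examplefs_alloc_inode(struct super_block *sb)
{
	struct examplefs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL);

	if (!ei)
		return NULL;
	inode_init_once(&ei->vfs_inode);
	return &ei->vfs_inode;
}

static void examplefs_free_inode(struct inode *inode)
{
	kfree(EXAMPLEFS_I(inode));
}

static const struct super_operations examplefs_sops = {
	.alloc_inode	= examplefs_alloc_inode,
	.free_inode	= examplefs_free_inode,
};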
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index beba15673be8..09fcf731e65d 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/ucs2_string.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/magic.h>
#include <linux/statfs.h>
#include <linux/notifier.h>
@@ -39,9 +40,24 @@ static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event,
return NOTIFY_OK;
}
-static void efivarfs_evict_inode(struct inode *inode)
+static struct inode *efivarfs_alloc_inode(struct super_block *sb)
{
- clear_inode(inode);
+ struct efivar_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return NULL;
+
+ inode_init_once(&entry->vfs_inode);
+ entry->removed = false;
+
+ return &entry->vfs_inode;
+}
+
+static void efivarfs_free_inode(struct inode *inode)
+{
+ struct efivar_entry *entry = efivar_entry(inode);
+
+ kfree(entry);
}
static int efivarfs_show_options(struct seq_file *m, struct dentry *root)
@@ -106,7 +122,8 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static const struct super_operations efivarfs_ops = {
.statfs = efivarfs_statfs,
.drop_inode = generic_delete_inode,
- .evict_inode = efivarfs_evict_inode,
+ .alloc_inode = efivarfs_alloc_inode,
+ .free_inode = efivarfs_free_inode,
.show_options = efivarfs_show_options,
};
@@ -181,56 +198,63 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
return ERR_PTR(-ENOMEM);
}
-static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
- unsigned long name_size, void *data,
- struct list_head *list)
+bool efivarfs_variable_is_present(efi_char16_t *variable_name,
+ efi_guid_t *vendor, void *data)
+{
+ char *name = efivar_get_utf8name(variable_name, vendor);
+ struct super_block *sb = data;
+ struct dentry *dentry;
+ struct qstr qstr;
+
+ if (!name)
+ /*
+ * If the allocation failed there'll already be an
+ * error in the log (and likely a huge and growing
+ * number of them since the system will be under
+ * number of them since the system will be under
+ * extreme memory pressure), so simply assume
+ * collision for safety but don't add to the log
+ * flood.
+ */
+ return true;
+
+ qstr.name = name;
+ qstr.len = strlen(name);
+ dentry = d_hash_and_lookup(sb->s_root, &qstr);
+ kfree(name);
+ if (!IS_ERR_OR_NULL(dentry))
+ dput(dentry);
+
+ return dentry != NULL;
+}
+
+static int efivarfs_create_dentry(struct super_block *sb, efi_char16_t *name16,
+ unsigned long name_size, efi_guid_t vendor,
+ char *name)
{
- struct super_block *sb = (struct super_block *)data;
struct efivar_entry *entry;
- struct inode *inode = NULL;
+ struct inode *inode;
struct dentry *dentry, *root = sb->s_root;
unsigned long size = 0;
- char *name;
int len;
int err = -ENOMEM;
bool is_removable = false;
- if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
- return 0;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return err;
-
- memcpy(entry->var.VariableName, name16, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- len = ucs2_utf8size(entry->var.VariableName);
+ /* length of the variable name itself: remove GUID and separator */
+ len = strlen(name) - EFI_VARIABLE_GUID_LEN - 1;
- /* name, plus '-', plus GUID, plus NUL*/
- name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
- if (!name)
- goto fail;
-
- ucs2_as_utf8(name, entry->var.VariableName, len);
-
- if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+ if (efivar_variable_is_removable(vendor, name, len))
is_removable = true;
- name[len] = '-';
-
- efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
-
- name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
-
- /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
- strreplace(name, '/', '!');
-
inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
is_removable);
if (!inode)
goto fail_name;
+ entry = efivar_entry(inode);
+
+ memcpy(entry->var.VariableName, name16, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
dentry = efivarfs_alloc_dentry(root, name);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -238,14 +262,13 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
}
__efivar_entry_get(entry, NULL, &size, NULL);
- __efivar_entry_add(entry, list);
/* copied by the above to local storage in the dentry. */
kfree(name);
inode_lock(inode);
inode->i_private = entry;
- i_size_write(inode, size + sizeof(entry->var.Attributes));
+ i_size_write(inode, size + sizeof(__u32)); /* attributes + data */
inode_unlock(inode);
d_add(dentry, inode);
@@ -255,16 +278,24 @@ fail_inode:
iput(inode);
fail_name:
kfree(name);
-fail:
- kfree(entry);
+
return err;
}
-static int efivarfs_destroy(struct efivar_entry *entry, void *data)
+static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
{
- efivar_entry_remove(entry);
- kfree(entry);
- return 0;
+ struct super_block *sb = (struct super_block *)data;
+ char *name;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
}
enum {
@@ -336,7 +367,7 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
- return efivar_init(efivarfs_callback, sb, &sfi->efivarfs_list);
+ return efivar_init(efivarfs_callback, sb, true);
}
static int efivarfs_get_tree(struct fs_context *fc)
@@ -360,6 +391,148 @@ static const struct fs_context_operations efivarfs_context_ops = {
.reconfigure = efivarfs_reconfigure,
};
+struct efivarfs_ctx {
+ struct dir_context ctx;
+ struct super_block *sb;
+ struct dentry *dentry;
+};
+
+static bool efivarfs_actor(struct dir_context *ctx, const char *name, int len,
+ loff_t offset, u64 ino, unsigned mode)
+{
+ unsigned long size;
+ struct efivarfs_ctx *ectx = container_of(ctx, struct efivarfs_ctx, ctx);
+ struct qstr qstr = { .name = name, .len = len };
+ struct dentry *dentry = d_hash_and_lookup(ectx->sb->s_root, &qstr);
+ struct inode *inode;
+ struct efivar_entry *entry;
+ int err;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return true;
+
+ inode = d_inode(dentry);
+ entry = efivar_entry(inode);
+
+ err = efivar_entry_size(entry, &size);
+ size += sizeof(__u32); /* attributes */
+ if (err)
+ size = 0;
+
+ inode_lock(inode);
+ i_size_write(inode, size);
+ inode_unlock(inode);
+
+ if (!size) {
+ ectx->dentry = dentry;
+ return false;
+ }
+
+ dput(dentry);
+
+ return true;
+}
+
+static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ char *name;
+ struct super_block *sb = data;
+ struct dentry *dentry;
+ struct qstr qstr;
+ int err;
+
+ if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID))
+ return 0;
+
+ name = efivar_get_utf8name(name16, &vendor);
+ if (!name)
+ return -ENOMEM;
+
+ qstr.name = name;
+ qstr.len = strlen(name);
+ dentry = d_hash_and_lookup(sb->s_root, &qstr);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ if (!dentry) {
+ /* found missing entry */
+ pr_info("efivarfs: creating variable %s\n", name);
+ return efivarfs_create_dentry(sb, name16, name_size, vendor, name);
+ }
+
+ dput(dentry);
+ err = 0;
+
+ out:
+ kfree(name);
+
+ return err;
+}
+
+static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info,
+ pm_nb);
+ struct path path = { .mnt = NULL, .dentry = sfi->sb->s_root, };
+ struct efivarfs_ctx ectx = {
+ .ctx = {
+ .actor = efivarfs_actor,
+ },
+ .sb = sfi->sb,
+ };
+ struct file *file;
+ static bool rescan_done = true;
+
+ if (action == PM_HIBERNATION_PREPARE) {
+ rescan_done = false;
+ return NOTIFY_OK;
+ } else if (action != PM_POST_HIBERNATION) {
+ return NOTIFY_DONE;
+ }
+
+ if (rescan_done)
+ return NOTIFY_DONE;
+
+ pr_info("efivarfs: resyncing variable state\n");
+
+ /* O_NOATIME is required to prevent oops on NULL mnt */
+ file = kernel_file_open(&path, O_RDONLY | O_DIRECTORY | O_NOATIME,
+ current_cred());
+ if (IS_ERR(file))
+ return NOTIFY_DONE;
+
+ rescan_done = true;
+
+ /*
+ * First loop over the directory and verify each entry exists,
+ * removing it if it doesn't
+ */
+ file->f_pos = 2; /* skip . and .. */
+ do {
+ ectx.dentry = NULL;
+ iterate_dir(file, &ectx.ctx);
+ if (ectx.dentry) {
+ pr_info("efivarfs: removing variable %pd\n",
+ ectx.dentry);
+ simple_recursive_removal(ectx.dentry, NULL);
+ dput(ectx.dentry);
+ }
+ } while (ectx.dentry);
+ fput(file);
+
+ /*
+ * then loop over variables, creating them if there's no matching
+ * dentry
+ */
+ efivar_init(efivarfs_check_missing, sfi->sb, false);
+
+ return NOTIFY_OK;
+}
+
static int efivarfs_init_fs_context(struct fs_context *fc)
{
struct efivarfs_fs_info *sfi;
@@ -371,13 +544,16 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
if (!sfi)
return -ENOMEM;
- INIT_LIST_HEAD(&sfi->efivarfs_list);
-
sfi->mount_opts.uid = GLOBAL_ROOT_UID;
sfi->mount_opts.gid = GLOBAL_ROOT_GID;
fc->s_fs_info = sfi;
fc->ops = &efivarfs_context_ops;
+
+ sfi->pm_nb.notifier_call = efivarfs_pm_notify;
+ sfi->pm_nb.priority = 0;
+ register_pm_notifier(&sfi->pm_nb);
+
return 0;
}
@@ -387,9 +563,8 @@ static void efivarfs_kill_sb(struct super_block *sb)
blocking_notifier_chain_unregister(&efivar_ops_nh, &sfi->nb);
kill_litter_super(sb);
+ unregister_pm_notifier(&sfi->pm_nb);
- /* Remove all entries and destroy */
- efivar_entry_iter(efivarfs_destroy, &sfi->efivarfs_list, NULL);
kfree(sfi);
}
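The super.c changes above register a PM notifier per mount so the dentry tree can be resynced with NVRAM after resume from hibernation (stale files removed, missing variables recreated). A minimal sketch of wiring such a notifier, with the callback body reduced to prints (names illustrative):

#include <linux/suspend.h>
#include <linux/printk.h>

static int example_pm_notify(struct notifier_block *nb, unsigned long action,
			     void *unused)
{
	if (action == PM_HIBERNATION_PREPARE)
		pr_info("example: hibernation coming, mark state dirty\n");
	else if (action == PM_POST_HIBERNATION)
		pr_info("example: back from hibernation, resync state here\n");
	return NOTIFY_OK;
}

static struct notifier_block example_pm_nb = {
	.notifier_call	= example_pm_notify,
	.priority	= 0,
};

/*
 * Call register_pm_notifier(&example_pm_nb) at setup and
 * unregister_pm_notifier(&example_pm_nb) at teardown, as the
 * init_fs_context/kill_sb hunks above do for efivarfs.
 */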
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
index f7d43c847ee9..6edc10958ecf 100644
--- a/fs/efivarfs/vars.c
+++ b/fs/efivarfs/vars.c
@@ -225,6 +225,31 @@ variable_matches(const char *var_name, size_t len, const char *match_name,
}
}
+char *
+efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor)
+{
+ int len = ucs2_utf8size(name16);
+ char *name;
+
+ /* name, plus '-', plus GUID, plus NUL*/
+ name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ ucs2_as_utf8(name, name16, len);
+
+ name[len] = '-';
+
+ efi_guid_to_str(vendor, name + len + 1);
+
+ name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+
+ /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
+ strreplace(name, '/', '!');
+
+ return name;
+}
+
bool
efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size)
@@ -288,28 +313,6 @@ efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
return found;
}
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
- struct list_head *head)
-{
- struct efivar_entry *entry, *n;
- unsigned long strsize1, strsize2;
- bool found = false;
-
- strsize1 = ucs2_strsize(variable_name, EFI_VAR_NAME_LEN);
- list_for_each_entry_safe(entry, n, head, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, EFI_VAR_NAME_LEN);
- if (strsize1 == strsize2 &&
- !memcmp(variable_name, &(entry->var.VariableName),
- strsize2) &&
- !efi_guidcmp(entry->var.VendorGuid,
- *vendor)) {
- found = true;
- break;
- }
- }
- return found;
-}
-
/*
* Returns the size of variable_name, in bytes, including the
* terminating NULL character, or variable_name_size if no NULL
@@ -361,16 +364,15 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* efivar_init - build the initial list of EFI variables
* @func: callback function to invoke for every variable
* @data: function-specific data to pass to @func
- * @head: initialised head of variable list
+ * @duplicate_check: fail if a duplicate variable is found
*
* Get every EFI variable from the firmware and invoke @func. @func
- * should call efivar_entry_add() to build the list of variables.
+ * should populate the initial dentry and inode tree.
*
* Returns 0 on success, or a kernel error code on failure.
*/
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
- struct list_head *),
- void *data, struct list_head *head)
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicate_check)
{
unsigned long variable_name_size = 512;
efi_char16_t *variable_name;
@@ -414,14 +416,15 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
- if (variable_is_present(variable_name, &vendor_guid,
- head)) {
+ if (duplicate_check &&
+ efivarfs_variable_is_present(variable_name,
+ &vendor_guid, data)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
status = EFI_NOT_FOUND;
} else {
err = func(variable_name, vendor_guid,
- variable_name_size, data, head);
+ variable_name_size, data);
if (err)
status = EFI_NOT_FOUND;
}
@@ -453,70 +456,12 @@ free:
}
/**
- * efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
-{
- int err;
-
- err = efivar_lock();
- if (err)
- return err;
- list_add(&entry->list, head);
- efivar_unlock();
-
- return 0;
-}
-
-/**
- * __efivar_entry_add - add entry to variable list
- * @entry: entry to add to list
- * @head: list head
- */
-void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
-{
- list_add(&entry->list, head);
-}
-
-/**
- * efivar_entry_remove - remove entry from variable list
- * @entry: entry to remove from list
- *
- * Returns 0 on success, or a kernel error code on failure.
- */
-void efivar_entry_remove(struct efivar_entry *entry)
-{
- list_del(&entry->list);
-}
-
-/*
- * efivar_entry_list_del_unlock - remove entry from variable list
- * @entry: entry to remove
- *
- * Remove @entry from the variable list and release the list lock.
- *
- * NOTE: slightly weird locking semantics here - we expect to be
- * called with the efivars lock already held, and we release it before
- * returning. This is because this function is usually called after
- * set_variable() while the lock is still held.
- */
-static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
-{
- list_del(&entry->list);
- efivar_unlock();
-}
-
-/**
- * efivar_entry_delete - delete variable and remove entry from list
+ * efivar_entry_delete - delete variable
* @entry: entry containing variable to delete
*
- * Delete the variable from the firmware and remove @entry from the
- * variable list. It is the caller's responsibility to free @entry
- * once we return.
+ * Delete the variable from the firmware. It is the caller's
+ * responsibility to free @entry (by deleting the dentry/inode) once
+ * we return.
*
* Returns 0 on success, -EINTR if we can't grab the semaphore,
* converted EFI status code if set_variable() fails.
@@ -533,12 +478,10 @@ int efivar_entry_delete(struct efivar_entry *entry)
status = efivar_set_variable_locked(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL, false);
- if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
- efivar_unlock();
+ efivar_unlock();
+ if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND))
return efi_status_to_err(status);
- }
- efivar_entry_list_del_unlock(entry);
return 0;
}
@@ -632,7 +575,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
* get_variable() fail.
*
* If the EFI variable does not exist when calling set_variable()
- * (EFI_NOT_FOUND), @entry is removed from the variable list.
+ * (EFI_NOT_FOUND), nothing else needs to be done.
*/
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set)
@@ -648,9 +591,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
return -EINVAL;
/*
- * The lock here protects the get_variable call, the conditional
- * set_variable call, and removal of the variable from the efivars
- * list (in the case of an authenticated delete).
+ * The lock here protects the get_variable call and the
+ * conditional set_variable call.
*/
err = efivar_lock();
if (err)
@@ -676,10 +618,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
&entry->var.VendorGuid,
NULL, size, NULL);
- if (status == EFI_NOT_FOUND)
- efivar_entry_list_del_unlock(entry);
- else
- efivar_unlock();
+ efivar_unlock();
if (status && status != EFI_BUFFER_TOO_SMALL)
return efi_status_to_err(status);
@@ -691,37 +630,3 @@ out:
return err;
}
-
-/**
- * efivar_entry_iter - iterate over variable list
- * @func: callback function
- * @head: head of variable list
- * @data: function-specific data to pass to callback
- *
- * Iterate over the list of EFI variables and call @func with every
- * entry on the list. It is safe for @func to remove entries in the
- * list via efivar_entry_delete() while iterating.
- *
- * Some notes for the callback function:
- * - a non-zero return value indicates an error and terminates the loop
- * - @func is called from atomic context
- */
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data)
-{
- struct efivar_entry *entry, *n;
- int err = 0;
-
- err = efivar_lock();
- if (err)
- return err;
-
- list_for_each_entry_safe(entry, n, head, list) {
- err = func(entry, data);
- if (err)
- break;
- }
- efivar_unlock();
-
- return err;
-}
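
For orientation, efivar_get_utf8name() above builds the same "Name-GUID" strings that efivarfs exposes as file names. A sketch of a caller follows; the variable name is hypothetical, and the GUID shown is the well-known EFI global variable GUID.

/* Sketch only: name16/vendor would come from GetNextVariableName(), as in
 * efivar_init(). The returned string is expected to look like
 * "BootOrder-8be4df61-93ca-11d2-aa0d-00e098032b8c" (with any '/' in the
 * variable name rewritten to '!'). */
char *name = efivar_get_utf8name(name16, &vendor);
if (!name)
	return -ENOMEM;
pr_debug("efivarfs: variable file name %s\n", name);
kfree(name);
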
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 7bfe251680ec..65ff39401020 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -29,29 +29,8 @@ struct z_erofs_decompressor {
char *name;
};
-/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2)
-#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2)
-
-/*
- * For all pages in a pcluster, page->private should be one of
- * Type Last 2bits page->private
- * short-lived page 00 Z_EROFS_SHORTLIVED_PAGE
- * preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE
- * cached/managed page 00 pointer to z_erofs_pcluster
- * online page (file-backed, 01/10/11 sub-index << 2 | count
- * some pages can be used for inplace I/O)
- *
- * page->mapping should be one of
- * Type page->mapping
- * short-lived page NULL
- * preallocated page NULL
- * cached/managed page non-NULL or NULL (invalidated/truncated page)
- * online page non-NULL
- *
- * For all managed pages, PG_private should be set with 1 extra refcount,
- * which is used for page reclaim / migration.
- */
+#define Z_EROFS_PREALLOCATED_FOLIO ((void *)(-2UL << 2))
/*
* Currently, short-lived pages are pages directly from buddy system
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index eb318c7ddd80..2b123b070a42 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -7,14 +7,7 @@
#include "compress.h"
#include <linux/lz4.h>
-#ifndef LZ4_DISTANCE_MAX /* history window size */
-#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
-#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
-#endif
struct z_erofs_lz4_decompress_ctx {
struct z_erofs_decompress_req *rq;
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index c8f2ae845bd2..199395ed1c1f 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -9,6 +9,7 @@
#ifndef __EROFS_FS_H
#define __EROFS_FS_H
+/* to allow for x86 boot sectors and other oddities. */
#define EROFS_SUPER_OFFSET 1024
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
@@ -54,7 +55,7 @@ struct erofs_deviceslot {
/* erofs on-disk super block (currently 128 bytes) */
struct erofs_super_block {
__le32 magic; /* file system magic number */
- __le32 checksum; /* crc32c(super_block) */
+ __le32 checksum; /* crc32c to avoid unexpected on-disk overlap */
__le32 feature_compat;
__u8 blkszbits; /* filesystem block size in bit shift */
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 33f8539dda4a..0ffd1c63beeb 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -6,7 +6,7 @@
#include <trace/events/erofs.h>
struct erofs_fileio_rq {
- struct bio_vec bvecs[BIO_MAX_VECS];
+ struct bio_vec bvecs[16];
struct bio bio;
struct kiocb iocb;
struct super_block *sb;
@@ -68,7 +68,7 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
GFP_KERNEL | __GFP_NOFAIL);
- bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
+ bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
rq->iocb.ki_filp = mdev->m_dif->file;
rq->sb = mdev->m_sb;
return rq;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index f5956474bfde..827b62665649 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -39,29 +39,21 @@ void _erofs_printk(struct super_block *sb, const char *fmt, ...)
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
- size_t len = 1 << EROFS_SB(sb)->blkszbits;
- struct erofs_super_block *dsb;
- u32 expected_crc, crc;
+ struct erofs_super_block *dsb = sbdata + EROFS_SUPER_OFFSET;
+ u32 len = 1 << EROFS_SB(sb)->blkszbits, crc;
if (len > EROFS_SUPER_OFFSET)
len -= EROFS_SUPER_OFFSET;
+ len -= offsetof(struct erofs_super_block, checksum) +
+ sizeof(dsb->checksum);
- dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
- if (!dsb)
- return -ENOMEM;
-
- expected_crc = le32_to_cpu(dsb->checksum);
- dsb->checksum = 0;
- /* to allow for x86 boot sectors and other oddities. */
- crc = crc32c(~0, dsb, len);
- kfree(dsb);
-
- if (crc != expected_crc) {
- erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
- crc, expected_crc);
- return -EBADMSG;
- }
- return 0;
+ /* skip .magic(pre-verified) and .checksum(0) fields */
+ crc = crc32c(0x5045B54A, (&dsb->checksum) + 1, len);
+ if (crc == le32_to_cpu(dsb->checksum))
+ return 0;
+ erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+ crc, le32_to_cpu(dsb->checksum));
+ return -EBADMSG;
}
static void erofs_inode_init_once(void *ptr)
@@ -516,8 +508,6 @@ static int erofs_fc_parse_param(struct fs_context *fc,
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
#endif
break;
- default:
- return -ENOPARAM;
}
return 0;
}
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index a90d7d649739..df2777e05661 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -407,7 +407,7 @@ int erofs_getxattr(struct inode *inode, int index, const char *name,
}
it.index = index;
- it.name = (struct qstr)QSTR_INIT(name, strlen(name));
+ it.name = QSTR(name);
if (it.name.len > EROFS_NAME_LEN)
return -ERANGE;
@@ -478,7 +478,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb)
if (!sbi->xattr_prefix_count)
return 0;
- pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
+ pfs = kcalloc(sbi->xattr_prefix_count, sizeof(*pfs), GFP_KERNEL);
if (!pfs)
return -ENOMEM;
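
The kzalloc(n * size) to kcalloc() switch above is the usual hardening move: kcalloc() checks the multiplication for overflow before allocating. A roughly equivalent open-coded form, assuming <linux/overflow.h> for size_mul(), would be:

	pfs = kzalloc(size_mul(sbi->xattr_prefix_count, sizeof(*pfs)), GFP_KERNEL);
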
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 33a128252687..d771e06db738 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -12,12 +12,6 @@
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS 2
-/*
- * let's leave a type here in case of introducing
- * another tagged pointer later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
struct z_erofs_bvec {
struct page *page;
int offset;
@@ -48,7 +42,7 @@ struct z_erofs_pcluster {
struct lockref lockref;
/* A: point to next chained pcluster or TAILs */
- z_erofs_next_pcluster_t next;
+ struct z_erofs_pcluster *next;
/* I: start block address of this pcluster */
erofs_off_t index;
@@ -94,12 +88,11 @@ struct z_erofs_pcluster {
/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
-#define Z_EROFS_PCLUSTER_NIL (NULL)
struct z_erofs_decompressqueue {
struct super_block *sb;
+ struct z_erofs_pcluster *head;
atomic_t pending_bios;
- z_erofs_next_pcluster_t head;
union {
struct completion done;
@@ -462,39 +455,32 @@ err_decompressor:
}
enum z_erofs_pclustermode {
+ /* It has previously been linked into another processing chain */
Z_EROFS_PCLUSTER_INFLIGHT,
/*
- * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
- * could be dispatched into bypass queue later due to uptodated managed
- * pages. All related online pages cannot be reused for inplace I/O (or
- * bvpage) since it can be directly decoded without I/O submission.
+ * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
+ * may be dispatched to the bypass queue later because its managed
+ * folios are already up to date. All file-backed folios related to this
+ * pcluster cannot be reused for in-place I/O (or bvpage) since the
+ * pcluster may be decoded in a separate queue (and thus out of order).
*/
Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
- * The pcluster was just linked to a decompression chain by us. It can
- * also be linked with the remaining pclusters, which means if the
- * processing page is the tail page of a pcluster, this pcluster can
- * safely use the whole page (since the previous pcluster is within the
- * same chain) for in-place I/O, as illustrated below:
- * ___________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current pcl) | (of the previous pcl) |
- * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
- *
- * [ (*) the page above can be used as inplace I/O. ]
+ * The pcluster has just been linked to our processing chain.
+ * File-backed folios (except for the head page) related to it can be
+ * used for in-place I/O (or bvpage).
*/
Z_EROFS_PCLUSTER_FOLLOWED,
};
-struct z_erofs_decompress_frontend {
+struct z_erofs_frontend {
struct inode *const inode;
struct erofs_map_blocks map;
struct z_erofs_bvec_iter biter;
struct page *pagepool;
struct page *candidate_bvpage;
- struct z_erofs_pcluster *pcl;
- z_erofs_next_pcluster_t owned_head;
+ struct z_erofs_pcluster *pcl, *head;
enum z_erofs_pclustermode mode;
erofs_off_t headoffset;
@@ -503,11 +489,11 @@ struct z_erofs_decompress_frontend {
unsigned int icur;
};
-#define DECOMPRESS_FRONTEND_INIT(__i) { \
- .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = Z_EROFS_PCLUSTER_FOLLOWED }
+#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
+ .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }
-static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
{
unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
@@ -524,19 +510,17 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
return false;
}
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
{
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
bool shouldalloc = z_erofs_should_alloc_cache(fe);
- bool standalone = true;
- /*
- * optimistic allocation without direct reclaim since inplace I/O
- * can be used if low memory otherwise.
- */
+ bool may_bypass = true;
+ /* Optimistic allocation, as in-place I/O can be used as a fallback */
gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+ struct folio *folio, *newfolio;
unsigned int i;
if (i_blocksize(fe->inode) != PAGE_SIZE ||
@@ -544,47 +528,42 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
return;
for (i = 0; i < pclusterpages; ++i) {
- struct page *page, *newpage;
-
/* Inaccurate check w/o locking to avoid unneeded lookups */
if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
- page = find_get_page(mc, pcl->index + i);
- if (!page) {
- /* I/O is needed, no possible to decompress directly */
- standalone = false;
+ folio = filemap_get_folio(mc, pcl->index + i);
+ if (IS_ERR(folio)) {
+ may_bypass = false;
if (!shouldalloc)
continue;
/*
- * Try cached I/O if allocation succeeds or fallback to
- * in-place I/O instead to avoid any direct reclaim.
+ * Allocate a managed folio for cached I/O; otherwise, it may
+ * later be filled with a file-backed folio for in-place I/O
*/
- newpage = erofs_allocpage(&fe->pagepool, gfp);
- if (!newpage)
+ newfolio = filemap_alloc_folio(gfp, 0);
+ if (!newfolio)
continue;
- set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+ newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
+ folio = NULL;
}
spin_lock(&pcl->lockref.lock);
if (!pcl->compressed_bvecs[i].page) {
- pcl->compressed_bvecs[i].page = page ? page : newpage;
+ pcl->compressed_bvecs[i].page =
+ folio_page(folio ?: newfolio, 0);
spin_unlock(&pcl->lockref.lock);
continue;
}
spin_unlock(&pcl->lockref.lock);
-
- if (page)
- put_page(page);
- else if (newpage)
- erofs_pagepool_add(&fe->pagepool, newpage);
+ folio_put(folio ?: newfolio);
}
/*
- * don't do inplace I/O if all compressed pages are available in
- * managed cache since it can be moved to the bypass queue instead.
+ * Don't perform in-place I/O if all compressed pages are available in
+ * the managed cache, as the pcluster can be moved to the bypass queue.
*/
- if (standalone)
+ if (may_bypass)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
@@ -681,7 +660,7 @@ int erofs_init_managed_cache(struct super_block *sb)
}
/* callers must be with pcluster lock held */
-static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
+static int z_erofs_attach_page(struct z_erofs_frontend *fe,
struct z_erofs_bvec *bvec, bool exclusive)
{
struct z_erofs_pcluster *pcl = fe->pcl;
@@ -727,7 +706,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
return true;
}
-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
@@ -747,13 +726,11 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- lockref_init(&pcl->lockref, 1); /* one ref for this request */
+ lockref_init(&pcl->lockref); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
-
- /* new pclusters should be claimed as type 1, primary and followed */
- pcl->next = fe->owned_head;
+ pcl->next = fe->head;
pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
@@ -789,8 +766,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
goto err_out;
}
}
- fe->owned_head = &pcl->next;
- fe->pcl = pcl;
+ fe->head = fe->pcl = pcl;
return 0;
err_out:
@@ -799,7 +775,7 @@ err_out:
return err;
}
-static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
@@ -809,7 +785,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
DBG_BUGON(fe->pcl);
/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
- DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+ DBG_BUGON(!fe->head);
if (!(map->m_flags & EROFS_MAP_META)) {
while (1) {
@@ -837,10 +813,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
if (ret == -EEXIST) {
mutex_lock(&fe->pcl->lock);
/* check if this pcluster hasn't been linked into any chain. */
- if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL,
- fe->owned_head) == Z_EROFS_PCLUSTER_NIL) {
+ if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
/* .. so it can be attached to our submission chain */
- fe->owned_head = &fe->pcl->next;
+ fe->head = fe->pcl;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
} else { /* otherwise, it belongs to an inflight chain */
fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
@@ -873,14 +848,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
return 0;
}
-/*
- * keep in mind that no referenced pclusters will be freed
- * only after a RCU grace period.
- */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
- z_erofs_free_pcluster(container_of(head,
- struct z_erofs_pcluster, rcu));
+ z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
}
static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
@@ -922,12 +892,10 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
return free;
}
-unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
- unsigned long nr_shrink)
+unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
{
struct z_erofs_pcluster *pcl;
- unsigned int freed = 0;
- unsigned long index;
+ unsigned long index, freed = 0;
xa_lock(&sbi->managed_pslots);
xa_for_each(&sbi->managed_pslots, index, pcl) {
@@ -937,7 +905,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
xa_unlock(&sbi->managed_pslots);
++freed;
- if (!--nr_shrink)
+ if (!--nr)
return freed;
xa_lock(&sbi->managed_pslots);
}
@@ -966,7 +934,7 @@ static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
-static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
{
struct z_erofs_pcluster *pcl = fe->pcl;
@@ -979,13 +947,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
if (fe->candidate_bvpage)
fe->candidate_bvpage = NULL;
- /*
- * if all pending pages are added, don't hold its reference
- * any longer if the pcluster isn't hosted by ourselves.
- */
+ /* Drop refcount if it doesn't belong to our processing chain */
if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
-
fe->pcl = NULL;
}
@@ -1014,7 +978,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
return 0;
}
-static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+static int z_erofs_scan_folio(struct z_erofs_frontend *f,
struct folio *folio, bool ra)
{
struct inode *const inode = f->inode;
@@ -1129,7 +1093,7 @@ static bool z_erofs_page_is_invalidated(struct page *page)
return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
}
-struct z_erofs_decompress_backend {
+struct z_erofs_backend {
struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
struct super_block *sb;
struct z_erofs_pcluster *pcl;
@@ -1149,7 +1113,7 @@ struct z_erofs_bvec_item {
struct list_head list;
};
-static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
struct z_erofs_bvec *bvec)
{
struct z_erofs_bvec_item *item;
@@ -1172,8 +1136,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
list_add(&item->list, &be->decompressed_secondary_bvecs);
}
-static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
- int err)
+static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
{
unsigned int off0 = be->pcl->pageofs_out;
struct list_head *p, *n;
@@ -1214,7 +1177,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
}
}
-static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
{
struct z_erofs_pcluster *pcl = be->pcl;
struct z_erofs_bvec_iter biter;
@@ -1239,8 +1202,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}
-static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
- bool *overlapped)
+static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
{
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
@@ -1275,8 +1237,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
return err;
}
-static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
- int err)
+static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
{
struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
struct z_erofs_pcluster *pcl = be->pcl;
@@ -1393,7 +1354,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
pcl->vcnt = 0;
/* pcluster lock MUST be taken before the following line */
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
+ WRITE_ONCE(pcl->next, NULL);
mutex_unlock(&pcl->lock);
if (z_erofs_is_inline_pcluster(pcl))
@@ -1406,21 +1367,19 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
struct page **pagepool)
{
- struct z_erofs_decompress_backend be = {
+ struct z_erofs_backend be = {
.sb = io->sb,
.pagepool = pagepool,
.decompressed_secondary_bvecs =
LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
+ .pcl = io->head,
};
- z_erofs_next_pcluster_t owned = io->head;
+ struct z_erofs_pcluster *next;
int err = io->eio ? -EIO : 0;
- while (owned != Z_EROFS_PCLUSTER_TAIL) {
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
-
- be.pcl = container_of(owned, struct z_erofs_pcluster, next);
- owned = READ_ONCE(be.pcl->next);
-
+ for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
+ DBG_BUGON(!be.pcl);
+ next = READ_ONCE(be.pcl->next);
err = z_erofs_decompress_pcluster(&be, err) ?: err;
}
return err;
@@ -1486,7 +1445,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
}
static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
- struct z_erofs_decompress_frontend *f,
+ struct z_erofs_frontend *f,
struct z_erofs_pcluster *pcl,
unsigned int nr,
struct address_space *mc)
@@ -1513,12 +1472,8 @@ repeat:
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
folio = page_folio(zbv.page);
- /*
- * Handle preallocated cached folios. We tried to allocate such folios
- * without triggering direct reclaim. If allocation failed, inplace
- * file-backed folios will be used instead.
- */
- if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
+ /* For preallocated managed folios, add them to page cache here */
+ if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
tocache = true;
goto out_tocache;
}
@@ -1630,18 +1585,13 @@ enum {
NR_JOBQUEUES,
};
-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t qtail[],
- z_erofs_next_pcluster_t owned_head)
+static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
+ struct z_erofs_pcluster *next,
+ struct z_erofs_pcluster **qtail[])
{
- z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
- z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
-
WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
-
- WRITE_ONCE(*submit_qtail, owned_head);
- WRITE_ONCE(*bypass_qtail, &pcl->next);
-
+ WRITE_ONCE(*qtail[JQ_SUBMIT], next);
+ WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
qtail[JQ_BYPASS] = &pcl->next;
}
@@ -1670,15 +1620,15 @@ static void z_erofs_endio(struct bio *bio)
bio_put(bio);
}
-static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+static void z_erofs_submit_queue(struct z_erofs_frontend *f,
struct z_erofs_decompressqueue *fgq,
bool *force_fg, bool readahead)
{
struct super_block *sb = f->inode->i_sb;
struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
- z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ struct z_erofs_pcluster **qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
- z_erofs_next_pcluster_t owned_head = f->owned_head;
+ struct z_erofs_pcluster *pcl, *next;
/* bio is NULL initially, so no need to initialize last_{index,bdev} */
erofs_off_t last_pa;
unsigned int nr_bios = 0;
@@ -1694,22 +1644,19 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
- q[JQ_SUBMIT]->head = owned_head;
+ q[JQ_SUBMIT]->head = next = f->head;
do {
struct erofs_map_dev mdev;
- struct z_erofs_pcluster *pcl;
erofs_off_t cur, end;
struct bio_vec bvec;
unsigned int i = 0;
bool bypass = true;
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
- pcl = container_of(owned_head, struct z_erofs_pcluster, next);
- owned_head = READ_ONCE(pcl->next);
-
+ pcl = next;
+ next = READ_ONCE(pcl->next);
if (z_erofs_is_inline_pcluster(pcl)) {
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
continue;
}
@@ -1781,8 +1728,8 @@ drain_io:
if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
- move_to_bypass_jobqueue(pcl, qtail, owned_head);
- } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
+ z_erofs_move_to_bypass_queue(pcl, next, qtail);
+ } while (next != Z_EROFS_PCLUSTER_TAIL);
if (bio) {
if (erofs_is_fileio_mode(EROFS_SB(sb)))
@@ -1806,17 +1753,16 @@ drain_io:
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
-static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
- unsigned int ra_folios)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
- bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
+ bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
int err;
- if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ if (f->head == Z_EROFS_PCLUSTER_TAIL)
return 0;
- z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
+ z_erofs_submit_queue(f, io, &force_fg, !!rapages);
/* handle bypass queue (no i/o pclusters) immediately */
err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
@@ -1834,7 +1780,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
* Since partial uptodate is still unimplemented for now, we have to use
* approximate readmore strategies as a start.
*/
-static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
struct readahead_control *rac, bool backmost)
{
struct inode *inode = f->inode;
@@ -1889,12 +1835,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
struct inode *const inode = folio->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
int err;
trace_erofs_read_folio(folio, false);
- f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
-
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_scan_folio(&f, folio, false);
z_erofs_pcluster_readmore(&f, NULL, false);
@@ -1914,17 +1858,14 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
static void z_erofs_readahead(struct readahead_control *rac)
{
struct inode *const inode = rac->mapping->host;
- struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
struct folio *head = NULL, *folio;
- unsigned int nr_folios;
+ unsigned int nrpages = readahead_count(rac);
int err;
- f.headoffset = readahead_pos(rac);
-
z_erofs_pcluster_readmore(&f, rac, true);
- nr_folios = readahead_count(rac);
- trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
-
+ nrpages = readahead_count(rac);
+ trace_erofs_readpages(inode, readahead_index(rac), nrpages, false);
while ((folio = readahead_folio(rac))) {
folio->private = head;
head = folio;
@@ -1943,7 +1884,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
- (void)z_erofs_runqueue(&f, nr_folios);
+ (void)z_erofs_runqueue(&f, nrpages);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
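
With z_erofs_next_pcluster_t removed, a submission/decompression chain is now a plain singly linked list of struct z_erofs_pcluster terminated by the Z_EROFS_PCLUSTER_TAIL sentinel, while NULL only means "not linked yet". A minimal sketch of walking such a chain, with a hypothetical helper name:

/* Sketch: iterate a pcluster chain built by the code above. */
static void example_walk_chain(struct z_erofs_pcluster *head)
{
	struct z_erofs_pcluster *pcl, *next;

	for (pcl = head; pcl != Z_EROFS_PCLUSTER_TAIL; pcl = next) {
		next = READ_ONCE(pcl->next);	/* link may be rewritten later */
		/* process @pcl here */
	}
}
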
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 4535f2f0a014..689437e99a5a 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -97,17 +97,48 @@ static int get_compacted_la_distance(unsigned int lobits,
return d1;
}
-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
- unsigned int amortizedshift,
- erofs_off_t pos, bool lookahead)
+static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn, bool lookahead)
{
- struct erofs_inode *const vi = EROFS_I(m->inode);
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
+ ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const unsigned int totalidx = erofs_iblks(inode);
+ unsigned int compacted_4b_initial, compacted_2b, amortizedshift;
unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
- bool big_pcluster;
+ bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ erofs_off_t pos;
u8 *in, type;
int i;
+ if (lcn >= totalidx || lclusterbits > 14)
+ return -EINVAL;
+
+ m->lcn = lcn;
+ /* used to align to 32-byte (compacted_2b) alignment */
+ compacted_4b_initial = ((32 - ebase % 32) / 4) & 7;
+ compacted_2b = 0;
+ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+ compacted_4b_initial < totalidx)
+ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+
+ pos = ebase;
+ amortizedshift = 2; /* compact_4b */
+ if (lcn >= compacted_4b_initial) {
+ pos += compacted_4b_initial * 4;
+ lcn -= compacted_4b_initial;
+ if (lcn < compacted_2b) {
+ amortizedshift = 1;
+ } else {
+ pos += compacted_2b * 2;
+ lcn -= compacted_2b;
+ }
+ }
+ pos += lcn * (1 << amortizedshift);
+
+ /* figure out the lcluster count in this pack */
if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits <= 12)
@@ -122,7 +153,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
/* it doesn't equal to round_up(..) */
m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
(vcnt << amortizedshift);
- big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
bytes = pos & ((vcnt << amortizedshift) - 1);
@@ -207,53 +237,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
return 0;
}
-static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
- unsigned long lcn, bool lookahead)
-{
- struct inode *const inode = m->inode;
- struct erofs_inode *const vi = EROFS_I(inode);
- const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
- ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
- unsigned int totalidx = erofs_iblks(inode);
- unsigned int compacted_4b_initial, compacted_2b;
- unsigned int amortizedshift;
- erofs_off_t pos;
-
- if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
- return -EINVAL;
-
- m->lcn = lcn;
- /* used to align to 32-byte (compacted_2b) alignment */
- compacted_4b_initial = (32 - ebase % 32) / 4;
- if (compacted_4b_initial == 32 / 4)
- compacted_4b_initial = 0;
-
- if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
- compacted_4b_initial < totalidx)
- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
- else
- compacted_2b = 0;
-
- pos = ebase;
- if (lcn < compacted_4b_initial) {
- amortizedshift = 2;
- goto out;
- }
- pos += compacted_4b_initial * 4;
- lcn -= compacted_4b_initial;
-
- if (lcn < compacted_2b) {
- amortizedshift = 1;
- goto out;
- }
- pos += compacted_2b * 2;
- lcn -= compacted_2b;
- amortizedshift = 2;
-out:
- pos += lcn * (1 << amortizedshift);
- return unpack_compacted_index(m, amortizedshift, pos, lookahead);
-}
-
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn, bool lookahead)
{
@@ -311,27 +294,23 @@ err_bogus:
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned int initial_lcn)
{
- struct super_block *sb = m->inode->i_sb;
- struct erofs_inode *const vi = EROFS_I(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn;
+ struct inode *inode = m->inode;
+ struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
+ bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2;
+ unsigned long lcn = m->lcn + 1;
int err;
- DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
- m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
+ DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
DBG_BUGON(m->type != m->headtype);
- if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
- ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
- !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
- map->m_plen = 1ULL << lclusterbits;
- return 0;
- }
- lcn = m->lcn + 1;
+ if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) ||
+ ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
+ m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) ||
+ (lcn << vi->z_logical_clusterbits) >= inode->i_size)
+ m->compressedblks = 1;
+
if (m->compressedblks)
goto out;
@@ -356,9 +335,9 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
case Z_EROFS_LCLUSTER_TYPE_HEAD2:
/*
* if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
- * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
+ * rather than CBLKCNT, it's a 1 block-sized pcluster.
*/
- m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
+ m->compressedblks = 1;
break;
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
@@ -373,7 +352,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
return -EFSCORRUPTED;
}
out:
- map->m_plen = erofs_pos(sb, m->compressedblks);
+ m->map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
err_bonus_cblkcnt:
erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 0dd65cefce33..55ff2ab5128e 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
tmp_pages[j] = gbuf->pages[j];
do {
last = j;
- j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
- tmp_pages);
+ j = alloc_pages_bulk(GFP_KERNEL, nrpages,
+ tmp_pages);
if (last == j)
goto out;
} while (j != nrpages);
@@ -243,7 +243,7 @@ void erofs_shrinker_unregister(struct super_block *sb)
static unsigned long erofs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- return atomic_long_read(&erofs_global_shrink_cnt);
+ return atomic_long_read(&erofs_global_shrink_cnt) ?: SHRINK_EMPTY;
}
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f9898e60dd8b..7c0980db77b3 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -318,7 +318,7 @@ static void unlist_file(struct epitems_head *head)
static long long_zero;
static long long_max = LONG_MAX;
-static struct ctl_table epoll_table[] = {
+static const struct ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
diff --git a/fs/exec.c b/fs/exec.c
index 2f0acef8908e..506cd411f4ac 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,18 +205,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
/*
* Avoid relying on expanding the stack down in GUP (which
* does not work for STACK_GROWSUP anyway), and just do it
- * by hand ahead of time.
+ * ahead of time.
*/
- if (write && pos < vma->vm_start) {
- mmap_write_lock(mm);
- ret = expand_downwards(vma, pos);
- if (unlikely(ret < 0)) {
- mmap_write_unlock(mm);
- return NULL;
- }
- mmap_write_downgrade(mm);
- } else
- mmap_read_lock(mm);
+ if (!mmap_read_lock_maybe_expand(mm, vma, pos, write))
+ return NULL;
/*
* We are doing an exec(). 'current' is the process
@@ -892,7 +884,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
.lookup_flags = LOOKUP_FOLLOW,
};
- if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ if ((flags &
+ ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | AT_EXECVE_CHECK)) != 0)
return ERR_PTR(-EINVAL);
if (flags & AT_SYMLINK_NOFOLLOW)
open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
@@ -912,7 +905,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
path_noexec(&file->f_path))
return ERR_PTR(-EACCES);
- err = deny_write_access(file);
+ err = exe_file_deny_write_access(file);
if (err)
return ERR_PTR(err);
@@ -927,7 +920,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
* Returns ERR_PTR on failure or allocated struct file on success.
*
* As this is a wrapper for the internal do_open_execat(), callers
- * must call allow_write_access() before fput() on release. Also see
+ * must call exe_file_allow_write_access() before fput() on release. Also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
@@ -1492,7 +1485,7 @@ static void do_close_execat(struct file *file)
{
if (!file)
return;
- allow_write_access(file);
+ exe_file_allow_write_access(file);
fput(file);
}
@@ -1564,6 +1557,21 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
}
bprm->interp = bprm->filename;
+ /*
+ * At this point, security_file_open() has already been called (with
+ * __FMODE_EXEC) and access control checks for AT_EXECVE_CHECK will
+ * stop just after the security_bprm_creds_for_exec() call in
+ * bprm_execve(). Indeed, the kernel should not try to parse the
+ * content of the file with exec_binprm() nor change the calling
+ * thread, which means that the following security functions will not
+ * be called:
+ * - security_bprm_check()
+ * - security_bprm_creds_from_file()
+ * - security_bprm_committing_creds()
+ * - security_bprm_committed_creds()
+ */
+ bprm->is_check = !!(flags & AT_EXECVE_CHECK);
+
retval = bprm_mm_init(bprm);
if (!retval)
return bprm;
@@ -1806,7 +1814,7 @@ static int exec_binprm(struct linux_binprm *bprm)
bprm->file = bprm->interpreter;
bprm->interpreter = NULL;
- allow_write_access(exec);
+ exe_file_allow_write_access(exec);
if (unlikely(bprm->have_execfd)) {
if (bprm->executable) {
fput(exec);
@@ -1845,7 +1853,7 @@ static int bprm_execve(struct linux_binprm *bprm)
/* Set the unchanging part of bprm->cred */
retval = security_bprm_creds_for_exec(bprm);
- if (retval)
+ if (retval || bprm->is_check)
goto out;
retval = exec_binprm(bprm);
@@ -2151,7 +2159,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ
return error;
}
-static struct ctl_table fs_exec_sysctls[] = {
+static const struct ctl_table fs_exec_sysctls[] = {
{
.procname = "suid_dumpable",
.data = &suid_dumpable,
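
The AT_EXECVE_CHECK plumbing above lets userspace ask whether a file would be allowed to execute without actually executing it; bprm_execve() stops right after security_bprm_creds_for_exec() when bprm->is_check is set. A hedged userspace sketch follows; it assumes UAPI headers that define AT_EXECVE_CHECK, and the exact return-value convention should be checked against the execveat(2) documentation.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Returns 0 if the kernel would permit executing @fd, -1 with errno otherwise. */
static int check_exec(int fd)
{
	char *const argv[] = { "check", NULL };
	char *const envp[] = { NULL };

	return syscall(SYS_execveat, fd, "", argv, envp,
		       AT_EMPTY_PATH | AT_EXECVE_CHECK);
}
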
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 099f80645072..691dd77b6ab5 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -31,10 +31,9 @@ static inline void exfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative anymore. So,
* drop it.
*/
-static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int exfat_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- int ret;
-
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -58,11 +57,7 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- spin_lock(&dentry->d_lock);
- ret = inode_eq_iversion(d_inode(dentry->d_parent),
- exfat_d_version(dentry));
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, exfat_d_version(dentry));
}
/* returns the length of a struct qstr, ignoring trailing dots if necessary */
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e20d59221fc0..c9ca41d91a6c 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -31,8 +31,7 @@ config EXT4_FS
select BUFFER_HEAD
select JBD2
select CRC16
- select CRYPTO
- select CRYPTO_CRC32C
+ select CRC32
select FS_IOMAP
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 74f2071189b2..4e7de7eaa374 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -33,7 +33,7 @@
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
#include <linux/fiemap.h>
@@ -1662,9 +1662,6 @@ struct ext4_sb_info {
/* record the last minlen when FITRIM is called. */
unsigned long s_last_trim_minblks;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
-
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_csum_seed;
@@ -2463,19 +2460,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- BUG_ON(crypto_shash_update(&desc.shash, address, length));
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
#ifdef __KERNEL__
@@ -3276,11 +3261,7 @@ extern int ext4_register_li_request(struct super_block *sb,
static inline int ext4_has_metadata_csum(struct super_block *sb)
{
- WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) &&
- !EXT4_SB(sb)->s_chksum_driver);
-
- return ext4_has_feature_metadata_csum(sb) &&
- (EXT4_SB(sb)->s_chksum_driver != NULL);
+ return ext4_has_feature_metadata_csum(sb);
}
static inline int ext4_has_group_desc_csum(struct super_block *sb)
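
With the crypto_shash plumbing gone, ext4_chksum() above is a thin wrapper around crc32c(), and call sites keep their existing seeding pattern. A minimal sketch of that typical usage, shown only as an assumption mirroring how the UUID-based seed is precomputed elsewhere in ext4:

	/* fold the FS UUID into a seed once, then checksum each buffer with it */
	__u32 seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid));
	__u32 csum = ext4_chksum(sbi, seed, buf, len);	/* now just crc32c(seed, buf, len) */
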
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 26c4fc37edcf..da4263a14a20 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -322,9 +322,7 @@ restart:
WARN_ON(!list_empty(&ei->i_fc_dilist));
spin_unlock(&sbi->s_fc_lock);
- if (fc_dentry->fcd_name.name &&
- fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
- kfree(fc_dentry->fcd_name.name);
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
return;
@@ -449,22 +447,7 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode,
node->fcd_op = dentry_update->op;
node->fcd_parent = dir->i_ino;
node->fcd_ino = inode->i_ino;
- if (dentry->d_name.len > DNAME_INLINE_LEN) {
- node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
- if (!node->fcd_name.name) {
- kmem_cache_free(ext4_fc_dentry_cachep, node);
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
- mutex_lock(&ei->i_fc_lock);
- return -ENOMEM;
- }
- memcpy((u8 *)node->fcd_name.name, dentry->d_name.name,
- dentry->d_name.len);
- } else {
- memcpy(node->fcd_iname, dentry->d_name.name,
- dentry->d_name.len);
- node->fcd_name.name = node->fcd_iname;
- }
- node->fcd_name.len = dentry->d_name.len;
+ take_dentry_name_snapshot(&node->fcd_name, dentry);
INIT_LIST_HEAD(&node->fcd_dilist);
spin_lock(&sbi->s_fc_lock);
if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
@@ -832,7 +815,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
{
struct ext4_fc_dentry_info fcd;
struct ext4_fc_tl tl;
- int dlen = fc_dentry->fcd_name.len;
+ int dlen = fc_dentry->fcd_name.name.len;
u8 *dst = ext4_fc_reserve_space(sb,
EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc);
@@ -847,7 +830,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
dst += EXT4_FC_TAG_BASE_LEN;
memcpy(dst, &fcd, sizeof(fcd));
dst += sizeof(fcd);
- memcpy(dst, fc_dentry->fcd_name.name, dlen);
+ memcpy(dst, fc_dentry->fcd_name.name.name, dlen);
return true;
}
@@ -1328,9 +1311,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
list_del_init(&fc_dentry->fcd_dilist);
spin_unlock(&sbi->s_fc_lock);
- if (fc_dentry->fcd_name.name &&
- fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
- kfree(fc_dentry->fcd_name.name);
+ release_dentry_name_snapshot(&fc_dentry->fcd_name);
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
spin_lock(&sbi->s_fc_lock);
}
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
index 2fadb2c4780c..3bd534e4dbbf 100644
--- a/fs/ext4/fast_commit.h
+++ b/fs/ext4/fast_commit.h
@@ -109,8 +109,7 @@ struct ext4_fc_dentry_update {
int fcd_op; /* Type of update create / unlink / link */
int fcd_parent; /* Parent inode number */
int fcd_ino; /* Inode number */
- struct qstr fcd_name; /* Dirent name */
- unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */
+ struct name_snapshot fcd_name; /* Dirent name */
struct list_head fcd_list;
struct list_head fcd_dilist;
};
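
struct name_snapshot wraps a qstr together with its own stable copy of the name, which is why the fast-commit code above now dereferences fcd_name.name.len and fcd_name.name.name. The generic pattern, sketched with the dcache helpers the patch uses:

/* Sketch: keep a dentry name stable across code that may sleep. */
struct name_snapshot snap;

take_dentry_name_snapshot(&snap, dentry);	/* copies (or pins) the name */
pr_debug("dirent %.*s (%u bytes)\n",
	 (int)snap.name.len, snap.name.name, snap.name.len);
release_dentry_name_snapshot(&snap);		/* must balance every take */
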
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3bd96c3d4cd0..a5205149adba 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -756,6 +756,9 @@ retry:
return VM_FAULT_SIGBUS;
}
} else {
+ result = filemap_fsnotify_fault(vmf);
+ if (unlikely(result))
+ return result;
filemap_invalidate_lock_shared(mapping);
}
result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 785809f33ff4..a50e5c31b937 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1380,8 +1380,6 @@ static void ext4_put_super(struct super_block *sb)
*/
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->s_blockgroup_lock);
fs_put_dax(sbi->s_daxdev, NULL);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
@@ -4634,15 +4632,6 @@ static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_blo
ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
ext4_orphan_file_block_trigger);
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- int ret = PTR_ERR(sbi->s_chksum_driver);
- ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
- sbi->s_chksum_driver = NULL;
- return ret;
- }
-
/* Check superblock checksum */
if (!ext4_superblock_csum_verify(sb, es)) {
ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
@@ -5312,6 +5301,9 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
/* i_version is always enabled now */
sb->s_flags |= SB_I_VERSION;
+ /* HSM events are allowed by default. */
+ sb->s_iflags |= SB_I_ALLOW_HSM;
+
err = ext4_check_feature_compatibility(sb, es, silent);
if (err)
goto failed_mount;
@@ -5687,9 +5679,6 @@ failed_mount3:
del_timer_sync(&sbi->s_err_report);
ext4_group_desc_free(sbi);
failed_mount:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
-
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
@@ -7494,6 +7483,5 @@ static void __exit ext4_exit_fs(void)
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 68a1e23e1557..5916a02fb46d 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -4,8 +4,7 @@ config F2FS_FS
depends on BLOCK
select BUFFER_HEAD
select NLS
- select CRYPTO
- select CRYPTO_CRC32
+ select CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
select FS_IOMAP
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7f26440e8595..985690d81a82 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -846,7 +846,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate)
{
- unsigned long pgidx = pages[index]->index;
+ unsigned long pgidx = page_folio(pages[index])->index;
int i = uptodate ? 0 : 1;
/*
@@ -860,9 +860,11 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
return false;
for (; i < cc->cluster_size; i++) {
- if (pages[index + i]->index != pgidx + i)
+ struct folio *folio = page_folio(pages[index + i]);
+
+ if (folio->index != pgidx + i)
return false;
- if (uptodate && !PageUptodate(pages[index + i]))
+ if (uptodate && !folio_test_uptodate(folio))
return false;
}
@@ -1195,7 +1197,8 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
};
- bool first_index = (index == cc.rpages[0]->index);
+ struct folio *folio = page_folio(cc.rpages[0]);
+ bool first_index = (index == folio->index);
if (copied)
set_cluster_dirty(&cc);
@@ -1239,13 +1242,14 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
int i;
for (i = cluster_size - 1; i >= 0; i--) {
- loff_t start = rpages[i]->index << PAGE_SHIFT;
+ struct folio *folio = page_folio(rpages[i]);
+ loff_t start = folio->index << PAGE_SHIFT;
if (from <= start) {
- zero_user_segment(rpages[i], 0, PAGE_SIZE);
+ folio_zero_segment(folio, 0, folio_size(folio));
} else {
- zero_user_segment(rpages[i], from - start,
- PAGE_SIZE);
+ folio_zero_segment(folio, from - start,
+ folio_size(folio));
break;
}
}
@@ -1278,6 +1282,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1 : 0,
};
+ struct folio *folio;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
@@ -1289,7 +1294,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ mapping_set_error(inode->i_mapping, -EIO);
goto out_free;
}
@@ -1316,7 +1321,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_put_dnode;
}
- psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+ folio = page_folio(cc->rpages[last_index]);
+ psize = folio_pos(folio) + folio_size(folio);
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
@@ -1339,7 +1345,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
- cc->rpages[i + 1]->index, cic);
+ page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];
fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
@@ -1374,7 +1380,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (blkaddr == COMPRESS_ADDR)
fio.compr_blocks++;
if (__is_valid_data_blkaddr(blkaddr))
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
goto unlock_continue;
}
@@ -1384,7 +1390,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
- f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
}
goto unlock_continue;
@@ -1545,6 +1551,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
+ submitted = 0;
ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
&submitted,
NULL, NULL, wbc, io_type,
@@ -1903,11 +1910,12 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
return sbi->compress_inode->i_mapping;
}
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len)
{
if (!sbi->compress_inode)
return;
- invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+ invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
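Aside on the psize computation in the hunk above: folio_pos() + folio_size() reduces to the old (index + 1) << PAGE_SHIFT for an order-0 folio but stays correct once folios span more than one page. A minimal userspace sketch, not f2fs code; the 4 KiB page size and the demo_folio fields are assumptions made only for the demo:

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                       /* assumption: 4 KiB pages */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

struct demo_folio { unsigned long index; unsigned int order; };

static unsigned long long demo_folio_pos(const struct demo_folio *f)
{
	return (unsigned long long)f->index << DEMO_PAGE_SHIFT;
}

static unsigned long demo_folio_size(const struct demo_folio *f)
{
	return DEMO_PAGE_SIZE << f->order;
}

int main(void)
{
	struct demo_folio last = { .index = 41, .order = 0 };

	/* identical for a single-page folio ... */
	assert(((unsigned long long)(last.index + 1) << DEMO_PAGE_SHIFT) ==
	       demo_folio_pos(&last) + demo_folio_size(&last));

	/* ... but only the folio form covers a multi-page folio */
	last.order = 2;
	printf("order-2 folio ends at byte %llu\n",
	       demo_folio_pos(&last) + demo_folio_size(&last));
	return 0;
}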
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a2478c2afb3a..de4da6d9cd93 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -70,9 +70,9 @@ bool f2fs_is_cp_guaranteed(struct page *page)
return false;
}
-static enum count_type __read_io_type(struct page *page)
+static enum count_type __read_io_type(struct folio *folio)
{
- struct address_space *mapping = page_file_mapping(page);
+ struct address_space *mapping = folio->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -136,27 +136,22 @@ struct bio_post_read_ctx {
*/
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
struct bio_post_read_ctx *ctx = bio->bi_private;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ bio_for_each_folio_all(fi, bio) {
+ struct folio *folio = fi.folio;
- if (f2fs_is_compressed_page(page)) {
+ if (f2fs_is_compressed_page(&folio->page)) {
if (ctx && !ctx->decompression_attempted)
- f2fs_end_read_compressed_page(page, true, 0,
+ f2fs_end_read_compressed_page(&folio->page, true, 0,
in_task);
- f2fs_put_page_dic(page, in_task);
+ f2fs_put_page_dic(&folio->page, in_task);
continue;
}
- if (bio->bi_status)
- ClearPageUptodate(page);
- else
- SetPageUptodate(page);
- dec_page_count(F2FS_P_SB(page), __read_io_type(page));
- unlock_page(page);
+ dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
+ folio_end_read(folio, bio->bi_status == 0);
}
if (ctx)
@@ -516,10 +511,6 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
enum page_type type)
{
WARN_ON_ONCE(is_read_io(bio_op(bio)));
-
- if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
- blk_finish_plug(current->plug);
-
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
@@ -689,33 +680,29 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ?
- fio->encrypted_page : fio->page;
+ struct folio *fio_folio = page_folio(fio->page);
+ struct folio *data_folio = fio->encrypted_page ?
+ page_folio(fio->encrypted_page) : fio_folio;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
META_GENERIC : DATA_GENERIC_ENHANCE)))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(data_folio, fio);
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
- f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- page_folio(fio->page)->index, fio, GFP_NOIO);
-
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- return -EFAULT;
- }
+ f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
+ fio_folio->index, fio, GFP_NOIO);
+ bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
- PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
+ __read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -894,7 +881,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;
- trace_f2fs_submit_page_bio(page, fio);
+ trace_f2fs_submit_folio_bio(page_folio(page), fio);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
@@ -1018,7 +1005,7 @@ alloc_new:
io->last_block_in_bio = fio->new_blkaddr;
- trace_f2fs_submit_page_write(fio->page, fio);
+ trace_f2fs_submit_folio_write(page_folio(fio->page), fio);
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1289,7 +1276,7 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct page *page;
- page = find_get_page(mapping, index);
+ page = find_get_page_flags(mapping, index, FGP_ACCESSED);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
@@ -1423,7 +1410,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
- f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
@@ -2464,7 +2451,7 @@ next_page:
static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
- struct inode *inode = folio_file_mapping(folio)->host;
+ struct inode *inode = folio->mapping->host;
int ret = -EAGAIN;
trace_f2fs_readpage(folio, DATA);
@@ -3163,6 +3150,7 @@ continue_unlock:
continue;
}
#endif
+ submitted = 0;
ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
@@ -4043,7 +4031,6 @@ retry:
cur_lblock = 1; /* force Empty message */
sis->max = cur_lblock;
sis->pages = cur_lblock - 1;
- sis->highest_bit = cur_lblock - 1;
out:
if (not_aligned)
f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
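Aside on the read-completion hunk above: folio_end_read() collapses the old SetPageUptodate/ClearPageUptodate plus unlock_page() pair into a single call. The following is a tiny userspace model of those semantics, using invented types rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct folio_model { bool uptodate; bool locked; };

static void folio_end_read_model(struct folio_model *f, bool success)
{
	f->uptodate = success;   /* one flag update instead of two branches */
	f->locked = false;       /* the kernel would also wake lock waiters here */
}

int main(void)
{
	struct folio_model f = { .uptodate = false, .locked = true };

	folio_end_read_model(&f, /* bio->bi_status == 0 */ true);
	printf("uptodate=%d locked=%d\n", f.uptodate, f.locked);
	return 0;
}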
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 47a5c806cf16..54dd52de7269 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -175,7 +175,8 @@ static unsigned long dir_block_index(unsigned int level,
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
struct page *dentry_page,
const struct f2fs_filename *fname,
- int *max_slots)
+ int *max_slots,
+ bool use_hash)
{
struct f2fs_dentry_block *dentry_blk;
struct f2fs_dentry_ptr d;
@@ -183,7 +184,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
make_dentry_ptr_block(dir, &d, dentry_blk);
- return f2fs_find_target_dentry(&d, fname, max_slots);
+ return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
}
static inline int f2fs_match_name(const struct inode *dir,
@@ -208,7 +209,8 @@ static inline int f2fs_match_name(const struct inode *dir,
}
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots)
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
@@ -231,7 +233,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
continue;
}
- if (de->hash_code == fname->hash) {
+ if (!use_hash || de->hash_code == fname->hash) {
res = f2fs_match_name(d->inode, fname,
d->filename[bit_pos],
le16_to_cpu(de->name_len));
@@ -258,11 +260,12 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct page **res_page,
+ bool use_hash)
{
int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
- unsigned int bidx, end_block;
+ unsigned int bidx, end_block, bucket_no;
struct page *dentry_page;
struct f2fs_dir_entry *de = NULL;
pgoff_t next_pgofs;
@@ -272,8 +275,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
+ bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
+
+start_find_bucket:
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- le32_to_cpu(fname->hash) % nbucket);
+ bucket_no);
end_block = bidx + nblock;
while (bidx < end_block) {
@@ -290,7 +296,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
}
- de = find_in_block(dir, dentry_page, fname, &max_slots);
+ de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash);
if (IS_ERR(de)) {
*res_page = ERR_CAST(de);
de = NULL;
@@ -307,12 +313,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
bidx++;
}
- if (!de && room && F2FS_I(dir)->chash != fname->hash) {
- F2FS_I(dir)->chash = fname->hash;
- F2FS_I(dir)->clevel = level;
- }
+ if (de)
+ return de;
- return de;
+ if (likely(use_hash)) {
+ if (room && F2FS_I(dir)->chash != fname->hash) {
+ F2FS_I(dir)->chash = fname->hash;
+ F2FS_I(dir)->clevel = level;
+ }
+ } else if (++bucket_no < nbucket) {
+ goto start_find_bucket;
+ }
+ return NULL;
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
@@ -323,11 +335,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
+ bool use_hash = true;
*res_page = NULL;
+#if IS_ENABLED(CONFIG_UNICODE)
+start_find_entry:
+#endif
if (f2fs_has_inline_dentry(dir)) {
- de = f2fs_find_in_inline_dir(dir, fname, res_page);
+ de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash);
goto out;
}
@@ -343,11 +359,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
}
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, fname, res_page);
+ de = find_in_level(dir, level, fname, res_page, use_hash);
if (de || IS_ERR(*res_page))
break;
}
+
out:
+#if IS_ENABLED(CONFIG_UNICODE)
+ if (IS_CASEFOLDED(dir) && !de && use_hash) {
+ use_hash = false;
+ goto start_find_entry;
+ }
+#endif
/* This is to increase the speed of f2fs_create */
if (!de)
F2FS_I(dir)->task = current;
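Aside on the casefolded-lookup fallback above: the first pass hashes the caller's spelling and probes only the matching bucket; on a miss in a casefolded directory, the second pass (use_hash == false) walks every bucket and relies on name comparison alone. A toy userspace sketch of that two-pass shape; the bucket layout, names, and helpers below are invented for the demo and are not f2fs APIs:

#include <stdio.h>
#include <strings.h>

#define NBUCKET 4

static const char *buckets[NBUCKET][2] = {
	{ "Makefile", NULL }, { "README", NULL }, { "foo.c", NULL }, { NULL, NULL },
};

static unsigned int hash_name(const char *name)
{
	unsigned int h = 0;

	while (*name)
		h = h * 31 + (unsigned char)*name++;
	return h;
}

static int find_in_bucket(unsigned int b, const char *name)
{
	for (int i = 0; i < 2 && buckets[b][i]; i++)
		if (!strcasecmp(buckets[b][i], name))   /* casefolded match */
			return 1;
	return 0;
}

static int lookup(const char *name)
{
	/* pass 1: the hash of the caller's spelling may pick the wrong
	 * bucket for a casefolded name, so a miss is not conclusive */
	if (find_in_bucket(hash_name(name) % NBUCKET, name))
		return 1;

	/* pass 2: use_hash == false - walk every bucket */
	for (unsigned int b = 0; b < NBUCKET; b++)
		if (find_in_bucket(b, name))
			return 1;
	return 0;
}

int main(void)
{
	printf("found: %d\n", lookup("readme"));
	return 0;
}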
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6f2cbf4c5740..1afa7be16e7d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -24,7 +24,6 @@
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <linux/rw_hint.h>
-#include <crypto/hash.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
@@ -1768,9 +1767,6 @@ struct f2fs_sb_info {
u64 sectors_written_start;
u64 kbytes_written;
- /* Reference to checksum algorithm driver via cryptoapi */
- struct crypto_shash *s_chksum_driver;
-
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
@@ -1948,21 +1944,7 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[4];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
-
- desc.shash.tfm = sbi->s_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
+ return crc32(crc, address, length);
}
static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
@@ -2003,9 +1985,14 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
return F2FS_I_SB(mapping->host);
}
+static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio)
+{
+ return F2FS_M_SB(folio->mapping);
+}
+
static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
- return F2FS_M_SB(page_file_mapping(page));
+ return F2FS_F_SB(page_folio(page));
}
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -3583,7 +3570,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
- const struct f2fs_filename *fname, int *max_slots);
+ const struct f2fs_filename *fname, int *max_slots,
+ bool use_hash);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
@@ -3718,7 +3706,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
@@ -4219,7 +4208,8 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page);
+ struct page **res_page,
+ bool use_hash);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
@@ -4386,7 +4376,8 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
-void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -4441,8 +4432,8 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
-static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
- block_t blkaddr) { }
+static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int len) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
@@ -4758,10 +4749,10 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
}
static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
- block_t blkaddr)
+ block_t blkaddr, unsigned int len)
{
- f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
- f2fs_invalidate_compress_page(sbi, blkaddr);
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
+ f2fs_invalidate_compress_pages_range(sbi, blkaddr, len);
}
#define EFSBADCRC EBADMSG /* Bad CRC detected */
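Aside on the __f2fs_crc32() change above: the crypto_shash round-trip is replaced by a direct library call, which works because CRC32 is incremental - feeding the previous result back in as the seed is equivalent to hashing the concatenated data, which is exactly the state the old shash descriptor carried in desc.ctx. A self-contained userspace check of that property; the bit-by-bit IEEE CRC32 below is for illustration only, the real helper lives in the kernel's crc32 library:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_demo(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const char a[] = "f2fs ", b[] = "checkpoint";
	char ab[32];

	snprintf(ab, sizeof(ab), "%s%s", a, b);

	uint32_t two_step = crc32_demo(crc32_demo(0, a, strlen(a)), b, strlen(b));
	uint32_t one_shot = crc32_demo(0, ab, strlen(ab));

	assert(two_step == one_shot);     /* seeding with the previous CRC chains correctly */
	printf("crc32 = %08x\n", one_shot);
	return 0;
}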
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index aa9679b3d8e4..f92a9fba9991 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -621,8 +621,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+ block_t blkstart;
+ int blklen = 0;
addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+ blkstart = le32_to_cpu(*addr);
/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,26 +641,44 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}
if (blkaddr == NULL_ADDR)
- continue;
+ goto next;
f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
- continue;
+ goto next;
if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
- continue;
+ goto next;
if (compressed_cluster)
valid_blocks++;
}
- f2fs_invalidate_blocks(sbi, blkaddr);
+ if (blkstart + blklen == blkaddr) {
+ blklen++;
+ } else {
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+ blkstart = blkaddr;
+ blklen = 1;
+ }
if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;
+
+ continue;
+
+next:
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
+ blkstart = le32_to_cpu(*(addr + 1));
+ blklen = 0;
}
+ if (blklen)
+ f2fs_invalidate_blocks(sbi, blkstart, blklen);
+
if (compressed_cluster)
f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
@@ -747,10 +768,8 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
if (IS_DEVICE_ALIASING(inode)) {
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
struct extent_info ei = et->largest;
- unsigned int i;
- for (i = 0; i < ei.len; i++)
- f2fs_invalidate_blocks(sbi, ei.blk + i);
+ f2fs_invalidate_blocks(sbi, ei.blk, ei.len);
dec_valid_block_count(sbi, inode, ei.len);
f2fs_update_time(sbi, REQ_TIME);
@@ -1323,7 +1342,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
if (ret) {
dec_valid_block_count(sbi, inode, 1);
- f2fs_invalidate_blocks(sbi, *blkaddr);
+ f2fs_invalidate_blocks(sbi, *blkaddr, 1);
} else {
f2fs_update_data_blkaddr(&dn, *blkaddr);
}
@@ -1575,7 +1594,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
break;
}
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
f2fs_set_data_blkaddr(dn, NEW_ADDR);
}
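Aside on the truncation loop above: consecutive block addresses are coalesced into (start, len) runs so that f2fs_invalidate_blocks() is called once per run instead of once per block. A tiny userspace sketch of the same run-length pattern; invalidate_run() merely stands in for the real call:

#include <stddef.h>
#include <stdio.h>

static void invalidate_run(unsigned int start, unsigned int len)
{
	if (len)
		printf("invalidate [%u, +%u)\n", start, len);
}

int main(void)
{
	unsigned int blkaddrs[] = { 100, 101, 102, 250, 251, 400 };
	unsigned int start = blkaddrs[0], len = 0;

	for (size_t i = 0; i < sizeof(blkaddrs) / sizeof(blkaddrs[0]); i++) {
		if (start + len == blkaddrs[i]) {
			len++;                       /* extend the current run */
		} else {
			invalidate_run(start, len);  /* flush and start a new run */
			start = blkaddrs[i];
			len = 1;
		}
	}
	invalidate_run(start, len);                  /* flush the trailing run */
	return 0;
}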
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3e1b6d2ff3a7..faf9fa1c804d 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -806,11 +806,14 @@ retry:
goto out;
}
- if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+ if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
ret = -EBUSY;
- else
- p.min_segno = *result;
- goto out;
+ goto out;
+ }
+ if (gc_type == FG_GC)
+ clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
+ p.min_segno = *result;
+ goto got_result;
}
ret = -ENODATA;
@@ -1412,7 +1415,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
page_address(mpage), PAGE_SIZE);
f2fs_put_page(mpage, 1);
- f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
+ f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 005babf1bed1..3e3c35d4c98b 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -81,7 +81,7 @@ bool f2fs_may_inline_dentry(struct inode *inode)
void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
{
- struct inode *inode = folio_file_mapping(folio)->host;
+ struct inode *inode = folio->mapping->host;
if (folio_test_uptodate(folio))
return;
@@ -352,7 +352,8 @@ process_inline:
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
- struct page **res_page)
+ struct page **res_page,
+ bool use_hash)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_dir_entry *de;
@@ -369,7 +370,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
inline_dentry = inline_data_addr(dir, ipage);
make_dentry_ptr_inline(dir, &d, inline_dentry);
- de = f2fs_find_target_dentry(&d, fname, NULL);
+ de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
unlock_page(ipage);
if (IS_ERR(de)) {
*res_page = ERR_CAST(de);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 282fd320bdb3..3dd25f64d6f1 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -302,15 +302,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
F2FS_TOTAL_EXTRA_ATTR_SIZE);
return false;
}
- if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
- f2fs_has_inline_xattr(inode) &&
- (!fi->i_inline_xattr_size ||
- fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
- __func__, inode->i_ino, fi->i_inline_xattr_size,
- MAX_INLINE_XATTR_SIZE);
- return false;
- }
if (f2fs_sb_has_compression(sbi) &&
fi->i_flags & F2FS_COMPR_FL &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
@@ -320,6 +311,16 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
}
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ f2fs_has_inline_xattr(inode) &&
+ (fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE ||
+ fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu",
+ __func__, inode->i_ino, fi->i_inline_xattr_size,
+ MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE);
+ return false;
+ }
+
if (!f2fs_sb_has_extra_attr(sbi)) {
if (f2fs_sb_has_project_quota(sbi)) {
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 57d46e1439de..a278c7da8177 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -341,6 +341,7 @@ fail_drop:
trace_f2fs_new_inode(inode, err);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
+ make_bad_inode(inode);
if (nid_free)
set_inode_flag(inode, FI_FREE_NID);
clear_nlink(inode);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 0b900a7a48e5..f88392fc4ba9 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -558,6 +558,7 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
block_t blkaddr;
int i;
+ ni->flag = 0;
ni->nid = nid;
retry:
/* Check nat cache */
@@ -916,7 +917,7 @@ static int truncate_node(struct dnode_of_data *dn)
}
/* Deallocate node address */
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -1274,8 +1275,9 @@ int f2fs_remove_inode_page(struct inode *inode)
}
/* remove potential inline_data blocks */
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
+ if (!IS_DEVICE_ALIASING(inode) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
@@ -2763,7 +2765,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
if (err)
return err;
- f2fs_invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index f35be2c48e3c..69a2027e3ebc 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -899,10 +899,8 @@ skip:
* and the f2fs is not read only, check and fix zoned block devices'
* write pointer consistency.
*/
- if (!err) {
+ if (!err)
err = f2fs_check_and_fix_write_pointer(sbi);
- ret = err;
- }
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index eade36c5ef13..c282e8a0a2ec 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -201,6 +201,12 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_FILE);
if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ /*
+ * The vfs inode keeps clean during commit, but the f2fs inode
+ * doesn't. So clear the dirty state after commit and let
+ * f2fs_mark_inode_dirty_sync ensure a consistent dirty state.
+ */
+ f2fs_inode_synced(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
stat_dec_atomic_inode(inode);
@@ -245,7 +251,7 @@ retry:
if (!__is_valid_data_blkaddr(new_addr)) {
if (new_addr == NULL_ADDR)
dec_valid_block_count(sbi, inode, 1);
- f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
+ f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1);
f2fs_update_data_blkaddr(&dn, new_addr);
} else {
f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -2426,78 +2432,38 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
SIT_I(sbi)->max_mtime = ctime;
}
-static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+/*
+ * NOTE: when updating multiple blocks at the same time, please ensure
+ * that the consecutive input blocks belong to the same segment.
+ */
+static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ block_t blkaddr, unsigned int offset, int del)
{
- struct seg_entry *se;
- unsigned int segno, offset;
- long int new_vblocks;
bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
bool mir_exist;
#endif
+ int i;
+ int del_count = -del;
- segno = GET_SEGNO(sbi, blkaddr);
- if (segno == NULL_SEGNO)
- return;
-
- se = get_seg_entry(sbi, segno);
- new_vblocks = se->valid_blocks + del;
- offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- f2fs_bug_on(sbi, (new_vblocks < 0 ||
- (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
-
- se->valid_blocks = new_vblocks;
-
- /* Update valid block bitmap */
- if (del > 0) {
- exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
-#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_set_bit(offset,
- se->cur_valid_map_mir);
- if (unlikely(exist != mir_exist)) {
- f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
- f2fs_bug_on(sbi, 1);
- }
-#endif
- if (unlikely(exist)) {
- f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
- blkaddr);
- f2fs_bug_on(sbi, 1);
- se->valid_blocks--;
- del = 0;
- }
-
- if (f2fs_block_unit_discard(sbi) &&
- !f2fs_test_and_set_bit(offset, se->discard_map))
- sbi->discard_blks--;
+ f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1));
- /*
- * SSR should never reuse block which is checkpointed
- * or newly invalidated.
- */
- if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
- if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
- se->ckpt_valid_blocks++;
- }
- } else {
- exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+ for (i = 0; i < del_count; i++) {
+ exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- mir_exist = f2fs_test_and_clear_bit(offset,
+ mir_exist = f2fs_test_and_clear_bit(offset + i,
se->cur_valid_map_mir);
if (unlikely(exist != mir_exist)) {
f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
- blkaddr, exist);
+ blkaddr + i, exist);
f2fs_bug_on(sbi, 1);
}
#endif
if (unlikely(!exist)) {
- f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
- blkaddr);
+ f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i);
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
- del = 0;
+ del += 1;
} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
/*
* If checkpoints are off, we must not reuse data that
@@ -2505,7 +2471,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
* before, we must track that to know how much space we
* really have.
*/
- if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+ if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) {
spin_lock(&sbi->stat_lock);
sbi->unusable_block_count++;
spin_unlock(&sbi->stat_lock);
@@ -2513,12 +2479,91 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
}
if (f2fs_block_unit_discard(sbi) &&
- f2fs_test_and_clear_bit(offset, se->discard_map))
+ f2fs_test_and_clear_bit(offset + i, se->discard_map))
sbi->discard_blks++;
+
+ if (!f2fs_test_bit(offset + i, se->ckpt_valid_map))
+ se->ckpt_valid_blocks -= 1;
+ }
+
+ return del;
+}
+
+static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se,
+ block_t blkaddr, unsigned int offset, int del)
+{
+ bool exist;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bool mir_exist;
+#endif
+
+ exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ mir_exist = f2fs_test_and_set_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+ f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
+ blkaddr, exist);
+ f2fs_bug_on(sbi, 1);
}
+#endif
+ if (unlikely(exist)) {
+ f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks--;
+ del = 0;
+ }
+
+ if (f2fs_block_unit_discard(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
+ sbi->discard_blks--;
+
+ /*
+ * SSR should never reuse block which is checkpointed
+ * or newly invalidated.
+ */
+ if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+ se->ckpt_valid_blocks++;
+ }
+
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks += del;
+ return del;
+}
+
+/*
+ * If releasing blocks, this function supports updating multiple consecutive blocks
+ * at one time, but please note that these consecutive blocks need to belong to the
+ * same segment.
+ */
+static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+{
+ struct seg_entry *se;
+ unsigned int segno, offset;
+ long int new_vblocks;
+
+ segno = GET_SEGNO(sbi, blkaddr);
+ if (segno == NULL_SEGNO)
+ return;
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+ offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+ f2fs_bug_on(sbi, (new_vblocks < 0 ||
+ (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
+
+ se->valid_blocks = new_vblocks;
+
+ /* Update valid block bitmap */
+ if (del > 0) {
+ del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del);
+ } else {
+ del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del);
+ }
+
__mark_sit_entry_dirty(sbi, segno);
/* update total number of valid blocks to be written in ckpt area */
@@ -2528,25 +2573,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
-void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr,
+ unsigned int len)
{
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
+ block_t addr_start = addr, addr_end = addr + len - 1;
+ unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1;
+ unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt;
f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
- f2fs_invalidate_internal_cache(sbi, addr);
+ f2fs_invalidate_internal_cache(sbi, addr, len);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
- update_segment_mtime(sbi, addr, 0);
- update_sit_entry(sbi, addr, -1);
+ if (seg_num == 1)
+ cnt = len;
+ else
+ cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr);
+
+ do {
+ update_segment_mtime(sbi, addr_start, 0);
+ update_sit_entry(sbi, addr_start, -cnt);
- /* add it into dirty seglist */
- locate_dirty_segment(sbi, segno);
+ /* add it into dirty seglist */
+ locate_dirty_segment(sbi, segno);
+
+ /* update @addr_start and @cnt and @segno */
+ addr_start = START_BLOCK(sbi, ++segno);
+ if (++i == seg_num)
+ cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1;
+ else
+ cnt = max_blocks;
+ } while (i <= seg_num);
up_write(&sit_i->sentry_lock);
}
@@ -3857,7 +3920,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
goto out;
}
if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
- f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
+ f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -4049,7 +4112,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
update_sit_entry(sbi, new_blkaddr, 1);
}
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -5405,7 +5468,8 @@ int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi)
{
int ret;
- if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb))
+ if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) ||
+ f2fs_hw_is_readonly(sbi))
return 0;
f2fs_notice(sbi, "Checking entire write pointers");
@@ -5492,8 +5556,10 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
secno = GET_SEC_FROM_SEG(sbi, segno);
start = GET_SEG_FROM_SEC(sbi, secno);
- if (!__is_large_section(sbi))
- return get_seg_entry(sbi, start + i)->mtime;
+ if (!__is_large_section(sbi)) {
+ mtime = get_seg_entry(sbi, start + i)->mtime;
+ goto out;
+ }
for (i = 0; i < usable_segs_per_sec; i++) {
/* for large section, only check the mtime of valid segments */
@@ -5506,7 +5572,11 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi,
if (total_valid_blocks == 0)
return INVALID_MTIME;
- return div_u64(mtime, total_valid_blocks);
+ mtime = div_u64(mtime, total_valid_blocks);
+out:
+ if (unlikely(mtime == INVALID_MTIME))
+ mtime -= 1;
+ return mtime;
}
/*
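Aside on the new range form of f2fs_invalidate_blocks() above: a [addr, addr + len) range is split into per-segment chunks - a possibly partial head, full middle segments, and a partial tail - with one update_sit_entry() call per segment. A userspace walk-through of that splitting arithmetic; the 512 blocks-per-segment geometry is made up purely for the demo:

#include <stdio.h>

#define BLKS_PER_SEG 512u   /* assumption for the demo */

int main(void)
{
	unsigned int addr = 1000, len = 1300;
	unsigned int end = addr + len - 1;
	unsigned int segno = addr / BLKS_PER_SEG;
	unsigned int seg_num = end / BLKS_PER_SEG - segno + 1;
	unsigned int start = addr, cnt, i = 1;

	cnt = (seg_num == 1) ? len : BLKS_PER_SEG - (addr % BLKS_PER_SEG);
	do {
		printf("segment %u: release %u blocks from %u\n", segno, cnt, start);
		start = ++segno * BLKS_PER_SEG;        /* first block of next segment */
		if (++i == seg_num)
			cnt = end % BLKS_PER_SEG + 1;  /* partial tail segment */
		else
			cnt = BLKS_PER_SEG;            /* full middle segment */
	} while (i <= seg_num);
	return 0;
}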
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index fc7d463dee15..19b67828ae32 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1694,8 +1694,6 @@ static void f2fs_put_super(struct super_block *sb)
kvfree(sbi->ckpt);
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
f2fs_destroy_page_array_cache(sbi);
@@ -4466,15 +4464,6 @@ try_onemore:
}
mutex_init(&sbi->flush_lock);
- /* Load the checksum driver */
- sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(sbi->s_chksum_driver)) {
- f2fs_err(sbi, "Cannot load crc32 driver.");
- err = PTR_ERR(sbi->s_chksum_driver);
- sbi->s_chksum_driver = NULL;
- goto free_sbi;
- }
-
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_err(sbi, "unable to set blocksize");
@@ -4919,8 +4908,6 @@ free_options:
free_sb_buf:
kfree(raw_super);
free_sbi:
- if (sbi->s_chksum_driver)
- crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
sb->s_fs_info = NULL;
@@ -5127,5 +5114,3 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32");
-
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 6b99dc49f776..d15c68b28952 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -1472,7 +1472,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
le32_to_cpu(sbi->raw_super->segment_count_main);
int i, j;
- seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps|mtime\n"
"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
for (i = 0; i < total_segs; i++) {
@@ -1482,6 +1482,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_printf(seq, "| %llx", se->mtime);
seq_putc(seq, '\n');
}
return 0;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 15bf32c21ac0..926c26e90ef8 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,17 +43,13 @@ static inline void vfat_d_version_set(struct dentry *dentry,
* If it happened, the negative dentry isn't actually negative
* anymore. So, drop it.
*/
-static int vfat_revalidate_shortname(struct dentry *dentry)
+static bool vfat_revalidate_shortname(struct dentry *dentry, struct inode *dir)
{
- int ret = 1;
- spin_lock(&dentry->d_lock);
- if (!inode_eq_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
- ret = 0;
- spin_unlock(&dentry->d_lock);
- return ret;
+ return inode_eq_iversion(dir, vfat_d_version(dentry));
}
-static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -61,10 +57,11 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
/* This is not negative dentry. Always valid. */
if (d_really_is_positive(dentry))
return 1;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
-static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+static int vfat_revalidate_ci(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -97,7 +94,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- return vfat_revalidate_shortname(dentry);
+ return vfat_revalidate_shortname(dentry, dir);
}
/* returns the length of a struct qstr, ignoring trailing dots */
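Aside on the vfat revalidation change above: a cached negative dentry carries the directory's change version from the time of the failed lookup, and the cached miss stays valid only while that version still matches. A minimal userspace model of the check; the struct names are invented for the demo:

#include <stdbool.h>
#include <stdio.h>

struct dir_model    { unsigned long iversion; };
struct dentry_model { unsigned long d_version; };   /* stamped at lookup time */

static bool negative_dentry_valid(const struct dentry_model *d,
				  const struct dir_model *dir)
{
	return d->d_version == dir->iversion;
}

int main(void)
{
	struct dir_model dir = { .iversion = 7 };
	struct dentry_model miss = { .d_version = dir.iversion };

	printf("valid before change: %d\n", negative_dentry_valid(&miss, &dir));
	dir.iversion++;      /* a create/rename in the directory bumps the version */
	printf("valid after change:  %d\n", negative_dentry_valid(&miss, &dir));
	return 0;
}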
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 49884fa3c81d..5598e4d57422 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -1158,10 +1158,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ !=
HWEIGHT32(
(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
- __FMODE_EXEC | __FMODE_NONOTIFY));
+ __FMODE_EXEC));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0,
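Aside on the fcntl hunk above: the BUILD_BUG_ON counts the distinct open-flag bits with a population count, and dropping __FMODE_NONOTIFY from the mask lowers the expected count from 21 to 20. A userspace sketch of the same counting idea; the flag values below are invented for the demo:

#include <assert.h>
#include <stdio.h>

#define DEMO_O_WRONLY 0x0001
#define DEMO_O_RDWR   0x0002
#define DEMO_O_CREAT  0x0040
#define DEMO_O_TRUNC  0x0200

int main(void)
{
	unsigned int valid = DEMO_O_WRONLY | DEMO_O_RDWR | DEMO_O_CREAT | DEMO_O_TRUNC;

	/* compile-time in the kernel (BUILD_BUG_ON + HWEIGHT32), runtime here */
	assert(__builtin_popcount(valid) == 4);
	printf("distinct flag bits: %d\n", __builtin_popcount(valid));
	return 0;
}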
diff --git a/fs/file_table.c b/fs/file_table.c
index a32171d2b83f..5c00dc38558d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -106,7 +106,7 @@ static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table fs_stat_sysctls[] = {
+static const struct ctl_table fs_stat_sysctls[] = {
{
.procname = "file-nr",
.data = &files_stat,
@@ -194,6 +194,11 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
* refcount bumps we should reinitialize the reused file first.
*/
file_ref_init(&f->f_ref, 1);
+ /*
+ * Disable permission and pre-content events for all files by default.
+ * They may be enabled later by file_set_fsnotify_mode_from_watchers().
+ */
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
return 0;
}
@@ -351,9 +356,7 @@ static struct file *alloc_file(const struct path *path, int flags,
static inline int alloc_path_pseudo(const char *name, struct inode *inode,
struct vfsmount *mnt, struct path *path)
{
- struct qstr this = QSTR_INIT(name, strlen(name));
-
- path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+ path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
if (!path->dentry)
return -ENOMEM;
path->mnt = mntget(mnt);
@@ -377,7 +380,13 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
+ return file;
}
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
@@ -402,6 +411,11 @@ struct file *alloc_file_pseudo_noaccount(struct inode *inode,
return file;
}
file_init_path(file, &path, fops);
+ /*
+ * Disable all fsnotify events for pseudo files by default.
+ * They may be enabled by caller with file_set_fsnotify_mode().
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY);
return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 8674dbfbe59d..ca215a3cba3e 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -63,3 +63,15 @@ config FUSE_PASSTHROUGH
to be performed directly on a backing file.
If you want to allow passthrough operations, answer Y.
+
+config FUSE_IO_URING
+ bool "FUSE communication over io-uring"
+ default y
+ depends on FUSE_FS
+ depends on IO_URING
+ help
+ This allows sending FUSE requests over the io-uring interface and
+ also adds request core affinity.
+
+ If you want to allow fuse server/client communication through io-uring,
+ answer Y
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 2c372180d631..3f0f312a31c1 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -15,5 +15,6 @@ fuse-y += iomode.o
fuse-$(CONFIG_FUSE_DAX) += dax.o
fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o
fuse-$(CONFIG_SYSCTL) += sysctl.o
+fuse-$(CONFIG_FUSE_IO_URING) += dev_uring.o
virtiofs-y := virtio_fs.o
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 9abbc2f2894f..0b6ee6dd1fd6 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -240,11 +240,12 @@ static int fuse_send_removemapping(struct inode *inode,
args.opcode = FUSE_REMOVEMAPPING;
args.nodeid = fi->nodeid;
- args.in_numargs = 2;
- args.in_args[0].size = sizeof(*inargp);
- args.in_args[0].value = inargp;
- args.in_args[1].size = inargp->count * sizeof(*remove_one);
- args.in_args[1].value = remove_one;
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = sizeof(*inargp);
+ args.in_args[1].value = inargp;
+ args.in_args[2].size = inargp->count * sizeof(*remove_one);
+ args.in_args[2].value = remove_one;
return fuse_simple_request(fm, &args);
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 27ccae63495d..5b5f789b37eb 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -6,7 +6,9 @@
See the file COPYING.
*/
+#include "dev_uring_i.h"
#include "fuse_i.h"
+#include "fuse_dev_i.h"
#include <linux/init.h>
#include <linux/module.h>
@@ -28,23 +30,8 @@
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
-/* Ordinary requests have even IDs, while interrupts IDs are odd */
-#define FUSE_INT_REQ_BIT (1ULL << 0)
-#define FUSE_REQ_ID_STEP (1ULL << 1)
-
static struct kmem_cache *fuse_req_cachep;
-static void end_requests(struct list_head *head);
-
-static struct fuse_dev *fuse_get_dev(struct file *file)
-{
- /*
- * Lockless access is OK, because file->private data is set
- * once during mount and is valid until the file is released.
- */
- return READ_ONCE(file->private_data);
-}
-
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
@@ -89,7 +76,8 @@ void fuse_set_initialized(struct fuse_conn *fc)
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
- return !fc->initialized || (for_background && fc->blocked);
+ return !fc->initialized || (for_background && fc->blocked) ||
+ (fc->io_uring && !fuse_uring_ready(fc));
}
static void fuse_drop_waiting(struct fuse_conn *fc)
@@ -234,7 +222,7 @@ u64 fuse_get_unique(struct fuse_iqueue *fiq)
}
EXPORT_SYMBOL_GPL(fuse_get_unique);
-static unsigned int fuse_req_hash(u64 unique)
+unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
@@ -250,7 +238,8 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
-static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
@@ -263,7 +252,7 @@ static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_li
}
}
-static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (list_empty(&req->intr_entry)) {
@@ -580,7 +569,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
return ret;
}
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+ struct fuse_req *req)
+{
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ req->in.h.unique = fuse_get_unique(fiq);
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
+
+ return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
{
struct fuse_mount *fm = req->fm;
struct fuse_conn *fc = fm->fc;
@@ -592,6 +599,12 @@ static bool fuse_request_queue_background(struct fuse_req *req)
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+ if (fuse_uring_ready(fc))
+ return fuse_request_queue_background_uring(fc, req);
+#endif
+
spin_lock(&fc->bg_lock);
if (likely(fc->connected)) {
fc->num_background++;
@@ -692,22 +705,8 @@ static int unlock_request(struct fuse_req *req)
return err;
}
-struct fuse_copy_state {
- int write;
- struct fuse_req *req;
- struct iov_iter *iter;
- struct pipe_buffer *pipebufs;
- struct pipe_buffer *currbuf;
- struct pipe_inode_info *pipe;
- unsigned long nr_segs;
- struct page *pg;
- unsigned len;
- unsigned offset;
- unsigned move_pages:1;
-};
-
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
- struct iov_iter *iter)
+void fuse_copy_init(struct fuse_copy_state *cs, int write,
+ struct iov_iter *iter)
{
memset(cs, 0, sizeof(*cs));
cs->write = write;
@@ -814,6 +813,9 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
*size -= ncpy;
cs->len -= ncpy;
cs->offset += ncpy;
+ if (cs->is_uring)
+ cs->ring.copied_sz += ncpy;
+
return ncpy;
}
@@ -1068,9 +1070,9 @@ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
}
/* Copy request arguments to/from userspace buffer */
-static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
- unsigned argpages, struct fuse_arg *args,
- int zeroing)
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
+ unsigned argpages, struct fuse_arg *args,
+ int zeroing)
{
int err = 0;
unsigned i;
@@ -1760,7 +1762,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
args = &ap->args;
args->nodeid = outarg->nodeid;
args->opcode = FUSE_NOTIFY_REPLY;
- args->in_numargs = 2;
+ args->in_numargs = 3;
args->in_pages = true;
args->end = fuse_retrieve_end;
@@ -1788,9 +1790,10 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
}
ra->inarg.offset = outarg->offset;
ra->inarg.size = total_len;
- args->in_args[0].size = sizeof(ra->inarg);
- args->in_args[0].value = &ra->inarg;
- args->in_args[1].size = total_len;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = sizeof(ra->inarg);
+ args->in_args[1].value = &ra->inarg;
+ args->in_args[2].size = total_len;
err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
if (err)
@@ -1885,7 +1888,7 @@ static void fuse_resend(struct fuse_conn *fc)
spin_unlock(&fiq->lock);
list_for_each_entry(req, &to_queue, list)
clear_bit(FR_PENDING, &req->flags);
- end_requests(&to_queue);
+ fuse_dev_end_requests(&to_queue);
return;
}
/* iq and pq requests are both oldest to newest */
@@ -1934,7 +1937,7 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
}
/* Look up request on processing list by unique ID */
-static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique)
{
unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
@@ -1946,10 +1949,17 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
- unsigned nbytes)
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned nbytes)
{
- unsigned reqsize = sizeof(struct fuse_out_header);
+
+ unsigned int reqsize = 0;
+
+ /*
+ * Uring has all headers separated from args - args is payload only
+ */
+ if (!cs->is_uring)
+ reqsize = sizeof(struct fuse_out_header);
reqsize += fuse_len_args(args->out_numargs, args->out_args);
@@ -2011,7 +2021,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_lock(&fpq->lock);
req = NULL;
if (fpq->connected)
- req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
+ req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
err = -ENOENT;
if (!req) {
@@ -2049,7 +2059,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
if (oh.error)
err = nbytes != sizeof(oh) ? -EINVAL : 0;
else
- err = copy_out_args(cs, req->args, nbytes);
+ err = fuse_copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -2204,7 +2214,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
}
/* Abort all requests on the given list (pending or processing) */
-static void end_requests(struct list_head *head)
+void fuse_dev_end_requests(struct list_head *head)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -2307,7 +2317,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
+
+ /*
+ * fc->lock must not be taken to avoid conflicts with io-uring
+ * locks
+ */
+ fuse_uring_abort(fc);
} else {
spin_unlock(&fc->lock);
}
@@ -2319,6 +2335,8 @@ void fuse_wait_aborted(struct fuse_conn *fc)
/* matches implicit memory barrier in fuse_drop_waiting() */
smp_mb();
wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+
+ fuse_uring_wait_stopped_queues(fc);
}
int fuse_dev_release(struct inode *inode, struct file *file)
@@ -2337,7 +2355,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
- end_requests(&to_end);
+ fuse_dev_end_requests(&to_end);
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
@@ -2475,6 +2493,9 @@ const struct file_operations fuse_dev_operations = {
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+#ifdef CONFIG_FUSE_IO_URING
+ .uring_cmd = fuse_uring_cmd,
+#endif
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
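Aside on fuse_request_queue_background_uring() above: before a background request is handed to the ring it is given its unique id and a precomputed total length - the in-header plus the sum of the argument sizes, which fuse_len_args() provides in the kernel. A small userspace sketch of that length computation; the struct layouts are invented for the demo:

#include <stdint.h>
#include <stdio.h>

struct demo_in_header { uint64_t unique; uint32_t opcode; uint32_t len; };
struct demo_arg { uint32_t size; const void *value; };

static uint32_t demo_len_args(unsigned int numargs, const struct demo_arg *args)
{
	uint32_t len = 0;

	for (unsigned int i = 0; i < numargs; i++)
		len += args[i].size;
	return len;
}

int main(void)
{
	const char name[] = "hello";
	struct demo_arg args[2] = {
		{ .size = 16, .value = NULL },            /* fixed inarg */
		{ .size = sizeof(name), .value = name },  /* trailing name */
	};
	uint32_t len = sizeof(struct demo_in_header) + demo_len_args(2, args);

	printf("request length: %u bytes\n", (unsigned int)len);
	return 0;
}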
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
new file mode 100644
index 000000000000..ebd2931b4f2a
--- /dev/null
+++ b/fs/fuse/dev_uring.c
@@ -0,0 +1,1319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#include "fuse_i.h"
+#include "dev_uring_i.h"
+#include "fuse_dev_i.h"
+
+#include <linux/fs.h>
+#include <linux/io_uring/cmd.h>
+
+static bool __read_mostly enable_uring;
+module_param(enable_uring, bool, 0644);
+MODULE_PARM_DESC(enable_uring,
+ "Enable userspace communication through io-uring");
+
+#define FUSE_URING_IOV_SEGS 2 /* header and payload */
+
+
+bool fuse_uring_enabled(void)
+{
+ return enable_uring;
+}
+
+struct fuse_uring_pdu {
+ struct fuse_ring_ent *ent;
+};
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops;
+
+static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_ent *ring_ent)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ pdu->ent = ring_ent;
+}
+
+static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
+{
+ struct fuse_uring_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu);
+
+ return pdu->ent;
+}
+
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_held(&queue->lock);
+ lockdep_assert_held(&fc->bg_lock);
+
+ /*
+ * Allow one bg request per queue, ignoring global fc limits.
+ * This prevents a single queue from consuming all resources and
+ * eliminates the need for remote queue wake-ups when global
+ * limits are met but this queue has no more waiting requests.
+ */
+ while ((fc->active_background < fc->max_background ||
+ !queue->active_background) &&
+ (!list_empty(&queue->fuse_req_bg_queue))) {
+ struct fuse_req *req;
+
+ req = list_first_entry(&queue->fuse_req_bg_queue,
+ struct fuse_req, list);
+ fc->active_background++;
+ queue->active_background++;
+
+ list_move_tail(&req->list, &queue->fuse_req_queue);
+ }
+}
+
+static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
+ int error)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+
+ lockdep_assert_not_held(&queue->lock);
+ spin_lock(&queue->lock);
+ ent->fuse_req = NULL;
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
+ queue->active_background--;
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ }
+
+ spin_unlock(&queue->lock);
+
+ if (error)
+ req->out.h.error = error;
+
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+}
+
+/* Abort all list queued request on the given ring queue */
+static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue)
+{
+ struct fuse_req *req;
+ LIST_HEAD(req_list);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry(req, &queue->fuse_req_queue, list)
+ clear_bit(FR_PENDING, &req->flags);
+ list_splice_init(&queue->fuse_req_queue, &req_list);
+ spin_unlock(&queue->lock);
+
+ /* must not hold queue lock to avoid order issues with fi->lock */
+ fuse_dev_end_requests(&req_list);
+}
+
+void fuse_uring_abort_end_requests(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ struct fuse_conn *fc = ring->fc;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ queue->stopped = true;
+
+ WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+ spin_lock(&queue->lock);
+ spin_lock(&fc->bg_lock);
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+ spin_unlock(&queue->lock);
+ fuse_uring_abort_end_queue_requests(queue);
+ }
+}
+
+void fuse_uring_destruct(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ int qid;
+
+ if (!ring)
+ return;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+ struct fuse_ring_ent *ent, *next;
+
+ if (!queue)
+ continue;
+
+ WARN_ON(!list_empty(&queue->ent_avail_queue));
+ WARN_ON(!list_empty(&queue->ent_w_req_queue));
+ WARN_ON(!list_empty(&queue->ent_commit_queue));
+ WARN_ON(!list_empty(&queue->ent_in_userspace));
+
+ list_for_each_entry_safe(ent, next, &queue->ent_released,
+ list) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ ring->queues[qid] = NULL;
+ }
+
+ kfree(ring->queues);
+ kfree(ring);
+ fc->ring = NULL;
+}
+
+/*
+ * Basic ring setup for this connection based on the provided configuration
+ */
+static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring;
+ size_t nr_queues = num_possible_cpus();
+ struct fuse_ring *res = NULL;
+ size_t max_payload_size;
+
+ ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT);
+ if (!ring)
+ return NULL;
+
+ ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *),
+ GFP_KERNEL_ACCOUNT);
+ if (!ring->queues)
+ goto out_err;
+
+ max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write);
+ max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);
+
+ spin_lock(&fc->lock);
+ if (fc->ring) {
+ /* race, another thread created the ring in the meantime */
+ spin_unlock(&fc->lock);
+ res = fc->ring;
+ goto out_err;
+ }
+
+ init_waitqueue_head(&ring->stop_waitq);
+
+ fc->ring = ring;
+ ring->nr_queues = nr_queues;
+ ring->fc = fc;
+ ring->max_payload_sz = max_payload_size;
+ atomic_set(&ring->queue_refs, 0);
+
+ spin_unlock(&fc->lock);
+ return ring;
+
+out_err:
+ kfree(ring->queues);
+ kfree(ring);
+ return res;
+}
+
+static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
+ int qid)
+{
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_ring_queue *queue;
+ struct list_head *pq;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT);
+ if (!queue)
+ return NULL;
+ pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
+ if (!pq) {
+ kfree(queue);
+ return NULL;
+ }
+
+ queue->qid = qid;
+ queue->ring = ring;
+ spin_lock_init(&queue->lock);
+
+ INIT_LIST_HEAD(&queue->ent_avail_queue);
+ INIT_LIST_HEAD(&queue->ent_commit_queue);
+ INIT_LIST_HEAD(&queue->ent_w_req_queue);
+ INIT_LIST_HEAD(&queue->ent_in_userspace);
+ INIT_LIST_HEAD(&queue->fuse_req_queue);
+ INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
+ INIT_LIST_HEAD(&queue->ent_released);
+
+ queue->fpq.processing = pq;
+ fuse_pqueue_init(&queue->fpq);
+
+ spin_lock(&fc->lock);
+ if (ring->queues[qid]) {
+ spin_unlock(&fc->lock);
+ kfree(queue->fpq.processing);
+ kfree(queue);
+ return ring->queues[qid];
+ }
+
+ /*
+ * write_once and lock as the caller mostly doesn't take the lock at all
+ */
+ WRITE_ONCE(ring->queues[qid], queue);
+ spin_unlock(&fc->lock);
+
+ return queue;
+}
+
+static void fuse_uring_stop_fuse_req_end(struct fuse_req *req)
+{
+ clear_bit(FR_SENT, &req->flags);
+ req->out.h.error = -ECONNABORTED;
+ fuse_request_end(req);
+}
+
+/*
+ * Release a request/entry on connection tear down
+ */
+static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
+{
+ struct fuse_req *req;
+ struct io_uring_cmd *cmd;
+
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ req = ent->fuse_req;
+ ent->fuse_req = NULL;
+ if (req) {
+ /* remove entry from queue->fpq->processing */
+ list_del_init(&req->list);
+ }
+
+ /*
+ * The entry must not be freed immediately, as IO_URING_F_CANCEL handling
+ * accesses entries through direct pointers - there is a risk of a race with
+ * daemon termination (which triggers IO_URING_F_CANCEL and accesses entries
+ * without checking the list state first).
+ */
+ list_move(&ent->list, &queue->ent_released);
+ ent->state = FRRS_RELEASED;
+ spin_unlock(&queue->lock);
+
+ if (cmd)
+ io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED);
+
+ if (req)
+ fuse_uring_stop_fuse_req_end(req);
+}
+
+static void fuse_uring_stop_list_entries(struct list_head *head,
+ struct fuse_ring_queue *queue,
+ enum fuse_ring_req_state exp_state)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent, *next;
+ ssize_t queue_refs = SSIZE_MAX;
+ LIST_HEAD(to_teardown);
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(ent, next, head, list) {
+ if (ent->state != exp_state) {
+ pr_warn("entry teardown qid=%d state=%d expected=%d\n",
+ queue->qid, ent->state, exp_state);
+ continue;
+ }
+
+ ent->state = FRRS_TEARDOWN;
+ list_move(&ent->list, &to_teardown);
+ }
+ spin_unlock(&queue->lock);
+
+ /* no queue lock to avoid lock order issues */
+ list_for_each_entry_safe(ent, next, &to_teardown, list) {
+ fuse_uring_entry_teardown(ent);
+ queue_refs = atomic_dec_return(&ring->queue_refs);
+ WARN_ON_ONCE(queue_refs < 0);
+ }
+}
+
+static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue)
+{
+ fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue,
+ FRRS_USERSPACE);
+ fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue,
+ FRRS_AVAILABLE);
+}
+
+/*
+ * Log state debug info
+ */
+static void fuse_uring_log_ent_state(struct fuse_ring *ring)
+{
+ int qid;
+ struct fuse_ring_ent *ent;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = ring->queues[qid];
+
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ /*
+ * Log entries from the intermediate queues; the other queues
+ * should be empty
+ */
+ list_for_each_entry(ent, &queue->ent_w_req_queue, list) {
+ pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ list_for_each_entry(ent, &queue->ent_commit_queue, list) {
+ pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n",
+ ring, qid, ent, ent->state);
+ }
+ spin_unlock(&queue->lock);
+ }
+ ring->stop_debug_log = 1;
+}
+
+static void fuse_uring_async_stop_queues(struct work_struct *work)
+{
+ int qid;
+ struct fuse_ring *ring =
+ container_of(work, struct fuse_ring, async_teardown_work.work);
+
+ /* XXX code dup */
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ /*
+ * Some ring entries might be in the middle of IO operations,
+ * i.e. in the process of getting handled by file_operations::uring_cmd
+ * or on the way to userspace - we could handle that with conditions in
+ * run time code, but it is easier/cleaner to have an async teardown
+ * handler if there are still queue references left.
+ */
+ if (atomic_read(&ring->queue_refs) > 0) {
+ if (time_after(jiffies,
+ ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT))
+ fuse_uring_log_ent_state(ring);
+
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Stop the ring queues
+ */
+void fuse_uring_stop_queues(struct fuse_ring *ring)
+{
+ int qid;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ ring->teardown_time = jiffies;
+ INIT_DELAYED_WORK(&ring->async_teardown_work,
+ fuse_uring_async_stop_queues);
+ schedule_delayed_work(&ring->async_teardown_work,
+ FUSE_URING_TEARDOWN_INTERVAL);
+ } else {
+ wake_up_all(&ring->stop_waitq);
+ }
+}
+
+/*
+ * Handle IO_URING_F_CANCEL, which typically comes on daemon termination.
+ *
+ * Releasing the last entry should trigger fuse_dev_release() if
+ * the daemon was terminated
+ */
+static void fuse_uring_cancel(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue;
+ bool need_cmd_done = false;
+
+ /*
+ * ent is accessed directly - it must not be destructed as long as
+ * IO_URING_F_CANCEL might still come up
+ */
+ queue = ent->queue;
+ spin_lock(&queue->lock);
+ if (ent->state == FRRS_AVAILABLE) {
+ ent->state = FRRS_USERSPACE;
+ list_move(&ent->list, &queue->ent_in_userspace);
+ need_cmd_done = true;
+ ent->cmd = NULL;
+ }
+ spin_unlock(&queue->lock);
+
+ if (need_cmd_done) {
+ /* no queue lock to avoid lock order issues */
+ io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags);
+ }
+}
+
+static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_ring_ent *ring_ent)
+{
+ uring_cmd_set_ring_ent(cmd, ring_ent);
+ io_uring_cmd_mark_cancelable(cmd, issue_flags);
+}
+
+/*
+ * Checks for errors and stores them in the request
+ */
+static int fuse_uring_out_header_has_err(struct fuse_out_header *oh,
+ struct fuse_req *req,
+ struct fuse_conn *fc)
+{
+ int err;
+
+ err = -EINVAL;
+ if (oh->unique == 0) {
+ /* Not supported through io-uring yet */
+ pr_warn_once("notify through fuse-io-uring not supported\n");
+ goto err;
+ }
+
+ if (oh->error <= -ERESTARTSYS || oh->error > 0)
+ goto err;
+
+ if (oh->error) {
+ err = oh->error;
+ goto err;
+ }
+
+ err = -ENOENT;
+ if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
+ pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n",
+ req->in.h.unique,
+ oh->unique & ~FUSE_INT_REQ_BIT);
+ goto err;
+ }
+
+ /*
+ * Is it an interrupt reply ID?
+ * XXX: Not supported through fuse-io-uring yet, it should not even
+ * find the request - should not happen.
+ */
+ WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT);
+
+ err = 0;
+err:
+ return err;
+}
+
+static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
+ struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct iov_iter iter;
+ int err;
+ struct fuse_uring_ent_in_out ring_in_out;
+
+ err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out,
+ sizeof(ring_in_out));
+ if (err)
+ return -EFAULT;
+
+ err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz,
+ &iter);
+ if (err)
+ return err;
+
+ fuse_copy_init(&cs, 0, &iter);
+ cs.is_uring = 1;
+ cs.req = req;
+
+ return fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
+}
+
+ /*
+ * Copy data from the req to the ring buffer
+ */
+static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
+ struct fuse_ring_ent *ent)
+{
+ struct fuse_copy_state cs;
+ struct fuse_args *args = req->args;
+ struct fuse_in_arg *in_args = args->in_args;
+ int num_args = args->in_numargs;
+ int err;
+ struct iov_iter iter;
+ struct fuse_uring_ent_in_out ent_in_out = {
+ .flags = 0,
+ .commit_id = req->in.h.unique,
+ };
+
+ err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);
+ if (err) {
+ pr_info_ratelimited("fuse: Import of user buffer failed\n");
+ return err;
+ }
+
+ fuse_copy_init(&cs, 1, &iter);
+ cs.is_uring = 1;
+ cs.req = req;
+
+ if (num_args > 0) {
+ /*
+ * The expectation is that the first argument is the per-op header.
+ * Some opcodes have that as zero size.
+ */
+ if (args->in_args[0].size > 0) {
+ err = copy_to_user(&ent->headers->op_in, in_args->value,
+ in_args->size);
+ if (err) {
+ pr_info_ratelimited(
+ "Copying the header failed.\n");
+ return -EFAULT;
+ }
+ }
+ in_args++;
+ num_args--;
+ }
+
+ /* copy the payload */
+ err = fuse_copy_args(&cs, num_args, args->in_pages,
+ (struct fuse_arg *)in_args, 0);
+ if (err) {
+ pr_info_ratelimited("%s fuse_copy_args failed\n", __func__);
+ return err;
+ }
+
+ ent_in_out.payload_sz = cs.ring.copied_sz;
+ err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out,
+ sizeof(ent_in_out));
+ return err ? -EFAULT : 0;
+}
+
+static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ int err;
+
+ err = -EIO;
+ if (WARN_ON(ent->state != FRRS_FUSE_REQ)) {
+ pr_err("qid=%d ring-req=%p invalid state %d on send\n",
+ queue->qid, ent, ent->state);
+ return err;
+ }
+
+ err = -EINVAL;
+ if (WARN_ON(req->in.h.unique == 0))
+ return err;
+
+ /* copy the request */
+ err = fuse_uring_args_to_ring(ring, req, ent);
+ if (unlikely(err)) {
+ pr_info_ratelimited("Copy to ring failed: %d\n", err);
+ return err;
+ }
+
+ /* copy fuse_in_header */
+ err = copy_to_user(&ent->headers->in_out, &req->in.h,
+ sizeof(req->in.h));
+ if (err) {
+ err = -EFAULT;
+ return err;
+ }
+
+ return 0;
+}
+
+static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ int err;
+
+ err = fuse_uring_copy_to_ring(ent, req);
+ if (!err)
+ set_bit(FR_SENT, &req->flags);
+ else
+ fuse_uring_req_end(ent, req, err);
+
+ return err;
+}
+
+/*
+ * Write data to the ring buffer and send the request to userspace,
+ * which will read it.
+ * This is comparable to a classical read(/dev/fuse).
+ */
+static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
+ struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+ struct io_uring_cmd *cmd;
+
+ err = fuse_uring_prepare_send(ent, req);
+ if (err)
+ return err;
+
+ spin_lock(&queue->lock);
+ cmd = ent->cmd;
+ ent->cmd = NULL;
+ ent->state = FRRS_USERSPACE;
+ list_move(&ent->list, &queue->ent_in_userspace);
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, 0, 0, issue_flags);
+ return 0;
+}
+
+/*
+ * Make a ring entry available for fuse_req assignment
+ */
+static void fuse_uring_ent_avail(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue)
+{
+ WARN_ON_ONCE(!ent->cmd);
+ list_move(&ent->list, &queue->ent_avail_queue);
+ ent->state = FRRS_AVAILABLE;
+}
+
+/* Used to find the request on SQE commit */
+static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_pqueue *fpq = &queue->fpq;
+ unsigned int hash;
+
+ req->ring_entry = ent;
+ hash = fuse_req_hash(req->in.h.unique);
+ list_move_tail(&req->list, &fpq->processing[hash]);
+}
+
+/*
+ * Assign a fuse request to the given ring entry
+ */
+static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE &&
+ ent->state != FRRS_COMMIT)) {
+ pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
+ ent->state);
+ }
+
+ spin_lock(&fiq->lock);
+ clear_bit(FR_PENDING, &req->flags);
+ spin_unlock(&fiq->lock);
+ ent->fuse_req = req;
+ ent->state = FRRS_FUSE_REQ;
+ list_move(&ent->list, &queue->ent_w_req_queue);
+ fuse_uring_add_to_pq(ent, req);
+}
+
+/* Fetch the next fuse request if available */
+static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
+ __must_hold(&queue->lock)
+{
+ struct fuse_req *req;
+ struct fuse_ring_queue *queue = ent->queue;
+ struct list_head *req_queue = &queue->fuse_req_queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ /* get and assign the next entry while it is still holding the lock */
+ req = list_first_entry_or_null(req_queue, struct fuse_req, list);
+ if (req)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+
+ return req;
+}
+
+/*
+ * Read data from the ring buffer, which user space has written to.
+ * This is comparable to handling a classical write(/dev/fuse).
+ * Also make the ring entry available again for new fuse requests.
+ */
+static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
+ unsigned int issue_flags)
+{
+ struct fuse_ring *ring = ent->queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ ssize_t err = 0;
+
+ err = copy_from_user(&req->out.h, &ent->headers->in_out,
+ sizeof(req->out.h));
+ if (err) {
+ req->out.h.error = -EFAULT;
+ goto out;
+ }
+
+ err = fuse_uring_out_header_has_err(&req->out.h, req, fc);
+ if (err) {
+ /* req->out.h.error already set */
+ goto out;
+ }
+
+ err = fuse_uring_copy_from_ring(ring, req, ent);
+out:
+ fuse_uring_req_end(ent, req, err);
+}
+
+/*
+ * Get the next fuse req and send it
+ */
+static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
+ struct fuse_ring_queue *queue,
+ unsigned int issue_flags)
+{
+ int err;
+ struct fuse_req *req;
+
+retry:
+ spin_lock(&queue->lock);
+ fuse_uring_ent_avail(ent, queue);
+ req = fuse_uring_ent_assign_req(ent);
+ spin_unlock(&queue->lock);
+
+ if (req) {
+ err = fuse_uring_send_next_to_ring(ent, req, issue_flags);
+ if (err)
+ goto retry;
+ }
+}
+
+static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ lockdep_assert_held(&queue->lock);
+
+ if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
+ return -EIO;
+
+ ent->state = FRRS_COMMIT;
+ list_move(&ent->list, &queue->ent_commit_queue);
+
+ return 0;
+}
+
+/* FUSE_URING_CMD_COMMIT_AND_FETCH handler */
+static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
+ struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring_ent *ent;
+ int err;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ uint64_t commit_id = READ_ONCE(cmd_req->commit_id);
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+ struct fuse_pqueue *fpq;
+ struct fuse_req *req;
+
+ err = -ENOTCONN;
+ if (!ring)
+ return err;
+
+ if (qid >= ring->nr_queues)
+ return -EINVAL;
+
+ queue = ring->queues[qid];
+ if (!queue)
+ return err;
+ fpq = &queue->fpq;
+
+ if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped))
+ return err;
+
+ spin_lock(&queue->lock);
+ /*
+ * Find a request based on the unique ID of the fuse request.
+ * This should get revised, as it needs a hash calculation and a list
+ * search, the full struct fuse_pqueue (memory overhead) and the link
+ * from req to ring_ent.
+ */
+ req = fuse_request_find(fpq, commit_id);
+ err = -ENOENT;
+ if (!req) {
+ pr_info("qid=%d commit_id %llu not found\n", queue->qid,
+ commit_id);
+ spin_unlock(&queue->lock);
+ return err;
+ }
+ list_del_init(&req->list);
+ ent = req->ring_entry;
+ req->ring_entry = NULL;
+
+ err = fuse_ring_ent_set_commit(ent);
+ if (err != 0) {
+ pr_info_ratelimited("qid=%d commit_id %llu state %d\n",
+ queue->qid, commit_id, ent->state);
+ spin_unlock(&queue->lock);
+ req->out.h.error = err;
+ clear_bit(FR_SENT, &req->flags);
+ fuse_request_end(req);
+ return err;
+ }
+
+ ent->cmd = cmd;
+ spin_unlock(&queue->lock);
+
+ /* without the queue lock, as other locks are taken */
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+ fuse_uring_commit(ent, req, issue_flags);
+
+ /*
+ * Fetching the next request is absolutely required as queued
+ * fuse requests would otherwise not get processed - committing
+ * and fetching is done in one step vs legacy fuse, which has separated
+ * read (fetch request) and write (commit result).
+ */
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return 0;
+}
+
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ bool ready = true;
+
+ for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+ if (current_qid == qid)
+ continue;
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ ready = false;
+ break;
+ }
+
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->ent_avail_queue))
+ ready = false;
+ spin_unlock(&queue->lock);
+ }
+
+ return ready;
+}
+
+/*
+ * FUSE_IO_URING_CMD_REGISTER handling
+ */
+static void fuse_uring_do_register(struct fuse_ring_ent *ent,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ fuse_uring_prepare_cancel(cmd, issue_flags, ent);
+
+ spin_lock(&queue->lock);
+ ent->cmd = cmd;
+ fuse_uring_ent_avail(ent, queue);
+ spin_unlock(&queue->lock);
+
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(fiq->ops, &fuse_io_uring_ops);
+ WRITE_ONCE(ring->ready, true);
+ wake_up_all(&fc->blocked_waitq);
+ }
+ }
+}
+
+/*
+ * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
+ * the payload
+ */
+static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe,
+ struct iovec iov[FUSE_URING_IOV_SEGS])
+{
+ struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ struct iov_iter iter;
+ ssize_t ret;
+
+ if (sqe->len != FUSE_URING_IOV_SEGS)
+ return -EINVAL;
+
+ /*
+ * Direction for buffer access will actually be both READ and WRITE;
+ * using WRITE for the import includes READ access as well.
+ */
+ ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS,
+ FUSE_URING_IOV_SEGS, &iov, &iter);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct fuse_ring_ent *
+fuse_uring_create_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_queue *queue)
+{
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_ring_ent *ent;
+ size_t payload_size;
+ struct iovec iov[FUSE_URING_IOV_SEGS];
+ int err;
+
+ err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);
+ if (err) {
+ pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ err = -EINVAL;
+ if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) {
+ pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len);
+ return ERR_PTR(err);
+ }
+
+ payload_size = iov[1].iov_len;
+ if (payload_size < ring->max_payload_sz) {
+ pr_info_ratelimited("Invalid req payload len %zu\n",
+ payload_size);
+ return ERR_PTR(err);
+ }
+
+ err = -ENOMEM;
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT);
+ if (!ent)
+ return ERR_PTR(err);
+
+ INIT_LIST_HEAD(&ent->list);
+
+ ent->queue = queue;
+ ent->headers = iov[0].iov_base;
+ ent->payload = iov[1].iov_base;
+
+ atomic_inc(&ring->queue_refs);
+ return ent;
+}
+
+/*
+ * Register the header and payload buffers with the kernel and put the
+ * entry onto the queue as "ready to get fuse requests"
+ */
+static int fuse_uring_register(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, struct fuse_conn *fc)
+{
+ const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent;
+ int err;
+ unsigned int qid = READ_ONCE(cmd_req->qid);
+
+ err = -ENOMEM;
+ if (!ring) {
+ ring = fuse_uring_create(fc);
+ if (!ring)
+ return err;
+ }
+
+ if (qid >= ring->nr_queues) {
+ pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid);
+ return -EINVAL;
+ }
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ queue = fuse_uring_create_queue(ring, qid);
+ if (!queue)
+ return err;
+ }
+
+ /*
+ * The created queue above does not need to be destructed in
+ * case of entry errors below; that is done at ring destruction time.
+ */
+
+ ent = fuse_uring_create_ring_ent(cmd, queue);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ fuse_uring_do_register(ent, cmd, issue_flags);
+
+ return 0;
+}
+
+/*
+ * Entry function from io_uring to handle the given passthrough command
+ * (op code IORING_OP_URING_CMD)
+ */
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct fuse_dev *fud;
+ struct fuse_conn *fc;
+ u32 cmd_op = cmd->cmd_op;
+ int err;
+
+ if ((unlikely(issue_flags & IO_URING_F_CANCEL))) {
+ fuse_uring_cancel(cmd, issue_flags);
+ return 0;
+ }
+
+ /* This extra SQE size holds struct fuse_uring_cmd_req */
+ if (!(issue_flags & IO_URING_F_SQE128))
+ return -EINVAL;
+
+ fud = fuse_get_dev(cmd->file);
+ if (!fud) {
+ pr_info_ratelimited("No fuse device found\n");
+ return -ENOTCONN;
+ }
+ fc = fud->fc;
+
+ /* Once a connection has io-uring enabled on it, it can't be disabled */
+ if (!enable_uring && !fc->io_uring) {
+ pr_info_ratelimited("fuse-io-uring is disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (fc->aborted)
+ return -ECONNABORTED;
+ if (!fc->connected)
+ return -ENOTCONN;
+
+ /*
+ * fuse_uring_register() needs the ring to be initialized,
+ * as we need to know the max payload size.
+ */
+ if (!fc->initialized)
+ return -EAGAIN;
+
+ switch (cmd_op) {
+ case FUSE_IO_URING_CMD_REGISTER:
+ err = fuse_uring_register(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n",
+ err);
+ fc->io_uring = 0;
+ wake_up_all(&fc->blocked_waitq);
+ return err;
+ }
+ break;
+ case FUSE_IO_URING_CMD_COMMIT_AND_FETCH:
+ err = fuse_uring_commit_fetch(cmd, issue_flags, fc);
+ if (err) {
+ pr_info_once("FUSE_IO_URING_COMMIT_AND_FETCH failed err=%d\n",
+ err);
+ return err;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EIOCBQUEUED;
+}
+
+static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
+ ssize_t ret, unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+ ent->state = FRRS_USERSPACE;
+ list_move(&ent->list, &queue->ent_in_userspace);
+ ent->cmd = NULL;
+ spin_unlock(&queue->lock);
+
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
+}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to them - this has to be executed in ring task context.
+ */
+static void fuse_uring_send_in_task(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue = ent->queue;
+ int err;
+
+ if (!(issue_flags & IO_URING_F_TASK_DEAD)) {
+ err = fuse_uring_prepare_send(ent, ent->fuse_req);
+ if (err) {
+ fuse_uring_next_fuse_req(ent, queue, issue_flags);
+ return;
+ }
+ } else {
+ err = -ECANCELED;
+ }
+
+ fuse_uring_send(ent, cmd, err, issue_flags);
+}
+
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+ unsigned int qid;
+ struct fuse_ring_queue *queue;
+
+ qid = task_cpu(current);
+
+ if (WARN_ONCE(qid >= ring->nr_queues,
+ "Core number (%u) exceeds nr queues (%zu)\n", qid,
+ ring->nr_queues))
+ qid = 0;
+
+ queue = ring->queues[qid];
+ WARN_ONCE(!queue, "Missing queue for qid %d\n", qid);
+
+ return queue;
+}
+
+static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent)
+{
+ struct io_uring_cmd *cmd = ent->cmd;
+
+ uring_cmd_set_ring_ent(cmd, ent);
+ io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task);
+}
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+ int err;
+
+ err = -EINVAL;
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ goto err;
+
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
+ spin_lock(&queue->lock);
+ err = -ENOTCONN;
+ if (unlikely(queue->stopped))
+ goto err_unlock;
+
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ if (ent)
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ else
+ list_add_tail(&req->list, &queue->fuse_req_queue);
+ spin_unlock(&queue->lock);
+
+ if (ent)
+ fuse_uring_dispatch_ent(ent);
+
+ return;
+
+err_unlock:
+ spin_unlock(&queue->lock);
+err:
+ req->out.h.error = err;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+}
+
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ent = NULL;
+
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ return false;
+
+ spin_lock(&queue->lock);
+ if (unlikely(queue->stopped)) {
+ spin_unlock(&queue->lock);
+ return false;
+ }
+
+ list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+ ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ spin_lock(&fc->bg_lock);
+ fc->num_background++;
+ if (fc->num_background == fc->max_background)
+ fc->blocked = 1;
+ fuse_uring_flush_bg(queue);
+ spin_unlock(&fc->bg_lock);
+
+ /*
+ * Due to bg_queue flush limits there might be other bg requests
+ * in the queue that need to be handled first. Or no further req
+ * might be available.
+ */
+ req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+ list);
+ if (ent && req) {
+ fuse_uring_add_req_to_ring_ent(ent, req);
+ spin_unlock(&queue->lock);
+
+ fuse_uring_dispatch_ent(ent);
+ } else {
+ spin_unlock(&queue->lock);
+ }
+
+ return true;
+}
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+ /* should be sent over io-uring as an enhancement */
+ .send_forget = fuse_dev_queue_forget,
+
+ /*
+ * could be sent over io-uring, but interrupts should be rare;
+ * no need to make the code complex
+ */
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_uring_queue_fuse_req,
+};
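To make the registration flow implemented by fuse_uring_register() and fuse_uring_do_register() concrete, here is a rough userspace-side sketch of how a fuse server might submit FUSE_IO_URING_CMD_REGISTER for one queue entry. It is illustrative only: it assumes liburing, a ring created with IORING_SETUP_SQE128 (required by fuse_uring_cmd()), and the struct fuse_uring_cmd_req / struct fuse_uring_req_header uapi definitions, which are not part of this excerpt.

/* Illustrative userspace sketch, not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <liburing.h>
#include <linux/fuse.h>		/* assumed to carry the new fuse-over-io-uring uapi */

static int fuse_uring_register_ent(struct io_uring *ring, int fuse_dev_fd,
				   unsigned int qid, struct iovec iov[2])
{
	/*
	 * iov[0] points to a struct fuse_uring_req_header, iov[1] to the
	 * payload buffer (at least the kernel's max_payload_sz bytes).
	 * Both buffers and the iovec itself must stay valid while the
	 * entry is registered.
	 */
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct fuse_uring_cmd_req *cmd;

	if (!sqe)
		return -ENOMEM;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fuse_dev_fd;			/* the /dev/fuse fd of this connection */
	sqe->cmd_op = FUSE_IO_URING_CMD_REGISTER;
	sqe->addr = (uint64_t)(uintptr_t)iov;
	sqe->len = 2;				/* FUSE_URING_IOV_SEGS */

	/* struct fuse_uring_cmd_req lives in the big-SQE command area */
	cmd = (struct fuse_uring_cmd_req *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->qid = qid;				/* queues map 1:1 to CPU cores */
	cmd->commit_id = 0;			/* unused for REGISTER */

	return io_uring_submit(ring);
}

A CQE for a registered entry is not posted immediately: it completes once the kernel assigns a fuse request to the entry (or with -ENOTCONN on teardown), and is_ring_ready() flips the ring to ready only after every per-CPU queue has at least one available entry, so a server would typically run a registration like this once per possible CPU.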
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
new file mode 100644
index 000000000000..2102b3d0c1ae
--- /dev/null
+++ b/fs/fuse/dev_uring_i.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (c) 2023-2024 DataDirect Networks.
+ */
+
+#ifndef _FS_FUSE_DEV_URING_I_H
+#define _FS_FUSE_DEV_URING_I_H
+
+#include "fuse_i.h"
+
+#ifdef CONFIG_FUSE_IO_URING
+
+#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
+#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)
+
+enum fuse_ring_req_state {
+ FRRS_INVALID = 0,
+
+ /* The ring entry was received from userspace and is being processed */
+ FRRS_COMMIT,
+
+ /* The ring entry is waiting for new fuse requests */
+ FRRS_AVAILABLE,
+
+ /* The ring entry got assigned a fuse req */
+ FRRS_FUSE_REQ,
+
+ /* The ring entry is in or on the way to user space */
+ FRRS_USERSPACE,
+
+ /* The ring entry is in teardown */
+ FRRS_TEARDOWN,
+
+ /* The ring entry is released, but not freed yet */
+ FRRS_RELEASED,
+};
+
+/** A fuse ring entry, part of the ring queue */
+struct fuse_ring_ent {
+ /* userspace buffer */
+ struct fuse_uring_req_header __user *headers;
+ void __user *payload;
+
+ /* the ring queue that owns the request */
+ struct fuse_ring_queue *queue;
+
+ /* fields below are protected by queue->lock */
+
+ struct io_uring_cmd *cmd;
+
+ struct list_head list;
+
+ enum fuse_ring_req_state state;
+
+ struct fuse_req *fuse_req;
+};
+
+struct fuse_ring_queue {
+ /*
+ * back pointer to the main fuse uring structure that holds this
+ * queue
+ */
+ struct fuse_ring *ring;
+
+ /* queue id, corresponds to the cpu core */
+ unsigned int qid;
+
+ /*
+ * queue lock, taken when any value in the queue changes _and_ also
+ * when a ring entry state changes.
+ */
+ spinlock_t lock;
+
+ /* available ring entries (struct fuse_ring_ent) */
+ struct list_head ent_avail_queue;
+
+ /*
+ * entries in the process of being committed or in the process
+ * of being sent to userspace
+ */
+ struct list_head ent_w_req_queue;
+ struct list_head ent_commit_queue;
+
+ /* entries in userspace */
+ struct list_head ent_in_userspace;
+
+ /* entries that are released */
+ struct list_head ent_released;
+
+ /* fuse requests waiting for an entry slot */
+ struct list_head fuse_req_queue;
+
+ /* background fuse requests */
+ struct list_head fuse_req_bg_queue;
+
+ struct fuse_pqueue fpq;
+
+ unsigned int active_background;
+
+ bool stopped;
+};
+
+/**
+ * Describes whether io-uring is used for communication and holds all the
+ * data needed for it
+ */
+struct fuse_ring {
+ /* back pointer */
+ struct fuse_conn *fc;
+
+ /* number of ring queues */
+ size_t nr_queues;
+
+ /* maximum payload/arg size */
+ size_t max_payload_sz;
+
+ struct fuse_ring_queue **queues;
+
+ /*
+ * Log ring entry states on stop when entries cannot be released
+ */
+ unsigned int stop_debug_log : 1;
+
+ wait_queue_head_t stop_waitq;
+
+ /* async tear down */
+ struct delayed_work async_teardown_work;
+
+ /* log */
+ unsigned long teardown_time;
+
+ atomic_t queue_refs;
+
+ bool ready;
+};
+
+bool fuse_uring_enabled(void);
+void fuse_uring_destruct(struct fuse_conn *fc);
+void fuse_uring_stop_queues(struct fuse_ring *ring);
+void fuse_uring_abort_end_requests(struct fuse_ring *ring);
+int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring == NULL)
+ return;
+
+ if (atomic_read(&ring->queue_refs) > 0) {
+ fuse_uring_abort_end_requests(ring);
+ fuse_uring_stop_queues(ring);
+ }
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+
+ if (ring)
+ wait_event(ring->stop_waitq,
+ atomic_read(&ring->queue_refs) == 0);
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return fc->ring && fc->ring->ready;
+}
+
+#else /* CONFIG_FUSE_IO_URING */
+
+struct fuse_ring;
+
+static inline void fuse_uring_create(struct fuse_conn *fc)
+{
+}
+
+static inline void fuse_uring_destruct(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_enabled(void)
+{
+ return false;
+}
+
+static inline void fuse_uring_abort(struct fuse_conn *fc)
+{
+}
+
+static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
+{
+}
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return false;
+}
+
+#endif /* CONFIG_FUSE_IO_URING */
+
+#endif /* _FS_FUSE_DEV_URING_I_H */
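On the reply side, fuse_uring_commit_fetch() expects the server to have written the fuse_out_header and the payload into the buffers it registered, and to pass the request's unique id back as commit_id so the kernel can find the request in queue->fpq. A hedged sketch of that step, continuing the registration example above with the same includes and uapi assumptions (the header field names in_out and ring_ent_in_out are inferred from the kernel code):

/* Illustrative userspace sketch, not part of the patch. */
static int fuse_uring_commit_and_fetch(struct io_uring *ring, int fuse_dev_fd,
				       unsigned int qid,
				       struct fuse_uring_req_header *hdr,
				       uint64_t commit_id, int error,
				       uint32_t payload_sz)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct fuse_uring_cmd_req *cmd;
	/*
	 * Reply header, read back by fuse_uring_commit() via copy_from_user().
	 * commit_id was handed to the server in ring_ent_in_out.commit_id of
	 * the fetched request and equals the request's unique id; len follows
	 * the classic fuse protocol convention.
	 */
	struct fuse_out_header oh = {
		.len = sizeof(oh) + payload_sz,
		.error = error,
		.unique = commit_id,
	};

	if (!sqe)
		return -ENOMEM;

	memcpy(&hdr->in_out, &oh, sizeof(oh));
	hdr->ring_ent_in_out.payload_sz = payload_sz;

	/* No iovec here: the kernel reuses the buffers from registration */
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fuse_dev_fd;
	sqe->cmd_op = FUSE_IO_URING_CMD_COMMIT_AND_FETCH;

	cmd = (struct fuse_uring_cmd_req *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->qid = qid;
	cmd->commit_id = commit_id;	/* used by fuse_request_find() */

	return io_uring_submit(ring);
}

The same SQE both commits the reply and re-arms the entry: its CQE completes when the kernel hands the entry its next fuse request (see fuse_uring_next_fuse_req()).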
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bf057cf7098d..198862b086ff 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -175,9 +175,12 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
memset(outarg, 0, sizeof(struct fuse_entry_out));
args->opcode = FUSE_LOOKUP;
args->nodeid = nodeid;
- args->in_numargs = 1;
- args->in_args[0].size = name->len + 1;
- args->in_args[0].value = name->name;
+ args->in_numargs = 3;
+ fuse_set_zero_arg0(args);
+ args->in_args[1].size = name->len;
+ args->in_args[1].value = name->name;
+ args->in_args[2].size = 1;
+ args->in_args[2].value = "";
args->out_numargs = 1;
args->out_args[0].size = sizeof(struct fuse_entry_out);
args->out_args[0].value = outarg;
@@ -192,10 +195,10 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
* the lookup once more. If the lookup results in the same inode,
* then refresh the attributes, timeouts and mark the dentry valid.
*/
-static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+static int fuse_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *entry, unsigned int flags)
{
struct inode *inode;
- struct dentry *parent;
struct fuse_mount *fm;
struct fuse_inode *fi;
int ret;
@@ -227,11 +230,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
attr_version = fuse_get_attr_version(fm->fc);
- parent = dget_parent(entry);
- fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
- &entry->d_name, &outarg);
+ fuse_lookup_init(fm->fc, &args, get_node_id(dir),
+ name, &outarg);
ret = fuse_simple_request(fm, &args);
- dput(parent);
/* Zero nodeid is same as -ENOENT */
if (!ret && !outarg.nodeid)
ret = -ENOENT;
@@ -265,9 +266,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
return -ECHILD;
} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
- parent = dget_parent(entry);
- fuse_advise_use_readdirplus(d_inode(parent));
- dput(parent);
+ fuse_advise_use_readdirplus(dir);
}
}
ret = 1;
@@ -929,11 +928,12 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
FUSE_ARGS(args);
args.opcode = FUSE_SYMLINK;
- args.in_numargs = 2;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
- args.in_args[1].size = len;
- args.in_args[1].value = link;
+ args.in_numargs = 3;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
+ args.in_args[2].size = len;
+ args.in_args[2].value = link;
return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
}
@@ -993,9 +993,10 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_UNLINK;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
@@ -1016,9 +1017,10 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
args.opcode = FUSE_RMDIR;
args.nodeid = get_node_id(dir);
- args.in_numargs = 1;
- args.in_args[0].size = entry->d_name.len + 1;
- args.in_args[0].value = entry->d_name.name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
err = fuse_simple_request(fm, &args);
if (!err) {
fuse_dir_changed(dir);
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
new file mode 100644
index 000000000000..3b2bfe1248d3
--- /dev/null
+++ b/fs/fuse/fuse_dev_i.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * FUSE: Filesystem in Userspace
+ * Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
+ */
+#ifndef _FS_FUSE_DEV_I_H
+#define _FS_FUSE_DEV_I_H
+
+#include <linux/types.h>
+
+/* Ordinary requests have even IDs, while interrupts IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
+
+struct fuse_arg;
+struct fuse_args;
+struct fuse_pqueue;
+struct fuse_req;
+struct fuse_iqueue;
+struct fuse_forget_link;
+
+struct fuse_copy_state {
+ int write;
+ struct fuse_req *req;
+ struct iov_iter *iter;
+ struct pipe_buffer *pipebufs;
+ struct pipe_buffer *currbuf;
+ struct pipe_inode_info *pipe;
+ unsigned long nr_segs;
+ struct page *pg;
+ unsigned int len;
+ unsigned int offset;
+ unsigned int move_pages:1;
+ unsigned int is_uring:1;
+ struct {
+ unsigned int copied_sz; /* copied size into the user buffer */
+ } ring;
+};
+
+static inline struct fuse_dev *fuse_get_dev(struct file *file)
+{
+ /*
+ * Lockless access is OK, because file->private_data is set
+ * once during mount and is valid until the file is released.
+ */
+ return READ_ONCE(file->private_data);
+}
+
+unsigned int fuse_req_hash(u64 unique);
+struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique);
+
+void fuse_dev_end_requests(struct list_head *head);
+
+void fuse_copy_init(struct fuse_copy_state *cs, int write,
+ struct iov_iter *iter);
+int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs,
+ unsigned int argpages, struct fuse_arg *args,
+ int zeroing);
+int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
+ unsigned int nbytes);
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget);
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+
+#endif
+
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 74744c6f2860..fee96fe7887b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -310,7 +310,7 @@ struct fuse_args {
bool is_ext:1;
bool is_pinned:1;
bool invalidate_vmap:1;
- struct fuse_in_arg in_args[3];
+ struct fuse_in_arg in_args[4];
struct fuse_arg out_args[2];
void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
/* Used for kvec iter backed by vmalloc address */
@@ -438,6 +438,10 @@ struct fuse_req {
/** fuse_mount this request belongs to */
struct fuse_mount *fm;
+
+#ifdef CONFIG_FUSE_IO_URING
+ void *ring_entry;
+#endif
};
struct fuse_iqueue;
@@ -863,6 +867,9 @@ struct fuse_conn {
/* Use pages instead of pointer for kernel I/O */
unsigned int use_pages_for_kvec_io:1;
+ /* Use io_uring for communication */
+ unsigned int io_uring;
+
/** Maximum stack depth for passthrough backing files */
int max_stack_depth;
@@ -923,6 +930,11 @@ struct fuse_conn {
/** IDR for backing files ids */
struct idr backing_files_map;
#endif
+
+#ifdef CONFIG_FUSE_IO_URING
+ /** uring connection information */
+ struct fuse_ring *ring;
+#endif
};
/*
@@ -947,6 +959,19 @@ struct fuse_mount {
struct rcu_head rcu;
};
+/*
+ * Empty header for FUSE opcodes without specific header needs.
+ * Used as a placeholder in args->in_args[0] for consistency
+ * across all FUSE operations, simplifying request handling.
+ */
+struct fuse_zero_header {};
+
+static inline void fuse_set_zero_arg0(struct fuse_args *args)
+{
+ args->in_args[0].size = sizeof(struct fuse_zero_header);
+ args->in_args[0].value = NULL;
+}
+
static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
{
return sb->s_fs_info;
@@ -1220,6 +1245,11 @@ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
+ * Initialize the fuse processing queue
+ */
+void fuse_pqueue_init(struct fuse_pqueue *fpq);
+
+/**
* Initialize fuse_conn
*/
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3ce4f4e81d09..e9db2cb8c150 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -7,6 +7,7 @@
*/
#include "fuse_i.h"
+#include "dev_uring_i.h"
#include <linux/pagemap.h>
#include <linux/slab.h>
@@ -937,7 +938,7 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq,
fiq->priv = priv;
}
-static void fuse_pqueue_init(struct fuse_pqueue *fpq)
+void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
unsigned int i;
@@ -992,6 +993,8 @@ static void delayed_release(struct rcu_head *p)
{
struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
+ fuse_uring_destruct(fc);
+
put_user_ns(fc->user_ns);
fc->release(fc);
}
@@ -1387,6 +1390,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
else
ok = false;
}
+ if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
+ fc->io_uring = 1;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -1446,6 +1451,13 @@ void fuse_send_init(struct fuse_mount *fm)
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
flags |= FUSE_PASSTHROUGH;
+ /*
+ * This is just an information flag for the fuse server. No need to check
+ * the reply - the server either sends IORING_OP_URING_CMD or it doesn't.
+ */
+ if (fuse_uring_enabled())
+ flags |= FUSE_OVER_IO_URING;
+
ia->in.flags = flags;
ia->in.flags2 = flags >> 32;
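A short, hedged sketch of the server side of this handshake; the assumptions beyond this excerpt are that FUSE_OVER_IO_URING is one of the upper 32 flag bits, matching the "flags >> 32" split above, and that a server wishing to use io-uring echoes the bit in its INIT reply (process_init_reply() above sets fc->io_uring from the replied flags).

/* Illustrative userspace sketch, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <linux/fuse.h>		/* assumed to define FUSE_OVER_IO_URING */

static void server_handle_init(const struct fuse_init_in *in,
			       struct fuse_init_out *out, bool want_uring)
{
	uint64_t kernel_flags = in->flags | ((uint64_t)in->flags2 << 32);
	uint64_t reply_flags = 0;	/* other negotiated flags omitted */

	if (want_uring && (kernel_flags & FUSE_OVER_IO_URING))
		reply_flags |= FUSE_OVER_IO_URING;	/* we will send uring cmds */

	out->flags = (uint32_t)reply_flags;
	out->flags2 = (uint32_t)(reply_flags >> 32);
}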
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
index b272bb333005..63fb1e5bee30 100644
--- a/fs/fuse/sysctl.c
+++ b/fs/fuse/sysctl.c
@@ -13,7 +13,7 @@ static struct ctl_table_header *fuse_table_header;
/* Bound by fuse_init_out max_pages, which is a u16 */
static unsigned int sysctl_fuse_max_pages_limit = 65535;
-static struct ctl_table fuse_sysctl_table[] = {
+static const struct ctl_table fuse_sysctl_table[] = {
{
.procname = "max_pages_limit",
.data = &fuse_max_pages_limit,
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 9f568d345c51..93dfb06b6cea 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -164,9 +164,10 @@ int fuse_removexattr(struct inode *inode, const char *name)
args.opcode = FUSE_REMOVEXATTR;
args.nodeid = get_node_id(inode);
- args.in_numargs = 1;
- args.in_args[0].size = strlen(name) + 1;
- args.in_args[0].value = name;
+ args.in_numargs = 2;
+ fuse_set_zero_arg0(&args);
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
fm->fc->no_removexattr = 1;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 2e215e8c3c88..95050e719233 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -21,7 +21,9 @@
/**
* gfs2_drevalidate - Check directory lookup consistency
- * @dentry: the mapping to check
+ * @dir: expected parent directory inode
+ * @name: expected name
+ * @dentry: dentry to check
* @flags: lookup flags
*
* Check to make sure the lookup necessary to arrive at this inode from its
@@ -30,50 +32,43 @@
* Returns: 1 if the dentry is ok, 0 if it isn't
*/
-static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
+static int gfs2_drevalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct gfs2_sbd *sdp;
- struct gfs2_inode *dip;
+ struct gfs2_sbd *sdp = GFS2_SB(dir);
+ struct gfs2_inode *dip = GFS2_I(dir);
struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
- int error, valid = 0;
+ int error, valid;
int had_lock = 0;
if (flags & LOOKUP_RCU)
return -ECHILD;
- parent = dget_parent(dentry);
- sdp = GFS2_SB(d_inode(parent));
- dip = GFS2_I(d_inode(parent));
inode = d_inode(dentry);
if (inode) {
if (is_bad_inode(inode))
- goto out;
+ return 0;
ip = GFS2_I(inode);
}
- if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) {
- valid = 1;
- goto out;
- }
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ return 1;
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
- goto out;
+ return 0;
}
- error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip);
+ error = gfs2_dir_check(dir, name, ip);
valid = inode ? !error : (error == -ENOENT);
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
-out:
- dput(parent);
return valid;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8c4c1f871a88..65c07aa95718 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1201,8 +1201,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (glops->go_instantiate)
gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
gl->gl_name = name;
+ lockref_init(&gl->gl_lockref);
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
- gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 04cadc02e5a6..0727f60ad028 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -51,7 +51,6 @@ static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
- spin_lock_init(&gl->gl_lockref.lock);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 58bc5013ca49..2298e06797ac 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -236,7 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- lockref_init(&qd->qd_lockref, 0);
+ lockref_init(&qd->qd_lockref);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
@@ -297,7 +297,6 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
spin_lock_bucket(hash);
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
if (qd == NULL) {
- new_qd->qd_lockref.count++;
*qdp = new_qd;
list_add(&new_qd->qd_list, &sdp->sd_quota_list);
hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
@@ -1450,6 +1449,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
if (qd == NULL)
goto fail_brelse;
+ qd->qd_lockref.count = 0;
set_bit(QDF_CHANGE, &qd->qd_flags);
qd->qd_change = qc_change;
qd->qd_slot = slot;
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 76fa02e3835b..ef54fc8093cf 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -13,7 +13,8 @@
/* dentry case-handling: just lowercase everything */
-static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
+static int hfs_revalidate_dentry(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int diff;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 7e51d2cec64b..e0741e468956 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -95,32 +95,17 @@ __uml_setup("hostfs=", hostfs_args,
static char *__dentry_name(struct dentry *dentry, char *name)
{
char *p = dentry_path_raw(dentry, name, PATH_MAX);
- char *root;
- size_t len;
- struct hostfs_fs_info *fsi;
-
- fsi = dentry->d_sb->s_fs_info;
- root = fsi->host_root_path;
- len = strlen(root);
- if (IS_ERR(p)) {
- __putname(name);
- return NULL;
- }
+ struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info;
+ char *root = fsi->host_root_path;
+ size_t len = strlen(root);
- /*
- * This function relies on the fact that dentry_path_raw() will place
- * the path name at the end of the provided buffer.
- */
- BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
-
- strscpy(name, root, PATH_MAX);
- if (len > p - name) {
+ if (IS_ERR(p) || len > p - name) {
__putname(name);
return NULL;
}
- if (p > name + len)
- strcpy(name + len, p);
+ memcpy(name, root, len);
+ memmove(name + len, p, name + PATH_MAX - p);
return name;
}
@@ -410,38 +395,33 @@ static const struct file_operations hostfs_dir_fops = {
.fsync = hostfs_fsync,
};
-static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
+static int hostfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
- char *buffer;
- loff_t base = page_offset(page);
- int count = PAGE_SIZE;
- int end_index = inode->i_size >> PAGE_SHIFT;
- int err;
-
- if (page->index >= end_index)
- count = inode->i_size & (PAGE_SIZE-1);
-
- buffer = kmap_local_page(page);
-
- err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
- if (err != count) {
- if (err >= 0)
- err = -EIO;
- mapping_set_error(mapping, err);
- goto out;
+ struct folio *folio = NULL;
+ loff_t i_size = i_size_read(inode);
+ int err = 0;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
+ loff_t pos = folio_pos(folio);
+ size_t count = folio_size(folio);
+ char *buffer;
+ int ret;
+
+ if (count > i_size - pos)
+ count = i_size - pos;
+
+ buffer = kmap_local_folio(folio, 0);
+ ret = write_file(HOSTFS_I(inode)->fd, &pos, buffer, count);
+ kunmap_local(buffer);
+ folio_unlock(folio);
+ if (ret != count) {
+ err = ret < 0 ? ret : -EIO;
+ mapping_set_error(mapping, err);
+ }
}
- if (base > inode->i_size)
- inode->i_size = base;
-
- err = 0;
-
- out:
- kunmap_local(buffer);
- unlock_page(page);
-
return err;
}
@@ -506,11 +486,12 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
}
static const struct address_space_operations hostfs_aops = {
- .writepage = hostfs_writepage,
+ .writepages = hostfs_writepages,
.read_folio = hostfs_read_folio,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
+ .migrate_folio = filemap_migrate_folio,
};
static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index fc1ae5132127..0fc179a59830 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -99,7 +99,6 @@ static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
@@ -116,10 +115,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
vma->vm_ops = &hugetlb_vm_ops;
- ret = seal_check_write(info->seals, vma);
- if (ret)
- return ret;
-
/*
* page based offset in vm_pgoff could be sufficiently large to
* overflow a loff_t when converted to byte offset. This can
@@ -819,7 +814,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* folios in these areas, we need to consume the reserves
* to keep reservation accounting consistent.
*/
- folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
+ folio = alloc_hugetlb_folio(&pseudo_vma, addr, false);
if (IS_ERR(folio)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
error = PTR_ERR(folio);
diff --git a/fs/inode.c b/fs/inode.c
index 6b4c77268fc0..5587aabdaa5e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -184,7 +184,7 @@ static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table inodes_sysctls[] = {
+static const struct ctl_table inodes_sysctls[] = {
{
.procname = "inode-nr",
.data = &inodes_stat,
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 5fc0ac36dee3..b90d0eda9e51 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -189,7 +189,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
sis->max = isi.nr_pages;
sis->pages = isi.nr_pages - 1;
- sis->highest_bit = isi.nr_pages - 1;
return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 34d5baa5d88a..5f3b6da0e022 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -301,7 +301,6 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
*/
static int zisofs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
@@ -311,16 +310,15 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
struct page **pages;
- pgoff_t index = page->index, end_index;
+ pgoff_t index = folio->index, end_index;
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/*
- * If this page is wholly outside i_size we just return zero;
+ * If this folio is wholly outside i_size we just return zero;
* do_generic_file_read() will handle this for us
*/
if (index >= end_index) {
- SetPageUptodate(page);
- unlock_page(page);
+ folio_end_read(folio, true);
return 0;
}
@@ -338,10 +336,10 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
sizeof(*pages), GFP_KERNEL);
if (!pages) {
- unlock_page(page);
+ folio_unlock(folio);
return -ENOMEM;
}
- pages[full_page] = page;
+ pages[full_page] = &folio->page;
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index 4ad2c67f93f1..9c19e1512101 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -2,8 +2,6 @@
config JBD2
tristate
select CRC32
- select CRYPTO
- select CRYPTO_CRC32C
help
This is a generic journaling layer for block devices that support
both 32-bit and 64-bit block numbers. It is currently used by
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7e49d912b091..d8084b31b361 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1369,20 +1369,12 @@ static int journal_check_superblock(journal_t *journal)
return err;
}
- /* Load the checksum driver */
if (jbd2_journal_has_csum_v2or3_feature(journal)) {
if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) {
printk(KERN_ERR "JBD2: Unknown checksum type\n");
return err;
}
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- err = PTR_ERR(journal->j_chksum_driver);
- journal->j_chksum_driver = NULL;
- return err;
- }
/* Check superblock checksum */
if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) {
printk(KERN_ERR "JBD2: journal checksum error\n");
@@ -1608,8 +1600,6 @@ static journal_t *journal_init_common(struct block_device *bdev,
err_cleanup:
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
- if (journal->j_chksum_driver)
- crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
journal_fail_superblock(journal);
@@ -2191,8 +2181,6 @@ int jbd2_journal_destroy(journal_t *journal)
iput(journal->j_inode);
if (journal->j_revoke)
jbd2_journal_destroy_revoke(journal);
- if (journal->j_chksum_driver)
- crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_fc_wbuf);
kfree(journal->j_wbuf);
kfree(journal);
@@ -2337,27 +2325,15 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
}
}
- /* Load the checksum driver if necessary */
- if ((journal->j_chksum_driver == NULL) &&
- INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
- journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
- journal->j_chksum_driver = NULL;
- return 0;
- }
- /* Precompute checksum seed for all metadata */
- journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
- sizeof(sb->s_uuid));
- }
-
lock_buffer(journal->j_sb_buffer);
- /* If enabling v3 checksums, update superblock */
+ /* If enabling v3 checksums, update superblock and precompute seed */
if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
sb->s_feature_compat &=
~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
}
/* If enabling v1 checksums, downgrade superblock */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index d68a4e6ac345..fc8ede43afde 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1576,7 +1576,8 @@ out:
return result;
}
-static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
+static int jfs_ci_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
/*
* This is not negative dentry. Always valid.
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 458519e416fe..5f0f8b95f44c 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1109,7 +1109,8 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
return ERR_PTR(rc);
}
-static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
+static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct kernfs_node *kn;
struct kernfs_root *root;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8502ef68459b..0eb320617d7b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -927,7 +927,7 @@ repeat:
if (!inode)
continue;
- name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
+ name = QSTR(kn->name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
diff --git a/fs/libfs.c b/fs/libfs.c
index 5b6120b19e99..8444f5cc4064 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1782,7 +1782,7 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
{
const struct dentry *parent;
const struct inode *dir;
- char strbuf[DNAME_INLINE_LEN];
+ union shortname_store strbuf;
struct qstr qstr;
/*
@@ -1802,22 +1802,23 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
if (!dir || !IS_CASEFOLDED(dir))
return 1;
+ qstr.len = len;
+ qstr.name = str;
/*
* If the dentry name is stored in-line, then it may be concurrently
* modified by a rename. If this happens, the VFS will eventually retry
* the lookup, so it doesn't matter what ->d_compare() returns.
* However, it's unsafe to call utf8_strncasecmp() with an unstable
* string. Therefore, we have to copy the name into a temporary buffer.
+ * As above, len is guaranteed to match str, so the shortname case
+ * is exactly when str points to ->d_shortname.
*/
- if (len <= DNAME_INLINE_LEN - 1) {
- memcpy(strbuf, str, len);
- strbuf[len] = 0;
- str = strbuf;
+ if (qstr.name == dentry->d_shortname.string) {
+ strbuf = dentry->d_shortname; // NUL is guaranteed to be in there
+ qstr.name = strbuf.string;
/* prevent compiler from optimizing out the temporary buffer */
barrier();
}
- qstr.len = len;
- qstr.name = str;
return utf8_strncasecmp(dentry->d_sb->s_encoding, name, &qstr);
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 4ec22c2f2ea3..2c8eedc6c2cc 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -70,9 +70,6 @@ static unsigned long nlm_grace_period;
unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
-/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
-static unsigned int nlm_max_connections = 1024;
-
/*
* Constants needed for the sysctl interface.
*/
@@ -136,9 +133,6 @@ lockd(void *vrqstp)
* NFS mount or NFS daemon has gone away.
*/
while (!svc_thread_should_stop(rqstp)) {
- /* update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nlm_max_connections;
-
nlmsvc_retry_blocked(rqstp);
svc_recv(rqstp);
}
@@ -340,7 +334,6 @@ static int lockd_get(void)
return -ENOMEM;
}
- serv->sv_maxconn = nlm_max_connections;
error = svc_set_num_threads(serv, NULL, 1);
if (error < 0) {
svc_destroy(&serv);
@@ -419,7 +412,7 @@ EXPORT_SYMBOL_GPL(lockd_down);
* Sysctl parameters (same as module parameters, different interface).
*/
-static struct ctl_table nlm_sysctls[] = {
+static const struct ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
@@ -542,7 +535,6 @@ module_param_call(nlm_udpport, param_set_port, param_get_int,
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
-module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
diff --git a/fs/locks.c b/fs/locks.c
index 25afc8d9c9d1..1619cddfa7a4 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -97,7 +97,7 @@ static int leases_enable = 1;
static int lease_break_time = 45;
#ifdef CONFIG_SYSCTL
-static struct ctl_table locks_sysctls[] = {
+static const struct ctl_table locks_sysctls[] = {
{
.procname = "leases-enable",
.data = &leases_enable,
diff --git a/fs/namei.c b/fs/namei.c
index e56c29a22d26..3ab9440c5b93 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -921,10 +921,11 @@ out_dput:
return false;
}
-static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
+static inline int d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
- return dentry->d_op->d_revalidate(dentry, flags);
+ return dentry->d_op->d_revalidate(dir, name, dentry, flags);
else
return 1;
}
@@ -1099,7 +1100,7 @@ static int sysctl_protected_fifos __read_mostly;
static int sysctl_protected_regular __read_mostly;
#ifdef CONFIG_SYSCTL
-static struct ctl_table namei_sysctls[] = {
+static const struct ctl_table namei_sysctls[] = {
{
.procname = "protected_symlinks",
.data = &sysctl_protected_symlinks,
@@ -1652,7 +1653,7 @@ static struct dentry *lookup_dcache(const struct qstr *name,
{
struct dentry *dentry = d_lookup(dir, name);
if (dentry) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(dir->d_inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
@@ -1737,19 +1738,20 @@ static struct dentry *lookup_fast(struct nameidata *nd)
if (read_seqcount_retry(&parent->d_seq, nd->seq))
return ERR_PTR(-ECHILD);
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
if (likely(status > 0))
return dentry;
if (!try_to_unlazy_next(nd, dentry))
return ERR_PTR(-ECHILD);
if (status == -ECHILD)
/* we'd been told to redo it in non-rcu mode */
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last,
+ dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return NULL;
- status = d_revalidate(dentry, nd->flags);
+ status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
@@ -1777,7 +1779,7 @@ again:
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
- int error = d_revalidate(dentry, flags);
+ int error = d_revalidate(inode, name, dentry, flags);
if (unlikely(error <= 0)) {
if (!error) {
d_invalidate(dentry);
@@ -3575,7 +3577,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
if (d_in_lookup(dentry))
break;
- error = d_revalidate(dentry, nd->flags);
+ error = d_revalidate(dir_inode, &nd->last, dentry, nd->flags);
if (likely(error > 0))
break;
if (error)
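
With this series, ->d_revalidate() receives the parent directory inode and the name being looked up, so instances no longer have to walk dentry->d_parent under RCU themselves (the NFS hunks further down rely on exactly that). The sketch below shows what an implementation looks like under the new prototype; "examplefs" and examplefs_name_still_valid() are hypothetical, only the signature comes from the patch, and the snippet is illustrative rather than a buildable module.

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/namei.h>

/* Hypothetical helper: does the cached dentry still match (dir, name)? */
static bool examplefs_name_still_valid(struct inode *dir,
				       const struct qstr *name,
				       struct dentry *dentry);

static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
				  struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;		/* ask the VFS to retry in ref-walk mode */

	/* The parent inode and the looked-up name are passed in directly,
	 * so the filesystem no longer chases dentry->d_parent itself. */
	if (!examplefs_name_still_valid(dir, name, dentry))
		return 0;		/* stale: VFS drops it and looks up again */
	return 1;			/* still valid */
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_revalidate	= examplefs_d_revalidate,
};
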
diff --git a/fs/namespace.c b/fs/namespace.c
index 4013fbac354a..8f1000f9f3df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -5087,30 +5087,29 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
{
struct vfsmount *mnt = s->mnt;
struct super_block *sb = mnt->mnt_sb;
+ size_t start = seq->count;
int err;
- if (sb->s_op->show_options) {
- size_t start = seq->count;
-
- err = security_sb_show_options(seq, sb);
- if (err)
- return err;
+ err = security_sb_show_options(seq, sb);
+ if (err)
+ return err;
+ if (sb->s_op->show_options) {
err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
+ }
- if (unlikely(seq_has_overflowed(seq)))
- return -EAGAIN;
+ if (unlikely(seq_has_overflowed(seq)))
+ return -EAGAIN;
- if (seq->count == start)
- return 0;
+ if (seq->count == start)
+ return 0;
- /* skip leading comma */
- memmove(seq->buf + start, seq->buf + start + 1,
- seq->count - start - 1);
- seq->count--;
- }
+ /* skip leading comma */
+ memmove(seq->buf + start, seq->buf + start + 1,
+ seq->count - start - 1);
+ seq->count--;
return 0;
}
@@ -5191,39 +5190,45 @@ static int statmount_string(struct kstatmount *s, u64 flag)
size_t kbufsize;
struct seq_file *seq = &s->seq;
struct statmount *sm = &s->sm;
- u32 start = seq->count;
+ u32 start, *offp;
+
+ /* Reserve an empty string at the beginning for any unset offsets */
+ if (!seq->count)
+ seq_putc(seq, 0);
+
+ start = seq->count;
switch (flag) {
case STATMOUNT_FS_TYPE:
- sm->fs_type = start;
+ offp = &sm->fs_type;
ret = statmount_fs_type(s, seq);
break;
case STATMOUNT_MNT_ROOT:
- sm->mnt_root = start;
+ offp = &sm->mnt_root;
ret = statmount_mnt_root(s, seq);
break;
case STATMOUNT_MNT_POINT:
- sm->mnt_point = start;
+ offp = &sm->mnt_point;
ret = statmount_mnt_point(s, seq);
break;
case STATMOUNT_MNT_OPTS:
- sm->mnt_opts = start;
+ offp = &sm->mnt_opts;
ret = statmount_mnt_opts(s, seq);
break;
case STATMOUNT_OPT_ARRAY:
- sm->opt_array = start;
+ offp = &sm->opt_array;
ret = statmount_opt_array(s, seq);
break;
case STATMOUNT_OPT_SEC_ARRAY:
- sm->opt_sec_array = start;
+ offp = &sm->opt_sec_array;
ret = statmount_opt_sec_array(s, seq);
break;
case STATMOUNT_FS_SUBTYPE:
- sm->fs_subtype = start;
+ offp = &sm->fs_subtype;
statmount_fs_subtype(s, seq);
break;
case STATMOUNT_SB_SOURCE:
- sm->sb_source = start;
+ offp = &sm->sb_source;
ret = statmount_sb_source(s, seq);
break;
default:
@@ -5251,6 +5256,7 @@ static int statmount_string(struct kstatmount *s, u64 flag)
seq->buf[seq->count++] = '\0';
sm->mask |= flag;
+ *offp = start;
return 0;
}
@@ -5985,7 +5991,7 @@ const struct proc_ns_operations mntns_operations = {
};
#ifdef CONFIG_SYSCTL
-static struct ctl_table fs_namespace_sysctls[] = {
+static const struct ctl_table fs_namespace_sysctls[] = {
{
.procname = "mount-max",
.data = &sysctl_mount_max,
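
statmount_string() now reserves a NUL byte at the start of the string buffer so that any unset field points at an empty string, and stores the real offset only after the string has been emitted successfully. A small user-space sketch of that reserve-then-patch-offset idea, assuming an illustrative buffer layout rather than the statmount ABI:

#include <stdio.h>
#include <string.h>

static char buf[256];
static size_t used;

static int emit_string(const char *s, unsigned int *offp)
{
	size_t start, len = strlen(s) + 1;

	if (!used)
		buf[used++] = '\0';	/* offset 0 always names an empty string */

	start = used;
	if (used + len > sizeof(buf))
		return -1;		/* failure: *offp keeps pointing at "" */
	memcpy(buf + used, s, len);
	used += len;
	*offp = start;			/* record the offset only on success */
	return 0;
}

int main(void)
{
	unsigned int fs_type = 0, mnt_point = 0;

	emit_string("ext4", &fs_type);
	emit_string("/mnt/data", &mnt_point);
	printf("fs_type=\"%s\" mnt_point=\"%s\"\n", buf + fs_type, buf + mnt_point);
	return 0;
}
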
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 0eb20012792f..d3f76101ad4b 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -170,7 +170,8 @@ config ROOT_NFS
config NFS_FSCACHE
bool "Provide NFS client caching support"
- depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
+ depends on NFS_FS
+ select NETFS_SUPPORT
select FSCACHE
help
Say Y here if you want NFS data to be cached locally on disc through
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 6cf92498a5ac..86bdc7d23fb9 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -211,10 +211,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
return ERR_PTR(-ENOMEM);
}
cb_info->serv = serv;
- /* As there is only one thread we need to over-ride the
- * default maximum of 80 connections
- */
- serv->sv_maxconn = 1024;
dprintk("nfs_callback_create_svc: service created\n");
return serv;
}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 7832fb0369a1..8397c43358bd 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -718,7 +718,7 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
- return htonl(NFS4ERR_SERVERFAULT);
+ return cpu_to_be32(NFS4ERR_DELAY);
spin_lock(&cps->clp->cl_lock);
rcu_read_lock();
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index fdeb0b34a3d3..4254ba3ee7c5 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -984,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
nfs_put_client(cps.clp);
goto out_invalidcred;
}
+ svc_xprt_set_valid(rqstp->rq_xprt);
}
cps.minorversion = hdr_arg.minorversion;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 550ca934c9cf..3b0918ade53c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -38,7 +38,7 @@
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
-
+#include <linux/nfslocalio.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -186,7 +186,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
seqlock_init(&clp->cl_boot_lock);
ktime_get_real_ts64(&clp->cl_nfssvc_boot);
nfs_uuid_init(&clp->cl_uuid);
- spin_lock_init(&clp->cl_localio_lock);
+ INIT_WORK(&clp->cl_local_probe_work, nfs_local_probe_async_work);
#endif /* CONFIG_NFS_LOCALIO */
clp->cl_principal = "*";
@@ -244,7 +244,7 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
- nfs_local_disable(clp);
+ nfs_localio_disable_client(clp);
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 492cffd9d3d8..2b04038b0e40 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1672,7 +1672,7 @@ nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
}
-static int nfs_lookup_revalidate_dentry(struct inode *dir,
+static int nfs_lookup_revalidate_dentry(struct inode *dir, const struct qstr *name,
struct dentry *dentry,
struct inode *inode, unsigned int flags)
{
@@ -1690,7 +1690,7 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
goto out;
dir_verifier = nfs_save_change_attribute(dir);
- ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ ret = NFS_PROTO(dir)->lookup(dir, dentry, name, fhandle, fattr);
if (ret < 0)
goto out;
@@ -1732,8 +1732,8 @@ out:
* cached dentry and do a new lookup.
*/
static int
-nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs_do_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int error = 0;
@@ -1775,7 +1775,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
if (NFS_STALE(inode))
goto out_bad;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
out_valid:
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
out_bad:
@@ -1785,38 +1785,26 @@ out_bad:
}
static int
-__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
- int (*reval)(struct inode *, struct dentry *, unsigned int))
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent;
- struct inode *dir;
- int ret;
-
if (flags & LOOKUP_RCU) {
if (dentry->d_fsdata == NFS_FSDATA_BLOCKED)
return -ECHILD;
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- ret = reval(dir, dentry, flags);
- if (parent != READ_ONCE(dentry->d_parent))
- return -ECHILD;
} else {
/* Wait for unlink to complete - see unblock_revalidate() */
wait_var_event(&dentry->d_fsdata,
smp_load_acquire(&dentry->d_fsdata)
!= NFS_FSDATA_BLOCKED);
- parent = dget_parent(dentry);
- ret = reval(d_inode(parent), dentry, flags);
- dput(parent);
}
- return ret;
+ return 0;
}
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int nfs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
- return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
}
static void block_revalidate(struct dentry *dentry)
@@ -1982,7 +1970,8 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (error == -ENOENT) {
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
dir_verifier = inode_peek_iversion_raw(dir);
@@ -2025,7 +2014,8 @@ void nfs_d_prune_case_insensitive_aliases(struct inode *inode)
EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases);
#if IS_ENABLED(CONFIG_NFS_V4)
-static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
+static int nfs4_lookup_revalidate(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
@@ -2214,11 +2204,14 @@ no_open:
EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int
-nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
+ if (__nfs_lookup_revalidate(dentry, flags))
+ return -ECHILD;
+
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
@@ -2254,16 +2247,10 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
reval_dentry:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);
+ return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags);
full_reval:
- return nfs_do_lookup_revalidate(dir, dentry, flags);
-}
-
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
-{
- return __nfs_lookup_revalidate(dentry, flags,
- nfs4_do_lookup_revalidate);
+ return nfs_do_lookup_revalidate(dir, name, dentry, flags);
}
#endif /* CONFIG_NFSV4 */
@@ -2319,7 +2306,8 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
d_drop(dentry);
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (error)
goto out_error;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index b08dbe96bc57..f45beea92d03 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -303,6 +303,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error)
static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
get_dreq(hdr->dreq);
+ set_bit(NFS_IOHDR_ODIRECT, &hdr->flags);
}
static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f78115c6c2c1..98b45b636be3 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -164,18 +164,17 @@ decode_name(struct xdr_stream *xdr, u32 *id)
}
static struct nfsd_file *
-ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ struct nfs_client *clp, const struct cred *cred,
struct nfs_fh *fh, fmode_t mode)
{
- if (mode & FMODE_WRITE) {
- /*
- * Always request read and write access since this corresponds
- * to a rw layout.
- */
- mode |= FMODE_READ;
- }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
- return nfs_local_open_fh(clp, cred, fh, mode);
+ return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode);
+#else
+ return NULL;
+#endif
}
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
@@ -247,6 +246,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
spin_lock_init(&mirror->lock);
refcount_set(&mirror->ref, 1);
INIT_LIST_HEAD(&mirror->mirrors);
+ nfs_localio_file_init(&mirror->nfl);
}
return mirror;
}
@@ -257,6 +257,7 @@ static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
+ nfs_close_local_fh(&mirror->nfl);
cred = rcu_access_pointer(mirror->ro_cred);
put_cred(cred);
cred = rcu_access_pointer(mirror->rw_cred);
@@ -847,6 +848,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs4_pnfs_ds *ds;
u32 ds_idx;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
retry:
pnfs_generic_pg_check_layout(pgio, req);
/* Use full layout for now */
@@ -860,6 +864,8 @@ retry:
if (!pgio->pg_lseg)
goto out_nolseg;
}
+ /* Reset wb_nio, since getting layout segment was successful */
+ req->wb_nio = 0;
ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
if (!ds) {
@@ -876,14 +882,24 @@ retry:
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
pgio->pg_mirror_idx = ds_idx;
-
- if (NFS_SERVER(pgio->pg_inode)->flags &
- (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
- pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
- if (pgio->pg_error < 0)
- return;
+ if (pgio->pg_error < 0) {
+ if (pgio->pg_error != -EAGAIN)
+ return;
+ /* Retry getting layout segment if lower layer returned -EAGAIN */
+ if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
+ if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
+ pgio->pg_error = -ETIMEDOUT;
+ else
+ pgio->pg_error = -EIO;
+ return;
+ }
+ pgio->pg_error = 0;
+ /* Sleep for 1 second before retrying */
+ ssleep(1);
+ goto retry;
+ }
out_mds:
trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
0, NFS4_MAX_UINT64, IOMODE_READ,
@@ -1820,7 +1836,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->mds_offset = offset;
/* Start IO accounting for local read */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
+ localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ);
if (localio) {
hdr->task.tk_start = ktime_get();
ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
@@ -1896,7 +1912,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
hdr->args.offset = offset;
/* Start IO accounting for local write */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
FMODE_READ|FMODE_WRITE);
if (localio) {
hdr->task.tk_start = ktime_get();
@@ -1981,7 +1997,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
data->args.fh = fh;
/* Start IO accounting for local commit */
- localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
FMODE_READ|FMODE_WRITE);
if (localio) {
data->task.tk_start = ktime_get();
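
ff_layout_pg_init_read() now retries fetching a layout segment when the lower layer returns -EAGAIN, bounding the attempts by pg_maxretrans and sleeping one second between tries. A stand-alone sketch of that bounded retry loop, with get_layout() as a stand-in for the real layout-segment lookup:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int attempts_left = 3;

static int get_layout(void)
{
	return attempts_left-- > 1 ? -EAGAIN : 0;	/* succeed on the last try */
}

static int init_read(unsigned int maxretrans)
{
	unsigned int nio = 0;
	int err;

retry:
	err = get_layout();
	if (err == -EAGAIN) {
		if (maxretrans && nio++ > maxretrans)
			return -EIO;	/* give up, as in the hard-mount case */
		sleep(1);		/* ssleep(1) in the kernel code */
		goto retry;
	}
	return err;
}

int main(void)
{
	printf("init_read: %d\n", init_read(5));
	return 0;
}
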
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f84b3fb0dddd..095df09017a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -83,6 +83,7 @@ struct nfs4_ff_layout_mirror {
nfs4_stateid stateid;
const struct cred __rcu *ro_cred;
const struct cred __rcu *rw_cred;
+ struct nfs_file_localio nfl;
refcount_t ref;
spinlock_t lock;
unsigned long flags;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 596f35170137..1aa67fca69b2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1137,6 +1137,8 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
ctx->mdsthreshold = NULL;
+ nfs_localio_file_init(&ctx->nfl);
+
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
@@ -1168,6 +1170,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
nfs_sb_deactive(sb);
put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1));
kfree(ctx->mdsthreshold);
+ nfs_close_local_fh(&ctx->nfl);
kfree_rcu(ctx, rcu_head);
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e564bd11ba60..fae2c7ae4acc 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -455,11 +455,13 @@ extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
/* localio.c */
-extern void nfs_local_disable(struct nfs_client *);
extern void nfs_local_probe(struct nfs_client *);
+extern void nfs_local_probe_async(struct nfs_client *);
+extern void nfs_local_probe_async_work(struct work_struct *);
extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
const struct cred *,
struct nfs_fh *,
+ struct nfs_file_localio *,
const fmode_t);
extern int nfs_local_doio(struct nfs_client *,
struct nfsd_file *,
@@ -471,11 +473,12 @@ extern int nfs_local_commit(struct nfsd_file *,
extern bool nfs_server_is_local(const struct nfs_client *clp);
#else /* CONFIG_NFS_LOCALIO */
-static inline void nfs_local_disable(struct nfs_client *clp) {}
static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline void nfs_local_probe_async(struct nfs_client *clp) {}
static inline struct nfsd_file *
nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
- struct nfs_fh *fh, const fmode_t mode)
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
{
return NULL;
}
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index 4b8618cf114c..5c21caeae075 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -35,6 +35,7 @@ struct nfs_local_kiocb {
struct bio_vec *bvec;
struct nfs_pgio_header *hdr;
struct work_struct work;
+ void (*aio_complete_work)(struct work_struct *);
struct nfsd_file *localio;
};
@@ -48,9 +49,14 @@ struct nfs_local_fsync_ctx {
static bool localio_enabled __read_mostly = true;
module_param(localio_enabled, bool, 0644);
+static bool localio_O_DIRECT_semantics __read_mostly = false;
+module_param(localio_O_DIRECT_semantics, bool, 0644);
+MODULE_PARM_DESC(localio_O_DIRECT_semantics,
+ "LOCALIO will use O_DIRECT semantics to filesystem.");
+
static inline bool nfs_client_is_local(const struct nfs_client *clp)
{
- return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+ return !!rcu_access_pointer(clp->cl_uuid.net);
}
bool nfs_server_is_local(const struct nfs_client *clp)
@@ -116,30 +122,6 @@ const struct rpc_program nfslocalio_program = {
};
/*
- * nfs_local_enable - enable local i/o for an nfs_client
- */
-static void nfs_local_enable(struct nfs_client *clp)
-{
- spin_lock(&clp->cl_localio_lock);
- set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
- trace_nfs_local_enable(clp);
- spin_unlock(&clp->cl_localio_lock);
-}
-
-/*
- * nfs_local_disable - disable local i/o for an nfs_client
- */
-void nfs_local_disable(struct nfs_client *clp)
-{
- spin_lock(&clp->cl_localio_lock);
- if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) {
- trace_nfs_local_disable(clp);
- nfs_uuid_invalidate_one_client(&clp->cl_uuid);
- }
- spin_unlock(&clp->cl_localio_lock);
-}
-
-/*
* nfs_init_localioclient - Initialise an NFS localio client connection
*/
static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
@@ -178,7 +160,7 @@ static bool nfs_server_uuid_is_local(struct nfs_client *clp)
rpc_shutdown_client(rpcclient_localio);
/* Server is only local if it initialized required struct members */
- if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom)
+ if (status || !rcu_access_pointer(clp->cl_uuid.net) || !clp->cl_uuid.dom)
return false;
return true;
@@ -194,44 +176,64 @@ void nfs_local_probe(struct nfs_client *clp)
/* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */
if (!localio_enabled ||
clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
- nfs_local_disable(clp);
+ nfs_localio_disable_client(clp);
return;
}
if (nfs_client_is_local(clp)) {
/* If already enabled, disable and re-enable */
- nfs_local_disable(clp);
+ nfs_localio_disable_client(clp);
}
if (!nfs_uuid_begin(&clp->cl_uuid))
return;
if (nfs_server_uuid_is_local(clp))
- nfs_local_enable(clp);
+ nfs_localio_enable_client(clp);
nfs_uuid_end(&clp->cl_uuid);
}
EXPORT_SYMBOL_GPL(nfs_local_probe);
+void nfs_local_probe_async_work(struct work_struct *work)
+{
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_local_probe_work);
+
+ nfs_local_probe(clp);
+}
+
+void nfs_local_probe_async(struct nfs_client *clp)
+{
+ queue_work(nfsiod_workqueue, &clp->cl_local_probe_work);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe_async);
+
+static inline struct nfsd_file *nfs_local_file_get(struct nfsd_file *nf)
+{
+ return nfs_to->nfsd_file_get(nf);
+}
+
+static inline void nfs_local_file_put(struct nfsd_file *nf)
+{
+ nfs_to->nfsd_file_put(nf);
+}
+
/*
- * nfs_local_open_fh - open a local filehandle in terms of nfsd_file
+ * __nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
*
- * Returns a pointer to a struct nfsd_file or NULL
+ * Returns a pointer to a struct nfsd_file or ERR_PTR.
+ * Caller must release returned nfsd_file with nfs_to_nfsd_file_put_local().
*/
-struct nfsd_file *
-nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
- struct nfs_fh *fh, const fmode_t mode)
+static struct nfsd_file *
+__nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
{
struct nfsd_file *localio;
- int status;
-
- if (!nfs_server_is_local(clp))
- return NULL;
- if (mode & ~(FMODE_READ | FMODE_WRITE))
- return NULL;
localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
- cred, fh, mode);
+ cred, fh, nfl, mode);
if (IS_ERR(localio)) {
- status = PTR_ERR(localio);
+ int status = PTR_ERR(localio);
trace_nfs_local_open_fh(fh, mode, status);
switch (status) {
case -ENOMEM:
@@ -240,10 +242,59 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
/* Revalidate localio, will disable if unsupported */
nfs_local_probe(clp);
}
- return NULL;
}
return localio;
}
+
+/*
+ * nfs_local_open_fh - open a local filehandle in terms of nfsd_file.
+ * First checking if the open nfsd_file is already cached, otherwise
+ * must __nfs_local_open_fh and insert the nfsd_file in nfs_file_localio.
+ *
+ * Returns a pointer to a struct nfsd_file or NULL.
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, struct nfs_file_localio *nfl,
+ const fmode_t mode)
+{
+ struct nfsd_file *nf, *new, __rcu **pnf;
+
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ if (mode & FMODE_WRITE)
+ pnf = &nfl->rw_file;
+ else
+ pnf = &nfl->ro_file;
+
+ new = NULL;
+ rcu_read_lock();
+ nf = rcu_dereference(*pnf);
+ if (!nf) {
+ rcu_read_unlock();
+ new = __nfs_local_open_fh(clp, cred, fh, nfl, mode);
+ if (IS_ERR(new))
+ return NULL;
+ /* try to swap in the pointer */
+ spin_lock(&clp->cl_uuid.lock);
+ nf = rcu_dereference_protected(*pnf, 1);
+ if (!nf) {
+ nf = new;
+ new = NULL;
+ rcu_assign_pointer(*pnf, nf);
+ }
+ spin_unlock(&clp->cl_uuid.lock);
+ rcu_read_lock();
+ }
+ nf = nfs_local_file_get(nf);
+ rcu_read_unlock();
+ if (new)
+ nfs_to_nfsd_file_put_local(new);
+ return nf;
+}
EXPORT_SYMBOL_GPL(nfs_local_open_fh);
static struct bio_vec *
@@ -285,10 +336,19 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
kfree(iocb);
return NULL;
}
- init_sync_kiocb(&iocb->kiocb, file);
+
+ if (localio_O_DIRECT_semantics &&
+ test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
+ iocb->kiocb.ki_filp = file;
+ iocb->kiocb.ki_flags = IOCB_DIRECT;
+ } else
+ init_sync_kiocb(&iocb->kiocb, file);
+
iocb->kiocb.ki_pos = hdr->args.offset;
iocb->hdr = hdr;
iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ iocb->aio_complete_work = NULL;
+
return iocb;
}
@@ -328,7 +388,7 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
hdr->res.op_status = NFS4_OK;
hdr->task.tk_status = 0;
} else {
- hdr->res.op_status = nfs4_stat_to_errno(status);
+ hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
hdr->task.tk_status = status;
}
}
@@ -338,11 +398,23 @@ nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
- nfs_to_nfsd_file_put_local(iocb->localio);
+ nfs_local_file_put(iocb->localio);
nfs_local_iocb_free(iocb);
nfs_local_hdr_release(hdr, hdr->task.tk_ops);
}
+/*
+ * Complete the I/O from iocb->kiocb.ki_complete()
+ *
+ * Note that this function can be called from a bottom half context,
+ * hence we need to queue the rpc_call_done() etc to a workqueue
+ */
+static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb)
+{
+ INIT_WORK(&iocb->work, iocb->aio_complete_work);
+ queue_work(nfsiod_workqueue, &iocb->work);
+}
+
static void
nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
{
@@ -365,6 +437,23 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
status > 0 ? status : 0, hdr->res.eof);
}
+static void nfs_local_read_aio_complete_work(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
+
+ nfs_local_read_done(iocb, ret);
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
+}
+
static void nfs_local_call_read(struct work_struct *work)
{
struct nfs_local_kiocb *iocb =
@@ -379,10 +468,10 @@ static void nfs_local_call_read(struct work_struct *work)
nfs_local_iter_init(&iter, iocb, READ);
status = filp->f_op->read_iter(&iocb->kiocb, &iter);
- WARN_ON_ONCE(status == -EIOCBQUEUED);
-
- nfs_local_read_done(iocb, status);
- nfs_local_pgio_release(iocb);
+ if (status != -EIOCBQUEUED) {
+ nfs_local_read_done(iocb, status);
+ nfs_local_pgio_release(iocb);
+ }
revert_creds(save_cred);
}
@@ -410,6 +499,11 @@ nfs_do_local_read(struct nfs_pgio_header *hdr,
nfs_local_pgio_init(hdr, call_ops);
hdr->res.eof = false;
+ if (iocb->kiocb.ki_flags & IOCB_DIRECT) {
+ iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
+ iocb->aio_complete_work = nfs_local_read_aio_complete_work;
+ }
+
INIT_WORK(&iocb->work, nfs_local_call_read);
queue_work(nfslocaliod_workqueue, &iocb->work);
@@ -534,6 +628,24 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
nfs_local_pgio_done(hdr, status);
}
+static void nfs_local_write_aio_complete_work(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
+static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(kiocb, struct nfs_local_kiocb, kiocb);
+
+ nfs_local_write_done(iocb, ret);
+ nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
+}
+
static void nfs_local_call_write(struct work_struct *work)
{
struct nfs_local_kiocb *iocb =
@@ -552,11 +664,11 @@ static void nfs_local_call_write(struct work_struct *work)
file_start_write(filp);
status = filp->f_op->write_iter(&iocb->kiocb, &iter);
file_end_write(filp);
- WARN_ON_ONCE(status == -EIOCBQUEUED);
-
- nfs_local_write_done(iocb, status);
- nfs_local_vfs_getattr(iocb);
- nfs_local_pgio_release(iocb);
+ if (status != -EIOCBQUEUED) {
+ nfs_local_write_done(iocb, status);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+ }
revert_creds(save_cred);
current->flags = old_flags;
@@ -592,10 +704,16 @@ nfs_do_local_write(struct nfs_pgio_header *hdr,
case NFS_FILE_SYNC:
iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
}
+
nfs_local_pgio_init(hdr, call_ops);
nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+ if (iocb->kiocb.ki_flags & IOCB_DIRECT) {
+ iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
+ iocb->aio_complete_work = nfs_local_write_aio_complete_work;
+ }
+
INIT_WORK(&iocb->work, nfs_local_call_write);
queue_work(nfslocaliod_workqueue, &iocb->work);
@@ -626,8 +744,8 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
if (status != 0) {
if (status == -EAGAIN)
- nfs_local_disable(clp);
- nfs_to_nfsd_file_put_local(localio);
+ nfs_localio_disable_client(clp);
+ nfs_local_file_put(localio);
hdr->task.tk_status = status;
nfs_local_hdr_release(hdr, call_ops);
}
@@ -668,7 +786,7 @@ nfs_local_commit_done(struct nfs_commit_data *data, int status)
data->task.tk_status = 0;
} else {
nfs_reset_boot_verifier(data->inode);
- data->res.op_status = nfs4_stat_to_errno(status);
+ data->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
data->task.tk_status = status;
}
}
@@ -678,7 +796,7 @@ nfs_local_release_commit_data(struct nfsd_file *localio,
struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops)
{
- nfs_to_nfsd_file_put_local(localio);
+ nfs_local_file_put(localio);
call_ops->rpc_call_done(&data->task, data);
call_ops->rpc_release(data);
}
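
nfs_local_open_fh() now caches the opened nfsd_file in struct nfs_file_localio: readers pick up the cached pointer under RCU, a fresh open is published under the cl_uuid spinlock, and a racer that loses the publish drops its extra reference. The stand-alone sketch below mimics that open-once/publish/loser-puts pattern with C11 atomics and a mutex in place of RCU; struct file_ref and open_backing_file() are illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct file_ref { int refs; };

static _Atomic(struct file_ref *) cached;
static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER;

static struct file_ref *open_backing_file(void)
{
	struct file_ref *f = calloc(1, sizeof(*f));

	f->refs = 1;
	return f;
}

static struct file_ref *get_cached_file(void)
{
	struct file_ref *f = atomic_load(&cached), *new = NULL;

	if (!f) {
		new = open_backing_file();
		pthread_mutex_lock(&publish_lock);
		f = atomic_load(&cached);
		if (!f) {			/* we won: publish our open */
			f = new;
			new = NULL;
			atomic_store(&cached, f);
		}
		pthread_mutex_unlock(&publish_lock);
	}
	f->refs++;				/* caller's reference */
	if (new)
		free(new);			/* lost the race: drop our copy */
	return f;
}

int main(void)
{
	printf("refs=%d\n", get_cached_file()->refs);
	return 0;
}
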
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 2d53574da605..973aed9cc5fe 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -308,7 +308,7 @@ int nfs_submount(struct fs_context *fc, struct nfs_server *server)
int err;
/* Look it up again to get its attributes */
- err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry,
+ err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry, &dentry->d_name,
ctx->mntfh, ctx->clone_data.fattr);
dput(parent);
if (err != 0)
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 1566163c6d85..0c3bc98cd999 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -192,7 +192,7 @@ __nfs3_proc_lookup(struct inode *dir, const char *name, size_t len,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
unsigned short task_flags = 0;
@@ -202,8 +202,7 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
task_flags |= RPC_TASK_TIMEOUT;
dprintk("NFS call lookup %pd2\n", dentry);
- return __nfs3_proc_lookup(dir, dentry->d_name.name,
- dentry->d_name.len, fhandle, fattr,
+ return __nfs3_proc_lookup(dir, name->name, name->len, fhandle, fattr,
task_flags);
}
@@ -844,6 +843,41 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+static unsigned nfs3_localio_probe_throttle __read_mostly = 0;
+module_param(nfs3_localio_probe_throttle, uint, 0644);
+MODULE_PARM_DESC(nfs3_localio_probe_throttle,
+ "Probe for NFSv3 LOCALIO every N IO requests. Must be power-of-2, defaults to 0 (probing disabled).");
+
+static void nfs3_localio_probe(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ /* Throttled to reduce nfs_local_probe_async() frequency */
+ if (!nfs3_localio_probe_throttle || nfs_server_is_local(clp))
+ return;
+
+ /*
+ * Try (re)enabling LOCALIO if isn't enabled -- admin deems
+ * it worthwhile to periodically check if LOCALIO possible by
+ * setting the 'nfs3_localio_probe_throttle' module parameter.
+ *
+ * This is useful if LOCALIO was previously enabled, but was
+ * disabled due to server restart, and IO has successfully
+ * completed in terms of normal RPC.
+ */
+ if ((clp->cl_uuid.nfs3_localio_probe_count++ &
+ (nfs3_localio_probe_throttle - 1)) == 0) {
+ if (!nfs_server_is_local(clp))
+ nfs_local_probe_async(clp);
+ }
+}
+
+#else
+static void nfs3_localio_probe(struct nfs_server *server) {}
+#endif
+
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
@@ -855,8 +889,11 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0 && !server->read_hdrsize)
- cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ if (task->tk_status >= 0) {
+ if (!server->read_hdrsize)
+ cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+ nfs3_localio_probe(server);
+ }
nfs_invalidate_atime(inode);
nfs_refresh_inode(inode, &hdr->fattr);
@@ -886,8 +923,10 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
nfs_writeback_update_inode(hdr);
+ nfs3_localio_probe(NFS_SERVER(inode));
+ }
return 0;
}
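
nfs3_localio_probe() is throttled by a power-of-two module parameter: with nfs3_localio_probe_throttle set to N, the probe fires once every N completed reads/writes, because (count++ & (N - 1)) == 0 holds exactly once per N increments. A tiny demonstration of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int throttle = 4;	/* e.g. nfs3_localio_probe_throttle=4 */
	unsigned int count = 0;

	for (int io = 0; io < 10; io++) {
		if (throttle && (count++ & (throttle - 1)) == 0)
			printf("io %d: probe LOCALIO\n", io);
	}
	return 0;
}
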
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 531c9c20ef1d..1924c4a2077b 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -498,15 +498,15 @@ out_put_src_lock:
return err;
}
-struct nfs42_offloadcancel_data {
+struct nfs42_offload_data {
struct nfs_server *seq_server;
struct nfs42_offload_status_args args;
struct nfs42_offload_status_res res;
};
-static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
+static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
nfs4_setup_sequence(data->seq_server->nfs_client,
&data->args.osa_seq_args,
@@ -515,7 +515,7 @@ static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
- struct nfs42_offloadcancel_data *data = calldata;
+ struct nfs42_offload_data *data = calldata;
trace_nfs4_offload_cancel(&data->args, task->tk_status);
nfs41_sequence_done(task, &data->res.osr_seq_res);
@@ -525,22 +525,22 @@ static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
}
-static void nfs42_free_offloadcancel_data(void *data)
+static void nfs42_offload_release(void *data)
{
kfree(data);
}
static const struct rpc_call_ops nfs42_offload_cancel_ops = {
- .rpc_call_prepare = nfs42_offload_cancel_prepare,
+ .rpc_call_prepare = nfs42_offload_prepare,
.rpc_call_done = nfs42_offload_cancel_done,
- .rpc_release = nfs42_free_offloadcancel_data,
+ .rpc_release = nfs42_offload_release,
};
static int nfs42_do_offload_cancel_async(struct file *dst,
nfs4_stateid *stateid)
{
struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
- struct nfs42_offloadcancel_data *data = NULL;
+ struct nfs42_offload_data *data = NULL;
struct nfs_open_context *ctx = nfs_file_open_context(dst);
struct rpc_task *task;
struct rpc_message msg = {
@@ -552,14 +552,14 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
.rpc_message = &msg,
.callback_ops = &nfs42_offload_cancel_ops,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
int status;
if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
return -EOPNOTSUPP;
- data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -861,7 +861,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
.rpc_message = &msg,
.callback_ops = &nfs42_layoutstat_ops,
.callback_data = data,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
struct rpc_task *task;
@@ -1016,7 +1016,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
struct rpc_task_setup task_setup = {
.rpc_message = &msg,
.callback_ops = &nfs42_layouterror_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
unsigned int i;
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 9e3ae53e2205..5072d7ea72e9 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -144,9 +144,11 @@
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_copy_notify_maxsz)
#define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_copy_notify_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
@@ -549,7 +551,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
}
/*
- * Encode OFFLOAD_CANEL request
+ * Encode OFFLOAD_CANCEL request
*/
static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
struct xdr_stream *xdr,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d615d520f8cf..df9669d4ded7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4544,15 +4544,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
- .name = &dentry->d_name,
+ .name = name,
};
struct nfs4_lookup_res res = {
.server = server,
@@ -4594,17 +4594,16 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- struct dentry *dentry, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+ struct dentry *dentry, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = {
.interruptible = true,
};
struct rpc_clnt *client = *clnt;
- const struct qstr *name = &dentry->d_name;
int err;
do {
- err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
+ err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
trace_nfs4_lookup(dir, name, err);
switch (err) {
case -NFS4ERR_BADNAME:
@@ -4639,13 +4638,13 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
+static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
@@ -4660,7 +4659,8 @@ nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
+ fhandle, fattr);
if (status < 0)
return ERR_PTR(status);
return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9a9f60a2291b..542cdf71229f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1955,6 +1955,7 @@ restart:
}
rcu_read_unlock();
nfs4_free_state_owners(&freeme);
+ nfs_local_probe_async(clp);
if (lost_locks)
pr_warn("NFS: %s: lost %d locks\n",
clp->cl_hostname, lost_locks);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 886a7c4c60b3..d1a92d8f8ba4 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -17,7 +17,7 @@ static const int nfs_set_port_min;
static const int nfs_set_port_max = 65535;
static struct ctl_table_header *nfs4_callback_sysctl_table;
-static struct ctl_table nfs4_cb_sysctls[] = {
+static const struct ctl_table nfs4_cb_sysctls[] = {
{
.procname = "nfs_callback_tcpport",
.data = &nfs_callback_set_tcpport,
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 1eab98c277fa..7a058bd8c566 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1714,38 +1714,6 @@ TRACE_EVENT(nfs_local_open_fh,
)
);
-DECLARE_EVENT_CLASS(nfs_local_client_event,
- TP_PROTO(
- const struct nfs_client *clp
- ),
-
- TP_ARGS(clp),
-
- TP_STRUCT__entry(
- __field(unsigned int, protocol)
- __string(server, clp->cl_hostname)
- ),
-
- TP_fast_assign(
- __entry->protocol = clp->rpc_ops->version;
- __assign_str(server);
- ),
-
- TP_printk(
- "server=%s NFSv%u", __get_str(server), __entry->protocol
- )
-);
-
-#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
- DEFINE_EVENT(nfs_local_client_event, name, \
- TP_PROTO( \
- const struct nfs_client *clp \
- ), \
- TP_ARGS(clp))
-
-DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable);
-DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable);
-
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e27c07bd8929..11968dcb7243 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -961,8 +961,9 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
struct nfsd_file *localio =
- nfs_local_open_fh(clp, hdr->cred,
- hdr->args.fh, hdr->args.context->mode);
+ nfs_local_open_fh(clp, hdr->cred, hdr->args.fh,
+ &hdr->args.context->nfl,
+ hdr->args.context->mode);
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 6c09cd090c34..77920a2e3cef 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -153,13 +153,13 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct dentry *dentry,
+nfs_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
- .name = dentry->d_name.name,
- .len = dentry->d_name.len
+ .name = name->name,
+ .len = name->len
};
struct nfs_diropok res = {
.fh = fhandle,
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index e645be1a3381..f579df0e8d67 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -14,7 +14,7 @@
static struct ctl_table_header *nfs_callback_sysctl_table;
-static struct ctl_table nfs_cb_sysctls[] = {
+static const struct ctl_table nfs_cb_sysctls[] = {
{
.procname = "nfs_mountpoint_timeout",
.data = &nfs_mountpoint_expiry_timeout,
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index bf378ecd5d9f..7b59a40d40c0 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server,
char name[RPC_CLIENT_NAME_SIZE];
int ret;
- strcpy(name, clnt->cl_program->name);
- strcat(name, uniq ? uniq : "");
- strcat(name, "_client");
+ strscpy(name, clnt->cl_program->name, sizeof(name));
+ strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
+ strncat(name, "_client", sizeof(name) - strlen(name) - 1);
ret = sysfs_create_link_nowarn(&server->kobj,
&clnt->cl_sysfs->kobject, name);
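
The sysfs link name is now built with bounded copies: strscpy() caps the initial copy and each strncat() leaves room for the terminating NUL. A small user-space equivalent, with snprintf() standing in for strscpy():

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 64

int main(void)
{
	char name[NAME_SIZE];
	const char *prog = "nfs", *uniq = "_srv1";

	snprintf(name, sizeof(name), "%s", prog);	/* bounded, like strscpy() */
	strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
	strncat(name, "_client", sizeof(name) - strlen(name) - 1);
	printf("%s\n", name);
	return 0;
}
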
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 50fa539611f5..aa3d8bea3ec0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1826,7 +1826,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
task_flags = RPC_TASK_MOVEABLE;
localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
- data->args.fh, data->context->mode);
+ data->args.fh, &data->context->nfl,
+ data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
RPC_TASK_CRED_NOREF | task_flags, localio);
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index a5e54809701e..c10ead273ff2 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,8 +6,9 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+CFLAGS_localio_trace.o += -I$(src)
obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
-nfs_localio-objs := nfslocalio.o
+nfs_localio-objs := nfslocalio.o localio_trace.o
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
index 34a115176f97..af09aed09fd2 100644
--- a/fs/nfs_common/common.c
+++ b/fs/nfs_common/common.c
@@ -15,7 +15,7 @@ static const struct {
{ NFS_OK, 0 },
{ NFSERR_PERM, -EPERM },
{ NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_IO, -EIO },
{ NFSERR_NXIO, -ENXIO },
/* { NFSERR_EAGAIN, -EAGAIN }, */
{ NFSERR_ACCES, -EACCES },
@@ -45,7 +45,6 @@ static const struct {
{ NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
};
/**
@@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status)
{
int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
if (nfs_errtbl[i].stat == (int)status)
return nfs_errtbl[i].errno;
}
- return nfs_errtbl[i].errno;
+ return -EIO;
}
EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
/*
* We need to translate between nfs v4 status return values and
* the local errno values which may not be the same.
+ *
+ * nfs4_errtbl_common[] is used before more specialized mappings
+ * available in nfs4_errtbl[] or nfs4_errtbl_localio[].
*/
static const struct {
int stat;
int errno;
-} nfs4_errtbl[] = {
+} nfs4_errtbl_common[] = {
{ NFS4_OK, 0 },
{ NFS4ERR_PERM, -EPERM },
{ NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_IO, -EIO },
{ NFS4ERR_NXIO, -ENXIO },
{ NFS4ERR_ACCESS, -EACCES },
{ NFS4ERR_EXIST, -EEXIST },
@@ -98,15 +100,20 @@ static const struct {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
+};
+
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_NOXATTR, -ENODATA },
{ NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
};
/*
@@ -116,7 +123,14 @@ static const struct {
int nfs4_stat_to_errno(int stat)
{
int i;
- for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].stat == stat)
+ return nfs4_errtbl_common[i].errno;
+ }
+ /* Then check nfs4_errtbl */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) {
if (nfs4_errtbl[i].stat == stat)
return nfs4_errtbl[i].errno;
}
@@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat)
return -stat;
}
EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
+
+/*
+ * This table is useful for conversion from local errno to NFS error.
+ * It provides more logically correct mappings for use with LOCALIO
+ * (which is focused on converting from errno to NFS status).
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl_localio[] = {
+ /* Map errors differently than nfs4_errtbl */
+ { NFS4ERR_IO, -EREMOTEIO },
+ { NFS4ERR_DELAY, -EAGAIN },
+ { NFS4ERR_FBIG, -E2BIG },
+ /* Map errors not handled by nfs4_errtbl */
+ { NFS4ERR_STALE, -EBADF },
+ { NFS4ERR_STALE, -EOPENSTALE },
+ { NFS4ERR_DELAY, -ETIMEDOUT },
+ { NFS4ERR_DELAY, -ERESTARTSYS },
+ { NFS4ERR_DELAY, -ENOMEM },
+ { NFS4ERR_IO, -ETXTBSY },
+ { NFS4ERR_IO, -EBUSY },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -ENFILE },
+ { NFS4ERR_IO, -EUCLEAN },
+ { NFS4ERR_PERM, -ENOKEY },
+};
+
+/*
+ * Convert an errno to an NFS error code for LOCALIO.
+ */
+__u32 nfs_localio_errno_to_nfs4_stat(int errno)
+{
+ int i;
+
+ /* First check nfs4_errtbl_common */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
+ if (nfs4_errtbl_common[i].errno == errno)
+ return nfs4_errtbl_common[i].stat;
+ }
+ /* Then check nfs4_errtbl_localio */
+ for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) {
+ if (nfs4_errtbl_localio[i].errno == errno)
+ return nfs4_errtbl_localio[i].stat;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return NFS4ERR_SERVERFAULT;
+}
+EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat);
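
nfs_localio_errno_to_nfs4_stat() converts a local errno back to an NFS4 status by consulting the shared nfs4_errtbl_common[] first, then the LOCALIO-specific table, and falling back to NFS4ERR_SERVERFAULT when nothing matches. A user-space sketch of the same two-table lookup; the numeric status values are placeholders standing in for the NFS4ERR_* constants.

#include <errno.h>
#include <stdio.h>

struct map { int stat; int err; };

/* Placeholder status codes standing in for NFS4_OK, NFS4ERR_DELAY, ... */
enum { ST_OK = 0, ST_PERM = 1, ST_IO = 5, ST_DELAY = 10008, ST_SERVERFAULT = 10006 };

static const struct map common_tbl[] = {
	{ ST_OK, 0 }, { ST_PERM, -EPERM }, { ST_IO, -EIO },
};
static const struct map localio_tbl[] = {
	{ ST_DELAY, -EAGAIN }, { ST_DELAY, -ENOMEM },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int errno_to_stat(int err)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(common_tbl); i++)
		if (common_tbl[i].err == err)
			return common_tbl[i].stat;
	for (unsigned int i = 0; i < ARRAY_SIZE(localio_tbl); i++)
		if (localio_tbl[i].err == err)
			return localio_tbl[i].stat;
	return ST_SERVERFAULT;		/* untranslatable: let recovery handle it */
}

int main(void)
{
	printf("-ENOMEM -> %d\n", errno_to_stat(-ENOMEM));
	return 0;
}
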
diff --git a/fs/nfs_common/localio_trace.c b/fs/nfs_common/localio_trace.c
new file mode 100644
index 000000000000..7decfe57abeb
--- /dev/null
+++ b/fs/nfs_common/localio_trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#include <linux/nfs_fs.h>
+#include <linux/namei.h>
+
+#define CREATE_TRACE_POINTS
+#include "localio_trace.h"
diff --git a/fs/nfs_common/localio_trace.h b/fs/nfs_common/localio_trace.h
new file mode 100644
index 000000000000..4055aec9ff8d
--- /dev/null
+++ b/fs/nfs_common/localio_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nfs_localio
+
+#if !defined(_TRACE_NFS_COMMON_LOCALIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NFS_COMMON_LOCALIO_H
+
+#include <linux/tracepoint.h>
+
+#include <trace/misc/fs.h>
+#include <trace/misc/nfs.h>
+#include <trace/misc/sunrpc.h>
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_disable_client);
+
+#endif /* _TRACE_NFS_COMMON_LOCALIO_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE localio_trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
index a74ec08f6c96..6a0bdea6d644 100644
--- a/fs/nfs_common/nfslocalio.c
+++ b/fs/nfs_common/nfslocalio.c
@@ -7,38 +7,67 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/nfslocalio.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
#include <net/netns/generic.h>
+#include "localio_trace.h"
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFS localio protocol bypass support");
-static DEFINE_SPINLOCK(nfs_uuid_lock);
+static DEFINE_SPINLOCK(nfs_uuids_lock);
/*
* Global list of nfs_uuid_t instances
- * that is protected by nfs_uuid_lock.
+ * that is protected by nfs_uuids_lock.
*/
static LIST_HEAD(nfs_uuids);
+/*
+ * Lock ordering:
+ * 1: nfs_uuid->lock
+ * 2: nfs_uuids_lock
+ * 3: nfs_uuid->list_lock (aka nn->local_clients_lock)
+ *
+ * May skip locks in select cases, but never hold multiple
+ * locks out of order.
+ */
+
void nfs_uuid_init(nfs_uuid_t *nfs_uuid)
{
- nfs_uuid->net = NULL;
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
nfs_uuid->dom = NULL;
+ nfs_uuid->list_lock = NULL;
INIT_LIST_HEAD(&nfs_uuid->list);
+ INIT_LIST_HEAD(&nfs_uuid->files);
+ spin_lock_init(&nfs_uuid->lock);
+ nfs_uuid->nfs3_localio_probe_count = 0;
}
EXPORT_SYMBOL_GPL(nfs_uuid_init);
bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
{
- spin_lock(&nfs_uuid_lock);
- /* Is this nfs_uuid already in use? */
+ spin_lock(&nfs_uuid->lock);
+ if (rcu_access_pointer(nfs_uuid->net)) {
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuid->lock);
+ return false;
+ }
+
+ spin_lock(&nfs_uuids_lock);
if (!list_empty(&nfs_uuid->list)) {
- spin_unlock(&nfs_uuid_lock);
+ /* This nfs_uuid is already in use */
+ spin_unlock(&nfs_uuids_lock);
+ spin_unlock(&nfs_uuid->lock);
return false;
}
- uuid_gen(&nfs_uuid->uuid);
list_add_tail(&nfs_uuid->list, &nfs_uuids);
- spin_unlock(&nfs_uuid_lock);
+ spin_unlock(&nfs_uuids_lock);
+
+ uuid_gen(&nfs_uuid->uuid);
+ spin_unlock(&nfs_uuid->lock);
return true;
}
@@ -46,12 +75,16 @@ EXPORT_SYMBOL_GPL(nfs_uuid_begin);
void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
{
- if (nfs_uuid->net == NULL) {
- spin_lock(&nfs_uuid_lock);
- if (nfs_uuid->net == NULL)
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ spin_lock(&nfs_uuid->lock);
+ if (!rcu_access_pointer(nfs_uuid->net)) {
+ /* Not local, remove from nfs_uuids */
+ spin_lock(&nfs_uuids_lock);
list_del_init(&nfs_uuid->list);
- spin_unlock(&nfs_uuid_lock);
- }
+ spin_unlock(&nfs_uuids_lock);
+ }
+ spin_unlock(&nfs_uuid->lock);
+ }
}
EXPORT_SYMBOL_GPL(nfs_uuid_end);
@@ -69,68 +102,142 @@ static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid)
static struct module *nfsd_mod;
void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
- struct net *net, struct auth_domain *dom,
- struct module *mod)
+ spinlock_t *list_lock, struct net *net,
+ struct auth_domain *dom, struct module *mod)
{
nfs_uuid_t *nfs_uuid;
- spin_lock(&nfs_uuid_lock);
+ spin_lock(&nfs_uuids_lock);
nfs_uuid = nfs_uuid_lookup_locked(uuid);
- if (nfs_uuid) {
- kref_get(&dom->ref);
- nfs_uuid->dom = dom;
- /*
- * We don't hold a ref on the net, but instead put
- * ourselves on a list so the net pointer can be
- * invalidated.
- */
- list_move(&nfs_uuid->list, list);
- rcu_assign_pointer(nfs_uuid->net, net);
-
- __module_get(mod);
- nfsd_mod = mod;
+ if (!nfs_uuid) {
+ spin_unlock(&nfs_uuids_lock);
+ return;
}
- spin_unlock(&nfs_uuid_lock);
+
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on @list (nn->local_clients) so the net
+ * pointer can be invalidated.
+ */
+ spin_lock(list_lock); /* list_lock is nn->local_clients_lock */
+ list_move(&nfs_uuid->list, list);
+ spin_unlock(list_lock);
+
+ spin_unlock(&nfs_uuids_lock);
+ /* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */
+ spin_lock(&nfs_uuid->lock);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+
+ nfs_uuid->list_lock = list_lock;
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ rcu_assign_pointer(nfs_uuid->net, net);
+ spin_unlock(&nfs_uuid->lock);
}
EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
-static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid)
+void nfs_localio_enable_client(struct nfs_client *clp)
+{
+ /* nfs_uuid_is_local() does the actual enablement */
+ trace_nfs_localio_enable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_enable_client);
+
+/*
+ * Cleanup the nfs_uuid_t embedded in an nfs_client.
+ * This is the long-form of nfs_uuid_init().
+ */
+static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid)
{
- if (nfs_uuid->net) {
- module_put(nfsd_mod);
- nfs_uuid->net = NULL;
+ LIST_HEAD(local_files);
+ struct nfs_file_localio *nfl, *tmp;
+
+ spin_lock(&nfs_uuid->lock);
+ if (unlikely(!rcu_access_pointer(nfs_uuid->net))) {
+ spin_unlock(&nfs_uuid->lock);
+ return false;
}
+ RCU_INIT_POINTER(nfs_uuid->net, NULL);
+
if (nfs_uuid->dom) {
auth_domain_put(nfs_uuid->dom);
nfs_uuid->dom = NULL;
}
- list_del_init(&nfs_uuid->list);
+
+ list_splice_init(&nfs_uuid->files, &local_files);
+ spin_unlock(&nfs_uuid->lock);
+
+ /* Walk list of files and ensure their last references are dropped */
+ list_for_each_entry_safe(nfl, tmp, &local_files, list) {
+ nfs_close_local_fh(nfl);
+ cond_resched();
+ }
+
+ spin_lock(&nfs_uuid->lock);
+ BUG_ON(!list_empty(&nfs_uuid->files));
+
+ /* Remove client from nn->local_clients */
+ if (nfs_uuid->list_lock) {
+ spin_lock(nfs_uuid->list_lock);
+ BUG_ON(list_empty(&nfs_uuid->list));
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(nfs_uuid->list_lock);
+ nfs_uuid->list_lock = NULL;
+ }
+
+ module_put(nfsd_mod);
+ spin_unlock(&nfs_uuid->lock);
+
+ return true;
}
-void nfs_uuid_invalidate_clients(struct list_head *list)
+void nfs_localio_disable_client(struct nfs_client *clp)
{
+ if (nfs_uuid_put(&clp->cl_uuid))
+ trace_nfs_localio_disable_client(clp);
+}
+EXPORT_SYMBOL_GPL(nfs_localio_disable_client);
+
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock)
+{
+ LIST_HEAD(local_clients);
nfs_uuid_t *nfs_uuid, *tmp;
+ struct nfs_client *clp;
- spin_lock(&nfs_uuid_lock);
- list_for_each_entry_safe(nfs_uuid, tmp, list, list)
- nfs_uuid_put_locked(nfs_uuid);
- spin_unlock(&nfs_uuid_lock);
+ spin_lock(nn_local_clients_lock);
+ list_splice_init(nn_local_clients, &local_clients);
+ spin_unlock(nn_local_clients_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) {
+ if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock))
+ break;
+ clp = container_of(nfs_uuid, struct nfs_client, cl_uuid);
+ nfs_localio_disable_client(clp);
+ }
}
-EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients);
+EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients);
-void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid)
+static void nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl)
{
- if (nfs_uuid->net) {
- spin_lock(&nfs_uuid_lock);
- nfs_uuid_put_locked(nfs_uuid);
- spin_unlock(&nfs_uuid_lock);
+ /* Add nfl to nfs_uuid->files if it isn't already */
+ spin_lock(&nfs_uuid->lock);
+ if (list_empty(&nfl->list)) {
+ rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid);
+ list_add_tail(&nfl->list, &nfs_uuid->files);
}
+ spin_unlock(&nfs_uuid->lock);
}
-EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+/*
+ * Caller is responsible for calling nfsd_net_put and
+ * nfsd_file_put (via nfs_to_nfsd_file_put_local).
+ */
struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
struct rpc_clnt *rpc_clnt, const struct cred *cred,
- const struct nfs_fh *nfs_fh, const fmode_t fmode)
+ const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl,
+ const fmode_t fmode)
{
struct net *net;
struct nfsd_file *localio;
@@ -139,7 +246,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
* Not running in nfsd context, so must safely get reference on nfsd_serv.
* But the server may already be shutting down, if so disallow new localio.
* uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
- * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe
+ * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe
* and if it succeeds we will have an implied reference to the net.
*
* Otherwise NFS may not have ref on NFSD and therefore cannot safely
@@ -147,21 +254,62 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
*/
rcu_read_lock();
net = rcu_dereference(uuid->net);
- if (!net || !nfs_to->nfsd_serv_try_get(net)) {
+ if (!net || !nfs_to->nfsd_net_try_get(net)) {
rcu_read_unlock();
return ERR_PTR(-ENXIO);
}
rcu_read_unlock();
- /* We have an implied reference to net thanks to nfsd_serv_try_get */
+ /* We have an implied reference to net thanks to nfsd_net_try_get */
localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
cred, nfs_fh, fmode);
if (IS_ERR(localio))
nfs_to_nfsd_net_put(net);
+ else
+ nfs_uuid_add_file(uuid, nfl);
return localio;
}
EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+ struct nfsd_file *ro_nf = NULL;
+ struct nfsd_file *rw_nf = NULL;
+ nfs_uuid_t *nfs_uuid;
+
+ rcu_read_lock();
+ nfs_uuid = rcu_dereference(nfl->nfs_uuid);
+ if (!nfs_uuid) {
+ /* regular (non-LOCALIO) NFS will hammer this */
+ rcu_read_unlock();
+ return;
+ }
+
+ ro_nf = rcu_access_pointer(nfl->ro_file);
+ rw_nf = rcu_access_pointer(nfl->rw_file);
+ if (ro_nf || rw_nf) {
+ spin_lock(&nfs_uuid->lock);
+ if (ro_nf)
+ ro_nf = rcu_dereference_protected(xchg(&nfl->ro_file, NULL), 1);
+ if (rw_nf)
+ rw_nf = rcu_dereference_protected(xchg(&nfl->rw_file, NULL), 1);
+
+ /* Remove nfl from nfs_uuid->files list */
+ RCU_INIT_POINTER(nfl->nfs_uuid, NULL);
+ list_del_init(&nfl->list);
+ spin_unlock(&nfs_uuid->lock);
+ rcu_read_unlock();
+
+ if (ro_nf)
+ nfs_to_nfsd_file_put_local(ro_nf);
+ if (rw_nf)
+ nfs_to_nfsd_file_put_local(rw_nf);
+ return;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nfs_close_local_fh);
+
/*
* The NFS LOCALIO code needs to call into NFSD using various symbols,
* but cannot be statically linked, because that will make the NFS
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 18cbd3fa7691..2f687619f65b 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -18,9 +18,23 @@ nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
- nfs4acl.o nfs4callback.o nfs4recover.o
+ nfs4acl.o nfs4callback.o nfs4recover.o nfs4xdr_gen.o
nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
+
+
+.PHONY: xdrgen
+
+xdrgen: ../../include/linux/sunrpc/xdrgen/nfs4_1.h nfs4xdr_gen.h nfs4xdr_gen.c
+
+../../include/linux/sunrpc/xdrgen/nfs4_1.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen definitions $< > $@
+
+nfs4xdr_gen.h: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen declarations $< > $@
+
+nfs4xdr_gen.c: ../../Documentation/sunrpc/xdr/nfs4_1.x
+ ../../tools/net/sunrpc/xdrgen/xdrgen source $< > $@
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index aa4712362b3b..0363720280d4 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -355,16 +355,25 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_put(struct kref *ref)
+static void svc_export_release(struct rcu_head *rcu_head)
{
- struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
- path_put(&exp->ex_path);
- auth_domain_put(exp->ex_client);
+ struct svc_export *exp = container_of(rcu_head, struct svc_export,
+ ex_rcu);
+
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
kfree(exp->ex_uuid);
- kfree_rcu(exp, ex_rcu);
+ kfree(exp);
+}
+
+static void svc_export_put(struct kref *ref)
+{
+ struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
+
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
+ call_rcu(&exp->ex_rcu, svc_export_release);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -1425,13 +1434,9 @@ static int e_show(struct seq_file *m, void *p)
return 0;
}
- if (!cache_get_rcu(&exp->h))
- return 0;
-
- if (cache_check(cd, &exp->h, NULL))
+ if (cache_check_rcu(cd, &exp->h, NULL))
return 0;
- exp_put(exp);
return svc_export_show(m, cd, cp);
}
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index dc5c9d8e8202..0e552d873eaa 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -39,6 +39,7 @@
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
#include <linux/rhashtable.h>
+#include <linux/nfslocalio.h>
#include "vfs.h"
#include "nfsd.h"
@@ -391,7 +392,7 @@ nfsd_file_put(struct nfsd_file *nf)
}
/**
- * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller
+ * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller
* @nf: nfsd_file of which to put the reference
*
* First save the associated net to return to caller, then put
@@ -833,6 +834,14 @@ __nfsd_file_cache_purge(struct net *net)
struct nfsd_file *nf;
LIST_HEAD(dispose);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ if (net) {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
+ }
+#endif
+
rhltable_walk_enter(&nfsd_file_rhltable, &iter);
do {
rhashtable_walk_start(&iter);
@@ -1222,10 +1231,9 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
* a file. The security implications of this should be carefully
* considered before use.
*
- * The nfsd_file object returned by this API is reference-counted
- * and garbage-collected. The object is retained for a few
- * seconds after the final nfsd_file_put() in case the caller
- * wants to re-use it.
+ * The nfsd_file object returned by this API is reference-counted
+ * but not garbage-collected. The object is unhashed after the
+ * final nfsd_file_put().
*
* Return values:
* %nfs_ok - @pnf points to an nfsd_file with its reference
@@ -1247,7 +1255,7 @@ nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
__be32 beres;
beres = nfsd_file_do_acquire(NULL, net, cred, client,
- fhp, may_flags, NULL, pnf, true);
+ fhp, may_flags, NULL, pnf, false);
put_cred(revert_creds(save_cred));
return beres;
}
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
index f441cb9f74d5..238647fa379e 100644
--- a/fs/nfsd/localio.c
+++ b/fs/nfsd/localio.c
@@ -25,10 +25,12 @@
#include "cache.h"
static const struct nfsd_localio_operations nfsd_localio_ops = {
- .nfsd_serv_try_get = nfsd_serv_try_get,
- .nfsd_serv_put = nfsd_serv_put,
+ .nfsd_net_try_get = nfsd_net_try_get,
+ .nfsd_net_put = nfsd_net_put,
.nfsd_open_local_fh = nfsd_open_local_fh,
.nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_get = nfsd_file_get,
+ .nfsd_file_put = nfsd_file_put,
.nfsd_file_file = nfsd_file_file,
};
@@ -52,7 +54,7 @@ void nfsd_localio_ops_init(void)
* avoid all the NFS overhead with reads, writes and commits.
*
* On successful return, returned nfsd_file will have its nf_net member
- * set. Caller (NFS client) is responsible for calling nfsd_serv_put and
+ * set. Caller (NFS client) is responsible for calling nfsd_net_put and
* nfsd_file_put (via nfs_to_nfsd_file_put_local).
*/
struct nfsd_file *
@@ -114,6 +116,7 @@ static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ &nn->local_clients_lock,
net, rqstp->rq_client, THIS_MODULE);
return rpc_success;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 26f7b34d1a03..3e2d0fde80a7 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -128,21 +128,16 @@ struct nfsd_net {
seqlock_t writeverf_lock;
unsigned char writeverf[8];
- /*
- * Max number of connections this nfsd container will allow. Defaults
- * to '0' which is means that it bases this on the number of threads.
- */
- unsigned int max_connections;
-
u32 clientid_base;
u32 clientid_counter;
u32 clverifier_counter;
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
- struct percpu_ref nfsd_serv_ref;
- struct completion nfsd_serv_confirm_done;
- struct completion nfsd_serv_free_done;
+
+ struct percpu_ref nfsd_net_ref;
+ struct completion nfsd_net_confirm_done;
+ struct completion nfsd_net_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -219,6 +214,7 @@ struct nfsd_net {
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
/* Local clients to be invalidated when net is shut down */
+ spinlock_t local_clients_lock;
struct list_head local_clients;
#endif
};
@@ -229,8 +225,8 @@ struct nfsd_net {
extern bool nfsd_support_version(int vers);
extern unsigned int nfsd_net_id;
-bool nfsd_serv_try_get(struct net *net);
-void nfsd_serv_put(struct net *net);
+bool nfsd_net_try_get(struct net *net);
+void nfsd_net_put(struct net *net);
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c083e539e898..50e468bdb8d4 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -42,6 +42,7 @@
#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
+#include "nfs4xdr_gen.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -93,12 +94,35 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
{
fattr->ncf_cb_change = 0;
fattr->ncf_cb_fsize = 0;
+ fattr->ncf_cb_atime.tv_sec = 0;
+ fattr->ncf_cb_atime.tv_nsec = 0;
+ fattr->ncf_cb_mtime.tv_sec = 0;
+ fattr->ncf_cb_mtime.tv_nsec = 0;
+
if (bitmap[0] & FATTR4_WORD0_CHANGE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
return -NFSERR_BAD_XDR;
if (bitmap[0] & FATTR4_WORD0_SIZE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
return -NFSERR_BAD_XDR;
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access))
+ return -NFSERR_BAD_XDR;
+ fattr->ncf_cb_atime.tv_sec = access.seconds;
+ fattr->ncf_cb_atime.tv_nsec = access.nseconds;
+
+ }
+ if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify))
+ return -NFSERR_BAD_XDR;
+ fattr->ncf_cb_mtime.tv_sec = modify.seconds;
+ fattr->ncf_cb_mtime.tv_nsec = modify.nseconds;
+
+ }
return 0;
}
@@ -361,16 +385,24 @@ static void
encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
struct nfs4_cb_fattr *fattr)
{
- struct nfs4_delegation *dp =
- container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
+ struct nfs4_delegation *dp = container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
- u32 bmap[1];
-
- bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
-
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ u32 bmap_size = 1;
+ u32 bmap[3];
+
+ bmap[0] = FATTR4_WORD0_SIZE;
+ if (!ncf->ncf_file_modified)
+ bmap[0] |= FATTR4_WORD0_CHANGE;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ bmap[1] = 0;
+ bmap[2] = FATTR4_WORD2_TIME_DELEG_ACCESS | FATTR4_WORD2_TIME_DELEG_MODIFY;
+ bmap_size = 3;
+ }
encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
encode_nfs_fh4(xdr, fh);
- encode_bitmap4(xdr, bmap, ARRAY_SIZE(bmap));
+ encode_bitmap4(xdr, bmap, bmap_size);
hdr->nops++;
}
@@ -634,7 +666,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
struct nfs4_cb_compound_hdr hdr;
int status;
u32 bitmap[3] = {0};
- u32 attrlen;
+ u32 attrlen, maxlen;
struct nfs4_cb_fattr *ncf =
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
@@ -647,13 +679,17 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
return status;
status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
- if (status)
+ if (unlikely(status || cb->cb_seq_status))
return status;
if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
return -NFSERR_BAD_XDR;
if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
return -NFSERR_BAD_XDR;
- if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize)))
+ maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize);
+ if (bitmap[2] != 0)
+ maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) +
+ sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2;
+ if (attrlen > maxlen)
return -NFSERR_BAD_XDR;
status = decode_cb_fattr4(xdr, bitmap, ncf);
return status;
@@ -1036,8 +1072,7 @@ static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
{
- if (atomic_dec_and_test(&clp->cl_cb_inflight))
- wake_up_var(&clp->cl_cb_inflight);
+ atomic_dec_and_wake_up(&clp->cl_cb_inflight);
}
static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
@@ -1397,8 +1432,9 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
return;
if (cb->cb_status) {
- WARN_ONCE(task->tk_status, "cb_status=%d tk_status=%d",
- cb->cb_status, task->tk_status);
+ WARN_ONCE(task->tk_status,
+ "cb_status=%d tk_status=%d cb_opcode=%d",
+ cb->cb_status, task->tk_status, cb->cb_ops->opcode);
task->tk_status = cb->cb_status;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ad44ad49274f..f6e06c779d09 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1135,18 +1135,43 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
.na_iattr = &setattr->sa_iattr,
.na_seclabel = &setattr->sa_label,
};
+ bool save_no_wcc, deleg_attrs;
+ struct nfs4_stid *st = NULL;
struct inode *inode;
__be32 status = nfs_ok;
- bool save_no_wcc;
int err;
- if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+ deleg_attrs = setattr->sa_bmval[2] & (FATTR4_WORD2_TIME_DELEG_ACCESS |
+ FATTR4_WORD2_TIME_DELEG_MODIFY);
+
+ if (deleg_attrs || (setattr->sa_iattr.ia_valid & ATTR_SIZE)) {
+ int flags = WR_STATE;
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS)
+ flags |= RD_STATE;
+
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
- WR_STATE, NULL, NULL);
+ flags, NULL, &st);
if (status)
return status;
}
+
+ if (deleg_attrs) {
+ status = nfserr_bad_stateid;
+ if (st->sc_type & SC_TYPE_DELEG) {
+ struct nfs4_delegation *dp = delegstateid(st);
+
+ /* Only for *_ATTRS_DELEG flavors */
+ if (deleg_attrs_deleg(dp->dl_type))
+ status = nfs_ok;
+ }
+ }
+ if (st)
+ nfs4_put_stid(st);
+ if (status)
+ return status;
+
err = fh_want_write(&cstate->current_fh);
if (err)
return nfserrno(err);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 7f2ceeb118a4..28f4d5311c40 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -2051,7 +2051,6 @@ static inline int check_for_legacy_methods(int status, struct net *net)
path_put(&path);
if (status)
return -ENOTDIR;
- status = nn->client_tracking_ops->init(net);
}
return status;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 741b9449f727..b7a0cfd05401 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1909,17 +1909,86 @@ gen_sessionid(struct nfsd4_session *ses)
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+static struct shrinker *nfsd_slot_shrinker;
+static DEFINE_SPINLOCK(nfsd_session_list_lock);
+static LIST_HEAD(nfsd_session_list);
+/* The sum of "target_slots-1" on every session. The shrinker can push this
+ * down, though it can take a little while for the memory to actually
+ * be freed. The "-1" is because we can never free slot 0 while the
+ * session is active.
+ */
+static atomic_t nfsd_total_target_slots = ATOMIC_INIT(0);
+
static void
-free_session_slots(struct nfsd4_session *ses)
+free_session_slots(struct nfsd4_session *ses, int from)
{
int i;
- for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
- free_svc_cred(&ses->se_slots[i]->sl_cred);
- kfree(ses->se_slots[i]);
+ if (from >= ses->se_fchannel.maxreqs)
+ return;
+
+ for (i = from; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+
+ /*
+ * Save the seqid in case we reactivate this slot.
+ * This will never require a memory allocation so GFP
+ * flag is irrelevant
+ */
+ xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0);
+ free_svc_cred(&slot->sl_cred);
+ kfree(slot);
+ }
+ ses->se_fchannel.maxreqs = from;
+ if (ses->se_target_maxslots > from) {
+ int new_target = from ?: 1;
+ atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots);
+ ses->se_target_maxslots = new_target;
}
}
+/**
+ * reduce_session_slots - reduce the target max-slots of a session if possible
+ * @ses: The session to affect
+ * @dec: how much to decrease the target by
+ *
+ * This interface can be used by a shrinker to reduce the target max-slots
+ * for a session so that some slots can eventually be freed.
+ * It uses spin_trylock() as it may be called in a context where another
+ * spinlock is held that has a dependency on client_lock. As shrinkers are
+ * best-effort, skipping a session if client_lock is already held has no
+ * great cost.
+ *
+ * Return value:
+ * The number of slots that the target was reduced by.
+ */
+static int
+reduce_session_slots(struct nfsd4_session *ses, int dec)
+{
+ struct nfsd_net *nn = net_generic(ses->se_client->net,
+ nfsd_net_id);
+ int ret = 0;
+
+ if (ses->se_target_maxslots <= 1)
+ return ret;
+ if (!spin_trylock(&nn->client_lock))
+ return ret;
+ ret = min(dec, ses->se_target_maxslots-1);
+ ses->se_target_maxslots -= ret;
+ atomic_sub(ret, &nfsd_total_target_slots);
+ ses->se_slot_gen += 1;
+ if (ses->se_slot_gen == 0) {
+ int i;
+ ses->se_slot_gen = 1;
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
+ struct nfsd4_slot *slot = xa_load(&ses->se_slots, i);
+ slot->sl_generation = 0;
+ }
+ }
+ spin_unlock(&nn->client_lock);
+ return ret;
+}
+
/*
* We don't actually need to cache the rpc and session headers, so we
* can allocate a little less for each slot:
@@ -1935,89 +2004,46 @@ static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
return size + sizeof(struct nfsd4_slot);
}
-/*
- * XXX: If we run out of reserved DRC memory we could (up to a point)
- * re-negotiate active sessions and reduce their slot usage to make
- * room for new connections. For now we just fail the create session.
- */
-static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
-{
- u32 slotsize = slot_bytes(ca);
- u32 num = ca->maxreqs;
- unsigned long avail, total_avail;
- unsigned int scale_factor;
-
- spin_lock(&nfsd_drc_lock);
- if (nfsd_drc_max_mem > nfsd_drc_mem_used)
- total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
- else
- /* We have handed out more space than we chose in
- * set_max_drc() to allow. That isn't really a
- * problem as long as that doesn't make us think we
- * have lots more due to integer overflow.
- */
- total_avail = 0;
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
- /*
- * Never use more than a fraction of the remaining memory,
- * unless it's the only way to give this client a slot.
- * The chosen fraction is either 1/8 or 1/number of threads,
- * whichever is smaller. This ensures there are adequate
- * slots to support multiple clients per thread.
- * Give the client one slot even if that would require
- * over-allocation--it is better than failure.
- */
- scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
-
- avail = clamp_t(unsigned long, avail, slotsize,
- total_avail/scale_factor);
- num = min_t(int, num, avail / slotsize);
- num = max_t(int, num, 1);
- nfsd_drc_mem_used += num * slotsize;
- spin_unlock(&nfsd_drc_lock);
-
- return num;
-}
-
-static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
-{
- int slotsize = slot_bytes(ca);
-
- spin_lock(&nfsd_drc_lock);
- nfsd_drc_mem_used -= slotsize * ca->maxreqs;
- spin_unlock(&nfsd_drc_lock);
-}
-
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
struct nfsd4_channel_attrs *battrs)
{
int numslots = fattrs->maxreqs;
int slotsize = slot_bytes(fattrs);
struct nfsd4_session *new;
+ struct nfsd4_slot *slot;
int i;
- BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
- > PAGE_SIZE);
-
- new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
+ xa_init(&new->se_slots);
/* allocate each struct nfsd4_slot and data cache in one piece */
- for (i = 0; i < numslots; i++) {
- new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
- if (!new->se_slots[i])
- goto out_free;
- }
+ slot = kzalloc(slotsize, GFP_KERNEL);
+ if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL)))
+ goto out_free;
+ for (i = 1; i < numslots; i++) {
+ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+ slot = kzalloc(slotsize, gfp);
+ if (!slot)
+ break;
+ if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) {
+ kfree(slot);
+ break;
+ }
+ }
+ fattrs->maxreqs = i;
memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+ new->se_target_maxslots = i;
+ atomic_add(i - 1, &nfsd_total_target_slots);
new->se_cb_slot_avail = ~0U;
new->se_cb_highest_slot = min(battrs->maxreqs - 1,
NFSD_BC_SLOT_TABLE_SIZE - 1);
spin_lock_init(&new->se_lock);
return new;
out_free:
- while (i--)
- kfree(new->se_slots[i]);
+ kfree(slot);
+ xa_destroy(&new->se_slots);
kfree(new);
return NULL;
}
@@ -2123,17 +2149,47 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
static void __free_session(struct nfsd4_session *ses)
{
- free_session_slots(ses);
+ free_session_slots(ses, 0);
+ xa_destroy(&ses->se_slots);
kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
nfsd4_del_conns(ses);
- nfsd4_put_drc_mem(&ses->se_fchannel);
__free_session(ses);
}
+static unsigned long
+nfsd_slot_count(struct shrinker *s, struct shrink_control *sc)
+{
+ unsigned long cnt = atomic_read(&nfsd_total_target_slots);
+
+ return cnt ? cnt : SHRINK_EMPTY;
+}
+
+static unsigned long
+nfsd_slot_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ struct nfsd4_session *ses;
+ unsigned long scanned = 0;
+ unsigned long freed = 0;
+
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &nfsd_session_list, se_all_sessions) {
+ freed += reduce_session_slots(ses, 1);
+ scanned += 1;
+ if (scanned >= sc->nr_to_scan) {
+ /* Move starting point for next scan */
+ list_move(&nfsd_session_list, &ses->se_all_sessions);
+ break;
+ }
+ }
+ spin_unlock(&nfsd_session_list_lock);
+ sc->nr_scanned = scanned;
+ return freed;
+}
+
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
@@ -2158,6 +2214,10 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_add_tail(&new->se_all_sessions, &nfsd_session_list);
+ spin_unlock(&nfsd_session_list_lock);
+
{
struct sockaddr *sa = svc_addr(rqstp);
/*
@@ -2227,6 +2287,9 @@ unhash_session(struct nfsd4_session *ses)
spin_lock(&ses->se_client->cl_lock);
list_del(&ses->se_perclnt);
spin_unlock(&ses->se_client->cl_lock);
+ spin_lock(&nfsd_session_list_lock);
+ list_del(&ses->se_all_sessions);
+ spin_unlock(&nfsd_session_list_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
@@ -2362,8 +2425,12 @@ unhash_client_locked(struct nfs4_client *clp)
}
list_del_init(&clp->cl_lru);
spin_lock(&clp->cl_lock);
- list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ spin_lock(&nfsd_session_list_lock);
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) {
list_del_init(&ses->se_hash);
+ list_del_init(&ses->se_all_sessions);
+ }
+ spin_unlock(&nfsd_session_list_lock);
spin_unlock(&clp->cl_lock);
}
@@ -2685,6 +2752,7 @@ static const char *cb_state2str(int state)
static int client_info_show(struct seq_file *m, void *v)
{
struct inode *inode = file_inode(m->file);
+ struct nfsd4_session *ses;
struct nfs4_client *clp;
u64 clid;
@@ -2721,6 +2789,16 @@ static int client_info_show(struct seq_file *m, void *v)
seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
seq_printf(m, "admin-revoked states: %d\n",
atomic_read(&clp->cl_admin_revoked));
+ spin_lock(&clp->cl_lock);
+ seq_printf(m, "session slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_fchannel.maxreqs);
+ seq_printf(m, "\nsession target slots:");
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ seq_printf(m, " %u", ses->se_target_maxslots);
+ spin_unlock(&clp->cl_lock);
+ seq_puts(m, "\n");
+
drop_client(clp);
return 0;
@@ -2873,6 +2951,21 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
return 0;
}
+static char *nfs4_show_deleg_type(u32 dl_type)
+{
+ switch (dl_type) {
+ case OPEN_DELEGATE_READ:
+ return "r";
+ case OPEN_DELEGATE_WRITE:
+ return "w";
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
+ return "ra";
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
+ return "wa";
+ }
+ return "?";
+}
+
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_delegation *ds;
@@ -2886,8 +2979,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_stateid(s, &st->sc_stateid);
seq_puts(s, ": { type: deleg, ");
- seq_printf(s, "access: %s",
- ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
+ seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type));
/* XXX: lease time, whether it's being recalled. */
@@ -3708,10 +3800,10 @@ nfsd4_exchange_id_release(union nfsd4_op_u *u)
kfree(exid->server_impl_name);
}
-static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
+static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, u8 flags)
{
/* The slot is in use, and no response has been sent. */
- if (slot_inuse) {
+ if (flags & NFSD4_SLOT_INUSE) {
if (seqid == slot_seqid)
return nfserr_jukebox;
else
@@ -3720,6 +3812,8 @@ static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
/* Note unsigned 32-bit arithmetic handles wraparound: */
if (likely(seqid == slot_seqid + 1))
return nfs_ok;
+ if ((flags & NFSD4_SLOT_REUSED) && seqid == 1)
+ return nfs_ok;
if (seqid == slot_seqid)
return nfserr_replay_cache;
return nfserr_seq_misordered;
@@ -3778,17 +3872,6 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
- /*
- * Note decreasing slot size below client's request may make it
- * difficult for client to function correctly, whereas
- * decreasing the number of slots will (just?) affect
- * performance. When short on memory we therefore prefer to
- * decrease number of slots instead of their size. Clients that
- * request larger slots than they need will get poor results:
- * Note that we always allow at least one slot, because our
- * accounting is soft and provides no guarantees either way.
- */
- ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
return nfs_ok;
}
@@ -3866,11 +3949,11 @@ nfsd4_create_session(struct svc_rqst *rqstp,
return status;
status = check_backchannel_attrs(&cr_ses->back_channel);
if (status)
- goto out_release_drc_mem;
+ goto out_err;
status = nfserr_jukebox;
new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
if (!new)
- goto out_release_drc_mem;
+ goto out_err;
conn = alloc_conn_from_crses(rqstp, cr_ses);
if (!conn)
goto out_free_session;
@@ -3979,8 +4062,7 @@ out_free_conn:
free_conn(conn);
out_free_session:
__free_session(new);
-out_release_drc_mem:
- nfsd4_put_drc_mem(&cr_ses->fore_channel);
+out_err:
return status;
}
@@ -4278,17 +4360,11 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out_put_session;
- slot = session->se_slots[seq->slotid];
+ slot = xa_load(&session->se_slots, seq->slotid);
dprintk("%s: slotid %d\n", __func__, seq->slotid);
- /* We do not negotiate the number of slots yet, so set the
- * maxslots to the session maxreqs which is used to encode
- * sr_highest_slotid and the sr_target_slot id to maxslots */
- seq->maxslots = session->se_fchannel.maxreqs;
-
trace_nfsd_slot_seqid_sequence(clp, seq, slot);
- status = check_slot_seqid(seq->seqid, slot->sl_seqid,
- slot->sl_flags & NFSD4_SLOT_INUSE);
+ status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
@@ -4313,6 +4389,12 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out_put_session;
+ if (session->se_target_maxslots < session->se_fchannel.maxreqs &&
+ slot->sl_generation == session->se_slot_gen &&
+ seq->maxslots <= session->se_target_maxslots)
+ /* Client acknowledged our reduced maxreqs */
+ free_session_slots(session, session->se_target_maxslots);
+
buflen = (seq->cachethis) ?
session->se_fchannel.maxresp_cached :
session->se_fchannel.maxresp_sz;
@@ -4323,9 +4405,11 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
svc_reserve(rqstp, buflen);
status = nfs_ok;
- /* Success! bump slot seqid */
+ /* Success! accept new slot seqid */
slot->sl_seqid = seq->seqid;
+ slot->sl_flags &= ~NFSD4_SLOT_REUSED;
slot->sl_flags |= NFSD4_SLOT_INUSE;
+ slot->sl_generation = session->se_slot_gen;
if (seq->cachethis)
slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
else
@@ -4335,6 +4419,49 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cstate->session = session;
cstate->clp = clp;
+ /*
+ * If the client ever uses the highest available slot,
+ * gently try to allocate another 20%. This allows
+ * fairly quick growth without grossly over-shooting what
+ * the client might use.
+ */
+ if (seq->slotid == session->se_fchannel.maxreqs - 1 &&
+ session->se_target_maxslots >= session->se_fchannel.maxreqs &&
+ session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) {
+ int s = session->se_fchannel.maxreqs;
+ int cnt = DIV_ROUND_UP(s, 5);
+ void *prev_slot;
+
+ do {
+ /*
+ * GFP_NOWAIT both allows allocation under a
+ * spinlock, and only succeeds if there is
+ * plenty of memory.
+ */
+ slot = kzalloc(slot_bytes(&session->se_fchannel),
+ GFP_NOWAIT);
+ prev_slot = xa_load(&session->se_slots, s);
+ if (xa_is_value(prev_slot) && slot) {
+ slot->sl_seqid = xa_to_value(prev_slot);
+ slot->sl_flags |= NFSD4_SLOT_REUSED;
+ }
+ if (slot &&
+ !xa_is_err(xa_store(&session->se_slots, s, slot,
+ GFP_NOWAIT))) {
+ s += 1;
+ session->se_fchannel.maxreqs = s;
+ atomic_add(s - session->se_target_maxslots,
+ &nfsd_total_target_slots);
+ session->se_target_maxslots = s;
+ } else {
+ kfree(slot);
+ slot = NULL;
+ }
+ } while (slot && --cnt > 0);
+ }
+ seq->maxslots = max(session->se_target_maxslots, seq->maxslots);
+ seq->target_maxslots = session->se_target_maxslots;
+
out:
switch (clp->cl_cb_state) {
case NFSD4_CB_DOWN:
@@ -4739,7 +4866,7 @@ static void init_nfs4_replay(struct nfs4_replay *rp)
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
- atomic_set(&rp->rp_locked, RP_UNLOCKED);
+ rp->rp_locked = RP_UNLOCKED;
}
static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
@@ -4747,9 +4874,9 @@ static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
{
if (!nfsd4_has_session(cstate)) {
wait_var_event(&so->so_replay.rp_locked,
- atomic_cmpxchg(&so->so_replay.rp_locked,
- RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
- if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
+ cmpxchg(&so->so_replay.rp_locked,
+ RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
+ if (so->so_replay.rp_locked == RP_UNHASHED)
return -EAGAIN;
cstate->replay_owner = nfs4_get_stateowner(so);
}
@@ -4762,9 +4889,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
- atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
- smp_mb__after_atomic();
- wake_up_var(&so->so_replay.rp_locked);
+ store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED);
nfs4_put_stateowner(so);
}
}
@@ -5069,9 +5194,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* Some threads with a reference might be waiting for rp_locked,
* so tell them to stop waiting.
*/
- atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
- smp_mb__after_atomic();
- wake_up_var(&oo->oo_owner.so_replay.rp_locked);
+ store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
@@ -5472,7 +5595,7 @@ retry:
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
- if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
+ if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type))
return nfserr_openmode;
else
return nfs_ok;
@@ -5704,8 +5827,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
-static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
- int flag)
+static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp)
{
struct file_lease *fl;
@@ -5714,7 +5836,7 @@ static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
return NULL;
fl->fl_lmops = &nfsd_lease_mng_ops;
fl->c.flc_flags = FL_DELEG;
- fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+ fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK;
fl->c.flc_owner = (fl_owner_t)dp;
fl->c.flc_pid = current->tgid;
fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
@@ -5829,13 +5951,14 @@ static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent)
{
- int status = 0;
+ bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
struct nfs4_client *clp = stp->st_stid.sc_client;
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
struct nfsd_file *nf = NULL;
struct file_lease *fl;
+ int status = 0;
u32 dl_type;
/*
@@ -5860,7 +5983,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
*/
if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
nf = find_rw_file(fp);
- dl_type = NFS4_OPEN_DELEGATE_WRITE;
+ dl_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE;
}
/*
@@ -5869,7 +5992,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
*/
if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
nf = find_readable_file(fp);
- dl_type = NFS4_OPEN_DELEGATE_READ;
+ dl_type = deleg_ts ? OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
}
if (!nf)
@@ -5901,7 +6024,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (!dp)
goto out_delegees;
- fl = nfs4_alloc_init_lease(dp, dl_type);
+ fl = nfs4_alloc_init_lease(dp);
if (!fl)
goto out_clnt_odstate;
@@ -5958,20 +6081,20 @@ out_delegees:
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
if (status == -EAGAIN)
open->op_why_no_deleg = WND4_CONTENTION;
else {
open->op_why_no_deleg = WND4_RESOURCE;
switch (open->op_deleg_want) {
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
break;
- case NFS4_SHARE_WANT_CANCEL:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
open->op_why_no_deleg = WND4_CANCELLED;
break;
- case NFS4_SHARE_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
WARN_ON_ONCE(1);
}
}
@@ -6027,13 +6150,14 @@ static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *currentfh)
{
- struct nfs4_delegation *dp;
+ bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct svc_fh *parent = NULL;
- int cb_up;
- int status = 0;
+ struct nfs4_delegation *dp;
struct kstat stat;
+ int status = 0;
+ int cb_up;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = false;
@@ -6074,20 +6198,22 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
destroy_delegation(dp);
goto out_no_deleg;
}
- open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
+ open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG :
+ OPEN_DELEGATE_WRITE;
dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_READ_ATTRS_DELEG :
+ OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
}
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
+ open->op_delegate_type = OPEN_DELEGATE_NONE;
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
- open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
+ open->op_delegate_type != OPEN_DELEGATE_NONE) {
dprintk("NFSD: WARNING: refusing delegation reclaim\n");
open->op_recall = true;
}
@@ -6101,21 +6227,32 @@ out_no_deleg:
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
struct nfs4_delegation *dp)
{
- if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
- } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
- dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
- open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ if (deleg_is_write(dp->dl_type)) {
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
+ } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
+ open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
+ }
}
/* Otherwise the client must be confused wanting a delegation
* it already has, therefore we don't return
- * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
+ * OPEN_DELEGATE_NONE_EXT and reason.
*/
}
+/* Are we returning only a delegation stateid? */
+static bool open_xor_delegation(struct nfsd4_open *open)
+{
+ if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+ return false;
+ /* Did we actually get a delegation? */
+ if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type))
+ return false;
+ return true;
+}
+
/**
* nfsd4_process_open2 - finish open processing
* @rqstp: the RPC transaction being executed
@@ -6201,8 +6338,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
- if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
+ if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) {
+ open->op_delegate_type = OPEN_DELEGATE_NONE_EXT;
open->op_why_no_deleg = WND4_NOT_WANTED;
goto nodeleg;
}
@@ -6213,12 +6350,23 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* OPEN succeeds even if we fail.
*/
nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
+
+ /*
+ * If there is an existing open stateid, it must be updated and
+ * returned. Only respect WANT_OPEN_XOR_DELEGATION when a new
+ * open stateid would have to be created.
+ */
+ if (new_stp && open_xor_delegation(open)) {
+ memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid));
+ open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID;
+ release_open_stateid(stp);
+ }
nodeleg:
status = nfs_ok;
trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
- if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+ if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp &&
open->op_deleg_want)
nfsd4_deleg_xgrade_none_ext(open, dp);
@@ -6229,7 +6377,7 @@ out:
/*
* To finish the open response, we just need to set the rflags.
*/
- open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
+ open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
if (nfsd4_has_session(&resp->cstate))
open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
@@ -7966,7 +8114,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
- struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
@@ -7986,7 +8133,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status != nfs_ok)
return status;
- sb = cstate->current_fh.fh_dentry->d_sb;
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -8721,7 +8867,6 @@ skip_grace:
}
/* initialization to perform when the nfsd service is started: */
-
int
nfs4_state_start(void)
{
@@ -8731,6 +8876,15 @@ nfs4_state_start(void)
if (ret)
return ret;
+ nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot");
+ if (!nfsd_slot_shrinker) {
+ rhltable_destroy(&nfs4_file_rhltable);
+ return -ENOMEM;
+ }
+ nfsd_slot_shrinker->count_objects = nfsd_slot_count;
+ nfsd_slot_shrinker->scan_objects = nfsd_slot_scan;
+ shrinker_register(nfsd_slot_shrinker);
+
set_max_delegations();
return 0;
}
@@ -8772,6 +8926,7 @@ void
nfs4_state_shutdown(void)
{
rhltable_destroy(&nfs4_file_rhltable);
+ shrinker_free(nfsd_slot_shrinker);
}
static void
@@ -8889,6 +9044,78 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
}
/**
+ * set_cb_time - vet and set the timespec for a cb_getattr update
+ * @cb: timestamp from the CB_GETATTR response
+ * @orig: original timestamp in the inode
+ * @now: current time
+ *
+ * Given a timestamp in a CB_GETATTR response, check it against the
+ * current timestamp in the inode and the current time. Returns true
+ * if the inode's timestamp needs to be updated, and false otherwise.
+ * @cb may also be changed if the timestamp needs to be clamped.
+ */
+static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig,
+ const struct timespec64 *now)
+{
+
+ /*
+ * "When the time presented is before the original time, then the
+ * update is ignored." Also no need to update if there is no change.
+ */
+ if (timespec64_compare(cb, orig) <= 0)
+ return false;
+
+ /*
+ * "When the time presented is in the future, the server can either
+ * clamp the new time to the current time, or it may
+ * return NFS4ERR_DELAY to the client, allowing it to retry."
+ */
+ if (timespec64_compare(cb, now) > 0) {
+ /* clamp it */
+ *cb = *now;
+ }
+
+ return true;
+}
+
+static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp)
+{
+ struct inode *inode = d_inode(dentry);
+ struct timespec64 now = current_time(inode);
+ struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
+ struct iattr attrs = { };
+ int ret;
+
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ struct timespec64 atime = inode_get_atime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
+
+ attrs.ia_atime = ncf->ncf_cb_atime;
+ attrs.ia_mtime = ncf->ncf_cb_mtime;
+
+ if (set_cb_time(&attrs.ia_atime, &atime, &now))
+ attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
+
+ if (set_cb_time(&attrs.ia_mtime, &mtime, &now)) {
+ attrs.ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET;
+ attrs.ia_ctime = attrs.ia_mtime;
+ }
+ } else {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ attrs.ia_mtime = attrs.ia_ctime = now;
+ }
+
+ if (!attrs.ia_valid)
+ return 0;
+
+ attrs.ia_valid |= ATTR_DELEG;
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @dentry: dentry of inode to be checked for a conflict
@@ -8914,7 +9141,6 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
struct file_lock_context *ctx;
struct nfs4_delegation *dp = NULL;
struct file_lease *fl;
- struct iattr attrs;
struct nfs4_cb_fattr *ncf;
struct inode *inode = d_inode(dentry);
@@ -8976,11 +9202,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
* not update the file's metadata with the client's
* modified size
*/
- attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
- inode_lock(inode);
- err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
- inode_unlock(inode);
+ err = cb_getattr_update_times(dentry, dp);
if (err) {
status = nfserrno(err);
goto out_status;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 8dd2e2ada474..e67420729ecd 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -55,6 +55,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "nfs4xdr_gen.h"
#include "trace.h"
@@ -520,6 +521,26 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
*umask = mask & S_IRWXUGO;
iattr->ia_valid |= ATTR_MODE;
}
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
+ fattr4_time_deleg_access access;
+
+ if (!xdrgen_decode_fattr4_time_deleg_access(argp->xdr, &access))
+ return nfserr_bad_xdr;
+ iattr->ia_atime.tv_sec = access.seconds;
+ iattr->ia_atime.tv_nsec = access.nseconds;
+ iattr->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET | ATTR_DELEG;
+ }
+ if (bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ fattr4_time_deleg_modify modify;
+
+ if (!xdrgen_decode_fattr4_time_deleg_modify(argp->xdr, &modify))
+ return nfserr_bad_xdr;
+ iattr->ia_mtime.tv_sec = modify.seconds;
+ iattr->ia_mtime.tv_nsec = modify.nseconds;
+ iattr->ia_ctime.tv_sec = modify.seconds;
+ iattr->ia_ctime.tv_nsec = modify.nseconds;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG;
+ }
/* request sanity: did attrlist4 contain the expected number of words? */
if (attrlist4_count != xdr_stream_pos(argp->xdr) - starting_pos)
@@ -1066,13 +1087,13 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *sh
return nfs_ok;
if (!argp->minorversion)
return nfserr_bad_xdr;
- switch (w & NFS4_SHARE_WANT_MASK) {
- case NFS4_SHARE_WANT_NO_PREFERENCE:
- case NFS4_SHARE_WANT_READ_DELEG:
- case NFS4_SHARE_WANT_WRITE_DELEG:
- case NFS4_SHARE_WANT_ANY_DELEG:
- case NFS4_SHARE_WANT_NO_DELEG:
- case NFS4_SHARE_WANT_CANCEL:
+ switch (w & NFS4_SHARE_WANT_TYPE_MASK) {
+ case OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE:
+ case OPEN4_SHARE_ACCESS_WANT_READ_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_NO_DELEG:
+ case OPEN4_SHARE_ACCESS_WANT_CANCEL:
break;
default:
return nfserr_bad_xdr;
@@ -1884,7 +1905,8 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
return nfserr_bad_xdr;
seq->seqid = be32_to_cpup(p++);
seq->slotid = be32_to_cpup(p++);
- seq->maxslots = be32_to_cpup(p++);
+ /* sa_highest_slotid counts from 0 but maxslots counts from 1 ... */
+ seq->maxslots = be32_to_cpup(p++) + 1;
seq->cachethis = be32_to_cpup(p);
seq->status_flags = 0;
@@ -2919,6 +2941,7 @@ struct nfsd4_fattr_args {
struct kstat stat;
struct kstatfs statfs;
struct nfs4_acl *acl;
+ u64 change_attr;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
struct lsm_context context;
#endif
@@ -3017,7 +3040,6 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args)
{
const struct svc_export *exp = args->exp;
- u64 c;
if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) {
u32 flush_time = convert_to_wallclock(exp->cd->flush_time);
@@ -3028,9 +3050,7 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
return nfserr_resource;
return nfs_ok;
}
-
- c = nfsd4_change_attribute(&args->stat);
- return nfsd4_encode_changeid4(xdr, c);
+ return nfsd4_encode_changeid4(xdr, args->change_attr);
}
static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr,
@@ -3387,6 +3407,56 @@ static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr,
return nfsd4_encode_bool(xdr, err == 0);
}
+#define NFSD_OA_SHARE_ACCESS (BIT(OPEN_ARGS_SHARE_ACCESS_READ) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_BOTH))
+
+#define NFSD_OA_SHARE_DENY (BIT(OPEN_ARGS_SHARE_DENY_NONE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_READ) | \
+ BIT(OPEN_ARGS_SHARE_DENY_WRITE) | \
+ BIT(OPEN_ARGS_SHARE_DENY_BOTH))
+
+#define NFSD_OA_SHARE_ACCESS_WANT (BIT(OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS) | \
+ BIT(OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION))
+
+#define NFSD_OA_OPEN_CLAIM (BIT(OPEN_ARGS_OPEN_CLAIM_NULL) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_PREVIOUS) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV)| \
+ BIT(OPEN_ARGS_OPEN_CLAIM_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH) | \
+ BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH))
+
+#define NFSD_OA_CREATE_MODE (BIT(OPEN_ARGS_CREATEMODE_UNCHECKED4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_GUARDED) | \
+ BIT(OPEN_ARGS_CREATEMODE_EXCLUSIVE4) | \
+ BIT(OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1))
+
+static uint32_t oa_share_access = NFSD_OA_SHARE_ACCESS;
+static uint32_t oa_share_deny = NFSD_OA_SHARE_DENY;
+static uint32_t oa_share_access_want = NFSD_OA_SHARE_ACCESS_WANT;
+static uint32_t oa_open_claim = NFSD_OA_OPEN_CLAIM;
+static uint32_t oa_create_mode = NFSD_OA_CREATE_MODE;
+
+static const struct open_arguments4 nfsd_open_arguments = {
+ .oa_share_access = { .count = 1, .element = &oa_share_access },
+ .oa_share_deny = { .count = 1, .element = &oa_share_deny },
+ .oa_share_access_want = { .count = 1, .element = &oa_share_access_want },
+ .oa_open_claim = { .count = 1, .element = &oa_open_claim },
+ .oa_create_mode = { .count = 1, .element = &oa_create_mode },
+};
+
+static __be32 nfsd4_encode_fattr4_open_arguments(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ if (!xdrgen_encode_fattr4_open_arguments(xdr, &nfsd_open_arguments))
+ return nfserr_resource;
+ return nfs_ok;
+}
+
static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
[FATTR4_TYPE] = nfsd4_encode_fattr4_type,
@@ -3487,6 +3557,7 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
[FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+ [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments,
};
/*
@@ -3504,8 +3575,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
int starting_len = xdr->buf->len;
- __be32 *attrlen_p, status;
- int attrlen_offset;
+ unsigned int attrlen_offset;
+ __be32 attrlen, status;
u32 attrmask[3];
int err;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
@@ -3542,7 +3613,11 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
}
- if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ if ((attrmask[0] & (FATTR4_WORD0_CHANGE |
+ FATTR4_WORD0_SIZE)) ||
+ (attrmask[1] & (FATTR4_WORD1_TIME_ACCESS |
+ FATTR4_WORD1_TIME_MODIFY |
+ FATTR4_WORD1_TIME_METADATA))) {
status = nfsd4_deleg_getattr_conflict(rqstp, dentry, &dp);
if (status)
goto out;
@@ -3554,11 +3629,22 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (dp) {
struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
- if (ncf->ncf_file_modified)
+ if (ncf->ncf_file_modified) {
+ ++ncf->ncf_initial_cinfo;
args.stat.size = ncf->ncf_cur_fsize;
+ if (!timespec64_is_epoch(&ncf->ncf_cb_mtime))
+ args.stat.mtime = ncf->ncf_cb_mtime;
+ }
+ args.change_attr = ncf->ncf_initial_cinfo;
+
+ if (!timespec64_is_epoch(&ncf->ncf_cb_atime))
+ args.stat.atime = ncf->ncf_cb_atime;
nfs4_put_stid(&dp->dl_stid);
+ } else {
+ args.change_attr = nfsd4_change_attribute(&args.stat);
}
+
if (err)
goto out_nfserr;
@@ -3626,8 +3712,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
/* attr_vals */
attrlen_offset = xdr->buf->len;
- attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
- if (!attrlen_p)
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
goto out_resource;
bitmap_from_arr32(attr_bitmap, attrmask,
ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
@@ -3637,7 +3722,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (status != nfs_ok)
goto out;
}
- *attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
+ attrlen = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
+ write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, XDR_UNIT);
status = nfs_ok;
out:
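
All of the encoder hunks in this file apply the same idiom: instead of holding the __be32 pointer returned by xdr_reserve_space() and patching through it later, they record the XDR buffer offset of the reserved word and backfill it with write_bytes_to_xdr_buf() once the variable-length payload has been encoded, so the fixup survives any buffer reshuffling done by later encoding steps. A minimal sketch of the idiom, assuming the usual nfsd/sunrpc headers; the helper name below is illustrative and not part of the patch:

static __be32 example_encode_with_backfill(struct xdr_stream *xdr)
{
        unsigned int len_offset;
        __be32 wire_len;

        /* Remember the buffer offset of the length word, not its address. */
        len_offset = xdr->buf->len;
        if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
                return nfserr_resource;

        /* ... encode the variable-length payload here ... */

        /* Backfill the length once the payload size is known. */
        wire_len = cpu_to_be32(xdr->buf->len - len_offset - XDR_UNIT);
        write_bytes_to_xdr_buf(xdr->buf, len_offset, &wire_len, XDR_UNIT);
        return nfs_ok;
}
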
@@ -4227,18 +4313,20 @@ nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT)
return nfserr_resource;
switch (open->op_delegate_type) {
- case NFS4_OPEN_DELEGATE_NONE:
+ case OPEN_DELEGATE_NONE:
status = nfs_ok;
break;
- case NFS4_OPEN_DELEGATE_READ:
+ case OPEN_DELEGATE_READ:
+ case OPEN_DELEGATE_READ_ATTRS_DELEG:
/* read */
status = nfsd4_encode_open_read_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_WRITE:
+ case OPEN_DELEGATE_WRITE:
+ case OPEN_DELEGATE_WRITE_ATTRS_DELEG:
/* write */
status = nfsd4_encode_open_write_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_NONE_EXT:
+ case OPEN_DELEGATE_NONE_EXT:
/* od_whynone */
status = nfsd4_encode_open_none_delegation4(xdr, open);
break;
@@ -4315,6 +4403,15 @@ static __be32 nfsd4_encode_splice_read(
__be32 nfserr;
/*
+ * Splice read doesn't work if encoding has already wandered
+ * into the XDR buf's page array.
+ */
+ if (unlikely(xdr->buf->page_len)) {
+ WARN_ON_ONCE(1);
+ return nfserr_serverfault;
+ }
+
+ /*
* Make sure there is room at the end of buf->head for
* svcxdr_encode_opaque_pages() to create a tail buffer
* to XDR-pad the payload.
@@ -4396,25 +4493,23 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
struct nfsd4_read *read = &u->read;
struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
bool splice_ok = argp->splice_ok;
+ unsigned int eof_offset;
unsigned long maxcount;
+ __be32 wire_data[2];
struct file *file;
- __be32 *p;
if (nfserr)
return nfserr;
+
+ eof_offset = xdr->buf->len;
file = read->rd_nf->nf_file;
- p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
- if (!p) {
+ /* Reserve space for the eof flag and byte count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2))) {
WARN_ON_ONCE(splice_ok);
return nfserr_resource;
}
- if (resp->xdr->buf->page_len && splice_ok) {
- WARN_ON_ONCE(1);
- return nfserr_serverfault;
- }
xdr_commit_encode(xdr);
maxcount = min_t(unsigned long, read->rd_length,
@@ -4425,12 +4520,13 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
if (nfserr) {
- xdr_truncate_encode(xdr, starting_len);
+ xdr_truncate_encode(xdr, eof_offset);
return nfserr;
}
- p = xdr_encode_bool(p, read->rd_eof);
- *p = cpu_to_be32(read->rd_length);
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
return nfs_ok;
}
@@ -4439,25 +4535,21 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_readlink *readlink = &u->readlink;
- __be32 *p, *maxcount_p, zero = xdr_zero;
+ __be32 *p, wire_count, zero = xdr_zero;
struct xdr_stream *xdr = resp->xdr;
- int length_offset = xdr->buf->len;
+ unsigned int length_offset;
int maxcount, status;
- maxcount_p = xdr_reserve_space(xdr, XDR_UNIT);
- if (!maxcount_p)
+ /* linktext4.count */
+ length_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- maxcount = PAGE_SIZE;
+ /* linktext4.data */
+ maxcount = PAGE_SIZE;
p = xdr_reserve_space(xdr, maxcount);
if (!p)
return nfserr_resource;
- /*
- * XXX: By default, vfs_readlink() will truncate symlinks if they
- * would overflow the buffer. Is this kosher in NFSv4? If not, one
- * easy fix is: if vfs_readlink() precisely fills the buffer, assume
- * that truncation occurred, and return NFS4ERR_RESOURCE.
- */
nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
(char *)p, &maxcount);
if (nfserr == nfserr_isdir)
@@ -4470,7 +4562,9 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
nfserr = nfserrno(status);
goto out_err;
}
- *maxcount_p = cpu_to_be32(maxcount);
+
+ wire_count = cpu_to_be32(maxcount);
+ write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, XDR_UNIT);
xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount));
write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero,
xdr_pad_size(maxcount));
@@ -4605,14 +4699,42 @@ nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr,
}
static __be32
-nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
+nfsd4_encode_secinfo4(struct xdr_stream *xdr, rpc_authflavor_t pf,
+ u32 *supported)
+{
+ struct rpcsec_gss_info info;
+ __be32 status;
+
+ if (rpcauth_get_gssinfo(pf, &info) == 0) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
+ if (status != nfs_ok)
+ return status;
+ /* flavor_info */
+ status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
+ if (status != nfs_ok)
+ return status;
+ } else if (pf < RPC_AUTH_MAXFLAVOR) {
+ (*supported)++;
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, pf);
+ if (status != nfs_ok)
+ return status;
+ }
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_SECINFO4resok(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- static bool report = true;
- __be32 *flavorsp;
- __be32 status;
+ unsigned int count_offset;
+ __be32 status, wire_count;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
@@ -4634,43 +4756,20 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
}
}
- supported = 0;
- flavorsp = xdr_reserve_space(xdr, XDR_UNIT);
- if (!flavorsp)
+ count_offset = xdr->buf->len;
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
return nfserr_resource;
- for (i = 0; i < nflavs; i++) {
- rpc_authflavor_t pf = flavs[i].pseudoflavor;
- struct rpcsec_gss_info info;
-
- if (rpcauth_get_gssinfo(pf, &info) == 0) {
- supported++;
-
- /* flavor */
- status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
- if (status != nfs_ok)
- return status;
- /* flavor_info */
- status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
- if (status != nfs_ok)
- return status;
- } else if (pf < RPC_AUTH_MAXFLAVOR) {
- supported++;
-
- /* flavor */
- status = nfsd4_encode_uint32_t(xdr, pf);
- if (status != nfs_ok)
- return status;
- } else {
- if (report)
- pr_warn("NFS: SECINFO: security flavor %u "
- "is not supported\n", pf);
- }
+ for (i = 0, supported = 0; i < nflavs; i++) {
+ status = nfsd4_encode_secinfo4(xdr, flavs[i].pseudoflavor,
+ &supported);
+ if (status != nfs_ok)
+ return status;
}
- if (nflavs != supported)
- report = false;
- *flavorsp = cpu_to_be32(supported);
+ wire_count = cpu_to_be32(supported);
+ write_bytes_to_xdr_buf(xdr->buf, count_offset, &wire_count,
+ XDR_UNIT);
return 0;
}
@@ -4681,7 +4780,7 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo = &u->secinfo;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->si_exp);
}
static __be32
@@ -4691,7 +4790,7 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
+ return nfsd4_encode_SECINFO4resok(xdr, secinfo->sin_exp);
}
static __be32
@@ -4966,7 +5065,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
if (nfserr != nfs_ok)
return nfserr;
/* sr_target_highest_slotid */
- nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ nfserr = nfsd4_encode_slotid4(xdr, seq->target_maxslots - 1);
if (nfserr != nfs_ok)
return nfserr;
/* sr_status_flags */
@@ -5294,17 +5393,20 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
bool splice_ok = argp->splice_ok;
+ unsigned int offset_offset;
+ __be32 nfserr, wire_count;
unsigned long maxcount;
- __be32 nfserr, *p;
+ __be64 wire_offset;
- /* Content type, offset, byte count */
- p = xdr_reserve_space(xdr, 4 + 8 + 4);
- if (!p)
+ if (xdr_stream_encode_u32(xdr, NFS4_CONTENT_DATA) != XDR_UNIT)
return nfserr_io;
- if (resp->xdr->buf->page_len && splice_ok) {
- WARN_ON_ONCE(splice_ok);
- return nfserr_serverfault;
- }
+
+ offset_offset = xdr->buf->len;
+
+ /* Reserve space for the byte offset and count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 3)))
+ return nfserr_io;
+ xdr_commit_encode(xdr);
maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
@@ -5316,10 +5418,12 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
if (nfserr)
return nfserr;
- *p++ = cpu_to_be32(NFS4_CONTENT_DATA);
- p = xdr_encode_hyper(p, read->rd_offset);
- *p = cpu_to_be32(read->rd_length);
-
+ wire_offset = cpu_to_be64(read->rd_offset);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset, &wire_offset,
+ XDR_UNIT * 2);
+ wire_count = cpu_to_be32(read->rd_length);
+ write_bytes_to_xdr_buf(xdr->buf, offset_offset + XDR_UNIT * 2,
+ &wire_count, XDR_UNIT);
return nfs_ok;
}
@@ -5330,16 +5434,17 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read = &u->read;
struct file *file = read->rd_nf->nf_file;
struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
+ unsigned int eof_offset;
+ __be32 wire_data[2];
u32 segments = 0;
- __be32 *p;
if (nfserr)
return nfserr;
- /* eof flag, segment count */
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
+ eof_offset = xdr->buf->len;
+
+ /* Reserve space for the eof flag and segment count */
+ if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2)))
return nfserr_io;
xdr_commit_encode(xdr);
@@ -5349,15 +5454,16 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
nfserr = nfsd4_encode_read_plus_data(resp, read);
if (nfserr) {
- xdr_truncate_encode(xdr, starting_len);
+ xdr_truncate_encode(xdr, eof_offset);
return nfserr;
}
segments++;
out:
- p = xdr_encode_bool(p, read->rd_eof);
- *p = cpu_to_be32(segments);
+ wire_data[0] = read->rd_eof ? xdr_one : xdr_zero;
+ wire_data[1] = cpu_to_be32(segments);
+ write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2);
return nfserr;
}
@@ -5758,15 +5864,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
struct nfs4_stateowner *so = resp->cstate.replay_owner;
struct svc_rqst *rqstp = resp->rqstp;
const struct nfsd4_operation *opdesc = op->opdesc;
- int post_err_offset;
+ unsigned int op_status_offset;
nfsd4_enc encoder;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8);
- if (!p)
+ if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
+ goto release;
+ op_status_offset = xdr->buf->len;
+ if (!xdr_reserve_space(xdr, XDR_UNIT))
goto release;
- *p++ = cpu_to_be32(op->opnum);
- post_err_offset = xdr->buf->len;
if (op->opnum == OP_ILLEGAL)
goto status;
@@ -5807,20 +5912,21 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
* bug if we had to do this on a non-idempotent op:
*/
warn_on_nonidempotent_op(op);
- xdr_truncate_encode(xdr, post_err_offset);
+ xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
}
if (so) {
- int len = xdr->buf->len - post_err_offset;
+ int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
+ read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
}
status:
op->status = nfsd4_map_status(op->status,
resp->cstate.minorversion);
- *p = op->status;
+ write_bytes_to_xdr_buf(xdr->buf, op_status_offset,
+ &op->status, XDR_UNIT);
release:
if (opdesc && opdesc->op_release)
opdesc->op_release(&op->u);
diff --git a/fs/nfsd/nfs4xdr_gen.c b/fs/nfsd/nfs4xdr_gen.c
new file mode 100644
index 000000000000..a17b5d8e60b3
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+// Generated by xdrgen. Manual edits will be lost.
+// XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x
+// XDR specification modification time: Mon Oct 14 09:10:13 2024
+
+#include <linux/sunrpc/svc.h>
+
+#include "nfs4xdr_gen.h"
+
+static bool __maybe_unused
+xdrgen_decode_int64_t(struct xdr_stream *xdr, int64_t *ptr)
+{
+ return xdrgen_decode_hyper(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_uint32_t(struct xdr_stream *xdr, uint32_t *ptr)
+{
+ return xdrgen_decode_unsigned_int(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_bitmap4(struct xdr_stream *xdr, bitmap4 *ptr)
+{
+ if (xdr_stream_decode_u32(xdr, &ptr->count) < 0)
+ return false;
+ for (u32 i = 0; i < ptr->count; i++)
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_nfstime4(struct xdr_stream *xdr, struct nfstime4 *ptr)
+{
+ if (!xdrgen_decode_int64_t(xdr, &ptr->seconds))
+ return false;
+ if (!xdrgen_decode_uint32_t(xdr, &ptr->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_fattr4_offline(struct xdr_stream *xdr, fattr4_offline *ptr)
+{
+ return xdrgen_decode_bool(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_arguments4(struct xdr_stream *xdr, struct open_arguments4 *ptr)
+{
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_deny))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access_want))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_open_claim))
+ return false;
+ if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_decode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+bool
+xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr)
+{
+ return xdrgen_decode_open_arguments4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+bool
+xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr)
+{
+ return xdrgen_decode_nfstime4(xdr, ptr);
+};
+
+static bool __maybe_unused
+xdrgen_decode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
+
+static bool __maybe_unused
+xdrgen_encode_int64_t(struct xdr_stream *xdr, const int64_t value)
+{
+ return xdrgen_encode_hyper(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_uint32_t(struct xdr_stream *xdr, const uint32_t value)
+{
+ return xdrgen_encode_unsigned_int(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_bitmap4(struct xdr_stream *xdr, const bitmap4 value)
+{
+ if (xdr_stream_encode_u32(xdr, value.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value.count; i++)
+ if (!xdrgen_encode_uint32_t(xdr, value.element[i]))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_nfstime4(struct xdr_stream *xdr, const struct nfstime4 *value)
+{
+ if (!xdrgen_encode_int64_t(xdr, value->seconds))
+ return false;
+ if (!xdrgen_encode_uint32_t(xdr, value->nseconds))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_fattr4_offline(struct xdr_stream *xdr, const fattr4_offline value)
+{
+ return xdrgen_encode_bool(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_arguments4(struct xdr_stream *xdr, const struct open_arguments4 *value)
+{
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_deny))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access_want))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_open_claim))
+ return false;
+ if (!xdrgen_encode_bitmap4(xdr, value->oa_create_mode))
+ return false;
+ return true;
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+static bool __maybe_unused
+xdrgen_encode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
+
+bool
+xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value)
+{
+ return xdrgen_encode_open_arguments4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+bool
+xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value)
+{
+ return xdrgen_encode_nfstime4(xdr, value);
+};
+
+static bool __maybe_unused
+xdrgen_encode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
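
With the bitmap tables defined in nfs4xdr.c above, the generated xdrgen_encode_open_arguments4() emits five bitmap4 values, each as a count word followed by that many mask words, so the FATTR4_OPEN_ARGUMENTS attribute built from nfsd_open_arguments occupies ten XDR words. A hedged sketch of driving the generated encoder against a scratch stream (the buffer setup and function name are illustrative only):

static bool example_encode_open_arguments(void)
{
        __be32 scratch[16];
        struct xdr_buf buf;
        struct xdr_stream xdr;

        /* Point an XDR stream at a small on-stack buffer. */
        xdr_buf_init(&buf, scratch, sizeof(scratch));
        xdr_init_encode(&xdr, &buf, buf.head[0].iov_base, NULL);

        /* Emits five bitmap4s: a count word plus one mask word each. */
        return xdrgen_encode_fattr4_open_arguments(&xdr, &nfsd_open_arguments);
}
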
diff --git a/fs/nfsd/nfs4xdr_gen.h b/fs/nfsd/nfs4xdr_gen.h
new file mode 100644
index 000000000000..41a0033b7256
--- /dev/null
+++ b/fs/nfsd/nfs4xdr_gen.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DECL_H
+#define _LINUX_XDRGEN_NFS4_1_DECL_H
+
+#include <linux/types.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+#include <linux/sunrpc/xdrgen/_builtins.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
+
+bool xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr);
+bool xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value);
+
+bool xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr);
+bool xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value);
+
+bool xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr);
+bool xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value);
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DECL_H */
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3adbc05ebaac..ce2a71e4904c 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -48,7 +48,6 @@ enum {
NFSD_Versions,
NFSD_Ports,
NFSD_MaxBlkSize,
- NFSD_MaxConnections,
NFSD_Filecache,
NFSD_Leasetime,
NFSD_Gracetime,
@@ -68,7 +67,6 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size);
static ssize_t write_versions(struct file *file, char *buf, size_t size);
static ssize_t write_ports(struct file *file, char *buf, size_t size);
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size);
#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
@@ -87,7 +85,6 @@ static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
[NFSD_Versions] = write_versions,
[NFSD_Ports] = write_ports,
[NFSD_MaxBlkSize] = write_maxblksize,
- [NFSD_MaxConnections] = write_maxconn,
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
[NFSD_Gracetime] = write_gracetime,
@@ -902,44 +899,6 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/*
- * write_maxconn - Set or report the current max number of connections
- *
- * Input:
- * buf: ignored
- * size: zero
- * OR
- *
- * Input:
- * buf: C string containing an unsigned
- * integer value representing the new
- * number of max connections
- * size: non-zero length of C string in @buf
- * Output:
- * On success: passed-in buffer filled with '\n'-terminated C string
- * containing numeric value of max_connections setting
- * for this net namespace;
- * return code is the size in bytes of the string
- * On error: return code is zero or a negative errno value
- */
-static ssize_t write_maxconn(struct file *file, char *buf, size_t size)
-{
- char *mesg = buf;
- struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
- unsigned int maxconn = nn->max_connections;
-
- if (size > 0) {
- int rv = get_uint(&mesg, &maxconn);
-
- if (rv)
- return rv;
- trace_nfsd_ctl_maxconn(netns(file), maxconn);
- nn->max_connections = maxconn;
- }
-
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%u\n", maxconn);
-}
-
#ifdef CONFIG_NFSD_V4
static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
time64_t *time, struct nfsd_net *nn)
@@ -1372,7 +1331,6 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
- [NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
@@ -2259,6 +2217,7 @@ static __net_init int nfsd_net_init(struct net *net)
seqlock_init(&nn->writeverf_lock);
nfsd_proc_stat_init(net);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ spin_lock_init(&nn->local_clients_lock);
INIT_LIST_HEAD(&nn->local_clients);
#endif
return 0;
@@ -2276,14 +2235,15 @@ out_export_error:
* nfsd_net_pre_exit - Disconnect localio clients from net namespace
* @net: a network namespace that is about to be destroyed
*
- * This invalidated ->net pointers held by localio clients
+ * This invalidates ->net pointers held by localio clients
* while they can still safely access nn->counter.
*/
static __net_exit void nfsd_net_pre_exit(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfs_uuid_invalidate_clients(&nn->local_clients);
+ nfs_localio_invalidate_clients(&nn->local_clients,
+ &nn->local_clients_lock);
}
#endif
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 4b56ba1e8e48..e2997f0ffbc5 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -88,9 +88,6 @@ struct nfsd_genl_rqstp {
extern struct svc_program nfsd_programs[];
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
-extern spinlock_t nfsd_drc_lock;
-extern unsigned long nfsd_drc_max_mem;
-extern unsigned long nfsd_drc_mem_used;
extern atomic_t nfsd_th_cnt; /* number of available threads */
extern const struct seq_operations nfs_exports_op;
@@ -458,7 +455,10 @@ enum {
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
FATTR4_WORD2_MODE_UMASK | \
NFSD4_2_SECURITY_ATTRS | \
- FATTR4_WORD2_XATTR_SUPPORT)
+ FATTR4_WORD2_XATTR_SUPPORT | \
+ FATTR4_WORD2_TIME_DELEG_ACCESS | \
+ FATTR4_WORD2_TIME_DELEG_MODIFY | \
+ FATTR4_WORD2_OPEN_ARGUMENTS)
extern const u32 nfsd_suppattrs[3][3];
@@ -528,7 +528,10 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
#endif
#define NFSD_WRITEABLE_ATTRS_WORD2 \
(FATTR4_WORD2_MODE_UMASK \
- | MAYBE_FATTR4_WORD2_SECURITY_LABEL)
+ | MAYBE_FATTR4_WORD2_SECURITY_LABEL \
+ | FATTR4_WORD2_TIME_DELEG_ACCESS \
+ | FATTR4_WORD2_TIME_DELEG_MODIFY \
+ )
#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
NFSD_WRITEABLE_ATTRS_WORD0
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 98d6459724a7..32019751a41e 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -381,6 +381,8 @@ __fh_verify(struct svc_rqst *rqstp,
if (error)
goto out;
+ svc_xprt_set_valid(rqstp->rq_xprt);
+
/* Finally, check access permissions. */
error = nfsd_permission(cred, exp, dentry, access);
out:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 49e2f32102ab..9b3d6cff0e1e 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -70,16 +70,6 @@ static __be32 nfsd_init_request(struct svc_rqst *,
*/
DEFINE_MUTEX(nfsd_mutex);
-/*
- * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
- * nfsd_drc_max_pages limits the total amount of memory available for
- * version 4.1 DRC caches.
- * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage.
- */
-DEFINE_SPINLOCK(nfsd_drc_lock);
-unsigned long nfsd_drc_max_mem;
-unsigned long nfsd_drc_mem_used;
-
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
static const struct svc_version *localio_versions[] = {
[1] = &localio_version1,
@@ -214,32 +204,32 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
-bool nfsd_serv_try_get(struct net *net) __must_hold(rcu)
+bool nfsd_net_try_get(struct net *net) __must_hold(rcu)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref));
}
-void nfsd_serv_put(struct net *net) __must_hold(rcu)
+void nfsd_net_put(struct net *net) __must_hold(rcu)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- percpu_ref_put(&nn->nfsd_serv_ref);
+ percpu_ref_put(&nn->nfsd_net_ref);
}
-static void nfsd_serv_done(struct percpu_ref *ref)
+static void nfsd_net_done(struct percpu_ref *ref)
{
- struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_confirm_done);
+ complete(&nn->nfsd_net_confirm_done);
}
-static void nfsd_serv_free(struct percpu_ref *ref)
+static void nfsd_net_free(struct percpu_ref *ref)
{
- struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_free_done);
+ complete(&nn->nfsd_net_free_done);
}
/*
@@ -436,6 +426,10 @@ static void nfsd_shutdown_net(struct net *net)
if (!nn->nfsd_net_up)
return;
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done);
+ wait_for_completion(&nn->nfsd_net_confirm_done);
+
nfsd_export_flush(net);
nfs4_state_shutdown_net(net);
nfsd_reply_cache_shutdown(nn);
@@ -444,7 +438,10 @@ static void nfsd_shutdown_net(struct net *net)
lockd_down(net);
nn->lockd_up = false;
}
- percpu_ref_exit(&nn->nfsd_serv_ref);
+
+ wait_for_completion(&nn->nfsd_net_free_done);
+ percpu_ref_exit(&nn->nfsd_net_ref);
+
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
@@ -526,11 +523,6 @@ void nfsd_destroy_serv(struct net *net)
lockdep_assert_held(&nfsd_mutex);
- percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
- wait_for_completion(&nn->nfsd_serv_confirm_done);
- wait_for_completion(&nn->nfsd_serv_free_done);
- /* percpu_ref_exit is called in nfsd_shutdown_net */
-
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
@@ -575,27 +567,6 @@ void nfsd_reset_versions(struct nfsd_net *nn)
}
}
-/*
- * Each session guarantees a negotiated per slot memory cache for replies
- * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
- * NFSv4.1 server might want to use more memory for a DRC than a machine
- * with mutiple services.
- *
- * Impose a hard limit on the number of pages for the DRC which varies
- * according to the machines free pages. This is of course only a default.
- *
- * For now this is a #defined shift which could be under admin control
- * in the future.
- */
-static void set_max_drc(void)
-{
- #define NFSD_DRC_SIZE_SHIFT 7
- nfsd_drc_max_mem = (nr_free_buffer_pages()
- >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
- nfsd_drc_mem_used = 0;
- dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
-}
-
static int nfsd_get_default_max_blksize(void)
{
struct sysinfo i;
@@ -652,12 +623,12 @@ int nfsd_create_serv(struct net *net)
if (nn->nfsd_serv)
return 0;
- error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
+ error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free,
0, GFP_KERNEL);
if (error)
return error;
- init_completion(&nn->nfsd_serv_free_done);
- init_completion(&nn->nfsd_serv_confirm_done);
+ init_completion(&nn->nfsd_net_free_done);
+ init_completion(&nn->nfsd_net_confirm_done);
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
@@ -668,7 +639,6 @@ int nfsd_create_serv(struct net *net)
if (serv == NULL)
return -ENOMEM;
- serv->sv_maxconn = nn->max_connections;
error = svc_bind(serv, net);
if (error < 0) {
svc_destroy(&serv);
@@ -678,7 +648,6 @@ int nfsd_create_serv(struct net *net)
nn->nfsd_serv = serv;
spin_unlock(&nfsd_notifier_lock);
- set_max_drc();
/* check if the notifier is already set */
if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
register_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -954,11 +923,7 @@ nfsd(void *vrqstp)
* The main request loop
*/
while (!svc_thread_should_stop(rqstp)) {
- /* Update sv_maxconn if it has changed */
- rqstp->rq_server->sv_maxconn = nn->max_connections;
-
svc_recv(rqstp);
-
nfsd_file_net_dispose(nn);
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e16bb3717fb9..74d2d7b42676 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -159,6 +159,8 @@ struct nfs4_cb_fattr {
/* from CB_GETATTR reply */
u64 ncf_cb_change;
u64 ncf_cb_fsize;
+ struct timespec64 ncf_cb_mtime;
+ struct timespec64 ncf_cb_atime;
unsigned long ncf_cb_flags;
bool ncf_file_modified;
@@ -207,6 +209,22 @@ struct nfs4_delegation {
struct nfs4_cb_fattr dl_cb_fattr;
};
+static inline bool deleg_is_read(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_READ || dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG);
+}
+
+static inline bool deleg_is_write(u32 dl_type)
+{
+ return (dl_type == OPEN_DELEGATE_WRITE || dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG);
+}
+
+static inline bool deleg_attrs_deleg(u32 dl_type)
+{
+ return dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG ||
+ dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG;
+}
+
#define cb_to_delegation(cb) \
container_of(cb, struct nfs4_delegation, dl_recall)
@@ -227,8 +245,11 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
return container_of(s, struct nfs4_delegation, dl_stid);
}
-/* Maximum number of slots per session. 160 is useful for long haul TCP */
-#define NFSD_MAX_SLOTS_PER_SESSION 160
+/* Maximum number of slots per session. This is for sanity checking only.
+ * It could be increased if we had a mechanism to shut down misbehaving clients.
+ * A large number can be needed to get good throughput on high-latency networks.
+ */
+#define NFSD_MAX_SLOTS_PER_SESSION 2048
/* Maximum session per slot cache size */
#define NFSD_SLOT_CACHE_SIZE 2048
/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
@@ -242,10 +263,12 @@ struct nfsd4_slot {
struct svc_cred sl_cred;
u32 sl_datalen;
u16 sl_opcnt;
+ u16 sl_generation;
#define NFSD4_SLOT_INUSE (1 << 0)
#define NFSD4_SLOT_CACHETHIS (1 << 1)
#define NFSD4_SLOT_INITIALIZED (1 << 2)
#define NFSD4_SLOT_CACHED (1 << 3)
+#define NFSD4_SLOT_REUSED (1 << 4)
u8 sl_flags;
char sl_data[];
};
@@ -318,16 +341,19 @@ struct nfsd4_session {
u32 se_cb_slot_avail; /* bitmap of available slots */
u32 se_cb_highest_slot; /* highest slot client wants */
u32 se_cb_prog;
- bool se_dead;
struct list_head se_hash; /* hash by sessionid */
struct list_head se_perclnt;
+ struct list_head se_all_sessions; /* global list of sessions */
struct nfs4_client *se_client;
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_cb_sec se_cb_sec;
struct list_head se_conns;
u32 se_cb_seq_nr[NFSD_BC_SLOT_TABLE_SIZE];
- struct nfsd4_slot *se_slots[]; /* forward channel slots */
+ struct xarray se_slots; /* forward channel slots */
+ u16 se_slot_gen;
+ bool se_dead;
+ u32 se_target_maxslots;
};
/* formatted contents of nfs4_sessionid */
@@ -505,7 +531,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
struct knfsd_fh rp_openfh;
- atomic_t rp_locked;
+ int rp_locked;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 696c89f68a9e..ad2c0c432d08 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -626,7 +626,6 @@ DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
DEFINE_STATEID_EVENT(deleg_write);
DEFINE_STATEID_EVENT(deleg_return);
-DEFINE_STATEID_EVENT(deleg_recall);
DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
TP_PROTO(u32 seqid, const stateid_t *stp),
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 382cc1389396..c26ba86dbdfd 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -576,9 +576,7 @@ struct nfsd4_sequence {
u32 slotid; /* request/response */
u32 maxslots; /* request/response */
u32 cachethis; /* request */
-#if 0
u32 target_maxslots; /* response */
-#endif /* not yet */
u32 status_flags; /* response */
};
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index e8b00309c449..f1a315cd31b7 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -59,16 +59,20 @@
* 1: CB_GETATTR opcode (32-bit)
* N: file_handle
* 1: number of entry in attribute array (32-bit)
- * 1: entry 0 in attribute array (32-bit)
+ * 3: entry 0-2 in attribute array (32-bit * 3)
*/
#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
cb_sequence_enc_sz + \
- 1 + enc_nfs4_fh_sz + 1 + 1)
+ 1 + enc_nfs4_fh_sz + 1 + 3)
/*
* 4: fattr_bitmap_maxsz
* 1: attribute array len
* 2: change attr (64-bit)
* 2: size (64-bit)
+ * 2: atime.seconds (64-bit)
+ * 1: atime.nanoseconds (32-bit)
+ * 2: mtime.seconds (64-bit)
+ * 1: mtime.nanoseconds (32-bit)
*/
#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
- cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
+ cb_sequence_dec_sz + 4 + 1 + 2 + 2 + 2 + 1 + 2 + 1 + op_dec_sz)
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index ba3e1f591f36..6b506995818d 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -21,6 +21,8 @@
* nilfs_palloc_groups_per_desc_block - get the number of groups that a group
* descriptor block can maintain
* @inode: inode of metadata file using this allocator
+ *
+ * Return: Number of groups that a group descriptor block can maintain.
*/
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
@@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode)
/**
* nilfs_palloc_groups_count - get maximum number of groups
* @inode: inode of metadata file using this allocator
+ *
+ * Return: Maximum number of groups.
*/
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
@@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode)
* nilfs_palloc_init_blockgroup - initialize private variables for allocator
* @inode: inode of metadata file using this allocator
* @entry_size: size of the persistent object
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
{
@@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry (e.g. inode number)
* @offset: pointer to store offset number in the group
+ *
+ * Return: Number of the group that contains the entry with the index
+ * specified by @nr.
*/
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
unsigned long *offset)
@@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
* @inode: inode of metadata file using this allocator
* @group: group number
*
- * nilfs_palloc_desc_blkoff() returns block offset of the descriptor
- * block which contains a descriptor of the specified group.
+ * Return: Index number in the metadata file of the descriptor block of
+ * the group specified by @group.
*/
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
@@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
*
* nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap
* block used to allocate/deallocate entries in the specified group.
+ *
+ * Return: Index number in the metadata file of the bitmap block of
+ * the group specified by @group.
*/
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
@@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
* nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
* @desc: pointer to descriptor structure for the group
* @lock: spin lock protecting @desc
+ *
+ * Return: Number of free entries written in the group descriptor @desc.
*/
static unsigned long
nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
@@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
* @desc: pointer to descriptor structure for the group
* @lock: spin lock protecting @desc
* @n: delta to be added
+ *
+ * Return: Number of free entries after adjusting the group descriptor
+ * @desc.
*/
static u32
nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
@@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
* nilfs_palloc_entry_blkoff - get block offset of an entry block
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry (e.g. inode number)
+ *
+ * Return: Index number in the metadata file of the block containing
+ * the entry specified by @nr.
*/
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
@@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
* @blkoff: block offset
* @prev: nilfs_bh_assoc struct of the last used buffer
* @lock: spin lock protecting @prev
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Non-existent block.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
struct nilfs_bh_assoc *prev,
@@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
* @group: group number
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_get_desc_block(struct inode *inode,
unsigned long group,
@@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode,
* @group: group number
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
unsigned long group,
@@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode,
* nilfs_palloc_delete_bitmap_block - delete a bitmap block
* @inode: inode of metadata file using this allocator
* @group: group number
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
unsigned long group)
@@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
* @nr: serial number of the entry (e.g. inode number)
* @create: create flag
* @bhp: pointer to store the resultant buffer head
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
int create, struct buffer_head **bhp)
@@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
* nilfs_palloc_delete_entry_block - delete an entry block
* @inode: inode of metadata file using this allocator
* @nr: serial number of the entry
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
{
@@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
* @bsize: size in bits
* @lock: spin lock protecting @bitmap
* @wrap: whether to wrap around
+ *
+ * Return: Offset number within the group of the found free entry, or
+ * %-ENOSPC if not found.
*/
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
unsigned long target,
@@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
* @inode: inode of metadata file using this allocator
* @curr: current group number
* @max: maximum number of groups
+ *
+ * Return: Number of remaining descriptors (= groups) managed by the descriptor
+ * block.
*/
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
@@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
* nilfs_palloc_count_desc_blocks - count descriptor blocks number
* @inode: inode of metadata file using this allocator
* @desc_blocks: descriptor blocks number [out]
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
unsigned long *desc_blocks)
@@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode,
* MDT file growing
* @inode: inode of metadata file using this allocator
* @desc_blocks: known current descriptor blocks count
+ *
+ * Return: true if a group can be added in the metadata file, false if not.
*/
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
unsigned long desc_blocks)
@@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
* @inode: inode of metadata file using this allocator
* @nused: current number of used entries
* @nmaxp: max number of entries [out]
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ERANGE - Number of entries in use is out of range.
*/
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
{
@@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
* @inode: inode of metadata file using this allocator
* @req: nilfs_palloc_req structure exchanged for the allocation
* @wrap: whether to wrap around
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - Entries exhausted (No entries available for allocation).
+ * * %-EROFS - Read only filesystem
*/
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req, bool wrap)
@@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
* nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
* @inode: inode of metadata file using this allocator
* @req: nilfs_palloc_req structure exchanged for the removal
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_prepare_free_entry(struct inode *inode,
struct nilfs_palloc_req *req)
@@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode,
* @inode: inode of metadata file using this allocator
* @entry_nrs: array of entry numbers to be deallocated
* @nitems: number of entries stored in @entry_nrs
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 3f115ab7e9a7..046d876ea3e0 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -21,6 +21,8 @@
*
* The number of entries per group is defined by the number of bits
* that a bitmap block can maintain.
+ *
+ * Return: Number of entries per group.
*/
static inline unsigned long
nilfs_palloc_entries_per_group(const struct inode *inode)
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index c9e8d9a7d820..ccc1a7aa52d2 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
* @ptrp: place to store the value associated to @key
*
* Description: nilfs_bmap_lookup_at_level() finds a record whose key
- * matches @key in the block at @level of the bmap.
- *
- * Return Value: On success, 0 is returned and the record associated with @key
- * is stored in the place pointed by @ptrp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * matches @key in the block at @level of the bmap. The record associated
+ * with @key is stored in the place pointed to by @ptrp.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
__u64 *ptrp)
@@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
* Description: nilfs_bmap_insert() inserts the new key-record pair specified
* by @key and @rec into @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EEXIST - A record associated with @key already exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - A record associated with @key already exists.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
{
@@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
* Description: nilfs_bmap_seek_key() seeks a valid key on @bmap
* starting from @start, and stores it to @keyp if found.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No valid entry was found
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No valid entry was found.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
{
@@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
* Description: nilfs_bmap_delete() deletes the key-record pair specified by
* @key from @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
{
@@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
* Description: nilfs_bmap_truncate() removes key-record pairs whose keys are
* greater than or equal to @key from @bmap.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
{
@@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap)
* Description: nilfs_bmap_propagate() marks the buffers that directly or
* indirectly refer to the block specified by @bh dirty.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
{
@@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap,
/**
* nilfs_bmap_assign - assign a new block number to a block
- * @bmap: bmap
- * @bh: pointer to buffer head
+ * @bmap: bmap
+ * @bh: place to store a pointer to the buffer head to which a block
+ * address is assigned (in/out)
* @blocknr: block number
- * @binfo: block information
+ * @binfo: block information
*
* Description: nilfs_bmap_assign() assigns the block number @blocknr to the
- * buffer specified by @bh.
- *
- * Return Value: On success, 0 is returned and the buffer head of a newly
- * create buffer and the block information associated with the buffer are
- * stored in the place pointed by @bh and @binfo, respectively. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * buffer specified by @bh. The block information is stored in the memory
+ * pointed to by @binfo, and the buffer head may be replaced as a block
+ * address is assigned, in which case a pointer to the new buffer head is
+ * stored in the memory pointed to by @bh.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_assign(struct nilfs_bmap *bmap,
struct buffer_head **bh,
@@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap,
* Description: nilfs_bmap_mark() marks the block specified by @key and @level
* as dirty.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
@@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
* Description: nilfs_test_and_clear() is the atomic operation to test and
* clear the dirty state of @bmap.
*
- * Return Value: 1 is returned if @bmap is dirty, or 0 if clear.
+ * Return: 1 if @bmap is dirty, or 0 if clear.
*/
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
{
@@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key;
*
* Description: nilfs_bmap_read() initializes the bmap @bmap.
*
- * Return Value: On success, 0 is returned. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (corrupted bmap).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 54a3fa0cf67e..568367129092 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -201,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh)
* Note that the current implementation does not support folio sizes larger
* than the page size.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EIO - I/O error (metadata corruption).
* * %-ENOMEM - Insufficient memory available.
*/
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index ef5061bb56da..0d8f7fb15c2e 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
* @inode: host inode of btree
* @blocknr: block number
*
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the node is broken.
*/
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
size_t size, struct inode *inode,
@@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
* @node: btree root node to be examined
* @inode: host inode of btree
*
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the root node is broken.
*/
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
struct inode *inode)
@@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
* @minlevel: start level
* @nextkey: place to store the next valid key
*
- * Return Value: If a next key was found, 0 is returned. Otherwise,
- * -ENOENT is returned.
+ * Return: 0 if the next key was found, %-ENOENT if not found.
*/
static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
const struct nilfs_btree_path *path,
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c20207d7a989..bcc7d76269ac 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
* @cnop: place to store the next checkpoint number
* @bhp: place to store a pointer to buffer_head struct
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - no block exists in the range.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No block exists in the range.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
__u64 start_cno, __u64 end_cno,
@@ -239,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
* stores it to the inode file given by @ifile and the nilfs root object
* given by @root.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid checkpoint.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
@@ -307,7 +305,8 @@ out_sem:
* In either case, the buffer of the block containing the checkpoint entry
* and the cpfile inode are made dirty for inclusion in the write log.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
* * %-EROFS - Read only filesystem
@@ -376,7 +375,8 @@ out_sem:
* cpfile with the data given by the arguments @root, @blkinc, @ctime, and
* @minor.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
*/
@@ -447,14 +447,11 @@ error:
* the period from @start to @end, excluding @end itself. The checkpoints
* which have been already deleted are ignored.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid checkpoints.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
__u64 start,
@@ -718,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
* number to continue searching.
*
* Return: Count of checkpoint info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
* * %-EINVAL - Invalid checkpoint mode.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
@@ -743,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
* @cpfile: checkpoint file inode
* @cno: checkpoint number to delete
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EBUSY - Checkpoint in use (snapshot specified).
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOENT - No valid checkpoint found.
@@ -1011,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
* @cno: checkpoint number
*
* Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
- * the following negative error code on failure.
+ * one of the following negative error codes on failure:
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOENT - No such checkpoint.
* * %-ENOMEM - Insufficient memory available.
@@ -1058,14 +1056,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
* Description: nilfs_change_cpmode() changes the mode of the checkpoint
* specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No such checkpoint.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No such checkpoint.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
@@ -1097,14 +1092,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
* @cpstat: pointer to a structure of checkpoint statistics
*
* Description: nilfs_cpfile_get_stat() returns information about checkpoints.
+ * The checkpoint statistics are stored in the location pointed to by @cpstat.
*
- * Return Value: On success, 0 is returned, and checkpoints information is
- * stored in the place pointed by @cpstat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
@@ -1135,6 +1128,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
* @cpsize: size of a checkpoint entry
* @raw_inode: on-disk cpfile inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
struct nilfs_inode *raw_inode, struct inode **inodep)
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index e220dcb08aa6..c664daba56ae 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat,
* @dat: DAT file inode
* @vblocknr: virtual block number
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid DAT entry (internal code).
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
@@ -302,14 +303,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
* Description: nilfs_dat_freev() frees the virtual block numbers specified by
* @vblocknrs and @nitems.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The virtual block number has not been allocated.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
@@ -325,12 +323,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
* Description: nilfs_dat_move() changes the block number associated with
* @vblocknr to @blocknr.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
@@ -390,17 +386,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
* @blocknrp: pointer to a block number
*
* Description: nilfs_dat_translate() maps the virtual block number @vblocknr
- * to the corresponding block number.
- *
- * Return Value: On success, 0 is returned and the block number associated
- * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * to the corresponding block number. The block number associated with
+ * @vblocknr is stored in the place pointed to by @blocknrp.
*
- * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
@@ -489,6 +482,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
* @entry_size: size of a dat entry
* @raw_inode: on-disk dat inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
struct nilfs_inode *raw_inode, struct inode **inodep)
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 0a3aea6c416b..9b7f8e9655a2 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
return 0;
}
-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
struct folio *folio, struct inode *inode)
{
size_t from = offset_in_folio(folio, de);
@@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
folio_lock(folio);
err = nilfs_prepare_chunk(folio, from, to);
- BUG_ON(err);
+ if (unlikely(err)) {
+ folio_unlock(folio);
+ return err;
+ }
de->inode = cpu_to_le64(inode->i_ino);
de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ return 0;
}
/*
@@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
from = (char *)pde - kaddr;
folio_lock(folio);
err = nilfs_prepare_chunk(folio, from, to);
- BUG_ON(err);
+ if (unlikely(err)) {
+ folio_unlock(folio);
+ goto out;
+ }
if (pde)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
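
The dir.c changes replace the BUG_ON() on nilfs_prepare_chunk() failure with error propagation: the folio lock is dropped and the error is handed back so the caller can abort the operation instead of crashing. A standalone, userspace-only sketch of that shape (all names here are illustrative, none of this is kernel API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a preparation step that may legitimately fail. */
static int prepare_chunk(int simulate_failure)
{
	return simulate_failure ? -ENOMEM : 0;
}

/*
 * Old shape: assert that the prepare step cannot fail (crash on error).
 * New shape: release the lock and hand the error back to the caller.
 */
static int update_entry(int simulate_failure)
{
	int err;

	pthread_mutex_lock(&lock);
	err = prepare_chunk(simulate_failure);
	if (err) {
		pthread_mutex_unlock(&lock);	/* release before bailing out */
		return err;
	}
	/* ... modify the entry and commit ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", update_entry(0));
	printf("error path: %d\n", update_entry(1));
	return 0;
}
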
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 2dbb15767df1..561c220799c7 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -46,14 +46,11 @@
* specified by @pbn to the GC pagecache with the key @blkoff.
* This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
*
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The block specified with @pbn does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The block specified with @pbn does not exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
sector_t pbn, __u64 vbn,
@@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
* specified by @vbn to the GC pagecache. @pbn can be supplied by the
* caller to avoid translation of the disk block address.
*
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Invalid virtual block address.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index e7339eb3c08a..c4cd4a4dedd0 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
* @out_ino: pointer to a variable to store inode number
* @out_bh: buffer_head contains newly allocated disk inode
*
- * Return Value: On success, 0 is returned and the newly allocated inode
- * number is stored in the place pointed by @ino, and buffer_head pointer
- * that contains newly allocated disk inode structure is stored in the
- * place pointed by @out_bh
- * On error, one of the following negative error codes is returned.
+ * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata
+ * file and stores the inode number in the variable pointed to by @out_ino,
+ * as well as storing the ifile's buffer with the disk inode in the location
+ * pointed to by @out_bh.
*
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No inode left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No inode left.
*/
int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
struct buffer_head **out_bh)
@@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
* @ifile: ifile inode
* @ino: inode number
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The inode number @ino have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Inode number unallocated.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
{
@@ -150,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
* @ifile: ifile inode
* @nmaxinodes: current maximum of available inodes count [out]
* @nfreeinodes: free inodes count [out]
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_ifile_count_free_inodes(struct inode *ifile,
u64 *nmaxinodes, u64 *nfreeinodes)
@@ -174,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile,
* @cno: number of checkpoint entry to read
* @inode_size: size of an inode
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid checkpoint.
* * %-ENOMEM - Insufficient memory available.
* * %-EIO - I/O error (including metadata corruption).
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 23f3a75edd50..6613b8fcceb0 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
*
* This function does not issue actual read request of the specified data
* block. It is done by VFS.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_get_block(struct inode *inode, sector_t blkoff,
struct buffer_head *bh_result, int create)
@@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
* address_space_operations.
* @file: file struct of the file to be read
* @folio: the folio to be read
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
@@ -598,10 +602,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
* or does nothing if the inode already has it. This function allocates
* an additional inode to maintain page cache of B-tree nodes one-on-one.
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or %-ENOMEM if memory is insufficient.
*/
int nilfs_attach_btree_node_cache(struct inode *inode)
{
@@ -660,11 +661,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
* in one inode and the one for b-tree node pages is set up in the
* other inode, which is attached to the former inode.
*
- * Return Value: On success, a pointer to the inode for data pages is
- * returned. On errors, one of the following negative error code is returned
- * in a pointer type.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: a pointer to the inode for data pages on success, or %-ENOMEM
+ * if memory is insufficient.
*/
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
@@ -1188,7 +1186,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (size) {
if (phys && blkphy << blkbits == phys + size) {
/* The current extent goes on */
- size += n << blkbits;
+ size += (u64)n << blkbits;
} else {
/* Terminate the current extent */
ret = fiemap_fill_next_extent(
@@ -1201,14 +1199,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
} else {
/* Start a new extent */
flags = FIEMAP_EXTENT_MERGED;
logical = blkoff << blkbits;
phys = blkphy << blkbits;
- size = n << blkbits;
+ size = (u64)n << blkbits;
}
blkoff += n;
}
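
The inode.c hunks cast @n to u64 before shifting because the shift is otherwise evaluated in the narrower type of @n and only widened afterwards, so large extents silently wrap. A small userspace illustration of the difference, assuming a 4 KiB block size and a 32-bit block count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int n = 1U << 20;	/* 1Mi blocks in one extent */
	unsigned int blkbits = 12;	/* 4 KiB block size */

	uint64_t truncated = n << blkbits;		/* 32-bit shift wraps to 0 */
	uint64_t correct   = (uint64_t)n << blkbits;	/* 64-bit shift: 4 GiB */

	printf("without cast: %llu\n", (unsigned long long)truncated);
	printf("with cast:    %llu\n", (unsigned long long)correct);
	return 0;
}
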
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index fa77f78df681..a66d62a51f77 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -33,17 +33,14 @@
* @dofunc: concrete function of get/set metadata info
*
* Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of
- * calling dofunc() function on the basis of @argv argument.
- *
- * Return Value: On success, 0 is returned and requested metadata info
- * is copied into userspace. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * calling dofunc() function on the basis of @argv argument. If successful,
+ * the requested metadata information is copied to userspace memory.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
struct nilfs_argv *argv, int dir,
@@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
* given checkpoint between checkpoint and snapshot state. This ioctl
* is used in chcp and mkcp utilities.
*
- * Return Value: On success, 0 is returned and mode of a checkpoint is
- * changed. On error, one of the following negative error codes
- * is returned.
- *
- * %-EPERM - Operation not permitted.
- *
- * %-EFAULT - Failure during checkpoint mode changing.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
 + * * %-EFAULT - Failure during checkpoint mode changing.
 + * * %-EPERM - Operation not permitted.
*/
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -244,13 +238,10 @@ out:
* checkpoint from NILFS2 file system. This ioctl is used in rmcp
* utility.
*
- * Return Value: On success, 0 is returned and a checkpoint is
- * removed. On error, one of the following negative error codes
- * is returned.
- *
- * %-EPERM - Operation not permitted.
- *
- * %-EFAULT - Failure during checkpoint removing.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
 + * * %-EFAULT - Failure during checkpoint removing.
 + * * %-EPERM - Operation not permitted.
*/
static int
nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
@@ -296,7 +287,7 @@ out:
* requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in
* lscp utility and by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_cpinfo structures in output buffer.
+ * Return: Count of nilfs_cpinfo structures in output buffer.
*/
static ssize_t
nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_get_cpstat() returns information about checkpoints.
* The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities
- * and by nilfs_cleanerd daemon.
+ * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to
+ * the userspace memory pointed to by @argp.
*
- * Return Value: On success, 0 is returned, and checkpoints information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting checkpoints statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting checkpoints statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
* info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used
* in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_suinfo structures in output buffer.
+ * Return: Count of nilfs_suinfo structures in output buffer on success,
+ * or a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_get_sustat() returns segment usage statistics.
* The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities
- * and by nilfs_cleanerd daemon.
- *
- * Return Value: On success, 0 is returned, and segment usage information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * and by nilfs_cleanerd daemon. The requested segment usage information is
+ * copied to the userspace memory pointed to by @argp.
*
- * %-EFAULT - Failure during getting segment usage statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting segment usage statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
* on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used
* by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_vinfo structures in output buffer.
+ * Return: Count of nilfs_vinfo structures in output buffer on success, or
+ * a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
* about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
* is used by nilfs_cleanerd daemon.
*
- * Return value: count of nilfs_bdescs structures in output buffer.
+ * Return: Count of nilfs_bdescs structures in output buffer on success, or
+ * a negative error code on failure.
*/
static ssize_t
nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
*
* Description: nilfs_ioctl_do_get_bdescs() function returns information
* about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
- * is used by nilfs_cleanerd daemon.
- *
- * Return Value: On success, 0 is returned, and disk block descriptors are
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
+ * is used by nilfs_cleanerd daemon. If successful, disk block descriptors
+ * are copied to userspace pointer @argp.
*
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting disk block descriptors.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting disk block descriptors.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
* Description: nilfs_ioctl_move_inode_block() function registers data/node
* buffer in the GC pagecache and submit read request.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - Requested block doesn't exist.
- *
- * %-EEXIST - Blocks conflict is detected.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Requested block doesn't exist.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_move_inode_block(struct inode *inode,
struct nilfs_vdesc *vdesc,
@@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
* blocks that garbage collector specified with the array of nilfs_vdesc
* structures and stores them into page caches of GC inodes.
*
- * Return Value: Number of processed nilfs_vdesc structures or
- * error code, otherwise.
+ * Return: Number of processed nilfs_vdesc structures on success, or
+ * a negative error code on failure.
*/
static int nilfs_ioctl_move_blocks(struct super_block *sb,
struct nilfs_argv *argv, void *buf)
@@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
* in the period from p_start to p_end, excluding p_end itself. The checkpoints
* which have been already deleted are ignored.
*
- * Return Value: Number of processed nilfs_period structures or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: Number of processed nilfs_period structures on success, or one of
+ * the following negative error codes on failure:
+ * * %-EINVAL - Invalid checkpoints.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
* Description: nilfs_ioctl_free_vblocknrs() function frees
* the virtual block numbers specified by @buf and @argv->v_nmembs.
*
- * Return Value: Number of processed virtual block numbers or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: Number of processed virtual block numbers on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Unallocated virtual block number.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
* Description: nilfs_ioctl_mark_blocks_dirty() function marks
* metadata file or data blocks as dirty.
*
- * Return Value: Number of processed block descriptors or
- * error code, otherwise.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
+ * Return: Number of processed block descriptors on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Non-existent block (hole block).
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
@@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
* from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by
* nilfs_cleanerd daemon.
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -976,20 +947,14 @@ out:
* and metadata are written out to the device when it successfully
* returned.
*
- * Return Value: On success, 0 is retured. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -1023,7 +988,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
* @filp: file object
* @argp: pointer on argument from userspace
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
void __user *argp)
@@ -1059,7 +1024,7 @@ out:
* checks the arguments from userspace and calls nilfs_sufile_trim_fs, which
* performs the actual trim operation.
*
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
{
@@ -1101,7 +1066,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
* of segments in bytes and upper limit of segments in bytes.
* The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility.
*
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
{
@@ -1152,17 +1117,15 @@ out:
* @dofunc: concrete function of getting metadata info
*
* Description: nilfs_ioctl_get_info() gets metadata info by means of
- * calling dofunc() function.
+ * calling dofunc() function. The requested metadata information is copied
+ * to the userspace memory pointed to by @argp.
*
- * Return Value: On success, 0 is returned and requested metadata info
- * is copied into userspace. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp,
@@ -1202,18 +1165,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
* encapsulated in nilfs_argv and updates the segment usage info
* according to the flags in nilfs_suinfo_update.
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EPERM - Not enough permissions
- *
- * %-EFAULT - Error copying input data
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EFAULT - Error copying input data.
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EPERM - Not enough permissions.
*/
static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
@@ -1309,7 +1268,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp)
* @filp: file object
* @argp: pointer to userspace memory that contains the volume name
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EFAULT - Error copying input data.
* * %-EINVAL - Label length exceeds record size in superblock.
* * %-EIO - I/O error.
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 965b5ad1c0df..2f850a18d6e7 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
* @out_bh: output of a pointer to the buffer_head
*
* nilfs_mdt_get_block() looks up the specified buffer and tries to create
- * a new buffer if @create is not zero. On success, the returned buffer is
- * assured to be either existing or formatted using a buffer lock on success.
- * @out_bh is substituted only when zero is returned.
+ * a new buffer if @create is not zero. If (and only if) this function
+ * succeeds, it stores a pointer to the retrieved buffer head in the location
+ * pointed to by @out_bh.
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
+ * The retrieved buffer may be either an existing one or a newly allocated one.
+ * For a newly created buffer, if the callback function argument @init_block
+ * is non-NULL, the callback will be called with the buffer locked to format
+ * the block.
*
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
- *
- * %-EROFS - Read only filesystem (for create mode)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The specified block does not exist (hole block).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EROFS - Read only filesystem (for create mode).
*/
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
void (*init_block)(struct inode *,
@@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
* @out_bh, and block offset to @blkoff, respectively. @out_bh and
* @blkoff are substituted only when zero is returned.
*
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - no block was found in the range
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No block was found in the range.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
unsigned long end, unsigned long *blkoff,
@@ -321,12 +319,11 @@ out:
* @inode: inode of the meta data file
* @block: block offset
*
- * Return Value: On success, zero is returned.
- * On error, one of the following negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Non-existent block.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
@@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
* nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
* tries to release the page including the buffer from a page cache.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EBUSY - page has an active buffer.
- *
- * %-ENOENT - page cache has no page addressed by the offset.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Page has an active buffer.
+ * * %-ENOENT - Page cache has no page addressed by the offset.
*/
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
@@ -524,6 +519,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file
* @shadow: shadow mapping
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_mdt_setup_shadow_map(struct inode *inode,
struct nilfs_shadow_map *shadow)
@@ -545,6 +542,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
/**
* nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
* @inode: inode of the metadata file
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1d836a5540f3..953fbd5f0851 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -370,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
struct folio *old_folio;
struct nilfs_dir_entry *old_de;
struct nilfs_transaction_info ti;
+ bool old_is_dir = S_ISDIR(old_inode->i_mode);
int err;
if (flags & ~RENAME_NOREPLACE)
@@ -385,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
goto out;
}
- if (S_ISDIR(old_inode->i_mode)) {
+ if (old_is_dir && old_dir != new_dir) {
err = -EIO;
dir_de = nilfs_dotdot(old_inode, &dir_folio);
if (!dir_de)
@@ -397,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
struct nilfs_dir_entry *new_de;
err = -ENOTEMPTY;
- if (dir_de && !nilfs_empty_dir(new_inode))
+ if (old_is_dir && !nilfs_empty_dir(new_inode))
goto out_dir;
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
@@ -406,11 +407,13 @@ static int nilfs_rename(struct mnt_idmap *idmap,
err = PTR_ERR(new_de);
goto out_dir;
}
- nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+ err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);
folio_release_kmap(new_folio, new_de);
+ if (unlikely(err))
+ goto out_dir;
nilfs_mark_inode_dirty(new_dir);
inode_set_ctime_current(new_inode);
- if (dir_de)
+ if (old_is_dir)
drop_nlink(new_inode);
drop_nlink(new_inode);
nilfs_mark_inode_dirty(new_inode);
@@ -418,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
err = nilfs_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
- if (dir_de) {
+ if (old_is_dir) {
inc_nlink(new_dir);
nilfs_mark_inode_dirty(new_dir);
}
@@ -430,28 +433,28 @@ static int nilfs_rename(struct mnt_idmap *idmap,
*/
inode_set_ctime_current(old_inode);
- nilfs_delete_entry(old_de, old_folio);
-
- if (dir_de) {
- nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
- folio_release_kmap(dir_folio, dir_de);
- drop_nlink(old_dir);
+ err = nilfs_delete_entry(old_de, old_folio);
+ if (likely(!err)) {
+ if (old_is_dir) {
+ if (old_dir != new_dir)
+ err = nilfs_set_link(old_inode, dir_de,
+ dir_folio, new_dir);
+ drop_nlink(old_dir);
+ }
+ nilfs_mark_inode_dirty(old_dir);
}
- folio_release_kmap(old_folio, old_de);
-
- nilfs_mark_inode_dirty(old_dir);
nilfs_mark_inode_dirty(old_inode);
- err = nilfs_transaction_commit(old_dir->i_sb);
- return err;
-
out_dir:
if (dir_de)
folio_release_kmap(dir_folio, dir_de);
out_old:
folio_release_kmap(old_folio, old_de);
out:
- nilfs_transaction_abort(old_dir->i_sb);
+ if (likely(!err))
+ err = nilfs_transaction_commit(old_dir->i_sb);
+ else
+ nilfs_transaction_abort(old_dir->i_sb);
return err;
}
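
The rename() rework also funnels every path to a single exit that commits the transaction when no error occurred and aborts it otherwise. Stripped of the filesystem details, the control-flow shape is roughly the following (names are illustrative only):

#include <errno.h>
#include <stdio.h>

static void transaction_commit(void) { puts("commit"); }
static void transaction_abort(void)  { puts("abort"); }

static int do_rename(int fail_step)
{
	int err = 0;

	/* step 1 */
	if (fail_step == 1) {
		err = -EIO;
		goto out;
	}
	/* step 2 */
	if (fail_step == 2) {
		err = -ENOMEM;
		goto out;
	}
out:
	if (!err)
		transaction_commit();
	else
		transaction_abort();
	return err;
}

int main(void)
{
	do_rename(0);	/* commits */
	do_rename(2);	/* aborts with -ENOMEM */
	return 0;
}
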
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index dff241c53fc5..cb6ed54accd7 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
int nilfs_empty_dir(struct inode *);
struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
-void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
- struct folio *, struct inode *);
+int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct folio *folio, struct inode *inode);
/* file.c */
extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 9de2a494a069..806b056d2260 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -135,8 +135,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
* nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
* @folio: Folio to be checked.
*
- * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
- * Otherwise, it returns true.
+ * Return: false if the folio has dirty buffers, true otherwise.
*/
bool nilfs_folio_buffers_clean(struct folio *folio)
{
@@ -392,6 +391,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
/**
* nilfs_clear_folio_dirty - discard dirty folio
* @folio: dirty folio that will be discarded
+ *
+ * nilfs_clear_folio_dirty() clears working states including dirty state for
+ * the folio and its buffers. If the folio has buffers, they are cleared only
+ * after confirming that none of the buffer heads are busy (none have valid
+ * references and none are locked).
*/
void nilfs_clear_folio_dirty(struct folio *folio)
{
@@ -399,10 +403,6 @@ void nilfs_clear_folio_dirty(struct folio *folio)
BUG_ON(!folio_test_locked(folio));
- folio_clear_uptodate(folio);
- folio_clear_mappedtodisk(folio);
- folio_clear_checked(folio);
-
head = folio_buffers(folio);
if (head) {
const unsigned long clear_bits =
@@ -410,6 +410,25 @@ void nilfs_clear_folio_dirty(struct folio *folio)
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
BIT(BH_Delay));
+ bool busy, invalidated = false;
+
+recheck_buffers:
+ busy = false;
+ bh = head;
+ do {
+ if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
+ busy = true;
+ break;
+ }
+ } while (bh = bh->b_this_page, bh != head);
+
+ if (busy) {
+ if (invalidated)
+ return;
+ invalidate_bh_lrus();
+ invalidated = true;
+ goto recheck_buffers;
+ }
bh = head;
do {
@@ -419,6 +438,9 @@ void nilfs_clear_folio_dirty(struct folio *folio)
} while (bh = bh->b_this_page, bh != head);
}
+ folio_clear_uptodate(folio);
+ folio_clear_mappedtodisk(folio);
+ folio_clear_checked(folio);
__nilfs_clear_folio_dirty(folio);
}
@@ -477,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio)
* This function searches an extent of buffers marked "delayed" which
* starts from a block offset equal to or larger than @start_blk. If
* such an extent was found, this will store the start offset in
- * @blkoff and return its length in blocks. Otherwise, zero is
- * returned.
+ * @blkoff and return its length in blocks.
+ *
+ * Return: Length in blocks of the found extent, or 0 if no such extent exists.
*/
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
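
The nilfs_clear_folio_dirty() change scans the folio's buffer ring for busy buffer heads and, if any are found, invalidates the per-CPU buffer-head LRUs once and rescans before giving up. The same scan, flush-once, rescan shape in a standalone form (illustrative only, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NITEMS 4

static int refcount[NITEMS] = { 0, 1, 0, 0 };	/* one item still referenced */

/* Stand-in for invalidate_bh_lrus(): drop references cached elsewhere. */
static void drop_cached_refs(void)
{
	for (int i = 0; i < NITEMS; i++)
		refcount[i] = 0;
}

static bool try_clear_all(void)
{
	bool invalidated = false;

recheck:
	for (int i = 0; i < NITEMS; i++) {
		if (refcount[i]) {
			if (invalidated)
				return false;	/* still busy after one flush */
			drop_cached_refs();
			invalidated = true;
			goto recheck;
		}
	}
	/* ... safe to clear the state of every item here ... */
	return true;
}

int main(void)
{
	printf("cleared: %s\n", try_clear_all() ? "yes" : "no");
	return 0;
}
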
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index e43405bf521e..22aecf6e2344 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err)
* @check_bytes: number of bytes to be checked
* @start: DBN of start block
* @nblock: number of blocks to be checked
+ *
+ * Return: 0 on success, or %-EIO if an I/O error occurs.
*/
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
struct buffer_head *bhs, u32 *sum,
@@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs,
* @sr_block: disk block number of the super root block
* @pbh: address of a buffer_head pointer to return super root buffer
* @check: CRC check flag
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Super root block corrupted.
+ * * %-EIO - I/O error.
*/
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
struct buffer_head **pbh, int check)
@@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
* @nilfs: nilfs object
* @start_blocknr: start block number of the log
* @sum: pointer to return segment summary structure
+ *
+ * Return: Buffer head pointer, or NULL if an I/O error occurs.
*/
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
@@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
* @seg_seq: sequence number of segment
* @bh_sum: buffer head of summary block
* @sum: segment summary struct
+ *
+ * Return: 0 on success, or one of the following internal codes on failure:
+ * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch.
+ * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch.
+ * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range.
+ * * %NILFS_SEG_FAIL_IO - I/O error.
+ * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed.
*/
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
struct buffer_head *bh_sum,
@@ -238,6 +254,9 @@ out:
* @pbh: the current buffer head on summary blocks [in, out]
* @offset: the current byte offset on summary blocks [in, out]
* @bytes: byte size of the item to be read
+ *
+ * Return: Kernel space address of current segment summary entry, or
+ * NULL if an I/O error occurs.
*/
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
struct buffer_head **pbh,
@@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
* @start_blocknr: start block number of the log
* @sum: log summary information
* @head: list head to add nilfs_recovery_block struct
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
struct nilfs_segment_summary *sum,
@@ -571,6 +595,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
* @sb: super block instance
* @root: NILFS root instance
* @ri: pointer to a nilfs_recovery_info
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Log format error.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
struct super_block *sb,
@@ -754,18 +784,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
* @sb: super block instance
* @ri: pointer to a nilfs_recovery_info struct to store search results.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EINVAL - Inconsistent filesystem state.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Inconsistent filesystem state.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
*/
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
struct super_block *sb,
@@ -830,14 +855,11 @@ failed:
* segment pointed by the superblock. It sets up struct the_nilfs through
* this search. It fills nilfs_recovery_info (ri) required for recovery.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-EINVAL - No valid segment found
- *
- * %-EIO - I/O error
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - No valid segment found.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_search_super_root(struct the_nilfs *nilfs,
struct nilfs_recovery_info *ri)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e08cab03366b..a8bdf3d318ea 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
* @segbuf: buffer storing a log to be written
* @nilfs: nilfs object
*
- * Return Value: On Success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: Always 0.
*/
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct the_nilfs *nilfs)
@@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* nilfs_segbuf_wait - wait for completion of requested BIOs
* @segbuf: segment buffer
*
- * Return Value: On Success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error
+ * Return: 0 on success, or %-EIO if an I/O error is detected.
*/
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 587251830897..3a202e51b360 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb,
* When @vacancy_check flag is set, this function will check the amount of
* free space, and will wait for the GC to reclaim disk space if low capacity.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-ENOSPC - No space left on device
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (if checking free space).
*/
int nilfs_transaction_begin(struct super_block *sb,
struct nilfs_transaction_info *ti,
@@ -252,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb,
* nilfs_transaction_commit() sets a timer to start the segment
* constructor. If a sync flag is set, it starts construction
* directly.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_transaction_commit(struct super_block *sb)
{
@@ -407,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
/**
* nilfs_segctor_reset_segment_buffer - reset the current segment buffer
* @sci: nilfs_sc_info
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
@@ -734,7 +736,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
if (!head)
head = create_empty_buffers(folio,
i_blocksize(inode), 0);
- folio_unlock(folio);
bh = head;
do {
@@ -744,11 +745,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
list_add_tail(&bh->b_assoc_buffers, listp);
ndirties++;
if (unlikely(ndirties >= nlimit)) {
+ folio_unlock(folio);
folio_batch_release(&fbatch);
cond_resched();
return ndirties;
}
} while (bh = bh->b_this_page, bh != head);
+
+ folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
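
The nilfs_lookup_dirty_data_buffers() hunk keeps the folio locked for the whole walk over its buffers and unlocks it on both the early-return path (dirty-buffer limit reached) and the normal path. As a generic shape (illustrative names, not kernel APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t folio_lock = PTHREAD_MUTEX_INITIALIZER;
static int buffers[8] = { 1, 0, 1, 1, 0, 1, 0, 1 };

static int collect_dirty(int limit)
{
	int ndirties = 0;

	pthread_mutex_lock(&folio_lock);
	for (int i = 0; i < 8; i++) {
		if (!buffers[i])
			continue;
		ndirties++;
		if (ndirties >= limit) {
			pthread_mutex_unlock(&folio_lock);	/* early exit: unlock here */
			return ndirties;
		}
	}
	pthread_mutex_unlock(&folio_lock);	/* normal exit: unlock after the walk */
	return ndirties;
}

int main(void)
{
	printf("collected %d dirty buffers\n", collect_dirty(3));
	return 0;
}
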
@@ -1118,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
* a super root block containing this sufile change is complete, and it can
* be canceled with nilfs_sufile_cancel_freev() until then.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINVAL - Invalid segment number.
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
@@ -1315,6 +1320,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
* nilfs_segctor_begin_construction - setup segment buffer to make a new log
* @sci: nilfs_sc_info
* @nilfs: nilfs object
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
@@ -2312,18 +2319,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
* nilfs_construct_segment - construct a logical segment
* @sb: super block
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
int nilfs_construct_segment(struct super_block *sb)
{
@@ -2347,18 +2349,13 @@ int nilfs_construct_segment(struct super_block *sb)
* @start: start byte offset
* @end: end byte offset (inclusive)
*
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
*/
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
loff_t start, loff_t end)
@@ -2464,6 +2461,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
* nilfs_segctor_construct - form logs and write them to disk
* @sci: segment constructor object
* @mode: mode of log forming
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
@@ -2836,7 +2835,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
* This allocates a log writer object, initializes it, and starts the
* log writer.
*
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
* * %-EINTR - Log writer thread creation failed due to interruption.
* * %-ENOMEM - Insufficient memory available.
*/
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index d3ecc813d633..330f269abedf 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
/**
* nilfs_sufile_get_ncleansegs - return the number of clean segments
* @sufile: inode of segment usage file
+ *
+ * Return: Number of clean segments.
*/
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
@@ -155,17 +157,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
* of successfully modified segments from the head is stored in the
* place @ndone points to.
*
- * Return Value: On success, zero is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - Given segment usage is in hole block (may be returned if
- * @create is zero)
- *
- * %-EINVAL - Invalid segment usage number
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid segment usage number
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Given segment usage is in hole block (may be returned if
+ * @create is zero)
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
int create, size_t *ndone,
@@ -272,10 +270,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
* @start: minimum segment number of allocatable region (inclusive)
* @end: maximum segment number of allocatable region (inclusive)
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-ERANGE - invalid segment region
+ * Return: 0 on success, or %-ERANGE if segment range is invalid.
*/
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
@@ -300,17 +295,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
* @sufile: inode of segment usage file
* @segnump: pointer to segment number
*
- * Description: nilfs_sufile_alloc() allocates a clean segment.
- *
- * Return Value: On success, 0 is returned and the segment number of the
- * allocated segment is stored in the place pointed by @segnump. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
+ * Description: nilfs_sufile_alloc() allocates a clean segment, and stores
+ * its segment number in the place pointed to by @segnump.
*
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No clean segment left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No clean segment left.
*/
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
@@ -510,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
* nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
* @sufile: inode of segment usage file
* @segnum: segment number
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
@@ -569,6 +563,8 @@ out_sem:
* @segnum: segment number
* @nblocks: number of live blocks in the segment
* @modtime: modification time (option)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
unsigned long nblocks, time64_t modtime)
@@ -610,16 +606,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
* @sufile: inode of segment usage file
* @sustat: pointer to a structure of segment usage statistics
*
- * Description: nilfs_sufile_get_stat() returns information about segment
- * usage.
+ * Description: nilfs_sufile_get_stat() retrieves segment usage statistics
+ * and stores them in the location pointed to by @sustat.
*
- * Return Value: On success, 0 is returned, and segment usage information is
- * stored in the place pointed by @sustat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
@@ -683,16 +676,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
* @start: start segment number (inclusive)
* @end: end segment number (inclusive)
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid number of segments specified
- *
- * %-EBUSY - Dirty or active segments are present in the range
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments are present in the range.
+ * * %-EINVAL - Invalid number of segments specified.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
static int nilfs_sufile_truncate_range(struct inode *sufile,
__u64 start, __u64 end)
@@ -787,16 +776,12 @@ out:
* @sufile: inode of segment usage file
* @newnsegs: new number of segments
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - Enough free space is not left for shrinking
- *
- * %-EBUSY - Dirty or active segments exist in the region to be truncated
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments exist in the region to be truncated.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - Enough free space is not left for shrinking.
*/
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
@@ -865,7 +850,7 @@ out:
* @nsi: size of suinfo array
*
* Return: Count of segment usage info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
* * %-EIO - I/O error (including metadata corruption).
* * %-ENOMEM - Insufficient memory available.
*/
@@ -939,14 +924,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
* segment usage accordingly. Only the fields indicated by the sup_flags
* are updated.
*
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
unsigned int supsz, size_t nsup)
@@ -1073,7 +1055,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
* and start+len is rounded down. For each clean segment blkdev_issue_discard
* function is invoked.
*
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
@@ -1219,6 +1201,8 @@ out_sem:
* @susize: size of a segment usage entry
* @raw_inode: on-disk sufile inode
* @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_sufile_read(struct super_block *sb, size_t susize,
struct nilfs_inode *raw_inode, struct inode **inodep)
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 8e8a1a5a0402..cd6f28ab3521 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range);
* nilfs_sufile_scrap - make a segment garbage
* @sufile: inode of segment usage file
* @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
{
@@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
* nilfs_sufile_free - free segment
* @sufile: inode of segment usage file
* @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
{
@@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
* @segnumv: array of segment numbers
* @nsegs: size of @segnumv array
* @ndone: place to store the number of freed segments
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
size_t nsegs, size_t *ndone)
@@ -95,8 +101,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
* @nsegs: size of @segnumv array
* @ndone: place to store the number of cancelled segments
*
- * Return Value: On success, 0 is returned. On error, a negative error codes
- * is returned.
+ * Return: 0 on success, or a negative error code on failure.
*/
static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
__u64 *segnumv, size_t nsegs,
@@ -114,14 +119,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
* Description: nilfs_sufile_set_error() marks the segment specified by
* @segnum as erroneous. The error segment will never be used again.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid segment usage number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
{
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index eca79cca3803..badc2cbc895e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag)
* This function restores state flags in the on-disk super block.
* This will set "clean" flag (i.e. NILFS_VALID_FS) unless the
* filesystem was not clean previously.
+ *
+ * Return: 0 on success, or %-EIO on I/O error or superblock corruption.
*/
int nilfs_cleanup_super(struct super_block *sb)
{
@@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb)
* nilfs_move_2nd_super - relocate secondary super block
* @sb: super block instance
* @sb2off: new offset of the secondary super block (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
@@ -420,6 +424,8 @@ out:
* nilfs_resize_fs - resize the filesystem
* @sb: super block instance
* @newsize: new size of the filesystem (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
@@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
* nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
* @root_dentry: root dentry of the tree to be shrunk
*
- * This function returns true if the tree was in-use.
+ * Return: true if the tree was in-use, false otherwise.
*/
static bool nilfs_tree_is_busy(struct dentry *root_dentry)
{
@@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
*
* This function is called exclusively by nilfs->ns_mount_mutex.
* So, the recovery process is protected from other simultaneous mounts.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
static int
nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ac03fd3c330c..cb01ea81724d 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
* alloc_nilfs - allocate a nilfs object
* @sb: super block instance
*
- * Return Value: On success, pointer to the_nilfs is returned.
- * On error, NULL is returned.
+ * Return: a pointer to the allocated nilfs object on success, or NULL on
+ * failure.
*/
struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
@@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
* containing a super root from a given super block, and initializes
* relevant information on the nilfs object preparatory for log
* scanning and recovery.
+ *
+ * Return: 0 on success, or %-EINVAL if current segment number is out
+ * of range.
*/
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
@@ -200,8 +203,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
* exponent information written in @sbp and stores it in @blocksize,
* or aborts with an error message if it's too large.
*
- * Return Value: On success, 0 is returned. If the block size is too
- * large, -EINVAL is returned.
+ * Return: 0 on success, or %-EINVAL if the block size is too large.
*/
static int nilfs_get_blocksize(struct super_block *sb,
struct nilfs_super_block *sbp, int *blocksize)
@@ -226,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb,
* load_nilfs() searches and load the latest super root,
* attaches the last segment, and does recovery if needed.
* The caller must call this exclusively for simultaneous mounts.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - No valid segment found.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * %-EROFS - Read only device or RO compat mode (if recovery is required).
*/
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
@@ -395,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
* nilfs_nrsvsegs - calculate the number of reserved segments
* @nilfs: nilfs object
* @nsegs: total number of segments
+ *
+ * Return: Number of reserved segments.
*/
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
@@ -406,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
/**
* nilfs_max_segment_count - calculate the maximum number of segments
* @nilfs: nilfs object
+ *
+ * Return: Maximum number of segments.
*/
static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
{
@@ -538,7 +551,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
* area, or if the parameters themselves are not normal, it is
* determined to be invalid.
*
- * Return Value: true if invalid, false if valid.
+ * Return: true if invalid, false if valid.
*/
static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
@@ -684,8 +697,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
* reading the super block, getting disk layout information, initializing
* shared fields in the_nilfs).
*
- * Return Value: On success, 0 is returned. On error, a negative error
- * code is returned.
+ * Return: 0 on success, or a negative error code on failure.
*/
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 6004dfdfdf0f..c4cdaf5fa7ed 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -20,7 +20,7 @@
static int dir_notify_enable __read_mostly = 1;
#ifdef CONFIG_SYSCTL
-static struct ctl_table dnotify_sysctls[] = {
+static const struct ctl_table dnotify_sysctls[] = {
{
.procname = "dir-notify-enable",
.data = &dir_notify_enable,
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 24c7c5df4998..95646f7c46ca 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -223,7 +223,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
struct fanotify_perm_event *event,
struct fsnotify_iter_info *iter_info)
{
- int ret;
+ int ret, errno;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -262,14 +262,23 @@ static int fanotify_get_response(struct fsnotify_group *group,
ret = 0;
break;
case FAN_DENY:
+ /* Check custom errno from pre-content events */
+ errno = fanotify_get_response_errno(event->response);
+ if (errno) {
+ ret = -errno;
+ break;
+ }
+ fallthrough;
default:
ret = -EPERM;
}
/* Check if the response should be audited */
- if (event->response & FAN_AUDIT)
- audit_fanotify(event->response & ~FAN_AUDIT,
- &event->audit_rule);
+ if (event->response & FAN_AUDIT) {
+ u32 response = event->response &
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS);
+ audit_fanotify(response & ~FAN_AUDIT, &event->audit_rule);
+ }
pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
group, event, ret);
@@ -548,9 +557,13 @@ static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
return &pevent->fae;
}
-static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
+static struct fanotify_event *fanotify_alloc_perm_event(const void *data,
+ int data_type,
gfp_t gfp)
{
+ const struct path *path = fsnotify_data_path(data, data_type);
+ const struct file_range *range =
+ fsnotify_data_file_range(data, data_type);
struct fanotify_perm_event *pevent;
pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
@@ -564,6 +577,9 @@ static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
pevent->hdr.len = 0;
pevent->state = FAN_EVENT_INIT;
pevent->path = *path;
+ /* NULL ppos means no range info */
+ pevent->ppos = range ? &range->pos : NULL;
+ pevent->count = range ? range->count : 0;
path_get(path);
return &pevent->fae;
@@ -801,7 +817,7 @@ static struct fanotify_event *fanotify_alloc_event(
old_memcg = set_active_memcg(group->memcg);
if (fanotify_is_perm_event(mask)) {
- event = fanotify_alloc_perm_event(path, gfp);
+ event = fanotify_alloc_perm_event(data, data_type, gfp);
} else if (fanotify_is_error_event(mask)) {
event = fanotify_alloc_error_event(group, fsid, data,
data_type, &hash);
@@ -909,8 +925,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR);
BUILD_BUG_ON(FAN_RENAME != FS_RENAME);
+ BUILD_BUG_ON(FAN_PRE_ACCESS != FS_PRE_ACCESS);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 22);
mask = fanotify_group_event_mask(group, iter_info, &match_mask,
mask, data, data_type, dir);
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index e5ab33cae6a7..c12cbc270539 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -425,6 +425,8 @@ FANOTIFY_PE(struct fanotify_event *event)
struct fanotify_perm_event {
struct fanotify_event fae;
struct path path;
+ const loff_t *ppos; /* optional file range info */
+ size_t count;
u32 response; /* userspace answer to the event */
unsigned short state; /* state of the event */
int fd; /* fd we passed to userspace for this event */
@@ -446,6 +448,14 @@ static inline bool fanotify_is_perm_event(u32 mask)
mask & FANOTIFY_PERM_EVENTS;
}
+static inline bool fanotify_event_has_access_range(struct fanotify_event *event)
+{
+ if (!(event->mask & FANOTIFY_PRE_CONTENT_EVENTS))
+ return false;
+
+ return FANOTIFY_PERM(event)->ppos;
+}
+
static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse)
{
return container_of(fse, struct fanotify_event, fse);
@@ -518,3 +528,8 @@ static inline unsigned int fanotify_mark_user_flags(struct fsnotify_mark *mark)
return mflags;
}
+
+static inline u32 fanotify_get_response_errno(int res)
+{
+ return (res >> FAN_ERRNO_SHIFT) & FAN_ERRNO_MASK;
+}
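The shift/mask decode above implies the matching encode on the userspace side: a listener packs the errno into the upper bits of the response word next to FAN_DENY. A hedged sketch of such a reply, assuming FAN_ERRNO_SHIFT and FAN_ERRNO_MASK (or an equivalent FAN_DENY_ERRNO() convenience macro) are exposed through the fanotify uapi header:

	/* Hypothetical userspace reply: deny a pre-content event with EBUSY. */
	/* needs <sys/fanotify.h>, <unistd.h>, <errno.h> */
	struct fanotify_response resp = {
		.fd = metadata->fd,	/* fd from the event metadata */
		.response = FAN_DENY |
			    ((EBUSY & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT),
	};

	if (write(fan_fd, &resp, sizeof(resp)) != sizeof(resp))
		perror("fanotify response");

As the process_access_response() changes below enforce, a non-zero errno is only accepted from pre-content priority groups and only from the small set expected for open/read/write of regular files (EIO, EPERM, EBUSY, ETXTBSY, EAGAIN, ENOSPC, EDQUOT).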
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 2d85c71717d6..ba3e2d09eb44 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -58,7 +58,7 @@ static int fanotify_max_queued_events __read_mostly;
static long ft_zero = 0;
static long ft_int_max = INT_MAX;
-static struct ctl_table fanotify_table[] = {
+static const struct ctl_table fanotify_table[] = {
{
.procname = "max_user_groups",
.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
@@ -100,8 +100,7 @@ static void __init fanotify_sysctls_init(void)
*
* Internal and external open flags are stored together in field f_flags of
* struct file. Only external open flags shall be allowed in event_f_flags.
- * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
- * excluded.
+ * Internal flags like FMODE_EXEC shall be excluded.
*/
#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
O_ACCMODE | O_APPEND | O_NONBLOCK | \
@@ -118,10 +117,12 @@ struct kmem_cache *fanotify_perm_event_cachep __ro_after_init;
#define FANOTIFY_EVENT_ALIGN 4
#define FANOTIFY_FID_INFO_HDR_LEN \
(sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
-#define FANOTIFY_PIDFD_INFO_HDR_LEN \
+#define FANOTIFY_PIDFD_INFO_LEN \
sizeof(struct fanotify_event_info_pidfd)
#define FANOTIFY_ERROR_INFO_LEN \
(sizeof(struct fanotify_event_info_error))
+#define FANOTIFY_RANGE_INFO_LEN \
+ (sizeof(struct fanotify_event_info_range))
static int fanotify_fid_info_len(int fh_len, int name_len)
{
@@ -159,9 +160,6 @@ static size_t fanotify_event_len(unsigned int info_mode,
int fh_len;
int dot_len = 0;
- if (!info_mode)
- return event_len;
-
if (fanotify_is_error_event(event->mask))
event_len += FANOTIFY_ERROR_INFO_LEN;
@@ -176,14 +174,17 @@ static size_t fanotify_event_len(unsigned int info_mode,
dot_len = 1;
}
- if (info_mode & FAN_REPORT_PIDFD)
- event_len += FANOTIFY_PIDFD_INFO_HDR_LEN;
-
if (fanotify_event_has_object_fh(event)) {
fh_len = fanotify_event_object_fh_len(event);
event_len += fanotify_fid_info_len(fh_len, dot_len);
}
+ if (info_mode & FAN_REPORT_PIDFD)
+ event_len += FANOTIFY_PIDFD_INFO_LEN;
+
+ if (fanotify_event_has_access_range(event))
+ event_len += FANOTIFY_RANGE_INFO_LEN;
+
return event_len;
}
@@ -258,12 +259,11 @@ static int create_fd(struct fsnotify_group *group, const struct path *path,
return client_fd;
/*
- * we need a new file handle for the userspace program so it can read even if it was
- * originally opened O_WRONLY.
+ * We provide an fd for the userspace program, so it can access the
+ * file without generating fanotify events itself.
*/
- new_file = dentry_open(path,
- group->fanotify_data.f_flags | __FMODE_NONOTIFY,
- current_cred());
+ new_file = dentry_open_nonotify(path, group->fanotify_data.f_flags,
+ current_cred());
if (IS_ERR(new_file)) {
put_unused_fd(client_fd);
client_fd = PTR_ERR(new_file);
@@ -327,11 +327,12 @@ static int process_access_response(struct fsnotify_group *group,
struct fanotify_perm_event *event;
int fd = response_struct->fd;
u32 response = response_struct->response;
+ int errno = fanotify_get_response_errno(response);
int ret = info_len;
struct fanotify_response_info_audit_rule friar;
- pr_debug("%s: group=%p fd=%d response=%u buf=%p size=%zu\n", __func__,
- group, fd, response, info, info_len);
+ pr_debug("%s: group=%p fd=%d response=%x errno=%d buf=%p size=%zu\n",
+ __func__, group, fd, response, errno, info, info_len);
/*
* make sure the response is valid, if invalid we do nothing and either
* userspace can send a valid response or we will clean it up after the
@@ -342,7 +343,31 @@ static int process_access_response(struct fsnotify_group *group,
switch (response & FANOTIFY_RESPONSE_ACCESS) {
case FAN_ALLOW:
+ if (errno)
+ return -EINVAL;
+ break;
case FAN_DENY:
+ /* Custom errno is supported only for pre-content groups */
+ if (errno && group->priority != FSNOTIFY_PRIO_PRE_CONTENT)
+ return -EINVAL;
+
+ /*
+ * Limit errno to values expected on open(2)/read(2)/write(2)
+ * of regular files.
+ */
+ switch (errno) {
+ case 0:
+ case EIO:
+ case EPERM:
+ case EBUSY:
+ case ETXTBSY:
+ case EAGAIN:
+ case ENOSPC:
+ case EDQUOT:
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
@@ -506,7 +531,7 @@ static int copy_pidfd_info_to_user(int pidfd,
size_t count)
{
struct fanotify_event_info_pidfd info = { };
- size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN;
+ size_t info_len = FANOTIFY_PIDFD_INFO_LEN;
if (WARN_ON_ONCE(info_len > count))
return -EFAULT;
@@ -521,6 +546,30 @@ static int copy_pidfd_info_to_user(int pidfd,
return info_len;
}
+static size_t copy_range_info_to_user(struct fanotify_event *event,
+ char __user *buf, int count)
+{
+ struct fanotify_perm_event *pevent = FANOTIFY_PERM(event);
+ struct fanotify_event_info_range info = { };
+ size_t info_len = FANOTIFY_RANGE_INFO_LEN;
+
+ if (WARN_ON_ONCE(info_len > count))
+ return -EFAULT;
+
+ if (WARN_ON_ONCE(!pevent->ppos))
+ return -EINVAL;
+
+ info.hdr.info_type = FAN_EVENT_INFO_TYPE_RANGE;
+ info.hdr.len = info_len;
+ info.offset = *(pevent->ppos);
+ info.count = pevent->count;
+
+ if (copy_to_user(buf, &info, info_len))
+ return -EFAULT;
+
+ return info_len;
+}
+
static int copy_info_records_to_user(struct fanotify_event *event,
struct fanotify_info *info,
unsigned int info_mode, int pidfd,
@@ -642,6 +691,15 @@ static int copy_info_records_to_user(struct fanotify_event *event,
total_bytes += ret;
}
+ if (fanotify_event_has_access_range(event)) {
+ ret = copy_range_info_to_user(event, buf, count);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ count -= ret;
+ total_bytes += ret;
+ }
+
return total_bytes;
}
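On the read side, the range record is simply one more entry in the per-event info stream. A rough userspace sketch of walking the records that follow the fixed-size metadata and picking out FAN_EVENT_INFO_TYPE_RANGE (the struct fanotify_event_info_range layout, with hdr, offset and count fields, is assumed from this series):

	/* Hypothetical userspace parsing of one event's info records. */
	/* needs <sys/fanotify.h>, <stdio.h> */
	struct fanotify_event_metadata *meta = (void *)buf;
	struct fanotify_event_info_header *hdr;
	char *end = (char *)meta + meta->event_len;

	for (hdr = (void *)((char *)meta + meta->metadata_len);
	     (char *)hdr + sizeof(*hdr) <= end && hdr->len;
	     hdr = (void *)((char *)hdr + hdr->len)) {
		if (hdr->info_type == FAN_EVENT_INFO_TYPE_RANGE) {
			struct fanotify_event_info_range *range = (void *)hdr;

			printf("pre-content access at %llu, %llu bytes\n",
			       (unsigned long long)range->offset,
			       (unsigned long long)range->count);
		}
	}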
@@ -756,12 +814,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
buf += FAN_EVENT_METADATA_LEN;
count -= FAN_EVENT_METADATA_LEN;
- if (info_mode) {
- ret = copy_info_records_to_user(event, info, info_mode, pidfd,
- buf, count);
- if (ret < 0)
- goto out_close_fd;
- }
+ ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+ buf, count);
+ if (ret < 0)
+ goto out_close_fd;
if (f)
fd_install(fd, f);
@@ -1294,7 +1350,7 @@ static int fanotify_group_init_error_pool(struct fsnotify_group *group)
}
static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark,
- unsigned int fan_flags)
+ __u32 mask, unsigned int fan_flags)
{
/*
* Non evictable mark cannot be downgraded to evictable mark.
@@ -1321,6 +1377,11 @@ static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark,
fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
return -EEXIST;
+ /* For now pre-content events are not generated for directories */
+ mask |= fsn_mark->mask;
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR)
+ return -EEXIST;
+
return 0;
}
@@ -1347,7 +1408,7 @@ static int fanotify_add_mark(struct fsnotify_group *group,
/*
* Check if requested mark flags conflict with an existing mark flags.
*/
- ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags);
+ ret = fanotify_may_update_existing_mark(fsn_mark, mask, fan_flags);
if (ret)
goto out;
@@ -1409,6 +1470,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
unsigned int class = flags & FANOTIFY_CLASS_BITS;
unsigned int internal_flags = 0;
+ struct file *file;
pr_debug("%s: flags=%x event_f_flags=%x\n",
__func__, flags, event_f_flags);
@@ -1477,7 +1539,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
(!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID)))
return -EINVAL;
- f_flags = O_RDWR | __FMODE_NONOTIFY;
+ f_flags = O_RDWR;
if (flags & FAN_CLOEXEC)
f_flags |= O_CLOEXEC;
if (flags & FAN_NONBLOCK)
@@ -1555,10 +1617,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
goto out_destroy_group;
}
- fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
+ fd = get_unused_fd_flags(f_flags);
if (fd < 0)
goto out_destroy_group;
+ file = anon_inode_getfile_fmode("[fanotify]", &fanotify_fops, group,
+ f_flags, FMODE_NONOTIFY);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(file);
+ goto out_destroy_group;
+ }
+ fd_install(fd, file);
return fd;
out_destroy_group:
@@ -1638,12 +1708,24 @@ static int fanotify_events_supported(struct fsnotify_group *group,
unsigned int flags)
{
unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ bool is_dir = d_is_dir(path->dentry);
/* Strict validation of events in non-dir inode mask with v5.17+ APIs */
bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) ||
(mask & FAN_RENAME) ||
(flags & FAN_MARK_IGNORE);
/*
+ * Filesystems need to opt into pre-content events (a.k.a. HSM)
+ * and they are only supported on regular files and directories.
+ */
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS) {
+ if (!(path->mnt->mnt_sb->s_iflags & SB_I_ALLOW_HSM))
+ return -EOPNOTSUPP;
+ if (!is_dir && !d_is_reg(path->dentry))
+ return -EINVAL;
+ }
+
+ /*
* Some filesystems such as 'proc' acquire unusual locks when opening
* files. For them fanotify permission events have high chances of
* deadlocking the system - open done when reporting fanotify event
@@ -1675,7 +1757,7 @@ static int fanotify_events_supported(struct fsnotify_group *group,
* but because we always allowed it, error only when using new APIs.
*/
if (strict_dir_events && mark_type == FAN_MARK_INODE &&
- !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
+ !is_dir && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
return -ENOTDIR;
return 0;
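Taken together, the checks above mean a pre-content mark is only accepted on a filesystem that sets SB_I_ALLOW_HSM and only on a regular file or directory, while the do_fanotify_mark() changes below additionally restrict it to pre-content-class groups and reject FAN_ONDIR. A hedged userspace sketch of requesting the new event, assuming FAN_PRE_ACCESS is exported by the uapi header alongside the existing init/mark flags:

	/* Hypothetical listener setup for pre-content notifications. */
	/* needs <sys/fanotify.h>, <fcntl.h>, <err.h> */
	int fan_fd = fanotify_init(FAN_CLASS_PRE_CONTENT | FAN_CLOEXEC,
				   O_RDONLY | O_LARGEFILE);
	if (fan_fd < 0)
		err(1, "fanotify_init");

	/* Mark one regular file; EOPNOTSUPP here means no SB_I_ALLOW_HSM. */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD, FAN_PRE_ACCESS,
			  AT_FDCWD, "/mnt/data/file.bin") < 0)
		err(1, "fanotify_mark");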
@@ -1776,10 +1858,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
return -EPERM;
/*
- * Permission events require minimum priority FAN_CLASS_CONTENT.
+ * Permission events are not allowed for FAN_CLASS_NOTIF.
+ * Pre-content permission events are not allowed for FAN_CLASS_CONTENT.
*/
if (mask & FANOTIFY_PERM_EVENTS &&
- group->priority < FSNOTIFY_PRIO_CONTENT)
+ group->priority == FSNOTIFY_PRIO_NORMAL)
+ return -EINVAL;
+ else if (mask & FANOTIFY_PRE_CONTENT_EVENTS &&
+ group->priority == FSNOTIFY_PRIO_CONTENT)
return -EINVAL;
if (mask & FAN_FS_ERROR &&
@@ -1814,6 +1900,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME))
return -EINVAL;
+ /* Pre-content events are not currently generated for directories. */
+ if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR)
+ return -EINVAL;
+
if (mark_cmd == FAN_MARK_FLUSH) {
if (mark_type == FAN_MARK_MOUNT)
fsnotify_clear_vfsmount_marks_by_group(group);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index f976949d2634..fae1b6d397ea 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -193,7 +193,7 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
return mask & marks_mask;
}
-/* Are there any inode/mount/sb objects that are interested in this event? */
+/* Are there any inode/mount/sb objects that watch for these events? */
static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
__u32 mask)
{
@@ -203,6 +203,24 @@ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
return mask & marks_mask & ALL_FSNOTIFY_EVENTS;
}
+/* Report pre-content event with optional range info */
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count)
+{
+ struct file_range range;
+
+ /* Report page aligned range only when pos is known */
+ if (!ppos)
+ return fsnotify_path(path, FS_PRE_ACCESS);
+
+ range.path = path;
+ range.pos = PAGE_ALIGN_DOWN(*ppos);
+ range.count = PAGE_ALIGN(*ppos + count) - range.pos;
+
+ return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range,
+ FSNOTIFY_EVENT_FILE_RANGE);
+}
+
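For concreteness (assuming 4 KiB pages): an access at *ppos == 5000 with count == 100 is reported as range.pos = PAGE_ALIGN_DOWN(5000) = 4096 and range.count = PAGE_ALIGN(5100) - 4096 = 8192 - 4096 = 4096, i.e. the reported range always covers the whole pages touched by the byte range.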
/*
* Notify this dentry's parent about a child's events with child name info
* if parent is watching or if inode/sb/mount are interested in events with
@@ -623,11 +641,78 @@ out:
}
EXPORT_SYMBOL_GPL(fsnotify);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+/*
+ * At open time we check fsnotify_sb_has_priority_watchers() and set the
+ * FMODE_NONOTIFY_ mode bits accordingly.
+ * Later, fsnotify permission hooks do not check whether there are permission
+ * event watches now, only whether there were any at open time.
+ */
+void file_set_fsnotify_mode_from_watchers(struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry, *parent;
+ struct super_block *sb = dentry->d_sb;
+ __u32 mnt_mask, p_mask;
+
+ /* Is it a file opened by fanotify? */
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
+ return;
+
+ /*
+ * Permission events are a superset of pre-content events, so if there
+ * are no permission event watchers, there are also no pre-content event
+ * watchers and this is implied from the single FMODE_NONOTIFY_PERM bit.
+ */
+ if (likely(!fsnotify_sb_has_priority_watchers(sb,
+ FSNOTIFY_PRIO_CONTENT))) {
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
+ return;
+ }
+
+ /*
+ * If there are permission event watchers but no pre-content event
+ * watchers, set FMODE_NONOTIFY | FMODE_NONOTIFY_PERM to indicate that.
+ */
+ if ((!d_is_dir(dentry) && !d_is_reg(dentry)) ||
+ likely(!fsnotify_sb_has_priority_watchers(sb,
+ FSNOTIFY_PRIO_PRE_CONTENT))) {
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY | FMODE_NONOTIFY_PERM);
+ return;
+ }
+
+ /*
+ * OK, there are some pre-content watchers. Check if anybody is
+ * watching for pre-content events on *this* file.
+ */
+ mnt_mask = READ_ONCE(real_mount(file->f_path.mnt)->mnt_fsnotify_mask);
+ if (unlikely(fsnotify_object_watched(d_inode(dentry), mnt_mask,
+ FSNOTIFY_PRE_CONTENT_EVENTS))) {
+ /* Enable pre-content events */
+ file_set_fsnotify_mode(file, 0);
+ return;
+ }
+
+ /* Is parent watching for pre-content events on this file? */
+ if (dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED) {
+ parent = dget_parent(dentry);
+ p_mask = fsnotify_inode_watches_children(d_inode(parent));
+ dput(parent);
+ if (p_mask & FSNOTIFY_PRE_CONTENT_EVENTS) {
+ /* Enable pre-content events */
+ file_set_fsnotify_mode(file, 0);
+ return;
+ }
+ }
+ /* Nobody watching for pre-content events from this file */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY | FMODE_NONOTIFY_PERM);
+}
+#endif
+
static __init int fsnotify_init(void)
{
int ret;
- BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 24);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 993375f0db67..cd7d11b0eb08 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -121,7 +121,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
event->sync_cookie = cookie;
event->name_len = len;
if (len)
- strcpy(event->name, name->name);
+ strscpy(event->name, name->name, event->name_len + 1);
ret = fsnotify_add_event(group, fsn_event, inotify_merge);
if (ret) {
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e0c48956608a..b372fb2c56bd 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __ro_after_init;
static long it_zero = 0;
static long it_int_max = INT_MAX;
-static struct ctl_table inotify_table[] = {
+static const struct ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 8d789b017fa9..af94e3737470 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -787,7 +787,8 @@ pack_runs:
if (err)
goto out;
- attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
+ &le->id);
if (!attr) {
err = -EINVAL;
goto bad_inode;
@@ -1181,7 +1182,7 @@ repack:
goto out;
}
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1406,7 +1407,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
*/
if (!attr->non_res) {
if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
- ntfs_inode_err(&ni->vfs_inode, "is corrupted");
+ _ntfs_bad_inode(&ni->vfs_inode);
return -EINVAL;
}
addr = resident_data(attr);
@@ -1796,7 +1797,7 @@ repack:
goto out;
}
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
&le->id);
if (!attr) {
err = -EINVAL;
@@ -2041,8 +2042,8 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
/* Look for required attribute. */
- attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
- 0, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
+ NULL, 0, &le->id);
if (!attr) {
err = -EINVAL;
goto out;
@@ -2587,7 +2588,7 @@ int attr_force_nonresident(struct ntfs_inode *ni)
attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
if (!attr) {
- ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
+ _ntfs_bad_inode(&ni->vfs_inode);
return -ENOENT;
}
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index fc6a8aa29e3a..b6da80c69ca6 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -512,7 +512,7 @@ out:
ctx->pos = pos;
} else if (err < 0) {
if (err == -EINVAL)
- ntfs_inode_err(dir, "directory corrupted");
+ _ntfs_bad_inode(dir);
ctx->pos = eod;
}
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 8b39d0ce5f28..5df6a0b5add9 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -75,7 +75,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
{
const struct ATTRIB *attr;
- attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
NULL;
}
@@ -89,7 +89,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
{
const struct ATTRIB *attr;
- attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
NULL;
@@ -148,8 +148,10 @@ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
goto out;
err = mi_get(ni->mi.sbi, rno, &r);
- if (err)
+ if (err) {
+ _ntfs_bad_inode(&ni->vfs_inode);
return err;
+ }
ni_add_mi(ni, r);
@@ -201,7 +203,8 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
*mi = &ni->mi;
/* Look for required attribute in primary record. */
- return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
+ return mi_find_attr(ni, &ni->mi, attr, type, name, name_len,
+ NULL);
}
/* First look for list entry of required type. */
@@ -217,7 +220,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return NULL;
/* Look for required attribute. */
- attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
+ attr = mi_find_attr(ni, m, NULL, type, name, name_len, &le->id);
if (!attr)
goto out;
@@ -238,8 +241,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return attr;
out:
- ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
return NULL;
}
@@ -259,7 +261,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
if (mi)
*mi = &ni->mi;
/* Enum attributes in primary record. */
- return mi_enum_attr(&ni->mi, attr);
+ return mi_enum_attr(ni, &ni->mi, attr);
}
/* Get next list entry. */
@@ -275,7 +277,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
*mi = mi2;
/* Find attribute in loaded record. */
- return rec_find_attr_le(mi2, le2);
+ return rec_find_attr_le(ni, mi2, le2);
}
/*
@@ -293,7 +295,8 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
if (!ni->attr_list.size) {
if (pmi)
*pmi = &ni->mi;
- return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
+ return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
+ NULL);
}
le = al_find_ex(ni, NULL, type, name, name_len, NULL);
@@ -319,7 +322,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
if (pmi)
*pmi = mi;
- attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
+ attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id);
if (!attr)
return NULL;
@@ -330,6 +333,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
vcn <= le64_to_cpu(attr->nres.evcn))
return attr;
+ _ntfs_bad_inode(&ni->vfs_inode);
return NULL;
}
@@ -398,7 +402,8 @@ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
int diff;
if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
- attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
+ attr = mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
+ id);
if (!attr)
return -ENOENT;
@@ -437,7 +442,7 @@ next_le2:
al_remove_le(ni, le);
- attr = mi_find_attr(mi, NULL, type, name, name_len, id);
+ attr = mi_find_attr(ni, mi, NULL, type, name, name_len, id);
if (!attr)
return -ENOENT;
@@ -485,7 +490,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
name = le->name;
}
- attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
+ attr = mi_insert_attr(ni, mi, type, name, name_len, asize, name_off);
if (!attr) {
if (le_added)
al_remove_le(ni, le);
@@ -673,7 +678,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (err)
return err;
- attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
+ attr_list = mi_find_attr(ni, &ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
if (!attr_list)
return 0;
@@ -695,7 +700,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (!mi)
return 0;
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr)
return 0;
@@ -731,7 +736,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
goto out;
}
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr) {
/* Should never happened, 'cause already checked. */
@@ -740,7 +745,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
asize = le32_to_cpu(attr->size);
/* Insert into primary record. */
- attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
+ attr_ins = mi_insert_attr(ni, &ni->mi, le->type, le_name(le),
le->name_len, asize,
le16_to_cpu(attr->name_off));
if (!attr_ins) {
@@ -768,7 +773,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
if (!mi)
continue;
- attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr)
continue;
@@ -831,7 +836,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
free_b = 0;
attr = NULL;
- for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
+ for (; (attr = mi_enum_attr(ni, &ni->mi, attr)); le = Add2Ptr(le, sz)) {
sz = le_size(attr->name_len);
le->type = attr->type;
le->size = cpu_to_le16(sz);
@@ -886,7 +891,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
u32 asize = le32_to_cpu(b->size);
u16 name_off = le16_to_cpu(b->name_off);
- attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
+ attr = mi_insert_attr(ni, mi, b->type, Add2Ptr(b, name_off),
b->name_len, asize, name_off);
if (!attr)
goto out;
@@ -909,7 +914,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
goto out;
}
- attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
+ attr = mi_insert_attr(ni, &ni->mi, ATTR_LIST, NULL, 0,
lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
if (!attr)
goto out;
@@ -993,13 +998,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
mi = rb_entry(node, struct mft_inode, node);
if (is_mft_data &&
- (mi_enum_attr(mi, NULL) ||
+ (mi_enum_attr(ni, mi, NULL) ||
vbo <= ((u64)mi->rno << sbi->record_bits))) {
/* We can't accept this record 'cause MFT's bootstrapping. */
continue;
}
if (is_mft &&
- mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
+ mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
/*
* This child record already has a ATTR_DATA.
* So it can't accept any other records.
@@ -1008,7 +1013,7 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
}
if ((type != ATTR_NAME || name_len) &&
- mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
+ mi_find_attr(ni, mi, NULL, type, name, name_len, NULL)) {
/* Only indexed attributes can share same record. */
continue;
}
@@ -1157,7 +1162,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
/* Estimate the result of moving all possible attributes away. */
attr = NULL;
- while ((attr = mi_enum_attr(&ni->mi, attr))) {
+ while ((attr = mi_enum_attr(ni, &ni->mi, attr))) {
if (attr->type == ATTR_STD)
continue;
if (attr->type == ATTR_LIST)
@@ -1175,7 +1180,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
attr = NULL;
for (;;) {
- attr = mi_enum_attr(&ni->mi, attr);
+ attr = mi_enum_attr(ni, &ni->mi, attr);
if (!attr) {
/* We should never be here 'cause we have already check this case. */
err = -EINVAL;
@@ -1259,7 +1264,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
mi = rb_entry(node, struct mft_inode, node);
- attr = mi_enum_attr(mi, NULL);
+ attr = mi_enum_attr(ni, mi, NULL);
if (!attr) {
mft_min = mi->rno;
@@ -1280,7 +1285,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
ni_remove_mi(ni, mi_new);
}
- attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
+ attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1397,7 +1402,7 @@ int ni_expand_list(struct ntfs_inode *ni)
continue;
/* Find attribute in primary record. */
- attr = rec_find_attr_le(&ni->mi, le);
+ attr = rec_find_attr_le(ni, &ni->mi, le);
if (!attr) {
err = -EINVAL;
goto out;
@@ -1604,8 +1609,8 @@ int ni_delete_all(struct ntfs_inode *ni)
roff = le16_to_cpu(attr->nres.run_off);
if (roff > asize) {
- _ntfs_bad_inode(&ni->vfs_inode);
- return -EINVAL;
+ /* ni_enum_attr_ex checks this case. */
+ continue;
}
/* run==1 means unpack and deallocate. */
@@ -2726,9 +2731,10 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
{
int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct folio *folio = page_folio(pages[0]);
u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
- u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
+ u64 frame_vbo = folio_pos(folio);
CLST frame = frame_vbo >> frame_bits;
char *frame_ondisk = NULL;
struct page **pages_disk = NULL;
@@ -3343,7 +3349,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (!mi->dirty)
continue;
- is_empty = !mi_enum_attr(mi, NULL);
+ is_empty = !mi_enum_attr(ni, mi, NULL);
if (is_empty)
clear_rec_inuse(mi->mrec);
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 03471bc9371c..938d351ebac7 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -908,7 +908,11 @@ void ntfs_bad_inode(struct inode *inode, const char *hint)
ntfs_inode_err(inode, "%s", hint);
make_bad_inode(inode);
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ /* Avoid recursion if bad inode is $Volume. */
+ if (inode->i_ino != MFT_REC_VOL &&
+ !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
}
/*
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 9089c58a005c..7eb9fae22f8d 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1094,8 +1094,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
ok:
if (!index_buf_check(ib, bytes, &vbn)) {
- ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
err = -EINVAL;
goto out;
}
@@ -1117,8 +1116,7 @@ ok:
out:
if (err == -E_NTFS_CORRUPT) {
- ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
- ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(&ni->vfs_inode);
err = -EINVAL;
}
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index be04d2845bb7..a1e11228dafd 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -410,6 +410,9 @@ end_enum:
if (!std5)
goto out;
+ if (is_bad_inode(inode))
+ goto out;
+
if (!is_match && name) {
err = -ENOENT;
goto out;
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index cd8e8374bb5a..382820464dee 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -745,23 +745,24 @@ int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
void mi_put(struct mft_inode *mi);
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
int mi_read(struct mft_inode *mi, bool is_mft);
-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
-// TODO: id?
-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
- enum ATTR_TYPE type, const __le16 *name,
- u8 name_len, const __le16 *id);
-static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
+struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr);
+struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr, enum ATTR_TYPE type,
+ const __le16 *name, u8 name_len, const __le16 *id);
+static inline struct ATTRIB *rec_find_attr_le(struct ntfs_inode *ni,
+ struct mft_inode *rec,
struct ATTR_LIST_ENTRY *le)
{
- return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
+ return mi_find_attr(ni, rec, NULL, le->type, le_name(le), le->name_len,
&le->id);
}
int mi_write(struct mft_inode *mi, int wait);
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
__le16 flags, bool is_mft);
-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, u32 asize,
- u16 name_off);
+struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ enum ATTR_TYPE type, const __le16 *name,
+ u8 name_len, u32 asize, u16 name_off);
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
struct ATTRIB *attr);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 61d53d39f3b9..714c7ecedca8 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -31,7 +31,7 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
*
* Return: Unused attribute id that is less than mrec->next_attr_id.
*/
-static __le16 mi_new_attt_id(struct mft_inode *mi)
+static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi)
{
u16 free_id, max_id, t16;
struct MFT_REC *rec = mi->mrec;
@@ -52,7 +52,7 @@ static __le16 mi_new_attt_id(struct mft_inode *mi)
attr = NULL;
for (;;) {
- attr = mi_enum_attr(mi, attr);
+ attr = mi_enum_attr(ni, mi, attr);
if (!attr) {
rec->next_attr_id = cpu_to_le16(max_id + 1);
mi->dirty = true;
@@ -195,7 +195,8 @@ out:
* NOTE: mi->mrec - memory of size sbi->record_size
* here we sure that mi->mrec->total == sbi->record_size (see mi_read)
*/
-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr)
{
const struct MFT_REC *rec = mi->mrec;
u32 used = le32_to_cpu(rec->used);
@@ -209,11 +210,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
off = le16_to_cpu(rec->attr_off);
if (used > total)
- return NULL;
+ goto out;
if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
!IS_ALIGNED(off, 8)) {
- return NULL;
+ goto out;
}
/* Skip non-resident records. */
@@ -243,7 +244,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
*/
if (off + 8 > used) {
static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
- return NULL;
+ goto out;
}
if (attr->type == ATTR_END) {
@@ -254,112 +255,116 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* 0x100 is last known attribute for now. */
t32 = le32_to_cpu(attr->type);
if (!t32 || (t32 & 0xf) || (t32 > 0x100))
- return NULL;
+ goto out;
/* attributes in record must be ordered by type */
if (t32 < prev_type)
- return NULL;
+ goto out;
asize = le32_to_cpu(attr->size);
if (!IS_ALIGNED(asize, 8))
- return NULL;
+ goto out;
/* Check overflow and boundary. */
if (off + asize < off || off + asize > used)
- return NULL;
+ goto out;
/* Can we use the field attr->non_res. */
if (off + 9 > used)
- return NULL;
+ goto out;
/* Check size of attribute. */
if (!attr->non_res) {
/* Check resident fields. */
if (asize < SIZEOF_RESIDENT)
- return NULL;
+ goto out;
t16 = le16_to_cpu(attr->res.data_off);
if (t16 > asize)
- return NULL;
+ goto out;
if (le32_to_cpu(attr->res.data_size) > asize - t16)
- return NULL;
+ goto out;
t32 = sizeof(short) * attr->name_len;
if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
- return NULL;
+ goto out;
return attr;
}
/* Check nonresident fields. */
if (attr->non_res != 1)
- return NULL;
+ goto out;
/* Can we use memory including attr->nres.valid_size? */
if (asize < SIZEOF_NONRESIDENT)
- return NULL;
+ goto out;
t16 = le16_to_cpu(attr->nres.run_off);
if (t16 > asize)
- return NULL;
+ goto out;
t32 = sizeof(short) * attr->name_len;
if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
- return NULL;
+ goto out;
/* Check start/end vcn. */
if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
- return NULL;
+ goto out;
data_size = le64_to_cpu(attr->nres.data_size);
if (le64_to_cpu(attr->nres.valid_size) > data_size)
- return NULL;
+ goto out;
alloc_size = le64_to_cpu(attr->nres.alloc_size);
if (data_size > alloc_size)
- return NULL;
+ goto out;
t32 = mi->sbi->cluster_mask;
if (alloc_size & t32)
- return NULL;
+ goto out;
if (!attr->nres.svcn && is_attr_ext(attr)) {
/* First segment of sparse/compressed attribute */
/* Can we use memory including attr->nres.total_size? */
if (asize < SIZEOF_NONRESIDENT_EX)
- return NULL;
+ goto out;
tot_size = le64_to_cpu(attr->nres.total_size);
if (tot_size & t32)
- return NULL;
+ goto out;
if (tot_size > alloc_size)
- return NULL;
+ goto out;
} else {
if (attr->nres.c_unit)
- return NULL;
+ goto out;
if (alloc_size > mi->sbi->volume.size)
- return NULL;
+ goto out;
}
return attr;
+
+out:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ return NULL;
}
/*
* mi_find_attr - Find the attribute by type and name and id.
*/
-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
- enum ATTR_TYPE type, const __le16 *name,
- u8 name_len, const __le16 *id)
+struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ struct ATTRIB *attr, enum ATTR_TYPE type,
+ const __le16 *name, u8 name_len, const __le16 *id)
{
u32 type_in = le32_to_cpu(type);
u32 atype;
next_attr:
- attr = mi_enum_attr(mi, attr);
+ attr = mi_enum_attr(ni, mi, attr);
if (!attr)
return NULL;
@@ -467,9 +472,9 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
*
* Return: Not full constructed attribute or NULL if not possible to create.
*/
-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, u32 asize,
- u16 name_off)
+struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ enum ATTR_TYPE type, const __le16 *name,
+ u8 name_len, u32 asize, u16 name_off)
{
size_t tail;
struct ATTRIB *attr;
@@ -488,7 +493,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
* at which we should insert it.
*/
attr = NULL;
- while ((attr = mi_enum_attr(mi, attr))) {
+ while ((attr = mi_enum_attr(ni, mi, attr))) {
int diff = compare_attr(attr, type, name, name_len, upcase);
if (diff < 0)
@@ -508,7 +513,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
tail = used - PtrOffset(rec, attr);
}
- id = mi_new_attt_id(mi);
+ id = mi_new_attt_id(ni, mi);
memmove(Add2Ptr(attr, asize), attr, tail);
memset(attr, 0, asize);
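
The mi_enum_attr() change above converts every early "return NULL;" into a "goto out;" so the corrupt record is flagged exactly once before returning. For readers skimming the diff, a minimal stand-alone C sketch of that single-exit validation pattern (hypothetical validate() example, not the ntfs3 code itself):

#include <stddef.h>

struct record { unsigned int type; unsigned int size; };

static int bad_records;			/* stands in for _ntfs_bad_inode() */

static struct record *validate(struct record *r)
{
	if (!r->type)
		goto out;		/* every failed check funnels here */
	if (r->size & 7)		/* size must stay 8-byte aligned */
		goto out;
	return r;			/* success path hands the record back */

out:
	bad_records++;			/* the side effect lives in one place */
	return NULL;
}

int main(void)
{
	struct record bad = { .type = 0, .size = 8 };
	return validate(&bad) ? 1 : 0;	/* exercises the error path */
}
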
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 395e23920632..4414743b638e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec);
/*
- * Reset the actual path elements so that we can re-use the structure
+ * Reset the actual path elements so that we can reuse the structure
* to build another path. Generally, this involves freeing the buffer
* heads.
*/
@@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle,
/*
* If there is a gap before the root end and the real end
- * of the righmost leaf block, we need to remove the gap
+ * of the rightmost leaf block, we need to remove the gap
* between new_cpos and root_end first so that the tree
* is consistent after we add a new branch(it will start
* from new_cpos).
@@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle,
/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
* linked with the rest of the tree.
- * conversly, new_eb_bhs[0] is the new bottommost leaf.
+ * conversely, new_eb_bhs[0] is the new bottommost leaf.
*
* when we leave the loop, new_last_eb_blk will point to the
* newest leaf, and next_blkno will point to the topmost extent
@@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
* update split_index here.
*
* When the split_index is zero, we need to merge it to the
- * prevoius extent block. It is more efficient and easier
+ * previous extent block. It is more efficient and easier
* if we do merge_right first and merge_left later.
*/
ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
@@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
}
/*
- * This should only be called against the righmost leaf extent list.
+ * This should only be called against the rightmost leaf extent list.
*
* ocfs2_figure_appending_type() will figure out whether we'll have to
* insert at the tail of the rightmost leaf.
@@ -6154,6 +6154,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
int status;
struct inode *inode = NULL;
struct buffer_head *bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+ unsigned int tl_count;
inode = ocfs2_get_system_file_inode(osb,
TRUNCATE_LOG_SYSTEM_INODE,
@@ -6171,6 +6174,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
goto bail;
}
+ di = (struct ocfs2_dinode *)bh->b_data;
+ tl = &di->id2.i_dealloc;
+ tl_count = le16_to_cpu(tl->tl_count);
+ if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
+ tl_count == 0)) {
+ status = -EFSCORRUPTED;
+ iput(inode);
+ brelse(bh);
+ mlog_errno(status);
+ goto bail;
+ }
+
*tl_inode = inode;
*tl_bh = bh;
bail:
@@ -6808,27 +6823,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
return 0;
}
-void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys)
+void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
+ size_t from, size_t to, struct folio *folio, int zero,
+ u64 *phys)
{
int ret, partial = 0;
- loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
+ loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
- ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
+ ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0);
if (ret)
mlog_errno(ret);
if (zero)
- zero_user_segment(page, from, to);
+ folio_zero_segment(folio, from, to);
/*
* Need to set the buffers we zero'd into uptodate
* here if they aren't - ocfs2_map_page_blocks()
* might've skipped some
*/
- ret = walk_page_buffers(handle, page_buffers(page),
+ ret = walk_page_buffers(handle, folio_buffers(folio),
from, to, &partial,
ocfs2_zero_func);
if (ret < 0)
@@ -6841,92 +6856,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
}
if (!partial)
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
}
-static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
- loff_t end, struct page **pages,
- int numpages, u64 phys, handle_t *handle)
+static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
+ loff_t end, struct folio **folios, int numfolios,
+ u64 phys, handle_t *handle)
{
int i;
- struct page *page;
- unsigned int from, to = PAGE_SIZE;
struct super_block *sb = inode->i_sb;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
- if (numpages == 0)
+ if (numfolios == 0)
goto out;
- to = PAGE_SIZE;
- for(i = 0; i < numpages; i++) {
- page = pages[i];
-
- from = start & (PAGE_SIZE - 1);
- if ((end >> PAGE_SHIFT) == page->index)
- to = end & (PAGE_SIZE - 1);
+ for (i = 0; i < numfolios; i++) {
+ struct folio *folio = folios[i];
+ size_t to = folio_size(folio);
+ size_t from = offset_in_folio(folio, start);
- BUG_ON(from > PAGE_SIZE);
- BUG_ON(to > PAGE_SIZE);
+ if (to > end - folio_pos(folio))
+ to = end - folio_pos(folio);
- ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
- &phys);
+ ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
+ &phys);
- start = (page->index + 1) << PAGE_SHIFT;
+ start = folio_next_index(folio) << PAGE_SHIFT;
}
out:
- if (pages)
- ocfs2_unlock_and_free_pages(pages, numpages);
+ if (folios)
+ ocfs2_unlock_and_free_folios(folios, numfolios);
}
-int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end,
+ struct folio **folios, int *num)
{
- int numpages, ret = 0;
+ int numfolios, ret = 0;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
- numpages = 0;
+ numfolios = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_SHIFT;
do {
- pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
- if (!pages[numpages]) {
- ret = -ENOMEM;
+ folios[numfolios] = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folios[numfolios])) {
+ ret = PTR_ERR(folios[numfolios]);
mlog_errno(ret);
goto out;
}
- numpages++;
- index++;
+ index = folio_next_index(folios[numfolios]);
+ numfolios++;
} while (index < (last_page_bytes >> PAGE_SHIFT));
out:
if (ret != 0) {
- if (pages)
- ocfs2_unlock_and_free_pages(pages, numpages);
- numpages = 0;
+ if (folios)
+ ocfs2_unlock_and_free_folios(folios, numfolios);
+ numfolios = 0;
}
- *num = numpages;
+ *num = numfolios;
return ret;
}
-static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end,
+ struct folio **folios, int *num)
{
struct super_block *sb = inode->i_sb;
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
- return ocfs2_grab_pages(inode, start, end, pages, num);
+ return ocfs2_grab_folios(inode, start, end, folios, num);
}
/*
@@ -6940,8 +6951,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
- int ret = 0, numpages;
- struct page **pages = NULL;
+ int ret = 0, numfolios;
+ struct folio **folios = NULL;
u64 phys;
unsigned int ext_flags;
struct super_block *sb = inode->i_sb;
@@ -6954,17 +6965,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
return 0;
/*
- * Avoid zeroing pages fully beyond current i_size. It is pointless as
- * underlying blocks of those pages should be already zeroed out and
+ * Avoid zeroing folios fully beyond current i_size. It is pointless as
+ * underlying blocks of those folios should be already zeroed out and
* page writeback will skip them anyway.
*/
range_end = min_t(u64, range_end, i_size_read(inode));
if (range_start >= range_end)
return 0;
- pages = kcalloc(ocfs2_pages_per_cluster(sb),
- sizeof(struct page *), GFP_NOFS);
- if (pages == NULL) {
+ folios = kcalloc(ocfs2_pages_per_cluster(sb),
+ sizeof(struct folio *), GFP_NOFS);
+ if (folios == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
@@ -6985,18 +6996,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
- ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
- &numpages);
+ ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios,
+ &numfolios);
if (ret) {
mlog_errno(ret);
goto out;
}
- ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
- numpages, phys, handle);
+ ocfs2_zero_cluster_folios(inode, range_start, range_end, folios,
+ numfolios, phys, handle);
/*
- * Initiate writeout of the pages we zero'd here. We don't
+ * Initiate writeout of the folios we zero'd here. We don't
* wait on them - the truncate_inode_pages() call later will
* do that for us.
*/
@@ -7006,7 +7017,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
mlog_errno(ret);
out:
- kfree(pages);
+ kfree(folios);
return ret;
}
@@ -7059,7 +7070,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
- int ret, has_data, num_pages = 0;
+ int ret, has_data, num_folios = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@@ -7068,7 +7079,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
@@ -7119,12 +7130,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
/*
* Save two copies, one for insert, and one that can
- * be changed by ocfs2_map_and_dirty_page() below.
+ * be changed by ocfs2_map_and_dirty_folio() below.
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
- ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
- &num_pages);
+ ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio,
+ &num_folios);
if (ret) {
mlog_errno(ret);
need_free = 1;
@@ -7135,15 +7146,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
- ret = ocfs2_read_inline_data(inode, page, di_bh);
+ ret = ocfs2_read_inline_data(inode, folio, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
- ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
- &phys);
+ ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0,
+ &phys);
}
spin_lock(&oi->ip_lock);
@@ -7174,8 +7185,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
- if (page)
- ocfs2_unlock_and_free_pages(&page, num_pages);
+ if (folio)
+ ocfs2_unlock_and_free_folios(&folio, num_folios);
out_commit:
if (ret < 0 && did_quota)
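
Several hunks above swap find_or_create_page(), which returns NULL on failure, for __filemap_get_folio(), which reports failure through an error-valued pointer. A rough userspace emulation of that IS_ERR()/PTR_ERR() convention, for readers who have not met it; the real macros live in the kernel's <linux/err.h>:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *get_folio(int fail)
{
	static int folio;			/* stands in for a real folio */
	return fail ? ERR_PTR(-ENOMEM) : (void *)&folio;
}

int main(void)
{
	void *folio = get_folio(1);

	if (IS_ERR(folio))
		printf("lookup failed: %ld\n", PTR_ERR(folio));	/* prints -12 on Linux */
	return 0;
}
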
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 4af7abaa6e40..1c0c83362904 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
return !rec->e_leaf_clusters;
}
-int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num);
-void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys);
+void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
+ size_t from, size_t to, struct folio *folio, int zero,
+ u64 *phys);
/*
* Structures which describe a path through a btree, and functions to
* manipulate them.
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index db72b3e924b3..5bbeb6fbb1ac 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -215,10 +215,9 @@ bail:
return err;
}
-int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh)
{
- void *kaddr;
loff_t size;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
@@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
size = i_size_read(inode);
- if (size > PAGE_SIZE ||
+ if (size > folio_size(folio) ||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
ocfs2_error(inode->i_sb,
"Inode %llu has with inline data has bad size: %Lu\n",
@@ -239,25 +238,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
return -EROFS;
}
- kaddr = kmap_atomic(page);
- if (size)
- memcpy(kaddr, di->id2.i_data.id_data, size);
- /* Clear the remaining part of the page */
- memset(kaddr + size, 0, PAGE_SIZE - size);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
-
- SetPageUptodate(page);
+ folio_fill_tail(folio, 0, di->id2.i_data.id_data, size);
+ folio_mark_uptodate(folio);
return 0;
}
-static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
+static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio)
{
int ret;
struct buffer_head *di_bh = NULL;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
ret = ocfs2_read_inode_block(inode, &di_bh);
@@ -266,9 +258,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
goto out;
}
- ret = ocfs2_read_inline_data(inode, page, di_bh);
+ ret = ocfs2_read_inline_data(inode, folio, di_bh);
out:
- unlock_page(page);
+ folio_unlock(folio);
brelse(di_bh);
return ret;
@@ -283,7 +275,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
- ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
+ ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
if (ret != 0) {
if (ret == AOP_TRUNCATED_PAGE)
unlock = 0;
@@ -305,7 +297,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
/*
- * i_size might have just been updated as we grabed the meta lock. We
+ * i_size might have just been updated as we grabbed the meta lock. We
* might now be discovering a truncate that hit on another node.
* block_read_full_folio->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
@@ -322,7 +314,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
- ret = ocfs2_readpage_inline(inode, &folio->page);
+ ret = ocfs2_readpage_inline(inode, folio);
else
ret = block_read_full_folio(folio, ocfs2_get_block);
unlock = 0;
@@ -534,7 +526,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
*
* from == to == 0 is code for "zero the entire cluster region"
*/
-static void ocfs2_clear_page_regions(struct page *page,
+static void ocfs2_clear_folio_regions(struct folio *folio,
struct ocfs2_super *osb, u32 cpos,
unsigned from, unsigned to)
{
@@ -543,7 +535,7 @@ static void ocfs2_clear_page_regions(struct page *page,
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_folio(folio, 0);
if (from || to) {
if (from > cluster_start)
@@ -554,13 +546,13 @@ static void ocfs2_clear_page_regions(struct page *page,
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
}
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
/*
* Nonsparse file systems fully allocate before we get to the write
* code. This prevents ocfs2_write() from tagging the write as an
- * allocating one, which means ocfs2_map_page_blocks() might try to
+ * allocating one, which means ocfs2_map_folio_blocks() might try to
* read-in the blocks at the tail of our file. Avoid reading them by
* testing i_size against each block offset.
*/
@@ -585,11 +577,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
*
* This will also skip zeroing, which is handled externally.
*/
-int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new)
{
- struct folio *folio = page_folio(page);
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
@@ -729,24 +720,24 @@ struct ocfs2_write_ctxt {
unsigned int w_large_pages;
/*
- * Pages involved in this write.
+ * Folios involved in this write.
*
- * w_target_page is the page being written to by the user.
+ * w_target_folio is the folio being written to by the user.
*
- * w_pages is an array of pages which always contains
- * w_target_page, and in the case of an allocating write with
+ * w_folios is an array of folios which always contains
+ * w_target_folio, and in the case of an allocating write with
* page_size < cluster size, it will contain zero'd and mapped
- * pages adjacent to w_target_page which need to be written
+ * pages adjacent to w_target_folio which need to be written
* out in so that future reads from that region will get
* zero's.
*/
- unsigned int w_num_pages;
- struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
- struct page *w_target_page;
+ unsigned int w_num_folios;
+ struct folio *w_folios[OCFS2_MAX_CTXT_PAGES];
+ struct folio *w_target_folio;
/*
* w_target_locked is used for page_mkwrite path indicating no unlocking
- * against w_target_page in ocfs2_write_end_nolock.
+ * against w_target_folio in ocfs2_write_end_nolock.
*/
unsigned int w_target_locked:1;
@@ -771,40 +762,40 @@ struct ocfs2_write_ctxt {
unsigned int w_unwritten_count;
};
-void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
+void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios)
{
int i;
- for(i = 0; i < num_pages; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- mark_page_accessed(pages[i]);
- put_page(pages[i]);
- }
+ for(i = 0; i < num_folios; i++) {
+ if (!folios[i])
+ continue;
+ folio_unlock(folios[i]);
+ folio_mark_accessed(folios[i]);
+ folio_put(folios[i]);
}
}
-static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc)
{
int i;
/*
* w_target_locked is only set to true in the page_mkwrite() case.
* The intent is to allow us to lock the target page from write_begin()
- * to write_end(). The caller must hold a ref on w_target_page.
+ * to write_end(). The caller must hold a ref on w_target_folio.
*/
if (wc->w_target_locked) {
- BUG_ON(!wc->w_target_page);
- for (i = 0; i < wc->w_num_pages; i++) {
- if (wc->w_target_page == wc->w_pages[i]) {
- wc->w_pages[i] = NULL;
+ BUG_ON(!wc->w_target_folio);
+ for (i = 0; i < wc->w_num_folios; i++) {
+ if (wc->w_target_folio == wc->w_folios[i]) {
+ wc->w_folios[i] = NULL;
break;
}
}
- mark_page_accessed(wc->w_target_page);
- put_page(wc->w_target_page);
+ folio_mark_accessed(wc->w_target_folio);
+ folio_put(wc->w_target_folio);
}
- ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+ ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios);
}
static void ocfs2_free_unwritten_list(struct inode *inode,
@@ -826,7 +817,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode,
struct ocfs2_write_ctxt *wc)
{
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
- ocfs2_unlock_pages(wc);
+ ocfs2_unlock_folios(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
@@ -869,29 +860,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
* and dirty so they'll be written out (in order to prevent uninitialised
* block data from leaking). And clear the new bit.
*/
-static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
unsigned int block_start, block_end;
struct buffer_head *head, *bh;
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
+ BUG_ON(!folio_test_locked(folio));
+ head = folio_buffers(folio);
+ if (!head)
return;
- bh = head = page_buffers(page);
+ bh = head;
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
unsigned start, end;
start = max(from, block_start);
end = min(to, block_end);
- zero_user_segment(page, start, end);
+ folio_zero_segment(folio, start, end);
set_buffer_uptodate(bh);
}
@@ -916,29 +908,26 @@ static void ocfs2_write_failure(struct inode *inode,
int i;
unsigned from = user_pos & (PAGE_SIZE - 1),
to = user_pos + user_len;
- struct page *tmppage;
- if (wc->w_target_page)
- ocfs2_zero_new_buffers(wc->w_target_page, from, to);
+ if (wc->w_target_folio)
+ ocfs2_zero_new_buffers(wc->w_target_folio, from, to);
- for(i = 0; i < wc->w_num_pages; i++) {
- tmppage = wc->w_pages[i];
+ for (i = 0; i < wc->w_num_folios; i++) {
+ struct folio *folio = wc->w_folios[i];
- if (tmppage && page_has_buffers(tmppage)) {
+ if (folio && folio_buffers(folio)) {
if (ocfs2_should_order_data(inode))
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
- block_commit_write(tmppage, from, to);
+ block_commit_write(&folio->page, from, to);
}
}
}
-static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
- struct ocfs2_write_ctxt *wc,
- struct page *page, u32 cpos,
- loff_t user_pos, unsigned user_len,
- int new)
+static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno,
+ struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos,
+ loff_t user_pos, unsigned user_len, int new)
{
int ret;
unsigned int map_from = 0, map_to = 0;
@@ -951,20 +940,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
/* treat the write as new if the a hole/lseek spanned across
* the page boundary.
*/
- new = new | ((i_size_read(inode) <= page_offset(page)) &&
- (page_offset(page) <= user_pos));
+ new = new | ((i_size_read(inode) <= folio_pos(folio)) &&
+ (folio_pos(folio) <= user_pos));
- if (page == wc->w_target_page) {
+ if (folio == wc->w_target_folio) {
map_from = user_pos & (PAGE_SIZE - 1);
map_to = map_from + user_len;
if (new)
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- cluster_start, cluster_end,
- new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ cluster_start, cluster_end, new);
else
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- map_from, map_to, new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ map_from, map_to, new);
if (ret) {
mlog_errno(ret);
goto out;
@@ -978,7 +966,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
} else {
/*
- * If we haven't allocated the new page yet, we
+ * If we haven't allocated the new folio yet, we
* shouldn't be writing it out without copying user
* data. This is likely a math error from the caller.
*/
@@ -987,8 +975,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
map_from = cluster_start;
map_to = cluster_end;
- ret = ocfs2_map_page_blocks(page, p_blkno, inode,
- cluster_start, cluster_end, new);
+ ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+ cluster_start, cluster_end, new);
if (ret) {
mlog_errno(ret);
goto out;
@@ -996,20 +984,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
/*
- * Parts of newly allocated pages need to be zero'd.
+ * Parts of newly allocated folios need to be zero'd.
*
* Above, we have also rewritten 'to' and 'from' - as far as
* the rest of the function is concerned, the entire cluster
- * range inside of a page needs to be written.
+ * range inside of a folio needs to be written.
*
- * We can skip this if the page is up to date - it's already
+ * We can skip this if the folio is uptodate - it's already
* been zero'd from being read in as a hole.
*/
- if (new && !PageUptodate(page))
- ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
+ if (new && !folio_test_uptodate(folio))
+ ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb),
cpos, user_data_from, user_data_to);
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
out:
return ret;
@@ -1018,11 +1006,9 @@ out:
/*
* This function will only grab one clusters worth of pages.
*/
-static int ocfs2_grab_pages_for_write(struct address_space *mapping,
- struct ocfs2_write_ctxt *wc,
- u32 cpos, loff_t user_pos,
- unsigned user_len, int new,
- struct page *mmap_page)
+static int ocfs2_grab_folios_for_write(struct address_space *mapping,
+ struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos,
+ unsigned user_len, int new, struct folio *mmap_folio)
{
int ret = 0, i;
unsigned long start, target_index, end_index, index;
@@ -1039,7 +1025,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* last page of the write.
*/
if (new) {
- wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
+ wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb);
start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
/*
* We need the index *past* the last page we could possibly
@@ -1049,15 +1035,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
last_byte = max(user_pos + user_len, i_size_read(inode));
BUG_ON(last_byte < 1);
end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
- if ((start + wc->w_num_pages) > end_index)
- wc->w_num_pages = end_index - start;
+ if ((start + wc->w_num_folios) > end_index)
+ wc->w_num_folios = end_index - start;
} else {
- wc->w_num_pages = 1;
+ wc->w_num_folios = 1;
start = target_index;
}
end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
- for(i = 0; i < wc->w_num_pages; i++) {
+ for(i = 0; i < wc->w_num_folios; i++) {
index = start + i;
if (index >= target_index && index <= end_index &&
@@ -1067,37 +1053,38 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* and wants us to directly use the page
* passed in.
*/
- lock_page(mmap_page);
+ folio_lock(mmap_folio);
/* Exit and let the caller retry */
- if (mmap_page->mapping != mapping) {
- WARN_ON(mmap_page->mapping);
- unlock_page(mmap_page);
+ if (mmap_folio->mapping != mapping) {
+ WARN_ON(mmap_folio->mapping);
+ folio_unlock(mmap_folio);
ret = -EAGAIN;
goto out;
}
- get_page(mmap_page);
- wc->w_pages[i] = mmap_page;
+ folio_get(mmap_folio);
+ wc->w_folios[i] = mmap_folio;
wc->w_target_locked = true;
} else if (index >= target_index && index <= end_index &&
wc->w_type == OCFS2_WRITE_DIRECT) {
/* Direct write has no mapping page. */
- wc->w_pages[i] = NULL;
+ wc->w_folios[i] = NULL;
continue;
} else {
- wc->w_pages[i] = find_or_create_page(mapping, index,
- GFP_NOFS);
- if (!wc->w_pages[i]) {
- ret = -ENOMEM;
+ wc->w_folios[i] = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_NOFS);
+ if (IS_ERR(wc->w_folios[i])) {
+ ret = PTR_ERR(wc->w_folios[i]);
mlog_errno(ret);
goto out;
}
}
- wait_for_stable_page(wc->w_pages[i]);
+ folio_wait_stable(wc->w_folios[i]);
if (index == target_index)
- wc->w_target_page = wc->w_pages[i];
+ wc->w_target_folio = wc->w_folios[i];
}
out:
if (ret)
@@ -1181,19 +1168,18 @@ static int ocfs2_write_cluster(struct address_space *mapping,
if (!should_zero)
p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
- for(i = 0; i < wc->w_num_pages; i++) {
+ for (i = 0; i < wc->w_num_folios; i++) {
int tmpret;
/* This is the direct io target page. */
- if (wc->w_pages[i] == NULL) {
+ if (wc->w_folios[i] == NULL) {
p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
continue;
}
- tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
- wc->w_pages[i], cpos,
- user_pos, user_len,
- should_zero);
+ tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc,
+ wc->w_folios[i], cpos, user_pos, user_len,
+ should_zero);
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
@@ -1472,7 +1458,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct page *page;
+ struct folio *folio;
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
@@ -1483,19 +1469,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
goto out;
}
- page = find_or_create_page(mapping, 0, GFP_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(mapping, 0,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folio)) {
ocfs2_commit_trans(osb, handle);
- ret = -ENOMEM;
+ ret = PTR_ERR(folio);
mlog_errno(ret);
goto out;
}
/*
- * If we don't set w_num_pages then this page won't get unlocked
+ * If we don't set w_num_folios then this folio won't get unlocked
* and freed on cleanup of the write context.
*/
- wc->w_pages[0] = wc->w_target_page = page;
- wc->w_num_pages = 1;
+ wc->w_target_folio = folio;
+ wc->w_folios[0] = folio;
+ wc->w_num_folios = 1;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1509,8 +1497,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
ocfs2_set_inode_data_inline(inode, di);
- if (!PageUptodate(page)) {
- ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
+ if (!folio_test_uptodate(folio)) {
+ ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh);
if (ret) {
ocfs2_commit_trans(osb, handle);
@@ -1533,9 +1521,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
}
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
- struct inode *inode, loff_t pos,
- unsigned len, struct page *mmap_page,
- struct ocfs2_write_ctxt *wc)
+ struct inode *inode, loff_t pos, size_t len,
+ struct folio *mmap_folio, struct ocfs2_write_ctxt *wc)
{
int ret, written = 0;
loff_t end = pos + len;
@@ -1550,7 +1537,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Handle inodes which already have inline data 1st.
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- if (mmap_page == NULL &&
+ if (mmap_folio == NULL &&
ocfs2_size_fits_inline_data(wc->w_di_bh, end))
goto do_inline_write;
@@ -1574,7 +1561,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Check whether the write can fit.
*/
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
- if (mmap_page ||
+ if (mmap_folio ||
end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
return 0;
@@ -1641,9 +1628,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
}
int ocfs2_write_begin_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct folio **foliop, void **fsdata,
- struct buffer_head *di_bh, struct page *mmap_page)
+ loff_t pos, unsigned len, ocfs2_write_type_t type,
+ struct folio **foliop, void **fsdata,
+ struct buffer_head *di_bh, struct folio *mmap_folio)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
@@ -1666,7 +1653,7 @@ try_again:
if (ocfs2_supports_inline_data(osb)) {
ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
- mmap_page, wc);
+ mmap_folio, wc);
if (ret == 1) {
ret = 0;
goto success;
@@ -1718,7 +1705,7 @@ try_again:
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(long long)i_size_read(inode),
le32_to_cpu(di->i_clusters),
- pos, len, type, mmap_page,
+ pos, len, type, mmap_folio,
clusters_to_alloc, extents_to_split);
/*
@@ -1789,21 +1776,21 @@ try_again:
}
/*
- * Fill our page array first. That way we've grabbed enough so
+ * Fill our folio array first. That way we've grabbed enough so
* that we can zero and flush if we error after adding the
* extent.
*/
- ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
- cluster_of_pages, mmap_page);
+ ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
+ cluster_of_pages, mmap_folio);
if (ret) {
/*
- * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
- * the target page. In this case, we exit with no error and no target
- * page. This will trigger the caller, page_mkwrite(), to re-try
- * the operation.
+ * ocfs2_grab_folios_for_write() returns -EAGAIN if it
+ * could not lock the target folio. In this case, we exit
+ * with no error and no target folio. This will trigger
+ * the caller, page_mkwrite(), to re-try the operation.
*/
if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
- BUG_ON(wc->w_target_page);
+ BUG_ON(wc->w_target_folio);
ret = 0;
goto out_quota;
}
@@ -1826,7 +1813,7 @@ try_again:
success:
if (foliop)
- *foliop = page_folio(wc->w_target_page);
+ *foliop = wc->w_target_folio;
*fsdata = wc;
return 0;
out_quota:
@@ -1845,7 +1832,7 @@ out:
* to VM code.
*/
if (wc->w_target_locked)
- unlock_page(mmap_page);
+ folio_unlock(mmap_folio);
ocfs2_free_write_ctxt(inode, wc);
@@ -1924,18 +1911,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
struct ocfs2_dinode *di,
struct ocfs2_write_ctxt *wc)
{
- void *kaddr;
-
if (unlikely(*copied < len)) {
- if (!PageUptodate(wc->w_target_page)) {
+ if (!folio_test_uptodate(wc->w_target_folio)) {
*copied = 0;
return;
}
}
- kaddr = kmap_atomic(wc->w_target_page);
- memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
- kunmap_atomic(kaddr);
+ memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio,
+ pos, *copied);
trace_ocfs2_write_end_inline(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -1944,17 +1928,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
le16_to_cpu(di->i_dyn_features));
}
-int ocfs2_write_end_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied, void *fsdata)
+int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned copied, void *fsdata)
{
int i, ret;
- unsigned from, to, start = pos & (PAGE_SIZE - 1);
+ size_t from, to, start = pos & (PAGE_SIZE - 1);
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_write_ctxt *wc = fsdata;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
handle_t *handle = wc->w_handle;
- struct page *tmppage;
BUG_ON(!list_empty(&wc->w_unwritten_list));
@@ -1973,44 +1956,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
goto out_write_size;
}
- if (unlikely(copied < len) && wc->w_target_page) {
+ if (unlikely(copied < len) && wc->w_target_folio) {
loff_t new_isize;
- if (!PageUptodate(wc->w_target_page))
+ if (!folio_test_uptodate(wc->w_target_folio))
copied = 0;
new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
- if (new_isize > page_offset(wc->w_target_page))
- ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+ if (new_isize > folio_pos(wc->w_target_folio))
+ ocfs2_zero_new_buffers(wc->w_target_folio, start+copied,
start+len);
else {
/*
- * When page is fully beyond new isize (data copy
- * failed), do not bother zeroing the page. Invalidate
+ * When folio is fully beyond new isize (data copy
+ * failed), do not bother zeroing the folio. Invalidate
* it instead so that writeback does not get confused
* put page & buffer dirty bits into inconsistent
* state.
*/
- block_invalidate_folio(page_folio(wc->w_target_page),
- 0, PAGE_SIZE);
+ block_invalidate_folio(wc->w_target_folio, 0,
+ folio_size(wc->w_target_folio));
}
}
- if (wc->w_target_page)
- flush_dcache_page(wc->w_target_page);
+ if (wc->w_target_folio)
+ flush_dcache_folio(wc->w_target_folio);
- for(i = 0; i < wc->w_num_pages; i++) {
- tmppage = wc->w_pages[i];
+ for (i = 0; i < wc->w_num_folios; i++) {
+ struct folio *folio = wc->w_folios[i];
- /* This is the direct io target page. */
- if (tmppage == NULL)
+ /* This is the direct io target folio */
+ if (folio == NULL)
continue;
- if (tmppage == wc->w_target_page) {
+ if (folio == wc->w_target_folio) {
from = wc->w_target_from;
to = wc->w_target_to;
- BUG_ON(from > PAGE_SIZE ||
- to > PAGE_SIZE ||
+ BUG_ON(from > folio_size(folio) ||
+ to > folio_size(folio) ||
to < from);
} else {
/*
@@ -2019,19 +2002,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
* to flush their entire range.
*/
from = 0;
- to = PAGE_SIZE;
+ to = folio_size(folio);
}
- if (page_has_buffers(tmppage)) {
+ if (folio_buffers(folio)) {
if (handle && ocfs2_should_order_data(inode)) {
- loff_t start_byte =
- ((loff_t)tmppage->index << PAGE_SHIFT) +
- from;
+ loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
- block_commit_write(tmppage, from, to);
+ block_commit_write(&folio->page, from, to);
}
}
@@ -2060,7 +2041,7 @@ out:
* this lock and will ask for the page lock when flushing the data.
* put it here to preserve the unlock order.
*/
- ocfs2_unlock_pages(wc);
+ ocfs2_unlock_folios(wc);
if (handle)
ocfs2_commit_trans(osb, handle);
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 1d1b4b7edba0..114efc9111e4 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -8,16 +8,11 @@
#include <linux/fs.h>
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
- struct page *page,
- unsigned from,
- unsigned to);
-
-int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new);
-void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios);
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
@@ -37,11 +32,11 @@ typedef enum {
} ocfs2_write_type_t;
int ocfs2_write_begin_nolock(struct address_space *mapping,
- loff_t pos, unsigned len, ocfs2_write_type_t type,
- struct folio **foliop, void **fsdata,
- struct buffer_head *di_bh, struct page *mmap_page);
+ loff_t pos, unsigned len, ocfs2_write_type_t type,
+ struct folio **foliop, void **fsdata,
+ struct buffer_head *di_bh, struct folio *mmap_folio);
-int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh);
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4200a0341343..724350925aff 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -3,6 +3,7 @@
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
+#include "linux/kstrtox.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
@@ -1020,7 +1021,7 @@ fire_callbacks:
if (list_empty(&slot->ds_live_item))
goto out;
- /* live nodes only go dead after enough consequtive missed
+ /* live nodes only go dead after enough consecutive missed
* samples.. reset the missed counter whenever we see
* activity */
if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
@@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg,
{
unsigned long bytes;
char *p = (char *)page;
+ int ret;
- bytes = simple_strtoul(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 0, &bytes);
+ if (ret)
+ return ret;
/* Heartbeat and fs min / max block sizes are the same. */
if (bytes > 4096 || bytes < 512)
@@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long tmp;
char *p = (char *)page;
+ int ret;
if (reg->hr_bdev_file)
return -EINVAL;
- tmp = simple_strtoul(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 0, &tmp);
+ if (ret)
+ return ret;
if (tmp > O2NM_MAX_NODES || tmp == 0)
return -ERANGE;
@@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (o2nm_this_node() == O2NM_MAX_NODES)
return -EINVAL;
- fd = simple_strtol(p, &p, 0);
- if (!p || (*p && (*p != '\n')))
+ ret = kstrtol(p, 0, &fd);
+ if (ret < 0)
return -EINVAL;
if (fd < 0 || fd >= INT_MAX)
@@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite
{
unsigned long tmp;
char *p = (char *)page;
+ int ret;
- tmp = simple_strtoul(p, &p, 10);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtoul(p, 10, &tmp);
+ if (ret)
+ return ret;
/* this will validate ranges for us. */
o2hb_dead_threshold_set((unsigned int) tmp);
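
The heartbeat configfs handlers above move from simple_strtoul(), which silently stops at the first non-digit, to kstrtoul(), which rejects anything but a complete number (an optional trailing newline is allowed) and reports failure through its return value. A rough userspace analogue of that stricter behaviour, using only standard C rather than the kernel implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int parse_ulong_strict(const char *s, int base, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(s, &end, base);
	if (errno)
		return -errno;			/* e.g. -ERANGE on overflow */
	if (end == s || (*end != '\0' && strcmp(end, "\n") != 0))
		return -EINVAL;			/* trailing garbage is rejected */
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("%d\n", parse_ulong_strict("512\n", 0, &v));	/* 0, v == 512 */
	printf("%d\n", parse_ulong_strict("512k", 0, &v));	/* -EINVAL */
	return 0;
}
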
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index b73fc42e46ff..630bd5a3dd0d 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -29,7 +29,7 @@
* just calling printk() so that this can eventually make its way through
* relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
* The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriage printk() when the caller passes in a constant
+ * only emit the appropriate printk() when the caller passes in a constant
* mask, as is almost always the case.
*
* All this bitmask nonsense is managed from the files under
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 8bf17231d7b7..bfb8b456876c 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -23,7 +23,7 @@
* race between when we see a node start heartbeating and when we connect
* to it.
*
- * So nodes that are in this transtion put a hold on the quorum decision
+ * So nodes that are in this transition put a hold on the quorum decision
* with a counter. As they fall out of this transition they drop the count
* and if they're the last, they fire off the decision.
*/
@@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
}
/* as a node comes up we delay the quorum decision until we know the fate of
- * the connection. the hold will be droped in conn_up or hb_down. it might be
+ * the connection. the hold will be dropped in conn_up or hb_down. it might be
* perpetuated by con_err until hb_down. if we already have a conn, we might
* be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
@@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node)
}
/* This is analogous to hb_up. as a node's connection comes up we delay the
- * quorum decision until we see it heartbeating. the hold will be droped in
+ * quorum decision until we see it heartbeating. the hold will be dropped in
* hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
* it's already heartbeating we might be dropping a hold that conn_up got.
* */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 2b8fa3e782fb..0f46b22561d6 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -5,13 +5,13 @@
*
* ----
*
- * Callers for this were originally written against a very simple synchronus
+ * Callers for this were originally written against a very simple synchronous
* API. This implementation reflects those simple callers. Some day I'm sure
* we'll need to move to a more robust posting/callback mechanism.
*
* Transmit calls pass in kernel virtual addresses and block copying this into
* the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
- * for a failed socket to timeout. TX callers can also pass in a poniter to an
+ * for a failed socket to timeout. TX callers can also pass in a pointer to an
* 'int' which gets filled with an errno off the wire in response to the
* message they send.
*
@@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock;
* o2net_wq. teardown detaches the callbacks before destroying the workqueue.
* quorum work is queued as sock containers are shutdown.. stop_listening
* tears down all the node's sock containers, preventing future shutdowns
- * and queued quroum work, before canceling delayed quorum work and
+ * and queued quorum work, before canceling delayed quorum work and
* destroying the work queue.
*/
static struct workqueue_struct *o2net_wq;
@@ -1419,7 +1419,7 @@ out:
return ret;
}
-/* this work func is triggerd by data ready. it reads until it can read no
+/* this work func is triggered by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
static void o2net_rx_until_empty(struct work_struct *work)
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index a9b8688aaf30..1873bbbb7e5b 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -32,7 +32,8 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry)
}
-static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+static int ocfs2_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int ret = 0; /* if all else fails, just return false */
@@ -44,8 +45,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
osb = OCFS2_SB(dentry->d_sb);
- trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_dentry_revalidate(dentry, name->len, name->name);
/* For a negative dentry -
* check the generation number of the parent and compare with the
@@ -53,12 +53,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
*/
if (inode == NULL) {
unsigned long gen = (unsigned long) dentry->d_fsdata;
- unsigned long pgen;
- spin_lock(&dentry->d_lock);
- pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
- spin_unlock(&dentry->d_lock);
- trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
- dentry->d_name.name,
+ unsigned long pgen = OCFS2_I(dir)->ip_dir_lock_gen;
+ trace_ocfs2_dentry_revalidate_negative(name->len, name->name,
pgen, gen);
if (gen != pgen)
goto bail;
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index 847a52dcbe7d..1969db8ffa9c 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -118,7 +118,7 @@ struct dlm_lockstatus {
#define LKM_VALBLK 0x00000100 /* lock value block request */
#define LKM_NOQUEUE 0x00000200 /* non blocking request */
#define LKM_CONVERT 0x00000400 /* conversion request */
-#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */
+#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
#define LKM_UNLOCK 0x00001000 /* deallocate this lock */
#define LKM_CANCEL 0x00002000 /* cancel conversion request */
#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index e9ef4e2b0e75..fe4fdd09bae3 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/export.h>
+#include <linux/string_choices.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
@@ -90,12 +91,12 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
res->last_used, kref_read(&res->refs),
- list_empty(&res->purge) ? "no" : "yes");
+ str_no_yes(list_empty(&res->purge)));
printk(" on dirty list: %s, on reco list: %s, "
"migrating pending: %s\n",
- list_empty(&res->dirty) ? "no" : "yes",
- list_empty(&res->recovering) ? "no" : "yes",
- res->migration_pending ? "yes" : "no");
+ str_no_yes(list_empty(&res->dirty)),
+ str_no_yes(list_empty(&res->recovering)),
+ str_yes_no(res->migration_pending));
printk(" inflight locks: %d, asts reserved: %d\n",
res->inflight_locks, atomic_read(&res->asts_reserved));
dlm_print_lockres_refmap(res);
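
str_yes_no() and str_no_yes() come from <linux/string_choices.h> and simply replace the open-coded ternaries above; in simplified form (a userspace rendering, not the kernel header itself) they behave like:

#include <stdbool.h>
#include <stdio.h>

static inline const char *str_yes_no(bool v) { return v ? "yes" : "no"; }
static inline const char *str_no_yes(bool v) { return v ? "no" : "yes"; }

int main(void)
{
	bool list_is_empty = true;

	/* matches the converted printk: an empty purge list prints "no" */
	printf("on purge list: %s\n", str_no_yes(list_is_empty));
	return 0;
}
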
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index d610da8e2f24..86bb1a03bcc1 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -21,7 +21,7 @@
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
-
+#include <linux/string_choices.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
@@ -2859,7 +2859,7 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
dlm_lockres_release_ast(dlm, res);
mlog(0, "about to wait on migration_wq, dirty=%s\n",
- res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+ str_yes_no(res->state & DLM_LOCK_RES_DIRTY));
/* if the extra ref we just put was the final one, this
* will pass thru immediately. otherwise, we need to wait
* for the last ast to finish. */
@@ -2869,12 +2869,12 @@ again:
msecs_to_jiffies(1000));
if (ret < 0) {
mlog(0, "woken again: migrating? %s, dead? %s\n",
- res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
- test_bit(target, dlm->domain_map) ? "no":"yes");
+ str_yes_no(res->state & DLM_LOCK_RES_MIGRATING),
+ str_no_yes(test_bit(target, dlm->domain_map)));
} else {
mlog(0, "all is well: migrating? %s, dead? %s\n",
- res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
- test_bit(target, dlm->domain_map) ? "no":"yes");
+ str_yes_no(res->state & DLM_LOCK_RES_MIGRATING),
+ str_no_yes(test_bit(target, dlm->domain_map)));
}
if (!dlm_migration_can_proceed(dlm, res, target)) {
mlog(0, "trying again...\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 50da8af988c1..67fc62a49a76 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -22,7 +22,7 @@
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
-
+#include <linux/string_choices.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
@@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
* 1) all recovery threads cluster wide will work on recovering
* ONE node at a time
* 2) negotiate who will take over all the locks for the dead node.
- * thats right... ALL the locks.
+ * that's right... ALL the locks.
* 3) once a new master is chosen, everyone scans all locks
* and moves aside those mastered by the dead guy
* 4) each of these locks should be locked until recovery is done
@@ -581,8 +581,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
msecs_to_jiffies(1000));
mlog(0, "waited 1 sec for %u, "
"dead? %s\n", ndata->node_num,
- dlm_is_node_dead(dlm, ndata->node_num) ?
- "yes" : "no");
+ str_yes_no(dlm_is_node_dead(dlm, ndata->node_num)));
} else {
/* -ENOMEM on the other node */
mlog(0, "%s: node %u returned "
@@ -677,7 +676,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
spin_unlock(&dlm_reco_state_lock);
mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
- all_nodes_done?"yes":"no");
+ str_yes_no(all_nodes_done));
if (all_nodes_done) {
int ret;
@@ -1469,7 +1468,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
* The first one is handled at the end of this function. The
* other two are handled in the worker thread after locks have
* been attached. Yes, we don't wait for purge time to match
- * kref_init. The lockres will still have atleast one ref
+ * kref_init. The lockres will still have at least one ref
* added because it is in the hash __dlm_insert_lockres() */
extra_refs++;
@@ -1735,7 +1734,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&res->spinlock);
}
} else {
- /* put.. incase we are not the master */
+ /* put.. in case we are not the master */
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
}
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 7fc0e920eda7..2a7f36643895 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -506,9 +507,7 @@ bail:
return status;
}
-static int dlmfs_fill_super(struct super_block * sb,
- void * data,
- int silent)
+static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
@@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = {
.setattr = dlmfs_file_setattr,
};
-static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int dlmfs_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
+ return get_tree_nodev(fc, dlmfs_fill_super);
+}
+
+static const struct fs_context_operations dlmfs_context_ops = {
+ .get_tree = dlmfs_get_tree,
+};
+
+static int dlmfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &dlmfs_context_ops;
+
+ return 0;
}
static struct file_system_type dlmfs_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2_dlmfs",
- .mount = dlmfs_mount,
.kill_sb = kill_litter_super,
+ .init_fs_context = dlmfs_init_fs_context,
};
MODULE_ALIAS_FS("ocfs2_dlmfs");
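
Pieced together from the hunks above, the converted dlmfs registration boils down to the following shape; this is reassembled here purely for readability, and the diff remains the authoritative version:

static int dlmfs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, dlmfs_fill_super);
}

static const struct fs_context_operations dlmfs_context_ops = {
	.get_tree	= dlmfs_get_tree,
};

static int dlmfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &dlmfs_context_ops;
	return 0;
}

static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",
	.kill_sb	= kill_litter_super,
	.init_fs_context = dlmfs_init_fs_context,
};
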
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 764ecbd5ad41..92a6149da9c1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>
+#include <linux/string_choices.h>
#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>
@@ -794,7 +795,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
/*
* Keep a list of processes who have interest in a lockres.
- * Note: this is now only uesed for check recursive cluster locking.
+ * Note: this is now only used to check recursive cluster locking.
*/
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
struct ocfs2_lock_holder *oh)
@@ -2529,30 +2530,28 @@ bail:
/*
* This is working around a lock inversion between tasks acquiring DLM
- * locks while holding a page lock and the downconvert thread which
- * blocks dlm lock acquiry while acquiring page locks.
+ * locks while holding a folio lock and the downconvert thread which
+ * blocks dlm lock acquiry while acquiring folio locks.
*
- * ** These _with_page variantes are only intended to be called from aop
- * methods that hold page locks and return a very specific *positive* error
+ * ** These _with_folio variants are only intended to be called from aop
+ * methods that hold folio locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
* The DLM is called such that it returns -EAGAIN if it would have
* blocked waiting for the downconvert thread. In that case we unlock
- * our page so the downconvert thread can make progress. Once we've
+ * our folio so the downconvert thread can make progress. Once we've
* done this we have to return AOP_TRUNCATED_PAGE so the aop method
* that called us can bubble that back up into the VFS who will then
* immediately retry the aop call.
*/
-int ocfs2_inode_lock_with_page(struct inode *inode,
- struct buffer_head **ret_bh,
- int ex,
- struct page *page)
+int ocfs2_inode_lock_with_folio(struct inode *inode,
+ struct buffer_head **ret_bh, int ex, struct folio *folio)
{
int ret;
ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
- unlock_page(page);
+ folio_unlock(folio);
/*
* If we can't get inode lock immediately, we should not return
* directly here, since this will lead to a softlockup problem.
@@ -2630,7 +2629,7 @@ void ocfs2_inode_unlock(struct inode *inode,
}
/*
- * This _tracker variantes are introduced to deal with the recursive cluster
+ * These _tracker variants are introduced to deal with the recursive cluster
* locking issue. The idea is to keep track of a lock holder on the stack of
* the current process. If there's a lock holder on the stack, we know the
* task context is already protected by cluster locking. Currently, they're
@@ -2735,7 +2734,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
struct ocfs2_lock_res *lockres;
lockres = &OCFS2_I(inode)->ip_inode_lockres;
- /* had_lock means that the currect process already takes the cluster
+ /* had_lock means that the current process already takes the cluster
* lock previously.
* If had_lock is 1, we have nothing to do here.
* If had_lock is 0, we will release the lock.
@@ -3802,9 +3801,9 @@ recheck:
* set when the ast is received for an upconvert just before the
* OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
* on the heels of the ast, we want to delay the downconvert just
- * enough to allow the up requestor to do its task. Because this
+ * enough to allow the up requester to do its task. Because this
* lock is in the blocked queue, the lock will be downconverted
- * as soon as the requestor is done with the lock.
+ * as soon as the requester is done with the lock.
*/
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
goto leave_requeue;
@@ -4339,7 +4338,7 @@ unqueue:
ocfs2_schedule_blocked_lock(osb, lockres);
mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
- ctl.requeue ? "yes" : "no");
+ str_yes_no(ctl.requeue));
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (ctl.unblock_action != UNBLOCK_CONTINUE
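
str_yes_no() comes from the <linux/string_choices.h> header added at the top of this file; it is only a bool-to-string helper, so the mlog() output is unchanged. Its definition is essentially:

        static inline const char *str_yes_no(bool v)
        {
                return v ? "yes" : "no";
        }
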
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index e5da5809ed95..a3ebd7303ea2 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
int ex,
int arg_flags,
int subclass);
-int ocfs2_inode_lock_with_page(struct inode *inode,
- struct buffer_head **ret_bh,
- int ex,
- struct page *page);
+int ocfs2_inode_lock_with_folio(struct inode *inode,
+ struct buffer_head **ret_bh, int ex, struct folio *folio);
/* Variants without special locking class or flags */
#define ocfs2_inode_lock_full(i, r, e, f)\
ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index f7672472fa82..930150ed5db1 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -435,6 +435,16 @@ static int ocfs2_get_clusters_nocache(struct inode *inode,
}
}
+ if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) {
+ ocfs2_error(inode->i_sb,
+ "Inode %lu has an invalid extent (next_free_rec %u, count %u)\n",
+ inode->i_ino,
+ le16_to_cpu(el->l_next_free_rec),
+ le16_to_cpu(el->l_count));
+ ret = -EROFS;
+ goto out;
+ }
+
i = ocfs2_search_extent_list(el, v_cluster);
if (i == -1) {
/*
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 957ced628eb1..e54f2c4b5a90 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
goto out_commit_trans;
}
- /* Get the offsets within the page that we want to zero */
- zero_from = abs_from & (PAGE_SIZE - 1);
- zero_to = abs_to & (PAGE_SIZE - 1);
+ /* Get the offsets within the folio that we want to zero */
+ zero_from = offset_in_folio(folio, abs_from);
+ zero_to = offset_in_folio(folio, abs_to);
if (!zero_to)
- zero_to = PAGE_SIZE;
+ zero_to = folio_size(folio);
trace_ocfs2_write_zero_page(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
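
offset_in_folio() masks against folio_size() rather than PAGE_SIZE, which matters once the folio backing this range can be larger than one page; the old masking would fold the offset back into a 4K window. A worked example, assuming a 4K page size and a 16K folio:

        /*
         * abs_from = folio_pos(folio) + 6000;
         *
         * old: abs_from & (PAGE_SIZE - 1)        == 1904  (offset within one page)
         * new: offset_in_folio(folio, abs_from)  == 6000  (offset within the folio)
         *
         * and "!zero_to" now means "zero to the end of the folio", hence
         * folio_size(folio) rather than PAGE_SIZE.
         */
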
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 2cc5c99fe941..12e5d1f73325 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -200,6 +200,20 @@ bail:
return inode;
}
+static int ocfs2_dinode_has_extents(struct ocfs2_dinode *di)
+{
+ /* inodes flagged with other stuff in id2 */
+ if (di->i_flags & (OCFS2_SUPER_BLOCK_FL | OCFS2_LOCAL_ALLOC_FL |
+ OCFS2_CHAIN_FL | OCFS2_DEALLOC_FL))
+ return 0;
+ /* i_flags doesn't indicate when id2 is a fast symlink */
+ if (S_ISLNK(di->i_mode) && di->i_size && di->i_clusters == 0)
+ return 0;
+ if (di->i_dyn_features & OCFS2_INLINE_DATA_FL)
+ return 0;
+
+ return 1;
+}
/*
* here's how inodes get read from disk:
@@ -1122,7 +1136,7 @@ static void ocfs2_clear_inode(struct inode *inode)
dquot_drop(inode);
- /* To preven remote deletes we hold open lock before, now it
+ /* To prevent remote deletes we hold open lock before, now it
* is time to unlock PR and EX open locks. */
ocfs2_open_unlock(inode);
@@ -1437,7 +1451,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
* Call ocfs2_validate_meta_ecc() first since it has ecc repair
* function, but we should not return error immediately when ecc
* validation fails, because the reason is quite likely the invalid
- * inode number inputed.
+ * inode number inputted.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
if (rc) {
@@ -1547,6 +1561,16 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
le32_to_cpu(di->i_fs_generation));
}
+ if (ocfs2_dinode_has_extents(di) &&
+ le16_to_cpu(di->id2.i_list.l_next_free_rec) > le16_to_cpu(di->id2.i_list.l_count)) {
+ di->id2.i_list.l_next_free_rec = di->id2.i_list.l_count;
+ changed = 1;
+ mlog(ML_ERROR,
+ "Filecheck: reset dinode #%llu: l_next_free_rec to %u\n",
+ (unsigned long long)bh->b_blocknr,
+ le16_to_cpu(di->id2.i_list.l_next_free_rec));
+ }
+
if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) {
ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check);
mark_buffer_dirty(bh);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 71beef7f8a60..7ae96fb8807a 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -796,7 +796,7 @@ bail:
/*
* OCFS2_IOC_INFO handles an array of requests passed from userspace.
*
- * ocfs2_info_handle() recevies a large info aggregation, grab and
+ * ocfs2_info_handle() receives a large info aggregation, grab and
* validate the request count from header, then break it into small
* pieces, later specific handlers can handle them one by one.
*
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 1bf188b6866a..f1b4b3e611cb 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1956,7 +1956,7 @@ bail:
/*
* Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
- * randomness to the timeout to minimize multple nodes firing the timer at the
+ * randomness to the timeout to minimize multiple nodes firing the timer at the
* same time.
*/
static inline unsigned long ocfs2_orphan_scan_timeout(void)
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 6ef4cb045ccd..6a314e9f2b49 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
}
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
- struct buffer_head *di_bh, struct page *page)
+ struct buffer_head *di_bh, struct folio *folio)
{
int err;
vm_fault_t ret = VM_FAULT_NOPAGE;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
- loff_t pos = page_offset(page);
+ loff_t pos = folio_pos(folio);
unsigned int len = PAGE_SIZE;
pgoff_t last_index;
struct folio *locked_folio = NULL;
@@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
*
* Let VM retry with these cases.
*/
- if ((page->mapping != inode->i_mapping) ||
- (!PageUptodate(page)) ||
- (page_offset(page) >= size))
+ if ((folio->mapping != inode->i_mapping) ||
+ !folio_test_uptodate(folio) ||
+ (pos >= size))
goto out;
/*
@@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
* worry about ocfs2_write_begin() skipping some buffer reads
* because the "write" would invalidate their data.
*/
- if (page->index == last_index)
+ if (folio->index == last_index)
len = ((size - 1) & ~PAGE_MASK) + 1;
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
- &locked_folio, &fsdata, di_bh, page);
+ &locked_folio, &fsdata, di_bh, folio);
if (err) {
if (err != -ENOSPC)
mlog_errno(err);
@@ -112,7 +112,7 @@ out:
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
@@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
- ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
+ ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio);
up_write(&OCFS2_I(inode)->ip_alloc_sem);
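
page_folio() is the bridge that lets the fault handler keep taking a struct page from the core VM while the rest of this path works on folios; the substitutions used in this file are, roughly:

        struct folio *folio = page_folio(vmf->page);  /* folio containing the faulting page */
        loff_t pos = folio_pos(folio);                 /* byte offset of the folio in the file */
        bool uptodate = folio_test_uptodate(folio);    /* replaces PageUptodate(page) */
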
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f9d6a4f9ca92..369c7d27befd 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
/*
- * moving goal is not allowd to start with a group desc blok(#0 blk)
+ * moving goal is not allowed to start with a group desc blok(#0 blk)
* let's compromise to the latter cluster.
*/
if (range->me_goal == le64_to_cpu(bg->bg_blkno))
@@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
/*
* probe the victim cluster group to find a proper
- * region to fit wanted movement, it even will perfrom
+ * region to fit the wanted movement; it will even perform
* a best-effort attempt by compromising to a threshold
* around the goal.
*/
@@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
}
/*
- * rememer ip_xattr_sem also needs to be held if necessary
+ * remember ip_xattr_sem also needs to be held if necessary
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
@@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
context->range = &range;
/*
- * ok, the default theshold for the defragmentation
+ * ok, the default threshold for the defragmentation
* is 1M, since our maximum clustersize was 1M also.
* any thought?
*/
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 5550f8afa438..0ec63a1a94b8 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -508,7 +508,6 @@ static int __ocfs2_mknod_locked(struct inode *dir,
struct inode *inode,
dev_t dev,
struct buffer_head **new_fe_bh,
- struct buffer_head *parent_fe_bh,
handle_t *handle,
struct ocfs2_alloc_context *inode_ac,
u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
@@ -641,8 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
}
return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
- parent_fe_bh, handle, inode_ac,
- fe_blkno, suballoc_loc, suballoc_bit);
+ handle, inode_ac, fe_blkno,
+ suballoc_loc, suballoc_bit);
}
static int ocfs2_mkdir(struct mnt_idmap *idmap,
@@ -2576,7 +2575,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
clear_nlink(inode);
/* do the real work now. */
status = __ocfs2_mknod_locked(dir, inode,
- 0, &new_di_bh, parent_di_bh, handle,
+ 0, &new_di_bh, handle,
inode_ac, di_blkno, suballoc_loc,
suballoc_bit);
if (status < 0) {
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index c93689b568fe..e8e94599e907 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -132,7 +132,7 @@
* well as the name of the cluster being joined.
* mount.ocfs2 must pass in a matching stack name.
*
- * If not set, the classic stack will be used. This is compatbile with
+ * If not set, the classic stack will be used. This is compatible with
* all older versions.
*/
#define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080
@@ -143,7 +143,7 @@
/* Support for extended attributes */
#define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200
-/* Support for indexed directores */
+/* Support for indexed directories */
#define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400
/* Metadata checksum and error correction */
@@ -156,7 +156,7 @@
#define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000
/*
- * Incompat bit to indicate useable clusterinfo with stackflags for all
+ * Incompat bit to indicate usable clusterinfo with stackflags for all
* cluster stacks (userspace adnd o2cb). If this bit is set,
* INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set.
*/
@@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block {
struct ocfs2_xattr_header xb_header; /* xattr header if this
block contains xattr */
struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this
- block cotains xattr
+ block contains xattr
tree. */
} xb_attrs;
};
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 9680797bc531..2de2f8733283 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -215,7 +215,7 @@ struct ocfs2_move_extents {
movement less likely
to fail, may make fs
even more fragmented */
-#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation
+#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation
completely gets done.
*/
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index 8ac357ce6a30..9b234c03d693 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = {
[OCFS2_LOCK_TYPE_DATA] = "Data",
[OCFS2_LOCK_TYPE_SUPER] = "Super",
[OCFS2_LOCK_TYPE_RENAME] = "Rename",
- /* Need to differntiate from [R]ename.. serializing writes is the
+ /* Need to differentiate from [R]ename.. serializing writes is the
* important job it does, anyway. */
[OCFS2_LOCK_TYPE_RW] = "Write/Read",
[OCFS2_LOCK_TYPE_DENTRY] = "Dentry",
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 0511c69c9fde..54ed1495de9a 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -1658,34 +1658,34 @@ TRACE_EVENT(ocfs2_remount,
);
TRACE_EVENT(ocfs2_fill_super,
- TP_PROTO(void *sb, void *data, int silent),
- TP_ARGS(sb, data, silent),
+ TP_PROTO(void *sb, void *fc, int silent),
+ TP_ARGS(sb, fc, silent),
TP_STRUCT__entry(
__field(void *, sb)
- __field(void *, data)
+ __field(void *, fc)
__field(int, silent)
),
TP_fast_assign(
__entry->sb = sb;
- __entry->data = data;
+ __entry->fc = fc;
__entry->silent = silent;
),
TP_printk("%p %p %d", __entry->sb,
- __entry->data, __entry->silent)
+ __entry->fc, __entry->silent)
);
TRACE_EVENT(ocfs2_parse_options,
- TP_PROTO(int is_remount, char *options),
- TP_ARGS(is_remount, options),
+ TP_PROTO(int is_remount, const char *option),
+ TP_ARGS(is_remount, option),
TP_STRUCT__entry(
__field(int, is_remount)
- __string(options, options)
+ __string(option, option)
),
TP_fast_assign(
__entry->is_remount = is_remount;
- __assign_str(options);
+ __assign_str(option);
),
- TP_printk("%d %s", __entry->is_remount, __get_str(options))
+ TP_printk("%d %s", __entry->is_remount, __get_str(option))
);
DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3404e7a30c33..15d9acd456ec 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot)
handle = ocfs2_start_trans(osb,
ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
if (IS_ERR(handle)) {
+ /*
+ * Mark dquot as inactive to avoid endless cycle in
+ * quota_release_workfn().
+ */
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
status = PTR_ERR(handle);
mlog_errno(status);
goto out_ilock;
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 004393b13c0a..8f732742b26e 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
*
* If we will insert a new one, this is easy and only happens
* during adding refcounted flag to the extent, so we don't
- * have a chance of spliting. We just need one record.
+ * have a chance of splitting. We just need one record.
*
* If the refcount rec already exists, that would be a little
* complicated. we may have to:
@@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
/*
* Calculate out the start and number of virtual clusters we need to CoW.
*
- * cpos is vitual start cluster position we want to do CoW in a
+ * cpos is the virtual start cluster position we want to do CoW in a
* file and write_len is the cluster length.
* max_cpos is the place where we want to stop CoW intentionally.
*
- * Normal we will start CoW from the beginning of extent record cotaining cpos.
+ * Normally we will start CoW from the beginning of the extent record containing cpos.
* We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
* get good I/O from the resulting extent tree.
*/
@@ -2902,7 +2902,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
int ret = 0, partial;
struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
- struct page *page;
pgoff_t page_index;
unsigned int from, to;
loff_t offset, end, map_end;
@@ -2921,6 +2920,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
end = i_size_read(inode);
while (offset < end) {
+ struct folio *folio;
page_index = offset >> PAGE_SHIFT;
map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
if (map_end > end)
@@ -2933,9 +2933,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
to = map_end & (PAGE_SIZE - 1);
retry:
- page = find_or_create_page(mapping, page_index, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
+ folio = __filemap_get_folio(mapping, page_index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
mlog_errno(ret);
break;
}
@@ -2945,9 +2946,9 @@ retry:
* page, so write it back.
*/
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
- if (PageDirty(page)) {
- unlock_page(page);
- put_page(page);
+ if (folio_test_dirty(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
ret = filemap_write_and_wait_range(mapping,
offset, map_end - 1);
@@ -2955,9 +2956,7 @@ retry:
}
}
- if (!PageUptodate(page)) {
- struct folio *folio = page_folio(page);
-
+ if (!folio_test_uptodate(folio)) {
ret = block_read_full_folio(folio, ocfs2_get_block);
if (ret) {
mlog_errno(ret);
@@ -2966,8 +2965,8 @@ retry:
folio_lock(folio);
}
- if (page_has_buffers(page)) {
- ret = walk_page_buffers(handle, page_buffers(page),
+ if (folio_buffers(folio)) {
+ ret = walk_page_buffers(handle, folio_buffers(folio),
from, to, &partial,
ocfs2_clear_cow_buffer);
if (ret) {
@@ -2976,14 +2975,12 @@ retry:
}
}
- ocfs2_map_and_dirty_page(inode,
- handle, from, to,
- page, 0, &new_block);
- mark_page_accessed(page);
+ ocfs2_map_and_dirty_folio(inode, handle, from, to,
+ folio, 0, &new_block);
+ folio_mark_accessed(folio);
unlock:
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
offset = map_end;
if (ret)
break;
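
Unlike find_or_create_page(), __filemap_get_folio() reports failure with an ERR_PTR() (for example ERR_PTR(-ENOMEM)) rather than NULL, hence the IS_ERR()/PTR_ERR() change; the FGP flags reproduce the old behaviour of creating the folio if absent, locking it and marking it accessed. The lookup pattern, in isolation:

        struct folio *folio;

        folio = __filemap_get_folio(mapping, index,
                                    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
        if (IS_ERR(folio))
                return PTR_ERR(folio);   /* error is encoded in the pointer, never NULL */

        /* ... use the locked folio ... */

        folio_unlock(folio);
        folio_put(folio);
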
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
index ec8101ef5717..4fce17180342 100644
--- a/fs/ocfs2/reservations.h
+++ b/fs/ocfs2/reservations.h
@@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation {
#define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */
#define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be
- * destroyed immedately after use */
+ * destroyed immediately after use */
#define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed
* directory btree */
@@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
/**
* ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used.
* @resmap: reservations bitmap
- * @resv: optional reservation to recalulate based on new bitmap
+ * @resv: optional reservation to recalculate based on new bitmap
* @cstart: start of allocation in clusters
* @clen: end of allocation in clusters.
*
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 10157d9d7a9c..f58e891aa2da 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
}
/*
- * o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB
+ * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
* contents, it will zero out the LVB. Thus the caller can always trust
* the contents.
*/
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 20aa37b67cfb..ddd761cf44c8 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -650,7 +650,7 @@ error:
* and easier to preserve the name.
*/
-static struct ctl_table ocfs2_nm_table[] = {
+static const struct ctl_table ocfs2_nm_table[] = {
{
.procname = "hb_ctl_path",
.data = ocfs2_hb_ctl_path,
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 02ab072c528a..5486a6dce70a 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -210,7 +210,7 @@ struct ocfs2_stack_operations {
struct file_lock *fl);
/*
- * This is an optoinal debugging hook. If provided, the
+ * This is an optional debugging hook. If provided, the
* stack can dump debugging information about this lock.
*/
void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c79b4291777f..8bb5022f3082 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -19,10 +19,10 @@
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/crc32.h>
#include <linux/debugfs.h>
-#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>
#include <linux/signal.h>
@@ -80,17 +80,15 @@ struct mount_options
unsigned int resv_level;
int dir_resv_level;
char cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
+ bool user_stack;
};
-static int ocfs2_parse_options(struct super_block *sb, char *options,
- struct mount_options *mopt,
- int is_remount);
+static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options);
static int ocfs2_show_options(struct seq_file *s, struct dentry *root);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
-static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err);
static int ocfs2_initialize_mem_caches(void);
static void ocfs2_free_mem_caches(void);
@@ -135,7 +133,6 @@ static const struct super_operations ocfs2_sops = {
.evict_inode = ocfs2_evict_inode,
.sync_fs = ocfs2_sync_fs,
.put_super = ocfs2_put_super,
- .remount_fs = ocfs2_remount,
.show_options = ocfs2_show_options,
.quota_read = ocfs2_quota_read,
.quota_write = ocfs2_quota_write,
@@ -144,15 +141,10 @@ static const struct super_operations ocfs2_sops = {
enum {
Opt_barrier,
- Opt_err_panic,
- Opt_err_ro,
+ Opt_errors,
Opt_intr,
- Opt_nointr,
- Opt_hb_none,
- Opt_hb_local,
- Opt_hb_global,
- Opt_data_ordered,
- Opt_data_writeback,
+ Opt_heartbeat,
+ Opt_data,
Opt_atime_quantum,
Opt_slot,
Opt_commit,
@@ -160,52 +152,64 @@ enum {
Opt_localflocks,
Opt_stack,
Opt_user_xattr,
- Opt_nouser_xattr,
Opt_inode64,
Opt_acl,
- Opt_noacl,
Opt_usrquota,
Opt_grpquota,
- Opt_coherency_buffered,
- Opt_coherency_full,
+ Opt_coherency,
Opt_resv_level,
Opt_dir_resv_level,
Opt_journal_async_commit,
- Opt_err_cont,
- Opt_err,
};
-static const match_table_t tokens = {
- {Opt_barrier, "barrier=%u"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_intr, "intr"},
- {Opt_nointr, "nointr"},
- {Opt_hb_none, OCFS2_HB_NONE},
- {Opt_hb_local, OCFS2_HB_LOCAL},
- {Opt_hb_global, OCFS2_HB_GLOBAL},
- {Opt_data_ordered, "data=ordered"},
- {Opt_data_writeback, "data=writeback"},
- {Opt_atime_quantum, "atime_quantum=%u"},
- {Opt_slot, "preferred_slot=%u"},
- {Opt_commit, "commit=%u"},
- {Opt_localalloc, "localalloc=%d"},
- {Opt_localflocks, "localflocks"},
- {Opt_stack, "cluster_stack=%s"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_inode64, "inode64"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_usrquota, "usrquota"},
- {Opt_grpquota, "grpquota"},
- {Opt_coherency_buffered, "coherency=buffered"},
- {Opt_coherency_full, "coherency=full"},
- {Opt_resv_level, "resv_level=%u"},
- {Opt_dir_resv_level, "dir_resv_level=%u"},
- {Opt_journal_async_commit, "journal_async_commit"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err, NULL}
+static const struct constant_table ocfs2_param_errors[] = {
+ {"panic", OCFS2_MOUNT_ERRORS_PANIC},
+ {"remount-ro", OCFS2_MOUNT_ERRORS_ROFS},
+ {"continue", OCFS2_MOUNT_ERRORS_CONT},
+ {}
+};
+
+static const struct constant_table ocfs2_param_heartbeat[] = {
+ {"local", OCFS2_MOUNT_HB_LOCAL},
+ {"none", OCFS2_MOUNT_HB_NONE},
+ {"global", OCFS2_MOUNT_HB_GLOBAL},
+ {}
+};
+
+static const struct constant_table ocfs2_param_data[] = {
+ {"writeback", OCFS2_MOUNT_DATA_WRITEBACK},
+ {"ordered", 0},
+ {}
+};
+
+static const struct constant_table ocfs2_param_coherency[] = {
+ {"buffered", OCFS2_MOUNT_COHERENCY_BUFFERED},
+ {"full", 0},
+ {}
+};
+
+static const struct fs_parameter_spec ocfs2_param_spec[] = {
+ fsparam_u32 ("barrier", Opt_barrier),
+ fsparam_enum ("errors", Opt_errors, ocfs2_param_errors),
+ fsparam_flag_no ("intr", Opt_intr),
+ fsparam_enum ("heartbeat", Opt_heartbeat, ocfs2_param_heartbeat),
+ fsparam_enum ("data", Opt_data, ocfs2_param_data),
+ fsparam_u32 ("atime_quantum", Opt_atime_quantum),
+ fsparam_u32 ("preferred_slot", Opt_slot),
+ fsparam_u32 ("commit", Opt_commit),
+ fsparam_s32 ("localalloc", Opt_localalloc),
+ fsparam_flag ("localflocks", Opt_localflocks),
+ fsparam_string ("cluster_stack", Opt_stack),
+ fsparam_flag_no ("user_xattr", Opt_user_xattr),
+ fsparam_flag ("inode64", Opt_inode64),
+ fsparam_flag_no ("acl", Opt_acl),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_enum ("coherency", Opt_coherency, ocfs2_param_coherency),
+ fsparam_u32 ("resv_level", Opt_resv_level),
+ fsparam_u32 ("dir_resv_level", Opt_dir_resv_level),
+ fsparam_flag ("journal_async_commit", Opt_journal_async_commit),
+ {}
};
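
Parsing is now table-driven: fs_parse() validates each value against its fsparam_*() entry before the handler runs, and for enum entries it also translates the string through the constant_table, so "errors=bogus" is rejected before ocfs2 sees it. The "no"-prefixed spellings (nointr, nouser_xattr, noacl) are generated by fsparam_flag_no() and arrive as result.negated. Roughly:

        /* What fs_parse() hands back for two of the entries above (sketch). */
        struct fs_parse_result result;
        int opt = fs_parse(fc, ocfs2_param_spec, param, &result);

        /* "errors=remount-ro" -> opt == Opt_errors,
         *                        result.uint_32 == OCFS2_MOUNT_ERRORS_ROFS */
        /* "nointr"            -> opt == Opt_intr,
         *                        result.negated == true                    */
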
#ifdef CONFIG_DEBUG_FS
@@ -600,32 +604,32 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
return (((unsigned long long)bytes) << bitshift) - trim;
}
-static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
+static int ocfs2_reconfigure(struct fs_context *fc)
{
int incompat_features;
int ret = 0;
- struct mount_options parsed_options;
+ struct mount_options *parsed_options = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
u32 tmp;
sync_filesystem(sb);
- if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
- !ocfs2_check_set_options(sb, &parsed_options)) {
+ if (!ocfs2_check_set_options(sb, parsed_options)) {
ret = -EINVAL;
goto out;
}
tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE;
- if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
+ if ((osb->s_mount_opt & tmp) != (parsed_options->mount_opt & tmp)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
goto out;
}
if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
- (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
+ (parsed_options->mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change data mode on remount\n");
goto out;
@@ -634,16 +638,16 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
/* Probably don't want this on remount; it might
* mess with other nodes */
if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) &&
- (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) {
+ (parsed_options->mount_opt & OCFS2_MOUNT_INODE64)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot enable inode64 on remount\n");
goto out;
}
/* We're going to/from readonly mode. */
- if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
/* Disable quota accounting before remounting RO */
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
ret = ocfs2_susp_quotas(osb, 0);
if (ret < 0)
goto out;
@@ -657,7 +661,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
goto unlock_osb;
}
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
@@ -678,11 +682,11 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~SB_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
- trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
+ trace_ocfs2_remount(sb->s_flags, osb->osb_flags, fc->sb_flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
- if (!ret && !(*flags & SB_RDONLY)) {
+ if (!ret && !(fc->sb_flags & SB_RDONLY)) {
if (sb_any_quota_suspended(sb))
ret = ocfs2_susp_quotas(osb, 1);
else
@@ -701,11 +705,11 @@ unlock_osb:
if (!ret) {
/* Only save off the new mount options in case of a successful
* remount. */
- osb->s_mount_opt = parsed_options.mount_opt;
- osb->s_atime_quantum = parsed_options.atime_quantum;
- osb->preferred_slot = parsed_options.slot;
- if (parsed_options.commit_interval)
- osb->osb_commit_interval = parsed_options.commit_interval;
+ osb->s_mount_opt = parsed_options->mount_opt;
+ osb->s_atime_quantum = parsed_options->atime_quantum;
+ osb->preferred_slot = parsed_options->slot;
+ if (parsed_options->commit_interval)
+ osb->osb_commit_interval = parsed_options->commit_interval;
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
@@ -966,23 +970,18 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
}
-static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+static int ocfs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct dentry *root;
int status, sector_size;
- struct mount_options parsed_options;
+ struct mount_options *parsed_options = fc->fs_private;
struct inode *inode = NULL;
struct ocfs2_super *osb = NULL;
struct buffer_head *bh = NULL;
char nodestr[12];
struct ocfs2_blockcheck_stats stats;
- trace_ocfs2_fill_super(sb, data, silent);
-
- if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
- status = -EINVAL;
- goto out;
- }
+ trace_ocfs2_fill_super(sb, fc, fc->sb_flags & SB_SILENT);
/* probe for superblock */
status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
@@ -999,24 +998,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb = OCFS2_SB(sb);
- if (!ocfs2_check_set_options(sb, &parsed_options)) {
+ if (!ocfs2_check_set_options(sb, parsed_options)) {
status = -EINVAL;
goto out_super;
}
- osb->s_mount_opt = parsed_options.mount_opt;
- osb->s_atime_quantum = parsed_options.atime_quantum;
- osb->preferred_slot = parsed_options.slot;
- osb->osb_commit_interval = parsed_options.commit_interval;
+ osb->s_mount_opt = parsed_options->mount_opt;
+ osb->s_atime_quantum = parsed_options->atime_quantum;
+ osb->preferred_slot = parsed_options->slot;
+ osb->osb_commit_interval = parsed_options->commit_interval;
- ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt);
- osb->osb_resv_level = parsed_options.resv_level;
- osb->osb_dir_resv_level = parsed_options.resv_level;
- if (parsed_options.dir_resv_level == -1)
- osb->osb_dir_resv_level = parsed_options.resv_level;
+ ocfs2_la_set_sizes(osb, parsed_options->localalloc_opt);
+ osb->osb_resv_level = parsed_options->resv_level;
+ osb->osb_dir_resv_level = parsed_options->resv_level;
+ if (parsed_options->dir_resv_level == -1)
+ osb->osb_dir_resv_level = parsed_options->resv_level;
else
- osb->osb_dir_resv_level = parsed_options.dir_resv_level;
+ osb->osb_dir_resv_level = parsed_options->dir_resv_level;
- status = ocfs2_verify_userspace_stack(osb, &parsed_options);
+ status = ocfs2_verify_userspace_stack(osb, parsed_options);
if (status)
goto out_super;
@@ -1180,27 +1179,72 @@ out:
return status;
}
-static struct dentry *ocfs2_mount(struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data)
+static int ocfs2_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, ocfs2_fill_super);
+}
+
+static void ocfs2_free_fc(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations ocfs2_context_ops = {
+ .parse_param = ocfs2_parse_param,
+ .get_tree = ocfs2_get_tree,
+ .reconfigure = ocfs2_reconfigure,
+ .free = ocfs2_free_fc,
+};
+
+static int ocfs2_init_fs_context(struct fs_context *fc)
+{
+ struct mount_options *mopt;
+
+ mopt = kzalloc(sizeof(struct mount_options), GFP_KERNEL);
+ if (!mopt)
+ return -ENOMEM;
+
+ mopt->commit_interval = 0;
+ mopt->mount_opt = OCFS2_MOUNT_NOINTR;
+ mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+ mopt->slot = OCFS2_INVALID_SLOT;
+ mopt->localalloc_opt = -1;
+ mopt->cluster_stack[0] = '\0';
+ mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
+ mopt->dir_resv_level = -1;
+
+ fc->fs_private = mopt;
+ fc->ops = &ocfs2_context_ops;
+
+ return 0;
}
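
With this wiring, the new-mount sequence runs roughly as sketched below; the VFS entry points are shown only for orientation, the ocfs2_* callbacks are the ones defined in this patch:

        /*
         * fs_context_for_mount(&ocfs2_fs_type, ...)
         *        -> ocfs2_init_fs_context(fc)      allocate fc->fs_private (mount_options)
         * vfs_parse_fs_string(fc, "errors", "panic", 5)
         *        -> ocfs2_parse_param(fc, param)   fill the mount_options
         * vfs_get_tree(fc)
         *        -> ocfs2_get_tree(fc)             get_tree_bdev(fc, ocfs2_fill_super)
         * put_fs_context(fc)
         *        -> ocfs2_free_fc(fc)              kfree(fc->fs_private)
         *
         * A remount instead reaches ocfs2_reconfigure() with the same
         * already-parsed mount_options in fc->fs_private.
         */
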
static struct file_system_type ocfs2_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2",
- .mount = ocfs2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
- .next = NULL
+ .next = NULL,
+ .init_fs_context = ocfs2_init_fs_context,
+ .parameters = ocfs2_param_spec,
};
MODULE_ALIAS_FS("ocfs2");
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options)
{
+ if (options->user_stack == 0) {
+ u32 tmp;
+
+ /* Ensure only one heartbeat mode */
+ tmp = options->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
+ OCFS2_MOUNT_HB_GLOBAL |
+ OCFS2_MOUNT_HB_NONE);
+ if (hweight32(tmp) != 1) {
+ mlog(ML_ERROR, "Invalid heartbeat mount options\n");
+ return 0;
+ }
+ }
if (options->mount_opt & OCFS2_MOUNT_USRQUOTA &&
!OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
@@ -1232,241 +1276,142 @@ static int ocfs2_check_set_options(struct super_block *sb,
return 1;
}
-static int ocfs2_parse_options(struct super_block *sb,
- char *options,
- struct mount_options *mopt,
- int is_remount)
+static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- int status, user_stack = 0;
- char *p;
- u32 tmp;
- int token, option;
- substring_t args[MAX_OPT_ARGS];
-
- trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
-
- mopt->commit_interval = 0;
- mopt->mount_opt = OCFS2_MOUNT_NOINTR;
- mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
- mopt->slot = OCFS2_INVALID_SLOT;
- mopt->localalloc_opt = -1;
- mopt->cluster_stack[0] = '\0';
- mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
- mopt->dir_resv_level = -1;
-
- if (!options) {
- status = 1;
- goto bail;
- }
-
- while ((p = strsep(&options, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_hb_local:
- mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL;
- break;
- case Opt_hb_none:
- mopt->mount_opt |= OCFS2_MOUNT_HB_NONE;
- break;
- case Opt_hb_global:
- mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL;
- break;
- case Opt_barrier:
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option)
- mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
- else
- mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
- break;
- case Opt_intr:
- mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
- break;
- case Opt_nointr:
+ struct fs_parse_result result;
+ int opt;
+ struct mount_options *mopt = fc->fs_private;
+ bool is_remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE);
+
+ trace_ocfs2_parse_options(is_remount, param->key);
+
+ opt = fs_parse(fc, ocfs2_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_heartbeat:
+ mopt->mount_opt |= result.uint_32;
+ break;
+ case Opt_barrier:
+ if (result.uint_32)
+ mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
+ else
+ mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
+ break;
+ case Opt_intr:
+ if (result.negated)
mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
- break;
- case Opt_err_panic:
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
- mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
- break;
- case Opt_err_ro:
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
- mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS;
- break;
- case Opt_err_cont:
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
- mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
- mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT;
- break;
- case Opt_data_ordered:
- mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
- break;
- case Opt_data_writeback:
- mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
- break;
- case Opt_user_xattr:
- mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR;
- break;
- case Opt_nouser_xattr:
+ else
+ mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
+ break;
+ case Opt_errors:
+ mopt->mount_opt &= ~(OCFS2_MOUNT_ERRORS_CONT |
+ OCFS2_MOUNT_ERRORS_ROFS |
+ OCFS2_MOUNT_ERRORS_PANIC);
+ mopt->mount_opt |= result.uint_32;
+ break;
+ case Opt_data:
+ mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
+ mopt->mount_opt |= result.uint_32;
+ break;
+ case Opt_user_xattr:
+ if (result.negated)
mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR;
- break;
- case Opt_atime_quantum:
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option >= 0)
- mopt->atime_quantum = option;
- break;
- case Opt_slot:
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option)
- mopt->slot = (u16)option;
- break;
- case Opt_commit:
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option < 0)
- return 0;
- if (option == 0)
- option = JBD2_DEFAULT_MAX_COMMIT_AGE;
- mopt->commit_interval = HZ * option;
- break;
- case Opt_localalloc:
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option >= 0)
- mopt->localalloc_opt = option;
- break;
- case Opt_localflocks:
- /*
- * Changing this during remount could race
- * flock() requests, or "unbalance" existing
- * ones (e.g., a lock is taken in one mode but
- * dropped in the other). If users care enough
- * to flip locking modes during remount, we
- * could add a "local" flag to individual
- * flock structures for proper tracking of
- * state.
- */
- if (!is_remount)
- mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
- break;
- case Opt_stack:
- /* Check both that the option we were passed
- * is of the right length and that it is a proper
- * string of the right length.
- */
- if (((args[0].to - args[0].from) !=
- OCFS2_STACK_LABEL_LEN) ||
- (strnlen(args[0].from,
- OCFS2_STACK_LABEL_LEN) !=
- OCFS2_STACK_LABEL_LEN)) {
- mlog(ML_ERROR,
- "Invalid cluster_stack option\n");
- status = 0;
- goto bail;
- }
- memcpy(mopt->cluster_stack, args[0].from,
- OCFS2_STACK_LABEL_LEN);
- mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
- /*
- * Open code the memcmp here as we don't have
- * an osb to pass to
- * ocfs2_userspace_stack().
- */
- if (memcmp(mopt->cluster_stack,
- OCFS2_CLASSIC_CLUSTER_STACK,
- OCFS2_STACK_LABEL_LEN))
- user_stack = 1;
- break;
- case Opt_inode64:
- mopt->mount_opt |= OCFS2_MOUNT_INODE64;
- break;
- case Opt_usrquota:
- mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
- break;
- case Opt_grpquota:
- mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
- break;
- case Opt_coherency_buffered:
- mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED;
- break;
- case Opt_coherency_full:
- mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED;
- break;
- case Opt_acl:
- mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
- mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
- break;
- case Opt_noacl:
+ else
+ mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR;
+ break;
+ case Opt_atime_quantum:
+ mopt->atime_quantum = result.uint_32;
+ break;
+ case Opt_slot:
+ if (result.uint_32)
+ mopt->slot = (u16)result.uint_32;
+ break;
+ case Opt_commit:
+ if (result.uint_32 == 0)
+ mopt->commit_interval = HZ * JBD2_DEFAULT_MAX_COMMIT_AGE;
+ else
+ mopt->commit_interval = HZ * result.uint_32;
+ break;
+ case Opt_localalloc:
+ if (result.int_32 >= 0)
+ mopt->localalloc_opt = result.int_32;
+ break;
+ case Opt_localflocks:
+ /*
+ * Changing this during remount could race flock() requests, or
+ * "unbalance" existing ones (e.g., a lock is taken in one mode
+ * but dropped in the other). If users care enough to flip
+ * locking modes during remount, we could add a "local" flag to
+ * individual flock structures for proper tracking of state.
+ */
+ if (!is_remount)
+ mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
+ break;
+ case Opt_stack:
+ /* Check both that the option we were passed is of the right
+ * length and that it is a proper string of the right length.
+ */
+ if (strlen(param->string) != OCFS2_STACK_LABEL_LEN) {
+ mlog(ML_ERROR, "Invalid cluster_stack option\n");
+ return -EINVAL;
+ }
+ memcpy(mopt->cluster_stack, param->string, OCFS2_STACK_LABEL_LEN);
+ mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
+ /*
+ * Open code the memcmp here as we don't have an osb to pass
+ * to ocfs2_userspace_stack().
+ */
+ if (memcmp(mopt->cluster_stack,
+ OCFS2_CLASSIC_CLUSTER_STACK,
+ OCFS2_STACK_LABEL_LEN))
+ mopt->user_stack = 1;
+ break;
+ case Opt_inode64:
+ mopt->mount_opt |= OCFS2_MOUNT_INODE64;
+ break;
+ case Opt_usrquota:
+ mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
+ break;
+ case Opt_grpquota:
+ mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
+ break;
+ case Opt_coherency:
+ mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED;
+ mopt->mount_opt |= result.uint_32;
+ break;
+ case Opt_acl:
+ if (result.negated) {
mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
+ } else {
+ mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
+ mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
+ }
+ break;
+ case Opt_resv_level:
+ if (is_remount)
break;
- case Opt_resv_level:
- if (is_remount)
- break;
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option >= OCFS2_MIN_RESV_LEVEL &&
- option < OCFS2_MAX_RESV_LEVEL)
- mopt->resv_level = option;
- break;
- case Opt_dir_resv_level:
- if (is_remount)
- break;
- if (match_int(&args[0], &option)) {
- status = 0;
- goto bail;
- }
- if (option >= OCFS2_MIN_RESV_LEVEL &&
- option < OCFS2_MAX_RESV_LEVEL)
- mopt->dir_resv_level = option;
- break;
- case Opt_journal_async_commit:
- mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
+ if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL &&
+ result.uint_32 < OCFS2_MAX_RESV_LEVEL)
+ mopt->resv_level = result.uint_32;
+ break;
+ case Opt_dir_resv_level:
+ if (is_remount)
break;
- default:
- mlog(ML_ERROR,
- "Unrecognized mount option \"%s\" "
- "or missing value\n", p);
- status = 0;
- goto bail;
- }
- }
-
- if (user_stack == 0) {
- /* Ensure only one heartbeat mode */
- tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
- OCFS2_MOUNT_HB_GLOBAL |
- OCFS2_MOUNT_HB_NONE);
- if (hweight32(tmp) != 1) {
- mlog(ML_ERROR, "Invalid heartbeat mount options\n");
- status = 0;
- goto bail;
- }
+ if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL &&
+ result.uint_32 < OCFS2_MAX_RESV_LEVEL)
+ mopt->dir_resv_level = result.uint_32;
+ break;
+ case Opt_journal_async_commit:
+ mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
+ break;
+ default:
+ return -EINVAL;
}
- status = 1;
-
-bail:
- return status;
+ return 0;
}
static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
@@ -1858,7 +1803,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
osb = OCFS2_SB(sb);
BUG_ON(!osb);
- /* Remove file check sysfs related directores/files,
+ /* Remove file check sysfs related directories/files,
* and wait for the pending file check operations */
ocfs2_filecheck_remove_sysfs(osb);
@@ -2340,7 +2285,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
mlog(ML_ERROR, "found superblock with incorrect block "
"size bits: found %u, should be 9, 10, 11, or 12\n",
blksz_bits);
- } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
+ } else if ((1 << blksz_bits) != blksz) {
mlog(ML_ERROR, "found superblock with incorrect block "
"size: found %u, should be %u\n", 1 << blksz_bits, blksz);
} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index d4c5fdcfa1e4..ad8be3300b49 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -54,31 +54,27 @@
static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct buffer_head *bh = NULL;
int status = ocfs2_read_inode_block(inode, &bh);
struct ocfs2_dinode *fe;
const char *link;
- void *kaddr;
size_t len;
if (status < 0) {
mlog_errno(status);
- return status;
+ goto out;
}
fe = (struct ocfs2_dinode *) bh->b_data;
link = (char *) fe->id2.i_symlink;
/* will be less than a page size */
len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
- kaddr = kmap_atomic(page);
- memcpy(kaddr, link, len + 1);
- kunmap_atomic(kaddr);
- SetPageUptodate(page);
- unlock_page(page);
+ memcpy_to_folio(folio, 0, link, len + 1);
+out:
+ folio_end_read(folio, status == 0);
brelse(bh);
- return 0;
+ return status;
}
const struct address_space_operations ocfs2_fast_symlink_aops = {
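
memcpy_to_folio() takes care of the highmem mapping that the kmap_atomic()/kunmap_atomic() pair did by hand, and folio_end_read() replaces the SetPageUptodate() + unlock_page() pair while also covering the error path; behaviourally it is close to:

        /* Approximation of folio_end_read(folio, ok): */
        if (ok)
                folio_mark_uptodate(folio);
        folio_unlock(folio);
        /* (the real helper folds both flag updates into a single atomic op) */
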
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 73a6f6fd8a8e..d70a20d29e3e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir,
* 256(name) + 80(value) + 16(entry) = 352 bytes,
* The max space of acl xattr taken inline is
* 80(value) + 16(entry) * 2(if directory) = 192 bytes,
- * when blocksize = 512, may reserve one more cluser for
+ * when blocksize = 512, may reserve one more cluster for
* xattr bucket, otherwise reserve one metadata block
* for them is ok.
* If this is a new directory with inline data,
@@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b)
/*
* defrag a xattr bucket if we find that the bucket has some
- * holes beteen name/value pairs.
+ * holes between name/value pairs.
* We will move all the name/value pairs to the end of the bucket
* so that we can spare some space for insertion.
*/
@@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode,
* 2. If cluster_size == bucket_size:
* a) If the previous extent rec has more than one cluster and the insert
* place isn't in the last cluster, copy the entire last cluster to the
- * new one. This time, we don't need to upate the first_bh and header_bh
+ * new one. This time, we don't need to update the first_bh and header_bh
* since they will not be moved into the new cluster.
* b) Otherwise, move the bottom half of the xattrs in the last cluster into
* the new one. And we set the extend flag to zero if the insert place is
@@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink {
/*
* Given a xattr header and xe offset,
* return the proper xv and the corresponding bh.
- * xattr in inode, block and xattr tree have different implementaions.
+ * xattr in inode, block and xattr tree have different implementations.
*/
typedef int (get_xattr_value_root)(struct super_block *sb,
struct buffer_head *bh,
@@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb,
}
/*
- * Lock the meta_ac and caculate how much credits we need for reflink xattrs.
+ * Lock the meta_ac and calculate how much credits we need for reflink xattrs.
* It is only used for inline xattr and xattr block.
*/
static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
diff --git a/fs/open.c b/fs/open.c
index ffcfef67ac86..1be20de9f283 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -81,14 +81,18 @@ long vfs_truncate(const struct path *path, loff_t length)
if (!S_ISREG(inode->i_mode))
return -EINVAL;
- error = mnt_want_write(path->mnt);
- if (error)
- goto out;
-
idmap = mnt_idmap(path->mnt);
error = inode_permission(idmap, inode, MAY_WRITE);
if (error)
- goto mnt_drop_write_and_out;
+ return error;
+
+ error = fsnotify_truncate_perm(path, length);
+ if (error)
+ return error;
+
+ error = mnt_want_write(path->mnt);
+ if (error)
+ return error;
error = -EPERM;
if (IS_APPEND(inode))
@@ -114,7 +118,7 @@ put_write_and_out:
put_write_access(inode);
mnt_drop_write_and_out:
mnt_drop_write(path->mnt);
-out:
+
return error;
}
EXPORT_SYMBOL_GPL(vfs_truncate);
@@ -175,11 +179,18 @@ long do_ftruncate(struct file *file, loff_t length, int small)
/* Check IS_APPEND on real upper inode */
if (IS_APPEND(file_inode(file)))
return -EPERM;
- sb_start_write(inode->i_sb);
+
error = security_file_truncate(file);
- if (!error)
- error = do_truncate(file_mnt_idmap(file), dentry, length,
- ATTR_MTIME | ATTR_CTIME, file);
+ if (error)
+ return error;
+
+ error = fsnotify_truncate_perm(&file->f_path, length);
+ if (error)
+ return error;
+
+ sb_start_write(inode->i_sb);
+ error = do_truncate(file_mnt_idmap(file), dentry, length,
+ ATTR_MTIME | ATTR_CTIME, file);
sb_end_write(inode->i_sb);
return error;
@@ -895,6 +906,7 @@ static int do_dentry_open(struct file *f,
if (unlikely(f->f_flags & O_PATH)) {
f->f_mode = FMODE_PATH | FMODE_OPENED;
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY);
f->f_op = &empty_fops;
return 0;
}
@@ -922,6 +934,12 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
+ /*
+ * Set FMODE_NONOTIFY_* bits according to existing permission watches.
+ * If FMODE_NONOTIFY mode was already set for an fanotify fd or for a
+ * pseudo file, this call will not change the mode.
+ */
+ file_set_fsnotify_mode_from_watchers(f);
error = fsnotify_open_perm(f);
if (error)
goto cleanup_all;
@@ -1098,6 +1116,23 @@ struct file *dentry_open(const struct path *path, int flags,
}
EXPORT_SYMBOL(dentry_open);
+struct file *dentry_open_nonotify(const struct path *path, int flags,
+ const struct cred *cred)
+{
+ struct file *f = alloc_empty_file(flags, cred);
+ if (!IS_ERR(f)) {
+ int error;
+
+ file_set_fsnotify_mode(f, FMODE_NONOTIFY);
+ error = vfs_open(path, f);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+ }
+ return f;
+}
+
/**
* dentry_create - Create and open a file
* @path: path to create
@@ -1195,7 +1230,7 @@ inline struct open_how build_open_how(int flags, umode_t mode)
inline int build_open_flags(const struct open_how *how, struct open_flags *op)
{
u64 flags = how->flags;
- u64 strip = __FMODE_NONOTIFY | O_CLOEXEC;
+ u64 strip = O_CLOEXEC;
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
@@ -1203,9 +1238,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
"struct open_flags doesn't yet handle flags > 32 bits");
/*
- * Strip flags that either shouldn't be set by userspace like
- * FMODE_NONOTIFY or that aren't relevant in determining struct
- * open_flags like O_CLOEXEC.
+ * Strip flags that aren't relevant in determining struct open_flags.
*/
flags &= ~strip;
@@ -1497,7 +1530,7 @@ static int filp_flush(struct file *filp, fl_owner_t id)
{
int retval = 0;
- if (CHECK_DATA_CORRUPTION(file_count(filp) == 0,
+ if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, filp,
"VFS: Close: file count is 0 (f_op=%ps)",
filp->f_op)) {
return 0;
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 395a00ed8ac7..a19d1ad705db 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -13,10 +13,9 @@
#include "orangefs-kernel.h"
/* Returns 1 if dentry can still be trusted, else 0. */
-static int orangefs_revalidate_lookup(struct dentry *dentry)
+static int orangefs_revalidate_lookup(struct inode *parent_inode, const struct qstr *name,
+ struct dentry *dentry)
{
- struct dentry *parent_dentry = dget_parent(dentry);
- struct inode *parent_inode = parent_dentry->d_inode;
struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode);
struct inode *inode = dentry->d_inode;
struct orangefs_kernel_op_s *new_op;
@@ -26,14 +25,14 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__);
new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
- if (!new_op) {
- ret = -ENOMEM;
- goto out_put_parent;
- }
+ if (!new_op)
+ return -ENOMEM;
new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
new_op->upcall.req.lookup.parent_refn = parent->refn;
- strscpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name);
+ /* op_alloc() leaves ->upcall zeroed */
+ memcpy(new_op->upcall.req.lookup.d_name, name->name,
+ min(name->len, ORANGEFS_NAME_MAX - 1));
gossip_debug(GOSSIP_DCACHE_DEBUG,
"%s:%s:%d interrupt flag [%d]\n",
@@ -78,8 +77,6 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
ret = 1;
out_release_op:
op_release(new_op);
-out_put_parent:
- dput(parent_dentry);
return ret;
out_drop:
gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n",
@@ -92,7 +89,8 @@ out_drop:
*
* Should return 1 if dentry can still be trusted, else 0.
*/
-static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int orangefs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
int ret;
unsigned long time = (unsigned long) dentry->d_fsdata;
@@ -114,7 +112,7 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
* If this passes, the positive dentry still exists or the negative
* dentry still does not exist.
*/
- if (!orangefs_revalidate_lookup(dentry))
+ if (!orangefs_revalidate_lookup(dir, name, dentry))
return 0;
/* We do not need to continue with negative dentries. */
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 1b508f543384..f52073022fae 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -206,8 +206,8 @@ static void orangefs_kernel_debug_init(void)
pr_info("%s: overflow 1!\n", __func__);
}
- debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
- &kernel_debug_fops);
+ debugfs_create_file_aux_num(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
+ 0, &kernel_debug_fops);
}
@@ -306,11 +306,10 @@ static void orangefs_client_debug_init(void)
pr_info("%s: overflow! 2\n", __func__);
}
- client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE,
- 0444,
- debug_dir,
- c_buffer,
- &kernel_debug_fops);
+ client_debug_dentry = debugfs_create_file_aux_num(
+ ORANGEFS_CLIENT_DEBUG_FILE,
+ 0444, debug_dir, c_buffer, 1,
+ &kernel_debug_fops);
}
/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
@@ -393,9 +392,9 @@ static ssize_t orangefs_debug_write(struct file *file,
* Thwart users who try to jamb a ridiculous number
* of bytes into the debug file...
*/
- if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) {
+ if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) {
silly = count;
- count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1;
+ count = ORANGEFS_MAX_DEBUG_STRING_LEN;
}
buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
@@ -418,8 +417,7 @@ static ssize_t orangefs_debug_write(struct file *file,
* A service operation is required to set a new client-side
* debug mask.
*/
- if (!strcmp(file->f_path.dentry->d_name.name,
- ORANGEFS_KMOD_DEBUG_FILE)) {
+ if (!debugfs_get_aux_num(file)) { // kernel-debug
debug_string_to_mask(buf, &orangefs_gossip_debug_mask, 0);
debug_mask_to_string(&orangefs_gossip_debug_mask, 0);
debug_string = kernel_debug_string;
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index cea820cb3b55..be5c65d6f848 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -14,8 +14,6 @@
#include <linux/exportfs.h>
#include "overlayfs.h"
-#include "../internal.h" /* for vfs_path_lookup */
-
struct ovl_lookup_data {
struct super_block *sb;
const struct ovl_layer *layer;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index fe511192f83c..86ae6f6da36b 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -91,7 +91,24 @@ static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE)
ret = d->d_op->d_weak_revalidate(d, flags);
} else if (d->d_flags & DCACHE_OP_REVALIDATE) {
- ret = d->d_op->d_revalidate(d, flags);
+ struct dentry *parent;
+ struct inode *dir;
+ struct name_snapshot n;
+
+ if (flags & LOOKUP_RCU) {
+ parent = READ_ONCE(d->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ } else {
+ parent = dget_parent(d);
+ dir = d_inode(parent);
+ }
+ take_dentry_name_snapshot(&n, d);
+ ret = d->d_op->d_revalidate(dir, &n.name, d, flags);
+ release_dentry_name_snapshot(&n);
+ if (!(flags & LOOKUP_RCU))
+ dput(parent);
if (!ret) {
if (!(flags & LOOKUP_RCU))
d_invalidate(d);
@@ -127,7 +144,8 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
return ret;
}
-static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+static int ovl_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
return ovl_dentry_revalidate_common(dentry, flags, false);
}
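
Several hunks in this series (orangefs above, overlayfs here, procfs below) move ->d_revalidate() from taking just (dentry, flags) to also receiving the parent inode and the name, so individual filesystems no longer need the dget_parent()/d_name games that are racy under RCU-walk; overlayfs reconstructs those arguments for the underlying filesystem with a name snapshot as shown above. The prototype they all converge on is:

        int (*d_revalidate)(struct inode *dir, const struct qstr *name,
                            struct dentry *dentry, unsigned int flags);
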
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 049352f973de..63f9699ebac3 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -287,7 +287,6 @@ static bool pidfs_ioctl_valid(unsigned int cmd)
switch (cmd) {
case FS_IOC_GETVERSION:
case PIDFD_GET_CGROUP_NAMESPACE:
- case PIDFD_GET_INFO:
case PIDFD_GET_IPC_NAMESPACE:
case PIDFD_GET_MNT_NAMESPACE:
case PIDFD_GET_NET_NAMESPACE:
@@ -300,6 +299,17 @@ static bool pidfs_ioctl_valid(unsigned int cmd)
return true;
}
+ /* Extensible ioctls require some more careful checks. */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(PIDFD_GET_INFO):
+ /*
+ * Try to prevent performing a pidfd ioctl when someone
+ * erronously mistook the file descriptor for a pidfd.
+ * This is not perfect but will catch most cases.
+ */
+ return (_IOC_TYPE(cmd) == _IOC_TYPE(PIDFD_GET_INFO));
+ }
+
return false;
}
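
PIDFD_GET_INFO is an extensible ioctl: the size of struct pidfd_info is encoded in the request number, so userspace built against a grown struct issues a numerically different cmd. Matching on _IOC_NR() and confirming _IOC_TYPE() accepts every size variant while still filtering out ioctls aimed at the wrong file descriptor. For orientation, the standard encoding macros:

        /*
         * cmd = _IOC(dir, type, nr, size)
         *
         * _IOC_TYPE(cmd)  ioctl "magic" -- must match PIDFD_GET_INFO's type
         * _IOC_NR(cmd)    command index -- stable as struct pidfd_info grows
         * _IOC_SIZE(cmd)  the struct size the caller was compiled against
         */
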
diff --git a/fs/pipe.c b/fs/pipe.c
index 82fede0f2111..ce1af7592780 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -960,6 +960,12 @@ int create_pipe_files(struct file **res, int flags)
res[1] = f;
stream_open(inode, res[0]);
stream_open(inode, res[1]);
+ /*
+ * Disable permission and pre-content events, but enable legacy
+ * inotify events for legacy users.
+ */
+ file_set_fsnotify_mode(res[0], FMODE_NONOTIFY_PERM);
+ file_set_fsnotify_mode(res[1], FMODE_NONOTIFY_PERM);
return 0;
}
@@ -1478,7 +1484,7 @@ static int proc_dopipe_max_size(const struct ctl_table *table, int write,
do_proc_dopipe_max_size_conv, NULL);
}
-static struct ctl_table fs_pipe_sysctls[] = {
+static const struct ctl_table fs_pipe_sysctls[] = {
{
.procname = "pipe-max-size",
.data = &pipe_max_size,
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index d80a1431ef7b..6ae966c561e7 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -61,6 +61,25 @@ config PROC_VMCORE_DEVICE_DUMP
as ELF notes to /proc/vmcore. You can still disable device
dump using the kernel command line option 'novmcoredd'.
+config NEED_PROC_VMCORE_DEVICE_RAM
+ bool
+
+config PROC_VMCORE_DEVICE_RAM
+ def_bool y
+ depends on PROC_VMCORE && NEED_PROC_VMCORE_DEVICE_RAM
+ depends on VIRTIO_MEM
+ help
+ If the elfcore hdr is allocated and prepared by the dump kernel
+ ("2nd kernel") instead of the crashed kernel, RAM provided by memory
+ devices such as virtio-mem will not be included in the dump
+ image, because only the device driver can properly detect them.
+
+ With this config enabled, these RAM ranges will be queried from the
+ device drivers once the device gets probed, so they can be included
+ in the crash dump.
+
+ Relevant architectures should select NEED_PROC_VMCORE_DEVICE_RAM.
+
config PROC_SYSCTL
bool "Sysctl support (/proc/sys)" if EXPERT
depends on PROC_FS
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0edf14a9840e..cd89e956c322 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2058,7 +2058,8 @@ void pid_update_inode(struct task_struct *task, struct inode *inode)
* performed a setuid(), etc.
*
*/
-static int pid_revalidate(struct dentry *dentry, unsigned int flags)
+static int pid_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
struct task_struct *task;
@@ -2191,7 +2192,8 @@ static int dname_to_vma_addr(struct dentry *dentry,
return 0;
}
-static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int map_files_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
unsigned long vm_start, vm_end;
bool exact_vma_exists = false;
@@ -3269,6 +3271,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct mm_struct *mm;
+ int ret = 0;
mm = get_task_mm(task);
if (mm) {
@@ -3276,6 +3279,16 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
+ seq_printf(m, "ksm_merge_any: %s\n",
+ test_bit(MMF_VM_MERGE_ANY, &mm->flags) ? "yes" : "no");
+ ret = mmap_read_lock_killable(mm);
+ if (ret) {
+ mmput(mm);
+ return ret;
+ }
+ seq_printf(m, "ksm_mergeable: %s\n",
+ ksm_process_mergeable(mm) ? "yes" : "no");
+ mmap_read_unlock(mm);
mmput(mm);
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 24baf23e864f..37aa778d1af7 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -140,7 +140,8 @@ static void tid_fd_update_inode(struct task_struct *task, struct inode *inode,
security_task_to_inode(task, inode);
}
-static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
+static int tid_fd_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct task_struct *task;
struct inode *inode;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index dbe82cf23ee4..8ec90826a49e 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -216,7 +216,8 @@ void proc_free_inum(unsigned int inum)
ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
}
-static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int proc_misc_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -343,7 +344,8 @@ static const struct file_operations proc_dir_operations = {
.iterate_shared = proc_readdir,
};
-static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int proc_net_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
return 0;
}
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 27a283d85a6e..cc9d74a06ff0 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -884,7 +884,8 @@ static const struct inode_operations proc_sys_dir_operations = {
.getattr = proc_sys_getattr,
};
-static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags)
+static int proc_sys_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 658bf199d424..a00120a3c099 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -8,6 +8,8 @@
*
*/
+#define pr_fmt(fmt) "vmcore: " fmt
+
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
@@ -51,9 +53,14 @@ static u64 vmcore_size;
static struct proc_dir_entry *proc_vmcore;
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
+struct vmcoredd_node {
+ struct list_head list; /* List of dumps */
+ void *buf; /* Buffer containing device's dump */
+ unsigned int size; /* Size of the buffer */
+};
+
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
-static DEFINE_MUTEX(vmcoredd_mutex);
static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
@@ -62,17 +69,22 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0);
/* Device Dump Size */
static size_t vmcoredd_orig_sz;
-static DEFINE_SPINLOCK(vmcore_cb_lock);
+static DEFINE_MUTEX(vmcore_mutex);
+
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;
+/* Whether the vmcore is currently open. */
+static unsigned int vmcore_open;
+
+static void vmcore_process_device_ram(struct vmcore_cb *cb);
void register_vmcore_cb(struct vmcore_cb *cb)
{
INIT_LIST_HEAD(&cb->next);
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
list_add_tail(&cb->next, &vmcore_cb_list);
/*
* Registering a vmcore callback after the vmcore was opened is
@@ -80,13 +92,15 @@ void register_vmcore_cb(struct vmcore_cb *cb)
*/
if (vmcore_opened)
pr_warn_once("Unexpected vmcore callback registration\n");
- spin_unlock(&vmcore_cb_lock);
+ if (!vmcore_open && cb->get_device_ram)
+ vmcore_process_device_ram(cb);
+ mutex_unlock(&vmcore_mutex);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
void unregister_vmcore_cb(struct vmcore_cb *cb)
{
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
list_del_rcu(&cb->next);
/*
* Unregistering a vmcore callback after the vmcore was opened is
@@ -95,7 +109,7 @@ void unregister_vmcore_cb(struct vmcore_cb *cb)
*/
if (vmcore_opened)
pr_warn_once("Unexpected vmcore callback unregistration\n");
- spin_unlock(&vmcore_cb_lock);
+ mutex_unlock(&vmcore_mutex);
synchronize_srcu(&vmcore_cb_srcu);
}
@@ -120,9 +134,23 @@ static bool pfn_is_ram(unsigned long pfn)
static int open_vmcore(struct inode *inode, struct file *file)
{
- spin_lock(&vmcore_cb_lock);
+ mutex_lock(&vmcore_mutex);
vmcore_opened = true;
- spin_unlock(&vmcore_cb_lock);
+ if (vmcore_open + 1 == 0) {
+ mutex_unlock(&vmcore_mutex);
+ return -EBUSY;
+ }
+ vmcore_open++;
+ mutex_unlock(&vmcore_mutex);
+
+ return 0;
+}
+
+static int release_vmcore(struct inode *inode, struct file *file)
+{
+ mutex_lock(&vmcore_mutex);
+ vmcore_open--;
+ mutex_unlock(&vmcore_mutex);
return 0;
}
@@ -243,33 +271,27 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
struct vmcoredd_node *dump;
u64 offset = 0;
- int ret = 0;
size_t tsz;
char *buf;
- mutex_lock(&vmcoredd_mutex);
list_for_each_entry(dump, &vmcoredd_list, list) {
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
- if (copy_to_iter(buf, tsz, iter) < tsz) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (copy_to_iter(buf, tsz, iter) < tsz)
+ return -EFAULT;
size -= tsz;
start += tsz;
/* Leave now if buffer filled already */
if (!size)
- goto out_unlock;
+ return 0;
}
offset += dump->size;
}
-out_unlock:
- mutex_unlock(&vmcoredd_mutex);
- return ret;
+ return 0;
}
#ifdef CONFIG_MMU
@@ -278,20 +300,16 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
{
struct vmcoredd_node *dump;
u64 offset = 0;
- int ret = 0;
size_t tsz;
char *buf;
- mutex_lock(&vmcoredd_mutex);
list_for_each_entry(dump, &vmcoredd_list, list) {
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
if (remap_vmalloc_range_partial(vma, dst, buf, 0,
- tsz)) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ tsz))
+ return -EFAULT;
size -= tsz;
start += tsz;
@@ -299,14 +317,12 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
/* Leave now if buffer filled already */
if (!size)
- goto out_unlock;
+ return 0;
}
offset += dump->size;
}
-out_unlock:
- mutex_unlock(&vmcoredd_mutex);
- return ret;
+ return 0;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
@@ -316,10 +332,10 @@ out_unlock:
*/
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
+ struct vmcore_range *m = NULL;
ssize_t acc = 0, tmp;
size_t tsz;
u64 start;
- struct vmcore *m = NULL;
if (!iov_iter_count(iter) || *fpos >= vmcore_size)
return 0;
@@ -576,7 +592,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
u64 start, end, len, tsz;
- struct vmcore *m;
+ struct vmcore_range *m;
start = (u64)vma->vm_pgoff << PAGE_SHIFT;
end = start + size;
@@ -693,21 +709,17 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
static const struct proc_ops vmcore_proc_ops = {
.proc_open = open_vmcore,
+ .proc_release = release_vmcore,
.proc_read_iter = read_vmcore,
.proc_lseek = default_llseek,
.proc_mmap = mmap_vmcore,
};
-static struct vmcore* __init get_new_element(void)
-{
- return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
-}
-
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
struct list_head *vc_list)
{
+ struct vmcore_range *m;
u64 size;
- struct vmcore *m;
size = elfsz + elfnotesegsz;
list_for_each_entry(m, vc_list, list) {
@@ -1109,7 +1121,6 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
Elf64_Ehdr *ehdr_ptr;
Elf64_Phdr *phdr_ptr;
loff_t vmcore_off;
- struct vmcore *new;
ehdr_ptr = (Elf64_Ehdr *)elfptr;
phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
@@ -1128,13 +1139,8 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
size = end - start;
- /* Add this contiguous chunk of memory to vmcore list.*/
- new = get_new_element();
- if (!new)
+ if (vmcore_alloc_add_range(vc_list, start, size))
return -ENOMEM;
- new->paddr = start;
- new->size = size;
- list_add_tail(&new->list, vc_list);
/* Update the program header offset. */
phdr_ptr->p_offset = vmcore_off + (paddr - start);
@@ -1152,7 +1158,6 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
Elf32_Ehdr *ehdr_ptr;
Elf32_Phdr *phdr_ptr;
loff_t vmcore_off;
- struct vmcore *new;
ehdr_ptr = (Elf32_Ehdr *)elfptr;
phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
@@ -1171,13 +1176,8 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
size = end - start;
- /* Add this contiguous chunk of memory to vmcore list.*/
- new = get_new_element();
- if (!new)
+ if (vmcore_alloc_add_range(vc_list, start, size))
return -ENOMEM;
- new->paddr = start;
- new->size = size;
- list_add_tail(&new->list, vc_list);
/* Update the program header offset */
phdr_ptr->p_offset = vmcore_off + (paddr - start);
@@ -1190,8 +1190,8 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
struct list_head *vc_list)
{
+ struct vmcore_range *m;
loff_t vmcore_off;
- struct vmcore *m;
/* Skip ELF header, program headers and ELF note segment. */
vmcore_off = elfsz + elfnotes_sz;
@@ -1518,12 +1518,18 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
dump->buf = buf;
dump->size = data_size;
- /* Add the dump to driver sysfs list */
- mutex_lock(&vmcoredd_mutex);
- list_add_tail(&dump->list, &vmcoredd_list);
- mutex_unlock(&vmcoredd_mutex);
+ /* Add the dump to driver sysfs list and update the elfcore hdr */
+ mutex_lock(&vmcore_mutex);
+ if (vmcore_opened)
+ pr_warn_once("Unexpected adding of device dump\n");
+ if (vmcore_open) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ list_add_tail(&dump->list, &vmcoredd_list);
vmcoredd_update_size(data_size);
+ mutex_unlock(&vmcore_mutex);
return 0;
out_err:
@@ -1535,11 +1541,163 @@ out_err:
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM
+static int vmcore_realloc_elfcore_buffer_elf64(size_t new_size)
+{
+ char *elfcorebuf_new;
+
+ if (WARN_ON_ONCE(new_size < elfcorebuf_sz))
+ return -EINVAL;
+ if (get_order(elfcorebuf_sz_orig) == get_order(new_size)) {
+ elfcorebuf_sz_orig = new_size;
+ return 0;
+ }
+
+ elfcorebuf_new = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(new_size));
+ if (!elfcorebuf_new)
+ return -ENOMEM;
+ memcpy(elfcorebuf_new, elfcorebuf, elfcorebuf_sz);
+ free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
+ elfcorebuf = elfcorebuf_new;
+ elfcorebuf_sz_orig = new_size;
+ return 0;
+}
+
+static void vmcore_reset_offsets_elf64(void)
+{
+ Elf64_Phdr *phdr_start = (Elf64_Phdr *)(elfcorebuf + sizeof(Elf64_Ehdr));
+ loff_t vmcore_off = elfcorebuf_sz + elfnotes_sz;
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfcorebuf;
+ Elf64_Phdr *phdr;
+ int i;
+
+ for (i = 0, phdr = phdr_start; i < ehdr->e_phnum; i++, phdr++) {
+ u64 start, end;
+
+ /*
+ * After merge_note_headers_elf64() we should only have a single
+ * PT_NOTE entry that starts immediately after elfcorebuf_sz.
+ */
+ if (phdr->p_type == PT_NOTE) {
+ phdr->p_offset = elfcorebuf_sz;
+ continue;
+ }
+
+ start = rounddown(phdr->p_offset, PAGE_SIZE);
+ end = roundup(phdr->p_offset + phdr->p_memsz, PAGE_SIZE);
+ phdr->p_offset = vmcore_off + (phdr->p_offset - start);
+ vmcore_off = vmcore_off + end - start;
+ }
+ set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
+}
+
+static int vmcore_add_device_ram_elf64(struct list_head *list, size_t count)
+{
+ Elf64_Phdr *phdr_start = (Elf64_Phdr *)(elfcorebuf + sizeof(Elf64_Ehdr));
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfcorebuf;
+ struct vmcore_range *cur;
+ Elf64_Phdr *phdr;
+ size_t new_size;
+ int rc;
+
+ if ((Elf32_Half)(ehdr->e_phnum + count) != ehdr->e_phnum + count) {
+ pr_err("too many device ram ranges\n");
+ return -ENOSPC;
+ }
+
+ /* elfcorebuf_sz must always cover full pages. */
+ new_size = sizeof(Elf64_Ehdr) +
+ (ehdr->e_phnum + count) * sizeof(Elf64_Phdr);
+ new_size = roundup(new_size, PAGE_SIZE);
+
+ /*
+ * Make sure we have sufficient space to include the new PT_LOAD
+ * entries.
+ */
+ rc = vmcore_realloc_elfcore_buffer_elf64(new_size);
+ if (rc) {
+ pr_err("resizing elfcore failed\n");
+ return rc;
+ }
+
+ /* Modify our used elfcore buffer size to cover the new entries. */
+ elfcorebuf_sz = new_size;
+
+ /* Fill the added PT_LOAD entries. */
+ phdr = phdr_start + ehdr->e_phnum;
+ list_for_each_entry(cur, list, list) {
+ WARN_ON_ONCE(!IS_ALIGNED(cur->paddr | cur->size, PAGE_SIZE));
+ elfcorehdr_fill_device_ram_ptload_elf64(phdr, cur->paddr, cur->size);
+
+ /* p_offset will be adjusted later. */
+ phdr++;
+ ehdr->e_phnum++;
+ }
+ list_splice_tail(list, &vmcore_list);
+
+ /* We changed elfcorebuf_sz and added new entries; reset all offsets. */
+ vmcore_reset_offsets_elf64();
+
+ /* Finally, recalculate the total vmcore size. */
+ vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
+ &vmcore_list);
+ proc_vmcore->size = vmcore_size;
+ return 0;
+}
+
+static void vmcore_process_device_ram(struct vmcore_cb *cb)
+{
+ unsigned char *e_ident = (unsigned char *)elfcorebuf;
+ struct vmcore_range *first, *m;
+ LIST_HEAD(list);
+ int count;
+
+ /* We only support Elf64 dumps for now. */
+ if (WARN_ON_ONCE(e_ident[EI_CLASS] != ELFCLASS64)) {
+ pr_err("device ram ranges only support Elf64\n");
+ return;
+ }
+
+ if (cb->get_device_ram(cb, &list)) {
+ pr_err("obtaining device ram ranges failed\n");
+ return;
+ }
+ count = list_count_nodes(&list);
+ if (!count)
+ return;
+
+ /*
+ * For some reason these ranges are already known? This might happen
+ * with unusual register->unregister->register sequences; we'll simply
+ * sanity check using the first range.
+ */
+ first = list_first_entry(&list, struct vmcore_range, list);
+ list_for_each_entry(m, &vmcore_list, list) {
+ unsigned long long m_end = m->paddr + m->size;
+ unsigned long long first_end = first->paddr + first->size;
+
+ if (first->paddr < m_end && m->paddr < first_end)
+ goto out_free;
+ }
+
+ /* If adding the mem nodes succeeds, they must not be freed. */
+ if (!vmcore_add_device_ram_elf64(&list, count))
+ return;
+out_free:
+ vmcore_free_ranges(&list);
+}
+#else /* !CONFIG_PROC_VMCORE_DEVICE_RAM */
+static void vmcore_process_device_ram(struct vmcore_cb *cb)
+{
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */
+
/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
- mutex_lock(&vmcoredd_mutex);
+ mutex_lock(&vmcore_mutex);
while (!list_empty(&vmcoredd_list)) {
struct vmcoredd_node *dump;
@@ -1549,7 +1707,7 @@ static void vmcore_free_device_dumps(void)
vfree(dump->buf);
vfree(dump);
}
- mutex_unlock(&vmcoredd_mutex);
+ mutex_unlock(&vmcore_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
@@ -1571,7 +1729,7 @@ static int __init vmcore_init(void)
rc = parse_crash_elf_headers();
if (rc) {
elfcorehdr_free(elfcorehdr_addr);
- pr_warn("Kdump: vmcore not initialized\n");
+ pr_warn("not initialized\n");
return rc;
}
elfcorehdr_free(elfcorehdr_addr);
@@ -1592,14 +1750,7 @@ void vmcore_cleanup(void)
proc_vmcore = NULL;
}
- /* clear the vmcore list. */
- while (!list_empty(&vmcore_list)) {
- struct vmcore *m;
-
- m = list_first_entry(&vmcore_list, struct vmcore, list);
- list_del(&m->list);
- kfree(m);
- }
+ vmcore_free_ranges(&vmcore_list);
free_elfcorebuf();
/* clear vmcore device dump list */
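With the vmcore changes above, register_vmcore_cb() immediately invokes a callback's get_device_ram() hook (as long as /proc/vmcore is not currently open) and folds the returned, page-aligned ranges into the ELF core header as extra PT_LOAD entries. A minimal sketch of the driver side, assuming vmcore_alloc_add_range() is available to callback implementations; the device name and addresses are made up for illustration:

static int exampledev_get_device_ram(struct vmcore_cb *cb,
				     struct list_head *list)
{
	/* One PAGE_SIZE-aligned physical range plugged by this
	 * (hypothetical) device in the crashed kernel. */
	return vmcore_alloc_add_range(list, 0x100000000ULL,
				      128ULL * 1024 * 1024);
}

static struct vmcore_cb exampledev_vmcore_cb = {
	.get_device_ram	= exampledev_get_device_ram,
};

/* register_vmcore_cb(&exampledev_vmcore_cb) would then merge the range
 * into vmcore_list and grow the vmcore size accordingly. */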
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index f9578918cfb2..825c5c2e0962 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2926,7 +2926,7 @@ static int do_proc_dqstats(const struct ctl_table *table, int write,
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table fs_dqstats_table[] = {
+static const struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c
index b5724ef9f182..214a44509e7b 100644
--- a/fs/smb/client/asn1.c
+++ b/fs/smb/client/asn1.c
@@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
server->sec_kerberos = true;
else if (oid == OID_ntlmssp)
server->sec_ntlmssp = true;
+ else if (oid == OID_IAKerb)
+ server->sec_iakerb = true;
else {
char buf[50];
diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
index 28f568b5fc27..bc1c1e9b288a 100644
--- a/fs/smb/client/cifs_spnego.c
+++ b/fs/smb/client/cifs_spnego.c
@@ -138,11 +138,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
dp = description + strlen(description);
- /* for now, only sec=krb5 and sec=mskrb5 are valid */
+ /* for now, only sec=krb5, sec=mskrb5 and sec=iakerb are valid */
if (server->sec_kerberos)
sprintf(dp, ";sec=krb5");
else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
+ else if (server->sec_iakerb)
+ sprintf(dp, ";sec=iakerb");
else {
cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
sprintf(dp, ";sec=krb5");
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index ba79aa2107cc..699a3f76d083 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -1395,7 +1395,7 @@ chown_chgrp_exit:
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
const struct cifs_fid *cifsfid, u32 *pacllen,
- u32 __maybe_unused unused)
+ u32 info)
{
struct smb_ntsd *pntsd = NULL;
unsigned int xid;
@@ -1407,7 +1407,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
xid = get_xid();
rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
- pacllen);
+ pacllen, info);
free_xid(xid);
cifs_put_tlink(tlink);
@@ -1419,7 +1419,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
}
static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
- const char *path, u32 *pacllen)
+ const char *path, u32 *pacllen, u32 info)
{
struct smb_ntsd *pntsd = NULL;
int oplock = 0;
@@ -1446,9 +1446,12 @@ static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
.fid = &fid,
};
+ if (info & SACL_SECINFO)
+ oparms.desired_access |= SYSTEM_SECURITY;
+
rc = CIFS_open(xid, &oparms, &oplock, NULL);
if (!rc) {
- rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
+ rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen, info);
CIFSSMBClose(xid, tcon, fid.netfid);
}
@@ -1472,7 +1475,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
if (inode)
open_file = find_readable_file(CIFS_I(inode), true);
if (!open_file)
- return get_cifs_acl_by_path(cifs_sb, path, pacllen);
+ return get_cifs_acl_by_path(cifs_sb, path, pacllen, info);
pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
cifsFileInfo_put(open_file);
@@ -1485,7 +1488,7 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
{
int oplock = 0;
unsigned int xid;
- int rc, access_flags;
+ int rc, access_flags = 0;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1498,10 +1501,12 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
- access_flags = WRITE_OWNER;
- else
- access_flags = WRITE_DAC;
+ if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
+ access_flags |= WRITE_OWNER;
+ if (aclflag & CIFS_ACL_SACL)
+ access_flags |= SYSTEM_SECURITY;
+ if (aclflag & CIFS_ACL_DACL)
+ access_flags |= WRITE_DAC;
oparms = (struct cifs_open_parms) {
.tcon = tcon,
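set_cifs_acl() above now treats aclflag as a bitmask instead of an exclusive value, so owner/group, DACL and SACL updates can be combined in one call, and requesting the SACL adds SYSTEM_SECURITY to the desired access for the open. A small worked sketch of the accumulation (the helper name and input are chosen for illustration):

static u32 example_access_for(int aclflag)
{
	u32 access_flags = 0;

	if (aclflag & (CIFS_ACL_OWNER | CIFS_ACL_GROUP))
		access_flags |= WRITE_OWNER;
	if (aclflag & CIFS_ACL_SACL)
		access_flags |= SYSTEM_SECURITY;
	if (aclflag & CIFS_ACL_DACL)
		access_flags |= WRITE_DAC;
	return access_flags;
}

/* example_access_for(CIFS_ACL_DACL | CIFS_ACL_SACL)
 *	== WRITE_DAC | SYSTEM_SECURITY */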
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 7a43daacc815..e69968e88fe7 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -315,59 +315,72 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
return 0;
}
-/* Server has provided av pairs/target info in the type 2 challenge
- * packet and we have plucked it and stored within smb session.
- * We parse that blob here to find netbios domain name to be used
- * as part of ntlmv2 authentication (in Target String), if not already
- * specified on the command line.
- * If this function returns without any error but without fetching
- * domain name, authentication may fail against some server but
- * may not fail against other (those who are not very particular
- * about target string i.e. for some, just user name might suffice.
+#define AV_TYPE(av) (le16_to_cpu(av->type))
+#define AV_LEN(av) (le16_to_cpu(av->length))
+#define AV_DATA_PTR(av) ((void *)av->data)
+
+#define av_for_each_entry(ses, av) \
+ for (av = NULL; (av = find_next_av(ses, av));)
+
+static struct ntlmssp2_name *find_next_av(struct cifs_ses *ses,
+ struct ntlmssp2_name *av)
+{
+ u16 len;
+ u8 *end;
+
+ end = (u8 *)ses->auth_key.response + ses->auth_key.len;
+ if (!av) {
+ if (unlikely(!ses->auth_key.response || !ses->auth_key.len))
+ return NULL;
+ av = (void *)ses->auth_key.response;
+ } else {
+ av = (void *)((u8 *)av + sizeof(*av) + AV_LEN(av));
+ }
+
+ if ((u8 *)av + sizeof(*av) > end)
+ return NULL;
+
+ len = AV_LEN(av);
+ if (AV_TYPE(av) == NTLMSSP_AV_EOL)
+ return NULL;
+ if (!len || (u8 *)av + sizeof(*av) + len > end)
+ return NULL;
+ return av;
+}
+
+/*
+ * Check if server has provided av pair of @type in the NTLMSSP
+ * CHALLENGE_MESSAGE blob.
*/
-static int
-find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+static int find_av_name(struct cifs_ses *ses, u16 type, char **name, u16 maxlen)
{
- unsigned int attrsize;
- unsigned int type;
- unsigned int onesize = sizeof(struct ntlmssp2_name);
- unsigned char *blobptr;
- unsigned char *blobend;
- struct ntlmssp2_name *attrptr;
+ const struct nls_table *nlsc = ses->local_nls;
+ struct ntlmssp2_name *av;
+ u16 len, nlen;
- if (!ses->auth_key.len || !ses->auth_key.response)
+ if (*name)
return 0;
- blobptr = ses->auth_key.response;
- blobend = blobptr + ses->auth_key.len;
-
- while (blobptr + onesize < blobend) {
- attrptr = (struct ntlmssp2_name *) blobptr;
- type = le16_to_cpu(attrptr->type);
- if (type == NTLMSSP_AV_EOL)
- break;
- blobptr += 2; /* advance attr type */
- attrsize = le16_to_cpu(attrptr->length);
- blobptr += 2; /* advance attr size */
- if (blobptr + attrsize > blobend)
+ av_for_each_entry(ses, av) {
+ len = AV_LEN(av);
+ if (AV_TYPE(av) != type)
+ continue;
+ if (!IS_ALIGNED(len, sizeof(__le16))) {
+ cifs_dbg(VFS | ONCE, "%s: bad length(%u) for type %u\n",
+ __func__, len, type);
+ continue;
+ }
+ nlen = len / sizeof(__le16);
+ if (nlen <= maxlen) {
+ ++nlen;
+ *name = kmalloc(nlen, GFP_KERNEL);
+ if (!*name)
+ return -ENOMEM;
+ cifs_from_utf16(*name, AV_DATA_PTR(av), nlen,
+ len, nlsc, NO_MAP_UNI_RSVD);
break;
- if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
- break;
- if (!ses->domainName) {
- ses->domainName =
- kmalloc(attrsize + 1, GFP_KERNEL);
- if (!ses->domainName)
- return -ENOMEM;
- cifs_from_utf16(ses->domainName,
- (__le16 *)blobptr, attrsize, attrsize,
- nls_cp, NO_MAP_UNI_RSVD);
- break;
- }
}
- blobptr += attrsize; /* advance attr value */
}
-
return 0;
}
@@ -377,40 +390,16 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
* as part of ntlmv2 authentication (or local current time as
* default in case of failure)
*/
-static __le64
-find_timestamp(struct cifs_ses *ses)
+static __le64 find_timestamp(struct cifs_ses *ses)
{
- unsigned int attrsize;
- unsigned int type;
- unsigned int onesize = sizeof(struct ntlmssp2_name);
- unsigned char *blobptr;
- unsigned char *blobend;
- struct ntlmssp2_name *attrptr;
+ struct ntlmssp2_name *av;
struct timespec64 ts;
- if (!ses->auth_key.len || !ses->auth_key.response)
- return 0;
-
- blobptr = ses->auth_key.response;
- blobend = blobptr + ses->auth_key.len;
-
- while (blobptr + onesize < blobend) {
- attrptr = (struct ntlmssp2_name *) blobptr;
- type = le16_to_cpu(attrptr->type);
- if (type == NTLMSSP_AV_EOL)
- break;
- blobptr += 2; /* advance attr type */
- attrsize = le16_to_cpu(attrptr->length);
- blobptr += 2; /* advance attr size */
- if (blobptr + attrsize > blobend)
- break;
- if (type == NTLMSSP_AV_TIMESTAMP) {
- if (attrsize == sizeof(u64))
- return *((__le64 *)blobptr);
- }
- blobptr += attrsize; /* advance attr value */
+ av_for_each_entry(ses, av) {
+ if (AV_TYPE(av) == NTLMSSP_AV_TIMESTAMP &&
+ AV_LEN(av) == sizeof(u64))
+ return *((__le64 *)AV_DATA_PTR(av));
}
-
ktime_get_real_ts64(&ts);
return cpu_to_le64(cifs_UnixTimeToNT(ts));
}
@@ -563,16 +552,29 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
if (!ses->domainName) {
if (ses->domainAuto) {
- rc = find_domain_name(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "error %d finding domain name\n",
- rc);
+ /*
+ * Domain (workgroup) hasn't been specified in
+ * mount options, so try to find it in the
+ * CHALLENGE_MESSAGE and then use it as
+ * part of NTLMv2 authentication.
+ */
+ rc = find_av_name(ses, NTLMSSP_AV_NB_DOMAIN_NAME,
+ &ses->domainName,
+ CIFS_MAX_DOMAINNAME_LEN);
+ if (rc)
goto setup_ntlmv2_rsp_ret;
- }
} else {
ses->domainName = kstrdup("", GFP_KERNEL);
+ if (!ses->domainName) {
+ rc = -ENOMEM;
+ goto setup_ntlmv2_rsp_ret;
+ }
}
}
+ rc = find_av_name(ses, NTLMSSP_AV_DNS_DOMAIN_NAME,
+ &ses->dns_dom, CIFS_MAX_DOMAINNAME_LEN);
+ if (rc)
+ goto setup_ntlmv2_rsp_ret;
} else {
rc = build_avpair_blob(ses, nls_cp);
if (rc) {
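find_av_name() and find_timestamp() above both walk the CHALLENGE_MESSAGE target info through the new av_for_each_entry() iterator, which bounds-checks every AV pair before yielding it. A minimal sketch of another lookup written against the same helpers; the function name is invented and NTLMSSP_AV_FLAGS is assumed to be among the AV pair types defined in ntlmssp.h:

static __le32 find_av_flags(struct cifs_ses *ses)
{
	struct ntlmssp2_name *av;

	av_for_each_entry(ses, av) {
		if (AV_TYPE(av) == NTLMSSP_AV_FLAGS &&
		    AV_LEN(av) == sizeof(__le32))
			return *(__le32 *)AV_DATA_PTR(av);
	}
	return 0;	/* pair absent or malformed */
}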
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index b800c9f585d8..6a3bd652d251 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -715,6 +715,12 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
cifs_sb->ctx->backupgid));
seq_show_option(s, "reparse",
cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
+ if (cifs_sb->ctx->nonativesocket)
+ seq_puts(s, ",nonativesocket");
+ else
+ seq_puts(s, ",nativesocket");
+ seq_show_option(s, "symlink",
+ cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index a762dbbbd959..831fee962c4d 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -146,6 +146,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 52
-#define CIFS_VERSION "2.52"
+#define SMB3_PRODUCT_BUILD 53
+#define CIFS_VERSION "2.53"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 6e63abe461fd..ac1f890a0d54 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -151,6 +151,7 @@ enum securityEnum {
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
Kerberos, /* Kerberos via SPNEGO */
+ IAKerb, /* Kerberos proxy */
};
enum upcall_target_enum {
@@ -160,6 +161,7 @@ enum upcall_target_enum {
};
enum cifs_reparse_type {
+ CIFS_REPARSE_TYPE_NONE,
CIFS_REPARSE_TYPE_NFS,
CIFS_REPARSE_TYPE_WSL,
CIFS_REPARSE_TYPE_DEFAULT = CIFS_REPARSE_TYPE_NFS,
@@ -168,6 +170,8 @@ enum cifs_reparse_type {
static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type)
{
switch (type) {
+ case CIFS_REPARSE_TYPE_NONE:
+ return "none";
case CIFS_REPARSE_TYPE_NFS:
return "nfs";
case CIFS_REPARSE_TYPE_WSL:
@@ -177,6 +181,39 @@ static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type)
}
}
+enum cifs_symlink_type {
+ CIFS_SYMLINK_TYPE_DEFAULT,
+ CIFS_SYMLINK_TYPE_NONE,
+ CIFS_SYMLINK_TYPE_NATIVE,
+ CIFS_SYMLINK_TYPE_UNIX,
+ CIFS_SYMLINK_TYPE_MFSYMLINKS,
+ CIFS_SYMLINK_TYPE_SFU,
+ CIFS_SYMLINK_TYPE_NFS,
+ CIFS_SYMLINK_TYPE_WSL,
+};
+
+static inline const char *cifs_symlink_type_str(enum cifs_symlink_type type)
+{
+ switch (type) {
+ case CIFS_SYMLINK_TYPE_NONE:
+ return "none";
+ case CIFS_SYMLINK_TYPE_NATIVE:
+ return "native";
+ case CIFS_SYMLINK_TYPE_UNIX:
+ return "unix";
+ case CIFS_SYMLINK_TYPE_MFSYMLINKS:
+ return "mfsymlinks";
+ case CIFS_SYMLINK_TYPE_SFU:
+ return "sfu";
+ case CIFS_SYMLINK_TYPE_NFS:
+ return "nfs";
+ case CIFS_SYMLINK_TYPE_WSL:
+ return "wsl";
+ default:
+ return "unknown";
+ }
+}
+
struct session_key {
unsigned int len;
char *response;
@@ -215,10 +252,7 @@ struct cifs_cred {
struct cifs_open_info_data {
bool adjust_tz;
- union {
- bool reparse_point;
- bool symlink;
- };
+ bool reparse_point;
struct {
/* ioctl response buffer */
struct {
@@ -226,10 +260,7 @@ struct cifs_open_info_data {
struct kvec iov;
} io;
__u32 tag;
- union {
- struct reparse_data_buffer *buf;
- struct reparse_posix_data *posix;
- };
+ struct reparse_data_buffer *buf;
} reparse;
struct {
__u8 eas[SMB2_WSL_MAX_QUERY_EA_RESP_SIZE];
@@ -326,7 +357,7 @@ struct smb_version_operations {
int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache);
+ __u16 epoch, bool *purge_cache);
/* process transaction2 response */
bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
char *, int);
@@ -521,12 +552,12 @@ struct smb_version_operations {
/* if we can do cache read operations */
bool (*is_read_op)(__u32);
/* set oplock level for the inode */
- void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
- bool *);
+ void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch,
+ bool *purge_cache);
/* create lease context buffer for CREATE request */
char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
/* parse lease context buffer and return oplock/epoch info */
- __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
+ __u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey);
ssize_t (*copychunk_range)(const unsigned int,
struct cifsFileInfo *src_file,
struct cifsFileInfo *target_file,
@@ -751,6 +782,7 @@ struct TCP_Server_Info {
bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
+ bool sec_iakerb; /* supports pass-through auth for Kerberos (krb5 proxy) */
bool large_buf; /* is current buffer large? */
/* use SMBD connection instead of socket */
bool rdma;
@@ -811,23 +843,15 @@ struct TCP_Server_Info {
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
- struct mutex refpath_lock; /* protects leaf_fullpath */
/*
- * leaf_fullpath: Canonical DFS referral path related to this
- * connection.
- * It is used in DFS cache refresher, reconnect and may
- * change due to nested DFS links.
- *
- * Protected by @refpath_lock and @srv_lock. The @refpath_lock is
- * mostly used for not requiring a copy of @leaf_fullpath when getting
- * cached or new DFS referrals (which might also sleep during I/O).
- * While @srv_lock is held for making string and NULL comparisons against
- * both fields as in mount(2) and cache refresh.
+ * Canonical DFS referral path used in cifs_reconnect() for failover as
+ * well as in DFS cache refresher.
*
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
bool dfs_conn:1;
+ char dns_dom[CIFS_MAX_DOMAINNAME_LEN + 1];
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1154,6 +1178,7 @@ struct cifs_ses {
/* ========= end: protected by chan_lock ======== */
struct cifs_ses *dfs_root_ses;
struct nls_table *local_nls;
+ char *dns_dom; /* FQDN of the domain */
};
static inline bool
@@ -1422,7 +1447,7 @@ struct cifs_fid {
__u8 create_guid[16];
__u32 access;
struct cifs_pending_open *pending_open;
- unsigned int epoch;
+ __u16 epoch;
#ifdef CONFIG_CIFS_DEBUG2
__u64 mid;
#endif /* CIFS_DEBUG2 */
@@ -1455,7 +1480,7 @@ struct cifsFileInfo {
bool oplock_break_cancelled:1;
bool status_file_deleted:1; /* file has been deleted */
bool offload:1; /* offload final part of _put to a wq */
- unsigned int oplock_epoch; /* epoch from the lease break */
+ __u16 oplock_epoch; /* epoch from the lease break */
__u32 oplock_level; /* oplock/lease level from the lease break */
int count;
spinlock_t file_info_lock; /* protects four flag/count fields above */
@@ -1552,7 +1577,7 @@ struct cifsInodeInfo {
spinlock_t open_file_lock; /* protects openFileList */
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
unsigned int oplock; /* oplock/lease level we have */
- unsigned int epoch; /* used to track lease state changes */
+ __u16 epoch; /* used to track lease state changes */
#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
@@ -2125,6 +2150,8 @@ static inline char *get_security_type_str(enum securityEnum sectype)
return "Kerberos";
case NTLMv2:
return "NTLMv2";
+ case IAKerb:
+ return "IAKerb";
default:
return "Unknown";
}
@@ -2180,11 +2207,13 @@ static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src)
{
- memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src));
- dst->AccessFlags = src->AccessFlags;
- dst->CurrentByteOffset = src->CurrentByteOffset;
- dst->Mode = src->Mode;
- dst->AlignmentRequirement = src->AlignmentRequirement;
+ memcpy(dst, src, (size_t)((u8 *)&src->EASize - (u8 *)src));
+ dst->IndexNumber = 0;
+ dst->EASize = src->EASize;
+ dst->AccessFlags = 0;
+ dst->CurrentByteOffset = 0;
+ dst->Mode = 0;
+ dst->AlignmentRequirement = 0;
dst->FileNameLength = src->FileNameLength;
}
@@ -2311,4 +2340,24 @@ static inline bool cifs_ses_exiting(struct cifs_ses *ses)
return ret;
}
+static inline bool cifs_netbios_name(const char *name, size_t namelen)
+{
+ bool ret = false;
+ size_t i;
+
+ if (namelen >= 1 && namelen <= RFC1001_NAME_LEN) {
+ for (i = 0; i < namelen; i++) {
+ const unsigned char c = name[i];
+
+ if (c == '\\' || c == '/' || c == ':' || c == '*' ||
+ c == '?' || c == '"' || c == '<' || c == '>' ||
+ c == '|' || c == '.')
+ return false;
+ if (!ret && isalpha(c))
+ ret = true;
+ }
+ }
+ return ret;
+}
+
#endif /* _CIFS_GLOB_H */
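cifs_netbios_name() above is used later in the connect.c change to decide whether the mount's domain value looks like a NetBIOS workgroup name (short, at least one letter, none of the RFC 1001 reserved characters) or a dotted DNS domain that can be copied into ses->dns_dom. A few illustrative calls, assuming RFC1001_NAME_LEN keeps its usual value of 15:

static void cifs_netbios_name_examples(void)
{
	bool a = cifs_netbios_name("WORKGROUP", 9);	/* true: short, has letters  */
	bool b = cifs_netbios_name("example.com", 11);	/* false: '.' is reserved    */
	bool c = cifs_netbios_name("1234", 4);		/* false: no alphabetic char */

	(void)a; (void)b; (void)c;
}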
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index ee78bb6741d6..48d0d6f439cf 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -190,42 +190,82 @@
*/
#define FILE_READ_DATA 0x00000001 /* Data can be read from the file */
+ /* or directory child entries can */
+ /* be listed together with the */
+ /* associated child attributes */
+ /* (so the FILE_READ_ATTRIBUTES on */
+ /* the child entry is not needed) */
#define FILE_WRITE_DATA 0x00000002 /* Data can be written to the file */
+ /* or new file can be created in */
+ /* the directory */
#define FILE_APPEND_DATA 0x00000004 /* Data can be appended to the file */
+ /* (for non-local files over SMB it */
+ /* is same as FILE_WRITE_DATA) */
+ /* or new subdirectory can be */
+ /* created in the directory */
#define FILE_READ_EA 0x00000008 /* Extended attributes associated */
/* with the file can be read */
#define FILE_WRITE_EA 0x00000010 /* Extended attributes associated */
/* with the file can be written */
#define FILE_EXECUTE 0x00000020 /*Data can be read into memory from */
/* the file using system paging I/O */
-#define FILE_DELETE_CHILD 0x00000040
+ /* for executing the file / script */
+ /* or right to traverse directory */
+ /* (but by default all users have */
+ /* directory bypass traverse */
+ /* privilege and do not need this */
+ /* permission on directories at all)*/
+#define FILE_DELETE_CHILD 0x00000040 /* Child entry can be deleted from */
+ /* the directory (so the DELETE on */
+ /* the child entry is not needed) */
#define FILE_READ_ATTRIBUTES 0x00000080 /* Attributes associated with the */
- /* file can be read */
+ /* file or directory can be read */
#define FILE_WRITE_ATTRIBUTES 0x00000100 /* Attributes associated with the */
- /* file can be written */
-#define DELETE 0x00010000 /* The file can be deleted */
-#define READ_CONTROL 0x00020000 /* The access control list and */
- /* ownership associated with the */
- /* file can be read */
-#define WRITE_DAC 0x00040000 /* The access control list and */
- /* ownership associated with the */
- /* file can be written. */
+ /* file or directory can be written */
+#define DELETE 0x00010000 /* The file or dir can be deleted */
+#define READ_CONTROL 0x00020000 /* The discretionary access control */
+ /* list and ownership associated */
+ /* with the file or dir can be read */
+#define WRITE_DAC 0x00040000 /* The discretionary access control */
+ /* list associated with the file or */
+ /* directory can be written */
#define WRITE_OWNER 0x00080000 /* Ownership information associated */
- /* with the file can be written */
+ /* with the file/dir can be written */
#define SYNCHRONIZE 0x00100000 /* The file handle can waited on to */
/* synchronize with the completion */
/* of an input/output request */
#define SYSTEM_SECURITY 0x01000000 /* The system access control list */
- /* can be read and changed */
-#define GENERIC_ALL 0x10000000
-#define GENERIC_EXECUTE 0x20000000
-#define GENERIC_WRITE 0x40000000
-#define GENERIC_READ 0x80000000
- /* In summary - Relevant file */
- /* access flags from CIFS are */
- /* file_read_data, file_write_data */
- /* file_execute, file_read_attributes*/
- /* write_dac, and delete. */
+ /* associated with the file or */
+ /* directory can be read or written */
+ /* (cannot be in DACL, can in SACL) */
+#define MAXIMUM_ALLOWED 0x02000000 /* Maximal subset of GENERIC_ALL */
+ /* permissions which can be granted */
+ /* (cannot be in DACL nor SACL) */
+#define GENERIC_ALL 0x10000000 /* Same as: GENERIC_EXECUTE | */
+ /* GENERIC_WRITE | */
+ /* GENERIC_READ | */
+ /* FILE_DELETE_CHILD | */
+ /* DELETE | */
+ /* WRITE_DAC | */
+ /* WRITE_OWNER */
+ /* So GENERIC_ALL contains all bits */
+ /* mentioned above except these two */
+ /* SYSTEM_SECURITY MAXIMUM_ALLOWED */
+#define GENERIC_EXECUTE 0x20000000 /* Same as: FILE_EXECUTE | */
+ /* FILE_READ_ATTRIBUTES | */
+ /* READ_CONTROL | */
+ /* SYNCHRONIZE */
+#define GENERIC_WRITE 0x40000000 /* Same as: FILE_WRITE_DATA | */
+ /* FILE_APPEND_DATA | */
+ /* FILE_WRITE_EA | */
+ /* FILE_WRITE_ATTRIBUTES | */
+ /* READ_CONTROL | */
+ /* SYNCHRONIZE */
+#define GENERIC_READ 0x80000000 /* Same as: FILE_READ_DATA | */
+ /* FILE_READ_EA | */
+ /* FILE_READ_ATTRIBUTES | */
+ /* READ_CONTROL | */
+ /* SYNCHRONIZE */
#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
@@ -649,7 +689,7 @@ typedef union smb_com_session_setup_andx {
struct ntlmssp2_name {
__le16 type;
__le16 length;
-/* char name[length]; */
+ __u8 data[];
} __attribute__((packed));
struct ntlmv2_resp {
@@ -1484,36 +1524,6 @@ struct file_notify_information {
__u8 FileName[];
} __attribute__((packed));
-/* For IO_REPARSE_TAG_SYMLINK */
-struct reparse_symlink_data {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le16 SubstituteNameOffset;
- __le16 SubstituteNameLength;
- __le16 PrintNameOffset;
- __le16 PrintNameLength;
- __le32 Flags;
- char PathBuffer[];
-} __attribute__((packed));
-
-/* Flag above */
-#define SYMLINK_FLAG_RELATIVE 0x00000001
-
-/* For IO_REPARSE_TAG_NFS */
-#define NFS_SPECFILE_LNK 0x00000000014B4E4C
-#define NFS_SPECFILE_CHR 0x0000000000524843
-#define NFS_SPECFILE_BLK 0x00000000004B4C42
-#define NFS_SPECFILE_FIFO 0x000000004F464946
-#define NFS_SPECFILE_SOCK 0x000000004B434F53
-struct reparse_posix_data {
- __le32 ReparseTag;
- __le16 ReparseDataLength;
- __u16 Reserved;
- __le64 InodeType; /* LNK, FIFO, CHR etc. */
- __u8 DataBuffer[];
-} __attribute__((packed));
-
struct cifs_quota_data {
__u32 rsrvd1; /* 0 */
__u32 sid_size;
@@ -2280,13 +2290,7 @@ typedef struct { /* data block encoding of response to level 263 QPathInfo */
__u8 DeletePending;
__u8 Directory;
__u16 Pad2;
- __le64 IndexNumber;
__le32 EASize;
- __le32 AccessFlags;
- __u64 IndexNumber1;
- __le64 CurrentByteOffset;
- __le32 Mode;
- __le32 AlignmentRequirement;
__le32 FileNameLength;
union {
char __pad;
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index d26f9bbb5382..81680001944d 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -474,9 +474,6 @@ extern int cifs_query_reparse_point(const unsigned int xid,
const char *full_path,
u32 *tag, struct kvec *rsp,
int *rsp_buftype);
-extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
- __u16 fid, char **symlinkinfo,
- const struct nls_table *nls_codepage);
extern int CIFSSMB_set_compression(const unsigned int xid,
struct cifs_tcon *tcon, __u16 fid);
extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms,
@@ -560,7 +557,7 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage,
struct cifs_sb_info *cifs_sb);
extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
- __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen);
+ __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen, __u32 info);
extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
struct smb_ntsd *pntsd, __u32 len, int aclflag);
extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon,
@@ -659,7 +656,7 @@ char *extract_sharename(const char *unc);
int parse_reparse_point(struct reparse_data_buffer *buf,
u32 plen, struct cifs_sb_info *cifs_sb,
const char *full_path,
- bool unicode, struct cifs_open_info_data *data);
+ struct cifs_open_info_data *data);
int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev,
@@ -680,7 +677,7 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
}
int match_target_ip(struct TCP_Server_Info *server,
- const char *share, size_t share_len,
+ const char *host, size_t hostlen,
bool *result);
int cifs_inval_name_dfs_link_error(const unsigned int xid,
struct cifs_tcon *tcon,
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 7f1cacc89dbb..3feaa0f68169 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -3369,7 +3369,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
- struct smb_ntsd **acl_inf, __u32 *pbuflen)
+ struct smb_ntsd **acl_inf, __u32 *pbuflen, __u32 info)
{
int rc = 0;
int buf_type = 0;
@@ -3392,7 +3392,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
pSMB->MaxSetupCount = 0;
pSMB->Fid = fid; /* file handle always le */
pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
- CIFS_ACL_DACL);
+ CIFS_ACL_DACL | info);
pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
inc_rfc1001_len(pSMB, 11);
iov[0].iov_base = (char *)pSMB;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index eaa6be4456d0..f917de020dd5 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -72,10 +72,8 @@ static void cifs_prune_tlinks(struct work_struct *work);
*/
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
- int rc;
- int len;
- char *unc;
struct sockaddr_storage ss;
+ int rc;
if (!server->hostname)
return -EINVAL;
@@ -84,32 +82,18 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
if (server->hostname[0] == '\0')
return 0;
- len = strlen(server->hostname) + 3;
-
- unc = kmalloc(len, GFP_KERNEL);
- if (!unc) {
- cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
- return -ENOMEM;
- }
- scnprintf(unc, len, "\\\\%s", server->hostname);
-
spin_lock(&server->srv_lock);
ss = server->dstaddr;
spin_unlock(&server->srv_lock);
- rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
- kfree(unc);
-
- if (rc < 0) {
- cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
- __func__, server->hostname, rc);
- } else {
+ rc = dns_resolve_name(server->dns_dom, server->hostname,
+ strlen(server->hostname),
+ (struct sockaddr *)&ss);
+ if (!rc) {
spin_lock(&server->srv_lock);
memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
spin_unlock(&server->srv_lock);
- rc = 0;
}
-
return rc;
}
@@ -438,7 +422,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
+static int __reconnect_target_locked(struct TCP_Server_Info *server,
+ const char *target)
{
int rc;
char *hostname;
@@ -471,34 +456,43 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
return rc;
}
-static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
- struct dfs_cache_tgt_iterator **target_hint)
+static int reconnect_target_locked(struct TCP_Server_Info *server,
+ struct dfs_cache_tgt_list *tl,
+ struct dfs_cache_tgt_iterator **target_hint)
{
- int rc;
struct dfs_cache_tgt_iterator *tit;
+ int rc;
*target_hint = NULL;
/* If dfs target list is empty, then reconnect to last server */
tit = dfs_cache_get_tgt_iterator(tl);
if (!tit)
- return __reconnect_target_unlocked(server, server->hostname);
+ return __reconnect_target_locked(server, server->hostname);
/* Otherwise, try every dfs target in @tl */
- for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
- rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
+ do {
+ const char *target = dfs_cache_get_tgt_name(tit);
+
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus != CifsNeedReconnect) {
+ spin_unlock(&server->srv_lock);
+ return -ECONNRESET;
+ }
+ spin_unlock(&server->srv_lock);
+ rc = __reconnect_target_locked(server, target);
if (!rc) {
*target_hint = tit;
break;
}
- }
+ } while ((tit = dfs_cache_get_next_tgt(tl, tit)));
return rc;
}
static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
struct dfs_cache_tgt_iterator *target_hint = NULL;
-
+ const char *ref_path = server->leaf_fullpath + 1;
DFS_CACHE_TGT_LIST(tl);
int num_targets = 0;
int rc = 0;
@@ -511,10 +505,8 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
* through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
* refreshing the referral, so, in this case, default it to 1.
*/
- mutex_lock(&server->refpath_lock);
- if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
+ if (!dfs_cache_noreq_find(ref_path, NULL, &tl))
num_targets = dfs_cache_get_nr_tgts(&tl);
- mutex_unlock(&server->refpath_lock);
if (!num_targets)
num_targets = 1;
@@ -534,7 +526,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
try_to_freeze();
cifs_server_lock(server);
- rc = reconnect_target_unlocked(server, &tl, &target_hint);
+ rc = reconnect_target_locked(server, &tl, &target_hint);
if (rc) {
/* Failed to reconnect socket */
cifs_server_unlock(server);
@@ -558,9 +550,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
- mutex_lock(&server->refpath_lock);
- dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
- mutex_unlock(&server->refpath_lock);
+ dfs_cache_noreq_update_tgthint(ref_path, target_hint);
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
@@ -575,13 +565,8 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
- mutex_lock(&server->refpath_lock);
- if (!server->leaf_fullpath) {
- mutex_unlock(&server->refpath_lock);
+ if (!server->leaf_fullpath)
return __cifs_reconnect(server, mark_smb_session);
- }
- mutex_unlock(&server->refpath_lock);
-
return reconnect_dfs_server(server);
}
#else
@@ -1541,42 +1526,10 @@ static int match_server(struct TCP_Server_Info *server,
if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
(struct sockaddr *)&server->srcaddr))
return 0;
- /*
- * When matching cifs.ko superblocks (@match_super == true), we can't
- * really match either @server->leaf_fullpath or @server->dstaddr
- * directly since this @server might belong to a completely different
- * server -- in case of domain-based DFS referrals or DFS links -- as
- * provided earlier by mount(2) through 'source' and 'ip' options.
- *
- * Otherwise, match the DFS referral in @server->leaf_fullpath or the
- * destination address in @server->dstaddr.
- *
- * When using 'nodfs' mount option, we avoid sharing it with DFS
- * connections as they might failover.
- */
- if (!match_super) {
- if (!ctx->nodfs) {
- if (server->leaf_fullpath) {
- if (!ctx->leaf_fullpath ||
- strcasecmp(server->leaf_fullpath,
- ctx->leaf_fullpath))
- return 0;
- } else if (ctx->leaf_fullpath) {
- return 0;
- }
- } else if (server->leaf_fullpath) {
- return 0;
- }
- }
- /*
- * Match for a regular connection (address/hostname/port) which has no
- * DFS referrals set.
- */
- if (!server->leaf_fullpath &&
- (strcasecmp(server->hostname, ctx->server_hostname) ||
- !match_server_address(server, addr) ||
- !match_port(server, addr)))
+ if (strcasecmp(server->hostname, ctx->server_hostname) ||
+ !match_server_address(server, addr) ||
+ !match_port(server, addr))
return 0;
if (!match_security(server, ctx))
@@ -1710,6 +1663,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
goto out_err;
}
}
+ if (ctx->dns_dom)
+ strscpy(tcp_ses->dns_dom, ctx->dns_dom);
if (ctx->nosharesock)
tcp_ses->nosharesock = true;
@@ -1758,9 +1713,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- mutex_init(&tcp_ses->refpath_lock);
-#endif
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
@@ -2276,12 +2228,13 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
- int rc = 0;
- int retries = 0;
- unsigned int xid;
- struct cifs_ses *ses;
- struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+ struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+ struct cifs_ses *ses;
+ unsigned int xid;
+ int retries = 0;
+ size_t len;
+ int rc = 0;
xid = get_xid();
@@ -2371,6 +2324,14 @@ retry_old_session:
ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
if (!ses->domainName)
goto get_ses_fail;
+
+ len = strnlen(ctx->domainname, CIFS_MAX_DOMAINNAME_LEN);
+ if (!cifs_netbios_name(ctx->domainname, len)) {
+ ses->dns_dom = kstrndup(ctx->domainname,
+ len, GFP_KERNEL);
+ if (!ses->dns_dom)
+ goto get_ses_fail;
+ }
}
strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
@@ -2888,6 +2849,10 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 0;
if (old->ctx->reparse_type != new->ctx->reparse_type)
return 0;
+ if (old->ctx->nonativesocket != new->ctx->nonativesocket)
+ return 0;
+ if (old->ctx->symlink_type != new->ctx->symlink_type)
+ return 0;
return 1;
}
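reconn_set_ipaddr_from_hostname() above no longer builds a temporary "\\hostname" UNC string; it resolves the bare hostname, optionally qualified by the DNS domain stored in server->dns_dom. A minimal sketch of the new call shape, with the dns_resolve_name() signature taken from this hunk rather than re-checked against dns_resolve.h:

static int example_reresolve(struct TCP_Server_Info *server,
			     struct sockaddr_storage *ss)
{
	/* server->dns_dom may be empty when no DNS domain was learned;
	 * the call site above passes it through unconditionally. */
	return dns_resolve_name(server->dns_dom, server->hostname,
				strlen(server->hostname),
				(struct sockaddr *)ss);
}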
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 4647df9e1e3b..f65a8a90ba27 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -9,6 +9,8 @@
#include "fs_context.h"
#include "dfs.h"
+#define DFS_DOM(ctx) (ctx->dfs_root_ses ? ctx->dfs_root_ses->dns_dom : NULL)
+
/**
* dfs_parse_target_referral - set fs context for dfs target referral
*
@@ -46,8 +48,8 @@ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_para
if (rc)
goto out;
- rc = dns_resolve_server_name_to_ip(path, (struct sockaddr *)&ctx->dstaddr, NULL);
-
+ rc = dns_resolve_unc(DFS_DOM(ctx), path,
+ (struct sockaddr *)&ctx->dstaddr);
out:
kfree(path);
return rc;
@@ -59,8 +61,9 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
int rc;
ctx->leaf_fullpath = (char *)full_path;
+ ctx->dns_dom = DFS_DOM(ctx);
rc = cifs_mount_get_session(mnt_ctx);
- ctx->leaf_fullpath = NULL;
+ ctx->leaf_fullpath = ctx->dns_dom = NULL;
return rc;
}
@@ -95,15 +98,16 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
return rc;
}
-static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
- struct dfs_info3_param *tgt,
- struct dfs_ref_walk *rw)
+static int setup_dfs_ref(struct dfs_info3_param *tgt, struct dfs_ref_walk *rw)
{
- struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct cifs_sb_info *cifs_sb = rw->mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = rw->mnt_ctx->fs_ctx;
char *ref_path, *full_path;
int rc;
+ set_root_smb_session(rw->mnt_ctx);
+ ref_walk_ses(rw) = ctx->dfs_root_ses;
+
full_path = smb3_fs_context_fullpath(ctx, CIFS_DIR_SEP(cifs_sb));
if (IS_ERR(full_path))
return PTR_ERR(full_path);
@@ -120,35 +124,22 @@ static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
}
ref_walk_path(rw) = ref_path;
ref_walk_fpath(rw) = full_path;
- ref_walk_ses(rw) = ctx->dfs_root_ses;
- return 0;
+
+ return dfs_get_referral(rw->mnt_ctx,
+ ref_walk_path(rw) + 1,
+ ref_walk_tl(rw));
}
-static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
- struct dfs_ref_walk *rw)
+static int __dfs_referral_walk(struct dfs_ref_walk *rw)
{
- struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct smb3_fs_context *ctx = rw->mnt_ctx->fs_ctx;
+ struct cifs_mount_ctx *mnt_ctx = rw->mnt_ctx;
struct dfs_info3_param tgt = {};
int rc = -ENOENT;
again:
do {
ctx->dfs_root_ses = ref_walk_ses(rw);
- if (ref_walk_empty(rw)) {
- rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1,
- NULL, ref_walk_tl(rw));
- if (rc) {
- rc = cifs_mount_get_tcon(mnt_ctx);
- if (!rc)
- rc = cifs_is_path_remote(mnt_ctx);
- continue;
- }
- if (!ref_walk_num_tgts(rw)) {
- rc = -ENOENT;
- continue;
- }
- }
-
while (ref_walk_next_tgt(rw)) {
rc = parse_dfs_target(ctx, rw, &tgt);
if (rc)
@@ -159,32 +150,31 @@ again:
if (rc)
continue;
- ref_walk_set_tgt_hint(rw);
- if (tgt.flags & DFSREF_STORAGE_SERVER) {
- rc = cifs_mount_get_tcon(mnt_ctx);
- if (!rc)
- rc = cifs_is_path_remote(mnt_ctx);
- if (!rc)
+ rc = cifs_mount_get_tcon(mnt_ctx);
+ if (rc) {
+ if (tgt.server_type == DFS_TYPE_LINK &&
+ DFS_INTERLINK(tgt.flags))
+ rc = -EREMOTE;
+ } else {
+ rc = cifs_is_path_remote(mnt_ctx);
+ if (!rc) {
+ ref_walk_set_tgt_hint(rw);
break;
- if (rc != -EREMOTE)
- continue;
+ }
}
-
- set_root_smb_session(mnt_ctx);
- rc = ref_walk_advance(rw);
- if (!rc) {
- rc = setup_dfs_ref(mnt_ctx, &tgt, rw);
+ if (rc == -EREMOTE) {
+ rc = ref_walk_advance(rw);
if (!rc) {
- rc = -EREMOTE;
+ rc = setup_dfs_ref(&tgt, rw);
+ if (rc)
+ break;
+ ref_walk_mark_end(rw);
goto again;
}
}
- if (rc != -ELOOP)
- goto out;
}
} while (rc && ref_walk_descend(rw));
-out:
free_dfs_info_param(&tgt);
return rc;
}
@@ -201,10 +191,10 @@ static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
return rc;
}
- ref_walk_init(*rw);
- rc = setup_dfs_ref(mnt_ctx, NULL, *rw);
+ ref_walk_init(*rw, mnt_ctx);
+ rc = setup_dfs_ref(NULL, *rw);
if (!rc)
- rc = __dfs_referral_walk(mnt_ctx, *rw);
+ rc = __dfs_referral_walk(*rw);
return rc;
}
@@ -264,7 +254,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
int rc = 0;
if (!ctx->nodfs && ctx->dfs_automount) {
- rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
+ rc = dns_resolve_unc(NULL, ctx->source, addr);
if (!rc)
cifs_set_port(addr, ctx->port);
ctx->dfs_automount = false;
@@ -294,7 +284,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
* to respond with PATH_NOT_COVERED to requests that include the prefix.
*/
if (!nodfs) {
- rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
+ rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL);
if (rc) {
cifs_dbg(FYI, "%s: no dfs referral for %s: %d\n",
__func__, ctx->UNC + 1, rc);
@@ -314,10 +304,8 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
cifs_mount_put_conns(mnt_ctx);
rc = get_session(mnt_ctx, NULL);
}
- if (!rc) {
- set_root_smb_session(mnt_ctx);
+ if (!rc)
rc = __dfs_mount_share(mnt_ctx);
- }
return rc;
}
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index 1aa2bc65b3bc..e60f0a24a8a1 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -12,6 +12,7 @@
#include "dfs_cache.h"
#include "cifs_unicode.h"
#include <linux/namei.h>
+#include <linux/errno.h>
#define DFS_INTERLINK(v) \
(((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
@@ -25,8 +26,9 @@ struct dfs_ref {
};
struct dfs_ref_walk {
- struct dfs_ref *ref;
- struct dfs_ref refs[MAX_NESTED_LINKS];
+ struct cifs_mount_ctx *mnt_ctx;
+ struct dfs_ref *ref;
+ struct dfs_ref refs[MAX_NESTED_LINKS];
};
#define ref_walk_start(w) ((w)->refs)
@@ -35,7 +37,6 @@ struct dfs_ref_walk {
#define ref_walk_descend(w) (--ref_walk_cur(w) >= ref_walk_start(w))
#define ref_walk_tit(w) (ref_walk_cur(w)->tit)
-#define ref_walk_empty(w) (!ref_walk_tit(w))
#define ref_walk_path(w) (ref_walk_cur(w)->path)
#define ref_walk_fpath(w) (ref_walk_cur(w)->full_path)
#define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
@@ -51,9 +52,11 @@ static inline struct dfs_ref_walk *ref_walk_alloc(void)
return rw;
}
-static inline void ref_walk_init(struct dfs_ref_walk *rw)
+static inline void ref_walk_init(struct dfs_ref_walk *rw,
+ struct cifs_mount_ctx *mnt_ctx)
{
memset(rw, 0, sizeof(*rw));
+ rw->mnt_ctx = mnt_ctx;
ref_walk_cur(rw) = ref_walk_start(rw);
}
@@ -93,15 +96,23 @@ static inline int ref_walk_advance(struct dfs_ref_walk *rw)
static inline struct dfs_cache_tgt_iterator *
ref_walk_next_tgt(struct dfs_ref_walk *rw)
{
- struct dfs_cache_tgt_iterator *tit;
struct dfs_ref *ref = ref_walk_cur(rw);
+ struct dfs_cache_tgt_iterator *tit;
+
+ if (IS_ERR(ref->tit))
+ return NULL;
if (!ref->tit)
tit = dfs_cache_get_tgt_iterator(&ref->tl);
else
tit = dfs_cache_get_next_tgt(&ref->tl, ref->tit);
+
+ if (!tit) {
+ ref->tit = ERR_PTR(-ENOENT);
+ return NULL;
+ }
ref->tit = tit;
- return tit;
+ return ref->tit;
}
static inline int ref_walk_get_tgt(struct dfs_ref_walk *rw,
@@ -112,11 +123,6 @@ static inline int ref_walk_get_tgt(struct dfs_ref_walk *rw,
ref_walk_tit(rw), tgt);
}
-static inline int ref_walk_num_tgts(struct dfs_ref_walk *rw)
-{
- return dfs_cache_get_nr_tgts(ref_walk_tl(rw));
-}
-
static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
{
dfs_cache_noreq_update_tgthint(ref_walk_path(rw) + 1,
@@ -136,6 +142,15 @@ static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw,
}
}
+static inline void ref_walk_mark_end(struct dfs_ref_walk *rw)
+{
+ struct dfs_ref *ref = ref_walk_cur(rw) - 1;
+
+ WARN_ON_ONCE(ref < ref_walk_start(rw));
+ dfs_cache_noreq_update_tgthint(ref->path + 1, ref->tit);
+ ref->tit = ERR_PTR(-ENOENT); /* end marker */
+}
+
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx);
@@ -145,15 +160,16 @@ static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
return dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb));
}
-static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path,
- struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)
+static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx,
+ const char *path,
+ struct dfs_cache_tgt_list *tl)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), path, ref, tl);
+ cifs_remap(cifs_sb), path, NULL, tl);
}
/*
@@ -172,4 +188,11 @@ static inline void dfs_put_root_smb_sessions(struct list_head *head)
}
}
+static inline const char *dfs_ses_refpath(struct cifs_ses *ses)
+{
+ const char *path = ses->server->leaf_fullpath;
+
+ return path ? path + 1 : ERR_PTR(-ENOENT);
+}
+
#endif /* _CIFS_DFS_H */
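
The ref_walk_next_tgt() and ref_walk_mark_end() helpers above rely on poisoning the per-level target iterator with ERR_PTR(-ENOENT) once a level's targets are exhausted, so later calls bail out immediately instead of restarting the list. A minimal user-space sketch of that end-marker pattern follows; the struct names and the stub iterator are invented for illustration and are not the kernel API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct tgt_list { int cur, nr; };               /* stand-in for dfs_cache_tgt_list */
struct ref { struct tgt_list tl; void *tit; };  /* stand-in for struct dfs_ref */

static void *next_tgt(struct ref *ref)
{
	if (IS_ERR(ref->tit))                   /* level already marked as ended */
		return NULL;
	if (ref->tl.cur >= ref->tl.nr) {        /* exhausted: poison the slot */
		ref->tit = ERR_PTR(-ENOENT);
		return NULL;
	}
	ref->tit = &ref->tl;                    /* pretend this is a target iterator */
	ref->tl.cur++;
	return ref->tit;
}

int main(void)
{
	struct ref ref = { .tl = { .cur = 0, .nr = 2 }, .tit = NULL };
	int visited = 0;

	while (next_tgt(&ref))
		visited++;
	printf("targets visited: %d, end marker set: %s\n",
	       visited, IS_ERR(ref.tit) ? "yes" : "no");
	return 0;
}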
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 541608b0267e..4dada26d56b5 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1096,11 +1096,8 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
struct TCP_Server_Info *server = tcon->ses->server;
- struct sockaddr_storage ss;
- const char *host;
const char *s2 = &tcon->tree_name[1];
- size_t hostlen;
- char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ struct sockaddr_storage ss;
bool match;
int rc;
@@ -1111,18 +1108,13 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
* Resolve share's hostname and check if server address matches. Otherwise just ignore it
* as we could not have upcall to resolve hostname or failed to convert ip address.
*/
- extract_unc_hostname(s1, &host, &hostlen);
- scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
-
- rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
- if (rc < 0) {
- cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
- __func__, (int)hostlen, host);
+ rc = dns_resolve_unc(server->dns_dom, s1, (struct sockaddr *)&ss);
+ if (rc < 0)
return true;
- }
cifs_server_lock(server);
match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
+ cifs_dbg(FYI, "%s: [share=%s] ipaddr matched: %s\n", __func__, s1, str_yes_no(match));
cifs_server_unlock(server);
return match;
@@ -1144,35 +1136,19 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-static char *get_ses_refpath(struct cifs_ses *ses)
-{
- struct TCP_Server_Info *server = ses->server;
- char *path = ERR_PTR(-ENOENT);
-
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath) {
- path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
- if (!path)
- path = ERR_PTR(-ENOMEM);
- }
- mutex_unlock(&server->refpath_lock);
- return path;
-}
-
/* Refresh dfs referral of @ses */
static void refresh_ses_referral(struct cifs_ses *ses)
{
struct cache_entry *ce;
unsigned int xid;
- char *path;
+ const char *path;
int rc = 0;
xid = get_xid();
- path = get_ses_refpath(ses);
+ path = dfs_ses_refpath(ses);
if (IS_ERR(path)) {
rc = PTR_ERR(path);
- path = NULL;
goto out;
}
@@ -1191,7 +1167,6 @@ static void refresh_ses_referral(struct cifs_ses *ses)
out:
free_xid(xid);
- kfree(path);
}
static int __refresh_tcon_referral(struct cifs_tcon *tcon,
@@ -1241,19 +1216,18 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
struct dfs_info3_param *refs = NULL;
struct cache_entry *ce;
struct cifs_ses *ses;
- unsigned int xid;
bool needs_refresh;
- char *path;
+ const char *path;
+ unsigned int xid;
int numrefs = 0;
int rc = 0;
xid = get_xid();
ses = tcon->ses;
- path = get_ses_refpath(ses);
+ path = dfs_ses_refpath(ses);
if (IS_ERR(path)) {
rc = PTR_ERR(path);
- path = NULL;
goto out;
}
@@ -1281,7 +1255,6 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
out:
free_xid(xid);
- kfree(path);
free_dfs_info_array(refs, numrefs);
}
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 864b194dbaa0..d1e95632ac54 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -627,7 +627,7 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
goto mknod_out;
}
- trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
+ trace_smb3_mknod_enter(xid, tcon->tid, tcon->ses->Suid, full_path);
rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
full_path, mode,
@@ -635,9 +635,9 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
mknod_out:
if (rc)
- trace_smb3_mknod_err(xid, tcon->ses->Suid, tcon->tid, rc);
+ trace_smb3_mknod_err(xid, tcon->tid, tcon->ses->Suid, rc);
else
- trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
+ trace_smb3_mknod_done(xid, tcon->tid, tcon->ses->Suid);
free_dentry_path(page);
free_xid(xid);
@@ -737,7 +737,8 @@ again:
}
static int
-cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+cifs_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *direntry, unsigned int flags)
{
struct inode *inode;
int rc;
diff --git a/fs/smb/client/dns_resolve.c b/fs/smb/client/dns_resolve.c
index 8bf8978bc5d6..de7f4b384718 100644
--- a/fs/smb/client/dns_resolve.c
+++ b/fs/smb/client/dns_resolve.c
@@ -20,69 +20,77 @@
#include "cifsproto.h"
#include "cifs_debug.h"
-/**
- * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
- * @unc: UNC path specifying the server (with '/' as delimiter)
- * @ip_addr: Where to return the IP address.
- * @expiry: Where to return the expiry time for the dns record.
- *
- * Returns zero success, -ve on error.
- */
-int
-dns_resolve_server_name_to_ip(const char *unc, struct sockaddr *ip_addr, time64_t *expiry)
+static int resolve_name(const char *name, size_t namelen, struct sockaddr *addr)
{
- const char *hostname, *sep;
char *ip;
- int len, rc;
+ int rc;
- if (!ip_addr || !unc)
- return -EINVAL;
+ rc = dns_query(current->nsproxy->net_ns, NULL, name,
+ namelen, NULL, &ip, NULL, false);
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
+ __func__, (int)namelen, (int)namelen, name);
+ } else {
+ cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n",
+ __func__, (int)namelen, (int)namelen, name, ip);
- len = strlen(unc);
- if (len < 3) {
- cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc);
- return -EINVAL;
+ rc = cifs_convert_address(addr, ip, strlen(ip));
+ kfree(ip);
+ if (!rc) {
+ cifs_dbg(FYI, "%s: unable to determine ip address\n",
+ __func__);
+ rc = -EHOSTUNREACH;
+ } else {
+ rc = 0;
+ }
}
+ return rc;
+}
- /* Discount leading slashes for cifs */
- len -= 2;
- hostname = unc + 2;
+/**
+ * dns_resolve_name - Perform an upcall to resolve hostname to an ip address.
+ * @dom: DNS domain name (or NULL)
+ * @name: Name to look up
+ * @namelen: Length of name
+ * @ip_addr: Where to return the IP address
+ *
+ * Returns zero on success, -ve code otherwise.
+ */
+int dns_resolve_name(const char *dom, const char *name,
+ size_t namelen, struct sockaddr *ip_addr)
+{
+ size_t len;
+ char *s;
+ int rc;
- /* Search for server name delimiter */
- sep = memchr(hostname, '/', len);
- if (sep)
- len = sep - hostname;
- else
- cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n",
- __func__, unc);
+ cifs_dbg(FYI, "%s: dom=%s name=%.*s\n", __func__, dom, (int)namelen, name);
+ if (!ip_addr || !name || !*name || !namelen)
+ return -EINVAL;
+ cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)namelen, name);
/* Try to interpret hostname as an IPv4 or IPv6 address */
- rc = cifs_convert_address(ip_addr, hostname, len);
+ rc = cifs_convert_address(ip_addr, name, namelen);
if (rc > 0) {
- cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %*.*s\n", __func__, len, len,
- hostname);
+ cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %*.*s\n",
+ __func__, (int)namelen, (int)namelen, name);
return 0;
}
- /* Perform the upcall */
- rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
- NULL, &ip, expiry, false);
- if (rc < 0) {
- cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
- __func__, len, len, hostname);
- } else {
- cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
- __func__, len, len, hostname, ip,
- expiry ? (*expiry) : 0);
-
- rc = cifs_convert_address(ip_addr, ip, strlen(ip));
- kfree(ip);
+ /*
+ * If @name contains a NetBIOS name and @dom has been specified, then
+ * convert @name to an FQDN and try resolving it first.
+ */
+ if (dom && *dom && cifs_netbios_name(name, namelen)) {
+ len = strnlen(dom, CIFS_MAX_DOMAINNAME_LEN) + namelen + 2;
+ s = kmalloc(len, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
- if (!rc) {
- cifs_dbg(FYI, "%s: unable to determine ip address\n", __func__);
- rc = -EHOSTUNREACH;
- } else
- rc = 0;
+ scnprintf(s, len, "%.*s.%s", (int)namelen, name, dom);
+ rc = resolve_name(s, len - 1, ip_addr);
+ kfree(s);
+ if (!rc)
+ return 0;
}
- return rc;
+ return resolve_name(name, namelen, ip_addr);
}
diff --git a/fs/smb/client/dns_resolve.h b/fs/smb/client/dns_resolve.h
index 6eb0c15a2440..0dc706f2c422 100644
--- a/fs/smb/client/dns_resolve.h
+++ b/fs/smb/client/dns_resolve.h
@@ -12,9 +12,30 @@
#define _DNS_RESOLVE_H
#include <linux/net.h>
+#include "cifsglob.h"
+#include "cifsproto.h"
#ifdef __KERNEL__
-int dns_resolve_server_name_to_ip(const char *unc, struct sockaddr *ip_addr, time64_t *expiry);
+
+int dns_resolve_name(const char *dom, const char *name,
+ size_t namelen, struct sockaddr *ip_addr);
+
+static inline int dns_resolve_unc(const char *dom, const char *unc,
+ struct sockaddr *ip_addr)
+{
+ const char *name;
+ size_t namelen;
+
+ if (!unc || strlen(unc) < 3)
+ return -EINVAL;
+
+ extract_unc_hostname(unc, &name, &namelen);
+ if (!namelen)
+ return -EINVAL;
+
+ return dns_resolve_name(dom, name, namelen, ip_addr);
+}
+
#endif /* KERNEL */
#endif /* _DNS_RESOLVE_H */
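
The new dns_resolve_name()/dns_resolve_unc() pair first qualifies a short, NetBIOS-style hostname with the session's DNS domain and only falls back to the bare name if that lookup fails. A rough user-space sketch of that lookup order follows; looks_netbios() is only an assumption approximating cifs_netbios_name(), and resolve() is a stub standing in for the dns_query()/cifs_convert_address() upcall path.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* assumption: "short and dotless" roughly approximates cifs_netbios_name() */
static bool looks_netbios(const char *name, size_t len)
{
	return len && len <= 15 && !memchr(name, '.', len);
}

/* stub standing in for the DNS upcall; pretend every lookup fails */
static int resolve(const char *name)
{
	printf("resolving: %s\n", name);
	return -1;
}

static int resolve_host(const char *dom, const char *name)
{
	char fqdn[256];

	/* try "<name>.<dom>" first, as dns_resolve_name() does */
	if (dom && *dom && looks_netbios(name, strlen(name))) {
		snprintf(fqdn, sizeof(fqdn), "%s.%s", name, dom);
		if (resolve(fqdn) == 0)
			return 0;
	}
	/* otherwise (or on failure) fall back to the name as given */
	return resolve(name);
}

int main(void)
{
	return resolve_host("example.com", "fileserver") ? 1 : 0;
}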
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 49123f458d0c..e9b286d9a7ba 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -133,6 +133,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_flag("rootfs", Opt_rootfs),
fsparam_flag("compress", Opt_compress),
fsparam_flag("witness", Opt_witness),
+ fsparam_flag_no("nativesocket", Opt_nativesocket),
/* Mount options which take uid or gid */
fsparam_uid("backupuid", Opt_backupuid),
@@ -185,6 +186,8 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_string("cache", Opt_cache),
fsparam_string("reparse", Opt_reparse),
fsparam_string("upcall_target", Opt_upcalltarget),
+ fsparam_string("symlink", Opt_symlink),
+ fsparam_string("symlinkroot", Opt_symlinkroot),
/* Arguments that should be ignored */
fsparam_flag("guest", Opt_ignore),
@@ -332,6 +335,7 @@ cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_conte
static const match_table_t reparse_flavor_tokens = {
{ Opt_reparse_default, "default" },
+ { Opt_reparse_none, "none" },
{ Opt_reparse_nfs, "nfs" },
{ Opt_reparse_wsl, "wsl" },
{ Opt_reparse_err, NULL },
@@ -346,6 +350,9 @@ static int parse_reparse_flavor(struct fs_context *fc, char *value,
case Opt_reparse_default:
ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
break;
+ case Opt_reparse_none:
+ ctx->reparse_type = CIFS_REPARSE_TYPE_NONE;
+ break;
case Opt_reparse_nfs:
ctx->reparse_type = CIFS_REPARSE_TYPE_NFS;
break;
@@ -359,6 +366,55 @@ static int parse_reparse_flavor(struct fs_context *fc, char *value,
return 0;
}
+static const match_table_t symlink_flavor_tokens = {
+ { Opt_symlink_default, "default" },
+ { Opt_symlink_none, "none" },
+ { Opt_symlink_native, "native" },
+ { Opt_symlink_unix, "unix" },
+ { Opt_symlink_mfsymlinks, "mfsymlinks" },
+ { Opt_symlink_sfu, "sfu" },
+ { Opt_symlink_nfs, "nfs" },
+ { Opt_symlink_wsl, "wsl" },
+ { Opt_symlink_err, NULL },
+};
+
+static int parse_symlink_flavor(struct fs_context *fc, char *value,
+ struct smb3_fs_context *ctx)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, symlink_flavor_tokens, args)) {
+ case Opt_symlink_default:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT;
+ break;
+ case Opt_symlink_none:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_NONE;
+ break;
+ case Opt_symlink_native:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_NATIVE;
+ break;
+ case Opt_symlink_unix:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_UNIX;
+ break;
+ case Opt_symlink_mfsymlinks:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_MFSYMLINKS;
+ break;
+ case Opt_symlink_sfu:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_SFU;
+ break;
+ case Opt_symlink_nfs:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_NFS;
+ break;
+ case Opt_symlink_wsl:
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_WSL;
+ break;
+ default:
+ cifs_errorf(fc, "bad symlink= option: %s\n", value);
+ return 1;
+ }
+ return 0;
+}
+
#define DUP_CTX_STR(field) \
do { \
if (ctx->field) { \
@@ -385,6 +441,8 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->source = NULL;
new_ctx->iocharset = NULL;
new_ctx->leaf_fullpath = NULL;
+ new_ctx->dns_dom = NULL;
+ new_ctx->symlinkroot = NULL;
/*
* Make sure to stay in sync with smb3_cleanup_fs_context_contents()
*/
@@ -399,6 +457,8 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(nodename);
DUP_CTX_STR(iocharset);
DUP_CTX_STR(leaf_fullpath);
+ DUP_CTX_STR(dns_dom);
+ DUP_CTX_STR(symlinkroot);
return 0;
}
@@ -1725,6 +1785,23 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
if (parse_reparse_flavor(fc, param->string, ctx))
goto cifs_parse_mount_err;
break;
+ case Opt_nativesocket:
+ ctx->nonativesocket = result.negated;
+ break;
+ case Opt_symlink:
+ if (parse_symlink_flavor(fc, param->string, ctx))
+ goto cifs_parse_mount_err;
+ break;
+ case Opt_symlinkroot:
+ if (param->string[0] != '/') {
+ cifs_errorf(fc, "symlinkroot mount options must be absolute path\n");
+ goto cifs_parse_mount_err;
+ }
+ kfree(ctx->symlinkroot);
+ ctx->symlinkroot = kstrdup(param->string, GFP_KERNEL);
+ if (!ctx->symlinkroot)
+ goto cifs_parse_mount_err;
+ break;
}
/* case Opt_ignore: - is ignored as expected ... */
@@ -1733,6 +1810,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
}
+ /*
+ * By default resolve all native absolute symlinks relative to "/mnt/".
+ * The same default is used by the drvfs driver running in WSL for resolving SMB shares.
+ */
+ if (!ctx->symlinkroot)
+ ctx->symlinkroot = kstrdup("/mnt/", GFP_KERNEL);
+
return 0;
cifs_parse_mount_err:
@@ -1743,6 +1827,24 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
return -EINVAL;
}
+enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb)
+{
+ if (cifs_sb->ctx->symlink_type == CIFS_SYMLINK_TYPE_DEFAULT) {
+ if (cifs_sb->ctx->mfsymlinks)
+ return CIFS_SYMLINK_TYPE_MFSYMLINKS;
+ else if (cifs_sb->ctx->sfu_emul)
+ return CIFS_SYMLINK_TYPE_SFU;
+ else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext)
+ return CIFS_SYMLINK_TYPE_UNIX;
+ else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
+ return CIFS_SYMLINK_TYPE_NATIVE;
+ else
+ return CIFS_SYMLINK_TYPE_NONE;
+ } else {
+ return cifs_sb->ctx->symlink_type;
+ }
+}
+
int smb3_init_fs_context(struct fs_context *fc)
{
struct smb3_fs_context *ctx;
@@ -1819,6 +1921,8 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->retrans = 1;
ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
+ ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT;
+ ctx->nonativesocket = 0;
/*
* short int override_uid = -1;
@@ -1863,6 +1967,10 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->prepath = NULL;
kfree(ctx->leaf_fullpath);
ctx->leaf_fullpath = NULL;
+ kfree(ctx->dns_dom);
+ ctx->dns_dom = NULL;
+ kfree(ctx->symlinkroot);
+ ctx->symlinkroot = NULL;
}
void
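
In practice the knobs added above are ordinary mount options; a hypothetical invocation such as "mount.cifs //srv/share /mnt/share -o symlink=wsl,symlinkroot=/mnt,nonativesocket" exercises all three (the paths and share name here are examples only). When symlink= is left at its default, get_cifs_symlink_type() falls back, in order, to mfsymlinks, sfu, the unix extensions, native reparse points (unless reparse=none), and finally none.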
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index ac6baa774ad3..881bfc08667e 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -43,11 +43,24 @@ enum {
enum cifs_reparse_parm {
Opt_reparse_default,
+ Opt_reparse_none,
Opt_reparse_nfs,
Opt_reparse_wsl,
Opt_reparse_err
};
+enum cifs_symlink_parm {
+ Opt_symlink_default,
+ Opt_symlink_none,
+ Opt_symlink_native,
+ Opt_symlink_unix,
+ Opt_symlink_mfsymlinks,
+ Opt_symlink_sfu,
+ Opt_symlink_nfs,
+ Opt_symlink_wsl,
+ Opt_symlink_err
+};
+
enum cifs_sec_param {
Opt_sec_krb5,
Opt_sec_krb5i,
@@ -166,6 +179,9 @@ enum cifs_param {
Opt_cache,
Opt_reparse,
Opt_upcalltarget,
+ Opt_nativesocket,
+ Opt_symlink,
+ Opt_symlinkroot,
/* Mount options to be ignored */
Opt_ignore,
@@ -294,11 +310,17 @@ struct smb3_fs_context {
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
enum cifs_reparse_type reparse_type;
+ enum cifs_symlink_type symlink_type;
+ bool nonativesocket:1;
bool dfs_conn:1; /* set for dfs mounts */
+ char *dns_dom;
+ char *symlinkroot; /* top level directory for native SMB symlinks in absolute format */
};
extern const struct fs_parameter_spec smb3_fs_parameters[];
+extern enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb);
+
extern int smb3_init_fs_context(struct fs_context *fc);
extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
extern void smb3_cleanup_fs_context(struct smb3_fs_context *ctx);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index f146e06c97eb..9cc31cf6ebd0 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -990,7 +990,7 @@ cifs_get_file_info(struct file *filp)
/* TODO: add support to query reparse tag */
data.adjust_tz = false;
if (data.symlink_target) {
- data.symlink = true;
+ data.reparse_point = true;
data.reparse.tag = IO_REPARSE_TAG_SYMLINK;
}
path = build_path_from_dentry(dentry, page);
@@ -1216,6 +1216,11 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
full_path,
iov, data);
}
+
+ if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) {
+ bool directory = le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY;
+ rc = smb2_fix_symlink_target_type(&data->symlink_target, directory, cifs_sb);
+ }
break;
}
@@ -2392,6 +2397,13 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;
+ /*
+ * CIFSSMBRenameOpenFile() uses SMB_SET_FILE_RENAME_INFORMATION
+ * which is SMB PASSTHROUGH level.
+ */
+ if (!(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU))
+ goto do_rename_exit;
+
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index 47ddeb7fa111..6e6c09cc5ce7 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -18,6 +18,7 @@
#include "cifs_unicode.h"
#include "smb2proto.h"
#include "cifs_ioctl.h"
+#include "fs_context.h"
/*
* M-F Symlink Functions - Begin
@@ -604,22 +605,53 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
cifs_dbg(FYI, "symname is %s\n", symname);
/* BB what if DFS and this volume is on different share? BB */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
- rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
- rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon,
- full_path, S_IFLNK, 0, symname);
+ rc = -EOPNOTSUPP;
+ switch (get_cifs_symlink_type(cifs_sb)) {
+ case CIFS_SYMLINK_TYPE_DEFAULT:
+ /* should not happen, get_cifs_symlink_type() resolves the default */
+ break;
+
+ case CIFS_SYMLINK_TYPE_NONE:
+ break;
+
+ case CIFS_SYMLINK_TYPE_UNIX:
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
- } else if (pTcon->unix_ext) {
- rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
- cifs_sb->local_nls,
- cifs_remap(cifs_sb));
+ if (pTcon->unix_ext) {
+ rc = CIFSUnixCreateSymLink(xid, pTcon, full_path,
+ symname,
+ cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ }
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
- } else if (server->ops->create_reparse_symlink) {
- rc = server->ops->create_reparse_symlink(xid, inode, direntry,
- pTcon, full_path,
- symname);
- goto symlink_exit;
+ break;
+
+ case CIFS_SYMLINK_TYPE_MFSYMLINKS:
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ rc = create_mf_symlink(xid, pTcon, cifs_sb,
+ full_path, symname);
+ }
+ break;
+
+ case CIFS_SYMLINK_TYPE_SFU:
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon,
+ full_path, S_IFLNK,
+ 0, symname);
+ }
+ break;
+
+ case CIFS_SYMLINK_TYPE_NATIVE:
+ case CIFS_SYMLINK_TYPE_NFS:
+ case CIFS_SYMLINK_TYPE_WSL:
+ if (server->ops->create_reparse_symlink) {
+ rc = server->ops->create_reparse_symlink(xid, inode,
+ direntry,
+ pTcon,
+ full_path,
+ symname);
+ goto symlink_exit;
+ }
+ break;
}
if (rc == 0) {
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 4373dd64b66d..b328dc5c7988 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -101,6 +101,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree_sensitive(buf_to_free->password2);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
+ kfree(buf_to_free->dns_dom);
kfree_sensitive(buf_to_free->auth_key.response);
spin_lock(&buf_to_free->iface_lock);
list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
@@ -908,9 +909,9 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
if (*num_of_nodes < 1) {
- cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
- *num_of_nodes);
- rc = -EINVAL;
+ cifs_dbg(VFS | ONCE, "%s: [path=%s] num_referrals must be at least > 0, but we got %d\n",
+ __func__, searchName, *num_of_nodes);
+ rc = -ENOENT;
goto parse_DFS_referrals_exit;
}
@@ -1171,33 +1172,25 @@ void cifs_put_tcp_super(struct super_block *sb)
#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
- const char *share, size_t share_len,
+ const char *host, size_t hostlen,
bool *result)
{
- int rc;
- char *target;
struct sockaddr_storage ss;
+ int rc;
- *result = false;
-
- target = kzalloc(share_len + 3, GFP_KERNEL);
- if (!target)
- return -ENOMEM;
-
- scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
-
- cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
+ cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)hostlen, host);
- rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
- kfree(target);
+ *result = false;
+ rc = dns_resolve_name(server->dns_dom, host, hostlen,
+ (struct sockaddr *)&ss);
if (rc < 0)
return rc;
spin_lock(&server->srv_lock);
*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
spin_unlock(&server->srv_lock);
- cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
+ cifs_dbg(FYI, "%s: ip addresses matched: %s\n", __func__, str_yes_no(*result));
return 0;
}
diff --git a/fs/smb/client/netmisc.c b/fs/smb/client/netmisc.c
index 2a8d71221e5e..9ec20601cee2 100644
--- a/fs/smb/client/netmisc.c
+++ b/fs/smb/client/netmisc.c
@@ -313,7 +313,6 @@ static const struct {
ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, {
ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, {
ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, {
- ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, {
ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
/* { This NT error code was 'sqashed'
@@ -775,10 +774,10 @@ cifs_print_status(__u32 status_code)
int idx = 0;
while (nt_errs[idx].nt_errstr != NULL) {
- if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
- (status_code & 0xFFFFFF)) {
+ if (nt_errs[idx].nt_errcode == status_code) {
pr_notice("Status code returned 0x%08x %s\n",
status_code, nt_errs[idx].nt_errstr);
+ return;
}
idx++;
}
@@ -871,6 +870,15 @@ map_smb_to_linux_error(char *buf, bool logErr)
}
/* else ERRHRD class errors or junk - return EIO */
+ /* special cases for NT status codes which cannot be translated to DOS codes */
+ if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
+ __u32 err = le32_to_cpu(smb->Status.CifsError);
+ if (err == (NT_STATUS_NOT_A_REPARSE_POINT))
+ rc = -ENODATA;
+ else if (err == (NT_STATUS_PRIVILEGE_NOT_HELD))
+ rc = -EPERM;
+ }
+
cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n",
le32_to_cpu(smb->Status.CifsError), rc);
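
The two special cases added here pair with the table change above: the ERRnoaccess entry for NT_STATUS_PRIVILEGE_NOT_HELD is removed from the NT-to-DOS mapping so that this path can return -EPERM rather than the generic access-denied mapping, and NT_STATUS_NOT_A_REPARSE_POINT, which has no DOS-code equivalent, now yields -ENODATA rather than the default mapping.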
diff --git a/fs/smb/client/nterr.c b/fs/smb/client/nterr.c
index 358a766375b4..8f0bc441295e 100644
--- a/fs/smb/client/nterr.c
+++ b/fs/smb/client/nterr.c
@@ -13,6 +13,13 @@
const struct nt_err_code_struct nt_errs[] = {
{"NT_STATUS_OK", NT_STATUS_OK},
+ {"NT_STATUS_MEDIA_CHANGED", NT_STATUS_MEDIA_CHANGED},
+ {"NT_STATUS_END_OF_MEDIA", NT_STATUS_END_OF_MEDIA},
+ {"NT_STATUS_MEDIA_CHECK", NT_STATUS_MEDIA_CHECK},
+ {"NT_STATUS_NO_DATA_DETECTED", NT_STATUS_NO_DATA_DETECTED},
+ {"NT_STATUS_STOPPED_ON_SYMLINK", NT_STATUS_STOPPED_ON_SYMLINK},
+ {"NT_STATUS_DEVICE_REQUIRES_CLEANING", NT_STATUS_DEVICE_REQUIRES_CLEANING},
+ {"NT_STATUS_DEVICE_DOOR_OPEN", NT_STATUS_DEVICE_DOOR_OPEN},
{"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL},
{"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED},
{"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS},
@@ -667,8 +674,10 @@ const struct nt_err_code_struct nt_errs[] = {
{"NT_STATUS_QUOTA_LIST_INCONSISTENT",
NT_STATUS_QUOTA_LIST_INCONSISTENT},
{"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE},
+ {"NT_STATUS_NOT_A_REPARSE_POINT", NT_STATUS_NOT_A_REPARSE_POINT},
{"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES},
{"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES},
{"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED},
+ {"NT_STATUS_NO_SUCH_JOB", NT_STATUS_NO_SUCH_JOB},
{NULL, 0}
};
diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h
index edd4741cab0a..180602c22355 100644
--- a/fs/smb/client/nterr.h
+++ b/fs/smb/client/nterr.h
@@ -546,6 +546,7 @@ extern const struct nt_err_code_struct nt_errs[];
#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265
#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266
#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267
+#define NT_STATUS_NOT_A_REPARSE_POINT 0xC0000000 | 0x0275
#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE /* scheduler */
#endif /* _NTERR_H */
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 273358d20a46..50f96259d9ad 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -413,7 +413,7 @@ ffirst_retry:
cifsFile->invalidHandle = false;
} else if ((rc == -EOPNOTSUPP) &&
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+ cifs_autodisable_serverino(cifs_sb);
goto ffirst_retry;
}
error_exit:
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index d88b41133e00..0a5a52a8a7dd 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -14,6 +14,20 @@
#include "fs_context.h"
#include "reparse.h"
+static int mknod_nfs(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname);
+
+static int mknod_wsl(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname);
+
+static int create_native_symlink(const unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, const char *symname);
+
static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb,
const unsigned int xid,
const char *full_path,
@@ -24,37 +38,148 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, const char *symname)
{
+ switch (get_cifs_symlink_type(CIFS_SB(inode->i_sb))) {
+ case CIFS_SYMLINK_TYPE_NATIVE:
+ return create_native_symlink(xid, inode, dentry, tcon, full_path, symname);
+ case CIFS_SYMLINK_TYPE_NFS:
+ return mknod_nfs(xid, inode, dentry, tcon, full_path, S_IFLNK, 0, symname);
+ case CIFS_SYMLINK_TYPE_WSL:
+ return mknod_wsl(xid, inode, dentry, tcon, full_path, S_IFLNK, 0, symname);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int create_native_symlink(const unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, const char *symname)
+{
struct reparse_symlink_data_buffer *buf = NULL;
- struct cifs_open_info_data data;
+ struct cifs_open_info_data data = {};
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct inode *new;
struct kvec iov;
- __le16 *path;
+ __le16 *path = NULL;
bool directory;
- char *sym, sep = CIFS_DIR_SEP(cifs_sb);
- u16 len, plen;
+ char *symlink_target = NULL;
+ char *sym = NULL;
+ char sep = CIFS_DIR_SEP(cifs_sb);
+ u16 len, plen, poff, slen;
int rc = 0;
if (strlen(symname) > REPARSE_SYM_PATH_MAX)
return -ENAMETOOLONG;
- sym = kstrdup(symname, GFP_KERNEL);
- if (!sym)
- return -ENOMEM;
+ symlink_target = kstrdup(symname, GFP_KERNEL);
+ if (!symlink_target) {
+ rc = -ENOMEM;
+ goto out;
+ }
data = (struct cifs_open_info_data) {
.reparse_point = true,
.reparse = { .tag = IO_REPARSE_TAG_SYMLINK, },
- .symlink_target = sym,
+ .symlink_target = symlink_target,
};
- convert_delimiter(sym, sep);
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ /*
+ * This is a request to create an absolute symlink on the server
+ * which does not support POSIX paths and expects the symlink
+ * target as an NT-style path, so convert the absolute Linux
+ * symlink target path to an absolute NT-style path. The root of
+ * the NT-style path for symlinks is specified in the "symlinkroot"
+ * mount option. This ensures compatibility of this symlink stored
+ * in absolute form on the SMB server.
+ */
+ if (!strstarts(symname, cifs_sb->ctx->symlinkroot)) {
+ /*
+ * If the absolute Linux symlink target path is not
+ * inside "symlinkroot" location then there is no way
+ * to convert such Linux symlink to NT-style path.
+ */
+ cifs_dbg(VFS,
+ "absolute symlink '%s' cannot be converted to NT format "
+ "because it is outside of symlinkroot='%s'\n",
+ symname, cifs_sb->ctx->symlinkroot);
+ rc = -EINVAL;
+ goto out;
+ }
+ len = strlen(cifs_sb->ctx->symlinkroot);
+ if (cifs_sb->ctx->symlinkroot[len-1] != '/')
+ len++;
+ if (symname[len] >= 'a' && symname[len] <= 'z' &&
+ (symname[len+1] == '/' || symname[len+1] == '\0')) {
+ /*
+ * Symlink points to Linux target /symlinkroot/x/path/...
+ * where 'x' is the lowercase local Windows drive.
+ * The NT-style path for 'x' has the common form \??\X:\path\...
+ * with an uppercase local Windows drive.
+ */
+ int common_path_len = strlen(symname+len+1)+1;
+ sym = kzalloc(6+common_path_len, GFP_KERNEL);
+ if (!sym) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(sym, "\\??\\", 4);
+ sym[4] = symname[len] - ('a'-'A');
+ sym[5] = ':';
+ memcpy(sym+6, symname+len+1, common_path_len);
+ } else {
+ /* Unhandled absolute symlink. Report an error. */
+ cifs_dbg(
+ VFS,
+ "absolute symlink '%s' cannot be converted to NT format "
+ "because it points to unknown target\n",
+ symname);
+ rc = -EINVAL;
+ goto out;
+ }
+ } else {
+ /*
+ * This is a request either to create an absolute symlink on a
+ * server which expects POSIX paths, or to create a relative
+ * symlink from the current directory. These paths have the same
+ * format as relative SMB symlinks, so no conversion is needed.
+ * So just take symname as-is.
+ */
+ sym = kstrdup(symname, GFP_KERNEL);
+ if (!sym) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (sep == '\\')
+ convert_delimiter(sym, sep);
+
+ /*
+ * For absolute NT symlinks it is also required to pass the leading
+ * backslash and to not mangle the NT object prefix "\\??\\" or the
+ * colon in the drive letter. But cifs_convert_path_to_utf16()
+ * removes the leading backslash and replaces '?' and ':'. So
+ * temporarily mask these characters in the NT object prefix with
+ * '_' and then change them back.
+ */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/')
+ sym[0] = sym[1] = sym[2] = sym[5] = '_';
+
path = cifs_convert_path_to_utf16(sym, cifs_sb);
if (!path) {
rc = -ENOMEM;
goto out;
}
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ sym[0] = '\\';
+ sym[1] = sym[2] = '?';
+ sym[5] = ':';
+ path[0] = cpu_to_le16('\\');
+ path[1] = path[2] = cpu_to_le16('?');
+ path[5] = cpu_to_le16(':');
+ }
+
/*
* SMB distinguish between symlink to directory and symlink to file.
* They cannot be exchanged (symlink of file type which points to
@@ -67,8 +192,18 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
if (rc < 0)
goto out;
- plen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
- len = sizeof(*buf) + plen * 2;
+ slen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
+ poff = 0;
+ plen = slen;
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ /*
+ * For absolute NT symlinks skip leading "\\??\\" in PrintName as
+ * PrintName is the user-visible location in DOS/Win32 format (not in NT format).
+ */
+ poff = 4;
+ plen -= 2 * poff;
+ }
+ len = sizeof(*buf) + plen + slen;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
rc = -ENOMEM;
@@ -77,17 +212,17 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
buf->ReparseTag = cpu_to_le32(IO_REPARSE_TAG_SYMLINK);
buf->ReparseDataLength = cpu_to_le16(len - sizeof(struct reparse_data_buffer));
+
buf->SubstituteNameOffset = cpu_to_le16(plen);
- buf->SubstituteNameLength = cpu_to_le16(plen);
- memcpy(&buf->PathBuffer[plen], path, plen);
+ buf->SubstituteNameLength = cpu_to_le16(slen);
+ memcpy(&buf->PathBuffer[plen], path, slen);
+
buf->PrintNameOffset = 0;
buf->PrintNameLength = cpu_to_le16(plen);
- memcpy(buf->PathBuffer, path, plen);
+ memcpy(buf->PathBuffer, path+poff, plen);
+
buf->Flags = cpu_to_le32(*symname != '/' ? SYMLINK_FLAG_RELATIVE : 0);
- if (*sym != sep)
- buf->Flags = cpu_to_le32(SYMLINK_FLAG_RELATIVE);
- convert_delimiter(sym, '/');
iov.iov_base = buf;
iov.iov_len = len;
new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
@@ -98,6 +233,7 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
else
rc = PTR_ERR(new);
out:
+ kfree(sym);
kfree(path);
cifs_free_open_info(&data);
kfree(buf);
@@ -242,8 +378,39 @@ static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb,
return 0;
}
-static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
+static int create_native_socket(const unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path)
+{
+ struct reparse_data_buffer buf = {
+ .ReparseTag = cpu_to_le32(IO_REPARSE_TAG_AF_UNIX),
+ .ReparseDataLength = cpu_to_le16(0),
+ };
+ struct cifs_open_info_data data = {
+ .reparse_point = true,
+ .reparse = { .tag = IO_REPARSE_TAG_AF_UNIX, .buf = &buf, },
+ };
+ struct kvec iov = {
+ .iov_base = &buf,
+ .iov_len = sizeof(buf),
+ };
+ struct inode *new;
+ int rc = 0;
+
+ new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
+ tcon, full_path, false, &iov, NULL);
+ if (!IS_ERR(new))
+ d_instantiate(dentry, new);
+ else
+ rc = PTR_ERR(new);
+ cifs_free_open_info(&data);
+ return rc;
+}
+
+static int nfs_set_reparse_buf(struct reparse_nfs_data_buffer *buf,
mode_t mode, dev_t dev,
+ __le16 *symname_utf16,
+ int symname_utf16_len,
struct kvec *iov)
{
u64 type;
@@ -254,7 +421,13 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
switch ((type = reparse_mode_nfs_type(mode))) {
case NFS_SPECFILE_BLK:
case NFS_SPECFILE_CHR:
- dlen = sizeof(__le64);
+ dlen = 2 * sizeof(__le32);
+ ((__le32 *)buf->DataBuffer)[0] = cpu_to_le32(MAJOR(dev));
+ ((__le32 *)buf->DataBuffer)[1] = cpu_to_le32(MINOR(dev));
+ break;
+ case NFS_SPECFILE_LNK:
+ dlen = symname_utf16_len;
+ memcpy(buf->DataBuffer, symname_utf16, symname_utf16_len);
break;
case NFS_SPECFILE_FIFO:
case NFS_SPECFILE_SOCK:
@@ -269,8 +442,6 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
buf->InodeType = cpu_to_le64(type);
buf->ReparseDataLength = cpu_to_le16(len + dlen -
sizeof(struct reparse_data_buffer));
- *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MINOR(dev) << 32) |
- MAJOR(dev));
iov->iov_base = buf;
iov->iov_len = len + dlen;
return 0;
@@ -278,23 +449,45 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
static int mknod_nfs(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, umode_t mode, dev_t dev)
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_open_info_data data;
- struct reparse_posix_data *p;
+ struct reparse_nfs_data_buffer *p = NULL;
+ __le16 *symname_utf16 = NULL;
+ int symname_utf16_len = 0;
struct inode *new;
struct kvec iov;
__u8 buf[sizeof(*p) + sizeof(__le64)];
int rc;
- p = (struct reparse_posix_data *)buf;
- rc = nfs_set_reparse_buf(p, mode, dev, &iov);
+ if (S_ISLNK(mode)) {
+ symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname),
+ &symname_utf16_len,
+ cifs_sb->local_nls,
+ NO_MAP_UNI_RSVD);
+ if (!symname_utf16) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ symname_utf16_len -= 2; /* symlink is without trailing wide-nul */
+ p = kzalloc(sizeof(*p) + symname_utf16_len, GFP_KERNEL);
+ if (!p) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ } else {
+ p = (struct reparse_nfs_data_buffer *)buf;
+ }
+ rc = nfs_set_reparse_buf(p, mode, dev, symname_utf16, symname_utf16_len, &iov);
if (rc)
- return rc;
+ goto out;
data = (struct cifs_open_info_data) {
.reparse_point = true,
- .reparse = { .tag = IO_REPARSE_TAG_NFS, .posix = p, },
+ .reparse = { .tag = IO_REPARSE_TAG_NFS, .buf = (struct reparse_data_buffer *)p, },
+ .symlink_target = kstrdup(symname, GFP_KERNEL),
};
new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
@@ -304,12 +497,25 @@ static int mknod_nfs(unsigned int xid, struct inode *inode,
else
rc = PTR_ERR(new);
cifs_free_open_info(&data);
+out:
+ if (S_ISLNK(mode)) {
+ kfree(symname_utf16);
+ kfree(p);
+ }
return rc;
}
-static int wsl_set_reparse_buf(struct reparse_data_buffer *buf,
- mode_t mode, struct kvec *iov)
+static int wsl_set_reparse_buf(struct reparse_data_buffer **buf,
+ mode_t mode, const char *symname,
+ struct cifs_sb_info *cifs_sb,
+ struct kvec *iov)
{
+ struct reparse_wsl_symlink_data_buffer *symlink_buf;
+ __le16 *symname_utf16;
+ int symname_utf16_len;
+ int symname_utf8_maxlen;
+ int symname_utf8_len;
+ size_t buf_len;
u32 tag;
switch ((tag = reparse_mode_wsl_tag(mode))) {
@@ -317,16 +523,45 @@ static int wsl_set_reparse_buf(struct reparse_data_buffer *buf,
case IO_REPARSE_TAG_LX_CHR:
case IO_REPARSE_TAG_LX_FIFO:
case IO_REPARSE_TAG_AF_UNIX:
+ buf_len = sizeof(struct reparse_data_buffer);
+ *buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ break;
+ case IO_REPARSE_TAG_LX_SYMLINK:
+ symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname),
+ &symname_utf16_len,
+ cifs_sb->local_nls,
+ NO_MAP_UNI_RSVD);
+ if (!symname_utf16)
+ return -ENOMEM;
+ symname_utf8_maxlen = symname_utf16_len/2*3;
+ symlink_buf = kzalloc(sizeof(struct reparse_wsl_symlink_data_buffer) +
+ symname_utf8_maxlen, GFP_KERNEL);
+ if (!symlink_buf) {
+ kfree(symname_utf16);
+ return -ENOMEM;
+ }
+ /* Flag 0x02000000 is unknown, but all wsl symlinks have this value */
+ symlink_buf->Flags = cpu_to_le32(0x02000000);
+ /* PathBuffer is in UTF-8 but without trailing null-term byte */
+ symname_utf8_len = utf16s_to_utf8s((wchar_t *)symname_utf16, symname_utf16_len/2,
+ UTF16_LITTLE_ENDIAN,
+ symlink_buf->PathBuffer,
+ symname_utf8_maxlen);
+ *buf = (struct reparse_data_buffer *)symlink_buf;
+ buf_len = sizeof(struct reparse_wsl_symlink_data_buffer) + symname_utf8_len;
+ kfree(symname_utf16);
break;
default:
return -EOPNOTSUPP;
}
- buf->ReparseTag = cpu_to_le32(tag);
- buf->Reserved = 0;
- buf->ReparseDataLength = 0;
- iov->iov_base = buf;
- iov->iov_len = sizeof(*buf);
+ (*buf)->ReparseTag = cpu_to_le32(tag);
+ (*buf)->Reserved = 0;
+ (*buf)->ReparseDataLength = cpu_to_le16(buf_len - sizeof(struct reparse_data_buffer));
+ iov->iov_base = *buf;
+ iov->iov_len = buf_len;
return 0;
}
@@ -415,27 +650,32 @@ static int wsl_set_xattrs(struct inode *inode, umode_t _mode,
static int mknod_wsl(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, umode_t mode, dev_t dev)
+ const char *full_path, umode_t mode, dev_t dev,
+ const char *symname)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_open_info_data data;
- struct reparse_data_buffer buf;
+ struct reparse_data_buffer *buf;
struct smb2_create_ea_ctx *cc;
struct inode *new;
unsigned int len;
struct kvec reparse_iov, xattr_iov;
int rc;
- rc = wsl_set_reparse_buf(&buf, mode, &reparse_iov);
+ rc = wsl_set_reparse_buf(&buf, mode, symname, cifs_sb, &reparse_iov);
if (rc)
return rc;
rc = wsl_set_xattrs(inode, mode, dev, &xattr_iov);
- if (rc)
+ if (rc) {
+ kfree(buf);
return rc;
+ }
data = (struct cifs_open_info_data) {
.reparse_point = true,
- .reparse = { .tag = le32_to_cpu(buf.ReparseTag), .buf = &buf, },
+ .reparse = { .tag = le32_to_cpu(buf->ReparseTag), .buf = buf, },
+ .symlink_target = kstrdup(symname, GFP_KERNEL),
};
cc = xattr_iov.iov_base;
@@ -452,6 +692,7 @@ static int mknod_wsl(unsigned int xid, struct inode *inode,
rc = PTR_ERR(new);
cifs_free_open_info(&data);
kfree(xattr_iov.iov_base);
+ kfree(buf);
return rc;
}
@@ -460,21 +701,22 @@ int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
const char *full_path, umode_t mode, dev_t dev)
{
struct smb3_fs_context *ctx = CIFS_SB(inode->i_sb)->ctx;
- int rc = -EOPNOTSUPP;
+
+ if (S_ISSOCK(mode) && !ctx->nonativesocket && ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
+ return create_native_socket(xid, inode, dentry, tcon, full_path);
switch (ctx->reparse_type) {
case CIFS_REPARSE_TYPE_NFS:
- rc = mknod_nfs(xid, inode, dentry, tcon, full_path, mode, dev);
- break;
+ return mknod_nfs(xid, inode, dentry, tcon, full_path, mode, dev, NULL);
case CIFS_REPARSE_TYPE_WSL:
- rc = mknod_wsl(xid, inode, dentry, tcon, full_path, mode, dev);
- break;
+ return mknod_wsl(xid, inode, dentry, tcon, full_path, mode, dev, NULL);
+ default:
+ return -EOPNOTSUPP;
}
- return rc;
}
/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
-static int parse_reparse_posix(struct reparse_posix_data *buf,
+static int parse_reparse_nfs(struct reparse_nfs_data_buffer *buf,
struct cifs_sb_info *cifs_sb,
struct cifs_open_info_data *data)
{
@@ -536,43 +778,160 @@ static int parse_reparse_posix(struct reparse_posix_data *buf,
}
int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
- bool unicode, bool relative,
+ bool relative,
const char *full_path,
struct cifs_sb_info *cifs_sb)
{
char sep = CIFS_DIR_SEP(cifs_sb);
char *linux_target = NULL;
char *smb_target = NULL;
+ int symlinkroot_len;
+ int abs_path_len;
+ char *abs_path;
int levels;
int rc;
int i;
- /* Check that length it valid for unicode/non-unicode mode */
- if (!len || (unicode && (len % 2))) {
+ /* Check that length is valid */
+ if (!len || (len % 2)) {
cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
rc = -EIO;
goto out;
}
/*
- * Check that buffer does not contain UTF-16 null codepoint in unicode
- * mode or null byte in non-unicode mode because Linux cannot process
- * symlink with null byte.
+ * Check that the buffer does not contain a UTF-16 null codepoint,
+ * because Linux cannot process a symlink containing a null byte.
*/
- if ((unicode && UniStrnlen((wchar_t *)buf, len/2) != len/2) ||
- (!unicode && strnlen(buf, len) != len)) {
+ if (UniStrnlen((wchar_t *)buf, len/2) != len/2) {
cifs_dbg(VFS, "srv returned null byte in native symlink target location\n");
rc = -EIO;
goto out;
}
- smb_target = cifs_strndup_from_utf16(buf, len, unicode, cifs_sb->local_nls);
+ smb_target = cifs_strndup_from_utf16(buf, len, true, cifs_sb->local_nls);
if (!smb_target) {
rc = -ENOMEM;
goto out;
}
- if (smb_target[0] == sep && relative) {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && !relative) {
+ /*
+ * This is an absolute symlink from the server which does not
+ * support POSIX paths, so the symlink target is an NT-style path.
+ * Convert it to an absolute Linux symlink target path. The root of
+ * the NT-style path for symlinks is specified in the "symlinkroot"
+ * mount option.
+ *
+ * Root of the DOS and Win32 paths is at NT path \??\
+ * It means that DOS/Win32 path C:\folder\file.txt is
+ * NT path \??\C:\folder\file.txt
+ *
+ * NT systems have some well-known object symlinks in their NT
+ * hierarchy, which needs to be taken into account when resolving
+ * other symlinks. Most commonly used symlink paths are:
+ * \?? -> \GLOBAL??
+ * \DosDevices -> \??
+ * \GLOBAL??\GLOBALROOT -> \
+ * \GLOBAL??\Global -> \GLOBAL??
+ * \GLOBAL??\NUL -> \Device\Null
+ * \GLOBAL??\UNC -> \Device\Mup
+ * \GLOBAL??\PhysicalDrive0 -> \Device\Harddisk0\DR0 (for each harddisk)
+ * \GLOBAL??\A: -> \Device\Floppy0 (if A: is the first floppy)
+ * \GLOBAL??\C: -> \Device\HarddiskVolume1 (if C: is the first harddisk)
+ * \GLOBAL??\D: -> \Device\CdRom0 (if D: is first cdrom)
+ * \SystemRoot -> \Device\Harddisk0\Partition1\WINDOWS (or where is NT system installed)
+ * \Volume{...} -> \Device\HarddiskVolume1 (where ... is system generated guid)
+ *
+ * In the most common cases, absolute NT symlinks point to a path on
+ * a DOS/Win32 drive letter, a system-specific Volume, or a UNC share.
+ * Here are a few examples of commonly used absolute NT symlinks
+ * created by mklink.exe tool:
+ * \??\C:\folder\file.txt
+ * \??\\C:\folder\file.txt
+ * \??\UNC\server\share\file.txt
+ * \??\\UNC\server\share\file.txt
+ * \??\Volume{b75e2c83-0000-0000-0000-602f00000000}\folder\file.txt
+ *
+ * It means that the most common path prefix \??\ is itself an NT
+ * path symlink (to \GLOBAL??). It is less common for the second path
+ * separator to be a double backslash, but it is valid.
+ *
+ * The volume guid is randomly generated by the target system, so
+ * only the target system knows the mapping between the guid and the
+ * harddisk number. Over SMB it is not possible to resolve this
+ * mapping, therefore symlinks pointing to the target location of a
+ * volume guid are totally unusable over SMB.
+ *
+ * For now, parse only symlink paths available for DOS and Win32.
+ * Those are paths with the \??\ prefix or paths which point to \??\
+ * via another NT symlink (\DosDevices\, \GLOBAL??\, ...).
+ */
+ abs_path = smb_target;
+globalroot:
+ if (strstarts(abs_path, "\\??\\"))
+ abs_path += sizeof("\\??\\")-1;
+ else if (strstarts(abs_path, "\\DosDevices\\"))
+ abs_path += sizeof("\\DosDevices\\")-1;
+ else if (strstarts(abs_path, "\\GLOBAL??\\"))
+ abs_path += sizeof("\\GLOBAL??\\")-1;
+ else {
+ /* Unhandled absolute symlink, points outside of DOS/Win32 */
+ cifs_dbg(VFS,
+ "absolute symlink '%s' cannot be converted from NT format "
+ "because points to unknown target\n",
+ smb_target);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* Sometimes path separator after \?? is double backslash */
+ if (abs_path[0] == '\\')
+ abs_path++;
+
+ while (strstarts(abs_path, "Global\\"))
+ abs_path += sizeof("Global\\")-1;
+
+ if (strstarts(abs_path, "GLOBALROOT\\")) {
+ /* Label globalroot requires path with leading '\\', so do not trim '\\' */
+ abs_path += sizeof("GLOBALROOT")-1;
+ goto globalroot;
+ }
+
+ /* For now parse only paths to drive letters */
+ if (((abs_path[0] >= 'A' && abs_path[0] <= 'Z') ||
+ (abs_path[0] >= 'a' && abs_path[0] <= 'z')) &&
+ abs_path[1] == ':' &&
+ (abs_path[2] == '\\' || abs_path[2] == '\0')) {
+ /* Convert drive letter to lowercase and drop colon */
+ char drive_letter = abs_path[0];
+ if (drive_letter >= 'A' && drive_letter <= 'Z')
+ drive_letter += 'a'-'A';
+ abs_path++;
+ abs_path[0] = drive_letter;
+ } else {
+ /* Unhandled absolute symlink. Report an error. */
+ cifs_dbg(VFS,
+ "absolute symlink '%s' cannot be converted from NT format "
+ "because points to unknown target\n",
+ smb_target);
+ rc = -EIO;
+ goto out;
+ }
+
+ abs_path_len = strlen(abs_path)+1;
+ symlinkroot_len = strlen(cifs_sb->ctx->symlinkroot);
+ if (cifs_sb->ctx->symlinkroot[symlinkroot_len-1] == '/')
+ symlinkroot_len--;
+ linux_target = kmalloc(symlinkroot_len + 1 + abs_path_len, GFP_KERNEL);
+ if (!linux_target) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(linux_target, cifs_sb->ctx->symlinkroot, symlinkroot_len);
+ linux_target[symlinkroot_len] = '/';
+ memcpy(linux_target + symlinkroot_len + 1, abs_path, abs_path_len);
+ } else if (smb_target[0] == sep && relative) {
/*
* This is a relative SMB symlink from the top of the share,
* which is the top level directory of the Linux mount point.
@@ -601,6 +960,12 @@ int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
}
memcpy(linux_target + levels*3, smb_target+1, smb_target_len); /* +1 to skip leading sep */
} else {
+ /*
+ * This is either an absolute symlink in POSIX-style format
+ * or a relative SMB symlink from the current directory.
+ * These paths have the same format as Linux symlinks, so no
+ * conversion is needed.
+ */
linux_target = smb_target;
smb_target = NULL;
}
@@ -620,8 +985,8 @@ out:
return rc;
}
-static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym,
- u32 plen, bool unicode,
+static int parse_reparse_native_symlink(struct reparse_symlink_data_buffer *sym,
+ u32 plen,
struct cifs_sb_info *cifs_sb,
const char *full_path,
struct cifs_open_info_data *data)
@@ -641,7 +1006,6 @@ static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym,
return smb2_parse_native_symlink(&data->symlink_target,
sym->PathBuffer + offs,
len,
- unicode,
le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE,
full_path,
cifs_sb);
@@ -696,7 +1060,7 @@ static int parse_reparse_wsl_symlink(struct reparse_wsl_symlink_data_buffer *buf
int parse_reparse_point(struct reparse_data_buffer *buf,
u32 plen, struct cifs_sb_info *cifs_sb,
const char *full_path,
- bool unicode, struct cifs_open_info_data *data)
+ struct cifs_open_info_data *data)
{
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
@@ -705,12 +1069,12 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
/* See MS-FSCC 2.1.2 */
switch (le32_to_cpu(buf->ReparseTag)) {
case IO_REPARSE_TAG_NFS:
- return parse_reparse_posix((struct reparse_posix_data *)buf,
+ return parse_reparse_nfs((struct reparse_nfs_data_buffer *)buf,
cifs_sb, data);
case IO_REPARSE_TAG_SYMLINK:
- return parse_reparse_symlink(
+ return parse_reparse_native_symlink(
(struct reparse_symlink_data_buffer *)buf,
- plen, unicode, cifs_sb, full_path, data);
+ plen, cifs_sb, full_path, data);
case IO_REPARSE_TAG_LX_SYMLINK:
return parse_reparse_wsl_symlink(
(struct reparse_wsl_symlink_data_buffer *)buf,
@@ -744,14 +1108,15 @@ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
buf = (struct reparse_data_buffer *)((u8 *)io +
le32_to_cpu(io->OutputOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
}
-static void wsl_to_fattr(struct cifs_open_info_data *data,
+static bool wsl_to_fattr(struct cifs_open_info_data *data,
struct cifs_sb_info *cifs_sb,
u32 tag, struct cifs_fattr *fattr)
{
struct smb2_file_full_ea_info *ea;
+ bool have_xattr_dev = false;
u32 next = 0;
switch (tag) {
@@ -794,21 +1159,31 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
fattr->cf_uid = wsl_make_kuid(cifs_sb, v);
else if (!strncmp(name, SMB2_WSL_XATTR_GID, nlen))
fattr->cf_gid = wsl_make_kgid(cifs_sb, v);
- else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
+ else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen)) {
+ /* File type in reparse point tag and in xattr mode must match. */
+ if (S_DT(fattr->cf_mode) != S_DT(le32_to_cpu(*(__le32 *)v)))
+ return false;
fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
- else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
+ } else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen)) {
fattr->cf_rdev = reparse_mkdev(v);
+ have_xattr_dev = true;
+ }
} while (next);
out:
+
+ /* Major and minor numbers for char and block devices are mandatory. */
+ if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
+ return false;
+
fattr->cf_dtype = S_DT(fattr->cf_mode);
+ return true;
}
static bool posix_reparse_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr,
struct cifs_open_info_data *data)
{
- struct reparse_posix_data *buf = data->reparse.posix;
-
+ struct reparse_nfs_data_buffer *buf = (struct reparse_nfs_data_buffer *)data->reparse.buf;
if (buf == NULL)
return true;
@@ -874,7 +1249,9 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
case IO_REPARSE_TAG_AF_UNIX:
case IO_REPARSE_TAG_LX_CHR:
case IO_REPARSE_TAG_LX_BLK:
- wsl_to_fattr(data, cifs_sb, tag, fattr);
+ ok = wsl_to_fattr(data, cifs_sb, tag, fattr);
+ if (!ok)
+ return false;
break;
case IO_REPARSE_TAG_NFS:
ok = posix_reparse_to_fattr(cifs_sb, fattr, data);
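The absolute-symlink branch above glues the "symlinkroot" mount option, a '/' separator and the converted NT path into one buffer, first dropping a duplicate trailing slash from the root. A minimal user-space sketch of that concatenation follows; join_symlink_root() and its arguments are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * User-space sketch: build "<symlinkroot>" + "/" + "<abs_path>", dropping a
 * trailing slash from symlinkroot so the separator is not doubled.
 * join_symlink_root() is a hypothetical helper, not kernel code.
 */
static char *join_symlink_root(const char *root, const char *abs_path)
{
	size_t root_len = strlen(root);
	size_t path_len = strlen(abs_path) + 1;		/* include the NUL */
	char *target;

	if (root_len && root[root_len - 1] == '/')
		root_len--;				/* avoid "//" in the result */

	target = malloc(root_len + 1 + path_len);
	if (!target)
		return NULL;

	memcpy(target, root, root_len);
	target[root_len] = '/';
	memcpy(target + root_len + 1, abs_path, path_len);
	return target;
}

int main(void)
{
	char *t = join_symlink_root("/mnt/root/", "etc/hosts");

	printf("%s\n", t ? t : "(alloc failed)");	/* /mnt/root/etc/hosts */
	free(t);
	return 0;
}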
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index ff05b0e75c92..5a753fec7e2c 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -50,6 +50,7 @@ static inline kgid_t wsl_make_kgid(struct cifs_sb_info *cifs_sb,
static inline u64 reparse_mode_nfs_type(mode_t mode)
{
switch (mode & S_IFMT) {
+ case S_IFLNK: return NFS_SPECFILE_LNK;
case S_IFBLK: return NFS_SPECFILE_BLK;
case S_IFCHR: return NFS_SPECFILE_CHR;
case S_IFIFO: return NFS_SPECFILE_FIFO;
@@ -61,6 +62,7 @@ static inline u64 reparse_mode_nfs_type(mode_t mode)
static inline u32 reparse_mode_wsl_tag(mode_t mode)
{
switch (mode & S_IFMT) {
+ case S_IFLNK: return IO_REPARSE_TAG_LX_SYMLINK;
case S_IFBLK: return IO_REPARSE_TAG_LX_BLK;
case S_IFCHR: return IO_REPARSE_TAG_LX_CHR;
case S_IFIFO: return IO_REPARSE_TAG_LX_FIFO;
diff --git a/fs/smb/client/rfc1002pdu.h b/fs/smb/client/rfc1002pdu.h
index ae1d025da294..ac82c2f3a4a2 100644
--- a/fs/smb/client/rfc1002pdu.h
+++ b/fs/smb/client/rfc1002pdu.h
@@ -24,7 +24,7 @@
struct rfc1002_session_packet {
__u8 type;
__u8 flags;
- __u16 length;
+ __be16 length;
union {
struct {
__u8 called_len;
@@ -35,8 +35,8 @@ struct rfc1002_session_packet {
__u8 scope2; /* null */
} __attribute__((packed)) session_req;
struct {
- __u32 retarget_ip_addr;
- __u16 port;
+ __be32 retarget_ip_addr;
+ __be16 port;
} __attribute__((packed)) retarget_resp;
__u8 neg_ses_resp_error_code;
/* POSITIVE_SESSION_RESPONSE packet does not include trailer.
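The retyping above is annotation-only: the RFC 1002 length, retarget address and port are transmitted in network (big-endian) byte order, and marking them __be16/__be32 lets sparse catch missing byte-order conversions. Below is a small user-space sketch of reading such a big-endian length field; parse_len() and the sample packet bytes are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs */

/*
 * The RFC 1002 session packet length travels in network (big-endian)
 * byte order, which is why the field is typed __be16 in the header
 * above. parse_len() is a hypothetical helper.
 */
static uint16_t parse_len(const unsigned char *pkt)
{
	uint16_t be_len;

	memcpy(&be_len, pkt + 2, sizeof(be_len));	/* bytes 2-3 hold the length */
	return ntohs(be_len);				/* convert to host order */
}

int main(void)
{
	/* type=0x00 (session message), flags=0x00, length=0x012c (300) */
	const unsigned char pkt[4] = { 0x00, 0x00, 0x01, 0x2c };

	printf("payload length: %u\n", parse_len(pkt));	/* 300 */
	return 0;
}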
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 91d4d409cb1d..faa80e7d54a6 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -1235,12 +1235,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
switch (requested) {
case Kerberos:
case RawNTLMSSP:
+ case IAKerb:
return requested;
case Unspecified:
if (server->sec_ntlmssp &&
(global_secflags & CIFSSEC_MAY_NTLMSSP))
return RawNTLMSSP;
- if ((server->sec_kerberos || server->sec_mskerberos) &&
+ if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
(global_secflags & CIFSSEC_MAY_KRB5))
return Kerberos;
fallthrough;
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index db3695eddcf9..d6e2fb669c40 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -377,7 +377,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
static void
cifs_downgrade_oplock(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
cifs_set_oplock_level(cinode, oplock);
}
@@ -551,7 +551,7 @@ static int cifs_query_path_info(const unsigned int xid,
int rc;
FILE_ALL_INFO fi = {};
- data->symlink = false;
+ data->reparse_point = false;
data->adjust_tz = false;
/* could do find first instead but this returns more info */
@@ -569,32 +569,8 @@ static int cifs_query_path_info(const unsigned int xid,
}
if (!rc) {
- int tmprc;
- int oplock = 0;
- struct cifs_fid fid;
- struct cifs_open_parms oparms;
-
move_cifs_info_to_smb2(&data->fi, &fi);
-
- if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
- return 0;
-
- oparms = (struct cifs_open_parms) {
- .tcon = tcon,
- .cifs_sb = cifs_sb,
- .desired_access = FILE_READ_ATTRIBUTES,
- .create_options = cifs_create_options(cifs_sb, 0),
- .disposition = FILE_OPEN,
- .path = full_path,
- .fid = &fid,
- };
-
- /* Need to check if this is a symbolic link or not */
- tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
- if (tmprc == -EOPNOTSUPP)
- data->symlink = true;
- else if (tmprc == 0)
- CIFSSMBClose(xid, tcon, fid.netfid);
+ data->reparse_point = le32_to_cpu(fi.Attributes) & ATTR_REPARSE;
}
return rc;
@@ -614,7 +590,13 @@ static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
* There may be higher info levels that work but are there Windows
* server or network appliances for which IndexNumber field is not
* guaranteed unique?
+ *
+ * CIFSGetSrvInodeNumber() uses SMB_QUERY_FILE_INTERNAL_INFO
+ * which is an SMB PASSTHROUGH info level, therefore check for that capability.
+ * Note that this function can be called with tcon == NULL.
*/
+ if (tcon && !(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU))
+ return -EOPNOTSUPP;
return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
cifs_sb->local_nls,
cifs_remap(cifs_sb));
@@ -1004,7 +986,7 @@ static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
le32_to_cpu(io->DataOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
}
static bool
diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
index e836bc2193dd..d609a20fb98a 100644
--- a/fs/smb/client/smb2file.c
+++ b/fs/smb/client/smb2file.c
@@ -42,14 +42,14 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len);
do {
if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) {
- sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData;
+ sym = (struct smb2_symlink_err_rsp *)p->ErrorContextData;
break;
}
cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n",
__func__, le32_to_cpu(p->ErrorId));
len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8);
- p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len);
+ p = (struct smb2_error_context_rsp *)(p->ErrorContextData + len);
} while (p < end);
} else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) &&
iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) {
@@ -63,6 +63,52 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
return sym;
}
+int smb2_fix_symlink_target_type(char **target, bool directory, struct cifs_sb_info *cifs_sb)
+{
+ char *buf;
+ int len;
+
+ /*
+ * A POSIX server does not distinguish between symlinks to files and
+ * symlinks to directories, so there is nothing to fix on the client side.
+ */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ return 0;
+
+ if (!*target)
+ return -EIO;
+
+ len = strlen(*target);
+ if (!len)
+ return -EIO;
+
+ /*
+ * If this is a directory symlink and it does not have a trailing slash
+ * then append one. The trailing slash simulates Windows/SMB behavior,
+ * which does not allow resolving a directory symlink to a file.
+ */
+ if (directory && (*target)[len-1] != '/') {
+ buf = krealloc(*target, len+2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf[len] = '/';
+ buf[len+1] = '\0';
+ *target = buf;
+ len++;
+ }
+
+ /*
+ * If this is a file (non-directory) symlink and it points to a path name
+ * with a trailing slash then it is an invalid symlink, because a file name
+ * cannot contain a slash character. A file name with a slash is invalid on
+ * both Windows and Linux systems, so return an error for such a symlink.
+ */
+ if (!directory && (*target)[len-1] == '/')
+ return -EIO;
+
+ return 0;
+}
+
int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov,
const char *full_path, char **path)
{
@@ -89,7 +135,6 @@ int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec
return smb2_parse_native_symlink(path,
(char *)sym->PathBuffer + sub_offs,
sub_len,
- true,
le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE,
full_path,
cifs_sb);
@@ -133,6 +178,11 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
NULL, NULL, NULL);
oparms->create_options &= ~OPEN_REPARSE_POINT;
}
+ if (!rc) {
+ bool directory = le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY;
+ rc = smb2_fix_symlink_target_type(&data->symlink_target,
+ directory, oparms->cifs_sb);
+ }
}
}
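smb2_fix_symlink_target_type() above normalizes the parsed target: directory symlinks get a trailing slash appended, and file symlinks that already end in a slash are rejected. A user-space sketch of the same rule, with fix_target_type() as a hypothetical stand-in for the kernel helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative stand-in: append '/' for directory symlinks, reject
 * file symlinks whose target ends with '/'.
 */
static int fix_target_type(char **target, int directory)
{
	size_t len;
	char *buf;

	if (!*target || !(len = strlen(*target)))
		return -EIO;

	if (directory && (*target)[len - 1] != '/') {
		buf = realloc(*target, len + 2);
		if (!buf)
			return -ENOMEM;
		buf[len] = '/';
		buf[len + 1] = '\0';
		*target = buf;
	} else if (!directory && (*target)[len - 1] == '/') {
		return -EIO;	/* a file name cannot contain a slash */
	}
	return 0;
}

int main(void)
{
	char *t = strdup("subdir/target");

	if (t && fix_target_type(&t, 1) == 0)
		printf("%s\n", t);	/* subdir/target/ */
	free(t);
	return 0;
}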
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index a55f0044d30b..5dfb30b0a852 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -176,27 +176,27 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec *out_iov, int *out_buftype, struct dentry *dentry)
{
- struct reparse_data_buffer *rbuf;
+ struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_compound_vars *vars = NULL;
- struct kvec *rsp_iov, *iov;
- struct smb_rqst *rqst;
- int rc;
- __le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
- struct cifs_fid fid;
+ struct cifs_open_info_data *idata;
struct cifs_ses *ses = tcon->ses;
+ struct reparse_data_buffer *rbuf;
struct TCP_Server_Info *server;
- int num_rqst = 0, i;
int resp_buftype[MAX_COMPOUND];
- struct smb2_query_info_rsp *qi_rsp = NULL;
- struct cifs_open_info_data *idata;
+ int retries = 0, cur_sleep = 1;
+ __u8 delete_pending[8] = {1,};
+ struct kvec *rsp_iov, *iov;
struct inode *inode = NULL;
- int flags = 0;
- __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
+ __le16 *utf16_path = NULL;
+ struct smb_rqst *rqst;
unsigned int size[2];
- void *data[2];
+ struct cifs_fid fid;
+ int num_rqst = 0, i;
unsigned int len;
- int retries = 0, cur_sleep = 1;
+ int tmp_rc, rc;
+ int flags = 0;
+ void *data[2];
replay_again:
/* reinitialize for possible replay */
@@ -298,8 +298,8 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_query_info_compound_enter(xid, ses->Suid,
- tcon->tid, full_path);
+ trace_smb3_query_info_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
case SMB2_OP_POSIX_QUERY_INFO:
rqst[num_rqst].rq_iov = &vars->qi_iov;
@@ -334,18 +334,18 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_posix_query_info_compound_enter(xid, ses->Suid,
- tcon->tid, full_path);
+ trace_smb3_posix_query_info_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
case SMB2_OP_DELETE:
- trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_MKDIR:
/*
* Directories are created through parameters in the
* SMB2_open() call.
*/
- trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_RMDIR:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -363,7 +363,7 @@ replay_again:
goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_SET_EOF:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -398,7 +398,7 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_set_eof_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_SET_INFO:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -429,8 +429,8 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_set_info_compound_enter(xid, ses->Suid,
- tcon->tid, full_path);
+ trace_smb3_set_info_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
case SMB2_OP_RENAME:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -469,7 +469,7 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_rename_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_HARDLINK:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -496,7 +496,7 @@ replay_again:
goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
+ trace_smb3_hardlink_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_SET_REPARSE:
rqst[num_rqst].rq_iov = vars->io_iov;
@@ -523,8 +523,8 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_set_reparse_compound_enter(xid, ses->Suid,
- tcon->tid, full_path);
+ trace_smb3_set_reparse_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
case SMB2_OP_GET_REPARSE:
rqst[num_rqst].rq_iov = vars->io_iov;
@@ -549,8 +549,8 @@ replay_again:
goto finished;
}
num_rqst++;
- trace_smb3_get_reparse_compound_enter(xid, ses->Suid,
- tcon->tid, full_path);
+ trace_smb3_get_reparse_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
case SMB2_OP_QUERY_WSL_EA:
rqst[num_rqst].rq_iov = &vars->ea_iov;
@@ -584,6 +584,8 @@ replay_again:
goto finished;
}
num_rqst++;
+ trace_smb3_query_wsl_ea_compound_enter(xid, tcon->tid,
+ ses->Suid, full_path);
break;
default:
cifs_dbg(VFS, "Invalid command\n");
@@ -637,7 +639,14 @@ finished:
tcon->need_reconnect = true;
}
+ tmp_rc = rc;
for (i = 0; i < num_cmds; i++) {
+ char *buf = rsp_iov[i + 1].iov_base;
+
+ if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER)
+ rc = server->ops->map_error(buf, false);
+ else
+ rc = tmp_rc;
switch (cmds[i]) {
case SMB2_OP_QUERY_INFO:
idata = in_iov[i].iov_base;
@@ -656,11 +665,11 @@ finished:
}
SMB2_query_info_free(&rqst[num_rqst++]);
if (rc)
- trace_smb3_query_info_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_query_info_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
else
- trace_smb3_query_info_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_query_info_compound_done(xid, tcon->tid,
+ ses->Suid);
break;
case SMB2_OP_POSIX_QUERY_INFO:
idata = in_iov[i].iov_base;
@@ -683,15 +692,15 @@ finished:
SMB2_query_info_free(&rqst[num_rqst++]);
if (rc)
- trace_smb3_posix_query_info_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_posix_query_info_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
else
- trace_smb3_posix_query_info_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_posix_query_info_compound_done(xid, tcon->tid,
+ ses->Suid);
break;
case SMB2_OP_DELETE:
if (rc)
- trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc);
else {
/*
* If dentry (hence, inode) is NULL, lease break is going to
@@ -699,59 +708,59 @@ finished:
*/
if (inode)
cifs_mark_open_handles_for_deleted_file(inode, full_path);
- trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_delete_done(xid, tcon->tid, ses->Suid);
}
break;
case SMB2_OP_MKDIR:
if (rc)
- trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc);
else
- trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_mkdir_done(xid, tcon->tid, ses->Suid);
break;
case SMB2_OP_HARDLINK:
if (rc)
- trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_hardlink_err(xid, tcon->tid, ses->Suid, rc);
else
- trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_hardlink_done(xid, tcon->tid, ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_RENAME:
if (rc)
- trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_rename_err(xid, tcon->tid, ses->Suid, rc);
else
- trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_rename_done(xid, tcon->tid, ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_RMDIR:
if (rc)
- trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc);
else
- trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_EOF:
if (rc)
- trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc);
+ trace_smb3_set_eof_err(xid, tcon->tid, ses->Suid, rc);
else
- trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
+ trace_smb3_set_eof_done(xid, tcon->tid, ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_INFO:
if (rc)
- trace_smb3_set_info_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_set_info_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
else
- trace_smb3_set_info_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_set_info_compound_done(xid, tcon->tid,
+ ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_REPARSE:
if (rc) {
- trace_smb3_set_reparse_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_set_reparse_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
} else {
- trace_smb3_set_reparse_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_set_reparse_compound_done(xid, tcon->tid,
+ ses->Suid);
}
SMB2_ioctl_free(&rqst[num_rqst++]);
break;
@@ -764,18 +773,18 @@ finished:
rbuf = reparse_buf_ptr(iov);
if (IS_ERR(rbuf)) {
rc = PTR_ERR(rbuf);
- trace_smb3_set_reparse_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_get_reparse_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
} else {
idata->reparse.tag = le32_to_cpu(rbuf->ReparseTag);
- trace_smb3_set_reparse_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_get_reparse_compound_done(xid, tcon->tid,
+ ses->Suid);
}
memset(iov, 0, sizeof(*iov));
resp_buftype[i + 1] = CIFS_NO_BUFFER;
} else {
- trace_smb3_set_reparse_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_get_reparse_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
}
SMB2_ioctl_free(&rqst[num_rqst++]);
break;
@@ -792,17 +801,18 @@ finished:
}
}
if (!rc) {
- trace_smb3_query_wsl_ea_compound_done(xid, ses->Suid,
- tcon->tid);
+ trace_smb3_query_wsl_ea_compound_done(xid, tcon->tid,
+ ses->Suid);
} else {
- trace_smb3_query_wsl_ea_compound_err(xid, ses->Suid,
- tcon->tid, rc);
+ trace_smb3_query_wsl_ea_compound_err(xid, tcon->tid,
+ ses->Suid, rc);
}
SMB2_query_info_free(&rqst[num_rqst++]);
break;
}
}
SMB2_close_free(&rqst[num_rqst]);
+ rc = tmp_rc;
num_cmds += 2;
if (out_iov && out_buftype) {
@@ -858,22 +868,52 @@ static int parse_create_response(struct cifs_open_info_data *data,
return rc;
}
+/* Check whether SMB2_OP_QUERY_WSL_EA was the only command in the compound chain that failed */
+static bool ea_unsupported(int *cmds, int num_cmds,
+ struct kvec *out_iov, int *out_buftype)
+{
+ int i;
+
+ if (cmds[num_cmds - 1] != SMB2_OP_QUERY_WSL_EA)
+ return false;
+
+ for (i = 1; i < num_cmds - 1; i++) {
+ struct smb2_hdr *hdr = out_iov[i].iov_base;
+
+ if (out_buftype[i] == CIFS_NO_BUFFER || !hdr ||
+ hdr->Status != STATUS_SUCCESS)
+ return false;
+ }
+ return true;
+}
+
+static inline void free_rsp_iov(struct kvec *iovs, int *buftype, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ free_rsp_buf(buftype[i], iovs[i].iov_base);
+ memset(&iovs[i], 0, sizeof(*iovs));
+ buftype[i] = CIFS_NO_BUFFER;
+ }
+}
+
int smb2_query_path_info(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const char *full_path,
struct cifs_open_info_data *data)
{
+ struct kvec in_iov[3], out_iov[5] = {};
+ struct cached_fid *cfid = NULL;
struct cifs_open_parms oparms;
- __u32 create_options = 0;
struct cifsFileInfo *cfile;
- struct cached_fid *cfid = NULL;
+ __u32 create_options = 0;
+ int out_buftype[5] = {};
struct smb2_hdr *hdr;
- struct kvec in_iov[3], out_iov[3] = {};
- int out_buftype[3] = {};
+ int num_cmds = 0;
int cmds[3];
bool islink;
- int i, num_cmds = 0;
int rc, rc2;
data->adjust_tz = false;
@@ -943,14 +983,14 @@ int smb2_query_path_info(const unsigned int xid,
if (rc || !data->reparse_point)
goto out;
- if (!tcon->posix_extensions)
- cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
/*
* Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
* response.
*/
if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK)
cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
+ if (!tcon->posix_extensions)
+ cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
FILE_READ_ATTRIBUTES |
@@ -958,9 +998,23 @@ int smb2_query_path_info(const unsigned int xid,
FILE_OPEN, create_options |
OPEN_REPARSE_POINT, ACL_NO_MODE);
cifs_get_readable_path(tcon, full_path, &cfile);
+ free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
&oparms, in_iov, cmds, num_cmds,
- cfile, NULL, NULL, NULL);
+ cfile, out_iov, out_buftype, NULL);
+ if (rc && ea_unsupported(cmds, num_cmds,
+ out_iov, out_buftype)) {
+ if (data->reparse.tag != IO_REPARSE_TAG_LX_BLK &&
+ data->reparse.tag != IO_REPARSE_TAG_LX_CHR)
+ rc = 0;
+ else
+ rc = -EOPNOTSUPP;
+ }
+
+ if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) {
+ bool directory = le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY;
+ rc = smb2_fix_symlink_target_type(&data->symlink_target, directory, cifs_sb);
+ }
break;
case -EREMOTE:
break;
@@ -978,8 +1032,7 @@ int smb2_query_path_info(const unsigned int xid,
}
out:
- for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
- free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
+ free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
return rc;
}
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index b05313acf9b2..12c2b868789f 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -380,7 +380,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"},
{STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"},
{STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"},
- {STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"},
+ {STATUS_PRIVILEGE_NOT_HELD, -EPERM, "STATUS_PRIVILEGE_NOT_HELD"},
{STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"},
{STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"},
{STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"},
@@ -871,7 +871,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"},
{STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"},
{STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"},
- {STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"},
+ {STATUS_NOT_A_REPARSE_POINT, -ENODATA, "STATUS_NOT_A_REPARSE_POINT"},
{STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"},
{STATUS_IO_REPARSE_TAG_MISMATCH, -EIO,
"STATUS_IO_REPARSE_TAG_MISMATCH"},
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 7121d9e0f404..ec36bed54b0b 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -658,7 +658,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
- tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
+ /* default to 1Gbps when link speed is unset */
+ tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
@@ -3007,9 +3008,9 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
num_of_nodes, target_nodes,
nls_codepage, remap, search_name,
true /* is_unicode */);
- if (rc) {
- cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
- goto out;
+ if (rc && rc != -ENOENT) {
+ cifs_tcon_dbg(VFS, "%s: failed to parse DFS referral %s: %d\n",
+ __func__, search_name, rc);
}
out:
@@ -3903,22 +3904,22 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache);
+ __u16 epoch, bool *purge_cache);
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
unsigned int old_state = cinode->oplock;
- unsigned int old_epoch = cinode->epoch;
+ __u16 old_epoch = cinode->epoch;
unsigned int new_state;
if (epoch > old_epoch) {
@@ -3938,7 +3939,7 @@ smb3_downgrade_oplock(struct TCP_Server_Info *server,
static void
smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
oplock &= 0xFF;
cinode->lease_granted = false;
@@ -3962,7 +3963,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
char message[5] = {0};
unsigned int new_oplock = 0;
@@ -3999,7 +4000,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
- unsigned int epoch, bool *purge_cache)
+ __u16 epoch, bool *purge_cache)
{
unsigned int old_oplock = cinode->oplock;
@@ -4113,7 +4114,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
}
static __u8
-smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
{
struct create_lease *lc = (struct create_lease *)buf;
@@ -4124,7 +4125,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
}
static __u8
-smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
{
struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
@@ -5076,6 +5077,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
{
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ struct cifs_open_info_data idata;
struct cifs_io_parms io_parms = {};
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_fid fid;
@@ -5145,10 +5147,20 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
CREATE_OPTION_SPECIAL, ACL_NO_MODE);
oparms.fid = &fid;
- rc = server->ops->open(xid, &oparms, &oplock, NULL);
+ rc = server->ops->open(xid, &oparms, &oplock, &idata);
if (rc)
goto out;
+ /*
+ * Check if the server honored the ATTR_SYSTEM flag set via the
+ * CREATE_OPTION_SPECIAL option. If not, the server does not support
+ * ATTR_SYSTEM and the newly created file is not SFU compatible, which
+ * means that the call failed.
+ */
+ if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) {
+ rc = -EOPNOTSUPP;
+ goto out_close;
+ }
+
if (type_len + data_len > 0) {
io_parms.pid = current->tgid;
io_parms.tcon = tcon;
@@ -5163,8 +5175,18 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
iov, ARRAY_SIZE(iov)-1);
}
+out_close:
server->ops->close(xid, tcon, &fid);
+ /*
+ * If CREATE was successful but either setting ATTR_SYSTEM or writing the
+ * type/data information failed, then remove the intermediate object
+ * created by CREATE. Otherwise the intermediate empty object would stay
+ * on the server.
+ */
+ if (rc)
+ server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL);
+
out:
kfree(symname_utf16);
return rc;
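The __cifs_sfu_make_node() hunks above add an out_close label so that when the post-create verification or the type/data write fails, the just-created object is removed instead of lingering on the server. Below is a user-space sketch of the same create/verify/undo pattern; make_special() and its payload are assumptions, not the CIFS implementation.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Sketch of "create, verify, write, undo on failure": if any step after
 * creation fails, the intermediate object is unlinked so it does not
 * linger. Short writes are ignored for brevity.
 */
static int make_special(const char *path, const char *payload)
{
	int rc = 0;
	int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0600);

	if (fd < 0)
		return -errno;

	/* "verify" step: here we simply require a non-empty payload */
	if (!payload || !*payload) {
		rc = -EOPNOTSUPP;
		goto out_close;
	}

	if (write(fd, payload, strlen(payload)) < 0)
		rc = -errno;

out_close:
	close(fd);
	if (rc)
		unlink(path);	/* remove the half-created object on failure */
	return rc;
}

int main(void)
{
	printf("make_special: %d\n", make_special("/tmp/sfu-demo", "IntxCHR"));
	return 0;
}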
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 9f54596a6866..ed7812247ebc 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1429,7 +1429,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
if (server->sec_ntlmssp &&
(global_secflags & CIFSSEC_MAY_NTLMSSP))
return RawNTLMSSP;
- if ((server->sec_kerberos || server->sec_mskerberos) &&
+ if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
(global_secflags & CIFSSEC_MAY_KRB5))
return Kerberos;
fallthrough;
@@ -2169,7 +2169,7 @@ tcon_exit:
tcon_error_exit:
if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
- cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+ cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree);
goto tcon_exit;
}
@@ -2329,7 +2329,7 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
int smb2_parse_contexts(struct TCP_Server_Info *server,
struct kvec *rsp_iov,
- unsigned int *epoch,
+ __u16 *epoch,
char *lease_key, __u8 *oplock,
struct smb2_file_all_info *buf,
struct create_posix_rsp *posix)
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 076d9e83e1a0..3c09a58dfd07 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -79,7 +79,7 @@ struct smb2_symlink_err_rsp {
struct smb2_error_context_rsp {
__le32 ErrorDataLength;
__le32 ErrorId;
- __u8 ErrorContextData; /* ErrorDataLength long array */
+ __u8 ErrorContextData[] __counted_by_le(ErrorDataLength);
} __packed;
/* ErrorId values */
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 09349fa8da03..4662c7e2d259 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -111,8 +111,9 @@ extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_read);
+int smb2_fix_symlink_target_type(char **target, bool directory, struct cifs_sb_info *cifs_sb);
int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
- bool unicode, bool relative,
+ bool relative,
const char *full_path,
struct cifs_sb_info *cifs_sb);
int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb,
@@ -282,7 +283,7 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
int smb2_parse_contexts(struct TCP_Server_Info *server,
struct kvec *rsp_iov,
- unsigned int *epoch,
+ __u16 *epoch,
char *lease_key, __u8 *oplock,
struct smb2_file_all_info *buf,
struct create_posix_rsp *posix);
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 12cbd3428a6d..52bcb55d9952 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -674,6 +674,7 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_reparse_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_wsl_ea_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 3c7c706c797d..3336df2ea5d4 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -1550,7 +1550,19 @@ struct reparse_symlink_data_buffer {
__u8 PathBuffer[]; /* Variable Length */
} __packed;
-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+/* For IO_REPARSE_TAG_NFS - see MS-FSCC 2.1.2.6 */
+#define NFS_SPECFILE_LNK 0x00000000014B4E4C
+#define NFS_SPECFILE_CHR 0x0000000000524843
+#define NFS_SPECFILE_BLK 0x00000000004B4C42
+#define NFS_SPECFILE_FIFO 0x000000004F464946
+#define NFS_SPECFILE_SOCK 0x000000004B434F53
+struct reparse_nfs_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __le64 InodeType; /* NFS_SPECFILE_* */
+ __u8 DataBuffer[];
+} __packed;
/* For IO_REPARSE_TAG_LX_SYMLINK */
struct reparse_wsl_symlink_data_buffer {
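The reparse_nfs_data_buffer added above carries one of the NFS_SPECFILE_* magic values in InodeType; reparse_mode_nfs_type() in reparse.h maps Linux modes to these values, and the reverse mapping looks like the sketch below. nfs_type_to_mode() is an illustrative helper, not code from the patch.

#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

/* NFS_SPECFILE_* values from reparse_nfs_data_buffer above (MS-FSCC 2.1.2.6) */
#define NFS_SPECFILE_LNK	0x00000000014B4E4CULL
#define NFS_SPECFILE_CHR	0x0000000000524843ULL
#define NFS_SPECFILE_BLK	0x00000000004B4C42ULL
#define NFS_SPECFILE_FIFO	0x000000004F464946ULL
#define NFS_SPECFILE_SOCK	0x000000004B434F53ULL

/* Map an NFS reparse InodeType to the corresponding Linux file type bits. */
static mode_t nfs_type_to_mode(uint64_t inode_type)
{
	switch (inode_type) {
	case NFS_SPECFILE_LNK:	return S_IFLNK;
	case NFS_SPECFILE_CHR:	return S_IFCHR;
	case NFS_SPECFILE_BLK:	return S_IFBLK;
	case NFS_SPECFILE_FIFO:	return S_IFIFO;
	case NFS_SPECFILE_SOCK:	return S_IFSOCK;
	default:		return 0;	/* unknown special file type */
	}
}

int main(void)
{
	printf("LNK maps to S_IFLNK: %d\n",
	       nfs_type_to_mode(NFS_SPECFILE_LNK) == S_IFLNK);
	return 0;
}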
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index 3d01d9d15293..3f07a612c05b 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -111,7 +111,8 @@ struct ksmbd_startup_request {
__u32 smb2_max_credits; /* MAX credits */
__u32 smbd_max_io_size; /* smbd read write size */
__u32 max_connections; /* Number of maximum simultaneous connections */
- __u32 reserved[126]; /* Reserved room */
+ __s8 bind_interfaces_only;
+ __s8 reserved[503]; /* Reserved room */
__u32 ifc_list_sz; /* interfaces list size */
__s8 ____payload[];
};
diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
index 94187628ff08..995555febe7d 100644
--- a/fs/smb/server/server.h
+++ b/fs/smb/server/server.h
@@ -46,6 +46,7 @@ struct ksmbd_server_config {
char *conf[SERVER_CONF_WORK_GROUP + 1];
struct task_struct *dh_task;
+ bool bind_interfaces_only;
};
extern struct ksmbd_server_config server_conf;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 772deec5b90f..f1efcd027475 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -38,6 +38,7 @@
#include "mgmt/user_session.h"
#include "mgmt/ksmbd_ida.h"
#include "ndr.h"
+#include "transport_tcp.h"
static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
{
@@ -7759,6 +7760,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
if (netdev->type == ARPHRD_LOOPBACK)
continue;
+ if (!ksmbd_find_netdev_name_iface_list(netdev->name))
+ continue;
+
flags = dev_get_flags(netdev);
if (!(flags & IFF_RUNNING))
continue;
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index befaf42b84cc..0460ebea6ff0 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -333,6 +333,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
ret = ksmbd_set_netbios_name(req->netbios_name);
ret |= ksmbd_set_server_string(req->server_string);
ret |= ksmbd_set_work_group(req->work_group);
+ server_conf.bind_interfaces_only = req->bind_interfaces_only;
ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
req->ifc_list_sz);
if (ret) {
@@ -626,6 +627,9 @@ ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
struct ksmbd_spnego_authen_request *req;
struct ksmbd_spnego_authen_response *resp;
+ if (blob_len > KSMBD_IPC_MAX_PAYLOAD)
+ return NULL;
+
msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
blob_len + 1);
if (!msg)
@@ -805,6 +809,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
+ if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
+ return NULL;
+
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
if (!msg)
return NULL;
@@ -853,6 +860,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
+ if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
+ return NULL;
+
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
if (!msg)
return NULL;
@@ -871,31 +881,6 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
return resp;
}
-struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
- size_t payload_sz)
-{
- struct ksmbd_ipc_msg *msg;
- struct ksmbd_rpc_command *req;
- struct ksmbd_rpc_command *resp;
-
- msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
- if (!msg)
- return NULL;
-
- msg->type = KSMBD_EVENT_RPC_REQUEST;
- req = (struct ksmbd_rpc_command *)msg->payload;
- req->handle = ksmbd_acquire_id(&ipc_ida);
- req->flags = rpc_context_flags(sess);
- req->flags |= KSMBD_RPC_RAP_METHOD;
- req->payload_sz = payload_sz;
- memcpy(req->payload, payload, payload_sz);
-
- resp = ipc_msg_send_request(msg, req->handle);
- ipc_msg_handle_free(req->handle);
- ipc_msg_free(msg);
- return resp;
-}
-
static int __ipc_heartbeat(void)
{
unsigned long delta;
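The new KSMBD_IPC_MAX_PAYLOAD checks above reject oversized payloads before the "header + payload + 1" allocation is sized. A user-space sketch of that pattern; MAX_PAYLOAD, struct msg and alloc_msg() are hypothetical stand-ins for the ksmbd names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_PAYLOAD	(64 * 1024)

struct msg {
	size_t sz;
	unsigned char payload[];
};

/*
 * Bound the caller-supplied size before computing the allocation, so an
 * attacker-controlled length cannot drive a huge or overflowing request.
 */
static struct msg *alloc_msg(const void *payload, size_t payload_sz)
{
	struct msg *m;

	if (payload_sz > MAX_PAYLOAD)
		return NULL;			/* refuse oversized requests */

	m = malloc(sizeof(*m) + payload_sz + 1);
	if (!m)
		return NULL;
	m->sz = payload_sz;
	memcpy(m->payload, payload, payload_sz);
	m->payload[payload_sz] = '\0';
	return m;
}

int main(void)
{
	struct msg *m = alloc_msg("ping", 4);

	printf("%s\n", m ? (char *)m->payload : "rejected");
	free(m);
	return 0;
}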
diff --git a/fs/smb/server/transport_ipc.h b/fs/smb/server/transport_ipc.h
index d9b6737f8cd0..e51850f1423b 100644
--- a/fs/smb/server/transport_ipc.h
+++ b/fs/smb/server/transport_ipc.h
@@ -41,8 +41,6 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle);
struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
void *payload, size_t payload_sz);
-struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
- size_t payload_sz);
void ksmbd_ipc_release(void);
void ksmbd_ipc_soft_reset(void);
int ksmbd_ipc_init(void);
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 0d9007285e30..7f38a3c3f5bd 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -504,32 +504,37 @@ out_clear:
return ret;
}
+struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name)
+{
+ struct interface *iface;
+
+ list_for_each_entry(iface, &iface_list, entry)
+ if (!strcmp(iface->name, netdev_name))
+ return iface;
+ return NULL;
+}
+
static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct interface *iface;
- int ret, found = 0;
+ int ret;
switch (event) {
case NETDEV_UP:
if (netif_is_bridge_port(netdev))
return NOTIFY_OK;
- list_for_each_entry(iface, &iface_list, entry) {
- if (!strcmp(iface->name, netdev->name)) {
- found = 1;
- if (iface->state != IFACE_STATE_DOWN)
- break;
- ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n",
- iface->name);
- ret = create_socket(iface);
- if (ret)
- return NOTIFY_OK;
- break;
- }
+ iface = ksmbd_find_netdev_name_iface_list(netdev->name);
+ if (iface && iface->state == IFACE_STATE_DOWN) {
+ ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n",
+ iface->name);
+ ret = create_socket(iface);
+ if (ret)
+ return NOTIFY_OK;
}
- if (!found && bind_additional_ifaces) {
+ if (!iface && bind_additional_ifaces) {
iface = alloc_iface(kstrdup(netdev->name, KSMBD_DEFAULT_GFP));
if (!iface)
return NOTIFY_OK;
@@ -541,21 +546,19 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
}
break;
case NETDEV_DOWN:
- list_for_each_entry(iface, &iface_list, entry) {
- if (!strcmp(iface->name, netdev->name) &&
- iface->state == IFACE_STATE_CONFIGURED) {
- ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n",
- iface->name);
- tcp_stop_kthread(iface->ksmbd_kthread);
- iface->ksmbd_kthread = NULL;
- mutex_lock(&iface->sock_release_lock);
- tcp_destroy_socket(iface->ksmbd_socket);
- iface->ksmbd_socket = NULL;
- mutex_unlock(&iface->sock_release_lock);
-
- iface->state = IFACE_STATE_DOWN;
- break;
- }
+ iface = ksmbd_find_netdev_name_iface_list(netdev->name);
+ if (iface && iface->state == IFACE_STATE_CONFIGURED) {
+ ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n",
+ iface->name);
+ tcp_stop_kthread(iface->ksmbd_kthread);
+ iface->ksmbd_kthread = NULL;
+ mutex_lock(&iface->sock_release_lock);
+ tcp_destroy_socket(iface->ksmbd_socket);
+ iface->ksmbd_socket = NULL;
+ mutex_unlock(&iface->sock_release_lock);
+
+ iface->state = IFACE_STATE_DOWN;
+ break;
}
break;
}
@@ -624,18 +627,6 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
int sz = 0;
if (!ifc_list_sz) {
- struct net_device *netdev;
-
- rtnl_lock();
- for_each_netdev(&init_net, netdev) {
- if (netif_is_bridge_port(netdev))
- continue;
- if (!alloc_iface(kstrdup(netdev->name, KSMBD_DEFAULT_GFP))) {
- rtnl_unlock();
- return -ENOMEM;
- }
- }
- rtnl_unlock();
bind_additional_ifaces = 1;
return 0;
}
diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h
index e338bebe322f..8c9aa624cfe3 100644
--- a/fs/smb/server/transport_tcp.h
+++ b/fs/smb/server/transport_tcp.h
@@ -7,6 +7,7 @@
#define __KSMBD_TRANSPORT_TCP_H__
int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
+struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name);
int ksmbd_tcp_init(void);
void ksmbd_tcp_destroy(void);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 40f08eac519c..6890016e1923 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1856,13 +1856,6 @@ void ksmbd_vfs_posix_lock_wait(struct file_lock *flock)
wait_event(flock->c.flc_wait, !flock->c.flc_blocker);
}
-int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout)
-{
- return wait_event_interruptible_timeout(flock->c.flc_wait,
- !flock->c.flc_blocker,
- timeout);
-}
-
void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock)
{
locks_delete_block(flock);
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index 06903024a2d8..2893f59803a6 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -140,7 +140,6 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
struct dentry *dentry,
struct ksmbd_kstat *ksmbd_kstat);
void ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
-int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
int ksmbd_vfs_remove_acl_xattrs(struct mnt_idmap *idmap,
const struct path *path);
diff --git a/fs/splice.c b/fs/splice.c
index 2898fa1e9e63..28cfa63aa236 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
return -ENOMEM;
pages = (struct page **)(bv + npages);
- npages = alloc_pages_bulk_array(GFP_USER, npages, pages);
+ npages = alloc_pages_bulk(GFP_USER, npages, pages);
if (!npages) {
kfree(bv);
return -ENOMEM;
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 60fc98bdf421..b1091e70434a 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,8 +5,8 @@ config SQUASHFS
help
Saying Y here includes support for SquashFS 4.0 (a Compressed
Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib, lzo or xz compression to
- compress both files, inodes and directories. Inodes in the system
+ filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression
+ to compress both files, inodes and directories. Inodes in the system
are very small and all blocks are packed to minimise data overhead.
Block sizes greater than 4K are supported up to a maximum of 1 Mbytes
(default block size 128K). SquashFS 4.0 supports 64 bit filesystems
@@ -16,7 +16,7 @@ config SQUASHFS
Squashfs is intended for general read-only filesystem use, for
archival use (i.e. in cases where a .tar.gz file may be used), and in
embedded systems where low overhead is needed. Further information
- and tools are available from http://squashfs.sourceforge.net.
+ and tools are available from github.com/plougher/squashfs-tools.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 5062326d0efb..4db0d2b0aab8 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
int i, j;
- struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ struct squashfs_cache *cache;
+ if (entries == 0)
+ return NULL;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
ERROR("Failed to allocate %s cache\n", name);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
@@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
cleanup:
squashfs_cache_delete(cache);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
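With this change squashfs_cache_init() reports allocation failure as ERR_PTR(-ENOMEM) and reserves NULL for the "zero entries requested" case, so callers can tell the two apart. A user-space sketch of that convention follows, using simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR macros (the real ones live in linux/err.h); cache_init() and struct cache are hypothetical.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified user-space stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((int)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct cache { int entries; };

/* NULL means "no cache needed"; an ERR_PTR means a real failure. */
static struct cache *cache_init(int entries)
{
	struct cache *c;

	if (entries == 0)
		return NULL;			/* nothing to allocate, not an error */

	c = calloc(1, sizeof(*c));
	if (!c)
		return ERR_PTR(-ENOMEM);	/* real failure */
	c->entries = entries;
	return c;
}

int main(void)
{
	struct cache *c = cache_init(8);

	if (IS_ERR(c))
		return -PTR_ERR(c);
	printf("cache %s\n", c ? "allocated" : "not needed");
	free(c);
	return 0;
}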
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 21aaa96856c1..5ca2baa16dc2 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -362,29 +362,33 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
return squashfs_block_size(size);
}
-void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+static bool squashfs_fill_page(struct folio *folio,
+ struct squashfs_cache_entry *buffer, size_t offset,
+ size_t avail)
{
- int copied;
+ size_t copied;
void *pageaddr;
- pageaddr = kmap_atomic(page);
+ pageaddr = kmap_local_folio(folio, 0);
copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
memset(pageaddr + copied, 0, PAGE_SIZE - copied);
- kunmap_atomic(pageaddr);
+ kunmap_local(pageaddr);
- flush_dcache_page(page);
- if (copied == avail)
- SetPageUptodate(page);
+ flush_dcache_folio(folio);
+
+ return copied == avail;
}
/* Copy data into page cache */
-void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
- int bytes, int offset)
+void squashfs_copy_cache(struct folio *folio,
+ struct squashfs_cache_entry *buffer, size_t bytes,
+ size_t offset)
{
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = folio->mapping;
+ struct inode *inode = mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int start_index = page->index & ~mask, end_index = start_index | mask;
+ int start_index = folio->index & ~mask, end_index = start_index | mask;
/*
* Loop copying datablock into pages. As the datablock likely covers
@@ -394,32 +398,35 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
*/
for (i = start_index; i <= end_index && bytes > 0; i++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
- struct page *push_page;
- int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
+ struct folio *push_folio;
+ size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0;
+ bool updated = false;
- TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+ TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail);
- push_page = (i == page->index) ? page :
- grab_cache_page_nowait(page->mapping, i);
+ push_folio = (i == folio->index) ? folio :
+ __filemap_get_folio(mapping, i,
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+ mapping_gfp_mask(mapping));
- if (!push_page)
+ if (IS_ERR(push_folio))
continue;
- if (PageUptodate(push_page))
- goto skip_page;
+ if (folio_test_uptodate(push_folio))
+ goto skip_folio;
- squashfs_fill_page(push_page, buffer, offset, avail);
-skip_page:
- unlock_page(push_page);
- if (i != page->index)
- put_page(push_page);
+ updated = squashfs_fill_page(push_folio, buffer, offset, avail);
+skip_folio:
+ folio_end_read(push_folio, updated);
+ if (i != folio->index)
+ folio_put(push_folio);
}
}
/* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page, int expected)
+static int squashfs_readpage_fragment(struct folio *folio, int expected)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
@@ -430,36 +437,34 @@ static int squashfs_readpage_fragment(struct page *page, int expected)
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
else
- squashfs_copy_cache(page, buffer, expected,
+ squashfs_copy_cache(folio, buffer, expected,
squashfs_i(inode)->fragment_offset);
squashfs_cache_put(buffer);
return res;
}
-static int squashfs_readpage_sparse(struct page *page, int expected)
+static int squashfs_readpage_sparse(struct folio *folio, int expected)
{
- squashfs_copy_cache(page, NULL, expected, 0);
+ squashfs_copy_cache(folio, NULL, expected, 0);
return 0;
}
static int squashfs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int index = page->index >> (msblk->block_log - PAGE_SHIFT);
+ int index = folio->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int expected = index == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
int res = 0;
- void *pageaddr;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
- page->index, squashfs_i(inode)->start);
+ folio->index, squashfs_i(inode)->start);
- if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
PAGE_SHIFT))
goto out;
@@ -472,23 +477,18 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
goto out;
if (res == 0)
- res = squashfs_readpage_sparse(page, expected);
+ res = squashfs_readpage_sparse(folio, expected);
else
- res = squashfs_readpage_block(page, block, res, expected);
+ res = squashfs_readpage_block(folio, block, res, expected);
} else
- res = squashfs_readpage_fragment(page, expected);
+ res = squashfs_readpage_fragment(folio, expected);
if (!res)
return 0;
out:
- pageaddr = kmap_atomic(page);
- memset(pageaddr, 0, PAGE_SIZE);
- kunmap_atomic(pageaddr);
- flush_dcache_page(page);
- if (res == 0)
- SetPageUptodate(page);
- unlock_page(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_end_read(folio, res == 0);
return res;
}
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
index 54c17b7c85fd..40e59a43d098 100644
--- a/fs/squashfs/file_cache.c
+++ b/fs/squashfs/file_cache.c
@@ -18,9 +18,9 @@
#include "squashfs.h"
/* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected)
{
- struct inode *i = page->mapping->host;
+ struct inode *i = folio->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error;
@@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
else
- squashfs_copy_cache(page, buffer, expected, 0);
+ squashfs_copy_cache(folio, buffer, expected, 0);
squashfs_cache_put(buffer);
return res;
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index d19d4db74af8..2c3e809d6891 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -19,12 +19,11 @@
#include "page_actor.h"
/* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
- int expected)
-
+int squashfs_readpage_block(struct folio *folio, u64 block, int bsize,
+ int expected)
{
- struct folio *folio = page_folio(target_page);
- struct inode *inode = target_page->mapping->host;
+ struct page *target_page = &folio->page;
+ struct inode *inode = folio->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
@@ -48,7 +47,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
/* Try to grab all the pages covered by the Squashfs block */
for (i = 0, index = start_index; index <= end_index; index++) {
page[i] = (index == folio->index) ? target_page :
- grab_cache_page_nowait(target_page->mapping, index);
+ grab_cache_page_nowait(folio->mapping, index);
if (page[i] == NULL)
continue;
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 5a756e6790b5..218868b20f16 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -14,6 +14,12 @@
#define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
+#ifdef CONFIG_SQUASHFS_FILE_CACHE
+#define SQUASHFS_READ_PAGES msblk->max_thread_num
+#else
+#define SQUASHFS_READ_PAGES 0
+#endif
+
/* block.c */
extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
struct squashfs_page_actor *);
@@ -67,12 +73,11 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
u64, u64, unsigned int);
/* file.c */
-void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
-void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
- int);
+void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *,
+ size_t bytes, size_t offset);
/* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int, int);
+int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected);
/* id.c */
extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 22e812808e5c..67c55fe32ce8 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -314,26 +314,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
- err = -ENOMEM;
-
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
- if (msblk->block_cache == NULL)
+ if (IS_ERR(msblk->block_cache)) {
+ err = PTR_ERR(msblk->block_cache);
goto failed_mount;
+ }
/* Allocate read_page block */
msblk->read_page = squashfs_cache_init("data",
- msblk->max_thread_num, msblk->block_size);
- if (msblk->read_page == NULL) {
+ SQUASHFS_READ_PAGES, msblk->block_size);
+ if (IS_ERR(msblk->read_page)) {
errorf(fc, "Failed to allocate read_page block");
+ err = PTR_ERR(msblk->read_page);
goto failed_mount;
}
if (msblk->devblksize == PAGE_SIZE) {
struct inode *cache = new_inode(sb);
- if (cache == NULL)
+ if (cache == NULL) {
+ err = -ENOMEM;
goto failed_mount;
+ }
set_nlink(cache, 1);
cache->i_size = OFFSET_MAX;
@@ -405,9 +408,9 @@ handle_fragments:
goto check_directory_table;
msblk->fragment_cache = squashfs_cache_init("fragment",
- SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
- if (msblk->fragment_cache == NULL) {
- err = -ENOMEM;
+ min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size);
+ if (IS_ERR(msblk->fragment_cache)) {
+ err = PTR_ERR(msblk->fragment_cache);
goto failed_mount;
}
diff --git a/fs/stat.c b/fs/stat.c
index 2c0e111a098a..f13308bfdc98 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -281,6 +281,8 @@ static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
u32 request_mask)
{
int error = vfs_getattr(path, stat, request_mask, flags);
+ if (error)
+ return error;
if (request_mask & STATX_MNT_ID_UNIQUE) {
stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
@@ -302,7 +304,7 @@ static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
if (S_ISBLK(stat->mode))
bdev_statx(path, stat, request_mask);
- return error;
+ return 0;
}
static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
diff --git a/fs/super.c b/fs/super.c
index c9c7223bc2a2..5a7db4a556e3 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -647,7 +647,7 @@ void generic_shutdown_super(struct super_block *sb)
*/
fscrypt_destroy_keyring(sb);
- if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
+ if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
"VFS: Busy inodes after unmount of %s (%s)",
sb->s_id, sb->s_type->name)) {
/*
diff --git a/fs/sysctls.c b/fs/sysctls.c
index 8dbde9a802fa..ad429dffeb4b 100644
--- a/fs/sysctls.c
+++ b/fs/sysctls.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/sysctl.h>
-static struct ctl_table fs_shared_sysctls[] = {
+static const struct ctl_table fs_shared_sysctls[] = {
{
.procname = "overflowuid",
.data = &fs_overflowuid,
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 785408861c01..6931308876c4 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -817,7 +817,7 @@ EXPORT_SYMBOL_GPL(sysfs_emit_at);
* Returns number of bytes written to @buf.
*/
ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
memcpy(buf, attr->private + off, count);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index cfc614c638da..53214499e384 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -457,7 +457,8 @@ static void tracefs_d_release(struct dentry *dentry)
eventfs_d_release(dentry);
}
-static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+static int tracefs_d_revalidate(struct inode *inode, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
struct eventfs_inode *ei = dentry->d_fsdata;
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 5cc69beaa62e..b01f382ce8db 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -863,7 +863,6 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
out:
vfree(buf);
- return;
}
void ubifs_dump_znode(const struct ubifs_info *c,
@@ -946,16 +945,20 @@ void ubifs_dump_tnc(struct ubifs_info *c)
pr_err("\n");
pr_err("(pid %d) start dumping TNC tree\n", current->pid);
- znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
- level = znode->level;
- pr_err("== Level %d ==\n", level);
- while (znode) {
- if (level != znode->level) {
- level = znode->level;
- pr_err("== Level %d ==\n", level);
+ if (c->zroot.znode) {
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
+ level = znode->level;
+ pr_err("== Level %d ==\n", level);
+ while (znode) {
+ if (level != znode->level) {
+ level = znode->level;
+ pr_err("== Level %d ==\n", level);
+ }
+ ubifs_dump_znode(c, znode);
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
}
- ubifs_dump_znode(c, znode);
- znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
+ } else {
+ pr_err("empty TNC tree in memory\n");
}
pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
}
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index aa8837e6247c..f2cb214581fd 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -1932,7 +1932,6 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
out:
vfree(buf);
- return;
}
/**
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 7c0bd0b55f88..97c4d71115d8 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -36,7 +36,7 @@
static int sysctl_unprivileged_userfaultfd __read_mostly;
#ifdef CONFIG_SYSCTL
-static struct ctl_table vm_userfaultfd_table[] = {
+static const struct ctl_table vm_userfaultfd_table[] = {
{
.procname = "unprivileged_userfaultfd",
.data = &sysctl_unprivileged_userfaultfd,
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
index 5f1a14d5b927..a859ac9b74ba 100644
--- a/fs/vboxsf/dir.c
+++ b/fs/vboxsf/dir.c
@@ -192,7 +192,8 @@ const struct file_operations vboxsf_dir_fops = {
* This is called during name resolution/lookup to check if the @dentry in
* the cache is still valid. the job is handled by vboxsf_inode_revalidate.
*/
-static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+static int vboxsf_dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
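
Both the tracefs and vboxsf hunks adapt to the ->d_revalidate() prototype, which now also passes the parent directory inode and the name being looked up. A minimal sketch of an implementation against the new signature (the examplefs_* names are invented):

#include <linux/dcache.h>
#include <linux/namei.h>

static int examplefs_d_revalidate(struct inode *dir, const struct qstr *name,
				  struct dentry *dentry, unsigned int flags)
{
	/* refuse to do anything that might sleep during RCU-walk */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/*
	 * @dir and @name describe the lookup being revalidated, so the
	 * filesystem no longer has to dig them out of the dentry itself.
	 * Return 1 if the cached dentry is still usable, 0 to force a
	 * fresh lookup.
	 */
	return 1;
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_revalidate	= examplefs_d_revalidate,
};
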
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index e95b8a48d8a0..1d94bb784108 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -21,7 +21,8 @@
#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
-static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376',
+ '\375' };
static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
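
The VBSF_MOUNT_SIGNATURE change avoids initializing a 4-byte array from a 4-character string literal, which leaves no room for the implicit terminating NUL and which newer GCC can warn about (-Wunterminated-string-initialization). A standalone illustration of the two spellings, which produce identical bytes:

#include <stdio.h>

/*
 * String-literal form: valid C, but the char[4] cannot hold the implicit
 * '\0', which is what the new warning complains about.
 */
static const unsigned char sig_literal[4] = "\000\377\376\375";

/* explicit byte list: the same four bytes, with no terminator implied */
static const unsigned char sig_bytes[4] = { '\000', '\377', '\376', '\375' };

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%02x %02x\n", sig_literal[i], sig_bytes[i]);
	return 0;
}
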
diff --git a/fs/verity/init.c b/fs/verity/init.c
index f440f0e61e3e..6e8d33b50240 100644
--- a/fs/verity/init.c
+++ b/fs/verity/init.c
@@ -10,7 +10,7 @@
#include <linux/ratelimit.h>
#ifdef CONFIG_SYSCTL
-static struct ctl_table fsverity_sysctl_table[] = {
+static const struct ctl_table fsverity_sysctl_table[] = {
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
{
.procname = "require_signatures",
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index ed9b0dabc1f1..7afa51e41427 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -51,6 +51,8 @@ xfs-y += $(addprefix libxfs/, \
xfs_rmap_btree.o \
xfs_refcount.o \
xfs_refcount_btree.o \
+ xfs_rtrefcount_btree.o \
+ xfs_rtrmap_btree.o \
xfs_sb.o \
xfs_symlink_remote.o \
xfs_trans_inode.o \
@@ -193,6 +195,8 @@ xfs-$(CONFIG_XFS_ONLINE_SCRUB_STATS) += scrub/stats.o
xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \
rgsuper.o \
rtbitmap.o \
+ rtrefcount.o \
+ rtrmap.o \
rtsummary.o \
)
@@ -232,6 +236,8 @@ xfs-y += $(addprefix scrub/, \
xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \
rtbitmap_repair.o \
+ rtrefcount_repair.o \
+ rtrmap_repair.o \
rtsummary_repair.o \
)
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index f5d853089019..fb79215a509d 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -114,6 +114,7 @@ xfs_ag_resv_needed(
case XFS_AG_RESV_RMAPBT:
len -= xfs_perag_resv(pag, type)->ar_reserved;
break;
+ case XFS_AG_RESV_METAFILE:
case XFS_AG_RESV_NONE:
/* empty */
break;
@@ -347,6 +348,7 @@ xfs_ag_resv_alloc_extent(
switch (type) {
case XFS_AG_RESV_AGFL:
+ case XFS_AG_RESV_METAFILE:
return;
case XFS_AG_RESV_METADATA:
case XFS_AG_RESV_RMAPBT:
@@ -389,6 +391,7 @@ xfs_ag_resv_free_extent(
switch (type) {
case XFS_AG_RESV_AGFL:
+ case XFS_AG_RESV_METAFILE:
return;
case XFS_AG_RESV_METADATA:
case XFS_AG_RESV_RMAPBT:
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 17875ad865f5..8c04acd30d48 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1004,9 +1004,7 @@ xfs_attr_add_fork(
unsigned int blks; /* space reservation */
int error; /* error return value */
- if (xfs_is_metadir_inode(ip))
- ASSERT(XFS_IS_DQDETACHED(ip));
- else
+ if (!xfs_is_metadir_inode(ip))
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
blks = XFS_ADDAFORK_SPACE_RES(mp);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 5255f93bae31..0ef19f1469ec 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -615,7 +615,7 @@ xfs_bmap_btree_to_extents(
xfs_trans_binval(tp, cbp);
if (cur->bc_levels[0].bp == cbp)
cur->bc_levels[0].bp = NULL;
- xfs_iroot_realloc(ip, -1, whichfork);
+ xfs_bmap_broot_realloc(ip, whichfork, 0);
ASSERT(ifp->if_broot == NULL);
ifp->if_format = XFS_DINODE_FMT_EXTENTS;
*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
@@ -659,12 +659,11 @@ xfs_bmap_extents_to_btree(
* Make space in the inode incore. This needs to be undone if we fail
* to expand the root.
*/
- xfs_iroot_realloc(ip, 1, whichfork);
+ block = xfs_bmap_broot_realloc(ip, whichfork, 1);
/*
* Fill in the root.
*/
- block = ifp->if_broot;
xfs_bmbt_init_block(ip, block, NULL, 1, 1);
/*
* Need a cursor. Can't allocate until bb_level is filled in.
@@ -746,7 +745,7 @@ xfs_bmap_extents_to_btree(
out_unreserve_dquot:
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
- xfs_iroot_realloc(ip, -1, whichfork);
+ xfs_bmap_broot_realloc(ip, whichfork, 0);
ifp->if_format = XFS_DINODE_FMT_EXTENTS;
ASSERT(ifp->if_broot == NULL);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
@@ -1043,9 +1042,7 @@ xfs_bmap_add_attrfork(
int error; /* error return value */
xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
- if (xfs_is_metadir_inode(ip))
- ASSERT(XFS_IS_DQDETACHED(ip));
- else
+ if (!xfs_is_metadir_inode(ip))
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
ASSERT(!xfs_inode_has_attr_fork(ip));
@@ -3566,12 +3563,12 @@ xfs_bmap_btalloc_at_eof(
int error;
/*
- * If there are already extents in the file, try an exact EOF block
- * allocation to extend the file as a contiguous extent. If that fails,
- * or it's the first allocation in a file, just try for a stripe aligned
- * allocation.
+ * If there are already extents in the file, and xfs_bmap_adjacent() has
+ * given a better blkno, try an exact EOF block allocation to extend the
+ * file as a contiguous extent. If that fails, or it's the first
+ * allocation in a file, just try for a stripe aligned allocation.
*/
- if (ap->offset) {
+ if (ap->eof) {
xfs_extlen_t nextminlen = 0;
/*
@@ -3739,7 +3736,8 @@ xfs_bmap_btalloc_best_length(
int error;
ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
- xfs_bmap_adjacent(ap);
+ if (!xfs_bmap_adjacent(ap))
+ ap->eof = false;
/*
* Search for an allocation group with a single extent large enough for
@@ -4567,8 +4565,9 @@ xfs_bmapi_write(
* the refcount btree for orphan recovery.
*/
if (whichfork == XFS_COW_FORK)
- xfs_refcount_alloc_cow_extent(tp, bma.blkno,
- bma.length);
+ xfs_refcount_alloc_cow_extent(tp,
+ XFS_IS_REALTIME_INODE(ip),
+ bma.blkno, bma.length);
}
/* Deal with the allocated space we found. */
@@ -4743,7 +4742,8 @@ xfs_bmapi_convert_one_delalloc(
*seq = READ_ONCE(ifp->if_seq);
if (whichfork == XFS_COW_FORK)
- xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
+ xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
+ bma.blkno, bma.length);
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
whichfork);
@@ -5391,7 +5391,7 @@ xfs_bmap_del_extent_real(
bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
- xfs_refcount_decrease_extent(tp, del);
+ xfs_refcount_decrease_extent(tp, isrt, del);
} else if (isrt && !xfs_has_rtgroups(mp)) {
error = xfs_bmap_free_rtblocks(tp, del);
} else {
@@ -6501,9 +6501,8 @@ xfs_get_extsz_hint(
* No point in aligning allocations if we need to COW to actually
* write to them.
*/
- if (xfs_is_always_cow_inode(ip))
- return 0;
- if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
+ if (!xfs_is_always_cow_inode(ip) &&
+ (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
return ip->i_extsize;
if (XFS_IS_REALTIME_INODE(ip) &&
ip->i_mount->m_sb.sb_rextsize > 1)
@@ -6526,7 +6525,13 @@ xfs_get_cowextsz_hint(
a = 0;
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
a = ip->i_cowextsize;
- b = xfs_get_extsz_hint(ip);
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ b = 0;
+ if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
+ b = ip->i_extsize;
+ } else {
+ b = xfs_get_extsz_hint(ip);
+ }
a = max(a, b);
if (a == 0)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 3464be771f95..908d7b050e9c 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -516,6 +516,116 @@ xfs_bmbt_keys_contiguous(
be64_to_cpu(key2->bmbt.br_startoff));
}
+static inline void
+xfs_bmbt_move_ptrs(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *broot,
+ short old_size,
+ size_t new_size,
+ unsigned int numrecs)
+{
+ void *dptr;
+ void *sptr;
+
+ sptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, old_size);
+ dptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, new_size);
+ memmove(dptr, sptr, numrecs * sizeof(xfs_bmbt_ptr_t));
+}
+
+/*
+ * Reallocate the space for if_broot based on the number of records. Move the
+ * records and pointers in if_broot to fit the new size. When shrinking this
+ * will eliminate holes between the records and pointers created by the caller.
+ * When growing this will create holes to be filled in by the caller.
+ *
+ * The caller must not request to add more records than would fit in the
+ * on-disk inode root. If the if_broot is currently NULL, then if we are
+ * adding records, one will be allocated. The caller must also not request
+ * that the number of records go below zero, although it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * whichfork -- which inode fork to change
+ * new_numrecs -- the new number of records requested for the if_broot array
+ *
+ * Returns the incore btree root block.
+ */
+struct xfs_btree_block *
+xfs_bmap_broot_realloc(
+ struct xfs_inode *ip,
+ int whichfork,
+ unsigned int new_numrecs)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
+ struct xfs_btree_block *broot;
+ unsigned int new_size;
+ unsigned int old_size = ifp->if_broot_bytes;
+
+ /*
+ * Block mapping btrees do not support storing zero records; if this
+ * happens, the fork is being changed to FMT_EXTENTS. Free the broot
+ * and get out.
+ */
+ if (new_numrecs == 0)
+ return xfs_broot_realloc(ifp, 0);
+
+ new_size = xfs_bmap_broot_space_calc(mp, new_numrecs);
+
+ /* Handle the nop case quietly. */
+ if (new_size == old_size)
+ return ifp->if_broot;
+
+ if (new_size > old_size) {
+ unsigned int old_numrecs;
+
+ /*
+ * If there wasn't any memory allocated before, just
+ * allocate it now and get out.
+ */
+ if (old_size == 0)
+ return xfs_broot_realloc(ifp, new_size);
+
+ /*
+ * If there is already an existing if_broot, then we need
+ * to realloc() it and shift the pointers to their new
+ * location. The records don't change location because
+ * they are kept butted up against the btree block header.
+ */
+ old_numrecs = xfs_bmbt_maxrecs(mp, old_size, false);
+ broot = xfs_broot_realloc(ifp, new_size);
+ ASSERT(xfs_bmap_bmdr_space(broot) <=
+ xfs_inode_fork_size(ip, whichfork));
+ xfs_bmbt_move_ptrs(mp, broot, old_size, new_size, old_numrecs);
+ return broot;
+ }
+
+ /*
+ * We're reducing, but not totally eliminating, numrecs. In this case,
+ * we are shrinking the if_broot buffer, so it must already exist.
+ */
+ ASSERT(ifp->if_broot != NULL && old_size > 0 && new_size > 0);
+
+ /*
+ * Shrink the btree root by moving the bmbt pointers, since they are
+ * not butted up against the btree block header, then reallocating
+ * broot.
+ */
+ xfs_bmbt_move_ptrs(mp, ifp->if_broot, old_size, new_size, new_numrecs);
+ broot = xfs_broot_realloc(ifp, new_size);
+ ASSERT(xfs_bmap_bmdr_space(broot) <=
+ xfs_inode_fork_size(ip, whichfork));
+ return broot;
+}
+
+static struct xfs_btree_block *
+xfs_bmbt_broot_realloc(
+ struct xfs_btree_cur *cur,
+ unsigned int new_numrecs)
+{
+ return xfs_bmap_broot_realloc(cur->bc_ino.ip, cur->bc_ino.whichfork,
+ new_numrecs);
+}
+
const struct xfs_btree_ops xfs_bmbt_ops = {
.name = "bmap",
.type = XFS_BTREE_TYPE_INODE,
@@ -543,6 +653,7 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
.keys_contiguous = xfs_bmbt_keys_contiguous,
+ .broot_realloc = xfs_bmbt_broot_realloc,
};
/*
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h
index 49a3bae3f6ec..b238d559ab03 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.h
+++ b/fs/xfs/libxfs/xfs_bmap_btree.h
@@ -198,4 +198,7 @@ xfs_bmap_bmdr_space(struct xfs_btree_block *bb)
return xfs_bmdr_space_calc(be16_to_cpu(bb->bb_numrecs));
}
+struct xfs_btree_block *xfs_bmap_broot_realloc(struct xfs_inode *ip,
+ int whichfork, unsigned int new_numrecs);
+
#endif /* __XFS_BMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 68ee1c299c25..299ce7fd11b0 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -30,6 +30,12 @@
#include "xfs_health.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_rmap.h"
+#include "xfs_quota.h"
+#include "xfs_metafile.h"
+#include "xfs_rtrefcount_btree.h"
/*
* Btree magic numbers.
@@ -1537,12 +1543,16 @@ xfs_btree_log_recs(
int first,
int last)
{
+ if (!bp) {
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
+ xfs_ilog_fbroot(cur->bc_ino.whichfork));
+ return;
+ }
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
xfs_trans_log_buf(cur->bc_tp, bp,
xfs_btree_rec_offset(cur, first),
xfs_btree_rec_offset(cur, last + 1) - 1);
-
}
/*
@@ -3078,6 +3088,131 @@ xfs_btree_split(
#define xfs_btree_split __xfs_btree_split
#endif /* __KERNEL__ */
+/* Move the records from a root leaf block to a separate block. */
+STATIC void
+xfs_btree_promote_leaf_iroot(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ struct xfs_buf *cbp,
+ union xfs_btree_ptr *cptr,
+ struct xfs_btree_block *cblock)
+{
+ union xfs_btree_rec *rp;
+ union xfs_btree_rec *crp;
+ union xfs_btree_key *kp;
+ union xfs_btree_ptr *pp;
+ struct xfs_btree_block *broot;
+ int numrecs = xfs_btree_get_numrecs(block);
+
+ /* Copy the records from the leaf broot into the new child block. */
+ rp = xfs_btree_rec_addr(cur, 1, block);
+ crp = xfs_btree_rec_addr(cur, 1, cblock);
+ xfs_btree_copy_recs(cur, crp, rp, numrecs);
+
+ /*
+ * Increment the tree height.
+ *
+ * Trickery here: The amount of memory that we need per record for the
+ * ifork's btree root block may change when we convert the broot from a
+ * leaf to a node block. Free the existing leaf broot so that nobody
+ * thinks we need to migrate node pointers when we realloc the broot
+ * buffer after bumping nlevels.
+ */
+ cur->bc_ops->broot_realloc(cur, 0);
+ cur->bc_nlevels++;
+ cur->bc_levels[1].ptr = 1;
+
+ /*
+ * Allocate a new node broot and initialize it to point to the new
+ * child block.
+ */
+ broot = cur->bc_ops->broot_realloc(cur, 1);
+ xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops,
+ cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino);
+
+ pp = xfs_btree_ptr_addr(cur, 1, broot);
+ kp = xfs_btree_key_addr(cur, 1, broot);
+ xfs_btree_copy_ptrs(cur, pp, cptr, 1);
+ xfs_btree_get_keys(cur, cblock, kp);
+
+ /* Attach the new block to the cursor and log it. */
+ xfs_btree_setbuf(cur, 0, cbp);
+ xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
+ xfs_btree_log_recs(cur, cbp, 1, numrecs);
+}
+
+/*
+ * Move the keys and pointers from a root block to a separate block.
+ *
+ * Since the keyptr size does not change, all we have to do is increase the
+ * tree height, copy the keyptrs to the new internal node (cblock), shrink
+ * the root, and copy the pointers there.
+ */
+STATIC int
+xfs_btree_promote_node_iroot(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_buf *cbp,
+ union xfs_btree_ptr *cptr,
+ struct xfs_btree_block *cblock)
+{
+ union xfs_btree_key *ckp;
+ union xfs_btree_key *kp;
+ union xfs_btree_ptr *cpp;
+ union xfs_btree_ptr *pp;
+ int i;
+ int error;
+ int numrecs = xfs_btree_get_numrecs(block);
+
+ /*
+ * Increase tree height, adjusting the root block level to match.
+ * We cannot change the root btree node size until we've copied the
+ * block contents to the new child block.
+ */
+ be16_add_cpu(&block->bb_level, 1);
+ cur->bc_nlevels++;
+ cur->bc_levels[level + 1].ptr = 1;
+
+ /*
+ * Adjust the root btree record count, then copy the keys from the old
+ * root to the new child block.
+ */
+ xfs_btree_set_numrecs(block, 1);
+ kp = xfs_btree_key_addr(cur, 1, block);
+ ckp = xfs_btree_key_addr(cur, 1, cblock);
+ xfs_btree_copy_keys(cur, ckp, kp, numrecs);
+
+ /* Check the pointers and copy them to the new child block. */
+ pp = xfs_btree_ptr_addr(cur, 1, block);
+ cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+ for (i = 0; i < numrecs; i++) {
+ error = xfs_btree_debug_check_ptr(cur, pp, i, level);
+ if (error)
+ return error;
+ }
+ xfs_btree_copy_ptrs(cur, cpp, pp, numrecs);
+
+ /*
+ * Set the first keyptr to point to the new child block, then shrink
+ * the memory buffer for the root block.
+ */
+ error = xfs_btree_debug_check_ptr(cur, cptr, 0, level);
+ if (error)
+ return error;
+ xfs_btree_copy_ptrs(cur, pp, cptr, 1);
+ xfs_btree_get_keys(cur, cblock, kp);
+
+ cur->bc_ops->broot_realloc(cur, 1);
+
+ /* Attach the new block to the cursor and log it. */
+ xfs_btree_setbuf(cur, level, cbp);
+ xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
+ xfs_btree_log_keys(cur, cbp, 1, numrecs);
+ xfs_btree_log_ptrs(cur, cbp, 1, numrecs);
+ return 0;
+}
+
/*
* Copy the old inode root contents into a real block and make the
* broot point to it.
@@ -3091,14 +3226,10 @@ xfs_btree_new_iroot(
struct xfs_buf *cbp; /* buffer for cblock */
struct xfs_btree_block *block; /* btree block */
struct xfs_btree_block *cblock; /* child btree block */
- union xfs_btree_key *ckp; /* child key pointer */
- union xfs_btree_ptr *cpp; /* child ptr pointer */
- union xfs_btree_key *kp; /* pointer to btree key */
- union xfs_btree_ptr *pp; /* pointer to block addr */
+ union xfs_btree_ptr aptr;
union xfs_btree_ptr nptr; /* new block addr */
int level; /* btree level */
int error; /* error return code */
- int i; /* loop counter */
XFS_BTREE_STATS_INC(cur, newroot);
@@ -3107,10 +3238,15 @@ xfs_btree_new_iroot(
level = cur->bc_nlevels - 1;
block = xfs_btree_get_iroot(cur);
- pp = xfs_btree_ptr_addr(cur, 1, block);
+ ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS));
+ if (level > 0)
+ aptr = *xfs_btree_ptr_addr(cur, 1, block);
+ else
+ aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp,
+ cur->bc_ino.ip->i_ino));
/* Allocate the new block. If we can't do it, we're toast. Give up. */
- error = xfs_btree_alloc_block(cur, pp, &nptr, stat);
+ error = xfs_btree_alloc_block(cur, &aptr, &nptr, stat);
if (error)
goto error0;
if (*stat == 0)
@@ -3136,47 +3272,16 @@ xfs_btree_new_iroot(
cblock->bb_u.s.bb_blkno = bno;
}
- be16_add_cpu(&block->bb_level, 1);
- xfs_btree_set_numrecs(block, 1);
- cur->bc_nlevels++;
- ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
- cur->bc_levels[level + 1].ptr = 1;
-
- kp = xfs_btree_key_addr(cur, 1, block);
- ckp = xfs_btree_key_addr(cur, 1, cblock);
- xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
-
- cpp = xfs_btree_ptr_addr(cur, 1, cblock);
- for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
- error = xfs_btree_debug_check_ptr(cur, pp, i, level);
+ if (level > 0) {
+ error = xfs_btree_promote_node_iroot(cur, block, level, cbp,
+ &nptr, cblock);
if (error)
goto error0;
+ } else {
+ xfs_btree_promote_leaf_iroot(cur, block, cbp, &nptr, cblock);
}
- xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
-
- error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
- if (error)
- goto error0;
-
- xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
-
- xfs_iroot_realloc(cur->bc_ino.ip,
- 1 - xfs_btree_get_numrecs(cblock),
- cur->bc_ino.whichfork);
-
- xfs_btree_setbuf(cur, level, cbp);
-
- /*
- * Do all this logging at the end so that
- * the root is at the right level.
- */
- xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
- xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
- xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
-
- *logflags |=
- XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
+ *logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
*stat = 1;
return 0;
error0:
@@ -3347,7 +3452,7 @@ xfs_btree_make_block_unfull(
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
- xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
+ cur->bc_ops->broot_realloc(cur, numrecs + 1);
*stat = 1;
} else {
/* A root block that needs replacing */
@@ -3693,6 +3798,97 @@ error0:
return error;
}
+/* Move the records from a child leaf block to the root block. */
+STATIC void
+xfs_btree_demote_leaf_child(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *cblock,
+ int numrecs)
+{
+ union xfs_btree_rec *rp;
+ union xfs_btree_rec *crp;
+ struct xfs_btree_block *broot;
+
+ /*
+ * Decrease the tree height.
+ *
+ * Trickery here: The amount of memory that we need per record for the
+ * ifork's btree root block may change when we convert the broot from a
+ * node to a leaf. Free the old node broot so that we can get a fresh
+ * leaf broot.
+ */
+ cur->bc_ops->broot_realloc(cur, 0);
+ cur->bc_nlevels--;
+
+ /*
+ * Allocate a new leaf broot and copy the records from the old child.
+ * Detach the old child from the cursor.
+ */
+ broot = cur->bc_ops->broot_realloc(cur, numrecs);
+ xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs,
+ cur->bc_ino.ip->i_ino);
+
+ rp = xfs_btree_rec_addr(cur, 1, broot);
+ crp = xfs_btree_rec_addr(cur, 1, cblock);
+ xfs_btree_copy_recs(cur, rp, crp, numrecs);
+
+ cur->bc_levels[0].bp = NULL;
+}
+
+/*
+ * Move the keyptrs from a child node block to the root block.
+ *
+ * Since the keyptr size does not change, all we have to do is grow the
+ * root to hold the child's keyptrs, copy them over from the doomed child
+ * (cblock), and decrease the tree height.
+ */
+STATIC int
+xfs_btree_demote_node_child(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *cblock,
+ int level,
+ int numrecs)
+{
+ struct xfs_btree_block *block;
+ union xfs_btree_key *ckp;
+ union xfs_btree_key *kp;
+ union xfs_btree_ptr *cpp;
+ union xfs_btree_ptr *pp;
+ int i;
+ int error;
+
+ /*
+ * Adjust the root btree node size and the record count to match the
+ * doomed child so that we can copy the keyptrs ahead of changing the
+ * tree shape.
+ */
+ block = cur->bc_ops->broot_realloc(cur, numrecs);
+
+ xfs_btree_set_numrecs(block, numrecs);
+ ASSERT(block->bb_numrecs == cblock->bb_numrecs);
+
+ /* Copy keys from the doomed block. */
+ kp = xfs_btree_key_addr(cur, 1, block);
+ ckp = xfs_btree_key_addr(cur, 1, cblock);
+ xfs_btree_copy_keys(cur, kp, ckp, numrecs);
+
+ /* Copy pointers from the doomed block. */
+ pp = xfs_btree_ptr_addr(cur, 1, block);
+ cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+ for (i = 0; i < numrecs; i++) {
+ error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
+ if (error)
+ return error;
+ }
+ xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
+
+ /* Decrease tree height, adjusting the root block level to match. */
+ cur->bc_levels[level - 1].bp = NULL;
+ be16_add_cpu(&block->bb_level, -1);
+ cur->bc_nlevels--;
+ return 0;
+}
+
/*
* Try to merge a non-leaf block back into the inode root.
*
@@ -3705,34 +3901,31 @@ STATIC int
xfs_btree_kill_iroot(
struct xfs_btree_cur *cur)
{
- int whichfork = cur->bc_ino.whichfork;
struct xfs_inode *ip = cur->bc_ino.ip;
- struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_block *block;
struct xfs_btree_block *cblock;
- union xfs_btree_key *kp;
- union xfs_btree_key *ckp;
- union xfs_btree_ptr *pp;
- union xfs_btree_ptr *cpp;
struct xfs_buf *cbp;
int level;
- int index;
int numrecs;
int error;
#ifdef DEBUG
union xfs_btree_ptr ptr;
#endif
- int i;
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
- ASSERT(cur->bc_nlevels > 1);
+ ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) ||
+ cur->bc_nlevels > 1);
/*
* Don't deal with the root block needs to be a leaf case.
* We're just going to turn the thing back into extents anyway.
*/
level = cur->bc_nlevels - 1;
- if (level == 1)
+ if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS))
+ goto out0;
+
+ /* If we're already a leaf, jump out. */
+ if (level == 0)
goto out0;
/*
@@ -3762,40 +3955,20 @@ xfs_btree_kill_iroot(
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif
- index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
- if (index) {
- xfs_iroot_realloc(cur->bc_ino.ip, index,
- cur->bc_ino.whichfork);
- block = ifp->if_broot;
- }
-
- be16_add_cpu(&block->bb_numrecs, index);
- ASSERT(block->bb_numrecs == cblock->bb_numrecs);
-
- kp = xfs_btree_key_addr(cur, 1, block);
- ckp = xfs_btree_key_addr(cur, 1, cblock);
- xfs_btree_copy_keys(cur, kp, ckp, numrecs);
-
- pp = xfs_btree_ptr_addr(cur, 1, block);
- cpp = xfs_btree_ptr_addr(cur, 1, cblock);
-
- for (i = 0; i < numrecs; i++) {
- error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
+ if (level > 1) {
+ error = xfs_btree_demote_node_child(cur, cblock, level,
+ numrecs);
if (error)
return error;
- }
-
- xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
+ } else
+ xfs_btree_demote_leaf_child(cur, cblock, numrecs);
error = xfs_btree_free_block(cur, cbp);
if (error)
return error;
- cur->bc_levels[level - 1].bp = NULL;
- be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
- cur->bc_nlevels--;
out0:
return 0;
}
@@ -3949,10 +4122,10 @@ xfs_btree_delrec(
/*
* We're at the root level. First, shrink the root block in-memory.
* Try to get rid of the next level down. If we can't then there's
- * nothing left to do.
+ * nothing left to do. numrecs was decremented above.
*/
if (xfs_btree_at_iroot(cur, level)) {
- xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork);
+ cur->bc_ops->broot_realloc(cur, numrecs);
error = xfs_btree_kill_iroot(cur);
if (error)
@@ -5360,6 +5533,12 @@ xfs_btree_init_cur_caches(void)
error = xfs_refcountbt_init_cur_cache();
if (error)
goto err;
+ error = xfs_rtrmapbt_init_cur_cache();
+ if (error)
+ goto err;
+ error = xfs_rtrefcountbt_init_cur_cache();
+ if (error)
+ goto err;
return 0;
err:
@@ -5376,6 +5555,8 @@ xfs_btree_destroy_cur_caches(void)
xfs_bmbt_destroy_cur_cache();
xfs_rmapbt_destroy_cur_cache();
xfs_refcountbt_destroy_cur_cache();
+ xfs_rtrmapbt_destroy_cur_cache();
+ xfs_rtrefcountbt_destroy_cur_cache();
}
/* Move the btree cursor before the first record. */
@@ -5404,3 +5585,67 @@ xfs_btree_goto_left_edge(
return 0;
}
+
+/* Allocate a block for an inode-rooted metadata btree. */
+int
+xfs_btree_alloc_metafile_block(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ struct xfs_alloc_arg args = {
+ .mp = cur->bc_mp,
+ .tp = cur->bc_tp,
+ .resv = XFS_AG_RESV_METAFILE,
+ .minlen = 1,
+ .maxlen = 1,
+ .prod = 1,
+ };
+ struct xfs_inode *ip = cur->bc_ino.ip;
+ int error;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork);
+ error = xfs_alloc_vextent_start_ag(&args,
+ XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino));
+ if (error)
+ return error;
+ if (args.fsbno == NULLFSBLOCK) {
+ *stat = 0;
+ return 0;
+ }
+ ASSERT(args.len == 1);
+
+ xfs_metafile_resv_alloc_space(ip, &args);
+
+ new->l = cpu_to_be64(args.fsbno);
+ *stat = 1;
+ return 0;
+}
+
+/* Free a block from an inode-rooted metadata btree. */
+int
+xfs_btree_free_metafile_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_inode *ip = cur->bc_ino.ip;
+ struct xfs_trans *tp = cur->bc_tp;
+ xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
+ int error;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
+ error = xfs_free_extent_later(tp, fsbno, 1, &oinfo, XFS_AG_RESV_METAFILE,
+ 0);
+ if (error)
+ return error;
+
+ xfs_metafile_resv_free_space(ip, tp, 1);
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index c5bff273cae2..355b304696e6 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -135,7 +135,7 @@ struct xfs_btree_ops {
/* offset of btree stats array */
unsigned int statoff;
- /* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */
+ /* sick mask for health reporting (not for bmap btrees) */
unsigned int sick_mask;
/* cursor operations */
@@ -213,11 +213,27 @@ struct xfs_btree_ops {
const union xfs_btree_key *key1,
const union xfs_btree_key *key2,
const union xfs_btree_key *mask);
+
+ /*
+ * Reallocate the space for if_broot to fit the number of records.
+ * Move the records and pointers in if_broot to fit the new size. When
+ * shrinking this will eliminate holes between the records and pointers
+ * created by the caller. When growing this will create holes to be
+ * filled in by the caller.
+ *
+ * The caller must not request to add more records than would fit in
+ * the on-disk inode root. If the if_broot is currently NULL, then if
+ * we are adding records, one will be allocated. The caller must also
+ * not request that the number of records go below zero, although it
+ * can go to zero.
+ */
+ struct xfs_btree_block *(*broot_realloc)(struct xfs_btree_cur *cur,
+ unsigned int new_numrecs);
};
/* btree geometry flags */
#define XFS_BTGEO_OVERLAPPING (1U << 0) /* overlapping intervals */
-
+#define XFS_BTGEO_IROOT_RECORDS (1U << 1) /* iroot can store records */
union xfs_btree_irec {
struct xfs_alloc_rec_incore a;
@@ -281,7 +297,7 @@ struct xfs_btree_cur
struct {
unsigned int nr_ops; /* # record updates */
unsigned int shape_changes; /* # of extent splits */
- } bc_refc; /* refcountbt */
+ } bc_refc; /* refcountbt/rtrefcountbt */
};
/* Must be at the end of the struct! */
@@ -687,4 +703,10 @@ xfs_btree_at_iroot(
level == cur->bc_nlevels - 1;
}
+int xfs_btree_alloc_metafile_block(struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *start, union xfs_btree_ptr *newp,
+ int *stat);
+int xfs_btree_free_metafile_block(struct xfs_btree_cur *cur,
+ struct xfs_buf *bp);
+
#endif /* __XFS_BTREE_H__ */
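
The two metafile block helpers declared above, together with the new ->broot_realloc method, are intended to be wired into the ops tables of inode-rooted metadata btrees such as the rt rmap and rt refcount trees added by this series. Those ops tables are not part of this excerpt; the fragment below is only a rough sketch of such wiring, with every example_* name invented:

/* assumes the usual xfs_btree.h / libxfs includes of XFS code */

/* invented resize hook; a real tree sizes its root for its own record type */
extern struct xfs_btree_block *example_rtbt_broot_realloc(
		struct xfs_btree_cur *cur, unsigned int new_numrecs);

static const struct xfs_btree_ops example_rtbt_ops = {
	.name		= "example_rt",
	.type		= XFS_BTREE_TYPE_INODE,
	/* only trees whose incore root may hold records set this flag */
	.geom_flags	= XFS_BTGEO_IROOT_RECORDS,
	/* blocks come from, and return to, the metadata file reservation */
	.alloc_block	= xfs_btree_alloc_metafile_block,
	.free_block	= xfs_btree_free_metafile_block,
	/* resizes the incore root in the metadata inode's data fork */
	.broot_realloc	= example_rtbt_broot_realloc,
	/* record/key sizes, verifiers and comparison callbacks elided */
};
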
diff --git a/fs/xfs/libxfs/xfs_btree_mem.c b/fs/xfs/libxfs/xfs_btree_mem.c
index df3d613675a1..f2f7b4305413 100644
--- a/fs/xfs/libxfs/xfs_btree_mem.c
+++ b/fs/xfs/libxfs/xfs_btree_mem.c
@@ -18,6 +18,7 @@
#include "xfs_ag.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
+#include "xfs_rtgroup.h"
/* Set the root of an in-memory btree. */
void
diff --git a/fs/xfs/libxfs/xfs_btree_staging.c b/fs/xfs/libxfs/xfs_btree_staging.c
index 694929703152..5ed84f9cc877 100644
--- a/fs/xfs/libxfs/xfs_btree_staging.c
+++ b/fs/xfs/libxfs/xfs_btree_staging.c
@@ -134,6 +134,7 @@ xfs_btree_stage_ifakeroot(
cur->bc_ino.ifake = ifake;
cur->bc_nlevels = ifake->if_levels;
cur->bc_ino.forksize = ifake->if_fork_size;
+ cur->bc_ino.whichfork = XFS_STAGING_FORK;
cur->bc_flags |= XFS_BTREE_STAGING;
}
@@ -573,6 +574,7 @@ xfs_btree_bload_compute_geometry(
struct xfs_btree_bload *bbl,
uint64_t nr_records)
{
+ const struct xfs_btree_ops *ops = cur->bc_ops;
uint64_t nr_blocks = 0;
uint64_t nr_this_level;
@@ -599,7 +601,7 @@ xfs_btree_bload_compute_geometry(
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
&avg_per_block, &level_blocks, &dontcare64);
- if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
+ if (ops->type == XFS_BTREE_TYPE_INODE) {
/*
* If all the items we want to store at this level
* would fit in the inode root block, then we have our
@@ -607,7 +609,9 @@ xfs_btree_bload_compute_geometry(
*
* Note that bmap btrees forbid records in the root.
*/
- if (level != 0 && nr_this_level <= avg_per_block) {
+ if ((level != 0 ||
+ (ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) &&
+ nr_this_level <= avg_per_block) {
nr_blocks++;
break;
}
@@ -658,7 +662,7 @@ xfs_btree_bload_compute_geometry(
return -EOVERFLOW;
bbl->btree_height = cur->bc_nlevels;
- if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
+ if (ops->type == XFS_BTREE_TYPE_INODE)
bbl->nr_blocks = nr_blocks - 1;
else
bbl->nr_blocks = nr_blocks;
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index ec51b8465e61..9effd95ddcd4 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -68,7 +68,9 @@ struct xfs_defer_op_type {
extern const struct xfs_defer_op_type xfs_bmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_refcount_update_defer_type;
+extern const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type;
extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
+extern const struct xfs_defer_op_type xfs_rtrmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
extern const struct xfs_defer_op_type xfs_rtextent_free_defer_type;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 202468223bf9..1775abcfa04d 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -197,7 +197,7 @@ xfs_da_unmount(
/*
* Return 1 if directory contains only "." and "..".
*/
-int
+static bool
xfs_dir_isempty(
xfs_inode_t *dp)
{
@@ -205,9 +205,9 @@ xfs_dir_isempty(
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
if (dp->i_disk_size == 0) /* might happen during shutdown. */
- return 1;
+ return true;
if (dp->i_disk_size > xfs_inode_data_fork_size(dp))
- return 0;
+ return false;
sfp = dp->i_df.if_data;
return !sfp->count;
}
@@ -379,12 +379,11 @@ xfs_dir_cilookup_result(
!(args->op_flags & XFS_DA_OP_CILOOKUP))
return -EEXIST;
- args->value = kmalloc(len,
+ args->value = kmemdup(name, len,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_RETRY_MAYFAIL);
if (!args->value)
return -ENOMEM;
- memcpy(args->value, name, len);
args->valuelen = len;
return -EEXIST;
}
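
The xfs_dir_cilookup_result() hunk folds an open-coded kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one step with the same gfp flags. A generic sketch of the equivalence (not the XFS code):

#include <linux/slab.h>
#include <linux/string.h>

/* before: two steps, with the copy easy to get wrong on error paths */
static void *dup_open_coded(const void *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return NULL;
	memcpy(p, src, len);
	return p;
}

/* after: one call, identical semantics */
static void *dup_with_kmemdup(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
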
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 576068ed81fa..a6594a5a941d 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -58,7 +58,6 @@ extern void xfs_dir_startup(void);
extern int xfs_da_mount(struct xfs_mount *mp);
extern void xfs_da_unmount(struct xfs_mount *mp);
-extern int xfs_dir_isempty(struct xfs_inode *dp);
extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_inode *pdp);
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index 7002d7676a78..a53c5d40e084 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -64,7 +64,8 @@
#define XFS_ERRTAG_WB_DELAY_MS 42
#define XFS_ERRTAG_WRITE_DELAY_MS 43
#define XFS_ERRTAG_EXCHMAPS_FINISH_ONE 44
-#define XFS_ERRTAG_MAX 45
+#define XFS_ERRTAG_METAFILE_RESV_CRITICAL 45
+#define XFS_ERRTAG_MAX 46
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -113,5 +114,6 @@
#define XFS_RANDOM_WB_DELAY_MS 3000
#define XFS_RANDOM_WRITE_DELAY_MS 3000
#define XFS_RANDOM_EXCHMAPS_FINISH_ONE 1
+#define XFS_RANDOM_METAFILE_RESV_CRITICAL 4
#endif /* __XFS_ERRORTAG_H_ */
diff --git a/fs/xfs/libxfs/xfs_exchmaps.c b/fs/xfs/libxfs/xfs_exchmaps.c
index 2021396651de..3f1d6a98c118 100644
--- a/fs/xfs/libxfs/xfs_exchmaps.c
+++ b/fs/xfs/libxfs/xfs_exchmaps.c
@@ -662,7 +662,9 @@ xfs_exchmaps_rmapbt_blocks(
if (!xfs_has_rmapbt(mp))
return 0;
if (XFS_IS_REALTIME_INODE(req->ip1))
- return 0;
+ return howmany_64(req->nr_exchanges,
+ XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp)) *
+ XFS_RTRMAPADD_SPACE_RES(mp);
return howmany_64(req->nr_exchanges,
XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) *
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 4d47a3e723aa..b1007fb661ba 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -857,6 +857,8 @@ enum xfs_metafile_type {
XFS_METAFILE_PRJQUOTA, /* project quota */
XFS_METAFILE_RTBITMAP, /* rt bitmap */
XFS_METAFILE_RTSUMMARY, /* rt summary */
+ XFS_METAFILE_RTRMAP, /* rt rmap */
+ XFS_METAFILE_RTREFCOUNT, /* rt refcount */
XFS_METAFILE_MAX
} __packed;
@@ -868,7 +870,9 @@ enum xfs_metafile_type {
{ XFS_METAFILE_GRPQUOTA, "grpquota" }, \
{ XFS_METAFILE_PRJQUOTA, "prjquota" }, \
{ XFS_METAFILE_RTBITMAP, "rtbitmap" }, \
- { XFS_METAFILE_RTSUMMARY, "rtsummary" }
+ { XFS_METAFILE_RTSUMMARY, "rtsummary" }, \
+ { XFS_METAFILE_RTRMAP, "rtrmap" }, \
+ { XFS_METAFILE_RTREFCOUNT, "rtrefcount" }
/*
* On-disk inode structure.
@@ -997,7 +1001,8 @@ enum xfs_dinode_fmt {
XFS_DINODE_FMT_LOCAL, /* bulk data */
XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
- XFS_DINODE_FMT_UUID /* added long ago, but never used */
+ XFS_DINODE_FMT_UUID, /* added long ago, but never used */
+ XFS_DINODE_FMT_META_BTREE, /* metadata btree */
};
#define XFS_INODE_FORMAT_STR \
@@ -1005,7 +1010,8 @@ enum xfs_dinode_fmt {
{ XFS_DINODE_FMT_LOCAL, "local" }, \
{ XFS_DINODE_FMT_EXTENTS, "extent" }, \
{ XFS_DINODE_FMT_BTREE, "btree" }, \
- { XFS_DINODE_FMT_UUID, "uuid" }
+ { XFS_DINODE_FMT_UUID, "uuid" }, \
+ { XFS_DINODE_FMT_META_BTREE, "meta_btree" }
/*
* Max values for extnum and aextnum.
@@ -1726,6 +1732,24 @@ typedef __be32 xfs_rmap_ptr_t;
XFS_IBT_BLOCK(mp) + 1)
/*
+ * Realtime Reverse mapping btree format definitions
+ *
+ * This is a btree for reverse mapping records for realtime volumes
+ */
+#define XFS_RTRMAP_CRC_MAGIC 0x4d415052 /* 'MAPR' */
+
+/*
+ * rtrmap root header, on-disk form only.
+ */
+struct xfs_rtrmap_root {
+ __be16 bb_level; /* 0 is a leaf */
+ __be16 bb_numrecs; /* current # of data records */
+};
+
+/* inode-based btree pointer type */
+typedef __be64 xfs_rtrmap_ptr_t;
+
+/*
* Reference Count Btree format definitions
*
*/
@@ -1768,12 +1792,29 @@ struct xfs_refcount_key {
__be32 rc_startblock; /* starting block number */
};
-#define MAXREFCOUNT ((xfs_nlink_t)~0U)
-#define MAXREFCEXTLEN ((xfs_extlen_t)~0U)
+#define XFS_REFC_REFCOUNT_MAX ((xfs_nlink_t)~0U)
+#define XFS_REFC_LEN_MAX ((xfs_extlen_t)~0U)
/* btree pointer type */
typedef __be32 xfs_refcount_ptr_t;
+/*
+ * Realtime Reference Count btree format definitions
+ *
+ * This is a btree for reference count records for realtime volumes
+ */
+#define XFS_RTREFC_CRC_MAGIC 0x52434e54 /* 'RCNT' */
+
+/*
+ * rt refcount root header, on-disk form only.
+ */
+struct xfs_rtrefcount_root {
+ __be16 bb_level; /* 0 is a leaf */
+ __be16 bb_numrecs; /* current # of data records */
+};
+
+/* inode-rooted btree pointer type */
+typedef __be64 xfs_rtrefcount_ptr_t;
/*
* BMAP Btree format definitions
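
The new xfs_rtrmap_root and xfs_rtrefcount_root headers keep bb_level and bb_numrecs in on-disk big-endian form, so readers convert them before use. A small sketch of that decoding (the helper name is invented):

/* assumes the usual libxfs includes that pull in xfs_format.h */

/* invented helper: pull the root header fields into CPU byte order */
static void example_decode_rtrmap_root(const struct xfs_rtrmap_root *root,
				       unsigned int *level,
				       unsigned int *numrecs)
{
	*level = be16_to_cpu(root->bb_level);		/* 0 means a leaf root */
	*numrecs = be16_to_cpu(root->bb_numrecs);	/* records in the root */
}
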
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 41ce4d3d650e..2c3171262b44 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -737,9 +737,11 @@ struct xfs_scrub_metadata {
#define XFS_SCRUB_TYPE_DIRTREE 28 /* directory tree structure */
#define XFS_SCRUB_TYPE_METAPATH 29 /* metadata directory tree paths */
#define XFS_SCRUB_TYPE_RGSUPER 30 /* realtime superblock */
+#define XFS_SCRUB_TYPE_RTRMAPBT 31 /* rtgroup reverse mapping btree */
+#define XFS_SCRUB_TYPE_RTREFCBT 32 /* realtime reference count btree */
/* Number of scrub subcommands. */
-#define XFS_SCRUB_TYPE_NR 31
+#define XFS_SCRUB_TYPE_NR 33
/*
* This special type code only applies to the vectored scrub implementation.
@@ -829,9 +831,11 @@ struct xfs_scrub_vec_head {
#define XFS_SCRUB_METAPATH_USRQUOTA (5) /* user quota */
#define XFS_SCRUB_METAPATH_GRPQUOTA (6) /* group quota */
#define XFS_SCRUB_METAPATH_PRJQUOTA (7) /* project quota */
+#define XFS_SCRUB_METAPATH_RTRMAPBT (8) /* realtime reverse mapping */
+#define XFS_SCRUB_METAPATH_RTREFCOUNTBT (9) /* realtime refcount */
/* Number of metapath sm_ino values */
-#define XFS_SCRUB_METAPATH_NR (8)
+#define XFS_SCRUB_METAPATH_NR (10)
/*
* ioctl limits
@@ -993,6 +997,8 @@ struct xfs_rtgroup_geometry {
#define XFS_RTGROUP_GEOM_SICK_SUPER (1U << 0) /* superblock */
#define XFS_RTGROUP_GEOM_SICK_BITMAP (1U << 1) /* rtbitmap */
#define XFS_RTGROUP_GEOM_SICK_SUMMARY (1U << 2) /* rtsummary */
+#define XFS_RTGROUP_GEOM_SICK_RMAPBT (1U << 3) /* reverse mappings */
+#define XFS_RTGROUP_GEOM_SICK_REFCNTBT (1U << 4) /* reference counts */
/*
* ioctl commands that are used by Linux filesystems
diff --git a/fs/xfs/libxfs/xfs_health.h b/fs/xfs/libxfs/xfs_health.h
index d34986ac18c3..b31000f7190c 100644
--- a/fs/xfs/libxfs/xfs_health.h
+++ b/fs/xfs/libxfs/xfs_health.h
@@ -70,6 +70,8 @@ struct xfs_rtgroup;
#define XFS_SICK_RG_SUPER (1 << 0) /* rt group superblock */
#define XFS_SICK_RG_BITMAP (1 << 1) /* rt group bitmap */
#define XFS_SICK_RG_SUMMARY (1 << 2) /* rt groups summary */
+#define XFS_SICK_RG_RMAPBT (1 << 3) /* reverse mappings */
+#define XFS_SICK_RG_REFCNTBT (1 << 4) /* reference counts */
/* Observable health issues for AG metadata. */
#define XFS_SICK_AG_SB (1 << 0) /* superblock */
@@ -115,7 +117,9 @@ struct xfs_rtgroup;
#define XFS_SICK_RG_PRIMARY (XFS_SICK_RG_SUPER | \
XFS_SICK_RG_BITMAP | \
- XFS_SICK_RG_SUMMARY)
+ XFS_SICK_RG_SUMMARY | \
+ XFS_SICK_RG_RMAPBT | \
+ XFS_SICK_RG_REFCNTBT)
#define XFS_SICK_AG_PRIMARY (XFS_SICK_AG_SB | \
XFS_SICK_AG_AGF | \
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 424861fbf1bd..f24fa628fecf 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -441,6 +441,30 @@ xfs_dinode_verify_fork(
if (di_nextents > max_extents)
return __this_address;
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ if (!xfs_has_metadir(mp))
+ return __this_address;
+ if (!(dip->di_flags2 & cpu_to_be64(XFS_DIFLAG2_METADATA)))
+ return __this_address;
+ switch (be16_to_cpu(dip->di_metatype)) {
+ case XFS_METAFILE_RTRMAP:
+ /*
+ * growfs must create the rtrmap inodes before adding a
+ * realtime volume to the filesystem, so we cannot use
+ * the rtrmapbt predicate here.
+ */
+ if (!xfs_has_rmapbt(mp))
+ return __this_address;
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ /* same comment about growfs and rmap inodes applies */
+ if (!xfs_has_reflink(mp))
+ return __this_address;
+ break;
+ default:
+ return __this_address;
+ }
+ break;
default:
return __this_address;
}
@@ -460,6 +484,10 @@ xfs_dinode_verify_forkoff(
if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
return __this_address;
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ if (!xfs_has_metadir(mp) || !xfs_has_parent(mp))
+ return __this_address;
+ fallthrough;
case XFS_DINODE_FMT_LOCAL: /* fall through ... */
case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
case XFS_DINODE_FMT_BTREE:
@@ -637,9 +665,6 @@ xfs_dinode_verify(
if (mode && nextents + naextents > nblocks)
return __this_address;
- if (nextents + naextents == 0 && nblocks != 0)
- return __this_address;
-
if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
return __this_address;
@@ -723,7 +748,8 @@ xfs_dinode_verify(
return __this_address;
/* don't let reflink and realtime mix */
- if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
+ if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME) &&
+ !xfs_has_rtreflink(mp))
return __this_address;
/* COW extent size hint validation */
@@ -743,6 +769,12 @@ xfs_dinode_verify(
return fa;
}
+ /* metadata inodes containing btrees always have zero extent count */
+ if (XFS_DFORK_FORMAT(dip, XFS_DATA_FORK) != XFS_DINODE_FMT_META_BTREE) {
+ if (nextents + naextents == 0 && nblocks != 0)
+ return __this_address;
+ }
+
return NULL;
}
@@ -878,11 +910,29 @@ xfs_inode_validate_cowextsize(
bool rt_flag;
bool hint_flag;
uint32_t cowextsize_bytes;
+ uint32_t blocksize_bytes;
rt_flag = (flags & XFS_DIFLAG_REALTIME);
hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
+ /*
+ * Similar to extent size hints, a directory can be configured to
+ * propagate realtime status and a CoW extent size hint to newly
+ * created files even if there is no realtime device, and the hints on
+ * disk can become misaligned if the sysadmin changes the rt extent
+ * size while adding the realtime device.
+ *
+ * Therefore, we can only enforce the rextsize alignment check against
+ * regular realtime files, and rely on callers to decide when alignment
+ * checks are appropriate, and fix things up as needed.
+ */
+
+ if (rt_flag)
+ blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
+ else
+ blocksize_bytes = mp->m_sb.sb_blocksize;
+
if (hint_flag && !xfs_has_reflink(mp))
return __this_address;
@@ -896,16 +946,13 @@ xfs_inode_validate_cowextsize(
if (mode && !hint_flag && cowextsize != 0)
return __this_address;
- if (hint_flag && rt_flag)
- return __this_address;
-
- if (cowextsize_bytes % mp->m_sb.sb_blocksize)
+ if (cowextsize_bytes % blocksize_bytes)
return __this_address;
if (cowextsize > XFS_MAX_BMBT_EXTLEN)
return __this_address;
- if (cowextsize > mp->m_sb.sb_agblocks / 2)
+ if (!rt_flag && cowextsize > mp->m_sb.sb_agblocks / 2)
return __this_address;
return NULL;
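
With realtime reflink permitted, the CoW extent size hint on a realtime inode is now validated against the rt extent size rather than the fs block size, and the agblocks/2 cap applies only to data-device files. A standalone example of the alignment arithmetic, with made-up geometry:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 4096;	/* sb_blocksize, bytes */
	unsigned int rextsize = 4;	/* sb_rextsize, in fs blocks */
	unsigned int cowextsize = 32;	/* hint, in fs blocks */

	unsigned int cowextsize_bytes = cowextsize * blocksize;  /* 131072 */
	unsigned int align = rextsize * blocksize;		  /* 16384 */

	/* a realtime file's hint must cover a whole number of rt extents */
	bool ok = (cowextsize_bytes % align) == 0;

	printf("hint %u bytes, rt extent %u bytes, aligned: %s\n",
	       cowextsize_bytes, align, ok ? "yes" : "no");
	return 0;
}
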
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 1158ca48626b..4f99b90add55 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -27,6 +27,8 @@
#include "xfs_errortag.h"
#include "xfs_health.h"
#include "xfs_symlink_remote.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
struct kmem_cache *xfs_ifork_cache;
@@ -178,7 +180,7 @@ xfs_iformat_btree(
struct xfs_mount *mp = ip->i_mount;
xfs_bmdr_block_t *dfp;
struct xfs_ifork *ifp;
- /* REFERENCED */
+ struct xfs_btree_block *broot;
int nrecs;
int size;
int level;
@@ -211,16 +213,13 @@ xfs_iformat_btree(
return -EFSCORRUPTED;
}
- ifp->if_broot_bytes = size;
- ifp->if_broot = kmalloc(size,
- GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
- ASSERT(ifp->if_broot != NULL);
+ broot = xfs_broot_alloc(ifp, size);
/*
* Copy and convert from the on-disk structure
* to the in-memory structure.
*/
xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
- ifp->if_broot, size);
+ broot, size);
ifp->if_bytes = 0;
ifp->if_data = NULL;
@@ -270,6 +269,16 @@ xfs_iformat_data_fork(
return xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
case XFS_DINODE_FMT_BTREE:
return xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
+ case XFS_DINODE_FMT_META_BTREE:
+ switch (ip->i_metatype) {
+ case XFS_METAFILE_RTRMAP:
+ return xfs_iformat_rtrmap(ip, dip);
+ case XFS_METAFILE_RTREFCOUNT:
+ return xfs_iformat_rtrefcount(ip, dip);
+ default:
+ break;
+ }
+ fallthrough;
default:
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
dip, sizeof(*dip), __this_address);
@@ -363,135 +372,68 @@ xfs_iformat_attr_fork(
}
/*
- * Reallocate the space for if_broot based on the number of records
- * being added or deleted as indicated in rec_diff. Move the records
- * and pointers in if_broot to fit the new size. When shrinking this
- * will eliminate holes between the records and pointers created by
- * the caller. When growing this will create holes to be filled in
- * by the caller.
- *
- * The caller must not request to add more records than would fit in
- * the on-disk inode root. If the if_broot is currently NULL, then
- * if we are adding records, one will be allocated. The caller must also
- * not request that the number of records go below zero, although
- * it can go to zero.
- *
- * ip -- the inode whose if_broot area is changing
- * ext_diff -- the change in the number of records, positive or negative,
- * requested for the if_broot array.
+ * Allocate the if_broot component of an inode fork so that it is @new_size
+ * bytes in size, using __GFP_NOLOCKDEP like all the other code that
+ * initializes a broot during inode load. Returns if_broot.
*/
-void
-xfs_iroot_realloc(
- xfs_inode_t *ip,
- int rec_diff,
- int whichfork)
+struct xfs_btree_block *
+xfs_broot_alloc(
+ struct xfs_ifork *ifp,
+ size_t new_size)
{
- struct xfs_mount *mp = ip->i_mount;
- int cur_max;
- struct xfs_ifork *ifp;
- struct xfs_btree_block *new_broot;
- int new_max;
- size_t new_size;
- char *np;
- char *op;
+ ASSERT(ifp->if_broot == NULL);
- /*
- * Handle the degenerate case quietly.
- */
- if (rec_diff == 0) {
- return;
- }
+ ifp->if_broot = kmalloc(new_size,
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
+ ifp->if_broot_bytes = new_size;
+ return ifp->if_broot;
+}
- ifp = xfs_ifork_ptr(ip, whichfork);
- if (rec_diff > 0) {
- /*
- * If there wasn't any memory allocated before, just
- * allocate it now and get out.
- */
- if (ifp->if_broot_bytes == 0) {
- new_size = xfs_bmap_broot_space_calc(mp, rec_diff);
- ifp->if_broot = kmalloc(new_size,
- GFP_KERNEL | __GFP_NOFAIL);
- ifp->if_broot_bytes = (int)new_size;
- return;
- }
+/*
+ * Reallocate the if_broot component of an inode fork so that it is @new_size
+ * bytes in size. Returns if_broot.
+ */
+struct xfs_btree_block *
+xfs_broot_realloc(
+ struct xfs_ifork *ifp,
+ size_t new_size)
+{
+ /* No size change? No action needed. */
+ if (new_size == ifp->if_broot_bytes)
+ return ifp->if_broot;
- /*
- * If there is already an existing if_broot, then we need
- * to realloc() it and shift the pointers to their new
- * location. The records don't change location because
- * they are kept butted up against the btree block header.
- */
- cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false);
- new_max = cur_max + rec_diff;
- new_size = xfs_bmap_broot_space_calc(mp, new_max);
- ifp->if_broot = krealloc(ifp->if_broot, new_size,
- GFP_KERNEL | __GFP_NOFAIL);
- op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
- ifp->if_broot_bytes);
- np = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
- (int)new_size);
- ifp->if_broot_bytes = (int)new_size;
- ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <=
- xfs_inode_fork_size(ip, whichfork));
- memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
- return;
+ /* New size is zero, free it. */
+ if (new_size == 0) {
+ ifp->if_broot_bytes = 0;
+ kfree(ifp->if_broot);
+ ifp->if_broot = NULL;
+ return NULL;
}
/*
- * rec_diff is less than 0. In this case, we are shrinking the
- * if_broot buffer. It must already exist. If we go to zero
- * records, just get rid of the root and clear the status bit.
+ * Shrinking the iroot means we allocate a new smaller object and copy
+ * it. We don't trust krealloc not to nop on realloc-down.
*/
- ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
- cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false);
- new_max = cur_max + rec_diff;
- ASSERT(new_max >= 0);
- if (new_max > 0)
- new_size = xfs_bmap_broot_space_calc(mp, new_max);
- else
- new_size = 0;
- if (new_size > 0) {
- new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
- /*
- * First copy over the btree block header.
- */
- memcpy(new_broot, ifp->if_broot,
- xfs_bmbt_block_len(ip->i_mount));
- } else {
- new_broot = NULL;
+ if (ifp->if_broot_bytes > 0 && ifp->if_broot_bytes > new_size) {
+ struct xfs_btree_block *old_broot = ifp->if_broot;
+
+ ifp->if_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
+ ifp->if_broot_bytes = new_size;
+ memcpy(ifp->if_broot, old_broot, new_size);
+ kfree(old_broot);
+ return ifp->if_broot;
}
/*
- * Only copy the keys and pointers if there are any.
+ * Growing the iroot means we can krealloc. This may get us the same
+ * object.
*/
- if (new_max > 0) {
- /*
- * First copy the keys.
- */
- op = (char *)xfs_bmbt_key_addr(mp, ifp->if_broot, 1);
- np = (char *)xfs_bmbt_key_addr(mp, new_broot, 1);
- memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_key_t));
-
- /*
- * Then copy the pointers.
- */
- op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1,
- ifp->if_broot_bytes);
- np = (char *)xfs_bmap_broot_ptr_addr(mp, new_broot, 1,
- (int)new_size);
- memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
- }
- kfree(ifp->if_broot);
- ifp->if_broot = new_broot;
- ifp->if_broot_bytes = (int)new_size;
- if (ifp->if_broot)
- ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <=
- xfs_inode_fork_size(ip, whichfork));
- return;
+ ifp->if_broot = krealloc(ifp->if_broot, new_size,
+ GFP_KERNEL | __GFP_NOFAIL);
+ ifp->if_broot_bytes = new_size;
+ return ifp->if_broot;
}
-
/*
* This is called when the amount of space needed for if_data
* is increased or decreased. The change in size is indicated by
@@ -671,6 +613,25 @@ xfs_iflush_fork(
}
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ ASSERT(whichfork == XFS_DATA_FORK);
+
+ if (!(iip->ili_fields & brootflag[whichfork]))
+ break;
+
+ switch (ip->i_metatype) {
+ case XFS_METAFILE_RTRMAP:
+ xfs_iflush_rtrmap(ip, dip);
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ xfs_iflush_rtrefcount(ip, dip);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ break;
+
default:
ASSERT(0);
break;
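
For reference, a hedged sketch of how a caller might exercise the new fork-level root buffer helpers introduced above (the sizes are arbitrary example values, not anything XFS actually uses):

/* assumes the usual libxfs includes */
static void example_broot_lifecycle(struct xfs_inode *ip)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

	xfs_broot_alloc(ifp, 256);	/* first allocation at inode load */
	xfs_broot_realloc(ifp, 512);	/* grow: krealloc may move the buffer */
	xfs_broot_realloc(ifp, 128);	/* shrink: new buffer, prefix copied */
	xfs_broot_realloc(ifp, 0);	/* free it entirely; returns NULL */
}
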
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 2373d12fd474..69ed0919d60b 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -170,7 +170,11 @@ void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
void xfs_idestroy_fork(struct xfs_ifork *ifp);
void * xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff,
int whichfork);
-void xfs_iroot_realloc(struct xfs_inode *, int, int);
+struct xfs_btree_block *xfs_broot_alloc(struct xfs_ifork *ifp,
+ size_t new_size);
+struct xfs_btree_block *xfs_broot_realloc(struct xfs_ifork *ifp,
+ size_t new_size);
+
int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 15dec19b6c32..a472ac2e45d0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -250,6 +250,10 @@ typedef struct xfs_trans_header {
#define XFS_LI_XMD 0x1249 /* mapping exchange done */
#define XFS_LI_EFI_RT 0x124a /* realtime extent free intent */
#define XFS_LI_EFD_RT 0x124b /* realtime extent free done */
+#define XFS_LI_RUI_RT 0x124c /* realtime rmap update intent */
+#define XFS_LI_RUD_RT 0x124d /* realtime rmap update done */
+#define XFS_LI_CUI_RT 0x124e /* realtime refcount update intent */
+#define XFS_LI_CUD_RT 0x124f /* realtime refcount update done */
#define XFS_LI_TYPE_DESC \
{ XFS_LI_EFI, "XFS_LI_EFI" }, \
@@ -271,7 +275,11 @@ typedef struct xfs_trans_header {
{ XFS_LI_XMI, "XFS_LI_XMI" }, \
{ XFS_LI_XMD, "XFS_LI_XMD" }, \
{ XFS_LI_EFI_RT, "XFS_LI_EFI_RT" }, \
- { XFS_LI_EFD_RT, "XFS_LI_EFD_RT" }
+ { XFS_LI_EFD_RT, "XFS_LI_EFD_RT" }, \
+ { XFS_LI_RUI_RT, "XFS_LI_RUI_RT" }, \
+ { XFS_LI_RUD_RT, "XFS_LI_RUD_RT" }, \
+ { XFS_LI_CUI_RT, "XFS_LI_CUI_RT" }, \
+ { XFS_LI_CUD_RT, "XFS_LI_CUD_RT" }
/*
* Inode Log Item Format definitions.
@@ -351,12 +359,6 @@ struct xfs_inode_log_format_32 {
*/
#define XFS_ILOG_IVERSION 0x8000
-#define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
- XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
- XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
- XFS_ILOG_ABROOT | XFS_ILOG_DOWNER | \
- XFS_ILOG_AOWNER)
-
#define XFS_ILOG_DFORK (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
XFS_ILOG_DBROOT)
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 5397a8ff004d..66c7916fb5cd 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -79,6 +79,10 @@ extern const struct xlog_recover_item_ops xlog_xmi_item_ops;
extern const struct xlog_recover_item_ops xlog_xmd_item_ops;
extern const struct xlog_recover_item_ops xlog_rtefi_item_ops;
extern const struct xlog_recover_item_ops xlog_rtefd_item_ops;
+extern const struct xlog_recover_item_ops xlog_rtrui_item_ops;
+extern const struct xlog_recover_item_ops xlog_rtrud_item_ops;
+extern const struct xlog_recover_item_ops xlog_rtcui_item_ops;
+extern const struct xlog_recover_item_ops xlog_rtcud_item_ops;
/*
* Macros, structures, prototypes for internal log manager use.
diff --git a/fs/xfs/libxfs/xfs_metadir.c b/fs/xfs/libxfs/xfs_metadir.c
index bae7377c0f22..178e89711cb7 100644
--- a/fs/xfs/libxfs/xfs_metadir.c
+++ b/fs/xfs/libxfs/xfs_metadir.c
@@ -29,6 +29,10 @@
#include "xfs_dir2_priv.h"
#include "xfs_parent.h"
#include "xfs_health.h"
+#include "xfs_errortag.h"
+#include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
/*
* Metadata Directory Tree
diff --git a/fs/xfs/libxfs/xfs_metafile.c b/fs/xfs/libxfs/xfs_metafile.c
index adeb25d1a444..2f5f554a36d4 100644
--- a/fs/xfs/libxfs/xfs_metafile.c
+++ b/fs/xfs/libxfs/xfs_metafile.c
@@ -17,6 +17,28 @@
#include "xfs_metafile.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_errortag.h"
+#include "xfs_error.h"
+#include "xfs_alloc.h"
+
+static const struct {
+ enum xfs_metafile_type mtype;
+ const char *name;
+} xfs_metafile_type_strs[] = { XFS_METAFILE_TYPE_STR };
+
+const char *
+xfs_metafile_type_str(enum xfs_metafile_type metatype)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xfs_metafile_type_strs); i++) {
+ if (xfs_metafile_type_strs[i].mtype == metatype)
+ return xfs_metafile_type_strs[i].name;
+ }
+
+ return NULL;
+}
/* Set up an inode to be recognized as a metadata directory inode. */
void
@@ -50,3 +72,204 @@ xfs_metafile_clear_iflag(
ip->i_diflags2 &= ~XFS_DIFLAG2_METADATA;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
+
+/*
+ * Is the amount of space that could be allocated towards a given metadata
+ * file at or beneath a certain threshold?
+ */
+static inline bool
+xfs_metafile_resv_can_cover(
+ struct xfs_inode *ip,
+ int64_t rhs)
+{
+ /*
+ * The amount of space that can be allocated to this metadata file is
+ * the remaining reservation for the particular metadata file + the
+ * global free block count. Take care of the first case to avoid
+ * touching the per-cpu counter.
+ */
+ if (ip->i_delayed_blks >= rhs)
+ return true;
+
+ /*
+ * There aren't enough blocks left in the inode's reservation, but it
+ * isn't critical unless there also isn't enough free space.
+ */
+ return __percpu_counter_compare(&ip->i_mount->m_fdblocks,
+ rhs - ip->i_delayed_blks, 2048) >= 0;
+}
+
+/*
+ * Is this metadata file critically low on blocks? For now we'll define that
+ * as the number of blocks we can get our hands on being less than 10% of what
+ * we reserved or less than some arbitrary number (maximum btree height).
+ */
+bool
+xfs_metafile_resv_critical(
+ struct xfs_inode *ip)
+{
+ uint64_t asked_low_water;
+
+ if (!ip)
+ return false;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+ trace_xfs_metafile_resv_critical(ip, 0);
+
+ if (!xfs_metafile_resv_can_cover(ip, ip->i_mount->m_rtbtree_maxlevels))
+ return true;
+
+ asked_low_water = div_u64(ip->i_meta_resv_asked, 10);
+ if (!xfs_metafile_resv_can_cover(ip, asked_low_water))
+ return true;
+
+ return XFS_TEST_ERROR(false, ip->i_mount,
+ XFS_ERRTAG_METAFILE_RESV_CRITICAL);
+}
+
+/* Allocate a block from the metadata file's reservation. */
+void
+xfs_metafile_resv_alloc_space(
+ struct xfs_inode *ip,
+ struct xfs_alloc_arg *args)
+{
+ int64_t len = args->len;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+ ASSERT(args->resv == XFS_AG_RESV_METAFILE);
+
+ trace_xfs_metafile_resv_alloc_space(ip, args->len);
+
+ /*
+ * Allocate the blocks from the metadata inode's block reservation
+ * and update the ondisk sb counter.
+ */
+ if (ip->i_delayed_blks > 0) {
+ int64_t from_resv;
+
+ from_resv = min_t(int64_t, len, ip->i_delayed_blks);
+ ip->i_delayed_blks -= from_resv;
+ xfs_mod_delalloc(ip, 0, -from_resv);
+ xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS,
+ -from_resv);
+ len -= from_resv;
+ }
+
+ /*
+ * Any allocation in excess of the reservation requires in-core and
+ * on-disk fdblocks updates. If we can grab @len blocks from the
+ * in-core fdblocks then all we need to do is update the on-disk
+ * superblock; if not, then try to steal some from the transaction's
+ * block reservation. Overruns are only expected for rmap btrees.
+ */
+ if (len) {
+ unsigned int field;
+ int error;
+
+ error = xfs_dec_fdblocks(ip->i_mount, len, true);
+ if (error)
+ field = XFS_TRANS_SB_FDBLOCKS;
+ else
+ field = XFS_TRANS_SB_RES_FDBLOCKS;
+
+ xfs_trans_mod_sb(args->tp, field, -len);
+ }
+
+ ip->i_nblocks += args->len;
+ xfs_trans_log_inode(args->tp, ip, XFS_ILOG_CORE);
+}
+
+/* Free a block to the metadata file's reservation. */
+void
+xfs_metafile_resv_free_space(
+ struct xfs_inode *ip,
+ struct xfs_trans *tp,
+ xfs_filblks_t len)
+{
+ int64_t to_resv;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+ trace_xfs_metafile_resv_free_space(ip, len);
+
+ ip->i_nblocks -= len;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ /*
+ * Add the freed blocks back into the inode's delalloc reservation
+ * until it reaches the maximum size. Update the ondisk fdblocks only.
+ */
+ to_resv = ip->i_meta_resv_asked - (ip->i_nblocks + ip->i_delayed_blks);
+ if (to_resv > 0) {
+ to_resv = min_t(int64_t, to_resv, len);
+ ip->i_delayed_blks += to_resv;
+ xfs_mod_delalloc(ip, 0, to_resv);
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, to_resv);
+ len -= to_resv;
+ }
+
+ /*
+ * Everything else goes back to the filesystem, so update the in-core
+ * and on-disk counters.
+ */
+ if (len)
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len);
+}
+
+/* Release a metadata file's space reservation. */
+void
+xfs_metafile_resv_free(
+ struct xfs_inode *ip)
+{
+ /* Non-btree metadata inodes don't need space reservations. */
+ if (!ip || !ip->i_meta_resv_asked)
+ return;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+ trace_xfs_metafile_resv_free(ip, 0);
+
+ if (ip->i_delayed_blks) {
+ xfs_mod_delalloc(ip, 0, -ip->i_delayed_blks);
+ xfs_add_fdblocks(ip->i_mount, ip->i_delayed_blks);
+ ip->i_delayed_blks = 0;
+ }
+ ip->i_meta_resv_asked = 0;
+}
+
+/* Set up a metadata file's space reservation. */
+int
+xfs_metafile_resv_init(
+ struct xfs_inode *ip,
+ xfs_filblks_t ask)
+{
+ xfs_filblks_t hidden_space;
+ xfs_filblks_t used;
+ int error;
+
+ if (!ip || ip->i_meta_resv_asked > 0)
+ return 0;
+
+ ASSERT(xfs_is_metadir_inode(ip));
+
+ /*
+ * Space taken by all other metadata btrees is accounted on-disk as
+ * used space. We therefore only hide the space that is reserved but
+ * not used by the trees.
+ */
+ used = ip->i_nblocks;
+ if (used > ask)
+ ask = used;
+ hidden_space = ask - used;
+
+ error = xfs_dec_fdblocks(ip->i_mount, hidden_space, true);
+ if (error) {
+ trace_xfs_metafile_resv_init_error(ip, error, _RET_IP_);
+ return error;
+ }
+
+ xfs_mod_delalloc(ip, 0, hidden_space);
+ ip->i_delayed_blks = hidden_space;
+ ip->i_meta_resv_asked = ask;
+
+ trace_xfs_metafile_resv_init(ip, ask);
+ return 0;
+}
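
For orientation, the reservation logic added above reduces to simple block accounting: a metadata file keeps whatever it already owns visible in i_nblocks, and only the unused remainder of the requested reservation is hidden from fdblocks and parked in i_delayed_blks; later allocations draw from that parked pool first and only then fall back to fdblocks. A standalone sketch of the arithmetic, with purely hypothetical numbers and not kernel code:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ask = 1000;	/* hypothetical reservation request, in blocks */
	uint64_t used = 250;	/* blocks the btree already owns (i_nblocks) */

	if (used > ask)		/* never reserve less than what is already in use */
		ask = used;

	/* only the unused part of the request is hidden from fdblocks */
	uint64_t hidden_space = ask - used;

	printf("ask=%" PRIu64 " used=%" PRIu64 " hidden=%" PRIu64 "\n",
			ask, used, hidden_space);
	return 0;
}
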
diff --git a/fs/xfs/libxfs/xfs_metafile.h b/fs/xfs/libxfs/xfs_metafile.h
index acec400123db..95af4b52e5a7 100644
--- a/fs/xfs/libxfs/xfs_metafile.h
+++ b/fs/xfs/libxfs/xfs_metafile.h
@@ -6,6 +6,8 @@
#ifndef __XFS_METAFILE_H__
#define __XFS_METAFILE_H__
+const char *xfs_metafile_type_str(enum xfs_metafile_type metatype);
+
/* All metadata files must have these flags set. */
#define XFS_METAFILE_DIFLAGS (XFS_DIFLAG_IMMUTABLE | \
XFS_DIFLAG_SYNC | \
@@ -21,6 +23,17 @@ void xfs_metafile_set_iflag(struct xfs_trans *tp, struct xfs_inode *ip,
enum xfs_metafile_type metafile_type);
void xfs_metafile_clear_iflag(struct xfs_trans *tp, struct xfs_inode *ip);
+/* Space reservations for metadata inodes. */
+struct xfs_alloc_arg;
+
+bool xfs_metafile_resv_critical(struct xfs_inode *ip);
+void xfs_metafile_resv_alloc_space(struct xfs_inode *ip,
+ struct xfs_alloc_arg *args);
+void xfs_metafile_resv_free_space(struct xfs_inode *ip, struct xfs_trans *tp,
+ xfs_filblks_t len);
+void xfs_metafile_resv_free(struct xfs_inode *ip);
+int xfs_metafile_resv_init(struct xfs_inode *ip, xfs_filblks_t ask);
+
/* Code specific to kernel/userspace; must be provided externally. */
int xfs_trans_metafile_iget(struct xfs_trans *tp, xfs_ino_t ino,
diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h
index ad0dedf00f18..a85ecddaa48e 100644
--- a/fs/xfs/libxfs/xfs_ondisk.h
+++ b/fs/xfs/libxfs/xfs_ondisk.h
@@ -83,6 +83,10 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(union xfs_rtword_raw, 4);
XFS_CHECK_STRUCT_SIZE(union xfs_suminfo_raw, 4);
XFS_CHECK_STRUCT_SIZE(struct xfs_rtbuf_blkinfo, 48);
+ XFS_CHECK_STRUCT_SIZE(xfs_rtrmap_ptr_t, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rtrmap_root, 4);
+ XFS_CHECK_STRUCT_SIZE(xfs_rtrefcount_ptr_t, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rtrefcount_root, 4);
/*
* m68k has problems with struct xfs_attr_leaf_name_remote, but we pad
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 2dbab68b4fe6..cebe83f7842a 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -25,6 +25,9 @@
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_refcount_item.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtrefcount_btree.h"
struct kmem_cache *xfs_refcount_intent_cache;
@@ -128,7 +131,7 @@ xfs_refcount_check_irec(
struct xfs_perag *pag,
const struct xfs_refcount_irec *irec)
{
- if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
+ if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX)
return __this_address;
if (!xfs_refcount_check_domain(irec))
@@ -138,12 +141,43 @@ xfs_refcount_check_irec(
if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount))
return __this_address;
- if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
+ if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX)
return __this_address;
return NULL;
}
+xfs_failaddr_t
+xfs_rtrefcount_check_irec(
+ struct xfs_rtgroup *rtg,
+ const struct xfs_refcount_irec *irec)
+{
+ if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX)
+ return __this_address;
+
+ if (!xfs_refcount_check_domain(irec))
+ return __this_address;
+
+ /* check for valid extent range, including overflow */
+ if (!xfs_verify_rgbext(rtg, irec->rc_startblock, irec->rc_blockcount))
+ return __this_address;
+
+ if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX)
+ return __this_address;
+
+ return NULL;
+}
+
+static inline xfs_failaddr_t
+xfs_refcount_check_btrec(
+ struct xfs_btree_cur *cur,
+ const struct xfs_refcount_irec *irec)
+{
+ if (xfs_btree_is_rtrefcount(cur->bc_ops))
+ return xfs_rtrefcount_check_irec(to_rtg(cur->bc_group), irec);
+ return xfs_refcount_check_irec(to_perag(cur->bc_group), irec);
+}
+
static inline int
xfs_refcount_complain_bad_rec(
struct xfs_btree_cur *cur,
@@ -152,9 +186,15 @@ xfs_refcount_complain_bad_rec(
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_warn(mp,
+ if (xfs_btree_is_rtrefcount(cur->bc_ops)) {
+ xfs_warn(mp,
+ "RT Refcount BTree record corruption in rtgroup %u detected at %pS!",
+ cur->bc_group->xg_gno, fa);
+ } else {
+ xfs_warn(mp,
"Refcount BTree record corruption in AG %d detected at %pS!",
cur->bc_group->xg_gno, fa);
+ }
xfs_warn(mp,
"Start block 0x%x, block count 0x%x, references 0x%x",
irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
@@ -180,7 +220,7 @@ xfs_refcount_get_rec(
return error;
xfs_refcount_btrec_to_irec(rec, irec);
- fa = xfs_refcount_check_irec(to_perag(cur->bc_group), irec);
+ fa = xfs_refcount_check_btrec(cur, irec);
if (fa)
return xfs_refcount_complain_bad_rec(cur, fa, irec);
@@ -853,9 +893,9 @@ xfs_refc_merge_refcount(
const struct xfs_refcount_irec *irec,
enum xfs_refc_adjust_op adjust)
{
- /* Once a record hits MAXREFCOUNT, it is pinned there forever */
- if (irec->rc_refcount == MAXREFCOUNT)
- return MAXREFCOUNT;
+ /* Once a record hits XFS_REFC_REFCOUNT_MAX, it is pinned forever */
+ if (irec->rc_refcount == XFS_REFC_REFCOUNT_MAX)
+ return XFS_REFC_REFCOUNT_MAX;
return irec->rc_refcount + adjust;
}
@@ -898,7 +938,7 @@ xfs_refc_want_merge_center(
* hence we need to catch u32 addition overflows here.
*/
ulen += cleft->rc_blockcount + right->rc_blockcount;
- if (ulen >= MAXREFCEXTLEN)
+ if (ulen >= XFS_REFC_LEN_MAX)
return false;
*ulenp = ulen;
@@ -933,7 +973,7 @@ xfs_refc_want_merge_left(
* hence we need to catch u32 addition overflows here.
*/
ulen += cleft->rc_blockcount;
- if (ulen >= MAXREFCEXTLEN)
+ if (ulen >= XFS_REFC_LEN_MAX)
return false;
return true;
@@ -967,7 +1007,7 @@ xfs_refc_want_merge_right(
* hence we need to catch u32 addition overflows here.
*/
ulen += cright->rc_blockcount;
- if (ulen >= MAXREFCEXTLEN)
+ if (ulen >= XFS_REFC_LEN_MAX)
return false;
return true;
@@ -1065,7 +1105,7 @@ xfs_refcount_still_have_space(
*/
overhead = xfs_allocfree_block_count(cur->bc_mp,
cur->bc_refc.shape_changes);
- overhead += cur->bc_mp->m_refc_maxlevels;
+ overhead += cur->bc_maxlevels;
overhead *= cur->bc_mp->m_sb.sb_blocksize;
/*
@@ -1085,6 +1125,22 @@ xfs_refcount_still_have_space(
cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
+/* Schedule an extent free. */
+static int
+xrefc_free_extent(
+ struct xfs_btree_cur *cur,
+ struct xfs_refcount_irec *rec)
+{
+ unsigned int flags = 0;
+
+ if (xfs_btree_is_rtrefcount(cur->bc_ops))
+ flags |= XFS_FREE_EXTENT_REALTIME;
+
+ return xfs_free_extent_later(cur->bc_tp,
+ xfs_gbno_to_fsb(cur->bc_group, rec->rc_startblock),
+ rec->rc_blockcount, NULL, XFS_AG_RESV_NONE, flags);
+}
+
/*
* Adjust the refcounts of middle extents. At this point we should have
* split extents that crossed the adjustment range; merged with adjacent
@@ -1101,7 +1157,6 @@ xfs_refcount_adjust_extents(
struct xfs_refcount_irec ext, tmp;
int error;
int found_rec, found_tmp;
- xfs_fsblock_t fsbno;
/* Merging did all the work already. */
if (*aglen == 0)
@@ -1117,7 +1172,7 @@ xfs_refcount_adjust_extents(
if (error)
goto out_error;
if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+ ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
ext.rc_domain = XFS_REFC_DOMAIN_SHARED;
@@ -1154,11 +1209,7 @@ xfs_refcount_adjust_extents(
goto out_error;
}
} else {
- fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group),
- tmp.rc_startblock);
- error = xfs_free_extent_later(cur->bc_tp, fsbno,
- tmp.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, 0);
+ error = xrefc_free_extent(cur, &tmp);
if (error)
goto out_error;
}
@@ -1196,7 +1247,7 @@ xfs_refcount_adjust_extents(
* Adjust the reference count and either update the tree
* (incr) or free the blocks (decr).
*/
- if (ext.rc_refcount == MAXREFCOUNT)
+ if (ext.rc_refcount == XFS_REFC_REFCOUNT_MAX)
goto skip;
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur, &ext);
@@ -1216,11 +1267,7 @@ xfs_refcount_adjust_extents(
}
goto advloop;
} else {
- fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group),
- ext.rc_startblock);
- error = xfs_free_extent_later(cur->bc_tp, fsbno,
- ext.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, 0);
+ error = xrefc_free_extent(cur, &ext);
if (error)
goto out_error;
}
@@ -1417,12 +1464,122 @@ xfs_refcount_finish_one(
}
/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent. Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+ struct xfs_btree_cur *cur,
+ struct xfs_refcount_intent *ri,
+ xfs_agblock_t new_agbno)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rtgroup *rtg = to_rtg(ri->ri_group);
+
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+ ri->ri_blockcount))) {
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+ }
+
+ ri->ri_startblock = xfs_rgbno_to_rtb(rtg, new_agbno);
+
+ ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+ return 0;
+}
+
+/*
+ * Process one of the deferred realtime refcount operations. We pass back the
+ * btree cursor to maintain our lock on the btree between calls.
+ */
+int
+xfs_rtrefcount_finish_one(
+ struct xfs_trans *tp,
+ struct xfs_refcount_intent *ri,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rtgroup *rtg = to_rtg(ri->ri_group);
+ struct xfs_btree_cur *rcur = *pcur;
+ int error = 0;
+ xfs_rgblock_t bno;
+ unsigned long nr_ops = 0;
+ int shape_changes = 0;
+
+ bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock);
+
+ trace_xfs_refcount_deferred(mp, ri);
+
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+ return -EIO;
+
+ /*
+ * If we haven't gotten a cursor or the cursor rtgroup doesn't match
+ * the startblock, get one now.
+ */
+ if (rcur != NULL && rcur->bc_group != ri->ri_group) {
+ nr_ops = rcur->bc_refc.nr_ops;
+ shape_changes = rcur->bc_refc.shape_changes;
+ xfs_btree_del_cursor(rcur, 0);
+ rcur = NULL;
+ *pcur = NULL;
+ }
+ if (rcur == NULL) {
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+ xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);
+ *pcur = rcur = xfs_rtrefcountbt_init_cursor(tp, rtg);
+
+ rcur->bc_refc.nr_ops = nr_ops;
+ rcur->bc_refc.shape_changes = shape_changes;
+ }
+
+ switch (ri->ri_type) {
+ case XFS_REFCOUNT_INCREASE:
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_INCREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+ break;
+ case XFS_REFCOUNT_DECREASE:
+ error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+ XFS_REFCOUNT_ADJUST_DECREASE);
+ if (error)
+ return error;
+ if (ri->ri_blockcount > 0)
+ error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+ break;
+ case XFS_REFCOUNT_ALLOC_COW:
+ error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
+ break;
+ case XFS_REFCOUNT_FREE_COW:
+ error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+ if (error)
+ return error;
+ ri->ri_blockcount = 0;
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ if (!error && ri->ri_blockcount > 0)
+ trace_xfs_refcount_finish_one_leftover(mp, ri);
+ return error;
+}
+
+/*
* Record a refcount intent for later processing.
*/
static void
__xfs_refcount_add(
struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
+ bool isrt,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount)
{
@@ -1434,6 +1591,7 @@ __xfs_refcount_add(
ri->ri_type = type;
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
+ ri->ri_realtime = isrt;
xfs_refcount_defer_add(tp, ri);
}
@@ -1444,12 +1602,13 @@ __xfs_refcount_add(
void
xfs_refcount_increase_extent(
struct xfs_trans *tp,
+ bool isrt,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_has_reflink(tp->t_mountp))
return;
- __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock,
+ __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, isrt, PREV->br_startblock,
PREV->br_blockcount);
}
@@ -1459,12 +1618,13 @@ xfs_refcount_increase_extent(
void
xfs_refcount_decrease_extent(
struct xfs_trans *tp,
+ bool isrt,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_has_reflink(tp->t_mountp))
return;
- __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock,
+ __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, isrt, PREV->br_startblock,
PREV->br_blockcount);
}
@@ -1666,7 +1826,7 @@ xfs_refcount_adjust_cow_extents(
goto out_error;
}
if (!found_rec) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+ ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
ext.rc_domain = XFS_REFC_DOMAIN_COW;
@@ -1820,6 +1980,7 @@ __xfs_refcount_cow_free(
void
xfs_refcount_alloc_cow_extent(
struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
@@ -1828,17 +1989,17 @@ xfs_refcount_alloc_cow_extent(
if (!xfs_has_reflink(mp))
return;
- __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, isrt, fsb, len);
/* Add rmap entry */
- xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
- XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
+ xfs_rmap_alloc_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
void
xfs_refcount_free_cow_extent(
struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
@@ -1848,9 +2009,8 @@ xfs_refcount_free_cow_extent(
return;
/* Remove rmap entry */
- xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
- XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
- __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
+ xfs_rmap_free_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW);
+ __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, isrt, fsb, len);
}
struct xfs_refcount_recovery {
@@ -1879,8 +2039,7 @@ xfs_refcount_recover_extent(
INIT_LIST_HEAD(&rr->rr_list);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
- if (xfs_refcount_check_irec(to_perag(cur->bc_group), &rr->rr_rrec) !=
- NULL ||
+ if (xfs_refcount_check_btrec(cur, &rr->rr_rrec) != NULL ||
XFS_IS_CORRUPT(cur->bc_mp,
rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) {
xfs_btree_mark_sick(cur);
@@ -1895,12 +2054,13 @@ xfs_refcount_recover_extent(
/* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
- struct xfs_mount *mp,
- struct xfs_perag *pag)
+ struct xfs_group *xg)
{
+ struct xfs_mount *mp = xg->xg_mount;
+ bool isrt = xg->xg_type == XG_TYPE_RTG;
struct xfs_trans *tp;
struct xfs_btree_cur *cur;
- struct xfs_buf *agbp;
+ struct xfs_buf *agbp = NULL;
struct xfs_refcount_recovery *rr, *n;
struct list_head debris;
union xfs_btree_irec low = {
@@ -1913,10 +2073,19 @@ xfs_refcount_recover_cow_leftovers(
xfs_fsblock_t fsb;
int error;
- /* reflink filesystems mustn't have AGs larger than 2^31-1 blocks */
+ /* reflink filesystems must not have groups larger than 2^31-1 blocks */
+ BUILD_BUG_ON(XFS_MAX_RGBLOCKS >= XFS_REFC_COWFLAG);
BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG);
- if (mp->m_sb.sb_agblocks > XFS_MAX_CRC_AG_BLOCKS)
- return -EOPNOTSUPP;
+
+ if (isrt) {
+ if (!xfs_has_rtgroups(mp))
+ return 0;
+ if (xfs_group_max_blocks(xg) >= XFS_MAX_RGBLOCKS)
+ return -EOPNOTSUPP;
+ } else {
+ if (xfs_group_max_blocks(xg) > XFS_MAX_CRC_AG_BLOCKS)
+ return -EOPNOTSUPP;
+ }
INIT_LIST_HEAD(&debris);
@@ -1934,16 +2103,24 @@ xfs_refcount_recover_cow_leftovers(
if (error)
return error;
- error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
- if (error)
- goto out_trans;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
+ if (isrt) {
+ xfs_rtgroup_lock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT);
+ cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(xg));
+ } else {
+ error = xfs_alloc_read_agf(to_perag(xg), tp, 0, &agbp);
+ if (error)
+ goto out_trans;
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, to_perag(xg));
+ }
/* Find all the leftover CoW staging extents. */
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
xfs_btree_del_cursor(cur, error);
- xfs_trans_brelse(tp, agbp);
+ if (agbp)
+ xfs_trans_brelse(tp, agbp);
+ else
+ xfs_rtgroup_unlock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT);
xfs_trans_cancel(tp);
if (error)
goto out_free;
@@ -1956,14 +2133,15 @@ xfs_refcount_recover_cow_leftovers(
goto out_free;
/* Free the orphan record */
- fsb = xfs_agbno_to_fsb(pag, rr->rr_rrec.rc_startblock);
- xfs_refcount_free_cow_extent(tp, fsb,
+ fsb = xfs_gbno_to_fsb(xg, rr->rr_rrec.rc_startblock);
+ xfs_refcount_free_cow_extent(tp, isrt, fsb,
rr->rr_rrec.rc_blockcount);
/* Free the block. */
error = xfs_free_extent_later(tp, fsb,
rr->rr_rrec.rc_blockcount, NULL,
- XFS_AG_RESV_NONE, 0);
+ XFS_AG_RESV_NONE,
+ isrt ? XFS_FREE_EXTENT_REALTIME : 0);
if (error)
goto out_trans;
@@ -2028,7 +2206,7 @@ xfs_refcount_query_range_helper(
xfs_failaddr_t fa;
xfs_refcount_btrec_to_irec(rec, &irec);
- fa = xfs_refcount_check_irec(to_perag(cur->bc_group), &irec);
+ fa = xfs_refcount_check_btrec(cur, &irec);
if (fa)
return xfs_refcount_complain_bad_rec(cur, fa, &irec);
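
One detail worth calling out in xfs_rtrefcount_finish_one() above: when xfs_refcount_adjust() stops early because it is running out of transaction space, it returns with ri_blockcount still positive, and xfs_rtrefcount_continue_op() simply advances ri_startblock so the remainder can be finished in a fresh transaction. A standalone sketch of that continuation pattern, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int startblock = 100;		/* hypothetical rtgroup block */
	unsigned int blockcount = 25;		/* blocks left in the intent */
	const unsigned int per_pass = 10;	/* pretend per-transaction limit */

	while (blockcount > 0) {
		unsigned int done = blockcount < per_pass ? blockcount : per_pass;

		printf("adjust blocks [%u, %u)\n", startblock, startblock + done);
		startblock += done;	/* what the continue_op helper records */
		blockcount -= done;
	}
	return 0;
}
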
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 62d78afcf1f3..f2e299a716a4 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -12,6 +12,7 @@ struct xfs_perag;
struct xfs_btree_cur;
struct xfs_bmbt_irec;
struct xfs_refcount_irec;
+struct xfs_rtgroup;
extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur,
enum xfs_refc_domain domain, xfs_agblock_t bno, int *stat);
@@ -60,6 +61,7 @@ struct xfs_refcount_intent {
enum xfs_refcount_intent_type ri_type;
xfs_extlen_t ri_blockcount;
xfs_fsblock_t ri_startblock;
+ bool ri_realtime;
};
/* Check that the refcount is appropriate for the record domain. */
@@ -74,24 +76,25 @@ xfs_refcount_check_domain(
return true;
}
-void xfs_refcount_increase_extent(struct xfs_trans *tp,
+void xfs_refcount_increase_extent(struct xfs_trans *tp, bool isrt,
struct xfs_bmbt_irec *irec);
-void xfs_refcount_decrease_extent(struct xfs_trans *tp,
+void xfs_refcount_decrease_extent(struct xfs_trans *tp, bool isrt,
struct xfs_bmbt_irec *irec);
-extern int xfs_refcount_finish_one(struct xfs_trans *tp,
+int xfs_refcount_finish_one(struct xfs_trans *tp,
+ struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur);
+int xfs_rtrefcount_finish_one(struct xfs_trans *tp,
struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur);
extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
xfs_extlen_t *flen, bool find_end_of_shared);
-void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
- xfs_extlen_t len);
-void xfs_refcount_free_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
- xfs_extlen_t len);
-extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
- struct xfs_perag *pag);
+void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, bool isrt,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
+void xfs_refcount_free_cow_extent(struct xfs_trans *tp, bool isrt,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
+int xfs_refcount_recover_cow_leftovers(struct xfs_group *xg);
/*
* While we're adjusting the refcounts records of an extent, we have
@@ -120,6 +123,8 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
struct xfs_refcount_irec *irec);
xfs_failaddr_t xfs_refcount_check_irec(struct xfs_perag *pag,
const struct xfs_refcount_irec *irec);
+xfs_failaddr_t xfs_rtrefcount_check_irec(struct xfs_rtgroup *rtg,
+ const struct xfs_refcount_irec *irec);
extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
struct xfs_refcount_irec *irec, int *stat);
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index d0df68dc3131..3cdf50563fec 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -25,6 +25,8 @@
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_rmap_item.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
struct kmem_cache *xfs_rmap_intent_cache;
@@ -264,11 +266,77 @@ xfs_rmap_check_irec(
return NULL;
}
+static xfs_failaddr_t
+xfs_rtrmap_check_meta_irec(
+ struct xfs_rtgroup *rtg,
+ const struct xfs_rmap_irec *irec)
+{
+ struct xfs_mount *mp = rtg_mount(rtg);
+
+ if (irec->rm_offset != 0)
+ return __this_address;
+ if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
+ return __this_address;
+
+ switch (irec->rm_owner) {
+ case XFS_RMAP_OWN_FS:
+ if (irec->rm_startblock != 0)
+ return __this_address;
+ if (irec->rm_blockcount != mp->m_sb.sb_rextsize)
+ return __this_address;
+ return NULL;
+ case XFS_RMAP_OWN_COW:
+ if (!xfs_has_rtreflink(mp))
+ return __this_address;
+ if (!xfs_verify_rgbext(rtg, irec->rm_startblock,
+ irec->rm_blockcount))
+ return __this_address;
+ return NULL;
+ default:
+ return __this_address;
+ }
+
+ return NULL;
+}
+
+static xfs_failaddr_t
+xfs_rtrmap_check_inode_irec(
+ struct xfs_rtgroup *rtg,
+ const struct xfs_rmap_irec *irec)
+{
+ struct xfs_mount *mp = rtg_mount(rtg);
+
+ if (!xfs_verify_ino(mp, irec->rm_owner))
+ return __this_address;
+ if (!xfs_verify_rgbext(rtg, irec->rm_startblock, irec->rm_blockcount))
+ return __this_address;
+ if (!xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount))
+ return __this_address;
+ return NULL;
+}
+
+xfs_failaddr_t
+xfs_rtrmap_check_irec(
+ struct xfs_rtgroup *rtg,
+ const struct xfs_rmap_irec *irec)
+{
+ if (irec->rm_blockcount == 0)
+ return __this_address;
+ if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK))
+ return __this_address;
+ if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner))
+ return xfs_rtrmap_check_meta_irec(rtg, irec);
+ return xfs_rtrmap_check_inode_irec(rtg, irec);
+}
+
static inline xfs_failaddr_t
xfs_rmap_check_btrec(
struct xfs_btree_cur *cur,
const struct xfs_rmap_irec *irec)
{
+ if (xfs_btree_is_rtrmap(cur->bc_ops) ||
+ xfs_btree_is_mem_rtrmap(cur->bc_ops))
+ return xfs_rtrmap_check_irec(to_rtg(cur->bc_group), irec);
return xfs_rmap_check_irec(to_perag(cur->bc_group), irec);
}
@@ -283,6 +351,10 @@ xfs_rmap_complain_bad_rec(
if (xfs_btree_is_mem_rmap(cur->bc_ops))
xfs_warn(mp,
"In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa);
+ else if (xfs_btree_is_rtrmap(cur->bc_ops))
+ xfs_warn(mp,
+ "RT Reverse Mapping BTree record corruption in rtgroup %u detected at %pS!",
+ cur->bc_group->xg_gno, fa);
else
xfs_warn(mp,
"Reverse Mapping BTree record corruption in AG %d detected at %pS!",
@@ -525,7 +597,7 @@ xfs_rmap_free_check_owner(
struct xfs_btree_cur *cur,
uint64_t ltoff,
struct xfs_rmap_irec *rec,
- xfs_filblks_t len,
+ xfs_extlen_t len,
uint64_t owner,
uint64_t offset,
unsigned int flags)
@@ -2556,6 +2628,47 @@ __xfs_rmap_finish_intent(
}
}
+static int
+xfs_rmap_finish_init_cursor(
+ struct xfs_trans *tp,
+ struct xfs_rmap_intent *ri,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_perag *pag = to_perag(ri->ri_group);
+ struct xfs_buf *agbp = NULL;
+ int error;
+
+ /*
+ * Refresh the freelist before we start changing the rmapbt, because a
+ * shape change could cause us to allocate blocks.
+ */
+ error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+ if (error) {
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+ return error;
+ }
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+ xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+ return -EFSCORRUPTED;
+ }
+ *pcur = xfs_rmapbt_init_cursor(tp->t_mountp, tp, agbp, pag);
+ return 0;
+}
+
+static int
+xfs_rtrmap_finish_init_cursor(
+ struct xfs_trans *tp,
+ struct xfs_rmap_intent *ri,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_rtgroup *rtg = to_rtg(ri->ri_group);
+
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+ xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
+ *pcur = xfs_rtrmapbt_init_cursor(tp, rtg);
+ return 0;
+}
+
/*
* Process one of the deferred rmap operations. We pass back the
* btree cursor to maintain our lock on the rmapbt between calls.
@@ -2571,8 +2684,6 @@ xfs_rmap_finish_one(
{
struct xfs_owner_info oinfo;
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *rcur = *pcur;
- struct xfs_buf *agbp = NULL;
xfs_agblock_t bno;
bool unwritten;
int error = 0;
@@ -2586,38 +2697,26 @@ xfs_rmap_finish_one(
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
- if (rcur != NULL && rcur->bc_group != ri->ri_group) {
- xfs_btree_del_cursor(rcur, 0);
- rcur = NULL;
+ if (*pcur != NULL && (*pcur)->bc_group != ri->ri_group) {
+ xfs_btree_del_cursor(*pcur, 0);
*pcur = NULL;
}
- if (rcur == NULL) {
- struct xfs_perag *pag = to_perag(ri->ri_group);
-
- /*
- * Refresh the freelist before we start changing the
- * rmapbt, because a shape change could cause us to
- * allocate blocks.
- */
- error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
- if (error) {
- xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+ if (*pcur == NULL) {
+ if (ri->ri_group->xg_type == XG_TYPE_RTG)
+ error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur);
+ else
+ error = xfs_rmap_finish_init_cursor(tp, ri, pcur);
+ if (error)
return error;
- }
- if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
- xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
- return -EFSCORRUPTED;
- }
-
- *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
}
xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
ri->ri_bmap.br_startoff);
unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
- bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
- error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
+ bno = xfs_fsb_to_gbno(mp, ri->ri_bmap.br_startblock,
+ ri->ri_group->xg_type);
+ error = __xfs_rmap_finish_intent(*pcur, ri->ri_type, bno,
ri->ri_bmap.br_blockcount, &oinfo, unwritten);
if (error)
return error;
@@ -2647,6 +2746,7 @@ __xfs_rmap_add(
struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
uint64_t owner,
+ bool isrt,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
@@ -2658,6 +2758,7 @@ __xfs_rmap_add(
ri->ri_owner = owner;
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
+ ri->ri_realtime = isrt;
xfs_rmap_defer_add(tp, ri);
}
@@ -2671,6 +2772,7 @@ xfs_rmap_map_extent(
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_MAP;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return;
@@ -2678,7 +2780,7 @@ xfs_rmap_map_extent(
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_MAP_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/* Unmap an extent out of a file. */
@@ -2690,6 +2792,7 @@ xfs_rmap_unmap_extent(
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return;
@@ -2697,7 +2800,7 @@ xfs_rmap_unmap_extent(
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_UNMAP_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/*
@@ -2715,6 +2818,7 @@ xfs_rmap_convert_extent(
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(mp, whichfork))
return;
@@ -2722,15 +2826,15 @@ xfs_rmap_convert_extent(
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_CONVERT_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/* Schedule the creation of an rmap for non-file data. */
void
xfs_rmap_alloc_extent(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
+ bool isrt,
+ xfs_fsblock_t fsbno,
xfs_extlen_t len,
uint64_t owner)
{
@@ -2739,20 +2843,20 @@ xfs_rmap_alloc_extent(
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return;
- bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
+ bmap.br_startblock = fsbno;
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, isrt, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
void
xfs_rmap_free_extent(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
+ bool isrt,
+ xfs_fsblock_t fsbno,
xfs_extlen_t len,
uint64_t owner)
{
@@ -2761,12 +2865,12 @@ xfs_rmap_free_extent(
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return;
- bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
+ bmap.br_startblock = fsbno;
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, isrt, XFS_DATA_FORK, &bmap);
}
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
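
The rmap and rtrmap finish_one helpers above share a caching pattern: the btree cursor is handed back through *pcur so that consecutive intents against the same group reuse one cursor, and the cursor is torn down and rebuilt only when the group changes. A small self-contained mock of that pattern, with hypothetical types and not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct cursor {
	bool valid;
	int group;
};

static void finish_one(struct cursor *cur, int group)
{
	/* drop a cached cursor that points at a different group */
	if (cur->valid && cur->group != group)
		cur->valid = false;

	if (!cur->valid) {
		printf("init cursor for group %d\n", group);
		cur->valid = true;
		cur->group = group;
	}
	printf("update record in group %d\n", group);
}

int main(void)
{
	struct cursor cur = { .valid = false };

	finish_one(&cur, 1);
	finish_one(&cur, 1);	/* cursor reused, no re-init */
	finish_one(&cur, 2);	/* group changed: cursor rebuilt */
	return 0;
}
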
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 96b4321d8310..5f39f6e53cd1 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -7,6 +7,7 @@
#define __XFS_RMAP_H__
struct xfs_perag;
+struct xfs_rtgroup;
static inline void
xfs_rmap_ino_bmbt_owner(
@@ -174,6 +175,7 @@ struct xfs_rmap_intent {
uint64_t ri_owner;
struct xfs_bmbt_irec ri_bmap;
struct xfs_group *ri_group;
+ bool ri_realtime;
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
@@ -184,10 +186,10 @@ void xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
void xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
-void xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
- xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
-void xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
- xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
+void xfs_rmap_alloc_extent(struct xfs_trans *tp, bool isrt, xfs_fsblock_t fsbno,
+ xfs_extlen_t len, uint64_t owner);
+void xfs_rmap_free_extent(struct xfs_trans *tp, bool isrt, xfs_fsblock_t fsbno,
+ xfs_extlen_t len, uint64_t owner);
int xfs_rmap_finish_one(struct xfs_trans *tp, struct xfs_rmap_intent *ri,
struct xfs_btree_cur **pcur);
@@ -206,6 +208,8 @@ xfs_failaddr_t xfs_rmap_btrec_to_irec(const union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec);
xfs_failaddr_t xfs_rmap_check_irec(struct xfs_perag *pag,
const struct xfs_rmap_irec *irec);
+xfs_failaddr_t xfs_rtrmap_check_irec(struct xfs_rtgroup *rtg,
+ const struct xfs_rmap_irec *irec);
int xfs_rmap_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
xfs_extlen_t len, enum xbtree_recpacking *outcome);
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 4ddfb7e395b3..770adf60dd73 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1055,7 +1055,7 @@ xfs_rtfree_extent(
xfs_rtxlen_t len) /* length of extent freed */
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
struct xfs_rtalloc_args args = {
.mp = mp,
.tp = tp,
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
index 16563a44bd13..22e5d9cd95f4 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.h
+++ b/fs/xfs/libxfs/xfs_rtbitmap.h
@@ -135,6 +135,15 @@ xfs_rtb_to_rtx(
return div_u64(rtbno, mp->m_sb.sb_rextsize);
}
+/* Return the offset of a rtgroup block number within an rt extent. */
+static inline xfs_extlen_t
+xfs_rgbno_to_rtxoff(
+ struct xfs_mount *mp,
+ xfs_rgblock_t rgbno)
+{
+ return rgbno % mp->m_sb.sb_rextsize;
+}
+
/* Return the offset of an rt block number within an rt extent. */
static inline xfs_extlen_t
xfs_rtb_to_rtxoff(
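
The new xfs_rgbno_to_rtxoff() helper is the rtgroup-block twin of xfs_rtb_to_rtxoff(): the offset of a block within its rt extent is just the block number modulo the extent size. A trivial standalone example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int rextsize = 16;	/* hypothetical rt extent size, in blocks */
	unsigned int rgbno = 53;	/* hypothetical rtgroup block number */

	/* same computation as xfs_rgbno_to_rtxoff() */
	printf("rtxoff = %u\n", rgbno % rextsize);	/* prints 5 */
	return 0;
}
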
diff --git a/fs/xfs/libxfs/xfs_rtgroup.c b/fs/xfs/libxfs/xfs_rtgroup.c
index 4f3bfc884aff..d84d32f1b48f 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.c
+++ b/fs/xfs/libxfs/xfs_rtgroup.c
@@ -33,6 +33,8 @@
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
#include "xfs_metadir.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
/* Find the first usable fsblock in this rtgroup. */
static inline uint32_t
@@ -197,11 +199,17 @@ xfs_rtgroup_lock(
* Lock both realtime free space metadata inodes for a freespace
* update.
*/
- xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
- xfs_ilock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
+ xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
+ xfs_ilock(rtg_summary(rtg), XFS_ILOCK_EXCL);
} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
- xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
+ xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
}
+
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+ xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
+ if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+ xfs_ilock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
}
/* Unlock metadata inodes associated with this rt group. */
@@ -214,11 +222,17 @@ xfs_rtgroup_unlock(
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
!(rtglock_flags & XFS_RTGLOCK_BITMAP));
+ if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+ xfs_iunlock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
+
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+ xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
- xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
- xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
+ xfs_iunlock(rtg_summary(rtg), XFS_ILOCK_EXCL);
+ xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
- xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
+ xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
}
}
@@ -236,11 +250,15 @@ xfs_rtgroup_trans_join(
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
- xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_BITMAP],
- XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY],
- XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, rtg_bitmap(rtg), XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, rtg_summary(rtg), XFS_ILOCK_EXCL);
}
+
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+ xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
+ if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+ xfs_trans_ijoin(tp, rtg_refcount(rtg), XFS_ILOCK_EXCL);
}
/* Retrieve rt group geometry. */
@@ -284,7 +302,8 @@ xfs_rtginode_ilock_print_fn(
const struct xfs_inode *ip =
container_of(m, struct xfs_inode, i_lock.dep_map);
- printk(KERN_CONT " rgno=%u", ip->i_projid);
+ printk(KERN_CONT " rgno=%u metatype=%s", ip->i_projid,
+ xfs_metafile_type_str(ip->i_metatype));
}
/*
@@ -316,8 +335,10 @@ struct xfs_rtginode_ops {
unsigned int sick; /* rtgroup sickness flag */
+ unsigned int fmt_mask; /* all valid data fork formats */
+
/* Does the fs have this feature? */
- bool (*enabled)(struct xfs_mount *mp);
+ bool (*enabled)(const struct xfs_mount *mp);
/* Create this rtgroup metadata inode and initialize it. */
int (*create)(struct xfs_rtgroup *rtg,
@@ -331,14 +352,40 @@ static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
.name = "bitmap",
.metafile_type = XFS_METAFILE_RTBITMAP,
.sick = XFS_SICK_RG_BITMAP,
+ .fmt_mask = (1U << XFS_DINODE_FMT_EXTENTS) |
+ (1U << XFS_DINODE_FMT_BTREE),
.create = xfs_rtbitmap_create,
},
[XFS_RTGI_SUMMARY] = {
.name = "summary",
.metafile_type = XFS_METAFILE_RTSUMMARY,
.sick = XFS_SICK_RG_SUMMARY,
+ .fmt_mask = (1U << XFS_DINODE_FMT_EXTENTS) |
+ (1U << XFS_DINODE_FMT_BTREE),
.create = xfs_rtsummary_create,
},
+ [XFS_RTGI_RMAP] = {
+ .name = "rmap",
+ .metafile_type = XFS_METAFILE_RTRMAP,
+ .sick = XFS_SICK_RG_RMAPBT,
+ .fmt_mask = 1U << XFS_DINODE_FMT_META_BTREE,
+ /*
+ * growfs must create the rtrmap inodes before adding a
+ * realtime volume to the filesystem, so we cannot use the
+ * rtrmapbt predicate here.
+ */
+ .enabled = xfs_has_rmapbt,
+ .create = xfs_rtrmapbt_create,
+ },
+ [XFS_RTGI_REFCOUNT] = {
+ .name = "refcount",
+ .metafile_type = XFS_METAFILE_RTREFCOUNT,
+ .sick = XFS_SICK_RG_REFCNTBT,
+ .fmt_mask = 1U << XFS_DINODE_FMT_META_BTREE,
+ /* same comment about growfs and rmap inodes applies here */
+ .enabled = xfs_has_reflink,
+ .create = xfs_rtrefcountbt_create,
+ },
};
/* Return the shortname of this rtgroup inode. */
@@ -435,8 +482,7 @@ xfs_rtginode_load(
return error;
}
- if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
- ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
+ if (XFS_IS_CORRUPT(mp, !((1U << ip->i_df.if_format) & ops->fmt_mask))) {
xfs_irele(ip);
xfs_rtginode_mark_sick(rtg, type);
return -EFSCORRUPTED;
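
The fmt_mask change above replaces the hard-coded EXTENTS/BTREE comparison with a per-inode-type bitmask, which is what lets the rmap and refcount inodes accept only the new META_BTREE data fork format while the bitmap and summary inodes keep their old formats. A standalone sketch of the mask test, using hypothetical stand-ins for the XFS_DINODE_FMT_* values:

#include <stdio.h>

/* hypothetical stand-ins for the on-disk format codes */
enum dinode_fmt { FMT_LOCAL = 1, FMT_EXTENTS = 2, FMT_BTREE = 3, FMT_META_BTREE = 5 };

static int format_ok(enum dinode_fmt fmt, unsigned int fmt_mask)
{
	return ((1U << fmt) & fmt_mask) != 0;
}

int main(void)
{
	unsigned int bitmap_mask = (1U << FMT_EXTENTS) | (1U << FMT_BTREE);
	unsigned int rmap_mask = 1U << FMT_META_BTREE;

	printf("bitmap inode, extents format: %d\n", format_ok(FMT_EXTENTS, bitmap_mask));
	printf("rmap inode, extents format:   %d\n", format_ok(FMT_EXTENTS, rmap_mask));
	printf("rmap inode, meta btree:       %d\n", format_ok(FMT_META_BTREE, rmap_mask));
	return 0;
}
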
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 2d7822644eff..03f39d4e43fc 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -14,6 +14,8 @@ struct xfs_trans;
enum xfs_rtg_inodes {
XFS_RTGI_BITMAP, /* allocation bitmap */
XFS_RTGI_SUMMARY, /* allocation summary */
+ XFS_RTGI_RMAP, /* rmap btree inode */
+ XFS_RTGI_REFCOUNT, /* refcount btree inode */
XFS_RTGI_MAX,
};
@@ -64,6 +66,26 @@ static inline xfs_rgnumber_t rtg_rgno(const struct xfs_rtgroup *rtg)
return rtg->rtg_group.xg_gno;
}
+static inline struct xfs_inode *rtg_bitmap(const struct xfs_rtgroup *rtg)
+{
+ return rtg->rtg_inodes[XFS_RTGI_BITMAP];
+}
+
+static inline struct xfs_inode *rtg_summary(const struct xfs_rtgroup *rtg)
+{
+ return rtg->rtg_inodes[XFS_RTGI_SUMMARY];
+}
+
+static inline struct xfs_inode *rtg_rmap(const struct xfs_rtgroup *rtg)
+{
+ return rtg->rtg_inodes[XFS_RTGI_RMAP];
+}
+
+static inline struct xfs_inode *rtg_refcount(const struct xfs_rtgroup *rtg)
+{
+ return rtg->rtg_inodes[XFS_RTGI_REFCOUNT];
+}
+
/* Passive rtgroup references */
static inline struct xfs_rtgroup *
xfs_rtgroup_get(
@@ -122,6 +144,32 @@ xfs_rtgroup_next(
return xfs_rtgroup_next_range(mp, rtg, 0, mp->m_sb.sb_rgcount - 1);
}
+static inline bool
+xfs_verify_rgbno(
+ struct xfs_rtgroup *rtg,
+ xfs_rgblock_t rgbno)
+{
+ ASSERT(xfs_has_rtgroups(rtg_mount(rtg)));
+
+ return xfs_verify_gbno(rtg_group(rtg), rgbno);
+}
+
+/*
+ * Check that [@rgbno,@len] is a valid extent range in @rtg.
+ *
+ * Must only be used for RTG-enabled file systems.
+ */
+static inline bool
+xfs_verify_rgbext(
+ struct xfs_rtgroup *rtg,
+ xfs_rgblock_t rgbno,
+ xfs_extlen_t len)
+{
+ ASSERT(xfs_has_rtgroups(rtg_mount(rtg)));
+
+ return xfs_verify_gbext(rtg_group(rtg), rgbno, len);
+}
+
static inline xfs_rtblock_t
xfs_rgbno_to_rtb(
struct xfs_rtgroup *rtg,
@@ -223,9 +271,15 @@ int xfs_update_last_rtgroup_size(struct xfs_mount *mp,
#define XFS_RTGLOCK_BITMAP (1U << 0)
/* Lock the rt bitmap inode in shared mode */
#define XFS_RTGLOCK_BITMAP_SHARED (1U << 1)
+/* Lock the rt rmap inode in exclusive mode */
+#define XFS_RTGLOCK_RMAP (1U << 2)
+/* Lock the rt refcount inode in exclusive mode */
+#define XFS_RTGLOCK_REFCOUNT (1U << 3)
#define XFS_RTGLOCK_ALL_FLAGS (XFS_RTGLOCK_BITMAP | \
- XFS_RTGLOCK_BITMAP_SHARED)
+ XFS_RTGLOCK_BITMAP_SHARED | \
+ XFS_RTGLOCK_RMAP | \
+ XFS_RTGLOCK_REFCOUNT)
void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
@@ -248,6 +302,8 @@ int xfs_rtginode_create(struct xfs_rtgroup *rtg, enum xfs_rtg_inodes type,
bool init);
void xfs_rtginode_irele(struct xfs_inode **ipp);
static inline const char *xfs_rtginode_path(xfs_rgnumber_t rgno,
enum xfs_rtg_inodes type)
{
diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.c b/fs/xfs/libxfs/xfs_rtrefcount_btree.c
new file mode 100644
index 000000000000..3db5e7a4a945
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtbitmap.h"
+#include "xfs_metafile.h"
+#include "xfs_health.h"
+
+static struct kmem_cache *xfs_rtrefcountbt_cur_cache;
+
+/*
+ * Realtime Reference Count btree.
+ *
+ * This is a btree used to track the owner(s) of a given extent in the realtime
+ * device. See the comments in xfs_refcount_btree.c for more information.
+ *
+ * This tree is basically the same as the regular refcount btree except that
+ * it's rooted in an inode.
+ */
+
+static struct xfs_btree_cur *
+xfs_rtrefcountbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ return xfs_rtrefcountbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
+}
+
+STATIC int
+xfs_rtrefcountbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+
+ return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
+ level == 0) / 2;
+ }
+
+ return cur->bc_mp->m_rtrefc_mnr[level != 0];
+}
+
+STATIC int
+xfs_rtrefcountbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+
+ return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
+ level == 0);
+ }
+
+ return cur->bc_mp->m_rtrefc_mxr[level != 0];
+}
+
+/*
+ * Calculate number of records in a realtime refcount btree inode root.
+ */
+unsigned int
+xfs_rtrefcountbt_droot_maxrecs(
+ unsigned int blocklen,
+ bool leaf)
+{
+ blocklen -= sizeof(struct xfs_rtrefcount_root);
+
+ if (leaf)
+ return blocklen / sizeof(struct xfs_refcount_rec);
+ return blocklen / (2 * sizeof(struct xfs_refcount_key) +
+ sizeof(xfs_rtrefcount_ptr_t));
+}
+
+/*
+ * Get the maximum records we could store in the on-disk format.
+ *
+ * For non-root nodes this is equivalent to xfs_rtrefcountbt_get_maxrecs, but
+ * for the root node this checks the available space in the dinode fork so that
+ * we can resize the in-memory buffer to match it. After a resize to the
+ * maximum size this function returns the same value as
+ * xfs_rtrefcountbt_get_maxrecs for the root node, too.
+ */
+STATIC int
+xfs_rtrefcountbt_get_dmaxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level != cur->bc_nlevels - 1)
+ return cur->bc_mp->m_rtrefc_mxr[level != 0];
+ return xfs_rtrefcountbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
+}
+
+STATIC void
+xfs_rtrefcountbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
+{
+ key->refc.rc_startblock = rec->refc.rc_startblock;
+}
+
+STATIC void
+xfs_rtrefcountbt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
+{
+ __u32 x;
+
+ x = be32_to_cpu(rec->refc.rc_startblock);
+ x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
+ key->refc.rc_startblock = cpu_to_be32(x);
+}
+
+STATIC void
+xfs_rtrefcountbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
+ uint32_t start;
+
+ start = xfs_refcount_encode_startblock(irec->rc_startblock,
+ irec->rc_domain);
+ rec->refc.rc_startblock = cpu_to_be32(start);
+ rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
+ rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
+}
+
+STATIC void
+xfs_rtrefcountbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ ptr->l = 0;
+}
+
+STATIC int64_t
+xfs_rtrefcountbt_key_diff(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key)
+{
+ const struct xfs_refcount_key *kp = &key->refc;
+ const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
+ uint32_t start;
+
+ start = xfs_refcount_encode_startblock(irec->rc_startblock,
+ irec->rc_domain);
+ return (int64_t)be32_to_cpu(kp->rc_startblock) - start;
+}
+
+STATIC int64_t
+xfs_rtrefcountbt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->refc.rc_startblock);
+
+ return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
+ be32_to_cpu(k2->refc.rc_startblock);
+}
+
+static xfs_failaddr_t
+xfs_rtrefcountbt_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
+ int level;
+
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
+ if (!xfs_has_reflink(mp))
+ return __this_address;
+ fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+ if (fa)
+ return fa;
+ level = be16_to_cpu(block->bb_level);
+ if (level > mp->m_rtrefc_maxlevels)
+ return __this_address;
+
+ return xfs_btree_fsblock_verify(bp, mp->m_rtrefc_mxr[level != 0]);
+}
+
+static void
+xfs_rtrefcountbt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ if (!xfs_btree_fsblock_verify_crc(bp))
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_rtrefcountbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
+
+ if (bp->b_error)
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+}
+
+static void
+xfs_rtrefcountbt_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_rtrefcountbt_verify(bp);
+ if (fa) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ return;
+ }
+ xfs_btree_fsblock_calc_crc(bp);
+
+}
+
+const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops = {
+ .name = "xfs_rtrefcountbt",
+ .magic = { 0, cpu_to_be32(XFS_RTREFC_CRC_MAGIC) },
+ .verify_read = xfs_rtrefcountbt_read_verify,
+ .verify_write = xfs_rtrefcountbt_write_verify,
+ .verify_struct = xfs_rtrefcountbt_verify,
+};
+
+STATIC int
+xfs_rtrefcountbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2)
+{
+ return be32_to_cpu(k1->refc.rc_startblock) <
+ be32_to_cpu(k2->refc.rc_startblock);
+}
+
+STATIC int
+xfs_rtrefcountbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_rec *r1,
+ const union xfs_btree_rec *r2)
+{
+ return be32_to_cpu(r1->refc.rc_startblock) +
+ be32_to_cpu(r1->refc.rc_blockcount) <=
+ be32_to_cpu(r2->refc.rc_startblock);
+}
+
+STATIC enum xbtree_key_contig
+xfs_rtrefcountbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->refc.rc_startblock);
+
+ return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
+ be32_to_cpu(key2->refc.rc_startblock));
+}
+
+static inline void
+xfs_rtrefcountbt_move_ptrs(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *broot,
+ short old_size,
+ size_t new_size,
+ unsigned int numrecs)
+{
+ void *dptr;
+ void *sptr;
+
+ sptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, old_size);
+ dptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, new_size);
+ memmove(dptr, sptr, numrecs * sizeof(xfs_rtrefcount_ptr_t));
+}
+
+static struct xfs_btree_block *
+xfs_rtrefcountbt_broot_realloc(
+ struct xfs_btree_cur *cur,
+ unsigned int new_numrecs)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+ struct xfs_btree_block *broot;
+ unsigned int new_size;
+ unsigned int old_size = ifp->if_broot_bytes;
+ const unsigned int level = cur->bc_nlevels - 1;
+
+ new_size = xfs_rtrefcount_broot_space_calc(mp, level, new_numrecs);
+
+ /* Handle the nop case quietly. */
+ if (new_size == old_size)
+ return ifp->if_broot;
+
+ if (new_size > old_size) {
+ unsigned int old_numrecs;
+
+ /*
+ * If there wasn't any memory allocated before, just allocate
+ * it now and get out.
+ */
+ if (old_size == 0)
+ return xfs_broot_realloc(ifp, new_size);
+
+ /*
+ * If there is already an existing if_broot, then we need to
+ * realloc it and possibly move the node block pointers because
+ * those are not butted up against the btree block header.
+ */
+ old_numrecs = xfs_rtrefcountbt_maxrecs(mp, old_size, level);
+ broot = xfs_broot_realloc(ifp, new_size);
+ if (level > 0)
+ xfs_rtrefcountbt_move_ptrs(mp, broot, old_size,
+ new_size, old_numrecs);
+ goto out_broot;
+ }
+
+ /*
+ * We're reducing numrecs. If we're going all the way to zero, just
+ * free the block.
+ */
+ ASSERT(ifp->if_broot != NULL && old_size > 0);
+ if (new_size == 0)
+ return xfs_broot_realloc(ifp, 0);
+
+ /*
+ * Shrink the btree root by possibly moving the rtrefcountbt pointers,
+ * since they are not butted up against the btree block header. Then
+ * reallocate broot.
+ */
+ if (level > 0)
+ xfs_rtrefcountbt_move_ptrs(mp, ifp->if_broot, old_size,
+ new_size, new_numrecs);
+ broot = xfs_broot_realloc(ifp, new_size);
+
+out_broot:
+ ASSERT(xfs_rtrefcount_droot_space(broot) <=
+ xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
+ return broot;
+}
+
+const struct xfs_btree_ops xfs_rtrefcountbt_ops = {
+ .name = "rtrefcount",
+ .type = XFS_BTREE_TYPE_INODE,
+ .geom_flags = XFS_BTGEO_IROOT_RECORDS,
+
+ .rec_len = sizeof(struct xfs_refcount_rec),
+ .key_len = sizeof(struct xfs_refcount_key),
+ .ptr_len = XFS_BTREE_LONG_PTR_LEN,
+
+ .lru_refs = XFS_REFC_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_rtrefcbt_2),
+ .sick_mask = XFS_SICK_RG_REFCNTBT,
+
+ .dup_cursor = xfs_rtrefcountbt_dup_cursor,
+ .alloc_block = xfs_btree_alloc_metafile_block,
+ .free_block = xfs_btree_free_metafile_block,
+ .get_minrecs = xfs_rtrefcountbt_get_minrecs,
+ .get_maxrecs = xfs_rtrefcountbt_get_maxrecs,
+ .get_dmaxrecs = xfs_rtrefcountbt_get_dmaxrecs,
+ .init_key_from_rec = xfs_rtrefcountbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_rtrefcountbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_rtrefcountbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_rtrefcountbt_init_ptr_from_cur,
+ .key_diff = xfs_rtrefcountbt_key_diff,
+ .buf_ops = &xfs_rtrefcountbt_buf_ops,
+ .diff_two_keys = xfs_rtrefcountbt_diff_two_keys,
+ .keys_inorder = xfs_rtrefcountbt_keys_inorder,
+ .recs_inorder = xfs_rtrefcountbt_recs_inorder,
+ .keys_contiguous = xfs_rtrefcountbt_keys_contiguous,
+ .broot_realloc = xfs_rtrefcountbt_broot_realloc,
+};
+
+/* Allocate a new rt refcount btree cursor. */
+struct xfs_btree_cur *
+xfs_rtrefcountbt_init_cursor(
+ struct xfs_trans *tp,
+ struct xfs_rtgroup *rtg)
+{
+ struct xfs_inode *ip = rtg_refcount(rtg);
+ struct xfs_mount *mp = rtg_mount(rtg);
+ struct xfs_btree_cur *cur;
+
+ xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
+
+ cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrefcountbt_ops,
+ mp->m_rtrefc_maxlevels, xfs_rtrefcountbt_cur_cache);
+
+ cur->bc_ino.ip = ip;
+ cur->bc_refc.nr_ops = 0;
+ cur->bc_refc.shape_changes = 0;
+ cur->bc_group = xfs_group_hold(rtg_group(rtg));
+ cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
+ cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
+ cur->bc_ino.whichfork = XFS_DATA_FORK;
+ return cur;
+}
+
+/*
+ * Install a new rt refcount btree root. Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rtrefcountbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp)
+{
+ struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
+ struct xfs_ifork *ifp;
+ int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+ ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE);
+
+ /*
+ * Free any resources hanging off the real fork, then shallow-copy the
+ * staging fork's contents into the real fork to transfer everything
+ * we just built.
+ */
+ ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
+ xfs_idestroy_fork(ifp);
+ memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));
+
+ cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
+ xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
+ xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
+}
+
+/* Calculate number of records in a realtime refcount btree block. */
+static inline unsigned int
+xfs_rtrefcountbt_block_maxrecs(
+ unsigned int blocklen,
+ bool leaf)
+{
+
+ if (leaf)
+ return blocklen / sizeof(struct xfs_refcount_rec);
+ return blocklen / (sizeof(struct xfs_refcount_key) +
+ sizeof(xfs_rtrefcount_ptr_t));
+}
+
+/*
+ * Calculate the number of records in a realtime refcount btree block.
+ */
+unsigned int
+xfs_rtrefcountbt_maxrecs(
+ struct xfs_mount *mp,
+ unsigned int blocklen,
+ bool leaf)
+{
+ blocklen -= XFS_RTREFCOUNT_BLOCK_LEN;
+ return xfs_rtrefcountbt_block_maxrecs(blocklen, leaf);
+}
+
+/* Compute the max possible height for realtime refcount btrees. */
+unsigned int
+xfs_rtrefcountbt_maxlevels_ondisk(void)
+{
+ unsigned int minrecs[2];
+ unsigned int blocklen;
+
+ blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
+
+ minrecs[0] = xfs_rtrefcountbt_block_maxrecs(blocklen, true) / 2;
+ minrecs[1] = xfs_rtrefcountbt_block_maxrecs(blocklen, false) / 2;
+
+ /* We need at most one record for every block in an rt group. */
+ return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_RGBLOCKS);
+}
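
The helper above feeds half-full fanouts into xfs_btree_compute_maxlevels(), which answers how many levels a tree needs before blocks that are only minrecs full can index the requested number of records. The snippet below is a rough standalone model of that computation, not the kernel helper itself; the fanout values and the 2^32 - 1 record count are made up for illustration.

#include <stdio.h>

/* Rough model of the minrecs-based height bound, not the kernel helper. */
static unsigned int compute_maxlevels(const unsigned int minrecs[2],
                                      unsigned long long records)
{
        unsigned long long capacity = minrecs[0];       /* leaf level */
        unsigned int height = 1;

        while (capacity < records) {
                capacity *= minrecs[1];                 /* add a node level */
                height++;
        }
        return height;
}

int main(void)
{
        unsigned int minrecs[2] = { 40, 42 };   /* assumed half-full fanouts */

        printf("maxlevels = %u\n",
               compute_maxlevels(minrecs, 0xffffffffULL));      /* 6 here */
        return 0;
}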
+
+int __init
+xfs_rtrefcountbt_init_cur_cache(void)
+{
+ xfs_rtrefcountbt_cur_cache = kmem_cache_create("xfs_rtrefcountbt_cur",
+ xfs_btree_cur_sizeof(
+ xfs_rtrefcountbt_maxlevels_ondisk()),
+ 0, 0, NULL);
+
+ if (!xfs_rtrefcountbt_cur_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void
+xfs_rtrefcountbt_destroy_cur_cache(void)
+{
+ kmem_cache_destroy(xfs_rtrefcountbt_cur_cache);
+ xfs_rtrefcountbt_cur_cache = NULL;
+}
+
+/* Compute the maximum height of a realtime refcount btree. */
+void
+xfs_rtrefcountbt_compute_maxlevels(
+ struct xfs_mount *mp)
+{
+ unsigned int d_maxlevels, r_maxlevels;
+
+ if (!xfs_has_rtreflink(mp)) {
+ mp->m_rtrefc_maxlevels = 0;
+ return;
+ }
+
+ /*
+ * The realtime refcountbt lives on the data device, which means that
+ * its maximum height is constrained by the size of the data device and
+ * the height required to store one refcount record for each rtextent
+ * in an rt group.
+ */
+ d_maxlevels = xfs_btree_space_to_height(mp->m_rtrefc_mnr,
+ mp->m_sb.sb_dblocks);
+ r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrefc_mnr,
+ mp->m_sb.sb_rgextents);
+
+ /* Add one level to handle the inode root level. */
+ mp->m_rtrefc_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
+}
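
The final value is the smaller of the two ceilings plus one level for the root kept in the inode fork. The space-based ceiling is the less intuitive one: it asks how tall the tree can grow before its own blocks outgrow the data device. Below is a rough standalone model of that idea under assumed fanouts; it is an illustration, not the kernel's exact xfs_btree_space_to_height() accounting.

#include <stdio.h>

/* Keep adding levels while the whole half-full tree fits in the budget. */
static unsigned int space_to_height(unsigned int node_minrecs,
                                    unsigned long long budget)
{
        unsigned long long total = 1;           /* root block */
        unsigned long long level_blocks = 1;
        unsigned int height = 1;

        for (;;) {
                level_blocks *= node_minrecs;   /* next level down, half full */
                if (total + level_blocks > budget)
                        return height;
                total += level_blocks;
                height++;
        }
}

int main(void)
{
        /* With a fanout of 42, a 100-million-block budget caps the tree at 5. */
        printf("height = %u\n", space_to_height(42, 100000000ULL));
        return 0;
}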
+
+/* Calculate the rtrefcount btree size for some records. */
+unsigned long long
+xfs_rtrefcountbt_calc_size(
+ struct xfs_mount *mp,
+ unsigned long long len)
+{
+ return xfs_btree_calc_size(mp->m_rtrefc_mnr, len);
+}
+
+/*
+ * Calculate the maximum refcount btree size.
+ */
+static unsigned long long
+xfs_rtrefcountbt_max_size(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtblocks)
+{
+ /* Bail out if we're uninitialized, which can happen in mkfs. */
+ if (mp->m_rtrefc_mxr[0] == 0)
+ return 0;
+
+ return xfs_rtrefcountbt_calc_size(mp, rtblocks);
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ * We need enough space to hold one record for every rt extent in the rtgroup.
+ */
+xfs_filblks_t
+xfs_rtrefcountbt_calc_reserves(
+ struct xfs_mount *mp)
+{
+ if (!xfs_has_rtreflink(mp))
+ return 0;
+
+ return xfs_rtrefcountbt_max_size(mp, mp->m_sb.sb_rgextents);
+}
+
+/*
+ * Convert on-disk form of btree root to in-memory form.
+ */
+STATIC void
+xfs_rtrefcountbt_from_disk(
+ struct xfs_inode *ip,
+ struct xfs_rtrefcount_root *dblock,
+ int dblocklen,
+ struct xfs_btree_block *rblock)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_refcount_key *fkp;
+ __be64 *fpp;
+ struct xfs_refcount_key *tkp;
+ __be64 *tpp;
+ struct xfs_refcount_rec *frp;
+ struct xfs_refcount_rec *trp;
+ unsigned int numrecs;
+ unsigned int maxrecs;
+ unsigned int rblocklen;
+
+ rblocklen = xfs_rtrefcount_broot_space(mp, dblock);
+
+ xfs_btree_init_block(mp, rblock, &xfs_rtrefcountbt_ops, 0, 0,
+ ip->i_ino);
+
+ rblock->bb_level = dblock->bb_level;
+ rblock->bb_numrecs = dblock->bb_numrecs;
+
+ if (be16_to_cpu(rblock->bb_level) > 0) {
+ maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false);
+ fkp = xfs_rtrefcount_droot_key_addr(dblock, 1);
+ tkp = xfs_rtrefcount_key_addr(rblock, 1);
+ fpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs);
+ tpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen);
+ numrecs = be16_to_cpu(dblock->bb_numrecs);
+ memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
+ memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
+ } else {
+ frp = xfs_rtrefcount_droot_rec_addr(dblock, 1);
+ trp = xfs_rtrefcount_rec_addr(rblock, 1);
+ numrecs = be16_to_cpu(dblock->bb_numrecs);
+ memcpy(trp, frp, sizeof(*frp) * numrecs);
+ }
+}
+
+/* Load a realtime reference count btree root in from disk. */
+int
+xfs_iformat_rtrefcount(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ struct xfs_btree_block *broot;
+ unsigned int numrecs;
+ unsigned int level;
+ int dsize;
+
+ /*
+ * growfs must create the rtrefcount inodes before adding a realtime
+ * volume to the filesystem, so we cannot use the rtrefcount predicate
+ * here.
+ */
+ if (!xfs_has_reflink(ip->i_mount)) {
+ xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
+ return -EFSCORRUPTED;
+ }
+
+ dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
+ numrecs = be16_to_cpu(dfp->bb_numrecs);
+ level = be16_to_cpu(dfp->bb_level);
+
+ if (level > mp->m_rtrefc_maxlevels ||
+ xfs_rtrefcount_droot_space_calc(level, numrecs) > dsize) {
+ xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
+ return -EFSCORRUPTED;
+ }
+
+ broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK),
+ xfs_rtrefcount_broot_space_calc(mp, level, numrecs));
+ if (broot)
+ xfs_rtrefcountbt_from_disk(ip, dfp, dsize, broot);
+ return 0;
+}
+
+/*
+ * Convert in-memory form of btree root to on-disk form.
+ */
+void
+xfs_rtrefcountbt_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *rblock,
+ int rblocklen,
+ struct xfs_rtrefcount_root *dblock,
+ int dblocklen)
+{
+ struct xfs_refcount_key *fkp;
+ __be64 *fpp;
+ struct xfs_refcount_key *tkp;
+ __be64 *tpp;
+ struct xfs_refcount_rec *frp;
+ struct xfs_refcount_rec *trp;
+ unsigned int maxrecs;
+ unsigned int numrecs;
+
+ ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTREFC_CRC_MAGIC));
+ ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
+ ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
+ ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
+ ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
+
+ dblock->bb_level = rblock->bb_level;
+ dblock->bb_numrecs = rblock->bb_numrecs;
+
+ if (be16_to_cpu(rblock->bb_level) > 0) {
+ maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false);
+ fkp = xfs_rtrefcount_key_addr(rblock, 1);
+ tkp = xfs_rtrefcount_droot_key_addr(dblock, 1);
+ fpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen);
+ tpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs);
+ numrecs = be16_to_cpu(rblock->bb_numrecs);
+ memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
+ memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
+ } else {
+ frp = xfs_rtrefcount_rec_addr(rblock, 1);
+ trp = xfs_rtrefcount_droot_rec_addr(dblock, 1);
+ numrecs = be16_to_cpu(rblock->bb_numrecs);
+ memcpy(trp, frp, sizeof(*frp) * numrecs);
+ }
+}
+
+/* Flush a realtime reference count btree root out to disk. */
+void
+xfs_iflush_rtrefcount(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+
+ ASSERT(ifp->if_broot != NULL);
+ ASSERT(ifp->if_broot_bytes > 0);
+ ASSERT(xfs_rtrefcount_droot_space(ifp->if_broot) <=
+ xfs_inode_fork_size(ip, XFS_DATA_FORK));
+ xfs_rtrefcountbt_to_disk(ip->i_mount, ifp->if_broot,
+ ifp->if_broot_bytes, dfp,
+ XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
+}
+
+/*
+ * Create a realtime refcount btree inode.
+ */
+int
+xfs_rtrefcountbt_create(
+ struct xfs_rtgroup *rtg,
+ struct xfs_inode *ip,
+ struct xfs_trans *tp,
+ bool init)
+{
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_block *broot;
+
+ ifp->if_format = XFS_DINODE_FMT_META_BTREE;
+ ASSERT(ifp->if_broot_bytes == 0);
+ ASSERT(ifp->if_bytes == 0);
+
+ /* Initialize the empty incore btree root. */
+ broot = xfs_broot_realloc(ifp,
+ xfs_rtrefcount_broot_space_calc(mp, 0, 0));
+ if (broot)
+ xfs_btree_init_block(mp, broot, &xfs_rtrefcountbt_ops, 0, 0,
+ ip->i_ino);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.h b/fs/xfs/libxfs/xfs_rtrefcount_btree.h
new file mode 100644
index 000000000000..a99b7a8aec86
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_RTREFCOUNT_BTREE_H__
+#define __XFS_RTREFCOUNT_BTREE_H__
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_mount;
+struct xbtree_ifakeroot;
+struct xfs_rtgroup;
+
+/* refcounts only exist on crc enabled filesystems */
+#define XFS_RTREFCOUNT_BLOCK_LEN XFS_BTREE_LBLOCK_CRC_LEN
+
+struct xfs_btree_cur *xfs_rtrefcountbt_init_cursor(struct xfs_trans *tp,
+ struct xfs_rtgroup *rtg);
+struct xfs_btree_cur *xfs_rtrefcountbt_stage_cursor(struct xfs_mount *mp,
+ struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+ struct xbtree_ifakeroot *ifake);
+void xfs_rtrefcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp);
+unsigned int xfs_rtrefcountbt_maxrecs(struct xfs_mount *mp,
+ unsigned int blocklen, bool leaf);
+void xfs_rtrefcountbt_compute_maxlevels(struct xfs_mount *mp);
+unsigned int xfs_rtrefcountbt_droot_maxrecs(unsigned int blocklen, bool leaf);
+
+/*
+ * Addresses of records, keys, and pointers within an incore rtrefcountbt block.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+static inline struct xfs_refcount_rec *
+xfs_rtrefcount_rec_addr(
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_refcount_rec *)
+ ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN +
+ (index - 1) * sizeof(struct xfs_refcount_rec));
+}
+
+static inline struct xfs_refcount_key *
+xfs_rtrefcount_key_addr(
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_refcount_key *)
+ ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN +
+ (index - 1) * sizeof(struct xfs_refcount_key));
+}
+
+static inline xfs_rtrefcount_ptr_t *
+xfs_rtrefcount_ptr_addr(
+ struct xfs_btree_block *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_rtrefcount_ptr_t *)
+ ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN +
+ maxrecs * sizeof(struct xfs_refcount_key) +
+ (index - 1) * sizeof(xfs_rtrefcount_ptr_t));
+}
+
+unsigned int xfs_rtrefcountbt_maxlevels_ondisk(void);
+int __init xfs_rtrefcountbt_init_cur_cache(void);
+void xfs_rtrefcountbt_destroy_cur_cache(void);
+
+xfs_filblks_t xfs_rtrefcountbt_calc_reserves(struct xfs_mount *mp);
+unsigned long long xfs_rtrefcountbt_calc_size(struct xfs_mount *mp,
+ unsigned long long len);
+
+/* Addresses of keys, pointers, and records within an ondisk rtrefcount block. */
+
+static inline struct xfs_refcount_rec *
+xfs_rtrefcount_droot_rec_addr(
+ struct xfs_rtrefcount_root *block,
+ unsigned int index)
+{
+ return (struct xfs_refcount_rec *)
+ ((char *)(block + 1) +
+ (index - 1) * sizeof(struct xfs_refcount_rec));
+}
+
+static inline struct xfs_refcount_key *
+xfs_rtrefcount_droot_key_addr(
+ struct xfs_rtrefcount_root *block,
+ unsigned int index)
+{
+ return (struct xfs_refcount_key *)
+ ((char *)(block + 1) +
+ (index - 1) * sizeof(struct xfs_refcount_key));
+}
+
+static inline xfs_rtrefcount_ptr_t *
+xfs_rtrefcount_droot_ptr_addr(
+ struct xfs_rtrefcount_root *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_rtrefcount_ptr_t *)
+ ((char *)(block + 1) +
+ maxrecs * sizeof(struct xfs_refcount_key) +
+ (index - 1) * sizeof(xfs_rtrefcount_ptr_t));
+}
+
+/*
+ * Address of pointers within the incore btree root.
+ *
+ * These are to be used when we know the size of the block and
+ * we don't have a cursor.
+ */
+static inline xfs_rtrefcount_ptr_t *
+xfs_rtrefcount_broot_ptr_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *bb,
+ unsigned int index,
+ unsigned int block_size)
+{
+ return xfs_rtrefcount_ptr_addr(bb, index,
+ xfs_rtrefcountbt_maxrecs(mp, block_size, false));
+}
+
+/*
+ * Compute the space required for the incore btree root containing the given
+ * number of records.
+ */
+static inline size_t
+xfs_rtrefcount_broot_space_calc(
+ struct xfs_mount *mp,
+ unsigned int level,
+ unsigned int nrecs)
+{
+ size_t sz = XFS_RTREFCOUNT_BLOCK_LEN;
+
+ if (level > 0)
+ return sz + nrecs * (sizeof(struct xfs_refcount_key) +
+ sizeof(xfs_rtrefcount_ptr_t));
+ return sz + nrecs * sizeof(struct xfs_refcount_rec);
+}
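
A standalone check of the sizing arithmetic above is handy when reasoning about how many records the inode fork can hold before the tree must grow a level. The 56-byte header and the 12-, 4-, and 8-byte record, key, and pointer sizes are assumptions for illustration; the real values come from the on-disk structure definitions.

#include <stdio.h>
#include <stddef.h>

/* Assumed sizes, for illustration only. */
#define HDR_LEN 56      /* long-format CRC btree block header */
#define REC_LEN 12      /* assumed sizeof(struct xfs_refcount_rec) */
#define KEY_LEN 4       /* assumed sizeof(struct xfs_refcount_key) */
#define PTR_LEN 8       /* assumed sizeof(xfs_rtrefcount_ptr_t) */

static size_t broot_space_calc(unsigned int level, unsigned int nrecs)
{
        if (level > 0)
                return HDR_LEN + nrecs * (KEY_LEN + PTR_LEN);
        return HDR_LEN + nrecs * REC_LEN;
}

int main(void)
{
        /* A leaf root with 10 records vs. a node root with 10 key/ptr pairs. */
        printf("leaf root: %zu bytes\n", broot_space_calc(0, 10));      /* 176 */
        printf("node root: %zu bytes\n", broot_space_calc(1, 10));      /* 176 */
        return 0;
}

With these assumed sizes the two layouts happen to take the same space; the distinction still matters because the key and pointer arrays are addressed separately in the node case.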
+
+/*
+ * Compute the space required for the incore btree root given the ondisk
+ * btree root block.
+ */
+static inline size_t
+xfs_rtrefcount_broot_space(struct xfs_mount *mp, struct xfs_rtrefcount_root *bb)
+{
+ return xfs_rtrefcount_broot_space_calc(mp, be16_to_cpu(bb->bb_level),
+ be16_to_cpu(bb->bb_numrecs));
+}
+
+/* Compute the space required for the ondisk root block. */
+static inline size_t
+xfs_rtrefcount_droot_space_calc(
+ unsigned int level,
+ unsigned int nrecs)
+{
+ size_t sz = sizeof(struct xfs_rtrefcount_root);
+
+ if (level > 0)
+ return sz + nrecs * (sizeof(struct xfs_refcount_key) +
+ sizeof(xfs_rtrefcount_ptr_t));
+ return sz + nrecs * sizeof(struct xfs_refcount_rec);
+}
+
+/*
+ * Compute the space required for the ondisk root block given an incore root
+ * block.
+ */
+static inline size_t
+xfs_rtrefcount_droot_space(struct xfs_btree_block *bb)
+{
+ return xfs_rtrefcount_droot_space_calc(be16_to_cpu(bb->bb_level),
+ be16_to_cpu(bb->bb_numrecs));
+}
+
+int xfs_iformat_rtrefcount(struct xfs_inode *ip, struct xfs_dinode *dip);
+void xfs_rtrefcountbt_to_disk(struct xfs_mount *mp,
+ struct xfs_btree_block *rblock, int rblocklen,
+ struct xfs_rtrefcount_root *dblock, int dblocklen);
+void xfs_iflush_rtrefcount(struct xfs_inode *ip, struct xfs_dinode *dip);
+
+int xfs_rtrefcountbt_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+ struct xfs_trans *tp, bool init);
+
+#endif /* __XFS_RTREFCOUNT_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.c b/fs/xfs/libxfs/xfs_rtrmap_btree.c
new file mode 100644
index 000000000000..e4ec36943cb7
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rtrmap_btree.c
@@ -0,0 +1,1035 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
+#include "xfs_metafile.h"
+#include "xfs_rmap.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+#include "xfs_rtgroup.h"
+#include "xfs_bmap.h"
+#include "xfs_health.h"
+#include "xfs_buf_mem.h"
+#include "xfs_btree_mem.h"
+
+static struct kmem_cache *xfs_rtrmapbt_cur_cache;
+
+/*
+ * Realtime Reverse Map btree.
+ *
+ * This is a btree used to track the owner(s) of a given extent in the realtime
+ * device. See the comments in xfs_rmap_btree.c for more information.
+ *
+ * This tree is basically the same as the regular rmap btree except that it
+ * is rooted in an inode and does not live in free space.
+ */
+
+static struct xfs_btree_cur *
+xfs_rtrmapbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ return xfs_rtrmapbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
+}
+
+STATIC int
+xfs_rtrmapbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+
+ return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
+ level == 0) / 2;
+ }
+
+ return cur->bc_mp->m_rtrmap_mnr[level != 0];
+}
+
+STATIC int
+xfs_rtrmapbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+
+ return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
+ level == 0);
+ }
+
+ return cur->bc_mp->m_rtrmap_mxr[level != 0];
+}
+
+/* Calculate number of records in the ondisk realtime rmap btree inode root. */
+unsigned int
+xfs_rtrmapbt_droot_maxrecs(
+ unsigned int blocklen,
+ bool leaf)
+{
+ blocklen -= sizeof(struct xfs_rtrmap_root);
+
+ if (leaf)
+ return blocklen / sizeof(struct xfs_rmap_rec);
+ return blocklen / (2 * sizeof(struct xfs_rmap_key) +
+ sizeof(xfs_rtrmap_ptr_t));
+}
+
+/*
+ * Get the maximum records we could store in the on-disk format.
+ *
+ * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but
+ * for the root node this checks the available space in the dinode fork
+ * so that we can resize the in-memory buffer to match it. After a
+ * resize to the maximum size this function returns the same value
+ * as xfs_rtrmapbt_get_maxrecs for the root node, too.
+ */
+STATIC int
+xfs_rtrmapbt_get_dmaxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level != cur->bc_nlevels - 1)
+ return cur->bc_mp->m_rtrmap_mxr[level != 0];
+ return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
+}
+
+/*
+ * Convert the ondisk record's offset field into the ondisk key's offset field.
+ * Fork and bmbt are significant parts of the rmap record key, but written
+ * status is merely a record attribute.
+ */
+static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
+{
+ return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
+}
+
+STATIC void
+xfs_rtrmapbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
+{
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
+}
+
+STATIC void
+xfs_rtrmapbt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ const union xfs_btree_rec *rec)
+{
+ uint64_t off;
+ int adj;
+
+ adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
+
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ be32_add_cpu(&key->rmap.rm_startblock, adj);
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
+ if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
+ XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
+ return;
+ off = be64_to_cpu(key->rmap.rm_offset);
+ off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
+ key->rmap.rm_offset = cpu_to_be64(off);
+}
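
The high key computed above names the last block an rmap record covers: the start block, and for inode owners the file offset, are both advanced by blockcount - 1, while the unwritten flag is never part of the key. Below is a small sketch of that arithmetic with plain integers; the struct and the offset bit layout (a 54-bit offset with flag bits above it) are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Assumed rmap offset bit layout, for illustration only. */
#define OFF_UNWRITTEN   (1ULL << 61)
#define OFF_MASK        ((1ULL << 54) - 1)

struct toy_rmap {
        uint32_t        startblock;
        uint32_t        blockcount;
        uint64_t        offset;         /* flags live in the high bits */
};

static void high_key(const struct toy_rmap *r, uint32_t *hi_bno,
                     uint64_t *hi_off)
{
        uint32_t adj = r->blockcount - 1;

        *hi_bno = r->startblock + adj;
        /* drop the unwritten flag, bump only the offset bits */
        *hi_off = ((r->offset & OFF_MASK) + adj) |
                  (r->offset & ~(OFF_MASK | OFF_UNWRITTEN));
}

int main(void)
{
        struct toy_rmap r = { .startblock = 100, .blockcount = 8,
                              .offset = 5 | OFF_UNWRITTEN };
        uint32_t hi_bno;
        uint64_t hi_off;

        high_key(&r, &hi_bno, &hi_off);
        printf("high key: bno %u off %llu\n", (unsigned)hi_bno,
               (unsigned long long)hi_off);     /* bno 107 off 12 */
        return 0;
}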
+
+STATIC void
+xfs_rtrmapbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
+ rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
+ rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
+ rec->rmap.rm_offset = cpu_to_be64(
+ xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
+}
+
+STATIC void
+xfs_rtrmapbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ ptr->l = 0;
+}
+
+/*
+ * Mask the appropriate parts of the ondisk key field for a key comparison.
+ * Fork and bmbt are significant parts of the rmap record key, but written
+ * status is merely a record attribute.
+ */
+static inline uint64_t offset_keymask(uint64_t offset)
+{
+ return offset & ~XFS_RMAP_OFF_UNWRITTEN;
+}
+
+STATIC int64_t
+xfs_rtrmapbt_key_diff(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key)
+{
+ struct xfs_rmap_irec *rec = &cur->bc_rec.r;
+ const struct xfs_rmap_key *kp = &key->rmap;
+ __u64 x, y;
+ int64_t d;
+
+ d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
+ if (d)
+ return d;
+
+ x = be64_to_cpu(kp->rm_owner);
+ y = rec->rm_owner;
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+
+ x = offset_keymask(be64_to_cpu(kp->rm_offset));
+ y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ return 0;
+}
+
+STATIC int64_t
+xfs_rtrmapbt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
+{
+ const struct xfs_rmap_key *kp1 = &k1->rmap;
+ const struct xfs_rmap_key *kp2 = &k2->rmap;
+ int64_t d;
+ __u64 x, y;
+
+ /* Doesn't make sense to mask off the physical space part */
+ ASSERT(!mask || mask->rmap.rm_startblock);
+
+ d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
+ be32_to_cpu(kp2->rm_startblock);
+ if (d)
+ return d;
+
+ if (!mask || mask->rmap.rm_owner) {
+ x = be64_to_cpu(kp1->rm_owner);
+ y = be64_to_cpu(kp2->rm_owner);
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ }
+
+ if (!mask || mask->rmap.rm_offset) {
+ /* Doesn't make sense to allow offset but not owner */
+ ASSERT(!mask || mask->rmap.rm_owner);
+
+ x = offset_keymask(be64_to_cpu(kp1->rm_offset));
+ y = offset_keymask(be64_to_cpu(kp2->rm_offset));
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ }
+
+ return 0;
+}
+
+static xfs_failaddr_t
+xfs_rtrmapbt_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
+ int level;
+
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
+ if (!xfs_has_rmapbt(mp))
+ return __this_address;
+ fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+ if (fa)
+ return fa;
+ level = be16_to_cpu(block->bb_level);
+ if (level > mp->m_rtrmap_maxlevels)
+ return __this_address;
+
+ return xfs_btree_fsblock_verify(bp, mp->m_rtrmap_mxr[level != 0]);
+}
+
+static void
+xfs_rtrmapbt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ if (!xfs_btree_fsblock_verify_crc(bp))
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_rtrmapbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
+
+ if (bp->b_error)
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+}
+
+static void
+xfs_rtrmapbt_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_rtrmapbt_verify(bp);
+ if (fa) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ return;
+ }
+ xfs_btree_fsblock_calc_crc(bp);
+
+}
+
+const struct xfs_buf_ops xfs_rtrmapbt_buf_ops = {
+ .name = "xfs_rtrmapbt",
+ .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
+ .verify_read = xfs_rtrmapbt_read_verify,
+ .verify_write = xfs_rtrmapbt_write_verify,
+ .verify_struct = xfs_rtrmapbt_verify,
+};
+
+STATIC int
+xfs_rtrmapbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *k1,
+ const union xfs_btree_key *k2)
+{
+ uint32_t x;
+ uint32_t y;
+ uint64_t a;
+ uint64_t b;
+
+ x = be32_to_cpu(k1->rmap.rm_startblock);
+ y = be32_to_cpu(k2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(k1->rmap.rm_owner);
+ b = be64_to_cpu(k2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
+ b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+
+STATIC int
+xfs_rtrmapbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_rec *r1,
+ const union xfs_btree_rec *r2)
+{
+ uint32_t x;
+ uint32_t y;
+ uint64_t a;
+ uint64_t b;
+
+ x = be32_to_cpu(r1->rmap.rm_startblock);
+ y = be32_to_cpu(r2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(r1->rmap.rm_owner);
+ b = be64_to_cpu(r2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
+ b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+
+STATIC enum xbtree_key_contig
+xfs_rtrmapbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->rmap.rm_startblock);
+
+ /*
+ * We only support checking contiguity of the physical space component.
+ * If any callers ever need more specificity than that, they'll have to
+ * implement it here.
+ */
+ ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
+
+ return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
+ be32_to_cpu(key2->rmap.rm_startblock));
+}
+
+static inline void
+xfs_rtrmapbt_move_ptrs(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *broot,
+ short old_size,
+ size_t new_size,
+ unsigned int numrecs)
+{
+ void *dptr;
+ void *sptr;
+
+ sptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, old_size);
+ dptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, new_size);
+ memmove(dptr, sptr, numrecs * sizeof(xfs_rtrmap_ptr_t));
+}
+
+static struct xfs_btree_block *
+xfs_rtrmapbt_broot_realloc(
+ struct xfs_btree_cur *cur,
+ unsigned int new_numrecs)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+ struct xfs_btree_block *broot;
+ unsigned int new_size;
+ unsigned int old_size = ifp->if_broot_bytes;
+ const unsigned int level = cur->bc_nlevels - 1;
+
+ new_size = xfs_rtrmap_broot_space_calc(mp, level, new_numrecs);
+
+ /* Handle the nop case quietly. */
+ if (new_size == old_size)
+ return ifp->if_broot;
+
+ if (new_size > old_size) {
+ unsigned int old_numrecs;
+
+ /*
+ * If there wasn't any memory allocated before, just allocate
+ * it now and get out.
+ */
+ if (old_size == 0)
+ return xfs_broot_realloc(ifp, new_size);
+
+ /*
+ * If there is already an existing if_broot, then we need to
+ * realloc it and possibly move the node block pointers because
+ * those are not butted up against the btree block header.
+ */
+ old_numrecs = xfs_rtrmapbt_maxrecs(mp, old_size, level == 0);
+ broot = xfs_broot_realloc(ifp, new_size);
+ if (level > 0)
+ xfs_rtrmapbt_move_ptrs(mp, broot, old_size, new_size,
+ old_numrecs);
+ goto out_broot;
+ }
+
+ /*
+ * We're reducing numrecs. If we're going all the way to zero, just
+ * free the block.
+ */
+ ASSERT(ifp->if_broot != NULL && old_size > 0);
+ if (new_size == 0)
+ return xfs_broot_realloc(ifp, 0);
+
+ /*
+ * Shrink the btree root by possibly moving the rtrmapbt pointers,
+ * since they are not butted up against the btree block header. Then
+ * reallocate broot.
+ */
+ if (level > 0)
+ xfs_rtrmapbt_move_ptrs(mp, ifp->if_broot, old_size, new_size,
+ new_numrecs);
+ broot = xfs_broot_realloc(ifp, new_size);
+
+out_broot:
+ ASSERT(xfs_rtrmap_droot_space(broot) <=
+ xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
+ return broot;
+}
+
+const struct xfs_btree_ops xfs_rtrmapbt_ops = {
+ .name = "rtrmap",
+ .type = XFS_BTREE_TYPE_INODE,
+ .geom_flags = XFS_BTGEO_OVERLAPPING |
+ XFS_BTGEO_IROOT_RECORDS,
+
+ .rec_len = sizeof(struct xfs_rmap_rec),
+ /* Overlapping btree; 2 keys per pointer. */
+ .key_len = 2 * sizeof(struct xfs_rmap_key),
+ .ptr_len = XFS_BTREE_LONG_PTR_LEN,
+
+ .lru_refs = XFS_RMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_rtrmap_2),
+ .sick_mask = XFS_SICK_RG_RMAPBT,
+
+ .dup_cursor = xfs_rtrmapbt_dup_cursor,
+ .alloc_block = xfs_btree_alloc_metafile_block,
+ .free_block = xfs_btree_free_metafile_block,
+ .get_minrecs = xfs_rtrmapbt_get_minrecs,
+ .get_maxrecs = xfs_rtrmapbt_get_maxrecs,
+ .get_dmaxrecs = xfs_rtrmapbt_get_dmaxrecs,
+ .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur,
+ .key_diff = xfs_rtrmapbt_key_diff,
+ .buf_ops = &xfs_rtrmapbt_buf_ops,
+ .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
+ .keys_inorder = xfs_rtrmapbt_keys_inorder,
+ .recs_inorder = xfs_rtrmapbt_recs_inorder,
+ .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
+ .broot_realloc = xfs_rtrmapbt_broot_realloc,
+};
+
+/* Allocate a new rt rmap btree cursor. */
+struct xfs_btree_cur *
+xfs_rtrmapbt_init_cursor(
+ struct xfs_trans *tp,
+ struct xfs_rtgroup *rtg)
+{
+ struct xfs_inode *ip = rtg_rmap(rtg);
+ struct xfs_mount *mp = rtg_mount(rtg);
+ struct xfs_btree_cur *cur;
+
+ xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
+
+ cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_ops,
+ mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache);
+
+ cur->bc_ino.ip = ip;
+ cur->bc_group = xfs_group_hold(rtg_group(rtg));
+ cur->bc_ino.whichfork = XFS_DATA_FORK;
+ cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
+ cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
+
+ return cur;
+}
+
+#ifdef CONFIG_XFS_BTREE_IN_MEM
+/*
+ * Validate an in-memory realtime rmap btree block. Callers are allowed to
+ * generate an in-memory btree even if the ondisk feature is not enabled.
+ */
+static xfs_failaddr_t
+xfs_rtrmapbt_mem_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
+ unsigned int level;
+ unsigned int maxrecs;
+
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
+ fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+ if (fa)
+ return fa;
+
+ level = be16_to_cpu(block->bb_level);
+ if (xfs_has_rmapbt(mp)) {
+ if (level >= mp->m_rtrmap_maxlevels)
+ return __this_address;
+ } else {
+ if (level >= xfs_rtrmapbt_maxlevels_ondisk())
+ return __this_address;
+ }
+
+ maxrecs = xfs_rtrmapbt_maxrecs(mp, XFBNO_BLOCKSIZE, level == 0);
+ return xfs_btree_memblock_verify(bp, maxrecs);
+}
+
+static void
+xfs_rtrmapbt_mem_rw_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa = xfs_rtrmapbt_mem_verify(bp);
+
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+}
+
+/* skip crc checks on in-memory btrees to save time */
+static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = {
+ .name = "xfs_rtrmapbt_mem",
+ .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
+ .verify_read = xfs_rtrmapbt_mem_rw_verify,
+ .verify_write = xfs_rtrmapbt_mem_rw_verify,
+ .verify_struct = xfs_rtrmapbt_mem_verify,
+};
+
+const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
+ .type = XFS_BTREE_TYPE_MEM,
+ .geom_flags = XFS_BTGEO_OVERLAPPING,
+
+ .rec_len = sizeof(struct xfs_rmap_rec),
+ /* Overlapping btree; 2 keys per pointer. */
+ .key_len = 2 * sizeof(struct xfs_rmap_key),
+ .ptr_len = XFS_BTREE_LONG_PTR_LEN,
+
+ .lru_refs = XFS_RMAP_BTREE_REF,
+ .statoff = XFS_STATS_CALC_INDEX(xs_rtrmap_mem_2),
+
+ .dup_cursor = xfbtree_dup_cursor,
+ .set_root = xfbtree_set_root,
+ .alloc_block = xfbtree_alloc_block,
+ .free_block = xfbtree_free_block,
+ .get_minrecs = xfbtree_get_minrecs,
+ .get_maxrecs = xfbtree_get_maxrecs,
+ .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfbtree_init_ptr_from_cur,
+ .key_diff = xfs_rtrmapbt_key_diff,
+ .buf_ops = &xfs_rtrmapbt_mem_buf_ops,
+ .diff_two_keys = xfs_rtrmapbt_diff_two_keys,
+ .keys_inorder = xfs_rtrmapbt_keys_inorder,
+ .recs_inorder = xfs_rtrmapbt_recs_inorder,
+ .keys_contiguous = xfs_rtrmapbt_keys_contiguous,
+};
+
+/* Create a cursor for an in-memory btree. */
+struct xfs_btree_cur *
+xfs_rtrmapbt_mem_cursor(
+ struct xfs_rtgroup *rtg,
+ struct xfs_trans *tp,
+ struct xfbtree *xfbt)
+{
+ struct xfs_mount *mp = rtg_mount(rtg);
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_mem_ops,
+ mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache);
+ cur->bc_mem.xfbtree = xfbt;
+ cur->bc_nlevels = xfbt->nlevels;
+ cur->bc_group = xfs_group_hold(rtg_group(rtg));
+ return cur;
+}
+
+/* Create an in-memory realtime rmap btree. */
+int
+xfs_rtrmapbt_mem_init(
+ struct xfs_mount *mp,
+ struct xfbtree *xfbt,
+ struct xfs_buftarg *btp,
+ xfs_rgnumber_t rgno)
+{
+ xfbt->owner = rgno;
+ return xfbtree_init(mp, xfbt, btp, &xfs_rtrmapbt_mem_ops);
+}
+#endif /* CONFIG_XFS_BTREE_IN_MEM */
+
+/*
+ * Install a new rt reverse mapping btree root. Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rtrmapbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp)
+{
+ struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
+ struct xfs_ifork *ifp;
+ int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+ ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE);
+
+ /*
+ * Free any resources hanging off the real fork, then shallow-copy the
+ * staging fork's contents into the real fork to transfer everything
+ * we just built.
+ */
+ ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
+ xfs_idestroy_fork(ifp);
+ memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));
+
+ cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
+ xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
+ xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
+}
+
+/* Calculate number of records in a rt reverse mapping btree block. */
+static inline unsigned int
+xfs_rtrmapbt_block_maxrecs(
+ unsigned int blocklen,
+ bool leaf)
+{
+ if (leaf)
+ return blocklen / sizeof(struct xfs_rmap_rec);
+ return blocklen /
+ (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rtrmap_ptr_t));
+}
+
+/*
+ * Calculate number of records in an rt reverse mapping btree block.
+ */
+unsigned int
+xfs_rtrmapbt_maxrecs(
+ struct xfs_mount *mp,
+ unsigned int blocklen,
+ bool leaf)
+{
+ blocklen -= XFS_RTRMAP_BLOCK_LEN;
+ return xfs_rtrmapbt_block_maxrecs(blocklen, leaf);
+}
+
+/* Compute the max possible height for realtime reverse mapping btrees. */
+unsigned int
+xfs_rtrmapbt_maxlevels_ondisk(void)
+{
+ unsigned long long max_dblocks;
+ unsigned int minrecs[2];
+ unsigned int blocklen;
+
+ blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
+
+ minrecs[0] = xfs_rtrmapbt_block_maxrecs(blocklen, true) / 2;
+ minrecs[1] = xfs_rtrmapbt_block_maxrecs(blocklen, false) / 2;
+
+ /*
+ * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs.
+ *
+ * On a reflink filesystem, each block in an rtgroup can have up to
+ * 2^32 (per the refcount record format) owners, which means that
+ * theoretically we could face up to 2^64 rmap records. However, we're
+ * likely to run out of blocks in the data device long before that
+ * happens, which means that we must compute the max height based on
+ * what the btree will look like if it consumes almost all the blocks
+ * in the data device due to maximal sharing factor.
+ */
+ max_dblocks = -1U; /* max ag count */
+ max_dblocks *= XFS_MAX_CRC_AG_BLOCKS;
+ return xfs_btree_space_to_height(minrecs, max_dblocks);
+}
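
To put a number on that worst case: the ceiling is driven by the block count of the largest conceivable data device, roughly 2^32 - 1 allocation groups times the maximum blocks per AG, rather than by the far larger theoretical rmap record count. A back-of-the-envelope version follows, treating the per-AG block limit as an assumption:

#include <stdio.h>

int main(void)
{
        /* Assumed stand-in for XFS_MAX_CRC_AG_BLOCKS (1 TiB AG, 1 KiB blocks). */
        unsigned long long max_ag_blocks = 1ULL << 30;
        unsigned long long max_dblocks = 0xffffffffULL * max_ag_blocks;

        printf("worst-case data device blocks: %llu\n", max_dblocks);
        return 0;
}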
+
+int __init
+xfs_rtrmapbt_init_cur_cache(void)
+{
+ xfs_rtrmapbt_cur_cache = kmem_cache_create("xfs_rtrmapbt_cur",
+ xfs_btree_cur_sizeof(xfs_rtrmapbt_maxlevels_ondisk()),
+ 0, 0, NULL);
+
+ if (!xfs_rtrmapbt_cur_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void
+xfs_rtrmapbt_destroy_cur_cache(void)
+{
+ kmem_cache_destroy(xfs_rtrmapbt_cur_cache);
+ xfs_rtrmapbt_cur_cache = NULL;
+}
+
+/* Compute the maximum height of an rt reverse mapping btree. */
+void
+xfs_rtrmapbt_compute_maxlevels(
+ struct xfs_mount *mp)
+{
+ unsigned int d_maxlevels, r_maxlevels;
+
+ if (!xfs_has_rtrmapbt(mp)) {
+ mp->m_rtrmap_maxlevels = 0;
+ return;
+ }
+
+ /*
+ * The realtime rmapbt lives on the data device, which means that its
+ * maximum height is constrained by the size of the data device and
+ * the height required to store one rmap record for each block in an
+ * rt group.
+ *
+ * On a reflink filesystem, each rt block can have up to 2^32 (per the
+ * refcount record format) owners, which means that theoretically we
+ * could face up to 2^64 rmap records. This makes the computation of
+ * maxlevels based on record count meaningless, so we only consider the
+ * size of the data device.
+ */
+ d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr,
+ mp->m_sb.sb_dblocks);
+ if (xfs_has_rtreflink(mp)) {
+ mp->m_rtrmap_maxlevels = d_maxlevels + 1;
+ return;
+ }
+
+ r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrmap_mnr,
+ mp->m_groups[XG_TYPE_RTG].blocks);
+
+ /* Add one level to handle the inode root level. */
+ mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
+}
+
+/* Calculate the rtrmap btree size for some records. */
+unsigned long long
+xfs_rtrmapbt_calc_size(
+ struct xfs_mount *mp,
+ unsigned long long len)
+{
+ return xfs_btree_calc_size(mp->m_rtrmap_mnr, len);
+}
+
+/*
+ * Calculate the maximum rmap btree size.
+ */
+static unsigned long long
+xfs_rtrmapbt_max_size(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtblocks)
+{
+ /* Bail out if we're uninitialized, which can happen in mkfs. */
+ if (mp->m_rtrmap_mxr[0] == 0)
+ return 0;
+
+ return xfs_rtrmapbt_calc_size(mp, rtblocks);
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+xfs_filblks_t
+xfs_rtrmapbt_calc_reserves(
+ struct xfs_mount *mp)
+{
+ uint32_t blocks = mp->m_groups[XG_TYPE_RTG].blocks;
+
+ if (!xfs_has_rtrmapbt(mp))
+ return 0;
+
+ /* Reserve 1% of the rtgroup or enough for 1 block per record. */
+ return max_t(xfs_filblks_t, blocks / 100,
+ xfs_rtrmapbt_max_size(mp, blocks));
+}
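
For a feel of the max_t() above: with a one-million-block rtgroup the 1% floor is 10,000 blocks, but the worst case of one rmap record per block usually costs more, so the btree size estimate wins. The sketch below is a rough standalone model under made-up fanouts; it is not the kernel's exact xfs_btree_calc_size() accounting.

#include <stdio.h>

/* Worst-case btree block count for the given records, half-full blocks. */
static unsigned long long btree_blocks(unsigned long long records,
                                       unsigned int leaf_minrecs,
                                       unsigned int node_minrecs)
{
        unsigned long long level = (records + leaf_minrecs - 1) / leaf_minrecs;
        unsigned long long total = level;

        while (level > 1) {
                level = (level + node_minrecs - 1) / node_minrecs;
                total += level;
        }
        return total;
}

int main(void)
{
        unsigned long long rtg_blocks = 1000000;        /* toy rtgroup size */
        unsigned long long per_record = btree_blocks(rtg_blocks, 40, 42);
        unsigned long long one_percent = rtg_blocks / 100;

        /* 25612 vs 10000 with these numbers, so the btree estimate wins. */
        printf("reserve %llu blocks\n",
               per_record > one_percent ? per_record : one_percent);
        return 0;
}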
+
+/* Convert on-disk form of btree root to in-memory form. */
+STATIC void
+xfs_rtrmapbt_from_disk(
+ struct xfs_inode *ip,
+ struct xfs_rtrmap_root *dblock,
+ unsigned int dblocklen,
+ struct xfs_btree_block *rblock)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_rmap_key *fkp;
+ __be64 *fpp;
+ struct xfs_rmap_key *tkp;
+ __be64 *tpp;
+ struct xfs_rmap_rec *frp;
+ struct xfs_rmap_rec *trp;
+ unsigned int rblocklen = xfs_rtrmap_broot_space(mp, dblock);
+ unsigned int numrecs;
+ unsigned int maxrecs;
+
+ xfs_btree_init_block(mp, rblock, &xfs_rtrmapbt_ops, 0, 0, ip->i_ino);
+
+ rblock->bb_level = dblock->bb_level;
+ rblock->bb_numrecs = dblock->bb_numrecs;
+ numrecs = be16_to_cpu(dblock->bb_numrecs);
+
+ if (be16_to_cpu(rblock->bb_level) > 0) {
+ maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
+ fkp = xfs_rtrmap_droot_key_addr(dblock, 1);
+ tkp = xfs_rtrmap_key_addr(rblock, 1);
+ fpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
+ tpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
+ memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
+ memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
+ } else {
+ frp = xfs_rtrmap_droot_rec_addr(dblock, 1);
+ trp = xfs_rtrmap_rec_addr(rblock, 1);
+ memcpy(trp, frp, sizeof(*frp) * numrecs);
+ }
+}
+
+/* Load a realtime reverse mapping btree root in from disk. */
+int
+xfs_iformat_rtrmap(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ struct xfs_btree_block *broot;
+ unsigned int numrecs;
+ unsigned int level;
+ int dsize;
+
+ /*
+ * growfs must create the rtrmap inodes before adding a realtime volume
+ * to the filesystem, so we cannot use the rtrmapbt predicate here.
+ */
+ if (!xfs_has_rmapbt(ip->i_mount)) {
+ xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
+ return -EFSCORRUPTED;
+ }
+
+ dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK);
+ numrecs = be16_to_cpu(dfp->bb_numrecs);
+ level = be16_to_cpu(dfp->bb_level);
+
+ if (level > mp->m_rtrmap_maxlevels ||
+ xfs_rtrmap_droot_space_calc(level, numrecs) > dsize) {
+ xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
+ return -EFSCORRUPTED;
+ }
+
+ broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK),
+ xfs_rtrmap_broot_space_calc(mp, level, numrecs));
+ if (broot)
+ xfs_rtrmapbt_from_disk(ip, dfp, dsize, broot);
+ return 0;
+}
+
+/* Convert in-memory form of btree root to on-disk form. */
+void
+xfs_rtrmapbt_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *rblock,
+ unsigned int rblocklen,
+ struct xfs_rtrmap_root *dblock,
+ unsigned int dblocklen)
+{
+ struct xfs_rmap_key *fkp;
+ __be64 *fpp;
+ struct xfs_rmap_key *tkp;
+ __be64 *tpp;
+ struct xfs_rmap_rec *frp;
+ struct xfs_rmap_rec *trp;
+ unsigned int numrecs;
+ unsigned int maxrecs;
+
+ ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTRMAP_CRC_MAGIC));
+ ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid));
+ ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL));
+ ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
+ ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
+
+ dblock->bb_level = rblock->bb_level;
+ dblock->bb_numrecs = rblock->bb_numrecs;
+ numrecs = be16_to_cpu(rblock->bb_numrecs);
+
+ if (be16_to_cpu(rblock->bb_level) > 0) {
+ maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false);
+ fkp = xfs_rtrmap_key_addr(rblock, 1);
+ tkp = xfs_rtrmap_droot_key_addr(dblock, 1);
+ fpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen);
+ tpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs);
+ memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs);
+ memcpy(tpp, fpp, sizeof(*fpp) * numrecs);
+ } else {
+ frp = xfs_rtrmap_rec_addr(rblock, 1);
+ trp = xfs_rtrmap_droot_rec_addr(dblock, 1);
+ memcpy(trp, frp, sizeof(*frp) * numrecs);
+ }
+}
+
+/* Flush a realtime reverse mapping btree root out to disk. */
+void
+xfs_iflush_rtrmap(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+
+ ASSERT(ifp->if_broot != NULL);
+ ASSERT(ifp->if_broot_bytes > 0);
+ ASSERT(xfs_rtrmap_droot_space(ifp->if_broot) <=
+ xfs_inode_fork_size(ip, XFS_DATA_FORK));
+ xfs_rtrmapbt_to_disk(ip->i_mount, ifp->if_broot, ifp->if_broot_bytes,
+ dfp, XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK));
+}
+
+/*
+ * Create a realtime rmap btree inode.
+ */
+int
+xfs_rtrmapbt_create(
+ struct xfs_rtgroup *rtg,
+ struct xfs_inode *ip,
+ struct xfs_trans *tp,
+ bool init)
+{
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_block *broot;
+
+ ifp->if_format = XFS_DINODE_FMT_META_BTREE;
+ ASSERT(ifp->if_broot_bytes == 0);
+ ASSERT(ifp->if_bytes == 0);
+
+ /* Initialize the empty incore btree root. */
+ broot = xfs_broot_realloc(ifp, xfs_rtrmap_broot_space_calc(mp, 0, 0));
+ if (broot)
+ xfs_btree_init_block(mp, broot, &xfs_rtrmapbt_ops, 0, 0,
+ ip->i_ino);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT);
+
+ return 0;
+}
+
+/*
+ * Initialize an rmap for a realtime superblock using the potentially updated
+ * rt geometry in the provided @mp.
+ */
+int
+xfs_rtrmapbt_init_rtsb(
+ struct xfs_mount *mp,
+ struct xfs_rtgroup *rtg,
+ struct xfs_trans *tp)
+{
+ struct xfs_rmap_irec rmap = {
+ .rm_blockcount = mp->m_sb.sb_rextsize,
+ .rm_owner = XFS_RMAP_OWN_FS,
+ };
+ struct xfs_btree_cur *cur;
+ int error;
+
+ ASSERT(xfs_has_rtsb(mp));
+ ASSERT(rtg_rgno(rtg) == 0);
+
+ cur = xfs_rtrmapbt_init_cursor(tp, rtg);
+ error = xfs_rmap_map_raw(cur, &rmap);
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.h b/fs/xfs/libxfs/xfs_rtrmap_btree.h
new file mode 100644
index 000000000000..9d0915089891
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rtrmap_btree.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_RTRMAP_BTREE_H__
+#define __XFS_RTRMAP_BTREE_H__
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_mount;
+struct xbtree_ifakeroot;
+struct xfs_rtgroup;
+struct xfbtree;
+
+/* rmaps only exist on crc enabled filesystems */
+#define XFS_RTRMAP_BLOCK_LEN XFS_BTREE_LBLOCK_CRC_LEN
+
+struct xfs_btree_cur *xfs_rtrmapbt_init_cursor(struct xfs_trans *tp,
+ struct xfs_rtgroup *rtg);
+struct xfs_btree_cur *xfs_rtrmapbt_stage_cursor(struct xfs_mount *mp,
+ struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+ struct xbtree_ifakeroot *ifake);
+void xfs_rtrmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp);
+unsigned int xfs_rtrmapbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen,
+ bool leaf);
+void xfs_rtrmapbt_compute_maxlevels(struct xfs_mount *mp);
+unsigned int xfs_rtrmapbt_droot_maxrecs(unsigned int blocklen, bool leaf);
+
+/*
+ * Addresses of records, keys, and pointers within an incore rtrmapbt block.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+static inline struct xfs_rmap_rec *
+xfs_rtrmap_rec_addr(
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_rmap_rec *)
+ ((char *)block + XFS_RTRMAP_BLOCK_LEN +
+ (index - 1) * sizeof(struct xfs_rmap_rec));
+}
+
+static inline struct xfs_rmap_key *
+xfs_rtrmap_key_addr(
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_rmap_key *)
+ ((char *)block + XFS_RTRMAP_BLOCK_LEN +
+ (index - 1) * 2 * sizeof(struct xfs_rmap_key));
+}
+
+static inline struct xfs_rmap_key *
+xfs_rtrmap_high_key_addr(
+ struct xfs_btree_block *block,
+ unsigned int index)
+{
+ return (struct xfs_rmap_key *)
+ ((char *)block + XFS_RTRMAP_BLOCK_LEN +
+ sizeof(struct xfs_rmap_key) +
+ (index - 1) * 2 * sizeof(struct xfs_rmap_key));
+}
+
+static inline xfs_rtrmap_ptr_t *
+xfs_rtrmap_ptr_addr(
+ struct xfs_btree_block *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_rtrmap_ptr_t *)
+ ((char *)block + XFS_RTRMAP_BLOCK_LEN +
+ maxrecs * 2 * sizeof(struct xfs_rmap_key) +
+ (index - 1) * sizeof(xfs_rtrmap_ptr_t));
+}
+
+unsigned int xfs_rtrmapbt_maxlevels_ondisk(void);
+
+int __init xfs_rtrmapbt_init_cur_cache(void);
+void xfs_rtrmapbt_destroy_cur_cache(void);
+
+xfs_filblks_t xfs_rtrmapbt_calc_reserves(struct xfs_mount *mp);
+
+/* Addresses of keys, pointers, and records within an ondisk rtrmapbt block. */
+
+static inline struct xfs_rmap_rec *
+xfs_rtrmap_droot_rec_addr(
+ struct xfs_rtrmap_root *block,
+ unsigned int index)
+{
+ return (struct xfs_rmap_rec *)
+ ((char *)(block + 1) +
+ (index - 1) * sizeof(struct xfs_rmap_rec));
+}
+
+static inline struct xfs_rmap_key *
+xfs_rtrmap_droot_key_addr(
+ struct xfs_rtrmap_root *block,
+ unsigned int index)
+{
+ return (struct xfs_rmap_key *)
+ ((char *)(block + 1) +
+ (index - 1) * 2 * sizeof(struct xfs_rmap_key));
+}
+
+static inline xfs_rtrmap_ptr_t *
+xfs_rtrmap_droot_ptr_addr(
+ struct xfs_rtrmap_root *block,
+ unsigned int index,
+ unsigned int maxrecs)
+{
+ return (xfs_rtrmap_ptr_t *)
+ ((char *)(block + 1) +
+ maxrecs * 2 * sizeof(struct xfs_rmap_key) +
+ (index - 1) * sizeof(xfs_rtrmap_ptr_t));
+}
+
+/*
+ * Address of pointers within the incore btree root.
+ *
+ * These are to be used when we know the size of the block and
+ * we don't have a cursor.
+ */
+static inline xfs_rtrmap_ptr_t *
+xfs_rtrmap_broot_ptr_addr(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *bb,
+ unsigned int index,
+ unsigned int block_size)
+{
+ return xfs_rtrmap_ptr_addr(bb, index,
+ xfs_rtrmapbt_maxrecs(mp, block_size, false));
+}
+
+/*
+ * Compute the space required for the incore btree root containing the given
+ * number of records.
+ */
+static inline size_t
+xfs_rtrmap_broot_space_calc(
+ struct xfs_mount *mp,
+ unsigned int level,
+ unsigned int nrecs)
+{
+ size_t sz = XFS_RTRMAP_BLOCK_LEN;
+
+ if (level > 0)
+ return sz + nrecs * (2 * sizeof(struct xfs_rmap_key) +
+ sizeof(xfs_rtrmap_ptr_t));
+ return sz + nrecs * sizeof(struct xfs_rmap_rec);
+}
+
+/*
+ * Compute the space required for the incore btree root given the ondisk
+ * btree root block.
+ */
+static inline size_t
+xfs_rtrmap_broot_space(struct xfs_mount *mp, struct xfs_rtrmap_root *bb)
+{
+ return xfs_rtrmap_broot_space_calc(mp, be16_to_cpu(bb->bb_level),
+ be16_to_cpu(bb->bb_numrecs));
+}
+
+/* Compute the space required for the ondisk root block. */
+static inline size_t
+xfs_rtrmap_droot_space_calc(
+ unsigned int level,
+ unsigned int nrecs)
+{
+ size_t sz = sizeof(struct xfs_rtrmap_root);
+
+ if (level > 0)
+ return sz + nrecs * (2 * sizeof(struct xfs_rmap_key) +
+ sizeof(xfs_rtrmap_ptr_t));
+ return sz + nrecs * sizeof(struct xfs_rmap_rec);
+}
+
+/*
+ * Compute the space required for the ondisk root block given an incore root
+ * block.
+ */
+static inline size_t
+xfs_rtrmap_droot_space(struct xfs_btree_block *bb)
+{
+ return xfs_rtrmap_droot_space_calc(be16_to_cpu(bb->bb_level),
+ be16_to_cpu(bb->bb_numrecs));
+}
+
+int xfs_iformat_rtrmap(struct xfs_inode *ip, struct xfs_dinode *dip);
+void xfs_rtrmapbt_to_disk(struct xfs_mount *mp, struct xfs_btree_block *rblock,
+ unsigned int rblocklen, struct xfs_rtrmap_root *dblock,
+ unsigned int dblocklen);
+void xfs_iflush_rtrmap(struct xfs_inode *ip, struct xfs_dinode *dip);
+
+int xfs_rtrmapbt_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+ struct xfs_trans *tp, bool init);
+int xfs_rtrmapbt_init_rtsb(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
+ struct xfs_trans *tp);
+
+unsigned long long xfs_rtrmapbt_calc_size(struct xfs_mount *mp,
+ unsigned long long len);
+
+struct xfs_btree_cur *xfs_rtrmapbt_mem_cursor(struct xfs_rtgroup *rtg,
+ struct xfs_trans *tp, struct xfbtree *xfbtree);
+int xfs_rtrmapbt_mem_init(struct xfs_mount *mp, struct xfbtree *xfbtree,
+ struct xfs_buftarg *btp, xfs_rgnumber_t rgno);
+
+#endif /* __XFS_RTRMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 3b5623611eba..3dc5f5dba162 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -28,6 +28,8 @@
#include "xfs_rtbitmap.h"
#include "xfs_exchrange.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -1215,11 +1217,23 @@ xfs_sb_mount_common(
mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
+ mp->m_rtrmap_mxr[0] = xfs_rtrmapbt_maxrecs(mp, sbp->sb_blocksize, true);
+ mp->m_rtrmap_mxr[1] = xfs_rtrmapbt_maxrecs(mp, sbp->sb_blocksize, false);
+ mp->m_rtrmap_mnr[0] = mp->m_rtrmap_mxr[0] / 2;
+ mp->m_rtrmap_mnr[1] = mp->m_rtrmap_mxr[1] / 2;
+
mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, true);
mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, false);
mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
+ mp->m_rtrefc_mxr[0] = xfs_rtrefcountbt_maxrecs(mp, sbp->sb_blocksize,
+ true);
+ mp->m_rtrefc_mxr[1] = xfs_rtrefcountbt_maxrecs(mp, sbp->sb_blocksize,
+ false);
+ mp->m_rtrefc_mnr[0] = mp->m_rtrefc_mxr[0] / 2;
+ mp->m_rtrefc_mnr[1] = mp->m_rtrefc_mxr[1] / 2;
+
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp);
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index e7efdb9ceaf3..b1e0d9bc1f7d 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -42,6 +42,8 @@ extern const struct xfs_buf_ops xfs_rtbitmap_buf_ops;
extern const struct xfs_buf_ops xfs_rtsummary_buf_ops;
extern const struct xfs_buf_ops xfs_rtbuf_ops;
extern const struct xfs_buf_ops xfs_rtsb_buf_ops;
+extern const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rtrmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
@@ -55,6 +57,9 @@ extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_mem_ops;
+extern const struct xfs_btree_ops xfs_rtrmapbt_ops;
+extern const struct xfs_btree_ops xfs_rtrmapbt_mem_ops;
+extern const struct xfs_btree_ops xfs_rtrefcountbt_ops;
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
{
@@ -96,10 +101,26 @@ static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops)
{
return ops == &xfs_rmapbt_mem_ops;
}
+
+static inline bool xfs_btree_is_mem_rtrmap(const struct xfs_btree_ops *ops)
+{
+ return ops == &xfs_rtrmapbt_mem_ops;
+}
#else
# define xfs_btree_is_mem_rmap(...) (false)
+# define xfs_btree_is_mem_rtrmap(...) (false)
#endif
+static inline bool xfs_btree_is_rtrmap(const struct xfs_btree_ops *ops)
+{
+ return ops == &xfs_rtrmapbt_ops;
+}
+
+static inline bool xfs_btree_is_rtrefcount(const struct xfs_btree_ops *ops)
+{
+ return ops == &xfs_rtrefcountbt_ops;
+}
+
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index bab402340b5d..13d00c7166e1 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -92,6 +92,14 @@ xfs_refcountbt_block_count(
return num_ops * (2 * mp->m_refc_maxlevels - 1);
}
+static unsigned int
+xfs_rtrefcountbt_block_count(
+ struct xfs_mount *mp,
+ unsigned int num_ops)
+{
+ return num_ops * (2 * mp->m_rtrefc_maxlevels - 1);
+}
+
/*
* Logging inodes is really tricksy. They are logged in memory format,
* which means that what we write into the log doesn't directly translate into
@@ -213,7 +221,9 @@ xfs_calc_inode_chunk_res(
* Per-extent log reservation for the btree changes involved in freeing or
* allocating a realtime extent. We have to be able to log as many rtbitmap
* blocks as needed to mark inuse XFS_BMBT_MAX_EXTLEN blocks' worth of realtime
- * extents, as well as the realtime summary block.
+ * extents, as well as the realtime summary block (t1). Realtime rmap btree
+ * operations happen in a second transaction, so factor in a couple of rtrmapbt
+ * splits (t2).
*/
static unsigned int
xfs_rtalloc_block_count(
@@ -222,10 +232,16 @@ xfs_rtalloc_block_count(
{
unsigned int rtbmp_blocks;
xfs_rtxlen_t rtxlen;
+ unsigned int t1, t2 = 0;
rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
rtbmp_blocks = xfs_rtbitmap_blockcount_len(mp, rtxlen);
- return (rtbmp_blocks + 1) * num_ops;
+ t1 = (rtbmp_blocks + 1) * num_ops;
+
+ if (xfs_has_rmapbt(mp))
+ t2 = num_ops * (2 * mp->m_rtrmap_maxlevels - 1);
+
+ return max(t1, t2);
}
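
Plugging toy numbers into the function above shows how the max() plays out: the rtbitmap-plus-summary cost (t1) and the rtrmapbt split cost (t2) are computed separately and only the larger is reserved. All values below are invented for illustration.

#include <stdio.h>

int main(void)
{
        /* Assumed geometry, for illustration only. */
        unsigned int rtbmp_blocks = 3;          /* bitmap blocks per max extent */
        unsigned int rtrmap_maxlevels = 5;
        unsigned int num_ops = 2;

        unsigned int t1 = (rtbmp_blocks + 1) * num_ops;
        unsigned int t2 = num_ops * (2 * rtrmap_maxlevels - 1);

        printf("reserve %u blocks\n", t1 > t2 ? t1 : t2);       /* max(8, 18) */
        return 0;
}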
/*
@@ -251,10 +267,13 @@ xfs_rtalloc_block_count(
* Compute the log reservation required to handle the refcount update
* transaction. Refcount updates are always done via deferred log items.
*
- * This is calculated as:
+ * This is calculated as the max of:
* Data device refcount updates (t1):
* the agfs of the ags containing the blocks: nr_ops * sector size
* the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
+ * Realtime refcount updates (t2):
+ * the rt refcount inode
+ * the rtrefcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
*/
static unsigned int
xfs_calc_refcountbt_reservation(
@@ -262,12 +281,20 @@ xfs_calc_refcountbt_reservation(
unsigned int nr_ops)
{
unsigned int blksz = XFS_FSB_TO_B(mp, 1);
+ unsigned int t1, t2 = 0;
if (!xfs_has_reflink(mp))
return 0;
- return xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz);
+ t1 = xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) +
+ xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz);
+
+ if (xfs_has_realtime(mp))
+ t2 = xfs_calc_inode_res(mp, 1) +
+ xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops),
+ blksz);
+
+ return max(t1, t2);
}
/*
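[Editorial aside, not part of the patch: a minimal userspace sketch of the max-of-two-transactions arithmetic shared by xfs_rtalloc_block_count() and xfs_calc_refcountbt_reservation() above. The geometry numbers are assumed for illustration, not taken from any real filesystem.]

#include <stdio.h>

/* Assumed example geometry (illustrative only). */
#define RTBMP_BLOCKS		2	/* rtbitmap blocks covering one max-length mapping */
#define RTRMAP_MAXLEVELS	9	/* assumed rtrmapbt height */

/*
 * Mirror of the t1/t2 logic: the bitmap+summary updates and the rtrmapbt
 * splits run in separate transactions, so the reservation only has to cover
 * the larger of the two, not their sum.
 */
static unsigned int rtalloc_block_count(unsigned int num_ops, int has_rmapbt)
{
	unsigned int t1 = (RTBMP_BLOCKS + 1) * num_ops;	/* bitmap blocks + summary block */
	unsigned int t2 = has_rmapbt ?
			num_ops * (2 * RTRMAP_MAXLEVELS - 1) : 0; /* a couple of rtrmapbt splits */

	return t1 > t2 ? t1 : t2;
}

int main(void)
{
	/* 2 ops: t1 = 6 blocks, t2 = 34 blocks, so reserve 34. */
	printf("%u\n", rtalloc_block_count(2, 1));
	return 0;
}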
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 1155ff2d37e2..d89b570aafcc 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -14,6 +14,19 @@
#define XFS_MAX_CONTIG_BMAPS_PER_BLOCK(mp) \
(((mp)->m_bmap_dmxr[0]) - ((mp)->m_bmap_dmnr[0]))
+/* Worst case number of realtime rmaps that can be held in a block. */
+#define XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) \
+ (((mp)->m_rtrmap_mxr[0]) - ((mp)->m_rtrmap_mnr[0]))
+
+/* Adding one realtime rmap could split every level to the top of the tree. */
+#define XFS_RTRMAPADD_SPACE_RES(mp) ((mp)->m_rtrmap_maxlevels)
+
+/* Blocks we might need to add "b" realtime rmaps to a tree. */
+#define XFS_NRTRMAPADD_SPACE_RES(mp, b) \
+ ((((b) + XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) - 1) / \
+ XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp)) * \
+ XFS_RTRMAPADD_SPACE_RES(mp))
+
/* Worst case number of rmaps that can be held in a block. */
#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
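[Editorial aside, not part of the patch: a small userspace sketch of the ceiling-division arithmetic behind XFS_NRTRMAPADD_SPACE_RES above. The per-block record counts and tree height are assumed values.]

#include <stdio.h>

/* Assumed example geometry (illustrative only). */
#define RTRMAP_MXR0		84	/* max rtrmap records in a leaf block */
#define RTRMAP_MNR0		42	/* min rtrmap records in a leaf block */
#define RTRMAP_MAXLEVELS	9	/* assumed maximum tree height */

/*
 * Round the record count up to whole worst-case leaf blocks, then assume
 * each of those additions can split every level up to the root.
 */
static unsigned int nrtrmapadd_space_res(unsigned int nr_rmaps)
{
	unsigned int per_block = RTRMAP_MXR0 - RTRMAP_MNR0;

	return ((nr_rmaps + per_block - 1) / per_block) * RTRMAP_MAXLEVELS;
}

int main(void)
{
	/* 100 rmaps -> ceil(100 / 42) = 3 leaf additions -> 27 reserved blocks. */
	printf("%u\n", nrtrmapadd_space_res(100));
	return 0;
}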
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index bf33c2b1e43e..ca2401c1facd 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -202,6 +202,13 @@ enum xfs_ag_resv_type {
* altering fdblocks. If you think you need this you're wrong.
*/
XFS_AG_RESV_IGNORE,
+
+ /*
+ * This allocation activity is being done on behalf of a metadata file.
+ * These files maintain their own permanent space reservations and are
+ * required to adjust fdblocks using the xfs_metafile_resv_* helpers.
+ */
+ XFS_AG_RESV_METAFILE,
};
/* Results of scanning a btree keyspace to check occupancy. */
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index b45d2b32051a..cd6f0223879f 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -647,7 +647,7 @@ xrep_agfl_fill(
xfs_agblock_t agbno = start;
int error;
- trace_xrep_agfl_insert(sc->sa.pag, agbno, len);
+ trace_xrep_agfl_insert(pag_group(sc->sa.pag), agbno, len);
while (agbno < start + len && af->fl_off < af->flcount)
af->agfl_bno[af->fl_off++] = cpu_to_be32(agbno++);
diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c
index 0433363a90b6..bed6a09aa791 100644
--- a/fs/xfs/scrub/alloc_repair.c
+++ b/fs/xfs/scrub/alloc_repair.c
@@ -542,8 +542,9 @@ xrep_abt_dispose_one(
/* Add a deferred rmap for each extent we used. */
if (resv->used > 0)
- xfs_rmap_alloc_extent(sc->tp, pag_agno(pag), resv->agbno,
- resv->used, XFS_RMAP_OWN_AG);
+ xfs_rmap_alloc_extent(sc->tp, false,
+ xfs_agbno_to_fsb(pag, resv->agbno), resv->used,
+ XFS_RMAP_OWN_AG);
/*
* For each reserved btree block we didn't use, add it to the free
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 7e00312225ed..66da7d4d56ba 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -21,6 +21,8 @@
#include "xfs_rmap_btree.h"
#include "xfs_rtgroup.h"
#include "xfs_health.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtrmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
@@ -143,15 +145,22 @@ static inline bool
xchk_bmap_get_rmap(
struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno,
+ xfs_agblock_t bno,
uint64_t owner,
struct xfs_rmap_irec *rmap)
{
+ struct xfs_btree_cur **curp = &info->sc->sa.rmap_cur;
xfs_fileoff_t offset;
unsigned int rflags = 0;
int has_rmap;
int error;
+ if (xfs_ifork_is_realtime(info->sc->ip, info->whichfork))
+ curp = &info->sc->sr.rmap_cur;
+
+ if (*curp == NULL)
+ return false;
+
if (info->whichfork == XFS_ATTR_FORK)
rflags |= XFS_RMAP_ATTR_FORK;
if (irec->br_state == XFS_EXT_UNWRITTEN)
@@ -172,13 +181,13 @@ xchk_bmap_get_rmap(
* range rmap lookup to make sure we get the correct owner/offset.
*/
if (info->is_shared) {
- error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
- owner, offset, rflags, rmap, &has_rmap);
+ error = xfs_rmap_lookup_le_range(*curp, bno, owner, offset,
+ rflags, rmap, &has_rmap);
} else {
- error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno,
- owner, offset, rflags, rmap, &has_rmap);
+ error = xfs_rmap_lookup_le(*curp, bno, owner, offset,
+ rflags, rmap, &has_rmap);
}
- if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(info->sc, &error, curp))
return false;
if (!has_rmap)
@@ -192,29 +201,29 @@ STATIC void
xchk_bmap_xref_rmap(
struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno)
+ xfs_agblock_t bno)
{
struct xfs_rmap_irec rmap;
unsigned long long rmap_end;
uint64_t owner = info->sc->ip->i_ino;
- if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
+ if (xchk_skip_xref(info->sc->sm))
return;
/* Find the rmap record for this irec. */
- if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ if (!xchk_bmap_get_rmap(info, irec, bno, owner, &rmap))
return;
/*
* The rmap must be an exact match for this incore file mapping record,
* which may have arisen from multiple ondisk records.
*/
- if (rmap.rm_startblock != agbno)
+ if (rmap.rm_startblock != bno)
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
- if (rmap_end != agbno + irec->br_blockcount)
+ if (rmap_end != bno + irec->br_blockcount)
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
@@ -259,7 +268,7 @@ STATIC void
xchk_bmap_xref_rmap_cow(
struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno)
+ xfs_agblock_t bno)
{
struct xfs_rmap_irec rmap;
unsigned long long rmap_end;
@@ -269,7 +278,7 @@ xchk_bmap_xref_rmap_cow(
return;
/* Find the rmap record for this irec. */
- if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ if (!xchk_bmap_get_rmap(info, irec, bno, owner, &rmap))
return;
/*
@@ -277,12 +286,12 @@ xchk_bmap_xref_rmap_cow(
* can start before and end after the physical space allocated to this
* mapping. There are no offsets to check.
*/
- if (rmap.rm_startblock > agbno)
+ if (rmap.rm_startblock > bno)
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
- if (rmap_end < agbno + irec->br_blockcount)
+ if (rmap_end < bno + irec->br_blockcount)
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
@@ -315,6 +324,8 @@ xchk_bmap_rt_iextent_xref(
struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec)
{
+ struct xfs_owner_info oinfo;
+ xfs_rgblock_t rgbno;
int error;
error = xchk_rtgroup_init_existing(info->sc,
@@ -324,10 +335,46 @@ xchk_bmap_rt_iextent_xref(
irec->br_startoff, &error))
return;
- xchk_rtgroup_lock(&info->sc->sr, XCHK_RTGLOCK_ALL);
+ error = xchk_rtgroup_lock(info->sc, &info->sc->sr, XCHK_RTGLOCK_ALL);
+ if (!xchk_fblock_process_error(info->sc, info->whichfork,
+ irec->br_startoff, &error))
+ goto out_free;
+
xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount);
+ if (!xfs_has_rtrmapbt(info->sc->mp))
+ goto out_cur;
+
+ rgbno = xfs_rtb_to_rgbno(info->sc->mp, irec->br_startblock);
+
+ switch (info->whichfork) {
+ case XFS_DATA_FORK:
+ xchk_bmap_xref_rmap(info, irec, rgbno);
+ if (!xfs_is_reflink_inode(info->sc->ip)) {
+ xfs_rmap_ino_owner(&oinfo, info->sc->ip->i_ino,
+ info->whichfork, irec->br_startoff);
+ xchk_xref_is_only_rt_owned_by(info->sc, rgbno,
+ irec->br_blockcount, &oinfo);
+ xchk_xref_is_not_rt_shared(info->sc, rgbno,
+ irec->br_blockcount);
+ }
+ xchk_xref_is_not_rt_cow_staging(info->sc, rgbno,
+ irec->br_blockcount);
+ break;
+ case XFS_COW_FORK:
+ xchk_bmap_xref_rmap_cow(info, irec, rgbno);
+ xchk_xref_is_only_rt_owned_by(info->sc, rgbno,
+ irec->br_blockcount, &XFS_RMAP_OINFO_COW);
+ xchk_xref_is_rt_cow_staging(info->sc, rgbno,
+ irec->br_blockcount);
+ xchk_xref_is_not_rt_shared(info->sc, rgbno,
+ irec->br_blockcount);
+ break;
+ }
+out_cur:
+ xchk_rtgroup_btcur_free(&info->sc->sr);
+out_free:
xchk_rtgroup_free(info->sc, &info->sc->sr);
}
@@ -614,8 +661,7 @@ xchk_bmap_check_rmap(
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_startblock !=
- xfs_agbno_to_fsb(to_perag(cur->bc_group),
- check_rec.rm_startblock))
+ xfs_gbno_to_fsb(cur->bc_group, check_rec.rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_blockcount > check_rec.rm_blockcount)
@@ -669,6 +715,30 @@ xchk_bmap_check_ag_rmaps(
return error;
}
+/* Make sure each rt rmap has a corresponding bmbt entry. */
+STATIC int
+xchk_bmap_check_rt_rmaps(
+ struct xfs_scrub *sc,
+ struct xfs_rtgroup *rtg)
+{
+ struct xchk_bmap_check_rmap_info sbcri;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
+
+ sbcri.sc = sc;
+ sbcri.whichfork = XFS_DATA_FORK;
+ error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
+ if (error == -ECANCELED)
+ error = 0;
+
+ xfs_btree_del_cursor(cur, error);
+ xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
+ return error;
+}
+
/*
* Decide if we want to scan the reverse mappings to determine if the attr
* fork /really/ has zero space mappings.
@@ -723,10 +793,6 @@ xchk_bmap_check_empty_datafork(
{
struct xfs_ifork *ifp = &ip->i_df;
- /* Don't support realtime rmap checks yet. */
- if (XFS_IS_REALTIME_INODE(ip))
- return false;
-
/*
* If the dinode repair found a bad data fork, it will reset the fork
* to extents format with zero records and wait for this scrubber
@@ -777,6 +843,21 @@ xchk_bmap_check_rmaps(
struct xfs_perag *pag = NULL;
int error;
+ if (xfs_ifork_is_realtime(sc->ip, whichfork)) {
+ struct xfs_rtgroup *rtg = NULL;
+
+ while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) {
+ error = xchk_bmap_check_rt_rmaps(sc, rtg);
+ if (error ||
+ (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
+ xfs_rtgroup_rele(rtg);
+ return error;
+ }
+ }
+
+ return 0;
+ }
+
while ((pag = xfs_perag_next(sc->mp, pag))) {
error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
if (error ||
@@ -983,6 +1064,7 @@ xchk_bmap(
case XFS_DINODE_FMT_UUID:
case XFS_DINODE_FMT_DEV:
case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_META_BTREE:
/* No mappings to check. */
if (whichfork == XFS_COW_FORK)
xchk_fblock_set_corrupt(sc, whichfork, 0);
diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c
index 7c4955482641..1084213b8e9b 100644
--- a/fs/xfs/scrub/bmap_repair.c
+++ b/fs/xfs/scrub/bmap_repair.c
@@ -25,11 +25,13 @@
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
+#include "xfs_rtrmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_quota.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_reflink.h"
+#include "xfs_rtgroup.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -99,14 +101,21 @@ xrep_bmap_discover_shared(
xfs_filblks_t blockcount)
{
struct xfs_scrub *sc = rb->sc;
+ struct xfs_btree_cur *cur;
xfs_agblock_t agbno;
xfs_agblock_t fbno;
xfs_extlen_t flen;
int error;
- agbno = XFS_FSB_TO_AGBNO(sc->mp, startblock);
- error = xfs_refcount_find_shared(sc->sa.refc_cur, agbno, blockcount,
- &fbno, &flen, false);
+ if (XFS_IS_REALTIME_INODE(sc->ip)) {
+ agbno = xfs_rtb_to_rgbno(sc->mp, startblock);
+ cur = sc->sr.refc_cur;
+ } else {
+ agbno = XFS_FSB_TO_AGBNO(sc->mp, startblock);
+ cur = sc->sa.refc_cur;
+ }
+ error = xfs_refcount_find_shared(cur, agbno, blockcount, &fbno, &flen,
+ false);
if (error)
return error;
@@ -359,6 +368,114 @@ xrep_bmap_scan_ag(
return error;
}
+#ifdef CONFIG_XFS_RT
+/* Check for any obvious errors or conflicts in the file mapping. */
+STATIC int
+xrep_bmap_check_rtfork_rmap(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec)
+{
+ /* xattr extents are never stored on realtime devices */
+ if (rec->rm_flags & XFS_RMAP_ATTR_FORK)
+ return -EFSCORRUPTED;
+
+ /* bmbt blocks are never stored on realtime devices */
+ if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
+ return -EFSCORRUPTED;
+
+ /* Data extents for non-rt files are never stored on the rt device. */
+ if (!XFS_IS_REALTIME_INODE(sc->ip))
+ return -EFSCORRUPTED;
+
+ /* Check the file offsets and physical extents. */
+ if (!xfs_verify_fileext(sc->mp, rec->rm_offset, rec->rm_blockcount))
+ return -EFSCORRUPTED;
+
+ /* Check that this is within the rtgroup. */
+ if (!xfs_verify_rgbext(to_rtg(cur->bc_group), rec->rm_startblock,
+ rec->rm_blockcount))
+ return -EFSCORRUPTED;
+
+ /* Make sure this isn't free space. */
+ return xrep_require_rtext_inuse(sc, rec->rm_startblock,
+ rec->rm_blockcount);
+}
+
+/* Record realtime extents that belong to this inode's fork. */
+STATIC int
+xrep_bmap_walk_rtrmap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_bmap *rb = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(rb->sc, &error))
+ return error;
+
+ /* Skip extents which are not owned by this inode and fork. */
+ if (rec->rm_owner != rb->sc->ip->i_ino)
+ return 0;
+
+ error = xrep_bmap_check_rtfork_rmap(rb->sc, cur, rec);
+ if (error)
+ return error;
+
+ /*
+ * Record all blocks allocated to this file even if the extent isn't
+ * for the fork we're rebuilding so that we can reset di_nblocks later.
+ */
+ rb->nblocks += rec->rm_blockcount;
+
+ /* If this rmap isn't for the fork we want, we're done. */
+ if (rb->whichfork == XFS_DATA_FORK &&
+ (rec->rm_flags & XFS_RMAP_ATTR_FORK))
+ return 0;
+ if (rb->whichfork == XFS_ATTR_FORK &&
+ !(rec->rm_flags & XFS_RMAP_ATTR_FORK))
+ return 0;
+
+ return xrep_bmap_from_rmap(rb, rec->rm_offset,
+ xfs_rgbno_to_rtb(to_rtg(cur->bc_group),
+ rec->rm_startblock),
+ rec->rm_blockcount,
+ rec->rm_flags & XFS_RMAP_UNWRITTEN);
+}
+
+/* Scan the realtime reverse mappings to build the new extent map. */
+STATIC int
+xrep_bmap_scan_rtgroup(
+ struct xrep_bmap *rb,
+ struct xfs_rtgroup *rtg)
+{
+ struct xfs_scrub *sc = rb->sc;
+ int error;
+
+ if (!xfs_has_rtrmapbt(sc->mp))
+ return 0;
+
+ error = xrep_rtgroup_init(sc, rtg, &sc->sr,
+ XFS_RTGLOCK_RMAP |
+ XFS_RTGLOCK_REFCOUNT |
+ XFS_RTGLOCK_BITMAP_SHARED);
+ if (error)
+ return error;
+
+ error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_bmap_walk_rtrmap, rb);
+ xchk_rtgroup_btcur_free(&sc->sr);
+ xchk_rtgroup_free(sc, &sc->sr);
+ return error;
+}
+#else
+static inline int
+xrep_bmap_scan_rtgroup(struct xrep_bmap *rb, struct xfs_rtgroup *rtg)
+{
+ return -EFSCORRUPTED;
+}
+#endif
+
/* Find the delalloc extents from the old incore extent tree. */
STATIC int
xrep_bmap_find_delalloc(
@@ -410,6 +527,22 @@ xrep_bmap_find_mappings(
struct xfs_perag *pag = NULL;
int error = 0;
+ /*
+ * Iterate the rtrmaps for extents. Metadata files never have content
+ * on the realtime device, so there's no need to scan them.
+ */
+ if (!xfs_is_metadir_inode(sc->ip)) {
+ struct xfs_rtgroup *rtg = NULL;
+
+ while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) {
+ error = xrep_bmap_scan_rtgroup(rb, rtg);
+ if (error) {
+ xfs_rtgroup_rele(rtg);
+ return error;
+ }
+ }
+ }
+
/* Iterate the rmaps for extents. */
while ((pag = xfs_perag_next(sc->mp, pag))) {
error = xrep_bmap_scan_ag(rb, pag);
@@ -731,6 +864,7 @@ xrep_bmap_check_inputs(
case XFS_DINODE_FMT_DEV:
case XFS_DINODE_FMT_LOCAL:
case XFS_DINODE_FMT_UUID:
+ case XFS_DINODE_FMT_META_BTREE:
return -ECANCELED;
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
@@ -753,10 +887,6 @@ xrep_bmap_check_inputs(
return -EINVAL;
}
- /* Don't know how to rebuild realtime data forks. */
- if (XFS_IS_REALTIME_INODE(sc->ip))
- return -EOPNOTSUPP;
-
return 0;
}
@@ -782,10 +912,6 @@ xrep_bmap_init_reflink_scan(
if (whichfork != XFS_DATA_FORK)
return RLS_IRRELEVANT;
- /* cannot share realtime extents */
- if (XFS_IS_REALTIME_INODE(sc->ip))
- return RLS_IRRELEVANT;
-
return RLS_UNKNOWN;
}
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 5cbd94b56582..28ad341df8ee 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -35,6 +35,9 @@
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_bmap_util.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -719,20 +722,111 @@ xchk_rtgroup_init(
return 0;
}
-void
+/* Lock all the rt group metadata inode ILOCKs and wait for intents. */
+int
xchk_rtgroup_lock(
+ struct xfs_scrub *sc,
struct xchk_rt *sr,
unsigned int rtglock_flags)
{
- xfs_rtgroup_lock(sr->rtg, rtglock_flags);
+ int error = 0;
+
+ ASSERT(sr->rtg != NULL);
+
+ /*
+ * If we're /only/ locking the rtbitmap in shared mode, then we're
+ * obviously not trying to compare records in two metadata inodes.
+ * There's no need to drain intents here because the caller (most
+ * likely the rgsuper scanner) doesn't need that level of consistency.
+ */
+ if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
+ xfs_rtgroup_lock(sr->rtg, rtglock_flags);
+ sr->rtlock_flags = rtglock_flags;
+ return 0;
+ }
+
+ do {
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ xfs_rtgroup_lock(sr->rtg, rtglock_flags);
+
+ /*
+ * If we've grabbed a non-metadata file for scrubbing, we
+ * assume that holding its ILOCK will suffice to coordinate
+ * with any rt intent chains involving this inode.
+ */
+ if (sc->ip && !xfs_is_internal_inode(sc->ip))
+ break;
+
+ /*
+ * Decide if the rt group is quiet enough for all metadata to
+ * be consistent with each other. Regular file IO doesn't get
+ * to lock all the rt inodes at the same time, which means that
+ * there could be other threads in the middle of processing a
+ * chain of deferred ops.
+ *
+ * We just locked all the metadata inodes for this rt group;
+ * now take a look to see if there are any intents in progress.
+ * If there are, drop the rt group inode locks and wait for the
+ * intents to drain. Since we hold the rt group inode locks
+ * for the duration of the scrub, this is the only time we have
+ * to sample the intents counter; any threads increasing it
+ * after this point can't possibly be in the middle of a chain
+ * of rt metadata updates.
+ *
+ * Obviously, this should be slanted against scrub and in favor
+ * of runtime threads.
+ */
+ if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
+ break;
+
+ xfs_rtgroup_unlock(sr->rtg, rtglock_flags);
+
+ if (!(sc->flags & XCHK_FSGATES_DRAIN))
+ return -ECHRNG;
+ error = xfs_group_intent_drain(rtg_group(sr->rtg));
+ if (error) {
+ if (error == -ERESTARTSYS)
+ error = -EINTR;
+ return error;
+ }
+ } while (1);
+
sr->rtlock_flags = rtglock_flags;
+
+ if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
+ sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);
+
+ if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
+ sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);
+
+ return 0;
+}
+
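[Editorial aside, not part of the patch: a minimal userspace model of the lock/drain-and-retry pattern in xchk_rtgroup_lock() above. The mutex and counter here only stand in for the rtgroup metadata ILOCKs and the group intent counter.]

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t rtg_ilocks = PTHREAD_MUTEX_INITIALIZER;
static atomic_int rtg_intents;		/* stands in for xfs_group_intent_busy() */

/* Take the group locks only once no intent chains are in flight. */
static void lock_quiesced_group(void)
{
	for (;;) {
		pthread_mutex_lock(&rtg_ilocks);

		/* Quiet enough?  Keep the locks; increments made after this
		 * sample cannot already be mid-chain. */
		if (atomic_load(&rtg_intents) == 0)
			return;

		/* Someone is mid-chain: drop the locks and wait, so the
		 * retry stays slanted in favor of the runtime threads. */
		pthread_mutex_unlock(&rtg_ilocks);
		while (atomic_load(&rtg_intents) > 0)
			sched_yield();
	}
}

int main(void)
{
	lock_quiesced_group();
	pthread_mutex_unlock(&rtg_ilocks);
	return 0;
}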
+/*
+ * Free all the btree cursors and other incore data relating to the realtime
+ * group. This has to be done /before/ committing (or cancelling) the scrub
+ * transaction.
+ */
+void
+xchk_rtgroup_btcur_free(
+ struct xchk_rt *sr)
+{
+ if (sr->rmap_cur)
+ xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
+ if (sr->refc_cur)
+ xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);
+
+ sr->refc_cur = NULL;
+ sr->rmap_cur = NULL;
}
/*
* Unlock the realtime group. This must be done /after/ committing (or
* cancelling) the scrub transaction.
*/
-static void
+void
xchk_rtgroup_unlock(
struct xchk_rt *sr)
{
@@ -812,6 +906,14 @@ xchk_setup_fs(
return xchk_trans_alloc(sc, resblks);
}
+/* Set us up with a transaction and an empty context to repair rt metadata. */
+int
+xchk_setup_rt(
+ struct xfs_scrub *sc)
+{
+ return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
+}
+
/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
@@ -1379,7 +1481,7 @@ xchk_fsgates_enable(
trace_xchk_fsgates_enable(sc, scrub_fsgates);
if (scrub_fsgates & XCHK_FSGATES_DRAIN)
- xfs_drain_wait_enable();
+ xfs_defer_drain_wait_enable();
if (scrub_fsgates & XCHK_FSGATES_QUOTA)
xfs_dqtrx_hook_enable();
@@ -1573,3 +1675,63 @@ xchk_inode_rootdir_inum(const struct xfs_inode *ip)
return mp->m_metadirip->i_ino;
return mp->m_rootip->i_ino;
}
+
+static int
+xchk_meta_btree_count_blocks(
+ struct xfs_scrub *sc,
+ xfs_extnum_t *nextents,
+ xfs_filblks_t *count)
+{
+ struct xfs_btree_cur *cur;
+ int error;
+
+ if (!sc->sr.rtg) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ switch (sc->ip->i_metatype) {
+ case XFS_METAFILE_RTRMAP:
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ error = xfs_btree_count_blocks(cur, count);
+ xfs_btree_del_cursor(cur, error);
+ if (!error) {
+ *nextents = 0;
+ (*count)--; /* don't count the btree iroot */
+ }
+ return error;
+}
+
+/* Count the blocks used by a file, even if it's a metadata inode. */
+int
+xchk_inode_count_blocks(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_extnum_t *nextents,
+ xfs_filblks_t *count)
+{
+ struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork);
+
+ if (!ifp) {
+ *nextents = 0;
+ *count = 0;
+ return 0;
+ }
+
+ if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
+ ASSERT(whichfork == XFS_DATA_FORK);
+ return xchk_meta_btree_count_blocks(sc, nextents, count);
+ }
+
+ return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
+ count);
+}
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 9ff3cafd8679..bdcd40f0ec74 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -63,6 +63,7 @@ static inline int xchk_setup_nothing(struct xfs_scrub *sc)
/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
+int xchk_setup_rt(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
@@ -80,10 +81,14 @@ int xchk_setup_metapath(struct xfs_scrub *sc);
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
int xchk_setup_rtsummary(struct xfs_scrub *sc);
int xchk_setup_rgsuperblock(struct xfs_scrub *sc);
+int xchk_setup_rtrmapbt(struct xfs_scrub *sc);
+int xchk_setup_rtrefcountbt(struct xfs_scrub *sc);
#else
# define xchk_setup_rtbitmap xchk_setup_nothing
# define xchk_setup_rtsummary xchk_setup_nothing
# define xchk_setup_rgsuperblock xchk_setup_nothing
+# define xchk_setup_rtrmapbt xchk_setup_nothing
+# define xchk_setup_rtrefcountbt xchk_setup_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_ino_dqattach(struct xfs_scrub *sc);
@@ -125,7 +130,9 @@ xchk_ag_init_existing(
#ifdef CONFIG_XFS_RT
/* All the locks we need to check an rtgroup. */
-#define XCHK_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP)
+#define XCHK_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP | \
+ XFS_RTGLOCK_RMAP | \
+ XFS_RTGLOCK_REFCOUNT)
int xchk_rtgroup_init(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
struct xchk_rt *sr);
@@ -141,12 +148,17 @@ xchk_rtgroup_init_existing(
return error == -ENOENT ? -EFSCORRUPTED : error;
}
-void xchk_rtgroup_lock(struct xchk_rt *sr, unsigned int rtglock_flags);
+int xchk_rtgroup_lock(struct xfs_scrub *sc, struct xchk_rt *sr,
+ unsigned int rtglock_flags);
+void xchk_rtgroup_unlock(struct xchk_rt *sr);
+void xchk_rtgroup_btcur_free(struct xchk_rt *sr);
void xchk_rtgroup_free(struct xfs_scrub *sc, struct xchk_rt *sr);
#else
# define xchk_rtgroup_init(sc, rgno, sr) (-EFSCORRUPTED)
# define xchk_rtgroup_init_existing(sc, rgno, sr) (-EFSCORRUPTED)
-# define xchk_rtgroup_lock(sc, lockflags) do { } while (0)
+# define xchk_rtgroup_lock(sc, sr, lockflags) (-EFSCORRUPTED)
+# define xchk_rtgroup_unlock(sr) do { } while (0)
+# define xchk_rtgroup_btcur_free(sr) do { } while (0)
# define xchk_rtgroup_free(sc, sr) do { } while (0)
#endif /* CONFIG_XFS_RT */
@@ -257,6 +269,12 @@ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
(sc)->mp->m_super->s_id, \
(sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \
##__VA_ARGS__)
+#define xchk_xfile_rtgroup_descr(sc, fmt, ...) \
+ kasprintf(XCHK_GFP_FLAGS, "XFS (%s): rtgroup 0x%x " fmt, \
+ (sc)->mp->m_super->s_id, \
+ (sc)->sr.rtg ? \
+ rtg_rgno((sc)->sr.rtg) : (sc)->sm->sm_agno, \
+ ##__VA_ARGS__)
/*
* Setting up a hook to wait for intents to drain is costly -- we have to take
@@ -274,6 +292,8 @@ void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino,
bool *inuse);
+int xchk_inode_count_blocks(struct xfs_scrub *sc, int whichfork,
+ xfs_extnum_t *nextents, xfs_filblks_t *count);
bool xchk_inode_is_dirtree_root(const struct xfs_inode *ip);
bool xchk_inode_is_sb_rooted(const struct xfs_inode *ip);
diff --git a/fs/xfs/scrub/cow_repair.c b/fs/xfs/scrub/cow_repair.c
index 5b6194cef3e5..38a246b8bf11 100644
--- a/fs/xfs/scrub/cow_repair.c
+++ b/fs/xfs/scrub/cow_repair.c
@@ -26,6 +26,9 @@
#include "xfs_errortag.h"
#include "xfs_icache.h"
#include "xfs_refcount_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -34,6 +37,7 @@
#include "scrub/bitmap.h"
#include "scrub/off_bitmap.h"
#include "scrub/fsb_bitmap.h"
+#include "scrub/rtb_bitmap.h"
#include "scrub/reap.h"
/*
@@ -61,7 +65,10 @@ struct xrep_cow {
struct xoff_bitmap bad_fileoffs;
/* Bitmap of fsblocks that were removed from the CoW fork. */
- struct xfsb_bitmap old_cowfork_fsblocks;
+ union {
+ struct xfsb_bitmap old_cowfork_fsblocks;
+ struct xrtb_bitmap old_cowfork_rtblocks;
+ };
/* CoW fork mappings used to scan for bad CoW staging extents. */
struct xfs_bmbt_irec irec;
@@ -145,8 +152,7 @@ xrep_cow_mark_shared_staging(
xrep_cow_trim_refcount(xc, &rrec, rec);
return xrep_cow_mark_file_range(xc,
- xfs_agbno_to_fsb(to_perag(cur->bc_group),
- rrec.rc_startblock),
+ xfs_gbno_to_fsb(cur->bc_group, rrec.rc_startblock),
rrec.rc_blockcount);
}
@@ -177,9 +183,8 @@ xrep_cow_mark_missing_staging(
if (xc->next_bno >= rrec.rc_startblock)
goto next;
-
error = xrep_cow_mark_file_range(xc,
- xfs_agbno_to_fsb(to_perag(cur->bc_group), xc->next_bno),
+ xfs_gbno_to_fsb(cur->bc_group, xc->next_bno),
rrec.rc_startblock - xc->next_bno);
if (error)
return error;
@@ -222,8 +227,7 @@ xrep_cow_mark_missing_staging_rmap(
}
return xrep_cow_mark_file_range(xc,
- xfs_agbno_to_fsb(to_perag(cur->bc_group), rec_bno),
- rec_len);
+ xfs_gbno_to_fsb(cur->bc_group, rec_bno), rec_len);
}
/*
@@ -311,6 +315,92 @@ out_pag:
}
/*
+ * Find any part of the CoW fork mapping that isn't a single-owner CoW staging
+ * extent and mark the corresponding part of the file range in the bitmap.
+ */
+STATIC int
+xrep_cow_find_bad_rt(
+ struct xrep_cow *xc)
+{
+ struct xfs_refcount_irec rc_low = { 0 };
+ struct xfs_refcount_irec rc_high = { 0 };
+ struct xfs_rmap_irec rm_low = { 0 };
+ struct xfs_rmap_irec rm_high = { 0 };
+ struct xfs_scrub *sc = xc->sc;
+ struct xfs_rtgroup *rtg;
+ int error = 0;
+
+ xc->irec_startbno = xfs_rtb_to_rgbno(sc->mp, xc->irec.br_startblock);
+
+ rtg = xfs_rtgroup_get(sc->mp,
+ xfs_rtb_to_rgno(sc->mp, xc->irec.br_startblock));
+ if (!rtg)
+ return -EFSCORRUPTED;
+
+ error = xrep_rtgroup_init(sc, rtg, &sc->sr,
+ XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
+ if (error)
+ goto out_rtg;
+
+ /* Mark any CoW fork extents that are shared. */
+ rc_low.rc_startblock = xc->irec_startbno;
+ rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
+ rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_SHARED;
+ error = xfs_refcount_query_range(sc->sr.refc_cur, &rc_low, &rc_high,
+ xrep_cow_mark_shared_staging, xc);
+ if (error)
+ goto out_sr;
+
+ /* Make sure there are CoW staging extents for the whole mapping. */
+ rc_low.rc_startblock = xc->irec_startbno;
+ rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
+ rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_COW;
+ xc->next_bno = xc->irec_startbno;
+ error = xfs_refcount_query_range(sc->sr.refc_cur, &rc_low, &rc_high,
+ xrep_cow_mark_missing_staging, xc);
+ if (error)
+ goto out_sr;
+
+ if (xc->next_bno < xc->irec_startbno + xc->irec.br_blockcount) {
+ error = xrep_cow_mark_file_range(xc,
+ xfs_rgbno_to_rtb(rtg, xc->next_bno),
+ xc->irec_startbno + xc->irec.br_blockcount -
+ xc->next_bno);
+ if (error)
+ goto out_sr;
+ }
+
+ /* Mark any area that has an rmap that isn't a CoW staging extent. */
+ rm_low.rm_startblock = xc->irec_startbno;
+ memset(&rm_high, 0xFF, sizeof(rm_high));
+ rm_high.rm_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
+ error = xfs_rmap_query_range(sc->sr.rmap_cur, &rm_low, &rm_high,
+ xrep_cow_mark_missing_staging_rmap, xc);
+ if (error)
+ goto out_sr;
+
+ /*
+ * If userspace is forcing us to rebuild the CoW fork or someone
+ * turned on the debugging knob, replace everything in the
+ * CoW fork and then scan for staging extents in the refcountbt.
+ */
+ if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) ||
+ XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR)) {
+ error = xrep_cow_mark_file_range(xc, xc->irec.br_startblock,
+ xc->irec.br_blockcount);
+ if (error)
+ goto out_rtg;
+ }
+
+out_sr:
+ xchk_rtgroup_btcur_free(&sc->sr);
+ xchk_rtgroup_free(sc, &sc->sr);
+out_rtg:
+ xfs_rtgroup_put(rtg);
+ return error;
+}
+
+/*
* Allocate a replacement CoW staging extent of up to the given number of
* blocks, and fill out the mapping.
*/
@@ -343,7 +433,7 @@ xrep_cow_alloc(
if (args.fsbno == NULLFSBLOCK)
return -ENOSPC;
- xfs_refcount_alloc_cow_extent(sc->tp, args.fsbno, args.len);
+ xfs_refcount_alloc_cow_extent(sc->tp, false, args.fsbno, args.len);
repl->fsbno = args.fsbno;
repl->len = args.len;
@@ -351,6 +441,32 @@ xrep_cow_alloc(
}
/*
+ * Allocate a replacement rt CoW staging extent of up to the given number of
+ * blocks, and fill out the mapping.
+ */
+STATIC int
+xrep_cow_alloc_rt(
+ struct xfs_scrub *sc,
+ xfs_extlen_t maxlen,
+ struct xrep_cow_extent *repl)
+{
+ xfs_rtxlen_t maxrtx = xfs_rtb_to_rtx(sc->mp, maxlen);
+ int error;
+
+ error = xfs_trans_reserve_more(sc->tp, 0, maxrtx);
+ if (error)
+ return error;
+
+ error = xfs_rtallocate_rtgs(sc->tp, NULLRTBLOCK, 1, maxrtx, 1, false,
+ false, &repl->fsbno, &repl->len);
+ if (error)
+ return error;
+
+ xfs_refcount_alloc_cow_extent(sc->tp, true, repl->fsbno, repl->len);
+ return 0;
+}
+
+/*
* Look up the current CoW fork mapping so that we only allocate enough to
* replace a single mapping. If we don't find a mapping that covers the start
* of the file range, or we find a delalloc or written extent, something is
@@ -467,7 +583,10 @@ xrep_cow_replace_range(
*/
alloc_len = min_t(xfs_fileoff_t, XFS_MAX_BMBT_EXTLEN,
nextoff - startoff);
- error = xrep_cow_alloc(sc, alloc_len, &repl);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ error = xrep_cow_alloc_rt(sc, alloc_len, &repl);
+ else
+ error = xrep_cow_alloc(sc, alloc_len, &repl);
if (error)
return error;
@@ -483,8 +602,12 @@ xrep_cow_replace_range(
return error;
/* Note the old CoW staging extents; we'll reap them all later. */
- error = xfsb_bitmap_set(&xc->old_cowfork_fsblocks, got.br_startblock,
- repl.len);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ error = xrtb_bitmap_set(&xc->old_cowfork_rtblocks,
+ got.br_startblock, repl.len);
+ else
+ error = xfsb_bitmap_set(&xc->old_cowfork_fsblocks,
+ got.br_startblock, repl.len);
if (error)
return error;
@@ -540,8 +663,16 @@ xrep_bmap_cow(
if (!ifp)
return 0;
- /* realtime files aren't supported yet */
- if (XFS_IS_REALTIME_INODE(sc->ip))
+ /*
+ * Realtime files with large extent sizes are not supported because
+ * we could encounter a CoW mapping that has been partially written
+ * out *and* requires replacement, and there's no solution to that.
+ */
+ if (xfs_inode_has_bigrtalloc(sc->ip))
+ return -EOPNOTSUPP;
+
+ /* Metadata inodes aren't supposed to have data on the rt volume. */
+ if (xfs_is_metadir_inode(sc->ip) && XFS_IS_REALTIME_INODE(sc->ip))
return -EOPNOTSUPP;
/*
@@ -562,7 +693,10 @@ xrep_bmap_cow(
xc->sc = sc;
xoff_bitmap_init(&xc->bad_fileoffs);
- xfsb_bitmap_init(&xc->old_cowfork_fsblocks);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ xrtb_bitmap_init(&xc->old_cowfork_rtblocks);
+ else
+ xfsb_bitmap_init(&xc->old_cowfork_fsblocks);
for_each_xfs_iext(ifp, &icur, &xc->irec) {
if (xchk_should_terminate(sc, &error))
@@ -585,7 +719,10 @@ xrep_bmap_cow(
if (xfs_bmap_is_written_extent(&xc->irec))
continue;
- error = xrep_cow_find_bad(xc);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ error = xrep_cow_find_bad_rt(xc);
+ else
+ error = xrep_cow_find_bad(xc);
if (error)
goto out_bitmap;
}
@@ -600,13 +737,20 @@ xrep_bmap_cow(
* by the refcount btree, not the inode, so it is correct to treat them
* like inode metadata.
*/
- error = xrep_reap_fsblocks(sc, &xc->old_cowfork_fsblocks,
- &XFS_RMAP_OINFO_COW);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ error = xrep_reap_rtblocks(sc, &xc->old_cowfork_rtblocks,
+ &XFS_RMAP_OINFO_COW);
+ else
+ error = xrep_reap_fsblocks(sc, &xc->old_cowfork_fsblocks,
+ &XFS_RMAP_OINFO_COW);
if (error)
goto out_bitmap;
out_bitmap:
- xfsb_bitmap_destroy(&xc->old_cowfork_fsblocks);
+ if (XFS_IS_REALTIME_INODE(sc->ip))
+ xrtb_bitmap_destroy(&xc->old_cowfork_rtblocks);
+ else
+ xfsb_bitmap_destroy(&xc->old_cowfork_fsblocks);
xoff_bitmap_destroy(&xc->bad_fileoffs);
kfree(xc);
return error;
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index ccc6ca5934ca..3c0f25098b69 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -114,6 +114,8 @@ static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = {
[XFS_SCRUB_TYPE_DIRTREE] = { XHG_INO, XFS_SICK_INO_DIRTREE },
[XFS_SCRUB_TYPE_METAPATH] = { XHG_FS, XFS_SICK_FS_METAPATH },
[XFS_SCRUB_TYPE_RGSUPER] = { XHG_RTGROUP, XFS_SICK_RG_SUPER },
+ [XFS_SCRUB_TYPE_RTRMAPBT] = { XHG_RTGROUP, XFS_SICK_RG_RMAPBT },
+ [XFS_SCRUB_TYPE_RTREFCBT] = { XHG_RTGROUP, XFS_SICK_RG_REFCNTBT },
};
/* Return the health status mask for this scrub type. */
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 25ee66e7649d..db6edd5a5fe5 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -260,12 +260,7 @@ xchk_inode_extsize(
xchk_ino_set_warning(sc, ino);
}
-/*
- * Validate di_cowextsize hint.
- *
- * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
- * These functions must be kept in sync with each other.
- */
+/* Validate di_cowextsize hint. */
STATIC void
xchk_inode_cowextsize(
struct xfs_scrub *sc,
@@ -276,12 +271,25 @@ xchk_inode_cowextsize(
uint64_t flags2)
{
xfs_failaddr_t fa;
+ uint32_t value = be32_to_cpu(dip->di_cowextsize);
- fa = xfs_inode_validate_cowextsize(sc->mp,
- be32_to_cpu(dip->di_cowextsize), mode, flags,
- flags2);
+ fa = xfs_inode_validate_cowextsize(sc->mp, value, mode, flags, flags2);
if (fa)
xchk_ino_set_corrupt(sc, ino);
+
+ /*
+ * XFS allows a sysadmin to change the rt extent size when adding a rt
+ * section to a filesystem after formatting. If there are any
+ * directories with cowextsize and rtinherit set, the hint could become
+ * misaligned with the new rextsize. The verifier doesn't check this,
+ * because we allow rtinherit directories even without an rt device.
+ * Flag this as an administrative warning since we will clean this up
+ * eventually.
+ */
+ if ((flags & XFS_DIFLAG_RTINHERIT) &&
+ (flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+ value % sc->mp->m_sb.sb_rextsize > 0)
+ xchk_ino_set_warning(sc, ino);
}
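[Editorial aside, not part of the patch: a tiny userspace sketch of the misalignment rule above, using assumed sizes.]

#include <stdio.h>

int main(void)
{
	unsigned int rextsize = 16;	/* rt extent size after the admin grew the rt section (assumed) */
	unsigned int cowextsize = 24;	/* CoW hint inherited from an old rtinherit directory (assumed) */

	/* Same rule as the scrubber: a hint that is no longer a whole number
	 * of rt extents only draws a warning, not a corruption flag. */
	if (cowextsize % rextsize > 0)
		printf("cowextsize %u misaligned with rextsize %u\n",
		       cowextsize, rextsize);
	return 0;
}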
/* Make sure the di_flags make sense for the inode. */
@@ -360,8 +368,9 @@ xchk_inode_flags2(
if ((flags2 & XFS_DIFLAG2_REFLINK) && !S_ISREG(mode))
goto bad;
- /* realtime and reflink make no sense, currently */
- if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK))
+ /* realtime and reflink don't always go together */
+ if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK) &&
+ !xfs_has_rtreflink(mp))
goto bad;
/* no bigtime iflag without the bigtime feature */
@@ -502,6 +511,10 @@ xchk_dinode(
if (!S_ISREG(mode) && !S_ISDIR(mode))
xchk_ino_set_corrupt(sc, ino);
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ if (!S_ISREG(mode))
+ xchk_ino_set_corrupt(sc, ino);
+ break;
case XFS_DINODE_FMT_UUID:
default:
xchk_ino_set_corrupt(sc, ino);
@@ -686,15 +699,13 @@ xchk_inode_xref_bmap(
return;
/* Walk all the extents to check nextents/naextents/nblocks. */
- error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
- &nextents, &count);
+ error = xchk_inode_count_blocks(sc, XFS_DATA_FORK, &nextents, &count);
if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents < xfs_dfork_data_extents(dip))
xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
- error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
- &nextents, &acount);
+ error = xchk_inode_count_blocks(sc, XFS_ATTR_FORK, &nextents, &acount);
if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents != xfs_dfork_attr_extents(dip))
diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c
index 5a58ddd27bd2..2f641b6d663e 100644
--- a/fs/xfs/scrub/inode_repair.c
+++ b/fs/xfs/scrub/inode_repair.c
@@ -38,6 +38,9 @@
#include "xfs_log_priv.h"
#include "xfs_health.h"
#include "xfs_symlink_remote.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -562,8 +565,6 @@ xrep_dinode_flags(
flags2 |= XFS_DIFLAG2_REFLINK;
else
flags2 &= ~(XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE);
- if (flags & XFS_DIFLAG_REALTIME)
- flags2 &= ~XFS_DIFLAG2_REFLINK;
if (!xfs_has_bigtime(mp))
flags2 &= ~XFS_DIFLAG2_BIGTIME;
if (!xfs_has_large_extent_counts(mp))
@@ -773,17 +774,71 @@ xrep_dinode_count_ag_rmaps(
return error;
}
+/* Count extents and blocks for an inode given an rt rmap. */
+STATIC int
+xrep_dinode_walk_rtrmap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_inode *ri = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(ri->sc, &error))
+ return error;
+
+ /* We only care about this inode. */
+ if (rec->rm_owner != ri->sc->sm->sm_ino)
+ return 0;
+
+ if (rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))
+ return -EFSCORRUPTED;
+
+ ri->rt_blocks += rec->rm_blockcount;
+ ri->rt_extents++;
+ return 0;
+}
+
+/* Count extents and blocks for an inode from all realtime rmap data. */
+STATIC int
+xrep_dinode_count_rtgroup_rmaps(
+ struct xrep_inode *ri,
+ struct xfs_rtgroup *rtg)
+{
+ struct xfs_scrub *sc = ri->sc;
+ int error;
+
+ error = xrep_rtgroup_init(sc, rtg, &sc->sr, XFS_RTGLOCK_RMAP);
+ if (error)
+ return error;
+
+ error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_dinode_walk_rtrmap,
+ ri);
+ xchk_rtgroup_btcur_free(&sc->sr);
+ xchk_rtgroup_free(sc, &sc->sr);
+ return error;
+}
+
/* Count extents and blocks for a given inode from all rmap data. */
STATIC int
xrep_dinode_count_rmaps(
struct xrep_inode *ri)
{
struct xfs_perag *pag = NULL;
+ struct xfs_rtgroup *rtg = NULL;
int error;
- if (!xfs_has_rmapbt(ri->sc->mp) || xfs_has_realtime(ri->sc->mp))
+ if (!xfs_has_rmapbt(ri->sc->mp))
return -EOPNOTSUPP;
+ while ((rtg = xfs_rtgroup_next(ri->sc->mp, rtg))) {
+ error = xrep_dinode_count_rtgroup_rmaps(ri, rtg);
+ if (error) {
+ xfs_rtgroup_rele(rtg);
+ return error;
+ }
+ }
+
while ((pag = xfs_perag_next(ri->sc->mp, pag))) {
error = xrep_dinode_count_ag_rmaps(ri, pag);
if (error) {
@@ -888,6 +943,85 @@ xrep_dinode_bad_bmbt_fork(
return false;
}
+/* Return true if this rmap-format ifork looks like garbage. */
+STATIC bool
+xrep_dinode_bad_rtrmapbt_fork(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ unsigned int dfork_size)
+{
+ struct xfs_rtrmap_root *dfp;
+ unsigned int nrecs;
+ unsigned int level;
+
+ if (dfork_size < sizeof(struct xfs_rtrmap_root))
+ return true;
+
+ dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ nrecs = be16_to_cpu(dfp->bb_numrecs);
+ level = be16_to_cpu(dfp->bb_level);
+
+ if (level > sc->mp->m_rtrmap_maxlevels)
+ return true;
+ if (xfs_rtrmap_droot_space_calc(level, nrecs) > dfork_size)
+ return true;
+ if (level > 0 && nrecs == 0)
+ return true;
+
+ return false;
+}
+
+/* Return true if this refcount-format ifork looks like garbage. */
+STATIC bool
+xrep_dinode_bad_rtrefcountbt_fork(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ unsigned int dfork_size)
+{
+ struct xfs_rtrefcount_root *dfp;
+ unsigned int nrecs;
+ unsigned int level;
+
+ if (dfork_size < sizeof(struct xfs_rtrefcount_root))
+ return true;
+
+ dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ nrecs = be16_to_cpu(dfp->bb_numrecs);
+ level = be16_to_cpu(dfp->bb_level);
+
+ if (level > sc->mp->m_rtrefc_maxlevels)
+ return true;
+ if (xfs_rtrefcount_droot_space_calc(level, nrecs) > dfork_size)
+ return true;
+ if (level > 0 && nrecs == 0)
+ return true;
+
+ return false;
+}
+
+/* Check a metadata-btree fork. */
+STATIC bool
+xrep_dinode_bad_metabt_fork(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ unsigned int dfork_size,
+ int whichfork)
+{
+ if (whichfork != XFS_DATA_FORK)
+ return true;
+
+ switch (be16_to_cpu(dip->di_metatype)) {
+ case XFS_METAFILE_RTRMAP:
+ return xrep_dinode_bad_rtrmapbt_fork(sc, dip, dfork_size);
+ case XFS_METAFILE_RTREFCOUNT:
+ return xrep_dinode_bad_rtrefcountbt_fork(sc, dip, dfork_size);
+ default:
+ return true;
+ }
+
+ return false;
+}
+
/*
* Check the data fork for things that will fail the ifork verifiers or the
* ifork formatters.
@@ -968,6 +1102,11 @@ xrep_dinode_check_dfork(
XFS_DATA_FORK))
return true;
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ if (xrep_dinode_bad_metabt_fork(sc, dip, dfork_size,
+ XFS_DATA_FORK))
+ return true;
+ break;
default:
return true;
}
@@ -1088,6 +1227,11 @@ xrep_dinode_check_afork(
XFS_ATTR_FORK))
return true;
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ if (xrep_dinode_bad_metabt_fork(sc, dip, afork_size,
+ XFS_ATTR_FORK))
+ return true;
+ break;
default:
return true;
}
@@ -1135,6 +1279,8 @@ xrep_dinode_ensure_forkoff(
uint16_t mode)
{
struct xfs_bmdr_block *bmdr;
+ struct xfs_rtrmap_root *rmdr;
+ struct xfs_rtrefcount_root *rcdr;
struct xfs_scrub *sc = ri->sc;
xfs_extnum_t attr_extents, data_extents;
size_t bmdr_minsz = xfs_bmdr_space_calc(1);
@@ -1241,6 +1387,21 @@ xrep_dinode_ensure_forkoff(
bmdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
dfork_min = xfs_bmap_broot_space(sc->mp, bmdr);
break;
+ case XFS_DINODE_FMT_META_BTREE:
+ switch (be16_to_cpu(dip->di_metatype)) {
+ case XFS_METAFILE_RTRMAP:
+ rmdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ dfork_min = xfs_rtrmap_broot_space(sc->mp, rmdr);
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ rcdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
+ dfork_min = xfs_rtrefcount_broot_space(sc->mp, rcdr);
+ break;
+ default:
+ dfork_min = 0;
+ break;
+ }
+ break;
default:
dfork_min = 0;
break;
@@ -1500,8 +1661,7 @@ xrep_inode_blockcounts(
trace_xrep_inode_blockcounts(sc);
/* Set data fork counters from the data fork mappings. */
- error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
- &nextents, &count);
+ error = xchk_inode_count_blocks(sc, XFS_DATA_FORK, &nextents, &count);
if (error)
return error;
if (xfs_is_reflink_inode(sc->ip)) {
@@ -1525,8 +1685,8 @@ xrep_inode_blockcounts(
/* Set attr fork counters from the attr fork mappings. */
ifp = xfs_ifork_ptr(sc->ip, XFS_ATTR_FORK);
if (ifp) {
- error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
- &nextents, &acount);
+ error = xchk_inode_count_blocks(sc, XFS_ATTR_FORK, &nextents,
+ &acount);
if (error)
return error;
if (count >= sc->mp->m_sb.sb_dblocks)
@@ -1664,10 +1824,6 @@ xrep_inode_flags(
/* DAX only applies to files and dirs. */
if (!(S_ISREG(mode) || S_ISDIR(mode)))
sc->ip->i_diflags2 &= ~XFS_DIFLAG2_DAX;
-
- /* No reflink files on the realtime device. */
- if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME)
- sc->ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
}
/*
@@ -1783,6 +1939,20 @@ xrep_inode_pptr(
sizeof(struct xfs_attr_sf_hdr), true);
}
+/* Fix COW extent size hint problems. */
+STATIC void
+xrep_inode_cowextsize(
+ struct xfs_scrub *sc)
+{
+ /* Fix misaligned CoW extent size hints on a directory. */
+ if ((sc->ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (sc->ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+ sc->ip->i_extsize % sc->mp->m_sb.sb_rextsize > 0) {
+ sc->ip->i_cowextsize = 0;
+ sc->ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+ }
+}
+
/* Fix any irregularities in an inode that the verifiers don't catch. */
STATIC int
xrep_inode_problems(
@@ -1806,6 +1976,7 @@ xrep_inode_problems(
if (S_ISDIR(VFS_I(sc->ip)->i_mode))
xrep_inode_dir_size(sc);
xrep_inode_extsize(sc);
+ xrep_inode_cowextsize(sc);
trace_xrep_inode_fixed(sc);
xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
diff --git a/fs/xfs/scrub/metapath.c b/fs/xfs/scrub/metapath.c
index c678cba1ffc3..e21c16fbd15d 100644
--- a/fs/xfs/scrub/metapath.c
+++ b/fs/xfs/scrub/metapath.c
@@ -21,6 +21,8 @@
#include "xfs_trans_space.h"
#include "xfs_attr.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -246,6 +248,10 @@ xchk_setup_metapath(
return xchk_setup_metapath_dqinode(sc, XFS_DQTYPE_GROUP);
case XFS_SCRUB_METAPATH_PRJQUOTA:
return xchk_setup_metapath_dqinode(sc, XFS_DQTYPE_PROJ);
+ case XFS_SCRUB_METAPATH_RTRMAPBT:
+ return xchk_setup_metapath_rtginode(sc, XFS_RTGI_RMAP);
+ case XFS_SCRUB_METAPATH_RTREFCOUNTBT:
+ return xchk_setup_metapath_rtginode(sc, XFS_RTGI_REFCOUNT);
default:
return -ENOENT;
}
diff --git a/fs/xfs/scrub/newbt.c b/fs/xfs/scrub/newbt.c
index 70af27d98734..ac38f5843090 100644
--- a/fs/xfs/scrub/newbt.c
+++ b/fs/xfs/scrub/newbt.c
@@ -19,6 +19,8 @@
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
+#include "xfs_metafile.h"
+#include "xfs_quota.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -121,6 +123,43 @@ xrep_newbt_init_inode(
}
/*
+ * Initialize accounting resources for staging a new metadata inode btree.
+ * If the metadata file has a space reservation, the caller must adjust that
+ * reservation when committing the new ondisk btree.
+ */
+int
+xrep_newbt_init_metadir_inode(
+ struct xrep_newbt *xnr,
+ struct xfs_scrub *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_ifork *ifp;
+
+ ASSERT(xfs_is_metadir_inode(sc->ip));
+
+ xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK);
+
+ ifp = kmem_cache_zalloc(xfs_ifork_cache, XCHK_GFP_FLAGS);
+ if (!ifp)
+ return -ENOMEM;
+
+ /*
+ * Allocate new metadir btree blocks with XFS_AG_RESV_NONE because the
+ * inode metadata space reservations can only account allocated space
+ * to the i_nblocks. We do not want to change the inode core fields
+ * until we're ready to commit the new tree, so we allocate the blocks
+ * as if they were regular file blocks. This exposes us to a higher
+ * risk of the repair being cancelled due to ENOSPC.
+ */
+ xrep_newbt_init_ag(xnr, sc, &oinfo,
+ XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino),
+ XFS_AG_RESV_NONE);
+ xnr->ifake.if_fork = ifp;
+ xnr->ifake.if_fork_size = xfs_inode_fork_size(sc->ip, XFS_DATA_FORK);
+ return 0;
+}
+
+/*
* Initialize accounting resources for staging a new btree. Callers are
* expected to add their own reservations (and clean them up) manually.
*/
@@ -224,6 +263,7 @@ xrep_newbt_alloc_ag_blocks(
int error = 0;
ASSERT(sc->sa.pag != NULL);
+ ASSERT(xnr->resv != XFS_AG_RESV_METAFILE);
while (nr_blocks > 0) {
struct xfs_alloc_arg args = {
@@ -297,6 +337,8 @@ xrep_newbt_alloc_file_blocks(
struct xfs_mount *mp = sc->mp;
int error = 0;
+ ASSERT(xnr->resv != XFS_AG_RESV_METAFILE);
+
while (nr_blocks > 0) {
struct xfs_alloc_arg args = {
.tp = sc->tp,
diff --git a/fs/xfs/scrub/newbt.h b/fs/xfs/scrub/newbt.h
index 3d804d31af24..5ce785599287 100644
--- a/fs/xfs/scrub/newbt.h
+++ b/fs/xfs/scrub/newbt.h
@@ -63,6 +63,7 @@ void xrep_newbt_init_ag(struct xrep_newbt *xnr, struct xfs_scrub *sc,
enum xfs_ag_resv_type resv);
int xrep_newbt_init_inode(struct xrep_newbt *xnr, struct xfs_scrub *sc,
int whichfork, const struct xfs_owner_info *oinfo);
+int xrep_newbt_init_metadir_inode(struct xrep_newbt *xnr, struct xfs_scrub *sc);
int xrep_newbt_alloc_blocks(struct xrep_newbt *xnr, uint64_t nr_blocks);
int xrep_newbt_add_extent(struct xrep_newbt *xnr, struct xfs_perag *pag,
xfs_agblock_t agbno, xfs_extlen_t len);
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 183d531875ea..58d6d4ed2853 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -212,12 +212,18 @@ xchk_quota_item(
if (mp->m_sb.sb_dblocks < dq->q_blk.count)
xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset);
+ if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK,
+ offset);
} else {
if (mp->m_sb.sb_dblocks < dq->q_blk.count)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset);
+ if (mp->m_sb.sb_rblocks < dq->q_rtb.count)
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ offset);
}
- if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
+ if (dq->q_ino.count > fs_icount)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/*
diff --git a/fs/xfs/scrub/quota_repair.c b/fs/xfs/scrub/quota_repair.c
index cd51f10f2920..8f4c8d41f308 100644
--- a/fs/xfs/scrub/quota_repair.c
+++ b/fs/xfs/scrub/quota_repair.c
@@ -233,7 +233,7 @@ xrep_quota_item(
rqi->need_quotacheck = true;
dirty = true;
}
- if (dq->q_rtb.count > mp->m_sb.sb_rblocks) {
+ if (!xfs_has_reflink(mp) && dq->q_rtb.count > mp->m_sb.sb_rblocks) {
dq->q_rtb.reserved -= dq->q_rtb.count;
dq->q_rtb.reserved += mp->m_sb.sb_rblocks;
dq->q_rtb.count = mp->m_sb.sb_rblocks;
diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c
index 08230952053b..b32fb233cf84 100644
--- a/fs/xfs/scrub/reap.c
+++ b/fs/xfs/scrub/reap.c
@@ -33,6 +33,9 @@
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_defer.h"
+#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -40,6 +43,7 @@
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"
#include "scrub/fsb_bitmap.h"
+#include "scrub/rtb_bitmap.h"
#include "scrub/reap.h"
/*
@@ -310,7 +314,7 @@ xreap_agextent_binval(
}
out:
- trace_xreap_agextent_binval(sc->sa.pag, agbno, *aglenp);
+ trace_xreap_agextent_binval(pag_group(sc->sa.pag), agbno, *aglenp);
}
/*
@@ -369,7 +373,8 @@ xreap_agextent_select(
out_found:
*aglenp = len;
- trace_xreap_agextent_select(sc->sa.pag, agbno, len, *crosslinked);
+ trace_xreap_agextent_select(pag_group(sc->sa.pag), agbno, len,
+ *crosslinked);
out_cur:
xfs_btree_del_cursor(cur, error);
return error;
@@ -390,6 +395,8 @@ xreap_agextent_iter(
xfs_fsblock_t fsbno;
int error = 0;
+ ASSERT(rs->resv != XFS_AG_RESV_METAFILE);
+
fsbno = xfs_agbno_to_fsb(sc->sa.pag, agbno);
/*
@@ -406,7 +413,8 @@ xreap_agextent_iter(
* to run xfs_repair.
*/
if (crosslinked) {
- trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);
+ trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag), agbno,
+ *aglenp);
rs->force_roll = true;
@@ -416,7 +424,8 @@ xreap_agextent_iter(
* records from the refcountbt, which will remove the
* rmap record as well.
*/
- xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
+ xfs_refcount_free_cow_extent(sc->tp, false, fsbno,
+ *aglenp);
return 0;
}
@@ -424,7 +433,7 @@ xreap_agextent_iter(
*aglenp, rs->oinfo);
}
- trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp);
+ trace_xreap_dispose_free_extent(pag_group(sc->sa.pag), agbno, *aglenp);
/*
* Invalidate as many buffers as we can, starting at agbno. If this
@@ -448,7 +457,7 @@ xreap_agextent_iter(
if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
ASSERT(rs->resv == XFS_AG_RESV_NONE);
- xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
+ xfs_refcount_free_cow_extent(sc->tp, false, fsbno, *aglenp);
error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL,
rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
if (error)
@@ -675,6 +684,263 @@ xrep_reap_fsblocks(
return 0;
}
+#ifdef CONFIG_XFS_RT
+/*
+ * Figure out the longest run of blocks that we can dispose of with a single
+ * call. Cross-linked blocks should have their reverse mappings removed, but
+ * single-owner extents can be freed. Units are rt blocks, not rt extents.
+ */
+STATIC int
+xreap_rgextent_select(
+ struct xreap_state *rs,
+ xfs_rgblock_t rgbno,
+ xfs_rgblock_t rgbno_next,
+ bool *crosslinked,
+ xfs_extlen_t *rglenp)
+{
+ struct xfs_scrub *sc = rs->sc;
+ struct xfs_btree_cur *cur;
+ xfs_rgblock_t bno = rgbno + 1;
+ xfs_extlen_t len = 1;
+ int error;
+
+ /*
+ * Determine if there are any other rmap records covering the first
+ * block of this extent. If so, the block is crosslinked.
+ */
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
+ error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo,
+ crosslinked);
+ if (error)
+ goto out_cur;
+
+ /*
+ * Figure out how many of the subsequent blocks have the same crosslink
+ * status.
+ */
+ while (bno < rgbno_next) {
+ bool also_crosslinked;
+
+ error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
+ &also_crosslinked);
+ if (error)
+ goto out_cur;
+
+ if (*crosslinked != also_crosslinked)
+ break;
+
+ len++;
+ bno++;
+ }
+
+ *rglenp = len;
+ trace_xreap_agextent_select(rtg_group(sc->sr.rtg), rgbno, len,
+ *crosslinked);
+out_cur:
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
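[Editorial aside, not part of the patch: the selection loop above extends a run of blocks for as long as their crosslink status matches the first block. A minimal sketch of that run-grouping idea over a plain array of flags, with no XFS calls.]

#include <stdbool.h>
#include <stdio.h>

/* Return the length of the run starting at @start whose blocks all share the
 * same crosslink status, mirroring how xreap_rgextent_select() works. */
static unsigned int select_run(const bool *crosslinked, unsigned int start,
			       unsigned int end, bool *status)
{
	unsigned int bno = start + 1;

	*status = crosslinked[start];
	while (bno < end && crosslinked[bno] == *status)
		bno++;
	return bno - start;
}

int main(void)
{
	bool map[] = { false, false, true, true, true, false };
	unsigned int bno = 0, nblocks = 6;

	while (bno < nblocks) {
		bool status;
		unsigned int len = select_run(map, bno, nblocks, &status);

		printf("run at %u, len %u, %s\n", bno, len,
		       status ? "unmap only" : "free it");
		bno += len;
	}
	return 0;
}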
+
+/*
+ * Dispose of as much of the beginning of this rtgroup extent as possible.
+ * The number of blocks disposed of will be returned in @rglenp.
+ */
+STATIC int
+xreap_rgextent_iter(
+ struct xreap_state *rs,
+ xfs_rgblock_t rgbno,
+ xfs_extlen_t *rglenp,
+ bool crosslinked)
+{
+ struct xfs_scrub *sc = rs->sc;
+ xfs_rtblock_t rtbno;
+ int error;
+
+ /*
+ * The only caller so far is CoW fork repair, so we only know how to
+ * unlink or free CoW staging extents. Here we don't have to worry
+ * about invalidating buffers!
+ */
+ if (rs->oinfo != &XFS_RMAP_OINFO_COW) {
+ ASSERT(rs->oinfo == &XFS_RMAP_OINFO_COW);
+ return -EFSCORRUPTED;
+ }
+ ASSERT(rs->resv == XFS_AG_RESV_NONE);
+
+ rtbno = xfs_rgbno_to_rtb(sc->sr.rtg, rgbno);
+
+ /*
+ * If there are other rmappings, this block is cross linked and must
+ * not be freed. Remove the forward and reverse mapping and move on.
+ */
+ if (crosslinked) {
+ trace_xreap_dispose_unmap_extent(rtg_group(sc->sr.rtg), rgbno,
+ *rglenp);
+
+ xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
+ rs->deferred++;
+ return 0;
+ }
+
+ trace_xreap_dispose_free_extent(rtg_group(sc->sr.rtg), rgbno, *rglenp);
+
+ /*
+ * The CoW staging extent is not crosslinked. Use deferred work items
+ * to remove the refcountbt records (which removes the rmap records)
+ * and free the extent. We're not worried about the system going down
+ * here because log recovery walks the refcount btree to clean out the
+ * CoW staging extents.
+ */
+ xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
+ error = xfs_free_extent_later(sc->tp, rtbno, *rglenp, NULL,
+ rs->resv,
+ XFS_FREE_EXTENT_REALTIME |
+ XFS_FREE_EXTENT_SKIP_DISCARD);
+ if (error)
+ return error;
+
+ rs->deferred++;
+ return 0;
+}
+
+#define XREAP_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP | \
+ XFS_RTGLOCK_RMAP | \
+ XFS_RTGLOCK_REFCOUNT)
+
+/*
+ * Break a rt file metadata extent into sub-extents by fate (crosslinked, not
+ * crosslinked), and dispose of each sub-extent separately. The extent must
+ * be aligned to a realtime extent.
+ */
+STATIC int
+xreap_rtmeta_extent(
+ uint64_t rtbno,
+ uint64_t len,
+ void *priv)
+{
+ struct xreap_state *rs = priv;
+ struct xfs_scrub *sc = rs->sc;
+ xfs_rgblock_t rgbno = xfs_rtb_to_rgbno(sc->mp, rtbno);
+ xfs_rgblock_t rgbno_next = rgbno + len;
+ int error = 0;
+
+ ASSERT(sc->ip != NULL);
+ ASSERT(!sc->sr.rtg);
+
+ /*
+ * We're reaping blocks after repairing file metadata, which means that
+ * we have to init the xchk_rt structure ourselves.
+ */
+ sc->sr.rtg = xfs_rtgroup_get(sc->mp, xfs_rtb_to_rgno(sc->mp, rtbno));
+ if (!sc->sr.rtg)
+ return -EFSCORRUPTED;
+
+ xfs_rtgroup_lock(sc->sr.rtg, XREAP_RTGLOCK_ALL);
+
+ while (rgbno < rgbno_next) {
+ xfs_extlen_t rglen;
+ bool crosslinked;
+
+ error = xreap_rgextent_select(rs, rgbno, rgbno_next,
+ &crosslinked, &rglen);
+ if (error)
+ goto out_unlock;
+
+ error = xreap_rgextent_iter(rs, rgbno, &rglen, crosslinked);
+ if (error)
+ goto out_unlock;
+
+ if (xreap_want_defer_finish(rs)) {
+ error = xfs_defer_finish(&sc->tp);
+ if (error)
+ goto out_unlock;
+ xreap_defer_finish_reset(rs);
+ } else if (xreap_want_roll(rs)) {
+ error = xfs_trans_roll_inode(&sc->tp, sc->ip);
+ if (error)
+ goto out_unlock;
+ xreap_reset(rs);
+ }
+
+ rgbno += rglen;
+ }
+
+out_unlock:
+ xfs_rtgroup_unlock(sc->sr.rtg, XREAP_RTGLOCK_ALL);
+ xfs_rtgroup_put(sc->sr.rtg);
+ sc->sr.rtg = NULL;
+ return error;
+}
+
+/*
+ * Dispose of every block of every rt metadata extent in the bitmap.
+ * Do not use this to dispose of the mappings in an ondisk inode fork.
+ */
+int
+xrep_reap_rtblocks(
+ struct xfs_scrub *sc,
+ struct xrtb_bitmap *bitmap,
+ const struct xfs_owner_info *oinfo)
+{
+ struct xreap_state rs = {
+ .sc = sc,
+ .oinfo = oinfo,
+ .resv = XFS_AG_RESV_NONE,
+ };
+ int error;
+
+ ASSERT(xfs_has_rmapbt(sc->mp));
+ ASSERT(sc->ip != NULL);
+
+ error = xrtb_bitmap_walk(bitmap, xreap_rtmeta_extent, &rs);
+ if (error)
+ return error;
+
+ if (xreap_dirty(&rs))
+ return xrep_defer_finish(sc);
+
+ return 0;
+}
+#endif /* CONFIG_XFS_RT */
+
+/*
+ * Dispose of every block of an old metadata btree that used to be rooted in a
+ * metadata directory file.
+ */
+int
+xrep_reap_metadir_fsblocks(
+ struct xfs_scrub *sc,
+ struct xfsb_bitmap *bitmap)
+{
+ /*
+ * Reap old metadir btree blocks with XFS_AG_RESV_NONE because the old
+ * blocks are no longer mapped by the inode, and inode metadata space
+ * reservations can only account freed space to the i_nblocks.
+ */
+ struct xfs_owner_info oinfo;
+ struct xreap_state rs = {
+ .sc = sc,
+ .oinfo = &oinfo,
+ .resv = XFS_AG_RESV_NONE,
+ };
+ int error;
+
+ ASSERT(xfs_has_rmapbt(sc->mp));
+ ASSERT(sc->ip != NULL);
+ ASSERT(xfs_is_metadir_inode(sc->ip));
+
+ xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK);
+
+ error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
+ if (error)
+ return error;
+
+ if (xreap_dirty(&rs))
+ return xrep_defer_finish(sc);
+
+ return 0;
+}
+
/*
* Metadata files are not supposed to share blocks with anything else.
* If blocks are shared, we remove the reverse mapping (thus reducing the
@@ -729,7 +995,8 @@ xreap_bmapi_select(
}
imap->br_blockcount = len;
- trace_xreap_bmapi_select(sc->sa.pag, agbno, len, *crosslinked);
+ trace_xreap_bmapi_select(pag_group(sc->sa.pag), agbno, len,
+ *crosslinked);
out_cur:
xfs_btree_del_cursor(cur, error);
return error;
@@ -868,7 +1135,8 @@ xreap_bmapi_binval(
}
out:
- trace_xreap_bmapi_binval(sc->sa.pag, agbno, imap->br_blockcount);
+ trace_xreap_bmapi_binval(pag_group(sc->sa.pag), agbno,
+ imap->br_blockcount);
return 0;
}
@@ -895,7 +1163,7 @@ xrep_reap_bmapi_iter(
* anybody else who thinks they own the block, even though that
* runs the risk of stale buffer warnings in the future.
*/
- trace_xreap_dispose_unmap_extent(sc->sa.pag,
+ trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag),
XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
imap->br_blockcount);
@@ -918,7 +1186,7 @@ xrep_reap_bmapi_iter(
* by a block starting before the first block of the extent but overlap
* anyway.
*/
- trace_xreap_dispose_free_extent(sc->sa.pag,
+ trace_xreap_dispose_free_extent(pag_group(sc->sa.pag),
XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
imap->br_blockcount);
diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h
index 3f2f1775e29d..4c8f62701fb3 100644
--- a/fs/xfs/scrub/reap.h
+++ b/fs/xfs/scrub/reap.h
@@ -14,6 +14,15 @@ int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap,
int xrep_reap_fsblocks(struct xfs_scrub *sc, struct xfsb_bitmap *bitmap,
const struct xfs_owner_info *oinfo);
int xrep_reap_ifork(struct xfs_scrub *sc, struct xfs_inode *ip, int whichfork);
+int xrep_reap_metadir_fsblocks(struct xfs_scrub *sc,
+ struct xfsb_bitmap *bitmap);
+
+#ifdef CONFIG_XFS_RT
+int xrep_reap_rtblocks(struct xfs_scrub *sc, struct xrtb_bitmap *bitmap,
+ const struct xfs_owner_info *oinfo);
+#else
+# define xrep_reap_rtblocks(...) (-EOPNOTSUPP)
+#endif /* CONFIG_XFS_RT */
/* Buffer cache scan context. */
struct xrep_bufscan {
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 1c5e45cc6419..d46528023015 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -421,7 +421,7 @@ xchk_refcount_mergeable(
if (r1->rc_refcount != r2->rc_refcount)
return false;
if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
- MAXREFCEXTLEN)
+ XFS_REFC_LEN_MAX)
return false;
return true;
diff --git a/fs/xfs/scrub/refcount_repair.c b/fs/xfs/scrub/refcount_repair.c
index 4e572b81c986..9c8cb5332da0 100644
--- a/fs/xfs/scrub/refcount_repair.c
+++ b/fs/xfs/scrub/refcount_repair.c
@@ -183,13 +183,13 @@ xrep_refc_stash(
if (xchk_should_terminate(sc, &error))
return error;
- irec.rc_refcount = min_t(uint64_t, MAXREFCOUNT, refcount);
+ irec.rc_refcount = min_t(uint64_t, XFS_REFC_REFCOUNT_MAX, refcount);
error = xrep_refc_check_ext(rr->sc, &irec);
if (error)
return error;
- trace_xrep_refc_found(sc->sa.pag, &irec);
+ trace_xrep_refc_found(pag_group(sc->sa.pag), &irec);
return xfarray_append(rr->refcount_records, &irec);
}
@@ -422,7 +422,7 @@ xrep_refc_find_refcounts(
/*
* Set up a bag to store all the rmap records that we're tracking to
* generate a reference count record. If the size of the bag exceeds
- * MAXREFCOUNT, we clamp rc_refcount.
+ * XFS_REFC_REFCOUNT_MAX, we clamp rc_refcount.
*/
error = rcbag_init(sc->mp, sc->xmbtp, &rcstack);
if (error)
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 91c8bc055a4f..3b5288d3ef4e 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -37,6 +37,12 @@
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_dir2.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtalloc.h"
+#include "xfs_metafile.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -62,6 +68,7 @@ xrep_attempt(
trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);
xchk_ag_btcur_free(&sc->sa);
+ xchk_rtgroup_btcur_free(&sc->sr);
/* Repair whatever's broken. */
ASSERT(sc->ops->repair);
@@ -378,6 +385,41 @@ xrep_calc_ag_resblks(
return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
+#ifdef CONFIG_XFS_RT
+/*
+ * Figure out how many blocks to reserve for a rtgroup repair. We calculate
+ * the worst case estimate for the number of blocks we'd need to rebuild one of
+ * any type of per-rtgroup btree.
+ */
+xfs_extlen_t
+xrep_calc_rtgroup_resblks(
+ struct xfs_scrub *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_scrub_metadata *sm = sc->sm;
+ uint64_t usedlen;
+ xfs_extlen_t rmapbt_sz = 0;
+
+ if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
+ return 0;
+ if (!xfs_has_rtgroups(mp)) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ usedlen = xfs_rtbxlen_to_blen(mp, xfs_rtgroup_extents(mp, sm->sm_agno));
+ ASSERT(usedlen <= XFS_MAX_RGBLOCKS);
+
+ if (xfs_has_rmapbt(mp))
+ rmapbt_sz = xfs_rtrmapbt_calc_size(mp, usedlen);
+
+ trace_xrep_calc_rtgroup_resblks_btsize(mp, sm->sm_agno, usedlen,
+ rmapbt_sz);
+
+ return rmapbt_sz;
+}
+#endif /* CONFIG_XFS_RT */
+
/*
* Reconstructing per-AG Btrees
*
@@ -954,6 +996,27 @@ xrep_ag_init(
}
#ifdef CONFIG_XFS_RT
+/* Initialize all the btree cursors for a RT repair. */
+void
+xrep_rtgroup_btcur_init(
+ struct xfs_scrub *sc,
+ struct xchk_rt *sr)
+{
+ struct xfs_mount *mp = sc->mp;
+
+ ASSERT(sr->rtg != NULL);
+
+ if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTRMAPBT &&
+ (sr->rtlock_flags & XFS_RTGLOCK_RMAP) &&
+ xfs_has_rtrmapbt(mp))
+ sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);
+
+ if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTREFCBT &&
+ (sr->rtlock_flags & XFS_RTGLOCK_REFCOUNT) &&
+ xfs_has_rtreflink(mp))
+ sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);
+}
+
/*
* Given a reference to a rtgroup structure, lock rtgroup btree inodes and
* create btree cursors. Must only be called to repair a regular rt file.
@@ -972,6 +1035,33 @@ xrep_rtgroup_init(
/* Grab our own passive reference from the caller's ref. */
sr->rtg = xfs_rtgroup_hold(rtg);
+ xrep_rtgroup_btcur_init(sc, sr);
+ return 0;
+}
+
+/* Ensure that all rt blocks in the given range are not marked free. */
+int
+xrep_require_rtext_inuse(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t rgbno,
+ xfs_filblks_t len)
+{
+ struct xfs_mount *mp = sc->mp;
+ xfs_rtxnum_t startrtx;
+ xfs_rtxnum_t endrtx;
+ bool is_free = false;
+ int error;
+
+ startrtx = xfs_rgbno_to_rtx(mp, rgbno);
+ endrtx = xfs_rgbno_to_rtx(mp, rgbno + len - 1);
+
+ error = xfs_rtalloc_extent_is_free(sc->sr.rtg, sc->tp, startrtx,
+ endrtx - startrtx + 1, &is_free);
+ if (error)
+ return error;
+ if (is_free)
+ return -EFSCORRUPTED;
+
return 0;
}
#endif /* CONFIG_XFS_RT */
@@ -1237,3 +1327,110 @@ xrep_buf_verify_struct(
return fa == NULL;
}
+
+/* Check the sanity of a rmap record for a metadata btree inode. */
+int
+xrep_check_ino_btree_mapping(
+ struct xfs_scrub *sc,
+ const struct xfs_rmap_irec *rec)
+{
+ enum xbtree_recpacking outcome;
+ int error;
+
+ /*
+ * Metadata btree inodes never have extended attributes, and all blocks
+ * should have the bmbt block flag set.
+ */
+ if ((rec->rm_flags & XFS_RMAP_ATTR_FORK) ||
+ !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))
+ return -EFSCORRUPTED;
+
+ /* Make sure the block is within the AG. */
+ if (!xfs_verify_agbext(sc->sa.pag, rec->rm_startblock,
+ rec->rm_blockcount))
+ return -EFSCORRUPTED;
+
+ /* Make sure this isn't free space. */
+ error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock,
+ rec->rm_blockcount, &outcome);
+ if (error)
+ return error;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/*
+ * Reset the block count of the inode being repaired, and adjust the dquot
+ * block usage to match. The inode must not have an xattr fork.
+ */
+void
+xrep_inode_set_nblocks(
+ struct xfs_scrub *sc,
+ int64_t new_blocks)
+{
+ int64_t delta = new_blocks - sc->ip->i_nblocks;
+
+ sc->ip->i_nblocks = new_blocks;
+
+ xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
+ if (delta != 0)
+ xfs_trans_mod_dquot_byino(sc->tp, sc->ip, XFS_TRANS_DQ_BCOUNT,
+ delta);
+}
+
+/* Reset the block reservation for a metadata inode. */
+int
+xrep_reset_metafile_resv(
+ struct xfs_scrub *sc)
+{
+ struct xfs_inode *ip = sc->ip;
+ int64_t delta;
+ int error;
+
+ delta = ip->i_nblocks + ip->i_delayed_blks - ip->i_meta_resv_asked;
+ if (delta == 0)
+ return 0;
+
+ /*
+ * Too many blocks have been reserved, transfer some from the incore
+ * reservation back to the filesystem.
+ */
+ if (delta > 0) {
+ int64_t give_back;
+
+ give_back = min_t(uint64_t, delta, ip->i_delayed_blks);
+ if (give_back > 0) {
+ xfs_mod_delalloc(ip, 0, -give_back);
+ xfs_add_fdblocks(ip->i_mount, give_back);
+ ip->i_delayed_blks -= give_back;
+ }
+
+ return 0;
+ }
+
+ /*
+ * Not enough reservation; try to take some blocks from the filesystem
+ * to the metadata inode. @delta is negative here, so invert the sign.
+ */
+ delta = -delta;
+ error = xfs_dec_fdblocks(sc->mp, delta, true);
+ while (error == -ENOSPC) {
+ delta--;
+ if (delta == 0) {
+ xfs_warn(sc->mp,
+"Insufficient free space to reset space reservation for inode 0x%llx after repair.",
+ ip->i_ino);
+ return 0;
+ }
+ error = xfs_dec_fdblocks(sc->mp, delta, true);
+ }
+ if (error)
+ return error;
+
+ xfs_mod_delalloc(ip, 0, delta);
+ ip->i_delayed_blks += delta;
+ return 0;
+}
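+
+/*
+ * A worked example of the adjustment above (illustrative numbers only, not
+ * part of the original patch): if i_nblocks + i_delayed_blks == 120 but
+ * i_meta_resv_asked == 100, then delta == 20 and up to
+ * min(20, i_delayed_blks) unused reserved blocks are handed back to
+ * fdblocks.  If the sum is only 80, delta == -20, so we try to pull 20
+ * blocks out of fdblocks, retrying with one block fewer each time
+ * xfs_dec_fdblocks() returns -ENOSPC, and settle for a warning once the
+ * shortfall cannot be filled at all.
+ */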
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index b649da1a93eb..823c00d1a502 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -50,7 +50,9 @@ xrep_trans_commit(
struct xbitmap;
struct xagb_bitmap;
+struct xrgb_bitmap;
struct xfsb_bitmap;
+struct xrtb_bitmap;
int xrep_fix_freelist(struct xfs_scrub *sc, int alloc_flags);
@@ -97,6 +99,8 @@ int xrep_setup_parent(struct xfs_scrub *sc);
int xrep_setup_nlinks(struct xfs_scrub *sc);
int xrep_setup_symlink(struct xfs_scrub *sc, unsigned int *resblks);
int xrep_setup_dirtree(struct xfs_scrub *sc);
+int xrep_setup_rtrmapbt(struct xfs_scrub *sc);
+int xrep_setup_rtrefcountbt(struct xfs_scrub *sc);
/* Repair setup functions */
int xrep_setup_ag_allocbt(struct xfs_scrub *sc);
@@ -110,10 +114,18 @@ int xrep_ag_init(struct xfs_scrub *sc, struct xfs_perag *pag,
#ifdef CONFIG_XFS_RT
int xrep_rtgroup_init(struct xfs_scrub *sc, struct xfs_rtgroup *rtg,
struct xchk_rt *sr, unsigned int rtglock_flags);
+void xrep_rtgroup_btcur_init(struct xfs_scrub *sc, struct xchk_rt *sr);
+int xrep_require_rtext_inuse(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_filblks_t len);
+xfs_extlen_t xrep_calc_rtgroup_resblks(struct xfs_scrub *sc);
#else
# define xrep_rtgroup_init(sc, rtg, sr, lockflags) (-ENOSYS)
+# define xrep_calc_rtgroup_resblks(sc) (0)
#endif /* CONFIG_XFS_RT */
+int xrep_check_ino_btree_mapping(struct xfs_scrub *sc,
+ const struct xfs_rmap_irec *rec);
+
/* Metadata revalidators */
int xrep_revalidate_allocbt(struct xfs_scrub *sc);
@@ -147,10 +159,14 @@ int xrep_metapath(struct xfs_scrub *sc);
int xrep_rtbitmap(struct xfs_scrub *sc);
int xrep_rtsummary(struct xfs_scrub *sc);
int xrep_rgsuperblock(struct xfs_scrub *sc);
+int xrep_rtrmapbt(struct xfs_scrub *sc);
+int xrep_rtrefcountbt(struct xfs_scrub *sc);
#else
# define xrep_rtbitmap xrep_notsupported
# define xrep_rtsummary xrep_notsupported
# define xrep_rgsuperblock xrep_notsupported
+# define xrep_rtrmapbt xrep_notsupported
+# define xrep_rtrefcountbt xrep_notsupported
#endif /* CONFIG_XFS_RT */
#ifdef CONFIG_XFS_QUOTA
@@ -169,6 +185,8 @@ int xrep_trans_alloc_hook_dummy(struct xfs_mount *mp, void **cookiep,
void xrep_trans_cancel_hook_dummy(void **cookiep, struct xfs_trans *tp);
bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+void xrep_inode_set_nblocks(struct xfs_scrub *sc, int64_t new_blocks);
+int xrep_reset_metafile_resv(struct xfs_scrub *sc);
#else
@@ -192,6 +210,8 @@ xrep_calc_ag_resblks(
return 0;
}
+#define xrep_calc_rtgroup_resblks xrep_calc_ag_resblks
+
static inline int
xrep_reset_perag_resv(
struct xfs_scrub *sc)
@@ -219,6 +239,8 @@ xrep_setup_nothing(
#define xrep_setup_nlinks xrep_setup_nothing
#define xrep_setup_dirtree xrep_setup_nothing
#define xrep_setup_metapath xrep_setup_nothing
+#define xrep_setup_rtrmapbt xrep_setup_nothing
+#define xrep_setup_rtrefcountbt xrep_setup_nothing
#define xrep_setup_inode(sc, imap) ((void)0)
@@ -256,6 +278,8 @@ static inline int xrep_setup_symlink(struct xfs_scrub *sc, unsigned int *x)
#define xrep_dirtree xrep_notsupported
#define xrep_metapath xrep_notsupported
#define xrep_rgsuperblock xrep_notsupported
+#define xrep_rtrmapbt xrep_notsupported
+#define xrep_rtrefcountbt xrep_notsupported
#endif /* CONFIG_XFS_ONLINE_REPAIR */
diff --git a/fs/xfs/scrub/rgb_bitmap.h b/fs/xfs/scrub/rgb_bitmap.h
new file mode 100644
index 000000000000..4c3126b66dcb
--- /dev/null
+++ b/fs/xfs/scrub/rgb_bitmap.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2020-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_SCRUB_RGB_BITMAP_H__
+#define __XFS_SCRUB_RGB_BITMAP_H__
+
+/* Bitmaps, but type-checked for xfs_rgblock_t */
+
+struct xrgb_bitmap {
+ struct xbitmap32 rgbitmap;
+};
+
+static inline void xrgb_bitmap_init(struct xrgb_bitmap *bitmap)
+{
+ xbitmap32_init(&bitmap->rgbitmap);
+}
+
+static inline void xrgb_bitmap_destroy(struct xrgb_bitmap *bitmap)
+{
+ xbitmap32_destroy(&bitmap->rgbitmap);
+}
+
+static inline int xrgb_bitmap_set(struct xrgb_bitmap *bitmap,
+ xfs_rgblock_t start, xfs_extlen_t len)
+{
+ return xbitmap32_set(&bitmap->rgbitmap, start, len);
+}
+
+static inline int xrgb_bitmap_walk(struct xrgb_bitmap *bitmap,
+ xbitmap32_walk_fn fn, void *priv)
+{
+ return xbitmap32_walk(&bitmap->rgbitmap, fn, priv);
+}
+
+#endif /* __XFS_SCRUB_RGB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/rgsuper.c b/fs/xfs/scrub/rgsuper.c
index 463b3573bb76..d189732d0e24 100644
--- a/fs/xfs/scrub/rgsuper.c
+++ b/fs/xfs/scrub/rgsuper.c
@@ -13,6 +13,7 @@
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
+#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
@@ -34,6 +35,7 @@ xchk_rgsuperblock_xref(
return;
xchk_xref_is_used_rt_space(sc, xfs_rgbno_to_rtb(sc->sr.rtg, 0), 1);
+ xchk_xref_is_only_rt_owned_by(sc, 0, 1, &XFS_RMAP_OINFO_FS);
}
int
@@ -61,7 +63,9 @@ xchk_rgsuperblock(
if (!xchk_xref_process_error(sc, 0, 0, &error))
return error;
- xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP_SHARED);
+ error = xchk_rtgroup_lock(sc, &sc->sr, XFS_RTGLOCK_BITMAP_SHARED);
+ if (error)
+ return error;
/*
* Since we already validated the rt superblock at mount time, we don't
diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c
index a0a227d183d2..f5f73078ffe2 100644
--- a/fs/xfs/scrub/rmap_repair.c
+++ b/fs/xfs/scrub/rmap_repair.c
@@ -31,6 +31,9 @@
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_ag.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -499,6 +502,69 @@ xrep_rmap_scan_iext(
return xrep_rmap_stash_accumulated(rf);
}
+static int
+xrep_rmap_scan_meta_btree(
+ struct xrep_rmap_ifork *rf,
+ struct xfs_inode *ip)
+{
+ struct xfs_scrub *sc = rf->rr->sc;
+ struct xfs_rtgroup *rtg = NULL;
+ struct xfs_btree_cur *cur = NULL;
+ enum xfs_rtg_inodes type;
+ int error;
+
+ if (rf->whichfork != XFS_DATA_FORK)
+ return -EFSCORRUPTED;
+
+ switch (ip->i_metatype) {
+ case XFS_METAFILE_RTRMAP:
+ type = XFS_RTGI_RMAP;
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ type = XFS_RTGI_REFCOUNT;
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) {
+ if (ip == rtg->rtg_inodes[type])
+ goto found;
+ }
+
+ /*
+ * We should never find an rt metadata btree inode that isn't
+ * associated with an rtgroup yet has ondisk blocks allocated to it.
+ */
+ if (ip->i_nblocks) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
+
+found:
+ switch (ip->i_metatype) {
+ case XFS_METAFILE_RTRMAP:
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
+ break;
+ case XFS_METAFILE_RTREFCOUNT:
+ cur = xfs_rtrefcountbt_init_cursor(sc->tp, rtg);
+ break;
+ default:
+ ASSERT(0);
+ error = -EFSCORRUPTED;
+ goto out_rtg;
+ }
+
+ error = xrep_rmap_scan_iroot_btree(rf, cur);
+ xfs_btree_del_cursor(cur, error);
+out_rtg:
+ xfs_rtgroup_rele(rtg);
+ return error;
+}
+
/* Find all the extents from a given AG in an inode fork. */
STATIC int
xrep_rmap_scan_ifork(
@@ -512,14 +578,14 @@ xrep_rmap_scan_ifork(
.whichfork = whichfork,
};
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
+ bool mappings_done;
int error = 0;
if (!ifp)
return 0;
- if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
- bool mappings_done;
-
+ switch (ifp->if_format) {
+ case XFS_DINODE_FMT_BTREE:
/*
* Scan the bmap btree for data device mappings. This includes
* the btree blocks themselves, even if this is a realtime
@@ -528,15 +594,18 @@ xrep_rmap_scan_ifork(
error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done);
if (error || mappings_done)
return error;
- } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
- return 0;
+ fallthrough;
+ case XFS_DINODE_FMT_EXTENTS:
+ /* Scan incore extent cache if this isn't a realtime file. */
+ if (xfs_ifork_is_realtime(ip, whichfork))
+ return 0;
+
+ return xrep_rmap_scan_iext(&rf, ifp);
+ case XFS_DINODE_FMT_META_BTREE:
+ return xrep_rmap_scan_meta_btree(&rf, ip);
}
- /* Scan incore extent cache if this isn't a realtime file. */
- if (xfs_ifork_is_realtime(ip, whichfork))
- return 0;
-
- return xrep_rmap_scan_iext(&rf, ifp);
+ return 0;
}
/*
@@ -1552,7 +1621,7 @@ xrep_rmapbt_live_update(
if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo))
goto out_unlock;
- trace_xrep_rmap_live_update(rr->sc->sa.pag, action, p);
+ trace_xrep_rmap_live_update(pag_group(rr->sc->sa.pag), action, p);
error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
if (error)
diff --git a/fs/xfs/scrub/rtb_bitmap.h b/fs/xfs/scrub/rtb_bitmap.h
new file mode 100644
index 000000000000..1313ef605511
--- /dev/null
+++ b/fs/xfs/scrub/rtb_bitmap.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_SCRUB_RTB_BITMAP_H__
+#define __XFS_SCRUB_RTB_BITMAP_H__
+
+/* Bitmaps, but type-checked for xfs_rtblock_t */
+
+struct xrtb_bitmap {
+ struct xbitmap64 rtbitmap;
+};
+
+static inline void xrtb_bitmap_init(struct xrtb_bitmap *bitmap)
+{
+ xbitmap64_init(&bitmap->rtbitmap);
+}
+
+static inline void xrtb_bitmap_destroy(struct xrtb_bitmap *bitmap)
+{
+ xbitmap64_destroy(&bitmap->rtbitmap);
+}
+
+static inline int xrtb_bitmap_set(struct xrtb_bitmap *bitmap,
+ xfs_rtblock_t start, xfs_filblks_t len)
+{
+ return xbitmap64_set(&bitmap->rtbitmap, start, len);
+}
+
+static inline int xrtb_bitmap_walk(struct xrtb_bitmap *bitmap,
+ xbitmap64_walk_fn fn, void *priv)
+{
+ return xbitmap64_walk(&bitmap->rtbitmap, fn, priv);
+}
+
+#endif /* __XFS_SCRUB_RTB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 376a36fd9a9c..e8c776a34c1d 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -9,17 +9,24 @@
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_rtbitmap.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
+#include "xfs_rtgroup.h"
#include "xfs_sb.h"
+#include "xfs_rmap.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_exchmaps.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
+#include "scrub/tempexch.h"
#include "scrub/rtbitmap.h"
+#include "scrub/btree.h"
/* Set us up with the realtime metadata locked. */
int
@@ -30,10 +37,15 @@ xchk_setup_rtbitmap(
struct xchk_rtbitmap *rtb;
int error;
- rtb = kzalloc(sizeof(struct xchk_rtbitmap), XCHK_GFP_FLAGS);
+ if (xchk_need_intent_drain(sc))
+ xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
+ rtb = kzalloc(struct_size(rtb, words, xchk_rtbitmap_wordcnt(sc)),
+ XCHK_GFP_FLAGS);
if (!rtb)
return -ENOMEM;
sc->buf = rtb;
+ rtb->sc = sc;
error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
if (error)
@@ -49,8 +61,7 @@ xchk_setup_rtbitmap(
if (error)
return error;
- error = xchk_install_live_inode(sc,
- sc->sr.rtg->rtg_inodes[XFS_RTGI_BITMAP]);
+ error = xchk_install_live_inode(sc, rtg_bitmap(sc->sr.rtg));
if (error)
return error;
@@ -58,12 +69,15 @@ xchk_setup_rtbitmap(
if (error)
return error;
+ error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
+ if (error)
+ return error;
+
/*
* Now that we've locked the rtbitmap, we can't race with growfsrt
* trying to expand the bitmap or change the size of the rt volume.
* Hence it is safe to compute and check the geometry values.
*/
- xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP);
if (mp->m_sb.sb_rblocks) {
rtb->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks);
rtb->rextslog = xfs_compute_rextslog(rtb->rextents);
@@ -73,7 +87,32 @@ xchk_setup_rtbitmap(
return 0;
}
-/* Realtime bitmap. */
+/* Per-rtgroup bitmap contents. */
+
+/* Cross-reference rtbitmap entries with other metadata. */
+STATIC void
+xchk_rtbitmap_xref(
+ struct xchk_rtbitmap *rtb,
+ xfs_rtblock_t startblock,
+ xfs_rtblock_t blockcount)
+{
+ struct xfs_scrub *sc = rtb->sc;
+ xfs_rgblock_t rgbno = xfs_rtb_to_rgbno(sc->mp, startblock);
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+ if (!sc->sr.rmap_cur)
+ return;
+
+ xchk_xref_has_no_rt_owner(sc, rgbno, blockcount);
+ xchk_xref_is_not_rt_shared(sc, rgbno, blockcount);
+ xchk_xref_is_not_rt_cow_staging(sc, rgbno, blockcount);
+
+ if (rtb->next_free_rgbno < rgbno)
+ xchk_xref_has_rt_owner(sc, rtb->next_free_rgbno,
+ rgbno - rtb->next_free_rgbno);
+ rtb->next_free_rgbno = rgbno + blockcount;
+}
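+
+/*
+ * For example (illustrative numbers): if the previous free extent ended at
+ * rgbno 100 and the current free extent starts at rgbno 120, then blocks
+ * 100-119 are allocated space and must be covered by at least one rt rmap
+ * record, which is what the xchk_xref_has_rt_owner() call above verifies.
+ */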
/* Scrub a free extent record from the realtime bitmap. */
STATIC int
@@ -83,7 +122,8 @@ xchk_rtbitmap_rec(
const struct xfs_rtalloc_rec *rec,
void *priv)
{
- struct xfs_scrub *sc = priv;
+ struct xchk_rtbitmap *rtb = priv;
+ struct xfs_scrub *sc = rtb->sc;
xfs_rtblock_t startblock;
xfs_filblks_t blockcount;
@@ -92,6 +132,12 @@ xchk_rtbitmap_rec(
if (!xfs_verify_rtbext(rtg_mount(rtg), startblock, blockcount))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+
+ xchk_rtbitmap_xref(rtb, startblock, blockcount);
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return -ECANCELED;
+
return 0;
}
@@ -139,15 +185,16 @@ xchk_rtbitmap_check_extents(
return error;
}
-/* Scrub the realtime bitmap. */
+/* Scrub this group's realtime bitmap. */
int
xchk_rtbitmap(
struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_rtgroup *rtg = sc->sr.rtg;
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
struct xchk_rtbitmap *rtb = sc->buf;
+ xfs_rgblock_t last_rgbno;
int error;
/* Is sb_rextents correct? */
@@ -200,10 +247,20 @@ xchk_rtbitmap(
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
- error = xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtbitmap_rec, sc);
+ rtb->next_free_rgbno = 0;
+ error = xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtbitmap_rec, rtb);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
return error;
+ /*
+ * Check that there are rmappings for all rt extents between the end of
+ * the last free extent we saw and the last possible extent in the rt
+ * group.
+ */
+ last_rgbno = rtg->rtg_extents * mp->m_sb.sb_rextsize - 1;
+ if (rtb->next_free_rgbno < last_rgbno)
+ xchk_xref_has_rt_owner(sc, rtb->next_free_rgbno,
+ last_rgbno - rtb->next_free_rgbno);
return 0;
}
@@ -215,7 +272,7 @@ xchk_xref_is_used_rt_space(
xfs_extlen_t len)
{
struct xfs_rtgroup *rtg = sc->sr.rtg;
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
xfs_rtxnum_t startext;
xfs_rtxnum_t endext;
bool is_free;
diff --git a/fs/xfs/scrub/rtbitmap.h b/fs/xfs/scrub/rtbitmap.h
index 85304ff019e1..fe52b877253d 100644
--- a/fs/xfs/scrub/rtbitmap.h
+++ b/fs/xfs/scrub/rtbitmap.h
@@ -6,17 +6,72 @@
#ifndef __XFS_SCRUB_RTBITMAP_H__
#define __XFS_SCRUB_RTBITMAP_H__
+/*
+ * We use an xfile to construct new bitmap blocks for the portion of the
+ * rtbitmap file that we're replacing. Whereas the ondisk bitmap must be
+ * accessed through the buffer cache, the xfile bitmap supports direct
+ * word-level accesses. Therefore, we create a small abstraction for linear
+ * access.
+ */
+typedef unsigned long long xrep_wordoff_t;
+typedef unsigned int xrep_wordcnt_t;
+
+/* Mask to round an rtx down to the nearest bitmap word. */
+#define XREP_RTBMP_WORDMASK ((1ULL << XFS_NBWORDLOG) - 1)
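+
+/*
+ * Worked example of the word-level addressing (illustrative, assuming
+ * 32-bit bitmap words, i.e. XFS_NBWORDLOG == 5): rtx 100 lives at word
+ * offset 100 >> XFS_NBWORDLOG == 3, and within that word at bit
+ * 100 & XREP_RTBMP_WORDMASK == 4.
+ */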
+
struct xchk_rtbitmap {
+ struct xfs_scrub *sc;
+
uint64_t rextents;
uint64_t rbmblocks;
unsigned int rextslog;
unsigned int resblks;
+
+ /* The next free rt group block number that we expect to see. */
+ xfs_rgblock_t next_free_rgbno;
+
+#ifdef CONFIG_XFS_ONLINE_REPAIR
+ /* stuff for staging a new bitmap */
+ struct xfs_rtalloc_args args;
+ struct xrep_tempexch tempexch;
+#endif
+
+ /* The next rtgroup block we expect to see during our rtrmapbt walk. */
+ xfs_rgblock_t next_rgbno;
+
+ /* rtgroup lock flags */
+ unsigned int rtglock_flags;
+
+ /* rtword position of xfile as we write buffers to disk. */
+ xrep_wordoff_t prep_wordoff;
+
+ /* In-memory rtbitmap for repair. */
+ union xfs_rtword_raw words[];
};
#ifdef CONFIG_XFS_ONLINE_REPAIR
int xrep_setup_rtbitmap(struct xfs_scrub *sc, struct xchk_rtbitmap *rtb);
+
+/*
+ * How big should the words[] buffer be?
+ *
+ * For repairs, we want a full fsblock worth of space so that we can memcpy a
+ * buffer full of 1s into the xfile bitmap. The xfile bitmap doesn't have
+ * rtbitmap block headers, so we don't use blockwsize. Scrub doesn't use the
+ * words buffer at all.
+ */
+static inline unsigned int
+xchk_rtbitmap_wordcnt(
+ struct xfs_scrub *sc)
+{
+ if (xchk_could_repair(sc))
+ return sc->mp->m_sb.sb_blocksize >> XFS_WORDLOG;
+ return 0;
+}
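+
+/*
+ * For example (illustrative, assuming 4096-byte blocks and 4-byte bitmap
+ * words): a repair run allocates sb_blocksize >> XFS_WORDLOG == 1024 words
+ * for the staging buffer, while a plain scrub allocates none.
+ */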
#else
# define xrep_setup_rtbitmap(sc, rtb) (0)
+# define xchk_rtbitmap_wordcnt(sc) (0)
#endif /* CONFIG_XFS_ONLINE_REPAIR */
#endif /* __XFS_SCRUB_RTBITMAP_H__ */
diff --git a/fs/xfs/scrub/rtbitmap_repair.c b/fs/xfs/scrub/rtbitmap_repair.c
index 0fef98e9f834..203a1a97c502 100644
--- a/fs/xfs/scrub/rtbitmap_repair.c
+++ b/fs/xfs/scrub/rtbitmap_repair.c
@@ -12,32 +12,66 @@
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
+#include "xfs_rtalloc.h"
#include "xfs_inode.h"
#include "xfs_bit.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_exchmaps.h"
+#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
+#include "xfs_extent_busy.h"
+#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/xfile.h"
+#include "scrub/tempfile.h"
+#include "scrub/tempexch.h"
+#include "scrub/reap.h"
#include "scrub/rtbitmap.h"
-/* Set up to repair the realtime bitmap file metadata. */
+/* rt bitmap content repairs */
+
+/* Set up to repair the realtime bitmap for this group. */
int
xrep_setup_rtbitmap(
struct xfs_scrub *sc,
struct xchk_rtbitmap *rtb)
{
struct xfs_mount *mp = sc->mp;
- unsigned long long blocks = 0;
+ char *descr;
+ unsigned long long blocks = mp->m_sb.sb_rbmblocks;
+ int error;
+
+ error = xrep_tempfile_create(sc, S_IFREG);
+ if (error)
+ return error;
+
+ /* Create an xfile to hold our reconstructed bitmap. */
+ descr = xchk_xfile_rtgroup_descr(sc, "bitmap file");
+ error = xfile_create(descr, blocks * mp->m_sb.sb_blocksize, &sc->xfile);
+ kfree(descr);
+ if (error)
+ return error;
/*
- * Reserve enough blocks to write out a completely new bmbt for a
- * maximally fragmented bitmap file. We do not hold the rtbitmap
- * ILOCK yet, so this is entirely speculative.
+ * Reserve enough blocks to write out a completely new bitmap file,
+ * plus twice as many blocks as we would need if we can only allocate
+ * one block per data fork mapping. This should cover the
+ * preallocation of the temporary file and exchanging the extent
+ * mappings.
+ *
+ * We cannot use xfs_exchmaps_estimate because we have not yet
+ * constructed the replacement bitmap and therefore do not know how
+ * many extents it will use. By the time we do, we will have a dirty
+ * transaction (which we cannot drop because we cannot drop the
+ * rtbitmap ILOCK) and cannot ask for more reservation.
*/
- blocks = xfs_bmbt_calc_size(mp, mp->m_sb.sb_rbmblocks);
+ blocks += xfs_bmbt_calc_size(mp, blocks) * 2;
if (blocks > UINT_MAX)
return -EOPNOTSUPP;
@@ -45,6 +79,325 @@ xrep_setup_rtbitmap(
return 0;
}
+static inline xrep_wordoff_t
+rtx_to_wordoff(
+ struct xfs_mount *mp,
+ xfs_rtxnum_t rtx)
+{
+ return rtx >> XFS_NBWORDLOG;
+}
+
+static inline xrep_wordcnt_t
+rtxlen_to_wordcnt(
+ xfs_rtxlen_t rtxlen)
+{
+ return rtxlen >> XFS_NBWORDLOG;
+}
+
+/* Helper functions to record rtwords in an xfile. */
+
+static inline int
+xfbmp_load(
+ struct xchk_rtbitmap *rtb,
+ xrep_wordoff_t wordoff,
+ xfs_rtword_t *word)
+{
+ union xfs_rtword_raw urk;
+ int error;
+
+ ASSERT(xfs_has_rtgroups(rtb->sc->mp));
+
+ error = xfile_load(rtb->sc->xfile, &urk,
+ sizeof(union xfs_rtword_raw),
+ wordoff << XFS_WORDLOG);
+ if (error)
+ return error;
+
+ *word = be32_to_cpu(urk.rtg);
+ return 0;
+}
+
+static inline int
+xfbmp_store(
+ struct xchk_rtbitmap *rtb,
+ xrep_wordoff_t wordoff,
+ const xfs_rtword_t word)
+{
+ union xfs_rtword_raw urk;
+
+ ASSERT(xfs_has_rtgroups(rtb->sc->mp));
+
+ urk.rtg = cpu_to_be32(word);
+ return xfile_store(rtb->sc->xfile, &urk,
+ sizeof(union xfs_rtword_raw),
+ wordoff << XFS_WORDLOG);
+}
+
+static inline int
+xfbmp_copyin(
+ struct xchk_rtbitmap *rtb,
+ xrep_wordoff_t wordoff,
+ const union xfs_rtword_raw *word,
+ xrep_wordcnt_t nr_words)
+{
+ return xfile_store(rtb->sc->xfile, word, nr_words << XFS_WORDLOG,
+ wordoff << XFS_WORDLOG);
+}
+
+static inline int
+xfbmp_copyout(
+ struct xchk_rtbitmap *rtb,
+ xrep_wordoff_t wordoff,
+ union xfs_rtword_raw *word,
+ xrep_wordcnt_t nr_words)
+{
+ return xfile_load(rtb->sc->xfile, word, nr_words << XFS_WORDLOG,
+ wordoff << XFS_WORDLOG);
+}
+
+/* Perform a logical OR operation on an rtword in the incore bitmap. */
+static int
+xrep_rtbitmap_or(
+ struct xchk_rtbitmap *rtb,
+ xrep_wordoff_t wordoff,
+ xfs_rtword_t mask)
+{
+ xfs_rtword_t word;
+ int error;
+
+ error = xfbmp_load(rtb, wordoff, &word);
+ if (error)
+ return error;
+
+ trace_xrep_rtbitmap_or(rtb->sc->mp, wordoff, mask, word);
+
+ return xfbmp_store(rtb, wordoff, word | mask);
+}
+
+/*
+ * Mark as free every rt extent between the next rt block we expected to see
+ * in the rtrmap records and the given rt block.
+ */
+STATIC int
+xrep_rtbitmap_mark_free(
+ struct xchk_rtbitmap *rtb,
+ xfs_rgblock_t rgbno)
+{
+ struct xfs_mount *mp = rtb->sc->mp;
+ struct xchk_rt *sr = &rtb->sc->sr;
+ struct xfs_rtgroup *rtg = sr->rtg;
+ xfs_rtxnum_t startrtx;
+ xfs_rtxnum_t nextrtx;
+ xrep_wordoff_t wordoff, nextwordoff;
+ unsigned int bit;
+ unsigned int bufwsize;
+ xfs_extlen_t mod;
+ xfs_rtword_t mask;
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!xfs_verify_rgbext(rtg, rtb->next_rgbno, rgbno - rtb->next_rgbno))
+ return -EFSCORRUPTED;
+
+ /*
+ * Convert rt blocks to rt extents. The block range we find must be
+ * aligned to an rtextent boundary on both ends.
+ */
+ startrtx = xfs_rgbno_to_rtx(mp, rtb->next_rgbno);
+ mod = xfs_rgbno_to_rtxoff(mp, rtb->next_rgbno);
+ if (mod)
+ return -EFSCORRUPTED;
+
+ nextrtx = xfs_rgbno_to_rtx(mp, rgbno - 1) + 1;
+ mod = xfs_rgbno_to_rtxoff(mp, rgbno - 1);
+ if (mod != mp->m_sb.sb_rextsize - 1)
+ return -EFSCORRUPTED;
+
+ /* Must not be shared or CoW staging. */
+ if (sr->refc_cur) {
+ error = xfs_refcount_has_records(sr->refc_cur,
+ XFS_REFC_DOMAIN_SHARED, rtb->next_rgbno,
+ rgbno - rtb->next_rgbno, &outcome);
+ if (error)
+ return error;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ return -EFSCORRUPTED;
+
+ error = xfs_refcount_has_records(sr->refc_cur,
+ XFS_REFC_DOMAIN_COW, rtb->next_rgbno,
+ rgbno - rtb->next_rgbno, &outcome);
+ if (error)
+ return error;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ return -EFSCORRUPTED;
+ }
+
+ trace_xrep_rtbitmap_record_free(mp, startrtx, nextrtx - 1);
+
+ /* Set bits as needed to round startrtx up to the nearest word. */
+ bit = startrtx & XREP_RTBMP_WORDMASK;
+ if (bit) {
+ xfs_rtblock_t len = nextrtx - startrtx;
+ unsigned int lastbit;
+
+ lastbit = min(bit + len, XFS_NBWORD);
+ mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+
+ error = xrep_rtbitmap_or(rtb, rtx_to_wordoff(mp, startrtx),
+ mask);
+ if (error || lastbit - bit == len)
+ return error;
+ startrtx += XFS_NBWORD - bit;
+ }
+
+ /* Set bits as needed to round nextrtx down to the nearest word. */
+ bit = nextrtx & XREP_RTBMP_WORDMASK;
+ if (bit) {
+ mask = ((xfs_rtword_t)1 << bit) - 1;
+
+ error = xrep_rtbitmap_or(rtb, rtx_to_wordoff(mp, nextrtx),
+ mask);
+ if (error || startrtx + bit == nextrtx)
+ return error;
+ nextrtx -= bit;
+ }
+
+ trace_xrep_rtbitmap_record_free_bulk(mp, startrtx, nextrtx - 1);
+
+ /* Set all the words in between, up to a whole fs block at once. */
+ wordoff = rtx_to_wordoff(mp, startrtx);
+ nextwordoff = rtx_to_wordoff(mp, nextrtx);
+ bufwsize = mp->m_sb.sb_blocksize >> XFS_WORDLOG;
+
+ while (wordoff < nextwordoff) {
+ xrep_wordoff_t rem;
+ xrep_wordcnt_t wordcnt;
+
+ wordcnt = min_t(xrep_wordcnt_t, nextwordoff - wordoff,
+ bufwsize);
+
+ /*
+ * Try to keep us aligned to the rtwords buffer to reduce the
+ * number of xfile writes.
+ */
+ rem = wordoff & (bufwsize - 1);
+ if (rem)
+ wordcnt = min_t(xrep_wordcnt_t, wordcnt,
+ bufwsize - rem);
+
+ error = xfbmp_copyin(rtb, wordoff, rtb->words, wordcnt);
+ if (error)
+ return error;
+
+ wordoff += wordcnt;
+ }
+
+ return 0;
+}
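+
+/*
+ * A small worked example of the rounding above (illustrative, assuming
+ * 32-bit bitmap words): for startrtx == 5 and nextrtx == 40, the head step
+ * ORs bits 5-31 into word 0 and advances startrtx to 32; the tail step ORs
+ * bits 0-7 into word 1 and returns because startrtx + bit == nextrtx.  Only
+ * when whole words remain between the rounded offsets does xfbmp_copyin()
+ * copy in full words of ones from rtb->words.
+ */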
+
+/* Set free space in the rtbitmap based on rtrmapbt records. */
+STATIC int
+xrep_rtbitmap_walk_rtrmap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xchk_rtbitmap *rtb = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(rtb->sc, &error))
+ return error;
+
+ if (rtb->next_rgbno < rec->rm_startblock) {
+ error = xrep_rtbitmap_mark_free(rtb, rec->rm_startblock);
+ if (error)
+ return error;
+ }
+
+ rtb->next_rgbno = max(rtb->next_rgbno,
+ rec->rm_startblock + rec->rm_blockcount);
+ return 0;
+}
+
+/*
+ * Walk the rtrmapbt to find all the gaps between records, and mark the gaps
+ * in the realtime bitmap that we're computing.
+ */
+STATIC int
+xrep_rtbitmap_find_freespace(
+ struct xchk_rtbitmap *rtb)
+{
+ struct xfs_scrub *sc = rtb->sc;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_rtgroup *rtg = sc->sr.rtg;
+ uint64_t blockcount;
+ int error;
+
+ /* Prepare a buffer of ones so that we can accelerate bulk setting. */
+ memset(rtb->words, 0xFF, mp->m_sb.sb_blocksize);
+
+ xrep_rtgroup_btcur_init(sc, &sc->sr);
+ error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_rtbitmap_walk_rtrmap,
+ rtb);
+ if (error)
+ goto out;
+
+ /*
+ * Mark as free every possible rt extent from the last one we saw to
+ * the end of the rt group.
+ */
+ blockcount = rtg->rtg_extents * mp->m_sb.sb_rextsize;
+ if (rtb->next_rgbno < blockcount) {
+ error = xrep_rtbitmap_mark_free(rtb, blockcount);
+ if (error)
+ goto out;
+ }
+
+out:
+ xchk_rtgroup_btcur_free(&sc->sr);
+ return error;
+}
+
+static int
+xrep_rtbitmap_prep_buf(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp,
+ void *data)
+{
+ struct xchk_rtbitmap *rtb = data;
+ struct xfs_mount *mp = sc->mp;
+ union xfs_rtword_raw *ondisk;
+ int error;
+
+ rtb->args.mp = sc->mp;
+ rtb->args.tp = sc->tp;
+ rtb->args.rbmbp = bp;
+ ondisk = xfs_rbmblock_wordptr(&rtb->args, 0);
+ rtb->args.rbmbp = NULL;
+
+ error = xfbmp_copyout(rtb, rtb->prep_wordoff, ondisk,
+ mp->m_blockwsize);
+ if (error)
+ return error;
+
+ if (xfs_has_rtgroups(sc->mp)) {
+ struct xfs_rtbuf_blkinfo *hdr = bp->b_addr;
+
+ hdr->rt_magic = cpu_to_be32(XFS_RTBITMAP_MAGIC);
+ hdr->rt_owner = cpu_to_be64(sc->ip->i_ino);
+ hdr->rt_blkno = cpu_to_be64(xfs_buf_daddr(bp));
+ hdr->rt_lsn = 0;
+ uuid_copy(&hdr->rt_uuid, &sc->mp->m_sb.sb_meta_uuid);
+ bp->b_ops = &xfs_rtbitmap_buf_ops;
+ } else {
+ bp->b_ops = &xfs_rtbuf_ops;
+ }
+
+ rtb->prep_wordoff += mp->m_blockwsize;
+ xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_RTBITMAP_BUF);
+ return 0;
+}
+
/*
* Make sure that the given range of the data fork of the realtime file is
* mapped to written blocks. The caller must ensure that the inode is joined
@@ -160,9 +513,18 @@ xrep_rtbitmap(
{
struct xchk_rtbitmap *rtb = sc->buf;
struct xfs_mount *mp = sc->mp;
+ struct xfs_group *xg = rtg_group(sc->sr.rtg);
unsigned long long blocks = 0;
+ unsigned int busy_gen;
int error;
+ /* We require the realtime rmapbt to rebuild anything. */
+ if (!xfs_has_rtrmapbt(sc->mp))
+ return -EOPNOTSUPP;
+ /* We require atomic file exchange range to rebuild anything. */
+ if (!xfs_has_exchange_range(sc->mp))
+ return -EOPNOTSUPP;
+
/* Impossibly large rtbitmap means we can't touch the filesystem. */
if (rtb->rbmblocks > U32_MAX)
return 0;
@@ -195,6 +557,79 @@ xrep_rtbitmap(
if (error)
return error;
- /* Fix inconsistent bitmap geometry */
- return xrep_rtbitmap_geometry(sc, rtb);
+ /*
+ * Fix inconsistent bitmap geometry. This function returns with a
+ * clean scrub transaction.
+ */
+ error = xrep_rtbitmap_geometry(sc, rtb);
+ if (error)
+ return error;
+
+ /*
+ * Make sure the busy extent list is clear because we can't put extents
+ * on there twice.
+ */
+ if (!xfs_extent_busy_list_empty(xg, &busy_gen)) {
+ error = xfs_extent_busy_flush(sc->tp, xg, busy_gen, 0);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Generate the new rtbitmap data. We don't need the rtbmp information
+ * once this call is finished.
+ */
+ error = xrep_rtbitmap_find_freespace(rtb);
+ if (error)
+ return error;
+
+ /*
+ * Try to take ILOCK_EXCL of the temporary file. We had better be the
+ * only ones holding onto this inode, but we can't block while holding
+ * the rtbitmap file's ILOCK_EXCL.
+ */
+ while (!xrep_tempfile_ilock_nowait(sc)) {
+ if (xchk_should_terminate(sc, &error))
+ return error;
+ delay(1);
+ }
+
+ /*
+ * Make sure we have space allocated for the part of the bitmap
+ * file that corresponds to this group. We already joined sc->ip.
+ */
+ xfs_trans_ijoin(sc->tp, sc->tempip, 0);
+ error = xrep_tempfile_prealloc(sc, 0, rtb->rbmblocks);
+ if (error)
+ return error;
+
+ /* Last chance to abort before we start committing fixes. */
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ /* Copy the bitmap file that we generated. */
+ error = xrep_tempfile_copyin(sc, 0, rtb->rbmblocks,
+ xrep_rtbitmap_prep_buf, rtb);
+ if (error)
+ return error;
+ error = xrep_tempfile_set_isize(sc,
+ XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks));
+ if (error)
+ return error;
+
+ /*
+ * Now exchange the data fork contents. We're done with the temporary
+ * buffer, so we can reuse it for the tempfile exchmaps information.
+ */
+ error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, 0,
+ rtb->rbmblocks, &rtb->tempexch);
+ if (error)
+ return error;
+
+ error = xrep_tempexch_contents(sc, &rtb->tempexch);
+ if (error)
+ return error;
+
+ /* Free the old rtbitmap blocks if they're not in use. */
+ return xrep_reap_ifork(sc, sc->tempip, XFS_DATA_FORK);
}
diff --git a/fs/xfs/scrub/rtrefcount.c b/fs/xfs/scrub/rtrefcount.c
new file mode 100644
index 000000000000..4c5dffc73641
--- /dev/null
+++ b/fs/xfs/scrub/rtrefcount.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_trans.h"
+#include "xfs_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_refcount.h"
+#include "xfs_inode.h"
+#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
+#include "xfs_metafile.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_rtalloc.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/repair.h"
+
+/* Set us up with the realtime refcount metadata locked. */
+int
+xchk_setup_rtrefcountbt(
+ struct xfs_scrub *sc)
+{
+ int error;
+
+ if (xchk_need_intent_drain(sc))
+ xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
+ if (xchk_could_repair(sc)) {
+ error = xrep_setup_rtrefcountbt(sc);
+ if (error)
+ return error;
+ }
+
+ error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
+ if (error)
+ return error;
+
+ error = xchk_setup_rt(sc);
+ if (error)
+ return error;
+
+ error = xchk_install_live_inode(sc, rtg_refcount(sc->sr.rtg));
+ if (error)
+ return error;
+
+ return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
+}
+
+/* Realtime Reference count btree scrubber. */
+
+/*
+ * Confirming Reference Counts via Reverse Mappings
+ *
+ * We want to count the reverse mappings overlapping a refcount record
+ * (bno, len, refcount), allowing for the possibility that some of the
+ * overlap may come from smaller adjoining reverse mappings, while some
+ * comes from single extents which overlap the range entirely. The
+ * outer loop is as follows:
+ *
+ * 1. For all reverse mappings overlapping the refcount extent,
+ * a. If a given rmap completely overlaps, mark it as seen.
+ * b. Otherwise, record the fragment (in rgbno order) for later
+ * processing.
+ *
+ * Once we've seen all the rmaps, we know that for all blocks in the
+ * refcount record we want to find $refcount owners and we've already
+ * visited $seen extents that overlap all the blocks. Therefore, we
+ * need to find ($refcount - $seen) owners for every block in the
+ * extent; call that quantity $target_nr. Proceed as follows:
+ *
+ * 2. Pull the first $target_nr fragments from the list; all of them
+ * should start at or before the start of the extent.
+ * Call this subset of fragments the working set.
+ * 3. Until there are no more unprocessed fragments,
+ * a. Find the shortest fragments in the set and remove them.
+ * b. Note the block number of the end of these fragments.
+ * c. Pull the same number of fragments from the list. All of these
+ * fragments should start at the block number recorded in the
+ * previous step.
+ * d. Put those fragments in the set.
+ * 4. Check that there are $target_nr fragments remaining in the list,
+ * and that they all end at or beyond the end of the refcount extent.
+ *
+ * If the refcount is correct, all the check conditions in the algorithm
+ * should always hold true. If not, the refcount is incorrect.
+ */
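+
+/*
+ * A minimal worked example of the algorithm above (illustrative numbers
+ * only): take a refcount record (bno 0, len 4, refcount 3) and suppose the
+ * rmap data holds one rmap covering blocks 0-3 entirely plus four
+ * fragments: two covering blocks 0-1 and two covering blocks 2-3.  Step 1
+ * marks the full-coverage rmap as seen, so $target_nr == 3 - 1 == 2.
+ * Step 2 pulls the two fragments starting at block 0 into the working set;
+ * step 3 retires them at block 2 and pulls in the two fragments starting
+ * there.  Step 4 then finds exactly two fragments ending at the end of the
+ * refcount extent, confirming the refcount of 3.
+ */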
+struct xchk_rtrefcnt_frag {
+ struct list_head list;
+ struct xfs_rmap_irec rm;
+};
+
+struct xchk_rtrefcnt_check {
+ struct xfs_scrub *sc;
+ struct list_head fragments;
+
+ /* refcount extent we're examining */
+ xfs_rgblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+
+ /* number of owners seen */
+ xfs_nlink_t seen;
+};
+
+/*
+ * Decide if the given rmap is large enough that we can redeem it
+ * towards refcount verification now, or if it's a fragment, in
+ * which case we'll hang onto it in the hopes that we'll later
+ * discover that we've collected exactly the correct number of
+ * fragments as the rtrefcountbt says we should have.
+ */
+STATIC int
+xchk_rtrefcountbt_rmap_check(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xchk_rtrefcnt_check *refchk = priv;
+ struct xchk_rtrefcnt_frag *frag;
+ xfs_rgblock_t rm_last;
+ xfs_rgblock_t rc_last;
+ int error = 0;
+
+ if (xchk_should_terminate(refchk->sc, &error))
+ return error;
+
+ rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
+ rc_last = refchk->bno + refchk->len - 1;
+
+ /* Confirm that a single-owner refc extent is a CoW stage. */
+ if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
+ xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
+ return 0;
+ }
+
+ if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
+ /*
+ * The rmap overlaps the refcount record, so we can confirm
+ * one refcount owner seen.
+ */
+ refchk->seen++;
+ } else {
+ /*
+ * This rmap covers only part of the refcount record, so
+ * save the fragment for later processing. If the rmapbt
+ * is healthy each rmap_irec we see will be in rgbno order
+ * so we don't need insertion sort here.
+ */
+ frag = kmalloc(sizeof(struct xchk_rtrefcnt_frag),
+ XCHK_GFP_FLAGS);
+ if (!frag)
+ return -ENOMEM;
+ memcpy(&frag->rm, rec, sizeof(frag->rm));
+ list_add_tail(&frag->list, &refchk->fragments);
+ }
+
+ return 0;
+}
+
+/*
+ * Given a bunch of rmap fragments, iterate through them, keeping
+ * a running tally of the refcount. If this ever deviates from
+ * what we expect (which is the rtrefcountbt's refcount minus the
+ * number of extents that totally covered the rtrefcountbt extent),
+ * we have a rtrefcountbt error.
+ */
+STATIC void
+xchk_rtrefcountbt_process_rmap_fragments(
+ struct xchk_rtrefcnt_check *refchk)
+{
+ struct list_head worklist;
+ struct xchk_rtrefcnt_frag *frag;
+ struct xchk_rtrefcnt_frag *n;
+ xfs_rgblock_t bno;
+ xfs_rgblock_t rbno;
+ xfs_rgblock_t next_rbno;
+ xfs_nlink_t nr;
+ xfs_nlink_t target_nr;
+
+ target_nr = refchk->refcount - refchk->seen;
+ if (target_nr == 0)
+ return;
+
+ /*
+ * There are (refchk->refcount - refchk->seen)
+ * references we haven't found yet. Pull that many off the
+ * fragment list and figure out where the smallest rmap ends
+ * (and therefore the next rmap should start). All the rmaps
+ * we pull off should start at or before the beginning of the
+ * refcount record's range.
+ */
+ INIT_LIST_HEAD(&worklist);
+ rbno = NULLRGBLOCK;
+
+ /* Make sure the fragments actually /are/ in bno order. */
+ bno = 0;
+ list_for_each_entry(frag, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock < bno)
+ goto done;
+ bno = frag->rm.rm_startblock;
+ }
+
+ /*
+ * Find all the rmaps that start at or before the refc extent,
+ * and put them on the worklist.
+ */
+ nr = 0;
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
+ break;
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno < rbno)
+ rbno = bno;
+ list_move_tail(&frag->list, &worklist);
+ nr++;
+ }
+
+ /*
+ * We should have found exactly $target_nr rmap fragments starting
+ * at or before the refcount extent.
+ */
+ if (nr != target_nr)
+ goto done;
+
+ while (!list_empty(&refchk->fragments)) {
+ /* Discard any fragments ending at rbno from the worklist. */
+ nr = 0;
+ next_rbno = NULLRGBLOCK;
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno != rbno) {
+ if (bno < next_rbno)
+ next_rbno = bno;
+ continue;
+ }
+ list_del(&frag->list);
+ kfree(frag);
+ nr++;
+ }
+
+ /* Try to add nr rmaps starting at rbno to the worklist. */
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (frag->rm.rm_startblock != rbno)
+ goto done;
+ list_move_tail(&frag->list, &worklist);
+ if (next_rbno > bno)
+ next_rbno = bno;
+ nr--;
+ if (nr == 0)
+ break;
+ }
+
+ /*
+ * If we get here and nr > 0, this means that we added fewer
+ * items to the worklist than we discarded because the fragment
+ * list ran out of items. Therefore, we cannot maintain the
+ * required refcount. Something is wrong, so we're done.
+ */
+ if (nr)
+ goto done;
+
+ rbno = next_rbno;
+ }
+
+ /*
+ * Make sure the last extent we processed ends at or beyond
+ * the end of the refcount extent.
+ */
+ if (rbno < refchk->bno + refchk->len)
+ goto done;
+
+ /* Actually record us having seen the remaining refcount. */
+ refchk->seen = refchk->refcount;
+done:
+ /* Delete fragments and work list. */
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ list_del(&frag->list);
+ kfree(frag);
+ }
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ list_del(&frag->list);
+ kfree(frag);
+ }
+}
+
+/* Use the rmap entries covering this extent to verify the refcount. */
+STATIC void
+xchk_rtrefcountbt_xref_rmap(
+ struct xfs_scrub *sc,
+ const struct xfs_refcount_irec *irec)
+{
+ struct xchk_rtrefcnt_check refchk = {
+ .sc = sc,
+ .bno = irec->rc_startblock,
+ .len = irec->rc_blockcount,
+ .refcount = irec->rc_refcount,
+ .seen = 0,
+ };
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ struct xchk_rtrefcnt_frag *frag;
+ struct xchk_rtrefcnt_frag *n;
+ int error;
+
+ if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ /* Cross-reference with the rmapbt to confirm the refcount. */
+ memset(&low, 0, sizeof(low));
+ low.rm_startblock = irec->rc_startblock;
+ memset(&high, 0xFF, sizeof(high));
+ high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;
+
+ INIT_LIST_HEAD(&refchk.fragments);
+ error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
+ xchk_rtrefcountbt_rmap_check, &refchk);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
+ goto out_free;
+
+ xchk_rtrefcountbt_process_rmap_fragments(&refchk);
+ if (irec->rc_refcount != refchk.seen)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+
+out_free:
+ list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
+ list_del(&frag->list);
+ kfree(frag);
+ }
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xchk_rtrefcountbt_xref(
+ struct xfs_scrub *sc,
+ const struct xfs_refcount_irec *irec)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xchk_xref_is_used_rt_space(sc,
+ xfs_rgbno_to_rtb(sc->sr.rtg, irec->rc_startblock),
+ irec->rc_blockcount);
+ xchk_rtrefcountbt_xref_rmap(sc, irec);
+}
+
+struct xchk_rtrefcbt_records {
+ /* Previous refcount record. */
+ struct xfs_refcount_irec prev_rec;
+
+ /* The next rtgroup block where we aren't expecting shared extents. */
+ xfs_rgblock_t next_unshared_rgbno;
+
+ /* Number of CoW blocks we expect. */
+ xfs_extlen_t cow_blocks;
+
+ /* Was the last record a shared or CoW staging extent? */
+ enum xfs_refc_domain prev_domain;
+};
+
+static inline bool
+xchk_rtrefcount_mergeable(
+ struct xchk_rtrefcbt_records *rrc,
+ const struct xfs_refcount_irec *r2)
+{
+ const struct xfs_refcount_irec *r1 = &rrc->prev_rec;
+
+ /* Ignore if prev_rec is not yet initialized. */
+ if (r1->rc_blockcount == 0)
+ return false;
+
+ if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
+ return false;
+ if (r1->rc_refcount != r2->rc_refcount)
+ return false;
+ if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
+ XFS_REFC_LEN_MAX)
+ return false;
+
+ return true;
+}
+
+/* Flag failures for records that could be merged. */
+STATIC void
+xchk_rtrefcountbt_check_mergeable(
+ struct xchk_btree *bs,
+ struct xchk_rtrefcbt_records *rrc,
+ const struct xfs_refcount_irec *irec)
+{
+ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ if (xchk_rtrefcount_mergeable(rrc, irec))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
+}
+
+STATIC int
+xchk_rtrefcountbt_rmap_check_gap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ xfs_rgblock_t *next_bno = priv;
+
+ if (*next_bno != NULLRGBLOCK && rec->rm_startblock < *next_bno)
+ return -ECANCELED;
+
+ *next_bno = rec->rm_startblock + rec->rm_blockcount;
+ return 0;
+}
+
+/*
+ * Make sure that a gap in the reference count records does not correspond to
+ * overlapping records (i.e. shared extents) in the reverse mappings.
+ */
+static inline void
+xchk_rtrefcountbt_xref_gaps(
+ struct xfs_scrub *sc,
+ struct xchk_rtrefcbt_records *rrc,
+ xfs_rtblock_t bno)
+{
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ xfs_rgblock_t next_bno = NULLRGBLOCK;
+ int error;
+
+ if (bno <= rrc->next_unshared_rgbno || !sc->sr.rmap_cur ||
+ xchk_skip_xref(sc->sm))
+ return;
+
+ memset(&low, 0, sizeof(low));
+ low.rm_startblock = rrc->next_unshared_rgbno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rm_startblock = bno - 1;
+
+ error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
+ xchk_rtrefcountbt_rmap_check_gap, &next_bno);
+ if (error == -ECANCELED)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+ else
+ xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur);
+}
+
+/* Scrub a rtrefcountbt record. */
+STATIC int
+xchk_rtrefcountbt_rec(
+ struct xchk_btree *bs,
+ const union xfs_btree_rec *rec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xchk_rtrefcbt_records *rrc = bs->private;
+ struct xfs_refcount_irec irec;
+ u32 mod;
+
+ xfs_refcount_btrec_to_irec(rec, &irec);
+ if (xfs_rtrefcount_check_irec(to_rtg(bs->cur->bc_group), &irec) !=
+ NULL) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return 0;
+ }
+
+ /* We can only share full rt extents. */
+ mod = xfs_rgbno_to_rtxoff(mp, irec.rc_startblock);
+ if (mod)
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ mod = xfs_extlen_to_rtxmod(mp, irec.rc_blockcount);
+ if (mod)
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
+ rrc->cow_blocks += irec.rc_blockcount;
+
+ /* Shared records always come before CoW records. */
+ if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
+ rrc->prev_domain == XFS_REFC_DOMAIN_COW)
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ rrc->prev_domain = irec.rc_domain;
+
+ xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec);
+ xchk_rtrefcountbt_xref(bs->sc, &irec);
+
+ /*
+ * If this is a record for a shared extent, check that all blocks
+ * between the previous record and this one have at most one reverse
+ * mapping.
+ */
+ if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
+ xchk_rtrefcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
+ rrc->next_unshared_rgbno = irec.rc_startblock +
+ irec.rc_blockcount;
+ }
+
+ return 0;
+}
+
+/* Make sure we have as many refc blocks as the rmap says. */
+STATIC void
+xchk_refcount_xref_rmap(
+ struct xfs_scrub *sc,
+ const struct xfs_owner_info *btree_oinfo,
+ xfs_extlen_t cow_blocks)
+{
+ xfs_filblks_t refcbt_blocks = 0;
+ xfs_filblks_t blocks;
+ int error;
+
+ if (!sc->sr.rmap_cur || !sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ /* Check that we saw as many refcbt blocks as the rmap knows about. */
+ error = xfs_btree_count_blocks(sc->sr.refc_cur, &refcbt_blocks);
+ if (!xchk_btree_process_error(sc, sc->sr.refc_cur, 0, &error))
+ return;
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, btree_oinfo,
+ &blocks);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != refcbt_blocks)
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+
+ /* Check that we saw as many cow blocks as the rmap knows about. */
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sr.rmap_cur,
+ &XFS_RMAP_OINFO_COW, &blocks);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
+ return;
+ if (blocks != cow_blocks)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+}
+
+/* Scrub the realtime refcount btree for this rtgroup. */
+int
+xchk_rtrefcountbt(
+ struct xfs_scrub *sc)
+{
+ struct xfs_owner_info btree_oinfo;
+ struct xchk_rtrefcbt_records rrc = {
+ .cow_blocks = 0,
+ .next_unshared_rgbno = 0,
+ .prev_domain = XFS_REFC_DOMAIN_SHARED,
+ };
+ int error;
+
+ error = xchk_metadata_inode_forks(sc);
+ if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ return error;
+
+ xfs_rmap_ino_bmbt_owner(&btree_oinfo, rtg_refcount(sc->sr.rtg)->i_ino,
+ XFS_DATA_FORK);
+ error = xchk_btree(sc, sc->sr.refc_cur, xchk_rtrefcountbt_rec,
+ &btree_oinfo, &rrc);
+ if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ return error;
+
+ /*
+ * Check that all blocks between the last refcount > 1 record and the
+ * end of the rt volume have at most one reverse mapping.
+ */
+ xchk_rtrefcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_rblocks);
+
+ xchk_refcount_xref_rmap(sc, &btree_oinfo, rrc.cow_blocks);
+
+ return 0;
+}
+
+/* xref check that a cow staging extent is marked in the rtrefcountbt. */
+void
+xchk_xref_is_rt_cow_staging(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ struct xfs_refcount_irec rc;
+ int has_refcount;
+ int error;
+
+ if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ /* Find the CoW staging extent. */
+ error = xfs_refcount_lookup_le(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
+ bno, &has_refcount);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
+ return;
+ if (!has_refcount) {
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+ return;
+ }
+
+ error = xfs_refcount_get_rec(sc->sr.refc_cur, &rc, &has_refcount);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
+ return;
+ if (!has_refcount) {
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+ return;
+ }
+
+ /* CoW lookup returned a shared extent record? */
+ if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+
+ /* Must be at least as long as what was passed in */
+ if (rc.rc_blockcount < len)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+}
+
+/*
+ * xref check that the extent is not shared. Only file data blocks
+ * can have multiple owners.
+ */
+void
+xchk_xref_is_not_rt_shared(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_refcount_has_records(sc->sr.refc_cur,
+ XFS_REFC_DOMAIN_SHARED, bno, len, &outcome);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
+ return;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+}
+
+/* xref check that the extent is not being used for CoW staging. */
+void
+xchk_xref_is_not_rt_cow_staging(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_refcount_has_records(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
+ bno, len, &outcome);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
+ return;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+}
diff --git a/fs/xfs/scrub/rtrefcount_repair.c b/fs/xfs/scrub/rtrefcount_repair.c
new file mode 100644
index 000000000000..257cfb24beb4
--- /dev/null
+++ b/fs/xfs/scrub/rtrefcount_repair.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_error.h"
+#include "xfs_health.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_rtalloc.h"
+#include "xfs_ag.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtbitmap.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
+#include "scrub/fsb_bitmap.h"
+#include "scrub/xfile.h"
+#include "scrub/xfarray.h"
+#include "scrub/newbt.h"
+#include "scrub/reap.h"
+#include "scrub/rcbag.h"
+
+/*
+ * Rebuilding the Reference Count Btree
+ * ====================================
+ *
+ * This algorithm is "borrowed" from xfs_repair. Imagine the rmap
+ * entries as rectangles representing extents of physical blocks, and
+ * that the rectangles can be laid down to allow them to overlap each
+ * other; then we know that we must emit a refcnt btree entry wherever
+ * the amount of overlap changes, i.e. the emission stimulus is
+ * level-triggered:
+ *
+ * - ---
+ * -- ----- ---- --- ------
+ * -- ---- ----------- ---- ---------
+ * -------------------------------- -----------
+ * ^ ^ ^^ ^^ ^ ^^ ^^^ ^^^^ ^ ^^ ^ ^ ^
+ * 2 1 23 21 3 43 234 2123 1 01 2 3 0
+ *
+ * For our purposes, a rmap is a tuple (startblock, len, fileoff, owner).
+ *
+ * Note that in the actual refcount btree we don't store the refcount < 2
+ * cases because the realtime bitmap tells us which blocks are free;
+ * single-use blocks aren't recorded in the refcount btree. If the
+ * rtrmapbt supports storing multiple entries covering a given block we
+ * could theoretically dispense with the refcount btree and simply count
+ * rmaps, but that's inefficient in the (hot) write path, so we'll take
+ * the cost of the extra tree to save time. Also there's no guarantee
+ * that rmap will be enabled.
+ *
+ * Given an array of rmaps sorted by physical block number, a starting
+ * physical block (sp), a bag to hold rmaps that cover sp, and the next
+ * physical block where the level changes (np), we can reconstruct the
+ * rt refcount btree as follows:
+ *
+ * While there are still unprocessed rmaps in the array,
+ * - Set sp to the physical block (pblk) of the next unprocessed rmap.
+ * - Add to the bag all rmaps in the array where startblock == sp.
+ * - Set np to the physical block where the bag size will change. This
+ * is the minimum of (the pblk of the next unprocessed rmap) and
+ * (startblock + len of each rmap in the bag).
+ * - Record the bag size as old_bag_size.
+ *
+ * - While the bag isn't empty,
+ * - Remove from the bag all rmaps where startblock + len == np.
+ * - Add to the bag all rmaps in the array where startblock == np.
+ * - If the bag size isn't old_bag_size, store the refcount entry
+ * (sp, np - sp, bag_size) in the refcnt btree.
+ * - If the bag is empty, break out of the inner loop.
+ * - Set old_bag_size to the bag size
+ * - Set sp = np.
+ * - Set np to the physical block where the bag size will change.
+ * This is the minimum of (the pblk of the next unprocessed rmap)
+ * and (startblock + len of each rmap in the bag).
+ *
+ * Like all the other repairers, we make a list of all the refcount
+ * records we need, then reinitialize the rt refcount btree root and
+ * insert all the records.
+ */
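
The loop described above is easier to see in miniature. The sketch below is purely illustrative (its struct, names, and O(n^2) point query stand in for the sorted rmap array and the rcbag): it emits a record wherever the overlap level changes, and keeps only the refcount > 1 cases, exactly as the refcount btree does.

#include <stdio.h>
#include <stdlib.h>

struct ext { unsigned long long start, len; };

static int cmp_ull(const void *a, const void *b)
{
	unsigned long long x = *(const unsigned long long *)a;
	unsigned long long y = *(const unsigned long long *)b;

	return (x > y) - (x < y);
}

/* Count how many extents cover block @bno. */
static unsigned int overlap_at(const struct ext *e, size_t n,
			       unsigned long long bno)
{
	unsigned int ret = 0;

	for (size_t i = 0; i < n; i++)
		if (e[i].start <= bno && bno < e[i].start + e[i].len)
			ret++;
	return ret;
}

int main(void)
{
	struct ext rmaps[] = { { 0, 6 }, { 2, 4 }, { 4, 8 } };
	size_t n = sizeof(rmaps) / sizeof(rmaps[0]);
	unsigned long long edges[2 * sizeof(rmaps) / sizeof(rmaps[0])];
	size_t nedges = 0;

	/* Every extent start and end is a place the overlap level can change. */
	for (size_t i = 0; i < n; i++) {
		edges[nedges++] = rmaps[i].start;
		edges[nedges++] = rmaps[i].start + rmaps[i].len;
	}
	qsort(edges, nedges, sizeof(edges[0]), cmp_ull);

	for (size_t i = 0; i + 1 < nedges; i++) {
		unsigned long long sp = edges[i], np = edges[i + 1];
		unsigned int level;

		if (sp == np)
			continue;
		level = overlap_at(rmaps, n, sp);
		/* Only refcount > 1 ranges go into the refcount btree. */
		if (level > 1)
			printf("refcount record: start %llu len %llu count %u\n",
			       sp, np - sp, level);
	}
	return 0;
}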
+
+struct xrep_rtrefc {
+ /* refcount extents */
+ struct xfarray *refcount_records;
+
+ /* new refcountbt information */
+ struct xrep_newbt new_btree;
+
+ /* old refcountbt blocks */
+ struct xfsb_bitmap old_rtrefcountbt_blocks;
+
+ struct xfs_scrub *sc;
+
+ /* get_records()'s position in the rt refcount record array. */
+ xfarray_idx_t array_cur;
+
+ /* # of refcountbt blocks */
+ xfs_filblks_t btblocks;
+};
+
+/* Set us up to repair refcount btrees. */
+int
+xrep_setup_rtrefcountbt(
+ struct xfs_scrub *sc)
+{
+ char *descr;
+ int error;
+
+ descr = xchk_xfile_ag_descr(sc, "rmap record bag");
+ error = xrep_setup_xfbtree(sc, descr);
+ kfree(descr);
+ return error;
+}
+
+/* Check for any obvious conflicts with this shared/CoW staging extent. */
+STATIC int
+xrep_rtrefc_check_ext(
+ struct xfs_scrub *sc,
+ const struct xfs_refcount_irec *rec)
+{
+ xfs_rgblock_t last;
+
+ if (xfs_rtrefcount_check_irec(sc->sr.rtg, rec) != NULL)
+ return -EFSCORRUPTED;
+
+ if (xfs_rgbno_to_rtxoff(sc->mp, rec->rc_startblock) != 0)
+ return -EFSCORRUPTED;
+
+ last = rec->rc_startblock + rec->rc_blockcount - 1;
+ if (xfs_rgbno_to_rtxoff(sc->mp, last) != sc->mp->m_sb.sb_rextsize - 1)
+ return -EFSCORRUPTED;
+
+ /* Make sure this isn't free space or misaligned. */
+ return xrep_require_rtext_inuse(sc, rec->rc_startblock,
+ rec->rc_blockcount);
+}
+
+/* Record a reference count extent. */
+STATIC int
+xrep_rtrefc_stash(
+ struct xrep_rtrefc *rr,
+ enum xfs_refc_domain domain,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len,
+ uint64_t refcount)
+{
+ struct xfs_refcount_irec irec = {
+ .rc_startblock = bno,
+ .rc_blockcount = len,
+ .rc_refcount = refcount,
+ .rc_domain = domain,
+ };
+ int error = 0;
+
+ if (xchk_should_terminate(rr->sc, &error))
+ return error;
+
+ irec.rc_refcount = min_t(uint64_t, XFS_REFC_REFCOUNT_MAX, refcount);
+
+ error = xrep_rtrefc_check_ext(rr->sc, &irec);
+ if (error)
+ return error;
+
+ trace_xrep_refc_found(rtg_group(rr->sc->sr.rtg), &irec);
+
+ return xfarray_append(rr->refcount_records, &irec);
+}
+
+/* Record a CoW staging extent. */
+STATIC int
+xrep_rtrefc_stash_cow(
+ struct xrep_rtrefc *rr,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ return xrep_rtrefc_stash(rr, XFS_REFC_DOMAIN_COW, bno, len, 1);
+}
+
+/* Decide if an rmap could describe a shared extent. */
+static inline bool
+xrep_rtrefc_rmap_shareable(
+ const struct xfs_rmap_irec *rmap)
+{
+ /* Realtime metadata is never shareable. */
+ if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
+ return false;
+
+ /* Unwritten file blocks are not shareable. */
+ if (rmap->rm_flags & XFS_RMAP_UNWRITTEN)
+ return false;
+
+ return true;
+}
+
+/* Grab the next (abbreviated) rmap record from the rmapbt. */
+STATIC int
+xrep_rtrefc_walk_rmaps(
+ struct xrep_rtrefc *rr,
+ struct xfs_rmap_irec *rmap,
+ bool *have_rec)
+{
+ struct xfs_btree_cur *cur = rr->sc->sr.rmap_cur;
+ struct xfs_mount *mp = cur->bc_mp;
+ int have_gt;
+ int error = 0;
+
+ *have_rec = false;
+
+ /*
+ * Loop through the remaining rmaps. Remember CoW staging
+ * extents and the refcountbt blocks from the old tree for later
+ * disposal. We can only share written data fork extents, so
+ * keep looping until we find an rmap for one.
+ */
+ do {
+ if (xchk_should_terminate(rr->sc, &error))
+ return error;
+
+ error = xfs_btree_increment(cur, 0, &have_gt);
+ if (error)
+ return error;
+ if (!have_gt)
+ return 0;
+
+ error = xfs_rmap_get_rec(cur, rmap, &have_gt);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(mp, !have_gt)) {
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+ }
+
+ if (rmap->rm_owner == XFS_RMAP_OWN_COW) {
+ error = xrep_rtrefc_stash_cow(rr, rmap->rm_startblock,
+ rmap->rm_blockcount);
+ if (error)
+ return error;
+ } else if (xfs_is_sb_inum(mp, rmap->rm_owner) ||
+ (rmap->rm_flags & (XFS_RMAP_ATTR_FORK |
+ XFS_RMAP_BMBT_BLOCK))) {
+ xfs_btree_mark_sick(cur);
+ return -EFSCORRUPTED;
+ }
+ } while (!xrep_rtrefc_rmap_shareable(rmap));
+
+ *have_rec = true;
+ return 0;
+}
+
+static inline uint32_t
+xrep_rtrefc_encode_startblock(
+ const struct xfs_refcount_irec *irec)
+{
+ uint32_t start;
+
+ start = irec->rc_startblock & ~XFS_REFC_COWFLAG;
+ if (irec->rc_domain == XFS_REFC_DOMAIN_COW)
+ start |= XFS_REFC_COWFLAG;
+
+ return start;
+}
+
+/*
+ * Compare two refcount records. We want to sort in order of increasing block
+ * number.
+ */
+static int
+xrep_rtrefc_extent_cmp(
+ const void *a,
+ const void *b)
+{
+ const struct xfs_refcount_irec *ap = a;
+ const struct xfs_refcount_irec *bp = b;
+ uint32_t sa, sb;
+
+ sa = xrep_rtrefc_encode_startblock(ap);
+ sb = xrep_rtrefc_encode_startblock(bp);
+
+ if (sa > sb)
+ return 1;
+ if (sa < sb)
+ return -1;
+ return 0;
+}
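
Because xrep_rtrefc_encode_startblock() sets XFS_REFC_COWFLAG in the sort key for CoW records, and that flag occupies the high bit of the 32-bit start block, every CoW record compares greater than every shared record regardless of its actual block number. A small hedged illustration follows; the flag value below is my reading of the on-disk definition, not a quote of the header.

#include <stdint.h>
#include <stdio.h>

/* Assumption: mirrors XFS_REFC_COWFLAG, the high bit of rc_startblock. */
#define COWFLAG		(1u << 31)

static uint32_t encode(uint32_t startblock, int is_cow)
{
	uint32_t key = startblock & ~COWFLAG;

	if (is_cow)
		key |= COWFLAG;
	return key;
}

int main(void)
{
	/* A CoW record at block 10 still sorts after a shared record at 500. */
	printf("shared@500 -> %u\n", encode(500, 0));	/* 500 */
	printf("cow@10     -> %u\n", encode(10, 1));	/* 2147483658 */
	return 0;
}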
+
+/*
+ * Sort the refcount extents by startblock or else the btree records will be in
+ * the wrong order. Make sure the records do not overlap in physical space.
+ */
+STATIC int
+xrep_rtrefc_sort_records(
+ struct xrep_rtrefc *rr)
+{
+ struct xfs_refcount_irec irec;
+ xfarray_idx_t cur;
+ enum xfs_refc_domain dom = XFS_REFC_DOMAIN_SHARED;
+ xfs_rgblock_t next_rgbno = 0;
+ int error;
+
+ error = xfarray_sort(rr->refcount_records, xrep_rtrefc_extent_cmp,
+ XFARRAY_SORT_KILLABLE);
+ if (error)
+ return error;
+
+ foreach_xfarray_idx(rr->refcount_records, cur) {
+ if (xchk_should_terminate(rr->sc, &error))
+ return error;
+
+ error = xfarray_load(rr->refcount_records, cur, &irec);
+ if (error)
+ return error;
+
+ if (dom == XFS_REFC_DOMAIN_SHARED &&
+ irec.rc_domain == XFS_REFC_DOMAIN_COW) {
+ dom = irec.rc_domain;
+ next_rgbno = 0;
+ }
+
+ if (dom != irec.rc_domain)
+ return -EFSCORRUPTED;
+ if (irec.rc_startblock < next_rgbno)
+ return -EFSCORRUPTED;
+
+ next_rgbno = irec.rc_startblock + irec.rc_blockcount;
+ }
+
+ return error;
+}
+
+/* Record extents that belong to the realtime refcount inode. */
+STATIC int
+xrep_rtrefc_walk_rmap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_rtrefc *rr = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(rr->sc, &error))
+ return error;
+
+ /* Skip extents which are not owned by this inode and fork. */
+ if (rec->rm_owner != rr->sc->ip->i_ino)
+ return 0;
+
+ error = xrep_check_ino_btree_mapping(rr->sc, rec);
+ if (error)
+ return error;
+
+ return xfsb_bitmap_set(&rr->old_rtrefcountbt_blocks,
+ xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock),
+ rec->rm_blockcount);
+}
+
+/*
+ * Walk forward through the rmap btree to collect all rmaps starting at
+ * @bno in @rcstack. These represent the file(s) that share ownership of
+ * the current block. Upon return, the rmap cursor points to the last record
+ * satisfying the startblock constraint.
+ */
+static int
+xrep_rtrefc_push_rmaps_at(
+ struct xrep_rtrefc *rr,
+ struct rcbag *rcstack,
+ xfs_rgblock_t bno,
+ struct xfs_rmap_irec *rmap,
+ bool *have)
+{
+ struct xfs_scrub *sc = rr->sc;
+ int have_gt;
+ int error;
+
+ while (*have && rmap->rm_startblock == bno) {
+ error = rcbag_add(rcstack, rr->sc->tp, rmap);
+ if (error)
+ return error;
+
+ error = xrep_rtrefc_walk_rmaps(rr, rmap, have);
+ if (error)
+ return error;
+ }
+
+ error = xfs_btree_decrement(sc->sr.rmap_cur, 0, &have_gt);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(sc->mp, !have_gt)) {
+ xfs_btree_mark_sick(sc->sr.rmap_cur);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
+}
+
+/* Scan one AG for reverse mappings for the realtime refcount btree. */
+STATIC int
+xrep_rtrefc_scan_ag(
+ struct xrep_rtrefc *rr,
+ struct xfs_perag *pag)
+{
+ struct xfs_scrub *sc = rr->sc;
+ int error;
+
+ error = xrep_ag_init(sc, pag, &sc->sa);
+ if (error)
+ return error;
+
+ error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_rtrefc_walk_rmap, rr);
+ xchk_ag_free(sc, &sc->sa);
+ return error;
+}
+
+/* Iterate all the rmap records to generate reference count data. */
+STATIC int
+xrep_rtrefc_find_refcounts(
+ struct xrep_rtrefc *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+ struct rcbag *rcstack;
+ struct xfs_perag *pag = NULL;
+ uint64_t old_stack_height;
+ xfs_rgblock_t sbno;
+ xfs_rgblock_t cbno;
+ xfs_rgblock_t nbno;
+ bool have;
+ int error;
+
+ /* Scan for old rtrefc btree blocks. */
+ while ((pag = xfs_perag_next(sc->mp, pag))) {
+ error = xrep_rtrefc_scan_ag(rr, pag);
+ if (error) {
+ xfs_perag_rele(pag);
+ return error;
+ }
+ }
+
+ xrep_rtgroup_btcur_init(sc, &sc->sr);
+
+ /*
+ * Set up a bag to store all the rmap records that we're tracking to
+ * generate a reference count record. If this exceeds
+ * XFS_REFC_REFCOUNT_MAX, we clamp rc_refcount.
+ */
+ error = rcbag_init(sc->mp, sc->xmbtp, &rcstack);
+ if (error)
+ goto out_cur;
+
+ /* Start the rtrmapbt cursor to the left of all records. */
+ error = xfs_btree_goto_left_edge(sc->sr.rmap_cur);
+ if (error)
+ goto out_bag;
+
+ /* Process reverse mappings into refcount data. */
+ while (xfs_btree_has_more_records(sc->sr.rmap_cur)) {
+ struct xfs_rmap_irec rmap;
+
+ /* Push all rmaps with pblk == sbno onto the stack */
+ error = xrep_rtrefc_walk_rmaps(rr, &rmap, &have);
+ if (error)
+ goto out_bag;
+ if (!have)
+ break;
+ sbno = cbno = rmap.rm_startblock;
+ error = xrep_rtrefc_push_rmaps_at(rr, rcstack, sbno, &rmap,
+ &have);
+ if (error)
+ goto out_bag;
+
+ /* Set nbno to the bno of the next refcount change */
+ error = rcbag_next_edge(rcstack, sc->tp, &rmap, have, &nbno);
+ if (error)
+ goto out_bag;
+
+ ASSERT(nbno > sbno);
+ old_stack_height = rcbag_count(rcstack);
+
+ /* While stack isn't empty... */
+ while (rcbag_count(rcstack) > 0) {
+ /* Pop all rmaps that end at nbno */
+ error = rcbag_remove_ending_at(rcstack, sc->tp, nbno);
+ if (error)
+ goto out_bag;
+
+ /* Push array items that start at nbno */
+ error = xrep_rtrefc_walk_rmaps(rr, &rmap, &have);
+ if (error)
+ goto out_bag;
+ if (have) {
+ error = xrep_rtrefc_push_rmaps_at(rr, rcstack,
+ nbno, &rmap, &have);
+ if (error)
+ goto out_bag;
+ }
+
+ /* Emit refcount if necessary */
+ ASSERT(nbno > cbno);
+ if (rcbag_count(rcstack) != old_stack_height) {
+ if (old_stack_height > 1) {
+ error = xrep_rtrefc_stash(rr,
+ XFS_REFC_DOMAIN_SHARED,
+ cbno, nbno - cbno,
+ old_stack_height);
+ if (error)
+ goto out_bag;
+ }
+ cbno = nbno;
+ }
+
+ /* Stack empty, go find the next rmap */
+ if (rcbag_count(rcstack) == 0)
+ break;
+ old_stack_height = rcbag_count(rcstack);
+ sbno = nbno;
+
+ /* Set nbno to the bno of the next refcount change */
+ error = rcbag_next_edge(rcstack, sc->tp, &rmap, have,
+ &nbno);
+ if (error)
+ goto out_bag;
+
+ ASSERT(nbno > sbno);
+ }
+ }
+
+ ASSERT(rcbag_count(rcstack) == 0);
+out_bag:
+ rcbag_free(&rcstack);
+out_cur:
+ xchk_rtgroup_btcur_free(&sc->sr);
+ return error;
+}
+
+/* Retrieve refcountbt data for bulk load. */
+STATIC int
+xrep_rtrefc_get_records(
+ struct xfs_btree_cur *cur,
+ unsigned int idx,
+ struct xfs_btree_block *block,
+ unsigned int nr_wanted,
+ void *priv)
+{
+ struct xrep_rtrefc *rr = priv;
+ union xfs_btree_rec *block_rec;
+ unsigned int loaded;
+ int error;
+
+ for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
+ error = xfarray_load(rr->refcount_records, rr->array_cur++,
+ &cur->bc_rec.rc);
+ if (error)
+ return error;
+
+ block_rec = xfs_btree_rec_addr(cur, idx, block);
+ cur->bc_ops->init_rec_from_cur(cur, block_rec);
+ }
+
+ return loaded;
+}
+
+/* Feed one of the new btree blocks to the bulk loader. */
+STATIC int
+xrep_rtrefc_claim_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ void *priv)
+{
+ struct xrep_rtrefc *rr = priv;
+
+ return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
+}
+
+/* Figure out how much space we need to create the incore btree root block. */
+STATIC size_t
+xrep_rtrefc_iroot_size(
+ struct xfs_btree_cur *cur,
+ unsigned int level,
+ unsigned int nr_this_level,
+ void *priv)
+{
+ return xfs_rtrefcount_broot_space_calc(cur->bc_mp, level,
+ nr_this_level);
+}
+
+/*
+ * Use the collected refcount information to stage a new rt refcount btree. If
+ * this is successful we'll return with the new btree root information logged
+ * to the repair transaction but not yet committed.
+ */
+STATIC int
+xrep_rtrefc_build_new_tree(
+ struct xrep_rtrefc *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+ struct xfs_rtgroup *rtg = sc->sr.rtg;
+ struct xfs_btree_cur *refc_cur;
+ int error;
+
+ error = xrep_rtrefc_sort_records(rr);
+ if (error)
+ return error;
+
+ /*
+ * Prepare to construct the new btree by reserving disk space for the
+ * new btree and setting up all the accounting information we'll need
+ * to root the new btree while it's under construction and before we
+ * attach it to the realtime refcount inode.
+ */
+ error = xrep_newbt_init_metadir_inode(&rr->new_btree, sc);
+ if (error)
+ return error;
+
+ rr->new_btree.bload.get_records = xrep_rtrefc_get_records;
+ rr->new_btree.bload.claim_block = xrep_rtrefc_claim_block;
+ rr->new_btree.bload.iroot_size = xrep_rtrefc_iroot_size;
+
+ refc_cur = xfs_rtrefcountbt_init_cursor(NULL, rtg);
+ xfs_btree_stage_ifakeroot(refc_cur, &rr->new_btree.ifake);
+
+ /* Compute how many blocks we'll need. */
+ error = xfs_btree_bload_compute_geometry(refc_cur, &rr->new_btree.bload,
+ xfarray_length(rr->refcount_records));
+ if (error)
+ goto err_cur;
+
+ /* Last chance to abort before we start committing fixes. */
+ if (xchk_should_terminate(sc, &error))
+ goto err_cur;
+
+ /*
+ * Guess how many blocks we're going to need to rebuild an entire
+ * rtrefcountbt from the number of extents we found, and pump up our
+ * transaction to have sufficient block reservation. We're allowed
+ * to exceed quota to repair inconsistent metadata, though this is
+ * unlikely.
+ */
+ error = xfs_trans_reserve_more_inode(sc->tp, rtg_refcount(rtg),
+ rr->new_btree.bload.nr_blocks, 0, true);
+ if (error)
+ goto err_cur;
+
+ /* Reserve the space we'll need for the new btree. */
+ error = xrep_newbt_alloc_blocks(&rr->new_btree,
+ rr->new_btree.bload.nr_blocks);
+ if (error)
+ goto err_cur;
+
+ /* Add all observed refcount records. */
+ rr->new_btree.ifake.if_fork->if_format = XFS_DINODE_FMT_META_BTREE;
+ rr->array_cur = XFARRAY_CURSOR_INIT;
+ error = xfs_btree_bload(refc_cur, &rr->new_btree.bload, rr);
+ if (error)
+ goto err_cur;
+
+ /*
+ * Install the new rtrefc btree in the inode. After this point the old
+ * btree is no longer accessible, the new tree is live, and we can
+ * delete the cursor.
+ */
+ xfs_rtrefcountbt_commit_staged_btree(refc_cur, sc->tp);
+ xrep_inode_set_nblocks(rr->sc, rr->new_btree.ifake.if_blocks);
+ xfs_btree_del_cursor(refc_cur, 0);
+
+ /* Dispose of any unused blocks and the accounting information. */
+ error = xrep_newbt_commit(&rr->new_btree);
+ if (error)
+ return error;
+
+ return xrep_roll_trans(sc);
+err_cur:
+ xfs_btree_del_cursor(refc_cur, error);
+ xrep_newbt_cancel(&rr->new_btree);
+ return error;
+}
+
+/*
+ * Now that we've logged the roots of the new btrees, invalidate all of the
+ * old blocks and free them.
+ */
+STATIC int
+xrep_rtrefc_remove_old_tree(
+ struct xrep_rtrefc *rr)
+{
+ int error;
+
+ /*
+ * Free all the extents that were allocated to the former rtrefcountbt
+ * and aren't cross-linked with something else.
+ */
+ error = xrep_reap_metadir_fsblocks(rr->sc,
+ &rr->old_rtrefcountbt_blocks);
+ if (error)
+ return error;
+
+ /*
+ * Ensure the proper reservation for the rtrefcount inode so that we
+ * don't fail to expand the btree.
+ */
+ return xrep_reset_metafile_resv(rr->sc);
+}
+
+/* Rebuild the rt refcount btree. */
+int
+xrep_rtrefcountbt(
+ struct xfs_scrub *sc)
+{
+ struct xrep_rtrefc *rr;
+ struct xfs_mount *mp = sc->mp;
+ char *descr;
+ int error;
+
+ /* We require the realtime rmapbt to rebuild anything. */
+ if (!xfs_has_rtrmapbt(mp))
+ return -EOPNOTSUPP;
+
+ /* Make sure any problems with the fork are fixed. */
+ error = xrep_metadata_inode_forks(sc);
+ if (error)
+ return error;
+
+ rr = kzalloc(sizeof(struct xrep_rtrefc), XCHK_GFP_FLAGS);
+ if (!rr)
+ return -ENOMEM;
+ rr->sc = sc;
+
+ /* Set up enough storage to handle one refcount record per rt extent. */
+ descr = xchk_xfile_ag_descr(sc, "reference count records");
+ error = xfarray_create(descr, mp->m_sb.sb_rextents,
+ sizeof(struct xfs_refcount_irec),
+ &rr->refcount_records);
+ kfree(descr);
+ if (error)
+ goto out_rr;
+
+ /* Collect all reference counts. */
+ xfsb_bitmap_init(&rr->old_rtrefcountbt_blocks);
+ error = xrep_rtrefc_find_refcounts(rr);
+ if (error)
+ goto out_bitmap;
+
+ xfs_trans_ijoin(sc->tp, sc->ip, 0);
+
+ /* Rebuild the refcount information. */
+ error = xrep_rtrefc_build_new_tree(rr);
+ if (error)
+ goto out_bitmap;
+
+ /* Kill the old tree. */
+ error = xrep_rtrefc_remove_old_tree(rr);
+ if (error)
+ goto out_bitmap;
+
+out_bitmap:
+ xfsb_bitmap_destroy(&rr->old_rtrefcountbt_blocks);
+ xfarray_destroy(rr->refcount_records);
+out_rr:
+ kfree(rr);
+ return error;
+}
diff --git a/fs/xfs/scrub/rtrmap.c b/fs/xfs/scrub/rtrmap.c
new file mode 100644
index 000000000000..12989fe80e8b
--- /dev/null
+++ b/fs/xfs/scrub/rtrmap.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtgroup.h"
+#include "xfs_metafile.h"
+#include "xfs_refcount.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+#include "scrub/repair.h"
+
+/* Set us up with the realtime metadata locked. */
+int
+xchk_setup_rtrmapbt(
+ struct xfs_scrub *sc)
+{
+ int error;
+
+ if (xchk_need_intent_drain(sc))
+ xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
+
+ if (xchk_could_repair(sc)) {
+ error = xrep_setup_rtrmapbt(sc);
+ if (error)
+ return error;
+ }
+
+ error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
+ if (error)
+ return error;
+
+ error = xchk_setup_rt(sc);
+ if (error)
+ return error;
+
+ error = xchk_install_live_inode(sc, rtg_rmap(sc->sr.rtg));
+ if (error)
+ return error;
+
+ return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
+}
+
+/* Realtime reverse mapping. */
+
+struct xchk_rtrmap {
+ /*
+ * The furthest-reaching of the rmapbt records that we've already
+ * processed. This enables us to detect overlapping records for space
+ * allocations that cannot be shared.
+ */
+ struct xfs_rmap_irec overlap_rec;
+
+ /*
+ * The previous rmapbt record, so that we can check for two records
+ * that could be one.
+ */
+ struct xfs_rmap_irec prev_rec;
+};
+
+static inline bool
+xchk_rtrmapbt_is_shareable(
+ struct xfs_scrub *sc,
+ const struct xfs_rmap_irec *irec)
+{
+ if (!xfs_has_rtreflink(sc->mp))
+ return false;
+ if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
+ return false;
+ return true;
+}
+
+/* Flag failures for records that overlap but cannot. */
+STATIC void
+xchk_rtrmapbt_check_overlapping(
+ struct xchk_btree *bs,
+ struct xchk_rtrmap *cr,
+ const struct xfs_rmap_irec *irec)
+{
+ xfs_rtblock_t pnext, inext;
+
+ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ /* No previous record? */
+ if (cr->overlap_rec.rm_blockcount == 0)
+ goto set_prev;
+
+ /* Do overlap_rec and irec overlap? */
+ pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
+ if (pnext <= irec->rm_startblock)
+ goto set_prev;
+
+ /* Overlap is only allowed if both records are data fork mappings. */
+ if (!xchk_rtrmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
+ !xchk_rtrmapbt_is_shareable(bs->sc, irec))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ /* Save whichever rmap record extends furthest. */
+ inext = irec->rm_startblock + irec->rm_blockcount;
+ if (pnext > inext)
+ return;
+
+set_prev:
+ memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
+}
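
The overlap tracking above only has to remember the furthest-reaching end seen so far: if the next record starts before that point, the two records overlap, which is legal only when both sides are shareable data-fork mappings. A minimal standalone sketch of that sweep (hypothetical record layout, shareability reduced to a boolean):

#include <stdbool.h>
#include <stdio.h>

struct rec { unsigned long long start, len; bool shareable; };

int main(void)
{
	/* Records must already be sorted by start block. */
	struct rec recs[] = {
		{ 0, 10, true },
		{ 5, 10, true },	/* overlaps, but both shareable: ok */
		{ 20, 4, false },
		{ 22, 4, false },	/* overlaps a non-shareable record: bad */
	};
	unsigned long long furthest_end = 0;
	bool furthest_shareable = false;

	for (int i = 0; i < 4; i++) {
		if (furthest_end > recs[i].start &&
		    !(furthest_shareable && recs[i].shareable))
			printf("record %d illegally overlaps\n", i);

		/* Remember whichever record reaches furthest. */
		if (recs[i].start + recs[i].len >= furthest_end) {
			furthest_end = recs[i].start + recs[i].len;
			furthest_shareable = recs[i].shareable;
		}
	}
	return 0;
}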
+
+/* Decide if two reverse-mapping records can be merged. */
+static inline bool
+xchk_rtrmap_mergeable(
+ struct xchk_rtrmap *cr,
+ const struct xfs_rmap_irec *r2)
+{
+ const struct xfs_rmap_irec *r1 = &cr->prev_rec;
+
+ /* Ignore if prev_rec is not yet initialized. */
+ if (cr->prev_rec.rm_blockcount == 0)
+ return false;
+
+ if (r1->rm_owner != r2->rm_owner)
+ return false;
+ if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
+ return false;
+ if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
+ XFS_RMAP_LEN_MAX)
+ return false;
+ if (r1->rm_flags != r2->rm_flags)
+ return false;
+ return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
+}
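
A pair of records is flagged as mergeable only when every field lines up: same owner, same flags, the block range of the second begins exactly where the first ends, and likewise for the file offset range (the length-overflow check is omitted here). A small illustration of that contiguity test with a hypothetical record layout:

#include <stdbool.h>
#include <stdio.h>

struct rmap { unsigned long long start, len, owner, offset; unsigned int flags; };

/* Sketch of the "could these two have been one record?" test. */
static bool mergeable(const struct rmap *a, const struct rmap *b)
{
	return a->owner == b->owner &&
	       a->flags == b->flags &&
	       a->start + a->len == b->start &&
	       a->offset + a->len == b->offset;
}

int main(void)
{
	struct rmap a = { .start = 100, .len = 8, .owner = 131, .offset = 0 };
	struct rmap b = { .start = 108, .len = 4, .owner = 131, .offset = 8 };

	/* Adjacent on disk and in the file: these should have been one record. */
	printf("%s\n", mergeable(&a, &b) ? "mergeable (corrupt)" : "ok");
	return 0;
}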
+
+/* Flag failures for records that could be merged. */
+STATIC void
+xchk_rtrmapbt_check_mergeable(
+ struct xchk_btree *bs,
+ struct xchk_rtrmap *cr,
+ const struct xfs_rmap_irec *irec)
+{
+ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ if (xchk_rtrmap_mergeable(cr, irec))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
+}
+
+/* Cross-reference a rmap against the refcount btree. */
+STATIC void
+xchk_rtrmapbt_xref_rtrefc(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
+{
+ xfs_rgblock_t fbno;
+ xfs_extlen_t flen;
+ bool is_inode;
+ bool is_bmbt;
+ bool is_attr;
+ bool is_unwritten;
+ int error;
+
+ if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
+ is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
+ is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
+ is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
+
+ /* If this is shared, must be a data fork extent. */
+ error = xfs_refcount_find_shared(sc->sr.refc_cur, irec->rm_startblock,
+ irec->rm_blockcount, &fbno, &flen, false);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
+ return;
+ if (flen != 0 && (!is_inode || is_attr || is_bmbt || is_unwritten))
+ xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
+}
+
+/* Cross-reference with other metadata. */
+STATIC void
+xchk_rtrmapbt_xref(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xchk_xref_is_used_rt_space(sc,
+ xfs_rgbno_to_rtb(sc->sr.rtg, irec->rm_startblock),
+ irec->rm_blockcount);
+ if (irec->rm_owner == XFS_RMAP_OWN_COW)
+ xchk_xref_is_cow_staging(sc, irec->rm_startblock,
+ irec->rm_blockcount);
+ else
+ xchk_rtrmapbt_xref_rtrefc(sc, irec);
+}
+
+/* Scrub a realtime rmapbt record. */
+STATIC int
+xchk_rtrmapbt_rec(
+ struct xchk_btree *bs,
+ const union xfs_btree_rec *rec)
+{
+ struct xchk_rtrmap *cr = bs->private;
+ struct xfs_rmap_irec irec;
+
+ if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
+ xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return 0;
+ }
+
+ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return 0;
+
+ xchk_rtrmapbt_check_mergeable(bs, cr, &irec);
+ xchk_rtrmapbt_check_overlapping(bs, cr, &irec);
+ xchk_rtrmapbt_xref(bs->sc, &irec);
+ return 0;
+}
+
+/* Scrub the realtime rmap btree. */
+int
+xchk_rtrmapbt(
+ struct xfs_scrub *sc)
+{
+ struct xfs_inode *ip = rtg_rmap(sc->sr.rtg);
+ struct xfs_owner_info oinfo;
+ struct xchk_rtrmap cr = { };
+ int error;
+
+ error = xchk_metadata_inode_forks(sc);
+ if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ return error;
+
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, XFS_DATA_FORK);
+ return xchk_btree(sc, sc->sr.rmap_cur, xchk_rtrmapbt_rec, &oinfo, &cr);
+}
+
+/* xref check that the extent has no realtime reverse mapping at all */
+void
+xchk_xref_has_no_rt_owner(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
+ return;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+}
+
+/* xref check that the extent is completely mapped */
+void
+xchk_xref_has_rt_owner(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len)
+{
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
+ return;
+ if (outcome != XBTREE_RECPACKING_FULL)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+}
+
+/* xref check that the extent is only owned by a given owner */
+void
+xchk_xref_is_only_rt_owned_by(
+ struct xfs_scrub *sc,
+ xfs_rgblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo)
+{
+ struct xfs_rmap_matches res;
+ int error;
+
+ if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_rmap_count_owners(sc->sr.rmap_cur, bno, len, oinfo, &res);
+ if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
+ return;
+ if (res.matches != 1)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+ if (res.bad_non_owner_matches)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+ if (res.non_owner_matches)
+ xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
+}
diff --git a/fs/xfs/scrub/rtrmap_repair.c b/fs/xfs/scrub/rtrmap_repair.c
new file mode 100644
index 000000000000..f2fdd7a9fc24
--- /dev/null
+++ b/fs/xfs/scrub/rtrmap_repair.c
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2020-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
+#include "xfs_buf_mem.h"
+#include "xfs_btree_mem.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_quota.h"
+#include "xfs_rtalloc.h"
+#include "xfs_ag.h"
+#include "xfs_rtgroup.h"
+#include "xfs_refcount.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
+#include "scrub/fsb_bitmap.h"
+#include "scrub/rgb_bitmap.h"
+#include "scrub/xfile.h"
+#include "scrub/xfarray.h"
+#include "scrub/iscan.h"
+#include "scrub/newbt.h"
+#include "scrub/reap.h"
+
+/*
+ * Realtime Reverse Mapping Btree Repair
+ * =====================================
+ *
+ * This isn't quite as difficult as repairing the rmap btree on the data
+ * device, since we only store the data fork extents of realtime files on the
+ * realtime device. We still have to freeze the filesystem and stop the
+ * background threads like we do for the rmap repair, but we only have to scan
+ * realtime inodes.
+ *
+ * Collecting entries for the new realtime rmap btree is easy -- all we have
+ * to do is generate rtrmap entries from the data fork mappings of all realtime
+ * files in the filesystem. We then scan the rmap btrees of the data device
+ * looking for extents belonging to the old btree and note them in a bitmap.
+ *
+ * To rebuild the realtime rmap btree, we bulk-load the collected mappings into
+ * a new btree cursor and atomically swap that into the realtime inode. Then
+ * we can free the blocks from the old btree.
+ *
+ * We use the 'xrep_rtrmap' prefix for all the rmap functions.
+ */
+
+/* Context for collecting rmaps */
+struct xrep_rtrmap {
+ /* new rtrmapbt information */
+ struct xrep_newbt new_btree;
+
+ /* lock for the xfbtree and xfile */
+ struct mutex lock;
+
+ /* rmap records generated from primary metadata */
+ struct xfbtree rtrmap_btree;
+
+ struct xfs_scrub *sc;
+
+ /* bitmap of old rtrmapbt blocks */
+ struct xfsb_bitmap old_rtrmapbt_blocks;
+
+ /* Hooks into rtrmap update code. */
+ struct xfs_rmap_hook rhook;
+
+ /* inode scan cursor */
+ struct xchk_iscan iscan;
+
+ /* in-memory btree cursor for the ->get_blocks walk */
+ struct xfs_btree_cur *mcur;
+
+ /* Number of records we're staging in the new btree. */
+ uint64_t nr_records;
+};
+
+/* Set us up to repair rt reverse mapping btrees. */
+int
+xrep_setup_rtrmapbt(
+ struct xfs_scrub *sc)
+{
+ struct xrep_rtrmap *rr;
+ char *descr;
+ int error;
+
+ xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);
+
+ descr = xchk_xfile_rtgroup_descr(sc, "reverse mapping records");
+ error = xrep_setup_xfbtree(sc, descr);
+ kfree(descr);
+ if (error)
+ return error;
+
+ rr = kzalloc(sizeof(struct xrep_rtrmap), XCHK_GFP_FLAGS);
+ if (!rr)
+ return -ENOMEM;
+
+ rr->sc = sc;
+ sc->buf = rr;
+ return 0;
+}
+
+/* Make sure there's nothing funny about this mapping. */
+STATIC int
+xrep_rtrmap_check_mapping(
+ struct xfs_scrub *sc,
+ const struct xfs_rmap_irec *rec)
+{
+ if (xfs_rtrmap_check_irec(sc->sr.rtg, rec) != NULL)
+ return -EFSCORRUPTED;
+
+ /* Make sure this isn't free space. */
+ return xrep_require_rtext_inuse(sc, rec->rm_startblock,
+ rec->rm_blockcount);
+}
+
+/* Store a reverse-mapping record. */
+static inline int
+xrep_rtrmap_stash(
+ struct xrep_rtrmap *rr,
+ xfs_rgblock_t startblock,
+ xfs_extlen_t blockcount,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags)
+{
+ struct xfs_rmap_irec rmap = {
+ .rm_startblock = startblock,
+ .rm_blockcount = blockcount,
+ .rm_owner = owner,
+ .rm_offset = offset,
+ .rm_flags = flags,
+ };
+ struct xfs_scrub *sc = rr->sc;
+ struct xfs_btree_cur *mcur;
+ int error = 0;
+
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ if (xchk_iscan_aborted(&rr->iscan))
+ return -EFSCORRUPTED;
+
+ trace_xrep_rtrmap_found(sc->mp, &rmap);
+
+ /* Add entry to in-memory btree. */
+ mutex_lock(&rr->lock);
+ mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, sc->tp, &rr->rtrmap_btree);
+ error = xfs_rmap_map_raw(mcur, &rmap);
+ xfs_btree_del_cursor(mcur, error);
+ if (error)
+ goto out_cancel;
+
+ error = xfbtree_trans_commit(&rr->rtrmap_btree, sc->tp);
+ if (error)
+ goto out_abort;
+
+ mutex_unlock(&rr->lock);
+ return 0;
+
+out_cancel:
+ xfbtree_trans_cancel(&rr->rtrmap_btree, sc->tp);
+out_abort:
+ xchk_iscan_abort(&rr->iscan);
+ mutex_unlock(&rr->lock);
+ return error;
+}
+
+/* Finding all file and bmbt extents. */
+
+/* Context for accumulating rmaps for an inode fork. */
+struct xrep_rtrmap_ifork {
+ /*
+ * Accumulate rmap data here to turn multiple adjacent bmaps into a
+ * single rmap.
+ */
+ struct xfs_rmap_irec accum;
+
+ struct xrep_rtrmap *rr;
+};
+
+/* Stash an rmap that we accumulated while walking an inode fork. */
+STATIC int
+xrep_rtrmap_stash_accumulated(
+ struct xrep_rtrmap_ifork *rf)
+{
+ if (rf->accum.rm_blockcount == 0)
+ return 0;
+
+ return xrep_rtrmap_stash(rf->rr, rf->accum.rm_startblock,
+ rf->accum.rm_blockcount, rf->accum.rm_owner,
+ rf->accum.rm_offset, rf->accum.rm_flags);
+}
+
+/* Accumulate a bmbt record. */
+STATIC int
+xrep_rtrmap_visit_bmbt(
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *rec,
+ void *priv)
+{
+ struct xrep_rtrmap_ifork *rf = priv;
+ struct xfs_rmap_irec *accum = &rf->accum;
+ struct xfs_mount *mp = rf->rr->sc->mp;
+ xfs_rgblock_t rgbno;
+ unsigned int rmap_flags = 0;
+ int error;
+
+ if (xfs_rtb_to_rgno(mp, rec->br_startblock) !=
+ rtg_rgno(rf->rr->sc->sr.rtg))
+ return 0;
+
+ if (rec->br_state == XFS_EXT_UNWRITTEN)
+ rmap_flags |= XFS_RMAP_UNWRITTEN;
+
+ /* If this bmap is adjacent to the previous one, just add it. */
+ rgbno = xfs_rtb_to_rgbno(mp, rec->br_startblock);
+ if (accum->rm_blockcount > 0 &&
+ rec->br_startoff == accum->rm_offset + accum->rm_blockcount &&
+ rgbno == accum->rm_startblock + accum->rm_blockcount &&
+ rmap_flags == accum->rm_flags) {
+ accum->rm_blockcount += rec->br_blockcount;
+ return 0;
+ }
+
+ /* Otherwise stash the old rmap and start accumulating a new one. */
+ error = xrep_rtrmap_stash_accumulated(rf);
+ if (error)
+ return error;
+
+ accum->rm_startblock = rgbno;
+ accum->rm_blockcount = rec->br_blockcount;
+ accum->rm_offset = rec->br_startoff;
+ accum->rm_flags = rmap_flags;
+ return 0;
+}
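
The accumulation above turns a run of adjacent block mappings into a single rmap record: while each new mapping continues exactly where the previous one stopped (in file offset, in rt blocks, and with the same flags) the accumulator just grows; any discontinuity flushes the accumulated rmap and starts a new one. A compact standalone sketch of that pattern (hypothetical types, single owner, flags omitted):

#include <stdio.h>

struct map { unsigned long long off, block, len; };

int main(void)
{
	/* Data fork mappings in file-offset order. */
	struct map maps[] = {
		{ 0, 200, 16 },
		{ 16, 216, 8 },	/* contiguous with the previous mapping */
		{ 24, 500, 4 },	/* jump on disk: starts a new rmap */
	};
	struct map acc = { 0, 0, 0 };

	for (int i = 0; i < 3; i++) {
		if (acc.len &&
		    maps[i].off == acc.off + acc.len &&
		    maps[i].block == acc.block + acc.len) {
			acc.len += maps[i].len;	/* extend the accumulator */
			continue;
		}
		if (acc.len)
			printf("rmap: block %llu len %llu offset %llu\n",
			       acc.block, acc.len, acc.off);
		acc = maps[i];			/* start accumulating anew */
	}
	if (acc.len)
		printf("rmap: block %llu len %llu offset %llu\n",
		       acc.block, acc.len, acc.off);
	return 0;
}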
+
+/*
+ * Iterate the block mapping btree to collect rmap records for anything in this
+ * fork that maps to the rt volume. Sets @mappings_done to true if we've
+ * scanned the block mappings in this fork.
+ */
+STATIC int
+xrep_rtrmap_scan_bmbt(
+ struct xrep_rtrmap_ifork *rf,
+ struct xfs_inode *ip,
+ bool *mappings_done)
+{
+ struct xrep_rtrmap *rr = rf->rr;
+ struct xfs_btree_cur *cur;
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ int error = 0;
+
+ *mappings_done = false;
+
+ /*
+ * If the incore extent cache is already loaded, we'll just use the
+ * incore extent scanner to record mappings. Don't bother walking the
+ * ondisk extent tree.
+ */
+ if (!xfs_need_iread_extents(ifp))
+ return 0;
+
+ /* Accumulate all the mappings in the bmap btree. */
+ cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, XFS_DATA_FORK);
+ error = xfs_bmap_query_all(cur, xrep_rtrmap_visit_bmbt, rf);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ return error;
+
+ /* Stash any remaining accumulated rmaps and exit. */
+ *mappings_done = true;
+ return xrep_rtrmap_stash_accumulated(rf);
+}
+
+/*
+ * Iterate the in-core extent cache to collect rmap records for anything in
+ * this fork that maps to this realtime group.
+ */
+STATIC int
+xrep_rtrmap_scan_iext(
+ struct xrep_rtrmap_ifork *rf,
+ struct xfs_ifork *ifp)
+{
+ struct xfs_bmbt_irec rec;
+ struct xfs_iext_cursor icur;
+ int error;
+
+ for_each_xfs_iext(ifp, &icur, &rec) {
+ if (isnullstartblock(rec.br_startblock))
+ continue;
+ error = xrep_rtrmap_visit_bmbt(NULL, &rec, rf);
+ if (error)
+ return error;
+ }
+
+ return xrep_rtrmap_stash_accumulated(rf);
+}
+
+/* Find all the extents on the realtime device mapped by an inode fork. */
+STATIC int
+xrep_rtrmap_scan_dfork(
+ struct xrep_rtrmap *rr,
+ struct xfs_inode *ip)
+{
+ struct xrep_rtrmap_ifork rf = {
+ .accum = { .rm_owner = ip->i_ino, },
+ .rr = rr,
+ };
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ int error = 0;
+
+ if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
+ bool mappings_done;
+
+ /*
+ * Scan the bmbt for mappings. If the incore extent tree is
+ * loaded, we want to scan the cached mappings since that's
+ * faster when the extent counts are very high.
+ */
+ error = xrep_rtrmap_scan_bmbt(&rf, ip, &mappings_done);
+ if (error || mappings_done)
+ return error;
+ } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
+ /* realtime data forks should only be extents or btree */
+ return -EFSCORRUPTED;
+ }
+
+ /* Scan incore extent cache. */
+ return xrep_rtrmap_scan_iext(&rf, ifp);
+}
+
+/* Record reverse mappings for a file. */
+STATIC int
+xrep_rtrmap_scan_inode(
+ struct xrep_rtrmap *rr,
+ struct xfs_inode *ip)
+{
+ unsigned int lock_mode;
+ int error = 0;
+
+ /* Skip the rt rmap btree inode. */
+ if (rr->sc->ip == ip)
+ return 0;
+
+ lock_mode = xfs_ilock_data_map_shared(ip);
+
+ /* Check the data fork if it's on the realtime device. */
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ error = xrep_rtrmap_scan_dfork(rr, ip);
+ if (error)
+ goto out_unlock;
+ }
+
+ xchk_iscan_mark_visited(&rr->iscan, ip);
+out_unlock:
+ xfs_iunlock(ip, lock_mode);
+ return error;
+}
+
+/* Record extents that belong to the realtime rmap inode. */
+STATIC int
+xrep_rtrmap_walk_rmap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_rtrmap *rr = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(rr->sc, &error))
+ return error;
+
+ /* Skip extents which are not owned by this inode and fork. */
+ if (rec->rm_owner != rr->sc->ip->i_ino)
+ return 0;
+
+ error = xrep_check_ino_btree_mapping(rr->sc, rec);
+ if (error)
+ return error;
+
+ return xfsb_bitmap_set(&rr->old_rtrmapbt_blocks,
+ xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock),
+ rec->rm_blockcount);
+}
+
+/* Scan one AG for reverse mappings for the realtime rmap btree. */
+STATIC int
+xrep_rtrmap_scan_ag(
+ struct xrep_rtrmap *rr,
+ struct xfs_perag *pag)
+{
+ struct xfs_scrub *sc = rr->sc;
+ int error;
+
+ error = xrep_ag_init(sc, pag, &sc->sa);
+ if (error)
+ return error;
+
+ error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_rtrmap_walk_rmap, rr);
+ xchk_ag_free(sc, &sc->sa);
+ return error;
+}
+
+struct xrep_rtrmap_stash_run {
+ struct xrep_rtrmap *rr;
+ uint64_t owner;
+};
+
+static int
+xrep_rtrmap_stash_run(
+ uint32_t start,
+ uint32_t len,
+ void *priv)
+{
+ struct xrep_rtrmap_stash_run *rsr = priv;
+ struct xrep_rtrmap *rr = rsr->rr;
+ xfs_rgblock_t rgbno = start;
+
+ return xrep_rtrmap_stash(rr, rgbno, len, rsr->owner, 0, 0);
+}
+
+/*
+ * Emit rmaps for every extent of bits set in the bitmap. Caller must ensure
+ * that the ranges are in units of rt group blocks.
+ */
+STATIC int
+xrep_rtrmap_stash_bitmap(
+ struct xrep_rtrmap *rr,
+ struct xrgb_bitmap *bitmap,
+ const struct xfs_owner_info *oinfo)
+{
+ struct xrep_rtrmap_stash_run rsr = {
+ .rr = rr,
+ .owner = oinfo->oi_owner,
+ };
+
+ return xrgb_bitmap_walk(bitmap, xrep_rtrmap_stash_run, &rsr);
+}
+
+/* Record a CoW staging extent. */
+STATIC int
+xrep_rtrmap_walk_cowblocks(
+ struct xfs_btree_cur *cur,
+ const struct xfs_refcount_irec *irec,
+ void *priv)
+{
+ struct xrgb_bitmap *bitmap = priv;
+
+ if (!xfs_refcount_check_domain(irec) ||
+ irec->rc_domain != XFS_REFC_DOMAIN_COW)
+ return -EFSCORRUPTED;
+
+ return xrgb_bitmap_set(bitmap, irec->rc_startblock,
+ irec->rc_blockcount);
+}
+
+/*
+ * Collect rmaps for the blocks containing the refcount btree, and all CoW
+ * staging extents.
+ */
+STATIC int
+xrep_rtrmap_find_refcount_rmaps(
+ struct xrep_rtrmap *rr)
+{
+ struct xrgb_bitmap cow_blocks; /* COWBIT */
+ struct xfs_refcount_irec low = {
+ .rc_startblock = 0,
+ .rc_domain = XFS_REFC_DOMAIN_COW,
+ };
+ struct xfs_refcount_irec high = {
+ .rc_startblock = -1U,
+ .rc_domain = XFS_REFC_DOMAIN_COW,
+ };
+ struct xfs_scrub *sc = rr->sc;
+ int error;
+
+ if (!xfs_has_rtreflink(sc->mp))
+ return 0;
+
+ xrgb_bitmap_init(&cow_blocks);
+
+ /* Collect rmaps for CoW staging extents. */
+ error = xfs_refcount_query_range(sc->sr.refc_cur, &low, &high,
+ xrep_rtrmap_walk_cowblocks, &cow_blocks);
+ if (error)
+ goto out_bitmap;
+
+ /* Generate rmaps for everything. */
+ error = xrep_rtrmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);
+ if (error)
+ goto out_bitmap;
+
+out_bitmap:
+ xrgb_bitmap_destroy(&cow_blocks);
+ return error;
+}
+
+/* Count and check all collected records. */
+STATIC int
+xrep_rtrmap_check_record(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_rtrmap *rr = priv;
+ int error;
+
+ error = xrep_rtrmap_check_mapping(rr->sc, rec);
+ if (error)
+ return error;
+
+ rr->nr_records++;
+ return 0;
+}
+
+/* Generate all the reverse-mappings for the realtime device. */
+STATIC int
+xrep_rtrmap_find_rmaps(
+ struct xrep_rtrmap *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+ struct xfs_perag *pag = NULL;
+ struct xfs_inode *ip;
+ struct xfs_btree_cur *mcur;
+ int error;
+
+ /* Generate rmaps for the realtime superblock */
+ if (xfs_has_rtsb(sc->mp) && rtg_rgno(rr->sc->sr.rtg) == 0) {
+ error = xrep_rtrmap_stash(rr, 0, sc->mp->m_sb.sb_rextsize,
+ XFS_RMAP_OWN_FS, 0, 0);
+ if (error)
+ return error;
+ }
+
+ /* Find CoW staging extents. */
+ xrep_rtgroup_btcur_init(sc, &sc->sr);
+ error = xrep_rtrmap_find_refcount_rmaps(rr);
+ xchk_rtgroup_btcur_free(&sc->sr);
+ if (error)
+ return error;
+
+ /*
+ * Set up for a potentially lengthy filesystem scan by reducing our
+ * transaction resource usage for the duration. Specifically:
+ *
+ * Unlock the realtime metadata inodes and cancel the transaction to
+ * release the log grant space while we scan the filesystem.
+ *
+ * Create a new empty transaction to eliminate the possibility of the
+ * inode scan deadlocking on cyclical metadata.
+ *
+ * We pass the empty transaction to the file scanning function to avoid
+ * repeatedly cycling empty transactions. This can be done even though
+ * we take the IOLOCK to quiesce the file because empty transactions
+ * do not take sb_internal.
+ */
+ xchk_trans_cancel(sc);
+ xchk_rtgroup_unlock(&sc->sr);
+ error = xchk_trans_alloc_empty(sc);
+ if (error)
+ return error;
+
+ while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) {
+ error = xrep_rtrmap_scan_inode(rr, ip);
+ xchk_irele(sc, ip);
+ if (error)
+ break;
+
+ if (xchk_should_terminate(sc, &error))
+ break;
+ }
+ xchk_iscan_iter_finish(&rr->iscan);
+ if (error)
+ return error;
+
+ /*
+ * Switch out for a real transaction and lock the RT metadata in
+ * preparation for building a new tree.
+ */
+ xchk_trans_cancel(sc);
+ error = xchk_setup_rt(sc);
+ if (error)
+ return error;
+ error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
+ if (error)
+ return error;
+
+ /*
+ * If a hook failed to update the in-memory btree, we lack the data to
+ * continue the repair.
+ */
+ if (xchk_iscan_aborted(&rr->iscan))
+ return -EFSCORRUPTED;
+
+ /* Scan for old rtrmap blocks. */
+ while ((pag = xfs_perag_next(sc->mp, pag))) {
+ error = xrep_rtrmap_scan_ag(rr, pag);
+ if (error) {
+ xfs_perag_rele(pag);
+ return error;
+ }
+ }
+
+ /*
+ * Now that we have everything locked again, we need to count the
+ * number of rmap records stashed in the btree. This should reflect
+ * all actively-owned rt files in the filesystem. At the same time,
+ * check all our records before we start building a new btree, which
+ * requires the rtbitmap lock.
+ */
+ mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, NULL, &rr->rtrmap_btree);
+ rr->nr_records = 0;
+ error = xfs_rmap_query_all(mcur, xrep_rtrmap_check_record, rr);
+ xfs_btree_del_cursor(mcur, error);
+
+ return error;
+}
+
+/* Building the new rtrmap btree. */
+
+/* Retrieve rtrmapbt data for bulk load. */
+STATIC int
+xrep_rtrmap_get_records(
+ struct xfs_btree_cur *cur,
+ unsigned int idx,
+ struct xfs_btree_block *block,
+ unsigned int nr_wanted,
+ void *priv)
+{
+ struct xrep_rtrmap *rr = priv;
+ union xfs_btree_rec *block_rec;
+ unsigned int loaded;
+ int error;
+
+ for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
+ int stat = 0;
+
+ error = xfs_btree_increment(rr->mcur, 0, &stat);
+ if (error)
+ return error;
+ if (!stat)
+ return -EFSCORRUPTED;
+
+ error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
+ if (error)
+ return error;
+ if (!stat)
+ return -EFSCORRUPTED;
+
+ block_rec = xfs_btree_rec_addr(cur, idx, block);
+ cur->bc_ops->init_rec_from_cur(cur, block_rec);
+ }
+
+ return loaded;
+}
+
+/* Feed one of the new btree blocks to the bulk loader. */
+STATIC int
+xrep_rtrmap_claim_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ void *priv)
+{
+ struct xrep_rtrmap *rr = priv;
+
+ return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
+}
+
+/* Figure out how much space we need to create the incore btree root block. */
+STATIC size_t
+xrep_rtrmap_iroot_size(
+ struct xfs_btree_cur *cur,
+ unsigned int level,
+ unsigned int nr_this_level,
+ void *priv)
+{
+ return xfs_rtrmap_broot_space_calc(cur->bc_mp, level, nr_this_level);
+}
+
+/*
+ * Use the collected rmap information to stage a new rmap btree. If this is
+ * successful we'll return with the new btree root information logged to the
+ * repair transaction but not yet committed.
+ */
+STATIC int
+xrep_rtrmap_build_new_tree(
+ struct xrep_rtrmap *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+ struct xfs_rtgroup *rtg = sc->sr.rtg;
+ struct xfs_btree_cur *rmap_cur;
+ int error;
+
+ /*
+ * Prepare to construct the new btree by reserving disk space for the
+ * new btree and setting up all the accounting information we'll need
+ * to root the new btree while it's under construction and before we
+ * attach it to the realtime rmapbt inode.
+ */
+ error = xrep_newbt_init_metadir_inode(&rr->new_btree, sc);
+ if (error)
+ return error;
+
+ rr->new_btree.bload.get_records = xrep_rtrmap_get_records;
+ rr->new_btree.bload.claim_block = xrep_rtrmap_claim_block;
+ rr->new_btree.bload.iroot_size = xrep_rtrmap_iroot_size;
+
+ rmap_cur = xfs_rtrmapbt_init_cursor(NULL, rtg);
+ xfs_btree_stage_ifakeroot(rmap_cur, &rr->new_btree.ifake);
+
+ /* Compute how many blocks we'll need for the rmaps collected. */
+ error = xfs_btree_bload_compute_geometry(rmap_cur,
+ &rr->new_btree.bload, rr->nr_records);
+ if (error)
+ goto err_cur;
+
+ /* Last chance to abort before we start committing fixes. */
+ if (xchk_should_terminate(sc, &error))
+ goto err_cur;
+
+ /*
+ * Guess how many blocks we're going to need to rebuild an entire
+ * rtrmapbt from the number of extents we found, and pump up our
+ * transaction to have sufficient block reservation. We're allowed
+ * to exceed quota to repair inconsistent metadata, though this is
+ * unlikely.
+ */
+ error = xfs_trans_reserve_more_inode(sc->tp, rtg_rmap(rtg),
+ rr->new_btree.bload.nr_blocks, 0, true);
+ if (error)
+ goto err_cur;
+
+ /* Reserve the space we'll need for the new btree. */
+ error = xrep_newbt_alloc_blocks(&rr->new_btree,
+ rr->new_btree.bload.nr_blocks);
+ if (error)
+ goto err_cur;
+
+ /*
+ * Create a cursor to the in-memory btree so that we can bulk load the
+ * new btree.
+ */
+ rr->mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, NULL, &rr->rtrmap_btree);
+ error = xfs_btree_goto_left_edge(rr->mcur);
+ if (error)
+ goto err_mcur;
+
+ /* Add all observed rmap records. */
+ rr->new_btree.ifake.if_fork->if_format = XFS_DINODE_FMT_META_BTREE;
+ error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr);
+ if (error)
+ goto err_mcur;
+
+ /*
+ * Install the new rtrmap btree in the inode. After this point the old
+ * btree is no longer accessible, the new tree is live, and we can
+ * delete the cursor.
+ */
+ xfs_rtrmapbt_commit_staged_btree(rmap_cur, sc->tp);
+ xrep_inode_set_nblocks(rr->sc, rr->new_btree.ifake.if_blocks);
+ xfs_btree_del_cursor(rmap_cur, 0);
+ xfs_btree_del_cursor(rr->mcur, 0);
+ rr->mcur = NULL;
+
+ /*
+ * Now that we've written the new btree to disk, we don't need to keep
+ * updating the in-memory btree. Abort the scan to stop live updates.
+ */
+ xchk_iscan_abort(&rr->iscan);
+
+ /* Dispose of any unused blocks and the accounting information. */
+ error = xrep_newbt_commit(&rr->new_btree);
+ if (error)
+ return error;
+
+ return xrep_roll_trans(sc);
+
+err_mcur:
+ xfs_btree_del_cursor(rr->mcur, error);
+err_cur:
+ xfs_btree_del_cursor(rmap_cur, error);
+ xrep_newbt_cancel(&rr->new_btree);
+ return error;
+}
+
+/* Reaping the old btree. */
+
+/* Reap the old rtrmapbt blocks. */
+STATIC int
+xrep_rtrmap_remove_old_tree(
+ struct xrep_rtrmap *rr)
+{
+ int error;
+
+ /*
+ * Free all the extents that were allocated to the former rtrmapbt and
+ * aren't cross-linked with something else.
+ */
+ error = xrep_reap_metadir_fsblocks(rr->sc, &rr->old_rtrmapbt_blocks);
+ if (error)
+ return error;
+
+ /*
+ * Ensure the proper reservation for the rtrmap inode so that we don't
+ * fail to expand the new btree.
+ */
+ return xrep_reset_metafile_resv(rr->sc);
+}
+
+static inline bool
+xrep_rtrmapbt_want_live_update(
+ struct xchk_iscan *iscan,
+ const struct xfs_owner_info *oi)
+{
+ if (xchk_iscan_aborted(iscan))
+ return false;
+
+ /*
+ * We scanned the CoW staging extents before we started the iscan, so
+ * we need all the updates.
+ */
+ if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner))
+ return true;
+
+ /* Ignore updates to files that the scanner hasn't visited yet. */
+ return xchk_iscan_want_live_update(iscan, oi->oi_owner);
+}
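
The effect of the check above is that the shadow btree only replays live updates for inodes the scanner has already copied; updates to not-yet-visited inodes can be dropped because the scan will observe their final state anyway. The toy model below captures that idea with a plain ascending cursor; the real xchk_iscan tracking is more involved (wraparound, visited marks), so treat this purely as an illustration.

#include <stdbool.h>
#include <stdio.h>

struct toy_iscan { unsigned long long cursor; bool aborted; };

static bool toy_want_live_update(const struct toy_iscan *s,
				 unsigned long long ino)
{
	if (s->aborted)
		return false;
	/* Already visited inodes must have their updates replayed. */
	return ino < s->cursor;
}

int main(void)
{
	struct toy_iscan s = { .cursor = 200, .aborted = false };

	printf("%d\n", toy_want_live_update(&s, 131));	/* 1: visited */
	printf("%d\n", toy_want_live_update(&s, 4096));	/* 0: scan catches it */
	return 0;
}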
+
+/*
+ * Apply a rtrmapbt update from the regular filesystem into our shadow btree.
+ * We're running from the thread that owns the rtrmap ILOCK and is generating
+ * the update, so we must be careful about which parts of the struct
+ * xrep_rtrmap that we change.
+ */
+static int
+xrep_rtrmapbt_live_update(
+ struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct xfs_rmap_update_params *p = data;
+ struct xrep_rtrmap *rr;
+ struct xfs_mount *mp;
+ struct xfs_btree_cur *mcur;
+ struct xfs_trans *tp;
+ void *txcookie;
+ int error;
+
+ rr = container_of(nb, struct xrep_rtrmap, rhook.rmap_hook.nb);
+ mp = rr->sc->mp;
+
+ if (!xrep_rtrmapbt_want_live_update(&rr->iscan, &p->oinfo))
+ goto out_unlock;
+
+ trace_xrep_rmap_live_update(rtg_group(rr->sc->sr.rtg), action, p);
+
+ error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
+ if (error)
+ goto out_abort;
+
+ mutex_lock(&rr->lock);
+ mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, tp, &rr->rtrmap_btree);
+ error = __xfs_rmap_finish_intent(mcur, action, p->startblock,
+ p->blockcount, &p->oinfo, p->unwritten);
+ xfs_btree_del_cursor(mcur, error);
+ if (error)
+ goto out_cancel;
+
+ error = xfbtree_trans_commit(&rr->rtrmap_btree, tp);
+ if (error)
+ goto out_cancel;
+
+ xrep_trans_cancel_hook_dummy(&txcookie, tp);
+ mutex_unlock(&rr->lock);
+ return NOTIFY_DONE;
+
+out_cancel:
+ xfbtree_trans_cancel(&rr->rtrmap_btree, tp);
+ xrep_trans_cancel_hook_dummy(&txcookie, tp);
+out_abort:
+ xchk_iscan_abort(&rr->iscan);
+ mutex_unlock(&rr->lock);
+out_unlock:
+ return NOTIFY_DONE;
+}
+
+/* Set up the filesystem scan components. */
+STATIC int
+xrep_rtrmap_setup_scan(
+ struct xrep_rtrmap *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+ int error;
+
+ mutex_init(&rr->lock);
+ xfsb_bitmap_init(&rr->old_rtrmapbt_blocks);
+
+ /* Set up some storage */
+ error = xfs_rtrmapbt_mem_init(sc->mp, &rr->rtrmap_btree, sc->xmbtp,
+ rtg_rgno(sc->sr.rtg));
+ if (error)
+ goto out_bitmap;
+
+ /* Retry iget every tenth of a second for up to 30 seconds. */
+ xchk_iscan_start(sc, 30000, 100, &rr->iscan);
+
+ /*
+ * Hook into live rtrmap operations so that we can update our in-memory
+ * btree to reflect live changes on the filesystem. Since we drop the
+ * rtrmap ILOCK to scan all the inodes, we need this piece to avoid
+ * installing a stale btree.
+ */
+ ASSERT(sc->flags & XCHK_FSGATES_RMAP);
+ xfs_rmap_hook_setup(&rr->rhook, xrep_rtrmapbt_live_update);
+ error = xfs_rmap_hook_add(rtg_group(sc->sr.rtg), &rr->rhook);
+ if (error)
+ goto out_iscan;
+ return 0;
+
+out_iscan:
+ xchk_iscan_teardown(&rr->iscan);
+ xfbtree_destroy(&rr->rtrmap_btree);
+out_bitmap:
+ xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks);
+ mutex_destroy(&rr->lock);
+ return error;
+}
+
+/* Tear down scan components. */
+STATIC void
+xrep_rtrmap_teardown(
+ struct xrep_rtrmap *rr)
+{
+ struct xfs_scrub *sc = rr->sc;
+
+ xchk_iscan_abort(&rr->iscan);
+ xfs_rmap_hook_del(rtg_group(sc->sr.rtg), &rr->rhook);
+ xchk_iscan_teardown(&rr->iscan);
+ xfbtree_destroy(&rr->rtrmap_btree);
+ xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks);
+ mutex_destroy(&rr->lock);
+}
+
+/* Repair the realtime rmap btree. */
+int
+xrep_rtrmapbt(
+ struct xfs_scrub *sc)
+{
+ struct xrep_rtrmap *rr = sc->buf;
+ int error;
+
+ /* Make sure any problems with the fork are fixed. */
+ error = xrep_metadata_inode_forks(sc);
+ if (error)
+ return error;
+
+ error = xrep_rtrmap_setup_scan(rr);
+ if (error)
+ return error;
+
+ /* Collect rmaps for realtime files. */
+ error = xrep_rtrmap_find_rmaps(rr);
+ if (error)
+ goto out_records;
+
+ xfs_trans_ijoin(sc->tp, sc->ip, 0);
+
+ /* Rebuild the rtrmap information. */
+ error = xrep_rtrmap_build_new_tree(rr);
+ if (error)
+ goto out_records;
+
+ /* Kill the old tree. */
+ error = xrep_rtrmap_remove_old_tree(rr);
+ if (error)
+ goto out_records;
+
+out_records:
+ xrep_rtrmap_teardown(rr);
+ return error;
+}
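
The live-update hook above receives only a struct notifier_block pointer; it recovers the owning struct xrep_rtrmap with container_of() before applying the rmap change to the shadow btree under rr->lock. A minimal, self-contained user-space sketch of that container_of pattern follows; the repair_ctx type and its fields are hypothetical stand-ins, not types from the patch.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's notifier type. */
	struct notifier_block {
		int (*call)(struct notifier_block *nb, unsigned long action,
				void *data);
	};

	/* Hypothetical repair context embedding the notifier, like xrep_rtrmap. */
	struct repair_ctx {
		int			shadow_records;
		struct notifier_block	nb;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* The callback sees only the notifier_block, so walk back to the owner. */
	static int repair_live_update(struct notifier_block *nb,
			unsigned long action, void *data)
	{
		struct repair_ctx *rc = container_of(nb, struct repair_ctx, nb);

		rc->shadow_records++;	/* apply the update to the shadow state */
		return 0;
	}

	int main(void)
	{
		struct repair_ctx rc = { .nb = { .call = repair_live_update } };

		rc.nb.call(&rc.nb, 1, NULL);
		printf("shadow records: %d\n", rc.shadow_records);
		return 0;
	}
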
diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
index 49fc6250bafc..4ac679c1bd29 100644
--- a/fs/xfs/scrub/rtsummary.c
+++ b/fs/xfs/scrub/rtsummary.c
@@ -81,8 +81,7 @@ xchk_setup_rtsummary(
if (error)
return error;
- error = xchk_install_live_inode(sc,
- sc->sr.rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
+ error = xchk_install_live_inode(sc, rtg_summary(sc->sr.rtg));
if (error)
return error;
@@ -90,6 +89,10 @@ xchk_setup_rtsummary(
if (error)
return error;
+ error = xchk_rtgroup_lock(sc, &sc->sr, XFS_RTGLOCK_BITMAP);
+ if (error)
+ return error;
+
/*
* Now that we've locked the rtbitmap and rtsummary, we can't race with
* growfsrt trying to expand the summary or change the size of the rt
@@ -100,7 +103,6 @@ xchk_setup_rtsummary(
* exclusively here. If we ever start caring about running concurrent
* fsmap with scrub this could be changed.
*/
- xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP);
if (mp->m_sb.sb_rblocks) {
rts->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks);
rts->rbmblocks = xfs_rtbitmap_blockcount(mp);
@@ -191,8 +193,7 @@ xchk_rtsum_record_free(
rtlen = xfs_rtxlen_to_extlen(mp, rec->ar_extcount);
if (!xfs_verify_rtbext(mp, rtbno, rtlen)) {
- xchk_ino_xref_set_corrupt(sc,
- rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_ino);
+ xchk_ino_xref_set_corrupt(sc, rtg_bitmap(rtg)->i_ino);
return -EFSCORRUPTED;
}
@@ -218,7 +219,7 @@ xchk_rtsum_compute(
/* If the bitmap size doesn't match the computed size, bail. */
if (XFS_FSB_TO_B(mp, xfs_rtbitmap_blockcount(mp)) !=
- rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_disk_size)
+ rtg_bitmap(rtg)->i_disk_size)
return -EFSCORRUPTED;
return xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtsum_record_free, sc);
@@ -310,8 +311,8 @@ xchk_rtsummary(
{
struct xfs_mount *mp = sc->mp;
struct xfs_rtgroup *rtg = sc->sr.rtg;
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
- struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
+ struct xfs_inode *rsumip = rtg_summary(rtg);
struct xchk_rtsummary *rts = sc->buf;
int error;
diff --git a/fs/xfs/scrub/rtsummary_repair.c b/fs/xfs/scrub/rtsummary_repair.c
index 8198ea84ad70..d593977d70df 100644
--- a/fs/xfs/scrub/rtsummary_repair.c
+++ b/fs/xfs/scrub/rtsummary_repair.c
@@ -165,7 +165,8 @@ xrep_rtsummary(
* Now exchange the contents. Nothing in repair uses the temporary
* buffer, so we can reuse it for the tempfile exchrange information.
*/
- error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, &rts->tempexch);
+ error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, 0,
+ rts->rsumblocks, &rts->tempexch);
if (error)
return error;
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 950f5a58dcd9..7567dd5cad14 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -164,7 +164,7 @@ xchk_fsgates_disable(
trace_xchk_fsgates_disable(sc, sc->flags & XCHK_FSGATES_ALL);
if (sc->flags & XCHK_FSGATES_DRAIN)
- xfs_drain_wait_disable();
+ xfs_defer_drain_wait_disable();
if (sc->flags & XCHK_FSGATES_QUOTA)
xfs_dqtrx_hook_disable();
@@ -218,6 +218,8 @@ xchk_teardown(
int error)
{
xchk_ag_free(sc, &sc->sa);
+ xchk_rtgroup_btcur_free(&sc->sr);
+
if (sc->tp) {
if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
error = xfs_trans_commit(sc->tp);
@@ -458,6 +460,20 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
.has = xfs_has_rtsb,
.repair = xrep_rgsuperblock,
},
+ [XFS_SCRUB_TYPE_RTRMAPBT] = { /* realtime group rmapbt */
+ .type = ST_RTGROUP,
+ .setup = xchk_setup_rtrmapbt,
+ .scrub = xchk_rtrmapbt,
+ .has = xfs_has_rtrmapbt,
+ .repair = xrep_rtrmapbt,
+ },
+ [XFS_SCRUB_TYPE_RTREFCBT] = { /* realtime refcountbt */
+ .type = ST_RTGROUP,
+ .setup = xchk_setup_rtrefcountbt,
+ .scrub = xchk_rtrefcountbt,
+ .has = xfs_has_rtreflink,
+ .repair = xrep_rtrefcountbt,
+ },
};
static int
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index 5dbbe93cb49b..a3f1abc91390 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -96,7 +96,7 @@ struct xchk_meta_ops {
int (*repair_eval)(struct xfs_scrub *sc);
/* Decide if we even have this piece of metadata. */
- bool (*has)(struct xfs_mount *);
+ bool (*has)(const struct xfs_mount *);
/* type describing required/allowed inputs */
enum xchk_type type;
@@ -126,6 +126,10 @@ struct xchk_rt {
/* XFS_RTGLOCK_* lock state if locked */
unsigned int rtlock_flags;
+
+ /* rtgroup btrees */
+ struct xfs_btree_cur *rmap_cur;
+ struct xfs_btree_cur *refc_cur;
};
struct xfs_scrub {
@@ -280,10 +284,14 @@ int xchk_metapath(struct xfs_scrub *sc);
int xchk_rtbitmap(struct xfs_scrub *sc);
int xchk_rtsummary(struct xfs_scrub *sc);
int xchk_rgsuperblock(struct xfs_scrub *sc);
+int xchk_rtrmapbt(struct xfs_scrub *sc);
+int xchk_rtrefcountbt(struct xfs_scrub *sc);
#else
# define xchk_rtbitmap xchk_nothing
# define xchk_rtsummary xchk_nothing
# define xchk_rgsuperblock xchk_nothing
+# define xchk_rtrmapbt xchk_nothing
+# define xchk_rtrefcountbt xchk_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_quota(struct xfs_scrub *sc);
@@ -317,8 +325,26 @@ void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
#ifdef CONFIG_XFS_RT
void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
xfs_extlen_t len);
+void xchk_xref_has_no_rt_owner(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len);
+void xchk_xref_has_rt_owner(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len);
+void xchk_xref_is_only_rt_owned_by(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo);
+void xchk_xref_is_rt_cow_staging(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_rt_shared(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_rt_cow_staging(struct xfs_scrub *sc, xfs_rgblock_t rgbno,
+ xfs_extlen_t len);
#else
# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
+# define xchk_xref_has_no_rt_owner(sc, rtbno, len) do { } while (0)
+# define xchk_xref_has_rt_owner(sc, rtbno, len) do { } while (0)
+# define xchk_xref_is_only_rt_owned_by(sc, bno, len, oinfo) do { } while (0)
+# define xchk_xref_is_rt_cow_staging(sc, bno, len) do { } while (0)
+# define xchk_xref_is_not_rt_shared(sc, bno, len) do { } while (0)
+# define xchk_xref_is_not_rt_cow_staging(sc, bno, len) do { } while (0)
#endif
#endif /* __XFS_SCRUB_SCRUB_H__ */
diff --git a/fs/xfs/scrub/stats.c b/fs/xfs/scrub/stats.c
index a476c7b2ab75..f8a37ea97791 100644
--- a/fs/xfs/scrub/stats.c
+++ b/fs/xfs/scrub/stats.c
@@ -82,6 +82,8 @@ static const char *name_map[XFS_SCRUB_TYPE_NR] = {
[XFS_SCRUB_TYPE_DIRTREE] = "dirtree",
[XFS_SCRUB_TYPE_METAPATH] = "metapath",
[XFS_SCRUB_TYPE_RGSUPER] = "rgsuper",
+ [XFS_SCRUB_TYPE_RTRMAPBT] = "rtrmapbt",
+ [XFS_SCRUB_TYPE_RTREFCBT] = "rtrefcountbt",
};
/* Format the scrub stats into a text buffer, similar to pcp style. */
diff --git a/fs/xfs/scrub/tempexch.h b/fs/xfs/scrub/tempexch.h
index 995ba187c5aa..eccda720c2ca 100644
--- a/fs/xfs/scrub/tempexch.h
+++ b/fs/xfs/scrub/tempexch.h
@@ -12,7 +12,7 @@ struct xrep_tempexch {
};
int xrep_tempexch_trans_reserve(struct xfs_scrub *sc, int whichfork,
- struct xrep_tempexch *ti);
+ xfs_fileoff_t off, xfs_filblks_t len, struct xrep_tempexch *ti);
int xrep_tempexch_trans_alloc(struct xfs_scrub *sc, int whichfork,
struct xrep_tempexch *ti);
diff --git a/fs/xfs/scrub/tempfile.c b/fs/xfs/scrub/tempfile.c
index 2d7ca7e1bbca..cf99e0ca51b0 100644
--- a/fs/xfs/scrub/tempfile.c
+++ b/fs/xfs/scrub/tempfile.c
@@ -606,6 +606,8 @@ STATIC int
xrep_tempexch_prep_request(
struct xfs_scrub *sc,
int whichfork,
+ xfs_fileoff_t off,
+ xfs_filblks_t len,
struct xrep_tempexch *tx)
{
struct xfs_exchmaps_req *req = &tx->req;
@@ -629,18 +631,19 @@ xrep_tempexch_prep_request(
/* Exchange all mappings in both forks. */
req->ip1 = sc->tempip;
req->ip2 = sc->ip;
- req->startoff1 = 0;
- req->startoff2 = 0;
+ req->startoff1 = off;
+ req->startoff2 = off;
switch (whichfork) {
case XFS_ATTR_FORK:
req->flags |= XFS_EXCHMAPS_ATTR_FORK;
break;
case XFS_DATA_FORK:
- /* Always exchange sizes when exchanging data fork mappings. */
- req->flags |= XFS_EXCHMAPS_SET_SIZES;
+ /* Exchange sizes when exchanging all data fork mappings. */
+ if (off == 0 && len == XFS_MAX_FILEOFF)
+ req->flags |= XFS_EXCHMAPS_SET_SIZES;
break;
}
- req->blockcount = XFS_MAX_FILEOFF;
+ req->blockcount = len;
return 0;
}
@@ -749,6 +752,7 @@ xrep_tempexch_reserve_quota(
* or the two inodes have the same dquots.
*/
if (!XFS_IS_QUOTA_ON(tp->t_mountp) || req->ip1 == req->ip2 ||
+ xfs_is_metadir_inode(req->ip1) ||
(req->ip1->i_udquot == req->ip2->i_udquot &&
req->ip1->i_gdquot == req->ip2->i_gdquot &&
req->ip1->i_pdquot == req->ip2->i_pdquot))
@@ -795,6 +799,8 @@ int
xrep_tempexch_trans_reserve(
struct xfs_scrub *sc,
int whichfork,
+ xfs_fileoff_t off,
+ xfs_filblks_t len,
struct xrep_tempexch *tx)
{
int error;
@@ -803,7 +809,7 @@ xrep_tempexch_trans_reserve(
xfs_assert_ilocked(sc->ip, XFS_ILOCK_EXCL);
xfs_assert_ilocked(sc->tempip, XFS_ILOCK_EXCL);
- error = xrep_tempexch_prep_request(sc, whichfork, tx);
+ error = xrep_tempexch_prep_request(sc, whichfork, off, len, tx);
if (error)
return error;
@@ -841,7 +847,8 @@ xrep_tempexch_trans_alloc(
ASSERT(sc->tp == NULL);
ASSERT(xfs_has_exchange_range(sc->mp));
- error = xrep_tempexch_prep_request(sc, whichfork, tx);
+ error = xrep_tempexch_prep_request(sc, whichfork, 0, XFS_MAX_FILEOFF,
+ tx);
if (error)
return error;
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 98f923ae664d..2450e214103f 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -21,6 +21,7 @@
#include "xfs_rmap.h"
#include "xfs_parent.h"
#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
#include "scrub/scrub.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index d2ae7e93acb0..d7c4ced47c15 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -17,6 +17,7 @@
#include "xfs_bit.h"
#include "xfs_quota_defs.h"
+struct xfs_rtgroup;
struct xfs_scrub;
struct xfile;
struct xfarray;
@@ -40,6 +41,9 @@ struct xchk_dirtree_outcomes;
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);
+TRACE_DEFINE_ENUM(XG_TYPE_AG);
+TRACE_DEFINE_ENUM(XG_TYPE_RTG);
+
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PROBE);
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SB);
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGF);
@@ -72,6 +76,8 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_DIRTREE);
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BARRIER);
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_METAPATH);
TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RGSUPER);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTRMAPBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTREFCBT);
#define XFS_SCRUB_TYPE_STRINGS \
{ XFS_SCRUB_TYPE_PROBE, "probe" }, \
@@ -105,7 +111,9 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RGSUPER);
{ XFS_SCRUB_TYPE_DIRTREE, "dirtree" }, \
{ XFS_SCRUB_TYPE_BARRIER, "barrier" }, \
{ XFS_SCRUB_TYPE_METAPATH, "metapath" }, \
- { XFS_SCRUB_TYPE_RGSUPER, "rgsuper" }
+ { XFS_SCRUB_TYPE_RGSUPER, "rgsuper" }, \
+ { XFS_SCRUB_TYPE_RTRMAPBT, "rtrmapbt" }, \
+ { XFS_SCRUB_TYPE_RTREFCBT, "rtrefcountbt" }
#define XFS_SCRUB_FLAG_STRINGS \
{ XFS_SCRUB_IFLAG_REPAIR, "repair" }, \
@@ -1956,32 +1964,36 @@ DEFINE_XCHK_METAPATH_EVENT(xchk_metapath_lookup);
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
DECLARE_EVENT_CLASS(xrep_extent_class,
- TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
+ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
xfs_extlen_t len),
- TP_ARGS(pag, agbno, len),
+ TP_ARGS(xg, agbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
),
TP_fast_assign(
- __entry->dev = pag_mount(pag)->m_super->s_dev;
- __entry->agno = pag_agno(pag);
+ __entry->dev = xg->xg_mount->m_super->s_dev;
+ __entry->type = xg->xg_type;
+ __entry->agno = xg->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agbno,
__entry->len)
);
#define DEFINE_REPAIR_EXTENT_EVENT(name) \
DEFINE_EVENT(xrep_extent_class, name, \
- TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
+ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \
xfs_extlen_t len), \
- TP_ARGS(pag, agbno, len))
+ TP_ARGS(xg, agbno, len))
DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_unmap_extent);
DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_free_extent);
DEFINE_REPAIR_EXTENT_EVENT(xreap_agextent_binval);
@@ -1989,35 +2001,39 @@ DEFINE_REPAIR_EXTENT_EVENT(xreap_bmapi_binval);
DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert);
DECLARE_EVENT_CLASS(xrep_reap_find_class,
- TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
+ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
xfs_extlen_t len, bool crosslinked),
- TP_ARGS(pag, agbno, len, crosslinked),
+ TP_ARGS(xg, agbno, len, crosslinked),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
__field(bool, crosslinked)
),
TP_fast_assign(
- __entry->dev = pag_mount(pag)->m_super->s_dev;
- __entry->agno = pag_agno(pag);
+ __entry->dev = xg->xg_mount->m_super->s_dev;
+ __entry->type = xg->xg_type;
+ __entry->agno = xg->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
__entry->crosslinked = crosslinked;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x crosslinked %d",
+ TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x crosslinked %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agbno,
__entry->len,
__entry->crosslinked ? 1 : 0)
);
#define DEFINE_REPAIR_REAP_FIND_EVENT(name) \
DEFINE_EVENT(xrep_reap_find_class, name, \
- TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
+ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \
xfs_extlen_t len, bool crosslinked), \
- TP_ARGS(pag, agbno, len, crosslinked))
+ TP_ARGS(xg, agbno, len, crosslinked))
DEFINE_REPAIR_REAP_FIND_EVENT(xreap_agextent_select);
DEFINE_REPAIR_REAP_FIND_EVENT(xreap_bmapi_select);
@@ -2108,29 +2124,33 @@ TRACE_EVENT(xrep_ibt_found,
)
TRACE_EVENT(xrep_refc_found,
- TP_PROTO(const struct xfs_perag *pag,
+ TP_PROTO(const struct xfs_group *xg,
const struct xfs_refcount_irec *rec),
- TP_ARGS(pag, rec),
+ TP_ARGS(xg, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, domain)
+ __field(enum xfs_group_type, type)
__field(xfs_agblock_t, startblock)
__field(xfs_extlen_t, blockcount)
__field(xfs_nlink_t, refcount)
),
TP_fast_assign(
- __entry->dev = pag_mount(pag)->m_super->s_dev;
- __entry->agno = pag_agno(pag);
+ __entry->dev = xg->xg_mount->m_super->s_dev;
+ __entry->agno = xg->xg_gno;
+ __entry->type = xg->xg_type;
__entry->domain = rec->rc_domain;
__entry->startblock = rec->rc_startblock;
__entry->blockcount = rec->rc_blockcount;
__entry->refcount = rec->rc_refcount;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u",
+ TP_printk("dev %d:%d %sno 0x%x dom %s %sbno 0x%x fsbcount 0x%x refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->startblock,
__entry->blockcount,
__entry->refcount)
@@ -2282,6 +2302,32 @@ TRACE_EVENT(xrep_calc_ag_resblks_btsize,
__entry->rmapbt_sz,
__entry->refcbt_sz)
)
+
+#ifdef CONFIG_XFS_RT
+TRACE_EVENT(xrep_calc_rtgroup_resblks_btsize,
+ TP_PROTO(struct xfs_mount *mp, xfs_rgnumber_t rgno,
+ xfs_rgblock_t usedlen, xfs_rgblock_t rmapbt_sz),
+ TP_ARGS(mp, rgno, usedlen, rmapbt_sz),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_rgnumber_t, rgno)
+ __field(xfs_rgblock_t, usedlen)
+ __field(xfs_rgblock_t, rmapbt_sz)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rgno = rgno;
+ __entry->usedlen = usedlen;
+ __entry->rmapbt_sz = rmapbt_sz;
+ ),
+ TP_printk("dev %d:%d rgno 0x%x usedlen %u rmapbt %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rgno,
+ __entry->usedlen,
+ __entry->rmapbt_sz)
+);
+#endif /* CONFIG_XFS_RT */
+
TRACE_EVENT(xrep_reset_counters,
TP_PROTO(struct xfs_mount *mp, struct xchk_fscounters *fsc),
TP_ARGS(mp, fsc),
@@ -2680,11 +2726,12 @@ DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_update_inode);
DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_unfixable_inode);
TRACE_EVENT(xrep_rmap_live_update,
- TP_PROTO(const struct xfs_perag *pag, unsigned int op,
+ TP_PROTO(const struct xfs_group *xg, unsigned int op,
const struct xfs_rmap_update_params *p),
- TP_ARGS(pag, op, p),
+ TP_ARGS(xg, op, p),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(unsigned int, op)
__field(xfs_agblock_t, agbno)
@@ -2694,8 +2741,9 @@ TRACE_EVENT(xrep_rmap_live_update,
__field(unsigned int, flags)
),
TP_fast_assign(
- __entry->dev = pag_mount(pag)->m_super->s_dev;
- __entry->agno = pag_agno(pag);
+ __entry->dev = xg->xg_mount->m_super->s_dev;
+ __entry->type = xg->xg_type;
+ __entry->agno = xg->xg_gno;
__entry->op = op;
__entry->agbno = p->startblock;
__entry->len = p->blockcount;
@@ -2704,10 +2752,12 @@ TRACE_EVENT(xrep_rmap_live_update,
if (p->unwritten)
__entry->flags |= XFS_RMAP_UNWRITTEN;
),
- TP_printk("dev %d:%d agno 0x%x op %d agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x op %d %sbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__entry->op,
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agbno,
__entry->len,
__entry->owner,
@@ -3605,6 +3655,186 @@ DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_try_unlink);
DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_unlink);
DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_link);
+#ifdef CONFIG_XFS_RT
+DECLARE_EVENT_CLASS(xrep_rtbitmap_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start, xfs_rtxnum_t end),
+ TP_ARGS(mp, start, end),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(xfs_rtxnum_t, start)
+ __field(xfs_rtxnum_t, end)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rtdev = mp->m_rtdev_targp->bt_dev;
+ __entry->start = start;
+ __entry->end = end;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d startrtx 0x%llx endrtx 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->start,
+ __entry->end)
+);
+#define DEFINE_REPAIR_RGBITMAP_EVENT(name) \
+DEFINE_EVENT(xrep_rtbitmap_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start, \
+ xfs_rtxnum_t end), \
+ TP_ARGS(mp, start, end))
+DEFINE_REPAIR_RGBITMAP_EVENT(xrep_rtbitmap_record_free);
+DEFINE_REPAIR_RGBITMAP_EVENT(xrep_rtbitmap_record_free_bulk);
+
+TRACE_EVENT(xrep_rtbitmap_or,
+ TP_PROTO(struct xfs_mount *mp, unsigned long long wordoff,
+ xfs_rtword_t mask, xfs_rtword_t word),
+ TP_ARGS(mp, wordoff, mask, word),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(unsigned long long, wordoff)
+ __field(unsigned int, mask)
+ __field(unsigned int, word)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rtdev = mp->m_rtdev_targp->bt_dev;
+ __entry->wordoff = wordoff;
+ __entry->mask = mask;
+ __entry->word = word;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d wordoff 0x%llx mask 0x%x word 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->wordoff,
+ __entry->mask,
+ __entry->word)
+);
+
+TRACE_EVENT(xrep_rtbitmap_load,
+ TP_PROTO(struct xfs_rtgroup *rtg, xfs_fileoff_t rbmoff,
+ xfs_rtxnum_t rtx, xfs_rtxnum_t len),
+ TP_ARGS(rtg, rbmoff, rtx, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(xfs_rgnumber_t, rgno)
+ __field(xfs_fileoff_t, rbmoff)
+ __field(xfs_rtxnum_t, rtx)
+ __field(xfs_rtxnum_t, len)
+ ),
+ TP_fast_assign(
+ __entry->dev = rtg_mount(rtg)->m_super->s_dev;
+ __entry->rtdev = rtg_mount(rtg)->m_rtdev_targp->bt_dev;
+ __entry->rgno = rtg_rgno(rtg);
+ __entry->rbmoff = rbmoff;
+ __entry->rtx = rtx;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d rgno 0x%x rbmoff 0x%llx rtx 0x%llx rtxcount 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->rgno,
+ __entry->rbmoff,
+ __entry->rtx,
+ __entry->len)
+);
+
+TRACE_EVENT(xrep_rtbitmap_load_words,
+ TP_PROTO(struct xfs_mount *mp, xfs_fileoff_t rbmoff,
+ unsigned long long wordoff, unsigned int wordcnt),
+ TP_ARGS(mp, rbmoff, wordoff, wordcnt),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(xfs_fileoff_t, rbmoff)
+ __field(unsigned long long, wordoff)
+ __field(unsigned int, wordcnt)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rtdev = mp->m_rtdev_targp->bt_dev;
+ __entry->rbmoff = rbmoff;
+ __entry->wordoff = wordoff;
+ __entry->wordcnt = wordcnt;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d rbmoff 0x%llx wordoff 0x%llx wordcnt 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->rbmoff,
+ __entry->wordoff,
+ __entry->wordcnt)
+);
+
+TRACE_EVENT(xrep_rtbitmap_load_word,
+ TP_PROTO(struct xfs_mount *mp, unsigned long long wordoff,
+ unsigned int bit, xfs_rtword_t ondisk_word,
+ xfs_rtword_t xfile_word, xfs_rtword_t word_mask),
+ TP_ARGS(mp, wordoff, bit, ondisk_word, xfile_word, word_mask),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(unsigned long long, wordoff)
+ __field(unsigned int, bit)
+ __field(xfs_rtword_t, ondisk_word)
+ __field(xfs_rtword_t, xfile_word)
+ __field(xfs_rtword_t, word_mask)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rtdev = mp->m_rtdev_targp->bt_dev;
+ __entry->wordoff = wordoff;
+ __entry->bit = bit;
+ __entry->ondisk_word = ondisk_word;
+ __entry->xfile_word = xfile_word;
+ __entry->word_mask = word_mask;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d wordoff 0x%llx bit %u ondisk 0x%x(0x%x) inmem 0x%x(0x%x) result 0x%x mask 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->wordoff,
+ __entry->bit,
+ __entry->ondisk_word,
+ __entry->ondisk_word & __entry->word_mask,
+ __entry->xfile_word,
+ __entry->xfile_word & ~__entry->word_mask,
+ (__entry->xfile_word & ~__entry->word_mask) |
+ (__entry->ondisk_word & __entry->word_mask),
+ __entry->word_mask)
+);
+
+TRACE_EVENT(xrep_rtrmap_found,
+ TP_PROTO(struct xfs_mount *mp, const struct xfs_rmap_irec *rec),
+ TP_ARGS(mp, rec),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, rtdev)
+ __field(xfs_rgblock_t, rgbno)
+ __field(xfs_extlen_t, len)
+ __field(uint64_t, owner)
+ __field(uint64_t, offset)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->rtdev = mp->m_rtdev_targp->bt_dev;
+ __entry->rgbno = rec->rm_startblock;
+ __entry->len = rec->rm_blockcount;
+ __entry->owner = rec->rm_owner;
+ __entry->offset = rec->rm_offset;
+ __entry->flags = rec->rm_flags;
+ ),
+ TP_printk("dev %d:%d rtdev %d:%d rgbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->rtdev), MINOR(__entry->rtdev),
+ __entry->rgbno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+);
+#endif /* CONFIG_XFS_RT */
+
#endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */
#endif /* _TRACE_XFS_SCRUB_TRACE_H */
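
The reworked trace events above take a struct xfs_group and emit either "agno"/"agbno" or realtime-group equivalents by feeding the group type through __print_symbolic with the XG_TYPE_STRINGS table. A small user-space sketch of the same prefix-selection idea, assuming that table maps the two group types to "ag" and "rtg"; all names here are illustrative.

	#include <stdio.h>

	/* Stand-in for the kernel's xfs_group_type / XG_TYPE_STRINGS mapping. */
	enum group_type { GT_AG, GT_RTG };

	static const char *group_prefix(enum group_type t)
	{
		switch (t) {
		case GT_AG:	return "ag";	/* allocation group */
		case GT_RTG:	return "rtg";	/* realtime group */
		}
		return "?";
	}

	/* Format one extent line the way the dual-purpose trace events do. */
	static void print_extent(enum group_type t, unsigned int gno,
			unsigned int gbno, unsigned int len)
	{
		printf("%sno 0x%x %sbno 0x%x fsbcount 0x%x\n",
				group_prefix(t), gno, group_prefix(t), gbno, len);
	}

	int main(void)
	{
		print_extent(GT_AG, 3, 0x80, 16);	/* agno 0x3 agbno 0x80 ... */
		print_extent(GT_RTG, 1, 0x40, 8);	/* rtgno 0x1 rtgbno 0x40 ... */
		return 0;
	}
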
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 559a3a577097..67877c36ed11 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -131,7 +131,7 @@ xfs_end_ioend(
error = xfs_iomap_write_unwritten(ip, offset, size, false);
if (!error && xfs_ioend_is_append(ioend))
- error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+ error = xfs_setfilesize(ip, offset, size);
done:
iomap_finish_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 24fb12986a56..319004bf089f 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -305,11 +305,6 @@ xfs_attr3_root_inactive(
XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
if (error)
return error;
- error = bp->b_error;
- if (error) {
- xfs_trans_brelse(*trans, bp);
- return error;
- }
xfs_trans_binval(*trans, bp); /* remove from cache */
/*
* Commit the invalidate and start the next transaction.
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index aa63b8efd782..15bb790359f8 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -22,6 +22,7 @@
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_buf_mem.h"
+#include "xfs_notify_failure.h"
struct kmem_cache *xfs_buf_cache;
@@ -40,8 +41,7 @@ struct kmem_cache *xfs_buf_cache;
*
* xfs_buf_rele:
* b_lock
- * pag_buf_lock
- * lru_lock
+ * lru_lock
*
* xfs_buftarg_drain_rele
* lru_lock
@@ -52,14 +52,8 @@ struct kmem_cache *xfs_buf_cache;
* b_lock (trylock due to inversion)
*/
-static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
-
-static inline int
-xfs_buf_submit(
- struct xfs_buf *bp)
-{
- return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
-}
+static void xfs_buf_submit(struct xfs_buf *bp);
+static int xfs_buf_iowait(struct xfs_buf *bp);
static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
{
@@ -132,15 +126,6 @@ __xfs_buf_ioacct_dec(
}
}
-static inline void
-xfs_buf_ioacct_dec(
- struct xfs_buf *bp)
-{
- spin_lock(&bp->b_lock);
- __xfs_buf_ioacct_dec(bp);
- spin_unlock(&bp->b_lock);
-}
-
/*
* When we mark a buffer stale, we remove the buffer from the LRU and clear the
* b_lru_ref count so that the buffer is freed immediately when the buffer
@@ -176,9 +161,9 @@ xfs_buf_stale(
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
(list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
- atomic_dec(&bp->b_hold);
+ bp->b_hold--;
- ASSERT(atomic_read(&bp->b_hold) >= 1);
+ ASSERT(bp->b_hold >= 1);
spin_unlock(&bp->b_lock);
}
@@ -202,9 +187,6 @@ xfs_buf_get_maps(
return 0;
}
-/*
- * Frees b_pages if it was allocated.
- */
static void
xfs_buf_free_maps(
struct xfs_buf *bp)
@@ -237,23 +219,25 @@ _xfs_buf_alloc(
*/
flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
- atomic_set(&bp->b_hold, 1);
+ /*
+ * A new buffer is held and locked by the owner. This ensures that the
+ * buffer is owned by the caller and racing RCU lookups right after
+ * inserting into the hash table are safe (and will have to wait for
+ * the unlock to do anything non-trivial).
+ */
+ bp->b_hold = 1;
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+ spin_lock_init(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
INIT_LIST_HEAD(&bp->b_li_list);
- sema_init(&bp->b_sema, 0); /* held, no waiters */
- spin_lock_init(&bp->b_lock);
bp->b_target = target;
bp->b_mount = target->bt_mount;
bp->b_flags = flags;
- /*
- * Set length and io_length to the same value initially.
- * I/O routines should use io_length, which will be the same in
- * most cases but may be reset (e.g. XFS recovery).
- */
error = xfs_buf_get_maps(bp, nmaps);
if (error) {
kmem_cache_free(xfs_buf_cache, bp);
@@ -395,8 +379,8 @@ xfs_buf_alloc_pages(
for (;;) {
long last = filled;
- filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
- bp->b_pages);
+ filled = alloc_pages_bulk(gfp_mask, bp->b_page_count,
+ bp->b_pages);
if (filled == bp->b_page_count) {
XFS_STATS_INC(bp->b_mount, xb_page_found);
break;
@@ -519,7 +503,6 @@ int
xfs_buf_cache_init(
struct xfs_buf_cache *bch)
{
- spin_lock_init(&bch->bc_lock);
return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
}
@@ -588,6 +571,20 @@ xfs_buf_find_lock(
return 0;
}
+static bool
+xfs_buf_try_hold(
+ struct xfs_buf *bp)
+{
+ spin_lock(&bp->b_lock);
+ if (bp->b_hold == 0) {
+ spin_unlock(&bp->b_lock);
+ return false;
+ }
+ bp->b_hold++;
+ spin_unlock(&bp->b_lock);
+ return true;
+}
+
static inline int
xfs_buf_lookup(
struct xfs_buf_cache *bch,
@@ -600,7 +597,7 @@ xfs_buf_lookup(
rcu_read_lock();
bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
- if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
+ if (!bp || !xfs_buf_try_hold(bp)) {
rcu_read_unlock();
return -ENOENT;
}
@@ -655,18 +652,20 @@ xfs_buf_find_insert(
if (error)
goto out_free_buf;
- spin_lock(&bch->bc_lock);
+ /* The new buffer keeps the perag reference until it is freed. */
+ new_bp->b_pag = pag;
+
+ rcu_read_lock();
bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
&new_bp->b_rhash_head, xfs_buf_hash_params);
if (IS_ERR(bp)) {
+ rcu_read_unlock();
error = PTR_ERR(bp);
- spin_unlock(&bch->bc_lock);
goto out_free_buf;
}
- if (bp) {
+ if (bp && xfs_buf_try_hold(bp)) {
/* found an existing buffer */
- atomic_inc(&bp->b_hold);
- spin_unlock(&bch->bc_lock);
+ rcu_read_unlock();
error = xfs_buf_find_lock(bp, flags);
if (error)
xfs_buf_rele(bp);
@@ -674,10 +673,8 @@ xfs_buf_find_insert(
*bpp = bp;
goto out_free_buf;
}
+ rcu_read_unlock();
- /* The new buffer keeps the perag reference until it is freed. */
- new_bp->b_pag = pag;
- spin_unlock(&bch->bc_lock);
*bpp = new_bp;
return 0;
@@ -806,7 +803,10 @@ _xfs_buf_read(
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- return xfs_buf_submit(bp);
+ xfs_buf_submit(bp);
+ if (flags & XBF_ASYNC)
+ return 0;
+ return xfs_buf_iowait(bp);
}
/*
@@ -982,8 +982,8 @@ xfs_buf_read_uncached(
bp->b_ops = ops;
xfs_buf_submit(bp);
- if (bp->b_error) {
- error = bp->b_error;
+ error = xfs_buf_iowait(bp);
+ if (error) {
xfs_buf_relse(bp);
return error;
}
@@ -1043,7 +1043,10 @@ xfs_buf_hold(
struct xfs_buf *bp)
{
trace_xfs_buf_hold(bp, _RET_IP_);
- atomic_inc(&bp->b_hold);
+
+ spin_lock(&bp->b_lock);
+ bp->b_hold++;
+ spin_unlock(&bp->b_lock);
}
static void
@@ -1051,10 +1054,15 @@ xfs_buf_rele_uncached(
struct xfs_buf *bp)
{
ASSERT(list_empty(&bp->b_lru));
- if (atomic_dec_and_test(&bp->b_hold)) {
- xfs_buf_ioacct_dec(bp);
- xfs_buf_free(bp);
+
+ spin_lock(&bp->b_lock);
+ if (--bp->b_hold) {
+ spin_unlock(&bp->b_lock);
+ return;
}
+ __xfs_buf_ioacct_dec(bp);
+ spin_unlock(&bp->b_lock);
+ xfs_buf_free(bp);
}
static void
@@ -1064,51 +1072,38 @@ xfs_buf_rele_cached(
struct xfs_buftarg *btp = bp->b_target;
struct xfs_perag *pag = bp->b_pag;
struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
- bool release;
bool freebuf = false;
trace_xfs_buf_rele(bp, _RET_IP_);
- ASSERT(atomic_read(&bp->b_hold) > 0);
-
- /*
- * We grab the b_lock here first to serialise racing xfs_buf_rele()
- * calls. The pag_buf_lock being taken on the last reference only
- * serialises against racing lookups in xfs_buf_find(). IOWs, the second
- * to last reference we drop here is not serialised against the last
- * reference until we take bp->b_lock. Hence if we don't grab b_lock
- * first, the last "release" reference can win the race to the lock and
- * free the buffer before the second-to-last reference is processed,
- * leading to a use-after-free scenario.
- */
spin_lock(&bp->b_lock);
- release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);
- if (!release) {
+ ASSERT(bp->b_hold >= 1);
+ if (bp->b_hold > 1) {
/*
* Drop the in-flight state if the buffer is already on the LRU
* and it holds the only reference. This is racy because we
* haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
* ensures the decrement occurs only once per-buf.
*/
- if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
+ if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
__xfs_buf_ioacct_dec(bp);
goto out_unlock;
}
- /* the last reference has been dropped ... */
+ /* we are asked to drop the last reference */
__xfs_buf_ioacct_dec(bp);
if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
/*
- * If the buffer is added to the LRU take a new reference to the
+ * If the buffer is added to the LRU, keep the reference to the
* buffer for the LRU and clear the (now stale) dispose list
- * state flag
+ * state flag, else drop the reference.
*/
- if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
+ if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
bp->b_state &= ~XFS_BSTATE_DISPOSE;
- atomic_inc(&bp->b_hold);
- }
- spin_unlock(&bch->bc_lock);
+ else
+ bp->b_hold--;
} else {
+ bp->b_hold--;
/*
* most of the time buffers will already be removed from the
* LRU, so optimise that case by checking for the
@@ -1124,7 +1119,6 @@ xfs_buf_rele_cached(
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
xfs_buf_hash_params);
- spin_unlock(&bch->bc_lock);
if (pag)
xfs_perag_put(pag);
freebuf = true;
@@ -1291,6 +1285,7 @@ xfs_buf_ioend_handle_error(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_error_cfg *cfg;
+ struct xfs_log_item *lip;
/*
* If we've already shutdown the journal because of I/O errors, there's
@@ -1338,12 +1333,11 @@ xfs_buf_ioend_handle_error(
}
/* Still considered a transient error. Caller will schedule retries. */
- if (bp->b_flags & _XBF_INODES)
- xfs_buf_inode_io_fail(bp);
- else if (bp->b_flags & _XBF_DQUOTS)
- xfs_buf_dquot_io_fail(bp);
- else
- ASSERT(list_empty(&bp->b_li_list));
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
+ set_bit(XFS_LI_FAILED, &lip->li_flags);
+ clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
+ }
+
xfs_buf_ioerror(bp, 0);
xfs_buf_relse(bp);
return true;
@@ -1367,14 +1361,10 @@ xfs_buf_ioend(
{
trace_xfs_buf_iodone(bp, _RET_IP_);
- /*
- * Pull in IO completion errors now. We are guaranteed to be running
- * single threaded, so we don't need the lock to read b_io_error.
- */
- if (!bp->b_error && bp->b_io_error)
- xfs_buf_ioerror(bp, bp->b_io_error);
-
if (bp->b_flags & XBF_READ) {
+ if (!bp->b_error && xfs_buf_is_vmapped(bp))
+ invalidate_kernel_vmap_range(bp->b_addr,
+ xfs_buf_vmap_len(bp));
if (!bp->b_error && bp->b_ops)
bp->b_ops->verify_read(bp);
if (!bp->b_error)
@@ -1401,11 +1391,8 @@ xfs_buf_ioend(
if (bp->b_log_item)
xfs_buf_item_done(bp);
- if (bp->b_flags & _XBF_INODES)
- xfs_buf_inode_iodone(bp);
- else if (bp->b_flags & _XBF_DQUOTS)
- xfs_buf_dquot_iodone(bp);
-
+ if (bp->b_iodone)
+ bp->b_iodone(bp);
}
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
@@ -1485,7 +1472,8 @@ xfs_bwrite(
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
XBF_DONE);
- error = xfs_buf_submit(bp);
+ xfs_buf_submit(bp);
+ error = xfs_buf_iowait(bp);
if (error)
xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
return error;
@@ -1495,188 +1483,79 @@ static void
xfs_buf_bio_end_io(
struct bio *bio)
{
- struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
-
- if (!bio->bi_status &&
- (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
- XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
- bio->bi_status = BLK_STS_IOERR;
-
- /*
- * don't overwrite existing errors - otherwise we can lose errors on
- * buffers that require multiple bios to complete.
- */
- if (bio->bi_status) {
- int error = blk_status_to_errno(bio->bi_status);
-
- cmpxchg(&bp->b_io_error, 0, error);
- }
-
- if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
- invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
-
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend_async(bp);
- bio_put(bio);
-}
-
-static void
-xfs_buf_ioapply_map(
- struct xfs_buf *bp,
- int map,
- int *buf_offset,
- int *count,
- blk_opf_t op)
-{
- int page_index;
- unsigned int total_nr_pages = bp->b_page_count;
- int nr_pages;
- struct bio *bio;
- sector_t sector = bp->b_maps[map].bm_bn;
- int size;
- int offset;
-
- /* skip the pages in the buffer before the start offset */
- page_index = 0;
- offset = *buf_offset;
- while (offset >= PAGE_SIZE) {
- page_index++;
- offset -= PAGE_SIZE;
- }
-
- /*
- * Limit the IO size to the length of the current vector, and update the
- * remaining IO count for the next time around.
- */
- size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
- *count -= size;
- *buf_offset += size;
-
-next_chunk:
- atomic_inc(&bp->b_io_remaining);
- nr_pages = bio_max_segs(total_nr_pages);
-
- bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
- bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
- for (; size && nr_pages; nr_pages--, page_index++) {
- int rbytes, nbytes = PAGE_SIZE - offset;
-
- if (nbytes > size)
- nbytes = size;
-
- rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
- offset);
- if (rbytes < nbytes)
- break;
-
- offset = 0;
- sector += BTOBB(nbytes);
- size -= nbytes;
- total_nr_pages--;
- }
+ struct xfs_buf *bp = bio->bi_private;
- if (likely(bio->bi_iter.bi_size)) {
- if (xfs_buf_is_vmapped(bp)) {
- flush_kernel_vmap_range(bp->b_addr,
- xfs_buf_vmap_len(bp));
- }
- submit_bio(bio);
- if (size)
- goto next_chunk;
- } else {
- /*
- * This is guaranteed not to be the last io reference count
- * because the caller (xfs_buf_submit) holds a count itself.
- */
- atomic_dec(&bp->b_io_remaining);
+ if (bio->bi_status)
+ xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status));
+ else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
+ XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
xfs_buf_ioerror(bp, -EIO);
- bio_put(bio);
- }
+ xfs_buf_ioend_async(bp);
+ bio_put(bio);
}
-STATIC void
-_xfs_buf_ioapply(
- struct xfs_buf *bp)
+static inline blk_opf_t
+xfs_buf_bio_op(
+ struct xfs_buf *bp)
{
- struct blk_plug plug;
- blk_opf_t op;
- int offset;
- int size;
- int i;
-
- /*
- * Make sure we capture only current IO errors rather than stale errors
- * left over from previous use of the buffer (e.g. failed readahead).
- */
- bp->b_error = 0;
+ blk_opf_t op;
if (bp->b_flags & XBF_WRITE) {
op = REQ_OP_WRITE;
-
- /*
- * Run the write verifier callback function if it exists. If
- * this function fails it will mark the buffer with an error and
- * the IO should not be dispatched.
- */
- if (bp->b_ops) {
- bp->b_ops->verify_write(bp);
- if (bp->b_error) {
- xfs_force_shutdown(bp->b_mount,
- SHUTDOWN_CORRUPT_INCORE);
- return;
- }
- } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
- struct xfs_mount *mp = bp->b_mount;
-
- /*
- * non-crc filesystems don't attach verifiers during
- * log recovery, so don't warn for such filesystems.
- */
- if (xfs_has_crc(mp)) {
- xfs_warn(mp,
- "%s: no buf ops on daddr 0x%llx len %d",
- __func__, xfs_buf_daddr(bp),
- bp->b_length);
- xfs_hex_dump(bp->b_addr,
- XFS_CORRUPTION_DUMP_LEN);
- dump_stack();
- }
- }
} else {
op = REQ_OP_READ;
if (bp->b_flags & XBF_READ_AHEAD)
op |= REQ_RAHEAD;
}
- /* we only use the buffer cache for meta-data */
- op |= REQ_META;
+ return op | REQ_META;
+}
- /* in-memory targets are directly mapped, no IO required. */
- if (xfs_buftarg_is_mem(bp->b_target)) {
- xfs_buf_ioend(bp);
- return;
+static void
+xfs_buf_submit_bio(
+ struct xfs_buf *bp)
+{
+ unsigned int size = BBTOB(bp->b_length);
+ unsigned int map = 0, p;
+ struct blk_plug plug;
+ struct bio *bio;
+
+ bio = bio_alloc(bp->b_target->bt_bdev, bp->b_page_count,
+ xfs_buf_bio_op(bp), GFP_NOIO);
+ bio->bi_private = bp;
+ bio->bi_end_io = xfs_buf_bio_end_io;
+
+ if (bp->b_flags & _XBF_KMEM) {
+ __bio_add_page(bio, virt_to_page(bp->b_addr), size,
+ bp->b_offset);
+ } else {
+ for (p = 0; p < bp->b_page_count; p++)
+ __bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0);
+ bio->bi_iter.bi_size = size; /* limit to the actual size used */
+
+ if (xfs_buf_is_vmapped(bp))
+ flush_kernel_vmap_range(bp->b_addr,
+ xfs_buf_vmap_len(bp));
}
/*
- * Walk all the vectors issuing IO on them. Set up the initial offset
- * into the buffer and the desired IO size before we start -
- * _xfs_buf_ioapply_vec() will modify them appropriately for each
- * subsequent call.
+ * If there is more than one map segment, split out a new bio for each
+ * map except for the last one. The last map is handled by the
+ * remainder of the original bio outside the loop.
*/
- offset = bp->b_offset;
- size = BBTOB(bp->b_length);
blk_start_plug(&plug);
- for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, op);
- if (bp->b_error)
- break;
- if (size <= 0)
- break; /* all done */
+ for (map = 0; map < bp->b_map_count - 1; map++) {
+ struct bio *split;
+
+ split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
+ &fs_bio_set);
+ split->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
+ bio_chain(split, bio);
+ submit_bio(split);
}
+ bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
+ submit_bio(bio);
blk_finish_plug(&plug);
}
@@ -1697,18 +1576,45 @@ xfs_buf_iowait(
}
/*
+ * Run the write verifier callback function if it exists. If this fails, mark
+ * the buffer with an error and do not dispatch the I/O.
+ */
+static bool
+xfs_buf_verify_write(
+ struct xfs_buf *bp)
+{
+ if (bp->b_ops) {
+ bp->b_ops->verify_write(bp);
+ if (bp->b_error)
+ return false;
+ } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
+ /*
+ * Non-crc filesystems don't attach verifiers during log
+ * recovery, so don't warn for such filesystems.
+ */
+ if (xfs_has_crc(bp->b_mount)) {
+ xfs_warn(bp->b_mount,
+ "%s: no buf ops on daddr 0x%llx len %d",
+ __func__, xfs_buf_daddr(bp),
+ bp->b_length);
+ xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
+ dump_stack();
+ }
+ }
+
+ return true;
+}
+
+/*
* Buffer I/O submission path, read or write. Asynchronous submission transfers
* the buffer lock ownership and the current reference to the IO. It is not
* safe to reference the buffer after a call to this function unless the caller
* holds an additional reference itself.
*/
-static int
-__xfs_buf_submit(
- struct xfs_buf *bp,
- bool wait)
+static void
+xfs_buf_submit(
+ struct xfs_buf *bp)
{
- int error = 0;
-
trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
@@ -1728,57 +1634,36 @@ __xfs_buf_submit(
* state here rather than mount state to avoid corrupting the log tail
* on shutdown.
*/
- if (bp->b_mount->m_log &&
- xlog_is_shutdown(bp->b_mount->m_log)) {
+ if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
xfs_buf_ioend_fail(bp);
- return -EIO;
+ return;
}
- /*
- * Grab a reference so the buffer does not go away underneath us. For
- * async buffers, I/O completion drops the callers reference, which
- * could occur before submission returns.
- */
- xfs_buf_hold(bp);
-
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
- /* clear the internal error state to avoid spurious errors */
- bp->b_io_error = 0;
-
/*
- * Set the count to 1 initially, this will stop an I/O completion
- * callout which happens before we have started all the I/O from calling
- * xfs_buf_ioend too early.
+ * Make sure we capture only current IO errors rather than stale errors
+ * left over from previous use of the buffer (e.g. failed readahead).
*/
- atomic_set(&bp->b_io_remaining, 1);
+ bp->b_error = 0;
+
if (bp->b_flags & XBF_ASYNC)
xfs_buf_ioacct_inc(bp);
- _xfs_buf_ioapply(bp);
- /*
- * If _xfs_buf_ioapply failed, we can get back here with only the IO
- * reference we took above. If we drop it to zero, run completion so
- * that we don't return to the caller with completion still pending.
- */
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
- xfs_buf_ioend(bp);
- else
- xfs_buf_ioend_async(bp);
+ if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
+ xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
+ xfs_buf_ioend(bp);
+ return;
}
- if (wait)
- error = xfs_buf_iowait(bp);
+ /* In-memory targets are directly mapped, no I/O required. */
+ if (xfs_buftarg_is_mem(bp->b_target)) {
+ xfs_buf_ioend(bp);
+ return;
+ }
- /*
- * Release the hold that keeps the buffer referenced for the entire
- * I/O. Note that if the buffer is async, it is not safe to reference
- * after this release.
- */
- xfs_buf_rele(bp);
- return error;
+ xfs_buf_submit_bio(bp);
}
void *
@@ -1863,13 +1748,14 @@ xfs_buftarg_drain_rele(
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
struct list_head *dispose = arg;
- if (atomic_read(&bp->b_hold) > 1) {
+ if (!spin_trylock(&bp->b_lock))
+ return LRU_SKIP;
+ if (bp->b_hold > 1) {
/* need to wait, so skip it this pass */
+ spin_unlock(&bp->b_lock);
trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
- if (!spin_trylock(&bp->b_lock))
- return LRU_SKIP;
/*
* clear the LRU reference count so the buffer doesn't get
@@ -2208,7 +2094,7 @@ xfs_buf_delwri_queue(
*/
bp->b_flags |= _XBF_DELWRI_Q;
if (list_empty(&bp->b_list)) {
- atomic_inc(&bp->b_hold);
+ xfs_buf_hold(bp);
list_add_tail(&bp->b_list, list);
}
@@ -2266,72 +2152,26 @@ xfs_buf_cmp(
return 0;
}
-/*
- * Submit buffers for write. If wait_list is specified, the buffers are
- * submitted using sync I/O and placed on the wait list such that the caller can
- * iowait each buffer. Otherwise async I/O is used and the buffers are released
- * at I/O completion time. In either case, buffers remain locked until I/O
- * completes and the buffer is released from the queue.
- */
-static int
-xfs_buf_delwri_submit_buffers(
- struct list_head *buffer_list,
- struct list_head *wait_list)
+static bool
+xfs_buf_delwri_submit_prep(
+ struct xfs_buf *bp)
{
- struct xfs_buf *bp, *n;
- int pinned = 0;
- struct blk_plug plug;
-
- list_sort(NULL, buffer_list, xfs_buf_cmp);
-
- blk_start_plug(&plug);
- list_for_each_entry_safe(bp, n, buffer_list, b_list) {
- if (!wait_list) {
- if (!xfs_buf_trylock(bp))
- continue;
- if (xfs_buf_ispinned(bp)) {
- xfs_buf_unlock(bp);
- pinned++;
- continue;
- }
- } else {
- xfs_buf_lock(bp);
- }
-
- /*
- * Someone else might have written the buffer synchronously or
- * marked it stale in the meantime. In that case only the
- * _XBF_DELWRI_Q flag got cleared, and we have to drop the
- * reference and remove it from the list here.
- */
- if (!(bp->b_flags & _XBF_DELWRI_Q)) {
- xfs_buf_list_del(bp);
- xfs_buf_relse(bp);
- continue;
- }
-
- trace_xfs_buf_delwri_split(bp, _RET_IP_);
-
- /*
- * If we have a wait list, each buffer (and associated delwri
- * queue reference) transfers to it and is submitted
- * synchronously. Otherwise, drop the buffer from the delwri
- * queue and submit async.
- */
- bp->b_flags &= ~_XBF_DELWRI_Q;
- bp->b_flags |= XBF_WRITE;
- if (wait_list) {
- bp->b_flags &= ~XBF_ASYNC;
- list_move_tail(&bp->b_list, wait_list);
- } else {
- bp->b_flags |= XBF_ASYNC;
- xfs_buf_list_del(bp);
- }
- __xfs_buf_submit(bp, false);
+ /*
+ * Someone else might have written the buffer synchronously or marked it
+ * stale in the meantime. In that case only the _XBF_DELWRI_Q flag got
+ * cleared, and we have to drop the reference and remove it from the
+ * list here.
+ */
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+ xfs_buf_list_del(bp);
+ xfs_buf_relse(bp);
+ return false;
}
- blk_finish_plug(&plug);
- return pinned;
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ bp->b_flags |= XBF_WRITE;
+ return true;
}
/*
@@ -2354,7 +2194,30 @@ int
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
- return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
+ struct xfs_buf *bp, *n;
+ int pinned = 0;
+ struct blk_plug plug;
+
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ if (!xfs_buf_trylock(bp))
+ continue;
+ if (xfs_buf_ispinned(bp)) {
+ xfs_buf_unlock(bp);
+ pinned++;
+ continue;
+ }
+ if (!xfs_buf_delwri_submit_prep(bp))
+ continue;
+ bp->b_flags |= XBF_ASYNC;
+ xfs_buf_list_del(bp);
+ xfs_buf_submit(bp);
+ }
+ blk_finish_plug(&plug);
+
+ return pinned;
}
/*
@@ -2371,9 +2234,21 @@ xfs_buf_delwri_submit(
{
LIST_HEAD (wait_list);
int error = 0, error2;
- struct xfs_buf *bp;
+ struct xfs_buf *bp, *n;
+ struct blk_plug plug;
- xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ xfs_buf_lock(bp);
+ if (!xfs_buf_delwri_submit_prep(bp))
+ continue;
+ bp->b_flags &= ~XBF_ASYNC;
+ list_move_tail(&bp->b_list, &wait_list);
+ xfs_buf_submit(bp);
+ }
+ blk_finish_plug(&plug);
/* Wait for IO to complete. */
while (!list_empty(&wait_list)) {
@@ -2398,14 +2273,9 @@ xfs_buf_delwri_submit(
* Push a single buffer on a delwri queue.
*
* The purpose of this function is to submit a single buffer of a delwri queue
- * and return with the buffer still on the original queue. The waiting delwri
- * buffer submission infrastructure guarantees transfer of the delwri queue
- * buffer reference to a temporary wait list. We reuse this infrastructure to
- * transfer the buffer back to the original queue.
+ * and return with the buffer still on the original queue.
*
- * Note the buffer transitions from the queued state, to the submitted and wait
- * listed state and back to the queued state during this call. The buffer
- * locking and queue management logic between _delwri_pushbuf() and
+ * The buffer locking and queue management logic between _delwri_pushbuf() and
* _delwri_queue() guarantee that the buffer cannot be queued to another list
* before returning.
*/
@@ -2414,33 +2284,21 @@ xfs_buf_delwri_pushbuf(
struct xfs_buf *bp,
struct list_head *buffer_list)
{
- LIST_HEAD (submit_list);
int error;
ASSERT(bp->b_flags & _XBF_DELWRI_Q);
trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
- /*
- * Isolate the buffer to a new local list so we can submit it for I/O
- * independently from the rest of the original list.
- */
xfs_buf_lock(bp);
- list_move(&bp->b_list, &submit_list);
- xfs_buf_unlock(bp);
-
- /*
- * Delwri submission clears the DELWRI_Q buffer flag and returns with
- * the buffer on the wait list with the original reference. Rather than
- * bounce the buffer from a local wait list back to the original list
- * after I/O completion, reuse the original list as the wait list.
- */
- xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+ bp->b_flags |= XBF_WRITE;
+ xfs_buf_submit(bp);
/*
- * The buffer is now locked, under I/O and wait listed on the original
- * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
- * return with the buffer unlocked and on the original queue.
+ * The buffer is now locked, under I/O but still on the original delwri
+ * queue. Wait for I/O completion, restore the DELWRI_Q flag and
+ * return with the buffer unlocked and still on the original queue.
*/
error = xfs_buf_iowait(bp);
bp->b_flags |= _XBF_DELWRI_Q;
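
The buffer cache changes above replace the atomic b_hold counter with a plain integer protected by b_lock, so xfs_buf_try_hold() can refuse to take a reference once the count has reached zero and RCU hash lookups cannot resurrect a buffer that is already being torn down. A minimal user-space sketch of that lock-protected try-hold pattern, using a pthread mutex and hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical object with a lock-protected hold count, like b_hold. */
	struct obj {
		pthread_mutex_t	lock;
		unsigned int	hold;	/* 0 means the object is being freed */
	};

	/*
	 * Take a reference only if the object still has one; a lookup that
	 * loses the race against the final release sees hold == 0 and backs off.
	 */
	static bool obj_try_hold(struct obj *o)
	{
		bool ok;

		pthread_mutex_lock(&o->lock);
		ok = o->hold != 0;
		if (ok)
			o->hold++;
		pthread_mutex_unlock(&o->lock);
		return ok;
	}

	static bool obj_release(struct obj *o)
	{
		bool free_it;

		pthread_mutex_lock(&o->lock);
		free_it = --o->hold == 0;
		pthread_mutex_unlock(&o->lock);
		return free_it;	/* caller frees the object outside the lock */
	}

	int main(void)
	{
		struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .hold = 1 };

		printf("try_hold: %d\n", obj_try_hold(&o));	/* 1: 1 -> 2 */
		printf("release frees: %d\n", obj_release(&o));	/* 0 */
		printf("release frees: %d\n", obj_release(&o));	/* 1: last ref */
		return 0;
	}
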
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 3d56bc7a35cc..3b4ed42e11c0 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -34,8 +34,6 @@ struct xfs_buf;
#define XBF_WRITE_FAIL (1u << 7) /* async writes have failed on this buffer */
/* buffer type flags for write callbacks */
-#define _XBF_INODES (1u << 16)/* inode buffer */
-#define _XBF_DQUOTS (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */
/* flags used only internally */
@@ -65,8 +63,6 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_DONE, "DONE" }, \
{ XBF_STALE, "STALE" }, \
{ XBF_WRITE_FAIL, "WRITE_FAIL" }, \
- { _XBF_INODES, "INODES" }, \
- { _XBF_DQUOTS, "DQUOTS" }, \
{ _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
@@ -84,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t;
#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */
struct xfs_buf_cache {
- spinlock_t bc_lock;
struct rhashtable bc_hash;
};
@@ -172,7 +167,7 @@ struct xfs_buf {
xfs_daddr_t b_rhash_key; /* buffer cache index */
int b_length; /* size of buffer in BBs */
- atomic_t b_hold; /* reference count */
+ unsigned int b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */
xfs_buf_flags_t b_flags; /* status flags */
struct semaphore b_sema; /* semaphore for lockables */
@@ -184,10 +179,9 @@ struct xfs_buf {
struct list_head b_lru; /* lru list */
spinlock_t b_lock; /* internal state lock */
unsigned int b_state; /* internal state flags */
- int b_io_error; /* internal IO error state */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
- struct xfs_perag *b_pag; /* contains rbtree root */
+ struct xfs_perag *b_pag;
struct xfs_mount *b_mount;
struct xfs_buftarg *b_target; /* buffer target (device) */
void *b_addr; /* virtual address of buffer */
@@ -202,11 +196,11 @@ struct xfs_buf {
struct xfs_buf_map __b_map; /* inline compound buffer map */
int b_map_count;
atomic_t b_pin_count; /* pin count */
- atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset of b_addr,
only for _XBF_KMEM buffers */
int b_error; /* error code on I/O */
+ void (*b_iodone)(struct xfs_buf *bp);
/*
* async write failure retry count. Initialised to zero on the first
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 4d8a6aece995..8cde85259a58 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -54,17 +54,12 @@ bool xfs_buf_item_put(struct xfs_buf_log_item *);
void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
void xfs_buf_inode_iodone(struct xfs_buf *);
-void xfs_buf_inode_io_fail(struct xfs_buf *bp);
#ifdef CONFIG_XFS_QUOTA
void xfs_buf_dquot_iodone(struct xfs_buf *);
-void xfs_buf_dquot_io_fail(struct xfs_buf *bp);
#else
static inline void xfs_buf_dquot_iodone(struct xfs_buf *bp)
{
}
-static inline void xfs_buf_dquot_io_fail(struct xfs_buf *bp)
-{
-}
#endif /* CONFIG_XFS_QUOTA */
void xfs_buf_iodone(struct xfs_buf *);
bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec);
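
With _XBF_INODES and _XBF_DQUOTS gone, completion routing moves from flag tests to the per-buffer b_iodone callback that the submitter attaches (for example xfs_buf_dquot_iodone in the dquot flush path further below). A small user-space sketch of that callback-dispatch pattern, with hypothetical names:

	#include <stdio.h>

	/* Hypothetical buffer with a per-instance completion hook, like b_iodone. */
	struct buf {
		int	error;
		void	(*iodone)(struct buf *bp);
	};

	static void inode_buf_iodone(struct buf *bp)
	{
		printf("inode buffer done, error %d\n", bp->error);
	}

	static void dquot_buf_iodone(struct buf *bp)
	{
		printf("dquot buffer done, error %d\n", bp->error);
	}

	/*
	 * Generic completion: instead of switching on buffer type flags, call
	 * whatever hook the submitter attached, if any.
	 */
	static void buf_ioend(struct buf *bp)
	{
		if (bp->iodone)
			bp->iodone(bp);
	}

	int main(void)
	{
		struct buf ib = { .iodone = inode_buf_iodone };
		struct buf db = { .iodone = dquot_buf_iodone };

		buf_ioend(&ib);
		buf_ioend(&db);
		return 0;
	}
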
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index 3d0c6402cb36..05a2f6927c12 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -262,12 +262,18 @@ xlog_recover_validate_buf_type(
case XFS_BMAP_MAGIC:
bp->b_ops = &xfs_bmbt_buf_ops;
break;
+ case XFS_RTRMAP_CRC_MAGIC:
+ bp->b_ops = &xfs_rtrmapbt_buf_ops;
+ break;
case XFS_RMAP_CRC_MAGIC:
bp->b_ops = &xfs_rmapbt_buf_ops;
break;
case XFS_REFC_CRC_MAGIC:
bp->b_ops = &xfs_refcountbt_buf_ops;
break;
+ case XFS_RTREFC_CRC_MAGIC:
+ bp->b_ops = &xfs_rtrefcountbt_buf_ops;
+ break;
default:
warnmsg = "Bad btree block magic!";
break;
@@ -855,6 +861,8 @@ xlog_recover_get_buf_lsn(
uuid = &btb->bb_u.s.bb_uuid;
break;
}
+ case XFS_RTRMAP_CRC_MAGIC:
+ case XFS_RTREFC_CRC_MAGIC:
case XFS_BMAP_CRC_MAGIC:
case XFS_BMAP_MAGIC: {
struct xfs_btree_block *btb = blk;
@@ -1079,7 +1087,7 @@ xlog_recover_buf_commit_pass2(
error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f,
current_lsn);
if (error)
- goto out_release;
+ goto out_writebuf;
/* Update the rt superblock if we have one. */
if (xfs_has_rtsb(mp) && mp->m_rtsb_bp) {
@@ -1097,6 +1105,15 @@ xlog_recover_buf_commit_pass2(
}
/*
+ * A buffer held by a buf log item during 'normal' buffer recovery must
+ * be committed through the buffer I/O submission path to ensure proper
+ * release. If an error occurs during sb buffer recovery, the log is shut
+ * down before the buffer list is submitted so that buffers can still be
+ * released correctly through the ioend failure path.
+ */
+out_writebuf:
+
+ /*
* Perform delayed write on the buffer. Asynchronous writes will be
* slower when taking into account all the buffers to be flushed.
*
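The out_writebuf change above reroutes the primary-sb recovery error path so the buffer still goes through delayed-write submission instead of being released directly. A minimal userspace sketch of that control flow, with hypothetical names (recover_sb, queue_delwri, release) standing in for the real helpers:

#include <stdio.h>

static int recover_sb(int fail) { return fail ? -5 /* -EIO */ : 0; }
static void queue_delwri(void) { puts("buffer queued for delayed write"); }
static void release(void) { puts("buffer released"); }

static int commit_pass2(int is_sb, int fail)
{
	int error = 0;

	if (is_sb) {
		error = recover_sb(fail);
		if (error)
			goto out_writebuf;	/* was: goto out_release */
	}
	/* ... other recovery work elided ... */

out_writebuf:
	queue_delwri();		/* buffer always goes through I/O submission */
	release();
	return error;
}

int main(void)
{
	return commit_pass2(1, 1) ? 1 : 0;
}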
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index c4bd145f5ec1..3f2403a7b49c 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -90,7 +90,7 @@ xfs_discard_endio_work(
/*
* Queue up the actual completion to a thread to avoid IRQ-safe locking for
- * pagb_lock.
+ * eb_lock.
*/
static void
xfs_discard_endio(
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 201c26322ede..edbc521870a1 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1230,18 +1230,6 @@ xfs_buf_dquot_iodone(
}
}
-void
-xfs_buf_dquot_io_fail(
- struct xfs_buf *bp)
-{
- struct xfs_log_item *lip;
-
- spin_lock(&bp->b_mount->m_ail->ail_lock);
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
- set_bit(XFS_LI_FAILED, &lip->li_flags);
- spin_unlock(&bp->b_mount->m_ail->ail_lock);
-}
-
/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
@@ -1316,7 +1304,8 @@ out_abort:
/*
* Attach a dquot buffer to this dquot to avoid allocating a buffer during a
- * dqflush, since dqflush can be called from reclaim context.
+ * dqflush, since dqflush can be called from reclaim context. Caller must hold
+ * the dqlock.
*/
int
xfs_dquot_attach_buf(
@@ -1337,13 +1326,16 @@ xfs_dquot_attach_buf(
return error;
/*
- * Attach the dquot to the buffer so that the AIL does not have
- * to read the dquot buffer to push this item.
+ * Hold the dquot buffer so that we retain our ref to it after
+ * detaching it from the transaction, then give that ref to the
+ * dquot log item so that the AIL does not have to read the
+ * dquot buffer to push this item.
*/
xfs_buf_hold(bp);
+ xfs_trans_brelse(tp, bp);
+
spin_lock(&qlip->qli_lock);
lip->li_buf = bp;
- xfs_trans_brelse(tp, bp);
}
qlip->qli_dirty = true;
spin_unlock(&qlip->qli_lock);
@@ -1459,7 +1451,7 @@ xfs_qm_dqflush(
* Attach the dquot to the buffer so that we can remove this dquot from
* the AIL and release the flush lock once the dquot is synced to disk.
*/
- bp->b_flags |= _XBF_DQUOTS;
+ bp->b_iodone = xfs_buf_dquot_iodone;
list_add_tail(&lip->li_bio_list, &bp->b_li_list);
/*
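The reordering in xfs_dquot_attach_buf() above takes an extra hold on the buffer and releases the transaction's reference before publishing the pointer in the log item. A small userspace sketch of that reference hand-off, using a plain counter as a stand-in for the buffer hold count (the qli_lock taken in the real code is elided):

#include <stdio.h>

struct buf { int hold; };

static void buf_hold(struct buf *bp)     { bp->hold++; }
static void trans_brelse(struct buf *bp) { bp->hold--; }

int main(void)
{
	struct buf bp = { .hold = 1 };	/* reference owned by the transaction */
	struct buf *li_buf;

	buf_hold(&bp);			/* our reference for the log item */
	trans_brelse(&bp);		/* give the transaction's ref back */
	li_buf = &bp;			/* the log item now owns the surviving ref */

	printf("holds=%d (log item buf=%p)\n", bp.hold, (void *)li_buf);
	return 0;
}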
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index c617bac75361..61217adf5ba5 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -160,6 +160,9 @@ static inline struct xfs_dquot *xfs_inode_dquot(
struct xfs_inode *ip,
xfs_dqtype_t type)
{
+ if (xfs_is_metadir_inode(ip))
+ return NULL;
+
switch (type) {
case XFS_DQTYPE_USER:
return ip->i_udquot;
diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c
index 5ede81fadbd8..fa5f31931efd 100644
--- a/fs/xfs/xfs_drain.c
+++ b/fs/xfs/xfs_drain.c
@@ -13,28 +13,28 @@
#include "xfs_trace.h"
/*
- * Use a static key here to reduce the overhead of xfs_drain_rele. If the
- * compiler supports jump labels, the static branch will be replaced by a nop
- * sled when there are no xfs_drain_wait callers. Online fsck is currently
- * the only caller, so this is a reasonable tradeoff.
+ * Use a static key here to reduce the overhead of xfs_defer_drain_rele. If
+ * the compiler supports jump labels, the static branch will be replaced by a
+ * nop sled when there are no xfs_defer_drain_wait callers. Online fsck is
+ * currently the only caller, so this is a reasonable tradeoff.
*
* Note: Patching the kernel code requires taking the cpu hotplug lock. Other
* parts of the kernel allocate memory with that lock held, which means that
* XFS callers cannot hold any locks that might be used by memory reclaim or
* writeback when calling the static_branch_{inc,dec} functions.
*/
-static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);
+static DEFINE_STATIC_KEY_FALSE(xfs_defer_drain_waiter_gate);
void
-xfs_drain_wait_disable(void)
+xfs_defer_drain_wait_disable(void)
{
- static_branch_dec(&xfs_drain_waiter_gate);
+ static_branch_dec(&xfs_defer_drain_waiter_gate);
}
void
-xfs_drain_wait_enable(void)
+xfs_defer_drain_wait_enable(void)
{
- static_branch_inc(&xfs_drain_waiter_gate);
+ static_branch_inc(&xfs_defer_drain_waiter_gate);
}
void
@@ -71,7 +71,7 @@ static inline bool has_waiters(struct wait_queue_head *wq_head)
static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
{
if (atomic_dec_and_test(&dr->dr_count) &&
- static_branch_unlikely(&xfs_drain_waiter_gate) &&
+ static_branch_unlikely(&xfs_defer_drain_waiter_gate) &&
has_waiters(&dr->dr_waiters))
wake_up(&dr->dr_waiters);
}
diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h
index efcf88df9a5e..4d446dbf65e5 100644
--- a/fs/xfs/xfs_drain.h
+++ b/fs/xfs/xfs_drain.h
@@ -26,8 +26,8 @@ struct xfs_defer_drain {
void xfs_defer_drain_init(struct xfs_defer_drain *dr);
void xfs_defer_drain_free(struct xfs_defer_drain *dr);
-void xfs_drain_wait_disable(void);
-void xfs_drain_wait_enable(void);
+void xfs_defer_drain_wait_disable(void);
+void xfs_defer_drain_wait_enable(void);
/*
* Deferred Work Intent Drains
@@ -61,6 +61,9 @@ void xfs_drain_wait_enable(void);
* All functions that create work items must increment the intent counter as
* soon as the item is added to the transaction and cannot drop the counter
* until the item is finished or cancelled.
+ *
+ * The same principles apply to realtime groups because the rt metadata inode
+ * ILOCKs are not held across transaction rolls.
*/
struct xfs_group *xfs_group_intent_get(struct xfs_mount *mp,
xfs_fsblock_t fsbno, enum xfs_group_type type);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 78cdc5064a8c..dbd87e137694 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -63,6 +63,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_WB_DELAY_MS,
XFS_RANDOM_WRITE_DELAY_MS,
XFS_RANDOM_EXCHMAPS_FINISH_ONE,
+ XFS_RANDOM_METAFILE_RESV_CRITICAL,
};
struct xfs_errortag_attr {
@@ -181,6 +182,7 @@ XFS_ERRORTAG_ATTR_RW(attr_leaf_to_node, XFS_ERRTAG_ATTR_LEAF_TO_NODE);
XFS_ERRORTAG_ATTR_RW(wb_delay_ms, XFS_ERRTAG_WB_DELAY_MS);
XFS_ERRORTAG_ATTR_RW(write_delay_ms, XFS_ERRTAG_WRITE_DELAY_MS);
XFS_ERRORTAG_ATTR_RW(exchmaps_finish_one, XFS_ERRTAG_EXCHMAPS_FINISH_ONE);
+XFS_ERRORTAG_ATTR_RW(metafile_resv_crit, XFS_ERRTAG_METAFILE_RESV_CRITICAL);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -227,6 +229,7 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(wb_delay_ms),
XFS_ERRORTAG_ATTR_LIST(write_delay_ms),
XFS_ERRORTAG_ATTR_LIST(exchmaps_finish_one),
+ XFS_ERRORTAG_ATTR_LIST(metafile_resv_crit),
NULL,
};
ATTRIBUTE_GROUPS(xfs_errortag);
diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c
index 265c42449893..0b41bdfecdfb 100644
--- a/fs/xfs/xfs_exchrange.c
+++ b/fs/xfs/xfs_exchrange.c
@@ -119,6 +119,9 @@ xfs_exchrange_reserve_quota(
int ip1_error = 0;
int error;
+ ASSERT(!xfs_is_metadir_inode(req->ip1));
+ ASSERT(!xfs_is_metadir_inode(req->ip2));
+
/*
* Don't bother with a quota reservation if we're not enforcing them
* or the two inodes have the same dquots.
@@ -326,22 +329,6 @@ out_trans_cancel:
* successfully but before locks are dropped.
*/
-/* Verify that we have security clearance to perform this operation. */
-static int
-xfs_exchange_range_verify_area(
- struct xfs_exchrange *fxr)
-{
- int ret;
-
- ret = remap_verify_area(fxr->file1, fxr->file1_offset, fxr->length,
- true);
- if (ret)
- return ret;
-
- return remap_verify_area(fxr->file2, fxr->file2_offset, fxr->length,
- true);
-}
-
/*
* Performs necessary checks before doing a range exchange, having stabilized
* mutable inode attributes via i_rwsem.
@@ -352,11 +339,13 @@ xfs_exchange_range_checks(
unsigned int alloc_unit)
{
struct inode *inode1 = file_inode(fxr->file1);
+ loff_t size1 = i_size_read(inode1);
struct inode *inode2 = file_inode(fxr->file2);
+ loff_t size2 = i_size_read(inode2);
uint64_t allocmask = alloc_unit - 1;
int64_t test_len;
uint64_t blen;
- loff_t size1, size2, tmp;
+ loff_t tmp;
int error;
/* Don't touch certain kinds of inodes */
@@ -365,24 +354,25 @@ xfs_exchange_range_checks(
if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
return -ETXTBSY;
- size1 = i_size_read(inode1);
- size2 = i_size_read(inode2);
-
/* Ranges cannot start after EOF. */
if (fxr->file1_offset > size1 || fxr->file2_offset > size2)
return -EINVAL;
- /*
- * If the caller said to exchange to EOF, we set the length of the
- * request large enough to cover everything to the end of both files.
- */
if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) {
+ /*
+ * If the caller said to exchange to EOF, we set the length of
+ * the request large enough to cover everything to the end of
+ * both files.
+ */
fxr->length = max_t(int64_t, size1 - fxr->file1_offset,
size2 - fxr->file2_offset);
-
- error = xfs_exchange_range_verify_area(fxr);
- if (error)
- return error;
+ } else {
+ /*
+ * Otherwise we require both ranges to end within EOF.
+ */
+ if (fxr->file1_offset + fxr->length > size1 ||
+ fxr->file2_offset + fxr->length > size2)
+ return -EINVAL;
}
/*
@@ -399,15 +389,6 @@ xfs_exchange_range_checks(
return -EINVAL;
/*
- * We require both ranges to end within EOF, unless we're exchanging
- * to EOF.
- */
- if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) &&
- (fxr->file1_offset + fxr->length > size1 ||
- fxr->file2_offset + fxr->length > size2))
- return -EINVAL;
-
- /*
* Make sure we don't hit any file size limits. If we hit any size
* limits such that test_length was adjusted, we abort the whole
* operation.
@@ -744,6 +725,7 @@ xfs_exchange_range(
{
struct inode *inode1 = file_inode(fxr->file1);
struct inode *inode2 = file_inode(fxr->file2);
+ loff_t check_len = fxr->length;
int ret;
BUILD_BUG_ON(XFS_EXCHANGE_RANGE_ALL_FLAGS &
@@ -776,14 +758,18 @@ xfs_exchange_range(
return -EBADF;
/*
- * If we're not exchanging to EOF, we can check the areas before
- * stabilizing both files' i_size.
+ * If we're exchanging to EOF we can't calculate the length until taking
+ * the iolock. Pass a 0 length to remap_verify_area similar to the
+ * FICLONE and FICLONERANGE ioctls that support cloning to EOF as well.
*/
- if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)) {
- ret = xfs_exchange_range_verify_area(fxr);
- if (ret)
- return ret;
- }
+ if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)
+ check_len = 0;
+ ret = remap_verify_area(fxr->file1, fxr->file1_offset, check_len, true);
+ if (ret)
+ return ret;
+ ret = remap_verify_area(fxr->file2, fxr->file2_offset, check_len, true);
+ if (ret)
+ return ret;
/* Update cmtime if the fd/inode don't forbid it. */
if (!(fxr->file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1))
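The reworked checks in xfs_exchange_range_checks() either extend the request to EOF or require both ranges to end within EOF. A hedged userspace sketch of that length logic; the flag value, helper name, and sample sizes are made up for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TO_EOF	1	/* stand-in for XFS_EXCHANGE_RANGE_TO_EOF */

static int check_lengths(unsigned int flags, int64_t size1, int64_t off1,
			 int64_t size2, int64_t off2, int64_t *length)
{
	/* Ranges cannot start after EOF. */
	if (off1 > size1 || off2 > size2)
		return -EINVAL;

	if (flags & DEMO_TO_EOF) {
		/* Cover everything to the end of both files. */
		int64_t l1 = size1 - off1, l2 = size2 - off2;

		*length = l1 > l2 ? l1 : l2;
	} else if (off1 + *length > size1 || off2 + *length > size2) {
		/* Otherwise both ranges must end within EOF. */
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	int64_t len = 0;
	int ret = check_lengths(DEMO_TO_EOF, 4096, 1024, 8192, 0, &len);

	printf("ret=%d length=%lld\n", ret, (long long)len);	/* length=8192 */
	return 0;
}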
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9a435b1ff264..f7a7d89c345e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1451,6 +1451,9 @@ xfs_dax_read_fault(
trace_xfs_read_fault(ip, order);
+ ret = filemap_fsnotify_fault(vmf);
+ if (unlikely(ret))
+ return ret;
xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
ret = xfs_dax_fault_locked(vmf, order, false);
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
@@ -1479,6 +1482,16 @@ xfs_write_fault(
vm_fault_t ret;
trace_xfs_write_fault(ip, order);
+ /*
+ * Usually we get here from the ->page_mkwrite callback, but in the case
+ * of DAX we will also get here for an ordinary write fault. Handle HSM
+ * notifications for that case.
+ */
+ if (IS_DAX(inode)) {
+ ret = filemap_fsnotify_fault(vmf);
+ if (unlikely(ret))
+ return ret;
+ }
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 3290dd8524a6..1dbd2d75f7ae 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -26,6 +26,8 @@
#include "xfs_rtbitmap.h"
#include "xfs_ag.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
/* Convert an xfs_fsmap to an fsmap. */
static void
@@ -211,21 +213,20 @@ xfs_getfsmap_is_shared(
struct xfs_mount *mp = tp->t_mountp;
struct xfs_btree_cur *cur;
xfs_agblock_t fbno;
- xfs_extlen_t flen;
+ xfs_extlen_t flen = 0;
int error;
*stat = false;
- if (!xfs_has_reflink(mp))
- return 0;
- /* rt files will have no perag structure */
- if (!info->group)
+ if (!xfs_has_reflink(mp) || !info->group)
return 0;
- /* Are there any shared blocks here? */
- flen = 0;
- cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
- to_perag(info->group));
+ if (info->group->xg_type == XG_TYPE_RTG)
+ cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(info->group));
+ else
+ cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
+ to_perag(info->group));
+ /* Are there any shared blocks here? */
error = xfs_refcount_find_shared(cur, frec->rec_key,
XFS_BB_TO_FSBT(mp, frec->len_daddr), &fbno, &flen,
false);
@@ -832,6 +833,175 @@ xfs_getfsmap_rtdev_rtbitmap(
return error;
}
+
+/* Transform a realtime rmapbt record into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rmapbt_helper(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_fsmap_irec frec = {
+ .owner = rec->rm_owner,
+ .offset = rec->rm_offset,
+ .rm_flags = rec->rm_flags,
+ .rec_key = rec->rm_startblock,
+ };
+ struct xfs_getfsmap_info *info = priv;
+
+ return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
+ rec->rm_startblock, rec->rm_blockcount, &frec);
+}
+
+/* Actually query the rtrmap btree. */
+STATIC int
+xfs_getfsmap_rtdev_rmapbt_query(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info,
+ struct xfs_btree_cur **curpp)
+{
+ struct xfs_rtgroup *rtg = to_rtg(info->group);
+
+ /* Query the rtrmapbt */
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
+ *curpp = xfs_rtrmapbt_init_cursor(tp, rtg);
+ return xfs_rmap_query_range(*curpp, &info->low, &info->high,
+ xfs_getfsmap_rtdev_rmapbt_helper, info);
+}
+
+/* Execute a getfsmap query against the realtime device rmapbt. */
+STATIC int
+xfs_getfsmap_rtdev_rmapbt(
+ struct xfs_trans *tp,
+ const struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rtgroup *rtg = NULL;
+ struct xfs_btree_cur *bt_cur = NULL;
+ xfs_rtblock_t start_rtb;
+ xfs_rtblock_t end_rtb;
+ xfs_rgnumber_t start_rg, end_rg;
+ uint64_t eofs;
+ int error = 0;
+
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ start_rtb = xfs_daddr_to_rtb(mp, keys[0].fmr_physical);
+ end_rtb = xfs_daddr_to_rtb(mp, min(eofs - 1, keys[1].fmr_physical));
+
+ info->missing_owner = XFS_FMR_OWN_FREE;
+
+ /*
+ * Convert the fsmap low/high keys to rtgroup-based keys. Initialize
+ * low to the fsmap low key and max out the high key to the end
+ * of the rtgroup.
+ */
+ info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
+ if (error)
+ return error;
+ info->low.rm_blockcount = XFS_BB_TO_FSBT(mp, keys[0].fmr_length);
+ xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+
+ /* Adjust the low key if we are continuing from where we left off. */
+ if (info->low.rm_blockcount == 0) {
+ /* No previous record from which to continue */
+ } else if (rmap_not_shareable(mp, &info->low)) {
+ /* Last record seen was an unshareable extent */
+ info->low.rm_owner = 0;
+ info->low.rm_offset = 0;
+
+ start_rtb += info->low.rm_blockcount;
+ if (xfs_rtb_to_daddr(mp, start_rtb) >= eofs)
+ return 0;
+ } else {
+ /* Last record seen was a shareable file data extent */
+ info->low.rm_offset += info->low.rm_blockcount;
+ }
+ info->low.rm_startblock = xfs_rtb_to_rgbno(mp, start_rtb);
+
+ info->high.rm_startblock = -1U;
+ info->high.rm_owner = ULLONG_MAX;
+ info->high.rm_offset = ULLONG_MAX;
+ info->high.rm_blockcount = 0;
+ info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
+
+ start_rg = xfs_rtb_to_rgno(mp, start_rtb);
+ end_rg = xfs_rtb_to_rgno(mp, end_rtb);
+
+ while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rg, end_rg))) {
+ /*
+ * Set the rtgroup high key from the fsmap high key if this
+ * is the last rtgroup that we're querying.
+ */
+ info->group = rtg_group(rtg);
+ if (rtg_rgno(rtg) == end_rg) {
+ info->high.rm_startblock =
+ xfs_rtb_to_rgbno(mp, end_rtb);
+ info->high.rm_offset =
+ XFS_BB_TO_FSBT(mp, keys[1].fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
+ if (error)
+ break;
+ xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
+ }
+
+ if (bt_cur) {
+ xfs_rtgroup_unlock(to_rtg(bt_cur->bc_group),
+ XFS_RTGLOCK_RMAP |
+ XFS_RTGLOCK_REFCOUNT);
+ xfs_btree_del_cursor(bt_cur, XFS_BTREE_NOERROR);
+ bt_cur = NULL;
+ }
+
+ trace_xfs_fsmap_low_group_key(mp, info->dev, rtg_rgno(rtg),
+ &info->low);
+ trace_xfs_fsmap_high_group_key(mp, info->dev, rtg_rgno(rtg),
+ &info->high);
+
+ error = xfs_getfsmap_rtdev_rmapbt_query(tp, info, &bt_cur);
+ if (error)
+ break;
+
+ /*
+ * Set the rtgroup low key to the start of the rtgroup prior to
+ * moving on to the next rtgroup.
+ */
+ if (rtg_rgno(rtg) == start_rg)
+ memset(&info->low, 0, sizeof(info->low));
+
+ /*
+ * If this is the last rtgroup, report any gap at the end of it
+ * before we drop the reference to the rtgroup when the loop
+ * terminates.
+ */
+ if (rtg_rgno(rtg) == end_rg) {
+ info->last = true;
+ error = xfs_getfsmap_rtdev_rmapbt_helper(bt_cur,
+ &info->high, info);
+ if (error)
+ break;
+ }
+ info->group = NULL;
+ }
+
+ if (bt_cur) {
+ xfs_rtgroup_unlock(to_rtg(bt_cur->bc_group),
+ XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
+ xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR :
+ XFS_BTREE_NOERROR);
+ }
+
+ /* loop termination case */
+ if (rtg) {
+ info->group = NULL;
+ xfs_rtgroup_rele(rtg);
+ }
+
+ return error;
+}
#endif /* CONFIG_XFS_RT */
/* Do we recognize the device? */
@@ -971,7 +1141,10 @@ xfs_getfsmap(
if (mp->m_rtdev_targp) {
handlers[2].nr_sectors = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
- handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
+ if (use_rmap)
+ handlers[2].fn = xfs_getfsmap_rtdev_rmapbt;
+ else
+ handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
}
#endif /* CONFIG_XFS_RT */
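The new xfs_getfsmap_rtdev_rmapbt() walks the realtime groups covered by the query, applying the caller's low key only to the first group and the high key only to the last one. A conceptual userspace sketch of that per-group clamping (group numbers and keys are plain integers here, not real rtgroup state):

#include <stdio.h>

int main(void)
{
	unsigned int start_g = 2, end_g = 5;
	unsigned int low_key = 100, high_key = 40;	/* group-relative blocks */

	for (unsigned int g = start_g; g <= end_g; g++) {
		unsigned int lo = (g == start_g) ? low_key : 0;
		unsigned int hi = (g == end_g) ? high_key : ~0U;

		printf("query group %u: [%u, %u]\n", g, lo, hi);
	}
	return 0;
}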
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 28dde215c899..455298503d01 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -21,6 +21,9 @@
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
/*
* Write new AG headers to disk. Non-transactional, but need to be
@@ -113,6 +116,12 @@ xfs_growfs_data_private(
xfs_buf_relse(bp);
}
+ /* Make sure the new fs size won't cause problems with the log. */
+ error = xfs_growfs_check_rtgeom(mp, nb, mp->m_sb.sb_rblocks,
+ mp->m_sb.sb_rextsize);
+ if (error)
+ return error;
+
nb_div = nb;
nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
@@ -220,7 +229,12 @@ xfs_growfs_data_private(
error = xfs_fs_reserve_ag_blocks(mp);
if (error == -ENOSPC)
error = 0;
+
+ /* Compute new maxlevels for rt btrees. */
+ xfs_rtrmapbt_compute_maxlevels(mp);
+ xfs_rtrefcountbt_compute_maxlevels(mp);
}
+
return error;
out_trans_cancel:
@@ -541,6 +555,19 @@ xfs_fs_reserve_ag_blocks(
xfs_warn(mp,
"Error %d reserving per-AG metadata reserve pool.", error);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+
+ if (xfs_has_realtime(mp)) {
+ err2 = xfs_rt_resv_init(mp);
+ if (err2 && err2 != -ENOSPC) {
+ xfs_warn(mp,
+ "Error %d reserving realtime metadata reserve pool.", err2);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ }
+
+ if (err2 && !error)
+ error = err2;
}
return error;
@@ -555,6 +582,9 @@ xfs_fs_unreserve_ag_blocks(
{
struct xfs_perag *pag = NULL;
+ if (xfs_has_realtime(mp))
+ xfs_rt_resv_free(mp);
+
while ((pag = xfs_perag_next(mp, pag)))
xfs_ag_resv_free(pag);
}
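The xfs_fs_reserve_ag_blocks() hunk above adds a realtime reservation step whose failure is reported but only overrides the primary error when none was recorded yet. A small sketch of that aggregation pattern, with illustrative helper names:

#include <errno.h>
#include <stdio.h>

static int reserve_ag(void) { return 0; }	/* pretend the AG pool succeeded */
static int reserve_rt(void) { return -EIO; }	/* pretend the rt pool failed */

static int reserve_all(int have_realtime)
{
	int error = reserve_ag();

	if (error == -ENOSPC)
		error = 0;

	if (have_realtime) {
		int err2 = reserve_rt();

		if (err2 && err2 != -ENOSPC)
			fprintf(stderr, "rt metadata reserve pool error %d\n", err2);
		if (err2 && !error)
			error = err2;
	}
	return error;
}

int main(void)
{
	return reserve_all(1) ? 1 : 0;
}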
diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c
index c7c2e6561998..7c541fb373d5 100644
--- a/fs/xfs/xfs_health.c
+++ b/fs/xfs/xfs_health.c
@@ -447,6 +447,8 @@ static const struct ioctl_sick_map rtgroup_map[] = {
{ XFS_SICK_RG_SUPER, XFS_RTGROUP_GEOM_SICK_SUPER },
{ XFS_SICK_RG_BITMAP, XFS_RTGROUP_GEOM_SICK_BITMAP },
{ XFS_SICK_RG_SUMMARY, XFS_RTGROUP_GEOM_SICK_SUMMARY },
+ { XFS_SICK_RG_RMAPBT, XFS_RTGROUP_GEOM_SICK_RMAPBT },
+ { XFS_SICK_RG_REFCNTBT, XFS_RTGROUP_GEOM_SICK_REFCNTBT },
};
/* Fill out rtgroup geometry health info. */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c8ad2606f928..b1f9f156ec88 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1404,8 +1404,11 @@ xfs_inactive(
goto out;
/* Try to clean out the cow blocks if there are any. */
- if (xfs_inode_has_cow_data(ip))
- xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
+ if (xfs_inode_has_cow_data(ip)) {
+ error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
+ if (error)
+ goto out;
+ }
if (VFS_I(ip)->i_nlink != 0) {
/*
@@ -2382,7 +2385,16 @@ xfs_iflush(
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
goto flush_out;
}
- if (S_ISREG(VFS_I(ip)->i_mode)) {
+ if (ip->i_df.if_format == XFS_DINODE_FMT_META_BTREE) {
+ if (!S_ISREG(VFS_I(ip)->i_mode) ||
+ !(ip->i_diflags2 & XFS_DIFLAG2_METADATA)) {
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: Bad %s meta btree inode %Lu, ptr "PTR_FMT,
+ __func__, xfs_metafile_type_str(ip->i_metatype),
+ ip->i_ino, ip);
+ goto flush_out;
+ }
+ } else if (S_ISREG(VFS_I(ip)->i_mode)) {
if (XFS_TEST_ERROR(
ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
@@ -2422,6 +2434,14 @@ xfs_iflush(
goto flush_out;
}
+ if (xfs_inode_has_attr_fork(ip) &&
+ ip->i_af.if_format == XFS_DINODE_FMT_META_BTREE) {
+ xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
+ "%s: meta btree in inode %Lu attr fork, ptr "PTR_FMT,
+ __func__, ip->i_ino, ip);
+ goto flush_out;
+ }
+
/*
* Inode item log recovery for v2 inodes are dependent on the flushiter
* count for correct sequencing. We bump the flush iteration count so
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1648dc5a8068..c08093a65352 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -25,9 +25,19 @@ struct xfs_dquot;
typedef struct xfs_inode {
/* Inode linking and identification information. */
struct xfs_mount *i_mount; /* fs mount struct ptr */
- struct xfs_dquot *i_udquot; /* user dquot */
- struct xfs_dquot *i_gdquot; /* group dquot */
- struct xfs_dquot *i_pdquot; /* project dquot */
+ union {
+ struct {
+ struct xfs_dquot *i_udquot; /* user dquot */
+ struct xfs_dquot *i_gdquot; /* group dquot */
+ struct xfs_dquot *i_pdquot; /* project dquot */
+ };
+
+ /*
+ * Space that has been set aside to accommodate expansions of a
+ * metadata btree rooted in this file.
+ */
+ uint64_t i_meta_resv_asked;
+ };
/* Inode location stuff */
xfs_ino_t i_ino; /* inode number (agno/agino)*/
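The xfs_inode union above overlays the metadata reservation counter on the dquot pointers, which is safe because metadata inodes never carry dquots. A tiny userspace illustration of the same overlay with simplified, hypothetical types:

#include <stdint.h>
#include <stdio.h>

struct dquot;	/* opaque; never dereferenced here */

struct demo_inode {
	int is_metadir;
	union {
		struct {
			struct dquot *udquot;
			struct dquot *gdquot;
			struct dquot *pdquot;
		};
		/* only live on metadata-directory inodes, which have no dquots */
		uint64_t meta_resv_asked;
	};
};

int main(void)
{
	struct demo_inode ip = { .is_metadir = 1 };

	ip.meta_resv_asked = 1024;	/* safe: metadir inodes never attach dquots */
	printf("meta_resv_asked=%llu (reuses the dquot pointer slots)\n",
	       (unsigned long long)ip.meta_resv_asked);
	return 0;
}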
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 912f0b1bc3cb..35803fcf0beb 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -157,6 +157,20 @@ xfs_inode_item_precommit(
if (flags & XFS_ILOG_IVERSION)
flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);
+ /*
+ * Inode verifiers do not check that the CoW extent size hint is an
+ * integer multiple of the rt extent size on a directory with both
+ * rtinherit and cowextsize flags set. If we're logging a directory
+ * that is misconfigured in this way, clear the hint.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+ xfs_extlen_to_rtxmod(ip->i_mount, ip->i_cowextsize) > 0) {
+ ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_cowextsize = 0;
+ flags |= XFS_ILOG_CORE;
+ }
+
if (!iip->ili_item.li_buf) {
struct xfs_buf *bp;
int error;
@@ -185,7 +199,7 @@ xfs_inode_item_precommit(
xfs_buf_hold(bp);
spin_lock(&iip->ili_lock);
iip->ili_item.li_buf = bp;
- bp->b_flags |= _XBF_INODES;
+ bp->b_iodone = xfs_buf_inode_iodone;
list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
xfs_trans_brelse(tp, bp);
}
@@ -242,6 +256,7 @@ xfs_inode_item_data_fork_size(
}
break;
case XFS_DINODE_FMT_BTREE:
+ case XFS_DINODE_FMT_META_BTREE:
if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
ip->i_df.if_broot_bytes > 0) {
*nbytes += ip->i_df.if_broot_bytes;
@@ -362,6 +377,7 @@ xfs_inode_item_format_data_fork(
}
break;
case XFS_DINODE_FMT_BTREE:
+ case XFS_DINODE_FMT_META_BTREE:
iip->ili_fields &=
~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);
@@ -1023,18 +1039,6 @@ xfs_buf_inode_iodone(
list_splice_tail(&flushed_inodes, &bp->b_li_list);
}
-void
-xfs_buf_inode_io_fail(
- struct xfs_buf *bp)
-{
- struct xfs_log_item *lip;
-
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
- set_bit(XFS_LI_FAILED, &lip->li_flags);
- clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
- }
-}
-
/*
* Clear the inode logging fields so no more flushes are attempted. If we are
* on a buffer list, it is now safe to remove it because the buffer is
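The precommit hunk above drops a CoW extent size hint that is not a whole multiple of the realtime extent size. A minimal sketch of that alignment check; the modulo helper and the sample sizes stand in for xfs_extlen_to_rtxmod() and real geometry:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for xfs_extlen_to_rtxmod(): remainder of len modulo the rt extent size. */
static uint32_t rtxmod(uint32_t len, uint32_t rextsize)
{
	return len % rextsize;
}

int main(void)
{
	uint32_t rextsize = 16;		/* assumed rt extent size in fs blocks */
	uint32_t cowextsize = 40;	/* misaligned hint */

	if (rtxmod(cowextsize, rextsize) > 0) {
		printf("clearing misconfigured cowextsize hint (%u %% %u = %u)\n",
		       cowextsize, rextsize, rtxmod(cowextsize, rextsize));
		cowextsize = 0;
	}
	return 0;
}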
diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
index e70d2611456b..f3bfb814378c 100644
--- a/fs/xfs/xfs_inode_item_recover.c
+++ b/fs/xfs/xfs_inode_item_recover.c
@@ -22,6 +22,8 @@
#include "xfs_log_recover.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
STATIC void
xlog_recover_inode_ra_pass2(
@@ -266,6 +268,41 @@ xlog_dinode_verify_extent_counts(
return 0;
}
+static inline int
+xlog_recover_inode_dbroot(
+ struct xfs_mount *mp,
+ void *src,
+ unsigned int len,
+ struct xfs_dinode *dip)
+{
+ void *dfork = XFS_DFORK_DPTR(dip);
+ unsigned int dsize = XFS_DFORK_DSIZE(dip, mp);
+
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_BTREE:
+ xfs_bmbt_to_bmdr(mp, src, len, dfork, dsize);
+ break;
+ case XFS_DINODE_FMT_META_BTREE:
+ switch (be16_to_cpu(dip->di_metatype)) {
+ case XFS_METAFILE_RTRMAP:
+ xfs_rtrmapbt_to_disk(mp, src, len, dfork, dsize);
+ return 0;
+ case XFS_METAFILE_RTREFCOUNT:
+ xfs_rtrefcountbt_to_disk(mp, src, len, dfork, dsize);
+ return 0;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ break;
+ default:
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
+}
+
STATIC int
xlog_recover_inode_commit_pass2(
struct xlog *log,
@@ -393,8 +430,9 @@ xlog_recover_inode_commit_pass2(
if (unlikely(S_ISREG(ldip->di_mode))) {
- if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
- (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
+ if (ldip->di_format != XFS_DINODE_FMT_EXTENTS &&
+ ldip->di_format != XFS_DINODE_FMT_BTREE &&
+ ldip->di_format != XFS_DINODE_FMT_META_BTREE) {
XFS_CORRUPTION_ERROR(
"Bad log dinode data fork format for regular file",
XFS_ERRLEVEL_LOW, mp, ldip, sizeof(*ldip));
@@ -475,9 +513,9 @@ xlog_recover_inode_commit_pass2(
break;
case XFS_ILOG_DBROOT:
- xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
- (struct xfs_bmdr_block *)XFS_DFORK_DPTR(dip),
- XFS_DFORK_DSIZE(dip, mp));
+ error = xlog_recover_inode_dbroot(mp, src, len, dip);
+ if (error)
+ goto out_release;
break;
default:
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index f95103325318..ed85322507dd 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -469,8 +469,21 @@ xfs_fill_fsxattr(
}
}
- if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
- fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
+ if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
+ /*
+ * Don't let a misaligned CoW extent size hint on a directory
+ * escape to userspace if it won't pass the setattr checks
+ * later.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ ip->i_cowextsize % mp->m_sb.sb_rextsize > 0) {
+ fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
+ fa->fsx_cowextsize = 0;
+ } else {
+ fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
+ }
+ }
+
fa->fsx_projid = ip->i_projid;
if (ifp && !xfs_need_iread_extents(ifp))
fa->fsx_nextents = xfs_iext_count(ifp);
@@ -541,10 +554,6 @@ xfs_ioctl_setattr_xflags(
if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
xfs_extlen_to_rtxmod(mp, ip->i_extsize))
return -EINVAL;
-
- /* Clear reflink if we are actually able to set the rt flag. */
- if (xfs_is_reflink_inode(ip))
- ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
}
/* diflags2 only valid for v3 inodes. */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 50fa3ef89f6c..d61460309a78 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -976,10 +976,8 @@ xfs_dax_write_iomap_end(
if (!xfs_is_cow_inode(ip))
return 0;
- if (!written) {
- xfs_reflink_cancel_cow_range(ip, pos, length, true);
- return 0;
- }
+ if (!written)
+ return xfs_reflink_cancel_cow_range(ip, pos, length, true);
return xfs_reflink_end_cow(ip, pos, written);
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 05daad8a8d34..f8851ff835de 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2744,8 +2744,6 @@ xfs_log_ticket_regrant(
if (!ticket->t_cnt) {
xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
trace_xfs_log_ticket_regrant_exit(log, ticket);
-
- ticket->t_curr_res = ticket->t_unit_res;
}
xfs_log_ticket_put(ticket);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0af3d477197b..b3c27dbccce8 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1820,6 +1820,10 @@ static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
&xlog_xmd_item_ops,
&xlog_rtefi_item_ops,
&xlog_rtefd_item_ops,
+ &xlog_rtrui_item_ops,
+ &xlog_rtrud_item_ops,
+ &xlog_rtcui_item_ops,
+ &xlog_rtcud_item_ops,
};
static const struct xlog_recover_item_ops *
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5918f433dba7..477c5262cf91 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -37,6 +37,8 @@
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
#include "scrub/stats.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
@@ -650,6 +652,15 @@ xfs_agbtree_compute_maxlevels(
mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels);
}
+/* Compute maximum possible height for realtime btree types for this fs. */
+static inline void
+xfs_rtbtree_compute_maxlevels(
+ struct xfs_mount *mp)
+{
+ mp->m_rtbtree_maxlevels = max(mp->m_rtrmap_maxlevels,
+ mp->m_rtrefc_maxlevels);
+}
+
/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
@@ -718,9 +729,12 @@ xfs_mountfs(
xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
xfs_mount_setup_inode_geom(mp);
xfs_rmapbt_compute_maxlevels(mp);
+ xfs_rtrmapbt_compute_maxlevels(mp);
xfs_refcountbt_compute_maxlevels(mp);
+ xfs_rtrefcountbt_compute_maxlevels(mp);
xfs_agbtree_compute_maxlevels(mp);
+ xfs_rtbtree_compute_maxlevels(mp);
/*
* Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index db9dade7d22a..fbed172d6770 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -158,13 +158,20 @@ typedef struct xfs_mount {
uint m_bmap_dmnr[2]; /* min bmap btree records */
uint m_rmap_mxr[2]; /* max rmap btree records */
uint m_rmap_mnr[2]; /* min rmap btree records */
+ uint m_rtrmap_mxr[2]; /* max rtrmap btree records */
+ uint m_rtrmap_mnr[2]; /* min rtrmap btree records */
uint m_refc_mxr[2]; /* max refc btree records */
uint m_refc_mnr[2]; /* min refc btree records */
+ uint m_rtrefc_mxr[2]; /* max rtrefc btree records */
+ uint m_rtrefc_mnr[2]; /* min rtrefc btree records */
uint m_alloc_maxlevels; /* max alloc btree levels */
uint m_bm_maxlevels[2]; /* max bmap btree levels */
uint m_rmap_maxlevels; /* max rmap btree levels */
+ uint m_rtrmap_maxlevels; /* max rtrmap btree level */
uint m_refc_maxlevels; /* max refcount btree level */
+ uint m_rtrefc_maxlevels; /* max rtrefc btree level */
unsigned int m_agbtree_maxlevels; /* max level of all AG btrees */
+ unsigned int m_rtbtree_maxlevels; /* max level of all rt btrees */
xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
uint m_alloc_set_aside; /* space we can't use */
uint m_ag_max_usable; /* max space per AG */
@@ -350,7 +357,7 @@ typedef struct xfs_mount {
#define XFS_FEAT_NOUUID (1ULL << 63) /* ignore uuid during mount */
#define __XFS_HAS_FEAT(name, NAME) \
-static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
+static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
{ \
return mp->m_features & XFS_FEAT_ ## NAME; \
}
@@ -386,18 +393,30 @@ __XFS_HAS_FEAT(large_extent_counts, NREXT64)
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
__XFS_HAS_FEAT(metadir, METADIR)
-static inline bool xfs_has_rtgroups(struct xfs_mount *mp)
+static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)
{
/* all metadir file systems also allow rtgroups */
return xfs_has_metadir(mp);
}
-static inline bool xfs_has_rtsb(struct xfs_mount *mp)
+static inline bool xfs_has_rtsb(const struct xfs_mount *mp)
{
/* all rtgroups filesystems with an rt section have an rtsb */
return xfs_has_rtgroups(mp) && xfs_has_realtime(mp);
}
+static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp)
+{
+ return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) &&
+ xfs_has_rmapbt(mp);
+}
+
+static inline bool xfs_has_rtreflink(const struct xfs_mount *mp)
+{
+ return xfs_has_metadir(mp) && xfs_has_realtime(mp) &&
+ xfs_has_reflink(mp);
+}
+
/*
* Some features are always on for v5 file systems, allow the compiler to
* eliminate dead code when building without v4 support.
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index fa50e5308292..ed8d8ed42f0a 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -19,6 +19,9 @@
#include "xfs_rtalloc.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
+#include "xfs_notify_failure.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
#include <linux/mm.h>
#include <linux/dax.h>
@@ -154,23 +157,115 @@ xfs_dax_notify_failure_thaw(
}
static int
-xfs_dax_notify_ddev_failure(
+xfs_dax_translate_range(
+ struct xfs_buftarg *btp,
+ u64 offset,
+ u64 len,
+ xfs_daddr_t *daddr,
+ uint64_t *bblen)
+{
+ u64 dev_start = btp->bt_dax_part_off;
+ u64 dev_len = bdev_nr_bytes(btp->bt_bdev);
+ u64 dev_end = dev_start + dev_len - 1;
+
+ /* Notify failure on the whole device. */
+ if (offset == 0 && len == U64_MAX) {
+ offset = dev_start;
+ len = dev_len;
+ }
+
+ /* Ignore the range out of filesystem area */
+ if (offset + len - 1 < dev_start)
+ return -ENXIO;
+ if (offset > dev_end)
+ return -ENXIO;
+
+ /* Calculate the real range when it touches the boundary */
+ if (offset > dev_start)
+ offset -= dev_start;
+ else {
+ len -= dev_start - offset;
+ offset = 0;
+ }
+ if (offset + len - 1 > dev_end)
+ len = dev_end - offset + 1;
+
+ *daddr = BTOBB(offset);
+ *bblen = BTOBB(len);
+ return 0;
+}
+
+static int
+xfs_dax_notify_logdev_failure(
struct xfs_mount *mp,
- xfs_daddr_t daddr,
- xfs_daddr_t bblen,
+ u64 offset,
+ u64 len,
int mf_flags)
{
+ xfs_daddr_t daddr;
+ uint64_t bblen;
+ int error;
+
+ /*
+ * Return ENXIO instead of shutting down the filesystem if the failed
+ * region is beyond the end of the log.
+ */
+ error = xfs_dax_translate_range(mp->m_logdev_targp,
+ offset, len, &daddr, &bblen);
+ if (error)
+ return error;
+
+ /*
+ * In the pre-remove case the failure notification is attempting to
+ * trigger a force unmount. The expectation is that the device is
+ * still present, but its removal is in progress and cannot be
+ * cancelled, so proceed with accessing the log device.
+ */
+ if (mf_flags & MF_MEM_PRE_REMOVE)
+ return 0;
+
+ xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+ return -EFSCORRUPTED;
+}
+
+static int
+xfs_dax_notify_dev_failure(
+ struct xfs_mount *mp,
+ u64 offset,
+ u64 len,
+ int mf_flags,
+ enum xfs_group_type type)
+{
struct xfs_failure_info notify = { .mf_flags = mf_flags };
struct xfs_trans *tp = NULL;
struct xfs_btree_cur *cur = NULL;
- struct xfs_buf *agf_bp = NULL;
int error = 0;
bool kernel_frozen = false;
- xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, daddr);
- xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
- xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp,
- daddr + bblen - 1);
- xfs_agnumber_t end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno);
+ uint32_t start_gno, end_gno;
+ xfs_fsblock_t start_bno, end_bno;
+ xfs_daddr_t daddr;
+ uint64_t bblen;
+ struct xfs_group *xg = NULL;
+
+ if (!xfs_has_rmapbt(mp)) {
+ xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
+ return -EOPNOTSUPP;
+ }
+
+ error = xfs_dax_translate_range(type == XG_TYPE_RTG ?
+ mp->m_rtdev_targp : mp->m_ddev_targp,
+ offset, len, &daddr, &bblen);
+ if (error)
+ return error;
+
+ if (type == XG_TYPE_RTG) {
+ start_bno = xfs_daddr_to_rtb(mp, daddr);
+ end_bno = xfs_daddr_to_rtb(mp, daddr + bblen - 1);
+ } else {
+ start_bno = XFS_DADDR_TO_FSB(mp, daddr);
+ end_bno = XFS_DADDR_TO_FSB(mp, daddr + bblen - 1);
+ }
if (mf_flags & MF_MEM_PRE_REMOVE) {
xfs_info(mp, "Device is about to be removed!");
@@ -189,46 +284,58 @@ xfs_dax_notify_ddev_failure(
if (error)
goto out;
- for (; agno <= end_agno; agno++) {
+ start_gno = xfs_fsb_to_gno(mp, start_bno, type);
+ end_gno = xfs_fsb_to_gno(mp, end_bno, type);
+ while ((xg = xfs_group_next_range(mp, xg, start_gno, end_gno, type))) {
+ struct xfs_buf *agf_bp = NULL;
+ struct xfs_rtgroup *rtg = NULL;
struct xfs_rmap_irec ri_low = { };
struct xfs_rmap_irec ri_high;
- struct xfs_agf *agf;
- struct xfs_perag *pag;
- xfs_agblock_t range_agend;
- pag = xfs_perag_get(mp, agno);
- error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
- if (error) {
- xfs_perag_put(pag);
- break;
- }
+ if (type == XG_TYPE_AG) {
+ struct xfs_perag *pag = to_perag(xg);
- cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
+ if (error) {
+ xfs_perag_put(pag);
+ break;
+ }
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
+ } else {
+ rtg = to_rtg(xg);
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+ cur = xfs_rtrmapbt_init_cursor(tp, rtg);
+ }
/*
* Set the rmap range from ri_low to ri_high, which represents
* a [start, end] range in which we are looking for the files or metadata.
*/
memset(&ri_high, 0xFF, sizeof(ri_high));
- ri_low.rm_startblock = XFS_FSB_TO_AGBNO(mp, fsbno);
- if (agno == end_agno)
- ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
+ if (xg->xg_gno == start_gno)
+ ri_low.rm_startblock =
+ xfs_fsb_to_gbno(mp, start_bno, type);
+ if (xg->xg_gno == end_gno)
+ ri_high.rm_startblock =
+ xfs_fsb_to_gbno(mp, end_bno, type);
- agf = agf_bp->b_addr;
- range_agend = min(be32_to_cpu(agf->agf_length) - 1,
- ri_high.rm_startblock);
notify.startblock = ri_low.rm_startblock;
- notify.blockcount = range_agend + 1 - ri_low.rm_startblock;
+ notify.blockcount = min(xg->xg_block_count,
+ ri_high.rm_startblock + 1) -
+ ri_low.rm_startblock;
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
xfs_dax_failure_fn, &notify);
xfs_btree_del_cursor(cur, error);
- xfs_trans_brelse(tp, agf_bp);
- xfs_perag_put(pag);
- if (error)
+ if (agf_bp)
+ xfs_trans_brelse(tp, agf_bp);
+ if (rtg)
+ xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
+ if (error) {
+ xfs_group_put(xg);
break;
-
- fsbno = XFS_AGB_TO_FSB(mp, agno + 1, 0);
+ }
}
xfs_trans_cancel(tp);
@@ -263,67 +370,20 @@ xfs_dax_notify_failure(
int mf_flags)
{
struct xfs_mount *mp = dax_holder(dax_dev);
- u64 ddev_start;
- u64 ddev_end;
if (!(mp->m_super->s_flags & SB_BORN)) {
xfs_warn(mp, "filesystem is not ready for notify_failure()!");
return -EIO;
}
- if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
- xfs_debug(mp,
- "notify_failure() not supported on realtime device!");
- return -EOPNOTSUPP;
- }
-
- if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
- mp->m_logdev_targp != mp->m_ddev_targp) {
- /*
- * In the pre-remove case the failure notification is attempting
- * to trigger a force unmount. The expectation is that the
- * device is still present, but its removal is in progress and
- * can not be cancelled, proceed with accessing the log device.
- */
- if (mf_flags & MF_MEM_PRE_REMOVE)
- return 0;
- xfs_err(mp, "ondisk log corrupt, shutting down fs!");
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
- return -EFSCORRUPTED;
- }
-
- if (!xfs_has_rmapbt(mp)) {
- xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
- return -EOPNOTSUPP;
- }
-
- ddev_start = mp->m_ddev_targp->bt_dax_part_off;
- ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
-
- /* Notify failure on the whole device. */
- if (offset == 0 && len == U64_MAX) {
- offset = ddev_start;
- len = bdev_nr_bytes(mp->m_ddev_targp->bt_bdev);
- }
-
- /* Ignore the range out of filesystem area */
- if (offset + len - 1 < ddev_start)
- return -ENXIO;
- if (offset > ddev_end)
- return -ENXIO;
-
- /* Calculate the real range when it touches the boundary */
- if (offset > ddev_start)
- offset -= ddev_start;
- else {
- len -= ddev_start - offset;
- offset = 0;
+ if (mp->m_logdev_targp != mp->m_ddev_targp &&
+ mp->m_logdev_targp->bt_daxdev == dax_dev) {
+ return xfs_dax_notify_logdev_failure(mp, offset, len, mf_flags);
}
- if (offset + len - 1 > ddev_end)
- len = ddev_end - offset + 1;
- return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
- mf_flags);
+ return xfs_dax_notify_dev_failure(mp, offset, len, mf_flags,
+ (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) ?
+ XG_TYPE_RTG : XG_TYPE_AG);
}
const struct dax_holder_operations xfs_dax_holder_operations = {
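xfs_dax_translate_range() above clips the notified byte range to the filesystem's slice of the device, rebases it to the partition start, and converts it to 512-byte basic blocks. A self-contained userspace sketch of that arithmetic with made-up device geometry:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BTOBB(bytes)	((bytes) >> 9)		/* bytes to 512-byte basic blocks */

static int translate_range(uint64_t dev_start, uint64_t dev_len,
			   uint64_t offset, uint64_t len,
			   uint64_t *daddr, uint64_t *bblen)
{
	uint64_t dev_end = dev_start + dev_len - 1;

	/* A (0, U64_MAX) notification means the whole device. */
	if (offset == 0 && len == UINT64_MAX) {
		offset = dev_start;
		len = dev_len;
	}

	/* Ignore ranges entirely outside the filesystem area. */
	if (offset + len - 1 < dev_start || offset > dev_end)
		return -ENXIO;

	/* Clip to the boundary and rebase to the partition start. */
	if (offset > dev_start) {
		offset -= dev_start;
	} else {
		len -= dev_start - offset;
		offset = 0;
	}
	if (offset + len - 1 > dev_end)
		len = dev_end - offset + 1;

	*daddr = BTOBB(offset);
	*bblen = BTOBB(len);
	return 0;
}

int main(void)
{
	uint64_t daddr, bblen;
	/* fs partition assumed to start 1 MiB into the device, 1 GiB long */
	int ret = translate_range(1ULL << 20, 1ULL << 30, 0, UINT64_MAX,
				  &daddr, &bblen);

	printf("ret=%d daddr=%llu bblen=%llu\n", ret,
	       (unsigned long long)daddr, (unsigned long long)bblen);
	return 0;
}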
diff --git a/fs/xfs/xfs_notify_failure.h b/fs/xfs/xfs_notify_failure.h
new file mode 100644
index 000000000000..8d08ec29dd29
--- /dev/null
+++ b/fs/xfs/xfs_notify_failure.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_NOTIFY_FAILURE_H__
+#define __XFS_NOTIFY_FAILURE_H__
+
+extern const struct dax_holder_operations xfs_dax_holder_operations;
+
+#endif /* __XFS_NOTIFY_FAILURE_H__ */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index dc8b1010d4d3..e1ba5af6250f 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -230,10 +230,10 @@ xfs_qm_unmount_rt(
if (!rtg)
return;
- if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
- xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
- if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
- xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
+ if (rtg_bitmap(rtg))
+ xfs_qm_dqdetach(rtg_bitmap(rtg));
+ if (rtg_summary(rtg))
+ xfs_qm_dqdetach(rtg_summary(rtg));
xfs_rtgroup_rele(rtg);
}
@@ -428,6 +428,8 @@ void
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
+ if (xfs_is_metadir_inode(ip))
+ return;
if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
return;
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 847ba29630e9..37f1230e7584 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -32,21 +32,27 @@ xfs_fill_statvfs_from_dquot(
limit = blkres->softlimit ?
blkres->softlimit :
blkres->hardlimit;
- if (limit && statp->f_blocks > limit) {
- statp->f_blocks = limit;
- statp->f_bfree = statp->f_bavail =
- (statp->f_blocks > blkres->reserved) ?
- (statp->f_blocks - blkres->reserved) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > blkres->reserved)
+ remaining = limit - blkres->reserved;
+
+ statp->f_blocks = min(statp->f_blocks, limit);
+ statp->f_bfree = min(statp->f_bfree, remaining);
}
limit = dqp->q_ino.softlimit ?
dqp->q_ino.softlimit :
dqp->q_ino.hardlimit;
- if (limit && statp->f_files > limit) {
- statp->f_files = limit;
- statp->f_ffree =
- (statp->f_files > dqp->q_ino.reserved) ?
- (statp->f_files - dqp->q_ino.reserved) : 0;
+ if (limit) {
+ uint64_t remaining = 0;
+
+ if (limit > dqp->q_ino.reserved)
+ remaining = limit - dqp->q_ino.reserved;
+
+ statp->f_files = min(statp->f_files, limit);
+ statp->f_ffree = min(statp->f_ffree, remaining);
}
}
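The xfs_fill_statvfs_from_dquot() rework clamps the reported totals to the quota limit and the free counts to the unreserved part of that limit. A short sketch of the arithmetic with example numbers (plain integers stand in for the statfs and dquot fields):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t f_blocks = 1000000, f_bfree = 400000;	/* fs-wide counters */
	uint64_t limit = 500000, reserved = 350000;	/* dquot block quota */
	uint64_t remaining = limit > reserved ? limit - reserved : 0;

	f_blocks = MIN(f_blocks, limit);
	f_bfree = MIN(f_bfree, remaining);

	/* prints: blocks=500000 bfree=150000 */
	printf("blocks=%llu bfree=%llu\n",
	       (unsigned long long)f_blocks, (unsigned long long)f_bfree);
	return 0;
}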
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index d7565462af3d..105e6eb57620 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -29,11 +29,6 @@ struct xfs_buf;
(XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
(XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))
-#define XFS_IS_DQDETACHED(ip) \
- ((ip)->i_udquot == NULL && \
- (ip)->i_gdquot == NULL && \
- (ip)->i_pdquot == NULL)
-
#define XFS_QM_NEED_QUOTACHECK(mp) \
((XFS_IS_UQUOTA_ON(mp) && \
(mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index bede1c96c330..fe2d7aab8554 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -23,6 +23,7 @@
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
+#include "xfs_rtgroup.h"
struct kmem_cache *xfs_cui_cache;
struct kmem_cache *xfs_cud_cache;
@@ -94,8 +95,9 @@ xfs_cui_item_format(
ASSERT(atomic_read(&cuip->cui_next_extent) ==
cuip->cui_format.cui_nextents);
+ ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);
- cuip->cui_format.cui_type = XFS_LI_CUI;
+ cuip->cui_format.cui_type = lip->li_type;
cuip->cui_format.cui_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
@@ -138,12 +140,14 @@ xfs_cui_item_release(
STATIC struct xfs_cui_log_item *
xfs_cui_init(
struct xfs_mount *mp,
+ unsigned short item_type,
uint nextents)
-
{
struct xfs_cui_log_item *cuip;
ASSERT(nextents > 0);
+ ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT);
+
if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
GFP_KERNEL | __GFP_NOFAIL);
@@ -151,7 +155,7 @@ xfs_cui_init(
cuip = kmem_cache_zalloc(xfs_cui_cache,
GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
+ xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops);
cuip->cui_format.cui_nextents = nextents;
cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
atomic_set(&cuip->cui_next_extent, 0);
@@ -190,7 +194,9 @@ xfs_cud_item_format(
struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
struct xfs_log_iovec *vecp = NULL;
- cudp->cud_format.cud_type = XFS_LI_CUD;
+ ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT);
+
+ cudp->cud_format.cud_type = lip->li_type;
cudp->cud_format.cud_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
@@ -234,6 +240,14 @@ static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
return list_entry(e, struct xfs_refcount_intent, ri_list);
}
+static inline bool
+xfs_cui_item_isrt(const struct xfs_log_item *lip)
+{
+ ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);
+
+ return lip->li_type == XFS_LI_CUI_RT;
+}
+
/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
@@ -282,18 +296,20 @@ xfs_refcount_update_log_item(
}
static struct xfs_log_item *
-xfs_refcount_update_create_intent(
+__xfs_refcount_update_create_intent(
struct xfs_trans *tp,
struct list_head *items,
unsigned int count,
- bool sort)
+ bool sort,
+ unsigned short item_type)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
+ struct xfs_cui_log_item *cuip;
struct xfs_refcount_intent *ri;
ASSERT(count > 0);
+ cuip = xfs_cui_init(mp, item_type, count);
if (sort)
list_sort(mp, items, xfs_refcount_update_diff_items);
list_for_each_entry(ri, items, ri_list)
@@ -301,6 +317,23 @@ xfs_refcount_update_create_intent(
return &cuip->cui_item;
}
+static struct xfs_log_item *
+xfs_refcount_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_refcount_update_create_intent(tp, items, count, sort,
+ XFS_LI_CUI);
+}
+
+static inline unsigned short
+xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip)
+{
+ return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD;
+}
+
/* Get an CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
@@ -312,8 +345,8 @@ xfs_refcount_update_create_done(
struct xfs_cud_log_item *cudp;
cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
- &xfs_cud_item_ops);
+ xfs_log_item_init(tp->t_mountp, &cudp->cud_item,
+ xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops);
cudp->cud_cuip = cuip;
cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
@@ -328,10 +361,20 @@ xfs_refcount_defer_add(
{
struct xfs_mount *mp = tp->t_mountp;
- trace_xfs_refcount_defer(mp, ri);
+ /*
+ * Deferred refcount updates for the realtime and data sections must
+ * use separate transactions to finish deferred work because updates to
+ * realtime metadata files can lock AGFs to allocate btree blocks and
+ * we don't want that mixing with the AGF locks taken to finish data
+ * section updates.
+ */
+ ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
+ ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);
- ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock, XG_TYPE_AG);
- xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type);
+ trace_xfs_refcount_defer(mp, ri);
+ xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
+ &xfs_rtrefcount_update_defer_type :
+ &xfs_refcount_update_defer_type);
}
/* Cancel a deferred refcount update. */
@@ -381,7 +424,7 @@ xfs_refcount_finish_one_cleanup(
return;
agbp = rcur->bc_ag.agbp;
xfs_btree_del_cursor(rcur, error);
- if (error)
+ if (error && agbp)
xfs_trans_brelse(tp, agbp);
}
@@ -397,6 +440,7 @@ xfs_refcount_update_abort_intent(
static inline bool
xfs_cui_validate_phys(
struct xfs_mount *mp,
+ bool isrt,
struct xfs_phys_extent *pmap)
{
if (!xfs_has_reflink(mp))
@@ -415,6 +459,9 @@ xfs_cui_validate_phys(
return false;
}
+ if (isrt)
+ return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);
+
return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}
@@ -422,6 +469,7 @@ static inline void
xfs_cui_recover_work(
struct xfs_mount *mp,
struct xfs_defer_pending *dfp,
+ bool isrt,
struct xfs_phys_extent *pmap)
{
struct xfs_refcount_intent *ri;
@@ -432,7 +480,8 @@ xfs_cui_recover_work(
ri->ri_startblock = pmap->pe_startblock;
ri->ri_blockcount = pmap->pe_len;
ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
- XG_TYPE_AG);
+ isrt ? XG_TYPE_RTG : XG_TYPE_AG);
+ ri->ri_realtime = isrt;
xfs_defer_add_item(dfp, &ri->ri_list);
}
@@ -451,6 +500,7 @@ xfs_refcount_recover_work(
struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
struct xfs_trans *tp;
struct xfs_mount *mp = lip->li_log->l_mp;
+ bool isrt = xfs_cui_item_isrt(lip);
int i;
int error = 0;
@@ -460,7 +510,7 @@ xfs_refcount_recover_work(
* just toss the CUI.
*/
for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
- if (!xfs_cui_validate_phys(mp,
+ if (!xfs_cui_validate_phys(mp, isrt,
&cuip->cui_format.cui_extents[i])) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
&cuip->cui_format,
@@ -468,7 +518,8 @@ xfs_refcount_recover_work(
return -EFSCORRUPTED;
}
- xfs_cui_recover_work(mp, dfp, &cuip->cui_format.cui_extents[i]);
+ xfs_cui_recover_work(mp, dfp, isrt,
+ &cuip->cui_format.cui_extents[i]);
}
/*
@@ -515,10 +566,13 @@ xfs_refcount_relog_intent(
struct xfs_phys_extent *pmap;
unsigned int count;
+ ASSERT(intent->li_type == XFS_LI_CUI ||
+ intent->li_type == XFS_LI_CUI_RT);
+
count = CUI_ITEM(intent)->cui_format.cui_nextents;
pmap = CUI_ITEM(intent)->cui_format.cui_extents;
- cuip = xfs_cui_init(tp->t_mountp, count);
+ cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count);
memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
atomic_set(&cuip->cui_next_extent, count);
@@ -538,6 +592,71 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
.relog_intent = xfs_refcount_relog_intent,
};
+#ifdef CONFIG_XFS_RT
+static struct xfs_log_item *
+xfs_rtrefcount_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_refcount_update_create_intent(tp, items, count, sort,
+ XFS_LI_CUI_RT);
+}
+
+/* Process a deferred realtime refcount update. */
+STATIC int
+xfs_rtrefcount_update_finish_item(
+ struct xfs_trans *tp,
+ struct xfs_log_item *done,
+ struct list_head *item,
+ struct xfs_btree_cur **state)
+{
+ struct xfs_refcount_intent *ri = ci_entry(item);
+ int error;
+
+ error = xfs_rtrefcount_finish_one(tp, ri, state);
+
+ /* Did we run out of reservation? Requeue what we didn't finish. */
+ if (!error && ri->ri_blockcount > 0) {
+ ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
+ ri->ri_type == XFS_REFCOUNT_DECREASE);
+ return -EAGAIN;
+ }
+
+ xfs_refcount_update_cancel_item(item);
+ return error;
+}
+
+/* Clean up after calling xfs_rtrefcount_finish_one. */
+STATIC void
+xfs_rtrefcount_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
+{
+ if (rcur)
+ xfs_btree_del_cursor(rcur, error);
+}
+
+const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
+ .name = "rtrefcount",
+ .max_items = XFS_CUI_MAX_FAST_EXTENTS,
+ .create_intent = xfs_rtrefcount_update_create_intent,
+ .abort_intent = xfs_refcount_update_abort_intent,
+ .create_done = xfs_refcount_update_create_done,
+ .finish_item = xfs_rtrefcount_update_finish_item,
+ .finish_cleanup = xfs_rtrefcount_finish_one_cleanup,
+ .cancel_item = xfs_refcount_update_cancel_item,
+ .recover_work = xfs_refcount_recover_work,
+ .relog_intent = xfs_refcount_relog_intent,
+};
+#else
+const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
+ .name = "rtrefcount",
+};
+#endif /* CONFIG_XFS_RT */
+
STATIC bool
xfs_cui_item_match(
struct xfs_log_item *lip,
@@ -603,7 +722,7 @@ xlog_recover_cui_commit_pass2(
return -EFSCORRUPTED;
}
- cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
+ cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
@@ -617,6 +736,61 @@ const struct xlog_recover_item_ops xlog_cui_item_ops = {
.commit_pass2 = xlog_recover_cui_commit_pass2,
};
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtcui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_cui_log_item *cuip;
+ struct xfs_cui_log_format *cui_formatp;
+ size_t len;
+
+ cui_formatp = item->ri_buf[0].i_addr;
+
+ if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
+ if (item->ri_buf[0].i_len != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
+ xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
+ atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
+
+ xlog_recover_intent_item(log, &cuip->cui_item, lsn,
+ &xfs_rtrefcount_update_defer_type);
+ return 0;
+}
+#else
+STATIC int
+xlog_recover_rtcui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+}
+#endif
+
+const struct xlog_recover_item_ops xlog_rtcui_item_ops = {
+ .item_type = XFS_LI_CUI_RT,
+ .commit_pass2 = xlog_recover_rtcui_commit_pass2,
+};
+
/*
* This routine is called when an CUD format structure is found in a committed
* transaction in the log. Its purpose is to cancel the corresponding CUI if it
@@ -648,3 +822,33 @@ const struct xlog_recover_item_ops xlog_cud_item_ops = {
.item_type = XFS_LI_CUD,
.commit_pass2 = xlog_recover_cud_commit_pass2,
};
+
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtcud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_cud_log_format *cud_formatp;
+
+ cud_formatp = item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ xlog_recover_release_intent(log, XFS_LI_CUI_RT,
+ cud_formatp->cud_cui_id);
+ return 0;
+}
+#else
+# define xlog_recover_rtcud_commit_pass2 xlog_recover_rtcui_commit_pass2
+#endif
+
+const struct xlog_recover_item_ops xlog_rtcud_item_ops = {
+ .item_type = XFS_LI_CUD_RT,
+ .commit_pass2 = xlog_recover_rtcud_commit_pass2,
+};
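The refcount intent changes thread the log item type through the shared helpers so the realtime variant can reuse them, and derive the CUD type from the CUI it completes. A trivial sketch of that type mapping with illustrative enum values (not the on-disk XFS_LI_* numbers):

#include <stdio.h>

enum li_type { LI_CUI, LI_CUI_RT, LI_CUD, LI_CUD_RT };

static enum li_type cud_type_from_cui(enum li_type cui_type)
{
	return cui_type == LI_CUI_RT ? LI_CUD_RT : LI_CUD;
}

int main(void)
{
	printf("%d %d\n", cud_type_from_cui(LI_CUI), cud_type_from_cui(LI_CUI_RT));
	return 0;
}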
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index b11769c009ef..59f7fc16eb80 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -30,6 +30,10 @@
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtgroup.h"
+#include "xfs_metafile.h"
/*
* Copy on Write of Shared Blocks
@@ -120,38 +124,93 @@
*/
/*
- * Given an AG extent, find the lowest-numbered run of shared blocks
- * within that range and return the range in fbno/flen. If
- * find_end_of_shared is true, return the longest contiguous extent of
- * shared blocks. If there are no shared extents, fbno and flen will
- * be set to NULLAGBLOCK and 0, respectively.
+ * Given a file mapping for the data device, find the lowest-numbered run of
+ * shared blocks within that mapping and return it in shared_offset/shared_len.
+ * The offset is relative to the start of irec.
+ *
+ * If find_end_of_shared is true, return the longest contiguous extent of shared
+ * blocks. If there are no shared extents, shared_offset and shared_len will be
+ * set to 0.
*/
static int
xfs_reflink_find_shared(
- struct xfs_perag *pag,
+ struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- xfs_agblock_t *fbno,
- xfs_extlen_t *flen,
+ const struct xfs_bmbt_irec *irec,
+ xfs_extlen_t *shared_offset,
+ xfs_extlen_t *shared_len,
bool find_end_of_shared)
{
struct xfs_buf *agbp;
+ struct xfs_perag *pag;
struct xfs_btree_cur *cur;
int error;
+ xfs_agblock_t orig_bno, found_bno;
+
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock));
+ orig_bno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
- return error;
+ goto out;
- cur = xfs_refcountbt_init_cursor(pag_mount(pag), tp, agbp, pag);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
+ error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount,
+ &found_bno, shared_len, find_end_of_shared);
+ xfs_btree_del_cursor(cur, error);
+ xfs_trans_brelse(tp, agbp);
- error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
- find_end_of_shared);
+ if (!error && *shared_len)
+ *shared_offset = found_bno - orig_bno;
+out:
+ xfs_perag_put(pag);
+ return error;
+}
+/*
+ * Given a file mapping for the rt device, find the lowest-numbered run of
+ * shared blocks within that mapping and return it in shared_offset/shared_len.
+ * The offset is relative to the start of irec.
+ *
+ * If find_end_of_shared is true, return the longest contiguous extent of shared
+ * blocks. If there are no shared extents, shared_offset and shared_len will be
+ * set to 0.
+ */
+static int
+xfs_reflink_find_rtshared(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ const struct xfs_bmbt_irec *irec,
+ xfs_extlen_t *shared_offset,
+ xfs_extlen_t *shared_len,
+ bool find_end_of_shared)
+{
+ struct xfs_rtgroup *rtg;
+ struct xfs_btree_cur *cur;
+ xfs_rgblock_t orig_bno;
+ xfs_agblock_t found_bno;
+ int error;
+
+ BUILD_BUG_ON(NULLRGBLOCK != NULLAGBLOCK);
+
+ /*
+ * Note: this uses the not quite correct xfs_agblock_t type because
+ * xfs_refcount_find_shared is shared between the RT and data device
+ * refcount code.
+ */
+ orig_bno = xfs_rtb_to_rgbno(mp, irec->br_startblock);
+ rtg = xfs_rtgroup_get(mp, xfs_rtb_to_rgno(mp, irec->br_startblock));
+
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+ cur = xfs_rtrefcountbt_init_cursor(tp, rtg);
+ error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount,
+ &found_bno, shared_len, find_end_of_shared);
xfs_btree_del_cursor(cur, error);
+ xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_REFCOUNT);
+ xfs_rtgroup_put(rtg);
- xfs_trans_brelse(tp, agbp);
+ if (!error && *shared_len)
+ *shared_offset = found_bno - orig_bno;
return error;
}
@@ -172,11 +231,7 @@ xfs_reflink_trim_around_shared(
bool *shared)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
- xfs_agblock_t agbno;
- xfs_extlen_t aglen;
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
+ xfs_extlen_t shared_offset, shared_len;
int error = 0;
/* Holes, unwritten, and delalloc extents cannot be shared */
@@ -187,41 +242,37 @@ xfs_reflink_trim_around_shared(
trace_xfs_reflink_trim_around_shared(ip, irec);
- pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock));
- agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
- aglen = irec->br_blockcount;
-
- error = xfs_reflink_find_shared(pag, NULL, agbno, aglen, &fbno, &flen,
- true);
- xfs_perag_put(pag);
+ if (XFS_IS_REALTIME_INODE(ip))
+ error = xfs_reflink_find_rtshared(mp, NULL, irec,
+ &shared_offset, &shared_len, true);
+ else
+ error = xfs_reflink_find_shared(mp, NULL, irec,
+ &shared_offset, &shared_len, true);
if (error)
return error;
- *shared = false;
- if (fbno == NULLAGBLOCK) {
+ if (!shared_len) {
/* No shared blocks at all. */
- return 0;
- }
-
- if (fbno == agbno) {
+ *shared = false;
+ } else if (!shared_offset) {
/*
- * The start of this extent is shared. Truncate the
- * mapping at the end of the shared region so that a
- * subsequent iteration starts at the start of the
- * unshared region.
+ * The start of this mapping points to shared space. Truncate
+ * the mapping at the end of the shared region so that a
+ * subsequent iteration starts at the start of the unshared
+ * region.
*/
- irec->br_blockcount = flen;
+ irec->br_blockcount = shared_len;
*shared = true;
- return 0;
+ } else {
+ /*
+ * There's a shared region that doesn't start at the beginning
+ * of the mapping. Truncate the mapping at the start of the
+ * shared extent so that a subsequent iteration starts at the
+ * start of the shared region.
+ */
+ irec->br_blockcount = shared_offset;
+ *shared = false;
}
-
- /*
- * There's a shared extent midway through this extent.
- * Truncate the mapping at the start of the shared
- * extent so that a subsequent iteration starts at the
- * start of the shared region.
- */
- irec->br_blockcount = fbno - agbno;
return 0;
}
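
The three-way decision above (no shared blocks, shared region at the start of the mapping, shared region further in) is easier to see in isolation. The following is a minimal standalone sketch of just that decision; the types, helper name, and the driver in main() are illustrative and not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirror of the decision in xfs_reflink_trim_around_shared(): given the
 * offset and length of the first shared run, relative to the start of the
 * mapping, either report the mapping as unshared, keep only the shared
 * prefix, or keep only the unshared prefix.
 */
static void trim_around_shared(unsigned long long *blockcount,
			       unsigned long long shared_offset,
			       unsigned long long shared_len, bool *shared)
{
	if (!shared_len) {
		/* No shared blocks at all. */
		*shared = false;
	} else if (!shared_offset) {
		/* Mapping starts shared: keep only the shared prefix. */
		*blockcount = shared_len;
		*shared = true;
	} else {
		/* Shared run starts later: keep only the unshared prefix. */
		*blockcount = shared_offset;
		*shared = false;
	}
}

int main(void)
{
	unsigned long long blockcount = 100;
	bool shared;

	/* A 10-block shared run starting 40 blocks into a 100-block mapping. */
	trim_around_shared(&blockcount, 40, 10, &shared);
	printf("blockcount=%llu shared=%d\n", blockcount, shared);
	return 0;
}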
@@ -389,20 +440,26 @@ xfs_reflink_fill_cow_hole(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
xfs_filblks_t resaligned;
- xfs_extlen_t resblks;
+ unsigned int dblocks = 0, rblocks = 0;
int nimaps;
int error;
bool found;
resaligned = xfs_aligned_fsb_count(imap->br_startoff,
imap->br_blockcount, xfs_get_cowextsz_hint(ip));
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ rblocks = resaligned;
+ } else {
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ rblocks = 0;
+ }
xfs_iunlock(ip, *lockmode);
*lockmode = 0;
- error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
- false, &tp);
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
+ rblocks, false, &tp);
if (error)
return error;
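
The dblocks/rblocks split above decides which device the block reservation is charged to when filling a CoW hole. Below is a minimal sketch of that split, assuming the XFS_DIOSTRAT_SPACE_RES() overhead is linear in its block-count argument; bmbt_overhead stands in for XFS_DIOSTRAT_SPACE_RES(mp, 0) and the driver values are made up.

#include <stdio.h>

struct cow_resv {
	unsigned int	dblocks;	/* data device blocks to reserve */
	unsigned int	rblocks;	/* realtime device blocks to reserve */
};

static struct cow_resv cow_hole_resv(int isrt, unsigned int resaligned,
				     unsigned int bmbt_overhead)
{
	struct cow_resv r;

	if (isrt) {
		r.dblocks = bmbt_overhead;		/* bmbt blocks only */
		r.rblocks = resaligned;			/* file blocks from rt dev */
	} else {
		r.dblocks = bmbt_overhead + resaligned;	/* everything on data dev */
		r.rblocks = 0;
	}
	return r;
}

int main(void)
{
	struct cow_resv rt = cow_hole_resv(1, 64, 4);
	struct cow_resv dd = cow_hole_resv(0, 64, 4);

	printf("rt:   dblocks=%u rblocks=%u\n", rt.dblocks, rt.rblocks);
	printf("data: dblocks=%u rblocks=%u\n", dd.dblocks, dd.rblocks);
	return 0;
}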
@@ -571,6 +628,7 @@ xfs_reflink_cancel_cow_blocks(
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
int error = 0;
if (!xfs_inode_has_cow_data(ip))
@@ -598,12 +656,13 @@ xfs_reflink_cancel_cow_blocks(
ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
/* Free the CoW orphan record. */
- xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
- del.br_blockcount);
+ xfs_refcount_free_cow_extent(*tpp, isrt,
+ del.br_startblock, del.br_blockcount);
error = xfs_free_extent_later(*tpp, del.br_startblock,
del.br_blockcount, NULL,
- XFS_AG_RESV_NONE, 0);
+ XFS_AG_RESV_NONE,
+ isrt ? XFS_FREE_EXTENT_REALTIME : 0);
if (error)
break;
@@ -687,6 +746,35 @@ out:
return error;
}
+#ifdef CONFIG_XFS_QUOTA
+/*
+ * Update quota accounting for a remapping operation. When we're remapping
+ * something from the CoW fork to the data fork, we must update the quota
+ * accounting for delayed allocations. For remapping from the data fork to the
+ * data fork, use regular block accounting.
+ */
+static inline void
+xfs_reflink_update_quota(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ bool is_cow,
+ int64_t blocks)
+{
+ unsigned int qflag;
+
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ qflag = is_cow ? XFS_TRANS_DQ_DELRTBCOUNT :
+ XFS_TRANS_DQ_RTBCOUNT;
+ } else {
+ qflag = is_cow ? XFS_TRANS_DQ_DELBCOUNT :
+ XFS_TRANS_DQ_BCOUNT;
+ }
+ xfs_trans_mod_dquot_byino(tp, ip, qflag, blocks);
+}
+#else
+# define xfs_reflink_update_quota(tp, ip, is_cow, blocks) ((void)0)
+#endif
+
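
xfs_reflink_update_quota() above boils down to a 2x2 choice between delayed-allocation and regular block counters on the realtime and data devices. The standalone sketch below only prints which dquot field each combination would modify; the flag names come from the patch, while the helper and driver are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Which dquot counter a block delta is charged to, as a printable name. */
static const char *quota_field(bool isrt, bool is_cow)
{
	if (isrt)
		return is_cow ? "XFS_TRANS_DQ_DELRTBCOUNT" : "XFS_TRANS_DQ_RTBCOUNT";
	return is_cow ? "XFS_TRANS_DQ_DELBCOUNT" : "XFS_TRANS_DQ_BCOUNT";
}

int main(void)
{
	for (int isrt = 0; isrt <= 1; isrt++)
		for (int is_cow = 0; is_cow <= 1; is_cow++)
			printf("isrt=%d is_cow=%d -> %s\n", isrt, is_cow,
			       quota_field(isrt, is_cow));
	return 0;
}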
/*
* Remap part of the CoW fork into the data fork.
*
@@ -710,6 +798,7 @@ xfs_reflink_end_cow_extent(
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
unsigned int resblks;
int nmaps;
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
int error;
resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
@@ -779,9 +868,8 @@ xfs_reflink_end_cow_extent(
* or not), unmap the extent and drop its refcount.
*/
xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
- xfs_refcount_decrease_extent(tp, &data);
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
- -data.br_blockcount);
+ xfs_refcount_decrease_extent(tp, isrt, &data);
+ xfs_reflink_update_quota(tp, ip, false, -data.br_blockcount);
} else if (data.br_startblock == DELAYSTARTBLOCK) {
int done;
@@ -799,14 +887,14 @@ xfs_reflink_end_cow_extent(
}
/* Free the CoW orphan record. */
- xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);
+ xfs_refcount_free_cow_extent(tp, isrt, del.br_startblock,
+ del.br_blockcount);
/* Map the new blocks into the data fork. */
xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &del);
/* Charge this new data fork mapping to the on-disk quota. */
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
- (long)del.br_blockcount);
+ xfs_reflink_update_quota(tp, ip, true, del.br_blockcount);
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
@@ -895,20 +983,29 @@ xfs_reflink_recover_cow(
struct xfs_mount *mp)
{
struct xfs_perag *pag = NULL;
+ struct xfs_rtgroup *rtg = NULL;
int error = 0;
if (!xfs_has_reflink(mp))
return 0;
while ((pag = xfs_perag_next(mp, pag))) {
- error = xfs_refcount_recover_cow_leftovers(mp, pag);
+ error = xfs_refcount_recover_cow_leftovers(pag_group(pag));
if (error) {
xfs_perag_rele(pag);
- break;
+ return error;
}
}
- return error;
+ while ((rtg = xfs_rtgroup_next(mp, rtg))) {
+ error = xfs_refcount_recover_cow_leftovers(rtg_group(rtg));
+ if (error) {
+ xfs_rtgroup_rele(rtg);
+ return error;
+ }
+ }
+
+ return 0;
}
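
Both loops above follow the same pattern: the group iterator hands back a referenced group, so an early return has to drop that reference explicitly before bailing out. A toy standalone sketch of that pattern follows; the fake_group type, the recover() stub, and the fixed array are made up for illustration only.

#include <stdio.h>

struct fake_group {
	int	id;
	int	refs;
};

static struct fake_group groups[3] = { { 0 }, { 1 }, { 2 } };

/* Return the next group with a reference held, dropping the previous one. */
static struct fake_group *group_next(struct fake_group *g)
{
	int next = g ? g->id + 1 : 0;

	if (g)
		g->refs--;
	if (next >= 3)
		return NULL;
	groups[next].refs++;
	return &groups[next];
}

static int recover(struct fake_group *g)
{
	return g->id == 2 ? -5 /* pretend -EIO */ : 0;
}

int main(void)
{
	struct fake_group *g = NULL;
	int error;

	while ((g = group_next(g))) {
		error = recover(g);
		if (error) {
			g->refs--;	/* drop the reference before returning */
			printf("group %d failed: %d\n", g->id, error);
			return 1;
		}
	}
	return 0;
}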
/*
@@ -1100,14 +1197,28 @@ out_error:
static int
xfs_reflink_ag_has_free_space(
struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ struct xfs_inode *ip,
+ xfs_fsblock_t fsb)
{
struct xfs_perag *pag;
+ xfs_agnumber_t agno;
int error = 0;
if (!xfs_has_rmapbt(mp))
return 0;
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ struct xfs_rtgroup *rtg;
+ xfs_rgnumber_t rgno;
+
+ rgno = xfs_rtb_to_rgno(mp, fsb);
+ rtg = xfs_rtgroup_get(mp, rgno);
+ if (xfs_metafile_resv_critical(rtg_rmap(rtg)))
+ error = -ENOSPC;
+ xfs_rtgroup_put(rtg);
+ return error;
+ }
+ agno = XFS_FSB_TO_AGNO(mp, fsb);
pag = xfs_perag_get(mp, agno);
if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
@@ -1131,10 +1242,11 @@ xfs_reflink_remap_extent(
struct xfs_trans *tp;
xfs_off_t newlen;
int64_t qdelta = 0;
- unsigned int resblks;
+ unsigned int dblocks, rblocks, resblks;
bool quota_reserved = true;
bool smap_real;
bool dmap_written = xfs_bmap_is_written_extent(dmap);
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
int iext_delta = 0;
int nimaps;
int error;
@@ -1161,8 +1273,15 @@ xfs_reflink_remap_extent(
* we're remapping.
*/
resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ dblocks = resblks;
+ rblocks = dmap->br_blockcount;
+ } else {
+ dblocks = resblks + dmap->br_blockcount;
+ rblocks = 0;
+ }
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
- resblks + dmap->br_blockcount, 0, false, &tp);
+ dblocks, rblocks, false, &tp);
if (error == -EDQUOT || error == -ENOSPC) {
quota_reserved = false;
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
@@ -1213,8 +1332,8 @@ xfs_reflink_remap_extent(
/* No reflinking if the AG of the dest mapping is low on space. */
if (dmap_written) {
- error = xfs_reflink_ag_has_free_space(mp,
- XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
+ error = xfs_reflink_ag_has_free_space(mp, ip,
+ dmap->br_startblock);
if (error)
goto out_cancel;
}
@@ -1242,8 +1361,15 @@ xfs_reflink_remap_extent(
* done.
*/
if (!quota_reserved && !smap_real && dmap_written) {
- error = xfs_trans_reserve_quota_nblks(tp, ip,
- dmap->br_blockcount, 0, false);
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ dblocks = 0;
+ rblocks = dmap->br_blockcount;
+ } else {
+ dblocks = dmap->br_blockcount;
+ rblocks = 0;
+ }
+ error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
+ false);
if (error)
goto out_cancel;
}
@@ -1264,7 +1390,7 @@ xfs_reflink_remap_extent(
* or not), unmap the extent and drop its refcount.
*/
xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &smap);
- xfs_refcount_decrease_extent(tp, &smap);
+ xfs_refcount_decrease_extent(tp, isrt, &smap);
qdelta -= smap.br_blockcount;
} else if (smap.br_startblock == DELAYSTARTBLOCK) {
int done;
@@ -1287,12 +1413,12 @@ xfs_reflink_remap_extent(
* its refcount and map it into the file.
*/
if (dmap_written) {
- xfs_refcount_increase_extent(tp, dmap);
+ xfs_refcount_increase_extent(tp, isrt, dmap);
xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, dmap);
qdelta += dmap->br_blockcount;
}
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);
+ xfs_reflink_update_quota(tp, ip, false, qdelta);
/* Update dest isize if needed. */
newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
@@ -1466,8 +1592,8 @@ xfs_reflink_remap_prep(
/* Check file eligibility and prepare for block sharing. */
ret = -EINVAL;
- /* Don't reflink realtime inodes */
- if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
+ /* Can't reflink between data and rt volumes */
+ if (XFS_IS_REALTIME_INODE(src) != XFS_IS_REALTIME_INODE(dest))
goto out_unlock;
/* Don't share DAX file data with non-DAX file. */
@@ -1547,27 +1673,23 @@ xfs_reflink_inode_has_shared_extents(
*has_shared = false;
found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
while (found) {
- struct xfs_perag *pag;
- xfs_agblock_t agbno;
- xfs_extlen_t aglen;
- xfs_agblock_t rbno;
- xfs_extlen_t rlen;
+ xfs_extlen_t shared_offset, shared_len;
if (isnullstartblock(got.br_startblock) ||
got.br_state != XFS_EXT_NORM)
goto next;
- pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, got.br_startblock));
- agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
- aglen = got.br_blockcount;
- error = xfs_reflink_find_shared(pag, tp, agbno, aglen,
- &rbno, &rlen, false);
- xfs_perag_put(pag);
+ if (XFS_IS_REALTIME_INODE(ip))
+ error = xfs_reflink_find_rtshared(mp, tp, &got,
+ &shared_offset, &shared_len, false);
+ else
+ error = xfs_reflink_find_shared(mp, tp, &got,
+ &shared_offset, &shared_len, false);
if (error)
return error;
/* Is there still a shared block here? */
- if (rbno != NULLAGBLOCK) {
+ if (shared_len) {
*has_shared = true;
return 0;
}
@@ -1700,3 +1822,28 @@ out:
trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
return error;
}
+
+/*
+ * Can we use reflink with this realtime extent size? Note that we don't check
+ * for rblocks > 0 here because this can be called as part of attaching a new
+ * rt section.
+ */
+bool
+xfs_reflink_supports_rextsize(
+ struct xfs_mount *mp,
+ unsigned int rextsize)
+{
+ /* reflink on the realtime device requires rtgroups */
+ if (!xfs_has_rtgroups(mp))
+ return false;
+
+ /*
+ * Reflink doesn't support rt extent size larger than a single fsblock
+ * because we would have to perform CoW-around for unaligned write
+ * requests to guarantee that we always remap entire rt extents.
+ */
+ if (rextsize != 1)
+ return false;
+
+ return true;
+}
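
To see why rt extent sizes larger than one block are rejected above: an unaligned write would have to be widened out to full rt extent boundaries (CoW-around) so that whole rt extents are always remapped. The standalone sketch below works one example of that rounding; with rextsize == 1 the widened range always equals the requested range, so no extra work is ever needed. The function and values are illustrative only.

#include <stdio.h>

/* Widen a block range out to realtime extent boundaries. */
static void cow_around(unsigned long long bno, unsigned long long len,
		       unsigned int rextsize,
		       unsigned long long *cbno, unsigned long long *clen)
{
	unsigned long long end = bno + len;

	*cbno = bno - (bno % rextsize);				/* round down */
	end = ((end + rextsize - 1) / rextsize) * rextsize;	/* round up */
	*clen = end - *cbno;
}

int main(void)
{
	unsigned long long cbno, clen;

	/* Writing blocks 5-6 of a file whose rt extents are 4 blocks wide. */
	cow_around(5, 2, 4, &cbno, &clen);
	printf("must CoW blocks %llu-%llu\n", cbno, cbno + clen - 1);

	/* With rextsize == 1 the widened range equals the requested range. */
	cow_around(5, 2, 1, &cbno, &clen);
	printf("must CoW blocks %llu-%llu\n", cbno, cbno + clen - 1);
	return 0;
}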
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 4a58e4533671..cc4e92278279 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -25,7 +25,7 @@ xfs_can_free_cowblocks(struct xfs_inode *ip)
return true;
}
-extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
+int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared);
int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
bool *shared);
@@ -62,4 +62,6 @@ extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in,
extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen,
xfs_extlen_t cowextsize, unsigned int remap_flags);
+bool xfs_reflink_supports_rextsize(struct xfs_mount *mp, unsigned int rextsize);
+
#endif /* __XFS_REFLINK_H */
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 76b3c0ed3b4f..89decffe76c8 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -23,6 +23,7 @@
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
+#include "xfs_rtgroup.h"
struct kmem_cache *xfs_rui_cache;
struct kmem_cache *xfs_rud_cache;
@@ -94,7 +95,9 @@ xfs_rui_item_format(
ASSERT(atomic_read(&ruip->rui_next_extent) ==
ruip->rui_format.rui_nextents);
- ruip->rui_format.rui_type = XFS_LI_RUI;
+ ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);
+
+ ruip->rui_format.rui_type = lip->li_type;
ruip->rui_format.rui_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
@@ -137,12 +140,15 @@ xfs_rui_item_release(
STATIC struct xfs_rui_log_item *
xfs_rui_init(
struct xfs_mount *mp,
+ unsigned short item_type,
uint nextents)
{
struct xfs_rui_log_item *ruip;
ASSERT(nextents > 0);
+ ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT);
+
if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
GFP_KERNEL | __GFP_NOFAIL);
@@ -150,7 +156,7 @@ xfs_rui_init(
ruip = kmem_cache_zalloc(xfs_rui_cache,
GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
+ xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops);
ruip->rui_format.rui_nextents = nextents;
ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
atomic_set(&ruip->rui_next_extent, 0);
@@ -189,7 +195,9 @@ xfs_rud_item_format(
struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
struct xfs_log_iovec *vecp = NULL;
- rudp->rud_format.rud_type = XFS_LI_RUD;
+ ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT);
+
+ rudp->rud_format.rud_type = lip->li_type;
rudp->rud_format.rud_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
@@ -233,6 +241,14 @@ static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
return list_entry(e, struct xfs_rmap_intent, ri_list);
}
+static inline bool
+xfs_rui_item_isrt(const struct xfs_log_item *lip)
+{
+ ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);
+
+ return lip->li_type == XFS_LI_RUI_RT;
+}
+
/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
@@ -305,18 +321,20 @@ xfs_rmap_update_log_item(
}
static struct xfs_log_item *
-xfs_rmap_update_create_intent(
+__xfs_rmap_update_create_intent(
struct xfs_trans *tp,
struct list_head *items,
unsigned int count,
- bool sort)
+ bool sort,
+ unsigned short item_type)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count);
+ struct xfs_rui_log_item *ruip;
struct xfs_rmap_intent *ri;
ASSERT(count > 0);
+ ruip = xfs_rui_init(mp, item_type, count);
if (sort)
list_sort(mp, items, xfs_rmap_update_diff_items);
list_for_each_entry(ri, items, ri_list)
@@ -324,6 +342,23 @@ xfs_rmap_update_create_intent(
return &ruip->rui_item;
}
+static struct xfs_log_item *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_rmap_update_create_intent(tp, items, count, sort,
+ XFS_LI_RUI);
+}
+
+static inline unsigned short
+xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip)
+{
+ return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD;
+}
+
/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
@@ -335,8 +370,8 @@ xfs_rmap_update_create_done(
struct xfs_rud_log_item *rudp;
rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
- &xfs_rud_item_ops);
+ xfs_log_item_init(tp->t_mountp, &rudp->rud_item,
+ xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops);
rudp->rud_ruip = ruip;
rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
@@ -351,11 +386,20 @@ xfs_rmap_defer_add(
{
struct xfs_mount *mp = tp->t_mountp;
- trace_xfs_rmap_defer(mp, ri);
-
+ /*
+ * Deferred rmap updates for the realtime and data sections must use
+ * separate transactions to finish deferred work because updates to
+ * realtime metadata files can lock AGFs to allocate btree blocks and
+ * we don't want that mixing with the AGF locks taken to finish data
+ * section updates.
+ */
ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
- XG_TYPE_AG);
- xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
+ ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);
+
+ trace_xfs_rmap_defer(mp, ri);
+ xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
+ &xfs_rtrmap_update_defer_type :
+ &xfs_rmap_update_defer_type);
}
/* Cancel a deferred rmap update. */
@@ -415,6 +459,7 @@ xfs_rmap_update_abort_intent(
static inline bool
xfs_rui_validate_map(
struct xfs_mount *mp,
+ bool isrt,
struct xfs_map_extent *map)
{
if (!xfs_has_rmapbt(mp))
@@ -444,6 +489,9 @@ xfs_rui_validate_map(
if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
return false;
+ if (isrt)
+ return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);
+
return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}
@@ -451,6 +499,7 @@ static inline void
xfs_rui_recover_work(
struct xfs_mount *mp,
struct xfs_defer_pending *dfp,
+ bool isrt,
const struct xfs_map_extent *map)
{
struct xfs_rmap_intent *ri;
@@ -495,7 +544,9 @@ xfs_rui_recover_work(
ri->ri_bmap.br_blockcount = map->me_len;
ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
- ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, XG_TYPE_AG);
+ ri->ri_group = xfs_group_intent_get(mp, map->me_startblock,
+ isrt ? XG_TYPE_RTG : XG_TYPE_AG);
+ ri->ri_realtime = isrt;
xfs_defer_add_item(dfp, &ri->ri_list);
}
@@ -514,6 +565,7 @@ xfs_rmap_recover_work(
struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
struct xfs_trans *tp;
struct xfs_mount *mp = lip->li_log->l_mp;
+ bool isrt = xfs_rui_item_isrt(lip);
int i;
int error = 0;
@@ -523,7 +575,7 @@ xfs_rmap_recover_work(
* just toss the RUI.
*/
for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
- if (!xfs_rui_validate_map(mp,
+ if (!xfs_rui_validate_map(mp, isrt,
&ruip->rui_format.rui_extents[i])) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
&ruip->rui_format,
@@ -531,7 +583,8 @@ xfs_rmap_recover_work(
return -EFSCORRUPTED;
}
- xfs_rui_recover_work(mp, dfp, &ruip->rui_format.rui_extents[i]);
+ xfs_rui_recover_work(mp, dfp, isrt,
+ &ruip->rui_format.rui_extents[i]);
}
resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
@@ -566,10 +619,13 @@ xfs_rmap_relog_intent(
struct xfs_map_extent *map;
unsigned int count;
+ ASSERT(intent->li_type == XFS_LI_RUI ||
+ intent->li_type == XFS_LI_RUI_RT);
+
count = RUI_ITEM(intent)->rui_format.rui_nextents;
map = RUI_ITEM(intent)->rui_format.rui_extents;
- ruip = xfs_rui_init(tp->t_mountp, count);
+ ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count);
memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
atomic_set(&ruip->rui_next_extent, count);
@@ -589,6 +645,47 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.relog_intent = xfs_rmap_relog_intent,
};
+#ifdef CONFIG_XFS_RT
+static struct xfs_log_item *
+xfs_rtrmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_rmap_update_create_intent(tp, items, count, sort,
+ XFS_LI_RUI_RT);
+}
+
+/* Clean up after calling xfs_rmap_finish_one. */
+STATIC void
+xfs_rtrmap_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
+{
+ if (rcur)
+ xfs_btree_del_cursor(rcur, error);
+}
+
+const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
+ .name = "rtrmap",
+ .max_items = XFS_RUI_MAX_FAST_EXTENTS,
+ .create_intent = xfs_rtrmap_update_create_intent,
+ .abort_intent = xfs_rmap_update_abort_intent,
+ .create_done = xfs_rmap_update_create_done,
+ .finish_item = xfs_rmap_update_finish_item,
+ .finish_cleanup = xfs_rtrmap_finish_one_cleanup,
+ .cancel_item = xfs_rmap_update_cancel_item,
+ .recover_work = xfs_rmap_recover_work,
+ .relog_intent = xfs_rmap_relog_intent,
+};
+#else
+const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
+ .name = "rtrmap",
+};
+#endif
+
STATIC bool
xfs_rui_item_match(
struct xfs_log_item *lip,
@@ -654,7 +751,7 @@ xlog_recover_rui_commit_pass2(
return -EFSCORRUPTED;
}
- ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
@@ -668,6 +765,61 @@ const struct xlog_recover_item_ops xlog_rui_item_ops = {
.commit_pass2 = xlog_recover_rui_commit_pass2,
};
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtrui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_rui_log_format *rui_formatp;
+ size_t len;
+
+ rui_formatp = item->ri_buf[0].i_addr;
+
+ if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
+ if (item->ri_buf[0].i_len != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
+ xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+
+ xlog_recover_intent_item(log, &ruip->rui_item, lsn,
+ &xfs_rtrmap_update_defer_type);
+ return 0;
+}
+#else
+STATIC int
+xlog_recover_rtrui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+}
+#endif
+
+const struct xlog_recover_item_ops xlog_rtrui_item_ops = {
+ .item_type = XFS_LI_RUI_RT,
+ .commit_pass2 = xlog_recover_rtrui_commit_pass2,
+};
+
/*
* This routine is called when an RUD format structure is found in a committed
* transaction in the log. Its purpose is to cancel the corresponding RUI if it
@@ -699,3 +851,33 @@ const struct xlog_recover_item_ops xlog_rud_item_ops = {
.item_type = XFS_LI_RUD,
.commit_pass2 = xlog_recover_rud_commit_pass2,
};
+
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtrud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_rud_log_format *rud_formatp;
+
+ rud_formatp = item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ rud_formatp, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ xlog_recover_release_intent(log, XFS_LI_RUI_RT,
+ rud_formatp->rud_rui_id);
+ return 0;
+}
+#else
+# define xlog_recover_rtrud_commit_pass2 xlog_recover_rtrui_commit_pass2
+#endif
+
+const struct xlog_recover_item_ops xlog_rtrud_item_ops = {
+ .item_type = XFS_LI_RUD_RT,
+ .commit_pass2 = xlog_recover_rtrud_commit_pass2,
+};
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index fcfa6e0eb3ad..d8e6d073d64d 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -22,6 +22,7 @@
#include "xfs_rtalloc.h"
#include "xfs_sb.h"
#include "xfs_rtbitmap.h"
+#include "xfs_rtrmap_btree.h"
#include "xfs_quota.h"
#include "xfs_log_priv.h"
#include "xfs_health.h"
@@ -30,6 +31,8 @@
#include "xfs_rtgroup.h"
#include "xfs_error.h"
#include "xfs_trace.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_reflink.h"
/*
* Return whether there are any free extents in the size range given
@@ -592,7 +595,7 @@ xfs_rtalloc_sumlevel(
* specified. If we don't get maxlen then use prod to trim
* the length, if given. The lengths are all in rtextents.
*/
-STATIC int
+static int
xfs_rtallocate_extent_size(
struct xfs_rtalloc_args *args,
xfs_rtxlen_t minlen, /* minimum length to allocate */
@@ -845,6 +848,13 @@ xfs_growfs_rt_init_rtsb(
mp->m_rtsb_bp = rtsb_bp;
error = xfs_bwrite(rtsb_bp);
xfs_buf_unlock(rtsb_bp);
+ if (error)
+ return error;
+
+ /* Initialize the rtrmap to reflect the rtsb. */
+ if (rtg_rmap(args->rtg) != NULL)
+ error = xfs_rtrmapbt_init_rtsb(nargs->mp, args->rtg, args->tp);
+
return error;
}
@@ -856,8 +866,8 @@ xfs_growfs_rt_bmblock(
xfs_fileoff_t bmbno)
{
struct xfs_mount *mp = rtg_mount(rtg);
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
- struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
+ struct xfs_inode *rsumip = rtg_summary(rtg);
struct xfs_rtalloc_args args = {
.mp = mp,
.rtg = rtg,
@@ -893,8 +903,9 @@ xfs_growfs_rt_bmblock(
goto out_free;
nargs.tp = args.tp;
- xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
- xfs_rtgroup_trans_join(args.tp, args.rtg, XFS_RTGLOCK_BITMAP);
+ xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
+ xfs_rtgroup_trans_join(args.tp, args.rtg,
+ XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP);
/*
* Update the bitmap inode's size ondisk and incore. We need to update
@@ -980,9 +991,12 @@ xfs_growfs_rt_bmblock(
goto out_free;
/*
- * Ensure the mount RT feature flag is now set.
+ * Ensure the mount RT feature flag is now set, and compute new
+ * maxlevels for rt btrees.
*/
mp->m_features |= XFS_FEAT_REALTIME;
+ xfs_rtrmapbt_compute_maxlevels(mp);
+ xfs_rtrefcountbt_compute_maxlevels(mp);
kfree(nmp);
return 0;
@@ -1041,8 +1055,8 @@ xfs_growfs_rt_alloc_blocks(
xfs_extlen_t *nrbmblocks)
{
struct xfs_mount *mp = rtg_mount(rtg);
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
- struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
+ struct xfs_inode *rsumip = rtg_summary(rtg);
xfs_extlen_t orbmblocks = 0;
xfs_extlen_t orsumblocks = 0;
struct xfs_mount *nmp;
@@ -1150,29 +1164,38 @@ out_rele:
return error;
}
-static int
+int
xfs_growfs_check_rtgeom(
const struct xfs_mount *mp,
+ xfs_rfsblock_t dblocks,
xfs_rfsblock_t rblocks,
xfs_extlen_t rextsize)
{
+ xfs_extlen_t min_logfsbs;
struct xfs_mount *nmp;
- int error = 0;
nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
if (!nmp)
return -ENOMEM;
+ nmp->m_sb.sb_dblocks = dblocks;
+
+ xfs_rtrmapbt_compute_maxlevels(nmp);
+ xfs_rtrefcountbt_compute_maxlevels(nmp);
+ xfs_trans_resv_calc(nmp, M_RES(nmp));
/*
* New summary size can't be more than half the size of the log. This
* prevents us from getting a log overflow, since we'll log basically
* the whole summary file at once.
*/
- if (nmp->m_rsumblocks > (mp->m_sb.sb_logblocks >> 1))
- error = -EINVAL;
+ min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp),
+ nmp->m_rsumblocks * 2);
kfree(nmp);
- return error;
+
+ if (min_logfsbs > mp->m_sb.sb_logblocks)
+ return -EINVAL;
+ return 0;
}
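
The reworked check above replaces the old "summary must fit in half the log" rule with a comparison against the smaller of the computed minimum log size and twice the summary size. A minimal standalone sketch of that comparison follows; log_min_fsbs stands in for xfs_log_calc_minimum_size(nmp) and the sample numbers are invented.

#include <stdio.h>

static int check_rt_log_size(unsigned int log_min_fsbs, unsigned int rsumblocks,
			     unsigned int logblocks)
{
	unsigned int need = log_min_fsbs < rsumblocks * 2 ?
				log_min_fsbs : rsumblocks * 2;

	return need > logblocks ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	/* A tiny summary never forces a bigger log. */
	printf("%d\n", check_rt_log_size(2048, 16, 4096));
	/* A huge summary is capped by the computed minimum log size. */
	printf("%d\n", check_rt_log_size(2048, 4096, 1024));
	return 0;
}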
/*
@@ -1263,11 +1286,17 @@ xfs_growfs_rt(
XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
goto out_unlock;
- /* Unsupported realtime features. */
+ /* Check for features supported only on rtgroups filesystems. */
error = -EOPNOTSUPP;
- if (xfs_has_quota(mp) && !xfs_has_rtgroups(mp))
- goto out_unlock;
- if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
+ if (!xfs_has_rtgroups(mp)) {
+ if (xfs_has_rmapbt(mp))
+ goto out_unlock;
+ if (xfs_has_quota(mp))
+ goto out_unlock;
+ if (xfs_has_reflink(mp))
+ goto out_unlock;
+ } else if (xfs_has_reflink(mp) &&
+ !xfs_reflink_supports_rextsize(mp, in->extsize))
goto out_unlock;
error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks);
@@ -1291,7 +1320,8 @@ xfs_growfs_rt(
goto out_unlock;
/* Make sure the new fs size won't cause problems with the log. */
- error = xfs_growfs_check_rtgeom(mp, in->newblocks, in->extsize);
+ error = xfs_growfs_check_rtgeom(mp, mp->m_sb.sb_dblocks, in->newblocks,
+ in->extsize);
if (error)
goto out_unlock;
@@ -1344,6 +1374,12 @@ xfs_growfs_rt(
if (!error)
error = error2;
+
+ /* Reset the rt metadata btree space reservations. */
+ xfs_rt_resv_free(mp);
+ error2 = xfs_rt_resv_init(mp);
+ if (error2 && error2 != -ENOSPC)
+ error = error2;
}
out_unlock:
@@ -1487,6 +1523,46 @@ xfs_rtalloc_reinit_frextents(
return 0;
}
+/* Free space reservations for rt metadata inodes. */
+void
+xfs_rt_resv_free(
+ struct xfs_mount *mp)
+{
+ struct xfs_rtgroup *rtg = NULL;
+ unsigned int i;
+
+ while ((rtg = xfs_rtgroup_next(mp, rtg))) {
+ for (i = 0; i < XFS_RTGI_MAX; i++)
+ xfs_metafile_resv_free(rtg->rtg_inodes[i]);
+ }
+}
+
+/* Reserve space for rt metadata inodes' space expansion. */
+int
+xfs_rt_resv_init(
+ struct xfs_mount *mp)
+{
+ struct xfs_rtgroup *rtg = NULL;
+ xfs_filblks_t ask;
+ int error = 0;
+
+ while ((rtg = xfs_rtgroup_next(mp, rtg))) {
+ int err2;
+
+ ask = xfs_rtrmapbt_calc_reserves(mp);
+ err2 = xfs_metafile_resv_init(rtg_rmap(rtg), ask);
+ if (err2 && !error)
+ error = err2;
+
+ ask = xfs_rtrefcountbt_calc_reserves(mp);
+ err2 = xfs_metafile_resv_init(rtg_refcount(rtg), ask);
+ if (err2 && !error)
+ error = err2;
+ }
+
+ return error;
+}
+
/*
* Read in the bmbt of an rt metadata inode so that we never have to load them
* at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use
@@ -1601,7 +1677,7 @@ xfs_rtpick_extent(
xfs_rtxlen_t len) /* allocation length (rtextents) */
{
struct xfs_mount *mp = rtg_mount(rtg);
- struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
+ struct xfs_inode *rbmip = rtg_bitmap(rtg);
xfs_rtxnum_t b = 0; /* result rtext */
int log2; /* log of sequence number */
uint64_t resid; /* residual after log removed */
@@ -1885,7 +1961,7 @@ out_unlock:
goto out_release;
}
-static int
+int
xfs_rtallocate_rtgs(
struct xfs_trans *tp,
xfs_fsblock_t bno_hint,
@@ -1950,7 +2026,10 @@ xfs_rtallocate_align(
if (*noalign) {
align = mp->m_sb.sb_rextsize;
} else {
- align = xfs_get_extsz_hint(ap->ip);
+ if (ap->flags & XFS_BMAPI_COWFORK)
+ align = xfs_get_cowextsz_hint(ap->ip);
+ else
+ align = xfs_get_extsz_hint(ap->ip);
if (!align)
align = 1;
if (align == mp->m_sb.sb_rextsize)
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 8e2a07b8174b..0d95b29092c9 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -34,6 +34,9 @@ int /* error */
xfs_rtmount_inodes(
struct xfs_mount *mp); /* file system mount structure */
+void xfs_rt_resv_free(struct xfs_mount *mp);
+int xfs_rt_resv_init(struct xfs_mount *mp);
+
/*
* Grow the realtime area of the filesystem.
*/
@@ -43,6 +46,8 @@ xfs_growfs_rt(
xfs_growfs_rt_t *in); /* user supplied growfs struct */
int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp);
+int xfs_growfs_check_rtgeom(const struct xfs_mount *mp, xfs_rfsblock_t dblocks,
+ xfs_rfsblock_t rblocks, xfs_agblock_t rextsize);
#else
# define xfs_growfs_rt(mp,in) (-ENOSYS)
# define xfs_rtalloc_reinit_frextents(m) (0)
@@ -60,6 +65,21 @@ xfs_rtmount_init(
}
# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (-ENOSYS))
# define xfs_rtunmount_inodes(m)
+# define xfs_rt_resv_free(mp) ((void)0)
+# define xfs_rt_resv_init(mp) (0)
+
+static inline int
+xfs_growfs_check_rtgeom(const struct xfs_mount *mp,
+ xfs_rfsblock_t dblocks, xfs_rfsblock_t rblocks,
+ xfs_extlen_t rextsize)
+{
+ return 0;
+}
#endif /* CONFIG_XFS_RT */
+int xfs_rtallocate_rtgs(struct xfs_trans *tp, xfs_fsblock_t bno_hint,
+ xfs_rtxlen_t minlen, xfs_rtxlen_t maxlen, xfs_rtxlen_t prod,
+ bool wasdel, bool initial_user_data, xfs_rtblock_t *bno,
+ xfs_extlen_t *blen);
+
#endif /* __XFS_RTALLOC_H__ */
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index ffb52725c2a8..35c7fb3ba324 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -52,7 +52,10 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "rmapbt", xfsstats_offset(xs_refcbt_2) },
{ "refcntbt", xfsstats_offset(xs_rmap_mem_2) },
{ "rmapbt_mem", xfsstats_offset(xs_rcbag_2) },
- { "rcbagbt", xfsstats_offset(xs_qm_dqreclaims)},
+ { "rcbagbt", xfsstats_offset(xs_rtrmap_2) },
+ { "rtrmapbt", xfsstats_offset(xs_rtrmap_mem_2)},
+ { "rtrmapbt_mem", xfsstats_offset(xs_rtrefcbt_2) },
+ { "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
{ "qm", xfsstats_offset(xs_xstrat_bytes)},
};
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index a61fb56ed2e6..15ba1abcf253 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -127,6 +127,9 @@ struct __xfsstats {
uint32_t xs_refcbt_2[__XBTS_MAX];
uint32_t xs_rmap_mem_2[__XBTS_MAX];
uint32_t xs_rcbag_2[__XBTS_MAX];
+ uint32_t xs_rtrmap_2[__XBTS_MAX];
+ uint32_t xs_rtrmap_mem_2[__XBTS_MAX];
+ uint32_t xs_rtrefcbt_2[__XBTS_MAX];
uint32_t xs_qm_dqreclaims;
uint32_t xs_qm_dqreclaim_misses;
uint32_t xs_qm_dquot_dups;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 394fdf3bb535..d92d7a07ea89 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -819,20 +819,76 @@ xfs_fs_sync_fs(
return 0;
}
+static xfs_extlen_t
+xfs_internal_log_size(
+ struct xfs_mount *mp)
+{
+ if (!mp->m_sb.sb_logstart)
+ return 0;
+ return mp->m_sb.sb_logblocks;
+}
+
+static void
+xfs_statfs_data(
+ struct xfs_mount *mp,
+ struct kstatfs *st)
+{
+ int64_t fdblocks =
+ percpu_counter_sum(&mp->m_fdblocks);
+
+ /* make sure st->f_bfree does not underflow */
+ st->f_bfree = max(0LL, fdblocks - xfs_fdblocks_unavailable(mp));
+ /*
+ * sb_dblocks can change during growfs, but nothing cares about reporting
+ * the old or the new value here.

+ */
+ st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp);
+}
+
+/*
+ * When stat(v)fs is called on a file with the realtime bit set or a directory
+ * with the rtinherit bit, report freespace information for the RT device
+ * instead of the main data device.
+ */
+static void
+xfs_statfs_rt(
+ struct xfs_mount *mp,
+ struct kstatfs *st)
+{
+ st->f_bfree = xfs_rtbxlen_to_blen(mp,
+ percpu_counter_sum_positive(&mp->m_frextents));
+ st->f_blocks = mp->m_sb.sb_rblocks;
+}
+
+static void
+xfs_statfs_inodes(
+ struct xfs_mount *mp,
+ struct kstatfs *st)
+{
+ uint64_t icount = percpu_counter_sum(&mp->m_icount);
+ uint64_t ifree = percpu_counter_sum(&mp->m_ifree);
+ uint64_t fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree);
+
+ st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
+ if (M_IGEO(mp)->maxicount)
+ st->f_files = min_t(typeof(st->f_files), st->f_files,
+ M_IGEO(mp)->maxicount);
+
+ /* If sb_icount overshot maxicount, report actual allocation */
+ st->f_files = max_t(typeof(st->f_files), st->f_files,
+ mp->m_sb.sb_icount);
+
+ /* Make sure st->f_ffree does not underflow */
+ st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree));
+}
+
STATIC int
xfs_fs_statfs(
struct dentry *dentry,
- struct kstatfs *statp)
+ struct kstatfs *st)
{
struct xfs_mount *mp = XFS_M(dentry->d_sb);
- xfs_sb_t *sbp = &mp->m_sb;
struct xfs_inode *ip = XFS_I(d_inode(dentry));
- uint64_t fakeinos, id;
- uint64_t icount;
- uint64_t ifree;
- uint64_t fdblocks;
- xfs_extlen_t lsize;
- int64_t ffree;
/*
* Expedite background inodegc but don't wait. We do not want to block
@@ -840,58 +896,28 @@ xfs_fs_statfs(
*/
xfs_inodegc_push(mp);
- statp->f_type = XFS_SUPER_MAGIC;
- statp->f_namelen = MAXNAMELEN - 1;
-
- id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
- statp->f_fsid = u64_to_fsid(id);
-
- icount = percpu_counter_sum(&mp->m_icount);
- ifree = percpu_counter_sum(&mp->m_ifree);
- fdblocks = percpu_counter_sum(&mp->m_fdblocks);
-
- spin_lock(&mp->m_sb_lock);
- statp->f_bsize = sbp->sb_blocksize;
- lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
- statp->f_blocks = sbp->sb_dblocks - lsize;
- spin_unlock(&mp->m_sb_lock);
-
- /* make sure statp->f_bfree does not underflow */
- statp->f_bfree = max_t(int64_t, 0,
- fdblocks - xfs_fdblocks_unavailable(mp));
- statp->f_bavail = statp->f_bfree;
-
- fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
- statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
- if (M_IGEO(mp)->maxicount)
- statp->f_files = min_t(typeof(statp->f_files),
- statp->f_files,
- M_IGEO(mp)->maxicount);
-
- /* If sb_icount overshot maxicount, report actual allocation */
- statp->f_files = max_t(typeof(statp->f_files),
- statp->f_files,
- sbp->sb_icount);
+ st->f_type = XFS_SUPER_MAGIC;
+ st->f_namelen = MAXNAMELEN - 1;
+ st->f_bsize = mp->m_sb.sb_blocksize;
+ st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev));
- /* make sure statp->f_ffree does not underflow */
- ffree = statp->f_files - (icount - ifree);
- statp->f_ffree = max_t(int64_t, ffree, 0);
+ xfs_statfs_data(mp, st);
+ xfs_statfs_inodes(mp, st);
if (XFS_IS_REALTIME_MOUNT(mp) &&
- (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
- s64 freertx;
-
- statp->f_blocks = sbp->sb_rblocks;
- freertx = percpu_counter_sum_positive(&mp->m_frextents);
- statp->f_bavail = statp->f_bfree =
- xfs_rtbxlen_to_blen(mp, freertx);
- }
+ (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
+ xfs_statfs_rt(mp, st);
if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
- xfs_qm_statvfs(ip, statp);
+ xfs_qm_statvfs(ip, st);
+ /*
+ * XFS does not distinguish between blocks available to privileged and
+ * unprivileged users.
+ */
+ st->f_bavail = st->f_bfree;
return 0;
}
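
The inode numbers reported by the new xfs_statfs_inodes() helper are derived rather than stored: free data blocks are converted into "fake" inodes, clamped against the maximum inode number and maxicount, and then floored at the currently allocated count. A standalone sketch of that arithmetic follows; FAKE_MAXINUMBER and the driver values are illustrative stand-ins, not the real constants.

#include <stdint.h>
#include <stdio.h>

#define FAKE_MAXINUMBER		(1ULL << 56)	/* stand-in for XFS_MAXINUMBER */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

static void statfs_inodes(uint64_t icount, uint64_t ifree, uint64_t sb_icount,
			  uint64_t maxicount, uint64_t fakeinos,
			  uint64_t *f_files, uint64_t *f_ffree)
{
	*f_files = min_u64(icount + fakeinos, FAKE_MAXINUMBER);
	if (maxicount)
		*f_files = min_u64(*f_files, maxicount);

	/* If sb_icount overshot maxicount, report the actual allocation. */
	*f_files = max_u64(*f_files, sb_icount);

	/* Make sure f_ffree does not underflow. */
	*f_ffree = *f_files > icount - ifree ? *f_files - (icount - ifree) : 0;
}

int main(void)
{
	uint64_t f_files, f_ffree;

	statfs_inodes(1000, 100, 1000, 5000, 2000, &f_files, &f_ffree);
	printf("f_files=%llu f_ffree=%llu\n",
	       (unsigned long long)f_files, (unsigned long long)f_ffree);
	return 0;
}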
@@ -1730,7 +1756,7 @@ xfs_fs_fill_super(
sb->s_time_max = XFS_LEGACY_TIME_MAX;
}
trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
- sb->s_iflags |= SB_I_CGROUPWB;
+ sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
set_posix_acl_flag(sb);
@@ -1754,9 +1780,11 @@ xfs_fs_fill_super(
xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
if (xfs_has_reflink(mp)) {
- if (mp->m_sb.sb_rblocks) {
+ if (xfs_has_realtime(mp) &&
+ !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) {
xfs_alert(mp,
- "reflink not compatible with realtime device!");
+ "reflink not compatible with realtime extent size %u!",
+ mp->m_sb.sb_rextsize);
error = -EINVAL;
goto out_filestream_unmount;
}
@@ -1767,12 +1795,6 @@ xfs_fs_fill_super(
}
}
- if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
- xfs_alert(mp,
- "reverse mapping btree not compatible with realtime device!");
- error = -EINVAL;
- goto out_filestream_unmount;
- }
if (xfs_has_exchange_range(mp))
xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE);
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 302e6e5d6c7e..c0e85c1e42f2 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -92,7 +92,6 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
extern const struct export_operations xfs_export_operations;
extern const struct quotactl_ops xfs_quotactl_operations;
-extern const struct dax_holder_operations xfs_dax_holder_operations;
extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index c84df23b494d..751dc74a3067 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -66,7 +66,7 @@ xfs_deprecated_dointvec_minmax(
return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
}
-static struct ctl_table xfs_table[] = {
+static const struct ctl_table xfs_table[] = {
{
.procname = "irix_sgid_inherit",
.data = &xfs_params.sgid_inherit.val,
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 7b16cdd72e9d..b29462363b81 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -14,11 +14,15 @@
* ino: filesystem inode number
*
* agbno: per-AG block number in fs blocks
+ * rgbno: per-rtgroup block number in fs blocks
* startblock: physical block number for file mappings. This is either a
* segmented fsblock for data device mappings, or a rfsblock
* for realtime device mappings
* fsbcount: number of blocks in an extent, in fs blocks
*
+ * gbno: generic allocation group block number. This is an agbno for space
+ * in a per-AG allocation group or an rgbno for space in a realtime group.
+ *
* daddr: physical block number in 512b blocks
* bbcount: number of blocks in a physical extent, in 512b blocks
*
@@ -494,7 +498,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
__entry->dev = bp->b_target->bt_dev;
__entry->bno = xfs_buf_daddr(bp);
__entry->nblks = bp->b_length;
- __entry->hold = atomic_read(&bp->b_hold);
+ __entry->hold = bp->b_hold;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;
@@ -565,7 +569,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
__entry->bno = xfs_buf_daddr(bp);
__entry->length = bp->b_length;
__entry->flags = flags;
- __entry->hold = atomic_read(&bp->b_hold);
+ __entry->hold = bp->b_hold;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->caller_ip = caller_ip;
@@ -608,7 +612,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__entry->dev = bp->b_target->bt_dev;
__entry->bno = xfs_buf_daddr(bp);
__entry->length = bp->b_length;
- __entry->hold = atomic_read(&bp->b_hold);
+ __entry->hold = bp->b_hold;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->error = error;
@@ -652,7 +656,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
__entry->buf_bno = xfs_buf_daddr(bip->bli_buf);
__entry->buf_len = bip->bli_buf->b_length;
__entry->buf_flags = bip->bli_buf->b_flags;
- __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
+ __entry->buf_hold = bip->bli_buf->b_hold;
__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
__entry->buf_lockval = bip->bli_buf->b_sema.count;
__entry->li_flags = bip->bli_item.li_flags;
@@ -2295,6 +2299,7 @@ TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID);
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_META_BTREE);
DECLARE_EVENT_CLASS(xfs_swap_extent_class,
TP_PROTO(struct xfs_inode *ip, int which),
@@ -2918,13 +2923,14 @@ DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_finish_item);
/* rmap tracepoints */
DECLARE_EVENT_CLASS(xfs_rmap_class,
TP_PROTO(struct xfs_btree_cur *cur,
- xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
+ xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten,
const struct xfs_owner_info *oinfo),
- TP_ARGS(cur, agbno, len, unwritten, oinfo),
+ TP_ARGS(cur, gbno, len, unwritten, oinfo),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(xfs_extlen_t, len)
__field(uint64_t, owner)
__field(uint64_t, offset)
@@ -2932,8 +2938,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
__entry->len = len;
__entry->owner = oinfo->oi_owner;
__entry->offset = oinfo->oi_offset;
@@ -2941,10 +2948,11 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
if (unwritten)
__entry->flags |= XFS_RMAP_UNWRITTEN;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx",
+ TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__entry->len,
__entry->owner,
__entry->offset,
@@ -2953,9 +2961,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
#define DEFINE_RMAP_EVENT(name) \
DEFINE_EVENT(xfs_rmap_class, name, \
TP_PROTO(struct xfs_btree_cur *cur, \
- xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
+ xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten, \
const struct xfs_owner_info *oinfo), \
- TP_ARGS(cur, agbno, len, unwritten, oinfo))
+ TP_ARGS(cur, gbno, len, unwritten, oinfo))
/* btree cursor error/%ip tracepoint class */
DECLARE_EVENT_CLASS(xfs_btree_error_class,
@@ -3018,47 +3026,36 @@ TRACE_EVENT(xfs_rmap_convert_state,
TP_ARGS(cur, state, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_ino_t, ino)
__field(int, state)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
- switch (cur->bc_ops->type) {
- case XFS_BTREE_TYPE_INODE:
- __entry->agno = 0;
- __entry->ino = cur->bc_ino.ip->i_ino;
- break;
- case XFS_BTREE_TYPE_AG:
- __entry->agno = cur->bc_group->xg_gno;
- __entry->ino = 0;
- break;
- case XFS_BTREE_TYPE_MEM:
- __entry->agno = 0;
- __entry->ino = 0;
- break;
- }
+ __entry->type = cur->bc_group->xg_type;
+ __entry->agno = cur->bc_group->xg_gno;
__entry->state = state;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno 0x%x ino 0x%llx state %d caller %pS",
+ TP_printk("dev %d:%d %sno 0x%x state %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->ino,
__entry->state,
(char *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_rmapbt_class,
TP_PROTO(struct xfs_btree_cur *cur,
- xfs_agblock_t agbno, xfs_extlen_t len,
+ xfs_agblock_t gbno, xfs_extlen_t len,
uint64_t owner, uint64_t offset, unsigned int flags),
- TP_ARGS(cur, agbno, len, owner, offset, flags),
+ TP_ARGS(cur, gbno, len, owner, offset, flags),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(xfs_extlen_t, len)
__field(uint64_t, owner)
__field(uint64_t, offset)
@@ -3066,17 +3063,19 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
__entry->len = len;
__entry->owner = owner;
__entry->offset = offset;
__entry->flags = flags;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__entry->len,
__entry->owner,
__entry->offset,
@@ -3085,9 +3084,9 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class,
#define DEFINE_RMAPBT_EVENT(name) \
DEFINE_EVENT(xfs_rmapbt_class, name, \
TP_PROTO(struct xfs_btree_cur *cur, \
- xfs_agblock_t agbno, xfs_extlen_t len, \
+ xfs_agblock_t gbno, xfs_extlen_t len, \
uint64_t owner, uint64_t offset, unsigned int flags), \
- TP_ARGS(cur, agbno, len, owner, offset, flags))
+ TP_ARGS(cur, gbno, len, owner, offset, flags))
TRACE_DEFINE_ENUM(XFS_RMAP_MAP);
TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED);
@@ -3104,8 +3103,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long long, owner)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(int, whichfork)
__field(xfs_fileoff_t, l_loff)
__field(xfs_filblks_t, l_len)
@@ -3114,9 +3114,11 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
- __entry->agbno = XFS_FSB_TO_AGBNO(mp,
- ri->ri_bmap.br_startblock);
+ __entry->type = ri->ri_group->xg_type;
+ __entry->agno = ri->ri_group->xg_gno;
+ __entry->gbno = xfs_fsb_to_gbno(mp,
+ ri->ri_bmap.br_startblock,
+ ri->ri_group->xg_type);
__entry->owner = ri->ri_owner;
__entry->whichfork = ri->ri_whichfork;
__entry->l_loff = ri->ri_bmap.br_startoff;
@@ -3124,11 +3126,12 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
__entry->l_state = ri->ri_bmap.br_state;
__entry->op = ri->ri_type;
),
- TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
+ TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->op, XFS_RMAP_INTENT_STRINGS),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__entry->owner,
__print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
__entry->l_loff,
@@ -3302,56 +3305,62 @@ TRACE_EVENT(xfs_ag_resv_init_error,
/* refcount tracepoint classes */
DECLARE_EVENT_CLASS(xfs_refcount_class,
- TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno,
xfs_extlen_t len),
- TP_ARGS(cur, agbno, len),
+ TP_ARGS(cur, gbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
__entry->len = len;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__entry->len)
);
#define DEFINE_REFCOUNT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_class, name, \
- TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, \
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, \
xfs_extlen_t len), \
- TP_ARGS(cur, agbno, len))
+ TP_ARGS(cur, gbno, len))
TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi);
TRACE_EVENT(xfs_refcount_lookup,
- TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
+ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno,
xfs_lookup_t dir),
- TP_ARGS(cur, agbno, dir),
+ TP_ARGS(cur, gbno, dir),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(xfs_lookup_t, dir)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
__entry->dir = dir;
),
- TP_printk("dev %d:%d agno 0x%x agbno 0x%x cmp %s(%d)",
+ TP_printk("dev %d:%d %sno 0x%x gbno 0x%x cmp %s(%d)",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR),
__entry->dir)
)
@@ -3362,6 +3371,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
TP_ARGS(cur, irec),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, domain)
__field(xfs_agblock_t, startblock)
@@ -3370,14 +3380,16 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
__entry->refcount = irec->rc_refcount;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u",
+ TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
__entry->startblock,
@@ -3393,49 +3405,53 @@ DEFINE_EVENT(xfs_refcount_extent_class, name, \
/* single-rcext and an agbno tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec,
- xfs_agblock_t agbno),
- TP_ARGS(cur, irec, agbno),
+ xfs_agblock_t gbno),
+ TP_ARGS(cur, irec, gbno),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, domain)
__field(xfs_agblock_t, startblock)
__field(xfs_extlen_t, blockcount)
__field(xfs_nlink_t, refcount)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
__entry->refcount = irec->rc_refcount;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
__entry->startblock,
__entry->blockcount,
__entry->refcount,
- __entry->agbno)
+ __entry->gbno)
)
#define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_extent_at_class, name, \
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \
- xfs_agblock_t agbno), \
- TP_ARGS(cur, irec, agbno))
+ xfs_agblock_t gbno), \
+ TP_ARGS(cur, irec, gbno))
/* double-rcext tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
- struct xfs_refcount_irec *i2),
+ struct xfs_refcount_irec *i2),
TP_ARGS(cur, i1, i2),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, i1_domain)
__field(xfs_agblock_t, i1_startblock)
@@ -3448,6 +3464,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
@@ -3458,9 +3475,10 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
__entry->i2_blockcount = i2->rc_blockcount;
__entry->i2_refcount = i2->rc_refcount;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
- "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
+ TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- "
+ "dom %s gbno 0x%x fsbcount 0x%x refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
__entry->i1_startblock,
@@ -3481,10 +3499,11 @@ DEFINE_EVENT(xfs_refcount_double_extent_class, name, \
/* double-rcext and an agbno tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
- struct xfs_refcount_irec *i2, xfs_agblock_t agbno),
- TP_ARGS(cur, i1, i2, agbno),
+ struct xfs_refcount_irec *i2, xfs_agblock_t gbno),
+ TP_ARGS(cur, i1, i2, gbno),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, i1_domain)
__field(xfs_agblock_t, i1_startblock)
@@ -3494,10 +3513,11 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
__field(xfs_agblock_t, i2_startblock)
__field(xfs_extlen_t, i2_blockcount)
__field(xfs_nlink_t, i2_refcount)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
@@ -3507,11 +3527,12 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
__entry->i2_startblock = i2->rc_startblock;
__entry->i2_blockcount = i2->rc_blockcount;
__entry->i2_refcount = i2->rc_refcount;
- __entry->agbno = agbno;
+ __entry->gbno = gbno;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
- "dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
+ TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- "
+ "dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
__entry->i1_startblock,
@@ -3521,14 +3542,14 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
__entry->i2_startblock,
__entry->i2_blockcount,
__entry->i2_refcount,
- __entry->agbno)
+ __entry->gbno)
)
#define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
- struct xfs_refcount_irec *i2, xfs_agblock_t agbno), \
- TP_ARGS(cur, i1, i2, agbno))
+ struct xfs_refcount_irec *i2, xfs_agblock_t gbno), \
+ TP_ARGS(cur, i1, i2, gbno))
/* triple-rcext tracepoint class */
DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
@@ -3537,6 +3558,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
TP_ARGS(cur, i1, i2, i3),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(enum xfs_refc_domain, i1_domain)
__field(xfs_agblock_t, i1_startblock)
@@ -3553,6 +3575,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->type = cur->bc_group->xg_type;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
@@ -3567,10 +3590,11 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
__entry->i3_blockcount = i3->rc_blockcount;
__entry->i3_refcount = i3->rc_refcount;
),
- TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
- "dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
- "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
+ TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- "
+ "dom %s gbno 0x%x fsbcount 0x%x refcount %u -- "
+ "dom %s gbno 0x%x fsbcount 0x%x refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
__entry->i1_startblock,
@@ -3638,23 +3662,27 @@ DECLARE_EVENT_CLASS(xfs_refcount_deferred_class,
TP_ARGS(mp, refc),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(int, op)
- __field(xfs_agblock_t, agbno)
+ __field(xfs_agblock_t, gbno)
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->agno = XFS_FSB_TO_AGNO(mp, refc->ri_startblock);
+ __entry->type = refc->ri_group->xg_type;
+ __entry->agno = refc->ri_group->xg_gno;
__entry->op = refc->ri_type;
- __entry->agbno = XFS_FSB_TO_AGBNO(mp, refc->ri_startblock);
+ __entry->gbno = xfs_fsb_to_gbno(mp, refc->ri_startblock,
+ refc->ri_group->xg_type);
__entry->len = refc->ri_blockcount;
),
- TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x fsbcount 0x%x",
+ TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->op, XFS_REFCOUNT_INTENT_STRINGS),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
- __entry->agbno,
+ __entry->gbno,
__entry->len)
);
#define DEFINE_REFCOUNT_DEFERRED_EVENT(name) \
@@ -3993,7 +4021,7 @@ TRACE_EVENT(xfs_fsmap_mapping,
__entry->offset = frec->offset;
__entry->flags = frec->rm_flags;
),
- TP_printk("dev %d:%d keydev %d:%d agno 0x%x rmapbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
+ TP_printk("dev %d:%d keydev %d:%d agno 0x%x gbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
MAJOR(__entry->keydev), MINOR(__entry->keydev),
__entry->agno,
@@ -4950,7 +4978,7 @@ DECLARE_EVENT_CLASS(xfbtree_buf_class,
__entry->xfino = file_inode(xfbt->target->bt_file)->i_ino;
__entry->bno = xfs_buf_daddr(bp);
__entry->nblks = bp->b_length;
- __entry->hold = atomic_read(&bp->b_hold);
+ __entry->hold = bp->b_hold;
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;
@@ -5574,6 +5602,72 @@ DEFINE_EVENT(xfs_metadir_class, name, \
TP_ARGS(dp, name, ino))
DEFINE_METADIR_EVENT(xfs_metadir_lookup);
+/* metadata inode space reservations */
+
+DECLARE_EVENT_CLASS(xfs_metafile_resv_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_filblks_t len),
+ TP_ARGS(ip, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(unsigned long long, freeblks)
+ __field(unsigned long long, reserved)
+ __field(unsigned long long, asked)
+ __field(unsigned long long, used)
+ __field(unsigned long long, len)
+ ),
+ TP_fast_assign(
+ struct xfs_mount *mp = ip->i_mount;
+
+ __entry->dev = mp->m_super->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->freeblks = percpu_counter_sum(&mp->m_fdblocks);
+ __entry->reserved = ip->i_delayed_blks;
+ __entry->asked = ip->i_meta_resv_asked;
+ __entry->used = ip->i_nblocks;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx freeblks %llu resv %llu ask %llu used %llu len %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->freeblks,
+ __entry->reserved,
+ __entry->asked,
+ __entry->used,
+ __entry->len)
+)
+#define DEFINE_METAFILE_RESV_EVENT(name) \
+DEFINE_EVENT(xfs_metafile_resv_class, name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_filblks_t len), \
+ TP_ARGS(ip, len))
+DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_init);
+DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free);
+DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_alloc_space);
+DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free_space);
+DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_critical);
+DEFINE_INODE_ERROR_EVENT(xfs_metafile_resv_init_error);
+
+#ifdef CONFIG_XFS_RT
+TRACE_EVENT(xfs_growfs_check_rtgeom,
+ TP_PROTO(const struct xfs_mount *mp, unsigned int min_logfsbs),
+ TP_ARGS(mp, min_logfsbs),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, logblocks)
+ __field(unsigned int, min_logfsbs)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->logblocks = mp->m_sb.sb_logblocks;
+ __entry->min_logfsbs = min_logfsbs;
+ ),
+ TP_printk("dev %d:%d logblocks %u min_logfsbs %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->logblocks,
+ __entry->min_logfsbs)
+);
+#endif /* CONFIG_XFS_RT */
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 4cd25717c9d1..c6657072361a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -100,7 +100,6 @@ xfs_trans_dup(
/*
* Initialize the new transaction structure.
*/
- ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
ntp->t_mountp = tp->t_mountp;
INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
@@ -275,7 +274,6 @@ retry:
ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
xfs_has_lazysbcount(mp));
- tp->t_magic = XFS_TRANS_HEADER_MAGIC;
tp->t_flags = flags;
tp->t_mountp = mp;
INIT_LIST_HEAD(&tp->t_items);
@@ -1266,6 +1264,9 @@ retry:
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ if (xfs_is_metadir_inode(ip))
+ goto out;
+
error = xfs_qm_dqattach_locked(ip, false);
if (error) {
/* Caller should have allocated the dquots! */
@@ -1334,6 +1335,7 @@ retry:
goto out_cancel;
}
+out:
*tpp = tp;
return 0;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 71c2e82e4dad..2b366851e9a4 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -122,7 +122,6 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
* This is the structure maintained for every active transaction.
*/
typedef struct xfs_trans {
- unsigned int t_magic; /* magic number */
unsigned int t_log_res; /* amt of log space resvd */
unsigned int t_log_count; /* count for perm log res */
unsigned int t_blk_res; /* # of blocks resvd */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index f56d62dced97..0fcb1828e598 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -359,13 +359,8 @@ xfsaild_resubmit_item(
}
/* protected by ail_lock */
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
- if (bp->b_flags & (_XBF_INODES | _XBF_DQUOTS))
- clear_bit(XFS_LI_FAILED, &lip->li_flags);
- else
- xfs_clear_li_failed(lip);
- }
-
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+ clear_bit(XFS_LI_FAILED, &lip->li_flags);
xfs_buf_unlock(bp);
return XFS_ITEM_SUCCESS;
}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 8e886ecfd69a..53af546c0b23 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -659,7 +659,7 @@ xfs_trans_inode_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_INODE_BUF;
- bp->b_flags |= _XBF_INODES;
+ bp->b_iodone = xfs_buf_inode_iodone;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
@@ -684,7 +684,7 @@ xfs_trans_stale_inode_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_STALE_INODE;
- bp->b_flags |= _XBF_INODES;
+ bp->b_iodone = xfs_buf_inode_iodone;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
@@ -709,7 +709,7 @@ xfs_trans_inode_alloc_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
- bp->b_flags |= _XBF_INODES;
+ bp->b_iodone = xfs_buf_inode_iodone;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
@@ -820,6 +820,6 @@ xfs_trans_dquot_buf(
break;
}
- bp->b_flags |= _XBF_DQUOTS;
+ bp->b_iodone = xfs_buf_dquot_iodone;
xfs_trans_buf_set_type(tp, bp, type);
}
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 713b6d243e56..765456bf3428 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -156,7 +156,8 @@ xfs_trans_mod_ino_dquot(
unsigned int field,
int64_t delta)
{
- ASSERT(!xfs_is_metadir_inode(ip) || XFS_IS_DQDETACHED(ip));
+ if (xfs_is_metadir_inode(ip))
+ return;
xfs_trans_mod_dquot(tp, dqp, field, delta);
@@ -246,11 +247,10 @@ xfs_trans_mod_dquot_byino(
xfs_mount_t *mp = tp->t_mountp;
if (!XFS_IS_QUOTA_ON(mp) ||
- xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
+ xfs_is_quota_inode(&mp->m_sb, ip->i_ino) ||
+ xfs_is_metadir_inode(ip))
return;
- ASSERT(!xfs_is_metadir_inode(ip) || XFS_IS_DQDETACHED(ip));
-
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index b5f594754a9e..99b960bd473c 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -17,11 +17,16 @@ extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
extern void disable_srat(void);
+extern int fix_pxm_node_maps(int max_nid);
extern void bad_srat(void);
extern int srat_disabled(void);
#else /* CONFIG_ACPI_NUMA */
+static inline int fix_pxm_node_maps(int max_nid)
+{
+ return 0;
+}
static inline void disable_srat(void)
{
}
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d076ebd19a61..78b24b090488 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -763,6 +763,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9d0479f50f97..5db59a1efb65 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -35,7 +35,7 @@ extern void early_ioremap_reset(void);
/*
* Early copy from unmapped memory to kernel mapped memory.
*/
-extern void copy_from_early_mem(void *dest, phys_addr_t src,
+extern int copy_from_early_mem(void *dest, phys_addr_t src,
unsigned long size);
#else
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
deleted file mode 100644
index 814207e7c37f..000000000000
--- a/include/asm-generic/hyperv-tlfs.h
+++ /dev/null
@@ -1,874 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
- * Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- */
-
-#ifndef _ASM_GENERIC_HYPERV_TLFS_H
-#define _ASM_GENERIC_HYPERV_TLFS_H
-
-#include <linux/types.h>
-#include <linux/bits.h>
-#include <linux/time64.h>
-
-/*
- * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
- * of 4096. These definitions are used when communicating with Hyper-V using
- * guest physical pages and guest physical page addresses, since the guest page
- * size may not be 4096 on all architectures.
- */
-#define HV_HYP_PAGE_SHIFT 12
-#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
-#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
-
-/*
- * Hyper-V provides two categories of flags relevant to guest VMs. The
- * "Features" category indicates specific functionality that is available
- * to guests on this particular instance of Hyper-V. The "Features"
- * are presented in four groups, each of which is 32 bits. The group A
- * and B definitions are common across architectures and are listed here.
- * However, not all flags are relevant on all architectures.
- *
- * Groups C and D vary across architectures and are listed in the
- * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
- * on multiple architectures, but the bit positions are different so they
- * cannot appear in the generic portion of hyperv-tlfs.h.
- *
- * The "Enlightenments" category provides recommendations on whether to use
- * specific enlightenments that are available. The Enlighenments are a single
- * group of 32 bits, but they vary across architectures and are listed in
- * the architecture specific portion of hyperv-tlfs.h.
- */
-
-/*
- * Group A Features.
- */
-
-/* VP Runtime register available */
-#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
-/* Partition Reference Counter available*/
-#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
-/* Basic SynIC register available */
-#define HV_MSR_SYNIC_AVAILABLE BIT(2)
-/* Synthetic Timer registers available */
-#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
-/* Virtual APIC assist and VP assist page registers available */
-#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
-/* Hypercall and Guest OS ID registers available*/
-#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
-/* Access virtual processor index register available*/
-#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
-/* Virtual system reset register available*/
-#define HV_MSR_RESET_AVAILABLE BIT(7)
-/* Access statistics page registers available */
-#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
-/* Partition reference TSC register is available */
-#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
-/* Partition Guest IDLE register is available */
-#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
-/* Partition local APIC and TSC frequency registers available */
-#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
-/* AccessReenlightenmentControls privilege */
-#define HV_ACCESS_REENLIGHTENMENT BIT(13)
-/* AccessTscInvariantControls privilege */
-#define HV_ACCESS_TSC_INVARIANT BIT(15)
-
-/*
- * Group B features.
- */
-#define HV_CREATE_PARTITIONS BIT(0)
-#define HV_ACCESS_PARTITION_ID BIT(1)
-#define HV_ACCESS_MEMORY_POOL BIT(2)
-#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
-#define HV_POST_MESSAGES BIT(4)
-#define HV_SIGNAL_EVENTS BIT(5)
-#define HV_CREATE_PORT BIT(6)
-#define HV_CONNECT_PORT BIT(7)
-#define HV_ACCESS_STATS BIT(8)
-#define HV_DEBUGGING BIT(11)
-#define HV_CPU_MANAGEMENT BIT(12)
-#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
-#define HV_ISOLATION BIT(22)
-
-/*
- * TSC page layout.
- */
-struct ms_hyperv_tsc_page {
- volatile u32 tsc_sequence;
- u32 reserved1;
- volatile u64 tsc_scale;
- volatile s64 tsc_offset;
-} __packed;
-
-union hv_reference_tsc_msr {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:11;
- u64 pfn:52;
- } __packed;
-};
-
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how Linux guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * Linux and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100
- * 55:48 - Distro specific identification
- * 47:16 - Linux kernel version number
- * 15:0 - Distro specific identification
- *
- *
- */
-
-#define HV_LINUX_VENDOR_ID 0x8100
-
-/*
- * Crash notification flags.
- */
-#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
-#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
-
-/* Declare the various hypercall operations. */
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_ENABLE_VP_VTL 0x000f
-#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
-#define HVCALL_SEND_IPI 0x000b
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
-#define HVCALL_SEND_IPI_EX 0x0015
-#define HVCALL_GET_PARTITION_ID 0x0046
-#define HVCALL_DEPOSIT_MEMORY 0x0048
-#define HVCALL_CREATE_VP 0x004e
-#define HVCALL_GET_VP_REGISTERS 0x0050
-#define HVCALL_SET_VP_REGISTERS 0x0051
-#define HVCALL_POST_MESSAGE 0x005c
-#define HVCALL_SIGNAL_EVENT 0x005d
-#define HVCALL_POST_DEBUG_DATA 0x0069
-#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
-#define HVCALL_RESET_DEBUG_SESSION 0x006b
-#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
-#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
-#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
-#define HVCALL_RETARGET_INTERRUPT 0x007e
-#define HVCALL_START_VP 0x0099
-#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
-#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
-#define HVCALL_MMIO_READ 0x0106
-#define HVCALL_MMIO_WRITE 0x0107
-
-/* Extended hypercalls */
-#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
-#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
-
-#define HV_FLUSH_ALL_PROCESSORS BIT(0)
-#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
-#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
-#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
-
-/* Extended capability bits */
-#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
-
-enum HV_GENERIC_SET_FORMAT {
- HV_GENERIC_SET_SPARSE_4K,
- HV_GENERIC_SET_ALL,
-};
-
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HV_VP_INDEX_SELF ((u32)-2)
-
-#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
-#define HV_HYPERCALL_FAST_BIT BIT(16)
-#define HV_HYPERCALL_VARHEAD_OFFSET 17
-#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
-#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
-#define HV_HYPERCALL_NESTED BIT_ULL(31)
-#define HV_HYPERCALL_REP_COMP_OFFSET 32
-#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
-#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
-#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
-#define HV_HYPERCALL_REP_START_OFFSET 48
-#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
-#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
-#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
- HV_HYPERCALL_RSVD1_MASK | \
- HV_HYPERCALL_RSVD2_MASK)
-
-/* hypercall status code */
-#define HV_STATUS_SUCCESS 0
-#define HV_STATUS_INVALID_HYPERCALL_CODE 2
-#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
-#define HV_STATUS_INVALID_ALIGNMENT 4
-#define HV_STATUS_INVALID_PARAMETER 5
-#define HV_STATUS_ACCESS_DENIED 6
-#define HV_STATUS_OPERATION_DENIED 8
-#define HV_STATUS_INSUFFICIENT_MEMORY 11
-#define HV_STATUS_INVALID_PORT_ID 17
-#define HV_STATUS_INVALID_CONNECTION_ID 18
-#define HV_STATUS_INSUFFICIENT_BUFFERS 19
-#define HV_STATUS_TIME_OUT 120
-#define HV_STATUS_VTL_ALREADY_ENABLED 134
-
-/*
- * The Hyper-V TimeRefCount register and the TSC
- * page provide a guest VM clock with 100ns tick rate
- */
-#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
-
-/* Define the number of synthetic interrupt sources. */
-#define HV_SYNIC_SINT_COUNT (16)
-/* Define the expected SynIC version. */
-#define HV_SYNIC_VERSION_1 (0x1)
-/* Valid SynIC vectors are 16-255. */
-#define HV_SYNIC_FIRST_VALID_VECTOR (16)
-
-#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SINT_MASKED (1ULL << 16)
-#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
-#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
-
-#define HV_SYNIC_STIMER_COUNT (4)
-
-/* Define synthetic interrupt controller message constants. */
-#define HV_MESSAGE_SIZE (256)
-#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
-#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
-
-/*
- * Define hypervisor message types. Some of the message types
- * are x86/x64 specific, but there's no good way to separate
- * them out into the arch-specific version of hyperv-tlfs.h
- * because C doesn't provide a way to extend enum types.
- * Keeping them all in the arch neutral hyperv-tlfs.h seems
- * the least messy compromise.
- */
-enum hv_message_type {
- HVMSG_NONE = 0x00000000,
-
- /* Memory access messages. */
- HVMSG_UNMAPPED_GPA = 0x80000000,
- HVMSG_GPA_INTERCEPT = 0x80000001,
-
- /* Timer notification messages. */
- HVMSG_TIMER_EXPIRED = 0x80000010,
-
- /* Error messages. */
- HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
- HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
- HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
-
- /* Trace buffer complete messages. */
- HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
-
- /* Platform-specific processor intercept messages. */
- HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
- HVMSG_X64_MSR_INTERCEPT = 0x80010001,
- HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
- HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
- HVMSG_X64_APIC_EOI = 0x80010004,
- HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
-};
-
-/* Define synthetic interrupt controller message flags. */
-union hv_message_flags {
- __u8 asu8;
- struct {
- __u8 msg_pending:1;
- __u8 reserved:7;
- } __packed;
-};
-
-/* Define port identifier type. */
-union hv_port_id {
- __u32 asu32;
- struct {
- __u32 id:24;
- __u32 reserved:8;
- } __packed u;
-};
-
-/* Define synthetic interrupt controller message header. */
-struct hv_message_header {
- __u32 message_type;
- __u8 payload_size;
- union hv_message_flags message_flags;
- __u8 reserved[2];
- union {
- __u64 sender;
- union hv_port_id port;
- };
-} __packed;
-
-/* Define synthetic interrupt controller message format. */
-struct hv_message {
- struct hv_message_header header;
- union {
- __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
- } u;
-} __packed;
-
-/* Define the synthetic interrupt message page layout. */
-struct hv_message_page {
- struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
-} __packed;
-
-/* Define timer message payload structure. */
-struct hv_timer_message_payload {
- __u32 timer_index;
- __u32 reserved;
- __u64 expiration_time; /* When the timer expired */
- __u64 delivery_time; /* When the message was delivered */
-} __packed;
-
-
-/* Define synthetic interrupt controller flag constants. */
-#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
-
-/*
- * Synthetic timer configuration.
- */
-union hv_stimer_config {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 periodic:1;
- u64 lazy:1;
- u64 auto_enable:1;
- u64 apic_vector:8;
- u64 direct_mode:1;
- u64 reserved_z0:3;
- u64 sintx:4;
- u64 reserved_z1:44;
- } __packed;
-};
-
-
-/* Define the synthetic interrupt controller event flags format. */
-union hv_synic_event_flags {
- unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
-};
-
-/* Define SynIC control register. */
-union hv_synic_scontrol {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:63;
- } __packed;
-};
-
-/* Define synthetic interrupt source. */
-union hv_synic_sint {
- u64 as_uint64;
- struct {
- u64 vector:8;
- u64 reserved1:8;
- u64 masked:1;
- u64 auto_eoi:1;
- u64 polling:1;
- u64 reserved2:45;
- } __packed;
-};
-
-/* Define the format of the SIMP register */
-union hv_synic_simp {
- u64 as_uint64;
- struct {
- u64 simp_enabled:1;
- u64 preserved:11;
- u64 base_simp_gpa:52;
- } __packed;
-};
-
-/* Define the format of the SIEFP register */
-union hv_synic_siefp {
- u64 as_uint64;
- struct {
- u64 siefp_enabled:1;
- u64 preserved:11;
- u64 base_siefp_gpa:52;
- } __packed;
-};
-
-struct hv_vpset {
- u64 format;
- u64 valid_bank_mask;
- u64 bank_contents[];
-} __packed;
-
-/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
-#define HV_MAX_SPARSE_VCPU_BANKS (64)
-/* The number of vCPUs in one sparse bank */
-#define HV_VCPUS_PER_SPARSE_BANK (64)
-
-/* HvCallSendSyntheticClusterIpi hypercall */
-struct hv_send_ipi {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-} __packed;
-
-/* HvCallSendSyntheticClusterIpiEx hypercall */
-struct hv_send_ipi_ex {
- u32 vector;
- u32 reserved;
- struct hv_vpset vp_set;
-} __packed;
-
-/* HvFlushGuestPhysicalAddressSpace hypercalls */
-struct hv_guest_mapping_flush {
- u64 address_space;
- u64 flags;
-} __packed;
-
-/*
- * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
- * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
- */
-#define HV_MAX_FLUSH_PAGES (2048)
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
-
-/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
-union hv_gpa_page_range {
- u64 address_space;
- struct {
- u64 additional_pages:11;
- u64 largepage:1;
- u64 basepfn:52;
- } page;
- struct {
- u64 reserved:12;
- u64 page_size:1;
- u64 reserved1:8;
- u64 base_large_pfn:43;
- };
-};
-
-/*
- * All input flush parameters should be in single page. The max flush
- * count is equal with how many entries of union hv_gpa_page_range can
- * be populated into the input parameter page.
- */
-#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
- sizeof(union hv_gpa_page_range))
-
-struct hv_guest_mapping_flush_list {
- u64 address_space;
- u64 flags;
- union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
-};
-
-/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
-struct hv_tlb_flush {
- u64 address_space;
- u64 flags;
- u64 processor_mask;
- u64 gva_list[];
-} __packed;
-
-/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
-struct hv_tlb_flush_ex {
- u64 address_space;
- u64 flags;
- struct hv_vpset hv_vp_set;
- u64 gva_list[];
-} __packed;
-
-/* HvGetPartitionId hypercall (output only) */
-struct hv_get_partition_id {
- u64 partition_id;
-} __packed;
-
-/* HvDepositMemory hypercall */
-struct hv_deposit_memory {
- u64 partition_id;
- u64 gpa_page_list[];
-} __packed;
-
-struct hv_proximity_domain_flags {
- u32 proximity_preferred : 1;
- u32 reserved : 30;
- u32 proximity_info_valid : 1;
-} __packed;
-
-struct hv_proximity_domain_info {
- u32 domain_id;
- struct hv_proximity_domain_flags flags;
-} __packed;
-
-struct hv_lp_startup_status {
- u64 hv_status;
- u64 substatus1;
- u64 substatus2;
- u64 substatus3;
- u64 substatus4;
- u64 substatus5;
- u64 substatus6;
-} __packed;
-
-/* HvAddLogicalProcessor hypercall */
-struct hv_input_add_logical_processor {
- u32 lp_index;
- u32 apic_id;
- struct hv_proximity_domain_info proximity_domain_info;
-} __packed;
-
-struct hv_output_add_logical_processor {
- struct hv_lp_startup_status startup_status;
-} __packed;
-
-enum HV_SUBNODE_TYPE
-{
- HvSubnodeAny = 0,
- HvSubnodeSocket = 1,
- HvSubnodeAmdNode = 2,
- HvSubnodeL3 = 3,
- HvSubnodeCount = 4,
- HvSubnodeInvalid = -1
-};
-
-/* HvCreateVp hypercall */
-struct hv_create_vp {
- u64 partition_id;
- u32 vp_index;
- u8 padding[3];
- u8 subnode_type;
- u64 subnode_id;
- struct hv_proximity_domain_info proximity_domain_info;
- u64 flags;
-} __packed;
-
-enum hv_interrupt_source {
- HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
- HV_INTERRUPT_SOURCE_IOAPIC,
-};
-
-union hv_ioapic_rte {
- u64 as_uint64;
-
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 destination_mode:1;
- u32 delivery_status:1;
- u32 interrupt_polarity:1;
- u32 remote_irr:1;
- u32 trigger_mode:1;
- u32 interrupt_mask:1;
- u32 reserved1:15;
-
- u32 reserved2:24;
- u32 destination_id:8;
- };
-
- struct {
- u32 low_uint32;
- u32 high_uint32;
- };
-} __packed;
-
-struct hv_interrupt_entry {
- u32 source;
- u32 reserved1;
- union {
- union hv_msi_entry msi_entry;
- union hv_ioapic_rte ioapic_rte;
- };
-} __packed;
-
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
-
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vpset vp_set;
- };
-} __packed;
-
-struct hv_retarget_device_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
-} __packed __aligned(8);
-
-/*
- * These Hyper-V registers provide information equivalent to the CPUID
- * instruction on x86/x64.
- */
-#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
-#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
-#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
-
-/*
- * Synthetic register definitions equivalent to MSRs on x86/x64
- */
-#define HV_REGISTER_GUEST_CRASH_P0 0x00000210
-#define HV_REGISTER_GUEST_CRASH_P1 0x00000211
-#define HV_REGISTER_GUEST_CRASH_P2 0x00000212
-#define HV_REGISTER_GUEST_CRASH_P3 0x00000213
-#define HV_REGISTER_GUEST_CRASH_P4 0x00000214
-#define HV_REGISTER_GUEST_CRASH_CTL 0x00000215
-
-#define HV_REGISTER_GUEST_OS_ID 0x00090002
-#define HV_REGISTER_VP_INDEX 0x00090003
-#define HV_REGISTER_TIME_REF_COUNT 0x00090004
-#define HV_REGISTER_REFERENCE_TSC 0x00090017
-
-#define HV_REGISTER_SINT0 0x000A0000
-#define HV_REGISTER_SCONTROL 0x000A0010
-#define HV_REGISTER_SIEFP 0x000A0012
-#define HV_REGISTER_SIMP 0x000A0013
-#define HV_REGISTER_EOM 0x000A0014
-
-#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
-#define HV_REGISTER_STIMER0_COUNT 0x000B0001
-
-/* HvGetVpRegisters hypercall input with variable size reg name list*/
-struct hv_get_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct input {
- u32 name0;
- u32 name1;
- } element[];
-} __packed;
-
-/* HvGetVpRegisters returns an array of these output elements */
-struct hv_get_vp_registers_output {
- union {
- struct {
- u32 a;
- u32 b;
- u32 c;
- u32 d;
- } as32 __packed;
- struct {
- u64 low;
- u64 high;
- } as64 __packed;
- };
-};
-
-/* HvSetVpRegisters hypercall with variable size reg name/value list*/
-struct hv_set_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct {
- u32 name;
- u32 padding1;
- u64 padding2;
- u64 valuelow;
- u64 valuehigh;
- } element[];
-} __packed;
-
-enum hv_device_type {
- HV_DEVICE_TYPE_LOGICAL = 0,
- HV_DEVICE_TYPE_PCI = 1,
- HV_DEVICE_TYPE_IOAPIC = 2,
- HV_DEVICE_TYPE_ACPI = 3,
-};
-
-typedef u16 hv_pci_rid;
-typedef u16 hv_pci_segment;
-typedef u64 hv_logical_device_id;
-union hv_pci_bdf {
- u16 as_uint16;
-
- struct {
- u8 function:3;
- u8 device:5;
- u8 bus;
- };
-} __packed;
-
-union hv_pci_bus_range {
- u16 as_uint16;
-
- struct {
- u8 subordinate_bus;
- u8 secondary_bus;
- };
-} __packed;
-
-union hv_device_id {
- u64 as_uint64;
-
- struct {
- u64 reserved0:62;
- u64 device_type:2;
- };
-
- /* HV_DEVICE_TYPE_LOGICAL */
- struct {
- u64 id:62;
- u64 device_type:2;
- } logical;
-
- /* HV_DEVICE_TYPE_PCI */
- struct {
- union {
- hv_pci_rid rid;
- union hv_pci_bdf bdf;
- };
-
- hv_pci_segment segment;
- union hv_pci_bus_range shadow_bus_range;
-
- u16 phantom_function_bits:2;
- u16 source_shadow:1;
-
- u16 rsvdz0:11;
- u16 device_type:2;
- } pci;
-
- /* HV_DEVICE_TYPE_IOAPIC */
- struct {
- u8 ioapic_id;
- u8 rsvdz0;
- u16 rsvdz1;
- u16 rsvdz2;
-
- u16 rsvdz3:14;
- u16 device_type:2;
- } ioapic;
-
- /* HV_DEVICE_TYPE_ACPI */
- struct {
- u32 input_mapping_base;
- u32 input_mapping_count:30;
- u32 device_type:2;
- } acpi;
-} __packed;
-
-enum hv_interrupt_trigger_mode {
- HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
- HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
-};
-
-struct hv_device_interrupt_descriptor {
- u32 interrupt_type;
- u32 trigger_mode;
- u32 vector_count;
- u32 reserved;
- struct hv_device_interrupt_target target;
-} __packed;
-
-struct hv_input_map_device_interrupt {
- u64 partition_id;
- u64 device_id;
- u64 flags;
- struct hv_interrupt_entry logical_interrupt_entry;
- struct hv_device_interrupt_descriptor interrupt_descriptor;
-} __packed;
-
-struct hv_output_map_device_interrupt {
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-struct hv_input_unmap_device_interrupt {
- u64 partition_id;
- u64 device_id;
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-#define HV_SOURCE_SHADOW_NONE 0x0
-#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
-
-/*
- * Version info reported by hypervisor
- */
-union hv_hypervisor_version_info {
- struct {
- u32 build_number;
-
- u32 minor_version : 16;
- u32 major_version : 16;
-
- u32 service_pack;
-
- u32 service_number : 24;
- u32 service_branch : 8;
- };
- struct {
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
- };
-};
-
-/*
- * The whole argument should fit in a page to be able to pass to the hypervisor
- * in one hypercall.
- */
-#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
- ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
- sizeof(union hv_gpa_page_range))
-
-/* HvExtCallMemoryHeatHint hypercall */
-#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
-struct hv_memory_hint {
- u64 type:2;
- u64 reserved:62;
- union hv_gpa_page_range ranges[];
-} __packed;
-
-/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
-#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
-
-struct hv_mmio_read_input {
- u64 gpa;
- u32 size;
- u32 reserved;
-} __packed;
-
-struct hv_mmio_read_output {
- u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
-} __packed;
-
-struct hv_mmio_write_input {
- u64 gpa;
- u32 size;
- u32 reserved;
- u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
-} __packed;
-
-#endif
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 8fe7aaab2599..a7bbe504e4f3 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -6,9 +6,8 @@
* independent. See arch/<arch>/include/asm/mshyperv.h for definitions
* that are specific to architecture <arch>.
*
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
*
* Copyright (C) 2019, Microsoft, Inc.
*
@@ -25,7 +24,7 @@
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define VTPM_BASE_ADDRESS 0xfed40000
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 7c48f5fbf8aa..892ece4558a2 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -109,8 +109,7 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
struct ptdesc *ptdesc = page_ptdesc(pte_page);
- pagetable_pte_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
@@ -153,8 +152,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pagetable_pmd_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
#endif
@@ -202,8 +200,7 @@ static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- pagetable_pud_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
#ifndef __HAVE_ARCH_PUD_FREE
@@ -215,10 +212,82 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#if CONFIG_PGTABLE_LEVELS > 4
+
+static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ gfp &= ~__GFP_HIGHMEM;
+
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_p4d_ctor(ptdesc);
+ return ptdesc_address(ptdesc);
+}
+#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))
+
+#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
+static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ return __p4d_alloc_one_noprof(mm, addr);
+}
+#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
+#endif
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
+
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
+
+#ifndef __HAVE_ARCH_P4D_FREE
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ __p4d_free(mm, p4d);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 4 */
+
+static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ gfp &= ~__GFP_HIGHMEM;
+
+ ptdesc = pagetable_alloc_noprof(gfp, order);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pgd_ctor(ptdesc);
+ return ptdesc_address(ptdesc);
+}
+#define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
+
+static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
+
+ BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
+
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pagetable_free(virt_to_ptdesc(pgd));
+ __pgd_free(mm, pgd);
}
#endif
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5a80fe728dc8..182b039ce5fa 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This file is a stub providing documentation for what functions
- * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions
* will be simple inlines.
*
* All of these functions expect to be called with no locks,
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 709830274b75..e402aef79c93 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -153,8 +153,9 @@
*
* Useful if your architecture has non-page page directories.
*
- * When used, an architecture is expected to provide __tlb_remove_table()
- * which does the actual freeing of these pages.
+ * When used, an architecture is expected to provide __tlb_remove_table() or
+ * use the generic __tlb_remove_table(), which does the actual freeing of these
+ * pages.
*
* MMU_GATHER_RCU_TABLE_FREE
*
@@ -207,16 +208,31 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+static inline void __tlb_remove_table(void *table)
+{
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+
+ pagetable_dtor_free(ptdesc);
+}
+#endif
+
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/*
* Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
* page directories and we can use the normal page batching to free them.
*/
-#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct page *page = (struct page *)table;
+ pagetable_dtor(page_ptdesc(page));
+ tlb_remove_page(tlb, page);
+}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 54504013c749..02a4adb4a999 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1038,6 +1038,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
*(.discard) \
*(.discard.*) \
*(.export_symbol) \
+ *(.no_trim_symbol) \
*(.modinfo) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index cbbc9a6dc571..ce6521ad04d1 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -22,6 +22,12 @@
#define CNTHCTL_EVNTDIR (1 << 3)
#define CNTHCTL_EVNTI (0xF << 4)
#define CNTHCTL_ECV (1 << 12)
+#define CNTHCTL_EL1TVT (1 << 13)
+#define CNTHCTL_EL1TVCT (1 << 14)
+#define CNTHCTL_EL1NVPCT (1 << 15)
+#define CNTHCTL_EL1NVVCT (1 << 16)
+#define CNTHCTL_CNTVMASK (1 << 18)
+#define CNTHCTL_CNTPMASK (1 << 19)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index aa5233b1eba9..d48dd4176fd3 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -15,7 +15,7 @@
#include <linux/clocksource.h>
#include <linux/math64.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 81330c6446f6..b0853f7cada0 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
-/* A slow generic version of gf_mul, implemented for lle and bbe
+/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
-void gf128mul_bbe(be128 *a, const be128 *b);
-
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 58967593b6b4..84da3424decc 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -12,20 +12,6 @@
#include <crypto/hash.h>
struct ahash_request;
-struct scatterlist;
-
-struct crypto_hash_walk {
- char *data;
-
- unsigned int offset;
- unsigned int flags;
-
- struct page *pg;
- unsigned int entrylen;
-
- unsigned int total;
- struct scatterlist *sg;
-};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -57,15 +43,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
-int crypto_hash_walk_first(struct ahash_request *req,
- struct crypto_hash_walk *walk);
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
- return !(walk->entrylen | walk->total);
-}
-
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 7ae42afdcf3e..4f49621d3eb6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -11,7 +11,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
-#include <linux/list.h>
#include <linux/types.h>
/*
@@ -58,12 +57,6 @@ struct crypto_lskcipher_spawn {
struct skcipher_walk {
union {
struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
void *addr;
} virt;
} src, dst;
@@ -74,8 +67,6 @@ struct skcipher_walk {
struct scatter_walk out;
unsigned int total;
- struct list_head buffers;
-
u8 *page;
u8 *buffer;
u8 *oiv;
@@ -205,17 +196,14 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err);
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
diff --git a/include/cxl/event.h b/include/cxl/event.h
index 0bea1afbd747..04edd44bd26f 100644
--- a/include/cxl/event.h
+++ b/include/cxl/event.h
@@ -18,7 +18,8 @@ struct cxl_event_record_hdr {
__le16 related_handle;
__le64 timestamp;
u8 maint_op_class;
- u8 reserved[15];
+ u8 maint_op_sub_class;
+ u8 reserved[14];
} __packed;
struct cxl_event_media_hdr {
@@ -44,19 +45,22 @@ struct cxl_event_generic {
/*
* General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
*/
#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
struct cxl_event_gen_media {
struct cxl_event_media_hdr media_hdr;
u8 device[3];
u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
- u8 reserved[46];
+ u8 cme_threshold_ev_flags;
+ u8 cme_count[3];
+ u8 sub_type;
+ u8 reserved[41];
} __packed;
/*
* DRAM Event Record - DER
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
*/
#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
struct cxl_event_dram {
@@ -67,12 +71,17 @@ struct cxl_event_dram {
u8 row[3];
u8 column[2];
u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
- u8 reserved[0x17];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 sub_channel;
+ u8 cme_threshold_ev_flags;
+ u8 cvme_count[3];
+ u8 sub_type;
+ u8 reserved;
} __packed;
/*
* Get Health Info Record
- * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
*/
struct cxl_get_health_info {
u8 health_status;
@@ -87,13 +96,16 @@ struct cxl_get_health_info {
/*
* Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
*/
struct cxl_event_mem_module {
struct cxl_event_record_hdr hdr;
u8 event_type;
struct cxl_get_health_info info;
- u8 reserved[0x3d];
+ u8 validity_flags[2];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 event_sub_type;
+ u8 reserved[0x2a];
} __packed;
union cxl_event {
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index f77fe1531cf8..9732f514566d 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -32,6 +32,7 @@
#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+#include <drm/drm_device.h>
struct debugfs_regset32;
struct drm_device;
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index e850dc3a1ad3..1b3e0176dcb7 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -284,6 +284,7 @@
#define QCOM_ID_QCS9100 667
#define QCOM_ID_QCS8300 674
#define QCOM_ID_QCS8275 675
+#define QCOM_ID_QCS9075 676
#define QCOM_ID_QCS615 680
/*
diff --git a/include/dt-bindings/iio/adi,ad4695.h b/include/dt-bindings/iio/adc/adi,ad4695.h
index 9fbef542bf67..9fbef542bf67 100644
--- a/include/dt-bindings/iio/adi,ad4695.h
+++ b/include/dt-bindings/iio/adc/adi,ad4695.h
diff --git a/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
new file mode 100644
index 000000000000..30563952a646
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SOCCP_AGGR_NOC 4
+#define MASTER_SP 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_QDSS_ETR_1 7
+#define MASTER_SDCC_2 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_EVA_CFG 7
+#define SLAVE_GFX3D_CFG 8
+#define SLAVE_I2C 9
+#define SLAVE_I3C_IBI0_CFG 10
+#define SLAVE_I3C_IBI1_CFG 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_CNOC_MSS 13
+#define SLAVE_PCIE_CFG 14
+#define SLAVE_PRNG 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_QSPI_0 17
+#define SLAVE_QUP_3 18
+#define SLAVE_QUP_1 19
+#define SLAVE_QUP_2 20
+#define SLAVE_SDCC_2 21
+#define SLAVE_SDCC_4 22
+#define SLAVE_SPSS_CFG 23
+#define SLAVE_TCSR 24
+#define SLAVE_TLMM 25
+#define SLAVE_UFS_MEM_CFG 26
+#define SLAVE_USB3_0 27
+#define SLAVE_VENUS_CFG 28
+#define SLAVE_VSENSE_CTRL_CFG 29
+#define SLAVE_CNOC_MNOC_CFG 30
+#define SLAVE_PCIE_ANOC_CFG 31
+#define SLAVE_QDSS_STM 32
+#define SLAVE_TCU 33
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_BOOT_IMEM_2 12
+#define SLAVE_SERVICE_CNOC 13
+#define SLAVE_PCIE_0 14
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_UBWC_P 11
+#define MASTER_GIC 12
+#define SLAVE_UBWC_P 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_NRT_ICP_SF 1
+#define MASTER_CAMNOC_RT_CDM_SF 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP 4
+#define MASTER_CDSP_HCP 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_EVA 7
+#define MASTER_VIDEO_MVP 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define SLAVE_SERVICE_PCIE_ANOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/media/video-interfaces.h b/include/dt-bindings/media/video-interfaces.h
index 68ac4e05e37f..88b9d05d8075 100644
--- a/include/dt-bindings/media/video-interfaces.h
+++ b/include/dt-bindings/media/video-interfaces.h
@@ -13,4 +13,11 @@
#define MEDIA_BUS_TYPE_PARALLEL 5
#define MEDIA_BUS_TYPE_BT656 6
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC 0
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ACB 1
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BAC 2
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA 3
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CAB 4
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CBA 5
+
#endif /* __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__ */
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
new file mode 100644
index 000000000000..5917096720bd
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G3E family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZG3E_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZG3E_P0 0
+#define RZG3E_P1 1
+#define RZG3E_P2 2
+#define RZG3E_P3 3
+#define RZG3E_P4 4
+#define RZG3E_P5 5
+#define RZG3E_P6 6
+#define RZG3E_P7 7
+#define RZG3E_P8 8
+#define RZG3E_PA 10
+#define RZG3E_PB 11
+#define RZG3E_PC 12
+#define RZG3E_PD 13
+#define RZG3E_PE 14
+#define RZG3E_PF 15
+#define RZG3E_PG 16
+#define RZG3E_PH 17
+#define RZG3E_PJ 19
+#define RZG3E_PK 20
+#define RZG3E_PL 21
+#define RZG3E_PM 22
+#define RZG3E_PS 28
+
+#define RZG3E_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZG3E_P##b, p, f)
+#define RZG3E_GPIO(port, pin) RZG2L_GPIO(RZG3E_P##port, pin)
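+
+/*
+ * Illustrative usage (not part of this patch): in a board .dts, pinmux and
+ * GPIO entries are built from the helpers above, e.g.
+ *
+ *   pinmux = <RZG3E_PORT_PINMUX(8, 3, 1)>;
+ *   gpios  = <&pinctrl RZG3E_GPIO(A, 0) GPIO_ACTIVE_HIGH>;
+ *
+ * The port/pin/function numbers are placeholders only; RZG3E_PORT_PINMUX(b, p, f)
+ * simply expands to RZG2L_PORT_PINMUX(RZG3E_P<b>, p, f).
+ */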
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
new file mode 100644
index 000000000000..2e83bf43160b
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2H family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZV2H_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZV2H_P0 0
+#define RZV2H_P1 1
+#define RZV2H_P2 2
+#define RZV2H_P3 3
+#define RZV2H_P4 4
+#define RZV2H_P5 5
+#define RZV2H_P6 6
+#define RZV2H_P7 7
+#define RZV2H_P8 8
+#define RZV2H_P9 9
+#define RZV2H_PA 10
+#define RZV2H_PB 11
+
+#define RZV2H_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZV2H_P##b, p, f)
+#define RZV2H_GPIO(port, pin) RZG2L_GPIO(RZV2H_P##port, pin)
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ */
diff --git a/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
new file mode 100644
index 000000000000..7693552f1507
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2024, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Jan Dakinevich <jan.dakinevich@salutedevices.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+
+#define AUD_RESET_DDRARB 0
+#define AUD_RESET_TDMIN_A 1
+#define AUD_RESET_TDMIN_B 2
+#define AUD_RESET_TDMIN_LB 3
+#define AUD_RESET_LOOPBACK 4
+#define AUD_RESET_TDMOUT_A 5
+#define AUD_RESET_TDMOUT_B 6
+#define AUD_RESET_FRDDR_A 7
+#define AUD_RESET_FRDDR_B 8
+#define AUD_RESET_TODDR_A 9
+#define AUD_RESET_TODDR_B 10
+#define AUD_RESET_SPDIFIN 11
+#define AUD_RESET_RESAMPLE 12
+#define AUD_RESET_EQDRC 13
+#define AUD_RESET_LOCKER 14
+#define AUD_RESET_TOACODEC 30
+#define AUD_RESET_CLKTREE 31
+
+#define AUD_VAD_RESET_DDRARB 0
+#define AUD_VAD_RESET_PDM 1
+#define AUD_VAD_RESET_TDMIN_VAD 2
+#define AUD_VAD_RESET_TODDR_VAD 3
+#define AUD_VAD_RESET_TOVAD 4
+#define AUD_VAD_RESET_CLKTREE 5
+
+#endif /* _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H */
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
index f5e9f1db091e..4fc68aeb9e04 100644
--- a/include/dt-bindings/sound/qcom,wcd9335.h
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -10,6 +10,5 @@
#define AIF3_PB 4
#define AIF3_CAP 5
#define AIF4_PB 6
-#define NUM_CODEC_DAIS 7
#endif
diff --git a/include/hyperv/hvgdk.h b/include/hyperv/hvgdk.h
new file mode 100644
index 000000000000..dd6d4939ea29
--- /dev/null
+++ b/include/hyperv/hvgdk.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_H
+#define _HV_HVGDK_H
+
+#include "hvgdk_mini.h"
+#include "hvgdk_ext.h"
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V TLFS specification.
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that are currently hosted
+ * on Hyper-V. The implementation here conforms to these as yet
+ * unpublished guidelines.
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - Os Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
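+/*
+ * Illustrative sketch (not part of this patch): a Linux guest can compose
+ * its guest OS ID from the layout above roughly as
+ *
+ *   guest_id  = (u64)HV_LINUX_VENDOR_ID << 48;
+ *   guest_id |= kernel_version << 16;
+ *
+ * and write the result to the guest OS ID register/MSR. Whether bits 15:0
+ * carry a distro-specific value is left to the distribution.
+ */
+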
+/* HV_VMX_ENLIGHTENED_VMCS */
+struct hv_enlightened_vmcs {
+ u32 revision_id;
+ u32 abort;
+
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+
+ u16 padding16_1;
+
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+
+ u64 host_cr0;
+ u64 host_cr3;
+ u64 host_cr4;
+
+ u64 host_ia32_sysenter_esp;
+ u64 host_ia32_sysenter_eip;
+ u64 host_rip;
+ u32 host_ia32_sysenter_cs;
+
+ u32 pin_based_vm_exec_control;
+ u32 vm_exit_controls;
+ u32 secondary_vm_exec_control;
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+
+ u64 guest_es_base;
+ u64 guest_cs_base;
+ u64 guest_ss_base;
+ u64 guest_ds_base;
+ u64 guest_fs_base;
+ u64 guest_gs_base;
+ u64 guest_ldtr_base;
+ u64 guest_tr_base;
+ u64 guest_gdtr_base;
+ u64 guest_idtr_base;
+
+ u64 padding64_1[3];
+
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+
+ u64 cr3_target_value0;
+ u64 cr3_target_value1;
+ u64 cr3_target_value2;
+ u64 cr3_target_value3;
+
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+
+ u32 cr3_target_count;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_msr_load_count;
+
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 vmcs_link_pointer;
+
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+
+ u64 guest_pending_dbg_exceptions;
+ u64 guest_sysenter_esp;
+ u64 guest_sysenter_eip;
+
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+
+ u64 cr0_guest_host_mask;
+ u64 cr4_guest_host_mask;
+ u64 cr0_read_shadow;
+ u64 cr4_read_shadow;
+ u64 guest_cr0;
+ u64 guest_cr3;
+ u64 guest_cr4;
+ u64 guest_dr7;
+
+ u64 host_fs_base;
+ u64 host_gs_base;
+ u64 host_tr_base;
+ u64 host_gdtr_base;
+ u64 host_idtr_base;
+ u64 host_rsp;
+
+ u64 ept_pointer;
+
+ u16 virtual_processor_id;
+ u16 padding16_2[3];
+
+ u64 padding64_2[5];
+ u64 guest_physical_address;
+
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+
+ u64 exit_qualification;
+ u64 exit_io_instruction_ecx;
+ u64 exit_io_instruction_esi;
+ u64 exit_io_instruction_edi;
+ u64 exit_io_instruction_eip;
+
+ u64 guest_linear_address;
+ u64 guest_rsp;
+ u64 guest_rflags;
+
+ u32 guest_interruptibility_info;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 vm_entry_controls;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+
+ u64 guest_rip;
+
+ u32 hv_clean_fields;
+ u32 padding32_1;
+ u32 hv_synthetic_controls;
+ struct {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 reserved:30;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u32 padding32_2;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 padding64_4[4];
+ u64 guest_bndcfgs;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_ia32_s_cet;
+ u64 guest_ssp;
+ u64 guest_ia32_int_ssp_table_addr;
+ u64 guest_ia32_lbr_ctl;
+ u64 padding64_5[2];
+ u64 xss_exit_bitmap;
+ u64 encls_exiting_bitmap;
+ u64 host_ia32_perf_global_ctrl;
+ u64 tsc_multiplier;
+ u64 host_ia32_s_cet;
+ u64 host_ssp;
+ u64 host_ia32_int_ssp_table_addr;
+ u64 padding64_6;
+} __packed;
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
+
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
+
+/*
+ * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
+ * pairing it with architecturally impossible exit reasons. Bit 28 is set only
+ * on SMI exits to an SMI transfer monitor (STM) and if and only if an MTF VM-Exit
+ * is pending. I.e. it will never be set by hardware for non-SMI exits (there
+ * are only three), nor will it ever be set unless the VMM is an STM.
+ */
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
+/*
+ * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose
+ * SVM enlightenments to guests. This is documented in the TLFS doc.
+ * Note on naming: SVM_NESTED_ENLIGHTENED_VMCB_FIELDS
+ */
+struct hv_vmcb_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall : 1;
+ u32 msr_bitmap : 1;
+ u32 enlightened_npt_tlb: 1;
+ u32 reserved : 29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB.
+ */
+#define HV_VMCB_NESTED_ENLIGHTENMENTS 31
+
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL 0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
+
+/* VM_PARTITION_ASSIST_PAGE */
+struct hv_partition_assist_pg {
+ u32 tlb_lock_count;
+};
+
+/* Define connection identifier type. */
+union hv_connection_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+struct hv_input_unmap_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 unmap_flags;
+ u32 padding;
+} __packed;
+
+#endif /* #ifndef _HV_HVGDK_H */
diff --git a/include/hyperv/hvgdk_ext.h b/include/hyperv/hvgdk_ext.h
new file mode 100644
index 000000000000..641b591ee61f
--- /dev/null
+++ b/include/hyperv/hvgdk_ext.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_EXT_H
+#define _HV_HVGDK_EXT_H
+
+#include "hvgdk_mini.h"
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+/* Extended hypercalls */
+enum { /* HV_EXT_CALL */
+ HV_EXTCALL_QUERY_CAPABILITIES = 0x8001,
+ HV_EXTCALL_MEMORY_HEAT_HINT = 0x8003,
+};
+
+/* HV_EXT_OUTPUT_QUERY_CAPABILITIES */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum { /* HV_EXT_MEMORY_HEAT_HINT_TYPE */
+ HV_EXTMEM_HEAT_HINT_COLD = 0,
+ HV_EXTMEM_HEAT_HINT_HOT = 1,
+ HV_EXTMEM_HEAT_HINT_COLD_DISCARD = 2,
+ HV_EXTMEM_HEAT_HINT_MAX
+};
+
+/*
+ * The whole argument must fit in a single page so it can be passed to the hypervisor
+ * in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
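+
+/*
+ * Worked example (illustrative, assuming 4 KiB hypervisor pages): the fixed
+ * header of struct hv_memory_hint is a single u64 (8 bytes) and each
+ * union hv_gpa_page_range entry is 8 bytes, so
+ * HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES = (4096 - 8) / 8 = 511 ranges.
+ */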
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint { /* HV_EXT_INPUT_MEMORY_HEAT_HINT */
+ u64 heat_type : 2; /* HV_EXTMEM_HEAT_HINT_* */
+ u64 reserved : 62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+#endif /* _HV_HVGDK_EXT_H */
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
new file mode 100644
index 000000000000..155615175965
--- /dev/null
+++ b/include/hyperv/hvgdk_mini.h
@@ -0,0 +1,1348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVGDK_MINI_H
+#define _HV_HVGDK_MINI_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct hv_u128 {
+ u64 low_part;
+ u64 high_part;
+} __packed;
+
+/* NOTE: when adding below, update hv_status_to_string() */
+#define HV_STATUS_SUCCESS 0x0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 0x2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 0x3
+#define HV_STATUS_INVALID_ALIGNMENT 0x4
+#define HV_STATUS_INVALID_PARAMETER 0x5
+#define HV_STATUS_ACCESS_DENIED 0x6
+#define HV_STATUS_INVALID_PARTITION_STATE 0x7
+#define HV_STATUS_OPERATION_DENIED 0x8
+#define HV_STATUS_UNKNOWN_PROPERTY 0x9
+#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE 0xA
+#define HV_STATUS_INSUFFICIENT_MEMORY 0xB
+#define HV_STATUS_INVALID_PARTITION_ID 0xD
+#define HV_STATUS_INVALID_VP_INDEX 0xE
+#define HV_STATUS_NOT_FOUND 0x10
+#define HV_STATUS_INVALID_PORT_ID 0x11
+#define HV_STATUS_INVALID_CONNECTION_ID 0x12
+#define HV_STATUS_INSUFFICIENT_BUFFERS 0x13
+#define HV_STATUS_NOT_ACKNOWLEDGED 0x14
+#define HV_STATUS_INVALID_VP_STATE 0x15
+#define HV_STATUS_NO_RESOURCES 0x1D
+#define HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED 0x20
+#define HV_STATUS_INVALID_LP_INDEX 0x41
+#define HV_STATUS_INVALID_REGISTER_VALUE 0x50
+#define HV_STATUS_OPERATION_FAILED 0x71
+#define HV_STATUS_TIME_OUT 0x78
+#define HV_STATUS_CALL_PENDING 0x79
+#define HV_STATUS_VTL_ALREADY_ENABLED 0x86
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC
+ * page provide a guest VM clock with 100ns tick rate
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC / 100)
+
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+
+#define HV_PARTITION_ID_INVALID ((u64)0)
+#define HV_PARTITION_ID_SELF ((u64)-1)
+
+/* Hyper-V specific model specific registers (MSRs) */
+
+#if defined(CONFIG_X86)
+/* HV_X64_SYNTHETIC_MSR */
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+#define HV_X64_MSR_HYPERCALL 0x40000001
+#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_X64_MSR_RESET 0x40000003
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+
+/* Define the virtual APIC registers */
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+
+/* Define synthetic interrupt controller model specific registers. */
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SIRBP 0x40000085
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+
+/* Define synthetic interrupt controller model specific registers for nested hypervisor */
+#define HV_X64_MSR_NESTED_SCONTROL 0x40001080
+#define HV_X64_MSR_NESTED_SVERSION 0x40001081
+#define HV_X64_MSR_NESTED_SIEFP 0x40001082
+#define HV_X64_MSR_NESTED_SIMP 0x40001083
+#define HV_X64_MSR_NESTED_EOM 0x40001084
+#define HV_X64_MSR_NESTED_SINT0 0x40001090
+
+/*
+ * Synthetic Timer MSRs. Four timers per vcpu.
+ */
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+
+/* Hyper-V guest idle MSR */
+#define HV_X64_MSR_GUEST_IDLE 0x400000F0
+
+/* Hyper-V guest crash notification MSR's */
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+
+#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
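+
+/*
+ * Illustrative sketch (not part of this patch): enabling the hypercall page
+ * typically combines the page's GPA with the enable bit, e.g.
+ *
+ *   u64 val = (hypercall_pg_gpa & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK) |
+ *             HV_X64_MSR_HYPERCALL_ENABLE;
+ *   wrmsrl(HV_X64_MSR_HYPERCALL, val);
+ *
+ * The variable names are placeholders; real code also reads the MSR first and
+ * preserves its other bits.
+ */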
+
+#define HV_X64_MSR_CRASH_PARAMS \
+ (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
+#define HV_IPI_LOW_VECTOR 0x10
+#define HV_IPI_HIGH_VECTOR 0xff
+
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Hyper-V Enlightened VMCS version mask in nested features CPUID */
+#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff
+
+#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
+#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
+
+struct hv_reenlightenment_control {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 enabled : 1;
+ u64 reserved2 : 15;
+ u64 target_vp : 32;
+} __packed;
+
+struct hv_tsc_emulation_status { /* HV_TSC_EMULATION_STATUS */
+ u64 inprogress : 1;
+ u64 reserved : 63;
+} __packed;
+
+struct hv_tsc_emulation_control { /* HV_TSC_INVARIANT_CONTROL */
+ u64 enabled : 1;
+ u64 reserved : 63;
+} __packed;
+
+/* TSC emulation after migration */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0)
+
+#endif /* CONFIG_X86 */
+
+struct hv_get_partition_id { /* HV_OUTPUT_GET_PARTITION_ID */
+ u64 partition_id;
+} __packed;
+
+/* HV_CRASH_CTL_REG_CONTENTS */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
+#define HV_MAX_SPARSE_VCPU_BANKS (64)
+/* The number of vCPUs in one sparse bank */
+#define HV_VCPUS_PER_SPARSE_BANK (64)
+
+/* Some Hyper-V structs do not use hv_vpset, even where the Linux code does */
+struct hv_vpset { /* HV_VP_SET */
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
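+
+/*
+ * Illustrative sketch (not part of this patch): with the sparse bank format,
+ * a VP index is placed into the set roughly as
+ *
+ *   bank = vp_index / HV_VCPUS_PER_SPARSE_BANK;
+ *   bit  = vp_index % HV_VCPUS_PER_SPARSE_BANK;
+ *   valid_bank_mask  |= BIT_ULL(bank);
+ *   bank_contents[n] |= BIT_ULL(bit);
+ *
+ * where n is the position of 'bank' among the set bits of valid_bank_mask.
+ */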
+
+/*
+ * Version info reported by hypervisor
+ * Changed to a union for convenience
+ */
+union hv_hypervisor_version_info {
+ struct {
+ u32 build_number;
+
+ u32 minor_version : 16;
+ u32 major_version : 16;
+
+ u32 service_pack;
+
+ u32 service_number : 24;
+ u32 service_branch : 8;
+ };
+ struct {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ };
+};
+
+/* HV_CPUID_FUNCTION */
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HYPERV_CPUID_INTERFACE 0x40000001
+#define HYPERV_CPUID_VERSION 0x40000002
+#define HYPERV_CPUID_FEATURES 0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
+#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C
+
+#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
+#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
+
+#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
+/* Support for the extended IOAPIC RTE format */
+#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
+
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EAX), or
+ * HV_PARTITION_PRIVILEGE_MASK [31-0]
+ */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EBX), or
+ * HV_PARTITION_PRIVILEGE_MASK [63-32]
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+#if defined(CONFIG_X86)
+/* HV_X64_HYPERVISOR_FEATURES (EDX) */
+#define HV_X64_MWAIT_AVAILABLE BIT(0)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
+#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
+#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
+/* stimer Direct Mode is available */
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
+
+/*
+ * Implementation recommendations. Indicates which behaviors the hypervisor
+ * recommends the OS implement for optimal performance.
+ * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits.
+ */
+/* HV_X64_ENLIGHTENMENT_INFORMATION */
+#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
+#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
+#define HV_X64_HYPERV_NESTED BIT(12)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
+#define HV_X64_USE_MMIO_HYPERCALLS BIT(21)
+
+/*
+ * CPU management features identification.
+ * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
+ */
+#define HV_X64_START_LOGICAL_PROCESSOR BIT(0)
+#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1)
+#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2)
+#define HV_X64_RESERVED_IDENTITY_BIT BIT(31)
+
+/*
+ * Virtual processor will never share a physical core with another virtual
+ * processor, except for virtual processors that are reported as sibling SMT
+ * threads.
+ */
+#define HV_X64_NO_NONARCH_CORESHARING BIT(18)
+
+/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
+#define HV_X64_NESTED_DIRECT_FLUSH BIT(17)
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
+#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+
+/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0)
+
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If the guest opts in to this feature, ASID invalidations only
+ * flush gva -> hpa mapping entries. To flush the TLB entries derived
+ * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace
+ * or HvFlushGuestPhysicalAddressList).
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
+#define HV_PARAVISOR_PRESENT BIT(0)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */
+#define HV_ISOLATION_TYPE GENMASK(3, 0)
+#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5)
+#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6)
+
+enum hv_isolation_type {
+ HV_ISOLATION_TYPE_NONE = 0, /* HV_PARTITION_ISOLATION_TYPE_NONE */
+ HV_ISOLATION_TYPE_VBS = 1,
+ HV_ISOLATION_TYPE_SNP = 2,
+ HV_ISOLATION_TYPE_TDX = 3
+};
+
+union hv_x64_msr_hypercall_contents {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 guest_physical_address : 52;
+ } __packed;
+};
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARM64)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
+#endif /* CONFIG_ARM64 */
+
+#if defined(CONFIG_X86)
+#define HV_MAXIMUM_PROCESSORS 2048
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+#define HV_MAXIMUM_PROCESSORS 320
+#endif /* CONFIG_ARM64 */
+
+#define HV_MAX_VP_INDEX (HV_MAXIMUM_PROCESSORS - 1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+#define HV_ANY_VP ((u32)-1)
+
+union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* Declare the various hypercall operations. */
+/* HV_CALL_CODE */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_ENABLE_VP_VTL 0x000f
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_CREATE_PARTITION 0x0040
+#define HVCALL_INITIALIZE_PARTITION 0x0041
+#define HVCALL_FINALIZE_PARTITION 0x0042
+#define HVCALL_DELETE_PARTITION 0x0043
+#define HVCALL_GET_PARTITION_PROPERTY 0x0044
+#define HVCALL_SET_PARTITION_PROPERTY 0x0045
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_WITHDRAW_MEMORY 0x0049
+#define HVCALL_MAP_GPA_PAGES 0x004b
+#define HVCALL_UNMAP_GPA_PAGES 0x004c
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_DELETE_VP 0x004f
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_DELETE_PORT 0x0058
+#define HVCALL_DISCONNECT_PORT 0x005b
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_GET_SYSTEM_PROPERTY 0x007b
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b
+#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094
+#define HVCALL_CREATE_PORT 0x0095
+#define HVCALL_CONNECT_PORT 0x0096
+#define HVCALL_START_VP 0x0099
+#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_DISPATCH_VP 0x00c2
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+#define HVCALL_MAP_VP_STATE_PAGE 0x00e1
+#define HVCALL_UNMAP_VP_STATE_PAGE 0x00e2
+#define HVCALL_GET_VP_STATE 0x00e3
+#define HVCALL_SET_VP_STATE 0x00e4
+#define HVCALL_MMIO_READ 0x0106
+#define HVCALL_MMIO_WRITE 0x0107
+
+/* HV_HYPERCALL_INPUT */
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_NESTED BIT_ULL(31)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
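+
+/*
+ * Illustrative sketch (not part of this patch): a rep hypercall control word
+ * is typically assembled as
+ *
+ *   u64 control = HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
+ *                 (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+ *
+ * and the completion status is extracted from the hypercall's return value
+ * with HV_HYPERCALL_RESULT_MASK ('rep_count' is a placeholder variable).
+ */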
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages : 11;
+ u64 largepage : 1;
+ u64 basepfn : 52;
+ } page;
+ struct {
+ u64 reserved : 12;
+ u64 page_size : 1;
+ u64 reserved1 : 8;
+ u64 base_large_pfn : 43;
+ };
+};
+
+/*
+ * All input flush parameters must fit in a single page. The max flush
+ * count equals the number of union hv_gpa_page_range entries that can
+ * be populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
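+
+/*
+ * Worked example (illustrative): address_space and flags occupy 2 * 8 bytes,
+ * and each union hv_gpa_page_range entry is 8 bytes, so with 4 KiB pages
+ * HV_MAX_FLUSH_REP_COUNT = (4096 - 16) / 8 = 510 entries.
+ */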
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
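+
+/*
+ * Illustrative sketch (not part of this patch, barriers omitted): readers of
+ * the reference TSC page retry around the sequence counter, e.g.
+ *
+ *   do {
+ *       seq    = READ_ONCE(tsc_pg->tsc_sequence);
+ *       scale  = READ_ONCE(tsc_pg->tsc_scale);
+ *       offset = READ_ONCE(tsc_pg->tsc_offset);
+ *       tsc    = rdtsc();
+ *   } while (READ_ONCE(tsc_pg->tsc_sequence) != seq);
+ *
+ *   time = mul_u64_u64_shr(tsc, scale, 64) + offset;
+ *
+ * A sequence of 0 means the page is not valid and the caller should fall
+ * back to another clock source.
+ */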
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+#
+
+/* Hyper-V defined statically assigned SINTs */
+#define HV_SYNIC_INTERCEPTION_SINT_INDEX 0x00000000
+#define HV_SYNIC_IOMMU_FAULT_SINT_INDEX 0x00000001
+#define HV_SYNIC_VMBUS_SINT_INDEX 0x00000002
+#define HV_SYNIC_FIRST_UNUSED_SINT_INDEX 0x00000005
+
+/* mshv assigned SINT for doorbell */
+#define HV_SYNIC_DOORBELL_SINT_INDEX HV_SYNIC_FIRST_UNUSED_SINT_INDEX
+
+enum hv_interrupt_type {
+ HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
+ HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
+ HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
+ HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
+ HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
+ HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
+ HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
+ HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
+ HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
+ HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
+ HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 masked : 1;
+ u64 auto_eoi : 1;
+ u64 polling : 1;
+ u64 as_intercept : 1;
+ u64 proxy : 1;
+ u64 reserved2 : 43;
+ } __packed;
+};
+
+union hv_x64_xsave_xfem_register {
+ u64 as_uint64;
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ } __packed;
+ struct {
+ u64 legacy_x87 : 1;
+ u64 legacy_sse : 1;
+ u64 avx : 1;
+ u64 mpx_bndreg : 1;
+ u64 mpx_bndcsr : 1;
+ u64 avx_512_op_mask : 1;
+ u64 avx_512_zmmhi : 1;
+ u64 avx_512_zmm16_31 : 1;
+ u64 rsvd8_9 : 2;
+ u64 pasid : 1;
+ u64 cet_u : 1;
+ u64 cet_s : 1;
+ u64 rsvd13_16 : 4;
+ u64 xtile_cfg : 1;
+ u64 xtile_data : 1;
+ u64 rsvd19_63 : 45;
+ } __packed;
+};
+
+/* Synthetic timer configuration */
+union hv_stimer_config { /* HV_X64_MSR_STIMER_CONFIG_CONTENTS */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 periodic : 1;
+ u64 lazy : 1;
+ u64 auto_enable : 1;
+ u64 apic_vector : 8;
+ u64 direct_mode : 1;
+ u64 reserved_z0 : 3;
+ u64 sintx : 4;
+ u64 reserved_z1 : 44;
+ } __packed;
+};
+
+/* Define the number of synthetic timers */
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define port identifier type. */
+union hv_port_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/* Define hypervisor message types. */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /*
+ * Opaque intercept message. The original intercept message is only
+ * accessible from the mapped intercept message page.
+ */
+ HVMSG_OPAQUE_INTERCEPT = 0x8000003F,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Hypercall intercept */
+ HVMSG_HYPERCALL_INTERCEPT = 0x80000050,
+
+ /* SynIC intercepts */
+ HVMSG_SYNIC_EVENT_INTERCEPT = 0x80000060,
+ HVMSG_SYNIC_SINT_INTERCEPT = 0x80000061,
+ HVMSG_SYNIC_SINT_DELIVERABLE = 0x80000062,
+
+ /* Async call completion intercept */
+ HVMSG_ASYNC_CALL_COMPLETION = 0x80000070,
+
+ /* Root scheduler messages */
+ HVMSG_SCHEDULER_VP_SIGNAL_BITSET = 0x80000100,
+ HVMSG_SCHEDULER_VP_SIGNAL_PAIR = 0x80000101,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IO_PORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005,
+ HVMSG_X64_IOMMU_PRQ = 0x80010006,
+ HVMSG_X64_HALT = 0x80010007,
+ HVMSG_X64_INTERRUPTION_DELIVERABLE = 0x80010008,
+ HVMSG_X64_SIPI_INTERCEPT = 0x80010009,
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_simp_gpa : 52;
+ } __packed;
+};
+
+union hv_message_flags {
+ u8 asu8;
+ struct {
+ u8 msg_pending : 1;
+ u8 reserved : 7;
+ } __packed;
+};
+
+struct hv_message_header {
+ u32 message_type;
+ u8 payload_size;
+ union hv_message_flags message_flags;
+ u8 reserved[2];
+ union {
+ u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/*
+ * Message format for notifications delivered via
+ * intercept message (as_intercept=1)
+ */
+struct hv_notification_message_payload {
+ u32 sint_index;
+} __packed;
+
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ __u32 timer_index;
+ __u32 reserved;
+ __u64 expiration_time; /* When the timer expired */
+ __u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+struct hv_x64_segment_register {
+ u64 base;
+ u32 limit;
+ u16 selector;
+ union {
+ struct {
+ u16 segment_type : 4;
+ u16 non_system_segment : 1;
+ u16 descriptor_privilege_level : 2;
+ u16 present : 1;
+ u16 reserved : 4;
+ u16 available : 1;
+ u16 _long : 1;
+ u16 _default : 1;
+ u16 granularity : 1;
+ } __packed;
+ u16 attributes;
+ };
+} __packed;
+
+struct hv_x64_table_register {
+ u16 pad[3];
+ u16 limit;
+ u64 base;
+} __packed;
+
+union hv_input_vtl {
+ u8 as_uint8;
+ struct {
+ u8 target_vtl : 4;
+ u8 use_target_vtl : 1;
+ u8 reserved_z : 3;
+ };
+} __packed;
+
+struct hv_init_vp_context {
+ u64 rip;
+ u64 rsp;
+ u64 rflags;
+
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register tr;
+ struct hv_x64_segment_register ldtr;
+
+ struct hv_x64_table_register idtr;
+ struct hv_x64_table_register gdtr;
+
+ u64 efer;
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 msr_cr_pat;
+} __packed;
+
+struct hv_enable_vp_vtl {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl target_vtl;
+ u8 mbz0;
+ u16 mbz1;
+ struct hv_init_vp_context vp_context;
+} __packed;
+
+struct hv_get_vp_from_apic_id_in {
+ u64 partition_id;
+ union hv_input_vtl target_vtl;
+ u8 res[7];
+ u32 apic_ids[];
+} __packed;
+
+struct hv_nested_enlightenments_control {
+ struct {
+ u32 directhypercall : 1;
+ u32 reserved : 31;
+ } __packed features;
+ struct {
+ u32 inter_partition_comm : 1;
+ u32 reserved : 31;
+ } __packed hypercall_controls;
+} __packed;
+
+/* Define virtual processor assist page structure. */
+struct hv_vp_assist_page {
+ u32 apic_assist;
+ u32 reserved1;
+ u32 vtl_entry_reason;
+ u32 vtl_reserved;
+ u64 vtl_ret_x64rax;
+ u64 vtl_ret_x64rcx;
+ struct hv_nested_enlightenments_control nested_control;
+ u8 enlighten_vmentry;
+ u8 reserved2[7];
+ u64 current_nested_vmcs;
+ u8 synthetic_time_unhalted_timer_expired;
+ u8 reserved3[7];
+ u8 virtualization_fault_information[40];
+ u8 reserved4[8];
+ u8 intercept_message[256];
+ u8 vtl_ret_actions[256];
+} __packed;
+
+enum hv_register_name {
+ /* Suspend Registers */
+ HV_REGISTER_EXPLICIT_SUSPEND = 0x00000000,
+ HV_REGISTER_INTERCEPT_SUSPEND = 0x00000001,
+ HV_REGISTER_DISPATCH_SUSPEND = 0x00000003,
+
+ /* Version - 128-bit result same as CPUID 0x40000002 */
+ HV_REGISTER_HYPERVISOR_VERSION = 0x00000100,
+
+ /* Feature Access (registers are 128 bits) - same as CPUID 0x40000003 - 0x4000000B */
+ HV_REGISTER_PRIVILEGES_AND_FEATURES_INFO = 0x00000200,
+ HV_REGISTER_FEATURES_INFO = 0x00000201,
+ HV_REGISTER_IMPLEMENTATION_LIMITS_INFO = 0x00000202,
+ HV_REGISTER_HARDWARE_FEATURES_INFO = 0x00000203,
+ HV_REGISTER_CPU_MANAGEMENT_FEATURES_INFO = 0x00000204,
+ HV_REGISTER_SVM_FEATURES_INFO = 0x00000205,
+ HV_REGISTER_SKIP_LEVEL_FEATURES_INFO = 0x00000206,
+ HV_REGISTER_NESTED_VIRT_FEATURES_INFO = 0x00000207,
+ HV_REGISTER_IPT_FEATURES_INFO = 0x00000208,
+
+ /* Guest Crash Registers */
+ HV_REGISTER_GUEST_CRASH_P0 = 0x00000210,
+ HV_REGISTER_GUEST_CRASH_P1 = 0x00000211,
+ HV_REGISTER_GUEST_CRASH_P2 = 0x00000212,
+ HV_REGISTER_GUEST_CRASH_P3 = 0x00000213,
+ HV_REGISTER_GUEST_CRASH_P4 = 0x00000214,
+ HV_REGISTER_GUEST_CRASH_CTL = 0x00000215,
+
+ /* Misc */
+ HV_REGISTER_VP_RUNTIME = 0x00090000,
+ HV_REGISTER_GUEST_OS_ID = 0x00090002,
+ HV_REGISTER_VP_INDEX = 0x00090003,
+ HV_REGISTER_TIME_REF_COUNT = 0x00090004,
+ HV_REGISTER_CPU_MANAGEMENT_VERSION = 0x00090007,
+ HV_REGISTER_VP_ASSIST_PAGE = 0x00090013,
+ HV_REGISTER_VP_ROOT_SIGNAL_COUNT = 0x00090014,
+ HV_REGISTER_REFERENCE_TSC = 0x00090017,
+
+ /* Hypervisor-defined Registers (Synic) */
+ HV_REGISTER_SINT0 = 0x000A0000,
+ HV_REGISTER_SINT1 = 0x000A0001,
+ HV_REGISTER_SINT2 = 0x000A0002,
+ HV_REGISTER_SINT3 = 0x000A0003,
+ HV_REGISTER_SINT4 = 0x000A0004,
+ HV_REGISTER_SINT5 = 0x000A0005,
+ HV_REGISTER_SINT6 = 0x000A0006,
+ HV_REGISTER_SINT7 = 0x000A0007,
+ HV_REGISTER_SINT8 = 0x000A0008,
+ HV_REGISTER_SINT9 = 0x000A0009,
+ HV_REGISTER_SINT10 = 0x000A000A,
+ HV_REGISTER_SINT11 = 0x000A000B,
+ HV_REGISTER_SINT12 = 0x000A000C,
+ HV_REGISTER_SINT13 = 0x000A000D,
+ HV_REGISTER_SINT14 = 0x000A000E,
+ HV_REGISTER_SINT15 = 0x000A000F,
+ HV_REGISTER_SCONTROL = 0x000A0010,
+ HV_REGISTER_SVERSION = 0x000A0011,
+ HV_REGISTER_SIEFP = 0x000A0012,
+ HV_REGISTER_SIMP = 0x000A0013,
+ HV_REGISTER_EOM = 0x000A0014,
+ HV_REGISTER_SIRBP = 0x000A0015,
+
+ HV_REGISTER_NESTED_SINT0 = 0x000A1000,
+ HV_REGISTER_NESTED_SINT1 = 0x000A1001,
+ HV_REGISTER_NESTED_SINT2 = 0x000A1002,
+ HV_REGISTER_NESTED_SINT3 = 0x000A1003,
+ HV_REGISTER_NESTED_SINT4 = 0x000A1004,
+ HV_REGISTER_NESTED_SINT5 = 0x000A1005,
+ HV_REGISTER_NESTED_SINT6 = 0x000A1006,
+ HV_REGISTER_NESTED_SINT7 = 0x000A1007,
+ HV_REGISTER_NESTED_SINT8 = 0x000A1008,
+ HV_REGISTER_NESTED_SINT9 = 0x000A1009,
+ HV_REGISTER_NESTED_SINT10 = 0x000A100A,
+ HV_REGISTER_NESTED_SINT11 = 0x000A100B,
+ HV_REGISTER_NESTED_SINT12 = 0x000A100C,
+ HV_REGISTER_NESTED_SINT13 = 0x000A100D,
+ HV_REGISTER_NESTED_SINT14 = 0x000A100E,
+ HV_REGISTER_NESTED_SINT15 = 0x000A100F,
+ HV_REGISTER_NESTED_SCONTROL = 0x000A1010,
+ HV_REGISTER_NESTED_SVERSION = 0x000A1011,
+ HV_REGISTER_NESTED_SIFP = 0x000A1012,
+ HV_REGISTER_NESTED_SIPP = 0x000A1013,
+ HV_REGISTER_NESTED_EOM = 0x000A1014,
+ HV_REGISTER_NESTED_SIRBP = 0x000a1015,
+
+ /* Hypervisor-defined Registers (Synthetic Timers) */
+ HV_REGISTER_STIMER0_CONFIG = 0x000B0000,
+ HV_REGISTER_STIMER0_COUNT = 0x000B0001,
+
+ /* VSM */
+ HV_REGISTER_VSM_VP_STATUS = 0x000D0003,
+};
+
+/*
+ * Arch compatibility regs for use with hv_set/get_register
+ */
+#if defined(CONFIG_X86)
+
+/*
+ * To support arch-generic code calling hv_set/get_register:
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
+ * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
+ */
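+
+/*
+ * Illustrative sketch (not part of this patch): arch-generic code can then
+ * access, say, the SIMP register with a helper such as
+ *
+ *   hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
+ *
+ * which resolves to a wrmsrl() on x86 and a SetVpRegisters hypercall on
+ * ARM64. The helper name is an assumption here; it is not defined in this
+ * header.
+ */
+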
+#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_X64_MSR_SINT0)
+#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION)
+#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL)
+#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP)
+#define HV_MSR_SIMP (HV_X64_MSR_SIMP)
+#define HV_MSR_EOM (HV_X64_MSR_EOM)
+#define HV_MSR_SIRBP (HV_X64_MSR_SIRBP)
+
+#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL)
+#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION)
+#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP)
+#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP)
+#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM)
+#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0)
+
+#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT)
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
+#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
+#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
+#define HV_MSR_SIMP (HV_REGISTER_SIMP)
+#define HV_MSR_EOM (HV_REGISTER_EOM)
+#define HV_MSR_SIRBP (HV_REGISTER_SIRBP)
+
+#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
+
+#endif /* CONFIG_ARM64 */
+
+union hv_explicit_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_intercept_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_dispatch_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u64 interruption_pending : 1;
+ u64 interruption_type: 1;
+ u64 reserved : 30;
+ u64 error_code : 32;
+ } __packed;
+};
+
+union hv_arm64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_synthetic_exception_event {
+ u64 as_uint64[2];
+ struct {
+ u8 event_pending : 1;
+ u8 event_type : 3;
+ u8 reserved : 4;
+ u8 rsvd[3];
+ u32 exception_type;
+ u64 context;
+ } __packed;
+};
+
+union hv_x64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 nmi_masked : 1;
+ u64 reserved : 62;
+ } __packed;
+};
+
+union hv_x64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u32 interruption_pending : 1;
+ u32 interruption_type : 3;
+ u32 deliver_error_code : 1;
+ u32 instruction_length : 4;
+ u32 nested_event : 1;
+ u32 reserved : 6;
+ u32 interruption_vector : 16;
+ u32 error_code;
+ } __packed;
+};
+
+union hv_register_value {
+ struct hv_u128 reg128;
+ u64 reg64;
+ u32 reg32;
+ u16 reg16;
+ u8 reg8;
+
+ struct hv_x64_segment_register segment;
+ struct hv_x64_table_register table;
+ union hv_explicit_suspend_register explicit_suspend;
+ union hv_intercept_suspend_register intercept_suspend;
+ union hv_dispatch_suspend_register dispatch_suspend;
+#ifdef CONFIG_ARM64
+ union hv_arm64_interrupt_state_register interrupt_state;
+ union hv_arm64_pending_interruption_register pending_interruption;
+#endif
+#ifdef CONFIG_X86
+ union hv_x64_interrupt_state_register interrupt_state;
+ union hv_x64_pending_interruption_register pending_interruption;
+#endif
+ union hv_arm64_pending_synthetic_exception_event pending_synthetic_exception_event;
+};
+
+/* NOTE: Linux helper struct - NOT from Hyper-V code. */
+struct hv_output_get_vp_registers {
+ DECLARE_FLEX_ARRAY(union hv_register_value, values);
+};
+
+#if defined(CONFIG_ARM64)
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+#endif /* CONFIG_ARM64 */
+
+struct hv_register_assoc {
+ u32 name; /* enum hv_register_name */
+ u32 reserved1;
+ u64 reserved2;
+ union hv_register_value value;
+} __packed;
+
+struct hv_input_get_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ u32 names[];
+} __packed;
+
+struct hv_input_set_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ struct hv_register_assoc elements[];
+} __packed;
+
+#define HV_UNMAP_GPA_LARGE_PAGE 0x2
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI */
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+#define HV_X64_VTL_MASK GENMASK(3, 0)
+
+/* Hyper-V memory host visibility */
+enum hv_mem_host_visibility {
+ VMBUS_PAGE_NOT_VISIBLE = 0,
+ VMBUS_PAGE_VISIBLE_READ_ONLY = 1,
+ VMBUS_PAGE_VISIBLE_READ_WRITE = 3
+};
+
+/* HvCallModifySparseGpaPageHostVisibility hypercall */
+#define HV_MAX_MODIFY_GPA_REP_COUNT ((HV_HYP_PAGE_SIZE / sizeof(u64)) - 2)
+struct hv_gpa_range_for_visibility {
+ u64 partition_id;
+ u32 host_visibility : 2;
+ u32 reserved0 : 30;
+ u32 reserved1;
+ u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
+} __packed;
+
+#if defined(CONFIG_X86)
+union hv_msi_address_register { /* HV_MSI_ADDRESS */
+ u32 as_uint32;
+ struct {
+ u32 reserved1 : 2;
+ u32 destination_mode : 1;
+ u32 redirection_hint : 1;
+ u32 reserved2 : 8;
+ u32 destination_id : 8;
+ u32 msi_base : 12;
+ };
+} __packed;
+
+union hv_msi_data_register { /* HV_MSI_ENTRY.Data */
+ u32 as_uint32;
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 reserved1 : 3;
+ u32 level_assert : 1;
+ u32 trigger_mode : 1;
+ u32 reserved2 : 16;
+ };
+} __packed;
+
+union hv_msi_entry { /* HV_MSI_ENTRY */
+
+ u64 as_uint64;
+ struct {
+ union hv_msi_address_register address;
+ union hv_msi_data_register data;
+ } __packed;
+};
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+union hv_msi_entry {
+ u64 as_uint64[2];
+ struct {
+ u64 address;
+ u32 data;
+ u32 reserved;
+ } __packed;
+};
+#endif /* CONFIG_ARM64 */
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 destination_mode : 1;
+ u32 delivery_status : 1;
+ u32 interrupt_polarity : 1;
+ u32 remote_irr : 1;
+ u32 trigger_mode : 1;
+ u32 interrupt_mask : 1;
+ u32 reserved1 : 15;
+
+ u32 reserved2 : 24;
+ u32 destination_id : 8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+enum hv_interrupt_source { /* HV_INTERRUPT_SOURCE */
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+struct hv_interrupt_entry { /* HV_INTERRUPT_ENTRY */
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target { /* HV_DEVICE_INTERRUPT_TARGET */
+ u32 vector;
+ u32 flags; /* HV_DEVICE_INTERRUPT_TARGET_* above */
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt { /* HV_INPUT_RETARGET_DEVICE_INTERRUPT */
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
+#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
+
+struct hv_mmio_read_input { /* HV_INPUT_MEMORY_MAPPED_IO_READ */
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+} __packed;
+
+struct hv_mmio_read_output {
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+struct hv_mmio_write_input {
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+#endif /* _HV_HVGDK_MINI_H */
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
new file mode 100644
index 000000000000..64407c2a3809
--- /dev/null
+++ b/include/hyperv/hvhdk.h
@@ -0,0 +1,733 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVHDK_H
+#define _HV_HVHDK_H
+
+#include <linux/build_bug.h>
+
+#include "hvhdk_mini.h"
+#include "hvgdk.h"
+
+/* Bits for dirty mask of hv_vp_register_page */
+#define HV_X64_REGISTER_CLASS_GENERAL 0
+#define HV_X64_REGISTER_CLASS_IP 1
+#define HV_X64_REGISTER_CLASS_XMM 2
+#define HV_X64_REGISTER_CLASS_SEGMENT 3
+#define HV_X64_REGISTER_CLASS_FLAGS 4
+
+#define HV_VP_REGISTER_PAGE_VERSION_1 1u
+
+struct hv_vp_register_page {
+ u16 version;
+ u8 isvalid;
+ u8 rsvdz;
+ u32 dirty;
+ union {
+ struct {
+ /* General purpose registers
+ * (HV_X64_REGISTER_CLASS_GENERAL)
+ */
+ union {
+ struct {
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u64 rsp;
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ } __packed;
+
+ u64 gp_registers[16];
+ };
+ /* Instruction pointer (HV_X64_REGISTER_CLASS_IP) */
+ u64 rip;
+ /* Flags (HV_X64_REGISTER_CLASS_FLAGS) */
+ u64 rflags;
+ } __packed;
+
+ u64 registers[18];
+ };
+ /* Volatile XMM registers (HV_X64_REGISTER_CLASS_XMM) */
+ union {
+ struct {
+ struct hv_u128 xmm0;
+ struct hv_u128 xmm1;
+ struct hv_u128 xmm2;
+ struct hv_u128 xmm3;
+ struct hv_u128 xmm4;
+ struct hv_u128 xmm5;
+ } __packed;
+
+ struct hv_u128 xmm_registers[6];
+ };
+ /* Segment registers (HV_X64_REGISTER_CLASS_SEGMENT) */
+ union {
+ struct {
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ } __packed;
+
+ struct hv_x64_segment_register segment_registers[6];
+ };
+ /* Misc. control registers (cannot be set via this interface) */
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 cr8;
+ u64 efer;
+ u64 dr7;
+ union hv_x64_pending_interruption_register pending_interruption;
+ union hv_x64_interrupt_state_register interrupt_state;
+ u64 instruction_emulation_hints;
+} __packed;
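+
+/*
+ * Illustrative sketch (not part of this patch): a VMM that modifies a field
+ * in the register page marks the corresponding class dirty so the hypervisor
+ * knows to reload it, e.g. after updating 'rip':
+ *
+ *   reg_page->rip = new_rip;
+ *   reg_page->dirty |= BIT(HV_X64_REGISTER_CLASS_IP);
+ *
+ * 'reg_page' and 'new_rip' are placeholder names.
+ */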
+
+#define HV_PARTITION_PROCESSOR_FEATURES_BANKS 2
+
+union hv_partition_processor_features {
+ u64 as_uint64[HV_PARTITION_PROCESSOR_FEATURES_BANKS];
+ struct {
+ u64 sse3_support : 1;
+ u64 lahf_sahf_support : 1;
+ u64 ssse3_support : 1;
+ u64 sse4_1_support : 1;
+ u64 sse4_2_support : 1;
+ u64 sse4a_support : 1;
+ u64 xop_support : 1;
+ u64 pop_cnt_support : 1;
+ u64 cmpxchg16b_support : 1;
+ u64 altmovcr8_support : 1;
+ u64 lzcnt_support : 1;
+ u64 mis_align_sse_support : 1;
+ u64 mmx_ext_support : 1;
+ u64 amd3dnow_support : 1;
+ u64 extended_amd3dnow_support : 1;
+ u64 page_1gb_support : 1;
+ u64 aes_support : 1;
+ u64 pclmulqdq_support : 1;
+ u64 pcid_support : 1;
+ u64 fma4_support : 1;
+ u64 f16c_support : 1;
+ u64 rd_rand_support : 1;
+ u64 rd_wr_fs_gs_support : 1;
+ u64 smep_support : 1;
+ u64 enhanced_fast_string_support : 1;
+ u64 bmi1_support : 1;
+ u64 bmi2_support : 1;
+ u64 hle_support_deprecated : 1;
+ u64 rtm_support_deprecated : 1;
+ u64 movbe_support : 1;
+ u64 npiep1_support : 1;
+ u64 dep_x87_fpu_save_support : 1;
+ u64 rd_seed_support : 1;
+ u64 adx_support : 1;
+ u64 intel_prefetch_support : 1;
+ u64 smap_support : 1;
+ u64 hle_support : 1;
+ u64 rtm_support : 1;
+ u64 rdtscp_support : 1;
+ u64 clflushopt_support : 1;
+ u64 clwb_support : 1;
+ u64 sha_support : 1;
+ u64 x87_pointers_saved_support : 1;
+ u64 invpcid_support : 1;
+ u64 ibrs_support : 1;
+ u64 stibp_support : 1;
+ u64 ibpb_support: 1;
+ u64 unrestricted_guest_support : 1;
+ u64 mdd_support : 1;
+ u64 fast_short_rep_mov_support : 1;
+ u64 l1dcache_flush_support : 1;
+ u64 rdcl_no_support : 1;
+ u64 ibrs_all_support : 1;
+ u64 skip_l1df_support : 1;
+ u64 ssb_no_support : 1;
+ u64 rsb_a_no_support : 1;
+ u64 virt_spec_ctrl_support : 1;
+ u64 rd_pid_support : 1;
+ u64 umip_support : 1;
+ u64 mbs_no_support : 1;
+ u64 mb_clear_support : 1;
+ u64 taa_no_support : 1;
+ u64 tsx_ctrl_support : 1;
+ /*
+ * N.B. The final processor feature bit in bank 0 is reserved to
+ * simplify potential downlevel backports.
+ */
+ u64 reserved_bank0 : 1;
+
+ /* N.B. Begin bank 1 processor features. */
+ u64 acount_mcount_support : 1;
+ u64 tsc_invariant_support : 1;
+ u64 cl_zero_support : 1;
+ u64 rdpru_support : 1;
+ u64 la57_support : 1;
+ u64 mbec_support : 1;
+ u64 nested_virt_support : 1;
+ u64 psfd_support : 1;
+ u64 cet_ss_support : 1;
+ u64 cet_ibt_support : 1;
+ u64 vmx_exception_inject_support : 1;
+ u64 enqcmd_support : 1;
+ u64 umwait_tpause_support : 1;
+ u64 movdiri_support : 1;
+ u64 movdir64b_support : 1;
+ u64 cldemote_support : 1;
+ u64 serialize_support : 1;
+ u64 tsc_deadline_tmr_support : 1;
+ u64 tsc_adjust_support : 1;
+ u64 fzlrep_movsb : 1;
+ u64 fsrep_stosb : 1;
+ u64 fsrep_cmpsb : 1;
+ u64 reserved_bank1 : 42;
+ } __packed;
+};
+
+union hv_partition_processor_xsave_features {
+ struct {
+ u64 xsave_support : 1;
+ u64 xsaveopt_support : 1;
+ u64 avx_support : 1;
+ u64 reserved1 : 61;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_partition_creation_properties {
+ union hv_partition_processor_features disabled_processor_features;
+ union hv_partition_processor_xsave_features
+ disabled_processor_xsave_features;
+} __packed;
+
+#define HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS 1
+
+union hv_partition_synthetic_processor_features {
+ u64 as_uint64[HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS];
+
+ struct {
+ u64 hypervisor_present : 1;
+ /* Support for HV#1: (CPUID leaves 0x40000000 - 0x40000006) */
+ u64 hv1 : 1;
+ u64 access_vp_run_time_reg : 1; /* HV_X64_MSR_VP_RUNTIME */
+ u64 access_partition_reference_counter : 1; /* HV_X64_MSR_TIME_REF_COUNT */
+ u64 access_synic_regs : 1; /* SINT-related registers */
+ /*
+ * Access to HV_X64_MSR_STIMER0_CONFIG through
+ * HV_X64_MSR_STIMER3_COUNT.
+ */
+ u64 access_synthetic_timer_regs : 1;
+ u64 access_intr_ctrl_regs : 1; /* APIC MSRs and VP assist page */
+ /* HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL */
+ u64 access_hypercall_regs : 1;
+ u64 access_vp_index : 1;
+ u64 access_partition_reference_tsc : 1;
+ u64 access_guest_idle_reg : 1;
+ u64 access_frequency_regs : 1;
+ u64 reserved_z12 : 1;
+ u64 reserved_z13 : 1;
+ u64 reserved_z14 : 1;
+ u64 enable_extended_gva_ranges_for_flush_virtual_address_list : 1;
+ u64 reserved_z16 : 1;
+ u64 reserved_z17 : 1;
+ /* Use fast hypercall output. Corresponds to privilege. */
+ u64 fast_hypercall_output : 1;
+ u64 reserved_z19 : 1;
+ u64 start_virtual_processor : 1; /* Can start VPs */
+ u64 reserved_z21 : 1;
+ /* Synthetic timers in direct mode. */
+ u64 direct_synthetic_timers : 1;
+ u64 reserved_z23 : 1;
+ u64 extended_processor_masks : 1;
+
+ /* Enable various hypercalls */
+ u64 tb_flush_hypercalls : 1;
+ u64 synthetic_cluster_ipi : 1;
+ u64 notify_long_spin_wait : 1;
+ u64 query_numa_distance : 1;
+ u64 signal_events : 1;
+ u64 retarget_device_interrupt : 1;
+ u64 restore_time : 1;
+
+ /* EnlightenedVmcs nested enlightenment is supported. */
+ u64 enlightened_vmcs : 1;
+ u64 reserved : 31;
+ } __packed;
+};
+
+#define HV_MAKE_COMPATIBILITY_VERSION(major_, minor_) \
+ ((u32)((major_) << 8 | (minor_)))
+
+#define HV_COMPATIBILITY_21_H2 HV_MAKE_COMPATIBILITY_VERSION(0X6, 0X9)
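+/* e.g. HV_COMPATIBILITY_21_H2 expands to (0x6 << 8) | 0x9 = 0x0609 */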
+
+union hv_partition_isolation_properties {
+ u64 as_uint64;
+ struct {
+ u64 isolation_type: 5;
+ u64 isolation_host_type : 2;
+ u64 rsvd_z: 5;
+ u64 shared_gpa_boundary_page_number: 52;
+ } __packed;
+};
+
+/*
+ * Various isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_TYPE_NONE 0
+#define HV_PARTITION_ISOLATION_TYPE_SNP 2
+#define HV_PARTITION_ISOLATION_TYPE_TDX 3
+
+/*
+ * Various host isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_HOST_TYPE_NONE 0x0
+#define HV_PARTITION_ISOLATION_HOST_TYPE_HARDWARE 0x1
+#define HV_PARTITION_ISOLATION_HOST_TYPE_RESERVED 0x2
+
+/* Note: Exo partition is enabled by default */
+#define HV_PARTITION_CREATION_FLAG_EXO_PARTITION BIT(8)
+#define HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED BIT(13)
+#define HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED BIT(19)
+#define HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE BIT(22)
+
+struct hv_input_create_partition {
+ u64 flags;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u32 compatibility_version;
+ u32 padding;
+ struct hv_partition_creation_properties partition_creation_properties;
+ union hv_partition_isolation_properties isolation_properties;
+} __packed;
+
+struct hv_output_create_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_initialize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_finalize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_delete_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_get_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+} __packed;
+
+struct hv_output_get_partition_property {
+ u64 property_value;
+} __packed;
+
+struct hv_input_set_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ u64 property_value;
+} __packed;
+
+enum hv_vp_state_page_type {
+ HV_VP_STATE_PAGE_REGISTERS = 0,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE = 1,
+ HV_VP_STATE_PAGE_COUNT
+};
+
+struct hv_input_map_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u32 type; /* enum hv_vp_state_page_type */
+} __packed;
+
+struct hv_output_map_vp_state_page {
+ u64 map_location; /* GPA page number */
+} __packed;
+
+struct hv_input_unmap_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u32 type; /* enum hv_vp_state_page_type */
+} __packed;
+
+struct hv_opaque_intercept_message {
+ u32 vp_index;
+} __packed;
+
+enum hv_port_type {
+ HV_PORT_TYPE_MESSAGE = 1,
+ HV_PORT_TYPE_EVENT = 2,
+ HV_PORT_TYPE_MONITOR = 3,
+ HV_PORT_TYPE_DOORBELL = 4 /* Root Partition only */
+};
+
+struct hv_port_info {
+ u32 port_type; /* enum hv_port_type */
+ u32 padding;
+ union {
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } message_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u16 base_flag_number;
+ u16 flag_count;
+ u32 rsvdz;
+ } event_port_info;
+ struct {
+ u64 monitor_address;
+ u64 rsvdz;
+ } monitor_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } doorbell_port_info;
+ };
+} __packed;
+
+struct hv_connection_info {
+ u32 port_type;
+ u32 padding;
+ union {
+ struct {
+ u64 rsvdz;
+ } message_connection_info;
+ struct {
+ u64 rsvdz;
+ } event_connection_info;
+ struct {
+ u64 monitor_address;
+ } monitor_connection_info;
+ struct {
+ u64 gpa;
+ u64 trigger_value;
+ u64 flags;
+ } doorbell_connection_info;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_BYTE_COUNT (256)
+#define HV_EVENT_FLAGS32_COUNT (256 / sizeof(u32))
+
+/* linux side we create long version of flags to use long bit ops on flags */
+#define HV_EVENT_FLAGS_UL_COUNT (256 / sizeof(ulong))
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned char flags8[HV_EVENT_FLAGS_BYTE_COUNT];
+ u32 flags32[HV_EVENT_FLAGS32_COUNT];
+ ulong flags[HV_EVENT_FLAGS_UL_COUNT]; /* linux only */
+};
+
+struct hv_synic_event_flags_page {
+ volatile union hv_synic_event_flags event_flags[HV_SYNIC_SINT_COUNT];
+};
+
+#define HV_SYNIC_EVENT_RING_MESSAGE_COUNT 63
+
+struct hv_synic_event_ring {
+ u8 signal_masked;
+ u8 ring_full;
+ u16 reserved_z;
+ u32 data[HV_SYNIC_EVENT_RING_MESSAGE_COUNT];
+} __packed;
+
+struct hv_synic_event_ring_page {
+ struct hv_synic_event_ring sint_event_ring[HV_SYNIC_SINT_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_siefp_gpa : 52;
+ } __packed;
+};
+
+union hv_synic_sirbp {
+ u64 as_uint64;
+ struct {
+ u64 sirbp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_sirbp_gpa : 52;
+ } __packed;
+};
+
+union hv_interrupt_control {
+ u64 as_uint64;
+ struct {
+ u32 interrupt_type; /* enum hv_interrupt_type */
+ u32 level_triggered : 1;
+ u32 logical_dest_mode : 1;
+ u32 rsvd : 30;
+ } __packed;
+};
+
+struct hv_stimer_state {
+ struct {
+ u32 undelivered_msg_pending : 1;
+ u32 reserved : 31;
+ } __packed flags;
+ u32 resvd;
+ u64 config;
+ u64 count;
+ u64 adjustment;
+ u64 undelivered_exp_time;
+} __packed;
+
+struct hv_synthetic_timers_state {
+ struct hv_stimer_state timers[HV_SYNIC_STIMER_COUNT];
+ u64 reserved[5];
+} __packed;
+
+union hv_input_delete_vp {
+ u64 as_uint64[2];
+ struct {
+ u64 partition_id;
+ u32 vp_index;
+ u8 reserved[4];
+ } __packed;
+} __packed;
+
+struct hv_input_assert_virtual_interrupt {
+ u64 partition_id;
+ union hv_interrupt_control control;
+ u64 dest_addr; /* cpu's apic id */
+ u32 vector;
+ u8 target_vtl;
+ u8 rsvd_z0;
+ u16 rsvd_z1;
+} __packed;
+
+struct hv_input_create_port {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u8 port_vtl;
+ u8 min_connection_vtl;
+ u16 padding;
+ u64 connection_partition_id;
+ struct hv_port_info port_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_delete_port {
+ u64 as_uint64[2];
+ struct {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_input_connect_port {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u8 connection_vtl;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved2;
+ struct hv_connection_info connection_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_disconnect_port {
+ u64 as_uint64[2];
+ struct {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u32 is_doorbell: 1;
+ u32 reserved: 31;
+ } __packed;
+} __packed;
+
+union hv_input_notify_port_ring_empty {
+ u64 as_uint64;
+ struct {
+ u32 sint_index;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_vp_state_data_xsave {
+ u64 flags;
+ union hv_x64_xsave_xfem_register states;
+} __packed;
+
+/*
+ * For getting and setting VP state, there are two options based on the state type:
+ *
+ * 1.) Data that is accessed by PFNs in the input hypercall page. This is used
+ * for state which may not fit into the hypercall pages.
+ * 2.) Data that is accessed directly in the input/output hypercall pages.
+ * This is used for state that will always fit into the hypercall pages.
+ *
+ * In the future this could be dynamic based on the size if needed.
+ *
+ * Note these hypercalls have an 8-byte aligned variable header size as per the TLFS.
+ */
+
+#define HV_GET_SET_VP_STATE_TYPE_PFN BIT(31)
+
+enum hv_get_set_vp_state_type {
+ /* HvGetSetVpStateLocalInterruptControllerState - APIC/GIC state */
+ HV_GET_SET_VP_STATE_LAPIC_STATE = 0 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_XSAVE = 1 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIM_PAGE = 2 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIEF_PAGE = 3 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS = 4,
+};
+
+struct hv_vp_state_data {
+ u32 type;
+ u32 rsvd;
+ struct hv_vp_state_data_xsave xsave;
+} __packed;
+
+struct hv_input_get_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ u64 output_data_pfns[];
+} __packed;
+
+union hv_output_get_vp_state {
+ struct hv_synthetic_timers_state synthetic_timers_state;
+} __packed;
+
+union hv_input_set_vp_state_data {
+ u64 pfns;
+ u8 bytes;
+} __packed;
+
+struct hv_input_set_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ union hv_input_set_vp_state_data data[];
+} __packed;
+
+/*
+ * Dispatch state for the VP communicated by the hypervisor to the
+ * VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_state {
+ HV_VP_DISPATCH_STATE_INVALID = 0,
+ HV_VP_DISPATCH_STATE_BLOCKED = 1,
+ HV_VP_DISPATCH_STATE_READY = 2,
+};
+
+/*
+ * Dispatch event that caused the current dispatch state on return from
+ * HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_event {
+ HV_VP_DISPATCH_EVENT_INVALID = 0x00000000,
+ HV_VP_DISPATCH_EVENT_SUSPEND = 0x00000001,
+ HV_VP_DISPATCH_EVENT_INTERCEPT = 0x00000002,
+};
+
+#define HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION 1024
+/* The maximum array size of HV_GENERIC_SET (vp_set) buffer */
+#define HV_GENERIC_SET_QWORD_COUNT(max) (((((max) - 1) >> 6) + 1) + 2)
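+/*
+ * For example, with max = HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION
+ * (1024), this works out to (((1024 - 1) >> 6) + 1) + 2 = 18 qwords: 16
+ * qwords of bank bitmap plus 2 qwords for the leading fields of
+ * struct hv_vpset.
+ */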
+
+struct hv_vp_signal_bitset_scheduler_message {
+ u64 partition_id;
+ u32 overflow_count;
+ u16 vp_count;
+ u16 reserved;
+
+#define BITSET_BUFFER_SIZE \
+ HV_GENERIC_SET_QWORD_COUNT(HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION)
+ union {
+ struct hv_vpset bitset;
+ u64 bitset_buffer[BITSET_BUFFER_SIZE];
+ } vp_bitset;
+#undef BITSET_BUFFER_SIZE
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_bitset_scheduler_message) <=
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+#define HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT \
+ (((sizeof(struct hv_message) - sizeof(struct hv_message_header)) / \
+ (sizeof(u64 /* partition id */) + sizeof(u32 /* vp index */))) - 1)
+
+struct hv_vp_signal_pair_scheduler_message {
+ u32 overflow_count;
+ u8 vp_count;
+ u8 reserved1[3];
+
+ u64 partition_ids[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+ u32 vp_indexes[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+
+ u8 reserved2[4];
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_pair_scheduler_message) ==
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+/* Input and output structures for HVCALL_DISPATCH_VP */
+#define HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND 0x1
+#define HV_DISPATCH_VP_FLAG_ENABLE_CALLER_INTERRUPTS 0x2
+#define HV_DISPATCH_VP_FLAG_SET_CALLER_SPEC_CTRL 0x4
+#define HV_DISPATCH_VP_FLAG_SKIP_VP_SPEC_FLUSH 0x8
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_SPEC_FLUSH 0x10
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_USER_SPEC_FLUSH 0x20
+
+struct hv_input_dispatch_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u32 flags;
+ u64 time_slice; /* in 100ns */
+ u64 spec_ctrl;
+} __packed;
+
+struct hv_output_dispatch_vp {
+ u32 dispatch_state; /* enum hv_vp_dispatch_state */
+ u32 dispatch_event; /* enum hv_vp_dispatch_event */
+} __packed;
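+
+/*
+ * Illustrative sketch (not taken from an existing caller): the VP-dispatching
+ * thread in the root fills struct hv_input_dispatch_vp for HVCALL_DISPATCH_VP
+ * and inspects the returned dispatch_state/dispatch_event, e.g.
+ *
+ * struct hv_input_dispatch_vp input = {
+ * .partition_id = partition_id,
+ * .vp_index = vp_index,
+ * .flags = HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND,
+ * .time_slice = 10 * 10000, (10ms expressed in 100ns units)
+ * };
+ *
+ * A returned dispatch_state of HV_VP_DISPATCH_STATE_BLOCKED indicates the VP
+ * cannot run until a wake-up, while HV_VP_DISPATCH_STATE_READY indicates it
+ * can be dispatched again.
+ */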
+
+#endif /* _HV_HVHDK_H */
diff --git a/include/hyperv/hvhdk_mini.h b/include/hyperv/hvhdk_mini.h
new file mode 100644
index 000000000000..f8a39d3e9ce6
--- /dev/null
+++ b/include/hyperv/hvhdk_mini.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVHDK_MINI_H
+#define _HV_HVHDK_MINI_H
+
+#include "hvgdk_mini.h"
+
+/*
+ * Doorbell connection_info flags.
+ */
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_MASK 0x00000007
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY 0x00000000
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE 0x00000001
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD 0x00000002
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD 0x00000003
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD 0x00000004
+#define HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE 0x80000000
+
+/* Each generic set contains 64 elements */
+#define HV_GENERIC_SET_SHIFT (6)
+#define HV_GENERIC_SET_MASK (63)
+
+enum hv_generic_set_format {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+#define HV_GENERIC_SET_FORMAT hv_generic_set_format
+
+enum hv_scheduler_type {
+ HV_SCHEDULER_TYPE_LP = 1, /* Classic scheduler w/o SMT */
+ HV_SCHEDULER_TYPE_LP_SMT = 2, /* Classic scheduler w/ SMT */
+ HV_SCHEDULER_TYPE_CORE_SMT = 3, /* Core scheduler */
+ HV_SCHEDULER_TYPE_ROOT = 4, /* Root / integrated scheduler */
+ HV_SCHEDULER_TYPE_MAX
+};
+
+enum hv_partition_property_code {
+ /* Privilege properties */
+ HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS = 0x00010000,
+ HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES = 0x00010001,
+
+ /* Resource properties */
+ HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING = 0x00050005,
+ HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION = 0x00050017,
+
+ /* Compatibility properties */
+ HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES = 0x00060002,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008,
+ HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009,
+};
+
+enum hv_system_property {
+ /* Add more values when needed */
+ HV_SYSTEM_PROPERTY_SCHEDULER_TYPE = 15,
+};
+
+struct hv_input_get_system_property {
+ u32 property_id; /* enum hv_system_property */
+ union {
+ u32 as_uint32;
+ /* More fields to be filled in when needed */
+ };
+} __packed;
+
+struct hv_output_get_system_property {
+ union {
+ u32 scheduler_type; /* enum hv_scheduler_type */
+ };
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+struct hv_proximity_domain_info {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory { /* HV_INPUT_DEPOSIT_MEMORY */
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_input_withdraw_memory {
+ u64 partition_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_withdraw_memory {
+ DECLARE_FLEX_ARRAY(u64, gpa_page_list);
+} __packed;
+
+/* HV Map GPA (Guest Physical Address) Flags */
+#define HV_MAP_GPA_PERMISSIONS_NONE 0x0
+#define HV_MAP_GPA_READABLE 0x1
+#define HV_MAP_GPA_WRITABLE 0x2
+#define HV_MAP_GPA_KERNEL_EXECUTABLE 0x4
+#define HV_MAP_GPA_USER_EXECUTABLE 0x8
+#define HV_MAP_GPA_EXECUTABLE 0xC
+#define HV_MAP_GPA_PERMISSIONS_MASK 0xF
+#define HV_MAP_GPA_ADJUSTABLE 0x8000
+#define HV_MAP_GPA_NO_ACCESS 0x10000
+#define HV_MAP_GPA_NOT_CACHED 0x200000
+#define HV_MAP_GPA_LARGE_PAGE 0x80000000
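+
+/*
+ * Note HV_MAP_GPA_EXECUTABLE is simply HV_MAP_GPA_KERNEL_EXECUTABLE |
+ * HV_MAP_GPA_USER_EXECUTABLE (0x4 | 0x8 = 0xc), and the four low permission
+ * bits together form HV_MAP_GPA_PERMISSIONS_MASK.
+ */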
+
+struct hv_input_map_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 map_flags;
+ u32 padding;
+ u64 source_gpa_page_list[];
+} __packed;
+
+union hv_gpa_page_access_state_flags {
+ struct {
+ u64 clear_accessed : 1;
+ u64 set_accessed : 1;
+ u64 clear_dirty : 1;
+ u64 set_dirty : 1;
+ u64 reserved : 60;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_input_get_gpa_pages_access_state {
+ u64 partition_id;
+ union hv_gpa_page_access_state_flags flags;
+ u64 hv_gpa_page_number;
+} __packed;
+
+union hv_gpa_page_access_state {
+ struct {
+ u8 accessed : 1;
+ u8 dirty : 1;
+ u8 reserved: 6;
+ };
+ u8 as_uint8;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+struct hv_input_add_logical_processor {
+ u32 lp_index;
+ u32 apic_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_add_logical_processor {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum { /* HV_SUBNODE_TYPE */
+ HV_SUBNODE_ANY = 0,
+ HV_SUBNODE_SOCKET,
+ HV_SUBNODE_CLUSTER,
+ HV_SUBNODE_L3,
+ HV_SUBNODE_COUNT,
+ HV_SUBNODE_INVALID = -1
+};
+
+struct hv_create_vp { /* HV_INPUT_CREATE_VP */
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+/* HV_INTERRUPT_TRIGGER_MODE */
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+/* HV_DEVICE_INTERRUPT_DESCRIPTOR */
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+/* HV_INPUT_MAP_DEVICE_INTERRUPT */
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u32 flags;
+ u32 base_irt_idx;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+/* HV_OUTPUT_MAP_DEVICE_INTERRUPT */
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+/* HV_INPUT_UNMAP_DEVICE_INTERRUPT */
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+ u32 flags;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+struct hv_send_ipi_ex { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI_EX */
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+typedef u16 hv_pci_rid; /* HV_PCI_RID */
+typedef u16 hv_pci_segment; /* HV_PCI_SEGMENT */
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf { /* HV_PCI_BDF */
+ u16 as_uint16;
+
+ struct {
+ u8 function : 3;
+ u8 device : 5;
+ u8 bus;
+ };
+} __packed;
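+
+/*
+ * For example, assuming the usual little-endian bitfield layout this header
+ * targets, bus 3, device 5, function 2 encodes as
+ * as_uint16 = (3 << 8) | (5 << 3) | 2 = 0x032a.
+ */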
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+enum hv_device_type { /* HV_DEVICE_TYPE */
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+union hv_device_id { /* HV_DEVICE_ID */
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0 : 62;
+ u64 device_type : 2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id : 62;
+ u64 device_type : 2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits : 2;
+ u16 source_shadow : 1;
+
+ u16 rsvdz0 : 11;
+ u16 device_type : 2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3 : 14;
+ u16 device_type : 2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count : 30;
+ u32 device_type : 2;
+ } acpi;
+} __packed;
+
+#endif /* _HV_HVHDK_MINI_H */
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 8365adf842ef..a6c2897bcc63 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -73,7 +73,6 @@ static inline void __init set_machine_trusted_keys(struct key *keyring)
}
#endif
-extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
enum blacklist_hash_type hash_type);
@@ -93,6 +92,7 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
}
#endif
+struct pkcs7_message;
#ifdef CONFIG_SYSTEM_REVOCATION_LIST
extern int add_key_to_revocation_list(const char *data, size_t size);
extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
index 0fc0999d2420..f8236a8536f7 100644
--- a/include/kunit/platform_device.h
+++ b/include/kunit/platform_device.h
@@ -2,6 +2,7 @@
#ifndef _KUNIT_PLATFORM_DRIVER_H
#define _KUNIT_PLATFORM_DRIVER_H
+struct completion;
struct kunit;
struct platform_device;
struct platform_driver;
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 34b71e42fb10..58dbab60f853 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -312,6 +312,7 @@ static inline void kunit_set_failure(struct kunit *test)
}
bool kunit_enabled(void);
+bool kunit_autorun(void);
const char *kunit_action(void);
const char *kunit_filter_glob(void);
char *kunit_filter(void);
@@ -334,7 +335,8 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
int *err);
void kunit_free_suite_set(struct kunit_suite_set suite_set);
-int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites,
+ bool run_tests);
void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index fd650a8789b9..681cf0c8b9df 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -98,6 +98,7 @@ int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
@@ -150,9 +151,31 @@ void kvm_timer_cpu_down(void);
/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
#define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0))
+DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);
+
+static inline bool has_broken_cntvoff(void)
+{
+ return static_branch_unlikely(&broken_cntvoff_key);
+}
+
static inline bool has_cntpoff(void)
{
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}
+static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+ u64 offset = 0;
+
+ if (!ctxt)
+ return 0;
+
+ if (ctxt->offset.vm_offset)
+ offset += *ctxt->offset.vm_offset;
+ if (ctxt->offset.vcpu_offset)
+ offset += *ctxt->offset.vcpu_offset;
+
+ return offset;
+}
+
#endif
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
index c637e0997f6d..abec23c7744f 100644
--- a/include/linux/adreno-smmu-priv.h
+++ b/include/linux/adreno-smmu-priv.h
@@ -50,6 +50,11 @@ struct adreno_smmu_fault_info {
* the GPU driver must call resume_translation()
* @resume_translation: Resume translation after a fault
*
+ * @set_prr_bit: [optional] Configure the GPU's Partially Resident
+ * Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR register with
+ * the physical address of PRR page passed from GPU
+ * driver.
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
@@ -67,6 +72,8 @@ struct adreno_smmu_priv {
void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
void (*set_stall)(const void *cookie, bool enabled);
void (*resume_translation)(const void *cookie, bool terminate);
+ void (*set_prr_bit)(const void *cookie, bool set);
+ void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
};
#endif /* __ADRENO_SMMU_PRIV_H */
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 4b97f38f3fcf..947b63091902 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -16,10 +16,18 @@
#define AER_CORRECTABLE 2
#define DPC_FATAL 3
+/*
+ * AER and DPC capabilities TLP Logging register sizes (PCIe r6.2, sec 7.8.4
+ * & 7.9.14).
+ */
+#define PCIE_STD_NUM_TLP_HEADERLOG 4
+#define PCIE_STD_MAX_TLP_PREFIXLOG 4
+
struct pci_dev;
struct pcie_tlp_log {
- u32 dw[4];
+ u32 dw[PCIE_STD_NUM_TLP_HEADERLOG];
+ u32 prefix[PCIE_STD_MAX_TLP_PREFIXLOG];
};
struct aer_capability_regs {
@@ -37,8 +45,6 @@ struct aer_capability_regs {
u16 uncor_err_source;
};
-int pcie_read_tlp_log(struct pci_dev *dev, int where, struct pcie_tlp_log *log);
-
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
int pcie_aer_is_native(struct pci_dev *dev);
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 0bbbe537c5f9..a946e0203e6d 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -224,9 +224,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_hooks_tag(_tag, _do_alloc) \
({ \
- struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \
- typeof(_do_alloc) _res = _do_alloc; \
- alloc_tag_restore(_tag, _old); \
+ typeof(_do_alloc) _res; \
+ if (mem_alloc_profiling_enabled()) { \
+ struct alloc_tag * __maybe_unused _old; \
+ _old = alloc_tag_save(_tag); \
+ _res = _do_alloc; \
+ alloc_tag_restore(_tag, _old); \
+ } else \
+ _res = _do_alloc; \
_res; \
})
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 2b90c48a6a87..062fbd4c9b77 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -31,11 +31,11 @@ struct amd_iommu_pi_data {
struct task_struct;
struct pci_dev;
-extern int amd_iommu_detect(void);
+extern void amd_iommu_detect(void);
#else /* CONFIG_AMD_IOMMU */
-static inline int amd_iommu_detect(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
#endif /* CONFIG_AMD_IOMMU */
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
index b4f818205216..6fa510f419c0 100644
--- a/include/linux/amd-pmf-io.h
+++ b/include/linux/amd-pmf-io.h
@@ -18,10 +18,12 @@
* enum sfh_message_type - Query the SFH message type
* @MT_HPD: Message ID to know the Human presence info from MP2 FW
* @MT_ALS: Message ID to know the Ambient light info from MP2 FW
+ * @MT_SRA: Message ID to know the SRA data from MP2 FW
*/
enum sfh_message_type {
MT_HPD,
MT_ALS,
+ MT_SRA,
};
/**
@@ -40,10 +42,23 @@ enum sfh_hpd_info {
* struct amd_sfh_info - get HPD sensor info from MP2 FW
* @ambient_light: Populates the ambient light information
* @user_present: Populates the user presence information
+ * @platform_type: Operating modes (clamshell, flat, tent, etc.)
+ * @laptop_placement: Device states (ontable, onlap, outbag)
*/
struct amd_sfh_info {
u32 ambient_light;
u8 user_present;
+ u32 platform_type;
+ u32 laptop_placement;
+};
+
+enum laptop_placement {
+ LP_UNKNOWN = 0,
+ ON_TABLE,
+ ON_LAP_MOTION,
+ IN_BAG,
+ OUT_OF_BAG,
+ LP_UNDEFINED,
};
int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 3305c849abd6..60d674af3080 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -44,7 +44,12 @@ struct linux_binprm {
*/
point_of_no_return:1,
/* Set when "comm" must come from the dentry. */
- comm_from_dentry:1;
+ comm_from_dentry:1,
+ /*
+ * Set by user space to check executability according to the
+ * caller's environment.
+ */
+ is_check:1;
struct file *executable; /* Executable to pass to the interpreter */
struct file *interpreter;
struct file *file;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 262b6596eca5..2026953e2c4e 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -23,7 +23,7 @@ struct device;
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Functions implementations that are architecture
- * specific are in various include/asm-<arch>/bitops.h headers
+ * specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index ba35bbf07798..c1cb53cf2f0f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -230,6 +230,37 @@ static inline int get_count_order_long(unsigned long l)
}
/**
+ * parity8 - get the parity of an u8 value
+ * @value: the value to be examined
+ *
+ * Determine the parity of the u8 argument.
+ *
+ * Returns:
+ * 0 for even parity, 1 for odd parity
+ *
+ * Note: This function informs you about the current parity. Example to bail
+ * out when parity is odd:
+ *
+ * if (parity8(val) == 1)
+ * return -EBADMSG;
+ *
+ * If you need to calculate a parity bit, you need to draw the conclusion from
+ * this result yourself. Example to enforce odd parity, parity bit is bit 7:
+ *
+ * if (parity8(val) == 0)
+ * val ^= BIT(7);
+ */
+static inline int parity8(u8 val)
+{
+ /*
+ * One explanation of this algorithm:
+ * https://funloop.org/codex/problem/parity/README.html
+ */
+ val ^= val >> 4;
+ return (0x6996 >> (val & 0xf)) & 1;
+}
+
+/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 60044b608817..61a75d3f294b 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -20,9 +20,8 @@
*/
#if !defined(__ASSEMBLY__)
#include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __is_constexpr((l) > (h)), (l) > (h), 0)))
+#include <linux/compiler.h>
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a0a9007cc1e3..9ebb53f031cd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -900,8 +900,22 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+ unsigned int memflags = memalloc_noio_save();
+
+ blk_mq_freeze_queue_nomemsave(q);
+ return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ memalloc_noio_restore(memflags);
+}
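+/*
+ * Caller-side sketch of the updated convention: the flags returned by
+ * blk_mq_freeze_queue() must be passed back to blk_mq_unfreeze_queue(), e.g.
+ *
+ * unsigned int memflags = blk_mq_freeze_queue(q);
+ * ... change queue state that must not recurse into I/O ...
+ * blk_mq_unfreeze_queue(q, memflags);
+ */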
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 76f0a4e7c2e5..248416ecd01c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -561,7 +561,6 @@ struct request_queue {
struct list_head flush_list;
struct mutex sysfs_lock;
- struct mutex sysfs_dir_lock;
struct mutex limits_lock;
/*
@@ -605,8 +604,6 @@ struct request_queue {
* Serializes all debugfs metadata operations using the above dentries.
*/
struct mutex debugfs_mutex;
-
- bool mq_sysfs_init_done;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9ef57f6f584a..f3f50e29d639 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase refcnt. To also increase the refcnt, use the corresponding
+ * bpf_map_get() and btf_get_by_fd() functions.
+ */
+
static inline struct bpf_map *__bpf_map_get(struct fd f)
{
if (fd_empty(f))
@@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
return fd_file(f)->private_data;
}
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &btf_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
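+
+/*
+ * Sketch (hypothetical caller): code that only needs the map while it holds
+ * the file reference can use the non-refcounting helper directly,
+ *
+ * CLASS(fd, f)(ufd);
+ * struct bpf_map *map = __bpf_map_get(f);
+ *
+ * whereas code that stores the pointer beyond that scope must take a
+ * reference via bpf_map_get(ufd) and drop it with bpf_map_put().
+ */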
+
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 48b7b2eeb7e2..32c23f2a3086 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -233,6 +233,7 @@ enum bpf_stack_slot_type {
*/
STACK_DYNPTR,
STACK_ITER,
+ STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -254,8 +255,9 @@ struct bpf_reference_state {
* default to pointer reference on zero initialization of a state.
*/
enum ref_state_type {
- REF_TYPE_PTR = 0,
- REF_TYPE_LOCK,
+ REF_TYPE_PTR = 1,
+ REF_TYPE_IRQ = 2,
+ REF_TYPE_LOCK = 3,
} type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
@@ -315,9 +317,6 @@ struct bpf_func_state {
u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
- int acquired_refs;
- int active_locks;
- struct bpf_reference_state *refs;
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
* (i.e. 8) bytes worth of stack memory.
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -370,6 +369,8 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
+ /* Acquired reference states */
+ struct bpf_reference_state *refs;
/*
* 'branches' field is the number of branches left to explore:
* 0 - all possible paths from this state reached bpf_exit or
@@ -419,9 +420,13 @@ struct bpf_verifier_state {
u32 insn_idx;
u32 curframe;
- bool speculative;
+ u32 acquired_refs;
+ u32 active_locks;
+ u32 active_preempt_locks;
+ u32 active_irq_id;
bool active_rcu_lock;
- u32 active_preempt_lock;
+
+ bool speculative;
/* If this state was ever pointed-to by other state's loop_entry field
* this flag would be set to true. Used to avoid freeing such states
* while they are still in use.
@@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
-void print_verifier_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state, bool print_all);
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 4214e76c9168..2a08a2b55592 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
return btf_type_is_int(t) || btf_type_is_enum(t);
}
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 348acf2558f3..a9948a9f1093 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -73,15 +73,23 @@ static inline void generic_bug_clear_once(void) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
/*
* Since detected data corruption should stop operation on the affected
* structures. Return value must be checked and sanely acted on by caller.
*/
static inline __must_check bool check_data_corruption(bool v) { return v; }
-#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) \
check_data_corruption(({ \
bool corruption = unlikely(condition); \
if (corruption) { \
+ if (addr) \
+ mem_dump_obj(addr); \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 7ad736538649..c8f4f0a0b874 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -147,7 +147,7 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level)
return ci ? ci->id : -1;
}
-#ifdef CONFIG_ARM64
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
#define use_arch_cache_info() (true)
#else
#define use_arch_cache_info() (false)
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
new file mode 100644
index 000000000000..6261aa0b3fb0
--- /dev/null
+++ b/include/linux/call_once.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_CALL_ONCE_H
+#define _LINUX_CALL_ONCE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#define ONCE_NOT_STARTED 0
+#define ONCE_RUNNING 1
+#define ONCE_COMPLETED 2
+
+struct once {
+ atomic_t state;
+ struct mutex lock;
+};
+
+static inline void __once_init(struct once *once, const char *name,
+ struct lock_class_key *key)
+{
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ __mutex_init(&once->lock, name, key);
+}
+
+#define once_init(once) \
+do { \
+ static struct lock_class_key __key; \
+ __once_init((once), #once, &__key); \
+} while (0)
+
+static inline void call_once(struct once *once, void (*cb)(struct once *))
+{
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return;
+
+ guard(mutex)(&once->lock);
+ WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
+ if (atomic_read(&once->state) != ONCE_NOT_STARTED)
+ return;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ cb(once);
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+}
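+
+/*
+ * Usage sketch (names are illustrative): initialize the once instance a
+ * single time, then any number of paths may race on call_once():
+ *
+ * static struct once foo_once;
+ *
+ * static void foo_setup(struct once *once)
+ * {
+ * do the one-time initialization here
+ * }
+ *
+ * setup path: once_init(&foo_once);
+ * fast paths: call_once(&foo_once, foo_setup);
+ */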
+
+#endif /* _LINUX_CALL_ONCE_H */
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2d7d86f0290d..c7f2c63b3bc3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -504,20 +504,6 @@ struct ceph_mds_request_head_legacy {
#define CEPH_MDS_REQUEST_HEAD_VERSION 3
-struct ceph_mds_request_head_old {
- __le16 version; /* struct version */
- __le64 oldest_client_tid;
- __le32 mdsmap_epoch; /* on client */
- __le32 flags; /* CEPH_MDS_FLAG_* */
- __u8 num_retry, num_fwd; /* count retry, fwd attempts */
- __le16 num_releases; /* # include cap/lease release records */
- __le32 op; /* mds op code */
- __le32 caller_uid, caller_gid;
- __le64 ino; /* use this ino for openc, mkdir, mknod,
- etc. (if replaying) */
- union ceph_mds_request_args_ext args;
-} __attribute__ ((packed));
-
struct ceph_mds_request_head {
__le16 version; /* struct version */
__le64 oldest_client_tid;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index efd43df3a99a..200fd3c5bc70 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -191,6 +191,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__v; \
})
+#ifdef __CHECKER__
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
+#else /* __CHECKER__ */
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+#endif /* __CHECKER__ */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __is_array(a) (!__same_type((a), &(a)[0]))
+#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_array(a), \
+ "must be array")
+
+#define __is_byte_array(a) (__is_array(a) && sizeof((a)[0]) == 1)
+#define __must_be_byte_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
+ "must be byte array")
+
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
+
#endif /* __KERNEL__ */
/**
@@ -231,19 +250,6 @@ static inline void *offset_to_ptr(const int *off)
#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
-#ifdef __CHECKER__
-#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
-#else /* __CHECKER__ */
-#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
-#endif /* __CHECKER__ */
-
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array")
-
-/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
-#define __must_be_cstr(p) \
- __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
-
/*
* This returns a constant expression while determining if an argument is
* a constant expression, most importantly without evaluating the argument.
@@ -308,6 +314,28 @@ static inline void *offset_to_ptr(const int *off)
#define statically_true(x) (__builtin_constant_p(x) && (x))
/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile time constants. Else, it would always returns false even on
+ * the most trivial cases like:
+ *
+ * true || non_const_var
+ *
+ * By contrast, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ * !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index c13342594278..17276965ff1d 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -172,6 +172,9 @@ struct coresight_desc {
* @dest_dev: a @coresight_device representation of the component
connected to @src_port. NULL until the device is created
* @link: Representation of the connection as a sysfs link.
+ * @filter_src_fwnode: filter source component's fwnode handle.
+ * @filter_src_dev: a @coresight_device representation of the component that
+ needs to be filtered.
*
* The full connection structure looks like this, where in_conns store
* references to same connection as the source device's out_conns.
@@ -200,8 +203,10 @@ struct coresight_connection {
struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
struct coresight_device *src_dev;
- atomic_t src_refcnt;
- atomic_t dest_refcnt;
+ struct fwnode_handle *filter_src_fwnode;
+ struct coresight_device *filter_src_dev;
+ int src_refcnt;
+ int dest_refcnt;
};
/**
@@ -588,9 +593,14 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o
}
#endif /* CONFIG_64BIT */
+static inline bool coresight_is_device_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE);
+}
+
static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
{
- return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return csdev && coresight_is_device_source(csdev) &&
(csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
}
@@ -662,6 +672,7 @@ void coresight_relaxed_write64(struct coresight_device *csdev,
void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
extern int coresight_get_cpu(struct device *dev);
+extern int coresight_get_static_trace_id(struct device *dev, u32 *id);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
struct coresight_connection *
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index bdcec1732445..6a0a8f1c7c90 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -77,6 +77,7 @@ extern ssize_t cpu_show_gds(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9278a50d514f..36a890d0dd57 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -391,7 +391,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
+ * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
@@ -411,7 +411,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
/**
- * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
+ * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
* @mask1: the first input cpumask
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
@@ -840,7 +840,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
}
/**
- * cpumask_any - pick a "random" cpu from *srcp
+ * cpumask_any - pick an arbitrary cpu from *srcp
* @srcp: the input cpumask
*
* Return: >= nr_cpu_ids if no cpus set.
@@ -848,7 +848,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
@@ -1043,7 +1043,6 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
-void init_cpu_online(const struct cpumask *src);
#define assign_cpu(cpu, mask, val) \
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index acc55626afdc..2f2555e6407c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -20,6 +20,8 @@ extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
+ unsigned long long paddr, unsigned long long size);
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
@@ -99,6 +101,12 @@ static inline void vmcore_unusable(void)
* indicated in the vmcore instead. For example, a ballooned page
* contains no data and reading from such a page will cause high
* load in the hypervisor.
+ * @get_device_ram: query RAM ranges that can only be detected by device
+ * drivers, such as the virtio-mem driver, so they can be included in
+ * the crash dump on architectures that allocate the elfcore hdr in the dump
+ * ("2nd") kernel. Indicated RAM ranges may contain holes to reduce the
+ * total number of ranges; such holes can be detected using the pfn_is_ram
+ * callback just like for other RAM.
* @next: List head to manage registered callbacks internally; initialized by
* register_vmcore_cb().
*
@@ -109,11 +117,44 @@ static inline void vmcore_unusable(void)
*/
struct vmcore_cb {
bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
struct list_head next;
};
extern void register_vmcore_cb(struct vmcore_cb *cb);
extern void unregister_vmcore_cb(struct vmcore_cb *cb);
+struct vmcore_range {
+ struct list_head list;
+ unsigned long long paddr;
+ unsigned long long size;
+ loff_t offset;
+};
+
+/* Allocate a vmcore range and add it to the list. */
+static inline int vmcore_alloc_add_range(struct list_head *list,
+ unsigned long long paddr, unsigned long long size)
+{
+ struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL);
+
+ if (!m)
+ return -ENOMEM;
+ m->paddr = paddr;
+ m->size = size;
+ list_add_tail(&m->list, list);
+ return 0;
+}
+
+/* Free a list of vmcore ranges. */
+static inline void vmcore_free_ranges(struct list_head *list)
+{
+ struct vmcore_range *m, *tmp;
+
+ list_for_each_entry_safe(m, tmp, list, list) {
+ list_del(&m->list);
+ kfree(m);
+ }
+}
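+/*
+ * Usage sketch (hypothetical callback): a vmcore_cb implementation can report
+ * device RAM by appending ranges to the list handed to ->get_device_ram(),
+ *
+ * static int foo_get_device_ram(struct vmcore_cb *cb, struct list_head *list)
+ * {
+ * return vmcore_alloc_add_range(list, start_paddr, size);
+ * }
+ *
+ * and a partially built list is released with vmcore_free_ranges() on error.
+ */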
+
#else /* !CONFIG_CRASH_DUMP */
static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 6bb0c0bf357b..16787c1cee21 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,11 +6,29 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
-#define CRC_T10DIF_STRING "crct10dif"
-extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
- size_t len);
-extern __u16 crc_t10dif(unsigned char const *, size_t);
-extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
+u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len);
+u16 crc_t10dif_generic(u16 crc, const u8 *p, size_t len);
+
+static inline u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len)
+{
+ if (IS_ENABLED(CONFIG_CRC_T10DIF_ARCH))
+ return crc_t10dif_arch(crc, p, len);
+ return crc_t10dif_generic(crc, p, len);
+}
+
+static inline u16 crc_t10dif(const u8 *p, size_t len)
+{
+ return crc_t10dif_update(0, p, len);
+}
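+
+/*
+ * For example (sketch), computing a CRC over a buffer in two pieces,
+ *
+ * u16 crc = crc_t10dif(buf, 16);
+ * crc = crc_t10dif_update(crc, buf + 16, len - 16);
+ *
+ * yields the same value as crc_t10dif(buf, len) over the whole buffer.
+ */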
+
+#if IS_ENABLED(CONFIG_CRC_T10DIF_ARCH)
+bool crc_t10dif_is_optimized(void);
+#else
+static inline bool crc_t10dif_is_optimized(void)
+{
+ return false;
+}
+#endif
#endif
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 87f788c0d607..e9bd40056687 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -8,10 +8,49 @@
#include <linux/types.h>
#include <linux/bitrev.h>
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_le_base(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_be_base(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len);
+u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len);
+u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len);
+u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len);
+u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len);
+u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len);
+
+static inline u32 __pure crc32_le(u32 crc, const u8 *p, size_t len)
+{
+ if (IS_ENABLED(CONFIG_CRC32_ARCH))
+ return crc32_le_arch(crc, p, len);
+ return crc32_le_base(crc, p, len);
+}
+
+static inline u32 __pure crc32_be(u32 crc, const u8 *p, size_t len)
+{
+ if (IS_ENABLED(CONFIG_CRC32_ARCH))
+ return crc32_be_arch(crc, p, len);
+ return crc32_be_base(crc, p, len);
+}
+
+/* TODO: leading underscores should be dropped once callers have been updated */
+static inline u32 __pure __crc32c_le(u32 crc, const u8 *p, size_t len)
+{
+ if (IS_ENABLED(CONFIG_CRC32_ARCH))
+ return crc32c_le_arch(crc, p, len);
+ return crc32c_le_base(crc, p, len);
+}
+
+/*
+ * crc32_optimizations() returns flags that indicate which CRC32 library
+ * functions are using architecture-specific optimizations. Unlike
+ * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32
+ * variants and also whether any needed CPU features are available at runtime.
+ */
+#define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */
+#define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */
+#define CRC32C_OPTIMIZATION BIT(2) /* __crc32c_le() is optimized */
+#if IS_ENABLED(CONFIG_CRC32_ARCH)
+u32 crc32_optimizations(void);
+#else
+static inline u32 crc32_optimizations(void) { return 0; }
+#endif
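+
+/*
+ * For example (sketch), a caller that wants to pick a checksum based on
+ * whether CRC32C is hardware accelerated might do:
+ *
+ * if (crc32_optimizations() & CRC32C_OPTIMIZATION)
+ * prefer a crc32c-based format
+ */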
/**
* crc32_le_combine - Combine two crc32 check values into one. For two
@@ -38,9 +77,6 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
return crc32_le_shift(crc1, len2) ^ crc2;
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
-u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len);
-
/**
* __crc32c_le_combine - Combine two crc32c check values into one. For two
* sequences of bytes, seq1 and seq2 with lengths len1
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index 357ae4611a45..47eb78003c26 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -2,9 +2,12 @@
#ifndef _LINUX_CRC32C_H
#define _LINUX_CRC32C_H
-#include <linux/types.h>
+#include <linux/crc32.h>
-extern u32 crc32c(u32 crc, const void *address, unsigned int length);
+static inline u32 crc32c(u32 crc, const void *address, unsigned int length)
+{
+ return __crc32c_le(crc, address, length);
+}
/* This macro exists for backwards-compatibility. */
#define crc32c_le crc32c
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a67f2c4940e9..af525252b853 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -193,11 +193,16 @@ struct damos_quota_goal {
* size quota is set, DAMON tries to apply the action only up to &sz bytes
* within &reset_interval.
*
- * Internally, the time quota is transformed to a size quota using estimated
- * throughput of the scheme's action. DAMON then compares it against &sz and
- * uses smaller one as the effective quota.
+ * To reconcile the different types of quotas and goals, DAMON internally
+ * converts them into one single size quota called the "effective quota",
+ * which it then uses as the only real quota. The conversion is made as
+ * follows.
*
- * If @goals is not empt, DAMON calculates yet another size quota based on the
+ * The time quota is transformed to a size quota using estimated throughput of
+ * the scheme's action. DAMON then compares it against &sz and uses smaller
+ * one as the effective quota.
+ *
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
* goals using its internal feedback loop algorithm, for every @reset_interval.
* Then, if the new size quota is smaller than the effective quota, it uses the
* new size quota as the effective quota.
@@ -286,13 +291,33 @@ struct damos_watermarks {
* @sz_tried: Total size of regions that the scheme is tried to be applied.
* @nr_applied: Total number of regions that the scheme is applied.
* @sz_applied: Total size of regions that the scheme is applied.
+ * @sz_ops_filter_passed:
+ * Total bytes that passed ops layer-handled DAMOS filters.
* @qt_exceeds: Total number of times the quota of the scheme has exceeded.
+ *
+ * "Tried an action to a region" in this context means the DAMOS core logic
+ * determined the region as eligible to apply the action. The access pattern
+ * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
+ * (&struct damos_watermarks) and filters (&struct damos_filter) that handled
+ * on core logic can affect this. The core logic asks the operation set
+ * (&struct damon_operations) to apply the action to the region.
+ *
+ * "Applied an action to a region" in this context means the operation set
+ * (&struct damon_operations) successfully applied the action to the region, at
+ * least to a part of the region. The filters (&struct damos_filter) that are
+ * handled at the operation set layer, the type of the action, and the pages
+ * of the region can affect this. For example, if a filter is set to exclude
+ * anonymous pages and the region has only anonymous pages, applying the
+ * action to the region fails. Likewise, if the action is &DAMOS_PAGEOUT and
+ * all pages of the region are already paged out, applying the action to the
+ * region fails.
*/
struct damos_stat {
unsigned long nr_tried;
unsigned long sz_tried;
unsigned long nr_applied;
unsigned long sz_applied;
+ unsigned long sz_ops_filter_passed;
unsigned long qt_exceeds;
};
@@ -327,8 +352,9 @@ enum damos_filter_type {
/**
* struct damos_filter - DAMOS action target memory filter.
- * @type: Type of the page.
- * @matching: If the matching page should filtered out or in.
+ * @type: Type of the target memory.
+ * @matching: Whether this is for @type-matching memory.
+ * @allow: Whether to include or exclude the @matching memory.
* @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
* @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
* @target_idx: Index of the &struct damon_target of
@@ -337,13 +363,15 @@ enum damos_filter_type {
* @list: List head for siblings.
*
* Before applying the &damos->action to a memory region, DAMOS checks if each
- * page of the region matches to this and avoid applying the action if so.
- * Support of each filter type depends on the running &struct damon_operations
- * and the type. Refer to &enum damos_filter_type for more detai.
+ * byte of the region matches this given condition and avoids applying the
+ * action if so. Support of each filter type depends on the running &struct
+ * damon_operations and the type. Refer to &enum damos_filter_type for more
+ * details.
*/
struct damos_filter {
enum damos_filter_type type;
bool matching;
+ bool allow;
union {
unsigned short memcg_id;
struct damon_addr_range addr_range;
@@ -352,6 +380,31 @@ struct damos_filter {
struct list_head list;
};
+struct damon_ctx;
+struct damos;
+
+/**
+ * struct damos_walk_control - Control damos_walk().
+ *
+ * @walk_fn: Function to be called back for each region.
+ * @data: Data that will be passed to walk functions.
+ *
+ * Control damos_walk(), which requests a specific kdamond to invoke the given
+ * function for each region that is eligible for the actions of the kdamond's
+ * schemes. Refer to damos_walk() for more details.
+ */
+struct damos_walk_control {
+ void (*walk_fn)(void *data, struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *s, unsigned long sz_filter_passed);
+ void *data;
+/* private: internal use only */
+ /* informs if the kdamond has finished handling the walk request */
+ struct completion completion;
+ /* informs if the walk is canceled. */
+ bool canceled;
+};
+
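
As a rough illustration (hypothetical caller, not part of the patch), a user of this interface might fill a &struct damos_walk_control and pass it to damos_walk(); the callback and accumulator names below are assumptions.

#include <linux/damon.h>
#include <linux/printk.h>

/* Hypothetical walk callback: sum up bytes that passed ops-layer filters. */
static void example_walk_fn(void *data, struct damon_ctx *ctx,
			    struct damon_target *t, struct damon_region *r,
			    struct damos *s, unsigned long sz_filter_passed)
{
	unsigned long *total = data;

	*total += sz_filter_passed;
}

static int example_walk(struct damon_ctx *ctx)
{
	unsigned long total = 0;
	struct damos_walk_control control = {
		.walk_fn = example_walk_fn,
		.data = &total,
	};
	int err = damos_walk(ctx, &control);

	if (!err)
		pr_info("bytes passing ops-layer filters: %lu\n", total);
	return err;
}
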
/**
* struct damos_access_pattern - Target access pattern of the given scheme.
* @min_sz_region: Minimum size of target regions.
@@ -415,6 +468,8 @@ struct damos {
* @action
*/
unsigned long next_apply_sis;
+ /* informs if ongoing DAMOS walk for this scheme is finished */
+ bool walk_completed;
/* public: */
struct damos_quota quota;
struct damos_watermarks wmarks;
@@ -442,8 +497,6 @@ enum damon_ops_id {
NR_DAMON_OPS,
};
-struct damon_ctx;
-
/**
* struct damon_operations - Monitoring operations for given use cases.
*
@@ -487,7 +540,8 @@ struct damon_ctx;
* @apply_scheme is called from @kdamond when a region for user provided
* DAMON-based operation scheme is found. It should apply the scheme's action
* to the region and return bytes of the region that the action is successfully
- * applied.
+ * applied. It should also report how many bytes of the region have passed the
+ * filters (&struct damos_filter) that it handles itself.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
@@ -504,7 +558,7 @@ struct damon_operations {
struct damos *scheme);
unsigned long (*apply_scheme)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
- struct damos *scheme);
+ struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
@@ -552,6 +606,27 @@ struct damon_callback {
void (*before_terminate)(struct damon_ctx *context);
};
+/*
+ * struct damon_call_control - Control damon_call().
+ *
+ * @fn: Function to be called back.
+ * @data: Data that will be passed to @fn.
+ * @return_code: Return code from @fn invocation.
+ *
+ * Control damon_call(), which requests a specific kdamond to invoke a given
+ * function. Refer to damon_call() for more details.
+ */
+struct damon_call_control {
+ int (*fn)(void *data);
+ void *data;
+ int return_code;
+/* private: internal use only */
+ /* informs if the kdamond has finished handling the request */
+ struct completion completion;
+ /* informs if the kdamond canceled the @fn invocation */
+ bool canceled;
+};
+
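
A minimal sketch (hypothetical caller, not from this patch) of the damon_call() interface: the function and the data it reads are assumptions.

#include <linux/damon.h>

/* Hypothetical function to be run in the kdamond context. */
static int example_read_state(void *data)
{
	int *snapshot = data;

	*snapshot = 42;	/* placeholder for reading kdamond-private state */
	return 0;
}

static int example_call(struct damon_ctx *ctx)
{
	int snapshot = 0;
	struct damon_call_control control = {
		.fn = example_read_state,
		.data = &snapshot,
	};

	return damon_call(ctx, &control);
}
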
/**
* struct damon_attrs - Monitoring attributes for accuracy/overhead control.
*
@@ -632,6 +707,12 @@ struct damon_ctx {
/* for scheme quotas prioritization */
unsigned long *regions_score_histogram;
+ struct damon_call_control *call_control;
+ struct mutex call_control_lock;
+
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
+
/* public: */
struct task_struct *kdamond;
struct mutex kdamond_lock;
@@ -725,7 +806,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
struct damos_filter *damos_new_filter(enum damos_filter_type type,
- bool matching);
+ bool matching, bool allow);
void damos_add_filter(struct damos *s, struct damos_filter *f);
void damos_destroy_filter(struct damos_filter *f);
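
For illustration only, a hedged sketch of the updated damos_new_filter() prototype with its new @allow argument; DAMOS_FILTER_TYPE_ANON and the helper name are assumptions not introduced by this hunk.

#include <linux/damon.h>

/* Hypothetical setup: pass only anonymous pages through to the action. */
static int example_add_anon_filter(struct damos *scheme)
{
	struct damos_filter *filter;

	/* matching = true, allow = true: memory matching the type is allowed. */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	if (!filter)
		return -ENOMEM;

	damos_add_filter(scheme, filter);
	return 0;
}
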
@@ -779,6 +860,9 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
+
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index bff956f7b2b9..4afb60365675 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -57,6 +57,7 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n))
extern const struct qstr empty_name;
extern const struct qstr slash_name;
@@ -68,16 +69,24 @@ extern const struct qstr dotdot_name;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 40 /* 192 bytes */
+# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
+# define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
-# define DNAME_INLINE_LEN 44 /* 128 bytes */
+# define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif
+#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))
+
+union shortname_store {
+ unsigned char string[DNAME_INLINE_LEN];
+ unsigned long words[DNAME_INLINE_WORDS];
+};
+
#define d_lock d_lockref.lock
+#define d_iname d_shortname.string
struct dentry {
/* RCU lookup touched fields */
@@ -88,7 +97,7 @@ struct dentry {
struct qstr d_name;
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
- unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ union shortname_store d_shortname;
/* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
@@ -136,7 +145,8 @@ enum d_real_type {
};
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -150,6 +160,8 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;
/*
@@ -589,7 +601,7 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
struct name_snapshot {
struct qstr name;
- unsigned char inline_name[DNAME_INLINE_LEN];
+ union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 59444b495d49..fa2568b4380d 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -67,21 +67,23 @@ static const struct file_operations __fops = { \
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
-#if defined(CONFIG_DEBUG_FS)
-
-struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
-
struct debugfs_short_fops {
ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
loff_t (*llseek) (struct file *, loff_t, int);
};
+#if defined(CONFIG_DEBUG_FS)
+
+struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+
struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct file_operations *fops);
struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
struct dentry *parent, void *data,
+ const void *aux,
const struct debugfs_short_fops *fops);
/**
@@ -126,7 +128,15 @@ struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
const struct debugfs_short_fops *: debugfs_create_file_short, \
struct file_operations *: debugfs_create_file_full, \
struct debugfs_short_fops *: debugfs_create_file_short) \
- (name, mode, parent, data, fops)
+ (name, mode, parent, data, NULL, fops)
+
+#define debugfs_create_file_aux(name, mode, parent, data, aux, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, aux, fops)
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
@@ -153,6 +163,7 @@ void debugfs_remove(struct dentry *dentry);
void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
const struct file_operations *debugfs_real_fops(const struct file *filp);
+const void *debugfs_get_aux(const struct file *file);
int debugfs_file_get(struct dentry *dentry);
void debugfs_file_put(struct dentry *dentry);
@@ -164,8 +175,7 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name);
+int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) __printf(2, 3);
void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
@@ -259,6 +269,14 @@ static inline struct dentry *debugfs_lookup(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_aux(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data, void *aux,
+ const void *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const void *fops)
@@ -312,6 +330,7 @@ static inline void debugfs_lookup_and_remove(const char *name,
{ }
const struct file_operations *debugfs_real_fops(const struct file *filp);
+void *debugfs_get_aux(const struct file *file);
static inline int debugfs_file_get(struct dentry *dentry)
{
@@ -341,10 +360,10 @@ static inline ssize_t debugfs_attr_write_signed(struct file *file,
return -ENODEV;
}
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, char *new_name)
+static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry,
+ const char *fmt, ...)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
static inline void debugfs_create_u8(const char *name, umode_t mode,
@@ -452,6 +471,11 @@ static inline ssize_t debugfs_read_file_str(struct file *file,
#endif
+#define debugfs_create_file_aux_num(name, mode, parent, data, n, fops) \
+ debugfs_create_file_aux(name, mode, parent, data, \
+ (void *)(unsigned long)n, fops)
+#define debugfs_get_aux_num(f) (unsigned long)debugfs_get_aux(f)
+
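
A minimal sketch (hypothetical driver code, not from this patch) of the new aux helpers: a per-file index is stashed at creation time via debugfs_create_file_aux_num() and recovered in the read handler via debugfs_get_aux_num().

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* Hypothetical read handler: report the aux number stored at creation time. */
static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	unsigned long index = debugfs_get_aux_num(file);
	char tmp[24];
	int len = scnprintf(tmp, sizeof(tmp), "%lu\n", index);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct debugfs_short_fops example_fops = {
	.read = example_read,
};

static void example_create(struct dentry *dir, unsigned long index)
{
	debugfs_create_file_aux_num("channel", 0444, dir, NULL, index,
				    &example_fops);
}
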
/**
* debugfs_create_xul - create a debugfs file that is used to read and write an
* unsigned long value, formatted in hexadecimal
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6639f48dac36..800dcc360db2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,25 +29,39 @@ struct task_delay_info {
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
u64 blkio_start;
+ u64 blkio_delay_max;
+ u64 blkio_delay_min;
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_start;
+ u64 swapin_delay_max;
+ u64 swapin_delay_min;
u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
u32 swapin_count; /* total count of swapin */
u64 freepages_start;
+ u64 freepages_delay_max;
+ u64 freepages_delay_min;
u64 freepages_delay; /* wait for memory reclaim */
u64 thrashing_start;
+ u64 thrashing_delay_max;
+ u64 thrashing_delay_min;
u64 thrashing_delay; /* wait for thrashing page */
u64 compact_start;
+ u64 compact_delay_max;
+ u64 compact_delay_min;
u64 compact_delay; /* wait for memory compact */
u64 wpcopy_start;
+ u64 wpcopy_delay_max;
+ u64 wpcopy_delay_min;
u64 wpcopy_delay; /* wait for write-protect copy */
+ u64 irq_delay_max;
+ u64 irq_delay_min;
u64 irq_delay; /* wait for IRQ/SOFTIRQ */
u32 freepages_count; /* total count of memory reclaim */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8321f65897f3..bcc6d7b69470 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -299,6 +299,9 @@ struct target_type {
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif
+#define DM_TARGET_ATOMIC_WRITES 0x00000400
+#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
diff --git a/include/linux/device.h b/include/linux/device.h
index 667cb6db9019..80a5b3268986 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -399,7 +399,23 @@ void __iomem *devm_of_iomap(struct device *dev,
#endif
/* allows to add/remove a custom action to devres stack */
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
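
A brief hedged example (hypothetical driver, assumed names) of when the _nowarn variant is useful: removal may legitimately fail if the action has already been released, and the caller can check the return value rather than triggering the WARN_ON() in devm_remove_action().

#include <linux/device.h>

/* Hypothetical cleanup action registered with devres. */
static void example_disable(void *data)
{
	/* e.g. disable a clock or regulator referenced by data */
}

static void example_teardown(struct device *dev, void *res)
{
	/*
	 * The action may already have been released elsewhere; the _nowarn
	 * variant reports that via its return value instead of WARN_ON().
	 */
	if (devm_remove_action_nowarn(dev, example_disable, res))
		dev_dbg(dev, "action was not registered or already removed\n");
}
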
void devm_release_action(struct device *dev, void (*action)(void *), void *data);
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
@@ -1074,18 +1090,44 @@ void device_del(struct device *dev);
DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
-int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
+int device_for_each_child(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse(struct device *parent, void *data,
+ device_iter_t fn);
int device_for_each_child_reverse_from(struct device *parent,
- struct device *from, const void *data,
- int (*fn)(struct device *, const void *));
-struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-struct device *device_find_child_by_name(struct device *parent,
- const char *name);
-struct device *device_find_any_child(struct device *parent);
+ struct device *from, void *data,
+ device_iter_t fn);
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ return device_find_child(parent, name, device_match_name);
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, device_match_any);
+}
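
As a hedged sketch of the reworked iterator and match signatures (hypothetical bus code, assumed names), a counter callback typed as device_iter_t plus a lookup using one of the generic match helpers:

#include <linux/device.h>

/* Hypothetical device_iter_t callback: count children of a parent device. */
static int example_count_child(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* non-zero would stop the iteration */
}

static void example_scan(struct device *parent, struct device_node *np)
{
	unsigned int count = 0;
	struct device *child;

	device_for_each_child(parent, &count, example_count_child);

	/* Lookup with a generic match helper; the reference must be dropped. */
	child = device_find_child(parent, np, device_match_of_node);
	if (child)
		put_device(child);
}
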
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index b18658bce2c3..f5a56efd2bd6 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -134,6 +134,7 @@ typedef int (*device_match_t)(struct device *dev, const void *data);
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
+int device_match_type(struct device *dev, const void *type);
int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
@@ -141,9 +142,12 @@ int device_match_acpi_dev(struct device *dev, const void *adev);
int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
+/* Device iterating function type for various driver core for_each APIs */
+typedef int (*device_iter_t)(struct device *dev, void *data);
+
/* iterator helpers for buses */
-int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
+ void *data, device_iter_t fn);
struct device *bus_find_device(const struct bus_type *bus, struct device *start,
const void *data, device_match_t match);
/**
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index 518c9c83d64b..45ee3a634999 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -82,18 +82,16 @@ bool class_is_registered(const struct class *class);
struct class_compat;
struct class_compat *class_compat_register(const char *name);
void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
+int class_compat_create_link(struct class_compat *cls, struct device *dev);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev);
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
const struct device *start, const struct device_type *type);
struct device *class_dev_iter_next(struct class_dev_iter *iter);
void class_dev_iter_exit(struct class_dev_iter *iter);
-int class_for_each_device(const struct class *class, const struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
+int class_for_each_device(const struct class *class, const struct device *start,
+ void *data, device_iter_t fn);
struct device *class_find_device(const struct class *class, const struct device *start,
const void *data, device_match_t match);
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 5c04b8e3833b..cd8e0f0a634b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -154,7 +154,7 @@ void driver_remove_file(const struct device_driver *driver,
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len);
int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *dev, void *));
+ void *data, device_iter_t fn);
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
device_match_t match);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index e5815867aba9..053c57e61869 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -363,7 +363,6 @@ void efi_native_runtime_setup(void);
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
@@ -373,7 +372,6 @@ void efi_native_runtime_setup(void);
#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
@@ -1286,8 +1284,6 @@ struct linux_efi_memreserve {
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
-char *efi_systab_show_arch(char *str);
-
/*
* The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
* to the kernel by an EFI boot loader. The table contains a packed
diff --git a/include/linux/export.h b/include/linux/export.h
index 2633df4d31e6..a8c23d945634 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -52,9 +52,24 @@
#else
+#ifdef CONFIG_GENDWARFKSYMS
+/*
+ * With CONFIG_GENDWARFKSYMS, ensure the compiler emits debugging
+ * information for all exported symbols, including those defined in
+ * different TUs, by adding a __gendwarfksyms_ptr_<symbol> pointer
+ * that's discarded during the final link.
+ */
+#define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
+#else
+#define __GENDWARFKSYMS_EXPORT(sym)
+#endif
+
#define __EXPORT_SYMBOL(sym, license, ns) \
extern typeof(sym) sym; \
__ADDRESSABLE(sym) \
+ __GENDWARFKSYMS_EXPORT(sym) \
asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
#endif
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 89ff45bd6f01..78f660ebc318 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -89,6 +89,16 @@
#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
FAN_RENAME)
+/* Content events can be used to inspect file content */
+#define FANOTIFY_CONTENT_PERM_EVENTS (FAN_OPEN_PERM | FAN_OPEN_EXEC_PERM | \
+ FAN_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FANOTIFY_PRE_CONTENT_EVENTS (FAN_PRE_ACCESS)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FANOTIFY_CONTENT_PERM_EVENTS | \
+ FANOTIFY_PRE_CONTENT_EVENTS)
+
/* Events that can be reported with event->fd */
#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
@@ -104,10 +114,6 @@
FANOTIFY_INODE_EVENTS | \
FANOTIFY_ERROR_EVENTS)
-/* Events that require a permission response from user */
-#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
- FAN_OPEN_EXEC_PERM)
-
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
@@ -126,7 +132,9 @@
/* These masks check for invalid bits in permission responses. */
#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
-#define FANOTIFY_RESPONSE_VALID_MASK (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS)
+#define FANOTIFY_RESPONSE_VALID_MASK \
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS | \
+ (FAN_ERRNO_MASK << FAN_ERRNO_SHIFT))
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..4f87a908ab4f
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test: Pointer to struct kunit instance.
+ * @dsp: Pointer to struct cs_dsp instance.
+ * @local: Private data for each test suite.
+ */
+struct cs_dsp_test {
+ struct kunit *test;
+ struct cs_dsp *dsp;
+
+ struct cs_dsp_test_local *local;
+
+ /* Following members are private */
+ bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id: Algorithm ID.
+ * @ver: Algorithm version.
+ * @xm_base_words: XM base address in DSP words.
+ * @xm_size_words: XM size in DSP words.
+ * @ym_base_words: YM base address in DSP words.
+ * @ym_size_words: YM size in DSP words.
+ * @zm_base_words: ZM base address in DSP words.
+ * @zm_size_words: ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+ unsigned int id;
+ unsigned int ver;
+ unsigned int xm_base_words;
+ unsigned int xm_size_words;
+ unsigned int ym_base_words;
+ unsigned int ym_size_words;
+ unsigned int zm_base_words;
+ unsigned int zm_size_words;
+};
+
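
For orientation (hypothetical KUnit test code, not part of this header), a mock algorithm table might be described and turned into an XM header blob roughly as follows; the IDs and sizes are placeholders.

#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/firmware/cirrus/cs_dsp_test_utils.h>

/* Hypothetical mock algorithm description used by a KUnit test. */
static const struct cs_dsp_mock_alg_def example_algs[] = {
	{
		.id = 0xfafa,
		.ver = 0x100000,
		.xm_base_words = 60,
		.xm_size_words = 32,
		.ym_base_words = 0,
		.ym_size_words = 16,
	},
};

static void example_build_header(struct cs_dsp_test *priv)
{
	struct cs_dsp_mock_xm_header *header;

	header = cs_dsp_create_mock_xm_header(priv, example_algs,
					      ARRAY_SIZE(example_algs));
	KUNIT_ASSERT_NOT_NULL(priv->test, header);
	cs_dsp_mock_xm_header_write_to_regmap(header);
}
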
+struct cs_dsp_mock_coeff_def {
+ const char *shortname;
+ const char *fullname;
+ const char *description;
+ u16 type;
+ u16 flags;
+ u16 mem_type;
+ unsigned int offset_dsp_words;
+ unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv: Pointer to the struct cs_dsp_test.
+ * @blob_data: Pointer to the created blob data.
+ * @blob_size_bytes: Size of the data at blob_data.
+ */
+struct cs_dsp_mock_xm_header {
+ struct cs_dsp_test *test_priv;
+ void *blob_data;
+ size_t blob_size_bytes;
+};
+
+struct cs_dsp_mock_wmfw_builder;
+struct cs_dsp_mock_bin_builder;
+
+extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase;
+extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase;
+extern const unsigned int cs_dsp_mock_halo_core_base;
+extern const unsigned int cs_dsp_mock_halo_sysinfo_base;
+
+extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[];
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes);
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type);
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type);
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv);
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type);
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words);
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type);
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs);
+
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv);
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg);
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs);
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes);
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv);
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs);
+
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version);
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info);
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name);
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
+
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version);
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info);
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description);
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def);
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder);
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder);
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder);
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 4621aec0328c..983e1591bbba 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -105,6 +105,14 @@ bool qcom_scm_ice_available(void);
int qcom_scm_ice_invalidate_key(u32 index);
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+bool qcom_scm_has_wrapped_key_support(void);
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size);
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size);
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size);
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size);
bool qcom_scm_hdcp_available(void);
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a4af70367f8a..2c3b2f8a621f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -173,13 +173,20 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
-/* FMODE_* bit 24 */
-
/* File is embedded in backing_file object */
-#define FMODE_BACKING ((__force fmode_t)(1 << 25))
+#define FMODE_BACKING ((__force fmode_t)(1 << 24))
+
+/*
+ * Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25))
-/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26))
+/*
+ * Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
@@ -191,6 +198,31 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
/*
+ * The two FMODE_NONOTIFY* define which fsnotify events should not be generated
+ * for a file. These are the possible values of (f->f_mode &
+ * FMODE_FSNOTIFY_MASK) and their meaning:
+ *
+ * FMODE_NONOTIFY - suppress all (incl. non-permission) events.
+ * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only pre-content events.
+ */
+#define FMODE_FSNOTIFY_MASK \
+ (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
+
+#define FMODE_FSNOTIFY_NONE(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+#define FMODE_FSNOTIFY_PERM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0 || \
+ (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
+#define FMODE_FSNOTIFY_HSM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0)
+#else
+#define FMODE_FSNOTIFY_PERM(mode) 0
+#define FMODE_FSNOTIFY_HSM(mode) 0
+#endif
+
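
To make the three mode combinations concrete, a hedged sketch (hypothetical helper, assuming CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y; with it disabled the PERM and HSM macros evaluate to 0) of how a file's suppression mode could be classified:

#include <linux/fs.h>

/* Hypothetical classification of a file's fsnotify suppression mode. */
static const char *example_fsnotify_mode(const struct file *file)
{
	fmode_t mode = file->f_mode;

	if (FMODE_FSNOTIFY_NONE(mode))
		return "all fsnotify events suppressed";
	if (FMODE_FSNOTIFY_HSM(mode))
		return "permission and pre-content events enabled";
	if (FMODE_FSNOTIFY_PERM(mode))
		return "permission events enabled, pre-content suppressed";
	return "permission (incl. pre-content) events suppressed";
}
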
+/*
* Attribute flags. These should be or-ed together to figure out what
* has been changed!
*/
@@ -758,6 +790,19 @@ struct inode {
static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
{
+ int testlen;
+
+ /*
+ * TODO: patch it into a debug-only check if relevant macros show up.
+ * In the meantime, since we are suffering strlen even on production kernels
+ * to find the right length, do a fixup if the wrong value got passed.
+ */
+ testlen = strlen(link);
+ if (testlen != linklen) {
+ WARN_ONCE(1, "bad length passed for symlink [%s] (got %d, expected %d)",
+ link, linklen, testlen);
+ linklen = testlen;
+ }
inode->i_link = link;
inode->i_linklen = linklen;
inode->i_opflags |= IOP_CACHED_LINK;
@@ -1246,6 +1291,7 @@ extern int send_sigurg(struct file *file);
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
+#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
/* Possible states of 'frozen' field */
enum {
@@ -2767,6 +2813,8 @@ static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
}
struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
+struct file *dentry_open_nonotify(const struct path *path, int flags,
+ const struct cred *cred);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
struct path *backing_file_user_path(struct file *f);
@@ -2890,6 +2938,8 @@ extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
+ loff_t end);
static inline int file_write_and_wait(struct file *file)
{
@@ -2922,6 +2972,11 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
if (ret)
return ret;
+ } else if (iocb->ki_flags & IOCB_DONTCACHE) {
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ filemap_fdatawrite_range_kick(mapping, iocb->ki_pos,
+ iocb->ki_pos + count);
}
return count;
@@ -3075,6 +3130,34 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
+
+/*
+ * Do not prevent write to executable file when watched by pre-content events.
+ *
+ * Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at
+ * the time of file open and remains constant for the entire lifetime of the
+ * file, so pre-content watches added after execution starts, or removed before
+ * it ends, will not cause an i_writecount reference leak.
+ */
+static inline int exe_file_deny_write_access(struct file *exe_file)
+{
+ if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return 0;
+ return deny_write_access(exe_file);
+}
+static inline void exe_file_allow_write_access(struct file *exe_file)
+{
+ if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return;
+ allow_write_access(exe_file);
+}
+
+static inline void file_set_fsnotify_mode(struct file *file, fmode_t mode)
+{
+ file->f_mode &= ~FMODE_FSNOTIFY_MASK;
+ file->f_mode |= mode;
+}
+
static inline bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
@@ -3730,11 +3813,9 @@ struct ctl_table;
int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
-#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & __FMODE_NONOTIFY)))
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
static inline bool is_sxid(umode_t mode)
{
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 772f822dc6b8..18855cb44b1c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -192,7 +192,8 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
static inline struct fscrypt_inode_info *
fscrypt_get_inode_info(const struct inode *inode)
@@ -711,8 +712,8 @@ static inline u64 fscrypt_fname_siphash(const struct inode *dir,
return 0;
}
-static inline int fscrypt_d_revalidate(struct dentry *dentry,
- unsigned int flags)
+static inline int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
return 1;
}
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index c90ec889bfc2..99f30c7d6208 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -438,21 +438,21 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
extern const struct bus_type fsl_mc_bus_type;
-extern struct device_type fsl_mc_bus_dprc_type;
-extern struct device_type fsl_mc_bus_dpni_type;
-extern struct device_type fsl_mc_bus_dpio_type;
-extern struct device_type fsl_mc_bus_dpsw_type;
-extern struct device_type fsl_mc_bus_dpbp_type;
-extern struct device_type fsl_mc_bus_dpcon_type;
-extern struct device_type fsl_mc_bus_dpmcp_type;
-extern struct device_type fsl_mc_bus_dpmac_type;
-extern struct device_type fsl_mc_bus_dprtc_type;
-extern struct device_type fsl_mc_bus_dpseci_type;
-extern struct device_type fsl_mc_bus_dpdmux_type;
-extern struct device_type fsl_mc_bus_dpdcei_type;
-extern struct device_type fsl_mc_bus_dpaiop_type;
-extern struct device_type fsl_mc_bus_dpci_type;
-extern struct device_type fsl_mc_bus_dpdmai_type;
+extern const struct device_type fsl_mc_bus_dprc_type;
+extern const struct device_type fsl_mc_bus_dpni_type;
+extern const struct device_type fsl_mc_bus_dpio_type;
+extern const struct device_type fsl_mc_bus_dpsw_type;
+extern const struct device_type fsl_mc_bus_dpbp_type;
+extern const struct device_type fsl_mc_bus_dpcon_type;
+extern const struct device_type fsl_mc_bus_dpmcp_type;
+extern const struct device_type fsl_mc_bus_dpmac_type;
+extern const struct device_type fsl_mc_bus_dprtc_type;
+extern const struct device_type fsl_mc_bus_dpseci_type;
+extern const struct device_type fsl_mc_bus_dpdmux_type;
+extern const struct device_type fsl_mc_bus_dpdcei_type;
+extern const struct device_type fsl_mc_bus_dpaiop_type;
+extern const struct device_type fsl_mc_bus_dpci_type;
+extern const struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 278620e063ab..6a33288bd6a1 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -108,38 +108,35 @@ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
}
-static inline int fsnotify_file(struct file *file, __u32 mask)
+static inline int fsnotify_path(const struct path *path, __u32 mask)
{
- const struct path *path;
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+}
+static inline int fsnotify_file(struct file *file, __u32 mask)
+{
/*
* FMODE_NONOTIFY are fds generated by fanotify itself which should not
* generate new events. We also don't want to generate events for
* FMODE_PATH fds (involves open & close events) as they are just
* handle creation / destruction events and not "real" file events.
*/
- if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
return 0;
- path = &file->f_path;
- /* Permission events require group prio >= FSNOTIFY_PRIO_CONTENT */
- if (mask & ALL_FSNOTIFY_PERM_EVENTS &&
- !fsnotify_sb_has_priority_watchers(path->dentry->d_sb,
- FSNOTIFY_PRIO_CONTENT))
- return 0;
-
- return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+ return fsnotify_path(&file->f_path, mask);
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+
+void file_set_fsnotify_mode_from_watchers(struct file *file);
+
/*
* fsnotify_file_area_perm - permission hook before access to file range
*/
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
const loff_t *ppos, size_t count)
{
- __u32 fsnotify_mask = FS_ACCESS_PERM;
-
/*
* filesystem may be modified in the context of permission events
* (e.g. by HSM filling a file on access), so sb freeze protection
@@ -147,14 +144,49 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
*/
lockdep_assert_once(file_write_not_started(file));
+ if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
+ return 0;
+
+ if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
+ return 0;
+
+ /*
+ * read()/write() and other types of access generate pre-content events.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ int ret = fsnotify_pre_content(&file->f_path, ppos, count);
+
+ if (ret)
+ return ret;
+ }
+
if (!(perm_mask & MAY_READ))
return 0;
- return fsnotify_file(file, fsnotify_mask);
+ /*
+ * read() also generates the legacy FS_ACCESS_PERM event, so content
+ * scanners can inspect the content filled by pre-content event.
+ */
+ return fsnotify_path(&file->f_path, FS_ACCESS_PERM);
+}
+
+/*
+ * fsnotify_truncate_perm - permission hook before file truncate
+ */
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) ||
+ !fsnotify_sb_has_priority_watchers(inode->i_sb,
+ FSNOTIFY_PRIO_PRE_CONTENT))
+ return 0;
+
+ return fsnotify_pre_content(path, &length, 0);
}
/*
- * fsnotify_file_perm - permission hook before file access
+ * fsnotify_file_perm - permission hook before file access (unknown range)
*/
static inline int fsnotify_file_perm(struct file *file, int perm_mask)
{
@@ -168,22 +200,34 @@ static inline int fsnotify_open_perm(struct file *file)
{
int ret;
+ if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
+ return 0;
+
if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM);
if (ret)
return ret;
}
- return fsnotify_file(file, FS_OPEN_PERM);
+ return fsnotify_path(&file->f_path, FS_OPEN_PERM);
}
#else
+static inline void file_set_fsnotify_mode_from_watchers(struct file *file)
+{
+}
+
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
const loff_t *ppos, size_t count)
{
return 0;
}
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ return 0;
+}
+
static inline int fsnotify_file_perm(struct file *file, int perm_mask)
{
return 0;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 3ecf7768e577..0d24a21a8e60 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -55,6 +55,9 @@
#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
+/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
+
+#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
/*
* Set on inode mark that cares about things that happen to its children.
@@ -77,8 +80,14 @@
*/
#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
- FS_OPEN_EXEC_PERM)
+/* Content events can be used to inspect file content */
+#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
+ FS_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS)
+
+#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \
+ FSNOTIFY_PRE_CONTENT_EVENTS)
/*
* This is a list of all events that may get sent to a parent that is watching
@@ -285,6 +294,7 @@ static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
/* When calling fsnotify tell it if the data is a path or inode */
enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_FILE_RANGE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
FSNOTIFY_EVENT_DENTRY,
@@ -297,6 +307,17 @@ struct fs_error_report {
struct super_block *sb;
};
+struct file_range {
+ const struct path *path;
+ loff_t pos;
+ size_t count;
+};
+
+static inline const struct path *file_range_path(const struct file_range *range)
+{
+ return range->path;
+}
+
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
{
switch (data_type) {
@@ -306,6 +327,8 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return d_inode(file_range_path(data)->dentry);
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *)data)->inode;
default:
@@ -321,6 +344,8 @@ static inline struct dentry *fsnotify_data_dentry(const void *data, int data_typ
return (struct dentry *)data;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry;
default:
return NULL;
}
@@ -332,6 +357,8 @@ static inline const struct path *fsnotify_data_path(const void *data,
switch (data_type) {
case FSNOTIFY_EVENT_PATH:
return data;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data);
default:
return NULL;
}
@@ -347,6 +374,8 @@ static inline struct super_block *fsnotify_data_sb(const void *data,
return ((struct dentry *)data)->d_sb;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry->d_sb;
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *) data)->sb;
default:
@@ -366,6 +395,18 @@ static inline struct fs_error_report *fsnotify_data_error_report(
}
}
+static inline const struct file_range *fsnotify_data_file_range(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return (struct file_range *)data;
+ default:
+ return NULL;
+ }
+}
+
/*
* Index to merged marks iterator array that correlates to a type of watch.
* The type of watched object can be deduced from the iterator type, but not
@@ -854,9 +895,17 @@ static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
}
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count);
#else
+static inline int fsnotify_pre_content(const struct path *path,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
static inline int fsnotify(__u32 mask, const void *data, int data_type,
struct inode *dir, const struct qstr *name,
struct inode *inode, u32 cookie)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 07092dfb21a4..fbabc3d848b3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1151,8 +1151,6 @@ struct ftrace_graph_ret {
int depth;
/* Number of functions that overran the depth limit for current task */
unsigned int overrun;
- unsigned long long calltime;
- unsigned long long rettime;
} __packed;
struct fgraph_ops;
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 0d79070c5a70..0731994b9d7c 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -112,6 +112,7 @@ struct fwnode_reference_args {
* @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
+ * @property_read_bool: Return a boolean property value.
* @property_read_int_array: Read an array of integer properties. Return zero on
* success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
@@ -141,6 +142,8 @@ struct fwnode_operations {
(*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
+ bool (*property_read_bool)(const struct fwnode_handle *fwnode,
+ const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b0fe9f62d15b..6bb1a5a7a4ae 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -212,35 +212,31 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
- struct list_head *page_list,
struct page **page_array);
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);
-#define alloc_pages_bulk_array_mempolicy(...) \
- alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_mempolicy(...) \
+ alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
/* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
- __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
-
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \
- __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
+ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
static inline unsigned long
-alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
struct page **page_array)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
+ return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}
-#define alloc_pages_bulk_array_node(...) \
- alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_node(...) \
+ alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
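
A minimal sketch (hypothetical caller, assuming the page array is pre-zeroed) of the renamed bulk allocator: the array form is now the only interface, and the return value is the number of pages present in the array.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical bulk allocation of up to nr order-0 pages into a zeroed array. */
static int example_bulk_alloc(struct page **pages, unsigned long nr)
{
	unsigned long got = alloc_pages_bulk(GFP_KERNEL, nr, pages);

	/* Fall back to single-page allocation for any remaining slots. */
	while (got < nr) {
		pages[got] = alloc_page(GFP_KERNEL);
		if (!pages[got])
			return -ENOMEM;
		got++;
	}
	return 0;
}
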
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
@@ -300,8 +296,6 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
-struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
- struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid);
@@ -312,11 +306,6 @@ static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order
{
return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
-static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
- struct mempolicy *mpol, pgoff_t ilx, int nid)
-{
- return alloc_pages_noprof(gfp, order);
-}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node_noprof(gfp, order, numa_node_id());
@@ -331,7 +320,6 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
#endif
#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
-#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
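
As a hedged illustration of the renamed bulk-allocation entry point above (the helper name, the error handling, and the count of 16 are assumptions, not part of the patch):

#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: try to fill a caller-owned array with 16 order-0 pages. */
static int example_fill_page_array(struct page **pages)
{
	/* alloc_pages_bulk() is the new spelling of alloc_pages_bulk_array(). */
	unsigned long filled = alloc_pages_bulk(GFP_KERNEL, 16, pages);

	if (filled < 16)
		return -ENOMEM;	/* a real caller would release the pages it did get */
	return 0;
}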
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 455f855bc084..96bda41d9148 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -445,7 +445,6 @@ ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
void *buffer, size_t size);
-int hdmi_infoframe_check(union hdmi_infoframe *frame);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 6dbd0d49628f..99fcf65d575f 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -97,6 +97,8 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+#define QM_DEV_ALG_MAX_LEN 256
+
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
@@ -122,6 +124,7 @@ enum qm_hw_ver {
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
QM_HW_V3 = 0x30,
+ QM_HW_V4 = 0x50,
};
enum qm_fun_type {
@@ -156,6 +159,7 @@ enum qm_cap_bits {
QM_SUPPORT_MB_COMMAND,
QM_SUPPORT_SVA_PREFETCH,
QM_SUPPORT_RPM,
+ QM_SUPPORT_DAE,
};
struct qm_dev_alg {
@@ -266,6 +270,8 @@ struct hisi_qm_err_ini {
void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ bool (*dev_is_abnormal)(struct hisi_qm *qm);
+ int (*set_priv_status)(struct hisi_qm *qm);
};
struct hisi_qm_cap_info {
@@ -392,6 +398,8 @@ struct hisi_qm {
struct mutex mailbox_lock;
+ struct mutex ifc_lock;
+
const struct hisi_qm_hw_ops *ops;
struct qm_debug debug;
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index c3b4b7ed7c16..84a5045f80f3 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -125,6 +125,7 @@ struct hrtimer_cpu_base {
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ call_single_data_t csd;
} ____cacheline_aligned;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b94c2e8ee918..93e509b6c00e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -121,6 +121,8 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
MTHP_STAT_ZSWPOUT,
MTHP_STAT_SWPIN,
+ MTHP_STAT_SWPIN_FALLBACK,
+ MTHP_STAT_SWPIN_FALLBACK_CHARGE,
MTHP_STAT_SWPOUT,
MTHP_STAT_SWPOUT_FALLBACK,
MTHP_STAT_SHMEM_ALLOC,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ae4fe8615bb6..ec8c0ccc8f95 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -153,11 +153,11 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
-void folio_putback_active_hugetlb(struct folio *folio);
+void folio_putback_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
@@ -414,7 +414,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
+static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
@@ -430,7 +430,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
return 0;
}
-static inline void folio_putback_active_hugetlb(struct folio *folio)
+static inline void folio_putback_hugetlb(struct folio *folio)
{
}
@@ -681,8 +681,9 @@ struct huge_bootmem_page {
};
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
+ unsigned long addr, bool cow_from_owner);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
@@ -1059,9 +1060,15 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
return -ENOMEM;
}
+static inline int replace_free_hugepage_folios(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
- int avoid_reserve)
+ bool cow_from_owner)
{
return NULL;
}
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 02a226bcf0ed..4179add2864b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -24,7 +24,7 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -768,15 +768,6 @@ struct vmbus_close_msg {
struct vmbus_channel_close_channel msg;
};
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c31fd1dba3bd..2b2af24d2a43 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -244,6 +244,7 @@ enum i2c_driver_flags {
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
+ * @clients: List of detected clients we created (for i2c-core use only)
* @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
@@ -298,6 +299,7 @@ struct i2c_driver {
/* Device detection callback for automatic device creation */
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
+ struct list_head clients;
u32 flags;
};
@@ -313,6 +315,8 @@ struct i2c_driver {
* @dev: Driver model device node for the slave.
* @init_irq: IRQ that was set at initialization
* @irq: indicates the IRQ generated by this device (if any)
+ * @detected: member of an i2c_driver.clients list or i2c-core's
+ * userspace_devices list
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
* calls it to pass on slave events to the slave driver.
* @devres_group_id: id of the devres group that will be created for resources
@@ -332,8 +336,6 @@ struct i2c_client {
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_AUTO 0x100 /* client was auto-detected */
-#define I2C_CLIENT_USER 0x200 /* client was userspace-created */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
@@ -345,6 +347,7 @@ struct i2c_client {
struct device dev; /* the device structure */
int init_irq; /* irq set at initialization */
int irq; /* irq issued by device */
+ struct list_head detected;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
@@ -751,6 +754,9 @@ struct i2c_adapter {
char name[48];
struct completion dev_released;
+ struct mutex userspace_clients_lock;
+ struct list_head userspace_clients;
+
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 0a8a44ac2f02..b674f64d0822 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -283,7 +283,7 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
* module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
* driver
* @__i3cdrv: the I3C driver to register
- * @__i2cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
*
* Provide generic init/exit functions that simply register/unregister an I3C
* and an I2C driver.
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 95b07f8b77fe..00037c13abc8 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -54,15 +54,29 @@
struct serio;
+/**
+ * typedef i8042_filter_t - i8042 filter callback
+ * @data: Data received by the i8042 controller
+ * @str: Status register of the i8042 controller
+ * @serio: Serio of the i8042 controller
+ * @context: Context pointer associated with this callback
+ *
+ * This represents an i8042 filter callback which can be used with i8042_install_filter()
+ * and i8042_remove_filter() to filter the i8042 input for platform-specific key codes.
+ *
+ * Context: Interrupt context.
+ * Returns: true if the data should be filtered out, false otherwise.
+ */
+typedef bool (*i8042_filter_t)(unsigned char data, unsigned char str, struct serio *serio,
+ void *context);
+
#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
+int i8042_install_filter(i8042_filter_t filter, void *context);
+int i8042_remove_filter(i8042_filter_t filter);
#else
@@ -79,14 +93,12 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_install_filter(i8042_filter_t filter, void *context)
{
return -ENODEV;
}
-static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_remove_filter(i8042_filter_t filter)
{
return -ENODEV;
}
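
A minimal sketch of how a platform driver might use the new typedef and context argument; the driver structure and filtering policy are illustrative assumptions, while i8042_filter_t and the two-argument i8042_install_filter() come from the hunk above.

#include <linux/i8042.h>
#include <linux/serio.h>

/* Hypothetical per-driver state handed back through the new context pointer. */
struct example_kbd_quirk {
	bool swallow_next_byte;
};

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio, void *context)
{
	struct example_kbd_quirk *quirk = context;

	/* Return true to filter the byte out, false to pass it to the serio layer. */
	return quirk->swallow_next_byte;
}

static int example_register_filter(struct example_kbd_quirk *quirk)
{
	return i8042_install_filter(example_i8042_filter, quirk);
}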
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index f8c1d2505940..417073c52380 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -29,6 +29,7 @@ struct ad_sd_calib_data {
struct ad_sigma_delta;
struct device;
+struct gpio_desc;
struct iio_dev;
/**
@@ -53,6 +54,7 @@ struct iio_dev;
* @irq_flags: flags for the interrupt used by the triggered buffer
* @num_slots: Number of sequencer slots
* @irq_line: IRQ for reading conversions. If 0, spi->irq will be used
+ * @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip.
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
@@ -69,6 +71,7 @@ struct ad_sigma_delta_info {
unsigned long irq_flags;
unsigned int num_slots;
int irq_line;
+ unsigned int num_resetclks;
};
/**
@@ -85,6 +88,7 @@ struct ad_sigma_delta {
/* private: */
struct completion completion;
+ spinlock_t irq_lock; /* protects .irq_dis and irq en/disable state */
bool irq_dis;
bool bus_locked;
@@ -96,7 +100,8 @@ struct ad_sigma_delta {
unsigned int active_slots;
unsigned int current_slot;
unsigned int num_slots;
- int irq_line;
+ struct gpio_desc *rdy_gpiod;
+ int irq_line;
bool status_appended;
/* map slots to channels in order to know what to expect from devices */
unsigned int *slots;
@@ -178,8 +183,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
-int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
- unsigned int reset_length);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta);
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
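
A hedged sketch of how a chip driver adapts to the new interface: the reset length moves out of the ad_sd_reset() call and into ad_sigma_delta_info. Only .num_resetclks and the new ad_sd_reset() signature come from the hunk; the 64-clock value and the trimmed-down info struct are illustrative.

#include <linux/iio/adc/ad_sigma_delta.h>

/* Illustrative: a real driver also fills set_channel(), set_mode(), etc. */
static const struct ad_sigma_delta_info example_sd_info = {
	.num_resetclks = 64,	/* assumed reset length for this hypothetical chip */
};

static int example_chip_reset(struct ad_sigma_delta *sd)
{
	/* The reset length now comes from the driver's ad_sigma_delta_info. */
	return ad_sd_reset(sd);
}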
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 418b1307d3f2..3b8d618bb3df 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -37,7 +37,7 @@ int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
void *data, int64_t timestamp)
{
- if (indio_dev->scan_timestamp) {
+ if (ACCESS_PRIVATE(indio_dev, scan_timestamp)) {
size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
((int64_t *)data)[ts_offset] = timestamp;
}
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 333d1d8ccb37..6a4479616479 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -418,7 +418,7 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
* @chan: The channel being queried.
* @attr: The ext_info attribute to read.
* @buf: Where to store the attribute value. Assumed to hold
- * at least PAGE_SIZE bytes.
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
*
* Returns the number of bytes written to buf (perhaps w/o zero termination;
* it need not even be a string), or an error code.
@@ -445,7 +445,7 @@ ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
* iio_read_channel_label() - read label for a given channel
* @chan: The channel being queried.
* @buf: Where to store the attribute value. Assumed to hold
- * at least PAGE_SIZE bytes.
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
*
* Returns the number of bytes written to buf, or an error code.
*/
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
index 9cb6c80dea71..e5de7a124bad 100644
--- a/include/linux/iio/iio-gts-helper.h
+++ b/include/linux/iio/iio-gts-helper.h
@@ -188,6 +188,9 @@ int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
int scale_int, int scale_nano,
int *gain_sel);
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel);
int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
int *scale_nano);
int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
@@ -196,6 +199,9 @@ int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
int old_time, int new_time,
int *new_gain);
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range);
int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
int *length);
int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index a89e7e43e441..4247497f3f8b 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -28,7 +28,7 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
- * @bounce_buffer: for devices that call iio_push_to_buffers_with_timestamp_unaligned()
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
* @bounce_buffer_size: size of currently allocated bounce buffer
* @scan_index_timestamp: cache of the index to the timestamp
* @clock_id: timestamping clock posix identifier
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index ae65890d4567..56161e02f002 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -611,7 +611,7 @@ struct iio_dev {
const unsigned long *available_scan_masks;
unsigned int __private masklength;
const unsigned long *active_scan_mask;
- bool scan_timestamp;
+ bool __private scan_timestamp;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index e6a75356567a..4bb98d9731de 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -99,7 +99,6 @@ struct adis_data {
* @spi: Reference to SPI device which owns this ADIS IIO device
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
- * @burst: ADIS burst transfer information
* @burst_extra_len: Burst extra length. Should only be used by devices that can
* dynamically change their burst mode length.
* @state_lock: Lock used by the device to protect state
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index 37572e4dc73a..1ee237b56183 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -72,6 +72,12 @@
#define TIM17_OC1 "tim17_oc1"
+#define TIM20_OC1 "tim20_oc1"
+#define TIM20_OC2 "tim20_oc2"
+#define TIM20_OC3 "tim20_oc3"
+#define TIM20_TRGO "tim20_trgo"
+#define TIM20_TRGO2 "tim20_trgo2"
+
#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
bool is_stm32_timer_trigger(struct iio_trigger *trig);
#else
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ce86b09ae80f..bba2a51c87d2 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -181,11 +181,21 @@ struct io_pgtable_cfg {
};
/**
+ * struct arm_lpae_io_pgtable_walk_data - information from a pgtable walk
+ *
+ * @ptes: The recorded PTE values from the walk
+ */
+struct arm_lpae_io_pgtable_walk_data {
+ u64 ptes[4];
+};
+
+/**
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
* @map_pages: Map a physically contiguous range of pages of the same size.
* @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
+ * @pgtable_walk: (optional) Perform a page table walk for a given iova.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
@@ -199,6 +209,7 @@ struct io_pgtable_ops {
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd);
int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
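
A hedged sketch of how a format implementing the new optional op might be exercised from a debug path; the helper and the printing are illustrative, while struct arm_lpae_io_pgtable_walk_data and the pgtable_walk() signature come from the hunk above.

#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative: dump the recorded PTEs covering @iova, if the op is provided. */
static void example_dump_walk(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct arm_lpae_io_pgtable_walk_data wd = {};
	int i;

	if (!ops->pgtable_walk || ops->pgtable_walk(ops, iova, &wd))
		return;

	for (i = 0; i < ARRAY_SIZE(wd.ptes); i++)
		pr_info("level %d pte: %#llx\n", i, wd.ptes[i]);
}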
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index a3ce553413de..abd0c8bd950b 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -19,8 +19,8 @@ struct io_uring_cmd {
};
struct io_uring_cmd_data {
- struct io_uring_sqe sqes[2];
void *op_data;
+ struct io_uring_sqe sqes[2];
};
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 623d8e798a11..3def525a1da3 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -222,7 +222,8 @@ struct io_alloc_cache {
void **entries;
unsigned int nr_cached;
unsigned int max_cached;
- size_t elem_size;
+ unsigned int elem_size;
+ unsigned int init_clear;
};
struct io_ring_ctx {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 318d27841130..38c65e92ecd0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -587,9 +587,6 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
- * @remove_dev_pasid: Remove any translation configurations of a specific
- * pasid, so that any DMA transactions with this pasid
- * will be blocked by the hardware.
* @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
* the @dev, as the set of virtualization resources shared/passed
* to user space IOMMU instance. And associate it with a nesting
@@ -647,8 +644,6 @@ struct iommu_ops {
struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
- void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain);
struct iommufd_viommu *(*viommu_alloc)(
struct device *dev, struct iommu_domain *parent_domain,
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 50f7ea8714bf..561025b4f3d9 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
#include <linux/blkdev.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#endif
#define journal_oom_retry 1
@@ -1242,13 +1242,6 @@ struct journal_s
void *j_private;
/**
- * @j_chksum_driver:
- *
- * Reference to checksum algorithm driver via cryptoapi.
- */
- struct crypto_shash *j_chksum_driver;
-
- /**
* @j_csum_seed:
*
* Precomputed journal UUID checksum for seeding other checksums.
@@ -1750,10 +1743,7 @@ static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
- WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
- journal->j_chksum_driver == NULL);
-
- return journal->j_chksum_driver != NULL;
+ return jbd2_journal_has_csum_v2or3_feature(journal);
}
static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
@@ -1790,27 +1780,10 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-/* JBD uses a CRC32 checksum */
-#define JBD_MAX_CHECKSUM_SIZE 4
-
static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
const void *address, unsigned int length)
{
- DEFINE_RAW_FLEX(struct shash_desc, desc, __ctx,
- DIV_ROUND_UP(JBD_MAX_CHECKSUM_SIZE,
- sizeof(*((struct shash_desc *)0)->__ctx)));
- int err;
-
- BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
- JBD_MAX_CHECKSUM_SIZE);
-
- desc->tfm = journal->j_chksum_driver;
- *(u32 *)desc->__ctx = crc;
-
- err = crypto_shash_update(desc, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc->__ctx;
+ return crc32c(crc, address, length);
}
/* Return most recent uncommitted transaction */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index ed945f42e064..0ea8c9887429 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -537,7 +537,7 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
*
* Return: jiffies value
*/
-#define secs_to_jiffies(_secs) ((_secs) * HZ)
+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f5a2727ca4a9..fdb79dd1ebd8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -75,6 +75,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
extern bool static_key_initialized;
@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
#endif /* CONFIG_JUMP_LABEL */
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
+
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
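
With the new guard definition, code that already pairs jump_label_lock() with jump_label_unlock() can use the scope-based form from <linux/cleanup.h>; a minimal sketch (the surrounding function is illustrative):

#include <linux/jump_label.h>

static void example_update_under_lock(void)
{
	/* Acquires jump_label_lock() and releases it automatically at scope exit. */
	guard(jump_label_lock)();

	/* ... manipulate static keys while holding the lock ... */
}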
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index c3f075e8f60c..1c6a6c1704d8 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -57,10 +57,10 @@ static inline void *dereference_symbol_descriptor(void *ptr)
preempt_disable();
mod = __module_address((unsigned long)ptr);
- preempt_enable();
if (mod)
ptr = dereference_module_function_descriptor(mod, ptr);
+ preempt_enable();
#endif
return ptr;
}
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6bbfc8aa42e8..890011071f2b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -153,7 +153,7 @@ static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
- * kasan_unpoison_new_object - Repoison a new slab object.
+ * kasan_poison_new_object - Repoison a new slab object.
* @cache: Cache the object belong to.
* @object: Pointer to the object.
*
@@ -491,7 +491,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
@@ -509,7 +508,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 86c0f1d18998..9a2fa013c91d 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -20,19 +20,6 @@ struct kcore_list {
int type;
};
-struct vmcore {
- struct list_head list;
- unsigned long long paddr;
- unsigned long long size;
- loff_t offset;
-};
-
-struct vmcoredd_node {
- struct list_head list; /* List of dumps */
- void *buf; /* Buffer containing device's dump */
- unsigned int size; /* Size of the buffer */
-};
-
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index f6c2ddb16b95..905a2e2f45f6 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -140,9 +140,6 @@ extern const char *kdb_diemsg;
extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
-extern void kdb_save_flags(void);
-extern void kdb_restore_flags(void);
-
#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 76e891ee9e37..51ef131e66b7 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -309,11 +309,9 @@ extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
#ifdef CONFIG_SERIAL_KGDB_NMI
extern int kgdb_register_nmi_console(void);
extern int kgdb_unregister_nmi_console(void);
-extern bool kgdb_nmi_poll_knock(void);
#else
static inline int kgdb_register_nmi_console(void) { return 0; }
static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return true; }
#endif
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index be707748e7ce..150fe2ae1b6b 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -52,8 +52,6 @@ const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 6a53ac4885bb..d73095b5cd96 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -93,6 +93,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 401439bb21e3..f34f4cfaa513 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -255,11 +255,17 @@ union kvm_mmu_notifier_arg {
unsigned long attributes;
};
+enum kvm_gfn_range_filter {
+ KVM_FILTER_SHARED = BIT(0),
+ KVM_FILTER_PRIVATE = BIT(1),
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
union kvm_mmu_notifier_arg arg;
+ enum kvm_gfn_range_filter attr_filter;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -596,7 +602,12 @@ struct kvm_memory_slot {
#ifdef CONFIG_KVM_PRIVATE_MEM
struct {
- struct file __rcu *file;
+ /*
+ * Writes protected by kvm->slots_lock. Acquiring a
+ * reference via kvm_gmem_get_file() is protected by
+ * either kvm->slots_lock or kvm->srcu.
+ */
+ struct file *file;
pgoff_t pgoff;
} gmem;
#endif
@@ -963,6 +974,15 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
int num_vcpus = atomic_read(&kvm->online_vcpus);
+
+ /*
+ * Explicitly verify the target vCPU is online, as the anti-speculation
+ * logic only limits the CPU's ability to speculate, e.g. given a "bad"
+ * index, clamping the index to 0 would return vCPU0, not NULL.
+ */
+ if (i >= num_vcpus)
+ return NULL;
+
i = array_index_nospec(i, num_vcpus);
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
@@ -970,9 +990,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
- (atomic_read(&kvm->online_vcpus) - 1))
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ if (atomic_read(&kvm->online_vcpus)) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
@@ -1183,7 +1204,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
* -- just change its flags
*
* Since flags can be changed by some of these operations, the following
- * differentiation is the best we can do for __kvm_set_memory_region():
+ * differentiation is the best we can do for kvm_set_memory_region():
*/
enum kvm_mr_change {
KVM_MR_CREATE,
@@ -1192,10 +1213,8 @@ enum kvm_mr_change {
KVM_MR_FLAGS_ONLY,
};
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem);
-int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem);
+int kvm_set_internal_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1596,7 +1615,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
-int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
void kvm_arch_create_vm_debugfs(struct kvm *kvm);
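
Since kvm_get_vcpu() can now return NULL for an index at or beyond online_vcpus (rather than clamping to vCPU0), callers that receive the index from elsewhere should check the result; a hedged sketch (the helper name and the -EINVAL choice are illustrative):

#include <linux/kvm_host.h>

static int example_kick_vcpu_by_index(struct kvm *kvm, int idx)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, idx);

	/* NULL now signals an out-of-range index instead of returning vCPU0. */
	if (!vcpu)
		return -EINVAL;

	kvm_vcpu_kick(vcpu);
	return 0;
}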
diff --git a/include/linux/libata.h b/include/linux/libata.h
index be5183d75736..c1c57f814b98 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1199,10 +1199,9 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
-extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
-extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+extern int ata_scsi_sdev_init(struct scsi_device *sdev);
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern void ata_scsi_sdev_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
@@ -1301,8 +1300,8 @@ extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern void ata_port_free(struct ata_port *ap);
extern int ata_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_tport_delete(struct ata_port *ap);
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap);
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1458,8 +1457,8 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_alloc = ata_scsi_slave_alloc, \
- .slave_destroy = ata_scsi_slave_destroy, \
+ .sdev_init = ata_scsi_sdev_init, \
+ .sdev_destroy = ata_scsi_sdev_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
.max_sectors = ATA_MAX_SECTORS_LBA48
@@ -1468,13 +1467,13 @@ extern const struct attribute_group *ata_common_sdev_groups[];
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy_rr = true, \
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
.tag_alloc_policy_rr = true, \
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 05c166811f6b..fe739d35a864 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -91,13 +91,24 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
- * nothing. Therefore the caller does not need to keep state about whether or
- * not the element already belongs in the list and is allowed to lazy update
- * it. Note however that this is valid for *a* list, not *this* list. If
- * the caller organize itself in a way that elements can be in more than
- * one type of list, it is up to the caller to fully remove the item from
- * the previous list (with list_lru_del() for instance) before moving it
- * to @lru.
+ * nothing. This means that it is not necessary to keep state about whether or
+ * not the element already belongs in the list. That said, this logic only
+ * works if the item is in *this* list. If the item might be in some other
+ * list, then you cannot rely on this check and you must remove it from the
+ * other list before trying to insert it.
+ *
+ * The lru list consists of many sublists internally; the @nid and @memcg
+ * parameters are used to determine which sublist to insert the item into.
+ * It's important to use the right value of @nid and @memcg when deleting the
+ * item, since it might otherwise get deleted from the wrong sublist.
+ *
+ * This also applies when attempting to insert the item multiple times - if
+ * the item is currently in one sublist and you call list_lru_add() again, you
+ * must pass the right @nid and @memcg parameters so that the same sublist is
+ * used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt).
*
* Return: true if the list was updated, false otherwise
*/
@@ -113,7 +124,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
@@ -125,8 +136,19 @@ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
* @memcg: the cgroup of the sublist to delete the item from.
*
* This function works analogously as list_lru_add() in terms of list
- * manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del().
+ * manipulation.
+ *
+ * The comments in list_lru_add() about an element already being in a list are
+ * also valid for list_lru_del(), that is, you can delete an item that has
+ * already been removed or never been added. However, if the item is in a
+ * list, it must be in *this* list, and you must pass the right value of @nid
+ * and @memcg so that the right sublist is used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries
+ * are automatically moved to the parent memcg. This is done in a race-free
+ * way, so during deletion of a memcg both the old and new memcg will resolve
+ * to the same sublist internally.
*
* Return: true if the list was updated, false otherwise
*/
@@ -142,7 +164,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise.
+ * Return: true if the list was updated, false otherwise.
*/
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
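
A minimal sketch of the usage rule the reworked comments describe: the same @nid and @memcg must be passed when adding and deleting so that the same internal sublist is targeted. The object layout and helpers below are illustrative assumptions.

#include <linux/list_lru.h>

struct example_obj {
	struct list_head lru;		/* linked into the list_lru sublist */
	int nid;			/* remembered so deletion hits the same sublist */
	struct mem_cgroup *memcg;
};

static void example_track(struct list_lru *lru, struct example_obj *obj)
{
	list_lru_add(lru, &obj->lru, obj->nid, obj->memcg);
}

static void example_untrack(struct list_lru *lru, struct example_obj *obj)
{
	/* Must match the nid/memcg used at insertion time. */
	list_lru_del(lru, &obj->lru, obj->nid, obj->memcg);
}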
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c39f119659ba..676721ee878d 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -37,12 +37,13 @@ struct lockref {
/**
* lockref_init - Initialize a lockref
* @lockref: pointer to lockref structure
- * @count: initial count
+ *
+ * Initializes @lockref->count to 1.
*/
-static inline void lockref_init(struct lockref *lockref, unsigned int count)
+static inline void lockref_init(struct lockref *lockref)
{
spin_lock_init(&lockref->lock);
- lockref->count = count;
+ lockref->count = 1;
}
void lockref_get(struct lockref *lockref);
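
After this change the initial count is always 1, so a caller migrating from the old two-argument form simply drops the count; a hedged sketch (the containing structure is illustrative):

#include <linux/lockref.h>

struct example_node {
	struct lockref ref;
};

static void example_node_init(struct example_node *node)
{
	/* Was lockref_init(&node->ref, 1); the count of 1 is now implied. */
	lockref_init(&node->ref);
}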
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index b16e15b9587a..ad6042a718b5 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -645,4 +645,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize, const char *dictStart, int dictSize);
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
#endif
diff --git a/include/linux/mailbox/exynos-message.h b/include/linux/mailbox/exynos-message.h
new file mode 100644
index 000000000000..5a9ed5ce2046
--- /dev/null
+++ b/include/linux/mailbox/exynos-message.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Exynos mailbox message.
+ *
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef _LINUX_EXYNOS_MESSAGE_H_
+#define _LINUX_EXYNOS_MESSAGE_H_
+
+#define EXYNOS_MBOX_CHAN_TYPE_DOORBELL 0
+#define EXYNOS_MBOX_CHAN_TYPE_DATA 1
+
+struct exynos_mbox_msg {
+ unsigned int chan_id;
+ unsigned int chan_type;
+};
+
+#endif /* _LINUX_EXYNOS_MESSAGE_H_ */
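
The new header only defines the message layout; presumably a mailbox client fills it and hands it to the framework as the opaque message. A hedged sketch under that assumption (the channel would come from mbox_request_channel() elsewhere, and the id of 0 is illustrative):

#include <linux/mailbox_client.h>
#include <linux/mailbox/exynos-message.h>

static int example_ring_doorbell(struct mbox_chan *chan)
{
	struct exynos_mbox_msg msg = {
		.chan_id = 0,				/* illustrative channel id */
		.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL,
	};

	return mbox_send_message(chan, &msg);
}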
diff --git a/include/linux/mailbox/mchp-ipc.h b/include/linux/mailbox/mchp-ipc.h
new file mode 100644
index 000000000000..f084ac9e291b
--- /dev/null
+++ b/include/linux/mailbox/mchp-ipc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _LINUX_MCHP_IPC_H_
+#define _LINUX_MCHP_IPC_H_
+
+#include <linux/mailbox_controller.h>
+#include <linux/types.h>
+
+struct mchp_ipc_msg {
+ u32 *buf;
+ u16 size;
+};
+
+struct mchp_ipc_sbi_chan {
+ void *buf_base_tx;
+ void *buf_base_rx;
+ void *msg_buf_tx;
+ void *msg_buf_rx;
+ phys_addr_t buf_base_tx_addr;
+ phys_addr_t buf_base_rx_addr;
+ phys_addr_t msg_buf_tx_addr;
+ phys_addr_t msg_buf_rx_addr;
+ int chan_aggregated_irq;
+ int mp_irq;
+ int mc_irq;
+ u32 id;
+ u32 max_msg_size;
+};
+
+#endif /* _LINUX_MCHP_IPC_H_ */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 673d5cae7c81..e79eb6ac516f 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE
+ */
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
@@ -417,6 +421,12 @@ static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
+void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
+ const char *func);
+
+#define memblock_alloc_or_panic(size, align) \
+ __memblock_alloc_or_panic(size, align, __func__)
+
static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
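
A hedged sketch of the intended use of the new wrapper: early-boot callers no longer open-code a panic on failure, because __memblock_alloc_or_panic() panics with the calling function's name. The setup function and the PAGE_SIZE-sized request below are illustrative.

#include <linux/memblock.h>

static void __init example_early_setup(void)
{
	/* Never returns NULL: panics, naming this function, if the request fails. */
	void *table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

	/* ... populate the early table ... */
	(void)table;
}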
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5502aa8e138e..6e74b8254d9b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -620,8 +620,6 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
page_counter_read(&memcg->memory);
}
-void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
-
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
/**
@@ -646,8 +644,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
-int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
- long nr_pages);
+int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp);
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
@@ -677,7 +674,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
__mem_cgroup_uncharge_folios(folios);
}
-void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);
@@ -1046,6 +1042,23 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
void split_page_memcg(struct page *head, int old_order, int new_order);
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ u64 id;
+
+ if (mem_cgroup_disabled())
+ return 0;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ id = cgroup_id(memcg->css.cgroup);
+ rcu_read_unlock();
+ return id;
+}
+
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
@@ -1135,21 +1148,15 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
return false;
}
-static inline void mem_cgroup_commit_charge(struct folio *folio,
- struct mem_cgroup *memcg)
-{
-}
-
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
-static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
- gfp_t gfp, long nr_pages)
+static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp)
{
- return 0;
+ return 0;
}
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
@@ -1170,11 +1177,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}
-static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
-}
-
static inline void mem_cgroup_replace_folio(struct folio *old,
struct folio *new)
{
@@ -1466,6 +1468,11 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
}
+
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ return 0;
+}
#endif /* CONFIG_MEMCG */
/*
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index d437e3070850..246daadbfde8 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -7,7 +7,14 @@
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
-unsigned int *memfd_file_seals_ptr(struct file *file);
+/*
+ * Check for any existing seals on mmap, return an error if access is denied due
+ * to sealing, or 0 otherwise.
+ *
+ * We also update VMA flags if appropriate by manipulating the VMA flags pointed
+ * to by vm_flags_ptr.
+ */
+int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -17,19 +24,11 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
{
return ERR_PTR(-EINVAL);
}
-
-static inline unsigned int *memfd_file_seals_ptr(struct file *file)
+static inline int memfd_check_seals_mmap(struct file *file,
+ unsigned long *vm_flags_ptr)
{
- return NULL;
+ return 0;
}
#endif
-/* Retrieve memfd seals associated with the file, if any. */
-static inline unsigned int memfd_file_seals(struct file *file)
-{
- unsigned int *sealsp = memfd_file_seals_ptr(file);
-
- return sealsp ? *sealsp : 0;
-}
-
#endif /* __LINUX_MEMFD_H */
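
A hedged sketch of how an mmap path would use the consolidated helper: pass the file and a pointer to the VMA flags being applied, and bail out if a seal forbids the mapping. The surrounding prologue function is illustrative; only memfd_check_seals_mmap() and its semantics come from the hunk above.

#include <linux/fs.h>
#include <linux/memfd.h>

static int example_mmap_prologue(struct file *file, unsigned long *vm_flags)
{
	/* May also adjust *vm_flags where the seals require it. */
	int err = memfd_check_seals_mmap(file, vm_flags);

	if (err)
		return err;

	/* ... continue setting up the mapping ... */
	return 0;
}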
diff --git a/include/linux/memory/ti-aemif.h b/include/linux/memory/ti-aemif.h
new file mode 100644
index 000000000000..da94a9d985e7
--- /dev/null
+++ b/include/linux/memory/ti-aemif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEMORY_TI_AEMIF_H
+#define __MEMORY_TI_AEMIF_H
+
+/**
+ * struct aemif_cs_timings: structure to hold CS timing configuration
+ * values are expressed in number of clock cycles - 1
+ * @ta: minimum turn around time
+ * @rhold: read hold width
+ * @rstrobe: read strobe width
+ * @rsetup: read setup width
+ * @whold: write hold width
+ * @wstrobe: write strobe width
+ * @wsetup: write setup width
+ */
+struct aemif_cs_timings {
+ u32 ta;
+ u32 rhold;
+ u32 rstrobe;
+ u32 rsetup;
+ u32 whold;
+ u32 wstrobe;
+ u32 wsetup;
+};
+
+struct aemif_device;
+
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs, struct aemif_cs_timings *timings);
+int aemif_check_cs_timings(struct aemif_cs_timings *timings);
+
+#endif // __MEMORY_TI_AEMIF_H
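
A hedged sketch of a client using the validate-then-apply pair exported here; the chip-select number and cycle counts are illustrative (and, per the struct comment, expressed as clock cycles minus one):

#include <linux/memory/ti-aemif.h>

static int example_apply_cs0_timings(struct aemif_device *aemif)
{
	struct aemif_cs_timings t = {
		.ta      = 1,
		.rhold   = 2,
		.rstrobe = 5,
		.rsetup  = 1,
		.whold   = 2,
		.wstrobe = 5,
		.wsetup  = 1,
	};
	int ret;

	ret = aemif_check_cs_timings(&t);
	if (ret)
		return ret;

	return aemif_set_cs_timings(aemif, 0, &t);
}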
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index b27ddce5d324..eaac5ae8c05c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -144,8 +144,6 @@ extern u64 max_mem_size;
extern int mhp_online_type_from_str(const char *str);
-/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -303,6 +301,9 @@ static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_MEMORY_HOTPLUG
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(int online_type);
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 002e49b2ebd9..29919faea2f1 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -144,16 +144,14 @@ const struct movable_operations *page_movable_ops(struct page *page)
#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
struct vm_area_struct *vma, int node);
-int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
- int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
}
-static inline int migrate_misplaced_folio(struct folio *folio,
- struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
return -EAGAIN; /* can't migrate now */
}
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
index 6325f6ffb895..1160bed6579e 100644
--- a/include/linux/min_heap.h
+++ b/include/linux/min_heap.h
@@ -6,6 +6,17 @@
#include <linux/string.h>
#include <linux/types.h>
+/*
+ * The Min Heap API provides utilities for managing min-heaps, a binary tree
+ * structure where each node's value is less than or equal to its children's
+ * values, ensuring the smallest element is at the root.
+ *
+ * Users should avoid directly calling functions prefixed with __min_heap_*().
+ * Instead, use the provided macro wrappers.
+ *
+ * For further details and examples, refer to Documentation/core-api/min_heap.rst.
+ */
+
/**
* Data structure to hold a min-heap.
* @nr: Number of elements currently in the heap.
@@ -218,7 +229,7 @@ void __min_heap_init_inline(min_heap_char *heap, void *data, int size)
}
#define min_heap_init_inline(_heap, _data, _size) \
- __min_heap_init_inline((min_heap_char *)_heap, _data, _size)
+ __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
/* Get the minimum element from the heap. */
static __always_inline
@@ -228,7 +239,8 @@ void *__min_heap_peek_inline(struct min_heap_char *heap)
}
#define min_heap_peek_inline(_heap) \
- (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap))
+ (__minheap_cast(_heap) \
+ __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr)))
/* Check if the heap is full. */
static __always_inline
@@ -238,7 +250,7 @@ bool __min_heap_full_inline(min_heap_char *heap)
}
#define min_heap_full_inline(_heap) \
- __min_heap_full_inline((min_heap_char *)_heap)
+ __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr))
/* Sift the element at pos down the heap. */
static __always_inline
@@ -277,8 +289,8 @@ void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size,
}
#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \
- __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \
- _func, _args)
+ __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
/* Sift up ith element from the heap, O(log2(nr)). */
static __always_inline
@@ -304,8 +316,8 @@ void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx
}
#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \
- __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \
- _func, _args)
+ __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
/* Floyd's approach to heapification that is O(nr). */
static __always_inline
@@ -319,7 +331,8 @@ void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
}
#define min_heapify_all_inline(_heap, _func, _args) \
- __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+ __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
/* Remove minimum element from the heap, O(log2(nr)). */
static __always_inline
@@ -340,7 +353,8 @@ bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size,
}
#define min_heap_pop_inline(_heap, _func, _args) \
- __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+ __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
/*
* Remove the minimum element and then push the given element. The
@@ -356,8 +370,8 @@ void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t
}
#define min_heap_pop_push_inline(_heap, _element, _func, _args) \
- __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
- _func, _args)
+ __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
/* Push an element on to the heap, O(log2(nr)). */
static __always_inline
@@ -382,8 +396,8 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele
}
#define min_heap_push_inline(_heap, _element, _func, _args) \
- __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
- _func, _args)
+ __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
/* Remove ith element from the heap, O(log2(nr)). */
static __always_inline
@@ -411,8 +425,8 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
}
#define min_heap_del_inline(_heap, _idx, _func, _args) \
- __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \
- _func, _args)
+ __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
void __min_heap_init(min_heap_char *heap, void *data, int size);
void *__min_heap_peek(struct min_heap_char *heap);
@@ -433,25 +447,31 @@ bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
const struct min_heap_callbacks *func, void *args);
#define min_heap_init(_heap, _data, _size) \
- __min_heap_init((min_heap_char *)_heap, _data, _size)
+ __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
#define min_heap_peek(_heap) \
- (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap))
+ (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr)))
#define min_heap_full(_heap) \
- __min_heap_full((min_heap_char *)_heap)
+ __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr))
#define min_heap_sift_down(_heap, _pos, _func, _args) \
- __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args)
+ __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
#define min_heap_sift_up(_heap, _idx, _func, _args) \
- __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+ __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
#define min_heapify_all(_heap, _func, _args) \
- __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+ __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
#define min_heap_pop(_heap, _func, _args) \
- __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+ __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
#define min_heap_pop_push(_heap, _element, _func, _args) \
- __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
- _func, _args)
+ __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
#define min_heap_push(_heap, _element, _func, _args) \
- __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
+ __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
#define min_heap_del(_heap, _idx, _func, _args) \
- __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+ __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
#endif /* _LINUX_MIN_HEAP_H */
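The switch from a plain (min_heap_char *) cast to container_of(&(_heap)->nr, min_heap_char, nr) means these wrappers only compile when the argument really is a DEFINE_MIN_HEAP()-style struct with an nr member, rather than silently reinterpreting whatever pointer they are handed. Below is a minimal user-space model of that idea; generic_heap, DEFINE_HEAP and heap_count are illustrative names, not kernel identifiers.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* layout shared by every "typed" heap: nr, size, then the element array */
struct generic_heap { size_t nr; size_t size; char *data; };

#define DEFINE_HEAP(type, name) \
	struct name { size_t nr; size_t size; type *data; }

static size_t generic_count(struct generic_heap *heap)
{
	return heap->nr;
}

/*
 * Taking &(_heap)->nr only compiles when the caller's struct actually has
 * an 'nr' member, unlike a blind (struct generic_heap *) cast that accepts
 * any pointer type.
 */
#define heap_count(_heap) \
	generic_count(container_of(&(_heap)->nr, struct generic_heap, nr))

DEFINE_HEAP(int, int_heap);

int main(void)
{
	struct int_heap heap = { .nr = 3, .size = 8, .data = NULL };

	printf("%zu elements\n", heap_count(&heap));	/* prints "3 elements" */
	return 0;
}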
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 98008dd92153..eaaf5c008e4d 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -8,13 +8,10 @@
#include <linux/types.h>
/*
- * min()/max()/clamp() macros must accomplish three things:
+ * min()/max()/clamp() macros must accomplish several things:
*
* - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
- * - Retain result as a constant expressions when called with only
- * constant expressions (to avoid tripping VLA warnings in stack
- * allocation usage).
* - Perform signed v unsigned type-checking (to generate compile
* errors instead of nasty runtime surprises).
* - Unsigned char/short are always promoted to signed int and can be
@@ -31,58 +28,54 @@
* bit #0 set if ok for unsigned comparisons
* bit #1 set if ok for signed comparisons
*
- * In particular, statically non-negative signed integer
- * expressions are ok for both.
+ * In particular, statically non-negative signed integer expressions
+ * are ok for both.
*
- * NOTE! Unsigned types smaller than 'int' are implicitly
- * converted to 'int' in expressions, and are accepted for
- * signed conversions for now. This is debatable.
+ * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
+ * in expressions, and are accepted for signed conversions for now.
+ * This is debatable.
*
- * Note that 'x' is the original expression, and 'ux' is
- * the unique variable that contains the value.
+ * Note that 'x' is the original expression, and 'ux' is the unique variable
+ * that contains the value.
*
- * We use 'ux' for pure type checking, and 'x' for when
- * we need to look at the value (but without evaluating
- * it for side effects! Careful to only ever evaluate it
- * with sizeof() or __builtin_constant_p() etc).
+ * We use 'ux' for pure type checking, and 'x' for when we need to look at the
+ * value (but without evaluating it for side effects!
+ * Careful to only ever evaluate it with sizeof() or __builtin_constant_p() etc).
*
- * Pointers end up being checked by the normal C type
- * rules at the actual comparison, and these expressions
- * only need to be careful to not cause warnings for
- * pointer use.
+ * Pointers end up being checked by the normal C type rules at the actual
+ * comparison, and these expressions only need to be careful to not cause
+ * warnings for pointer use.
*/
-#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
-#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
-#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
- __signed_type_use(x,ux):__unsigned_type_use(x,ux))
+#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
+ (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
/*
- * To avoid warnings about casting pointers to integers
- * of different sizes, we need that special sign type.
+ * Check whether a signed value is always non-negative.
*
- * On 64-bit we can just always use 'long', since any
- * integer or pointer type can just be cast to that.
+ * A cast is needed to avoid any warnings from values that aren't signed
+ * integer types (in which case the result doesn't matter).
*
- * This does not work for 128-bit signed integers since
- * the cast would truncate them, but we do not use s128
- * types in the kernel (we do use 'u128', but they will
- * be handled by the !is_signed_type() case).
+ * On 64-bit any integer or pointer type can safely be cast to 'long long'.
+ * But on 32-bit we need to avoid warnings about casting pointers to integers
+ * of different sizes without truncating 64-bit values so 'long' or 'long long'
+ * must be used depending on the size of the value.
*
- * NOTE! The cast is there only to avoid any warnings
- * from when values that aren't signed integer types.
+ * This does not work for 128-bit signed integers since the cast would truncate
+ * them, but we do not use s128 types in the kernel (we do use 'u128',
+ * but they are handled by the !is_signed_type() case).
*/
-#ifdef CONFIG_64BIT
- #define __signed_type(ux) long
+#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
+#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
#else
- #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
+#define __is_nonneg(ux) statically_true( \
+ (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
#endif
-#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
-#define __types_ok(x,y,ux,uy) \
- (__sign_use(x,ux) & __sign_use(y,uy))
+#define __types_ok(ux, uy) \
+ (__sign_use(ux) & __sign_use(uy))
-#define __types_ok3(x,y,z,ux,uy,uz) \
- (__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
+#define __types_ok3(ux, uy, uz) \
+ (__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
#define __cmp_op_min <
#define __cmp_op_max >
@@ -97,30 +90,13 @@
#define __careful_cmp_once(op, x, y, ux, uy) ({ \
__auto_type ux = (x); __auto_type uy = (y); \
- BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \
+ BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \
#op"("#x", "#y") signedness error"); \
__cmp(op, ux, uy); })
#define __careful_cmp(op, x, y) \
__careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
-#define __clamp(val, lo, hi) \
- ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
-
-#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \
- __auto_type uval = (val); \
- __auto_type ulo = (lo); \
- __auto_type uhi = (hi); \
- static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
- (lo) <= (hi), true), \
- "clamp() low limit " #lo " greater than high limit " #hi); \
- BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \
- "clamp("#val", "#lo", "#hi") signedness error"); \
- __clamp(uval, ulo, uhi); })
-
-#define __careful_clamp(val, lo, hi) \
- __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
-
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
@@ -154,7 +130,7 @@
#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
__auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
- BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \
+ BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \
#op"3("#x", "#y", "#z") signedness error"); \
__cmp(op, ux, __cmp(op, uy, uz)); })
@@ -177,6 +153,22 @@
__careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __cmp_once(min, type, x, y)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __cmp_once(max, type, x, y)
+
+/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
* @x: value1
* @y: value2
@@ -186,39 +178,57 @@
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \
+ type uval = (val); \
+ type ulo = (lo); \
+ type uhi = (hi); \
+ BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \
+ "clamp("#val", "#lo", "#hi") signedness error"); \
+ __clamp(uval, ulo, uhi); })
+
+#define __careful_clamp(type, val, lo, hi) \
+ __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+
/**
- * clamp - return a value clamped to a given range with strict typechecking
+ * clamp - return a value clamped to a given range with typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
+ * This macro checks @val/@lo/@hi to make sure they have compatible
+ * signedness.
*/
+#define clamp(val, lo, hi) __careful_clamp(__auto_type, val, lo, hi)
/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
*/
-#define min_t(type, x, y) __cmp_once(min, type, x, y)
+#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
*/
-#define max_t(type, x, y) __cmp_once(max, type, x, y)
+#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
/*
* Do not check the array parameter using __must_be_array().
@@ -263,31 +273,6 @@
*/
#define max_array(array, len) __minmax_array(max, array, len)
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * @type to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument @val is. This is useful when @val is an unsigned
- * type and @lo and @hi are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
static inline bool in_range64(u64 val, u64 start, u64 len)
{
return (val - start) < len;
@@ -326,9 +311,9 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
* Use these carefully: no type checking, and uses the arguments
* multiple times. Use for obvious constants only.
*/
-#define MIN(a,b) __cmp(min,a,b)
-#define MAX(a,b) __cmp(max,a,b)
-#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
-#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
+#define MIN(a, b) __cmp(min, a, b)
+#define MAX(a, b) __cmp(max, a, b)
+#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
+#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
#endif /* _LINUX_MINMAX_H */
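With __careful_clamp() now taking the temporary type as its first argument, clamp(), clamp_t() and clamp_val() share one implementation and differ only in which type the temporaries use (__auto_type, the caller-supplied type, or typeof(val)). A small user-space sketch (GNU C) of why the typeof(val) flavour matters when the limits are plain int literals; local_clamp, local_clamp_val and level are example names only.

#include <stdio.h>

#define local_clamp(val, lo, hi) \
	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))

/* clamp_val()-style: temporaries take the type of 'val', not of the limits */
#define local_clamp_val(val, lo, hi) ({ \
	typeof(val) _val = (val); \
	typeof(val) _lo = (lo); \
	typeof(val) _hi = (hi); \
	local_clamp(_val, _lo, _hi); })

int main(void)
{
	unsigned char level = 200;

	/*
	 * 16 and 192 are int literals, but they are converted to unsigned
	 * char before the comparisons, so no signed/unsigned mixing occurs.
	 */
	printf("%u\n", (unsigned int)local_clamp_val(level, 16, 192));	/* 192 */
	return 0;
}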
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index c0fea6ca5076..69e110c2b86a 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -76,7 +76,7 @@
struct device;
struct attribute_group;
-struct miscdevice {
+struct miscdevice {
int minor;
const char *name;
const struct file_operations *fops;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f02925447e59..7b1068ddcbb7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2320,6 +2320,7 @@ extern void pagefault_out_of_memory(void);
struct zap_details {
struct folio *single_folio; /* Locked folio to be unmapped */
bool even_cows; /* Zap COWed private pages too? */
+ bool reclaim_pt; /* Need reclaim page tables? */
zap_flags_t zap_flags; /* Extra flags for zapping */
};
@@ -2991,18 +2992,15 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
-static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
+static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
struct folio *folio = ptdesc_folio(ptdesc);
- if (!ptlock_init(ptdesc))
- return false;
__folio_set_pgtable(folio);
lruvec_stat_add_folio(folio, NR_PAGETABLE);
- return true;
}
-static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
+static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
struct folio *folio = ptdesc_folio(ptdesc);
@@ -3011,6 +3009,20 @@ static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
+static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
+{
+ pagetable_dtor(ptdesc);
+ pagetable_free(ptdesc);
+}
+
+static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
+{
+ if (!ptlock_init(ptdesc))
+ return false;
+ __pagetable_ctor(ptdesc);
+ return true;
+}
+
pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
pmd_t *pmdvalp)
@@ -3087,14 +3099,6 @@ static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
return ptlock_init(ptdesc);
}
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
-#endif
- ptlock_free(ptdesc);
-}
-
#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -3105,7 +3109,6 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -3120,25 +3123,13 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
-
if (!pmd_ptlock_init(ptdesc))
return false;
- __folio_set_pgtable(folio);
ptdesc_pmd_pts_init(ptdesc);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ __pagetable_ctor(ptdesc);
return true;
}
-static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- pmd_ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -3160,18 +3151,17 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- __folio_set_pgtable(folio);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ __pagetable_ctor(ptdesc);
}
-static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ __pagetable_ctor(ptdesc);
+}
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
+{
+ __pagetable_ctor(ptdesc);
}
extern void __init pagecache_init(void);
@@ -3324,6 +3314,8 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
+bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, bool write);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3371,9 +3363,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
-extern unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
- struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
@@ -3431,15 +3420,13 @@ extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf);
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
-/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-int expand_downwards(struct vm_area_struct *vma, unsigned long address);
-
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
@@ -4096,67 +4083,6 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
extern int sysctl_nr_trim_pages;
-#ifdef CONFIG_PRINTK
-void mem_dump_obj(void *object);
-#else
-static inline void mem_dump_obj(void *object) {}
-#endif
-
-static inline bool is_write_sealed(int seals)
-{
- return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
-}
-
-/**
- * is_readonly_sealed - Checks whether write-sealed but mapped read-only,
- * in which case writes should be disallowing moving
- * forwards.
- * @seals: the seals to check
- * @vm_flags: the VMA flags to check
- *
- * Returns whether readonly sealed, in which case writess should be disallowed
- * going forward.
- */
-static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
-{
- /*
- * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
- * MAP_SHARED and read-only, take care to not allow mprotect to
- * revert protections on such mappings. Do this only for shared
- * mappings. For private mappings, don't need to mask
- * VM_MAYWRITE as we still want them to be COW-writable.
- */
- if (is_write_sealed(seals) &&
- ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
- return true;
-
- return false;
-}
-
-/**
- * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
- * handle them.
- * @seals: the seals to check
- * @vma: the vma to operate on
- *
- * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
- * check/handling on the vma flags. Return 0 if check pass, or <0 for errors.
- */
-static inline int seal_check_write(int seals, struct vm_area_struct *vma)
-{
- if (!is_write_sealed(seals))
- return 0;
-
- /*
- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * write seals are active.
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- return 0;
-}
-
#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
unsigned long len_in,
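With the constructors split into a level-specific part plus the shared __pagetable_ctor(), and pagetable_dtor()/pagetable_dtor_free() replacing the old per-level destructors, architecture code pairs the helpers roughly as sketched below. This is a hypothetical arch-level example modelled on the asm-generic pgalloc pattern, not code taken from this patch.

static inline pgtable_t example_pte_alloc_one(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);

	if (!ptdesc)
		return NULL;
	/* ptlock_init() plus the shared __pagetable_ctor() accounting */
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_page(ptdesc);
}

static inline void example_pte_free(struct mm_struct *mm, pgtable_t pte)
{
	/* pagetable_dtor() followed by pagetable_free() */
	pagetable_dtor_free(page_ptdesc(pte));
}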
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1b6a917fffa4..f9157a0c42a5 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -133,31 +133,25 @@ static inline int lru_hist_from_seq(unsigned long seq)
return seq % NR_HIST_GENS;
}
-static inline int lru_tier_from_refs(int refs)
+static inline int lru_tier_from_refs(int refs, bool workingset)
{
VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
- /* see the comment in folio_lru_refs() */
- return order_base_2(refs + 1);
+ /* see the comment on MAX_NR_TIERS */
+ return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
static inline int folio_lru_refs(struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags);
- bool workingset = flags & BIT(PG_workingset);
+ if (!(flags & BIT(PG_referenced)))
+ return 0;
/*
- * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
- * total number of accesses is N>1, since N=0,1 both map to the first
- * tier. lru_tier_from_refs() will account for this off-by-one. Also see
- * the comment on MAX_NR_TIERS.
+ * Return the total number of accesses including PG_referenced. Also see
+ * the comment on LRU_REFS_FLAGS.
*/
- return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
-}
-
-static inline void folio_clear_lru_refs(struct folio *folio)
-{
- set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
static inline int folio_lru_gen(struct folio *folio)
@@ -223,11 +217,43 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
+static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
+ bool reclaiming)
+{
+ int gen;
+ int type = folio_is_file_lru(folio);
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ /*
+ * +-----------------------------------+-----------------------------------+
+ * | Accessed through page tables and | Accessed through file descriptors |
+ * | promoted by folio_update_gen() | and protected by folio_inc_gen() |
+ * +-----------------------------------+-----------------------------------+
+ * | PG_active (set while isolated) | |
+ * +-----------------+-----------------+-----------------+-----------------+
+ * | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS |
+ * +-----------------------------------+-----------------------------------+
+ * |<---------- MIN_NR_GENS ---------->| |
+ * |<---------------------------- MAX_NR_GENS ---------------------------->|
+ */
+ if (folio_test_active(folio))
+ gen = MIN_NR_GENS - folio_test_workingset(folio);
+ else if (reclaiming)
+ gen = MAX_NR_GENS;
+ else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ gen = MIN_NR_GENS;
+ else
+ gen = MAX_NR_GENS - folio_test_workingset(folio);
+
+ return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
+}
+
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
unsigned long seq;
unsigned long flags;
- unsigned long mask;
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
@@ -237,40 +263,12 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
if (folio_test_unevictable(folio) || !lrugen->enabled)
return false;
- /*
- * There are four common cases for this page:
- * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
- * generation, and it's protected over the rest below.
- * 2. If it can't be evicted immediately, i.e., a dirty page pending
- * writeback, add it to the second youngest generation.
- * 3. If it should be evicted first, e.g., cold and clean from
- * folio_rotate_reclaimable(), add it to the oldest generation.
- * 4. Everything else falls between 2 & 3 above and is added to the
- * second oldest generation if it's considered inactive, or the
- * oldest generation otherwise. See lru_gen_is_active().
- */
- if (folio_test_active(folio))
- seq = lrugen->max_seq;
- else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
- (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->max_seq - 1;
- else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
- seq = lrugen->min_seq[type];
- else
- seq = lrugen->min_seq[type] + 1;
+ seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
/* see the comment on MIN_NR_GENS about PG_active */
- mask = LRU_GEN_MASK;
- /*
- * Don't clear PG_workingset here because it can affect PSI accounting
- * if the activation is due to workingset refault.
- */
- if (folio_test_active(folio))
- mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active);
- set_mask_bits(&folio->flags, mask, flags);
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
@@ -564,9 +562,9 @@ static inline pte_marker copy_pte_marker(
* Must be called with pgtable lock held so that no thread will see the none
* pte, and if they see it, they'll fault and serialize at the pgtable lock.
*
- * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
+ * Returns true if an uffd-wp pte was installed, false otherwise.
*/
-static inline void
+static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
pte_t *pte, pte_t pteval)
{
@@ -583,7 +581,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
* with a swap pte. There's no way of leaking the bit.
*/
if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
- return;
+ return false;
/* A uffd-wp wr-protected normal pte */
if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
@@ -596,10 +594,13 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
if (unlikely(pte_swp_uffd_wp_any(pteval)))
arm_uffd_pte = true;
- if (unlikely(arm_uffd_pte))
+ if (unlikely(arm_uffd_pte)) {
set_pte_at(vma->vm_mm, addr, pte,
make_pte_marker(PTE_MARKER_UFFD_WP));
+ return true;
+ }
#endif
+ return false;
}
static inline bool vma_has_recency(struct vm_area_struct *vma)
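The reworked folio_lru_refs()/lru_tier_from_refs() pair counts PG_referenced as the first access and reserves the last tier for PG_workingset folios. The user-space model below shows just that mapping; MAX_NR_TIERS and order_base_2() are re-declared locally for the sketch (the in-kernel order_base_2() comes from <linux/log2.h>).

#include <stdio.h>

#define MAX_NR_TIERS 4U

/* smallest order with (1 << order) >= n, i.e. ceil(log2(n)) for n >= 1 */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

static unsigned int tier_from_refs(unsigned int refs, int workingset)
{
	return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}

int main(void)
{
	for (unsigned int refs = 0; refs <= 4; refs++)
		printf("refs=%u -> tier %u\n", refs, tier_from_refs(refs, 0));
	printf("PG_workingset -> tier %u\n", tier_from_refs(4, 1));	/* tier 3 */
	return 0;
}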
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 825c04b56403..6b27db7f9496 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -438,7 +438,9 @@ FOLIO_MATCH(compound_head, _head_2a);
* struct ptdesc - Memory descriptor for page tables.
* @__page_flags: Same as page flags. Powerpc only.
* @pt_rcu_head: For freeing page table pages.
- * @pt_list: List of used page tables. Used for s390 and x86.
+ * @pt_list: List of used page tables. Used for s390 gmap shadow pages
+ * (which are not linked into the user page tables) and x86
+ * pgds.
* @_pt_pad_1: Padding that aliases with page's compound head.
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
@@ -1404,6 +1406,7 @@ enum tlb_flush_reason {
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
+ TLB_REMOTE_WRONG_CPU,
NR_TLB_FLUSH_REASONS,
};
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index d7cb1e5ecbda..a0a3894900ed 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,10 +9,12 @@ struct page;
struct vm_area_struct;
struct mm_struct;
struct vma_iterator;
+struct vma_merge_struct;
void dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void dump_vmg(const struct vma_merge_struct *vmg, const char *reason);
void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
@@ -87,6 +89,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_VMG(cond, vmg) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_vmg(vmg, "VM_WARN_ON_VMG(" __stringify(cond)")"); \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -104,9 +115,10 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
-#endif
+#endif /* CONFIG_DEBUG_VM */
#ifdef CONFIG_DEBUG_VM_IRQSOFF
#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
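VM_WARN_ON_VMG() behaves like VM_WARN_ON() but dumps the offending vma_merge_struct through dump_vmg() before warning. A hypothetical caller in a merge path might use it as below; the condition and field names are illustrative only, not taken from this patch.

static void example_validate_vmg(struct vma_merge_struct *vmg)
{
	/* on failure, dump_vmg() output precedes the WARN_ON() backtrace */
	VM_WARN_ON_VMG(vmg->start >= vmg->end, vmg);
}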
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b36124145a16..9540b41894da 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -332,66 +332,88 @@ enum lruvec_flags {
#endif /* !__GENERATING_BOUNDS_H */
/*
- * Evictable pages are divided into multiple generations. The youngest and the
+ * Evictable folios are divided into multiple generations. The youngest and the
* oldest generation numbers, max_seq and min_seq, are monotonically increasing.
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->folios[]. Otherwise it stores 0.
+ * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
*
- * A page is added to the youngest generation on faulting. The aging needs to
- * check the accessed bit at least twice before handing this page over to the
- * eviction. The first check takes care of the accessed bit set on the initial
- * fault; the second check makes sure this page hasn't been used since then.
- * This process, AKA second chance, requires a minimum of two generations,
- * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
- * LRU, e.g., /proc/vmstat, these two generations are considered active; the
- * rest of generations, if they exist, are considered inactive. See
- * lru_gen_is_active().
+ * After a folio is faulted in, the aging needs to check the accessed bit at
+ * least twice before handing this folio over to the eviction. The first check
+ * clears the accessed bit from the initial fault; the second check makes sure
+ * this folio hasn't been used since then. This process, AKA second chance,
+ * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
+ * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
+ * generations are considered active; the rest of generations, if they exist,
+ * are considered inactive. See lru_gen_is_active().
*
- * PG_active is always cleared while a page is on one of lrugen->folios[] so
- * that the aging needs not to worry about it. And it's set again when a page
- * considered active is isolated for non-reclaiming purposes, e.g., migration.
- * See lru_gen_add_folio() and lru_gen_del_folio().
+ * PG_active is always cleared while a folio is on one of lrugen->folios[] so
+ * that the sliding window needs not to worry about it. And it's set again when
+ * a folio considered active is isolated for non-reclaiming purposes, e.g.,
+ * migration. See lru_gen_add_folio() and lru_gen_del_folio().
*
* MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
- * in folio->flags.
+ * in folio->flags, masked by LRU_GEN_MASK.
*/
#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U
/*
- * Each generation is divided into multiple tiers. A page accessed N times
- * through file descriptors is in tier order_base_2(N). A page in the first tier
- * (N=0,1) is marked by PG_referenced unless it was faulted in through page
- * tables or read ahead. A page in any other tier (N>1) is marked by
- * PG_referenced and PG_workingset. This implies a minimum of two tiers is
- * supported without using additional bits in folio->flags.
+ * Each generation is divided into multiple tiers. A folio accessed N times
+ * through file descriptors is in tier order_base_2(N). A folio in the first
+ * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
+ * PG_workingset. A folio in any other tier (1<N<5) between the first and last
+ * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
*
* In contrast to moving across generations which requires the LRU lock, moving
* across tiers only involves atomic operations on folio->flags and therefore
* has a negligible cost in the buffered access path. In the eviction path,
- * comparisons of refaulted/(evicted+protected) from the first tier and the
- * rest infer whether pages accessed multiple times through file descriptors
- * are statistically hot and thus worth protecting.
+ * comparisons of refaulted/(evicted+protected) from the first tier and the rest
+ * infer whether folios accessed multiple times through file descriptors are
+ * statistically hot and thus worth protecting.
*
* MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
- * folio->flags.
+ * folio->flags, masked by LRU_REFS_MASK.
*/
#define MAX_NR_TIERS 4U
#ifndef __GENERATING_BOUNDS_H
-struct lruvec;
-struct page_vma_mapped_walk;
-
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+/*
+ * For folios accessed multiple times through file descriptors,
+ * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
+ * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
+ * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
+ * promoted into the second oldest generation in the eviction path. And when
+ * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
+ * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
+ * only valid when PG_referenced is set.
+ *
+ * For folios accessed multiple times through page tables, folio_update_gen()
+ * from a page table walk or lru_gen_set_refs() from a rmap walk sets
+ * PG_referenced after the accessed bit is cleared for the first time.
+ * Thereafter, those two paths set PG_workingset and promote folios to the
+ * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
+ * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
+ *
+ * For both cases above, after PG_workingset is set on a folio, it remains until
+ * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
+ * can be set again if lru_gen_test_recent() returns true upon a refault.
+ */
+#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
#ifdef CONFIG_LRU_GEN
enum {
@@ -406,8 +428,6 @@ enum {
NR_LRU_GEN_CAPS
};
-#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
-
#define MIN_LRU_BATCH BITS_PER_LONG
#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
@@ -421,12 +441,11 @@ enum {
/*
* The youngest generation number is stored in max_seq for both anon and file
* types as they are aged on an equal footing. The oldest generation numbers are
- * stored in min_seq[] separately for anon and file types as clean file pages
- * can be evicted regardless of swap constraints.
- *
- * Normally anon and file min_seq are in sync. But if swapping is constrained,
- * e.g., out of swap space, file min_seq is allowed to advance and leave anon
- * min_seq behind.
+ * stored in min_seq[] separately for anon and file types so that they can be
+ * incremented independently. Ideally min_seq[] are kept in sync when both anon
+ * and file types are evictable. However, to adapt to situations like extreme
+ * swappiness, they are allowed to be out of sync by at most
+ * MAX_NR_GENS-MIN_NR_GENS-1.
*
* The number of pages in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
@@ -446,8 +465,8 @@ struct lru_gen_folio {
unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
/* the exponential moving average of evicted+protected */
unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
- /* the first tier doesn't need protection, hence the minus one */
- unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can only be modified under the LRU lock */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* can be modified without holding the LRU lock */
atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
@@ -498,7 +517,7 @@ struct lru_gen_mm_walk {
int mm_stats[NR_MM_STATS];
/* total batched items */
int batched;
- bool can_swap;
+ int swappiness;
bool force_scan;
};
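The comment block above keeps the rule that folio->flags stores gen+1, masked by LRU_GEN_MASK, while a folio sits on lrugen->folios[], and 0 otherwise. A tiny user-space model of that encoding follows; the width and offset are placeholders, since the real values come from the generated bounds header.

#include <stdio.h>

#define LRU_GEN_PGOFF	8
#define LRU_GEN_WIDTH	3	/* order_base_2(MAX_NR_GENS + 1) in the kernel */
#define LRU_GEN_MASK	(((1UL << LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)

/* store gen+1 while the folio is on a list */
static unsigned long set_gen(unsigned long flags, unsigned long gen)
{
	return (flags & ~LRU_GEN_MASK) | ((gen + 1) << LRU_GEN_PGOFF);
}

/* -1 means "not on a multi-gen LRU list", mirroring folio_lru_gen() */
static long get_gen(unsigned long flags)
{
	return (long)((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

int main(void)
{
	unsigned long flags = 0;

	printf("%ld\n", get_gen(flags));	/* -1 */
	flags = set_gen(flags, 2);
	printf("%ld\n", get_gen(flags));	/* 2 */
	return 0;
}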
diff --git a/include/linux/module.h b/include/linux/module.h
index b3a643435357..30e5b19bafa9 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -52,9 +52,9 @@ struct module_kobject {
struct module_attribute {
struct attribute attr;
- ssize_t (*show)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*show)(const struct module_attribute *, struct module_kobject *,
char *);
- ssize_t (*store)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*store)(const struct module_attribute *, struct module_kobject *,
const char *, size_t count);
void (*setup)(struct module *, const char *);
int (*test)(struct module *);
@@ -67,10 +67,10 @@ struct module_version_attribute {
const char *version;
};
-extern ssize_t __modver_version_show(struct module_attribute *,
+extern ssize_t __modver_version_show(const struct module_attribute *,
struct module_kobject *, char *);
-extern struct module_attribute module_uevent;
+extern const struct module_attribute module_uevent;
/* These are either module local, or the kernel's dummy ones. */
extern int init_module(void);
@@ -275,7 +275,7 @@ extern typeof(name) __mod_device_table__##type##__##name \
#else
#define MODULE_VERSION(_version) \
MODULE_INFO(version, _version); \
- static struct module_version_attribute __modver_attr \
+ static const struct module_version_attribute __modver_attr \
__used __section("__modver") \
__aligned(__alignof__(struct module_version_attribute)) \
= { \
@@ -306,7 +306,10 @@ extern int modules_disabled; /* for sysctl */
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
-#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x))))
+#define symbol_get(x) ({ \
+ static const char __notrim[] \
+ __used __section(".no_trim_symbol") = __stringify(x); \
+ (typeof(&x))(__symbol_get(__stringify(x))); })
/* modules using other modules: kdb wants to see this. */
struct module_use {
@@ -430,7 +433,7 @@ struct module {
/* Exported symbols */
const struct kernel_symbol *syms;
- const s32 *crcs;
+ const u32 *crcs;
unsigned int num_syms;
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
@@ -448,7 +451,7 @@ struct module {
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
- const s32 *gpl_crcs;
+ const u32 *gpl_crcs;
bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
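For symbol_get(), the extra __used string placed in the new .no_trim_symbol section records which exported symbol the caller resolves at run time, so CONFIG_TRIM_UNUSED_KSYMS does not trim it away. A hedged usage sketch; foo_get_state is a made-up export, not a real kernel symbol.

extern int foo_get_state(void);		/* exported elsewhere with EXPORT_SYMBOL() */

static int example_query_state(void)
{
	int (*get_state)(void) = symbol_get(foo_get_state);
	int ret;

	if (!get_state)
		return -ENODEV;		/* provider module not loaded */
	ret = get_state();
	symbol_put(foo_get_state);
	return ret;
}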
diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
new file mode 100644
index 000000000000..4d9b736ff8b7
--- /dev/null
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * QCOM QPIC common APIs header file
+ *
+ * Copyright (c) 2023 Qualcomm Inc.
+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com>
+ *
+ */
+#ifndef __MTD_NAND_QPIC_COMMON_H__
+#define __MTD_NAND_QPIC_COMMON_H__
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_AUTO_STATUS_EN 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+/* dummy register offsets, used by qcom_write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
+#define CW_PER_PAGE 6
+#define CW_PER_PAGE_MASK GENMASK(8, 6)
+#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
+#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+#define NUM_ADDR_CYCLES 27
+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
+#define STATUS_BFR_READ BIT(30)
+#define SET_RD_MODE_AFTER_STATUS BIT(31)
+
+/* NAND_DEVn_CFG0 bits */
+#define DEV0_CFG1_ECC_DISABLE BIT(0)
+#define WIDE_FLASH BIT(1)
+#define NAND_RECOVERY_CYCLES 2
+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
+#define CS_ACTIVE_BSY BIT(5)
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
+#define WR_RD_BSY_GAP 17
+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
+#define ENABLE_BCH_ECC BIT(27)
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE BIT(0)
+#define ECC_SW_RESET BIT(1)
+#define ECC_MODE 4
+#define ECC_MODE_MASK GENMASK(5, 4)
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+#define ECC_FORCE_CLK_OPEN BIT(30)
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET 0
+#define READ_LOCATION_SIZE 16
+#define READ_LOCATION_LAST 31
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * the largest page size we support is 8K, this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * Erased codeword status is being used two times in single transfer so this
+ * flag will determine the current value of erased codeword status register
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+#define MAX_ADDRESS_CYCLE 5
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start position ce
+ * for current sgl. It will be used for size calculation
+ * for current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+ struct_group(bam_positions,
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+
+ );
+};
+
+/*
+ * This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
+ * @list - list for desc_info
+ *
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dir - DMA transfer direction
+ */
+struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct list_head node;
+
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ enum dma_data_direction dir;
+};
+
+/*
+ * holds the current register values that we want to write. acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @dev: parent device
+ *
+ * @base: MMIO base
+ *
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ *
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @reg_read_dma: contains dma address for register read buffer
+ *
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @max_cwperpage: maximum QPIC codewords required. calculated
+ * from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select correct number of code word
+ * while reading status
+ */
+struct qcom_nand_controller {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller *controller;
+ struct list_head host_list;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ int reg_read_pos;
+
+ u32 cmd1, vld;
+ bool exec_opwrite;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties which varies
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @supports_bam - whether NAND controller is using BAM
+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+ * @qpic_version2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+ bool supports_bam;
+ bool nandc_part_of_qpic;
+ bool qpic_version2;
+ bool use_codeword_fixup;
+};
+
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_qpic_bam_dma_done(void *data);
+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags);
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size, unsigned int flags);
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags);
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
+ const void *vaddr, int size, bool flow_control);
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
+ unsigned int flags);
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
+ int num_regs, unsigned int flags);
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_submit_descs(struct qcom_nand_controller *nandc);
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
+#endif
+
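The *_MASK definitions above pair naturally with FIELD_PREP() from <linux/bitfield.h> when a driver composes NAND_DEV0_CFG0. A rough sketch with illustrative values; the real driver derives them from the chip geometry and ECC requirements.

static u32 example_cfg0(struct mtd_info *mtd)
{
	int cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	return FIELD_PREP(CW_PER_PAGE_MASK, cwperpage - 1) |
	       FIELD_PREP(UD_SIZE_BYTES_MASK, NANDC_STEP_SIZE + 4) |
	       FIELD_PREP(SPARE_SIZE_BYTES_MASK, 4) |
	       FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5);
}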
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 702e5fb13dae..0da8a1c7740e 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -62,18 +62,38 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(addr, ndummy, buf, len, ...) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ __VA_OPT__(SPI_MEM_OP_MAX_FREQ(__VA_ARGS__)))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
+#define SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
@@ -86,6 +106,13 @@
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
@@ -98,6 +125,13 @@
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
@@ -110,6 +144,13 @@
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
@@ -122,6 +163,13 @@
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
#define SPINAND_PROG_EXEC_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
@@ -268,6 +316,7 @@ extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer skyhigh_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
extern const struct spinand_manufacturer xtx_spinand_manufacturer;
@@ -314,6 +363,7 @@ struct spinand_ecc_info {
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
+#define SPINAND_NO_RAW_ACCESS BIT(4)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
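The new DTR read-from-cache macros mirror the single-rate ones and add a per-op maximum frequency via SPI_MEM_OP_MAX_FREQ(). A hedged sketch of how a manufacturer driver might list them among its read-cache variants (the dummy-cycle counts and the 80 MHz cap are invented; HZ_PER_MHZ comes from linux/units.h):

/* Illustrative only: variants are listed from fastest to slowest. */
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
		SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 1, NULL, 0, 80 * HZ_PER_MHZ),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));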
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8da4c61f97b9..03bb584c62cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1085,8 +1085,8 @@ struct netdev_net_notifier {
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Old-style ioctl entry point. This is used internally by the
- * appletalk and ieee802154 subsystems but is no longer called by
- * the device ioctl handler.
+ * ieee802154 subsystem but is no longer called by the device
+ * ioctl handler.
*
* int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Used by the bonding driver for its device specific ioctls:
@@ -2904,9 +2904,9 @@ struct pcpu_sw_netstats {
struct pcpu_dstats {
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
- u64_stats_t rx_drops;
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
+ u64_stats_t rx_drops;
u64_stats_t tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 8d7430d9f218..71fbebfa43c7 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -17,6 +17,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
@@ -365,7 +366,7 @@ enum limit_by4 {
NFS4_LIMIT_BLOCKS = 2
};
-enum open_delegation_type4 {
+enum nfs4_open_delegation_type4 {
NFS4_OPEN_DELEGATE_NONE = 0,
NFS4_OPEN_DELEGATE_READ = 1,
NFS4_OPEN_DELEGATE_WRITE = 2,
@@ -512,12 +513,6 @@ enum {
FATTR4_XATTR_SUPPORT = 82,
};
-enum {
- FATTR4_TIME_DELEG_ACCESS = 84,
- FATTR4_TIME_DELEG_MODIFY = 85,
- FATTR4_OPEN_ARGUMENTS = 86,
-};
-
/*
* The following internal definitions enable processing the above
* attribute bits within 32-bit word boundaries.
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
index 5fc02df88252..a541c3a02887 100644
--- a/include/linux/nfs_common.h
+++ b/include/linux/nfs_common.h
@@ -9,9 +9,10 @@
#include <uapi/linux/nfs.h>
/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
int nfs_stat_to_errno(enum nfs_stat status);
int nfs4_stat_to_errno(int stat);
+__u32 nfs_localio_errno_to_nfs4_stat(int errno);
+
#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 039898d70954..67ae2c3f41d2 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -77,6 +77,23 @@ struct nfs_lock_context {
struct rcu_head rcu_head;
};
+struct nfs_file_localio {
+ struct nfsd_file __rcu *ro_file;
+ struct nfsd_file __rcu *rw_file;
+ struct list_head list;
+ void __rcu *nfs_uuid; /* opaque pointer to 'nfs_uuid_t' */
+};
+
+static inline void nfs_localio_file_init(struct nfs_file_localio *nfl)
+{
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ nfl->ro_file = NULL;
+ nfl->rw_file = NULL;
+ INIT_LIST_HEAD(&nfl->list);
+ nfl->nfs_uuid = NULL;
+#endif
+}
+
struct nfs4_state;
struct nfs_open_context {
struct nfs_lock_context lock_context;
@@ -87,15 +104,16 @@ struct nfs_open_context {
struct nfs4_state *state;
fmode_t mode;
+ int error;
unsigned long flags;
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
#define NFS_CONTEXT_FILE_OPEN (4)
- int error;
- struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct list_head list;
struct rcu_head rcu_head;
+ struct nfs_file_localio nfl;
};
struct nfs_open_dir_context {
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b804346a9741..f00bfcee7120 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -50,7 +50,6 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
-#define NFS_CS_LOCAL_IO 10 /* - client is local */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -132,7 +131,7 @@ struct nfs_client {
struct timespec64 cl_nfssvc_boot;
seqlock_t cl_boot_lock;
nfs_uuid_t cl_uuid;
- spinlock_t cl_localio_lock;
+ struct work_struct cl_local_probe_work;
#endif /* CONFIG_NFS_LOCALIO */
};
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 559273a0f16d..9155a6ffc370 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1315,11 +1315,6 @@ struct nfs4_fsid_present_res {
#endif /* CONFIG_NFS_V4 */
-struct nfstime4 {
- u64 seconds;
- u32 nseconds;
-};
-
#ifdef CONFIG_NFS_V4_1
struct pnfs_commit_bucket {
@@ -1637,6 +1632,7 @@ enum {
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
NFS_IOHDR_UNSTABLE_WRITES,
+ NFS_IOHDR_ODIRECT,
};
struct nfs_io_completion;
@@ -1785,7 +1781,7 @@ struct nfs_rpc_ops {
struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, struct dentry *,
+ int (*lookup) (struct inode *, struct dentry *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
struct nfs_fattr *);
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
index 9202f4b24343..9aa8a43843d7 100644
--- a/include/linux/nfslocalio.h
+++ b/include/linux/nfslocalio.h
@@ -6,9 +6,6 @@
#ifndef __LINUX_NFSLOCALIO_H
#define __LINUX_NFSLOCALIO_H
-/* nfsd_file structure is purposely kept opaque to NFS client */
-struct nfsd_file;
-
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
#include <linux/module.h>
@@ -19,6 +16,9 @@ struct nfsd_file;
#include <linux/nfs.h>
#include <net/net_namespace.h>
+struct nfs_client;
+struct nfs_file_localio;
+
/*
* Useful to allow a client to negotiate if localio
 * is possible with its server.
@@ -27,28 +27,38 @@ struct nfsd_file;
*/
typedef struct {
uuid_t uuid;
+ unsigned nfs3_localio_probe_count;
+ /* this struct is over a cacheline, avoid bouncing */
+ spinlock_t ____cacheline_aligned lock;
struct list_head list;
+ spinlock_t *list_lock; /* nn->local_clients_lock */
struct net __rcu *net; /* nfsd's network namespace */
struct auth_domain *dom; /* auth_domain for localio */
+ /* Local files to close when net is shut down or exports change */
+ struct list_head files;
} nfs_uuid_t;
void nfs_uuid_init(nfs_uuid_t *);
bool nfs_uuid_begin(nfs_uuid_t *);
void nfs_uuid_end(nfs_uuid_t *);
-void nfs_uuid_is_local(const uuid_t *, struct list_head *,
+void nfs_uuid_is_local(const uuid_t *, struct list_head *, spinlock_t *,
struct net *, struct auth_domain *, struct module *);
-void nfs_uuid_invalidate_clients(struct list_head *list);
-void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid);
+
+void nfs_localio_enable_client(struct nfs_client *clp);
+void nfs_localio_disable_client(struct nfs_client *clp);
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock);
/* localio needs to map filehandle -> struct nfsd_file */
extern struct nfsd_file *
nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *,
const struct cred *, const struct nfs_fh *,
const fmode_t) __must_hold(rcu);
+void nfs_close_local_fh(struct nfs_file_localio *);
struct nfsd_localio_operations {
- bool (*nfsd_serv_try_get)(struct net *);
- void (*nfsd_serv_put)(struct net *);
+ bool (*nfsd_net_try_get)(struct net *);
+ void (*nfsd_net_put)(struct net *);
struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
struct auth_domain *,
struct rpc_clnt *,
@@ -56,6 +66,8 @@ struct nfsd_localio_operations {
const struct nfs_fh *,
const fmode_t);
struct net *(*nfsd_file_put_local)(struct nfsd_file *);
+ struct nfsd_file *(*nfsd_file_get)(struct nfsd_file *);
+ void (*nfsd_file_put)(struct nfsd_file *);
struct file *(*nfsd_file_file)(struct nfsd_file *);
} ____cacheline_aligned;
@@ -64,17 +76,18 @@ extern const struct nfsd_localio_operations *nfs_to;
struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
struct rpc_clnt *, const struct cred *,
- const struct nfs_fh *, const fmode_t);
+ const struct nfs_fh *, struct nfs_file_localio *,
+ const fmode_t);
static inline void nfs_to_nfsd_net_put(struct net *net)
{
/*
- * Once reference to nfsd_serv is dropped, NFSD could be
- * unloaded, so ensure safe return from nfsd_file_put_local()
- * by always taking RCU.
+ * Once reference to net (and associated nfsd_serv) is dropped, NFSD
+ * could be unloaded, so ensure safe return from nfsd_net_put() by
+ * always taking RCU.
*/
rcu_read_lock();
- nfs_to->nfsd_serv_put(net);
+ nfs_to->nfsd_net_put(net);
rcu_read_unlock();
}
@@ -91,12 +104,19 @@ static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
}
#else /* CONFIG_NFS_LOCALIO */
+
+struct nfs_file_localio;
+static inline void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+}
static inline void nfsd_localio_ops_init(void)
{
}
-static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio)
+struct nfs_client;
+static inline void nfs_localio_disable_client(struct nfs_client *clp)
{
}
+
#endif /* CONFIG_NFS_LOCALIO */
#endif /* __LINUX_NFSLOCALIO_H */
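Together with the nfs_fs.h hunk earlier, each nfs_open_context now carries a struct nfs_file_localio that is initialised when the context is set up and released through nfs_close_local_fh(). A hedged sketch of that pairing (the surrounding helper names are illustrative):

/* Illustrative pairing only; both calls are no-ops without CONFIG_NFS_LOCALIO. */
static void example_ctx_setup(struct nfs_open_context *ctx)
{
	nfs_localio_file_init(&ctx->nfl);	/* clear ro_file/rw_file, init the list head */
}

static void example_ctx_teardown(struct nfs_open_context *ctx)
{
	nfs_close_local_fh(&ctx->nfl);		/* drop any cached nfsd_file references */
}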
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
index cfad6ce7e1bd..dd85613cdd86 100644
--- a/include/linux/numa_memblks.h
+++ b/include/linux/numa_memblks.h
@@ -29,7 +29,10 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
int __init numa_memblks_init(int (*init_func)(void),
bool memblock_force_top_down);
+extern int numa_distance_cnt;
+
#ifdef CONFIG_NUMA_EMU
+extern int emu_nid_to_phys[MAX_NUMNODES];
int numa_emu_cmdline(char *str);
void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
unsigned int nr_emu_nids);
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 3ebeaa0ded00..515676ebe598 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -92,8 +92,8 @@ struct nvmem_cell_info {
* @read_only: Device is read-only.
* @root_only: Device is accessibly to root only.
* @of_node: If given, this will be used instead of the parent's of_node.
- * @reg_read: Callback to read data.
- * @reg_write: Callback to write data.
+ * @reg_read: Callback to read data; return zero if successful.
+ * @reg_write: Callback to write data; return zero if successful.
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
diff --git a/include/linux/of.h b/include/linux/of.h
index f921786cb8ac..eaf0e2a2b75c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -311,6 +311,7 @@ extern struct device_node *of_find_node_with_property(
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
+extern bool of_property_read_bool(const struct device_node *np, const char *propname);
extern int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size);
extern int of_property_read_u32_index(const struct device_node *np,
@@ -397,7 +398,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
uint32_t *args,
int size);
-extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(const struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
@@ -615,6 +615,12 @@ static inline struct device_node *of_find_compatible_node(
return NULL;
}
+static inline bool of_property_read_bool(const struct device_node *np,
+ const char *propname)
+{
+ return false;
+}
+
static inline int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size)
{
@@ -1243,24 +1249,6 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
- * of_property_read_bool - Find a property
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- *
- * Search for a boolean property in a device node. Usage on non-boolean
- * property types is deprecated.
- *
- * Return: true if the property exists false otherwise.
- */
-static inline bool of_property_read_bool(const struct device_node *np,
- const char *propname)
-{
- const struct property *prop = of_find_property(np, propname, NULL);
-
- return prop ? true : false;
-}
-
-/**
* of_property_present - Test if a property is present in a node
* @np: device node to search for the property.
* @propname: name of the property to be searched.
@@ -1271,7 +1259,9 @@ static inline bool of_property_read_bool(const struct device_node *np,
*/
static inline bool of_property_present(const struct device_node *np, const char *propname)
{
- return of_property_read_bool(np, propname);
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ return prop ? true : false;
}
/**
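With of_property_read_bool() now an out-of-line function (and a stub returning false when CONFIG_OF is off), while of_property_present() performs the raw presence test itself, the calling pattern is unchanged. A short sketch with made-up property names:

#include <linux/of.h>

/* Sketch only: "wakeup-source" and "clocks" are example property names. */
static void example_parse(struct device_node *np, bool *wakeup, bool *has_clocks)
{
	*wakeup = of_property_read_bool(np, "wakeup-source");	/* boolean property */
	*has_clocks = of_property_present(np, "clocks");	/* any property type */
}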
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 9e034363788a..0cff90365391 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -26,6 +26,7 @@ struct of_pci_range {
u64 bus_addr;
};
u64 cpu_addr;
+ u64 parent_bus_addr;
u64 size;
u32 flags;
};
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index a2ff1ad48f7f..17471ef8e092 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -47,8 +47,6 @@ struct of_dev_auxdata {
{ .compatible = _compat, .phys_addr = _phys, .name = _name, \
.platform_data = _pdata }
-extern const struct of_device_id of_default_bus_match_table[];
-
/* Platform drivers register/unregister */
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index c9e3843d2dd5..263b915df1fb 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -66,10 +66,6 @@ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
struct device_node;
-extern int gpmc_get_client_irq(unsigned irq_config);
-
-extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
-
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 691506bdf2c5..36d283552f80 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -110,6 +110,7 @@ enum pageflags {
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
+ PG_dropbehind, /* drop pages on IO completion */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
@@ -562,6 +563,10 @@ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
+FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+
#ifdef CONFIG_HIGHMEM
/*
* Must use a macro here due to header dependency issues. page_zone() is not
@@ -894,21 +899,9 @@ static inline int PageTransCompound(const struct page *page)
{
return PageCompound(page);
}
-
-/*
- * PageTransTail returns true for both transparent huge pages
- * and hugetlbfs pages, so it should only be called when it's known
- * that hugetlbfs pages aren't involved.
- */
-static inline int PageTransTail(const struct page *page)
-{
- return PageTail(page);
-}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
-TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -918,11 +911,9 @@ TESTPAGEFLAG_FALSE(TransTail, transtail)
*
* This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
-PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
- TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
-PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
- TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif
/*
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 73dc2c1841ec..898bb788243b 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -31,7 +31,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
int migratetype);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags);
+ int migratetype, int flags);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index bcf0865a38ae..47bfc6b1b632 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -710,6 +710,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't block on the folio lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_DONTCACHE - Uncached buffered IO
* * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
* implementation.
*/
@@ -723,10 +724,21 @@ typedef unsigned int __bitwise fgf_t;
#define FGP_NOWAIT ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGP_DONTCACHE ((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+static inline unsigned int filemap_get_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+
+ return shift - PAGE_SHIFT;
+}
+
/**
* fgf_set_order - Encode a length in the fgf_t flags.
* @size: The suggested size of the folio to create.
@@ -740,11 +752,11 @@ typedef unsigned int __bitwise fgf_t;
*/
static inline fgf_t fgf_set_order(size_t size)
{
- unsigned int shift = ilog2(size);
+ unsigned int order = filemap_get_order(size);
- if (shift <= PAGE_SHIFT)
+ if (!order)
return 0;
- return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+ return (__force fgf_t)(order << 26);
}
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
@@ -1271,11 +1283,6 @@ void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);
/*
- * Add an arbitrary waiter to a page's wait queue
- */
-void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
-
-/*
* Fault in userspace address range.
*/
size_t fault_in_writeable(char __user *uaddr, size_t size);
@@ -1353,6 +1360,7 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ bool dropbehind;
bool _workingset;
unsigned long _pflags;
};
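filemap_get_order() factors the size-to-order conversion out of fgf_set_order(), so code that needs the order directly and code that encodes it into fgf_t flags share one helper. A hedged sketch of the flag-encoding side (the write-begin usage is illustrative, not from this patch):

/* Sketch: ask the page cache for a folio large enough for a len-byte write. */
static struct folio *example_grab_folio(struct address_space *mapping,
					loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);

	/* FGF_GET_ORDER(fgp) recovers filemap_get_order(len) on the other side */
	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				   mapping_gfp_mask(mapping));
}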
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 3a4860bd2758..3a10f8cfc3ad 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -45,6 +45,10 @@ struct pci_ecam_ops {
unsigned int bus_shift;
struct pci_ops pci_ops;
int (*init)(struct pci_config_window *);
+ int (*enable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
+ void (*disable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
};
/*
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 18a3aeb62ae4..ee6156bcbbd0 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -157,7 +157,7 @@ struct pci_epf {
struct device dev;
const char *name;
struct pci_epf_header *header;
- struct pci_epf_bar bar[6];
+ struct pci_epf_bar bar[PCI_STD_NUM_BARS];
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
@@ -174,7 +174,7 @@ struct pci_epf {
/* Below members are to attach secondary EPC to an endpoint function */
struct pci_epc *sec_epc;
struct list_head sec_epc_list;
- struct pci_epf_bar sec_epc_bar[6];
+ struct pci_epf_bar sec_epc_bar[PCI_STD_NUM_BARS];
u8 sec_epc_func_no;
struct config_group *group;
unsigned int is_bound;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 414ee5fff66b..47b31ad724fa 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -407,7 +407,7 @@ struct pci_dev {
supported from root to here */
#endif
unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
- unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
+ unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
pci_channel_state_t error_state; /* Current connectivity state */
struct device dev; /* Generic device interface */
@@ -595,6 +595,8 @@ struct pci_host_bridge {
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
+ int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
+ void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
void *release_data;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
@@ -2311,6 +2313,7 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
#endif
+int pcim_intx(struct pci_dev *pdev, int enabled);
int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d2402bf4aea2..de5deb1a0118 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2593,6 +2593,11 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_VENDOR_ID_WCHIC 0x1c00
+#define PCI_DEVICE_ID_WCHIC_CH382_0S1P 0x3050
+#define PCI_DEVICE_ID_WCHIC_CH382_2S1P 0x3250
+#define PCI_DEVICE_ID_WCHIC_CH382_2S 0x3253
+
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
@@ -2647,6 +2652,12 @@
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
+#define PCI_VENDOR_ID_WCHCN 0x4348
+#define PCI_DEVICE_ID_WCHCN_CH353_4S 0x3453
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF 0x5046
+#define PCI_DEVICE_ID_WCHCN_CH353_1S1P 0x5053
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1P 0x7053
+
#define PCI_VENDOR_ID_ACCESSIO 0x494f
#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index adef9d6e9b1b..94d267d02372 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -533,7 +533,14 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- ptep_get_and_clear(mm, addr, ptep);
+ pte_t pte = ptep_get(ptep);
+
+ pte_clear(mm, addr, ptep);
+ /*
+ * No need for ptep_get_and_clear(): page table check doesn't care about
+ * any bits that could have been set by HW concurrently.
+ */
+ page_table_check_pte_clear(mm, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index b3c4993e656e..ecf290a0c98f 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5044,8 +5044,11 @@ struct ec_response_pd_status {
#define PD_EVENT_POWER_CHANGE BIT(1)
#define PD_EVENT_IDENTITY_RECEIVED BIT(2)
#define PD_EVENT_DATA_SWAP BIT(3)
+#define PD_EVENT_TYPEC BIT(4)
+#define PD_EVENT_PPM BIT(5)
+
struct ec_response_host_event_status {
- uint32_t status; /* PD MCU host event status */
+ uint32_t status; /* PD MCU host event status */
} __ec_align4;
/* Set USB type-C port role and muxes */
@@ -6105,6 +6108,29 @@ struct ec_response_typec_vdm_response {
#undef VDO_MAX_SIZE
+/*
+ * UCSI OPM-PPM commands
+ *
+ * These commands are used for communication between OPM and PPM.
+ * Only UCSI 3.0 is tested.
+ */
+
+#define EC_CMD_UCSI_PPM_SET 0x0140
+
+/* The data size is stored in the host command protocol header. */
+struct ec_params_ucsi_ppm_set {
+ uint16_t offset;
+ uint8_t data[];
+} __ec_align2;
+
+#define EC_CMD_UCSI_PPM_GET 0x0141
+
+/* For 'GET' sub-commands, data will be returned as a raw payload. */
+struct ec_params_ucsi_ppm_get {
+ uint16_t offset;
+ uint8_t size;
+} __ec_align2;
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h
deleted file mode 100644
index 260d596ba0af..000000000000
--- a/include/linux/platform_data/keyscan-davinci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Texas Instruments, Inc
- *
- * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- */
-
-#ifndef DAVINCI_KEYSCAN_H
-#define DAVINCI_KEYSCAN_H
-
-#include <linux/io.h>
-
-enum davinci_matrix_types {
- DAVINCI_KEYSCAN_MATRIX_4X4,
- DAVINCI_KEYSCAN_MATRIX_5X3,
-};
-
-struct davinci_ks_platform_data {
- int (*device_enable)(struct device *dev);
- unsigned short *keymap;
- u32 keymapsize;
- u8 rep:1;
- u8 strobe;
- u8 interval;
- u8 matrix_type;
-};
-
-#endif
-
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index f5492ed413f3..8ab5b0e8eb2c 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -9,6 +9,7 @@
#ifndef _PLATFORM_PROFILE_H_
#define _PLATFORM_PROFILE_H_
+#include <linux/device.h>
#include <linux/bitops.h>
/*
@@ -23,20 +24,34 @@ enum platform_profile_option {
PLATFORM_PROFILE_BALANCED,
PLATFORM_PROFILE_BALANCED_PERFORMANCE,
PLATFORM_PROFILE_PERFORMANCE,
+ PLATFORM_PROFILE_CUSTOM,
PLATFORM_PROFILE_LAST, /*must always be last */
};
-struct platform_profile_handler {
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
- int (*profile_get)(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile);
- int (*profile_set)(struct platform_profile_handler *pprof,
- enum platform_profile_option profile);
+/**
+ * struct platform_profile_ops - platform profile operations
+ * @probe: Callback to setup choices available to the new class device. These
+ * choices will only be enforced when setting a new profile, not when
+ * getting the current one.
+ * @profile_get: Callback that will be called when showing the current platform
+ * profile in sysfs.
+ * @profile_set: Callback that will be called when storing a new platform
+ * profile in sysfs.
+ */
+struct platform_profile_ops {
+ int (*probe)(void *drvdata, unsigned long *choices);
+ int (*profile_get)(struct device *dev, enum platform_profile_option *profile);
+ int (*profile_set)(struct device *dev, enum platform_profile_option profile);
};
-int platform_profile_register(struct platform_profile_handler *pprof);
-int platform_profile_remove(void);
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
+int platform_profile_remove(struct device *dev);
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
int platform_profile_cycle(void);
-void platform_profile_notify(void);
+void platform_profile_notify(struct device *dev);
#endif /*_PLATFORM_PROFILE_H_*/
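The handler struct is replaced by a class-device model: a driver supplies a const platform_profile_ops and gets a device back from (devm_)platform_profile_register(). A hedged sketch of a converted driver (struct demo_priv and retrieving the registration drvdata via dev_get_drvdata() are assumptions):

struct demo_priv {
	enum platform_profile_option cur_profile;
	struct device *ppdev;
};

static int demo_profile_probe(void *drvdata, unsigned long *choices)
{
	/* advertise the profiles selectable through this class device */
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int demo_profile_get(struct device *dev, enum platform_profile_option *profile)
{
	struct demo_priv *priv = dev_get_drvdata(dev);	/* assumed: drvdata passed at register time */

	*profile = priv->cur_profile;
	return 0;
}

static int demo_profile_set(struct device *dev, enum platform_profile_option profile)
{
	struct demo_priv *priv = dev_get_drvdata(dev);

	priv->cur_profile = profile;
	return 0;
}

static const struct platform_profile_ops demo_profile_ops = {
	.probe		= demo_profile_probe,
	.profile_get	= demo_profile_get,
	.profile_set	= demo_profile_set,
};

/* In the parent driver's probe():
 *	priv->ppdev = devm_platform_profile_register(dev, "demo", priv, &demo_profile_ops);
 *	if (IS_ERR(priv->ppdev))
 *		return PTR_ERR(priv->ppdev);
 */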
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e7f0260f15ad..78855d794342 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -384,12 +384,8 @@ const struct dev_pm_ops name = { \
#ifdef CONFIG_PM
#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, "ns")
#else
#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
-#define EXPORT_PM_FN_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
#ifdef CONFIG_PM_SLEEP
@@ -570,7 +566,8 @@ const struct dev_pm_ops name = { \
{ .event = PM_EVENT_AUTO_RESUME, })
#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
-
+#define PMSG_NO_WAKEUP(msg) (((msg).event & \
+ (PM_EVENT_FREEZE | PM_EVENT_QUIESCE)) != 0)
/*
* Device run-time power management status.
*
@@ -683,6 +680,7 @@ struct dev_pm_info {
bool no_pm_callbacks:1; /* Owned by the PM core */
bool async_in_progress:1; /* Owned by the PM core */
bool must_resume:1; /* Owned by the PM core */
+ bool set_active:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
#else
bool should_wakeup:1;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 45646bfcaf1a..1aab31370065 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -147,6 +147,7 @@ struct genpd_governor_data {
};
struct genpd_power_state {
+ const char *name;
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 5180dc9f1706..6b190639b08e 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -61,6 +61,7 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ int voltage_min_design;
bool removed;
unsigned long last_update;
union power_supply_propval last_status;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b98106e1a90f..6ed53b292162 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
@@ -40,7 +42,7 @@ enum {
};
/* What algorithm is the charger using? */
-enum {
+enum power_supply_charge_type {
POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */
@@ -58,6 +60,7 @@ enum {
POWER_SUPPLY_HEALTH_OVERHEAT,
POWER_SUPPLY_HEALTH_DEAD,
POWER_SUPPLY_HEALTH_OVERVOLTAGE,
+ POWER_SUPPLY_HEALTH_UNDERVOLTAGE,
POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
@@ -99,6 +102,7 @@ enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -245,6 +249,7 @@ struct power_supply_desc {
const char *name;
enum power_supply_type type;
u8 charge_behaviours;
+ u32 charge_types;
u32 usb_types;
const enum power_supply_property *properties;
size_t num_properties;
@@ -281,6 +286,28 @@ struct power_supply_desc {
int use_for_apm;
};
+struct power_supply_ext {
+ const char *const name;
+ u8 charge_behaviours;
+ const enum power_supply_property *properties;
+ size_t num_properties;
+
+ int (*get_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+ int (*set_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+ int (*property_is_writeable)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp);
+};
+
struct power_supply {
const struct power_supply_desc *desc;
@@ -300,10 +327,13 @@ struct power_supply {
struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
+ bool update_groups;
bool initialized;
bool removing;
atomic_t use_cnt;
struct power_supply_battery_info *battery_info;
+ struct rw_semaphore extensions_sem; /* protects "extensions" */
+ struct list_head extensions;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
@@ -318,6 +348,8 @@ struct power_supply {
#endif
};
+#define dev_to_psy(__dev) container_of_const(__dev, struct power_supply, dev)
+
/*
* This is recommended structure to specify static power supply parameters.
* Generic one, parametrizable for different power supplies. Power supply
@@ -878,10 +910,18 @@ devm_power_supply_register(struct device *parent,
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+extern int __must_check
+power_supply_register_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev,
+ void *data);
+extern void power_supply_unregister_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext);
+
#define to_power_supply(device) container_of(device, struct power_supply, dev)
extern void *power_supply_get_drvdata(struct power_supply *psy);
-extern int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data));
+extern int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data));
static inline bool power_supply_is_amp_property(enum power_supply_property psp)
{
@@ -944,6 +984,11 @@ ssize_t power_supply_charge_behaviour_show(struct device *dev,
char *buf);
int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf);
+int power_supply_charge_types_parse(unsigned int available_types, const char *buf);
#else
static inline
ssize_t power_supply_charge_behaviour_show(struct device *dev,
@@ -959,6 +1004,20 @@ static inline int power_supply_charge_behaviour_parse(unsigned int available_beh
{
return -EOPNOTSUPP;
}
+
+static inline
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_types_parse(unsigned int available_types, const char *buf)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif /* __LINUX_POWER_SUPPLY_H__ */
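struct power_supply_ext lets a second driver attach extra properties to an existing supply and have them served through the same sysfs device. A hedged sketch of a read-only temperature extension (the names and the constant reading are invented):

static const enum power_supply_property demo_ext_props[] = {
	POWER_SUPPLY_PROP_TEMP,
};

static int demo_ext_get_property(struct power_supply *psy,
				 const struct power_supply_ext *ext,
				 void *data, enum power_supply_property psp,
				 union power_supply_propval *val)
{
	if (psp != POWER_SUPPLY_PROP_TEMP)
		return -EINVAL;

	val->intval = 250;	/* 25.0 degC, made-up reading */
	return 0;
}

static const struct power_supply_ext demo_ext = {
	.name		= "demo-temp",
	.properties	= demo_ext_props,
	.num_properties	= ARRAY_SIZE(demo_ext_props),
	.get_property	= demo_ext_get_property,
};

/* In the extending driver:
 *	err = power_supply_register_extension(battery_psy, &demo_ext, dev, priv);
 *	...
 *	power_supply_unregister_extension(battery_psy, &demo_ext);
 */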
diff --git a/include/linux/pps_gen_kernel.h b/include/linux/pps_gen_kernel.h
new file mode 100644
index 000000000000..022ea0ac4440
--- /dev/null
+++ b/include/linux/pps_gen_kernel.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PPS generator API kernel header
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#ifndef LINUX_PPS_GEN_KERNEL_H
+#define LINUX_PPS_GEN_KERNEL_H
+
+#include <linux/pps_gen.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+
+/*
+ * Global defines
+ */
+
+#define PPS_GEN_MAX_SOURCES 16 /* should be enough... */
+
+struct pps_gen_device;
+
+/**
+ * struct pps_gen_source_info - the specific PPS generator info
+ * @use_system_clock: true, if the system clock is used to generate pulses
+ * @get_time: query the time stored into the generator clock
+ * @enable: enable/disable the PPS pulses generation
+ *
+ * This is the main generator struct where all needed information must be
+ * placed before calling the pps_gen_register_source().
+ */
+struct pps_gen_source_info {
+ bool use_system_clock;
+
+ int (*get_time)(struct pps_gen_device *pps_gen,
+ struct timespec64 *time);
+ int (*enable)(struct pps_gen_device *pps_gen, bool enable);
+
+/* private: internal use only */
+ struct module *owner;
+ struct device *parent; /* for device_create */
+};
+
+/* The main struct */
+struct pps_gen_device {
+ struct pps_gen_source_info info; /* PPS generator info */
+ bool enabled; /* PPS generator status */
+
+ unsigned int event;
+ unsigned int sequence;
+
+ unsigned int last_ev; /* last PPS event id */
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS generator unique ID */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
+};
+
+/*
+ * Global variables
+ */
+
+extern const struct attribute_group *pps_gen_groups[];
+
+/*
+ * Exported functions
+ */
+
+extern struct pps_gen_device *pps_gen_register_source(
+ struct pps_gen_source_info *info);
+extern void pps_gen_unregister_source(struct pps_gen_device *pps_gen);
+extern void pps_gen_event(struct pps_gen_device *pps_gen,
+ unsigned int event, void *data);
+
+#endif /* LINUX_PPS_GEN_KERNEL_H */
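A hedged sketch of the driver side of the new generator API, built only from the declarations above (hardware/timer handling is elided; the event ids passed to pps_gen_event() come from the uapi header and are not shown here):

static int demo_get_time(struct pps_gen_device *pps_gen, struct timespec64 *time)
{
	ktime_get_real_ts64(time);	/* this example simply reports the system clock */
	return 0;
}

static int demo_enable(struct pps_gen_device *pps_gen, bool enable)
{
	/* arm or disarm whatever produces the per-second pulse */
	return 0;
}

static struct pps_gen_source_info demo_info = {
	.use_system_clock	= true,
	.get_time		= demo_get_time,
	.enable			= demo_enable,
};

/* probe:  demo_gen = pps_gen_register_source(&demo_info);
 * pulse:  pps_gen_event(demo_gen, event_id, NULL);
 * remove: pps_gen_unregister_source(demo_gen);
 */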
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 78c8ac4951b5..c7abce28ed29 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -56,8 +56,7 @@ struct pps_device {
unsigned int id; /* PPS source unique ID */
void const *lookup_cookie; /* For pps_lookup_dev() only */
- struct cdev cdev;
- struct device *dev;
+ struct device dev;
struct fasync_struct *async_queue; /* fasync method */
spinlock_t lock;
};
diff --git a/include/linux/property.h b/include/linux/property.h
index 61fc20e5f81f..e214ecd241eb 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -37,6 +37,7 @@ struct fwnode_handle *__dev_fwnode(struct device *dev);
struct device *: __dev_fwnode)(dev)
bool device_property_present(const struct device *dev, const char *propname);
+bool device_property_read_bool(const struct device *dev, const char *propname);
int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval);
int device_property_read_u16_array(const struct device *dev, const char *propname,
@@ -54,6 +55,8 @@ int device_property_match_string(const struct device *dev,
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname);
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname);
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
@@ -207,12 +210,6 @@ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
unsigned int device_get_child_node_count(const struct device *dev);
-static inline bool device_property_read_bool(const struct device *dev,
- const char *propname)
-{
- return device_property_present(dev, propname);
-}
-
static inline int device_property_read_u8(const struct device *dev,
const char *propname, u8 *val)
{
@@ -263,12 +260,6 @@ static inline int device_property_string_array_count(const struct device *dev,
return device_property_read_string_array(dev, propname, NULL, 0);
}
-static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
- const char *propname)
-{
- return fwnode_property_present(fwnode, propname);
-}
-
static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 6853e29d9674..a2df509056ac 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -347,6 +347,23 @@ struct pwm_chip {
struct pwm_device pwms[] __counted_by(npwm);
};
+/**
+ * pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
+ * @chip: The pwm_chip to test
+ *
+ * Returns true iff the pwm chip supports the waveform functions like
+ * pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
+ */
+static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
+{
+ /*
+ * Only check for .write_waveform(). If that is available,
+ * .round_waveform_tohw() and .round_waveform_fromhw() are asserted to be
+ * available, too, in pwmchip_add().
+ */
+ return chip->ops->write_waveform != NULL;
+}
+
static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
{
return chip->dev.parent;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ac08431e238f..9632e3318e0d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -398,6 +398,12 @@ struct sched_info {
/* Time spent waiting on a runqueue: */
unsigned long long run_delay;
+ /* Max time spent waiting on a runqueue: */
+ unsigned long long max_run_delay;
+
+ /* Min time spent waiting on a runqueue: */
+ unsigned long long min_run_delay;
+
/* Timestamps: */
/* When did we last run on a CPU? */
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 412cdaba33eb..17e04859b9a4 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -18,10 +18,6 @@ extern int sched_cpu_dying(unsigned int cpu);
# define sched_cpu_dying NULL
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-extern void idle_task_exit(void);
-#else
static inline void idle_task_exit(void) {}
-#endif
#endif /* _LINUX_SCHED_HOTPLUG_H */
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
index 066216f1357a..53b356a26414 100644
--- a/include/linux/scmi_imx_protocol.h
+++ b/include/linux/scmi_imx_protocol.h
@@ -13,10 +13,11 @@
#include <linux/notifier.h>
#include <linux/types.h>
-enum scmi_nxp_protocol {
- SCMI_PROTOCOL_IMX_BBM = 0x81,
- SCMI_PROTOCOL_IMX_MISC = 0x84,
-};
+#define SCMI_PROTOCOL_IMX_BBM 0x81
+#define SCMI_PROTOCOL_IMX_MISC 0x84
+
+#define SCMI_IMX_VENDOR "NXP"
+#define SCMI_IMX_SUBVENDOR "IMX"
struct scmi_imx_bbm_proto_ops {
int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id,
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d1a2346cf0f8..5ce48eab7a2a 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -322,6 +322,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* raw_seqcount_try_begin() - begin a seqcount_t read critical section
* w/o lockdep and w/o counter stabilization
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count to be passed to read_seqcount_retry()
*
* Similar to raw_seqcount_begin(), except it enables eliding the critical
* section entirely if odd, instead of doing the speculation knowing it will
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index e0717c8393d7..144de7a7948d 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -161,8 +161,8 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
- void (*rs485_start_tx)(struct uart_8250_port *);
- void (*rs485_stop_tx)(struct uart_8250_port *);
+ void (*rs485_start_tx)(struct uart_8250_port *up, bool toggle_ier);
+ void (*rs485_stop_tx)(struct uart_8250_port *up, bool toggle_ier);
/* Serial port overrun backoff */
struct delayed_work overrun_backoff;
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 5bee6f7fc400..0c3906e8ad19 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -391,14 +391,6 @@ int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
*/
int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
-/**
- * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
- * @pkt: the CMDQ packet
- *
- * Return: 0 for success; else the error code is returned
- */
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
-
#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
static inline int cmdq_dev_get_client_reg(struct device *dev,
@@ -519,11 +511,6 @@ static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
return -EINVAL;
}
-static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
-{
- return -EINVAL;
-}
-
#endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */
#endif /* __MTK_CMDQ_H__ */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index bd9836690da6..2d6c30317792 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -54,6 +54,8 @@ struct sdw_slave;
#define SDW_MAX_PORTS 15
#define SDW_VALID_PORT_RANGE(n) ((n) < SDW_MAX_PORTS && (n) >= 1)
+#define SDW_MAX_LANES 8
+
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -356,6 +358,7 @@ struct sdw_dpn_prop {
* and masks are supported
* @commit_register_supported: is PCP_Commit register supported
* @scp_int1_mask: SCP_INT1_MASK desired settings
+ * @lane_maps: Lane mapping for the slave, only valid if lane_control_support is set
* @clock_reg_supported: the Peripheral implements the clock base and scale
* registers introduced with the SoundWire 1.2 specification. SDCA devices
* do not need to set this boolean property as the registers are required.
@@ -385,6 +388,7 @@ struct sdw_slave_prop {
u32 sdca_interrupt_register_list;
u8 commit_register_supported;
u8 scp_int1_mask;
+ u8 lane_maps[SDW_MAX_LANES];
bool clock_reg_supported;
bool use_domain_irq;
};
@@ -450,6 +454,7 @@ struct sdw_master_prop {
int sdw_master_read_prop(struct sdw_bus *bus);
int sdw_slave_read_prop(struct sdw_slave *slave);
+int sdw_slave_read_lane_mapping(struct sdw_slave *slave);
/*
* SDW Slave Structures and APIs
@@ -850,77 +855,6 @@ struct sdw_master_ops {
int dev_num);
};
-/**
- * struct sdw_bus - SoundWire bus
- * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
- * @md: Master device
- * @bus_lock_key: bus lock key associated to @bus_lock
- * @bus_lock: bus lock
- * @slaves: list of Slaves on this bus
- * @msg_lock_key: message lock key associated to @msg_lock
- * @msg_lock: message lock
- * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
- * is used to compute and program bus bandwidth, clock, frame shape,
- * transport and port parameters
- * @defer_msg: Defer message
- * @params: Current bus parameters
- * @stream_refcount: number of streams currently using this bus
- * @ops: Master callback ops
- * @port_ops: Master port callback ops
- * @prop: Master properties
- * @vendor_specific_prop: pointer to non-standard properties
- * @hw_sync_min_links: Number of links used by a stream above which
- * hardware-based synchronization is required. This value is only
- * meaningful if multi_link is set. If set to 1, hardware-based
- * synchronization will be used even if a stream only uses a single
- * SoundWire segment.
- * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
- * @link_id: Link id number, can be 0 to N, unique for each Controller
- * @id: bus system-wide unique id
- * @compute_params: points to Bus resource management implementation
- * @assigned: Bitmap for Slave device numbers.
- * Bit set implies used number, bit clear implies unused number.
- * @clk_stop_timeout: Clock stop timeout computed
- * @bank_switch_timeout: Bank switch timeout computed
- * @domain: IRQ domain
- * @irq_chip: IRQ chip
- * @debugfs: Bus debugfs (optional)
- * @multi_link: Store bus property that indicates if multi links
- * are supported. This flag is populated by drivers after reading
- * appropriate firmware (ACPI/DT).
- */
-struct sdw_bus {
- struct device *dev;
- struct sdw_master_device *md;
- struct lock_class_key bus_lock_key;
- struct mutex bus_lock;
- struct list_head slaves;
- struct lock_class_key msg_lock_key;
- struct mutex msg_lock;
- struct list_head m_rt_list;
- struct sdw_defer defer_msg;
- struct sdw_bus_params params;
- int stream_refcount;
- const struct sdw_master_ops *ops;
- const struct sdw_master_port_ops *port_ops;
- struct sdw_master_prop prop;
- void *vendor_specific_prop;
- int hw_sync_min_links;
- int controller_id;
- unsigned int link_id;
- int id;
- int (*compute_params)(struct sdw_bus *bus);
- DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
- unsigned int clk_stop_timeout;
- u32 bank_switch_timeout;
- struct irq_chip irq_chip;
- struct irq_domain *domain;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
- bool multi_link;
-};
-
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
struct fwnode_handle *fwnode);
void sdw_bus_master_delete(struct sdw_bus *bus);
@@ -1010,10 +944,83 @@ struct sdw_stream_runtime {
struct list_head master_list;
};
+/**
+ * struct sdw_bus - SoundWire bus
+ * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
+ * @md: Master device
+ * @bus_lock_key: bus lock key associated to @bus_lock
+ * @bus_lock: bus lock
+ * @slaves: list of Slaves on this bus
+ * @msg_lock_key: message lock key associated to @msg_lock
+ * @msg_lock: message lock
+ * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
+ * is used to compute and program bus bandwidth, clock, frame shape,
+ * transport and port parameters
+ * @defer_msg: Defer message
+ * @params: Current bus parameters
+ * @stream_refcount: number of streams currently using this bus
+ * @ops: Master callback ops
+ * @port_ops: Master port callback ops
+ * @prop: Master properties
+ * @vendor_specific_prop: pointer to non-standard properties
+ * @hw_sync_min_links: Number of links used by a stream above which
+ * hardware-based synchronization is required. This value is only
+ * meaningful if multi_link is set. If set to 1, hardware-based
+ * synchronization will be used even if a stream only uses a single
+ * SoundWire segment.
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
+ * @id: bus system-wide unique id
+ * @compute_params: points to Bus resource management implementation
+ * @assigned: Bitmap for Slave device numbers.
+ * Bit set implies used number, bit clear implies unused number.
+ * @clk_stop_timeout: Clock stop timeout computed
+ * @bank_switch_timeout: Bank switch timeout computed
+ * @domain: IRQ domain
+ * @irq_chip: IRQ chip
+ * @debugfs: Bus debugfs (optional)
+ * @multi_link: Store bus property that indicates if multi links
+ * are supported. This flag is populated by drivers after reading
+ * appropriate firmware (ACPI/DT).
+ * @lane_used_bandwidth: how much bandwidth in bits per second is used by each lane
+ */
+struct sdw_bus {
+ struct device *dev;
+ struct sdw_master_device *md;
+ struct lock_class_key bus_lock_key;
+ struct mutex bus_lock;
+ struct list_head slaves;
+ struct lock_class_key msg_lock_key;
+ struct mutex msg_lock;
+ struct list_head m_rt_list;
+ struct sdw_defer defer_msg;
+ struct sdw_bus_params params;
+ int stream_refcount;
+ const struct sdw_master_ops *ops;
+ const struct sdw_master_port_ops *port_ops;
+ struct sdw_master_prop prop;
+ void *vendor_specific_prop;
+ int hw_sync_min_links;
+ int controller_id;
+ unsigned int link_id;
+ int id;
+ int (*compute_params)(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
+ DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
+ unsigned int clk_stop_timeout;
+ u32 bank_switch_timeout;
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+ bool multi_link;
+ unsigned int lane_used_bandwidth[SDW_MAX_LANES];
+};
+
struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
void sdw_release_stream(struct sdw_stream_runtime *stream);
-int sdw_compute_params(struct sdw_bus *bus);
+int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
@@ -1034,6 +1041,7 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave);
#if IS_ENABLED(CONFIG_SOUNDWIRE)
@@ -1045,6 +1053,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
int sdw_stream_remove_slave(struct sdw_slave *slave,
struct sdw_stream_runtime *stream);
+int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base);
+
/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 9d8382e23a9c..be7496a6a0dd 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -50,11 +50,11 @@ struct kstat {
struct timespec64 btime; /* File creation time */
u64 blocks;
u64 mnt_id;
+ u64 change_cookie;
+ u64 subvol;
u32 dio_mem_align;
u32 dio_offset_align;
u32 dio_read_offset_align;
- u64 change_cookie;
- u64 subvol;
u32 atomic_write_unit_min;
u32 atomic_write_unit_max;
u32 atomic_write_segments_max;
diff --git a/include/linux/string.h b/include/linux/string.h
index 493ac4862c77..f8e21e80942f 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -4,6 +4,7 @@
#include <linux/args.h>
#include <linux/array_size.h>
+#include <linux/cleanup.h> /* for DEFINE_FREE() */
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
@@ -312,6 +313,8 @@ extern void *kmemdup_array(const void *src, size_t count, size_t element_size, g
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
+DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))
+
/* lib/cmdline.c */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
@@ -411,7 +414,8 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* must be discoverable by the compiler.
*/
#define strtomem_pad(dest, src, pad) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ ARRAY_SIZE(dest); \
const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
@@ -434,7 +438,8 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* must be discoverable by the compiler.
*/
#define strtomem(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ ARRAY_SIZE(dest); \
const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
@@ -453,7 +458,8 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ ARRAY_SIZE(dest); \
const size_t _src_len = __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
@@ -478,7 +484,8 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr_pad(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ ARRAY_SIZE(dest); \
const size_t _src_len = __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
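The stricter checks mean @dest must now be a fixed-size byte array (validated with __must_be_byte_array() and sized with ARRAY_SIZE()) rather than anything with a compiler-visible object size. A short sketch:

/* Sketch: name[] is a fixed-size char array, so its length is known at
 * compile time; passing a plain "char *" destination would now fail to build. */
struct demo_label {
	char name[16];
};

static void demo_set_name(struct demo_label *l, const char *src)
{
	strtomem_pad(l->name, src, '\0');	/* copy src, NUL-pad the remainder */
}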
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 35766963dd14..e783132e481f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -222,6 +222,8 @@ static inline bool cache_is_expired(struct cache_detail *detail, struct cache_he
return detail->flush_time >= h->last_refresh;
}
+extern int cache_check_rcu(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp);
extern int cache_check(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 5321585c778f..fec976e58174 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -93,6 +93,7 @@ struct rpc_clnt {
const struct cred *cl_cred;
unsigned int cl_max_connect; /* max number of transports not to the same IP */
struct super_block *pipefs_sb;
+ atomic_t cl_task_count;
};
/*
diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h
deleted file mode 100644
index 3ccecd0ad229..000000000000
--- a/include/linux/sunrpc/gss_asn1.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * linux/include/linux/sunrpc/gss_asn1.h
- *
- * minimal asn1 for generic encoding/decoding of gss tokens
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
- * lib/gssapi/krb5/gssapiP_krb5.h, and others
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1995 by the Massachusetts Institute of Technology.
- * All Rights Reserved.
- *
- * Export of this software from the United States of America may
- * require a specific license from the United States Government.
- * It is the responsibility of any person or organization contemplating
- * export to obtain such a license before exporting.
- *
- * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
- * distribute this software and its documentation for any purpose and
- * without fee is hereby granted, provided that the above copyright
- * notice appear in all copies and that both that copyright notice and
- * this permission notice appear in supporting documentation, and that
- * the name of M.I.T. not be used in advertising or publicity pertaining
- * to distribution of the software without specific, written prior
- * permission. Furthermore if you modify this software you must label
- * your software as modified software and not distribute it in such a
- * fashion that it might be confused with the original M.I.T. software.
- * M.I.T. makes no representations about the suitability of
- * this software for any purpose. It is provided "as is" without express
- * or implied warranty.
- *
- */
-
-
-#include <linux/sunrpc/gss_api.h>
-
-#define SIZEOF_INT 4
-
-/* from gssapi_err_generic.h */
-#define G_BAD_SERVICE_NAME (-2045022976L)
-#define G_BAD_STRING_UID (-2045022975L)
-#define G_NOUSER (-2045022974L)
-#define G_VALIDATE_FAILED (-2045022973L)
-#define G_BUFFER_ALLOC (-2045022972L)
-#define G_BAD_MSG_CTX (-2045022971L)
-#define G_WRONG_SIZE (-2045022970L)
-#define G_BAD_USAGE (-2045022969L)
-#define G_UNKNOWN_QOP (-2045022968L)
-#define G_NO_HOSTNAME (-2045022967L)
-#define G_BAD_HOSTNAME (-2045022966L)
-#define G_WRONG_MECH (-2045022965L)
-#define G_BAD_TOK_HEADER (-2045022964L)
-#define G_BAD_DIRECTION (-2045022963L)
-#define G_TOK_TRUNC (-2045022962L)
-#define G_REFLECT (-2045022961L)
-#define G_WRONG_TOKID (-2045022960L)
-
-#define g_OID_equal(o1,o2) \
- (((o1)->len == (o2)->len) && \
- (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
-
-u32 g_verify_token_header(
- struct xdr_netobj *mech,
- int *body_size,
- unsigned char **buf_in,
- int toksize);
-
-int g_token_size(
- struct xdr_netobj *mech,
- unsigned int body_size);
-
-void g_make_token_header(
- struct xdr_netobj *mech,
- int body_size,
- unsigned char **buf);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 78a80bf3fdcb..43950b5237c8 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -40,7 +40,6 @@
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/gss_asn1.h>
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index e68fecf6eab5..74658cca0f38 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -72,16 +72,12 @@ struct svc_serv {
spinlock_t sv_lock;
unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
- unsigned int sv_maxconn; /* max connections allowed or
- * '0' causing max to be based
- * on number of threads. */
-
unsigned int sv_max_payload; /* datagram payload size */
unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
unsigned int sv_xdrsize; /* XDR buffer size */
struct list_head sv_permsocks; /* all permanent sockets */
struct list_head sv_tempsocks; /* all temporary sockets */
- int sv_tmpcnt; /* count of temporary sockets */
+ int sv_tmpcnt; /* count of temporary "valid" sockets */
struct timer_list sv_temptimer; /* timer for aging temporary sockets */
char * sv_name; /* service name */
@@ -327,12 +323,7 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
*/
static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
{
- rqstp->rq_err = err;
- /* memory barrier ensures assignment to error above is visible before
- * waitqueue_active() test below completes.
- */
- smp_mb();
- wake_up_var(&rqstp->rq_err);
+ store_release_wake_up(&rqstp->rq_err, err);
if (err)
kthread_exit(1);
}
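The open-coded store/barrier/wakeup sequence is replaced by store_release_wake_up(). A rough pattern sketch of the pairing, assuming (as the removed code suggests) that the helper combines a release store with wake_up_var(); all names below are illustrative, not the sunrpc code:

static int thread_status;	/* hypothetical; 0 means "not reported yet" */

static void report_status(int err)
{
	/* Publish the value with release semantics, then wake waiters. */
	store_release_wake_up(&thread_status, err);
}

static int wait_for_report(void)
{
	/* Acquire load pairs with the release store above. */
	wait_var_event(&thread_status, smp_load_acquire(&thread_status) != 0);
	return thread_status;
}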
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0981e35a9fed..72be60952579 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -99,8 +99,30 @@ enum {
XPT_HANDSHAKE, /* xprt requests a handshake */
XPT_TLS_SESSION, /* transport-layer security established */
XPT_PEER_AUTH, /* peer has been authenticated */
+ XPT_PEER_VALID, /* peer has presented a filehandle that
+ * it has access to. It is NOT counted
+ * in ->sv_tmpcnt.
+ */
};
+/*
+ * Maximum number of "tmp" connections - those without XPT_PEER_VALID -
+ * permitted on any service.
+ */
+#define XPT_MAX_TMP_CONN 64
+
+static inline void svc_xprt_set_valid(struct svc_xprt *xpt)
+{
+ if (test_bit(XPT_TEMP, &xpt->xpt_flags) &&
+ !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) {
+ struct svc_serv *serv = xpt->xpt_server;
+
+ spin_lock(&serv->sv_lock);
+ serv->sv_tmpcnt -= 1;
+ spin_unlock(&serv->sv_lock);
+ }
+}
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
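A hedged sketch of how a server might use the new flag: once a client proves it can access an exported filehandle, the transport is promoted out of the "tmp" accounting so it no longer counts toward the limit. The call site below is hypothetical; only svc_xprt_set_valid() comes from the header:

/* Hypothetical: called after a request successfully validates a filehandle. */
static void note_peer_valid(struct svc_rqst *rqstp)
{
	svc_xprt_set_valid(rqstp->rq_xprt);
}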
diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h
new file mode 100644
index 000000000000..cf21a14aa885
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/nfs4_1.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H
+#define _LINUX_XDRGEN_NFS4_1_DEF_H
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+
+typedef s64 int64_t;
+
+typedef u32 uint32_t;
+
+typedef struct {
+ u32 count;
+ uint32_t *element;
+} bitmap4;
+
+struct nfstime4 {
+ int64_t seconds;
+ uint32_t nseconds;
+};
+
+typedef bool fattr4_offline;
+
+enum { FATTR4_OFFLINE = 83 };
+
+struct open_arguments4 {
+ bitmap4 oa_share_access;
+ bitmap4 oa_share_deny;
+ bitmap4 oa_share_access_want;
+ bitmap4 oa_open_claim;
+ bitmap4 oa_create_mode;
+};
+
+enum open_args_share_access4 {
+ OPEN_ARGS_SHARE_ACCESS_READ = 1,
+ OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
+ OPEN_ARGS_SHARE_ACCESS_BOTH = 3,
+};
+typedef enum open_args_share_access4 open_args_share_access4;
+
+enum open_args_share_deny4 {
+ OPEN_ARGS_SHARE_DENY_NONE = 0,
+ OPEN_ARGS_SHARE_DENY_READ = 1,
+ OPEN_ARGS_SHARE_DENY_WRITE = 2,
+ OPEN_ARGS_SHARE_DENY_BOTH = 3,
+};
+typedef enum open_args_share_deny4 open_args_share_deny4;
+
+enum open_args_share_access_want4 {
+ OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG = 3,
+ OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG = 4,
+ OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL = 5,
+ OPEN_ARGS_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 17,
+ OPEN_ARGS_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 18,
+ OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
+ OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21,
+};
+typedef enum open_args_share_access_want4 open_args_share_access_want4;
+
+enum open_args_open_claim4 {
+ OPEN_ARGS_OPEN_CLAIM_NULL = 0,
+ OPEN_ARGS_OPEN_CLAIM_PREVIOUS = 1,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR = 2,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV = 3,
+ OPEN_ARGS_OPEN_CLAIM_FH = 4,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6,
+};
+typedef enum open_args_open_claim4 open_args_open_claim4;
+
+enum open_args_createmode4 {
+ OPEN_ARGS_CREATEMODE_UNCHECKED4 = 0,
+ OPEN_ARGS_CREATE_MODE_GUARDED = 1,
+ OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
+ OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3,
+};
+typedef enum open_args_createmode4 open_args_createmode4;
+
+typedef struct open_arguments4 fattr4_open_arguments;
+
+enum { FATTR4_OPEN_ARGUMENTS = 86 };
+
+enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 };
+
+typedef struct nfstime4 fattr4_time_deleg_access;
+
+typedef struct nfstime4 fattr4_time_deleg_modify;
+
+enum { FATTR4_TIME_DELEG_ACCESS = 84 };
+
+enum { FATTR4_TIME_DELEG_MODIFY = 85 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_MASK = 0xFF00 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE = 0x0000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_READ_DELEG = 0x0100 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG = 0x0200 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_ANY_DELEG = 0x0300 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_DELEG = 0x0400 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_CANCEL = 0x0500 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 0x10000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 0x20000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 0x100000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 0x200000 };
+
+enum open_delegation_type4 {
+ OPEN_DELEGATE_NONE = 0,
+ OPEN_DELEGATE_READ = 1,
+ OPEN_DELEGATE_WRITE = 2,
+ OPEN_DELEGATE_NONE_EXT = 3,
+ OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
+};
+typedef enum open_delegation_type4 open_delegation_type4;
+
+#define NFS4_int64_t_sz \
+ (XDR_hyper)
+#define NFS4_uint32_t_sz \
+ (XDR_unsigned_int)
+#define NFS4_bitmap4_sz (XDR_unsigned_int)
+#define NFS4_nfstime4_sz \
+ (NFS4_int64_t_sz + NFS4_uint32_t_sz)
+#define NFS4_fattr4_offline_sz \
+ (XDR_bool)
+#define NFS4_open_arguments4_sz \
+ (NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz)
+#define NFS4_open_args_share_access4_sz (XDR_int)
+#define NFS4_open_args_share_deny4_sz (XDR_int)
+#define NFS4_open_args_share_access_want4_sz (XDR_int)
+#define NFS4_open_args_open_claim4_sz (XDR_int)
+#define NFS4_open_args_createmode4_sz (XDR_int)
+#define NFS4_fattr4_open_arguments_sz \
+ (NFS4_open_arguments4_sz)
+#define NFS4_fattr4_time_deleg_access_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_fattr4_time_deleg_modify_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_open_delegation_type4_sz (XDR_int)
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c0514c684b2c..e411368cdacf 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -75,7 +75,6 @@ extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_switch *newswitch);
extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
-extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f3e0ac20c2e8..b13b72645db3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -219,10 +219,10 @@ enum {
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
/* add others here before... */
- SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
+#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
@@ -257,18 +257,27 @@ struct swap_cluster_info {
u8 order;
struct list_head list;
};
-#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
-#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */
-#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
+
+/* All on-list clusters must have a non-zero flag. */
+enum swap_cluster_flags {
+ CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */
+ CLUSTER_FLAG_FREE,
+ CLUSTER_FLAG_NONFULL,
+ CLUSTER_FLAG_FRAG,
+ /* Clusters with flags above are allocatable */
+ CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
+ CLUSTER_FLAG_FULL,
+ CLUSTER_FLAG_DISCARD,
+ CLUSTER_FLAG_MAX,
+};
/*
* The first page in the swap file is the swap header, which is always marked
* bad to prevent it from being allocated as an entry. This also prevents the
* cluster to which it belongs being marked free. Therefore 0 is safe to use as
- * a sentinel to indicate next is not valid in percpu_cluster.
+ * a sentinel to indicate an entry is not valid.
*/
-#define SWAP_NEXT_INVALID 0
+#define SWAP_ENTRY_INVALID 0
#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS (PMD_ORDER + 1)
@@ -282,6 +291,7 @@ struct swap_cluster_info {
* throughput.
*/
struct percpu_cluster {
+ local_lock_t lock; /* Protect the percpu_cluster above */
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
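A sketch of the locking pattern the new field enables, assuming the swap allocator guards its use of the cached per-CPU cluster with the local lock; the surrounding code is illustrative only:

/* si is a struct swap_info_struct *; protect the per-CPU cluster cache. */
local_lock(&si->percpu_cluster->lock);
offset = this_cpu_ptr(si->percpu_cluster)->next[order];
/* ... pick a swap entry starting from 'offset' ... */
local_unlock(&si->percpu_cluster->lock);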
@@ -304,15 +314,12 @@ struct swap_info_struct {
/* list of clusters that contain at least one free slot */
struct list_head frag_clusters[SWAP_NR_ORDERS];
/* list of clusters that are fragmented or contended */
- unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
- unsigned int lowest_bit; /* index of first free in swap_map */
- unsigned int highest_bit; /* index of last free in swap_map */
+ atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int pages; /* total of usable pages of swap */
- unsigned int inuse_pages; /* number of those currently in use */
- unsigned int cluster_next; /* likely index for next allocation */
- unsigned int cluster_nr; /* countdown to next cluster search */
- unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
+ atomic_long_t inuse_pages; /* number of those currently in use */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
+ struct percpu_cluster *global_cluster; /* Use one global cluster for rotating device */
+ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index ae73a87775b3..b5ec038069da 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -6,10 +6,8 @@
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
- unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents);
+extern void swap_cgroup_record(struct folio *folio, swp_entry_t ent);
+extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
@@ -17,8 +15,12 @@ extern void swap_cgroup_swapoff(int type);
#else
static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents)
+void swap_cgroup_record(struct folio *folio, swp_entry_t ent)
+{
+}
+
+static inline
+unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
{
return 0;
}
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
index 15adfb8c813a..840aec3523b2 100644
--- a/include/linux/swap_slots.h
+++ b/include/linux/swap_slots.h
@@ -16,15 +16,12 @@ struct swap_slots_cache {
swp_entry_t *slots;
int nr;
int cur;
- spinlock_t free_lock; /* protects slots_ret, n_ret */
- swp_entry_t *slots_ret;
int n_ret;
};
void disable_swap_slots_cache_lock(void);
void reenable_swap_slots_cache_unlock(void);
void enable_swap_slots_cache(void);
-void free_swap_slot(swp_entry_t entry);
extern bool swap_slot_cache_enabled;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 0f2fcd244523..18f7e1fd093c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -293,7 +293,7 @@ __ATTRIBUTE_GROUPS(_name)
#define BIN_ATTRIBUTE_GROUPS(_name) \
static const struct attribute_group _name##_group = { \
- .bin_attrs = _name##_attrs, \
+ .bin_attrs_new = _name##_attrs, \
}; \
__ATTRIBUTE_GROUPS(_name)
@@ -511,7 +511,7 @@ __printf(3, 4)
int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count);
#else /* CONFIG_SYSFS */
@@ -774,7 +774,7 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
static inline ssize_t sysfs_bin_attr_simple_read(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 2964171856e0..0646804860ff 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -19,9 +19,6 @@ enum task_work_notify_mode {
TWA_SIGNAL,
TWA_SIGNAL_NO_IPI,
TWA_NMI_CURRENT,
-
- TWA_FLAGS = 0xff00,
- TWAF_NO_ALLOC = 0x0100,
};
static inline bool task_work_pending(struct task_struct *task)
diff --git a/include/linux/time64.h b/include/linux/time64.h
index f1bcea8c124a..9934331c7b86 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -49,6 +49,11 @@ static inline int timespec64_equal(const struct timespec64 *a,
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
+static inline bool timespec64_is_epoch(const struct timespec64 *ts)
+{
+ return ts->tv_sec == 0 && ts->tv_nsec == 0;
+}
+
/*
* lhs < rhs: return <0
* lhs == rhs: return 0
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 58ad4ead33fc..5caea596fef0 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -673,6 +673,20 @@ struct trace_event_file {
atomic_t tm_ref; /* trigger-mode reference counter */
};
+#ifdef CONFIG_HIST_TRIGGERS
+extern struct irq_work hist_poll_work;
+extern wait_queue_head_t hist_poll_wq;
+
+static inline void hist_poll_wakeup(void)
+{
+ if (wq_has_sleeper(&hist_poll_wq))
+ irq_work_queue(&hist_poll_work);
+}
+
+#define hist_poll_wait(file, wait) \
+ poll_wait(file, &hist_poll_wq, wait)
+#endif
+
#define __TRACE_EVENT_FLAGS(name, value) \
static int __init trace_init_flags_##name(void) \
{ \
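A hedged sketch of the intended wait/wake pairing: a poller registers on hist_poll_wq via hist_poll_wait(), and whoever updates histogram data calls hist_poll_wakeup(), which only queues the irq_work when someone is actually waiting. The file operation and readiness check below are hypothetical:

/* Hypothetical ->poll() implementation for a hist file. */
static __poll_t hist_file_poll(struct file *file, poll_table *wait)
{
	hist_poll_wait(file, wait);
	return hist_data_ready(file) ? EPOLLIN | EPOLLRDNORM : 0;
}

/* Producer side: after updating histogram data, wake any pollers. */
hist_poll_wakeup();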
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 76d9055b2cff..a351763e6965 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -218,7 +218,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __DEFINE_RUST_DO_TRACE(name, proto, args) \
notrace void rust_do_trace_##name(proto) \
{ \
- __rust_do_trace_##name(args); \
+ __do_trace_##name(args); \
}
/*
@@ -268,7 +268,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
- static inline void __rust_do_trace_##name(proto) \
+ static inline void __do_trace_##name(proto) \
{ \
if (cond) { \
guard(preempt_notrace)(); \
@@ -277,12 +277,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} \
static inline void trace_##name(proto) \
{ \
- if (static_branch_unlikely(&__tracepoint_##name.key)) { \
- if (cond) { \
- guard(preempt_notrace)(); \
- __DO_TRACE_CALL(name, TP_ARGS(args)); \
- } \
- } \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
+ __do_trace_##name(args); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
WARN_ONCE(!rcu_is_watching(), \
"RCU not watching for tracepoint"); \
@@ -291,7 +287,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \
__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
- static inline void __rust_do_trace_##name(proto) \
+ static inline void __do_trace_##name(proto) \
{ \
guard(rcu_tasks_trace)(); \
__DO_TRACE_CALL(name, TP_ARGS(args)); \
@@ -299,10 +295,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static inline void trace_##name(proto) \
{ \
might_fault(); \
- if (static_branch_unlikely(&__tracepoint_##name.key)) { \
- guard(rcu_tasks_trace)(); \
- __DO_TRACE_CALL(name, TP_ARGS(args)); \
- } \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
+ __do_trace_##name(args); \
if (IS_ENABLED(CONFIG_LOCKDEP)) { \
WARN_ONCE(!rcu_is_watching(), \
"RCU not watching for tracepoint"); \
diff --git a/include/linux/types.h b/include/linux/types.h
index 2d7b9ae8714c..1c509ce8f7f6 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -43,7 +43,7 @@ typedef unsigned long uintptr_t;
typedef long intptr_t;
#ifdef CONFIG_HAVE_UID16
-/* This is defined by include/asm-{arch}/posix_types.h */
+/* This is defined by arch/{arch}/include/asm/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index d50098fb16b5..3068c3084eb6 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -33,7 +33,9 @@ enum pd_ctrl_msg_type {
PD_CTRL_FR_SWAP = 19,
PD_CTRL_GET_PPS_STATUS = 20,
PD_CTRL_GET_COUNTRY_CODES = 21,
- /* 22-31 Reserved */
+ /* 22-23 Reserved */
+ PD_CTRL_GET_REVISION = 24,
+ /* 25-31 Reserved */
};
enum pd_data_msg_type {
@@ -46,7 +48,9 @@ enum pd_data_msg_type {
PD_DATA_ALERT = 6,
PD_DATA_GET_COUNTRY_INFO = 7,
PD_DATA_ENTER_USB = 8,
- /* 9-14 Reserved */
+ /* 9-11 Reserved */
+ PD_DATA_REVISION = 12,
+ /* 13-14 Reserved */
PD_DATA_VENDOR_DEF = 15,
/* 16-31 Reserved */
};
@@ -453,6 +457,20 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define EUDO_TBT_SUPPORT BIT(14)
#define EUDO_HOST_PRESENT BIT(13)
+/*
+ * Request Message Data Object (PD Revision 3.1+ only)
+ * --------
+ * <31:28> :: Revision Major
+ * <27:24> :: Revision Minor
+ * <23:20> :: Version Major
+ * <19:16> :: Version Minor
+ * <15:0> :: Reserved, Shall be set to zero
+ */
+
+#define RMDO(rev_maj, rev_min, ver_maj, ver_min) \
+ (((rev_maj) & 0xf) << 28 | ((rev_min) & 0xf) << 24 | \
+ ((ver_maj) & 0xf) << 20 | ((ver_min) & 0xf) << 16)
+
/* USB PD timers and counters */
#define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */
#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
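For example, a port reporting PD Revision 3.1, Version 1.8 could (illustratively) build its Revision Message Data Object as:

u32 rmdo = RMDO(3, 1, 1, 8);	/* Rev 3.1, Ver 1.8; bits <15:0> stay zero */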
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index e4de6bc1f69b..0fa9885a1038 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -223,7 +223,6 @@ extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
struct device_node *node, struct notifier_block *nb);
extern void usb_put_phy(struct usb_phy *);
-extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
extern void usb_phy_set_charger_current(struct usb_phy *usb_phy,
unsigned int mA);
@@ -259,10 +258,6 @@ static inline void usb_put_phy(struct usb_phy *x)
{
}
-static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x)
-{
-}
-
static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
}
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index 8539956bc2be..51be3bb8fccb 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -82,4 +82,12 @@ struct bulk_cs_wrap {
#define US_BULK_RESET_REQUEST 0xff
#define US_BULK_GET_MAX_LUN 0xfe
+/*
+ * If 4 LUNs are supported then the LUNs would be
+ * numbered from 0 to 3, and the return value for the
+ * US_BULK_GET_MAX_LUN request would be 3. The valid
+ * LUN field is 4 bits wide, so the upper limit is 0x0f.
+ */
+#define US_BULK_MAX_LUN_LIMIT 0x0f
+
#endif
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 061da9546a81..b22e659f81ba 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -163,7 +163,8 @@ struct tcpc_dev {
void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
- bool pps_active, u32 requested_vbus_voltage);
+ bool pps_active, u32 requested_vbus_voltage,
+ u32 pps_apdo_min_voltage);
bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
void (*check_contaminant)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index d616b8807000..252af3f77039 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -140,6 +140,7 @@ int typec_cable_set_identity(struct typec_cable *cable);
* @mode: Index of the Mode
* @vdo: VDO returned by Discover Modes USB PD command
* @roles: Only for ports. DRP if the mode is available in both roles
+ * @inactive: Only for ports. Make this port inactive (default is active).
*
* Description of an Alternate Mode which a connector, cable plug or partner
* supports.
@@ -150,6 +151,7 @@ struct typec_altmode_desc {
u32 vdo;
/* Only used with ports */
enum typec_port_data roles;
+ bool inactive;
};
void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index fa97d7e00f5c..55dcea12082c 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -44,6 +44,7 @@ struct typec_thunderbolt_data {
#define TBT_GEN3_NON_ROUNDED 0
#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1
+#define TBT_CABLE_ROUNDED BIT(19)
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
diff --git a/include/linux/verification.h b/include/linux/verification.h
index cb2d47f28091..4f3022d081c3 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -38,8 +38,6 @@ enum key_being_used_for {
VERIFYING_UNSPECIFIED_SIGNATURE,
NR__KEY_BEING_USED_FOR
};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
struct key;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index dd88682e27e3..4d16c13d0df5 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -190,6 +190,8 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
+int virtio_device_reset_prepare(struct virtio_device *dev);
+int virtio_device_reset_done(struct virtio_device *dev);
size_t virtio_max_dma_size(const struct virtio_device *vdev);
@@ -214,6 +216,10 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* changes; may be called in interrupt context.
* @freeze: optional function to call during suspend/hibernation.
* @restore: optional function to call on resume.
+ * @reset_prepare: optional function to call when a transport-specific reset
+ * occurs.
+ * @reset_done: optional function to call after the transport-specific reset
+ * operation has finished.
*/
struct virtio_driver {
struct device_driver driver;
@@ -229,6 +235,8 @@ struct virtio_driver {
void (*config_changed)(struct virtio_device *dev);
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
+ int (*reset_prepare)(struct virtio_device *dev);
+ int (*reset_done)(struct virtio_device *dev);
};
#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 6fb663b36f72..60c9eacd2cf3 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -431,11 +431,11 @@ enum {
((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
/*
- * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
- * we start at sequence 9f. This gives us the same values that our shipping
- * products use, starting at 1951, provided we leave out the direction and
- * structure size. Note that VMMon occupies the block following us, starting
- * at 2001.
+ * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.rst,
+ * and we start at sequence 9f. This gives us the same values that our
+ * shipping products use, starting at 1951, provided we leave out the
+ * direction and structure size. Note that VMMon occupies the block
+ * following us, starting at 2001.
*/
#define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
@@ -453,9 +453,7 @@ enum {
#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
-#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
-#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
-#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+/*IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)*/
#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */
/*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */
diff --git a/include/media/cec.h b/include/media/cec.h
index 16b412b3131b..0c8e86115b6f 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -10,7 +10,6 @@
#include <linux/poll.h>
#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 5bce6e423e94..e7f019f68c8d 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -74,6 +74,24 @@
#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8
/**
+ * enum v4l2_mbus_csi2_cphy_line_orders_type - CSI-2 C-PHY line order
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC: C-PHY line order ABC (default)
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB: C-PHY line order ACB
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC: C-PHY line order BAC
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA: C-PHY line order BCA
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB: C-PHY line order CAB
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA: C-PHY line order CBA
+ */
+enum v4l2_mbus_csi2_cphy_line_orders_type {
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA,
+};
+
+/**
* struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration
* @flags: media bus (V4L2_MBUS_*) flags
* @data_lanes: an array of physical data lane indexes
@@ -81,6 +99,8 @@
* @num_data_lanes: number of data lanes
* @lane_polarities: polarity of the lanes. The order is the same of
* the physical lanes.
+ * @line_orders: line order of the data lanes. The order is the same as that
+ * of the physical lanes.
*/
struct v4l2_mbus_config_mipi_csi2 {
unsigned int flags;
@@ -88,6 +108,7 @@ struct v4l2_mbus_config_mipi_csi2 {
unsigned char clock_lane;
unsigned char num_data_lanes;
bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[V4L2_MBUS_CSI2_MAX_DATA_LANES];
};
/**
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index ed4cd114180a..7f405672b089 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -237,7 +237,6 @@ struct page_pool {
struct {
struct hlist_node list;
u64 detach_time;
- u32 napi_id;
u32 id;
} user;
};
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d635c5b47eba..d48c657191cd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -851,7 +851,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
- __u64 bytes, __u32 packets)
+ __u64 bytes, __u64 packets)
{
u64_stats_update_begin(&bstats->syncp);
u64_stats_add(&bstats->bytes, bytes);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 4b0677e48190..ed4b83696c77 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1268,9 +1268,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (xo) {
x = xfrm_input_state(skb);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
- return (xo->flags & CRYPTO_DONE) &&
- (xo->status & CRYPTO_SUCCESS);
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ bool check = (xo->flags & CRYPTO_DONE) &&
+ (xo->status & CRYPTO_SUCCESS);
+
+ /* The packets here are plain ones and secpath was
+ * needed to indicate that hardware already handled
+ * them, so nothing further needs to be done.
+ *
+ * Consume secpath which was set by drivers.
+ */
+ secpath_reset(skb);
+ return check;
+ }
}
return __xfrm_check_nopolicy(net, skb, dir) ||
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 226ae3702d8a..2bf09b594d10 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,22 +64,6 @@ int ib_find_cached_pkey(struct ib_device *device,
u16 *index);
/**
- * ib_find_exact_cached_pkey - Returns the PKey table index where a specified
- * PKey value occurs. Comparison uses the FULL 16 bits (incl membership bit)
- * @device: The device to query.
- * @port_num: The port number of the device to search for the PKey.
- * @pkey: The PKey value to search for.
- * @index: The index into the cached PKey table where the PKey was found.
- *
- * ib_find_exact_cached_pkey() searches the specified PKey table in
- * the local software cache.
- */
-int ib_find_exact_cached_pkey(struct ib_device *device,
- u32 port_num,
- u16 pkey,
- u16 *index);
-
-/**
* ib_get_cached_lmc - Returns a cached lmc table entry
* @device: The device to query.
* @port_num: The port number of the device to query.
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 1838869aad28..b179e464e3d1 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -22,7 +22,4 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct sa_path_rec *src);
-void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
- struct ib_user_path_rec *src);
-
#endif /* IB_USER_MARSHALL_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b8c56d7dc35d..8266fab826a7 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -283,7 +283,4 @@ int ib_ud_header_init(int payload_bytes,
int ib_ud_header_pack(struct ib_ud_header *header,
void *buf);
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header);
-
#endif /* IB_PACK_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 3417636da960..0ad104dae253 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -59,9 +59,6 @@ extern struct workqueue_struct *ib_comp_unbound_wq;
struct ib_ucq_object;
-__printf(3, 4) __cold
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
- const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
@@ -2177,6 +2174,7 @@ struct ib_port_cache {
struct ib_gid_table *gid;
u8 lmc;
enum ib_port_state port_state;
+ enum ib_port_state last_port_state;
};
struct ib_port_immutable {
@@ -2256,7 +2254,9 @@ struct rdma_netdev_alloc_params {
struct ib_odp_counters {
atomic64_t faults;
+ atomic64_t faults_handled;
atomic64_t invalidations;
+ atomic64_t invalidations_handled;
atomic64_t prefetch;
};
@@ -2681,6 +2681,13 @@ struct ib_device_ops {
*/
void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
+ /**
+ * report_port_event - Drivers need to implement this if they have
+ * driver-private handling to do when the link status changes.
+ */
+ void (*report_port_event)(struct ib_device *ibdev,
+ struct net_device *ndev, unsigned long event);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
@@ -4469,6 +4476,17 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
u32 port);
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port);
+
+static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
+{
+ return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+}
+
+void ib_dispatch_port_state_event(struct ib_device *ibdev,
+ struct net_device *ndev);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
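A rough sketch of how a driver's netdevice notifier might use the new helpers to forward carrier changes to the IB core; the notifier wiring and ibdev lookup are illustrative, only ib_query_netdev_port() and ib_dispatch_port_state_event() come from the header:

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ib_device *ibdev = example_get_ibdev(nb);	/* hypothetical lookup */
	u32 port;

	if (ib_query_netdev_port(ibdev, ndev, &port))
		return NOTIFY_DONE;	/* ndev is not bound to one of our ports */

	if (event == NETDEV_UP || event == NETDEV_DOWN || event == NETDEV_CHANGE)
		ib_dispatch_port_state_event(ibdev, ndev);

	return NOTIFY_DONE;
}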
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9705b2a98e49..510c88bfabd4 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -14,6 +14,7 @@
#include <rv/automata.h>
#include <linux/rv.h>
#include <linux/bug.h>
+#include <linux/sched.h>
#ifdef CONFIG_RV_REACTORS
@@ -324,10 +325,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk)
static void da_monitor_reset_all_##name(void) \
{ \
struct task_struct *g, *p; \
+ int cpu; \
\
read_lock(&tasklist_lock); \
for_each_process_thread(g, p) \
da_monitor_reset_##name(da_get_monitor_##name(p)); \
+ for_each_present_cpu(cpu) \
+ da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \
read_unlock(&tasklist_lock); \
} \
\
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 4a9b4169e081..183d9fd50d2d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -963,7 +963,7 @@ int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fc_eh_abort(struct scsi_cmnd *);
int fc_eh_device_reset(struct scsi_cmnd *);
int fc_eh_host_reset(struct scsi_cmnd *);
-int fc_slave_alloc(struct scsi_device *);
+int fc_sdev_init(struct scsi_device *);
/*
* ELS/CT interface
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1324068dd950..ba460b6c0374 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -683,8 +683,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
-int sas_device_configure(struct scsi_device *dev,
- struct queue_limits *lim);
+int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
@@ -703,7 +702,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd);
extern void sas_target_destroy(struct scsi_target *);
-extern int sas_slave_alloc(struct scsi_device *);
+extern int sas_sdev_init(struct scsi_device *);
extern int sas_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg);
extern int sas_drain_work(struct sas_ha_struct *ha);
@@ -750,8 +749,8 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
#endif
#define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \
- .device_configure = sas_device_configure, \
- .slave_alloc = sas_slave_alloc, \
+ .sdev_configure = sas_sdev_configure, \
+ .sdev_init = sas_sdev_init, \
#define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index 9b1f0f424a79..a569c35b258d 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -59,7 +59,7 @@ struct iscsi_bsg_host_vendor {
*/
struct iscsi_bsg_host_vendor_reply {
/* start of vendor response area */
- uint32_t vendor_rsp[0];
+ DECLARE_FLEX_ARRAY(uint32_t, vendor_rsp);
};
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9c540f5468eb..7acd0ec82bb0 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -155,7 +155,7 @@ struct scsi_device {
blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
- * pass settings from slave_alloc to scsi
+ * pass settings from sdev_init to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
@@ -357,7 +357,7 @@ struct scsi_target {
atomic_t target_blocked;
/*
- * LLDs should set this in the slave_alloc host template callout.
+ * LLDs should set this in the sdev_init host template callout.
* If set to zero then there is no limit.
*/
unsigned int can_queue;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 02823d6af37d..26bc23419cfd 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -168,20 +168,20 @@ struct scsi_host_template {
* Return values: 0 on success, non-0 on failure
*
* Deallocation: If we didn't find any devices at this ID, you will
- * get an immediate call to slave_destroy(). If we find something
- * here then you will get a call to slave_configure(), then the
+ * get an immediate call to sdev_destroy(). If we find something
+ * here then you will get a call to sdev_configure(), then the
* device will be used for however long it is kept around, then when
* the device is removed from the system (or possibly at reboot
- * time), you will then get a call to slave_destroy(). This is
- * assuming you implement slave_configure and slave_destroy.
+ * time), you will then get a call to sdev_destroy(). This is
+ * assuming you implement sdev_configure and sdev_destroy.
* However, if you allocate memory and hang it off the device struct,
- * then you must implement the slave_destroy() routine at a minimum
+ * then you must implement the sdev_destroy() routine at a minimum
* in order to avoid leaking memory
* each time a device is torn down.
*
* Status: OPTIONAL
*/
- int (* slave_alloc)(struct scsi_device *);
+ int (* sdev_init)(struct scsi_device *);
/*
* Once the device has responded to an INQUIRY and we know the
@@ -206,28 +206,24 @@ struct scsi_host_template {
* specific setup basis...
* 6. Return 0 on success, non-0 on error. The device will be marked
* as offline on error so that no access will occur. If you return
- * non-0, your slave_destroy routine will never get called for this
+ * non-0, your sdev_destroy routine will never get called for this
* device, so don't leave any loose memory hanging around, clean
* up after yourself before returning non-0
*
* Status: OPTIONAL
- *
- * Note: slave_configure is the legacy version, use device_configure for
- * all new code. A driver must never define both.
*/
- int (* device_configure)(struct scsi_device *, struct queue_limits *lim);
- int (* slave_configure)(struct scsi_device *);
+ int (* sdev_configure)(struct scsi_device *, struct queue_limits *lim);
/*
* Immediately prior to deallocating the device and after all activity
* has ceased the mid layer calls this point so that the low level
* driver may completely detach itself from the scsi device and vice
* versa. The low level driver is responsible for freeing any memory
- * it allocated in the slave_alloc or slave_configure calls.
+ * it allocated in the sdev_init or sdev_configure calls.
*
* Status: OPTIONAL
*/
- void (* slave_destroy)(struct scsi_device *);
+ void (* sdev_destroy)(struct scsi_device *);
/*
* Before the mid layer attempts to scan for a new device attached
@@ -601,7 +597,7 @@ struct Scsi_Host {
* have some way of identifying each detected host adapter properly
* and uniquely. For hosts that do not support more than one card
* in the system at one time, this does not need to be set. It is
- * initialized to 0 in scsi_register.
+ * initialized to 0 in scsi_host_alloc.
*/
unsigned int unique_id;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index bd1243657c01..76de2b662f4f 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -447,10 +447,6 @@ extern int iscsi_add_session(struct iscsi_cls_session *session,
unsigned int target_id);
extern int iscsi_session_event(struct iscsi_cls_session *session,
enum iscsi_uevent_e event);
-extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
- struct iscsi_transport *t,
- int dd_size,
- unsigned int target_id);
extern void iscsi_force_destroy_session(struct iscsi_cls_session *session);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
@@ -497,8 +493,8 @@ extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
extern int iscsi_flashnode_bus_match(struct device *dev,
const struct device_driver *drv);
extern struct device *
-iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
- int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
+ device_match_t fn);
extern struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
diff --git a/include/soc/amlogic/reset-meson-aux.h b/include/soc/amlogic/reset-meson-aux.h
deleted file mode 100644
index d8a15d48c984..000000000000
--- a/include/soc/amlogic/reset-meson-aux.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SOC_RESET_MESON_AUX_H
-#define __SOC_RESET_MESON_AUX_H
-
-#include <linux/err.h>
-
-struct device;
-struct regmap;
-
-#if IS_ENABLED(CONFIG_RESET_MESON_AUX)
-int devm_meson_rst_aux_register(struct device *dev,
- struct regmap *map,
- const char *adev_name);
-#else
-static inline int devm_meson_rst_aux_register(struct device *dev,
- struct regmap *map,
- const char *adev_name)
-{
- return 0;
-}
-#endif
-
-#endif /* __SOC_RESET_MESON_AUX_H */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
index 3acca067c72b..cff67ce25488 100644
--- a/include/soc/qcom/tcs.h
+++ b/include/soc/qcom/tcs.h
@@ -6,6 +6,9 @@
#ifndef __SOC_QCOM_TCS_H__
#define __SOC_QCOM_TCS_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
#define MAX_RPMH_PAYLOAD 16
/**
@@ -60,22 +63,17 @@ struct tcs_request {
struct tcs_cmd *cmds;
};
-#define BCM_TCS_CMD_COMMIT_SHFT 30
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHFT 29
-#define BCM_TCS_CMD_VALID_MASK 0x20000000
-#define BCM_TCS_CMD_VOTE_X_SHFT 14
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_Y_SHFT 0
-#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+#define BCM_TCS_CMD_COMMIT_MASK BIT(30)
+#define BCM_TCS_CMD_VALID_MASK BIT(29)
+#define BCM_TCS_CMD_VOTE_MASK GENMASK(13, 0)
+#define BCM_TCS_CMD_VOTE_Y_MASK GENMASK(13, 0)
+#define BCM_TCS_CMD_VOTE_X_MASK GENMASK(27, 14)
/* Construct a Bus Clock Manager (BCM) specific TCS command */
#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
- (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
- ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
- ((cpu_to_le32(vote_x) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
- ((cpu_to_le32(vote_y) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
+ (u32_encode_bits(commit, BCM_TCS_CMD_COMMIT_MASK) | \
+ u32_encode_bits(valid, BCM_TCS_CMD_VALID_MASK) | \
+ u32_encode_bits(vote_x, BCM_TCS_CMD_VOTE_X_MASK) | \
+ u32_encode_bits(vote_y, BCM_TCS_CMD_VOTE_Y_MASK))
#endif /* __SOC_QCOM_TCS_H__ */
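Illustrative use of the reworked macro (the BCM address and vote values are made up): packing a committed, valid vote now goes through u32_encode_bits() instead of open-coded shifts:

struct tcs_cmd cmd = {
	.addr = 0x3000,					/* hypothetical BCM register */
	.data = BCM_TCS_CMD(true, true, 0x100, 0x80),	/* commit + valid, X/Y votes */
};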
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 957295364a5e..4c7a40e149a5 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -2,8 +2,6 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -119,49 +117,6 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink)
void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
-#define snd_hdac_adsp_writeb(chip, reg, value) \
- snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readb(chip, reg) \
- snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writew(chip, reg, value) \
- snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readw(chip, reg) \
- snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writel(chip, reg, value) \
- snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readl(chip, reg) \
- snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writeq(chip, reg, value) \
- snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readq(chip, reg) \
- snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
-
-#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
- snd_hdac_adsp_writeb(chip, reg, \
- (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
- snd_hdac_adsp_writew(chip, reg, \
- (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
- snd_hdac_adsp_writel(chip, reg, \
- (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
- snd_hdac_adsp_writeq(chip, reg, \
- (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
-
-#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-
struct hdac_ext_device;
/* ops common to all codec drivers */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 67c99ffbf51b..8becb4504887 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1532,9 +1532,10 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
dev_dbg((pcm)->card->dev, fmt, ##args)
/* helpers for copying between iov_iter and iomem */
-int copy_to_iter_fromio(struct iov_iter *itert, const void __iomem *src,
- size_t count);
-int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count);
+size_t copy_to_iter_fromio(const void __iomem *src, size_t bytes,
+ struct iov_iter *iter) __must_check;
+size_t copy_from_iter_toio(void __iomem *dst, size_t bytes,
+ struct iov_iter *iter) __must_check;
struct snd_pcm_status64 {
snd_pcm_state_t state; /* stream state */
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index f31cabf0158c..6916f7133597 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -89,6 +89,7 @@ struct snd_rawmidi_substream {
unsigned int framing; /* whether to frame input data */
unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
+ bool inactive; /* inactive substream (for UMP legacy) */
size_t bytes;
spinlock_t lock;
struct snd_rawmidi *rmidi;
@@ -118,6 +119,7 @@ struct snd_rawmidi {
struct list_head list;
unsigned int device; /* device number */
unsigned int info_flags; /* SNDRV_RAWMIDI_INFO_XXXX */
+ unsigned int tied_device;
char id[64];
char name[80];
@@ -189,4 +191,13 @@ long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
long snd_rawmidi_kernel_write(struct snd_rawmidi_substream *substream,
const unsigned char *buf, long count);
+/* set up the tied devices */
+static inline void snd_rawmidi_tie_devices(struct snd_rawmidi *r1,
+ struct snd_rawmidi *r2)
+{
+ /* tied_device field keeps the device number + 1 (so that 0 means unknown) */
+ r1->tied_device = r2->device + 1;
+ r2->tied_device = r1->device + 1;
+}
+
#endif /* __SOUND_RAWMIDI_H */
diff --git a/include/sound/sdca.h b/include/sound/sdca.h
index 7e138229e8f3..973252d0adac 100644
--- a/include/sound/sdca.h
+++ b/include/sound/sdca.h
@@ -9,6 +9,9 @@
#ifndef __SDCA_H__
#define __SDCA_H__
+#include <linux/types.h>
+#include <linux/kconfig.h>
+
struct sdw_slave;
#define SDCA_MAX_FUNCTION_COUNT 8
@@ -20,9 +23,9 @@ struct sdw_slave;
* @name: human-readable string
*/
struct sdca_function_desc {
- u64 adr;
- u32 type;
const char *name;
+ u32 type;
+ u8 adr;
};
/**
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
index 9dc5bfec07e5..c051c17903e8 100644
--- a/include/sound/sdca_function.h
+++ b/include/sound/sdca_function.h
@@ -9,6 +9,8 @@
#ifndef __SDCA_FUNCTION_H__
#define __SDCA_FUNCTION_H__
+#include <linux/bits.h>
+
/*
* SDCA Function Types from SDCA specification v1.0a Section 5.1.2
* all Function types not described are reserved
@@ -40,6 +42,7 @@ enum sdca_function_type {
#define SDCA_FUNCTION_TYPE_RJ_NAME "RJ"
#define SDCA_FUNCTION_TYPE_SIMPLE_NAME "SimpleJack"
#define SDCA_FUNCTION_TYPE_HID_NAME "HID"
+#define SDCA_FUNCTION_TYPE_IMP_DEF_NAME "ImplementationDefined"
enum sdca_entity0_controls {
SDCA_CTL_ENTITY_0_COMMIT_GROUP_MASK = 0x01,
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 3360d9eab068..892f70532363 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -89,6 +89,13 @@ struct simple_util_priv {
#define simple_props_to_dai_codec(props, i) ((props)->codec_dai + i)
#define simple_props_to_codec_conf(props, i) ((props)->codec_conf + i)
+/* Has the same effect as simple_priv_to_props(). Preferred over
+ * simple_priv_to_props() when dealing with PCM runtime data, as
+ * the ID stored in rtd->id may not be a valid array index.
+ */
+#define runtime_simple_priv_to_props(priv, rtd) \
+ ((priv)->dai_props + ((rtd)->dai_link - (priv)->dai_link))
+
#define for_each_prop_dlc_cpus(props, i, cpu) \
for ((i) = 0; \
((i) < (props)->num.cpus) && \
@@ -264,9 +271,13 @@ static inline void simple_util_debug_info(struct simple_util_priv *priv)
simple_util_debug_dai(priv, "codec", dai);
if (link->name)
- dev_dbg(dev, "dai name = %s\n", link->name);
+ dev_dbg(dev, "link name = %s\n", link->name);
if (link->dai_fmt)
- dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
+ dev_dbg(dev, "link format = %04x\n", link->dai_fmt);
+ if (link->playback_only)
+ dev_dbg(dev, "link has playback_only");
+ if (link->capture_only)
+ dev_dbg(dev, "link has capture_only");
if (props->adata.convert_rate)
dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate);
if (props->adata.convert_channels)
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index aab57c19f62b..a11501752637 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -193,6 +193,9 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
+int snd_soc_dai_prepare(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream);
+
/* Digital Audio Interface mute */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 4f5d411e3823..fcdb5adfcd5e 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -681,6 +681,17 @@ struct snd_soc_dai_link_component {
struct device_node *of_node;
const char *dai_name;
const struct of_phandle_args *dai_args;
+
+ /*
+ * Extra format = SND_SOC_DAIFMT_Bx_Fx
+ *
+ * [Note] it is Bx_Fx base, not CBx_CFx
+ *
+ * It will be used with dai_link->dai_fmt;
+ * see snd_soc_runtime_set_dai_fmt().
+ */
+ unsigned int ext_fmt;
};
/*
@@ -1118,7 +1129,6 @@ struct snd_soc_card {
unsigned int instantiated:1;
unsigned int topology_shortname_created:1;
unsigned int fully_routed:1;
- unsigned int disable_route_checks:1;
unsigned int probed:1;
unsigned int component_chaining:1;
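
As a sketch of the ext_fmt field added to snd_soc_dai_link_component above, a component might carry an extra Bx_Fx value that snd_soc_runtime_set_dai_fmt() merges with dai_link->dai_fmt; the DAI name below is illustrative:

/* Hedged sketch: illustrative component carrying an extra Bx_Fx format */
#include <sound/soc.h>

static struct snd_soc_dai_link_component example_cpu_dlc = {
    .dai_name = "example-cpu-dai",      /* hypothetical DAI name */
    .ext_fmt  = SND_SOC_DAIFMT_IB_NF,   /* Bx_Fx base, not CBx_CFx */
};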
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 0e82598e10af..36a4a1e1d8ca 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -224,6 +224,8 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_links,
struct asoc_sdw_codec_info *info,
bool playback);
+int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
/* MAXIM codec support */
int asoc_sdw_maxim_init(struct snd_soc_card *card,
diff --git a/include/sound/ump.h b/include/sound/ump.h
index 532c2c3ea28e..73f97f88e2ed 100644
--- a/include/sound/ump.h
+++ b/include/sound/ump.h
@@ -83,6 +83,7 @@ struct snd_ump_ops {
struct snd_seq_ump_ops {
void (*input_receive)(struct snd_ump_endpoint *ump,
const u32 *data, int words);
+ int (*notify_ep_change)(struct snd_ump_endpoint *ump);
int (*notify_fb_change)(struct snd_ump_endpoint *ump,
struct snd_ump_block *fb);
int (*switch_protocol)(struct snd_ump_endpoint *ump);
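
A hedged sketch of a sequencer client wiring up the new notify_ep_change callback; the handler body is illustrative:

/* Hedged sketch: filling in the new endpoint-change notification */
#include <sound/ump.h>

static int example_notify_ep_change(struct snd_ump_endpoint *ump)
{
    /* re-read ump->info here and refresh the client's endpoint state */
    return 0;
}

static const struct snd_seq_ump_ops example_seq_ump_ops = {
    .notify_ep_change = example_notify_ep_change,
};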
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 60af7c63b34e..51ca80abacf7 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -576,9 +576,6 @@ struct iscsit_conn {
spinlock_t state_lock;
spinlock_t login_timer_lock;
spinlock_t login_worker_lock;
- /* libcrypto RX and TX contexts for crc32c */
- struct ahash_request *conn_rx_hash;
- struct ahash_request *conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
cpumask_var_t allowed_cpumask;
diff --git a/include/trace/events/capability.h b/include/trace/events/capability.h
new file mode 100644
index 000000000000..17340257946c
--- /dev/null
+++ b/include/trace/events/capability.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM capability
+
+#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CAPABILITY_H
+
+#include <linux/cred.h>
+#include <linux/tracepoint.h>
+#include <linux/user_namespace.h>
+
+/**
+ * cap_capable - called after it's determined if a task has a particular
+ * effective capability
+ *
+ * @cred: The credentials used
+ * @target_ns: The user namespace of the resource being accessed
+ * @capable_ns: The user namespace in which the credential provides the
+ * capability to access the targeted resource.
+ * This will be NULL if ret is not 0.
+ * @cap: The capability to check for
+ * @ret: The return value of the check: 0 if the credential has the capability, -ve if it does not
+ *
+ * Allows tracing calls to cap_capable in commoncap.c
+ */
+TRACE_EVENT(cap_capable,
+
+ TP_PROTO(const struct cred *cred, struct user_namespace *target_ns,
+ const struct user_namespace *capable_ns, int cap, int ret),
+
+ TP_ARGS(cred, target_ns, capable_ns, cap, ret),
+
+ TP_STRUCT__entry(
+ __field(const struct cred *, cred)
+ __field(struct user_namespace *, target_ns)
+ __field(const struct user_namespace *, capable_ns)
+ __field(int, cap)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = cred;
+ __entry->target_ns = target_ns;
+ __entry->capable_ns = ret == 0 ? capable_ns : NULL;
+ __entry->cap = cap;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d",
+ __entry->cred, __entry->target_ns, __entry->capable_ns, __entry->cap,
+ __entry->ret)
+);
+
+#endif /* _TRACE_CAPABILITY_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
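
A hedged sketch of how a capability check could emit the new tracepoint; the surrounding function and the cap_raised()-based decision are illustrative, not the actual commoncap.c logic:

/* Hedged sketch: emitting trace_cap_capable() from an illustrative check */
#define CREATE_TRACE_POINTS
#include <trace/events/capability.h>
#include <linux/capability.h>
#include <linux/errno.h>

static int example_capability_check(const struct cred *cred,
                                    struct user_namespace *target_ns, int cap)
{
    int ret = cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

    /* capable_ns is only meaningful on success, as documented above */
    trace_cap_capable(cred, target_ns, ret == 0 ? target_ns : NULL, cap, ret);
    return ret;
}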
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 2851c823095b..eb3b2f1326b1 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1119,11 +1119,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
(unsigned long long)__entry->count)
);
-DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1138,9 +1138,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_fast_assign(
- __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
- __entry->ino = page_file_mapping(page)->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->old_blkaddr = fio->old_blkaddr;
__entry->new_blkaddr = fio->new_blkaddr;
__entry->op = fio->op;
@@ -1149,7 +1149,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->type = fio->type;
),
- TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, "
"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
@@ -1160,22 +1160,22 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
show_block_type(__entry->type))
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_write,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
DECLARE_EVENT_CLASS(f2fs__bio,
@@ -1322,12 +1322,11 @@ DECLARE_EVENT_CLASS(f2fs__folio,
),
TP_fast_assign(
- __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev;
- __entry->ino = folio_file_mapping(folio)->host->i_ino;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
__entry->type = type;
- __entry->dir =
- S_ISDIR(folio_file_mapping(folio)->host->i_mode);
- __entry->index = folio_index(folio);
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
__entry->dirty = folio_test_dirty(folio);
__entry->uptodate = folio_test_uptodate(folio);
),
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
index bc2e3ad787b3..cf9f9faf8914 100644
--- a/include/trace/events/mmap_lock.h
+++ b/include/trace/events/mmap_lock.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MMAP_LOCK_H
+#include <linux/memcontrol.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
@@ -12,64 +13,61 @@ struct mm_struct;
DECLARE_EVENT_CLASS(mmap_lock,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
+ TP_PROTO(struct mm_struct *mm, bool write),
- TP_ARGS(mm, memcg_path, write),
+ TP_ARGS(mm, write),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s",
- __entry->mm,
- __get_str(memcg_path),
+ "mm=%p memcg_id=%llu write=%s",
+ __entry->mm, __entry->memcg_id,
__entry->write ? "true" : "false"
)
);
#define DEFINE_MMAP_LOCK_EVENT(name) \
DEFINE_EVENT(mmap_lock, name, \
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
- bool write), \
- TP_ARGS(mm, memcg_path, write))
+ TP_PROTO(struct mm_struct *mm, bool write), \
+ TP_ARGS(mm, write))
DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
TRACE_EVENT(mmap_lock_acquire_returned,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
- bool success),
+ TP_PROTO(struct mm_struct *mm, bool write, bool success),
- TP_ARGS(mm, memcg_path, write, success),
+ TP_ARGS(mm, write, success),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
__field(bool, success)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
__entry->success = success;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s success=%s",
+ "mm=%p memcg_id=%llu write=%s success=%s",
__entry->mm,
- __get_str(memcg_path),
+ __entry->memcg_id,
__entry->write ? "true" : "false",
__entry->success ? "true" : "false"
)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index d36c857dd249..72fbfe3caeaf 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -179,7 +179,8 @@ TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
DEF_PAGEFLAG_NAME(head), \
DEF_PAGEFLAG_NAME(reclaim), \
DEF_PAGEFLAG_NAME(swapbacked), \
- DEF_PAGEFLAG_NAME(unevictable) \
+ DEF_PAGEFLAG_NAME(unevictable), \
+ DEF_PAGEFLAG_NAME(dropbehind) \
IF_HAVE_PG_MLOCK(mlocked) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 2f119d18a061..cad50d91077e 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -219,6 +219,7 @@
EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
EM(rxrpc_conn_get_idle, "GET idle ") \
EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \
+ EM(rxrpc_conn_get_poke_secured, "GET secured ") \
EM(rxrpc_conn_get_poke_timer, "GET poke ") \
EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
EM(rxrpc_conn_new_client, "NEW client ") \
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 47b527464d1a..af535b053033 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -38,22 +38,56 @@ TRACE_EVENT(task_rename,
TP_ARGS(task, comm),
TP_STRUCT__entry(
- __field( pid_t, pid)
__array( char, oldcomm, TASK_COMM_LEN)
__array( char, newcomm, TASK_COMM_LEN)
__field( short, oom_score_adj)
),
TP_fast_assign(
- __entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
- __entry->pid, __entry->oldcomm,
- __entry->newcomm, __entry->oom_score_adj)
+ TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd",
+ __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj)
+);
+
+/**
+ * task_prctl_unknown - called on unknown prctl() option
+ * @option: option passed
+ * @arg2: arg2 passed
+ * @arg3: arg3 passed
+ * @arg4: arg4 passed
+ * @arg5: arg5 passed
+ *
+ * Called on an unknown prctl() option.
+ */
+TRACE_EVENT(task_prctl_unknown,
+
+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5),
+
+ TP_ARGS(option, arg2, arg3, arg4, arg5),
+
+ TP_STRUCT__entry(
+ __field( int, option)
+ __field( unsigned long, arg2)
+ __field( unsigned long, arg3)
+ __field( unsigned long, arg4)
+ __field( unsigned long, arg5)
+ ),
+
+ TP_fast_assign(
+ __entry->option = option;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ __entry->arg4 = arg4;
+ __entry->arg5 = arg5;
+ ),
+
+ TP_printk("option=%d arg2=%ld arg3=%ld arg4=%ld arg5=%ld",
+ __entry->option, __entry->arg2, __entry->arg3, __entry->arg4, __entry->arg5)
);
#endif
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 80f37a0d40d7..613475285643 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -6,7 +6,6 @@
/*
* FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x4000000
* These cannot be used by userspace O_* until internal and external open
* flags are split.
* -Eric Paris
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index efe5de6ce208..aaa4f3bc688b 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -411,13 +411,20 @@ struct drm_amdgpu_gem_userptr {
/* GFX12 and later: */
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
-/* These are DCC recompression setting for memory management: */
+/* These are DCC recompression settings for memory management: */
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
+/* When clearing the buffer or moving it from VRAM to GTT, don't compress and set DCC metadata
+ * to uncompressed. Set when parts of an allocation bypass DCC and read raw data. */
+#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK 0x1
+/* bit gap */
+#define AMDGPU_TILING_GFX12_SCANOUT_SHIFT 63
+#define AMDGPU_TILING_GFX12_SCANOUT_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 75e21a135483..d9a069b4a775 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -161,6 +161,7 @@
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
#define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */
+#define AUDIT_INTEGRITY_USERSPACE 1808 /* Userspace enforced data integrity */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4162afc6b5d0..2acf9b336371 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1573,6 +1573,16 @@ union bpf_attr {
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
+ /* The fd_array_cnt can be used to pass the length of the
+ * fd_array array. In this case all the [map] file descriptors
+ * passed in this array will be bound to the program, even if
+ * the maps are not referenced directly. The functionality is
+ * similar to the BPF_PROG_BIND_MAP syscall, but maps can be
+ * used by the verifier during the program load. If provided,
+ * then the fd_array[0,...,fd_array_cnt-1] is expected to be
+ * continuous.
+ */
+ __u32 fd_array_cnt;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
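
For illustration, a hedged user-space sketch of passing pre-created map fds at BPF_PROG_LOAD time via the new fd_array_cnt field; the instruction buffer and map fds are assumed to exist:

/* Hedged sketch: binding extra map fds to a program at load time */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_load_with_fd_array(const struct bpf_insn *insns, __u32 insn_cnt,
                                   const __u32 *map_fds, __u32 nr_map_fds)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    attr.insns = (__u64)(unsigned long)insns;
    attr.insn_cnt = insn_cnt;
    attr.license = (__u64)(unsigned long)"GPL";
    attr.fd_array = (__u64)(unsigned long)map_fds;
    attr.fd_array_cnt = nr_map_fds;   /* new: fds [0, cnt) are bound to the prog */

    return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}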
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 1990b5700f69..b08c7378164d 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -286,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 48
+#define DM_VERSION_MINOR 49
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2023-03-01)"
+#define DM_VERSION_EXTRA "-ioctl (2025-01-17)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index 34f221d3a1b9..bd8167979707 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -25,6 +25,9 @@
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
+/* #define FAN_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
+
+#define FAN_PRE_ACCESS 0x00100000 /* Pre-content access hook */
#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
@@ -143,6 +146,7 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_DFID 3
#define FAN_EVENT_INFO_TYPE_PIDFD 4
#define FAN_EVENT_INFO_TYPE_ERROR 5
+#define FAN_EVENT_INFO_TYPE_RANGE 6
/* Special info types for FAN_RENAME */
#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10
@@ -189,6 +193,13 @@ struct fanotify_event_info_error {
__u32 error_count;
};
+struct fanotify_event_info_range {
+ struct fanotify_event_info_header hdr;
+ __u32 pad;
+ __u64 offset;
+ __u64 count;
+};
+
/*
* User space may need to record additional information about its decision.
* The extra information type records what kind of information is included.
@@ -224,6 +235,13 @@ struct fanotify_response_info_audit_rule {
/* Legit userspace responses to a _PERM event */
#define FAN_ALLOW 0x01
#define FAN_DENY 0x02
+/* an errno other than EPERM can be specified in the upper byte of a deny response */
+#define FAN_ERRNO_BITS 8
+#define FAN_ERRNO_SHIFT (32 - FAN_ERRNO_BITS)
+#define FAN_ERRNO_MASK ((1 << FAN_ERRNO_BITS) - 1)
+#define FAN_DENY_ERRNO(err) \
+ (FAN_DENY | ((((__u32)(err)) & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT))
+
#define FAN_AUDIT 0x10 /* Bitmask to create audit record for result */
#define FAN_INFO 0x20 /* Bitmask to indicate additional information */
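
A hedged user-space sketch of replying to a FAN_PRE_ACCESS permission event with a specific errno using the new FAN_DENY_ERRNO() helper; the event is assumed to come from a normal fanotify read loop:

/* Hedged sketch: deny a pre-content access event with EIO */
#include <errno.h>
#include <unistd.h>
#include <linux/fanotify.h>

static int deny_with_eio(int fanotify_fd, const struct fanotify_event_metadata *ev)
{
    struct fanotify_response resp = {
        .fd = ev->fd,
        .response = FAN_DENY_ERRNO(EIO),   /* errno carried in the upper byte */
    };

    return write(fanotify_fd, &resp, sizeof(resp)) == sizeof(resp) ? 0 : -1;
}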
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 6e6907e63bfc..a15ac2fa4b20 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -155,4 +155,8 @@
#define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */
#define AT_HANDLE_CONNECTABLE 0x002 /* Request a connectable file handle */
+/* Flags for execveat2(2). */
+#define AT_EXECVE_CHECK 0x10000 /* Only perform a check if execution
+ would be allowed. */
+
#endif /* _UAPI_LINUX_FCNTL_H */
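
A hedged user-space sketch of the new check-only execveat() flag; the fallback definition covers libc headers that do not yet carry AT_EXECVE_CHECK:

/* Hedged sketch: ask whether fd would be allowed to execute, without executing it */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef AT_EXECVE_CHECK
#define AT_EXECVE_CHECK 0x10000     /* value from this header */
#endif

static int would_execute(int fd)
{
    char *const argv[] = { "check", NULL };
    char *const envp[] = { NULL };

    /* 0 if execution would be allowed, -1 with errno set otherwise */
    return syscall(__NR_execveat, fd, "", argv, envp,
                   AT_EMPTY_PATH | AT_EXECVE_CHECK);
}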
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index f1e99458e29e..5e0eb41d967e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -220,6 +220,15 @@
*
* 7.41
* - add FUSE_ALLOW_IDMAP
+ * 7.42
+ * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data
+ * structures:
+ * - struct fuse_uring_ent_in_out
+ * - struct fuse_uring_req_header
+ * - struct fuse_uring_cmd_req
+ * - FUSE_URING_IN_OUT_HEADER_SZ
+ * - FUSE_URING_OP_IN_OUT_SZ
+ * - enum fuse_uring_cmd
*/
#ifndef _LINUX_FUSE_H
@@ -255,7 +264,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 41
+#define FUSE_KERNEL_MINOR_VERSION 42
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -425,6 +434,7 @@ struct fuse_file_lock {
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
+ * FUSE_OVER_IO_URING: Indicate that client supports io-uring
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -471,6 +481,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
#define FUSE_ALLOW_IDMAP (1ULL << 40)
+#define FUSE_OVER_IO_URING (1ULL << 41)
/**
* CUSE INIT request/reply flags
@@ -1206,4 +1217,67 @@ struct fuse_supp_groups {
uint32_t groups[];
};
+/**
+ * Size of the ring buffer header
+ */
+#define FUSE_URING_IN_OUT_HEADER_SZ 128
+#define FUSE_URING_OP_IN_OUT_SZ 128
+
+/* Used as part of the fuse_uring_req_header */
+struct fuse_uring_ent_in_out {
+ uint64_t flags;
+
+ /*
+ * commit ID to be used in a reply to a ring request (see also
+ * struct fuse_uring_cmd_req)
+ */
+ uint64_t commit_id;
+
+ /* size of user payload buffer */
+ uint32_t payload_sz;
+ uint32_t padding;
+
+ uint64_t reserved;
+};
+
+/**
+ * Header for all fuse-io-uring requests
+ */
+struct fuse_uring_req_header {
+ /* struct fuse_in_header / struct fuse_out_header */
+ char in_out[FUSE_URING_IN_OUT_HEADER_SZ];
+
+ /* per op code header */
+ char op_in[FUSE_URING_OP_IN_OUT_SZ];
+
+ struct fuse_uring_ent_in_out ring_ent_in_out;
+};
+
+/**
+ * sqe commands to the kernel
+ */
+enum fuse_uring_cmd {
+ FUSE_IO_URING_CMD_INVALID = 0,
+
+ /* register the request buffer and fetch a fuse request */
+ FUSE_IO_URING_CMD_REGISTER = 1,
+
+ /* commit fuse request result and fetch next request */
+ FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2,
+};
+
+/**
+ * In the 80B command area of the SQE.
+ */
+struct fuse_uring_cmd_req {
+ uint64_t flags;
+
+ /* entry identifier for commits */
+ uint64_t commit_id;
+
+ /* queue the command is for (queue index) */
+ uint16_t qid;
+ uint8_t padding[6];
+};
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index a4206723f503..5a199f3d4a26 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -519,6 +519,7 @@
#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
+#define KEY_LINK_PHONE 0x1bf /* AL Phone Syncing */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 34810f6ae2b5..78747b24bd0f 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -868,6 +868,7 @@ enum iommu_hwpt_pgfault_perm {
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: Combination of enum iommu_hwpt_pgfault_perm
+ * @__reserved: Must be 0.
* @addr: Fault address
* @length: a hint of how much data the requestor is expecting to fetch. For
* example, if the PRI initiator knows it is going to do a 10MB
@@ -883,7 +884,8 @@ struct iommu_hwpt_pgfault {
__u32 pasid;
__u32 grpid;
__u32 perm;
- __u64 addr;
+ __u32 __reserved;
+ __aligned_u64 addr;
__u32 length;
__u32 cookie;
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 502ea63b5d2e..45e6d8fca9b9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -617,10 +617,6 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
- KVM_X86_DISABLE_EXITS_HLT | \
- KVM_X86_DISABLE_EXITS_PAUSE | \
- KVM_X86_DISABLE_EXITS_CSTATE)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -1070,6 +1066,10 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
#define KVM_REG_SIZE_U8 0x0000000000000000ULL
#define KVM_REG_SIZE_U16 0x0010000000000000ULL
#define KVM_REG_SIZE_U32 0x0020000000000000ULL
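
For illustration, a small sketch of the now-shared KVM_REG_SIZE() helper decoding the size field of a register id:

/* Hedged sketch: KVM_REG_SIZE() turns the size field of an id into bytes */
#include <linux/kvm.h>

static unsigned int example_reg_size(void)
{
    __u64 id = KVM_REG_GENERIC | KVM_REG_SIZE_U64;   /* illustrative id */

    return KVM_REG_SIZE(id);                         /* yields 8 */
}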
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index caf4db2fcbb9..4273e0249fcb 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -58,7 +58,7 @@
#define NFS4_SHARE_DENY_BOTH 0x0003
/* nfs41 */
-#define NFS4_SHARE_WANT_MASK 0xFF00
+#define NFS4_SHARE_WANT_TYPE_MASK 0xFF00
#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000
#define NFS4_SHARE_WANT_READ_DELEG 0x0100
#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200
@@ -66,13 +66,16 @@
#define NFS4_SHARE_WANT_NO_DELEG 0x0400
#define NFS4_SHARE_WANT_CANCEL 0x0500
-#define NFS4_SHARE_WHEN_MASK 0xF0000
+#define NFS4_SHARE_WHEN_MASK 0xF0000
#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
+#define NFS4_SHARE_WANT_MOD_MASK 0xF00000
#define NFS4_SHARE_WANT_DELEG_TIMESTAMPS 0x100000
#define NFS4_SHARE_WANT_OPEN_XOR_DELEGATION 0x200000
+#define NFS4_SHARE_WANT_MASK (NFS4_SHARE_WANT_TYPE_MASK | NFS4_SHARE_WANT_MOD_MASK)
+
#define NFS4_CDFC4_FORE 0x1
#define NFS4_CDFC4_BACK 0x2
#define NFS4_CDFC4_BOTH 0x3
diff --git a/include/uapi/linux/ntsync.h b/include/uapi/linux/ntsync.h
index dcfa38fdc93c..6d06793512b1 100644
--- a/include/uapi/linux/ntsync.h
+++ b/include/uapi/linux/ntsync.h
@@ -11,13 +11,49 @@
#include <linux/types.h>
struct ntsync_sem_args {
- __u32 sem;
__u32 count;
__u32 max;
};
-#define NTSYNC_IOC_CREATE_SEM _IOWR('N', 0x80, struct ntsync_sem_args)
+struct ntsync_mutex_args {
+ __u32 owner;
+ __u32 count;
+};
+
+struct ntsync_event_args {
+ __u32 manual;
+ __u32 signaled;
+};
+
+#define NTSYNC_WAIT_REALTIME 0x1
+
+struct ntsync_wait_args {
+ __u64 timeout;
+ __u64 objs;
+ __u32 count;
+ __u32 index;
+ __u32 flags;
+ __u32 owner;
+ __u32 alert;
+ __u32 pad;
+};
+
+#define NTSYNC_MAX_WAIT_COUNT 64
+
+#define NTSYNC_IOC_CREATE_SEM _IOW ('N', 0x80, struct ntsync_sem_args)
+#define NTSYNC_IOC_WAIT_ANY _IOWR('N', 0x82, struct ntsync_wait_args)
+#define NTSYNC_IOC_WAIT_ALL _IOWR('N', 0x83, struct ntsync_wait_args)
+#define NTSYNC_IOC_CREATE_MUTEX _IOW ('N', 0x84, struct ntsync_mutex_args)
+#define NTSYNC_IOC_CREATE_EVENT _IOW ('N', 0x87, struct ntsync_event_args)
-#define NTSYNC_IOC_SEM_POST _IOWR('N', 0x81, __u32)
+#define NTSYNC_IOC_SEM_RELEASE _IOWR('N', 0x81, __u32)
+#define NTSYNC_IOC_MUTEX_UNLOCK _IOWR('N', 0x85, struct ntsync_mutex_args)
+#define NTSYNC_IOC_MUTEX_KILL _IOW ('N', 0x86, __u32)
+#define NTSYNC_IOC_EVENT_SET _IOR ('N', 0x88, __u32)
+#define NTSYNC_IOC_EVENT_RESET _IOR ('N', 0x89, __u32)
+#define NTSYNC_IOC_EVENT_PULSE _IOR ('N', 0x8a, __u32)
+#define NTSYNC_IOC_SEM_READ _IOR ('N', 0x8b, struct ntsync_sem_args)
+#define NTSYNC_IOC_MUTEX_READ _IOR ('N', 0x8c, struct ntsync_mutex_args)
+#define NTSYNC_IOC_EVENT_READ _IOR ('N', 0x8d, struct ntsync_event_args)
#endif
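
A hedged user-space sketch of the reworked ntsync ioctls: create a semaphore (the ioctl returns a new object fd) and release one count; the character device path is an assumption:

/* Hedged sketch: create an NT-style semaphore and release one count */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ntsync.h>

static int ntsync_sem_example(void)
{
    struct ntsync_sem_args args = { .count = 0, .max = 2 };
    __u32 release_count = 1;
    int dev, sem, ret;

    dev = open("/dev/ntsync", O_RDWR);                 /* assumed device node */
    if (dev < 0)
        return -1;

    sem = ioctl(dev, NTSYNC_IOC_CREATE_SEM, &args);    /* returns an object fd */
    if (sem < 0) {
        close(dev);
        return -1;
    }

    ret = ioctl(sem, NTSYNC_IOC_SEM_RELEASE, &release_count);

    close(sem);
    close(dev);
    return ret;
}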
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 1601c7ed5fab..3445c4970e4d 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -533,7 +533,7 @@
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
-#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
+#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
@@ -665,6 +665,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
+#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX 0x00c00000 /* Max End-End TLP Prefixes */
#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
@@ -789,10 +790,11 @@
/* Same bits as above */
#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
-#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
-#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
-#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
-#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
+#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
+#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
+#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
+#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
+#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */
#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
@@ -808,6 +810,7 @@
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
+#define PCI_ERR_PREFIX_LOG 0x38 /* TLP Prefix LOG Register (up to 16 bytes) */
/* Virtual Channel */
#define PCI_VC_PORT_CAP1 0x04
@@ -1001,9 +1004,6 @@
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
-#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
-#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
-
/* SATA capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index 94b46b043b53..acd261f49866 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -20,6 +20,7 @@
#define PCITEST_MSIX _IOW('P', 0x7, int)
#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
+#define PCITEST_BARS _IO('P', 0xa)
#define PCITEST_CLEAR_IRQ _IO('P', 0x10)
#define PCITEST_FLAGS_USE_DMA 0x00000001
diff --git a/include/uapi/linux/pps_gen.h b/include/uapi/linux/pps_gen.h
new file mode 100644
index 000000000000..60a5d0fcfa68
--- /dev/null
+++ b/include/uapi/linux/pps_gen.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * PPS generator API header
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#ifndef _PPS_GEN_H_
+#define _PPS_GEN_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * struct pps_gen_event - the PPS generator events
+ * @event: the event type
+ * @sequence: the event sequence number
+ *
+ * Userspace can get the last PPS generator event by using the
+ * ioctl(pps_gen, PPS_GEN_FETCHEVENT, ...) syscall.
+ * The sequence field can be used to save the last event ID, while the event
+ * field stores the last event type. The currently known event is:
+ *
+ * PPS_GEN_EVENT_MISSEDPULSE : last pulse was not generated
+ */
+struct pps_gen_event {
+ unsigned int event;
+ unsigned int sequence;
+};
+
+#define PPS_GEN_EVENT_MISSEDPULSE 1
+
+#define PPS_GEN_SETENABLE _IOW('p', 0xb1, unsigned int *)
+#define PPS_GEN_USESYSTEMCLOCK _IOR('p', 0xb2, unsigned int *)
+#define PPS_GEN_FETCHEVENT _IOR('p', 0xb3, struct pps_gen_event *)
+
+#endif /* _PPS_GEN_H_ */
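
A hedged user-space sketch of the new PPS generator API: enable the generator, then fetch the last event; the device node name is an assumption and PPS_GEN_FETCHEVENT may block until an event arrives:

/* Hedged sketch: enable a PPS generator and fetch its last event */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps_gen.h>

static int pps_gen_example(void)
{
    struct pps_gen_event ev;
    unsigned int enable = 1;
    int fd, ret;

    fd = open("/dev/pps-gen0", O_RDWR);            /* assumed device node */
    if (fd < 0)
        return -1;

    ret = ioctl(fd, PPS_GEN_SETENABLE, &enable);
    if (!ret)
        ret = ioctl(fd, PPS_GEN_FETCHEVENT, &ev);  /* ev.event / ev.sequence */

    close(fd);
    return ret;
}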
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index d6d98877ff1a..3fba30dbd68b 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -52,10 +52,32 @@
#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
(issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
+/* See Documentation/userspace-api/check_exec.rst */
+#define SECURE_EXEC_RESTRICT_FILE 8
+#define SECURE_EXEC_RESTRICT_FILE_LOCKED 9 /* make bit-8 immutable */
+
+#define SECBIT_EXEC_RESTRICT_FILE (issecure_mask(SECURE_EXEC_RESTRICT_FILE))
+#define SECBIT_EXEC_RESTRICT_FILE_LOCKED \
+ (issecure_mask(SECURE_EXEC_RESTRICT_FILE_LOCKED))
+
+/* See Documentation/userspace-api/check_exec.rst */
+#define SECURE_EXEC_DENY_INTERACTIVE 10
+#define SECURE_EXEC_DENY_INTERACTIVE_LOCKED 11 /* make bit-10 immutable */
+
+#define SECBIT_EXEC_DENY_INTERACTIVE \
+ (issecure_mask(SECURE_EXEC_DENY_INTERACTIVE))
+#define SECBIT_EXEC_DENY_INTERACTIVE_LOCKED \
+ (issecure_mask(SECURE_EXEC_DENY_INTERACTIVE_LOCKED))
+
#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
issecure_mask(SECURE_NO_SETUID_FIXUP) | \
issecure_mask(SECURE_KEEP_CAPS) | \
- issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
+ issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE) | \
+ issecure_mask(SECURE_EXEC_RESTRICT_FILE) | \
+ issecure_mask(SECURE_EXEC_DENY_INTERACTIVE))
#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
+#define SECURE_ALL_UNPRIVILEGED (issecure_mask(SECURE_EXEC_RESTRICT_FILE) | \
+ issecure_mask(SECURE_EXEC_DENY_INTERACTIVE))
+
#endif /* _UAPI_LINUX_SECUREBITS_H */
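
A hedged user-space sketch of turning on the new exec securebits for the current task via prctl(); per SECURE_ALL_UNPRIVILEGED above, these particular bits are intended to be settable without extra privilege:

/* Hedged sketch: enable and lock SECBIT_EXEC_RESTRICT_FILE for this task */
#include <sys/prctl.h>
#include <linux/securebits.h>

static int enable_exec_restrict(void)
{
    int bits = prctl(PR_GET_SECUREBITS);

    if (bits < 0)
        return -1;

    return prctl(PR_SET_SECUREBITS,
                 bits | SECBIT_EXEC_RESTRICT_FILE |
                 SECBIT_EXEC_RESTRICT_FILE_LOCKED);
}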
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index b50b2eb257a0..934e20ef7f79 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -72,6 +72,8 @@ struct taskstats {
*/
__u64 cpu_count __attribute__((aligned(8)));
__u64 cpu_delay_total;
+ __u64 cpu_delay_max;
+ __u64 cpu_delay_min;
/* Following four fields atomically updated using task->delays->lock */
@@ -80,10 +82,14 @@ struct taskstats {
*/
__u64 blkio_count;
__u64 blkio_delay_total;
+ __u64 blkio_delay_max;
+ __u64 blkio_delay_min;
/* Delay waiting for page fault I/O (swap in only) */
__u64 swapin_count;
__u64 swapin_delay_total;
+ __u64 swapin_delay_max;
+ __u64 swapin_delay_min;
/* cpu "wall-clock" running time
* On some architectures, value will adjust for cpu time stolen
@@ -166,10 +172,14 @@ struct taskstats {
/* Delay waiting for memory reclaim */
__u64 freepages_count;
__u64 freepages_delay_total;
+ __u64 freepages_delay_max;
+ __u64 freepages_delay_min;
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
+ __u64 thrashing_delay_max;
+ __u64 thrashing_delay_min;
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
@@ -177,6 +187,8 @@ struct taskstats {
/* v11: Delay waiting for memory compact */
__u64 compact_count;
__u64 compact_delay_total;
+ __u64 compact_delay_max;
+ __u64 compact_delay_min;
/* v12 begin */
__u32 ac_tgid; /* thread group ID */
@@ -198,10 +210,15 @@ struct taskstats {
/* v13: Delay waiting for write-protect copy */
__u64 wpcopy_count;
__u64 wpcopy_delay_total;
+ __u64 wpcopy_delay_max;
+ __u64 wpcopy_delay_min;
/* v14: Delay waiting for IRQ/SOFTIRQ */
__u64 irq_count;
__u64 irq_delay_total;
+ __u64 irq_delay_max;
+ __u64 irq_delay_min;
+ /* v15: add Delay max */
};
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 2ebdba111a8f..beef1752e36e 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -206,7 +206,7 @@ struct usb_ffs_dmabuf_transfer_req {
* +-----+-----------------+------+--------------------------+
* | off | name | type | description |
* +-----+-----------------+------+--------------------------+
- * | 0 | inteface | U8 | related interface number |
+ * | 0 | interface | U8 | related interface number |
* +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
* +-----+-----------------+------+--------------------------+
@@ -224,7 +224,7 @@ struct usb_ffs_dmabuf_transfer_req {
* +-----+-----------------+------+--------------------------+
* | off | name | type | description |
* +-----+-----------------+------+--------------------------+
- * | 0 | inteface | U8 | related interface number |
+ * | 0 | interface | U8 | related interface number |
* +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
* +-----+-----------------+------+--------------------------+
@@ -237,7 +237,7 @@ struct usb_ffs_dmabuf_transfer_req {
* | 11 | ExtProp[] | | list of ext. prop. d. |
* +-----+-----------------+------+--------------------------+
*
- * ExtCompat[] is an array of valid Extended Compatiblity descriptors
+ * ExtCompat[] is an array of valid Extended Compatibility descriptors
* which have the following format:
*
* +-----+-----------------------+------+-------------------------------------+
@@ -295,7 +295,7 @@ struct usb_functionfs_strings_head {
* | 16 | stringtab | StringTab[lang_count] | table of strings per lang |
*
* For each language there is one stringtab entry (ie. there are lang_count
- * stringtab entires). Each StringTab has following format:
+ * stringtab entries). Each StringTab has following format:
*
* | off | name | type | description |
* |-----+---------+-------------------+------------------------------------|
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
index 11bd48c72c6c..68a627d04afa 100644
--- a/include/uapi/linux/vduse.h
+++ b/include/uapi/linux/vduse.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
#ifndef _UAPI_VDUSE_H_
#define _UAPI_VDUSE_H_
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 1beb317df1b9..8549d4571257 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -116,6 +116,8 @@
#define VIRTIO_PCI_CAP_PCI_CFG 5
/* Additional shared memory capability */
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
+/* PCI vendor data configuration */
+#define VIRTIO_PCI_CAP_VENDOR_CFG 9
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -130,6 +132,18 @@ struct virtio_pci_cap {
__le32 length; /* Length of the structure, in bytes. */
};
+/* This is the PCI vendor data capability header: */
+struct virtio_pci_vndr_data {
+ __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ __u8 cap_next; /* Generic PCI field: next ptr. */
+ __u8 cap_len; /* Generic PCI field: capability length */
+ __u8 cfg_type; /* Identifies the structure. */
+ __u16 vendor_id; /* Identifies the vendor-specific format. */
+ /* For Vendor Definition */
+ /* Pads structure to a multiple of 4 bytes */
+ /* Reads must not have side effects */
+};
+
struct virtio_pci_cap64 {
struct virtio_pci_cap cap;
__le32 offset_hi; /* Most sig 32 bits of offset */
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index e1571603175e..aa872a41ffb9 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -175,6 +175,8 @@
#define UBI_IOCRPEB _IOW(UBI_IOC_MAGIC, 4, __s32)
/* Force scrubbing on the specified PEB */
#define UBI_IOCSPEB _IOW(UBI_IOC_MAGIC, 5, __s32)
+/* Read detailed device erase counter information */
+#define UBI_IOCECNFO _IOWR(UBI_IOC_MAGIC, 6, struct ubi_ecinfo_req)
/* ioctl commands of the UBI control character device */
@@ -413,6 +415,37 @@ struct ubi_rnvol_req {
} __packed;
/**
+ * struct ubi_ecinfo_req - a data structure used for requesting and receiving
+ * erase block counter information from a UBI device.
+ *
+ * @start: index of first physical erase block to read (in)
+ * @length: number of erase counters to read (in)
+ * @read_length: number of erase counters that were actually read (out)
+ * @padding: reserved for future, not used, has to be zeroed
+ * @erase_counters: array of erase counter values (out)
+ *
+ * This structure is used to retrieve erase counter information for a specified
+ * range of PEBs on a UBI device.
+ * Erase counters are read starting at @start, and the kernel attempts to read
+ * @length counters.
+ * The retrieved values are stored in the @erase_counters array. It is the
+ * responsibility of the caller to allocate enough memory for storing @length
+ * elements in the @erase_counters array.
+ * If a block is bad or if the erase counter is unknown the corresponding value
+ * in the array will be set to -1.
+ * The @read_length field will indicate the number of erase counters actually
+ * read. Typically @read_length will be limited due to memory or the number of
+ * PEBs on the UBI device.
+ */
+struct ubi_ecinfo_req {
+ __s32 start;
+ __s32 length;
+ __s32 read_length;
+ __s8 padding[16];
+ __s32 erase_counters[];
+} __packed;
+
+/**
* struct ubi_leb_change_req - a data structure used in atomic LEB change
* requests.
* @lnum: logical eraseblock number to change
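
A hedged user-space sketch of the new UBI_IOCECNFO ioctl described above, reading erase counters for the first 64 PEBs; the UBI device node path is an assumption:

/* Hedged sketch: fetch erase counters for PEBs 0..63 of a UBI device */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int read_erase_counters(void)
{
    const int nr = 64;
    struct ubi_ecinfo_req *req;
    int fd, ret;

    req = calloc(1, sizeof(*req) + nr * sizeof(req->erase_counters[0]));
    if (!req)
        return -1;
    req->start = 0;
    req->length = nr;

    fd = open("/dev/ubi0", O_RDONLY);   /* assumed UBI device node */
    if (fd < 0) {
        free(req);
        return -1;
    }

    ret = ioctl(fd, UBI_IOCECNFO, req);
    /* on success req->read_length counters are valid; -1 marks bad/unknown PEBs */

    close(fd);
    free(req);
    return ret;
}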
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index bc30c1f2a109..a5c41f771e05 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -10,7 +10,7 @@
#include <sound/asound.h>
/** version of the sequencer */
-#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 4)
+#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 5)
/**
* definition of sequencer event types
@@ -92,6 +92,9 @@
#define SNDRV_SEQ_EVENT_PORT_SUBSCRIBED 66 /* ports connected */
#define SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED 67 /* ports disconnected */
+#define SNDRV_SEQ_EVENT_UMP_EP_CHANGE 68 /* UMP EP info has changed */
+#define SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE 69 /* UMP block info has changed */
+
/* 70-89: synthesizer events - obsoleted */
/** user-defined events with fixed length
@@ -253,6 +256,12 @@ struct snd_seq_ev_quote {
struct snd_seq_event *event; /* quoted event */
} __packed;
+ /* UMP info change notify */
+struct snd_seq_ev_ump_notify {
+ unsigned char client; /**< Client number */
+ unsigned char block; /**< Block number (optional) */
+};
+
union snd_seq_event_data { /* event data... */
struct snd_seq_ev_note note;
struct snd_seq_ev_ctrl control;
@@ -265,6 +274,7 @@ union snd_seq_event_data { /* event data... */
struct snd_seq_connect connect;
struct snd_seq_result result;
struct snd_seq_ev_quote quote;
+ struct snd_seq_ev_ump_notify ump_notify;
};
/* sequencer event */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 4cd513215bcd..5a049eeaecce 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -716,7 +716,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 5)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -728,6 +728,9 @@ enum {
#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
#define SNDRV_RAWMIDI_INFO_UMP 0x00000008
+#define SNDRV_RAWMIDI_INFO_STREAM_INACTIVE 0x00000010
+
+#define SNDRV_RAWMIDI_DEVICE_UNKNOWN 0
struct snd_rawmidi_info {
unsigned int device; /* RO/WR (control): device number */
@@ -740,7 +743,8 @@ struct snd_rawmidi_info {
unsigned char subname[32]; /* name of active or selected subdevice */
unsigned int subdevices_count;
unsigned int subdevices_avail;
- unsigned char reserved[64]; /* reserved for future use */
+ int tied_device; /* R: tied rawmidi device (UMP/legacy) */
+ unsigned char reserved[60]; /* reserved for future use */
};
#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index ddc77322d571..bc7648a30746 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -334,6 +334,14 @@ union snd_codec_options {
struct snd_dec_wma wma_d;
struct snd_dec_alac alac_d;
struct snd_dec_ape ape_d;
+ struct {
+ __u32 out_sample_rate;
+ } src_d;
+} __attribute__((packed, aligned(4)));
+
+struct snd_codec_desc_src {
+ __u32 out_sample_rate_min;
+ __u32 out_sample_rate_max;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
@@ -347,6 +355,9 @@ union snd_codec_options {
* @modes: Supported modes. See SND_AUDIOMODE defines
* @formats: Supported formats. See SND_AUDIOSTREAMFORMAT defines
* @min_buffer: Minimum buffer size handled by codec implementation
+ * @pcm_formats: Output (for decoders) or input (for encoders)
+ * PCM formats (required for accel mode, 0 for other modes)
+ * @u_space: union space (for codec dependent data)
* @reserved: reserved for future use
*
* This structure provides a scalar value for profiles, modes and stream
@@ -370,7 +381,12 @@ struct snd_codec_desc {
__u32 modes;
__u32 formats;
__u32 min_buffer;
- __u32 reserved[15];
+ __u32 pcm_formats;
+ union {
+ __u32 u_space[6];
+ struct snd_codec_desc_src src;
+ } __attribute__((packed, aligned(4)));
+ __u32 reserved[8];
} __attribute__((packed, aligned(4)));
/** struct snd_codec
@@ -395,6 +411,8 @@ struct snd_codec_desc {
* @align: Block alignment in bytes of an audio sample.
* Only required for PCM or IEC formats.
* @options: encoder-specific settings
+ * @pcm_format: Output (for decoders) or input (for encoders)
+ * PCM format (required for accel mode, 0 for other modes)
* @reserved: reserved for future use
*/
@@ -411,7 +429,8 @@ struct snd_codec {
__u32 format;
__u32 align;
union snd_codec_options options;
- __u32 reserved[3];
+ __u32 pcm_format;
+ __u32 reserved[2];
} __attribute__((packed, aligned(4)));
#endif
diff --git a/include/uapi/sound/fcp.h b/include/uapi/sound/fcp.h
new file mode 100644
index 000000000000..aea428956d8f
--- /dev/null
+++ b/include/uapi/sound/fcp.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Focusrite Control Protocol Driver for ALSA
+ *
+ * Copyright (c) 2024-2025 by Geoffrey D. Bennett <g at b4.vu>
+ */
+/*
+ * DOC: FCP (Focusrite Control Protocol) User-Space API
+ *
+ * This header defines the interface between the FCP kernel driver and
+ * user-space programs to enable the use of the proprietary features
+ * available in Focusrite USB audio interfaces. This includes Scarlett
+ * 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, Clarett+, and Vocaster
+ * series devices.
+ *
+ * The interface is provided via ALSA's hwdep interface. Opening the
+ * hwdep device requires CAP_SYS_RAWIO privileges as this interface
+ * provides near-direct access.
+ *
+ * For details on the FCP protocol, refer to the kernel scarlett2
+ * driver in sound/usb/mixer_scarlett2.c and the fcp-support project
+ * at https://github.com/geoffreybennett/fcp-support
+ *
+ * For examples of using these IOCTLs, see the fcp-server source in
+ * the fcp-support project.
+ *
+ * IOCTL Interface
+ * --------------
+ * FCP_IOCTL_PVERSION:
+ * Returns the protocol version supported by the driver.
+ *
+ * FCP_IOCTL_INIT:
+ * Initialises the protocol and synchronises sequence numbers
+ * between the driver and device. Must be called at least once
+ * before sending commands. Can be safely called again at any time.
+ *
+ * FCP_IOCTL_CMD:
+ * Sends an FCP command to the device and returns the response.
+ * Requires prior initialisation via FCP_IOCTL_INIT.
+ *
+ * FCP_IOCTL_SET_METER_MAP:
+ * Configures the Level Meter control's mapping between device
+ * meters and control channels. Requires FCP_IOCTL_INIT to have been
+ * called first. The map size and number of slots cannot be changed
+ * after initial configuration, although the map itself can be
+ * updated. Once configured, the Level Meter remains functional even
+ * after the hwdep device is closed.
+ *
+ * FCP_IOCTL_SET_METER_LABELS:
+ * Set the labels for the Level Meter control. Requires
+ * FCP_IOCTL_SET_METER_MAP to have been called first. labels[]
+ * should contain a sequence of null-terminated labels corresponding
+ * to the control's channels.
+ */
+#ifndef __UAPI_SOUND_FCP_H
+#define __UAPI_SOUND_FCP_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define FCP_HWDEP_MAJOR 2
+#define FCP_HWDEP_MINOR 0
+#define FCP_HWDEP_SUBMINOR 0
+
+#define FCP_HWDEP_VERSION \
+ ((FCP_HWDEP_MAJOR << 16) | \
+ (FCP_HWDEP_MINOR << 8) | \
+ FCP_HWDEP_SUBMINOR)
+
+#define FCP_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF)
+#define FCP_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF)
+#define FCP_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF)
+
+/* Get protocol version */
+#define FCP_IOCTL_PVERSION _IOR('S', 0x60, int)
+
+/* Start the protocol */
+
+/* Step 0 and step 2 responses are variable length and placed in
+ * resp[] one after the other.
+ */
+struct fcp_init {
+ __u16 step0_resp_size;
+ __u16 step2_resp_size;
+ __u32 init1_opcode;
+ __u32 init2_opcode;
+ __u8 resp[];
+} __attribute__((packed));
+
+#define FCP_IOCTL_INIT _IOWR('S', 0x64, struct fcp_init)
+
+/* Perform a command */
+
+/* The request data is placed in data[] and the response data will
+ * overwrite it.
+ */
+struct fcp_cmd {
+ __u32 opcode;
+ __u16 req_size;
+ __u16 resp_size;
+ __u8 data[];
+} __attribute__((packed));
+#define FCP_IOCTL_CMD _IOWR('S', 0x65, struct fcp_cmd)
+
+/* Set the meter map */
+struct fcp_meter_map {
+ __u16 map_size;
+ __u16 meter_slots;
+ __s16 map[];
+} __attribute__((packed));
+#define FCP_IOCTL_SET_METER_MAP _IOW('S', 0x66, struct fcp_meter_map)
+
+/* Set the meter labels */
+struct fcp_meter_labels {
+ __u16 labels_size;
+ char labels[];
+} __attribute__((packed));
+#define FCP_IOCTL_SET_METER_LABELS _IOW('S', 0x67, struct fcp_meter_labels)
+
+#endif /* __UAPI_SOUND_FCP_H */
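
A hedged user-space sketch of the simplest FCP ioctl, querying the protocol version over the card's hwdep node; the device path and card/device numbers are assumptions:

/* Hedged sketch: read the FCP protocol version from the hwdep device */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/fcp.h>

static int fcp_protocol_version(void)
{
    int fd, version = 0;

    fd = open("/dev/snd/hwC0D0", O_RDWR);   /* assumed hwdep device node */
    if (fd < 0)
        return -1;

    if (ioctl(fd, FCP_IOCTL_PVERSION, &version) < 0)
        version = -1;

    close(fd);
    return version;
}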
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 0a246bc218d3..c28c766270de 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -153,6 +153,8 @@
/* Stream */
#define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200
#define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201
+#define SOF_TKN_STREAM_PLAYBACK_PAUSE_SUPPORTED 1202
+#define SOF_TKN_STREAM_CAPTURE_PAUSE_SUPPORTED 1203
/* Led control for mute switches */
#define SOF_TKN_MUTE_LED_USE 1300
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index b99a2414b53d..5bb55c80e095 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -18,6 +18,8 @@
#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
+#define SNDRV_CTL_TLVT_FCP_CHANNEL_LABELS 0x110 /* channel labels */
+
/*
* TLV structure is right behind the struct snd_ctl_tlv:
* unsigned int type - see SNDRV_CTL_TLVT_*
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index e594abe5d05f..f151feb0ca8c 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -180,7 +180,6 @@ enum attr_idn {
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
- QUERY_ATTR_IDN_EXT_IID_EN = 0x2A,
QUERY_ATTR_IDN_TIMESTAMP = 0x30
};
@@ -386,12 +385,11 @@ enum {
/* Possible values for dExtendedUFSFeaturesSupport */
enum {
- UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
- UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(4),
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(5),
UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
- UFS_DEV_EXT_IID_SUP = BIT(16),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -585,9 +583,6 @@ struct ufs_dev_info {
bool b_advanced_rpmb_en;
- /* UFS EXT_IID Enable */
- bool b_ext_iid_en;
-
/* UFS RTC */
enum ufs_rtc_time rtc_type;
time64_t rtc_time_baseline;
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 74e5b9960c54..8bf31e6ca4e5 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -326,7 +326,6 @@ struct ufs_pwr_mode_info {
* @phy_initialization: used to initialize phys
* @device_reset: called to issue a reset pulse on the UFS device
* @config_scaling_param: called to configure clock scaling parameters
- * @program_key: program or evict an inline encryption key
* @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
* @mcq_config_resource: called to configure MCQ platform resources
@@ -373,8 +372,6 @@ struct ufs_hba_variant_ops {
void (*config_scaling_param)(struct ufs_hba *hba,
struct devfreq_dev_profile *profile,
struct devfreq_simple_ondemand_data *data);
- int (*program_key)(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
int (*fill_crypto_prdt)(struct ufs_hba *hba,
const struct bio_crypt_ctx *crypt_ctx,
void *prdt, unsigned int num_segments);
@@ -403,6 +400,9 @@ enum clk_gating_state {
* delay_ms
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ * relative to the host lock
* @state: the current clocks state
* @delay_ms: gating delay in ms
* @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -413,11 +413,14 @@ enum clk_gating_state {
* @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
*/
struct ufs_clk_gating {
struct delayed_work gate_work;
struct work_struct ungate_work;
+ struct workqueue_struct *clk_gating_workq;
+
+ spinlock_t lock;
+
enum clk_gating_state state;
unsigned long delay_ms;
bool is_suspended;
@@ -426,11 +429,14 @@ struct ufs_clk_gating {
bool is_enabled;
bool is_initialized;
int active_reqs;
- struct workqueue_struct *clk_gating_workq;
};
/**
* struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
* @active_reqs: number of requests that are pending. If this is zero when
* devfreq ->target() function is called then schedule "suspend_work" to
* suspend devfreq.
@@ -440,9 +446,6 @@ struct ufs_clk_gating {
* @enable_attr: sysfs attribute to enable/disable clock scaling
* @saved_pwr_info: UFS power mode may also be changed during scaling and this
* one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
@@ -454,15 +457,18 @@ struct ufs_clk_gating {
* @is_suspended: tracks if devfreq is suspended or not
*/
struct ufs_clk_scaling {
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ spinlock_t lock;
+
int active_reqs;
unsigned long tot_busy_t;
ktime_t window_start_t;
ktime_t busy_start_t;
struct device_attribute enable_attr;
struct ufs_pa_layer_attr saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
unsigned long target_freq;
u32 min_gear;
bool is_enabled;
@@ -946,7 +952,6 @@ enum ufshcd_mcq_opr {
* @nr_queues: number of Queues of different queue types
* @complete_put: whether or not to call ufshcd_rpm_put() from inside
* ufshcd_resume_complete()
- * @ext_iid_sup: is EXT_IID is supported by UFSHC
* @mcq_sup: is mcq supported by UFSHC
* @mcq_enabled: is mcq ready to accept requests
* @res: array of resource info of MCQ registers
@@ -1112,7 +1117,6 @@ struct ufs_hba {
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
bool complete_put;
- bool ext_iid_sup;
bool scsi_host_added;
bool mcq_sup;
bool lsdb_sup;
@@ -1202,6 +1206,14 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+static inline struct ufs_hba *
+ufs_hba_from_crypto_profile(struct blk_crypto_profile *profile)
+{
+ return container_of(profile, struct ufs_hba, crypto_profile);
+}
+#endif
+
static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
{
return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
@@ -1297,7 +1309,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 27364c4a6ef9..612500a7088f 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -82,11 +82,6 @@ enum {
MASK_MCQ_SUPPORT = 0x40000000,
};
-/* MCQ capability mask */
-enum {
- MASK_EXT_IID_SUPPORT = 0x00000400,
-};
-
enum {
REG_SQATTR = 0x0,
REG_SQLBA = 0x4,
diff --git a/init/Kconfig b/init/Kconfig
index 7fe82a46e88c..d0d021b3fa3b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1131,7 +1131,6 @@ config CGROUP_PIDS
config CGROUP_RDMA
bool "RDMA controller"
- select PAGE_COUNTER
help
Provides enforcement of RDMA resources defined by IB stack.
It is fairly easy for consumers to exhaust RDMA resources, which
@@ -1970,7 +1969,8 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
- depends on !MODVERSIONS
+ select EXTENDED_MODVERSIONS if MODVERSIONS
+ depends on !MODVERSIONS || GENDWARFKSYMS
depends on !GCC_PLUGIN_RANDSTRUCT
depends on !RANDSTRUCT
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 22c7f41ff642..f6867bad0d78 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -21,7 +21,7 @@ phys_addr_t phys_initrd_start __initdata;
unsigned long phys_initrd_size __initdata;
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_do_mounts_initrd_table[] = {
+static const struct ctl_table kern_do_mounts_initrd_table[] = {
{
.procname = "real-root-dev",
.data = &real_root_dev,
@@ -89,7 +89,7 @@ static void __init handle_initrd(char *root_device_name)
extern char *envp_init[];
int error;
- pr_warn("using deprecated initrd support, will be removed in 2021.\n");
+ pr_warn("using deprecated initrd support, will be removed soon.\n");
real_root_dev = new_encode_dev(ROOT_DEV);
create_dev("/dev/root.old", Root_RAM0);
diff --git a/init/main.c b/init/main.c
index 893cb77aef22..2a1757826397 100644
--- a/init/main.c
+++ b/init/main.c
@@ -640,15 +640,11 @@ static void __init setup_command_line(char *command_line)
len = xlen + strlen(boot_command_line) + ilen + 1;
- saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
- if (!saved_command_line)
- panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+ saved_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES);
len = xlen + strlen(command_line) + 1;
- static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
- if (!static_command_line)
- panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+ static_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES);
if (xlen) {
/*
@@ -1146,16 +1142,10 @@ static int __init initcall_blacklist(char *str)
str_entry = strsep(&str, ",");
if (str_entry) {
pr_debug("blacklisting initcall %s\n", str_entry);
- entry = memblock_alloc(sizeof(*entry),
+ entry = memblock_alloc_or_panic(sizeof(*entry),
SMP_CACHE_BYTES);
- if (!entry)
- panic("%s: Failed to allocate %zu bytes\n",
- __func__, sizeof(*entry));
- entry->buf = memblock_alloc(strlen(str_entry) + 1,
+ entry->buf = memblock_alloc_or_panic(strlen(str_entry) + 1,
SMP_CACHE_BYTES);
- if (!entry->buf)
- panic("%s: Failed to allocate %zu bytes\n",
- __func__, strlen(str_entry) + 1);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
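The setup_command_line() and initcall_blacklist() hunks replace the repeated allocate-then-panic sequence with memblock_alloc_or_panic(). A user-space sketch of the same consolidation (an xmalloc-style wrapper; the kernel helper itself is not reproduced here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* user-space analogue of memblock_alloc_or_panic(): allocate or fail loudly */
    static void *alloc_or_panic(size_t len)
    {
        void *p = malloc(len);

        if (!p) {
            fprintf(stderr, "%s: Failed to allocate %zu bytes\n", __func__, len);
            abort();
        }
        return p;
    }

    int main(void)
    {
        char *cmdline = alloc_or_panic(64);   /* callers no longer repeat the check */
        strcpy(cmdline, "console=ttyS0");
        puts(cmdline);
        free(cmdline);
        return 0;
    }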
diff --git a/io_uring/Makefile b/io_uring/Makefile
index 53167bef37d7..d695b60dba4f 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
sync.o msg_ring.o advise.o openclose.o \
epoll.o statx.o timeout.o fdinfo.o \
cancel.o waitid.o register.o \
- truncate.o memmap.o
+ truncate.o memmap.o alloc_cache.o
obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
diff --git a/io_uring/alloc_cache.c b/io_uring/alloc_cache.c
new file mode 100644
index 000000000000..58423888b736
--- /dev/null
+++ b/io_uring/alloc_cache.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "alloc_cache.h"
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *))
+{
+ void *entry;
+
+ if (!cache->entries)
+ return;
+
+ while ((entry = io_alloc_cache_get(cache)) != NULL)
+ free(entry);
+
+ kvfree(cache->entries);
+ cache->entries = NULL;
+}
+
+/* returns false if the cache was initialized properly */
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes)
+{
+ cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
+ if (!cache->entries)
+ return true;
+
+ cache->nr_cached = 0;
+ cache->max_cached = max_nr;
+ cache->elem_size = size;
+ cache->init_clear = init_bytes;
+ return false;
+}
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp)
+{
+ void *obj;
+
+ obj = kmalloc(cache->elem_size, gfp);
+ if (obj && cache->init_clear)
+ memset(obj, 0, cache->init_clear);
+ return obj;
+}
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index a3a8cfec32ce..0dd17d8ba93a 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,11 +1,30 @@
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
+#include <linux/io_uring_types.h>
+
/*
* Don't allow the cache to grow beyond this size.
*/
#define IO_ALLOC_CACHE_MAX 128
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes);
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
+
+static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
+{
+ if (IS_ENABLED(CONFIG_KASAN)) {
+ kfree(*iov);
+ *iov = NULL;
+ *nr = 0;
+ }
+}
+
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
void *entry)
{
@@ -23,52 +42,30 @@ static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
if (cache->nr_cached) {
void *entry = cache->entries[--cache->nr_cached];
+ /*
+ * If KASAN is enabled, always clear the initial bytes that
+ * must be zeroed post alloc, in case any of them overlap
+ * with KASAN storage.
+ */
+#if defined(CONFIG_KASAN)
kasan_mempool_unpoison_object(entry, cache->elem_size);
+ if (cache->init_clear)
+ memset(entry, 0, cache->init_clear);
+#endif
return entry;
}
return NULL;
}
-static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp,
- void (*init_once)(void *obj))
+static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
- if (unlikely(!cache->nr_cached)) {
- void *obj = kmalloc(cache->elem_size, gfp);
+ void *obj;
- if (obj && init_once)
- init_once(obj);
+ obj = io_alloc_cache_get(cache);
+ if (obj)
return obj;
- }
- return io_alloc_cache_get(cache);
+ return io_cache_alloc_new(cache, gfp);
}
-/* returns false if the cache was initialized properly */
-static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
- unsigned max_nr, size_t size)
-{
- cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
- if (cache->entries) {
- cache->nr_cached = 0;
- cache->max_cached = max_nr;
- cache->elem_size = size;
- return false;
- }
- return true;
-}
-
-static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
- void (*free)(const void *))
-{
- void *entry;
-
- if (!cache->entries)
- return;
-
- while ((entry = io_alloc_cache_get(cache)) != NULL)
- free(entry);
-
- kvfree(cache->entries);
- cache->entries = NULL;
-}
#endif
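io_uring's allocation cache, partly moved out of line above, is a LIFO array of recycled objects with a kmalloc fallback and an optional zeroed prefix. A self-contained user-space sketch of the same structure (malloc/free standing in for the kernel allocators, KASAN handling omitted):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* user-space sketch of io_uring's array-backed allocation cache */
    struct alloc_cache {
        void **entries;
        unsigned nr_cached, max_cached, elem_size, init_clear;
    };

    static int cache_init(struct alloc_cache *c, unsigned max_nr,
                          unsigned size, unsigned init_bytes)
    {
        c->entries = calloc(max_nr, sizeof(void *));
        if (!c->entries)
            return -1;
        c->nr_cached = 0;
        c->max_cached = max_nr;
        c->elem_size = size;
        c->init_clear = init_bytes;
        return 0;
    }

    static void *cache_get(struct alloc_cache *c)
    {
        if (c->nr_cached) {
            void *obj = c->entries[--c->nr_cached];
            /* clear only the prefix that must start out zeroed */
            if (c->init_clear)
                memset(obj, 0, c->init_clear);
            return obj;
        }
        return NULL;
    }

    static void *cache_alloc(struct alloc_cache *c)
    {
        void *obj = cache_get(c);
        if (obj)
            return obj;
        obj = malloc(c->elem_size);          /* cold path: fresh allocation */
        if (obj && c->init_clear)
            memset(obj, 0, c->init_clear);
        return obj;
    }

    static int cache_put(struct alloc_cache *c, void *obj)
    {
        if (c->nr_cached < c->max_cached) {
            c->entries[c->nr_cached++] = obj;
            return 1;                        /* cached, caller must not free */
        }
        return 0;
    }

    int main(void)
    {
        struct alloc_cache c;
        if (cache_init(&c, 4, 128, 16))
            return 1;
        void *a = cache_alloc(&c);
        if (!cache_put(&c, a))
            free(a);
        void *b = cache_alloc(&c);           /* comes back from the cache */
        printf("reused: %s\n", a == b ? "yes" : "no");
        free(b);
        free(c.entries);
        return 0;
    }

The kernel version additionally poisons cached objects for KASAN on put and unpoisons them on get; that part is left out of the sketch.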
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index a21660e3145a..dd8eeec97acf 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -68,7 +68,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
if (slot_index >= ctx->file_table.data.nr)
return -EINVAL;
- node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ node = io_rsrc_node_alloc(IORING_RSRC_FILE);
if (!node)
return -ENOMEM;
diff --git a/io_uring/futex.c b/io_uring/futex.c
index 30139cc150f2..43e2143255f5 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -36,7 +36,7 @@ struct io_futex_data {
bool io_futex_cache_init(struct io_ring_ctx *ctx)
{
return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX,
- sizeof(struct io_futex_data));
+ sizeof(struct io_futex_data), 0);
}
void io_futex_cache_free(struct io_ring_ctx *ctx)
@@ -320,7 +320,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
}
io_ring_submit_lock(ctx, issue_flags);
- ifd = io_cache_alloc(&ctx->futex_cache, GFP_NOWAIT, NULL);
+ ifd = io_cache_alloc(&ctx->futex_cache, GFP_NOWAIT);
if (!ifd) {
ret = -ENOMEM;
goto done_unlock;
@@ -338,7 +338,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
hlist_add_head(&req->hash_node, &ctx->futex_list);
io_ring_submit_unlock(ctx, issue_flags);
- futex_queue(&ifd->q, hb);
+ futex_queue(&ifd->q, hb, NULL);
return IOU_ISSUE_SKIP_COMPLETE;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7bfbc7c22367..ceacf6230e34 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -157,7 +157,7 @@ static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;
#ifdef CONFIG_SYSCTL
-static struct ctl_table kernel_io_uring_disabled_table[] = {
+static const struct ctl_table kernel_io_uring_disabled_table[] = {
{
.procname = "io_uring_disabled",
.data = &sysctl_io_uring_disabled,
@@ -315,16 +315,18 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->cq_overflow_list);
INIT_LIST_HEAD(&ctx->io_buffers_cache);
ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
- sizeof(struct async_poll));
+ sizeof(struct async_poll), 0);
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_async_msghdr));
+ sizeof(struct io_async_msghdr),
+ offsetof(struct io_async_msghdr, clear));
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_async_rw));
+ sizeof(struct io_async_rw),
+ offsetof(struct io_async_rw, clear));
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_uring_cmd_data));
+ sizeof(struct io_uring_cmd_data), 0);
spin_lock_init(&ctx->msg_lock);
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct io_kiocb));
+ sizeof(struct io_kiocb), 0);
ret |= io_futex_cache_init(ctx);
if (ret)
goto free_ref;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index f65e3f3ede51..ab619e63ef39 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -226,21 +226,16 @@ static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
}
static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
- struct io_kiocb *req,
- void (*init_once)(void *obj))
+ struct io_kiocb *req)
{
- req->async_data = io_cache_alloc(cache, GFP_KERNEL, init_once);
- if (req->async_data)
- req->flags |= REQ_F_ASYNC_DATA;
- return req->async_data;
-}
+ if (cache) {
+ req->async_data = io_cache_alloc(cache, GFP_KERNEL);
+ } else {
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
-static inline void *io_uring_alloc_async_data_nocache(struct io_kiocb *req)
-{
- const struct io_issue_def *def = &io_issue_defs[req->opcode];
-
- WARN_ON_ONCE(!def->async_size);
- req->async_data = kmalloc(def->async_size, GFP_KERNEL);
+ WARN_ON_ONCE(!def->async_size);
+ req->async_data = kmalloc(def->async_size, GFP_KERNEL);
+ }
if (req->async_data)
req->flags |= REQ_F_ASYNC_DATA;
return req->async_data;
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index dda846190fbd..361134544427 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -170,8 +170,8 @@ static int io_region_allocate_pages(struct io_ring_ctx *ctx,
goto done;
}
- nr_allocated = alloc_pages_bulk_array_node(gfp, NUMA_NO_NODE,
- mr->nr_pages, pages);
+ nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
+ mr->nr_pages, pages);
if (nr_allocated != mr->nr_pages) {
if (nr_allocated)
release_pages(pages, nr_allocated);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index bd3cd78d2dba..7e6f68e911f1 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -89,8 +89,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
int res, u32 cflags, u64 user_data)
{
- req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
- if (!req->tctx) {
+ if (!READ_ONCE(ctx->submitter_task)) {
kmem_cache_free(req_cachep, req);
return -EOWNERDEAD;
}
@@ -98,6 +97,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
io_req_set_res(req, res, cflags);
percpu_ref_get(&ctx->refs);
req->ctx = ctx;
+ req->tctx = NULL;
req->io_task_work.func = io_msg_tw_complete;
io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
return 0;
diff --git a/io_uring/net.c b/io_uring/net.c
index 85f55fbc25c9..17852a6616ff 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -137,7 +137,6 @@ static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr *hdr = req->async_data;
- struct iovec *iov;
/* can't recycle, ensure we free the iovec if we have one */
if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
@@ -146,44 +145,30 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
}
/* Let normal cleanup path reap it if we fail adding to the cache */
- iov = hdr->free_iov;
+ io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
- if (iov)
- kasan_mempool_poison_object(iov);
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
}
}
-static void io_msg_async_data_init(void *obj)
-{
- struct io_async_msghdr *hdr = (struct io_async_msghdr *)obj;
-
- hdr->free_iov = NULL;
- hdr->free_iov_nr = 0;
-}
-
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_async_msghdr *hdr;
- hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req,
- io_msg_async_data_init);
+ hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
if (!hdr)
return NULL;
/* If the async data was cached, we might have an iov cached inside. */
- if (hdr->free_iov) {
- kasan_mempool_unpoison_object(hdr->free_iov,
- hdr->free_iov_nr * sizeof(struct iovec));
+ if (hdr->free_iov)
req->flags |= REQ_F_NEED_CLEANUP;
- }
return hdr;
}
/* assign new iovec to kmsg, if we need to */
-static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
struct iovec *iov)
{
if (iov) {
@@ -193,7 +178,6 @@ static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
kfree(kmsg->free_iov);
kmsg->free_iov = iov;
}
- return 0;
}
static inline void io_mshot_prep_retry(struct io_kiocb *req,
@@ -255,7 +239,8 @@ static int io_compat_msg_copy_hdr(struct io_kiocb *req,
if (unlikely(ret < 0))
return ret;
- return io_net_vec_assign(req, iomsg, iov);
+ io_net_vec_assign(req, iomsg, iov);
+ return 0;
}
#endif
@@ -295,11 +280,12 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
ret = -EINVAL;
goto ua_end;
} else {
+ struct iovec __user *uiov = msg->msg_iov;
+
/* we only need the length for provided buffers */
- if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+ if (!access_ok(&uiov->iov_len, sizeof(uiov->iov_len)))
goto ua_end;
- unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
- ua_end);
+ unsafe_get_user(iov->iov_len, &uiov->iov_len, ua_end);
sr->len = iov->iov_len;
}
ret = 0;
@@ -314,7 +300,8 @@ ua_end:
if (unlikely(ret < 0))
return ret;
- return io_net_vec_assign(req, iomsg, iov);
+ io_net_vec_assign(req, iomsg, iov);
+ return 0;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
@@ -579,6 +566,54 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
+static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
+ struct io_async_msghdr *kmsg)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+
+ int ret;
+ struct buf_sel_arg arg = {
+ .iovs = &kmsg->fast_iov,
+ .max_len = min_not_zero(sr->len, INT_MAX),
+ .nr_iovs = 1,
+ };
+
+ if (kmsg->free_iov) {
+ arg.nr_iovs = kmsg->free_iov_nr;
+ arg.iovs = kmsg->free_iov;
+ arg.mode = KBUF_MODE_FREE;
+ }
+
+ if (!(sr->flags & IORING_RECVSEND_BUNDLE))
+ arg.nr_iovs = 1;
+ else
+ arg.mode |= KBUF_MODE_EXPAND;
+
+ ret = io_buffers_select(req, &arg, issue_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+ kmsg->free_iov_nr = ret;
+ kmsg->free_iov = arg.iovs;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+ sr->len = arg.out_len;
+
+ if (ret == 1) {
+ sr->buf = arg.iovs[0].iov_base;
+ ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
+ arg.iovs, ret, arg.out_len);
+ }
+
+ return 0;
+}
+
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -602,44 +637,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
retry_bundle:
if (io_do_buffer_select(req)) {
- struct buf_sel_arg arg = {
- .iovs = &kmsg->fast_iov,
- .max_len = min_not_zero(sr->len, INT_MAX),
- .nr_iovs = 1,
- };
-
- if (kmsg->free_iov) {
- arg.nr_iovs = kmsg->free_iov_nr;
- arg.iovs = kmsg->free_iov;
- arg.mode = KBUF_MODE_FREE;
- }
-
- if (!(sr->flags & IORING_RECVSEND_BUNDLE))
- arg.nr_iovs = 1;
- else
- arg.mode |= KBUF_MODE_EXPAND;
-
- ret = io_buffers_select(req, &arg, issue_flags);
- if (unlikely(ret < 0))
+ ret = io_send_select_buffer(req, issue_flags, kmsg);
+ if (ret)
return ret;
-
- if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
- kmsg->free_iov_nr = ret;
- kmsg->free_iov = arg.iovs;
- req->flags |= REQ_F_NEED_CLEANUP;
- }
- sr->len = arg.out_len;
-
- if (ret == 1) {
- sr->buf = arg.iovs[0].iov_base;
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
- &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- } else {
- iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
- arg.iovs, ret, arg.out_len);
- }
}
/*
@@ -1710,6 +1710,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ if (unlikely(req->flags & REQ_F_FAIL)) {
+ ret = -ECONNRESET;
+ goto out;
+ }
+
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
@@ -1813,11 +1818,8 @@ void io_netmsg_cache_free(const void *entry)
{
struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
- if (kmsg->free_iov) {
- kasan_mempool_unpoison_object(kmsg->free_iov,
- kmsg->free_iov_nr * sizeof(struct iovec));
+ if (kmsg->free_iov)
io_netmsg_iovec_free(kmsg);
- }
kfree(kmsg);
}
#endif
diff --git a/io_uring/net.h b/io_uring/net.h
index 52bfee05f06a..b804c2b36e60 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -5,16 +5,20 @@
struct io_async_msghdr {
#if defined(CONFIG_NET)
- struct iovec fast_iov;
- /* points to an allocated iov, if NULL we use fast_iov instead */
struct iovec *free_iov;
+ /* points to an allocated iov, if NULL we use fast_iov instead */
int free_iov_nr;
- int namelen;
- __kernel_size_t controllen;
- __kernel_size_t payloadlen;
- struct sockaddr __user *uaddr;
- struct msghdr msg;
- struct sockaddr_storage addr;
+ struct_group(clear,
+ int namelen;
+ struct iovec fast_iov;
+ __kernel_size_t controllen;
+ __kernel_size_t payloadlen;
+ struct sockaddr __user *uaddr;
+ struct msghdr msg;
+ struct sockaddr_storage addr;
+ );
+#else
+ struct_group(clear);
#endif
};
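The new layout groups the fields that need no pre-initialization behind a struct_group() named clear, so offsetof(struct io_async_msghdr, clear) is exactly the size of the leading region (free_iov and free_iov_nr) that io_cache_alloc_new() zeroes, replacing the old per-type init_once() callback. A user-space sketch of that prefix-clear idea, using a plain nested struct in place of the kernel's struct_group() macro and illustrative field names:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Fields that must start out zero sit at the front; the grouped tail is
     * filled in later by the request, so offsetof() of the group is the byte
     * count to memset on a fresh allocation.
     */
    struct async_hdr {
        void *free_iov;                 /* must start out NULL */
        int   free_iov_nr;              /* must start out 0    */
        struct {                        /* stand-in for struct_group(clear, ...) */
            int  namelen;
            char scratch[64];           /* set up per request, no pre-init needed */
        } clear;
    };

    #define INIT_CLEAR offsetof(struct async_hdr, clear)

    static struct async_hdr *hdr_alloc(void)
    {
        struct async_hdr *h = malloc(sizeof(*h));

        if (h)
            memset(h, 0, INIT_CLEAR);   /* one memset replaces the init_once() hook */
        return h;
    }

    int main(void)
    {
        struct async_hdr *h = hdr_alloc();

        if (!h)
            return 1;
        printf("free_iov=%p free_iov_nr=%d (cleared %zu leading bytes)\n",
               h->free_iov, h->free_iov_nr, INIT_CLEAR);
        free(h);
        return 0;
    }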
diff --git a/io_uring/poll.c b/io_uring/poll.c
index cc01c40b43d3..bb1c0cd4f809 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -273,6 +273,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
return IOU_POLL_REISSUE;
}
}
+ if (unlikely(req->cqe.res & EPOLLERR))
+ req_set_fail(req);
if (req->apoll_events & EPOLLONESHOT)
return IOU_POLL_DONE;
@@ -315,8 +317,10 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
ret = io_poll_check_events(req, ts);
if (ret == IOU_POLL_NO_ACTION) {
+ io_kbuf_recycle(req, 0);
return;
} else if (ret == IOU_POLL_REQUEUE) {
+ io_kbuf_recycle(req, 0);
__io_poll_execute(req, 0);
return;
}
@@ -650,7 +654,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
kfree(apoll->double_poll);
} else {
if (!(issue_flags & IO_URING_F_UNLOCKED))
- apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC, NULL);
+ apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
else
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (!apoll)
diff --git a/io_uring/register.c b/io_uring/register.c
index 05025047d1da..9a4d2fbce4ae 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -552,7 +552,7 @@ overflow:
ctx->cqe_cached = ctx->cqe_sentinel = NULL;
WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
- WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
+ atomic_set(&n.rings->sq_flags, atomic_read(&o.rings->sq_flags));
WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
@@ -853,6 +853,8 @@ struct file *io_uring_register_get_file(unsigned int fd, bool registered)
return ERR_PTR(-EINVAL);
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
file = tctx->registered_rings[fd];
+ if (file)
+ get_file(file);
} else {
file = fget(fd);
}
@@ -919,7 +921,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
ctx->buf_table.nr, ret);
mutex_unlock(&ctx->uring_lock);
- if (!use_registered_ring)
- fput(file);
+
+ fput(file);
return ret;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index e32ac5853391..af39b69eb4fd 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -118,7 +118,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
}
}
-struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
+struct io_rsrc_node *io_rsrc_node_alloc(int type)
{
struct io_rsrc_node *node;
@@ -203,7 +203,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
err = -EBADF;
break;
}
- node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ node = io_rsrc_node_alloc(IORING_RSRC_FILE);
if (!node) {
err = -ENOMEM;
fput(file);
@@ -444,8 +444,6 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
- lockdep_assert_held(&ctx->uring_lock);
-
if (node->tag)
io_post_aux_cqe(ctx, node->tag, 0, 0);
@@ -525,7 +523,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
goto fail;
}
ret = -ENOMEM;
- node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ node = io_rsrc_node_alloc(IORING_RSRC_FILE);
if (!node) {
fput(file);
goto fail;
@@ -730,7 +728,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
if (!iov->iov_base)
return NULL;
- node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ node = io_rsrc_node_alloc(IORING_RSRC_BUFFER);
if (!node)
return ERR_PTR(-ENOMEM);
node->buf = NULL;
@@ -921,6 +919,16 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
return 0;
}
+/* Lock two rings at once. The rings must be different! */
+static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
+{
+ if (ctx1 > ctx2)
+ swap(ctx1, ctx2);
+ mutex_lock(&ctx1->uring_lock);
+ mutex_lock_nested(&ctx2->uring_lock, SINGLE_DEPTH_NESTING);
+}
+
+/* Both rings are locked by the caller. */
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
struct io_uring_clone_buffers *arg)
{
@@ -928,6 +936,9 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
int i, ret, off, nr;
unsigned int nbufs;
+ lockdep_assert_held(&ctx->uring_lock);
+ lockdep_assert_held(&src_ctx->uring_lock);
+
/*
* Accounting state is shared between the two rings; that only works if
* both rings are accounted towards the same counters.
@@ -942,7 +953,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
return -EBUSY;
- nbufs = READ_ONCE(src_ctx->buf_table.nr);
+ nbufs = src_ctx->buf_table.nr;
if (!arg->nr)
arg->nr = nbufs;
else if (arg->nr > nbufs)
@@ -966,27 +977,20 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
}
}
- /*
- * Drop our own lock here. We'll setup the data we need and reference
- * the source buffers, then re-grab, check, and assign at the end.
- */
- mutex_unlock(&ctx->uring_lock);
-
- mutex_lock(&src_ctx->uring_lock);
ret = -ENXIO;
nbufs = src_ctx->buf_table.nr;
if (!nbufs)
- goto out_unlock;
+ goto out_free;
ret = -EINVAL;
if (!arg->nr)
arg->nr = nbufs;
else if (arg->nr > nbufs)
- goto out_unlock;
+ goto out_free;
ret = -EOVERFLOW;
if (check_add_overflow(arg->nr, arg->src_off, &off))
- goto out_unlock;
+ goto out_free;
if (off > nbufs)
- goto out_unlock;
+ goto out_free;
off = arg->dst_off;
i = arg->src_off;
@@ -998,10 +1002,10 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
if (!src_node) {
dst_node = NULL;
} else {
- dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ dst_node = io_rsrc_node_alloc(IORING_RSRC_BUFFER);
if (!dst_node) {
ret = -ENOMEM;
- goto out_unlock;
+ goto out_free;
}
refcount_inc(&src_node->buf->refs);
@@ -1011,10 +1015,6 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
i++;
}
- /* Have a ref on the bufs now, drop src lock and re-grab our own lock */
- mutex_unlock(&src_ctx->uring_lock);
- mutex_lock(&ctx->uring_lock);
-
/*
* If asked for replace, put the old table. data->nodes[] holds both
* old and new nodes at this point.
@@ -1023,24 +1023,17 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
io_rsrc_data_free(ctx, &ctx->buf_table);
/*
- * ctx->buf_table should be empty now - either the contents are being
- * replaced and we just freed the table, or someone raced setting up
- * a buffer table while the clone was happening. If not empty, fall
- * through to failure handling.
+ * ctx->buf_table must be empty now - either the contents are being
+ * replaced and we just freed the table, or the contents are being
+ * copied to a ring that does not have buffers yet (checked at function
+ * entry).
*/
- if (!ctx->buf_table.nr) {
- ctx->buf_table = data;
- return 0;
- }
+ WARN_ON_ONCE(ctx->buf_table.nr);
+ ctx->buf_table = data;
+ return 0;
- mutex_unlock(&ctx->uring_lock);
- mutex_lock(&src_ctx->uring_lock);
- /* someone raced setting up buffers, dump ours */
- ret = -EBUSY;
-out_unlock:
+out_free:
io_rsrc_data_free(ctx, &data);
- mutex_unlock(&src_ctx->uring_lock);
- mutex_lock(&ctx->uring_lock);
return ret;
}
@@ -1054,6 +1047,7 @@ out_unlock:
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
{
struct io_uring_clone_buffers buf;
+ struct io_ring_ctx *src_ctx;
bool registered_src;
struct file *file;
int ret;
@@ -1071,8 +1065,18 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
file = io_uring_register_get_file(buf.src_fd, registered_src);
if (IS_ERR(file))
return PTR_ERR(file);
- ret = io_clone_buffers(ctx, file->private_data, &buf);
- if (!registered_src)
- fput(file);
+
+ src_ctx = file->private_data;
+ if (src_ctx != ctx) {
+ mutex_unlock(&ctx->uring_lock);
+ lock_two_rings(ctx, src_ctx);
+ }
+
+ ret = io_clone_buffers(ctx, src_ctx, &buf);
+
+ if (src_ctx != ctx)
+ mutex_unlock(&src_ctx->uring_lock);
+
+ fput(file);
return ret;
}
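lock_two_rings() avoids an ABBA deadlock when two different rings must be locked at once by always taking the lower-addressed mutex first; mutex_lock_nested() only annotates the second acquisition for lockdep, the real guarantee comes from the consistent ordering. A user-space sketch of the same rule with POSIX mutexes:

    #include <pthread.h>
    #include <stdio.h>

    struct ring { pthread_mutex_t lock; };

    static void swap_ptrs(struct ring **a, struct ring **b)
    {
        struct ring *t = *a; *a = *b; *b = t;
    }

    static void lock_two(struct ring *a, struct ring *b)
    {
        if (a > b)                 /* order by address, as the kernel helper does */
            swap_ptrs(&a, &b);
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    }

    static void unlock_two(struct ring *a, struct ring *b)
    {
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct ring src = { PTHREAD_MUTEX_INITIALIZER };
        struct ring dst = { PTHREAD_MUTEX_INITIALIZER };

        lock_two(&dst, &src);      /* same order no matter how callers pass them */
        puts("both rings locked");
        unlock_two(&dst, &src);
        return 0;
    }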
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index c8b093584461..190f7ee45de9 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -2,6 +2,8 @@
#ifndef IOU_RSRC_H
#define IOU_RSRC_H
+#include <linux/lockdep.h>
+
#define IO_NODE_ALLOC_CACHE_MAX 32
#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
@@ -43,7 +45,7 @@ struct io_imu_folio_data {
unsigned int nr_folios;
};
-struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
+struct io_rsrc_node *io_rsrc_node_alloc(int type);
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
@@ -80,6 +82,7 @@ static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
+ lockdep_assert_held(&ctx->uring_lock);
if (node && !--node->refs)
io_free_rsrc_node(ctx, node);
}
diff --git a/io_uring/rw.c b/io_uring/rw.c
index a9a2733be842..7aa1e4c9f64a 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -146,28 +146,15 @@ static inline int io_import_iovec(int rw, struct io_kiocb *req,
return 0;
}
-static void io_rw_iovec_free(struct io_async_rw *rw)
-{
- if (rw->free_iovec) {
- kfree(rw->free_iovec);
- rw->free_iov_nr = 0;
- rw->free_iovec = NULL;
- }
-}
-
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_rw *rw = req->async_data;
- struct iovec *iov;
- if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
- io_rw_iovec_free(rw);
+ if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
return;
- }
- iov = rw->free_iovec;
+
+ io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
- if (iov)
- kasan_mempool_poison_object(iov);
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
}
@@ -208,27 +195,16 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
}
}
-static void io_rw_async_data_init(void *obj)
-{
- struct io_async_rw *rw = (struct io_async_rw *)obj;
-
- rw->free_iovec = NULL;
- rw->bytes_done = 0;
-}
-
static int io_rw_alloc_async(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_async_rw *rw;
- rw = io_uring_alloc_async_data(&ctx->rw_cache, req, io_rw_async_data_init);
+ rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
if (!rw)
return -ENOMEM;
- if (rw->free_iovec) {
- kasan_mempool_unpoison_object(rw->free_iovec,
- rw->free_iov_nr * sizeof(struct iovec));
+ if (rw->free_iovec)
req->flags |= REQ_F_NEED_CLEANUP;
- }
rw->bytes_done = 0;
return 0;
}
@@ -1323,10 +1299,7 @@ void io_rw_cache_free(const void *entry)
{
struct io_async_rw *rw = (struct io_async_rw *) entry;
- if (rw->free_iovec) {
- kasan_mempool_unpoison_object(rw->free_iovec,
- rw->free_iov_nr * sizeof(struct iovec));
- io_rw_iovec_free(rw);
- }
+ if (rw->free_iovec)
+ kfree(rw->free_iovec);
kfree(rw);
}
diff --git a/io_uring/rw.h b/io_uring/rw.h
index 2d7656bd268d..eaa59bd64870 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -9,19 +9,24 @@ struct io_meta_state {
struct io_async_rw {
size_t bytes_done;
- struct iov_iter iter;
- struct iov_iter_state iter_state;
- struct iovec fast_iov;
struct iovec *free_iovec;
- int free_iov_nr;
- /* wpq is for buffered io, while meta fields are used with direct io */
- union {
- struct wait_page_queue wpq;
- struct {
- struct uio_meta meta;
- struct io_meta_state meta_state;
+ struct_group(clear,
+ struct iov_iter iter;
+ struct iov_iter_state iter_state;
+ struct iovec fast_iov;
+ int free_iov_nr;
+ /*
+ * wpq is for buffered io, while meta fields are used with
+ * direct io
+ */
+ union {
+ struct wait_page_queue wpq;
+ struct {
+ struct uio_meta meta;
+ struct io_meta_state meta_state;
+ };
};
- };
+ );
};
int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 2bd7e0a317bb..48fc8cf70784 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -544,7 +544,7 @@ static int __io_timeout_prep(struct io_kiocb *req,
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
- data = io_uring_alloc_async_data_nocache(req);
+ data = io_uring_alloc_async_data(NULL, req);
if (!data)
return -ENOMEM;
data->req = req;
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index fc94c465a985..1f6a82128b47 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -168,23 +168,16 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-static void io_uring_cmd_init_once(void *obj)
-{
- struct io_uring_cmd_data *data = obj;
-
- data->op_data = NULL;
-}
-
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
struct io_uring_cmd_data *cache;
- cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req,
- io_uring_cmd_init_once);
+ cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
if (!cache)
return -ENOMEM;
+ cache->op_data = NULL;
if (!(req->flags & REQ_F_FORCE_ASYNC)) {
/* defer memcpy until we need it */
@@ -192,8 +185,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
return 0;
}
- memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = req->async_data;
+ memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = cache->sqes;
return 0;
}
@@ -260,7 +253,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
struct io_uring_cmd_data *cache = req->async_data;
if (ioucmd->sqe != (void *) cache)
- memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
+ memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
return -EAGAIN;
} else if (ret == -EIOCBQUEUED) {
return -EIOCBQUEUED;
@@ -350,7 +343,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
- switch (cmd->sqe->cmd_op) {
+ switch (cmd->cmd_op) {
case SOCKET_URING_OP_SIOCINQ:
ret = prot->ioctl(sk, SIOCINQ, &arg);
if (ret)
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 6778c0ee76c4..853e97a7b0ec 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -303,7 +303,7 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
struct io_waitid_async *iwa;
int ret;
- iwa = io_uring_alloc_async_data_nocache(req);
+ iwa = io_uring_alloc_async_data(NULL, req);
if (!iwa)
return -ENOMEM;
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 54318e0b4557..15b17e86e198 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -73,7 +73,7 @@ int ipc_mni = IPCMNI;
int ipc_mni_shift = IPCMNI_SHIFT;
int ipc_min_cycle = RADIX_TREE_MAP_SIZE;
-static struct ctl_table ipc_sysctls[] = {
+static const struct ctl_table ipc_sysctls[] = {
{
.procname = "shmmax",
.data = &init_ipc_ns.shm_ctlmax,
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index b70dc2ff22d8..0dd12e1c9f53 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -20,7 +20,7 @@ static int msg_max_limit_max = HARD_MSGMAX;
static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
-static struct ctl_table mq_sysctls[] = {
+static const struct ctl_table mq_sysctls[] = {
{
.procname = "queues_max",
.data = &init_ipc_ns.mq_queues_max,
diff --git a/ipc/util.c b/ipc/util.c
index 05cb9de66735..cae60f11d9c2 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -615,12 +615,11 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
}
/**
- * ipc_obtain_object_idr
+ * ipc_obtain_object_idr - Look for an id in the ipc ids idr and
+ * return associated ipc object.
* @ids: ipc identifier set
* @id: ipc id to look for
*
- * Look for an id in the ipc ids idr and return associated ipc object.
- *
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
@@ -637,13 +636,11 @@ struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
}
/**
- * ipc_obtain_object_check
+ * ipc_obtain_object_check - Similar to ipc_obtain_object_idr() but
+ * also checks the ipc object sequence number.
* @ids: ipc identifier set
* @id: ipc id to look for
*
- * Similar to ipc_obtain_object_idr() but also checks the ipc object
- * sequence number.
- *
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
diff --git a/kernel/acct.c b/kernel/acct.c
index 179848ad33e9..31222e8cd534 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -76,7 +76,7 @@ static int acct_parm[3] = {4, 2, 30};
#define ACCT_TIMEOUT (acct_parm[2]) /* foo second timeout between checks */
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_acct_table[] = {
+static const struct ctl_table kern_acct_table[] = {
{
.procname = "acct",
.data = &acct_parm,
diff --git a/kernel/audit.c b/kernel/audit.c
index 13d0144efaa3..5f5bf85bcc90 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1221,7 +1221,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct audit_buffer *ab;
u16 msg_type = nlh->nlmsg_type;
struct audit_sig_info *sig_data;
- struct lsm_context lsmctx;
+ struct lsm_context lsmctx = { NULL, 0, 0 };
err = audit_netlink_ok(skb, msg_type);
if (err)
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 945a5680f6a5..870aeb51d70a 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
INIT_LIST_HEAD(&arena->vma_list);
bpf_map_init_from_attr(&arena->map, attr);
range_tree_init(&arena->rt);
- range_tree_set(&arena->rt, 0, attr->max_entries);
+ err = range_tree_set(&arena->rt, 0, attr->max_entries);
+ if (err) {
+ bpf_map_area_free(arena);
+ goto err;
+ }
mutex_init(&arena->lock);
return &arena->map;
@@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
- atomic_t mmap_count;
+ refcount_t mmap_count;
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml)
return -ENOMEM;
- atomic_set(&vml->mmap_count, 1);
+ refcount_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
@@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
{
struct vma_list *vml = vma->vm_private_data;
- atomic_inc(&vml->mmap_count);
+ refcount_inc(&vml->mmap_count);
}
static void arena_vm_close(struct vm_area_struct *vma)
@@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
struct vma_list *vml = vma->vm_private_data;
- if (!atomic_dec_and_test(&vml->mmap_count))
+ if (!refcount_dec_and_test(&vml->mmap_count))
return;
guard(mutex)(&arena->lock);
/* update link list under lock */
@@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
kfree(vml);
}
-#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
-
static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
{
struct bpf_map *map = vmf->vma->vm_file->private_data;
@@ -443,7 +445,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
return 0;
}
- /* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */
+ /* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */
pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return 0;
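The arena hunks switch the per-VMA mmap_count from atomic_t to refcount_t, which is the same acquire/release counting but saturates and warns on overflow and underflow. The lifetime rule itself is unchanged; a user-space sketch of it with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* one reference per mapping, object freed when the last mapping goes away */
    struct vma_list {
        atomic_int mmap_count;
    };

    static struct vma_list *vml_create(void)
    {
        struct vma_list *vml = malloc(sizeof(*vml));
        if (vml)
            atomic_init(&vml->mmap_count, 1);   /* initial mapping holds one ref */
        return vml;
    }

    static void vml_open(struct vma_list *vml)   /* vm_ops->open: VMA duplicated */
    {
        atomic_fetch_add(&vml->mmap_count, 1);
    }

    static void vml_close(struct vma_list *vml)  /* vm_ops->close: VMA unmapped */
    {
        if (atomic_fetch_sub(&vml->mmap_count, 1) == 1) {
            puts("last mapping gone, freeing");
            free(vml);
        }
    }

    int main(void)
    {
        struct vma_list *vml = vml_create();
        if (!vml)
            return 1;
        vml_open(vml);    /* e.g. the VMA was split or forked */
        vml_close(vml);
        vml_close(vml);   /* last reference drops, vml is freed */
        return 0;
    }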
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 6cdbb4c33d31..eb28c0f219ee 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
u64 ret = 0;
void *val;
+ cant_migrate();
+
if (flags != 0)
return -EINVAL;
is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
array = container_of(map, struct bpf_array, map);
- if (is_percpu)
- migrate_disable();
for (i = 0; i < map->max_entries; i++) {
if (is_percpu)
val = this_cpu_ptr(array->pptrs[i]);
@@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
break;
}
- if (is_percpu)
- migrate_enable();
return num_elems;
}
diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
index 20f05de92e9c..d5dc65bb1755 100644
--- a/kernel/bpf/bpf_cgrp_storage.c
+++ b/kernel/bpf/bpf_cgrp_storage.c
@@ -15,22 +15,20 @@ static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
static void bpf_cgrp_storage_lock(void)
{
- migrate_disable();
+ cant_migrate();
this_cpu_inc(bpf_cgrp_storage_busy);
}
static void bpf_cgrp_storage_unlock(void)
{
this_cpu_dec(bpf_cgrp_storage_busy);
- migrate_enable();
}
static bool bpf_cgrp_storage_trylock(void)
{
- migrate_disable();
+ cant_migrate();
if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
this_cpu_dec(bpf_cgrp_storage_busy);
- migrate_enable();
return false;
}
return true;
@@ -47,17 +45,18 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
struct bpf_local_storage *local_storage;
+ migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
- if (!local_storage) {
- rcu_read_unlock();
- return;
- }
+ if (!local_storage)
+ goto out;
bpf_cgrp_storage_lock();
bpf_local_storage_destroy(local_storage);
bpf_cgrp_storage_unlock();
+out:
rcu_read_unlock();
+ migrate_enable();
}
static struct bpf_local_storage_data *
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index a51c82dee1bd..15a3eb9b02d9 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -62,16 +62,17 @@ void bpf_inode_storage_free(struct inode *inode)
if (!bsb)
return;
+ migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(bsb->storage);
- if (!local_storage) {
- rcu_read_unlock();
- return;
- }
+ if (!local_storage)
+ goto out;
bpf_local_storage_destroy(local_storage);
+out:
rcu_read_unlock();
+ migrate_enable();
}
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 7e6a0af0afc1..fa56c30833ff 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -81,9 +81,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
return NULL;
if (smap->bpf_ma) {
- migrate_disable();
selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
- migrate_enable();
if (selem)
/* Keep the original bpf_map_kzalloc behavior
* before started using the bpf_mem_cache_alloc.
@@ -174,17 +172,14 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
return;
}
- if (smap) {
- migrate_disable();
+ if (smap)
bpf_mem_cache_free(&smap->storage_ma, local_storage);
- migrate_enable();
- } else {
+ else
/* smap could be NULL if the selem that triggered
* this 'local_storage' creation had been long gone.
* In this case, directly do call_rcu().
*/
call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
- }
}
/* rcu tasks trace callback for bpf_ma == false */
@@ -217,7 +212,10 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
/* The bpf_local_storage_map_free will wait for rcu_barrier */
smap = rcu_dereference_check(SDATA(selem)->smap, 1);
+
+ migrate_disable();
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
+ migrate_enable();
bpf_mem_cache_raw_free(selem);
}
@@ -256,9 +254,7 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
* bpf_mem_cache_free will be able to reuse selem
* immediately.
*/
- migrate_disable();
bpf_mem_cache_free(&smap->selem_ma, selem);
- migrate_enable();
return;
}
@@ -497,15 +493,11 @@ int bpf_local_storage_alloc(void *owner,
if (err)
return err;
- if (smap->bpf_ma) {
- migrate_disable();
+ if (smap->bpf_ma)
storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
- migrate_enable();
- } else {
+ else
storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
gfp_flags | __GFP_NOWARN);
- }
-
if (!storage) {
err = -ENOMEM;
goto uncharge;
@@ -841,8 +833,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
smap->elem_size = offsetof(struct bpf_local_storage_elem,
sdata.data[attr->value_size]);
- smap->bpf_ma = bpf_ma;
- if (bpf_ma) {
+ /* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in non
+ * preemptible context. Thus, enforce all storages to use
+ * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
+ */
+ smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
+ if (smap->bpf_ma) {
err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
if (err)
goto free_smap;
@@ -898,15 +894,11 @@ void bpf_local_storage_map_free(struct bpf_map *map,
while ((selem = hlist_entry_safe(
rcu_dereference_raw(hlist_first_rcu(&b->list)),
struct bpf_local_storage_elem, map_node))) {
- if (busy_counter) {
- migrate_disable();
+ if (busy_counter)
this_cpu_inc(*busy_counter);
- }
bpf_selem_unlink(selem, true);
- if (busy_counter) {
+ if (busy_counter)
this_cpu_dec(*busy_counter);
- migrate_enable();
- }
cond_resched_rcu();
}
rcu_read_unlock();
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 606efe32485a..040fb1cd840b 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -310,6 +310,20 @@ void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
kfree(arg_info);
}
+static bool is_module_member(const struct btf *btf, u32 id)
+{
+ const struct btf_type *t;
+
+ t = btf_type_resolve_ptr(btf, id, NULL);
+ if (!t)
+ return false;
+
+ if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
+ return false;
+
+ return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
+}
+
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
struct btf *btf,
struct bpf_verifier_log *log)
@@ -389,6 +403,13 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
goto errout;
}
+ if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
+ pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
+ st_ops->name);
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
func_proto = btf_type_resolve_func_ptr(btf,
member->type,
NULL);
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index bf7fa15fdcc6..1109475953c0 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -24,22 +24,20 @@ static DEFINE_PER_CPU(int, bpf_task_storage_busy);
static void bpf_task_storage_lock(void)
{
- migrate_disable();
+ cant_migrate();
this_cpu_inc(bpf_task_storage_busy);
}
static void bpf_task_storage_unlock(void)
{
this_cpu_dec(bpf_task_storage_busy);
- migrate_enable();
}
static bool bpf_task_storage_trylock(void)
{
- migrate_disable();
+ cant_migrate();
if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
this_cpu_dec(bpf_task_storage_busy);
- migrate_enable();
return false;
}
return true;
@@ -72,18 +70,19 @@ void bpf_task_storage_free(struct task_struct *task)
{
struct bpf_local_storage *local_storage;
+ migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(task->bpf_storage);
- if (!local_storage) {
- rcu_read_unlock();
- return;
- }
+ if (!local_storage)
+ goto out;
bpf_task_storage_lock();
bpf_local_storage_destroy(local_storage);
bpf_task_storage_unlock();
+out:
rcu_read_unlock();
+ migrate_enable();
}
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
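Across the storage maps the pattern is the same: migrate_disable()/migrate_enable() moves out to the outermost entry points (such as bpf_task_storage_free() above), and the inner lock helpers assert the context with cant_migrate() instead of creating it. A user-space sketch of that precondition-versus-action split, using a plain flag and assert() in place of the scheduler primitives:

    #include <assert.h>
    #include <stdio.h>

    static int migration_disabled;

    static void storage_lock(void)
    {
        /* cant_migrate() analogue: check the precondition, do not establish it */
        assert(migration_disabled);
        /* ... bump the per-cpu busy counter ... */
    }

    static void storage_free(void)
    {
        migration_disabled = 1;    /* the caller establishes the context once ... */
        storage_lock();            /* ... helpers only verify it */
        puts("storage freed");
        migration_disabled = 0;
    }

    int main(void)
    {
        storage_free();
        return 0;
    }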
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e5a5f023cedd..9de6acddd479 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -498,11 +498,6 @@ bool btf_type_is_void(const struct btf_type *t)
return t == &btf_void;
}
-static bool btf_type_is_fwd(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
-}
-
static bool btf_type_is_datasec(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
@@ -7887,14 +7882,9 @@ struct btf *btf_get_by_fd(int fd)
struct btf *btf;
CLASS(fd, f)(fd);
- if (fd_empty(f))
- return ERR_PTR(-EBADF);
-
- if (fd_file(f)->f_op != &btf_fops)
- return ERR_PTR(-EINVAL);
-
- btf = fd_file(f)->private_data;
- refcount_inc(&btf->refcnt);
+ btf = __btf_get_by_fd(f);
+ if (!IS_ERR(btf))
+ refcount_inc(&btf->refcnt);
return btf;
}
@@ -8011,17 +8001,6 @@ struct btf_module {
static LIST_HEAD(btf_modules);
static DEFINE_MUTEX(btf_module_mutex);
-static ssize_t
-btf_module_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t len)
-{
- const struct btf *btf = bin_attr->private;
-
- memcpy(buf, btf->data + off, len);
- return len;
-}
-
static void purge_cand_cache(struct btf *btf);
static int btf_module_notify(struct notifier_block *nb, unsigned long op,
@@ -8082,8 +8061,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
attr->attr.name = btf->name;
attr->attr.mode = 0444;
attr->size = btf->data_size;
- attr->private = btf;
- attr->read = btf_module_read;
+ attr->private = btf->data;
+ attr->read_new = sysfs_bin_attr_simple_read;
err = sysfs_create_bin_file(btf_kobj, attr);
if (err) {
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 33c473d676a5..cfa1c18e3a48 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -91,9 +91,7 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
if (!refcount_dec_and_test(&cpumask->usage))
return;
- migrate_disable();
bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
- migrate_enable();
}
__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ec941a0ea41..4a9eeb7aef85 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -824,13 +824,14 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
hlist_nulls_del_rcu(&l->hash_node);
- check_and_free_fields(htab, l);
bpf_map_dec_elem_count(&htab->map);
break;
}
htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+ if (l == tgt_l)
+ check_and_free_fields(htab, l);
return l == tgt_l;
}
@@ -897,11 +898,9 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
check_and_free_fields(htab, l);
- migrate_disable();
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
bpf_mem_cache_free(&htab->ma, l);
- migrate_enable();
}
static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
@@ -1502,10 +1501,9 @@ static void delete_all_elements(struct bpf_htab *htab)
{
int i;
- /* It's called from a worker thread, so disable migration here,
- * since bpf_mem_cache_free() relies on that.
+ /* It's called from a worker thread and migration has been disabled,
+ * therefore, it is OK to invoke bpf_mem_cache_free() directly.
*/
- migrate_disable();
for (i = 0; i < htab->n_buckets; i++) {
struct hlist_nulls_head *head = select_bucket(htab, i);
struct hlist_nulls_node *n;
@@ -1517,7 +1515,6 @@ static void delete_all_elements(struct bpf_htab *htab)
}
cond_resched();
}
- migrate_enable();
}
static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
@@ -1638,41 +1635,44 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
l = lookup_elem_raw(head, hash, key, key_size);
if (!l) {
ret = -ENOENT;
- } else {
- if (is_percpu) {
- u32 roundup_value_size = round_up(map->value_size, 8);
- void __percpu *pptr;
- int off = 0, cpu;
+ goto out_unlock;
+ }
- pptr = htab_elem_get_ptr(l, key_size);
- for_each_possible_cpu(cpu) {
- copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
- check_and_init_map_value(&htab->map, value + off);
- off += roundup_value_size;
- }
- } else {
- u32 roundup_key_size = round_up(map->key_size, 8);
+ if (is_percpu) {
+ u32 roundup_value_size = round_up(map->value_size, 8);
+ void __percpu *pptr;
+ int off = 0, cpu;
- if (flags & BPF_F_LOCK)
- copy_map_value_locked(map, value, l->key +
- roundup_key_size,
- true);
- else
- copy_map_value(map, value, l->key +
- roundup_key_size);
- /* Zeroing special fields in the temp buffer */
- check_and_init_map_value(map, value);
+ pptr = htab_elem_get_ptr(l, key_size);
+ for_each_possible_cpu(cpu) {
+ copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
+ check_and_init_map_value(&htab->map, value + off);
+ off += roundup_value_size;
}
+ } else {
+ u32 roundup_key_size = round_up(map->key_size, 8);
- hlist_nulls_del_rcu(&l->hash_node);
- if (!is_lru_map)
- free_htab_elem(htab, l);
+ if (flags & BPF_F_LOCK)
+ copy_map_value_locked(map, value, l->key +
+ roundup_key_size,
+ true);
+ else
+ copy_map_value(map, value, l->key +
+ roundup_key_size);
+ /* Zeroing special fields in the temp buffer */
+ check_and_init_map_value(map, value);
}
+ hlist_nulls_del_rcu(&l->hash_node);
+out_unlock:
htab_unlock_bucket(htab, b, hash, bflags);
- if (is_lru_map && l)
- htab_lru_push_free(htab, l);
+ if (l) {
+ if (is_lru_map)
+ htab_lru_push_free(htab, l);
+ else
+ free_htab_elem(htab, l);
+ }
return ret;
}
@@ -2208,17 +2208,18 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
bool is_percpu;
u64 ret = 0;
+ cant_migrate();
+
if (flags != 0)
return -EINVAL;
is_percpu = htab_is_percpu(htab);
roundup_key_size = round_up(map->key_size, 8);
- /* disable migration so percpu value prepared here will be the
- * same as the one seen by the bpf program with bpf_map_lookup_elem().
+ /* migration has been disabled, so percpu value prepared here will be
+ * the same as the one seen by the bpf program with
+ * bpf_map_lookup_elem().
*/
- if (is_percpu)
- migrate_disable();
for (i = 0; i < htab->n_buckets; i++) {
b = &htab->buckets[i];
rcu_read_lock();
@@ -2244,8 +2245,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
rcu_read_unlock();
}
out:
- if (is_percpu)
- migrate_enable();
return num_elems;
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 751c150f9e1c..f27ce162427a 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1593,10 +1593,24 @@ void bpf_timer_cancel_and_free(void *val)
* To avoid these issues, punt to workqueue context when we are in a
* timer callback.
*/
- if (this_cpu_read(hrtimer_running))
+ if (this_cpu_read(hrtimer_running)) {
queue_work(system_unbound_wq, &t->cb.delete_work);
- else
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /* If the timer is running on other CPU, also use a kworker to
+ * wait for the completion of the timer instead of trying to
+ * acquire a sleepable lock in hrtimer_cancel() to wait for its
+ * completion.
+ */
+ if (hrtimer_try_to_cancel(&t->timer) >= 0)
+ kfree_rcu(t, cb.rcu);
+ else
+ queue_work(system_unbound_wq, &t->cb.delete_work);
+ } else {
bpf_timer_delete_work(&t->cb.delete_work);
+ }
}
/* This function is called by map_delete/update_elem for individual element and
@@ -2066,9 +2080,7 @@ unlock:
/* The contained type can also have resources, including a
* bpf_list_head which needs to be freed.
*/
- migrate_disable();
__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
- migrate_enable();
}
}
@@ -2105,9 +2117,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
obj -= field->graph_root.node_offset;
- migrate_disable();
__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
- migrate_enable();
}
}
@@ -3057,6 +3067,21 @@ __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user
return ret + 1;
}
+/* Keep unsigned long in prototype so that kfunc is usable when emitted to
+ * vmlinux.h in BPF programs directly, but note that while in BPF prog, the
+ * unsigned long always points to 8-byte region on stack, the kernel may only
+ * read and write the 4-bytes on 32-bit.
+ */
+__bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
+{
+ local_irq_save(*flags__irq_flag);
+}
+
+__bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
+{
+ local_irq_restore(*flags__irq_flag);
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
@@ -3089,7 +3114,9 @@ BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
+#ifdef CONFIG_BPF_EVENTS
BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
+#endif
BTF_KFUNCS_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
@@ -3135,7 +3162,9 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
+#ifdef CONFIG_NET
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
+#endif
BTF_ID_FLAGS(func, bpf_wq_init)
BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
BTF_ID_FLAGS(func, bpf_wq_start)
@@ -3149,6 +3178,8 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_local_irq_save)
+BTF_ID_FLAGS(func, bpf_local_irq_restore)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 4a858fdb6476..38050f4ee400 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -537,6 +537,7 @@ static char slot_type_char[] = {
[STACK_ZERO] = '0',
[STACK_DYNPTR] = 'd',
[STACK_ITER] = 'i',
+ [STACK_IRQ_FLAG] = 'f'
};
static void print_liveness(struct bpf_verifier_env *env,
@@ -753,9 +754,10 @@ static void print_reg_state(struct bpf_verifier_env *env,
verbose(env, ")");
}
-void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
- bool print_all)
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all)
{
+ const struct bpf_func_state *state = vstate->frame[frameno];
const struct bpf_reg_state *reg;
int i;
@@ -843,11 +845,11 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_st
break;
}
}
- if (state->acquired_refs && state->refs[0].id) {
- verbose(env, " refs=%d", state->refs[0].id);
- for (i = 1; i < state->acquired_refs; i++)
- if (state->refs[i].id)
- verbose(env, ",%d", state->refs[i].id);
+ if (vstate->acquired_refs && vstate->refs[0].id) {
+ verbose(env, " refs=%d", vstate->refs[0].id);
+ for (i = 1; i < vstate->acquired_refs; i++)
+ if (vstate->refs[i].id)
+ verbose(env, ",%d", vstate->refs[i].id);
}
if (state->in_callback_fn)
verbose(env, " cb");
@@ -864,7 +866,8 @@ static inline u32 vlog_alignment(u32 pos)
BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno)
{
if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
/* remove new line character */
@@ -873,5 +876,5 @@ void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state
} else {
verbose(env, "%d:", env->insn_idx);
}
- print_verifier_state(env, state, false);
+ print_verifier_state(env, vstate, frameno, false);
}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index f8bc1e096182..e8a772e64324 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -289,16 +289,11 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
}
static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie,
- const void *value,
- bool disable_migration)
+ const void *value)
{
struct lpm_trie_node *node;
- if (disable_migration)
- migrate_disable();
node = bpf_mem_cache_alloc(&trie->ma);
- if (disable_migration)
- migrate_enable();
if (!node)
return NULL;
@@ -342,10 +337,8 @@ static long trie_update_elem(struct bpf_map *map,
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
- /* Allocate and fill a new node. Need to disable migration before
- * invoking bpf_mem_cache_alloc().
- */
- new_node = lpm_trie_node_alloc(trie, value, true);
+ /* Allocate and fill a new node */
+ new_node = lpm_trie_node_alloc(trie, value);
if (!new_node)
return -ENOMEM;
@@ -425,8 +418,7 @@ static long trie_update_elem(struct bpf_map *map,
goto out;
}
- /* migration is disabled within the locked scope */
- im_node = lpm_trie_node_alloc(trie, NULL, false);
+ im_node = lpm_trie_node_alloc(trie, NULL);
if (!im_node) {
trie->n_entries--;
ret = -ENOMEM;
@@ -452,11 +444,9 @@ static long trie_update_elem(struct bpf_map *map,
out:
raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
- migrate_disable();
if (ret)
bpf_mem_cache_free(&trie->ma, new_node);
bpf_mem_cache_free_rcu(&trie->ma, free_node);
- migrate_enable();
return ret;
}
@@ -555,10 +545,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
out:
raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
- migrate_disable();
bpf_mem_cache_free_rcu(&trie->ma, free_parent);
bpf_mem_cache_free_rcu(&trie->ma, free_node);
- migrate_enable();
return ret;
}
diff --git a/kernel/bpf/range_tree.c b/kernel/bpf/range_tree.c
index 5bdf9aadca3a..37b80a23ae1a 100644
--- a/kernel/bpf/range_tree.c
+++ b/kernel/bpf/range_tree.c
@@ -259,9 +259,7 @@ void range_tree_destroy(struct range_tree *rt)
while ((rn = range_it_iter_first(rt, 0, -1U))) {
range_it_remove(rn, rt);
- migrate_disable();
bpf_mem_free(&bpf_global_ma, rn);
- migrate_enable();
}
}
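The migrate_disable()/migrate_enable() pairs removed above and in the lpm_trie/helpers hunks are not simply dropped: callers of the bpf memory allocator are now expected to already run with migration disabled, either because they execute in BPF program context or because the syscall path disables it once around the whole ->map_free() callback (see the kernel/bpf/syscall.c hunk below). A rough sketch of the resulting invariant, using a made-up map type:

/* Hypothetical map_free callback: it may free bpf_mem_alloc objects directly,
 * relying on bpf_map_free() having disabled migration around this call.
 */
static void example_map_free(struct bpf_map *map)
{
	struct example_map *m = container_of(map, struct example_map, map);

	bpf_mem_cache_free(&m->ma, m->cached_node);	/* no migrate_disable() here */
	bpf_mem_alloc_destroy(&m->ma);
	bpf_map_area_free(m);
}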
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5684e8ce132d..c420edbfb7c8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -796,11 +796,9 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
if (!btf_is_kernel(field->kptr.btf)) {
pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
field->kptr.btf_id);
- migrate_disable();
__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
pointee_struct_meta->record : NULL,
fields[i].type == BPF_KPTR_PERCPU);
- migrate_enable();
} else {
field->kptr.dtor(xchgd_field);
}
@@ -835,8 +833,14 @@ static void bpf_map_free(struct bpf_map *map)
struct btf_record *rec = map->record;
struct btf *btf = map->btf;
- /* implementation dependent freeing */
+ /* implementation dependent freeing. Disable migration to simplify
+ * freeing of values or special fields allocated from the bpf memory
+ * allocator.
+ */
+ migrate_disable();
map->ops->map_free(map);
+ migrate_enable();
+
/* Delay freeing of btf_record for maps, as map_free
* callback usually needs access to them. It is better to do it here
* than require each callback to do the free itself manually.
@@ -2730,7 +2734,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
+#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
@@ -6124,7 +6128,7 @@ static int bpf_unpriv_handler(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table bpf_syscall_table[] = {
+static const struct ctl_table bpf_syscall_table[] = {
{
.procname = "unprivileged_bpf_disabled",
.data = &sysctl_unprivileged_bpf_disabled,
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index fedb54c94cdb..81d6cf90584a 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -12,24 +12,16 @@
extern char __start_BTF[];
extern char __stop_BTF[];
-static ssize_t
-btf_vmlinux_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t len)
-{
- memcpy(buf, __start_BTF + off, len);
- return len;
-}
-
static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
.attr = { .name = "vmlinux", .mode = 0444, },
- .read = btf_vmlinux_read,
+ .read_new = sysfs_bin_attr_simple_read,
};
struct kobject *btf_kobj;
static int __init btf_vmlinux_init(void)
{
+ bin_attr_btf_vmlinux.private = __start_BTF;
bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF;
if (bin_attr_btf_vmlinux.size == 0)
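The hand-rolled read handler above is replaced by the generic sysfs helper; roughly (an approximation for reference, not the exact fs/sysfs implementation), sysfs_bin_attr_simple_read() copies out of the buffer stashed in ->private, which is why btf_vmlinux_init() now sets .private = __start_BTF next to .size:

/* Approximate behaviour of the generic helper: */
ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj,
				   const struct bin_attribute *attr,
				   char *buf, loff_t off, size_t count)
{
	memcpy(buf, attr->private + off, count);
	return count;
}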
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 77f56674aaa9..9971c03adfd5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -196,7 +196,8 @@ struct bpf_verifier_stack_elem {
#define BPF_PRIV_STACK_MIN_SIZE 64
-static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
+static int acquire_reference(struct bpf_verifier_env *env, int insn_idx);
+static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
@@ -286,6 +287,7 @@ struct bpf_call_arg_meta {
u32 ret_btf_id;
u32 subprogno;
struct btf_field *kptr_field;
+ s64 const_map_key;
};
struct bpf_kfunc_call_arg_meta {
@@ -641,6 +643,11 @@ static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
}
+static int irq_flag_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ return stack_slot_obj_get_spi(env, reg, "irq_flag", 1);
+}
+
static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
@@ -752,7 +759,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
if (clone_ref_obj_id)
id = clone_ref_obj_id;
else
- id = acquire_reference_state(env, insn_idx);
+ id = acquire_reference(env, insn_idx);
if (id < 0)
return id;
@@ -1014,7 +1021,7 @@ static int mark_stack_slots_iter(struct bpf_verifier_env *env,
if (spi < 0)
return spi;
- id = acquire_reference_state(env, insn_idx);
+ id = acquire_reference(env, insn_idx);
if (id < 0)
return id;
@@ -1136,10 +1143,136 @@ static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_s
return 0;
}
+static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx);
+static int release_irq_state(struct bpf_verifier_state *state, int id);
+
+static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
+ struct bpf_kfunc_call_arg_meta *meta,
+ struct bpf_reg_state *reg, int insn_idx)
+{
+ struct bpf_func_state *state = func(env, reg);
+ struct bpf_stack_state *slot;
+ struct bpf_reg_state *st;
+ int spi, i, id;
+
+ spi = irq_flag_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ id = acquire_irq_state(env, insn_idx);
+ if (id < 0)
+ return id;
+
+ slot = &state->stack[spi];
+ st = &slot->spilled_ptr;
+
+ __mark_reg_known_zero(st);
+ st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
+ st->live |= REG_LIVE_WRITTEN;
+ st->ref_obj_id = id;
+
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ slot->slot_type[i] = STACK_IRQ_FLAG;
+
+ mark_stack_slot_scratched(env, spi);
+ return 0;
+}
+
+static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ struct bpf_stack_state *slot;
+ struct bpf_reg_state *st;
+ int spi, i, err;
+
+ spi = irq_flag_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ slot = &state->stack[spi];
+ st = &slot->spilled_ptr;
+
+ err = release_irq_state(env->cur_state, st->ref_obj_id);
+ WARN_ON_ONCE(err && err != -EACCES);
+ if (err) {
+ int insn_idx = 0;
+
+ for (int i = 0; i < env->cur_state->acquired_refs; i++) {
+ if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) {
+ insn_idx = env->cur_state->refs[i].insn_idx;
+ break;
+ }
+ }
+
+ verbose(env, "cannot restore irq state out of order, expected id=%d acquired at insn_idx=%d\n",
+ env->cur_state->active_irq_id, insn_idx);
+ return err;
+ }
+
+ __mark_reg_not_init(env, st);
+
+ /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
+ st->live |= REG_LIVE_WRITTEN;
+
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ slot->slot_type[i] = STACK_INVALID;
+
+ mark_stack_slot_scratched(env, spi);
+ return 0;
+}
+
+static bool is_irq_flag_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ struct bpf_stack_state *slot;
+ int spi, i;
+
+ /* For -ERANGE (i.e. spi not falling into allocated stack slots), we
+ * will do check_mem_access to check and update stack bounds later, so
+ * return true for that case.
+ */
+ spi = irq_flag_get_spi(env, reg);
+ if (spi == -ERANGE)
+ return true;
+ if (spi < 0)
+ return false;
+
+ slot = &state->stack[spi];
+
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ if (slot->slot_type[i] == STACK_IRQ_FLAG)
+ return false;
+ return true;
+}
+
+static int is_irq_flag_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ struct bpf_stack_state *slot;
+ struct bpf_reg_state *st;
+ int spi, i;
+
+ spi = irq_flag_get_spi(env, reg);
+ if (spi < 0)
+ return -EINVAL;
+
+ slot = &state->stack[spi];
+ st = &slot->spilled_ptr;
+
+ if (!st->ref_obj_id)
+ return -EINVAL;
+
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ if (slot->slot_type[i] != STACK_IRQ_FLAG)
+ return -EINVAL;
+ return 0;
+}
+
/* Check if given stack slot is "special":
* - spilled register state (STACK_SPILL);
* - dynptr state (STACK_DYNPTR);
* - iter state (STACK_ITER).
+ * - irq flag state (STACK_IRQ_FLAG)
*/
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{
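The active_irq_id bookkeeping above is what enforces well-nested save/restore pairs. A sketch (illustrative only) of a shape the verifier now rejects with the "cannot restore irq state out of order" message:

unsigned long f1, f2;

bpf_local_irq_save(&f1);
bpf_local_irq_save(&f2);
bpf_local_irq_restore(&f1);	/* rejected: f2, the most recent save, must be restored first */
bpf_local_irq_restore(&f2);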
@@ -1149,6 +1282,7 @@ static bool is_stack_slot_special(const struct bpf_stack_state *stack)
case STACK_SPILL:
case STACK_DYNPTR:
case STACK_ITER:
+ case STACK_IRQ_FLAG:
return true;
case STACK_INVALID:
case STACK_MISC:
@@ -1263,15 +1397,18 @@ out:
return arr ? arr : ZERO_SIZE_PTR;
}
-static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
+static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src)
{
dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
sizeof(struct bpf_reference_state), GFP_KERNEL);
if (!dst->refs)
return -ENOMEM;
- dst->active_locks = src->active_locks;
dst->acquired_refs = src->acquired_refs;
+ dst->active_locks = src->active_locks;
+ dst->active_preempt_locks = src->active_preempt_locks;
+ dst->active_rcu_lock = src->active_rcu_lock;
+ dst->active_irq_id = src->active_irq_id;
return 0;
}
@@ -1288,7 +1425,7 @@ static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_st
return 0;
}
-static int resize_reference_state(struct bpf_func_state *state, size_t n)
+static int resize_reference_state(struct bpf_verifier_state *state, size_t n)
{
state->refs = realloc_array(state->refs, state->acquired_refs, n,
sizeof(struct bpf_reference_state));
@@ -1331,94 +1468,128 @@ static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state
* On success, returns a valid pointer id to associate with the register
* On failure, returns a negative errno.
*/
-static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
+static struct bpf_reference_state *acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
- struct bpf_func_state *state = cur_func(env);
+ struct bpf_verifier_state *state = env->cur_state;
int new_ofs = state->acquired_refs;
- int id, err;
+ int err;
err = resize_reference_state(state, state->acquired_refs + 1);
if (err)
- return err;
- id = ++env->id_gen;
- state->refs[new_ofs].type = REF_TYPE_PTR;
- state->refs[new_ofs].id = id;
+ return NULL;
state->refs[new_ofs].insn_idx = insn_idx;
- return id;
+ return &state->refs[new_ofs];
+}
+
+static int acquire_reference(struct bpf_verifier_env *env, int insn_idx)
+{
+ struct bpf_reference_state *s;
+
+ s = acquire_reference_state(env, insn_idx);
+ if (!s)
+ return -ENOMEM;
+ s->type = REF_TYPE_PTR;
+ s->id = ++env->id_gen;
+ return s->id;
}
static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
int id, void *ptr)
{
- struct bpf_func_state *state = cur_func(env);
- int new_ofs = state->acquired_refs;
- int err;
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_reference_state *s;
- err = resize_reference_state(state, state->acquired_refs + 1);
- if (err)
- return err;
- state->refs[new_ofs].type = type;
- state->refs[new_ofs].id = id;
- state->refs[new_ofs].insn_idx = insn_idx;
- state->refs[new_ofs].ptr = ptr;
+ s = acquire_reference_state(env, insn_idx);
+ if (!s)
+ return -ENOMEM;
+ s->type = type;
+ s->id = id;
+ s->ptr = ptr;
state->active_locks++;
return 0;
}
-/* release function corresponding to acquire_reference_state(). Idempotent. */
-static int release_reference_state(struct bpf_func_state *state, int ptr_id)
+static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_reference_state *s;
+
+ s = acquire_reference_state(env, insn_idx);
+ if (!s)
+ return -ENOMEM;
+ s->type = REF_TYPE_IRQ;
+ s->id = ++env->id_gen;
+
+ state->active_irq_id = s->id;
+ return s->id;
+}
+
+static void release_reference_state(struct bpf_verifier_state *state, int idx)
{
- int i, last_idx;
+ int last_idx;
+ size_t rem;
+ /* IRQ state requires the relative ordering of elements to remain the
+ * same, since it relies on the refs array to behave as a stack, so that
+ * it can detect out-of-order IRQ restore. Hence use memmove to shift
+ * the array instead of swapping the final element into the deleted idx.
+ */
last_idx = state->acquired_refs - 1;
+ rem = state->acquired_refs - idx - 1;
+ if (last_idx && idx != last_idx)
+ memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem);
+ memset(&state->refs[last_idx], 0, sizeof(*state->refs));
+ state->acquired_refs--;
+ return;
+}
+
+static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
+{
+ int i;
+
for (i = 0; i < state->acquired_refs; i++) {
- if (state->refs[i].type != REF_TYPE_PTR)
+ if (state->refs[i].type != type)
continue;
- if (state->refs[i].id == ptr_id) {
- if (last_idx && i != last_idx)
- memcpy(&state->refs[i], &state->refs[last_idx],
- sizeof(*state->refs));
- memset(&state->refs[last_idx], 0, sizeof(*state->refs));
- state->acquired_refs--;
+ if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
+ release_reference_state(state, i);
+ state->active_locks--;
return 0;
}
}
return -EINVAL;
}
-static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr)
+static int release_irq_state(struct bpf_verifier_state *state, int id)
{
- int i, last_idx;
+ u32 prev_id = 0;
+ int i;
+
+ if (id != state->active_irq_id)
+ return -EACCES;
- last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
- if (state->refs[i].type != type)
+ if (state->refs[i].type != REF_TYPE_IRQ)
continue;
- if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
- if (last_idx && i != last_idx)
- memcpy(&state->refs[i], &state->refs[last_idx],
- sizeof(*state->refs));
- memset(&state->refs[last_idx], 0, sizeof(*state->refs));
- state->acquired_refs--;
- state->active_locks--;
+ if (state->refs[i].id == id) {
+ release_reference_state(state, i);
+ state->active_irq_id = prev_id;
return 0;
+ } else {
+ prev_id = state->refs[i].id;
}
}
return -EINVAL;
}
-static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type,
+static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type,
int id, void *ptr)
{
- struct bpf_func_state *state = cur_func(env);
int i;
for (i = 0; i < state->acquired_refs; i++) {
struct bpf_reference_state *s = &state->refs[i];
- if (s->type == REF_TYPE_PTR || s->type != type)
+ if (s->type != type)
continue;
if (s->id == id && s->ptr == ptr)
@@ -1431,7 +1602,6 @@ static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
- kfree(state->refs);
kfree(state->stack);
kfree(state);
}
@@ -1445,6 +1615,7 @@ static void free_verifier_state(struct bpf_verifier_state *state,
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
+ kfree(state->refs);
if (free_self)
kfree(state);
}
@@ -1455,12 +1626,7 @@ static void free_verifier_state(struct bpf_verifier_state *state,
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
- int err;
-
- memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
- err = copy_reference_state(dst, src);
- if (err)
- return err;
+ memcpy(dst, src, offsetof(struct bpf_func_state, stack));
return copy_stack_state(dst, src);
}
@@ -1477,9 +1643,10 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
+ err = copy_reference_state(dst_state, src);
+ if (err)
+ return err;
dst_state->speculative = src->speculative;
- dst_state->active_rcu_lock = src->active_rcu_lock;
- dst_state->active_preempt_lock = src->active_preempt_lock;
dst_state->in_sleepable = src->in_sleepable;
dst_state->curframe = src->curframe;
dst_state->branches = src->branches;
@@ -3206,10 +3373,27 @@ static int mark_reg_read(struct bpf_verifier_env *env,
return 0;
}
-static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ int spi, int nr_slots)
{
struct bpf_func_state *state = func(env, reg);
- int spi, ret;
+ int err, i;
+
+ for (i = 0; i < nr_slots; i++) {
+ struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
+
+ err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
+ if (err)
+ return err;
+
+ mark_stack_slot_scratched(env, spi - i);
+ }
+ return 0;
+}
+
+static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ int spi;
/* For CONST_PTR_TO_DYNPTR, it must have already been done by
* check_reg_arg in check_helper_call and mark_btf_func_reg_size in
@@ -3224,31 +3408,23 @@ static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *
* bounds and spi is the first dynptr slot. Simply mark stack slot as
* read.
*/
- ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
- state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
- if (ret)
- return ret;
- return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
- state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
+ return mark_stack_slot_obj_read(env, reg, spi, BPF_DYNPTR_NR_SLOTS);
}
static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
int spi, int nr_slots)
{
- struct bpf_func_state *state = func(env, reg);
- int err, i;
-
- for (i = 0; i < nr_slots; i++) {
- struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
-
- err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
- if (err)
- return err;
+ return mark_stack_slot_obj_read(env, reg, spi, nr_slots);
+}
- mark_stack_slot_scratched(env, spi - i);
- }
+static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ int spi;
- return 0;
+ spi = irq_flag_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+ return mark_stack_slot_obj_read(env, reg, spi, 1);
}
/* This function is supposed to be used by the following 32-bit optimization
@@ -4503,7 +4679,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
bt_frame_stack_mask(bt, fr));
verbose(env, "stack=%s: ", env->tmp_str_buf);
- print_verifier_state(env, func, true);
+ print_verifier_state(env, st, fr, true);
}
}
@@ -5128,7 +5304,7 @@ enum bpf_access_src {
static int check_stack_range_initialized(struct bpf_verifier_env *env,
int regno, int off, int access_size,
bool zero_size_allowed,
- enum bpf_access_src type,
+ enum bpf_access_type type,
struct bpf_call_arg_meta *meta);
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
@@ -5161,7 +5337,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
/* Note that we pass a NULL meta, so raw access will not be permitted.
*/
err = check_stack_range_initialized(env, ptr_regno, off, size,
- false, ACCESS_DIRECT, NULL);
+ false, BPF_READ, NULL);
if (err)
return err;
@@ -5501,13 +5677,15 @@ static bool in_sleepable(struct bpf_verifier_env *env)
static bool in_rcu_cs(struct bpf_verifier_env *env)
{
return env->cur_state->active_rcu_lock ||
- cur_func(env)->active_locks ||
+ env->cur_state->active_locks ||
!in_sleepable(env);
}
/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
BTF_SET_START(rcu_protected_types)
+#ifdef CONFIG_NET
BTF_ID(struct, prog_test_ref_kfunc)
+#endif
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
#endif
@@ -5515,7 +5693,9 @@ BTF_ID(struct, cgroup)
BTF_ID(struct, bpf_cpumask)
#endif
BTF_ID(struct, task_struct)
+#ifdef CONFIG_CRYPTO
BTF_ID(struct, bpf_crypto_ctx)
+#endif
BTF_SET_END(rcu_protected_types)
static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
@@ -7011,7 +7191,7 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
static int check_stack_access_within_bounds(
struct bpf_verifier_env *env,
int regno, int off, int access_size,
- enum bpf_access_src src, enum bpf_access_type type)
+ enum bpf_access_type type)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
@@ -7020,10 +7200,7 @@ static int check_stack_access_within_bounds(
int err;
char *err_extra;
- if (src == ACCESS_HELPER)
- /* We don't know if helpers are reading or writing (or both). */
- err_extra = " indirect access to";
- else if (type == BPF_READ)
+ if (type == BPF_READ)
err_extra = " read from";
else
err_extra = " write to";
@@ -7241,7 +7418,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
} else if (reg->type == PTR_TO_STACK) {
/* Basic bounds checks. */
- err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
+ err = check_stack_access_within_bounds(env, regno, off, size, t);
if (err)
return err;
@@ -7461,13 +7638,11 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
static int check_stack_range_initialized(
struct bpf_verifier_env *env, int regno, int off,
int access_size, bool zero_size_allowed,
- enum bpf_access_src type, struct bpf_call_arg_meta *meta)
+ enum bpf_access_type type, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi;
- char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
- enum bpf_access_type bounds_check_type;
/* Some accesses can write anything into the stack, others are
* read-only.
*/
@@ -7478,18 +7653,10 @@ static int check_stack_range_initialized(
return -EACCES;
}
- if (type == ACCESS_HELPER) {
- /* The bounds checks for writes are more permissive than for
- * reads. However, if raw_mode is not set, we'll do extra
- * checks below.
- */
- bounds_check_type = BPF_WRITE;
+ if (type == BPF_WRITE)
clobber = true;
- } else {
- bounds_check_type = BPF_READ;
- }
- err = check_stack_access_within_bounds(env, regno, off, access_size,
- type, bounds_check_type);
+
+ err = check_stack_access_within_bounds(env, regno, off, access_size, type);
if (err)
return err;
@@ -7506,8 +7673,8 @@ static int check_stack_range_initialized(
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
- regno, err_extra, tn_buf);
+ verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
+ regno, tn_buf);
return -EACCES;
}
/* Only initialized buffer on stack is allowed to be accessed
@@ -7560,7 +7727,7 @@ static int check_stack_range_initialized(
slot = -i - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot) {
- verbose(env, "verifier bug: allocated_stack too small");
+ verbose(env, "verifier bug: allocated_stack too small\n");
return -EFAULT;
}
@@ -7588,14 +7755,14 @@ static int check_stack_range_initialized(
}
if (tnum_is_const(reg->var_off)) {
- verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
- err_extra, regno, min_off, i - min_off, access_size);
+ verbose(env, "invalid read from stack R%d off %d+%d size %d\n",
+ regno, min_off, i - min_off, access_size);
} else {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
- err_extra, regno, tn_buf, i - min_off, access_size);
+ verbose(env, "invalid read from stack R%d var_off %s+%d size %d\n",
+ regno, tn_buf, i - min_off, access_size);
}
return -EACCES;
mark:
@@ -7670,7 +7837,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
return check_stack_range_initialized(
env,
regno, reg->off, access_size,
- zero_size_allowed, ACCESS_HELPER, meta);
+ zero_size_allowed, access_type, meta);
case PTR_TO_BTF_ID:
return check_ptr_to_btf_access(env, regs, regno, reg->off,
access_size, BPF_READ, -1);
@@ -7835,15 +8002,15 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
* Since only one bpf_spin_lock is allowed the checks are simpler than
* reg_is_refcounted() logic. The verifier needs to remember only
* one spin_lock instead of array of acquired_refs.
- * cur_func(env)->active_locks remembers which map value element or allocated
+ * env->cur_state->active_locks remembers which map value element or allocated
* object got locked and clears it after bpf_spin_unlock.
*/
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
bool is_lock)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ struct bpf_verifier_state *cur = env->cur_state;
bool is_const = tnum_is_const(reg->var_off);
- struct bpf_func_state *cur = cur_func(env);
u64 val = reg->var_off.value;
struct bpf_map *map = NULL;
struct btf *btf = NULL;
@@ -7910,7 +8077,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
return -EINVAL;
}
- if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) {
+ if (release_lock_state(env->cur_state, REF_TYPE_LOCK, reg->id, ptr)) {
verbose(env, "bpf_spin_unlock of different lock\n");
return -EINVAL;
}
@@ -8982,6 +9149,63 @@ static int check_reg_const_str(struct bpf_verifier_env *env,
return 0;
}
+/* Returns constant key value if possible, else negative error */
+static s64 get_constant_map_key(struct bpf_verifier_env *env,
+ struct bpf_reg_state *key,
+ u32 key_size)
+{
+ struct bpf_func_state *state = func(env, key);
+ struct bpf_reg_state *reg;
+ int slot, spi, off;
+ int spill_size = 0;
+ int zero_size = 0;
+ int stack_off;
+ int i, err;
+ u8 *stype;
+
+ if (!env->bpf_capable)
+ return -EOPNOTSUPP;
+ if (key->type != PTR_TO_STACK)
+ return -EOPNOTSUPP;
+ if (!tnum_is_const(key->var_off))
+ return -EOPNOTSUPP;
+
+ stack_off = key->off + key->var_off.value;
+ slot = -stack_off - 1;
+ spi = slot / BPF_REG_SIZE;
+ off = slot % BPF_REG_SIZE;
+ stype = state->stack[spi].slot_type;
+
+ /* First handle precisely tracked STACK_ZERO */
+ for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--)
+ zero_size++;
+ if (zero_size >= key_size)
+ return 0;
+
+ /* Check that stack contains a scalar spill of expected size */
+ if (!is_spilled_scalar_reg(&state->stack[spi]))
+ return -EOPNOTSUPP;
+ for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--)
+ spill_size++;
+ if (spill_size != key_size)
+ return -EOPNOTSUPP;
+
+ reg = &state->stack[spi].spilled_ptr;
+ if (!tnum_is_const(reg->var_off))
+ /* Stack value not statically known */
+ return -EOPNOTSUPP;
+
+ /* We are relying on a constant value. So mark as precise
+ * to prevent pruning on it.
+ */
+ bt_set_frame_slot(&env->bt, key->frameno, spi);
+ err = mark_chain_precision_batch(env);
+ if (err < 0)
+ return err;
+
+ return reg->var_off.value;
+}
+
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn,
@@ -8992,6 +9216,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
enum bpf_arg_type arg_type = fn->arg_type[arg];
enum bpf_reg_type type = reg->type;
u32 *arg_btf_id = NULL;
+ u32 key_size;
int err = 0;
if (arg_type == ARG_DONTCARE)
@@ -9125,8 +9350,13 @@ skip_type_check:
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
- err = check_helper_mem_access(env, regno, meta->map_ptr->key_size,
- BPF_READ, false, NULL);
+ key_size = meta->map_ptr->key_size;
+ err = check_helper_mem_access(env, regno, key_size, BPF_READ, false, NULL);
+ if (err)
+ return err;
+ meta->const_map_key = get_constant_map_key(env, reg, key_size);
+ if (meta->const_map_key < 0 && meta->const_map_key != -EOPNOTSUPP)
+ return meta->const_map_key;
break;
case ARG_PTR_TO_MAP_VALUE:
if (type_may_be_null(arg_type) && register_is_null(reg))
@@ -9658,21 +9888,38 @@ static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range
reg->range = AT_PKT_END;
}
+static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id)
+{
+ int i;
+
+ for (i = 0; i < state->acquired_refs; i++) {
+ if (state->refs[i].type != REF_TYPE_PTR)
+ continue;
+ if (state->refs[i].id == ref_obj_id) {
+ release_reference_state(state, i);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
/* The pointer with the specified id has released its reference to kernel
* resources. Identify all copies of the same pointer and clear the reference.
+ *
+ * This is the release function corresponding to acquire_reference(). Idempotent.
*/
-static int release_reference(struct bpf_verifier_env *env,
- int ref_obj_id)
+static int release_reference(struct bpf_verifier_env *env, int ref_obj_id)
{
+ struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state;
struct bpf_reg_state *reg;
int err;
- err = release_reference_state(cur_func(env), ref_obj_id);
+ err = release_reference_nomark(vstate, ref_obj_id);
if (err)
return err;
- bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
if (reg->ref_obj_id == ref_obj_id)
mark_reg_invalid(env, reg);
}));
@@ -9746,9 +9993,7 @@ static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int calls
callsite,
state->curframe + 1 /* frameno within this callchain */,
subprog /* subprog number within this prog */);
- /* Transfer references to the callee */
- err = copy_reference_state(callee, caller);
- err = err ?: set_callee_state_cb(env, caller, callee, callsite);
+ err = set_callee_state_cb(env, caller, callee, callsite);
if (err)
goto err_out;
@@ -9978,19 +10223,25 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
const char *sub_name = subprog_name(env, subprog);
/* Only global subprogs cannot be called with a lock held. */
- if (cur_func(env)->active_locks) {
+ if (env->cur_state->active_locks) {
verbose(env, "global function calls are not allowed while holding a lock,\n"
"use static function instead\n");
return -EINVAL;
}
/* Only global subprogs cannot be called with preemption disabled. */
- if (env->cur_state->active_preempt_lock) {
+ if (env->cur_state->active_preempt_locks) {
verbose(env, "global function calls are not allowed with preemption disabled,\n"
"use static function instead\n");
return -EINVAL;
}
+ if (env->cur_state->active_irq_id) {
+ verbose(env, "global function calls are not allowed with IRQs disabled,\n"
+ "use static function instead\n");
+ return -EINVAL;
+ }
+
if (err) {
verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
subprog, sub_name);
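As with locks and preemption above, the new active_irq_id check refuses calls to global (non-static) subprogs inside an IRQ-disabled region; a minimal illustrative shape (names invented):

__noinline int touch_counter(int v)	/* global subprog: not static */
{
	return v + 1;
}

/* ... in the caller ... */
unsigned long flags;

bpf_local_irq_save(&flags);
touch_counter(1);	/* "global function calls are not allowed with IRQs disabled" */
bpf_local_irq_restore(&flags);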
@@ -10027,9 +10278,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "caller:\n");
- print_verifier_state(env, caller, true);
+ print_verifier_state(env, state, caller->frameno, true);
verbose(env, "callee:\n");
- print_verifier_state(env, state->frame[state->curframe], true);
+ print_verifier_state(env, state, state->curframe, true);
}
return 0;
@@ -10321,11 +10572,6 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
caller->regs[BPF_REG_0] = *r0;
}
- /* Transfer references to the caller */
- err = copy_reference_state(caller, callee);
- if (err)
- return err;
-
/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
* there function call logic would reschedule callback visit. If iteration
* converges is_state_visited() would prune that visit eventually.
@@ -10338,9 +10584,9 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "returning from callee:\n");
- print_verifier_state(env, callee, true);
+ print_verifier_state(env, state, callee->frameno, true);
verbose(env, "to caller at %d:\n", *insn_idx);
- print_verifier_state(env, caller, true);
+ print_verifier_state(env, state, caller->frameno, true);
}
/* clear everything in the callee. In case of exceptional exits using
* bpf_throw, this will be done by copy_verifier_state for extra frames. */
@@ -10490,11 +10736,11 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
{
- struct bpf_func_state *state = cur_func(env);
+ struct bpf_verifier_state *state = env->cur_state;
bool refs_lingering = false;
int i;
- if (!exception_exit && state->frameno)
+ if (!exception_exit && cur_func(env)->frameno)
return 0;
for (i = 0; i < state->acquired_refs; i++) {
@@ -10511,7 +10757,7 @@ static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit
{
int err;
- if (check_lock && cur_func(env)->active_locks) {
+ if (check_lock && env->cur_state->active_locks) {
verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix);
return -EINVAL;
}
@@ -10522,12 +10768,17 @@ static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit
return err;
}
+ if (check_lock && env->cur_state->active_irq_id) {
+ verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix);
+ return -EINVAL;
+ }
+
if (check_lock && env->cur_state->active_rcu_lock) {
verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix);
return -EINVAL;
}
- if (check_lock && env->cur_state->active_preempt_lock) {
+ if (check_lock && env->cur_state->active_preempt_locks) {
verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix);
return -EINVAL;
}
@@ -10629,6 +10880,21 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
state->callback_subprogno == subprogno);
}
+/* Returns whether or not the given map type can potentially elide
+ * the lookup return value nullness check. This is possible if the key
+ * is statically known.
+ */
+static bool can_elide_value_nullness(enum bpf_map_type type)
+{
+ switch (type) {
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
const struct bpf_func_proto **ptr)
{
@@ -10715,7 +10981,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
- if (env->cur_state->active_preempt_lock) {
+ if (env->cur_state->active_preempt_locks) {
if (fn->might_sleep) {
verbose(env, "sleepable helper %s#%d in non-preemptible region\n",
func_id_name(func_id), func_id);
@@ -10726,6 +10992,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
+ if (env->cur_state->active_irq_id) {
+ if (fn->might_sleep) {
+ verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n",
+ func_id_name(func_id), func_id);
+ return -EINVAL;
+ }
+
+ if (in_sleepable(env) && is_storage_get_function(func_id))
+ env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
+ }
+
meta.func_id = func_id;
/* check args */
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
@@ -10772,7 +11049,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
struct bpf_func_state *state;
struct bpf_reg_state *reg;
- err = release_reference_state(cur_func(env), ref_obj_id);
+ err = release_reference_nomark(env->cur_state, ref_obj_id);
if (!err) {
bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
if (reg->ref_obj_id == ref_obj_id) {
@@ -10984,10 +11261,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
"kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
+
+ if (func_id == BPF_FUNC_map_lookup_elem &&
+ can_elide_value_nullness(meta.map_ptr->map_type) &&
+ meta.const_map_key >= 0 &&
+ meta.const_map_key < meta.map_ptr->max_entries)
+ ret_flag &= ~PTR_MAYBE_NULL;
+
regs[BPF_REG_0].map_ptr = meta.map_ptr;
regs[BPF_REG_0].map_uid = meta.map_uid;
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
- if (!type_may_be_null(ret_type) &&
+ if (!type_may_be_null(ret_flag) &&
btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
regs[BPF_REG_0].id = ++env->id_gen;
}
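With can_elide_value_nullness() and the const_map_key recorded in check_func_arg(), an array-map lookup whose key is a known constant below max_entries now returns a non-NULL PTR_TO_MAP_VALUE. A sketch of a program that could then verify without the usual NULL check (map, section and function names are invented):

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
	__u32 key = 1;	/* constant and < max_entries */
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	*val += 1;	/* no NULL check: the verifier cleared PTR_MAYBE_NULL */
	return 0;
}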
@@ -11105,7 +11389,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else if (is_acquire_function(func_id, meta.map_ptr)) {
- int id = acquire_reference_state(env, insn_idx);
+ int id = acquire_reference(env, insn_idx);
if (id < 0)
return id;
@@ -11287,6 +11571,11 @@ static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param
return btf_param_match_suffix(btf, arg, "__str");
}
+static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param *arg)
+{
+ return btf_param_match_suffix(btf, arg, "__irq_flag");
+}
+
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
const struct btf_param *arg,
const char *name)
@@ -11440,6 +11729,7 @@ enum kfunc_ptr_arg_type {
KF_ARG_PTR_TO_CONST_STR,
KF_ARG_PTR_TO_MAP,
KF_ARG_PTR_TO_WORKQUEUE,
+ KF_ARG_PTR_TO_IRQ_FLAG,
};
enum special_kfunc_type {
@@ -11471,6 +11761,11 @@ enum special_kfunc_type {
KF_bpf_iter_css_task_new,
KF_bpf_session_cookie,
KF_bpf_get_kmem_cache,
+ KF_bpf_local_irq_save,
+ KF_bpf_local_irq_restore,
+ KF_bpf_iter_num_new,
+ KF_bpf_iter_num_next,
+ KF_bpf_iter_num_destroy,
};
BTF_SET_START(special_kfunc_set)
@@ -11486,8 +11781,10 @@ BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add_impl)
BTF_ID(func, bpf_rbtree_first)
+#ifdef CONFIG_NET
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
+#endif
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
@@ -11515,8 +11812,13 @@ BTF_ID(func, bpf_rcu_read_unlock)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add_impl)
BTF_ID(func, bpf_rbtree_first)
+#ifdef CONFIG_NET
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
+#else
+BTF_ID_UNUSED
+BTF_ID_UNUSED
+#endif
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
@@ -11537,6 +11839,11 @@ BTF_ID(func, bpf_session_cookie)
BTF_ID_UNUSED
#endif
BTF_ID(func, bpf_get_kmem_cache)
+BTF_ID(func, bpf_local_irq_save)
+BTF_ID(func, bpf_local_irq_restore)
+BTF_ID(func, bpf_iter_num_new)
+BTF_ID(func, bpf_iter_num_next)
+BTF_ID(func, bpf_iter_num_destroy)
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -11627,6 +11934,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_wq(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_WORKQUEUE;
+ if (is_kfunc_arg_irq_flag(meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_IRQ_FLAG;
+
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
if (!btf_type_is_struct(ref_t)) {
verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
@@ -11730,11 +12040,59 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
return 0;
}
+static int process_irq_flag(struct bpf_verifier_env *env, int regno,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ bool irq_save;
+ int err;
+
+ if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save]) {
+ irq_save = true;
+ } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore]) {
+ irq_save = false;
+ } else {
+ verbose(env, "verifier internal error: unknown irq flags kfunc\n");
+ return -EFAULT;
+ }
+
+ if (irq_save) {
+ if (!is_irq_flag_reg_valid_uninit(env, reg)) {
+ verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1);
+ return -EINVAL;
+ }
+
+ err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false);
+ if (err)
+ return err;
+
+ err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx);
+ if (err)
+ return err;
+ } else {
+ err = is_irq_flag_reg_valid_init(env, reg);
+ if (err) {
+ verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1);
+ return err;
+ }
+
+ err = mark_irq_flag_read(env, reg);
+ if (err)
+ return err;
+
+ err = unmark_stack_slot_irq_flag(env, reg);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct btf_record *rec = reg_btf_record(reg);
- if (!cur_func(env)->active_locks) {
+ if (!env->cur_state->active_locks) {
verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
return -EFAULT;
}
@@ -11753,12 +12111,11 @@ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state
static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
{
- struct bpf_func_state *state, *unused;
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *unused;
struct bpf_reg_state *reg;
int i;
- state = cur_func(env);
-
if (!ref_obj_id) {
verbose(env, "verifier internal error: ref_obj_id is zero for "
"owning -> non-owning conversion\n");
@@ -11848,9 +12205,9 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
}
id = reg->id;
- if (!cur_func(env)->active_locks)
+ if (!env->cur_state->active_locks)
return -EINVAL;
- s = find_lock_state(env, REF_TYPE_LOCK, id, ptr);
+ s = find_lock_state(env->cur_state, REF_TYPE_LOCK, id, ptr);
if (!s) {
verbose(env, "held lock and object are not in the same allocation\n");
return -EINVAL;
@@ -11873,12 +12230,24 @@ static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
btf_id == special_kfunc_list[KF_bpf_rbtree_first];
}
+static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
+{
+ return btf_id == special_kfunc_list[KF_bpf_iter_num_new] ||
+ btf_id == special_kfunc_list[KF_bpf_iter_num_next] ||
+ btf_id == special_kfunc_list[KF_bpf_iter_num_destroy];
+}
+
static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
}
+static bool kfunc_spin_allowed(u32 btf_id)
+{
+ return is_bpf_graph_api_kfunc(btf_id) || is_bpf_iter_num_api_kfunc(btf_id);
+}
+
static bool is_sync_callback_calling_kfunc(u32 btf_id)
{
return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
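kfunc_spin_allowed() widens the existing graph-API exception so the numeric iterator kfuncs may also be called while a bpf_spin_lock is held. Assuming bpf_for() expands to bpf_iter_num_{new,next,destroy} as in libbpf, a fragment along these lines becomes acceptable (sketch; 'elem' is a made-up locked map value):

int i;

bpf_spin_lock(&elem->lock);
bpf_for(i, 0, 8)		/* bpf_iter_num_* kfuncs, now allowed under the lock */
	elem->slots[i] = 0;
bpf_spin_unlock(&elem->lock);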
@@ -12307,6 +12676,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
case KF_ARG_PTR_TO_CONST_STR:
case KF_ARG_PTR_TO_WORKQUEUE:
+ case KF_ARG_PTR_TO_IRQ_FLAG:
break;
default:
WARN_ON_ONCE(1);
@@ -12596,6 +12966,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (ret < 0)
return ret;
break;
+ case KF_ARG_PTR_TO_IRQ_FLAG:
+ if (reg->type != PTR_TO_STACK) {
+ verbose(env, "arg#%d doesn't point to an irq flag on stack\n", i);
+ return -EINVAL;
+ }
+ ret = process_irq_flag(env, regno, meta);
+ if (ret < 0)
+ return ret;
+ break;
}
}
@@ -12760,22 +13139,27 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EINVAL;
}
- if (env->cur_state->active_preempt_lock) {
+ if (env->cur_state->active_preempt_locks) {
if (preempt_disable) {
- env->cur_state->active_preempt_lock++;
+ env->cur_state->active_preempt_locks++;
} else if (preempt_enable) {
- env->cur_state->active_preempt_lock--;
+ env->cur_state->active_preempt_locks--;
} else if (sleepable) {
verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name);
return -EACCES;
}
} else if (preempt_disable) {
- env->cur_state->active_preempt_lock++;
+ env->cur_state->active_preempt_locks++;
} else if (preempt_enable) {
verbose(env, "unmatched attempt to enable preemption (kernel function %s)\n", func_name);
return -EINVAL;
}
+ if (env->cur_state->active_irq_id && sleepable) {
+ verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name);
+ return -EACCES;
+ }
+
/* In case of release function, we get register number of refcounted
* PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
*/
@@ -13069,7 +13453,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
if (is_kfunc_acquire(&meta)) {
- int id = acquire_reference_state(env, insn_idx);
+ int id = acquire_reference(env, insn_idx);
if (id < 0)
return id;
@@ -13794,64 +14178,56 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin_val = src_reg->s32_min_value;
- u32 umin_val = src_reg->u32_min_value;
- u32 umax_val = src_reg->u32_max_value;
+ s32 *dst_smin = &dst_reg->s32_min_value;
+ s32 *dst_smax = &dst_reg->s32_max_value;
+ u32 *dst_umin = &dst_reg->u32_min_value;
+ u32 *dst_umax = &dst_reg->u32_max_value;
+ s32 tmp_prod[4];
- if (smin_val < 0 || dst_reg->s32_min_value < 0) {
- /* Ain't nobody got time to multiply that sign */
- __mark_reg32_unbounded(dst_reg);
- return;
- }
- /* Both values are positive, so we can work with unsigned and
- * copy the result to signed (unless it exceeds S32_MAX).
- */
- if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
- /* Potential overflow, we know nothing */
- __mark_reg32_unbounded(dst_reg);
- return;
+ if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) ||
+ check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) {
+ /* Overflow possible, we know nothing */
+ *dst_umin = 0;
+ *dst_umax = U32_MAX;
}
- dst_reg->u32_min_value *= umin_val;
- dst_reg->u32_max_value *= umax_val;
- if (dst_reg->u32_max_value > S32_MAX) {
+ if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) ||
+ check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) ||
+ check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) ||
+ check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) {
/* Overflow possible, we know nothing */
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
+ *dst_smin = S32_MIN;
+ *dst_smax = S32_MAX;
} else {
- dst_reg->s32_min_value = dst_reg->u32_min_value;
- dst_reg->s32_max_value = dst_reg->u32_max_value;
+ *dst_smin = min_array(tmp_prod, 4);
+ *dst_smax = max_array(tmp_prod, 4);
}
}
static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin_val = src_reg->smin_value;
- u64 umin_val = src_reg->umin_value;
- u64 umax_val = src_reg->umax_value;
+ s64 *dst_smin = &dst_reg->smin_value;
+ s64 *dst_smax = &dst_reg->smax_value;
+ u64 *dst_umin = &dst_reg->umin_value;
+ u64 *dst_umax = &dst_reg->umax_value;
+ s64 tmp_prod[4];
- if (smin_val < 0 || dst_reg->smin_value < 0) {
- /* Ain't nobody got time to multiply that sign */
- __mark_reg64_unbounded(dst_reg);
- return;
- }
- /* Both values are positive, so we can work with unsigned and
- * copy the result to signed (unless it exceeds S64_MAX).
- */
- if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
- /* Potential overflow, we know nothing */
- __mark_reg64_unbounded(dst_reg);
- return;
+ if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) ||
+ check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) {
+ /* Overflow possible, we know nothing */
+ *dst_umin = 0;
+ *dst_umax = U64_MAX;
}
- dst_reg->umin_value *= umin_val;
- dst_reg->umax_value *= umax_val;
- if (dst_reg->umax_value > S64_MAX) {
+ if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) ||
+ check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) ||
+ check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) ||
+ check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) {
/* Overflow possible, we know nothing */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ *dst_smin = S64_MIN;
+ *dst_smax = S64_MAX;
} else {
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
+ *dst_smin = min_array(tmp_prod, 4);
+ *dst_smax = max_array(tmp_prod, 4);
}
}
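The rewritten helpers bound a product by the four extreme cross products whenever none of them overflows, instead of giving up on any negative operand. A small worked example (unrelated to any particular program):

/* dst in [-3, 2], src in [-4, 5]:
 * candidate products { (-3)*(-4), (-3)*5, 2*(-4), 2*5 } = { 12, -15, -8, 10 }
 * => smin_value = -15, smax_value = 12,
 * where the old positive-operands-only fast path would have marked the
 * register unbounded.
 */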
@@ -14462,12 +14838,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
- print_verifier_state(env, state, true);
+ print_verifier_state(env, vstate, vstate->curframe, true);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
- print_verifier_state(env, state, true);
+ print_verifier_state(env, vstate, vstate->curframe, true);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
@@ -15365,7 +15741,7 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
* No one could have freed the reference state before
* doing the NULL check.
*/
- WARN_ON_ONCE(release_reference_state(state, id));
+ WARN_ON_ONCE(release_reference_nomark(vstate, id));
bpf_for_each_reg_in_vstate(vstate, state, reg, ({
mark_ptr_or_null_reg(state, reg, id, is_null);
@@ -15596,9 +15972,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (insn->code != (BPF_JMP | BPF_JCOND) ||
insn->src_reg != BPF_MAY_GOTO ||
- insn->dst_reg || insn->imm || insn->off == 0) {
- verbose(env, "invalid may_goto off %d imm %d\n",
- insn->off, insn->imm);
+ insn->dst_reg || insn->imm) {
+ verbose(env, "invalid may_goto imm %d\n", insn->imm);
return -EINVAL;
}
prev_st = find_prev_entry(env, cur_st->parent, idx);
@@ -15675,7 +16050,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
*insn_idx))
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
- print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ print_insn_state(env, this_branch, this_branch->curframe);
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
@@ -15689,7 +16064,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
*insn_idx))
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
- print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ print_insn_state(env, this_branch, this_branch->curframe);
return 0;
}
@@ -15806,7 +16181,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EACCES;
}
if (env->log.level & BPF_LOG_LEVEL)
- print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ print_insn_state(env, this_branch, this_branch->curframe);
return 0;
}
@@ -17734,6 +18109,12 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
return false;
break;
+ case STACK_IRQ_FLAG:
+ old_reg = &old->stack[spi].spilled_ptr;
+ cur_reg = &cur->stack[spi].spilled_ptr;
+ if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
+ return false;
+ break;
case STACK_MISC:
case STACK_ZERO:
case STACK_INVALID:
@@ -17746,7 +18127,7 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
return true;
}
-static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
+static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
struct bpf_idmap *idmap)
{
int i;
@@ -17754,12 +18135,25 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
if (old->acquired_refs != cur->acquired_refs)
return false;
+ if (old->active_locks != cur->active_locks)
+ return false;
+
+ if (old->active_preempt_locks != cur->active_preempt_locks)
+ return false;
+
+ if (old->active_rcu_lock != cur->active_rcu_lock)
+ return false;
+
+ if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
+ return false;
+
for (i = 0; i < old->acquired_refs; i++) {
if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
old->refs[i].type != cur->refs[i].type)
return false;
switch (old->refs[i].type) {
case REF_TYPE_PTR:
+ case REF_TYPE_IRQ:
break;
case REF_TYPE_LOCK:
if (old->refs[i].ptr != cur->refs[i].ptr)
@@ -17816,9 +18210,6 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
return false;
- if (!refsafe(old, cur, &env->idmap_scratch))
- return false;
-
return true;
}
@@ -17846,13 +18237,10 @@ static bool states_equal(struct bpf_verifier_env *env,
if (old->speculative && !cur->speculative)
return false;
- if (old->active_rcu_lock != cur->active_rcu_lock)
- return false;
-
- if (old->active_preempt_lock != cur->active_preempt_lock)
+ if (old->in_sleepable != cur->in_sleepable)
return false;
- if (old->in_sleepable != cur->in_sleepable)
+ if (!refsafe(old, cur, &env->idmap_scratch))
return false;
/* for states to be equal callsites have to be the same
@@ -18245,9 +18633,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
verbose_linfo(env, insn_idx, "; ");
verbose(env, "infinite loop detected at insn %d\n", insn_idx);
verbose(env, "cur state:");
- print_verifier_state(env, cur->frame[cur->curframe], true);
+ print_verifier_state(env, cur, cur->curframe, true);
verbose(env, "old state:");
- print_verifier_state(env, sl->state.frame[cur->curframe], true);
+ print_verifier_state(env, &sl->state, cur->curframe, true);
return -EINVAL;
}
/* if the verifier is processing a loop, avoid adding new state
@@ -18603,7 +18991,7 @@ static int do_check(struct bpf_verifier_env *env)
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
- print_verifier_state(env, state->frame[state->curframe], true);
+ print_verifier_state(env, state, state->curframe, true);
do_print_state = false;
}
@@ -18615,7 +19003,7 @@ static int do_check(struct bpf_verifier_env *env)
};
if (verifier_state_scratched(env))
- print_insn_state(env, state->frame[state->curframe]);
+ print_insn_state(env, state, state->curframe);
verbose_linfo(env, env->insn_idx, "; ");
env->prev_log_pos = env->log.end_pos;
@@ -18747,10 +19135,10 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (cur_func(env)->active_locks) {
+ if (env->cur_state->active_locks) {
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
- (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
+ (insn->off != 0 || !kfunc_spin_allowed(insn->imm)))) {
verbose(env, "function calls are not allowed while holding a lock\n");
return -EINVAL;
}
@@ -18803,7 +19191,7 @@ process_bpf_exit_full:
* match caller reference state when it exits.
*/
err = check_resource_leak(env, exception_exit, !env->cur_state->curframe,
- "BPF_EXIT instruction");
+ "BPF_EXIT instruction in main prog");
if (err)
return err;
@@ -18910,50 +19298,68 @@ static int find_btf_percpu_datasec(struct btf *btf)
return -ENOENT;
}
+/*
+ * Add btf to the used_btfs array and return the index. (If the btf was
+ * already added, then just return the index.) Upon successful insertion
+ * increase btf refcnt, and, if present, also refcount the corresponding
+ * kernel module.
+ */
+static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
+{
+ struct btf_mod_pair *btf_mod;
+ int i;
+
+ /* check whether we recorded this BTF (and maybe module) already */
+ for (i = 0; i < env->used_btf_cnt; i++)
+ if (env->used_btfs[i].btf == btf)
+ return i;
+
+ if (env->used_btf_cnt >= MAX_USED_BTFS)
+ return -E2BIG;
+
+ btf_get(btf);
+
+ btf_mod = &env->used_btfs[env->used_btf_cnt];
+ btf_mod->btf = btf;
+ btf_mod->module = NULL;
+
+ /* if we reference variables from kernel module, bump its refcount */
+ if (btf_is_module(btf)) {
+ btf_mod->module = btf_try_get_module(btf);
+ if (!btf_mod->module) {
+ btf_put(btf);
+ return -ENXIO;
+ }
+ }
+
+ return env->used_btf_cnt++;
+}
+
/* replace pseudo btf_id with kernel symbol address */
-static int check_pseudo_btf_id(struct bpf_verifier_env *env,
- struct bpf_insn *insn,
- struct bpf_insn_aux_data *aux)
+static int __check_pseudo_btf_id(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ struct bpf_insn_aux_data *aux,
+ struct btf *btf)
{
const struct btf_var_secinfo *vsi;
const struct btf_type *datasec;
- struct btf_mod_pair *btf_mod;
const struct btf_type *t;
const char *sym_name;
bool percpu = false;
u32 type, id = insn->imm;
- struct btf *btf;
s32 datasec_id;
u64 addr;
- int i, btf_fd, err;
-
- btf_fd = insn[1].imm;
- if (btf_fd) {
- btf = btf_get_by_fd(btf_fd);
- if (IS_ERR(btf)) {
- verbose(env, "invalid module BTF object FD specified.\n");
- return -EINVAL;
- }
- } else {
- if (!btf_vmlinux) {
- verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
- return -EINVAL;
- }
- btf = btf_vmlinux;
- btf_get(btf);
- }
+ int i;
t = btf_type_by_id(btf, id);
if (!t) {
verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
- err = -ENOENT;
- goto err_put;
+ return -ENOENT;
}
if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
- err = -EINVAL;
- goto err_put;
+ return -EINVAL;
}
sym_name = btf_name_by_offset(btf, t->name_off);
@@ -18961,8 +19367,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
if (!addr) {
verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
sym_name);
- err = -ENOENT;
- goto err_put;
+ return -ENOENT;
}
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
@@ -18970,7 +19375,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
if (btf_type_is_func(t)) {
aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
aux->btf_var.mem_size = 0;
- goto check_btf;
+ return 0;
}
datasec_id = find_btf_percpu_datasec(btf);
@@ -19001,8 +19406,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
tname = btf_name_by_offset(btf, t->name_off);
verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
tname, PTR_ERR(ret));
- err = -EINVAL;
- goto err_put;
+ return -EINVAL;
}
aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
aux->btf_var.mem_size = tsize;
@@ -19011,39 +19415,43 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
}
-check_btf:
- /* check whether we recorded this BTF (and maybe module) already */
- for (i = 0; i < env->used_btf_cnt; i++) {
- if (env->used_btfs[i].btf == btf) {
- btf_put(btf);
- return 0;
- }
- }
- if (env->used_btf_cnt >= MAX_USED_BTFS) {
- err = -E2BIG;
- goto err_put;
- }
+ return 0;
+}
- btf_mod = &env->used_btfs[env->used_btf_cnt];
- btf_mod->btf = btf;
- btf_mod->module = NULL;
+static int check_pseudo_btf_id(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ struct bpf_insn_aux_data *aux)
+{
+ struct btf *btf;
+ int btf_fd;
+ int err;
- /* if we reference variables from kernel module, bump its refcount */
- if (btf_is_module(btf)) {
- btf_mod->module = btf_try_get_module(btf);
- if (!btf_mod->module) {
- err = -ENXIO;
- goto err_put;
+ btf_fd = insn[1].imm;
+ if (btf_fd) {
+ CLASS(fd, f)(btf_fd);
+
+ btf = __btf_get_by_fd(f);
+ if (IS_ERR(btf)) {
+ verbose(env, "invalid module BTF object FD specified.\n");
+ return -EINVAL;
}
+ } else {
+ if (!btf_vmlinux) {
+ verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
+ return -EINVAL;
+ }
+ btf = btf_vmlinux;
}
- env->used_btf_cnt++;
+ err = __check_pseudo_btf_id(env, insn, aux, btf);
+ if (err)
+ return err;
+ err = __add_used_btf(env, btf);
+ if (err < 0)
+ return err;
return 0;
-err_put:
- btf_put(btf);
- return err;
}
static bool is_tracing_prog_type(enum bpf_prog_type type)
@@ -19060,6 +19468,12 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
}
}
+static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
+{
+ return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+}
+
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
@@ -19138,39 +19552,47 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
- return 0;
-}
+ if (bpf_map_is_cgroup_storage(map) &&
+ bpf_cgroup_storage_assign(env->prog->aux, map)) {
+ verbose(env, "only one cgroup storage of each type is allowed\n");
+ return -EBUSY;
+ }
-static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
-{
- return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
- map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+ if (map->map_type == BPF_MAP_TYPE_ARENA) {
+ if (env->prog->aux->arena) {
+ verbose(env, "Only one arena per program\n");
+ return -EBUSY;
+ }
+ if (!env->allow_ptr_leaks || !env->bpf_capable) {
+ verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
+ return -EPERM;
+ }
+ if (!env->prog->jit_requested) {
+ verbose(env, "JIT is required to use arena\n");
+ return -EOPNOTSUPP;
+ }
+ if (!bpf_jit_supports_arena()) {
+ verbose(env, "JIT doesn't support arena\n");
+ return -EOPNOTSUPP;
+ }
+ env->prog->aux->arena = (void *)map;
+ if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
+ verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
-/* Add map behind fd to used maps list, if it's not already there, and return
- * its index. Also set *reused to true if this map was already in the list of
- * used maps.
- * Returns <0 on error, or >= 0 index, on success.
- */
-static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
+static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map)
{
- CLASS(fd, f)(fd);
- struct bpf_map *map;
- int i;
-
- map = __bpf_map_get(f);
- if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
- return PTR_ERR(map);
- }
+ int i, err;
/* check whether we recorded this map already */
- for (i = 0; i < env->used_map_cnt; i++) {
- if (env->used_maps[i] == map) {
- *reused = true;
+ for (i = 0; i < env->used_map_cnt; i++)
+ if (env->used_maps[i] == map)
return i;
- }
- }
if (env->used_map_cnt >= MAX_USED_MAPS) {
verbose(env, "The total number of maps per program has reached the limit of %u\n",
@@ -19178,6 +19600,10 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus
return -E2BIG;
}
+ err = check_map_prog_compatibility(env, map, env->prog);
+ if (err)
+ return err;
+
if (env->prog->sleepable)
atomic64_inc(&map->sleepable_refcnt);
@@ -19188,12 +19614,29 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus
*/
bpf_map_inc(map);
- *reused = false;
env->used_maps[env->used_map_cnt++] = map;
return env->used_map_cnt - 1;
}
+/* Add map behind fd to used maps list, if it's not already there, and return
+ * its index.
+ * Returns <0 on error, or >= 0 index, on success.
+ */
+static int add_used_map(struct bpf_verifier_env *env, int fd)
+{
+ struct bpf_map *map;
+ CLASS(fd, f)(fd);
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map)) {
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ return PTR_ERR(map);
+ }
+
+ return __add_used_map(env, map);
+}
+
/* find and rewrite pseudo imm in ld_imm64 instructions:
*
* 1. if it accesses map FD, replace it with actual map pointer.
@@ -19225,7 +19668,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
int map_idx;
u64 addr;
u32 fd;
- bool reused;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -19286,7 +19728,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
break;
}
- map_idx = add_used_map_from_fd(env, fd, &reused);
+ map_idx = add_used_map(env, fd);
if (map_idx < 0)
return map_idx;
map = env->used_maps[map_idx];
@@ -19294,10 +19736,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
aux = &env->insn_aux_data[i];
aux->map_index = map_idx;
- err = check_map_prog_compatibility(env, map, env->prog);
- if (err)
- return err;
-
if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
@@ -19328,39 +19766,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
- /* proceed with extra checks only if its newly added used map */
- if (reused)
- goto next_insn;
-
- if (bpf_map_is_cgroup_storage(map) &&
- bpf_cgroup_storage_assign(env->prog->aux, map)) {
- verbose(env, "only one cgroup storage of each type is allowed\n");
- return -EBUSY;
- }
- if (map->map_type == BPF_MAP_TYPE_ARENA) {
- if (env->prog->aux->arena) {
- verbose(env, "Only one arena per program\n");
- return -EBUSY;
- }
- if (!env->allow_ptr_leaks || !env->bpf_capable) {
- verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
- return -EPERM;
- }
- if (!env->prog->jit_requested) {
- verbose(env, "JIT is required to use arena\n");
- return -EOPNOTSUPP;
- }
- if (!bpf_jit_supports_arena()) {
- verbose(env, "JIT doesn't support arena\n");
- return -EOPNOTSUPP;
- }
- env->prog->aux->arena = (void *)map;
- if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
- verbose(env, "arena's user address must be set via map_extra or mmap()\n");
- return -EINVAL;
- }
- }
-
next_insn:
insn++;
i++;
@@ -19779,23 +20184,28 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env)
}
static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+static const struct bpf_insn MAY_GOTO_0 = BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0, 0);
static int opt_remove_nops(struct bpf_verifier_env *env)
{
- const struct bpf_insn ja = NOP;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
+ bool is_may_goto_0, is_ja;
int i, err;
for (i = 0; i < insn_cnt; i++) {
- if (memcmp(&insn[i], &ja, sizeof(ja)))
+ is_may_goto_0 = !memcmp(&insn[i], &MAY_GOTO_0, sizeof(MAY_GOTO_0));
+ is_ja = !memcmp(&insn[i], &NOP, sizeof(NOP));
+
+ if (!is_may_goto_0 && !is_ja)
continue;
err = verifier_remove_insns(env, i, 1);
if (err)
return err;
insn_cnt--;
- i--;
+ /* Go back one insn to catch may_goto +1; may_goto +0 sequence */
+ i -= (is_may_goto_0 && i > 0) ? 2 : 1;
}
return 0;
@@ -22544,6 +22954,73 @@ struct btf *bpf_get_btf_vmlinux(void)
return btf_vmlinux;
}
+/*
+ * add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In
+ * this case, expect that every file descriptor in the array is either a map or
+ * a BTF. Everything else is considered to be trash.
+ */
+static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
+{
+ struct bpf_map *map;
+ struct btf *btf;
+ CLASS(fd, f)(fd);
+ int err;
+
+ map = __bpf_map_get(f);
+ if (!IS_ERR(map)) {
+ err = __add_used_map(env, map);
+ if (err < 0)
+ return err;
+ return 0;
+ }
+
+ btf = __btf_get_by_fd(f);
+ if (!IS_ERR(btf)) {
+ err = __add_used_btf(env, btf);
+ if (err < 0)
+ return err;
+ return 0;
+ }
+
+ verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
+ return PTR_ERR(map);
+}
+
+static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr)
+{
+ size_t size = sizeof(int);
+ int ret;
+ int fd;
+ u32 i;
+
+ env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
+
+ /*
+ * The only difference between old (no fd_array_cnt is given) and new
+ * APIs is that in the latter case the fd_array is expected to be
+ * contiguous and is scanned for map fds right away
+ */
+ if (!attr->fd_array_cnt)
+ return 0;
+
+ /* Check for integer overflow */
+ if (attr->fd_array_cnt >= (U32_MAX / size)) {
+ verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < attr->fd_array_cnt; i++) {
+ if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size))
+ return -EFAULT;
+
+ ret = add_fd_from_fd_array(env, fd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
u64 start_time = ktime_get_ns();
@@ -22575,7 +23052,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
- env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
@@ -22598,6 +23074,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret)
goto err_unlock;
+ ret = process_fd_array(env, attr, uattr);
+ if (ret)
+ goto skip_full_check;
+
mark_verifier_state_clean(env);
if (IS_ERR(btf_vmlinux)) {
diff --git a/kernel/capability.c b/kernel/capability.c
index dac4df77e376..e089d2628c29 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -38,10 +38,8 @@ __setup("no_file_caps", file_caps_disable);
static void warn_legacy_capability_use(void)
{
- char name[sizeof(current->comm)];
-
pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n",
- get_task_comm(name, current));
+ current->comm);
}
/*
@@ -62,10 +60,8 @@ static void warn_legacy_capability_use(void)
static void warn_deprecated_v2(void)
{
- char name[sizeof(current->comm)];
-
pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n",
- get_task_comm(name, current));
+ current->comm);
}
/*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0509a9733745..07455d25329c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -905,12 +905,13 @@ static int finish_cpu(unsigned int cpu)
struct mm_struct *mm = idle->active_mm;
/*
- * idle_task_exit() will have switched to &init_mm, now
- * clean up any remaining active_mm state.
+ * sched_force_init_mm() ensured the use of &init_mm,
+ * drop that refcount now that the CPU has stopped.
*/
- if (mm != &init_mm)
- idle->active_mm = &init_mm;
+ WARN_ON(mm != &init_mm);
+ idle->active_mm = NULL;
mmdrop_lazy_tlb(mm);
+
return 0;
}
@@ -3128,11 +3129,6 @@ void init_cpu_possible(const struct cpumask *src)
cpumask_copy(&__cpu_possible_mask, src);
}
-void init_cpu_online(const struct cpumask *src)
-{
- cpumask_copy(&__cpu_online_mask, src);
-}
-
void set_cpu_online(unsigned int cpu, bool online)
{
/*
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 0a39497140bf..05b137e7dcb9 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -305,7 +305,7 @@ int kdb_putarea_size(unsigned long addr, void *res, size_t size)
/*
* kdb_getphys - Read data from a physical address. Validate the
- * address is in range, use kmap_atomic() to get data
+ * address is in range, use kmap_local_page() to get data
* similar to kdb_getarea() - but for phys addresses
* Inputs:
* res Pointer to the word to receive the result
@@ -324,9 +324,9 @@ static int kdb_getphys(void *res, unsigned long addr, size_t size)
if (!pfn_valid(pfn))
return 1;
page = pfn_to_page(pfn);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
return 0;
}
@@ -536,21 +536,3 @@ bool kdb_task_state(const struct task_struct *p, const char *mask)
return strchr(mask, state);
}
-
-/* Maintain a small stack of kdb_flags to allow recursion without disturbing
- * the global kdb state.
- */
-
-static int kdb_flags_stack[4], kdb_flags_index;
-
-void kdb_save_flags(void)
-{
- BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
- kdb_flags_stack[kdb_flags_index++] = kdb_flags;
-}
-
-void kdb_restore_flags(void)
-{
- BUG_ON(kdb_flags_index <= 0);
- kdb_flags = kdb_flags_stack[--kdb_flags_index];
-}
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index dead51de8eb5..eb63a021ac04 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -64,7 +64,7 @@ static int sysctl_delayacct(const struct ctl_table *table, int write, void *buff
return err;
}
-static struct ctl_table kern_delayacct_table[] = {
+static const struct ctl_table kern_delayacct_table[] = {
{
.procname = "task_delayacct",
.data = NULL,
@@ -93,9 +93,9 @@ void __delayacct_tsk_init(struct task_struct *tsk)
/*
* Finish delay accounting for a statistic using its timestamps (@start),
- * accumalator (@total) and @count
+ * accumulator (@total) and @count
*/
-static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count)
+static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max, u64 *min)
{
s64 ns = local_clock() - *start;
unsigned long flags;
@@ -104,6 +104,10 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *cou
raw_spin_lock_irqsave(lock, flags);
*total += ns;
(*count)++;
+ if (ns > *max)
+ *max = ns;
+ if (*min == 0 || ns < *min)
+ *min = ns;
raw_spin_unlock_irqrestore(lock, flags);
}
}
@@ -122,7 +126,9 @@ void __delayacct_blkio_end(struct task_struct *p)
delayacct_end(&p->delays->lock,
&p->delays->blkio_start,
&p->delays->blkio_delay,
- &p->delays->blkio_count);
+ &p->delays->blkio_count,
+ &p->delays->blkio_delay_max,
+ &p->delays->blkio_delay_min);
}
int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,10 +159,12 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
d->cpu_count += t1;
+ d->cpu_delay_max = tsk->sched_info.max_run_delay;
+ d->cpu_delay_min = tsk->sched_info.min_run_delay;
tmp = (s64)d->cpu_delay_total + t2;
d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;
-
tmp = (s64)d->cpu_run_virtual_total + t3;
+
d->cpu_run_virtual_total =
(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;
@@ -164,20 +172,33 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
return 0;
/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
-
raw_spin_lock_irqsave(&tsk->delays->lock, flags);
+ d->blkio_delay_max = tsk->delays->blkio_delay_max;
+ d->blkio_delay_min = tsk->delays->blkio_delay_min;
tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
+ d->swapin_delay_max = tsk->delays->swapin_delay_max;
+ d->swapin_delay_min = tsk->delays->swapin_delay_min;
tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
+ d->freepages_delay_max = tsk->delays->freepages_delay_max;
+ d->freepages_delay_min = tsk->delays->freepages_delay_min;
tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
+ d->thrashing_delay_max = tsk->delays->thrashing_delay_max;
+ d->thrashing_delay_min = tsk->delays->thrashing_delay_min;
tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
+ d->compact_delay_max = tsk->delays->compact_delay_max;
+ d->compact_delay_min = tsk->delays->compact_delay_min;
tmp = d->compact_delay_total + tsk->delays->compact_delay;
d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
+ d->wpcopy_delay_max = tsk->delays->wpcopy_delay_max;
+ d->wpcopy_delay_min = tsk->delays->wpcopy_delay_min;
tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
+ d->irq_delay_max = tsk->delays->irq_delay_max;
+ d->irq_delay_min = tsk->delays->irq_delay_min;
tmp = d->irq_delay_total + tsk->delays->irq_delay;
d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp;
d->blkio_count += tsk->delays->blkio_count;
@@ -213,7 +234,9 @@ void __delayacct_freepages_end(void)
delayacct_end(&current->delays->lock,
&current->delays->freepages_start,
&current->delays->freepages_delay,
- &current->delays->freepages_count);
+ &current->delays->freepages_count,
+ &current->delays->freepages_delay_max,
+ &current->delays->freepages_delay_min);
}
void __delayacct_thrashing_start(bool *in_thrashing)
@@ -235,7 +258,9 @@ void __delayacct_thrashing_end(bool *in_thrashing)
delayacct_end(&current->delays->lock,
&current->delays->thrashing_start,
&current->delays->thrashing_delay,
- &current->delays->thrashing_count);
+ &current->delays->thrashing_count,
+ &current->delays->thrashing_delay_max,
+ &current->delays->thrashing_delay_min);
}
void __delayacct_swapin_start(void)
@@ -248,7 +273,9 @@ void __delayacct_swapin_end(void)
delayacct_end(&current->delays->lock,
&current->delays->swapin_start,
&current->delays->swapin_delay,
- &current->delays->swapin_count);
+ &current->delays->swapin_count,
+ &current->delays->swapin_delay_max,
+ &current->delays->swapin_delay_min);
}
void __delayacct_compact_start(void)
@@ -261,7 +288,9 @@ void __delayacct_compact_end(void)
delayacct_end(&current->delays->lock,
&current->delays->compact_start,
&current->delays->compact_delay,
- &current->delays->compact_count);
+ &current->delays->compact_count,
+ &current->delays->compact_delay_max,
+ &current->delays->compact_delay_min);
}
void __delayacct_wpcopy_start(void)
@@ -274,7 +303,9 @@ void __delayacct_wpcopy_end(void)
delayacct_end(&current->delays->lock,
&current->delays->wpcopy_start,
&current->delays->wpcopy_delay,
- &current->delays->wpcopy_count);
+ &current->delays->wpcopy_count,
+ &current->delays->wpcopy_delay_max,
+ &current->delays->wpcopy_delay_min);
}
void __delayacct_irq(struct task_struct *task, u32 delta)
@@ -284,6 +315,10 @@ void __delayacct_irq(struct task_struct *task, u32 delta)
raw_spin_lock_irqsave(&task->delays->lock, flags);
task->delays->irq_delay += delta;
task->delays->irq_count++;
+ if (delta > task->delays->irq_delay_max)
+ task->delays->irq_delay_max = delta;
+ if (delta && (!task->delays->irq_delay_min || delta < task->delays->irq_delay_min))
+ task->delays->irq_delay_min = delta;
raw_spin_unlock_irqrestore(&task->delays->lock, flags);
}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index e421a5f2ec7d..2ca797cbe465 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -28,6 +28,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
+#include <linux/oom.h> /* check_stable_address_space */
#include <linux/uprobes.h>
@@ -1260,6 +1261,9 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
* returns NULL in find_active_uprobe_rcu().
*/
mmap_write_lock(mm);
+ if (check_stable_address_space(mm))
+ goto unlock;
+
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
file_inode(vma->vm_file) != uprobe->inode)
diff --git a/kernel/exit.c b/kernel/exit.c
index 1dcddfe537ee..3485e5fc499e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,7 @@
static unsigned int oops_limit = 10000;
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_exit_table[] = {
+static const struct ctl_table kern_exit_table[] = {
{
.procname = "oops_limit",
.data = &oops_limit,
diff --git a/kernel/fork.c b/kernel/fork.c
index ded49f18cd95..735405a9c5f3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -625,8 +625,8 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
* We depend on the oldmm having properly denied write access to the
* exe_file already.
*/
- if (exe_file && deny_write_access(exe_file))
- pr_warn_once("deny_write_access() failed in %s\n", __func__);
+ if (exe_file && exe_file_deny_write_access(exe_file))
+ pr_warn_once("exe_file_deny_write_access() failed in %s\n", __func__);
}
#ifdef CONFIG_MMU
@@ -760,7 +760,8 @@ loop_out:
mt_set_in_rcu(vmi.mas.tree);
ksm_fork(mm, oldmm);
khugepaged_fork(mm, oldmm);
- } else if (mpnt) {
+ } else {
+
/*
* The entire maple tree has already been duplicated. If the
* mmap duplication fails, mark the failure point with
@@ -768,8 +769,18 @@ loop_out:
* stop releasing VMAs that have not been duplicated after this
* point.
*/
- mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
- mas_store(&vmi.mas, XA_ZERO_ENTRY);
+ if (mpnt) {
+ mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+ mas_store(&vmi.mas, XA_ZERO_ENTRY);
+ /* Avoid OOM iterating a broken tree */
+ set_bit(MMF_OOM_SKIP, &mm->flags);
+ }
+ /*
+ * The mm_struct is going to exit, but the locks will be dropped
+ * first. Setting the mm_struct as unstable is advisable as it is
+ * not fully initialised.
+ */
+ set_bit(MMF_UNSTABLE, &mm->flags);
}
out:
mmap_write_unlock(mm);
@@ -1416,13 +1427,13 @@ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
* We expect the caller (i.e., sys_execve) to already denied
* write access, so this is unlikely to fail.
*/
- if (unlikely(deny_write_access(new_exe_file)))
+ if (unlikely(exe_file_deny_write_access(new_exe_file)))
return -EACCES;
get_file(new_exe_file);
}
rcu_assign_pointer(mm->exe_file, new_exe_file);
if (old_exe_file) {
- allow_write_access(old_exe_file);
+ exe_file_allow_write_access(old_exe_file);
fput(old_exe_file);
}
return 0;
@@ -1463,7 +1474,7 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
return ret;
}
- ret = deny_write_access(new_exe_file);
+ ret = exe_file_deny_write_access(new_exe_file);
if (ret)
return -EACCES;
get_file(new_exe_file);
@@ -1475,7 +1486,7 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
mmap_write_unlock(mm);
if (old_exe_file) {
- allow_write_access(old_exe_file);
+ exe_file_allow_write_access(old_exe_file);
fput(old_exe_file);
}
return 0;
@@ -1511,12 +1522,13 @@ struct file *get_task_exe_file(struct task_struct *task)
struct file *exe_file = NULL;
struct mm_struct *mm;
+ if (task->flags & PF_KTHREAD)
+ return NULL;
+
task_lock(task);
mm = task->mm;
- if (mm) {
- if (!(task->flags & PF_KTHREAD))
- exe_file = get_mm_exe_file(mm);
- }
+ if (mm)
+ exe_file = get_mm_exe_file(mm);
task_unlock(task);
return exe_file;
}
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index ebdd76b4ecbb..3db8567f5a44 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -532,7 +532,8 @@ void futex_q_unlock(struct futex_hash_bucket *hb)
futex_hb_waiters_dec(hb);
}
-void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
+void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
+ struct task_struct *task)
{
int prio;
@@ -548,7 +549,7 @@ void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
- q->task = current;
+ q->task = task;
}
/**
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 99b32e728c4a..6b2f4c7eb720 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -285,13 +285,15 @@ static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
}
extern void __futex_unqueue(struct futex_q *q);
-extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb);
+extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
+ struct task_struct *task);
extern int futex_unqueue(struct futex_q *q);
/**
* futex_queue() - Enqueue the futex_q on the futex_hash_bucket
* @q: The futex_q to enqueue
* @hb: The destination hash bucket
+ * @task: Task queueing this futex
*
* The hb->lock must be held by the caller, and is released here. A call to
* futex_queue() is typically paired with exactly one call to futex_unqueue(). The
@@ -299,11 +301,14 @@ extern int futex_unqueue(struct futex_q *q);
* or nothing if the unqueue is done as part of the wake process and the unqueue
* state is implicit in the state of woken task (see futex_wait_requeue_pi() for
* an example).
+ *
+ * Note that @task may be NULL, for async usage of futexes.
*/
-static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
+static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
+ struct task_struct *task)
__releases(&hb->lock)
{
- __futex_queue(q, hb);
+ __futex_queue(q, hb, task);
spin_unlock(&hb->lock);
}
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
index daea650b16f5..7a941845f7ee 100644
--- a/kernel/futex/pi.c
+++ b/kernel/futex/pi.c
@@ -982,7 +982,7 @@ retry_private:
/*
* Only actually queue now that the atomic ops are done:
*/
- __futex_queue(&q, hb);
+ __futex_queue(&q, hb, current);
if (trylock) {
ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c
index 3a10375d9521..25877d4f2f8f 100644
--- a/kernel/futex/waitwake.c
+++ b/kernel/futex/waitwake.c
@@ -210,13 +210,12 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
if (oparg < 0 || oparg > 31) {
- char comm[sizeof(current->comm)];
/*
* kill this print and return -EINVAL when userspace
* is sane again
*/
pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
- get_task_comm(comm, current), oparg);
+ current->comm, oparg);
oparg &= 31;
}
oparg = 1 << oparg;
@@ -350,7 +349,7 @@ void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
- futex_queue(q, hb);
+ futex_queue(q, hb, current);
/* Arm the timer */
if (timeout)
@@ -461,7 +460,7 @@ retry:
* next futex. Queue each futex at this moment so hb can
* be unlocked.
*/
- futex_queue(q, hb);
+ futex_queue(q, hb, current);
continue;
}
diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
index 7670a811a565..8b888a6193cc 100644
--- a/kernel/gcov/clang.c
+++ b/kernel/gcov/clang.c
@@ -264,10 +264,10 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
/**
* gcov_info_add - add up profiling data
- * @dest: profiling data set to which data is added
- * @source: profiling data set which is added
+ * @dst: profiling data set to which data is added
+ * @src: profiling data set which is added
*
- * Adds profiling counts of @source to @dest.
+ * Adds profiling counts of @src to @dst.
*/
void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
{
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 7e1340da5aca..00529c81cc40 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -7,20 +7,13 @@ set -e
sfile="$(readlink -f "$0")"
outdir="$(pwd)"
tarfile=$1
-cpio_dir=$outdir/${tarfile%/*}/.tmp_cpio_dir
+tmpdir=$outdir/${tarfile%/*}/.tmp_dir
dir_list="
include/
arch/$SRCARCH/include/
"
-if ! command -v cpio >/dev/null; then
- echo >&2 "***"
- echo >&2 "*** 'cpio' could not be found."
- echo >&2 "***"
- exit 1
-fi
-
# Support incremental builds by skipping archive generation
# if timestamps of files being archived are not changed.
@@ -48,9 +41,9 @@ all_dirs="$all_dirs $dir_list"
# check include/generated/autoconf.h explicitly.
#
# Ignore them for md5 calculation to avoid pointless regeneration.
-headers_md5="$(find $all_dirs -name "*.h" |
- grep -v "include/generated/utsversion.h" |
- grep -v "include/generated/autoconf.h" |
+headers_md5="$(find $all_dirs -name "*.h" -a \
+ ! -path include/generated/utsversion.h -a \
+ ! -path include/generated/autoconf.h |
xargs ls -l | md5sum | cut -d ' ' -f1)"
# Any changes to this script will also cause a rebuild of the archive.
@@ -65,36 +58,43 @@ fi
echo " GEN $tarfile"
-rm -rf $cpio_dir
-mkdir $cpio_dir
+rm -rf "${tmpdir}"
+mkdir "${tmpdir}"
if [ "$building_out_of_srctree" ]; then
(
cd $srctree
for f in $dir_list
do find "$f" -name "*.h";
- done | cpio --quiet -pd $cpio_dir
+ done | tar -c -f - -T - | tar -xf - -C "${tmpdir}"
)
fi
-# The second CPIO can complain if files already exist which can happen with out
-# of tree builds having stale headers in srctree. Just silence CPIO for now.
for f in $dir_list;
do find "$f" -name "*.h";
-done | cpio --quiet -pdu $cpio_dir >/dev/null 2>&1
+done | tar -c -f - -T - | tar -xf - -C "${tmpdir}"
+
+# Always exclude include/generated/utsversion.h
+# Otherwise, the contents of the tarball may vary depending on the build steps.
+rm -f "${tmpdir}/include/generated/utsversion.h"
# Remove comments except SDPX lines
-find $cpio_dir -type f -print0 |
- xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
+# Use a temporary file to store directory contents to prevent find/xargs from
+# seeing temporary files created by perl.
+find "${tmpdir}" -type f -print0 > "${tmpdir}.contents.txt"
+xargs -0 -P8 -n1 \
+ perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;' \
+ < "${tmpdir}.contents.txt"
+rm -f "${tmpdir}.contents.txt"
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
--exclude=".__afs*" --exclude=".nfs*" \
--owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
- -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+ -I $XZ -cf $tarfile -C "${tmpdir}/" . > /dev/null
echo $headers_md5 > kernel/kheaders.md5
echo "$this_file_md5" >> kernel/kheaders.md5
echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
-rm -rf $cpio_dir
+rm -rf "${tmpdir}"
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index c18717189f32..04efa7a6e69b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -147,6 +147,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
+ if (t->flags & PF_POSTCOREDUMP)
+ pr_err(" Blocked by coredump.\n");
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
@@ -272,7 +274,7 @@ static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int writ
* and hung_task_check_interval_secs
*/
static const unsigned long hung_task_timeout_max = (LONG_MAX / HZ);
-static struct ctl_table hung_task_sysctls[] = {
+static const struct ctl_table hung_task_sysctls[] = {
#ifdef CONFIG_SMP
{
.procname = "hung_task_all_cpu_backtrace",
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2f4fb336dda1..73f7e1fd4ab4 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
preempt_disable();
if (cpu != smp_processor_id()) {
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index c0caa14880c3..c0bdc1686154 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -925,7 +925,7 @@ static int kexec_limit_handler(const struct ctl_table *table, int write,
return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
-static struct ctl_table kexec_core_sysctls[] = {
+static const struct ctl_table kexec_core_sysctls[] = {
{
.procname = "kexec_load_disabled",
.data = &kexec_load_disabled,
@@ -1001,6 +1001,12 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
+ /*
+ * This flow is analogous to hibernation flows that occur
+ * before creating an image and before jumping from the
+ * restore kernel to the image one, so it uses the same
+ * device callbacks as those two flows.
+ */
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1011,12 +1017,10 @@ int kernel_kexec(void)
error = dpm_suspend_start(PMSG_FREEZE);
if (error)
goto Resume_console;
- /* At this point, dpm_suspend_start() has been called,
- * but *not* dpm_suspend_end(). We *must* call
- * dpm_suspend_end() now. Otherwise, drivers for
- * some devices (e.g. interrupt controllers) become
- * desynchronized with the actual state of the
- * hardware at resume time, and evil weirdness ensues.
+ /*
+ * dpm_suspend_end() must be called after dpm_suspend_start()
+ * to complete the transition, like in the hibernation flows
+ * mentioned above.
*/
error = dpm_suspend_end(PMSG_FREEZE);
if (error)
@@ -1052,6 +1056,13 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
+ /*
+ * This flow is analogous to hibernation flows that occur after
+ * creating an image and after the image kernel has got control
+ * back, and in case the devices have been reset or otherwise
+ * manipulated in the meantime, it uses the device callbacks
+ * used by the latter.
+ */
syscore_resume();
Enable_irqs:
local_irq_enable();
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 42163c9e94e5..378088b07f46 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -29,25 +29,12 @@ asm (
extern char kernel_headers_data[];
extern char kernel_headers_data_end[];
-static ssize_t
-ikheaders_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t len)
-{
- memcpy(buf, &kernel_headers_data[off], len);
- return len;
-}
-
-static struct bin_attribute kheaders_attr __ro_after_init = {
- .attr = {
- .name = "kheaders.tar.xz",
- .mode = 0444,
- },
- .read = &ikheaders_read,
-};
+static struct bin_attribute kheaders_attr __ro_after_init =
+ __BIN_ATTR_SIMPLE_RO(kheaders.tar.xz, 0444);
static int __init ikheaders_init(void)
{
+ kheaders_attr.private = kernel_headers_data;
kheaders_attr.size = (kernel_headers_data_end -
kernel_headers_data);
return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b027a4030976..88aeac84e4c0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -39,6 +39,7 @@
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
+#include <linux/cleanup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -140,45 +141,39 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
- kprobe_opcode_t *slot = NULL;
/* Since the slot array is not protected by rcu, we need a mutex */
- mutex_lock(&c->mutex);
- retry:
- rcu_read_lock();
- list_for_each_entry_rcu(kip, &c->pages, list) {
- if (kip->nused < slots_per_page(c)) {
- int i;
-
- for (i = 0; i < slots_per_page(c); i++) {
- if (kip->slot_used[i] == SLOT_CLEAN) {
- kip->slot_used[i] = SLOT_USED;
- kip->nused++;
- slot = kip->insns + (i * c->insn_size);
- rcu_read_unlock();
- goto out;
+ guard(mutex)(&c->mutex);
+ do {
+ guard(rcu)();
+ list_for_each_entry_rcu(kip, &c->pages, list) {
+ if (kip->nused < slots_per_page(c)) {
+ int i;
+
+ for (i = 0; i < slots_per_page(c); i++) {
+ if (kip->slot_used[i] == SLOT_CLEAN) {
+ kip->slot_used[i] = SLOT_USED;
+ kip->nused++;
+ return kip->insns + (i * c->insn_size);
+ }
}
+ /* kip->nused is broken. Fix it. */
+ kip->nused = slots_per_page(c);
+ WARN_ON(1);
}
- /* kip->nused is broken. Fix it. */
- kip->nused = slots_per_page(c);
- WARN_ON(1);
}
- }
- rcu_read_unlock();
-
/* If there are any garbage slots, collect it and try again. */
- if (c->nr_garbage && collect_garbage_slots(c) == 0)
- goto retry;
+ } while (c->nr_garbage && collect_garbage_slots(c) == 0);
/* All out of space. Need to allocate a new page. */
kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
if (!kip)
- goto out;
+ return NULL;
kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
- goto out;
+ return NULL;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -187,14 +182,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
kip->ngarbage = 0;
kip->cache = c;
list_add_rcu(&kip->list, &c->pages);
- slot = kip->insns;
/* Record the perf ksymbol register event after adding the page */
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
PAGE_SIZE, false, c->sym);
-out:
- mutex_unlock(&c->mutex);
- return slot;
+
+ return kip->insns;
}
/* Return true if all garbages are collected, otherwise false. */
@@ -249,25 +242,35 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
return 0;
}
-void __free_insn_slot(struct kprobe_insn_cache *c,
- kprobe_opcode_t *slot, int dirty)
+static long __find_insn_page(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
{
- struct kprobe_insn_page *kip;
+ struct kprobe_insn_page *kip = NULL;
long idx;
- mutex_lock(&c->mutex);
- rcu_read_lock();
+ guard(rcu)();
list_for_each_entry_rcu(kip, &c->pages, list) {
idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
- if (idx >= 0 && idx < slots_per_page(c))
- goto out;
+ if (idx >= 0 && idx < slots_per_page(c)) {
+ *pkip = kip;
+ return idx;
+ }
}
/* Could not find this slot. */
WARN_ON(1);
- kip = NULL;
-out:
- rcu_read_unlock();
+ *pkip = NULL;
+ return -1;
+}
+
+void __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty)
+{
+ struct kprobe_insn_page *kip = NULL;
+ long idx;
+
+ guard(mutex)(&c->mutex);
+ idx = __find_insn_page(c, slot, &kip);
/* Mark and sweep: this may sleep */
if (kip) {
/* Check double free */
@@ -281,7 +284,6 @@ out:
collect_one_slot(kip, idx);
}
}
- mutex_unlock(&c->mutex);
}
/*
@@ -600,47 +602,43 @@ static void kick_kprobe_optimizer(void)
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
- mutex_lock(&kprobe_mutex);
- cpus_read_lock();
- mutex_lock(&text_mutex);
+ guard(mutex)(&kprobe_mutex);
- /*
- * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
- * kprobes before waiting for quiesence period.
- */
- do_unoptimize_kprobes();
+ scoped_guard(cpus_read_lock) {
+ guard(mutex)(&text_mutex);
- /*
- * Step 2: Wait for quiesence period to ensure all potentially
- * preempted tasks to have normally scheduled. Because optprobe
- * may modify multiple instructions, there is a chance that Nth
- * instruction is preempted. In that case, such tasks can return
- * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
- * Note that on non-preemptive kernel, this is transparently converted
- * to synchronoze_sched() to wait for all interrupts to have completed.
- */
- synchronize_rcu_tasks();
+ /*
+ * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
+ * kprobes before waiting for quiescence period.
+ */
+ do_unoptimize_kprobes();
- /* Step 3: Optimize kprobes after quiesence period */
- do_optimize_kprobes();
+ /*
+ * Step 2: Wait for quiescence period to ensure all potentially
+ * preempted tasks to have normally scheduled. Because optprobe
+ * may modify multiple instructions, there is a chance that Nth
+ * instruction is preempted. In that case, such tasks can return
+ * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+ * Note that on non-preemptive kernel, this is transparently converted
+ * to synchronize_sched() to wait for all interrupts to have completed.
+ */
+ synchronize_rcu_tasks();
- /* Step 4: Free cleaned kprobes after quiesence period */
- do_free_cleaned_kprobes();
+ /* Step 3: Optimize kprobes after quiescence period */
+ do_optimize_kprobes();
- mutex_unlock(&text_mutex);
- cpus_read_unlock();
+ /* Step 4: Free cleaned kprobes after quiescence period */
+ do_free_cleaned_kprobes();
+ }
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer();
-
- mutex_unlock(&kprobe_mutex);
}
-/* Wait for completing optimization and unoptimization */
-void wait_for_kprobe_optimizer(void)
+static void wait_for_kprobe_optimizer_locked(void)
{
- mutex_lock(&kprobe_mutex);
+ lockdep_assert_held(&kprobe_mutex);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);
@@ -652,8 +650,14 @@ void wait_for_kprobe_optimizer(void)
mutex_lock(&kprobe_mutex);
}
+}
- mutex_unlock(&kprobe_mutex);
+/* Wait for completing optimization and unoptimization */
+void wait_for_kprobe_optimizer(void)
+{
+ guard(mutex)(&kprobe_mutex);
+
+ wait_for_kprobe_optimizer_locked();
}
bool optprobe_queued_unopt(struct optimized_kprobe *op)
@@ -852,29 +856,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
return;
/* For preparing optimization, jump_label_text_reserved() is called. */
- cpus_read_lock();
- jump_label_lock();
- mutex_lock(&text_mutex);
+ guard(cpus_read_lock)();
+ guard(jump_label_lock)();
+ guard(mutex)(&text_mutex);
ap = alloc_aggr_kprobe(p);
if (!ap)
- goto out;
+ return;
op = container_of(ap, struct optimized_kprobe, kp);
if (!arch_prepared_optinsn(&op->optinsn)) {
/* If failed to setup optimizing, fallback to kprobe. */
arch_remove_optimized_kprobe(op);
kfree(op);
- goto out;
+ return;
}
init_aggr_kprobe(ap, p);
optimize_kprobe(ap); /* This just kicks optimizer thread. */
-
-out:
- mutex_unlock(&text_mutex);
- jump_label_unlock();
- cpus_read_unlock();
}
static void optimize_all_kprobes(void)
@@ -883,10 +882,10 @@ static void optimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* If optimization is already allowed, just return. */
if (kprobes_allow_optimization)
- goto out;
+ return;
cpus_read_lock();
kprobes_allow_optimization = true;
@@ -898,8 +897,6 @@ static void optimize_all_kprobes(void)
}
cpus_read_unlock();
pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
-out:
- mutex_unlock(&kprobe_mutex);
}
#ifdef CONFIG_SYSCTL
@@ -909,12 +906,10 @@ static void unoptimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* If optimization is already prohibited, just return. */
- if (!kprobes_allow_optimization) {
- mutex_unlock(&kprobe_mutex);
+ if (!kprobes_allow_optimization)
return;
- }
cpus_read_lock();
kprobes_allow_optimization = false;
@@ -926,10 +921,8 @@ static void unoptimize_all_kprobes(void)
}
}
cpus_read_unlock();
- mutex_unlock(&kprobe_mutex);
-
/* Wait for unoptimizing completion. */
- wait_for_kprobe_optimizer();
+ wait_for_kprobe_optimizer_locked();
pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}
@@ -941,7 +934,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
{
int ret;
- mutex_lock(&kprobe_sysctl_mutex);
+ guard(mutex)(&kprobe_sysctl_mutex);
sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
@@ -949,12 +942,11 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
optimize_all_kprobes();
else
unoptimize_all_kprobes();
- mutex_unlock(&kprobe_sysctl_mutex);
return ret;
}
-static struct ctl_table kprobe_sysctls[] = {
+static const struct ctl_table kprobe_sysctls[] = {
{
.procname = "kprobes-optimization",
.data = &sysctl_kprobes_optimization,
@@ -1024,7 +1016,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
#define kprobe_disarmed(p) kprobe_disabled(p)
-#define wait_for_kprobe_optimizer() do {} while (0)
+#define wait_for_kprobe_optimizer_locked() \
+ lockdep_assert_held(&kprobe_mutex)
static int reuse_unused_kprobe(struct kprobe *ap)
{
@@ -1078,20 +1071,18 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
if (*cnt == 0) {
ret = register_ftrace_function(ops);
- if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
- goto err_ftrace;
+ if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
+ /*
+ * At this point, since ops is not registered, we should be safe from
+ * registering empty filter.
+ */
+ ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
+ return ret;
+ }
}
(*cnt)++;
return ret;
-
-err_ftrace:
- /*
- * At this point, sinec ops is not registered, we should be sefe from
- * registering empty filter.
- */
- ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
- return ret;
}
static int arm_kprobe_ftrace(struct kprobe *p)
@@ -1163,12 +1154,9 @@ static int arm_kprobe(struct kprobe *kp)
if (unlikely(kprobe_ftrace(kp)))
return arm_kprobe_ftrace(kp);
- cpus_read_lock();
- mutex_lock(&text_mutex);
+ guard(cpus_read_lock)();
+ guard(mutex)(&text_mutex);
__arm_kprobe(kp);
- mutex_unlock(&text_mutex);
- cpus_read_unlock();
-
return 0;
}
@@ -1177,12 +1165,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
if (unlikely(kprobe_ftrace(kp)))
return disarm_kprobe_ftrace(kp);
- cpus_read_lock();
- mutex_lock(&text_mutex);
+ guard(cpus_read_lock)();
+ guard(mutex)(&text_mutex);
__disarm_kprobe(kp, reopt);
- mutex_unlock(&text_mutex);
- cpus_read_unlock();
-
return 0;
}
@@ -1299,62 +1284,55 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
int ret = 0;
struct kprobe *ap = orig_p;
- cpus_read_lock();
-
- /* For preparing optimization, jump_label_text_reserved() is called */
- jump_label_lock();
- mutex_lock(&text_mutex);
-
- if (!kprobe_aggrprobe(orig_p)) {
- /* If 'orig_p' is not an 'aggr_kprobe', create new one. */
- ap = alloc_aggr_kprobe(orig_p);
- if (!ap) {
- ret = -ENOMEM;
- goto out;
+ scoped_guard(cpus_read_lock) {
+ /* For preparing optimization, jump_label_text_reserved() is called */
+ guard(jump_label_lock)();
+ guard(mutex)(&text_mutex);
+
+ if (!kprobe_aggrprobe(orig_p)) {
+ /* If 'orig_p' is not an 'aggr_kprobe', create new one. */
+ ap = alloc_aggr_kprobe(orig_p);
+ if (!ap)
+ return -ENOMEM;
+ init_aggr_kprobe(ap, orig_p);
+ } else if (kprobe_unused(ap)) {
+ /* This probe is going to die. Rescue it */
+ ret = reuse_unused_kprobe(ap);
+ if (ret)
+ return ret;
}
- init_aggr_kprobe(ap, orig_p);
- } else if (kprobe_unused(ap)) {
- /* This probe is going to die. Rescue it */
- ret = reuse_unused_kprobe(ap);
- if (ret)
- goto out;
- }
- if (kprobe_gone(ap)) {
- /*
- * Attempting to insert new probe at the same location that
- * had a probe in the module vaddr area which already
- * freed. So, the instruction slot has already been
- * released. We need a new slot for the new probe.
- */
- ret = arch_prepare_kprobe(ap);
- if (ret)
+ if (kprobe_gone(ap)) {
/*
- * Even if fail to allocate new slot, don't need to
- * free the 'ap'. It will be used next time, or
- * freed by unregister_kprobe().
+ * Attempting to insert new probe at the same location that
+ * had a probe in the module vaddr area which was already
+ * freed. So, the instruction slot has already been
+ * released. We need a new slot for the new probe.
*/
- goto out;
-
- /* Prepare optimized instructions if possible. */
- prepare_optimized_kprobe(ap);
+ ret = arch_prepare_kprobe(ap);
+ if (ret)
+ /*
+ * Even if fail to allocate new slot, don't need to
+ * free the 'ap'. It will be used next time, or
+ * freed by unregister_kprobe().
+ */
+ return ret;
- /*
- * Clear gone flag to prevent allocating new slot again, and
- * set disabled flag because it is not armed yet.
- */
- ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
- | KPROBE_FLAG_DISABLED;
- }
+ /* Prepare optimized instructions if possible. */
+ prepare_optimized_kprobe(ap);
- /* Copy the insn slot of 'p' to 'ap'. */
- copy_kprobe(ap, p);
- ret = add_new_kprobe(ap, p);
+ /*
+ * Clear gone flag to prevent allocating new slot again, and
+ * set disabled flag because it is not armed yet.
+ */
+ ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+ | KPROBE_FLAG_DISABLED;
+ }
-out:
- mutex_unlock(&text_mutex);
- jump_label_unlock();
- cpus_read_unlock();
+ /* Copy the insn slot of 'p' to 'ap'. */
+ copy_kprobe(ap, p);
+ ret = add_new_kprobe(ap, p);
+ }
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1448,7 +1426,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
unsigned long offset, bool *on_func_entry)
{
if ((symbol_name && addr) || (!symbol_name && !addr))
- goto invalid;
+ return ERR_PTR(-EINVAL);
if (symbol_name) {
/*
@@ -1478,16 +1456,16 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
* at the start of the function.
*/
addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
- if (addr)
- return addr;
+ if (!addr)
+ return ERR_PTR(-EINVAL);
-invalid:
- return ERR_PTR(-EINVAL);
+ return addr;
}
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
bool on_func_entry;
+
return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}
@@ -1505,15 +1483,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
if (unlikely(!ap))
return NULL;
- if (p != ap) {
- list_for_each_entry(list_p, &ap->list, list)
- if (list_p == p)
- /* kprobe p is a valid probe */
- goto valid;
- return NULL;
- }
-valid:
- return ap;
+ if (p == ap)
+ return ap;
+
+ list_for_each_entry(list_p, &ap->list, list)
+ if (list_p == p)
+ /* kprobe p is a valid probe */
+ return ap;
+
+ return NULL;
}
/*
@@ -1522,14 +1500,12 @@ valid:
*/
static inline int warn_kprobe_rereg(struct kprobe *p)
{
- int ret = 0;
+ guard(mutex)(&kprobe_mutex);
- mutex_lock(&kprobe_mutex);
if (WARN_ON_ONCE(__get_valid_kprobe(p)))
- ret = -EINVAL;
- mutex_unlock(&kprobe_mutex);
+ return -EINVAL;
- return ret;
+ return 0;
}
static int check_ftrace_location(struct kprobe *p)
@@ -1565,17 +1541,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
ret = check_ftrace_location(p);
if (ret)
return ret;
- jump_label_lock();
- preempt_disable();
+
+ guard(jump_label_lock)();
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
+ guard(preempt)();
*probed_mod = __module_text_address((unsigned long) p->addr);
- if (!(*probed_mod)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!(*probed_mod))
+ return -EINVAL;
+
+ /*
+ * We must hold a refcount of the probed module while updating
+ * its code to prohibit unexpected unloading.
+ */
+ if (unlikely(!try_module_get(*probed_mod)))
+ return -ENOENT;
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1584,49 +1566,71 @@ static int check_kprobe_address_safe(struct kprobe *p,
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr) ||
is_cfi_preamble_symbol((unsigned long)p->addr)) {
- ret = -EINVAL;
- goto out;
+ module_put(*probed_mod);
+ return -EINVAL;
}
/* Get module refcount and reject __init functions for loaded modules. */
if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
/*
- * We must hold a refcount of the probed module while updating
- * its code to prohibit unexpected unloading.
- */
- if (unlikely(!try_module_get(*probed_mod))) {
- ret = -ENOENT;
- goto out;
- }
-
- /*
* If the module freed '.init.text', we couldn't insert
* kprobes in there.
*/
if (within_module_init((unsigned long)p->addr, *probed_mod) &&
!module_is_coming(*probed_mod)) {
module_put(*probed_mod);
- *probed_mod = NULL;
- ret = -ENOENT;
+ return -ENOENT;
}
}
-out:
- preempt_enable();
- jump_label_unlock();
+ return 0;
+}
- return ret;
+static int __register_kprobe(struct kprobe *p)
+{
+ int ret;
+ struct kprobe *old_p;
+
+ guard(mutex)(&kprobe_mutex);
+
+ old_p = get_kprobe(p->addr);
+ if (old_p)
+ /* Since this may unoptimize 'old_p', locking 'text_mutex'. */
+ return register_aggr_kprobe(old_p, p);
+
+ scoped_guard(cpus_read_lock) {
+ /* Prevent text modification */
+ guard(mutex)(&text_mutex);
+ ret = prepare_kprobe(p);
+ if (ret)
+ return ret;
+ }
+
+ INIT_HLIST_NODE(&p->hlist);
+ hlist_add_head_rcu(&p->hlist,
+ &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+
+ if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+ ret = arm_kprobe(p);
+ if (ret) {
+ hlist_del_rcu(&p->hlist);
+ synchronize_rcu();
+ }
+ }
+
+ /* Try to optimize kprobe */
+ try_to_optimize_kprobe(p);
+ return 0;
}
int register_kprobe(struct kprobe *p)
{
int ret;
- struct kprobe *old_p;
struct module *probed_mod;
kprobe_opcode_t *addr;
bool on_func_entry;
- /* Adjust probe address from symbol */
+ /* Canonicalize probe address from symbol */
addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -1638,6 +1642,8 @@ int register_kprobe(struct kprobe *p)
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
+ if (on_func_entry)
+ p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);
@@ -1645,44 +1651,7 @@ int register_kprobe(struct kprobe *p)
if (ret)
return ret;
- mutex_lock(&kprobe_mutex);
-
- if (on_func_entry)
- p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
-
- old_p = get_kprobe(p->addr);
- if (old_p) {
- /* Since this may unoptimize 'old_p', locking 'text_mutex'. */
- ret = register_aggr_kprobe(old_p, p);
- goto out;
- }
-
- cpus_read_lock();
- /* Prevent text modification */
- mutex_lock(&text_mutex);
- ret = prepare_kprobe(p);
- mutex_unlock(&text_mutex);
- cpus_read_unlock();
- if (ret)
- goto out;
-
- INIT_HLIST_NODE(&p->hlist);
- hlist_add_head_rcu(&p->hlist,
- &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
-
- if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
- ret = arm_kprobe(p);
- if (ret) {
- hlist_del_rcu(&p->hlist);
- synchronize_rcu();
- goto out;
- }
- }
-
- /* Try to optimize kprobe */
- try_to_optimize_kprobe(p);
-out:
- mutex_unlock(&kprobe_mutex);
+ ret = __register_kprobe(p);
if (probed_mod)
module_put(probed_mod);
@@ -1761,29 +1730,31 @@ static int __unregister_kprobe_top(struct kprobe *p)
if (IS_ERR(ap))
return PTR_ERR(ap);
- if (ap == p)
- /*
- * This probe is an independent(and non-optimized) kprobe
- * (not an aggrprobe). Remove from the hash list.
- */
- goto disarmed;
+ WARN_ON(ap != p && !kprobe_aggrprobe(ap));
- /* Following process expects this probe is an aggrprobe */
- WARN_ON(!kprobe_aggrprobe(ap));
-
- if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
+ /*
+ * If the probe is an independent(and non-optimized) kprobe
+ * (not an aggrprobe), the last kprobe on the aggrprobe, or
+ * kprobe is already disarmed, just remove from the hash list.
+ */
+ if (ap == p ||
+ (list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
/*
* !disarmed could be happen if the probe is under delayed
* unoptimizing.
*/
- goto disarmed;
- else {
- /* If disabling probe has special handlers, update aggrprobe */
- if (p->post_handler && !kprobe_gone(p)) {
- list_for_each_entry(list_p, &ap->list, list) {
- if ((list_p != p) && (list_p->post_handler))
- goto noclean;
- }
+ hlist_del_rcu(&ap->hlist);
+ return 0;
+ }
+
+ /* If disabling probe has special handlers, update aggrprobe */
+ if (p->post_handler && !kprobe_gone(p)) {
+ list_for_each_entry(list_p, &ap->list, list) {
+ if ((list_p != p) && (list_p->post_handler))
+ break;
+ }
+ /* No other probe has post_handler */
+ if (list_entry_is_head(list_p, &ap->list, list)) {
/*
* For the kprobe-on-ftrace case, we keep the
* post_handler setting to identify this aggrprobe
@@ -1792,24 +1763,21 @@ static int __unregister_kprobe_top(struct kprobe *p)
if (!kprobe_ftrace(ap))
ap->post_handler = NULL;
}
-noclean:
+ }
+
+ /*
+ * Remove from the aggrprobe: this path will do nothing in
+ * __unregister_kprobe_bottom().
+ */
+ list_del_rcu(&p->list);
+ if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
/*
- * Remove from the aggrprobe: this path will do nothing in
- * __unregister_kprobe_bottom().
+ * Try to optimize this probe again, because post
+ * handler may have been changed.
*/
- list_del_rcu(&p->list);
- if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
- /*
- * Try to optimize this probe again, because post
- * handler may have been changed.
- */
- optimize_kprobe(ap);
- }
+ optimize_kprobe(ap);
return 0;
-disarmed:
- hlist_del_rcu(&ap->hlist);
- return 0;
}
static void __unregister_kprobe_bottom(struct kprobe *p)
@@ -1858,12 +1826,11 @@ void unregister_kprobes(struct kprobe **kps, int num)
if (num <= 0)
return;
- mutex_lock(&kprobe_mutex);
- for (i = 0; i < num; i++)
- if (__unregister_kprobe_top(kps[i]) < 0)
- kps[i]->addr = NULL;
- mutex_unlock(&kprobe_mutex);
-
+ scoped_guard(mutex, &kprobe_mutex) {
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(kps[i]) < 0)
+ kps[i]->addr = NULL;
+ }
synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)
@@ -2302,8 +2269,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
if (num <= 0)
return;
- mutex_lock(&kprobe_mutex);
for (i = 0; i < num; i++) {
+ guard(mutex)(&kprobe_mutex);
+
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
@@ -2312,7 +2280,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
}
- mutex_unlock(&kprobe_mutex);
synchronize_rcu();
for (i = 0; i < num; i++) {
@@ -2393,18 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
- int ret = 0;
struct kprobe *p;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* Disable this kprobe */
p = __disable_kprobe(kp);
- if (IS_ERR(p))
- ret = PTR_ERR(p);
- mutex_unlock(&kprobe_mutex);
- return ret;
+ return IS_ERR(p) ? PTR_ERR(p) : 0;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
@@ -2414,20 +2377,16 @@ int enable_kprobe(struct kprobe *kp)
int ret = 0;
struct kprobe *p;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* Check whether specified probe is valid. */
p = __get_valid_kprobe(kp);
- if (unlikely(p == NULL)) {
- ret = -EINVAL;
- goto out;
- }
+ if (unlikely(p == NULL))
+ return -EINVAL;
- if (kprobe_gone(kp)) {
+ if (kprobe_gone(kp))
/* This kprobe has gone, we couldn't enable it. */
- ret = -EINVAL;
- goto out;
- }
+ return -EINVAL;
if (p != kp)
kp->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2441,8 +2400,6 @@ int enable_kprobe(struct kprobe *kp)
kp->flags |= KPROBE_FLAG_DISABLED;
}
}
-out:
- mutex_unlock(&kprobe_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
@@ -2630,11 +2587,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
- if (val == MODULE_STATE_COMING) {
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
+
+ if (val == MODULE_STATE_COMING)
add_module_kprobe_blacklist(mod);
- mutex_unlock(&kprobe_mutex);
- }
+
if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
return NOTIFY_DONE;
@@ -2644,7 +2601,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
* notified, only '.init.text' section would be freed. We need to
* disable kprobes which have been inserted in the sections.
*/
- mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist)
@@ -2667,7 +2623,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
- mutex_unlock(&kprobe_mutex);
return NOTIFY_DONE;
}
@@ -2695,7 +2650,7 @@ void kprobe_free_init_mem(void)
struct kprobe *p;
int i;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* Kill all kprobes on initmem because the target code has been freed. */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -2705,8 +2660,6 @@ void kprobe_free_init_mem(void)
kill_kprobe(p);
}
}
-
- mutex_unlock(&kprobe_mutex);
}
static int __init init_kprobes(void)
@@ -2902,11 +2855,11 @@ static int arm_all_kprobes(void)
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* If kprobes are armed, just return */
if (!kprobes_all_disarmed)
- goto already_enabled;
+ return 0;
/*
* optimize_kprobe() called by arm_kprobe() checks
@@ -2936,8 +2889,6 @@ static int arm_all_kprobes(void)
else
pr_info("Kprobes globally enabled\n");
-already_enabled:
- mutex_unlock(&kprobe_mutex);
return ret;
}
@@ -2948,13 +2899,11 @@ static int disarm_all_kprobes(void)
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
- mutex_lock(&kprobe_mutex);
+ guard(mutex)(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
- if (kprobes_all_disarmed) {
- mutex_unlock(&kprobe_mutex);
+ if (kprobes_all_disarmed)
return 0;
- }
kprobes_all_disarmed = true;
@@ -2979,11 +2928,8 @@ static int disarm_all_kprobes(void)
else
pr_info("Kprobes globally disabled\n");
- mutex_unlock(&kprobe_mutex);
-
/* Wait for disarming all kprobes by optimizer */
- wait_for_kprobe_optimizer();
-
+ wait_for_kprobe_optimizer_locked();
return ret;
}
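
The kprobes conversions above lean on the scope-based cleanup helpers guard() and scoped_guard() from <linux/cleanup.h>, which release the lock automatically on every return path and so remove the goto-out/unlock boilerplate. As a rough user-space analogue of the same idea, built on the compiler's cleanup attribute and using a hypothetical demo_mutex, the following sketch is for illustration only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
        if (*m)
                pthread_mutex_unlock(*m);
}

/* Every return path drops the lock, much as guard(mutex)(&kprobe_mutex) does above. */
static int guarded_update(int value)
{
        pthread_mutex_t *lock_guard __attribute__((cleanup(unlock_cleanup))) = &demo_mutex;

        pthread_mutex_lock(lock_guard);
        if (value < 0)
                return -1;              /* early return still unlocks */
        printf("value=%d\n", value);
        return 0;
}

int main(void)
{
        guarded_update(42);
        guarded_update(-1);
        return 0;
}
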
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 1bab21b4718f..eefb67d9883c 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -239,21 +239,7 @@ extern const void __start_notes;
extern const void __stop_notes;
#define notes_size (&__stop_notes - &__start_notes)
-static ssize_t notes_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- memcpy(buf, &__start_notes + off, count);
- return count;
-}
-
-static struct bin_attribute notes_attr __ro_after_init = {
- .attr = {
- .name = "notes",
- .mode = S_IRUGO,
- },
- .read = &notes_read,
-};
+static __ro_after_init BIN_ATTR_SIMPLE_RO(notes);
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);
@@ -307,8 +293,9 @@ static int __init ksysfs_init(void)
goto kset_exit;
if (notes_size > 0) {
- notes_attr.size = notes_size;
- error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
+ bin_attr_notes.private = (void *)&__start_notes;
+ bin_attr_notes.size = notes_size;
+ error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes);
if (error)
goto group_exit;
}
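
For reference, BIN_ATTR_SIMPLE_RO() paired with the simple read helper serves the attribute's data straight from its ->private pointer and ->size, which is essentially what the hand-rolled notes_read() did. A minimal user-space sketch of that read-from-a-private-buffer pattern, with hypothetical names and for illustration only:

#include <stdio.h>
#include <string.h>

struct demo_attr {
        const void *private;            /* backing data */
        size_t size;                    /* total size of the data */
};

/* Copy at most count bytes starting at off, clamped to the buffer size. */
static size_t demo_simple_read(const struct demo_attr *attr, char *buf,
                               size_t off, size_t count)
{
        if (off >= attr->size)
                return 0;
        if (count > attr->size - off)
                count = attr->size - off;
        memcpy(buf, (const char *)attr->private + off, count);
        return count;
}

int main(void)
{
        static const char notes[] = "example note payload";
        struct demo_attr attr = { .private = notes, .size = sizeof(notes) };
        char buf[8];

        printf("read %zu bytes\n", demo_simple_read(&attr, buf, 8, sizeof(buf)));
        return 0;
}
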
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6a034c76b6e9..5dc5b0d7238e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -859,7 +859,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
struct kthread *kthread = to_kthread(p);
cpumask_var_t affinity;
unsigned long flags;
- int ret;
+ int ret = 0;
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
WARN_ON(1);
@@ -892,7 +892,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
out:
free_cpumask_var(affinity);
- return 0;
+ return ret;
}
/*
@@ -1175,7 +1175,7 @@ static void kthread_insert_work(struct kthread_worker *worker,
* @work: kthread_work to queue
*
* Queue @work to work processor @task for async execution. @task
- * must have been created with kthread_worker_create(). Returns %true
+ * must have been created with kthread_create_worker(). Returns %true
* if @work was successfully queued, %false if it was already pending.
*
* Reinitialize the work if it needs to be used by another worker.
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 7a75eab9c179..d4281d1e13a6 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -77,7 +77,7 @@ static int sysctl_latencytop(const struct ctl_table *table, int write, void *buf
return err;
}
-static struct ctl_table latencytop_sysctl[] = {
+static const struct ctl_table latencytop_sysctl[] = {
{
.procname = "latencytop",
.data = &latencytop_enabled,
@@ -158,9 +158,9 @@ account_global_scheduler_latency(struct task_struct *tsk,
/**
* __account_scheduler_latency - record an occurred latency
- * @tsk - the task struct of the task hitting the latency
- * @usecs - the duration of the latency in microseconds
- * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
+ * @tsk: the task struct of the task hitting the latency
+ * @usecs: the duration of the latency in microseconds
+ * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
*
* This function is the main entry point for recording latency entries
* as called by the scheduler.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 29acd238dad7..4470680f0226 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
#endif
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_lockdep_table[] = {
+static const struct ctl_table kern_lockdep_table[] = {
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 7b329057997a..d7762ef5949a 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -169,6 +169,36 @@ config MODVERSIONS
make them incompatible with the kernel you are running. If
unsure, say N.
+choice
+ prompt "Module versioning implementation"
+ depends on MODVERSIONS
+ help
+ Select the tool used to calculate symbol versions for modules.
+
+ If unsure, select GENKSYMS.
+
+config GENKSYMS
+ bool "genksyms (from source code)"
+ help
+ Calculate symbol versions from pre-processed source code using
+ genksyms.
+
+ If unsure, say Y.
+
+config GENDWARFKSYMS
+ bool "gendwarfksyms (from debugging information)"
+ depends on DEBUG_INFO
+ # Requires full debugging information, split DWARF not supported.
+ depends on !DEBUG_INFO_REDUCED && !DEBUG_INFO_SPLIT
+ # Requires ELF object files.
+ depends on !LTO
+ help
+ Calculate symbol versions from DWARF debugging information using
+ gendwarfksyms. Requires DEBUG_INFO to be enabled.
+
+ If unsure, say N.
+endchoice
+
config ASM_MODVERSIONS
bool
default HAVE_ASM_MODVERSIONS && MODVERSIONS
@@ -177,6 +207,31 @@ config ASM_MODVERSIONS
assembly. This can be enabled only when the target architecture
supports it.
+config EXTENDED_MODVERSIONS
+ bool "Extended Module Versioning Support"
+ depends on MODVERSIONS
+ help
+ This enables extended MODVERSIONs support, allowing long symbol
+ names to be versioned.
+
+ The most likely reason you would enable this is to enable Rust
+ support. If unsure, say N.
+
+config BASIC_MODVERSIONS
+ bool "Basic Module Versioning Support"
+ depends on MODVERSIONS
+ default y
+ help
+ This enables basic MODVERSIONS support, allowing older tools or
+ kernels to potentially load modules.
+
+ Disabling this may cause older `modprobe` or `kmod` to be unable
+ to read MODVERSIONS information from built modules. With this
+ disabled, older kernels may treat this module as unversioned.
+
+ This is enabled by default when MODVERSIONS are enabled.
+ If unsure, say Y.
+
config MODULE_SRCVERSION_ALL
bool "Source checksum for all modules"
help
@@ -231,6 +286,7 @@ comment "Do not forget to sign required modules with scripts/sign-file"
choice
prompt "Hash algorithm to sign modules"
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ default MODULE_SIG_SHA512
help
This determines which sort of hashing algorithm will be used during
signature generation. This algorithm _must_ be built into the kernel
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index daef2be83902..d09b46ef032f 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -47,16 +47,16 @@ struct kernel_symbol {
extern struct mutex module_mutex;
extern struct list_head modules;
-extern struct module_attribute *modinfo_attrs[];
-extern size_t modinfo_attrs_count;
+extern const struct module_attribute *const modinfo_attrs[];
+extern const size_t modinfo_attrs_count;
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
-extern const s32 __start___kcrctab[];
-extern const s32 __start___kcrctab_gpl[];
+extern const u32 __start___kcrctab[];
+extern const u32 __start___kcrctab_gpl[];
struct load_info {
const char *name;
@@ -86,6 +86,8 @@ struct load_info {
unsigned int vers;
unsigned int info;
unsigned int pcpu;
+ unsigned int vers_ext_crc;
+ unsigned int vers_ext_name;
} index;
};
@@ -102,7 +104,7 @@ struct find_symbol_arg {
/* Output */
struct module *owner;
- const s32 *crc;
+ const u32 *crc;
const struct kernel_symbol *sym;
enum mod_license license;
};
@@ -327,7 +329,8 @@ static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *
}
#endif /* CONFIG_MODULES_TREE_LOOKUP */
-int module_enable_rodata_ro(const struct module *mod, bool after_init);
+int module_enable_rodata_ro(const struct module *mod);
+int module_enable_rodata_ro_after_init(const struct module *mod);
int module_enable_data_nx(const struct module *mod);
int module_enable_text_rox(const struct module *mod);
int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
@@ -384,16 +387,25 @@ static inline void init_param_lock(struct module *mod) { }
#ifdef CONFIG_MODVERSIONS
int check_version(const struct load_info *info,
- const char *symname, struct module *mod, const s32 *crc);
+ const char *symname, struct module *mod, const u32 *crc);
void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp,
struct kernel_symbol *ks, struct tracepoint * const *tp);
int check_modstruct_version(const struct load_info *info, struct module *mod);
int same_magic(const char *amagic, const char *bmagic, bool has_crcs);
+struct modversion_info_ext {
+ size_t remaining;
+ const u32 *crc;
+ const char *name;
+};
+void modversion_ext_start(const struct load_info *info, struct modversion_info_ext *ver);
+void modversion_ext_advance(struct modversion_info_ext *ver);
+#define for_each_modversion_info_ext(ver, info) \
+ for (modversion_ext_start(info, &ver); ver.remaining > 0; modversion_ext_advance(&ver))
#else /* !CONFIG_MODVERSIONS */
static inline int check_version(const struct load_info *info,
const char *symname,
struct module *mod,
- const s32 *crc)
+ const u32 *crc)
{
return 1;
}
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 5399c182b3cb..1fb9ad289a6f 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -86,7 +86,7 @@ struct mod_tree_root mod_tree __cacheline_aligned = {
struct symsearch {
const struct kernel_symbol *start, *stop;
- const s32 *crcs;
+ const u32 *crcs;
enum mod_license license;
};
@@ -538,7 +538,7 @@ static void setup_modinfo_##field(struct module *mod, const char *s) \
{ \
mod->field = kstrdup(s, GFP_KERNEL); \
} \
-static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
+static ssize_t show_modinfo_##field(const struct module_attribute *mattr, \
struct module_kobject *mk, char *buffer) \
{ \
return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
@@ -552,7 +552,7 @@ static void free_modinfo_##field(struct module *mod) \
kfree(mod->field); \
mod->field = NULL; \
} \
-static struct module_attribute modinfo_##field = { \
+static const struct module_attribute modinfo_##field = { \
.attr = { .name = __stringify(field), .mode = 0444 }, \
.show = show_modinfo_##field, \
.setup = setup_modinfo_##field, \
@@ -842,13 +842,13 @@ void symbol_put_addr(void *addr)
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
-static ssize_t show_refcnt(struct module_attribute *mattr,
+static ssize_t show_refcnt(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
-static struct module_attribute modinfo_refcnt =
+static const struct module_attribute modinfo_refcnt =
__ATTR(refcnt, 0444, show_refcnt, NULL);
void __module_get(struct module *module)
@@ -917,7 +917,7 @@ size_t module_flags_taint(unsigned long taints, char *buf)
return l;
}
-static ssize_t show_initstate(struct module_attribute *mattr,
+static ssize_t show_initstate(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
const char *state = "unknown";
@@ -938,10 +938,10 @@ static ssize_t show_initstate(struct module_attribute *mattr,
return sprintf(buffer, "%s\n", state);
}
-static struct module_attribute modinfo_initstate =
+static const struct module_attribute modinfo_initstate =
__ATTR(initstate, 0444, show_initstate, NULL);
-static ssize_t store_uevent(struct module_attribute *mattr,
+static ssize_t store_uevent(const struct module_attribute *mattr,
struct module_kobject *mk,
const char *buffer, size_t count)
{
@@ -951,10 +951,10 @@ static ssize_t store_uevent(struct module_attribute *mattr,
return rc ? rc : count;
}
-struct module_attribute module_uevent =
+const struct module_attribute module_uevent =
__ATTR(uevent, 0200, NULL, store_uevent);
-static ssize_t show_coresize(struct module_attribute *mattr,
+static ssize_t show_coresize(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = mk->mod->mem[MOD_TEXT].size;
@@ -966,11 +966,11 @@ static ssize_t show_coresize(struct module_attribute *mattr,
return sprintf(buffer, "%u\n", size);
}
-static struct module_attribute modinfo_coresize =
+static const struct module_attribute modinfo_coresize =
__ATTR(coresize, 0444, show_coresize, NULL);
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
-static ssize_t show_datasize(struct module_attribute *mattr,
+static ssize_t show_datasize(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = 0;
@@ -980,11 +980,11 @@ static ssize_t show_datasize(struct module_attribute *mattr,
return sprintf(buffer, "%u\n", size);
}
-static struct module_attribute modinfo_datasize =
+static const struct module_attribute modinfo_datasize =
__ATTR(datasize, 0444, show_datasize, NULL);
#endif
-static ssize_t show_initsize(struct module_attribute *mattr,
+static ssize_t show_initsize(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
unsigned int size = 0;
@@ -994,10 +994,10 @@ static ssize_t show_initsize(struct module_attribute *mattr,
return sprintf(buffer, "%u\n", size);
}
-static struct module_attribute modinfo_initsize =
+static const struct module_attribute modinfo_initsize =
__ATTR(initsize, 0444, show_initsize, NULL);
-static ssize_t show_taint(struct module_attribute *mattr,
+static ssize_t show_taint(const struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
size_t l;
@@ -1007,10 +1007,10 @@ static ssize_t show_taint(struct module_attribute *mattr,
return l;
}
-static struct module_attribute modinfo_taint =
+static const struct module_attribute modinfo_taint =
__ATTR(taint, 0444, show_taint, NULL);
-struct module_attribute *modinfo_attrs[] = {
+const struct module_attribute *const modinfo_attrs[] = {
&module_uevent,
&modinfo_version,
&modinfo_srcversion,
@@ -1027,7 +1027,7 @@ struct module_attribute *modinfo_attrs[] = {
NULL,
};
-size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);
+const size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);
static const char vermagic[] = VERMAGIC_STRING;
@@ -1681,7 +1681,7 @@ static void module_license_taint_check(struct module *mod, const char *license)
static void setup_modinfo(struct module *mod, struct load_info *info)
{
- struct module_attribute *attr;
+ const struct module_attribute *attr;
int i;
for (i = 0; (attr = modinfo_attrs[i]); i++) {
@@ -1692,7 +1692,7 @@ static void setup_modinfo(struct module *mod, struct load_info *info)
static void free_modinfo(struct module *mod)
{
- struct module_attribute *attr;
+ const struct module_attribute *attr;
int i;
for (i = 0; (attr = modinfo_attrs[i]); i++) {
@@ -2074,6 +2074,82 @@ static int elf_validity_cache_index_str(struct load_info *info)
}
/**
+ * elf_validity_cache_index_versions() - Validate and cache version indices
+ * @info: Load info to cache version indices in.
+ * Must have &load_info->sechdrs and &load_info->secstrings populated.
+ * @flags: Load flags, relevant to suppress version loading, see
+ * uapi/linux/module.h
+ *
+ * If we're ignoring modversions based on @flags, zero all version indices
+ * and return validity. Otherwise check:
+ *
+ * * If "__version_ext_crcs" is present, "__version_ext_names" is present
+ * * There is a name present for every crc
+ *
+ * Then populate:
+ *
+ * * &load_info->index.vers
+ * * &load_info->index.vers_ext_crc
+ * * &load_info->index.vers_ext_name
+ *
+ * if present.
+ *
+ * Return: %0 if valid, %-ENOEXEC on failure.
+ */
+static int elf_validity_cache_index_versions(struct load_info *info, int flags)
+{
+ unsigned int vers_ext_crc;
+ unsigned int vers_ext_name;
+ size_t crc_count;
+ size_t remaining_len;
+ size_t name_size;
+ char *name;
+
+ /* If modversions were suppressed, pretend we didn't find any */
+ if (flags & MODULE_INIT_IGNORE_MODVERSIONS) {
+ info->index.vers = 0;
+ info->index.vers_ext_crc = 0;
+ info->index.vers_ext_name = 0;
+ return 0;
+ }
+
+ vers_ext_crc = find_sec(info, "__version_ext_crcs");
+ vers_ext_name = find_sec(info, "__version_ext_names");
+
+ /* If we have one field, we must have the other */
+ if (!!vers_ext_crc != !!vers_ext_name) {
+ pr_err("extended version crc+name presence does not match");
+ return -ENOEXEC;
+ }
+
+ /*
+ * If we have extended version information, we should have the same
+ * number of entries in every section.
+ */
+ if (vers_ext_crc) {
+ crc_count = info->sechdrs[vers_ext_crc].sh_size / sizeof(u32);
+ name = (void *)info->hdr +
+ info->sechdrs[vers_ext_name].sh_offset;
+ remaining_len = info->sechdrs[vers_ext_name].sh_size;
+
+ while (crc_count--) {
+ name_size = strnlen(name, remaining_len) + 1;
+ if (name_size > remaining_len) {
+ pr_err("more extended version crcs than names");
+ return -ENOEXEC;
+ }
+ remaining_len -= name_size;
+ name += name_size;
+ }
+ }
+
+ info->index.vers = find_sec(info, "__versions");
+ info->index.vers_ext_crc = vers_ext_crc;
+ info->index.vers_ext_name = vers_ext_name;
+ return 0;
+}
+
+/**
* elf_validity_cache_index() - Resolve, validate, cache section indices
* @info: Load info to read from and update.
* &load_info->sechdrs and &load_info->secstrings must be populated.
@@ -2087,9 +2163,7 @@ static int elf_validity_cache_index_str(struct load_info *info)
* * elf_validity_cache_index_mod()
* * elf_validity_cache_index_sym()
* * elf_validity_cache_index_str()
- *
- * If versioning is not suppressed via flags, load the version index from
- * a section called "__versions" with no validation.
+ * * elf_validity_cache_index_versions()
*
* If CONFIG_SMP is enabled, load the percpu section by name with no
* validation.
@@ -2112,11 +2186,9 @@ static int elf_validity_cache_index(struct load_info *info, int flags)
err = elf_validity_cache_index_str(info);
if (err < 0)
return err;
-
- if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
- info->index.vers = 0; /* Pretend no __versions section! */
- else
- info->index.vers = find_sec(info, "__versions");
+ err = elf_validity_cache_index_versions(info, flags);
+ if (err < 0)
+ return err;
info->index.pcpu = find_pcpusec(info);
@@ -2327,16 +2399,29 @@ static int rewrite_section_headers(struct load_info *info, int flags)
/* Track but don't keep modinfo and version sections. */
info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
+ info->sechdrs[info->index.vers_ext_crc].sh_flags &=
+ ~(unsigned long)SHF_ALLOC;
+ info->sechdrs[info->index.vers_ext_name].sh_flags &=
+ ~(unsigned long)SHF_ALLOC;
info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
return 0;
}
+static const char *const module_license_offenders[] = {
+ /* driverloader was caught wrongly pretending to be under GPL */
+ "driverloader",
+
+ /* lve claims to be GPL but upstream won't provide source */
+ "lve",
+};
+
/*
* These calls taint the kernel depending certain module circumstances */
static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
{
int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
+ size_t i;
if (!get_modinfo(info, "intree")) {
if (!test_taint(TAINT_OOT_MODULE))
@@ -2385,15 +2470,11 @@ static void module_augment_kernel_taints(struct module *mod, struct load_info *i
if (strcmp(mod->name, "ndiswrapper") == 0)
add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
- /* driverloader was caught wrongly pretending to be under GPL */
- if (strcmp(mod->name, "driverloader") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
- LOCKDEP_NOW_UNRELIABLE);
-
- /* lve claims to be GPL but upstream won't provide source */
- if (strcmp(mod->name, "lve") == 0)
- add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
- LOCKDEP_NOW_UNRELIABLE);
+ for (i = 0; i < ARRAY_SIZE(module_license_offenders); ++i) {
+ if (strcmp(mod->name, module_license_offenders[i]) == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ LOCKDEP_NOW_UNRELIABLE);
+ }
if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
pr_warn("%s: module license taints kernel.\n", mod->name);
@@ -2948,9 +3029,12 @@ static noinline int do_init_module(struct module *mod)
/* Switch to core kallsyms now init is done: kallsyms may be walking! */
rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
- ret = module_enable_rodata_ro(mod, true);
+ ret = module_enable_rodata_ro_after_init(mod);
if (ret)
- goto fail_mutex_unlock;
+ pr_warn("%s: module_enable_rodata_ro_after_init() returned %d, "
+ "ro_after_init data might still be writable\n",
+ mod->name, ret);
+
mod_tree_remove_init(mod);
module_arch_freeing_init(mod);
for_class_mod_mem_type(type, init) {
@@ -2989,8 +3073,6 @@ static noinline int do_init_module(struct module *mod)
return 0;
-fail_mutex_unlock:
- mutex_unlock(&module_mutex);
fail_free_freeinit:
kfree(freeinit);
fail:
@@ -3118,7 +3200,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_bug_finalize(info->hdr, info->sechdrs, mod);
module_cfi_finalize(info->hdr, info->sechdrs, mod);
- err = module_enable_rodata_ro(mod, false);
+ err = module_enable_rodata_ro(mod);
if (err)
goto out_strict_rwx;
err = module_enable_data_nx(mod);
diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c
index 239e5013359d..74834ba15615 100644
--- a/kernel/module/strict_rwx.c
+++ b/kernel/module/strict_rwx.c
@@ -47,7 +47,7 @@ int module_enable_text_rox(const struct module *mod)
return 0;
}
-int module_enable_rodata_ro(const struct module *mod, bool after_init)
+int module_enable_rodata_ro(const struct module *mod)
{
int ret;
@@ -61,12 +61,17 @@ int module_enable_rodata_ro(const struct module *mod, bool after_init)
if (ret)
return ret;
- if (after_init)
- return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
-
return 0;
}
+int module_enable_rodata_ro_after_init(const struct module *mod)
+{
+ if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
+ return 0;
+
+ return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
+}
+
int module_enable_data_nx(const struct module *mod)
{
if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c
index 456358e1fdc4..b401ff4b02d2 100644
--- a/kernel/module/sysfs.c
+++ b/kernel/module/sysfs.c
@@ -19,24 +19,16 @@
* J. Corbet <corbet@lwn.net>
*/
#ifdef CONFIG_KALLSYMS
-struct module_sect_attr {
- struct bin_attribute battr;
- unsigned long address;
-};
-
struct module_sect_attrs {
struct attribute_group grp;
- unsigned int nsections;
- struct module_sect_attr attrs[];
+ struct bin_attribute attrs[];
};
#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *battr,
+ const struct bin_attribute *battr,
char *buf, loff_t pos, size_t count)
{
- struct module_sect_attr *sattr =
- container_of(battr, struct module_sect_attr, battr);
char bounce[MODULE_SECT_READ_SIZE + 1];
size_t wrote;
@@ -53,7 +45,7 @@ static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
*/
wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
kallsyms_show_value(file->f_cred)
- ? (void *)sattr->address : NULL);
+ ? battr->private : NULL);
count = min(count, wrote);
memcpy(buf, bounce, count);
@@ -62,59 +54,59 @@ static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
- unsigned int section;
+ const struct bin_attribute *const *bin_attr;
- for (section = 0; section < sect_attrs->nsections; section++)
- kfree(sect_attrs->attrs[section].battr.attr.name);
+ for (bin_attr = sect_attrs->grp.bin_attrs_new; *bin_attr; bin_attr++)
+ kfree((*bin_attr)->attr.name);
+ kfree(sect_attrs->grp.bin_attrs_new);
kfree(sect_attrs);
}
static int add_sect_attrs(struct module *mod, const struct load_info *info)
{
- unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
- struct module_sect_attr *sattr;
- struct bin_attribute **gattr;
+ const struct bin_attribute **gattr;
+ struct bin_attribute *sattr;
+ unsigned int nloaded = 0, i;
int ret;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]))
nloaded++;
- size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
- sizeof(sect_attrs->grp.bin_attrs[0]));
- size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
- sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
+ sect_attrs = kzalloc(struct_size(sect_attrs, attrs, nloaded), GFP_KERNEL);
if (!sect_attrs)
return -ENOMEM;
+ gattr = kcalloc(nloaded + 1, sizeof(*gattr), GFP_KERNEL);
+ if (!gattr) {
+ kfree(sect_attrs);
+ return -ENOMEM;
+ }
+
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
- sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
+ sect_attrs->grp.bin_attrs_new = gattr;
- sect_attrs->nsections = 0;
sattr = &sect_attrs->attrs[0];
- gattr = &sect_attrs->grp.bin_attrs[0];
for (i = 0; i < info->hdr->e_shnum; i++) {
Elf_Shdr *sec = &info->sechdrs[i];
if (sect_empty(sec))
continue;
- sysfs_bin_attr_init(&sattr->battr);
- sattr->address = sec->sh_addr;
- sattr->battr.attr.name =
+ sysfs_bin_attr_init(sattr);
+ sattr->attr.name =
kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
- if (!sattr->battr.attr.name) {
+ if (!sattr->attr.name) {
ret = -ENOMEM;
goto out;
}
- sect_attrs->nsections++;
- sattr->battr.read = module_sect_read;
- sattr->battr.size = MODULE_SECT_READ_SIZE;
- sattr->battr.attr.mode = 0400;
- *(gattr++) = &(sattr++)->battr;
+ sattr->read_new = module_sect_read;
+ sattr->private = (void *)sec->sh_addr;
+ sattr->size = MODULE_SECT_READ_SIZE;
+ sattr->attr.mode = 0400;
+ *(gattr++) = sattr++;
}
- *gattr = NULL;
ret = sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp);
if (ret)
@@ -146,20 +138,13 @@ static void remove_sect_attrs(struct module *mod)
*/
struct module_notes_attrs {
- struct kobject *dir;
- unsigned int notes;
- struct bin_attribute attrs[] __counted_by(notes);
+ struct attribute_group grp;
+ struct bin_attribute attrs[];
};
-static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
- unsigned int i)
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs)
{
- if (notes_attrs->dir) {
- while (i-- > 0)
- sysfs_remove_bin_file(notes_attrs->dir,
- &notes_attrs->attrs[i]);
- kobject_put(notes_attrs->dir);
- }
+ kfree(notes_attrs->grp.bin_attrs_new);
kfree(notes_attrs);
}
@@ -167,6 +152,7 @@ static int add_notes_attrs(struct module *mod, const struct load_info *info)
{
unsigned int notes, loaded, i;
struct module_notes_attrs *notes_attrs;
+ const struct bin_attribute **gattr;
struct bin_attribute *nattr;
int ret;
@@ -185,47 +171,55 @@ static int add_notes_attrs(struct module *mod, const struct load_info *info)
if (!notes_attrs)
return -ENOMEM;
- notes_attrs->notes = notes;
+ gattr = kcalloc(notes + 1, sizeof(*gattr), GFP_KERNEL);
+ if (!gattr) {
+ kfree(notes_attrs);
+ return -ENOMEM;
+ }
+
+ notes_attrs->grp.name = "notes";
+ notes_attrs->grp.bin_attrs_new = gattr;
+
nattr = &notes_attrs->attrs[0];
for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
if (sect_empty(&info->sechdrs[i]))
continue;
if (info->sechdrs[i].sh_type == SHT_NOTE) {
sysfs_bin_attr_init(nattr);
- nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].attr.name;
nattr->attr.mode = 0444;
nattr->size = info->sechdrs[i].sh_size;
nattr->private = (void *)info->sechdrs[i].sh_addr;
- nattr->read = sysfs_bin_attr_simple_read;
- ++nattr;
+ nattr->read_new = sysfs_bin_attr_simple_read;
+ *(gattr++) = nattr++;
}
++loaded;
}
- notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
- if (!notes_attrs->dir) {
- ret = -ENOMEM;
+ ret = sysfs_create_group(&mod->mkobj.kobj, &notes_attrs->grp);
+ if (ret)
goto out;
- }
-
- for (i = 0; i < notes; ++i) {
- ret = sysfs_create_bin_file(notes_attrs->dir, &notes_attrs->attrs[i]);
- if (ret)
- goto out;
- }
mod->notes_attrs = notes_attrs;
return 0;
out:
- free_notes_attrs(notes_attrs, i);
+ free_notes_attrs(notes_attrs);
return ret;
}
static void remove_notes_attrs(struct module *mod)
{
- if (mod->notes_attrs)
- free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+ if (mod->notes_attrs) {
+ sysfs_remove_group(&mod->mkobj.kobj,
+ &mod->notes_attrs->grp);
+ /*
+ * We are positive that no one is using any notes attrs
+ * at this point. Deallocate immediately.
+ */
+ free_notes_attrs(mod->notes_attrs);
+ mod->notes_attrs = NULL;
+ }
}
#else /* !CONFIG_KALLSYMS */
@@ -275,7 +269,7 @@ static int add_usage_links(struct module *mod)
static void module_remove_modinfo_attrs(struct module *mod, int end)
{
- struct module_attribute *attr;
+ const struct module_attribute *attr;
int i;
for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
@@ -293,7 +287,7 @@ static void module_remove_modinfo_attrs(struct module *mod, int end)
static int module_add_modinfo_attrs(struct module *mod)
{
- struct module_attribute *attr;
+ const struct module_attribute *attr;
struct module_attribute *temp_attr;
int error = 0;
int i;
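
The sysfs rework above drops the explicit nsections counter and instead hands the attribute group a NULL-terminated array of bin_attribute pointers, which both registration and teardown simply walk until the sentinel. A small user-space sketch of that sentinel-terminated-array cleanup pattern, with hypothetical names and for illustration only:

#include <stdlib.h>
#include <string.h>

struct demo_attr {
        char *name;
};

/* Walk the pointer array until the NULL sentinel, then free the array itself. */
static void free_demo_attrs(struct demo_attr **attrs)
{
        struct demo_attr **p;

        for (p = attrs; *p; p++)
                free((*p)->name);
        free(attrs);
}

int main(void)
{
        const char *names[] = { "text", "data", "bss" };
        size_t n = sizeof(names) / sizeof(names[0]);
        struct demo_attr *storage = calloc(n, sizeof(*storage));
        struct demo_attr **attrs = calloc(n + 1, sizeof(*attrs)); /* +1 for the sentinel */
        size_t i;

        for (i = 0; i < n; i++) {
                storage[i].name = strdup(names[i]);
                attrs[i] = &storage[i];
        }
        attrs[n] = NULL;        /* calloc already zeroed it; kept for clarity */

        free_demo_attrs(attrs);
        free(storage);
        return 0;
}
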
diff --git a/kernel/module/version.c b/kernel/module/version.c
index 53f43ac5a73e..3718a8868321 100644
--- a/kernel/module/version.c
+++ b/kernel/module/version.c
@@ -13,17 +13,34 @@
int check_version(const struct load_info *info,
const char *symname,
struct module *mod,
- const s32 *crc)
+ const u32 *crc)
{
Elf_Shdr *sechdrs = info->sechdrs;
unsigned int versindex = info->index.vers;
unsigned int i, num_versions;
struct modversion_info *versions;
+ struct modversion_info_ext version_ext;
/* Exporting module didn't supply crcs? OK, we're already tainted. */
if (!crc)
return 1;
+ /* If we have extended version info, rely on it */
+ if (info->index.vers_ext_crc) {
+ for_each_modversion_info_ext(version_ext, info) {
+ if (strcmp(version_ext.name, symname) != 0)
+ continue;
+ if (*version_ext.crc == *crc)
+ return 1;
+ pr_debug("Found checksum %X vs module %X\n",
+ *crc, *version_ext.crc);
+ goto bad_version;
+ }
+ pr_warn_once("%s: no extended symbol version for %s\n",
+ info->name, symname);
+ return 1;
+ }
+
/* No versions at all? modprobe --force does this. */
if (versindex == 0)
return try_to_force_load(mod, symname) == 0;
@@ -87,6 +104,34 @@ int same_magic(const char *amagic, const char *bmagic,
return strcmp(amagic, bmagic) == 0;
}
+void modversion_ext_start(const struct load_info *info,
+ struct modversion_info_ext *start)
+{
+ unsigned int crc_idx = info->index.vers_ext_crc;
+ unsigned int name_idx = info->index.vers_ext_name;
+ Elf_Shdr *sechdrs = info->sechdrs;
+
+ /*
+ * Both of these fields are needed for this to be useful
+ * Any future fields should be initialized to NULL if absent.
+ */
+ if (crc_idx == 0 || name_idx == 0) {
+ start->remaining = 0;
+ return;
+ }
+
+ start->crc = (const u32 *)sechdrs[crc_idx].sh_addr;
+ start->name = (const char *)sechdrs[name_idx].sh_addr;
+ start->remaining = sechdrs[crc_idx].sh_size / sizeof(*start->crc);
+}
+
+void modversion_ext_advance(struct modversion_info_ext *vers)
+{
+ vers->remaining--;
+ vers->crc++;
+ vers->name += strlen(vers->name) + 1;
+}
+
/*
* Generate the signature for all relevant module structures here.
* If these change, we don't want to try to parse the module.
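
The extended-modversion support added here stores the CRCs as a flat array of u32 in one ELF section and the matching symbol names as consecutive NUL-terminated strings in another, and for_each_modversion_info_ext() advances both cursors in lockstep. A standalone sketch of walking that layout, using made-up data and for illustration only:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Hypothetical contents of __version_ext_crcs / __version_ext_names. */
        const uint32_t crcs[] = { 0x12345678, 0x9abcdef0 };
        const char names[] = "symbol_a\0symbol_b";
        size_t remaining = sizeof(crcs) / sizeof(crcs[0]);
        const uint32_t *crc = crcs;
        const char *name = names;

        /* Mirrors modversion_ext_start()/_advance(): step both cursors together. */
        while (remaining--) {
                printf("%s -> 0x%08" PRIx32 "\n", name, *crc);
                crc++;
                name += strlen(name) + 1;
        }
        return 0;
}
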
diff --git a/kernel/padata.c b/kernel/padata.c
index d51bbc76b227..418987056340 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -47,6 +47,22 @@ struct padata_mt_job_state {
static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);
+static inline void padata_get_pd(struct parallel_data *pd)
+{
+ refcount_inc(&pd->refcnt);
+}
+
+static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
+{
+ if (refcount_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
+}
+
+static inline void padata_put_pd(struct parallel_data *pd)
+{
+ padata_put_pd_cnt(pd, 1);
+}
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
if ((pinst->flags & PADATA_RESET))
goto out;
- refcount_inc(&pd->refcnt);
+ padata_get_pd(pd);
padata->pd = pd;
padata->cb_cpu = *cb_cpu;
@@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
smp_mb();
reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
- if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+ if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+ /*
+ * Other contexts (e.g. the padata_serial_worker) can finish the request.
+ * To avoid a use-after-free, take a pd ref here and drop it after reorder_work finishes.
+ */
+ padata_get_pd(pd);
queue_work(pinst->serial_wq, &pd->reorder_work);
+ }
}
static void invoke_padata_reorder(struct work_struct *work)
@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
local_bh_enable();
+ /* Pairs with putting the reorder_work in the serial_wq */
+ padata_put_pd(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
}
local_bh_enable();
- if (refcount_sub_and_test(cnt, &pd->refcnt))
- padata_free_pd(pd);
+ padata_put_pd_cnt(pd, cnt);
}
/**
@@ -681,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
synchronize_rcu();
list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
- if (refcount_dec_and_test(&ps->opd->refcnt))
- padata_free_pd(ps->opd);
+ padata_put_pd(ps->opd);
pinst->flags &= ~PADATA_RESET;
@@ -970,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
pinst = kobj2pinst(kobj);
pentry = attr2pentry(attr);
- if (pentry->show)
+ if (pentry->store)
ret = pentry->store(pinst, attr, buf, count);
return ret;
@@ -1121,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
if (!ps)
return;
+ /*
+ * Wait for all _do_serial calls to finish to avoid touching
+ * freed pd's and ps's.
+ */
+ synchronize_rcu();
+
mutex_lock(&ps->pinst->lock);
list_del(&ps->list);
pd = rcu_dereference_protected(ps->pd, 1);
- if (refcount_dec_and_test(&pd->refcnt))
- padata_free_pd(pd);
+ padata_put_pd(pd);
mutex_unlock(&ps->pinst->lock);
kfree(ps);
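
The padata changes wrap the raw refcount operations in padata_get_pd()/padata_put_pd() helpers so every drop site frees the parallel_data the same way, and take an extra reference across the queued reorder_work to close the use-after-free window. A rough user-space sketch of that get/put wrapper pattern using C11 atomics, with hypothetical names and for illustration only:

#include <stdatomic.h>
#include <stdlib.h>

struct demo_data {
        atomic_int refcnt;
};

static void demo_get(struct demo_data *d)
{
        atomic_fetch_add(&d->refcnt, 1);
}

/* Drop cnt references at once; free the object when the count hits zero. */
static void demo_put_cnt(struct demo_data *d, int cnt)
{
        if (atomic_fetch_sub(&d->refcnt, cnt) == cnt)
                free(d);
}

static void demo_put(struct demo_data *d)
{
        demo_put_cnt(d, 1);
}

int main(void)
{
        struct demo_data *d = malloc(sizeof(*d));

        atomic_init(&d->refcnt, 1);     /* initial reference */
        demo_get(d);                    /* extra ref for deferred work */
        demo_put(d);                    /* deferred work finished */
        demo_put(d);                    /* last ref: object freed here */
        return 0;
}
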
diff --git a/kernel/panic.c b/kernel/panic.c
index fbc59b3b64d0..d8635d5cecb2 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -84,7 +84,7 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_panic_table[] = {
+static const struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
{
.procname = "oops_all_cpu_backtrace",
diff --git a/kernel/params.c b/kernel/params.c
index 2e447f8ae183..0074d29c9b80 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -538,7 +538,7 @@ const struct kernel_param_ops param_ops_string = {
EXPORT_SYMBOL(param_ops_string);
/* sysfs output in /sys/modules/XYZ/parameters/ */
-#define to_module_attr(n) container_of(n, struct module_attribute, attr)
+#define to_module_attr(n) container_of_const(n, struct module_attribute, attr)
#define to_module_kobject(n) container_of(n, struct module_kobject, kobj)
struct param_attribute
@@ -555,13 +555,13 @@ struct module_param_attrs
};
#ifdef CONFIG_SYSFS
-#define to_param_attr(n) container_of(n, struct param_attribute, mattr)
+#define to_param_attr(n) container_of_const(n, struct param_attribute, mattr)
-static ssize_t param_attr_show(struct module_attribute *mattr,
+static ssize_t param_attr_show(const struct module_attribute *mattr,
struct module_kobject *mk, char *buf)
{
int count;
- struct param_attribute *attribute = to_param_attr(mattr);
+ const struct param_attribute *attribute = to_param_attr(mattr);
if (!attribute->param->ops->get)
return -EPERM;
@@ -573,12 +573,12 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
}
/* sysfs always hands a nul-terminated string in buf. We rely on that. */
-static ssize_t param_attr_store(struct module_attribute *mattr,
+static ssize_t param_attr_store(const struct module_attribute *mattr,
struct module_kobject *mk,
const char *buf, size_t len)
{
int err;
- struct param_attribute *attribute = to_param_attr(mattr);
+ const struct param_attribute *attribute = to_param_attr(mattr);
if (!attribute->param->ops->set)
return -EPERM;
@@ -857,11 +857,11 @@ static void __init param_sysfs_builtin(void)
}
}
-ssize_t __modver_version_show(struct module_attribute *mattr,
+ssize_t __modver_version_show(const struct module_attribute *mattr,
struct module_kobject *mk, char *buf)
{
- struct module_version_attribute *vattr =
- container_of(mattr, struct module_version_attribute, mattr);
+ const struct module_version_attribute *vattr =
+ container_of_const(mattr, struct module_version_attribute, mattr);
return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
}
@@ -892,7 +892,7 @@ static ssize_t module_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
- struct module_attribute *attribute;
+ const struct module_attribute *attribute;
struct module_kobject *mk;
int ret;
@@ -911,7 +911,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
- struct module_attribute *attribute;
+ const struct module_attribute *attribute;
struct module_kobject *mk;
int ret;
diff --git a/kernel/pid.c b/kernel/pid.c
index 3a10a7b6fcf8..924084713be8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -695,7 +695,7 @@ static struct ctl_table_root pid_table_root = {
.set_ownership = pid_table_root_set_ownership,
};
-static struct ctl_table pid_table[] = {
+static const struct ctl_table pid_table[] = {
{
.procname = "pid_max",
.data = &init_pid_ns.pid_max,
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index f1ffa032fc32..8f6cfec87555 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -303,7 +303,7 @@ static int pid_ns_ctl_handler(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table pid_ns_ctl_table[] = {
+static const struct ctl_table pid_ns_ctl_table[] = {
{
.procname = "ns_last_pid",
.maxlen = sizeof(int),
diff --git a/kernel/pid_sysctl.h b/kernel/pid_sysctl.h
index 18ecaef6be41..5d8f981de7c5 100644
--- a/kernel/pid_sysctl.h
+++ b/kernel/pid_sysctl.h
@@ -31,7 +31,7 @@ static int pid_mfd_noexec_dointvec_minmax(const struct ctl_table *table,
return err;
}
-static struct ctl_table pid_ns_ctl_table_vm[] = {
+static const struct ctl_table pid_ns_ctl_table_vm[] = {
{
.procname = "memfd_noexec",
.data = &init_pid_ns.memfd_noexec_scope,
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 1f87aa01ba44..10a01af63a80 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -608,7 +608,11 @@ int hibernation_platform_enter(void)
local_irq_disable();
system_state = SYSTEM_SUSPEND;
- syscore_suspend();
+
+ error = syscore_suspend();
+ if (error)
+ goto Enable_irqs;
+
if (pm_wakeup_pending()) {
error = -EAGAIN;
goto Power_up;
@@ -620,6 +624,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
+ Enable_irqs:
system_state = SYSTEM_RUNNING;
local_irq_enable();
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 30894d8f0a78..c9fb559a6399 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1011,11 +1011,8 @@ void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pf
}
}
/* This allocation cannot fail */
- region = memblock_alloc(sizeof(struct nosave_region),
+ region = memblock_alloc_or_panic(sizeof(struct nosave_region),
SMP_CACHE_BYTES);
- if (!region)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct nosave_region));
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c
index f5072dc85f7a..da77f3f5c1fe 100644
--- a/kernel/printk/sysctl.c
+++ b/kernel/printk/sysctl.c
@@ -20,7 +20,7 @@ static int proc_dointvec_minmax_sysadmin(const struct ctl_table *table, int writ
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
-static struct ctl_table printk_sysctls[] = {
+static const struct ctl_table printk_sysctls[] = {
{
.procname = "printk",
.data = &console_loglevel,
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index b3b3ce34df63..4b3f31911465 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head)
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
__kvfree_call_rcu(head, ptr);
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2795d6b5109c..475f31deed14 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3062,7 +3062,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
}
head->func = func;
head->next = NULL;
- kasan_record_aux_stack_noalloc(head);
+ kasan_record_aux_stack(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
diff --git a/kernel/reboot.c b/kernel/reboot.c
index a701000bab34..b5a8569e5d81 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -1287,7 +1287,7 @@ static struct attribute *reboot_attrs[] = {
};
#ifdef CONFIG_SYSCTL
-static struct ctl_table kern_reboot_table[] = {
+static const struct ctl_table kern_reboot_table[] = {
{
.procname = "poweroff_cmd",
.data = &poweroff_cmd,
diff --git a/kernel/resource.c b/kernel/resource.c
index b7c0e24d9398..12004452d999 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1683,8 +1683,7 @@ void __devm_release_region(struct device *dev, struct resource *parent,
{
struct region_devres match_data = { parent, start, n };
- __release_region(parent, start, n);
- WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
+ WARN_ON(devres_release(dev, devm_region_release, devm_region_match,
&match_data));
}
EXPORT_SYMBOL(__devm_release_region);
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index db68a964e34e..83d46b9b8ec8 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -9,7 +9,7 @@ static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;
#ifdef CONFIG_SYSCTL
-static struct ctl_table sched_autogroup_sysctls[] = {
+static const struct ctl_table sched_autogroup_sysctls[] = {
{
.procname = "sched_autogroup_enabled",
.data = &sysctl_sched_autogroup_enabled,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88a9a515b2ba..165c90ba64ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -793,6 +793,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
void update_rq_clock(struct rq *rq)
{
s64 delta;
+ u64 clock;
lockdep_assert_rq_held(rq);
@@ -804,11 +805,14 @@ void update_rq_clock(struct rq *rq)
SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
rq->clock_update_flags |= RQCF_UPDATED;
#endif
+ clock = sched_clock_cpu(cpu_of(rq));
+ scx_rq_clock_update(rq, clock);
- delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+ delta = clock - rq->clock;
if (delta < 0)
return;
rq->clock += delta;
+
update_rq_clock_task(rq, delta);
}
@@ -4650,7 +4654,7 @@ static int sysctl_schedstats(const struct ctl_table *table, int write, void *buf
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_SYSCTL
-static struct ctl_table sched_core_sysctls[] = {
+static const struct ctl_table sched_core_sysctls[] = {
#ifdef CONFIG_SCHEDSTATS
{
.procname = "sched_schedstats",
@@ -7705,9 +7709,9 @@ void sched_show_task(struct task_struct *p)
if (pid_alive(p))
ppid = task_pid_nr(rcu_dereference(p->real_parent));
rcu_read_unlock();
- pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
+ pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
free, task_pid_nr(p), task_tgid_nr(p),
- ppid, read_task_thread_flags(p));
+ ppid, p->flags, read_task_thread_flags(p));
print_worker_info(KERN_INFO, p);
print_stop_info(KERN_INFO, p);
@@ -7934,19 +7938,26 @@ void sched_setnuma(struct task_struct *p, int nid)
#ifdef CONFIG_HOTPLUG_CPU
/*
- * Ensure that the idle task is using init_mm right before its CPU goes
- * offline.
+ * Invoked on the outgoing CPU in context of the CPU hotplug thread
+ * after ensuring that there are no user space tasks left on the CPU.
+ *
+ * If there is a lazy mm in use on the hotplug thread, drop it and
+ * switch to init_mm.
+ *
+ * The reference count on init_mm is dropped in finish_cpu().
*/
-void idle_task_exit(void)
+static void sched_force_init_mm(void)
{
struct mm_struct *mm = current->active_mm;
- BUG_ON(cpu_online(smp_processor_id()));
- BUG_ON(current != this_rq()->idle);
-
if (mm != &init_mm) {
- switch_mm(mm, &init_mm, current);
+ mmgrab_lazy_tlb(&init_mm);
+ local_irq_disable();
+ current->active_mm = &init_mm;
+ switch_mm_irqs_off(mm, &init_mm, current);
+ local_irq_enable();
finish_arch_post_lock_switch();
+ mmdrop_lazy_tlb(mm);
}
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
@@ -8340,6 +8351,7 @@ int sched_cpu_starting(unsigned int cpu)
int sched_cpu_wait_empty(unsigned int cpu)
{
balance_hotplug_wait();
+ sched_force_init_mm();
return 0;
}
@@ -10586,7 +10598,7 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
return;
/* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ task_work_add(curr, work, TWA_RESUME);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index a2a29e3fffca..1a19d69b91ed 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -666,7 +666,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
sg_policy->thread = thread;
- kthread_bind_mask(thread, policy->related_cpus);
+ if (policy->dvfs_possible_from_any_cpu)
+ set_cpus_allowed_ptr(thread, policy->related_cpus);
+ else
+ kthread_bind_mask(thread, policy->related_cpus);
+
init_irq_work(&sg_policy->irq_work, sugov_irq_work);
mutex_init(&sg_policy->work_lock);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 62192ac79c30..38e4537790af 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -26,7 +26,7 @@
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
#ifdef CONFIG_SYSCTL
-static struct ctl_table sched_dl_sysctls[] = {
+static const struct ctl_table sched_dl_sysctls[] = {
{
.procname = "sched_deadline_period_max_us",
.data = &sysctl_sched_dl_period_max,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fd7e85220715..ef047add7f9e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1262,6 +1262,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
+ } else if (fair_policy(p->policy)) {
+ P(se.slice);
}
#ifdef CONFIG_SCHED_CLASS_EXT
__PS("ext.enabled", task_on_scx(p));
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7fee43426ee7..8857c0709bdd 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -206,7 +206,7 @@ struct scx_dump_ctx {
*/
struct sched_ext_ops {
/**
- * select_cpu - Pick the target CPU for a task which is being woken up
+ * @select_cpu: Pick the target CPU for a task which is being woken up
* @p: task being woken up
* @prev_cpu: the cpu @p was on before sleeping
* @wake_flags: SCX_WAKE_*
@@ -233,7 +233,7 @@ struct sched_ext_ops {
s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
/**
- * enqueue - Enqueue a task on the BPF scheduler
+ * @enqueue: Enqueue a task on the BPF scheduler
* @p: task being enqueued
* @enq_flags: %SCX_ENQ_*
*
@@ -248,7 +248,7 @@ struct sched_ext_ops {
void (*enqueue)(struct task_struct *p, u64 enq_flags);
/**
- * dequeue - Remove a task from the BPF scheduler
+ * @dequeue: Remove a task from the BPF scheduler
* @p: task being dequeued
* @deq_flags: %SCX_DEQ_*
*
@@ -264,7 +264,7 @@ struct sched_ext_ops {
void (*dequeue)(struct task_struct *p, u64 deq_flags);
/**
- * dispatch - Dispatch tasks from the BPF scheduler and/or user DSQs
+ * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
* @cpu: CPU to dispatch tasks for
* @prev: previous task being switched out
*
@@ -287,7 +287,7 @@ struct sched_ext_ops {
void (*dispatch)(s32 cpu, struct task_struct *prev);
/**
- * tick - Periodic tick
+ * @tick: Periodic tick
* @p: task running currently
*
* This operation is called every 1/HZ seconds on CPUs which are
@@ -297,7 +297,7 @@ struct sched_ext_ops {
void (*tick)(struct task_struct *p);
/**
- * runnable - A task is becoming runnable on its associated CPU
+ * @runnable: A task is becoming runnable on its associated CPU
* @p: task becoming runnable
* @enq_flags: %SCX_ENQ_*
*
@@ -324,7 +324,7 @@ struct sched_ext_ops {
void (*runnable)(struct task_struct *p, u64 enq_flags);
/**
- * running - A task is starting to run on its associated CPU
+ * @running: A task is starting to run on its associated CPU
* @p: task starting to run
*
* See ->runnable() for explanation on the task state notifiers.
@@ -332,7 +332,7 @@ struct sched_ext_ops {
void (*running)(struct task_struct *p);
/**
- * stopping - A task is stopping execution
+ * @stopping: A task is stopping execution
* @p: task stopping to run
* @runnable: is task @p still runnable?
*
@@ -343,7 +343,7 @@ struct sched_ext_ops {
void (*stopping)(struct task_struct *p, bool runnable);
/**
- * quiescent - A task is becoming not runnable on its associated CPU
+ * @quiescent: A task is becoming not runnable on its associated CPU
* @p: task becoming not runnable
* @deq_flags: %SCX_DEQ_*
*
@@ -363,7 +363,7 @@ struct sched_ext_ops {
void (*quiescent)(struct task_struct *p, u64 deq_flags);
/**
- * yield - Yield CPU
+ * @yield: Yield CPU
* @from: yielding task
* @to: optional yield target task
*
@@ -378,7 +378,7 @@ struct sched_ext_ops {
bool (*yield)(struct task_struct *from, struct task_struct *to);
/**
- * core_sched_before - Task ordering for core-sched
+ * @core_sched_before: Task ordering for core-sched
* @a: task A
* @b: task B
*
@@ -396,7 +396,7 @@ struct sched_ext_ops {
bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
/**
- * set_weight - Set task weight
+ * @set_weight: Set task weight
* @p: task to set weight for
* @weight: new weight [1..10000]
*
@@ -405,7 +405,7 @@ struct sched_ext_ops {
void (*set_weight)(struct task_struct *p, u32 weight);
/**
- * set_cpumask - Set CPU affinity
+ * @set_cpumask: Set CPU affinity
* @p: task to set CPU affinity for
* @cpumask: cpumask of cpus that @p can run on
*
@@ -415,7 +415,7 @@ struct sched_ext_ops {
const struct cpumask *cpumask);
/**
- * update_idle - Update the idle state of a CPU
+ * @update_idle: Update the idle state of a CPU
* @cpu: CPU to update the idle state for
* @idle: whether entering or exiting the idle state
*
@@ -436,7 +436,7 @@ struct sched_ext_ops {
void (*update_idle)(s32 cpu, bool idle);
/**
- * cpu_acquire - A CPU is becoming available to the BPF scheduler
+ * @cpu_acquire: A CPU is becoming available to the BPF scheduler
* @cpu: The CPU being acquired by the BPF scheduler.
* @args: Acquire arguments, see the struct definition.
*
@@ -446,7 +446,7 @@ struct sched_ext_ops {
void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
/**
- * cpu_release - A CPU is taken away from the BPF scheduler
+ * @cpu_release: A CPU is taken away from the BPF scheduler
* @cpu: The CPU being released by the BPF scheduler.
* @args: Release arguments, see the struct definition.
*
@@ -458,7 +458,7 @@ struct sched_ext_ops {
void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
/**
- * init_task - Initialize a task to run in a BPF scheduler
+ * @init_task: Initialize a task to run in a BPF scheduler
* @p: task to initialize for BPF scheduling
* @args: init arguments, see the struct definition
*
@@ -473,8 +473,9 @@ struct sched_ext_ops {
s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
/**
- * exit_task - Exit a previously-running task from the system
+ * @exit_task: Exit a previously-running task from the system
* @p: task to exit
+ * @args: exit arguments, see the struct definition
*
* @p is exiting or the BPF scheduler is being unloaded. Perform any
* necessary cleanup for @p.
@@ -482,7 +483,7 @@ struct sched_ext_ops {
void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
/**
- * enable - Enable BPF scheduling for a task
+ * @enable: Enable BPF scheduling for a task
* @p: task to enable BPF scheduling for
*
* Enable @p for BPF scheduling. enable() is called on @p any time it
@@ -491,7 +492,7 @@ struct sched_ext_ops {
void (*enable)(struct task_struct *p);
/**
- * disable - Disable BPF scheduling for a task
+ * @disable: Disable BPF scheduling for a task
* @p: task to disable BPF scheduling for
*
* @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
@@ -501,7 +502,7 @@ struct sched_ext_ops {
void (*disable)(struct task_struct *p);
/**
- * dump - Dump BPF scheduler state on error
+ * @dump: Dump BPF scheduler state on error
* @ctx: debug dump context
*
* Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
@@ -509,7 +510,7 @@ struct sched_ext_ops {
void (*dump)(struct scx_dump_ctx *ctx);
/**
- * dump_cpu - Dump BPF scheduler state for a CPU on error
+ * @dump_cpu: Dump BPF scheduler state for a CPU on error
* @ctx: debug dump context
* @cpu: CPU to generate debug dump for
* @idle: @cpu is currently idle without any runnable tasks
@@ -521,7 +522,7 @@ struct sched_ext_ops {
void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
/**
- * dump_task - Dump BPF scheduler state for a runnable task on error
+ * @dump_task: Dump BPF scheduler state for a runnable task on error
* @ctx: debug dump context
* @p: runnable task to generate debug dump for
*
@@ -532,7 +533,7 @@ struct sched_ext_ops {
#ifdef CONFIG_EXT_GROUP_SCHED
/**
- * cgroup_init - Initialize a cgroup
+ * @cgroup_init: Initialize a cgroup
* @cgrp: cgroup being initialized
* @args: init arguments, see the struct definition
*
@@ -547,7 +548,7 @@ struct sched_ext_ops {
struct scx_cgroup_init_args *args);
/**
- * cgroup_exit - Exit a cgroup
+ * @cgroup_exit: Exit a cgroup
* @cgrp: cgroup being exited
*
* Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
@@ -556,7 +557,7 @@ struct sched_ext_ops {
void (*cgroup_exit)(struct cgroup *cgrp);
/**
- * cgroup_prep_move - Prepare a task to be moved to a different cgroup
+ * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
* @p: task being moved
* @from: cgroup @p is being moved from
* @to: cgroup @p is being moved to
@@ -571,7 +572,7 @@ struct sched_ext_ops {
struct cgroup *from, struct cgroup *to);
/**
- * cgroup_move - Commit cgroup move
+ * @cgroup_move: Commit cgroup move
* @p: task being moved
* @from: cgroup @p is being moved from
* @to: cgroup @p is being moved to
@@ -582,7 +583,7 @@ struct sched_ext_ops {
struct cgroup *from, struct cgroup *to);
/**
- * cgroup_cancel_move - Cancel cgroup move
+ * @cgroup_cancel_move: Cancel cgroup move
* @p: task whose cgroup move is being canceled
* @from: cgroup @p was being moved from
* @to: cgroup @p was being moved to
@@ -594,7 +595,7 @@ struct sched_ext_ops {
struct cgroup *from, struct cgroup *to);
/**
- * cgroup_set_weight - A cgroup's weight is being changed
+ * @cgroup_set_weight: A cgroup's weight is being changed
* @cgrp: cgroup whose weight is being updated
* @weight: new weight [1..10000]
*
@@ -608,7 +609,7 @@ struct sched_ext_ops {
*/
/**
- * cpu_online - A CPU became online
+ * @cpu_online: A CPU became online
* @cpu: CPU which just came up
*
* @cpu just came online. @cpu will not call ops.enqueue() or
@@ -617,7 +618,7 @@ struct sched_ext_ops {
void (*cpu_online)(s32 cpu);
/**
- * cpu_offline - A CPU is going offline
+ * @cpu_offline: A CPU is going offline
* @cpu: CPU which is going offline
*
* @cpu is going offline. @cpu will not call ops.enqueue() or
@@ -630,12 +631,12 @@ struct sched_ext_ops {
*/
/**
- * init - Initialize the BPF scheduler
+ * @init: Initialize the BPF scheduler
*/
s32 (*init)(void);
/**
- * exit - Clean up after the BPF scheduler
+ * @exit: Clean up after the BPF scheduler
* @info: Exit info
*
* ops.exit() is also called on ops.init() failure, which is a bit
@@ -645,17 +646,17 @@ struct sched_ext_ops {
void (*exit)(struct scx_exit_info *info);
/**
- * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
+ * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
*/
u32 dispatch_max_batch;
/**
- * flags - %SCX_OPS_* flags
+ * @flags: %SCX_OPS_* flags
*/
u64 flags;
/**
- * timeout_ms - The maximum amount of time, in milliseconds, that a
+ * @timeout_ms: The maximum amount of time, in milliseconds, that a
* runnable task should be able to wait before being scheduled. The
* maximum timeout may not exceed the default timeout of 30 seconds.
*
@@ -664,13 +665,13 @@ struct sched_ext_ops {
u32 timeout_ms;
/**
- * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
+ * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
* value of 32768 is used.
*/
u32 exit_dump_len;
/**
- * hotplug_seq - A sequence number that may be set by the scheduler to
+ * @hotplug_seq: A sequence number that may be set by the scheduler to
* detect when a hotplug event has occurred during the loading process.
* If 0, no detection occurs. Otherwise, the scheduler will fail to
* load if the sequence number does not match @scx_hotplug_seq on the
@@ -679,7 +680,7 @@ struct sched_ext_ops {
u64 hotplug_seq;
/**
- * name - BPF scheduler's name
+ * @name: BPF scheduler's name
*
* Must be a non-zero valid BPF object name including only isalnum(),
* '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
@@ -960,7 +961,7 @@ static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
static struct scx_dispatch_q **global_dsqs;
static const struct rhashtable_params dsq_hash_params = {
- .key_len = 8,
+ .key_len = sizeof_field(struct scx_dispatch_q, id),
.key_offset = offsetof(struct scx_dispatch_q, id),
.head_offset = offsetof(struct scx_dispatch_q, hash_node),
};
@@ -1408,7 +1409,6 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
/**
* scx_task_iter_next_locked - Next non-idle task with its rq locked
* @iter: iterator to walk
- * @include_dead: Whether we should include dead tasks in the iteration
*
* Visit the non-idle task with its rq lock held. Allows callers to specify
* whether they would like to filter out dead tasks. See scx_task_iter_start()
@@ -3136,6 +3136,7 @@ static struct task_struct *pick_task_scx(struct rq *rq)
* scx_prio_less - Task ordering for core-sched
* @a: task A
* @b: task B
+ * @in_fi: in forced idle state
*
* Core-sched is implemented as an additional scheduling layer on top of the
* usual sched_class'es and needs to find out the expected task ordering. For
@@ -3184,6 +3185,10 @@ static bool test_and_clear_cpu_idle(int cpu)
* scx_pick_idle_cpu() can get caught in an infinite loop as
* @cpu is never cleared from idle_masks.smt. Ensure that @cpu
* is eventually cleared.
+ *
+ * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
+ * reduce memory writes, which may help alleviate cache
+ * coherence pressure.
*/
if (cpumask_intersects(smt, idle_masks.smt))
cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
@@ -3220,6 +3225,74 @@ found:
}
/*
+ * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
+ * domain is not defined).
+ */
+static unsigned int llc_weight(s32 cpu)
+{
+ struct sched_domain *sd;
+
+ sd = rcu_dereference(per_cpu(sd_llc, cpu));
+ if (!sd)
+ return 0;
+
+ return sd->span_weight;
+}
+
+/*
+ * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
+ * domain is not defined).
+ */
+static struct cpumask *llc_span(s32 cpu)
+{
+ struct sched_domain *sd;
+
+ sd = rcu_dereference(per_cpu(sd_llc, cpu));
+ if (!sd)
+ return NULL;
+
+ return sched_domain_span(sd);
+}
+
+/*
+ * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
+ * NUMA domain is not defined).
+ */
+static unsigned int numa_weight(s32 cpu)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+
+ sd = rcu_dereference(per_cpu(sd_numa, cpu));
+ if (!sd)
+ return 0;
+ sg = sd->groups;
+ if (!sg)
+ return 0;
+
+ return sg->group_weight;
+}
+
+/*
+ * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
+ * domain is not defined).
+ */
+static struct cpumask *numa_span(s32 cpu)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+
+ sd = rcu_dereference(per_cpu(sd_numa, cpu));
+ if (!sd)
+ return NULL;
+ sg = sd->groups;
+ if (!sg)
+ return NULL;
+
+ return sched_group_span(sg);
+}
+
+/*
* Return true if the LLC domains do not perfectly overlap with the NUMA
* domains, false otherwise.
*/
@@ -3250,19 +3323,10 @@ static bool llc_numa_mismatch(void)
* overlapping, which is incorrect (as NUMA 1 has two distinct LLC
* domains).
*/
- for_each_online_cpu(cpu) {
- const struct cpumask *numa_cpus;
- struct sched_domain *sd;
-
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (!sd)
+ for_each_online_cpu(cpu)
+ if (llc_weight(cpu) != numa_weight(cpu))
return true;
- numa_cpus = cpumask_of_node(cpu_to_node(cpu));
- if (sd->span_weight != cpumask_weight(numa_cpus))
- return true;
- }
-
return false;
}
@@ -3280,8 +3344,7 @@ static bool llc_numa_mismatch(void)
static void update_selcpu_topology(void)
{
bool enable_llc = false, enable_numa = false;
- struct sched_domain *sd;
- const struct cpumask *cpus;
+ unsigned int nr_cpus;
s32 cpu = cpumask_first(cpu_online_mask);
/*
@@ -3295,10 +3358,12 @@ static void update_selcpu_topology(void)
* CPUs.
*/
rcu_read_lock();
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (sd) {
- if (sd->span_weight < num_online_cpus())
+ nr_cpus = llc_weight(cpu);
+ if (nr_cpus > 0) {
+ if (nr_cpus < num_online_cpus())
enable_llc = true;
+ pr_debug("sched_ext: LLC=%*pb weight=%u\n",
+ cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
}
/*
@@ -3310,15 +3375,19 @@ static void update_selcpu_topology(void)
* enabling both NUMA and LLC optimizations is unnecessary, as checking
* for an idle CPU in the same domain twice is redundant.
*/
- cpus = cpumask_of_node(cpu_to_node(cpu));
- if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch())
- enable_numa = true;
+ nr_cpus = numa_weight(cpu);
+ if (nr_cpus > 0) {
+ if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+ enable_numa = true;
+ pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+ cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+ }
rcu_read_unlock();
pr_debug("sched_ext: LLC idle selection %s\n",
- enable_llc ? "enabled" : "disabled");
+ str_enabled_disabled(enable_llc));
pr_debug("sched_ext: NUMA idle selection %s\n",
- enable_numa ? "enabled" : "disabled");
+ str_enabled_disabled(enable_numa));
if (enable_llc)
static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
@@ -3348,6 +3417,8 @@ static void update_selcpu_topology(void)
* 4. Pick a CPU within the same NUMA node, if enabled:
* - choose a CPU from the same NUMA node to reduce memory access latency.
*
+ * 5. Pick any idle CPU usable by the task.
+ *
* Step 3 and 4 are performed only if the system has, respectively, multiple
* LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
* scx_selcpu_topo_numa).
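As an orientation aid (not part of the patch): below is a condensed, non-authoritative sketch of the selection order described above, written against helpers this series touches (test_and_clear_cpu_idle(), scx_pick_idle_cpu()). It deliberately omits the SMT handling of step 2, the WAKE_SYNC path, locking, and the *found bookkeeping of the real scx_select_cpu_dfl(); the function name is made up.

/*
 * Sketch only: rough priority order of the default idle-CPU selection.
 */
static s32 sketch_select_cpu(struct task_struct *p, s32 prev_cpu,
			     const struct cpumask *llc_cpus,
			     const struct cpumask *numa_cpus)
{
	s32 cpu;

	/* 1. Keep prev_cpu if it is still idle. */
	if (test_and_clear_cpu_idle(prev_cpu))
		return prev_cpu;

	/* 3. Otherwise prefer an idle CPU in the same LLC domain, if enabled. */
	if (llc_cpus) {
		cpu = scx_pick_idle_cpu(llc_cpus, 0);
		if (cpu >= 0)
			return cpu;
	}

	/* 4. Then an idle CPU in the same NUMA node, if enabled. */
	if (numa_cpus) {
		cpu = scx_pick_idle_cpu(numa_cpus, 0);
		if (cpu >= 0)
			return cpu;
	}

	/* 5. Finally, any idle CPU usable by the task. */
	return scx_pick_idle_cpu(p->cpus_ptr, 0);
}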
@@ -3364,7 +3435,6 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
*found = false;
-
/*
* This is necessary to protect llc_cpus.
*/
@@ -3383,15 +3453,10 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
*/
if (p->nr_cpus_allowed >= num_possible_cpus()) {
if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
- numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
-
- if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
- struct sched_domain *sd;
+ numa_cpus = numa_span(prev_cpu);
- sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
- if (sd)
- llc_cpus = sched_domain_span(sd);
- }
+ if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
+ llc_cpus = llc_span(prev_cpu);
}
/*
@@ -3592,10 +3657,7 @@ static void reset_idle_masks(void)
static void update_builtin_idle(int cpu, bool idle)
{
- if (idle)
- cpumask_set_cpu(cpu, idle_masks.cpu);
- else
- cpumask_clear_cpu(cpu, idle_masks.cpu);
+ assign_cpu(cpu, idle_masks.cpu, idle);
#ifdef CONFIG_SCHED_SMT
if (sched_smt_active()) {
@@ -3606,10 +3668,8 @@ static void update_builtin_idle(int cpu, bool idle)
* idle_masks.smt handling is racy but that's fine as
* it's only for optimization and self-correcting.
*/
- for_each_cpu(cpu, smt) {
- if (!cpumask_test_cpu(cpu, idle_masks.cpu))
- return;
- }
+ if (!cpumask_subset(smt, idle_masks.cpu))
+ return;
cpumask_or(idle_masks.smt, idle_masks.smt, smt);
} else {
cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
@@ -4688,6 +4748,7 @@ bool task_should_scx(int policy)
/**
* scx_softlockup - sched_ext softlockup handler
+ * @dur_s: number of seconds the CPU has been stuck in the soft lockup
*
* On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
* live-lock the system by making many CPUs target the same DSQ to the point
@@ -4731,6 +4792,7 @@ static void scx_clear_softlockup(void)
/**
* scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
+ * @bypass: true for bypass, false for unbypass
*
* Bypassing guarantees that all runnable tasks make forward progress without
* trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
@@ -4899,7 +4961,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
struct task_struct *p;
struct rhashtable_iter rht_iter;
struct scx_dispatch_q *dsq;
- int i, kind;
+ int i, kind, cpu;
kind = atomic_read(&scx_exit_kind);
while (true) {
@@ -4982,6 +5044,15 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
scx_task_iter_stop(&sti);
percpu_up_write(&scx_fork_rwsem);
+ /*
+ * Invalidate all the rq clocks to prevent getting outdated
+ * rq clocks from a previous scx scheduler.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ scx_rq_clock_invalidate(rq);
+ }
+
/* no task is on scx, turn off all the switches and flush in-progress calls */
static_branch_disable(&__scx_ops_enabled);
for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
@@ -5206,9 +5277,9 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
ops_state >> SCX_OPSS_QSEQ_SHIFT);
- dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
+ dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu slice=%llu",
p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
- p->scx.dsq_vtime);
+ p->scx.dsq_vtime, p->scx.slice);
dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
if (SCX_HAS_OP(dump_task)) {
@@ -6283,6 +6354,15 @@ void __init init_sched_ext_class(void)
__bpf_kfunc_start_defs();
+static bool check_builtin_idle_enabled(void)
+{
+ if (static_branch_likely(&scx_builtin_idle_enabled))
+ return true;
+
+ scx_ops_error("built-in idle tracking is disabled");
+ return false;
+}
+
/**
* scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
* @p: task_struct to select a CPU for
@@ -6300,10 +6380,8 @@ __bpf_kfunc_start_defs();
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
u64 wake_flags, bool *is_idle)
{
- if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- scx_ops_error("built-in idle tracking is disabled");
+ if (!check_builtin_idle_enabled())
goto prev_cpu;
- }
if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
goto prev_cpu;
@@ -6387,9 +6465,7 @@ __bpf_kfunc_start_defs();
* ops.select_cpu(), and ops.dispatch().
*
* When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
- * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
- * used to target the local DSQ of a CPU other than the enqueueing one. Use
- * ops.select_cpu() to be on the target CPU in the first place.
+ * and @p must match the task being enqueued.
*
* When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
* will be directly inserted into the corresponding dispatch queue after
@@ -7228,7 +7304,7 @@ __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
}
/**
- * scx_bpf_dump - Generate extra debug dump specific to the BPF scheduler
+ * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
* @fmt: format string
* @data: format string parameters packaged using ___bpf_fill() macro
* @data__sz: @data len, must end in '__sz' for the verifier
@@ -7320,7 +7396,6 @@ __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
* scx_bpf_cpuperf_set - Set the relative performance target of a CPU
* @cpu: CPU of interest
* @perf: target performance level [0, %SCX_CPUPERF_ONE]
- * @flags: %SCX_CPUPERF_* flags
*
* Set the target performance level of @cpu to @perf. @perf is in linear
* relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
@@ -7397,10 +7472,8 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
- if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- scx_ops_error("built-in idle tracking is disabled");
+ if (!check_builtin_idle_enabled())
return cpu_none_mask;
- }
#ifdef CONFIG_SMP
return idle_masks.cpu;
@@ -7418,10 +7491,8 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
- if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- scx_ops_error("built-in idle tracking is disabled");
+ if (!check_builtin_idle_enabled())
return cpu_none_mask;
- }
#ifdef CONFIG_SMP
if (sched_smt_active())
@@ -7436,6 +7507,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
/**
* scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
* either the percpu, or SMT idle-tracking cpumask.
+ * @idle_mask: &cpumask to use
*/
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
@@ -7459,10 +7531,8 @@ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
*/
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
{
- if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- scx_ops_error("built-in idle tracking is disabled");
+ if (!check_builtin_idle_enabled())
return false;
- }
if (ops_cpu_valid(cpu, NULL))
return test_and_clear_cpu_idle(cpu);
@@ -7492,10 +7562,8 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
u64 flags)
{
- if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- scx_ops_error("built-in idle tracking is disabled");
+ if (!check_builtin_idle_enabled())
return -EBUSY;
- }
return scx_pick_idle_cpu(cpus_allowed, flags);
}
@@ -7590,6 +7658,68 @@ out:
}
#endif
+/**
+ * scx_bpf_now - Returns a high-performance monotonically non-decreasing
+ * clock for the current CPU. The clock returned is in nanoseconds.
+ *
+ * It provides the following properties:
+ *
+ * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
+ * to account for execution time and track tasks' runtime properties.
+ * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
+ * eventually reads a hardware timestamp counter -- is neither performant nor
+ * scalable. scx_bpf_now() aims to provide a high-performance clock by
+ * using the rq clock in the scheduler core whenever possible.
+ *
+ * 2) High enough resolution for the BPF scheduler use cases: In most BPF
+ * scheduler use cases, the required clock resolution is lower than the most
+ * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
+ * uses the rq clock in the scheduler core whenever it is valid. It considers
+ * that the rq clock is valid from the time the rq clock is updated
+ * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
+ *
+ * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
+ * guarantees that the clock never goes backward when comparing two reads
+ * taken on the same CPU. When comparing clocks read on different CPUs,
+ * there is no such guarantee -- the clock can go backward. The clock is
+ * monotonically *non-decreasing* (rather than strictly increasing) because
+ * two scx_bpf_now() calls on the same CPU may return the same value while
+ * the cached rq clock remains valid.
+ */
+__bpf_kfunc u64 scx_bpf_now(void)
+{
+ struct rq *rq;
+ u64 clock;
+
+ preempt_disable();
+
+ rq = this_rq();
+ if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
+ /*
+ * If the rq clock is valid, use the cached rq clock.
+ *
+ * Note that scx_bpf_now() is re-entrant between a process
+ * context and an interrupt context (e.g., timer interrupt).
+ * However, we don't need to consider the race between them
+ * because such race is not observable from a caller.
+ */
+ clock = READ_ONCE(rq->scx.clock);
+ } else {
+ /*
+ * Otherwise, return a fresh rq clock.
+ *
+ * The rq clock is updated outside of the rq lock.
+ * In this case, keep the updated rq clock invalid so the next
+ * kfunc call outside the rq lock gets a fresh rq clock.
+ */
+ clock = sched_clock_cpu(cpu_of(rq));
+ }
+
+ preempt_enable();
+
+ return clock;
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(scx_kfunc_ids_any)
@@ -7621,6 +7751,7 @@ BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
#ifdef CONFIG_CGROUP_SCHED
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
+BTF_ID_FLAGS(func, scx_bpf_now)
BTF_KFUNCS_END(scx_kfunc_ids_any)
static const struct btf_kfunc_id_set scx_kfunc_set_any = {
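Usage illustration (not from this patch): a sched_ext BPF scheduler could move its runtime accounting from bpf_ktime_get_ns() to the new kfunc roughly as below. This is a minimal sketch that assumes the usual scx BPF program conventions (a kfunc declaration for scx_bpf_now(), the BPF_STRUCT_OPS() helper, and task-local storage); the map and callback names are made up for the example.

u64 scx_bpf_now(void) __ksym;

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, u64);
} start_ts SEC(".maps");

/* ops.running: remember when @p started running on this CPU */
void BPF_STRUCT_OPS(example_running, struct task_struct *p)
{
	u64 *ts = bpf_task_storage_get(&start_ts, p, 0,
				       BPF_LOCAL_STORAGE_GET_F_CREATE);

	if (ts)
		*ts = scx_bpf_now();	/* cheap read of the cached rq clock */
}

/* ops.stopping: charge the time @p just spent on the CPU */
void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
{
	u64 *ts = bpf_task_storage_get(&start_ts, p, 0, 0);

	/*
	 * Both reads happen on the CPU @p ran on, so the delta cannot be
	 * negative thanks to the per-CPU monotonicity guarantee above.
	 */
	if (ts && *ts)
		p->scx.dsq_vtime += scx_bpf_now() - *ts;
}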
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1e78caa21436..1c0ef435a7aa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -133,7 +133,7 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#endif
#ifdef CONFIG_SYSCTL
-static struct ctl_table sched_fair_sysctls[] = {
+static const struct ctl_table sched_fair_sysctls[] = {
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -5385,6 +5385,15 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void set_delayed(struct sched_entity *se)
{
se->sched_delayed = 1;
+
+ /*
+ * Delayed se of cfs_rq have no tasks queued on them.
+ * Do not adjust h_nr_runnable since dequeue_entities()
+ * will account it for blocked tasks.
+ */
+ if (!entity_is_task(se))
+ return;
+
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -5397,6 +5406,16 @@ static void set_delayed(struct sched_entity *se)
static void clear_delayed(struct sched_entity *se)
{
se->sched_delayed = 0;
+
+ /*
+ * Delayed se of cfs_rq have no tasks queued on them.
+ * Do not adjust h_nr_runnable since a dequeue has
+ * already accounted for it or an enqueue of a task
+ * below it will account for it in enqueue_task_fair().
+ */
+ if (!entity_is_task(se))
+ return;
+
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bd66a46b06ac..4b8e33c615b1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -26,7 +26,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff
size_t *lenp, loff_t *ppos);
static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-static struct ctl_table sched_rt_sysctls[] = {
+static const struct ctl_table sched_rt_sysctls[] = {
{
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7cf4cc57cdd..38e0e323dda2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -759,6 +759,7 @@ enum scx_rq_flags {
SCX_RQ_BAL_PENDING = 1 << 2, /* balance hasn't run yet */
SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */
SCX_RQ_BYPASSING = 1 << 4,
+ SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */
SCX_RQ_IN_WAKEUP = 1 << 16,
SCX_RQ_IN_BALANCE = 1 << 17,
@@ -771,9 +772,10 @@ struct scx_rq {
unsigned long ops_qseq;
u64 extra_enq_flags; /* see move_task_to_local_dsq() */
u32 nr_running;
- u32 flags;
u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */
bool cpu_released;
+ u32 flags;
+ u64 clock; /* current per-rq clock -- see scx_bpf_now() */
cpumask_var_t cpus_to_kick;
cpumask_var_t cpus_to_kick_if_idle;
cpumask_var_t cpus_to_preempt;
@@ -1722,6 +1724,38 @@ struct rq_flags {
extern struct balance_callback balance_push_callback;
+#ifdef CONFIG_SCHED_CLASS_EXT
+extern const struct sched_class ext_sched_class;
+
+DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
+
+#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
+#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
+
+static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
+{
+ if (!scx_enabled())
+ return;
+ WRITE_ONCE(rq->scx.clock, clock);
+ smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID);
+}
+
+static inline void scx_rq_clock_invalidate(struct rq *rq)
+{
+ if (!scx_enabled())
+ return;
+ WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
+}
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+#define scx_enabled() false
+#define scx_switched_all() false
+
+static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
+static inline void scx_rq_clock_invalidate(struct rq *rq) {}
+#endif /* !CONFIG_SCHED_CLASS_EXT */
+
/*
* Lockdep annotation that avoids accidental unlocks; it's like a
* sticky/continuous lockdep_assert_held().
@@ -1751,7 +1785,7 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
if (rq->clock_update_flags > RQCF_ACT_SKIP)
rf->clock_update_flags = RQCF_UPDATED;
#endif
-
+ scx_rq_clock_invalidate(rq);
lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}
@@ -2510,19 +2544,6 @@ extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
-#ifdef CONFIG_SCHED_CLASS_EXT
-extern const struct sched_class ext_sched_class;
-
-DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
-DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
-
-#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
-#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
-#else /* !CONFIG_SCHED_CLASS_EXT */
-#define scx_enabled() false
-#define scx_switched_all() false
-#endif /* !CONFIG_SCHED_CLASS_EXT */
-
/*
* Iterate only active classes. SCX can take over all fair tasks or be
* completely disabled. If the former, skip fair. If the latter, skip SCX.
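A note on the ordering (not part of the patch): scx_rq_clock_update() stores the clock with WRITE_ONCE() and only then sets SCX_RQ_CLK_VALID via smp_store_release(), while the reader in scx_bpf_now() pairs this with smp_load_acquire() on the flags before reading the clock, so any reader that observes the valid bit also observes the clock value published with it. The following standalone C11 sketch mirrors that publish/consume pattern as an analogy; it is not kernel code and the names are invented.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CLK_VALID (1u << 5)		/* stand-in for SCX_RQ_CLK_VALID */

static _Atomic uint64_t clock_ns;	/* stand-in for rq->scx.clock */
static _Atomic uint32_t flags;		/* stand-in for rq->scx.flags */

/* writer, like scx_rq_clock_update(): publish the clock, then the valid bit */
static void clock_update(uint64_t now)
{
	atomic_store_explicit(&clock_ns, now, memory_order_relaxed);
	atomic_store_explicit(&flags,
			      atomic_load_explicit(&flags, memory_order_relaxed) | CLK_VALID,
			      memory_order_release);
}

/* writer, like scx_rq_clock_invalidate(): clear the valid bit */
static void clock_invalidate(void)
{
	atomic_store_explicit(&flags,
			      atomic_load_explicit(&flags, memory_order_relaxed) & ~CLK_VALID,
			      memory_order_relaxed);
}

/* reader, like the fast path of scx_bpf_now() */
static uint64_t clock_read(uint64_t fallback)
{
	/* acquire: seeing the valid bit implies seeing the matching clock */
	if (atomic_load_explicit(&flags, memory_order_acquire) & CLK_VALID)
		return atomic_load_explicit(&clock_ns, memory_order_relaxed);
	return fallback;
}

int main(void)
{
	clock_update(1000);
	printf("valid:   %llu\n", (unsigned long long)clock_read(0));
	clock_invalidate();
	printf("invalid: %llu\n", (unsigned long long)clock_read(42));
	return 0;
}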
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 6ade91bce63e..19cdbe96f93d 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -248,7 +248,10 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
delta = rq_clock(rq) - t->sched_info.last_queued;
t->sched_info.last_queued = 0;
t->sched_info.run_delay += delta;
-
+ if (delta > t->sched_info.max_run_delay)
+ t->sched_info.max_run_delay = delta;
+ if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))
+ t->sched_info.min_run_delay = delta;
rq_sched_info_dequeue(rq, delta);
}
@@ -270,6 +273,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
t->sched_info.run_delay += delta;
t->sched_info.last_arrival = now;
t->sched_info.pcount++;
+ if (delta > t->sched_info.max_run_delay)
+ t->sched_info.max_run_delay = delta;
+ if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay))
+ t->sched_info.min_run_delay = delta;
rq_sched_info_arrive(rq, delta);
}
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 149e2c8036d3..456d339be98f 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1130,6 +1130,13 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
return 0;
/*
+ * The special/sugov task isn't part of regular bandwidth/admission
+ * control so let userspace change affinities.
+ */
+ if (dl_entity_is_special(&p->dl))
+ return 0;
+
+ /*
* Since bandwidth control happens on root_domain basis,
* if admission test is enabled, we only admit -deadline
* tasks allowed to run on all the CPUs in the task's
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index da33ec9e94ab..c49aea8c1025 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -312,7 +312,7 @@ static int sched_energy_aware_handler(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table sched_energy_aware_sysctls[] = {
+static const struct ctl_table sched_energy_aware_sysctls[] = {
{
.procname = "sched_energy_aware",
.data = &sysctl_sched_energy_aware,
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 385d48293a5f..7bbb408431eb 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -749,6 +749,15 @@ static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
if (WARN_ON_ONCE(!fprog))
return false;
+ /* Our single exception to filtering. */
+#ifdef __NR_uretprobe
+#ifdef SECCOMP_ARCH_COMPAT
+ if (sd->arch == SECCOMP_ARCH_NATIVE)
+#endif
+ if (sd->nr == __NR_uretprobe)
+ return true;
+#endif
+
for (pc = 0; pc < fprog->len; pc++) {
struct sock_filter *insn = &fprog->filter[pc];
u16 code = insn->code;
@@ -1023,6 +1032,9 @@ static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
*/
static const int mode1_syscalls[] = {
__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
+#ifdef __NR_uretprobe
+ __NR_uretprobe,
+#endif
-1, /* negative terminated */
};
@@ -2450,7 +2462,7 @@ static int seccomp_actions_logged_handler(const struct ctl_table *ro_table, int
return ret;
}
-static struct ctl_table seccomp_sysctl_table[] = {
+static const struct ctl_table seccomp_sysctl_table[] = {
{
.procname = "actions_avail",
.data = (void *) &seccomp_actions_avail,
diff --git a/kernel/signal.c b/kernel/signal.c
index a2afd54303f0..875e97f6205a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -4950,7 +4950,7 @@ static inline void siginfo_buildtime_checks(void)
}
#if defined(CONFIG_SYSCTL)
-static struct ctl_table signal_debug_table[] = {
+static const struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
{
.procname = "exception-trace",
diff --git a/kernel/smp.c b/kernel/smp.c
index f104c8e83fc4..974f3a3962e8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -170,9 +170,9 @@ static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
-module_param(csd_lock_timeout, ulong, 0444);
+module_param(csd_lock_timeout, ulong, 0644);
static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
-module_param(panic_on_ipistall, int, 0444);
+module_param(panic_on_ipistall, int, 0644);
static atomic_t csd_bug_count = ATOMIC_INIT(0);
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index 39fd620a7db6..bb65321761b4 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -15,6 +15,7 @@
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
+#include <linux/string_choices.h>
#include <linux/sysctl.h>
#include <linux/init.h>
@@ -41,10 +42,10 @@ static int stack_erasing_sysctl(const struct ctl_table *table, int write,
static_branch_enable(&stack_erasing_bypass);
pr_warn("stackleak: kernel stack erasing is %s\n",
- state ? "enabled" : "disabled");
+ str_enabled_disabled(state));
return ret;
}
-static struct ctl_table stackleak_sysctls[] = {
+static const struct ctl_table stackleak_sysctls[] = {
{
.procname = "stack_erasing",
.data = NULL,
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index da821ce258ea..8896d844d738 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -250,8 +250,8 @@ static int multi_cpu_stop(void *data)
* be detected and reported on their side.
*/
touch_nmi_watchdog();
+ rcu_momentary_eqs();
}
- rcu_momentary_eqs();
} while (curstate != MULTI_STOP_EXIT);
local_irq_restore(flags);
diff --git a/kernel/sys.c b/kernel/sys.c
index c4c701c6f0b4..cb366ff8703a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -75,6 +75,8 @@
#include <asm/io.h>
#include <asm/unistd.h>
+#include <trace/events/task.h>
+
#include "uid16.h"
#ifndef SET_UNALIGN_CTL
@@ -2810,6 +2812,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = arch_lock_shadow_stack_status(me, arg2);
break;
default:
+ trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
error = -EINVAL;
break;
}
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c
index 3ac98bb7fb82..eb2842bd0557 100644
--- a/kernel/sysctl-test.c
+++ b/kernel/sysctl-test.c
@@ -374,7 +374,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value(
struct kunit *test)
{
unsigned char data = 0;
- struct ctl_table table_foo[] = {
+ const struct ctl_table table_foo[] = {
{
.procname = "foo",
.data = &data,
@@ -386,7 +386,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value(
},
};
- struct ctl_table table_bar[] = {
+ const struct ctl_table table_bar[] = {
{
.procname = "bar",
.data = &data,
@@ -398,7 +398,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value(
},
};
- struct ctl_table table_qux[] = {
+ const struct ctl_table table_qux[] = {
{
.procname = "qux",
.data = &data,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7ae7a4136855..cb57da499ebb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1609,7 +1609,7 @@ int proc_do_static_key(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table kern_table[] = {
+static const struct ctl_table kern_table[] = {
{
.procname = "panic",
.data = &panic_timeout,
@@ -2021,7 +2021,7 @@ static struct ctl_table kern_table[] = {
#endif
};
-static struct ctl_table vm_table[] = {
+static const struct ctl_table vm_table[] = {
{
.procname = "overcommit_memory",
.data = &sysctl_overcommit_memory,
diff --git a/kernel/task_work.c b/kernel/task_work.c
index c969f1f26be5..d1efec571a4a 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -55,26 +55,14 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
enum task_work_notify_mode notify)
{
struct callback_head *head;
- int flags = notify & TWA_FLAGS;
- notify &= ~TWA_FLAGS;
if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current))
return -EINVAL;
if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL;
} else {
- /*
- * Record the work call stack in order to print it in KASAN
- * reports.
- *
- * Note that stack allocation can fail if TWAF_NO_ALLOC flag
- * is set and new page is needed to expand the stack buffer.
- */
- if (flags & TWAF_NO_ALLOC)
- kasan_record_aux_stack_noalloc(work);
- else
- kasan_record_aux_stack(work);
+ kasan_record_aux_stack(work);
}
head = READ_ONCE(task->task_works);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7304d7cf47f2..2a7802ec480c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -373,16 +373,18 @@ void clocksource_verify_percpu(struct clocksource *cs)
cpumask_clear(&cpus_ahead);
cpumask_clear(&cpus_behind);
cpus_read_lock();
- preempt_disable();
+ migrate_disable();
clocksource_verify_choose_cpus();
if (cpumask_empty(&cpus_chosen)) {
- preempt_enable();
+ migrate_enable();
cpus_read_unlock();
pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
return;
}
testcpu = smp_processor_id();
- pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+ pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
+ cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+ preempt_disable();
for_each_cpu(cpu, &cpus_chosen) {
if (cpu == testcpu)
continue;
@@ -402,6 +404,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
cs_nsec_min = cs_nsec;
}
preempt_enable();
+ migrate_enable();
cpus_read_unlock();
if (!cpumask_empty(&cpus_ahead))
pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f6d8df94045c..deb1aa32814e 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -58,6 +58,8 @@
#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+static void retrigger_next_event(void *arg);
+
/*
* The timer bases:
*
@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_TAI,
.get_time = &ktime_get_clocktai,
},
- }
+ },
+ .csd = CSD_INIT(retrigger_next_event, NULL)
};
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
[CLOCK_TAI] = HRTIMER_BASE_TAI,
};
+static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
+{
+ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ return true;
+ else
+ return likely(base->online);
+}
+
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
@@ -145,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = {
#define migration_base migration_cpu_base.clock_base[0]
-static inline bool is_migration_base(struct hrtimer_clock_base *base)
-{
- return base == &migration_base;
-}
-
/*
* We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
* means that all timers which are tied to this base via timer->base are
@@ -183,27 +189,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}
/*
- * We do not migrate the timer when it is expiring before the next
- * event on the target cpu. When high resolution is enabled, we cannot
- * reprogram the target cpu hardware and we would cause it to fire
- * late. To keep it simple, we handle the high resolution enabled and
- * disabled case similar.
+ * Check if the elected target is suitable considering its next
+ * event and the hotplug state of the current CPU.
+ *
+ * If the elected target is remote and its next event is after the timer
+ * to queue, then a remote reprogram is necessary. However there is no
+ * guarantee the IPI handling the operation would arrive in time to meet
+ * the high resolution deadline. In this case the local CPU becomes a
+ * preferred target, unless it is offline.
+ *
+ * High and low resolution modes are handled the same way for simplicity.
*
* Called with cpu_base->lock of target cpu held.
*/
-static int
-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
+ struct hrtimer_cpu_base *new_cpu_base,
+ struct hrtimer_cpu_base *this_cpu_base)
{
ktime_t expires;
+ /*
+ * The local CPU clockevent can be reprogrammed. Also get_target_base()
+ * guarantees it is online.
+ */
+ if (new_cpu_base == this_cpu_base)
+ return true;
+
+ /*
+ * The offline local CPU can't be the default target if the
+ * next remote target event is after this timer. Keep the
+ * elected new base. An IPI will be issued to reprogram
+ * it as a last resort.
+ */
+ if (!hrtimer_base_is_online(this_cpu_base))
+ return true;
+
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
- return expires < new_base->cpu_base->expires_next;
+
+ return expires >= new_base->cpu_base->expires_next;
}
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
- int pinned)
+static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
{
+ if (!hrtimer_base_is_online(base)) {
+ int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
+
+ return &per_cpu(hrtimer_bases, cpu);
+ }
+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
if (static_branch_likely(&timers_migration_enabled) && !pinned)
return &per_cpu(hrtimer_bases, get_nohz_timer_target());
@@ -254,8 +287,8 @@ again:
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
- if (new_cpu_base != this_cpu_base &&
- hrtimer_check_target(timer, new_base)) {
+ if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
+ this_cpu_base)) {
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
@@ -264,8 +297,7 @@ again:
}
WRITE_ONCE(timer->base, new_base);
} else {
- if (new_cpu_base != this_cpu_base &&
- hrtimer_check_target(timer, new_base)) {
+ if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
new_cpu_base = this_cpu_base;
goto again;
}
@@ -275,11 +307,6 @@ again:
#else /* CONFIG_SMP */
-static inline bool is_migration_base(struct hrtimer_clock_base *base)
-{
- return false;
-}
-
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
__acquires(&timer->base->cpu_base->lock)
@@ -716,8 +743,6 @@ static inline int hrtimer_is_hres_enabled(void)
return hrtimer_hres_enabled;
}
-static void retrigger_next_event(void *arg);
-
/*
* Switch to high resolution mode
*/
@@ -1205,6 +1230,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 delta_ns, const enum hrtimer_mode mode,
struct hrtimer_clock_base *base)
{
+ struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *new_base;
bool force_local, first;
@@ -1216,10 +1242,16 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* and enforce reprogramming after it is queued no matter whether
* it is the new first expiring timer again or not.
*/
- force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
+ force_local = base->cpu_base == this_cpu_base;
force_local &= base->cpu_base->next_timer == timer;
/*
+ * Don't force local queuing if this enqueue happens on an unplugged
+ * CPU after hrtimer_cpu_dying() has been invoked.
+ */
+ force_local &= this_cpu_base->online;
+
+ /*
* Remove an active timer from the queue. In case it is not queued
* on the current CPU, make sure that remove_hrtimer() updates the
* remote data correctly.
@@ -1248,8 +1280,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
}
first = enqueue_hrtimer(timer, new_base, mode);
- if (!force_local)
- return first;
+ if (!force_local) {
+ /*
+ * If the current CPU base is online, then the timer is
+ * never queued on a remote CPU if it would be the first
+ * expiring timer there.
+ */
+ if (hrtimer_base_is_online(this_cpu_base))
+ return first;
+
+ /*
+ * Timer was enqueued remote because the current base is
+ * already offline. If the timer is the first to expire,
+ * kick the remote CPU to reprogram the clock event.
+ */
+ if (first) {
+ struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
+
+ smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
+ }
+ return 0;
+ }
/*
* Timer was forced to stay on the current CPU to avoid
@@ -1370,6 +1421,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
}
}
+#ifdef CONFIG_SMP
+static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+ return base == &migration_base;
+}
+#else
+static __always_inline bool is_migration_base(struct hrtimer_clock_base *base)
+{
+ return false;
+}
+#endif
+
/*
* This function is called on PREEMPT_RT kernels when the fast path
* deletion of a timer failed because the timer callback function was
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 40706cb36920..c8f776dc6ee0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -301,7 +301,7 @@ static int timer_migration_handler(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table timer_sysctl[] = {
+static const struct ctl_table timer_sysctl[] = {
{
.procname = "timer_migration",
.data = &sysctl_timer_migration,
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 9cb9b6584ea1..2f6330831f08 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1675,6 +1675,9 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
} while (i < tmigr_hierarchy_levels);
+ /* Assert single root */
+ WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
+
while (i > 0) {
group = stack[--i];
@@ -1716,7 +1719,12 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
WARN_ON_ONCE(top == 0);
lvllist = &tmigr_level_list[top];
- if (group->num_children == 1 && list_is_singular(lvllist)) {
+
+ /*
+ * Newly created root level should have accounted the upcoming
+ * CPU's child group and pre-accounted the old root.
+ */
+ if (group->num_children == 2 && list_is_singular(lvllist)) {
/*
* The target CPU must never do the prepare work, except
* on early boot when the boot CPU is the target. Otherwise
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c462aca8b7e6..adc947587eb8 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -357,17 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
.arg3_type = ARG_CONST_SIZE,
};
-static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
-{
- if (!capable(CAP_SYS_ADMIN))
- return NULL;
-
- pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
- current->comm, task_pid_nr(current));
-
- return &bpf_probe_write_user_proto;
-}
-
#define MAX_TRACE_PRINTK_VARARGS 3
#define BPF_TRACE_PRINTK_SIZE 1024
@@ -854,7 +843,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struc
if (unlikely(is_global_init(task)))
return -EPERM;
- if (irqs_disabled()) {
+ if (!preemptible()) {
/* Do an early check on signal validity. Otherwise,
* the error is lost in deferred irq_work.
*/
@@ -1445,6 +1434,8 @@ late_initcall(bpf_key_sig_kfuncs_init);
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
+ const struct bpf_func_proto *func_proto;
+
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
@@ -1486,9 +1477,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_perf_event_read_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
- case BPF_FUNC_probe_write_user:
- return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
- NULL : bpf_get_probe_write_proto();
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
@@ -1567,7 +1555,22 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:
- return bpf_base_func_proto(func_id, prog);
+ break;
+ }
+
+ func_proto = bpf_base_func_proto(func_id, prog);
+ if (func_proto)
+ return func_proto;
+
+ if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
+ return NULL;
+
+ switch (func_id) {
+ case BPF_FUNC_probe_write_user:
+ return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+ NULL : &bpf_probe_write_user_proto;
+ default:
+ return NULL;
}
}
@@ -2243,6 +2246,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
{
struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
+ struct bpf_prog *prog = NULL;
int ret;
mutex_lock(&bpf_event_mutex);
@@ -2263,18 +2267,22 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
}
put:
- /*
- * It could be that the bpf_prog is not sleepable (and will be freed
- * via normal RCU), but is called from a point that supports sleepable
- * programs and uses tasks-trace-RCU.
- */
- synchronize_rcu_tasks_trace();
-
- bpf_prog_put(event->prog);
+ prog = event->prog;
event->prog = NULL;
unlock:
mutex_unlock(&bpf_event_mutex);
+
+ if (prog) {
+ /*
+ * It could be that the bpf_prog is not sleepable (and will be freed
+ * via normal RCU), but is called from a point that supports sleepable
+ * programs and uses tasks-trace-RCU.
+ */
+ synchronize_rcu_tasks_trace();
+
+ bpf_prog_put(prog);
+ }
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
@@ -2810,7 +2818,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
bpf_prog_inc_misses_counter(link->link.prog);
- err = 0;
+ err = 1;
goto out;
}
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 9e6b5a71555b..5dddfc2149f6 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -826,7 +826,6 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
return (unsigned long)panic;
}
- trace.rettime = trace_clock_local();
if (fregs)
ftrace_regs_set_instruction_pointer(fregs, ret);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b2955e504193..728ecda6e8d4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4908,23 +4908,6 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
}
-static bool module_exists(const char *module)
-{
- /* All modules have the symbol __this_module */
- static const char this_mod[] = "__this_module";
- char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
- unsigned long val;
- int n;
-
- n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
-
- if (n > sizeof(modname) - 1)
- return false;
-
- val = module_kallsyms_lookup_name(modname);
- return val != 0;
-}
-
static int cache_mod(struct trace_array *tr,
const char *func, char *module, int enable)
{
@@ -8797,7 +8780,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
return 0;
}
-static struct ctl_table ftrace_sysctls[] = {
+static const struct ctl_table ftrace_sysctls[] = {
{
.procname = "ftrace_enabled",
.data = &ftrace_enabled,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6d61ff78926b..b8e0ae15ca5b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4398,8 +4398,13 @@ rb_reserve_next_event(struct trace_buffer *buffer,
int nr_loops = 0;
int add_ts_default;
- /* ring buffer does cmpxchg, make sure it is safe in NMI context */
- if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+ /*
+ * ring buffer does cmpxchg as well as atomic64 operations
+ * (which some archs use locking for atomic64), make sure this
+ * is safe in NMI context
+ */
+ if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
+ IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
(unlikely(in_nmi()))) {
return NULL;
}
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
index 831779607e84..8226352a0062 100644
--- a/kernel/trace/rv/Kconfig
+++ b/kernel/trace/rv/Kconfig
@@ -25,30 +25,9 @@ menuconfig RV
For further information, see:
Documentation/trace/rv/runtime-verification.rst
-config RV_MON_WIP
- depends on RV
- depends on PREEMPT_TRACER
- select DA_MON_EVENTS_IMPLICIT
- bool "wip monitor"
- help
- Enable wip (wakeup in preemptive) sample monitor that illustrates
- the usage of per-cpu monitors, and one limitation of the
- preempt_disable/enable events.
-
- For further information, see:
- Documentation/trace/rv/monitor_wip.rst
-
-config RV_MON_WWNR
- depends on RV
- select DA_MON_EVENTS_ID
- bool "wwnr monitor"
- help
- Enable wwnr (wakeup while not running) sample monitor, this is a
- sample monitor that illustrates the usage of per-task monitor.
- The model is borken on purpose: it serves to test reactors.
-
- For further information, see:
- Documentation/trace/rv/monitor_wwnr.rst
+source "kernel/trace/rv/monitors/wip/Kconfig"
+source "kernel/trace/rv/monitors/wwnr/Kconfig"
+# Add new monitors here
config RV_REACTORS
bool "Runtime verification reactors"
diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile
index 963d14875b45..188b64668e1f 100644
--- a/kernel/trace/rv/Makefile
+++ b/kernel/trace/rv/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I $(src) # needed for trace events
+
obj-$(CONFIG_RV) += rv.o
obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
+# Add new monitors here
obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
obj-$(CONFIG_RV_REACT_PANIC) += reactor_panic.o
diff --git a/kernel/trace/rv/monitors/wip/Kconfig b/kernel/trace/rv/monitors/wip/Kconfig
new file mode 100644
index 000000000000..3ef664b5cd90
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/Kconfig
@@ -0,0 +1,12 @@
+config RV_MON_WIP
+ depends on RV
+ depends on PREEMPT_TRACER
+ select DA_MON_EVENTS_IMPLICIT
+ bool "wip monitor"
+ help
+ Enable wip (wakeup in preemptive) sample monitor that illustrates
+ the usage of per-cpu monitors, and one limitation of the
+ preempt_disable/enable events.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wip.rst
diff --git a/kernel/trace/rv/monitors/wip/wip.c b/kernel/trace/rv/monitors/wip/wip.c
index b2b49a27e886..db7389157c87 100644
--- a/kernel/trace/rv/monitors/wip/wip.c
+++ b/kernel/trace/rv/monitors/wip/wip.c
@@ -10,7 +10,7 @@
#define MODULE_NAME "wip"
-#include <trace/events/rv.h>
+#include <rv_trace.h>
#include <trace/events/sched.h>
#include <trace/events/preemptirq.h>
diff --git a/kernel/trace/rv/monitors/wip/wip_trace.h b/kernel/trace/rv/monitors/wip/wip_trace.h
new file mode 100644
index 000000000000..aa2162f47a4c
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/wip_trace.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Snippet to be included in rv_trace.h
+ */
+
+#ifdef CONFIG_RV_MON_WIP
+DEFINE_EVENT(event_da_monitor, event_wip,
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor, error_wip,
+ TP_PROTO(char *state, char *event),
+ TP_ARGS(state, event));
+#endif /* CONFIG_RV_MON_WIP */
diff --git a/kernel/trace/rv/monitors/wwnr/Kconfig b/kernel/trace/rv/monitors/wwnr/Kconfig
new file mode 100644
index 000000000000..ee741aa6d6b8
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/Kconfig
@@ -0,0 +1,11 @@
+config RV_MON_WWNR
+ depends on RV
+ select DA_MON_EVENTS_ID
+ bool "wwnr monitor"
+ help
+ Enable wwnr (wakeup while not running) sample monitor, this is a
+ sample monitor that illustrates the usage of per-task monitor.
+ The model is broken on purpose: it serves to test reactors.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wwnr.rst
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.c b/kernel/trace/rv/monitors/wwnr/wwnr.c
index 0e43dd2db685..3b16994a9984 100644
--- a/kernel/trace/rv/monitors/wwnr/wwnr.c
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.c
@@ -10,7 +10,7 @@
#define MODULE_NAME "wwnr"
-#include <trace/events/rv.h>
+#include <rv_trace.h>
#include <trace/events/sched.h>
#include "wwnr.h"
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr_trace.h b/kernel/trace/rv/monitors/wwnr/wwnr_trace.h
new file mode 100644
index 000000000000..fc97ec7476ad
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/wwnr_trace.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Snippet to be included in rv_trace.h
+ */
+
+#ifdef CONFIG_RV_MON_WWNR
+/* id is the pid of the task */
+DEFINE_EVENT(event_da_monitor_id, event_wwnr,
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(id, state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor_id, error_wwnr,
+ TP_PROTO(int id, char *state, char *event),
+ TP_ARGS(id, state, event));
+#endif /* CONFIG_RV_MON_WWNR */
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 279c70e1bd74..8657fc8806e7 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -145,7 +145,7 @@
#ifdef CONFIG_DA_MON_EVENTS
#define CREATE_TRACE_POINTS
-#include <trace/events/rv.h>
+#include <rv_trace.h>
#endif
#include "rv.h"
diff --git a/include/trace/events/rv.h b/kernel/trace/rv/rv_trace.h
index 56592da9301c..5e65097423ba 100644
--- a/include/trace/events/rv.h
+++ b/kernel/trace/rv/rv_trace.h
@@ -57,15 +57,9 @@ DECLARE_EVENT_CLASS(error_da_monitor,
__entry->state)
);
-#ifdef CONFIG_RV_MON_WIP
-DEFINE_EVENT(event_da_monitor, event_wip,
- TP_PROTO(char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor, error_wip,
- TP_PROTO(char *state, char *event),
- TP_ARGS(state, event));
-#endif /* CONFIG_RV_MON_WIP */
+#include <monitors/wip/wip_trace.h>
+// Add new monitors based on CONFIG_DA_MON_EVENTS_IMPLICIT here
+
#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
#ifdef CONFIG_DA_MON_EVENTS_ID
@@ -123,20 +117,14 @@ DECLARE_EVENT_CLASS(error_da_monitor_id,
__entry->state)
);
-#ifdef CONFIG_RV_MON_WWNR
-/* id is the pid of the task */
-DEFINE_EVENT(event_da_monitor_id, event_wwnr,
- TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(id, state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor_id, error_wwnr,
- TP_PROTO(int id, char *state, char *event),
- TP_ARGS(id, state, event));
-#endif /* CONFIG_RV_MON_WWNR */
+#include <monitors/wwnr/wwnr_trace.h>
+// Add new monitors based on CONFIG_DA_MON_EVENTS_ID here
#endif /* CONFIG_DA_MON_EVENTS_ID */
#endif /* _TRACE_RV_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rv_trace
#include <trace/define_trace.h>
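With rv_trace.h now pulling in one monitors/<name>/<name>_trace.h snippet per monitor, adding a monitor no longer has to edit a shared events header. As a sketch of the pattern (the monitor name "foo" is hypothetical), a new per-cpu monitor using the implicit event class would ship its own trace snippet and add a matching #include in the CONFIG_DA_MON_EVENTS_IMPLICIT block above:

/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Snippet to be included in rv_trace.h
 */

#ifdef CONFIG_RV_MON_FOO
DEFINE_EVENT(event_da_monitor, event_foo,
	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor, error_foo,
	     TP_PROTO(char *state, char *event),
	     TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_FOO */

It would also get its own Kconfig sourced from kernel/trace/rv/Kconfig and an obj-$(CONFIG_RV_MON_FOO) line in the Makefile, mirroring the wip and wwnr entries earlier in this patch.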
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2542ec398b5d..1496a5ac33ae 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -26,6 +26,7 @@
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
+#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
@@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
struct trace_array *tr;
- int ret = -ENODEV;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) {
tr->ref++;
- ret = 0;
- break;
+ return 0;
}
}
- mutex_unlock(&trace_types_lock);
- return ret;
+ return -ENODEV;
}
static void __trace_array_put(struct trace_array *this_tr)
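The trace.c conversions that follow replace open-coded mutex_lock()/mutex_unlock() pairs with the scope-based guards from <linux/cleanup.h> (added to the includes above): guard(mutex)(&lock) acquires the mutex and releases it automatically on every return path, which is what allows the goto-based unlock bookkeeping to be dropped. A minimal self-contained sketch of the pattern, using a hypothetical lock and list:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct example_item {
	struct list_head list;
	int key;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_lock);

static int example_find(int key)
{
	struct example_item *item;

	guard(mutex)(&example_lock);	/* released on every return below */

	list_for_each_entry(item, &example_list, list) {
		if (item->key == key)
			return 0;	/* lock dropped here */
	}
	return -ENODEV;			/* and here */
}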
@@ -1443,22 +1441,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
cond_update_fn_t update)
{
- struct cond_snapshot *cond_snapshot;
- int ret = 0;
+ struct cond_snapshot *cond_snapshot __free(kfree) =
+ kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
+ int ret;
- cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
if (!cond_snapshot)
return -ENOMEM;
cond_snapshot->cond_data = cond_data;
cond_snapshot->update = update;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
- if (tr->current_trace->use_max_tr) {
- ret = -EBUSY;
- goto fail_unlock;
- }
+ if (tr->current_trace->use_max_tr)
+ return -EBUSY;
/*
* The cond_snapshot can only change to NULL without the
@@ -1468,29 +1464,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
* do safely with only holding the trace_types_lock and not
* having to take the max_lock.
*/
- if (tr->cond_snapshot) {
- ret = -EBUSY;
- goto fail_unlock;
- }
+ if (tr->cond_snapshot)
+ return -EBUSY;
ret = tracing_arm_snapshot_locked(tr);
if (ret)
- goto fail_unlock;
+ return ret;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
- tr->cond_snapshot = cond_snapshot;
+ tr->cond_snapshot = no_free_ptr(cond_snapshot);
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
- mutex_unlock(&trace_types_lock);
-
- return ret;
-
- fail_unlock:
- mutex_unlock(&trace_types_lock);
- kfree(cond_snapshot);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
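The cond_snapshot conversion above also uses the allocation side of cleanup.h: __free(kfree) ties the buffer's lifetime to the local variable's scope, so every early return frees it, while no_free_ptr() transfers ownership once the pointer is safely published and suppresses the automatic kfree(). A minimal sketch of that idiom with a hypothetical structure:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_cond {
	int data;
};

static struct example_cond *example_published;

static int example_publish(int data)
{
	struct example_cond *cond __free(kfree) =
			kzalloc(sizeof(*cond), GFP_KERNEL);

	if (!cond)
		return -ENOMEM;

	if (data < 0)
		return -EINVAL;			/* cond is kfree()d automatically */

	cond->data = data;
	example_published = no_free_ptr(cond);	/* ownership handed off, no auto-free */
	return 0;
}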
@@ -2203,10 +2190,10 @@ static __init int init_trace_selftests(void)
selftests_can_run = true;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (list_empty(&postponed_selftests))
- goto out;
+ return 0;
pr_info("Running postponed tracer tests:\n");
@@ -2235,9 +2222,6 @@ static __init int init_trace_selftests(void)
}
tracing_selftest_running = false;
- out:
- mutex_unlock(&trace_types_lock);
-
return 0;
}
core_initcall(init_trace_selftests);
@@ -2807,7 +2791,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
int save_tracepoint_printk;
int ret;
- mutex_lock(&tracepoint_printk_mutex);
+ guard(mutex)(&tracepoint_printk_mutex);
save_tracepoint_printk = tracepoint_printk;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -2820,16 +2804,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
tracepoint_printk = 0;
if (save_tracepoint_printk == tracepoint_printk)
- goto out;
+ return ret;
if (tracepoint_printk)
static_key_enable(&tracepoint_printk_key.key);
else
static_key_disable(&tracepoint_printk_key.key);
- out:
- mutex_unlock(&tracepoint_printk_mutex);
-
return ret;
}
@@ -5127,7 +5108,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
u32 tracer_flags;
int i;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
+
tracer_flags = tr->current_trace->flags->val;
trace_opts = tr->current_trace->flags->opts;
@@ -5144,7 +5126,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
else
seq_printf(m, "no%s\n", trace_opts[i].name);
}
- mutex_unlock(&trace_types_lock);
return 0;
}
@@ -5537,6 +5518,8 @@ static const char readme_msg[] =
"\t efield: For event probes ('e' types), the field is on of the fields\n"
"\t of the <attached-group>/<attached-event>.\n"
#endif
+ " set_event\t\t- Enables events by name written into it\n"
+ "\t\t\t Can enable module events via: :mod:<module>\n"
" events/\t\t- Directory containing all trace event subsystems:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
" events/<system>/\t- Directory containing all trace events for <system>:\n"
@@ -5809,7 +5792,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
return;
}
- mutex_lock(&trace_eval_mutex);
+ guard(mutex)(&trace_eval_mutex);
if (!trace_eval_maps)
trace_eval_maps = map_array;
@@ -5833,8 +5816,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
map_array++;
}
memset(map_array, 0, sizeof(*map_array));
-
- mutex_unlock(&trace_eval_mutex);
}
static void trace_create_eval_file(struct dentry *d_tracer)
@@ -5998,23 +5979,18 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
{
int ret;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (cpu_id != RING_BUFFER_ALL_CPUS) {
/* make sure, this cpu is enabled in the mask */
- if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
+ return -EINVAL;
}
ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
if (ret < 0)
ret = -ENOMEM;
-out:
- mutex_unlock(&trace_types_lock);
-
return ret;
}
@@ -6106,9 +6082,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
#endif
- int ret = 0;
+ int ret;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
update_last_data(tr);
@@ -6116,7 +6092,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
- goto out;
+ return ret;
ret = 0;
}
@@ -6124,43 +6100,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (strcmp(t->name, buf) == 0)
break;
}
- if (!t) {
- ret = -EINVAL;
- goto out;
- }
+ if (!t)
+ return -EINVAL;
+
if (t == tr->current_trace)
- goto out;
+ return 0;
#ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) {
local_irq_disable();
arch_spin_lock(&tr->max_lock);
- if (tr->cond_snapshot)
- ret = -EBUSY;
+ ret = tr->cond_snapshot ? -EBUSY : 0;
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret)
- goto out;
+ return ret;
}
#endif
/* Some tracers won't work on kernel command line */
if (system_state < SYSTEM_RUNNING && t->noboot) {
pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
t->name);
- goto out;
+ return -EINVAL;
}
/* Some tracers are only allowed for the top level buffer */
- if (!trace_ok_for_array(t, tr)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!trace_ok_for_array(t, tr))
+ return -EINVAL;
/* If trace pipe files are being read, we can't change the tracer */
- if (tr->trace_ref) {
- ret = -EBUSY;
- goto out;
- }
+ if (tr->trace_ref)
+ return -EBUSY;
trace_branch_disable();
@@ -6191,7 +6161,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (!had_max_tr && t->use_max_tr) {
ret = tracing_arm_snapshot_locked(tr);
if (ret)
- goto out;
+ return ret;
}
#else
tr->current_trace = &nop_trace;
@@ -6204,17 +6174,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (t->use_max_tr)
tracing_disarm_snapshot(tr);
#endif
- goto out;
+ return ret;
}
}
tr->current_trace = t;
tr->current_trace->enabled++;
trace_branch_enable(tr);
- out:
- mutex_unlock(&trace_types_lock);
- return ret;
+ return 0;
}
static ssize_t
@@ -6292,22 +6260,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data;
int ret;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
if (ret < 0)
- goto out;
+ return ret;
if (tr->current_trace->update_thresh) {
ret = tr->current_trace->update_thresh(tr);
if (ret < 0)
- goto out;
+ return ret;
}
- ret = cnt;
-out:
- mutex_unlock(&trace_types_lock);
-
- return ret;
+ return cnt;
}
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -6526,31 +6490,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
* This is just a matter of traces coherency, the ring buffer itself
* is protected.
*/
- mutex_lock(&iter->mutex);
+ guard(mutex)(&iter->mutex);
/* return any leftover data */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (sret != -EBUSY)
- goto out;
+ return sret;
trace_seq_init(&iter->seq);
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
- goto out;
+ return sret;
}
waitagain:
sret = tracing_wait_pipe(filp);
if (sret <= 0)
- goto out;
+ return sret;
/* stop when tracing is finished */
- if (trace_empty(iter)) {
- sret = 0;
- goto out;
- }
+ if (trace_empty(iter))
+ return 0;
if (cnt >= TRACE_SEQ_BUFFER_SIZE)
cnt = TRACE_SEQ_BUFFER_SIZE - 1;
@@ -6614,9 +6576,6 @@ waitagain:
if (sret == -EBUSY)
goto waitagain;
-out:
- mutex_unlock(&iter->mutex);
-
return sret;
}
@@ -7208,25 +7167,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
*/
int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{
- int ret = 0;
-
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (set && tr->no_filter_buffering_ref++)
- goto out;
+ return 0;
if (!set) {
- if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
+ return -EINVAL;
--tr->no_filter_buffering_ref;
}
- out:
- mutex_unlock(&trace_types_lock);
- return ret;
+ return 0;
}
struct ftrace_buffer_info {
@@ -7302,12 +7255,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
- if (tr->current_trace->use_max_tr) {
- ret = -EBUSY;
- goto out;
- }
+ if (tr->current_trace->use_max_tr)
+ return -EBUSY;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
@@ -7316,24 +7267,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret)
- goto out;
+ return ret;
switch (val) {
case 0:
- if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
- ret = -EINVAL;
- break;
- }
+ if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
+ return -EINVAL;
if (tr->allocated_snapshot)
free_snapshot(tr);
break;
case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
- if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
- ret = -EINVAL;
- break;
- }
+ if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
+ return -EINVAL;
#endif
if (tr->allocated_snapshot)
ret = resize_buffer_duplicate_size(&tr->max_buffer,
@@ -7341,7 +7288,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = tracing_arm_snapshot_locked(tr);
if (ret)
- break;
+ return ret;
/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
@@ -7368,8 +7315,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt;
ret = cnt;
}
-out:
- mutex_unlock(&trace_types_lock);
+
return ret;
}
@@ -7755,12 +7701,11 @@ void tracing_log_err(struct trace_array *tr,
len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
- mutex_lock(&tracing_err_log_lock);
+ guard(mutex)(&tracing_err_log_lock);
+
err = get_tracing_log_err(tr, len);
- if (PTR_ERR(err) == -ENOMEM) {
- mutex_unlock(&tracing_err_log_lock);
+ if (PTR_ERR(err) == -ENOMEM)
return;
- }
snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
@@ -7771,7 +7716,6 @@ void tracing_log_err(struct trace_array *tr,
err->info.ts = local_clock();
list_add_tail(&err->list, &tr->err_log);
- mutex_unlock(&tracing_err_log_lock);
}
static void clear_tracing_err_log(struct trace_array *tr)
@@ -9467,6 +9411,10 @@ trace_array_create_systems(const char *name, const char *systems,
INIT_LIST_HEAD(&tr->hist_vars);
INIT_LIST_HEAD(&tr->err_log);
+#ifdef CONFIG_MODULES
+ INIT_LIST_HEAD(&tr->mod_events);
+#endif
+
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
@@ -9515,20 +9463,17 @@ static int instance_mkdir(const char *name)
struct trace_array *tr;
int ret;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&event_mutex);
+ guard(mutex)(&trace_types_lock);
ret = -EEXIST;
if (trace_array_find(name))
- goto out_unlock;
+ return -EEXIST;
tr = trace_array_create(name);
ret = PTR_ERR_OR_ZERO(tr);
-out_unlock:
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
return ret;
}
@@ -9578,24 +9523,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
{
struct trace_array *tr;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&event_mutex);
+ guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (tr->name && strcmp(tr->name, name) == 0)
- goto out_unlock;
+ if (tr->name && strcmp(tr->name, name) == 0) {
+ tr->ref++;
+ return tr;
+ }
}
tr = trace_array_create_systems(name, systems, 0, 0);
if (IS_ERR(tr))
tr = NULL;
-out_unlock:
- if (tr)
+ else
tr->ref++;
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
@@ -9646,48 +9590,36 @@ static int __remove_instance(struct trace_array *tr)
int trace_array_destroy(struct trace_array *this_tr)
{
struct trace_array *tr;
- int ret;
if (!this_tr)
return -EINVAL;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&event_mutex);
+ guard(mutex)(&trace_types_lock);
- ret = -ENODEV;
/* Making sure trace array exists before destroying it. */
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (tr == this_tr) {
- ret = __remove_instance(tr);
- break;
- }
+ if (tr == this_tr)
+ return __remove_instance(tr);
}
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
-
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);
static int instance_rmdir(const char *name)
{
struct trace_array *tr;
- int ret;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&event_mutex);
+ guard(mutex)(&trace_types_lock);
- ret = -ENODEV;
tr = trace_array_find(name);
- if (tr)
- ret = __remove_instance(tr);
-
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
+ if (!tr)
+ return -ENODEV;
- return ret;
+ return __remove_instance(tr);
}
static __init void create_trace_instances(struct dentry *d_tracer)
@@ -9700,19 +9632,16 @@ static __init void create_trace_instances(struct dentry *d_tracer)
if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
return;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&event_mutex);
+ guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->name)
continue;
if (MEM_FAIL(trace_array_create_dir(tr) < 0,
"Failed to create instance directory\n"))
- break;
+ return;
}
-
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
}
static void
@@ -9902,6 +9831,24 @@ late_initcall_sync(trace_eval_sync);
#ifdef CONFIG_MODULES
+
+bool module_exists(const char *module)
+{
+ /* All modules have the symbol __this_module */
+ static const char this_mod[] = "__this_module";
+ char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
+ unsigned long val;
+ int n;
+
+ n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
+
+ if (n > sizeof(modname) - 1)
+ return false;
+
+ val = module_kallsyms_lookup_name(modname);
+ return val != 0;
+}
+
static void trace_module_add_evals(struct module *mod)
{
if (!mod->num_trace_evals)
@@ -9926,7 +9873,7 @@ static void trace_module_remove_evals(struct module *mod)
if (!mod->num_trace_evals)
return;
- mutex_lock(&trace_eval_mutex);
+ guard(mutex)(&trace_eval_mutex);
map = trace_eval_maps;
@@ -9938,12 +9885,10 @@ static void trace_module_remove_evals(struct module *mod)
map = map->tail.next;
}
if (!map)
- goto out;
+ return;
*last = trace_eval_jmp_to_tail(map)->tail.next;
kfree(map);
- out:
- mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
@@ -10616,6 +10561,10 @@ __init static int tracer_alloc_buffers(void)
#endif
ftrace_init_global_array_ops(&global_trace);
+#ifdef CONFIG_MODULES
+ INIT_LIST_HEAD(&global_trace.mod_events);
+#endif
+
init_trace_flags_index(&global_trace);
register_tracer(&nop_trace);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 04058a9889b7..9c21ba45b7af 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -400,6 +400,9 @@ struct trace_array {
cpumask_var_t pipe_cpumask;
int ref;
int trace_ref;
+#ifdef CONFIG_MODULES
+ struct list_head mod_events;
+#endif
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids;
@@ -435,6 +438,15 @@ enum {
TRACE_ARRAY_FL_MOD_INIT = BIT(2),
};
+#ifdef CONFIG_MODULES
+bool module_exists(const char *module);
+#else
+static inline bool module_exists(const char *module)
+{
+ return false;
+}
+#endif
+
extern struct list_head ftrace_trace_arrays;
extern struct mutex trace_types_lock;
@@ -912,7 +924,9 @@ extern int __trace_graph_retaddr_entry(struct trace_array *tr,
unsigned long retaddr);
extern void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned int trace_ctx);
+ unsigned int trace_ctx,
+ u64 calltime, u64 rettime);
+
extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern void free_fgraph_ops(struct trace_array *tr);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 4376887e0d8a..a322e4f249a5 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -74,24 +74,19 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
struct dyn_event *pos, *n;
char *system = NULL, *event, *p;
int argc, ret = -ENOENT;
- char **argv;
+ char **argv __free(argv_free) = argv_split(GFP_KERNEL, raw_command, &argc);
- argv = argv_split(GFP_KERNEL, raw_command, &argc);
if (!argv)
return -ENOMEM;
if (argv[0][0] == '-') {
- if (argv[0][1] != ':') {
- ret = -EINVAL;
- goto out;
- }
+ if (argv[0][1] != ':')
+ return -EINVAL;
event = &argv[0][2];
} else {
event = strchr(argv[0], ':');
- if (!event) {
- ret = -EINVAL;
- goto out;
- }
+ if (!event)
+ return -EINVAL;
event++;
}
@@ -101,10 +96,8 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
event = p + 1;
*p = '\0';
}
- if (!system && event[0] == '\0') {
- ret = -EINVAL;
- goto out;
- }
+ if (!system && event[0] == '\0')
+ return -EINVAL;
mutex_lock(&event_mutex);
for_each_dyn_event_safe(pos, n) {
@@ -120,8 +113,6 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
}
tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
-out:
- argv_free(argv);
return ret;
}
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 82fd174ebbe0..fbfb396905a6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -124,8 +124,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__field_packed( unsigned long, ret, retval )
__field_packed( int, ret, depth )
__field_packed( unsigned int, ret, overrun )
- __field_packed( unsigned long long, ret, calltime)
- __field_packed( unsigned long long, ret, rettime )
+ __field(unsigned long long, calltime )
+ __field(unsigned long long, rettime )
),
F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d retval: %lx",
@@ -146,8 +146,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__field_packed( unsigned long, ret, func )
__field_packed( int, ret, depth )
__field_packed( unsigned int, ret, overrun )
- __field_packed( unsigned long long, ret, calltime)
- __field_packed( unsigned long long, ret, rettime )
+ __field(unsigned long long, calltime )
+ __field(unsigned long long, rettime )
),
F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index be8be0c1aaf0..82fd637cfc19 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
goto error;
}
- mutex_lock(&event_mutex);
- event_call = find_and_get_event(sys_name, sys_event);
- ep = alloc_event_probe(group, event, event_call, argc - 2);
- mutex_unlock(&event_mutex);
+ scoped_guard(mutex, &event_mutex) {
+ event_call = find_and_get_event(sys_name, sys_event);
+ ep = alloc_event_probe(group, event, event_call, argc - 2);
+ }
if (IS_ERR(ep)) {
ret = PTR_ERR(ep);
@@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (ret < 0)
goto error;
init_trace_eprobe_call(ep);
- mutex_lock(&event_mutex);
- ret = trace_probe_register_event_call(&ep->tp);
- if (ret) {
- if (ret == -EEXIST) {
- trace_probe_log_set_index(0);
- trace_probe_log_err(0, EVENT_EXIST);
+ scoped_guard(mutex, &event_mutex) {
+ ret = trace_probe_register_event_call(&ep->tp);
+ if (ret) {
+ if (ret == -EEXIST) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, EVENT_EXIST);
+ }
+ goto error;
+ }
+ ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
+ if (ret < 0) {
+ trace_probe_unregister_event_call(&ep->tp);
+ goto error;
}
- mutex_unlock(&event_mutex);
- goto error;
- }
- ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
- if (ret < 0) {
- trace_probe_unregister_event_call(&ep->tp);
- mutex_unlock(&event_mutex);
- goto error;
}
- mutex_unlock(&event_mutex);
return ret;
parse_error:
ret = -EINVAL;
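Where only part of a function needs event_mutex, the conversion above uses scoped_guard(): the mutex is held exactly for the braced block and released on any exit from it, including the goto error paths inside the block. A minimal self-contained sketch of the construct with a hypothetical lock:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);
static int example_count;

static int example_register(bool fail)
{
	int ret = 0;

	scoped_guard(mutex, &example_mutex) {
		if (fail) {
			ret = -EBUSY;
			goto error;	/* example_mutex is released on the way out */
		}
		example_count++;
	}
	return 0;

error:
	/* error handling that must run without the lock held */
	return ret;
}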
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 770e7ed91716..4cb275316e51 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -869,6 +869,120 @@ static int ftrace_event_enable_disable(struct trace_event_file *file,
return __ftrace_event_enable_disable(file, enable, 0);
}
+#ifdef CONFIG_MODULES
+struct event_mod_load {
+ struct list_head list;
+ char *module;
+ char *match;
+ char *system;
+ char *event;
+};
+
+static void free_event_mod(struct event_mod_load *event_mod)
+{
+ list_del(&event_mod->list);
+ kfree(event_mod->module);
+ kfree(event_mod->match);
+ kfree(event_mod->system);
+ kfree(event_mod->event);
+ kfree(event_mod);
+}
+
+static void clear_mod_events(struct trace_array *tr)
+{
+ struct event_mod_load *event_mod, *n;
+
+ list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
+ free_event_mod(event_mod);
+ }
+}
+
+static int remove_cache_mod(struct trace_array *tr, const char *mod,
+ const char *match, const char *system, const char *event)
+{
+ struct event_mod_load *event_mod, *n;
+ int ret = -EINVAL;
+
+ list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
+ if (strcmp(event_mod->module, mod) != 0)
+ continue;
+
+ if (match && strcmp(event_mod->match, match) != 0)
+ continue;
+
+ if (system &&
+ (!event_mod->system || strcmp(event_mod->system, system) != 0))
+ continue;
+
+ if (event &&
+ (!event_mod->event || strcmp(event_mod->event, event) != 0))
+ continue;
+
+ free_event_mod(event_mod);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cache_mod(struct trace_array *tr, const char *mod, int set,
+ const char *match, const char *system, const char *event)
+{
+ struct event_mod_load *event_mod;
+
+ /* If the module exists, then this just failed to find an event */
+ if (module_exists(mod))
+ return -EINVAL;
+
+ /* See if this is to remove a cached filter */
+ if (!set)
+ return remove_cache_mod(tr, mod, match, system, event);
+
+ event_mod = kzalloc(sizeof(*event_mod), GFP_KERNEL);
+ if (!event_mod)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&event_mod->list);
+ event_mod->module = kstrdup(mod, GFP_KERNEL);
+ if (!event_mod->module)
+ goto out_free;
+
+ if (match) {
+ event_mod->match = kstrdup(match, GFP_KERNEL);
+ if (!event_mod->match)
+ goto out_free;
+ }
+
+ if (system) {
+ event_mod->system = kstrdup(system, GFP_KERNEL);
+ if (!event_mod->system)
+ goto out_free;
+ }
+
+ if (event) {
+ event_mod->event = kstrdup(event, GFP_KERNEL);
+ if (!event_mod->event)
+ goto out_free;
+ }
+
+ list_add(&event_mod->list, &tr->mod_events);
+
+ return 0;
+
+ out_free:
+ free_event_mod(event_mod);
+
+ return -ENOMEM;
+}
+#else /* CONFIG_MODULES */
+static inline void clear_mod_events(struct trace_array *tr) { }
+static int cache_mod(struct trace_array *tr, const char *mod, int set,
+ const char *match, const char *system, const char *event)
+{
+ return -EINVAL;
+}
+#endif
+
static void ftrace_clear_events(struct trace_array *tr)
{
struct trace_event_file *file;
@@ -877,6 +991,7 @@ static void ftrace_clear_events(struct trace_array *tr)
list_for_each_entry(file, &tr->events, list) {
ftrace_event_enable_disable(file, 0);
}
+ clear_mod_events(tr);
mutex_unlock(&event_mutex);
}
@@ -1165,17 +1280,36 @@ static void remove_event_file_dir(struct trace_event_file *file)
*/
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
- const char *sub, const char *event, int set)
+ const char *sub, const char *event, int set,
+ const char *mod)
{
struct trace_event_file *file;
struct trace_event_call *call;
+ char *module __free(kfree) = NULL;
const char *name;
int ret = -EINVAL;
int eret = 0;
+ if (mod) {
+ char *p;
+
+ module = kstrdup(mod, GFP_KERNEL);
+ if (!module)
+ return -ENOMEM;
+
+ /* Replace all '-' with '_' as that's what modules do */
+ for (p = strchr(module, '-'); p; p = strchr(p + 1, '-'))
+ *p = '_';
+ }
+
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
+
+ /* If a module is specified, skip events that are not that module */
+ if (module && (!call->module || strcmp(module_name(call->module), module)))
+ continue;
+
name = trace_event_name(call);
if (!name || !call->class || !call->class->reg)
@@ -1208,16 +1342,24 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
ret = eret;
}
+ /*
+ * If this is a module setting and nothing was found,
+ * check if the module was loaded. If it wasn't, cache it.
+ */
+ if (module && ret == -EINVAL && !eret)
+ ret = cache_mod(tr, module, set, match, sub, event);
+
return ret;
}
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
- const char *sub, const char *event, int set)
+ const char *sub, const char *event, int set,
+ const char *mod)
{
int ret;
mutex_lock(&event_mutex);
- ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
+ ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set, mod);
mutex_unlock(&event_mutex);
return ret;
@@ -1225,11 +1367,20 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
- char *event = NULL, *sub = NULL, *match;
+ char *event = NULL, *sub = NULL, *match, *mod;
int ret;
if (!tr)
return -ENOENT;
+
+ /* Module events can be appended with :mod:<module> */
+ mod = strstr(buf, ":mod:");
+ if (mod) {
+ *mod = '\0';
+ /* move to the module name */
+ mod += 5;
+ }
+
/*
* The buf format can be <subsystem>:<event-name>
* *:<event-name> means any event by that name.
@@ -1252,9 +1403,13 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
sub = NULL;
if (!strlen(event) || strcmp(event, "*") == 0)
event = NULL;
+ } else if (mod) {
+ /* Allow wildcard for no length or star */
+ if (!strlen(match) || strcmp(match, "*") == 0)
+ match = NULL;
}
- ret = __ftrace_set_clr_event(tr, match, sub, event, set);
+ ret = __ftrace_set_clr_event(tr, match, sub, event, set, mod);
/* Put back the colon to allow this to be called again */
if (buf)
@@ -1282,7 +1437,7 @@ int trace_set_clr_event(const char *system, const char *event, int set)
if (!tr)
return -ENODEV;
- return __ftrace_set_clr_event(tr, NULL, system, event, set);
+ return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
@@ -1308,7 +1463,7 @@ int trace_array_set_clr_event(struct trace_array *tr, const char *system,
return -ENOENT;
set = (enable == true) ? 1 : 0;
- return __ftrace_set_clr_event(tr, NULL, system, event, set);
+ return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
@@ -1395,37 +1550,71 @@ static void *t_start(struct seq_file *m, loff_t *pos)
return file;
}
+enum set_event_iter_type {
+ SET_EVENT_FILE,
+ SET_EVENT_MOD,
+};
+
+struct set_event_iter {
+ enum set_event_iter_type type;
+ union {
+ struct trace_event_file *file;
+ struct event_mod_load *event_mod;
+ };
+};
+
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct trace_event_file *file = v;
+ struct set_event_iter *iter = v;
+ struct trace_event_file *file;
struct trace_array *tr = m->private;
(*pos)++;
- list_for_each_entry_continue(file, &tr->events, list) {
- if (file->flags & EVENT_FILE_FL_ENABLED)
- return file;
+ if (iter->type == SET_EVENT_FILE) {
+ file = iter->file;
+ list_for_each_entry_continue(file, &tr->events, list) {
+ if (file->flags & EVENT_FILE_FL_ENABLED) {
+ iter->file = file;
+ return iter;
+ }
+ }
+#ifdef CONFIG_MODULES
+ iter->type = SET_EVENT_MOD;
+ iter->event_mod = list_entry(&tr->mod_events, struct event_mod_load, list);
+#endif
}
+#ifdef CONFIG_MODULES
+ list_for_each_entry_continue(iter->event_mod, &tr->mod_events, list)
+ return iter;
+#endif
+
return NULL;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
- struct trace_event_file *file;
struct trace_array *tr = m->private;
+ struct set_event_iter *iter;
loff_t l;
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
mutex_lock(&event_mutex);
- file = list_entry(&tr->events, struct trace_event_file, list);
+ iter->type = SET_EVENT_FILE;
+ iter->file = list_entry(&tr->events, struct trace_event_file, list);
+
for (l = 0; l <= *pos; ) {
- file = s_next(m, file, &l);
- if (!file)
+ iter = s_next(m, iter, &l);
+ if (!iter)
break;
}
- return file;
+ return iter;
}
static int t_show(struct seq_file *m, void *v)
@@ -1445,6 +1634,45 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
+#ifdef CONFIG_MODULES
+static int s_show(struct seq_file *m, void *v)
+{
+ struct set_event_iter *iter = v;
+ const char *system;
+ const char *event;
+
+ if (iter->type == SET_EVENT_FILE)
+ return t_show(m, iter->file);
+
+ /* When match is set, system and event are not */
+ if (iter->event_mod->match) {
+ seq_printf(m, "%s:mod:%s\n", iter->event_mod->match,
+ iter->event_mod->module);
+ return 0;
+ }
+
+ system = iter->event_mod->system ? : "*";
+ event = iter->event_mod->event ? : "*";
+
+ seq_printf(m, "%s:%s:mod:%s\n", system, event, iter->event_mod->module);
+
+ return 0;
+}
+#else /* CONFIG_MODULES */
+static int s_show(struct seq_file *m, void *v)
+{
+ struct set_event_iter *iter = v;
+
+ return t_show(m, iter->file);
+}
+#endif
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ kfree(p);
+ t_stop(m, NULL);
+}
+
static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
@@ -1558,21 +1786,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;
+ guard(mutex)(&event_mutex);
+
switch (val) {
case 0:
case 1:
- ret = -ENODEV;
- mutex_lock(&event_mutex);
file = event_file_file(filp);
- if (likely(file)) {
- ret = tracing_update_buffers(file->tr);
- if (ret < 0) {
- mutex_unlock(&event_mutex);
- return ret;
- }
- ret = ftrace_event_enable_disable(file, val);
- }
- mutex_unlock(&event_mutex);
+ if (!file)
+ return -ENODEV;
+ ret = tracing_update_buffers(file->tr);
+ if (ret < 0)
+ return ret;
+ ret = ftrace_event_enable_disable(file, val);
+ if (ret < 0)
+ return ret;
break;
default:
@@ -1581,7 +1808,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt;
- return ret ? ret : cnt;
+ return cnt;
}
static ssize_t
@@ -1659,7 +1886,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (system)
name = system->name;
- ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
+ ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val, NULL);
if (ret)
goto out;
@@ -2157,7 +2384,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
if (type == TRACE_PIDS) {
filtered_pids = rcu_dereference_protected(tr->filtered_pids,
@@ -2173,7 +2400,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
if (ret < 0)
- goto out;
+ return ret;
if (type == TRACE_PIDS)
rcu_assign_pointer(tr->filtered_pids, pid_list);
@@ -2198,11 +2425,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
*/
on_each_cpu(ignore_task_cpu, tr, 1);
- out:
- mutex_unlock(&event_mutex);
-
- if (ret > 0)
- *ppos += ret;
+ *ppos += ret;
return ret;
}
@@ -2237,8 +2460,8 @@ static const struct seq_operations show_event_seq_ops = {
static const struct seq_operations show_set_event_seq_ops = {
.start = s_start,
.next = s_next,
- .show = t_show,
- .stop = t_stop,
+ .show = s_show,
+ .stop = s_stop,
};
static const struct seq_operations show_set_pid_seq_ops = {
@@ -3111,6 +3334,20 @@ static bool event_in_systems(struct trace_event_call *call,
return !*p || isspace(*p) || *p == ',';
}
+#ifdef CONFIG_HIST_TRIGGERS
+/*
+ * Wake up waiter on the hist_poll_wq from irq_work because the hist trigger
+ * may happen in any context.
+ */
+static void hist_poll_event_irq_work(struct irq_work *work)
+{
+ wake_up_all(&hist_poll_wq);
+}
+
+DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
+DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
+#endif
+
static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
@@ -3269,13 +3506,13 @@ int trace_add_event_call(struct trace_event_call *call)
int ret;
lockdep_assert_held(&event_mutex);
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
ret = __register_event(call, NULL);
- if (ret >= 0)
- __add_event_to_tracers(call);
+ if (ret < 0)
+ return ret;
- mutex_unlock(&trace_types_lock);
+ __add_event_to_tracers(call);
return ret;
}
EXPORT_SYMBOL_GPL(trace_add_event_call);
@@ -3355,6 +3592,28 @@ EXPORT_SYMBOL_GPL(trace_remove_event_call);
event++)
#ifdef CONFIG_MODULES
+static void update_mod_cache(struct trace_array *tr, struct module *mod)
+{
+ struct event_mod_load *event_mod, *n;
+
+ list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
+ if (strcmp(event_mod->module, mod->name) != 0)
+ continue;
+
+ __ftrace_set_clr_event_nolock(tr, event_mod->match,
+ event_mod->system,
+ event_mod->event, 1, mod->name);
+ free_event_mod(event_mod);
+ }
+}
+
+static void update_cache_events(struct module *mod)
+{
+ struct trace_array *tr;
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list)
+ update_mod_cache(tr, mod);
+}
static void trace_module_add_events(struct module *mod)
{
@@ -3377,6 +3636,8 @@ static void trace_module_add_events(struct module *mod)
__register_event(*call, mod);
__add_event_to_tracers(*call);
}
+
+ update_cache_events(mod);
}
static void trace_module_remove_events(struct module *mod)
@@ -3529,30 +3790,21 @@ struct trace_event_file *trace_get_event_file(const char *instance,
return ERR_PTR(ret);
}
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
file = find_event_file(tr, system, event);
if (!file) {
trace_array_put(tr);
- ret = -EINVAL;
- goto out;
+ return ERR_PTR(-EINVAL);
}
/* Don't let event modules unload while in use */
ret = trace_event_try_get_ref(file->event_call);
if (!ret) {
trace_array_put(tr);
- ret = -EBUSY;
- goto out;
+ return ERR_PTR(-EBUSY);
}
- ret = 0;
- out:
- mutex_unlock(&event_mutex);
-
- if (ret)
- file = ERR_PTR(ret);
-
return file;
}
EXPORT_SYMBOL_GPL(trace_get_event_file);
@@ -3770,6 +4022,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
struct trace_event_file *file;
struct ftrace_probe_ops *ops;
struct event_probe_data *data;
+ unsigned long count = -1;
const char *system;
const char *event;
char *number;
@@ -3789,12 +4042,11 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
event = strsep(&param, ":");
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
- ret = -EINVAL;
file = find_event_file(tr, system, event);
if (!file)
- goto out;
+ return -EINVAL;
enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
@@ -3803,74 +4055,62 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
else
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
- if (glob[0] == '!') {
- ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
- goto out;
- }
-
- ret = -ENOMEM;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto out;
+ if (glob[0] == '!')
+ return unregister_ftrace_function_probe_func(glob+1, tr, ops);
- data->enable = enable;
- data->count = -1;
- data->file = file;
+ if (param) {
+ number = strsep(&param, ":");
- if (!param)
- goto out_reg;
-
- number = strsep(&param, ":");
-
- ret = -EINVAL;
- if (!strlen(number))
- goto out_free;
+ if (!strlen(number))
+ return -EINVAL;
- /*
- * We use the callback data field (which is a pointer)
- * as our counter.
- */
- ret = kstrtoul(number, 0, &data->count);
- if (ret)
- goto out_free;
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = kstrtoul(number, 0, &count);
+ if (ret)
+ return ret;
+ }
- out_reg:
/* Don't let event modules unload while probe registered */
ret = trace_event_try_get_ref(file->event_call);
- if (!ret) {
- ret = -EBUSY;
- goto out_free;
- }
+ if (!ret)
+ return -EBUSY;
ret = __ftrace_event_enable_disable(file, 1, 1);
if (ret < 0)
goto out_put;
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_put;
+
+ data->enable = enable;
+ data->count = count;
+ data->file = file;
+
ret = register_ftrace_function_probe(glob, tr, ops, data);
/*
* The above returns on success the # of functions enabled,
* but if it didn't find any functions it returns zero.
* Consider no functions a failure too.
*/
- if (!ret) {
- ret = -ENOENT;
- goto out_disable;
- } else if (ret < 0)
- goto out_disable;
+
/* Just return zero, not the number of enabled functions */
- ret = 0;
- out:
- mutex_unlock(&event_mutex);
- return ret;
+ if (ret > 0)
+ return 0;
+
+ kfree(data);
+
+ if (!ret)
+ ret = -ENOENT;
- out_disable:
__ftrace_event_enable_disable(file, 0, 1);
out_put:
trace_event_put_ref(file->event_call);
- out_free:
- kfree(data);
- goto out;
+ return ret;
}
static struct ftrace_func_command event_enable_cmd = {
@@ -4093,20 +4333,17 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
int ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
ret = create_event_toplevel_files(parent, tr);
if (ret)
- goto out_unlock;
+ return ret;
down_write(&trace_event_sem);
__trace_early_add_event_dirs(tr);
up_write(&trace_event_sem);
- out_unlock:
- mutex_unlock(&event_mutex);
-
- return ret;
+ return 0;
}
/* Must be called with event_mutex held */
@@ -4121,7 +4358,7 @@ int event_trace_del_tracer(struct trace_array *tr)
__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
/* Disable any running events */
- __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+ __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0, NULL);
/* Make sure no more events are being executed */
tracepoint_synchronize_unregister();
@@ -4405,7 +4642,7 @@ static __init void event_trace_self_tests(void)
pr_info("Testing event system %s: ", system->name);
- ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
+ ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1, NULL);
if (WARN_ON_ONCE(ret)) {
pr_warn("error enabling system %s\n",
system->name);
@@ -4414,7 +4651,7 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
- ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
+ ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0, NULL);
if (WARN_ON_ONCE(ret)) {
pr_warn("error disabling system %s\n",
system->name);
@@ -4429,7 +4666,7 @@ static __init void event_trace_self_tests(void)
pr_info("Running tests on all trace events:\n");
pr_info("Testing all events: ");
- ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
+ ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1, NULL);
if (WARN_ON_ONCE(ret)) {
pr_warn("error enabling all events\n");
return;
@@ -4438,7 +4675,7 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
/* reset sysname */
- ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
+ ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0, NULL);
if (WARN_ON_ONCE(ret)) {
pr_warn("error disabling all events\n");
return;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 78051de581e7..0993dfc1c5c1 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -2405,13 +2405,11 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
struct event_filter *filter = NULL;
int err = 0;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
/* Make sure the system still has events */
- if (!dir->nr_events) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!dir->nr_events)
+ return -ENODEV;
if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(dir, tr);
@@ -2422,7 +2420,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
tracepoint_synchronize_unregister();
filter_free_subsystem_filters(dir, tr);
__free_filter(filter);
- goto out_unlock;
+ return 0;
}
err = create_system_filter(dir, filter_string, &filter);
@@ -2434,8 +2432,6 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
__free_filter(system->filter);
system->filter = filter;
}
-out_unlock:
- mutex_unlock(&event_mutex);
return err;
}
@@ -2612,17 +2608,15 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
struct event_filter *filter = NULL;
struct trace_event_call *call;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
call = event->tp_event;
- err = -EINVAL;
if (!call)
- goto out_unlock;
+ return -EINVAL;
- err = -EEXIST;
if (event->filter)
- goto out_unlock;
+ return -EEXIST;
err = create_filter(NULL, call, filter_str, false, &filter);
if (err)
@@ -2637,9 +2631,6 @@ free_filter:
if (err || ftrace_event_is_function(call))
__free_filter(filter);
-out_unlock:
- mutex_unlock(&event_mutex);
-
return err;
}
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 9c058aa8baf3..261163b00137 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -5311,6 +5311,8 @@ static void event_hist_trigger(struct event_trigger_data *data,
if (resolve_var_refs(hist_data, key, var_ref_vals, true))
hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
+
+ hist_poll_wakeup();
}
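hist_poll_wakeup() itself is not defined in this hunk; judging from the hist_poll_wq wait queue and hist_poll_work irq_work added in trace_events.c later in this patch, it presumably defers the wake-up through irq_work because the trigger can fire in any context. A sketch of that assumption:

static inline void hist_poll_wakeup(void)
{
	if (wq_has_sleeper(&hist_poll_wq))
		irq_work_queue(&hist_poll_work);
}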
static void hist_trigger_stacktrace_print(struct seq_file *m,
@@ -5590,49 +5592,128 @@ static void hist_trigger_show(struct seq_file *m,
n_entries, (u64)atomic64_read(&hist_data->map->drops));
}
+struct hist_file_data {
+ struct file *file;
+ u64 last_read;
+ u64 last_act;
+};
+
+static u64 get_hist_hit_count(struct trace_event_file *event_file)
+{
+ struct hist_trigger_data *hist_data;
+ struct event_trigger_data *data;
+ u64 ret = 0;
+
+ list_for_each_entry(data, &event_file->triggers, list) {
+ if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ hist_data = data->private_data;
+ ret += atomic64_read(&hist_data->map->hits);
+ }
+ }
+ return ret;
+}
+
static int hist_show(struct seq_file *m, void *v)
{
+ struct hist_file_data *hist_file = m->private;
struct event_trigger_data *data;
struct trace_event_file *event_file;
- int n = 0, ret = 0;
+ int n = 0;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
- event_file = event_file_file(m->private);
- if (unlikely(!event_file)) {
- ret = -ENODEV;
- goto out_unlock;
- }
+ event_file = event_file_file(hist_file->file);
+ if (unlikely(!event_file))
+ return -ENODEV;
list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_show(m, data, n++);
}
+ hist_file->last_read = get_hist_hit_count(event_file);
+ /*
+ * Update last_act too so that poll()/POLLPRI can wait for the next
+ * event after any syscall on hist file.
+ */
+ hist_file->last_act = hist_file->last_read;
+
+ return 0;
+}
+
+static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct trace_event_file *event_file;
+ struct seq_file *m = file->private_data;
+ struct hist_file_data *hist_file = m->private;
+ __poll_t ret = 0;
+ u64 cnt;
+
+ guard(mutex)(&event_mutex);
- out_unlock:
- mutex_unlock(&event_mutex);
+ event_file = event_file_data(file);
+ if (!event_file)
+ return EPOLLERR;
+
+ hist_poll_wait(file, wait);
+
+ cnt = get_hist_hit_count(event_file);
+ if (hist_file->last_read != cnt)
+ ret |= EPOLLIN | EPOLLRDNORM;
+ if (hist_file->last_act != cnt) {
+ hist_file->last_act = cnt;
+ ret |= EPOLLPRI;
+ }
return ret;
}
+static int event_hist_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct hist_file_data *hist_file = m->private;
+
+ kfree(hist_file);
+ return tracing_single_release_file_tr(inode, file);
+}
+
static int event_hist_open(struct inode *inode, struct file *file)
{
+ struct trace_event_file *event_file;
+ struct hist_file_data *hist_file;
int ret;
ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;
+ guard(mutex)(&event_mutex);
+
+ event_file = event_file_data(file);
+ if (!event_file)
+ return -ENODEV;
+
+ hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
+ if (!hist_file)
+ return -ENOMEM;
+
+ hist_file->file = file;
+ hist_file->last_act = get_hist_hit_count(event_file);
+
/* Clear private_data to avoid warning in single_open() */
file->private_data = NULL;
- return single_open(file, hist_show, file);
+ ret = single_open(file, hist_show, hist_file);
+ if (ret)
+ kfree(hist_file);
+
+ return ret;
}
const struct file_operations event_hist_fops = {
.open = event_hist_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = tracing_single_release_file_tr,
+ .release = event_hist_release,
+ .poll = event_hist_poll,
};
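With .poll wired up, user space can block on an event's hist file: EPOLLIN/EPOLLRDNORM is reported when the histogram has hits not yet seen by a read, and EPOLLPRI is reported for any new hit since the last syscall on the file. A minimal user-space sketch (the tracefs mount point and event path are assumptions):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Block until the given hist file records a new histogram hit. */
static int wait_for_hist_hit(const char *hist_path)
{
	struct pollfd pfd = { .events = POLLPRI };
	int ret;

	pfd.fd = open(hist_path, O_RDONLY);
	if (pfd.fd < 0)
		return -1;

	ret = poll(&pfd, 1, -1);	/* wait indefinitely */
	close(pfd.fd);
	return ret > 0 ? 0 : -1;
}

For instance, wait_for_hist_hit("/sys/kernel/tracing/events/sched/sched_switch/hist") would return once a hist trigger on sched_switch accumulates a new hit.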
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
@@ -5873,25 +5954,19 @@ static int hist_debug_show(struct seq_file *m, void *v)
{
struct event_trigger_data *data;
struct trace_event_file *event_file;
- int n = 0, ret = 0;
+ int n = 0;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
event_file = event_file_file(m->private);
- if (unlikely(!event_file)) {
- ret = -ENODEV;
- goto out_unlock;
- }
+ if (unlikely(!event_file))
+ return -ENODEV;
list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_debug_show(m, data, n++);
}
-
- out_unlock:
- mutex_unlock(&event_mutex);
-
- return ret;
+ return 0;
}
static int event_hist_debug_open(struct inode *inode, struct file *file)
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index c82b401a294d..e3f7d09e5512 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -49,16 +49,11 @@ static char *last_cmd;
static int errpos(const char *str)
{
- int ret = 0;
-
- mutex_lock(&lastcmd_mutex);
+ guard(mutex)(&lastcmd_mutex);
if (!str || !last_cmd)
- goto out;
+ return 0;
- ret = err_pos(last_cmd, str);
- out:
- mutex_unlock(&lastcmd_mutex);
- return ret;
+ return err_pos(last_cmd, str);
}
static void last_cmd_set(const char *str)
@@ -74,14 +69,12 @@ static void last_cmd_set(const char *str)
static void synth_err(u8 err_type, u16 err_pos)
{
- mutex_lock(&lastcmd_mutex);
+ guard(mutex)(&lastcmd_mutex);
if (!last_cmd)
- goto out;
+ return;
tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
err_type, err_pos);
- out:
- mutex_unlock(&lastcmd_mutex);
}
static int create_synth_event(const char *raw_command);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index a5e3d6acf1e1..d45448947094 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -211,12 +211,10 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
if (ret)
return ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
- if (unlikely(!event_file_file(file))) {
- mutex_unlock(&event_mutex);
+ if (unlikely(!event_file_file(file)))
return -ENODEV;
- }
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
@@ -239,8 +237,6 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
}
}
- mutex_unlock(&event_mutex);
-
return ret;
}
@@ -248,7 +244,6 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
{
char *command, *next;
struct event_command *p;
- int ret = -EINVAL;
next = buff = skip_spaces(buff);
command = strsep(&next, ": \t");
@@ -259,17 +254,14 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
}
command = (command[0] != '!') ? command : command + 1;
- mutex_lock(&trigger_cmd_mutex);
+ guard(mutex)(&trigger_cmd_mutex);
+
list_for_each_entry(p, &trigger_commands, list) {
- if (strcmp(p->name, command) == 0) {
- ret = p->parse(p, file, buff, command, next);
- goto out_unlock;
- }
+ if (strcmp(p->name, command) == 0)
+ return p->parse(p, file, buff, command, next);
}
- out_unlock:
- mutex_unlock(&trigger_cmd_mutex);
- return ret;
+ return -EINVAL;
}
static ssize_t event_trigger_regex_write(struct file *file,
@@ -278,7 +270,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
{
struct trace_event_file *event_file;
ssize_t ret;
- char *buf;
+ char *buf __free(kfree) = NULL;
if (!cnt)
return 0;
@@ -292,24 +284,18 @@ static ssize_t event_trigger_regex_write(struct file *file,
strim(buf);
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
+
event_file = event_file_file(file);
- if (unlikely(!event_file)) {
- mutex_unlock(&event_mutex);
- kfree(buf);
+ if (unlikely(!event_file))
return -ENODEV;
- }
- ret = trigger_process_regex(event_file, buf);
- mutex_unlock(&event_mutex);
- kfree(buf);
+ ret = trigger_process_regex(event_file, buf);
if (ret < 0)
- goto out;
+ return ret;
*ppos += cnt;
- ret = cnt;
- out:
- return ret;
+ return cnt;
}
static int event_trigger_regex_release(struct inode *inode, struct file *file)
@@ -359,20 +345,16 @@ const struct file_operations event_trigger_fops = {
__init int register_event_command(struct event_command *cmd)
{
struct event_command *p;
- int ret = 0;
- mutex_lock(&trigger_cmd_mutex);
+ guard(mutex)(&trigger_cmd_mutex);
+
list_for_each_entry(p, &trigger_commands, list) {
- if (strcmp(cmd->name, p->name) == 0) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (strcmp(cmd->name, p->name) == 0)
+ return -EBUSY;
}
list_add(&cmd->list, &trigger_commands);
- out_unlock:
- mutex_unlock(&trigger_cmd_mutex);
- return ret;
+ return 0;
}
/*
@@ -382,20 +364,17 @@ __init int register_event_command(struct event_command *cmd)
__init int unregister_event_command(struct event_command *cmd)
{
struct event_command *p, *n;
- int ret = -ENODEV;
- mutex_lock(&trigger_cmd_mutex);
+ guard(mutex)(&trigger_cmd_mutex);
+
list_for_each_entry_safe(p, n, &trigger_commands, list) {
if (strcmp(cmd->name, p->name) == 0) {
- ret = 0;
list_del_init(&p->list);
- goto out_unlock;
+ return 0;
}
}
- out_unlock:
- mutex_unlock(&trigger_cmd_mutex);
- return ret;
+ return -ENODEV;
}
/**
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 17bcad8f79de..97325fbd6283 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -2899,7 +2899,7 @@ static int set_max_user_events_sysctl(const struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table user_event_sysctls[] = {
+static const struct ctl_table user_event_sysctls[] = {
{
.procname = "user_events_max",
.data = &max_user_events,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index dc62eb93837a..136c750b0b4d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -198,7 +198,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
* returning from the function.
*/
if (ftrace_graph_notrace_addr(trace->func)) {
- *task_var |= TRACE_GRAPH_NOTRACE_BIT;
+ *task_var |= TRACE_GRAPH_NOTRACE;
/*
* Need to return 1 to have the return called
* that will clear the NOTRACE bit.
@@ -266,12 +266,10 @@ __trace_graph_function(struct trace_array *tr,
struct ftrace_graph_ret ret = {
.func = ip,
.depth = 0,
- .calltime = time,
- .rettime = time,
};
__trace_graph_entry(tr, &ent, trace_ctx);
- __trace_graph_return(tr, &ret, trace_ctx);
+ __trace_graph_return(tr, &ret, trace_ctx, time, time);
}
void
@@ -283,8 +281,9 @@ trace_graph_function(struct trace_array *tr,
}
void __trace_graph_return(struct trace_array *tr,
- struct ftrace_graph_ret *trace,
- unsigned int trace_ctx)
+ struct ftrace_graph_ret *trace,
+ unsigned int trace_ctx,
+ u64 calltime, u64 rettime)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -296,6 +295,8 @@ void __trace_graph_return(struct trace_array *tr,
return;
entry = ring_buffer_event_data(event);
entry->ret = *trace;
+ entry->calltime = calltime;
+ entry->rettime = rettime;
trace_buffer_unlock_commit_nostack(buffer, event);
}
@@ -317,10 +318,13 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
struct trace_array_cpu *data;
struct fgraph_times *ftimes;
unsigned int trace_ctx;
+ u64 calltime, rettime;
long disabled;
int size;
int cpu;
+ rettime = trace_clock_local();
+
ftrace_graph_addr_finish(gops, trace);
if (*task_var & TRACE_GRAPH_NOTRACE) {
@@ -334,7 +338,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
handle_nosleeptime(trace, ftimes, size);
- trace->calltime = ftimes->calltime;
+ calltime = ftimes->calltime;
preempt_disable_notrace();
cpu = raw_smp_processor_id();
@@ -342,7 +346,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
disabled = atomic_read(&data->disabled);
if (likely(!disabled)) {
trace_ctx = tracing_gen_ctx();
- __trace_graph_return(tr, trace, trace_ctx);
+ __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
}
preempt_enable_notrace();
}
@@ -367,10 +371,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
handle_nosleeptime(trace, ftimes, size);
- trace->calltime = ftimes->calltime;
-
if (tracing_thresh &&
- (trace->rettime - ftimes->calltime < tracing_thresh))
+ (trace_clock_local() - ftimes->calltime < tracing_thresh))
return;
else
trace_graph_return(trace, gops, fregs);
@@ -856,7 +858,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
graph_ret = &ret_entry->ret;
call = &entry->graph_ent;
- duration = graph_ret->rettime - graph_ret->calltime;
+ duration = ret_entry->rettime - ret_entry->calltime;
func = call->func + iter->tr->text_delta;
@@ -1137,11 +1139,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
}
static enum print_line_t
-print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
struct trace_entry *ent, struct trace_iterator *iter,
u32 flags)
{
- unsigned long long duration = trace->rettime - trace->calltime;
+ struct ftrace_graph_ret *trace = &retentry->ret;
+ u64 calltime = retentry->calltime;
+ u64 rettime = retentry->rettime;
+ unsigned long long duration = rettime - calltime;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
unsigned long func;
@@ -1342,7 +1347,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
- return print_graph_return(&field->ret, s, entry, iter, flags);
+ return print_graph_return(field, s, entry, iter, flags);
}
case TRACE_STACK:
case TRACE_FN:
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 08786c59d397..7294ad676379 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -223,6 +223,7 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
unsigned long flags;
unsigned int trace_ctx;
u64 *calltime;
+ u64 rettime;
int size;
ftrace_graph_addr_finish(gops, trace);
@@ -230,13 +231,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
if (!func_prolog_dec(tr, &data, &flags))
return;
+ rettime = trace_clock_local();
calltime = fgraph_retrieve_data(gops->idx, &size);
if (!calltime)
return;
- trace->calltime = *calltime;
trace_ctx = tracing_gen_ctx_flags(flags);
- __trace_graph_return(tr, trace, trace_ctx);
+ __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
atomic_dec(&data->disabled);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 0642ea174849..d8d5f18a141a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "trace_kprobe: " fmt
#include <linux/bpf-cgroup.h>
+#include <linux/cleanup.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -257,6 +258,9 @@ static void free_trace_kprobe(struct trace_kprobe *tk)
}
}
+DEFINE_FREE(free_trace_kprobe, struct trace_kprobe *,
+ if (!IS_ERR_OR_NULL(_T)) free_trace_kprobe(_T))
+
/*
* Allocate new trace_probe and initialize it (including kprobes).
*/
@@ -268,7 +272,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
int maxactive,
int nargs, bool is_return)
{
- struct trace_kprobe *tk;
+ struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
int ret = -ENOMEM;
tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
@@ -277,12 +281,12 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
tk->nhit = alloc_percpu(unsigned long);
if (!tk->nhit)
- goto error;
+ return ERR_PTR(ret);
if (symbol) {
tk->symbol = kstrdup(symbol, GFP_KERNEL);
if (!tk->symbol)
- goto error;
+ return ERR_PTR(ret);
tk->rp.kp.symbol_name = tk->symbol;
tk->rp.kp.offset = offs;
} else
@@ -299,13 +303,10 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
ret = trace_probe_init(&tk->tp, event, group, false, nargs);
if (ret < 0)
- goto error;
+ return ERR_PTR(ret);
dyn_event_init(&tk->devent, &trace_kprobe_ops);
- return tk;
-error:
- free_trace_kprobe(tk);
- return ERR_PTR(ret);
+ return_ptr(tk);
}
static struct trace_kprobe *find_trace_kprobe(const char *event,
@@ -634,7 +635,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
struct trace_kprobe *old_tk;
int ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp));
@@ -642,11 +643,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
- ret = -EEXIST;
- } else {
- ret = append_trace_kprobe(tk, old_tk);
+ return -EEXIST;
}
- goto end;
+ return append_trace_kprobe(tk, old_tk);
}
/* Register new event */
@@ -657,7 +656,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
- goto end;
+ return ret;
}
/* Register k*probe */
@@ -672,8 +671,6 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
-end:
- mutex_unlock(&event_mutex);
return ret;
}
@@ -706,7 +703,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
return NOTIFY_DONE;
/* Update probes on coming module */
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
if (trace_kprobe_within_module(tk, mod)) {
/* Don't need to check busy - this should have gone. */
@@ -718,7 +715,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
module_name(mod), ret);
}
}
- mutex_unlock(&event_mutex);
return NOTIFY_DONE;
}
@@ -840,7 +836,8 @@ out:
static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
struct pt_regs *regs);
-static int __trace_kprobe_create(int argc, const char *argv[])
+static int trace_kprobe_create_internal(int argc, const char *argv[],
+ struct traceprobe_parse_context *ctx)
{
/*
* Argument syntax:
@@ -866,11 +863,12 @@ static int __trace_kprobe_create(int argc, const char *argv[])
* Type of args:
* FETCHARG:TYPE : use TYPE instead of unsigned long.
*/
- struct trace_kprobe *tk = NULL;
+ struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
int i, len, new_argc = 0, ret = 0;
bool is_return = false;
- char *symbol = NULL, *tmp = NULL;
- const char **new_argv = NULL;
+ char *symbol __free(kfree) = NULL;
+ char *tmp = NULL;
+ const char **new_argv __free(kfree) = NULL;
const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
enum probe_print_type ptype;
int maxactive = 0;
@@ -879,8 +877,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
char buf[MAX_EVENT_NAME_LEN];
char gbuf[MAX_EVENT_NAME_LEN];
char abuf[MAX_BTF_ARGS_LEN];
- char *dbuf = NULL;
- struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
+ char *dbuf __free(kfree) = NULL;
switch (argv[0][0]) {
case 'r':
@@ -894,8 +891,6 @@ static int __trace_kprobe_create(int argc, const char *argv[])
if (argc < 2)
return -ECANCELED;
- trace_probe_log_init("trace_kprobe", argc, argv);
-
event = strchr(&argv[0][1], ':');
if (event)
event++;
@@ -903,7 +898,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
if (isdigit(argv[0][1])) {
if (!is_return) {
trace_probe_log_err(1, BAD_MAXACT_TYPE);
- goto parse_error;
+ return -EINVAL;
}
if (event)
len = event - &argv[0][1] - 1;
@@ -911,21 +906,21 @@ static int __trace_kprobe_create(int argc, const char *argv[])
len = strlen(&argv[0][1]);
if (len > MAX_EVENT_NAME_LEN - 1) {
trace_probe_log_err(1, BAD_MAXACT);
- goto parse_error;
+ return -EINVAL;
}
memcpy(buf, &argv[0][1], len);
buf[len] = '\0';
ret = kstrtouint(buf, 0, &maxactive);
if (ret || !maxactive) {
trace_probe_log_err(1, BAD_MAXACT);
- goto parse_error;
+ return -EINVAL;
}
/* kretprobes instances are iterated over via a list. The
* maximum should stay reasonable.
*/
if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
trace_probe_log_err(1, MAXACT_TOO_BIG);
- goto parse_error;
+ return -EINVAL;
}
}
@@ -934,16 +929,13 @@ static int __trace_kprobe_create(int argc, const char *argv[])
if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
trace_probe_log_set_index(1);
/* Check whether uprobe event specified */
- if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
- ret = -ECANCELED;
- goto error;
- }
+ if (strchr(argv[1], '/') && strchr(argv[1], ':'))
+ return -ECANCELED;
+
/* a symbol specified */
symbol = kstrdup(argv[1], GFP_KERNEL);
- if (!symbol) {
- ret = -ENOMEM;
- goto error;
- }
+ if (!symbol)
+ return -ENOMEM;
tmp = strchr(symbol, '%');
if (tmp) {
@@ -952,7 +944,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
is_return = true;
} else {
trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
- goto parse_error;
+ return -EINVAL;
}
}
@@ -960,7 +952,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret || offset < 0 || offset > UINT_MAX) {
trace_probe_log_err(0, BAD_PROBE_ADDR);
- goto parse_error;
+ return -EINVAL;
}
ret = validate_probe_symbol(symbol);
if (ret) {
@@ -968,17 +960,17 @@ static int __trace_kprobe_create(int argc, const char *argv[])
trace_probe_log_err(0, NON_UNIQ_SYMBOL);
else
trace_probe_log_err(0, BAD_PROBE_ADDR);
- goto parse_error;
+ return -EINVAL;
}
if (is_return)
- ctx.flags |= TPARG_FL_RETURN;
+ ctx->flags |= TPARG_FL_RETURN;
ret = kprobe_on_func_entry(NULL, symbol, offset);
if (ret == 0 && !is_return)
- ctx.flags |= TPARG_FL_FENTRY;
+ ctx->flags |= TPARG_FL_FENTRY;
/* Defer the ENOENT case until register kprobe */
if (ret == -EINVAL && is_return) {
trace_probe_log_err(0, BAD_RETPROBE);
- goto parse_error;
+ return -EINVAL;
}
}
@@ -987,7 +979,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
- goto parse_error;
+ return ret;
}
if (!event) {
@@ -1003,26 +995,24 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
argc -= 2; argv += 2;
- ctx.funcname = symbol;
+ ctx->funcname = symbol;
new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
- abuf, MAX_BTF_ARGS_LEN, &ctx);
+ abuf, MAX_BTF_ARGS_LEN, ctx);
if (IS_ERR(new_argv)) {
ret = PTR_ERR(new_argv);
new_argv = NULL;
- goto out;
+ return ret;
}
if (new_argv) {
argc = new_argc;
argv = new_argv;
}
- if (argc > MAX_TRACE_ARGS) {
- ret = -E2BIG;
- goto out;
- }
+ if (argc > MAX_TRACE_ARGS)
+ return -E2BIG;
ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
if (ret)
- goto out;
+ return ret;
/* setup a probe */
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
@@ -1031,16 +1021,16 @@ static int __trace_kprobe_create(int argc, const char *argv[])
ret = PTR_ERR(tk);
/* This must return -ENOMEM, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM);
- goto out; /* We know tk is not allocated */
+ return ret; /* We know tk is not allocated */
}
/* parse arguments */
for (i = 0; i < argc; i++) {
trace_probe_log_set_index(i + 2);
- ctx.offset = 0;
- ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
+ ctx->offset = 0;
+ ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx);
if (ret)
- goto error; /* This can be -ENOMEM */
+ return ret; /* This can be -ENOMEM */
}
/* entry handler for kretprobe */
if (is_return && tk->tp.entry_arg) {
@@ -1051,7 +1041,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
ret = traceprobe_set_print_fmt(&tk->tp, ptype);
if (ret < 0)
- goto error;
+ return ret;
ret = register_trace_kprobe(tk);
if (ret) {
@@ -1062,27 +1052,34 @@ static int __trace_kprobe_create(int argc, const char *argv[])
trace_probe_log_err(0, BAD_PROBE_ADDR);
else if (ret != -ENOMEM && ret != -EEXIST)
trace_probe_log_err(0, FAIL_REG_PROBE);
- goto error;
+ return ret;
}
+ /*
+ * Here, 'tk' has been registered to the list successfully,
+ * so we don't need to free it.
+ */
+ tk = NULL;
+
+ return 0;
+}
+
+static int trace_kprobe_create_cb(int argc, const char *argv[])
+{
+ struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
+ int ret;
+
+ trace_probe_log_init("trace_kprobe", argc, argv);
+
+ ret = trace_kprobe_create_internal(argc, argv, &ctx);
-out:
traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
- kfree(new_argv);
- kfree(symbol);
- kfree(dbuf);
return ret;
-
-parse_error:
- ret = -EINVAL;
-error:
- free_trace_kprobe(tk);
- goto out;
}
static int trace_kprobe_create(const char *raw_command)
{
- return trace_probe_create(raw_command, __trace_kprobe_create);
+ return trace_probe_create(raw_command, trace_kprobe_create_cb);
}
static int create_or_delete_trace_kprobe(const char *raw_command)
@@ -1898,7 +1895,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
bool is_return)
{
enum probe_print_type ptype;
- struct trace_kprobe *tk;
+ struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
int ret;
char *event;
@@ -1929,19 +1926,14 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
ptype = trace_kprobe_is_return(tk) ?
PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
- if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
- ret = -ENOMEM;
- goto error;
- }
+ if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0)
+ return ERR_PTR(-ENOMEM);
ret = __register_trace_kprobe(tk);
if (ret < 0)
- goto error;
+ return ERR_PTR(ret);
- return trace_probe_event_call(&tk->tp);
-error:
- free_trace_kprobe(tk);
- return ERR_PTR(ret);
+ return trace_probe_event_call(&(no_free_ptr(tk)->tp));
}
void destroy_local_trace_kprobe(struct trace_event_call *event_call)
@@ -1970,13 +1962,12 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk;
struct dyn_event *pos;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list)
if (file->event_call == trace_probe_event_call(&tk->tp))
trace_event_enable_disable(file, 1, 0);
}
- mutex_unlock(&event_mutex);
}
static __init void setup_boot_kprobe_events(void)
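The trace_kprobe.c changes above pair a DEFINE_FREE(free_trace_kprobe, ...) destructor with __free(free_trace_kprobe), so every error path in alloc_trace_kprobe(), trace_kprobe_create_internal() and create_local_trace_kprobe() frees the half-built probe automatically. On success the pointer is handed out of the scope instead: return_ptr()/no_free_ptr() steal it from the cleanup variable, and setting tk = NULL after register_trace_kprobe() has the same effect. A small userspace sketch of that ownership-transfer pattern, assuming GCC or Clang; the struct and helper names here are illustrative, not the kernel helpers.

/* Userspace sketch of __free() plus ownership transfer, assuming GCC/Clang. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct probe {
	char *symbol;
};

static void probe_destroy(struct probe *p)
{
	if (!p)
		return;
	free(p->symbol);
	free(p);
}

static void probe_cleanup(struct probe **pp) { probe_destroy(*pp); }
#define AUTO_PROBE __attribute__((cleanup(probe_cleanup)))

/* Hand the pointer out of the scope so the cleanup handler sees NULL. */
static struct probe *probe_steal(struct probe **pp)
{
	struct probe *p = *pp;

	*pp = NULL;
	return p;
}

static struct probe *probe_create(const char *symbol)
{
	struct probe *p AUTO_PROBE = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	p->symbol = strdup(symbol);
	if (!p->symbol)
		return NULL;		/* p itself is freed by the cleanup handler */

	return probe_steal(&p);		/* success: transfer ownership to the caller */
}

int main(void)
{
	struct probe *p = probe_create("do_sys_open");

	if (p)
		printf("created probe on %s\n", p->symbol);
	probe_destroy(p);
	return 0;
}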
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index b9f96c77527d..f3a2722ee4c0 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1229,6 +1229,8 @@ static void trace_sched_migrate_callback(void *data, struct task_struct *p, int
}
}
+static bool monitor_enabled;
+
static int register_migration_monitor(void)
{
int ret = 0;
@@ -1237,16 +1239,25 @@ static int register_migration_monitor(void)
* Timerlat thread migration check is only required when running timerlat in user-space.
* Thus, enable callback only if timerlat is set with no workload.
*/
- if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
+ if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ if (WARN_ON_ONCE(monitor_enabled))
+ return 0;
+
ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
+ if (!ret)
+ monitor_enabled = true;
+ }
return ret;
}
static void unregister_migration_monitor(void)
{
- if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
- unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
+ if (!monitor_enabled)
+ return;
+
+ unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
+ monitor_enabled = false;
}
#else
static int register_migration_monitor(void)
@@ -2083,26 +2094,21 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
{
unsigned int cpu = smp_processor_id();
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (!osnoise_has_registered_instances())
- goto out_unlock_trace;
+ return;
- mutex_lock(&interface_lock);
- cpus_read_lock();
+ guard(mutex)(&interface_lock);
+ guard(cpus_read_lock)();
if (!cpu_online(cpu))
- goto out_unlock;
+ return;
+
if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
- goto out_unlock;
+ return;
start_kthread(cpu);
-
-out_unlock:
- cpus_read_unlock();
- mutex_unlock(&interface_lock);
-out_unlock_trace:
- mutex_unlock(&trace_types_lock);
}
static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
@@ -2300,31 +2306,22 @@ static ssize_t
osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
loff_t *ppos)
{
- char *mask_str;
+ char *mask_str __free(kfree) = NULL;
int len;
- mutex_lock(&interface_lock);
+ guard(mutex)(&interface_lock);
len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
mask_str = kmalloc(len, GFP_KERNEL);
- if (!mask_str) {
- count = -ENOMEM;
- goto out_unlock;
- }
+ if (!mask_str)
+ return -ENOMEM;
len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
- if (len >= count) {
- count = -EINVAL;
- goto out_free;
- }
+ if (len >= count)
+ return -EINVAL;
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
-out_free:
- kfree(mask_str);
-out_unlock:
- mutex_unlock(&interface_lock);
-
return count;
}
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 16a5e368e7b7..8f58ee1e8858 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1409,7 +1409,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
struct traceprobe_parse_context *ctx)
{
struct fetch_insn *code, *tmp = NULL;
- char *type, *arg;
+ char *type, *arg __free(kfree) = NULL;
int ret, len;
len = strlen(argv);
@@ -1426,22 +1426,16 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
return -ENOMEM;
parg->comm = kstrdup(arg, GFP_KERNEL);
- if (!parg->comm) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!parg->comm)
+ return -ENOMEM;
type = parse_probe_arg_type(arg, parg, ctx);
- if (IS_ERR(type)) {
- ret = PTR_ERR(type);
- goto out;
- }
+ if (IS_ERR(type))
+ return PTR_ERR(type);
code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
- if (!code) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!code)
+ return -ENOMEM;
code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
ctx->last_type = NULL;
@@ -1497,8 +1491,6 @@ fail:
kfree(code->data);
}
kfree(tmp);
-out:
- kfree(arg);
return ret;
}
@@ -1668,7 +1660,7 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
{
const struct btf_param *params = NULL;
int i, j, n, used, ret, args_idx = -1;
- const char **new_argv = NULL;
+ const char **new_argv __free(kfree) = NULL;
ret = argv_has_var_arg(argc, argv, &args_idx, ctx);
if (ret < 0)
@@ -1707,7 +1699,7 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
ret = sprint_nth_btf_arg(n, "", buf + used,
bufsize - used, ctx);
if (ret < 0)
- goto error;
+ return ERR_PTR(ret);
new_argv[j++] = buf + used;
used += ret + 1;
@@ -1721,25 +1713,20 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
n = simple_strtoul(argv[i] + 4, &type, 10);
if (type && !(*type == ':' || *type == '\0')) {
trace_probe_log_err(0, BAD_VAR);
- ret = -ENOENT;
- goto error;
+ return ERR_PTR(-ENOENT);
}
/* Note: $argN starts from $arg1 */
ret = sprint_nth_btf_arg(n - 1, type, buf + used,
bufsize - used, ctx);
if (ret < 0)
- goto error;
+ return ERR_PTR(ret);
new_argv[j++] = buf + used;
used += ret + 1;
} else
new_argv[j++] = argv[i];
}
- return new_argv;
-
-error:
- kfree(new_argv);
- return ERR_PTR(ret);
+ return_ptr(new_argv);
}
/* @buf: *buf must be equal to NULL. Caller must to free *buf */
@@ -1747,14 +1734,14 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
{
int i, used, ret;
const int bufsize = MAX_DENTRY_ARGS_LEN;
- char *tmpbuf = NULL;
+ char *tmpbuf __free(kfree) = NULL;
if (*buf)
return -EINVAL;
used = 0;
for (i = 0; i < argc; i++) {
- char *tmp;
+ char *tmp __free(kfree) = NULL;
char *equal;
size_t arg_len;
@@ -1769,7 +1756,7 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
tmp = kstrdup(argv[i], GFP_KERNEL);
if (!tmp)
- goto nomem;
+ return -ENOMEM;
equal = strchr(tmp, '=');
if (equal)
@@ -1790,18 +1777,14 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
offsetof(struct file, f_path.dentry),
equal ? equal + 1 : tmp);
- kfree(tmp);
if (ret >= bufsize - used)
- goto nomem;
+ return -ENOMEM;
argv[i] = tmpbuf + used;
used += ret + 1;
}
- *buf = tmpbuf;
+ *buf = no_free_ptr(tmpbuf);
return 0;
-nomem:
- kfree(tmpbuf);
- return -ENOMEM;
}
void traceprobe_finish_parse(struct traceprobe_parse_context *ctx)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index f372252dc8bb..af30586f1aea 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -158,6 +158,7 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
struct trace_array_cpu *data;
unsigned int trace_ctx;
u64 *calltime;
+ u64 rettime;
int size;
ftrace_graph_addr_finish(gops, trace);
@@ -165,12 +166,13 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
+ rettime = trace_clock_local();
+
calltime = fgraph_retrieve_data(gops->idx, &size);
if (!calltime)
return;
- trace->calltime = *calltime;
- __trace_graph_return(tr, trace, trace_ctx);
+ __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
atomic_dec(&data->disabled);
preempt_enable_notrace();
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 7f9572a37333..14c6f272c4d8 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -520,20 +520,18 @@ stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
int was_enabled;
int ret;
- mutex_lock(&stack_sysctl_mutex);
+ guard(mutex)(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write || (was_enabled == !!stack_tracer_enabled))
- goto out;
+ return ret;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
- out:
- mutex_unlock(&stack_sysctl_mutex);
return ret;
}
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index bb247beec447..b3b5586f104d 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -128,7 +128,7 @@ static int stat_seq_init(struct stat_session *session)
int ret = 0;
int i;
- mutex_lock(&session->stat_mutex);
+ guard(mutex)(&session->stat_mutex);
__reset_stat_session(session);
if (!ts->stat_cmp)
@@ -136,11 +136,11 @@ static int stat_seq_init(struct stat_session *session)
stat = ts->stat_start(ts);
if (!stat)
- goto exit;
+ return 0;
ret = insert_stat(root, stat, ts->stat_cmp);
if (ret)
- goto exit;
+ return ret;
/*
* Iterate over the tracer stat entries and store them in an rbtree.
@@ -157,13 +157,10 @@ static int stat_seq_init(struct stat_session *session)
goto exit_free_rbtree;
}
-exit:
- mutex_unlock(&session->stat_mutex);
return ret;
exit_free_rbtree:
__reset_stat_session(session);
- mutex_unlock(&session->stat_mutex);
return ret;
}
@@ -308,7 +305,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace)
{
struct stat_session *session, *node;
- int ret = -EINVAL;
+ int ret;
if (!trace)
return -EINVAL;
@@ -316,18 +313,18 @@ int register_stat_tracer(struct tracer_stat *trace)
if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
return -EINVAL;
+ guard(mutex)(&all_stat_sessions_mutex);
+
/* Already registered? */
- mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) {
if (node->ts == trace)
- goto out;
+ return -EINVAL;
}
- ret = -ENOMEM;
/* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
- goto out;
+ return -ENOMEM;
session->ts = trace;
INIT_LIST_HEAD(&session->session_list);
@@ -336,16 +333,13 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session);
if (ret) {
destroy_session(session);
- goto out;
+ return ret;
}
- ret = 0;
/* Register */
list_add_tail(&session->session_list, &all_stat_sessions);
- out:
- mutex_unlock(&all_stat_sessions_mutex);
- return ret;
+ return 0;
}
void unregister_stat_tracer(struct tracer_stat *trace)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 4875e7f5de3d..ccc762fbb69c 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
struct trace_uprobe *old_tu;
int ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
ret = validate_ref_ctr_offset(tu);
if (ret)
- goto end;
+ return ret;
/* register as an event */
old_tu = find_probe_event(trace_probe_name(&tu->tp),
@@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
- ret = -EEXIST;
- } else {
- ret = append_trace_uprobe(tu, old_tu);
+ return -EEXIST;
}
- goto end;
+ return append_trace_uprobe(tu, old_tu);
}
ret = register_uprobe_event(tu);
@@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
- goto end;
+ return ret;
}
dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
-end:
- mutex_unlock(&event_mutex);
-
return ret;
}
diff --git a/kernel/ucount.c b/kernel/ucount.c
index f950b5e59d63..86c5f1c0bad9 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -164,8 +164,8 @@ struct ucounts *get_ucounts(struct ucounts *ucounts)
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
- struct ucounts *ucounts, *new;
bool wrapped;
+ struct ucounts *ucounts, *new = NULL;
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -182,17 +182,17 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
- if (ucounts) {
- kfree(new);
- } else {
+ if (!ucounts) {
hlist_add_head(&new->node, hashent);
get_user_ns(new->ns);
spin_unlock_irq(&ucounts_lock);
return new;
}
}
+
wrapped = !get_ucounts_or_wrap(ucounts);
spin_unlock_irq(&ucounts_lock);
+ kfree(new);
if (wrapped) {
put_ucounts(ucounts);
return NULL;
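The alloc_ucounts() rework above keeps the usual optimistic-allocation shape (look up under the spinlock, allocate outside it, re-check under the lock) and moves the kfree() of the losing allocation out of the locked region. A compact userspace sketch of that pattern, assuming pthreads; the types and names are illustrative.

/* Userspace sketch: allocate outside the lock, re-check under it,
 * and free the unused allocation only after unlocking.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ucount_entry {
	int uid;
	struct ucount_entry *next;
};

static pthread_spinlock_t counts_lock;
static struct ucount_entry *counts_head;

static struct ucount_entry *find_locked(int uid)
{
	for (struct ucount_entry *e = counts_head; e; e = e->next)
		if (e->uid == uid)
			return e;
	return NULL;
}

static struct ucount_entry *get_or_alloc(int uid)
{
	struct ucount_entry *found, *new = NULL;

	pthread_spin_lock(&counts_lock);
	found = find_locked(uid);
	if (!found) {
		pthread_spin_unlock(&counts_lock);

		new = calloc(1, sizeof(*new));	/* may block; done unlocked */
		if (!new)
			return NULL;
		new->uid = uid;

		pthread_spin_lock(&counts_lock);
		found = find_locked(uid);	/* someone may have raced us */
		if (!found) {
			new->next = counts_head;
			counts_head = new;
			pthread_spin_unlock(&counts_lock);
			return new;
		}
	}
	pthread_spin_unlock(&counts_lock);
	free(new);				/* lost the race, or never allocated */
	return found;
}

int main(void)
{
	pthread_spin_init(&counts_lock, PTHREAD_PROCESS_PRIVATE);
	printf("%p\n%p\n", (void *)get_or_alloc(1000), (void *)get_or_alloc(1000));
	return 0;
}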
diff --git a/kernel/umh.c b/kernel/umh.c
index be9234270777..b4da45a3a7cf 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -544,7 +544,7 @@ static int proc_cap_handler(const struct ctl_table *table, int write,
return 0;
}
-static struct ctl_table usermodehelper_table[] = {
+static const struct ctl_table usermodehelper_table[] = {
{
.procname = "bset",
.data = &usermodehelper_bset,
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 7282f61a8650..bfbaaecb1dd4 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -75,7 +75,7 @@ static DEFINE_CTL_TABLE_POLL(hostname_poll);
static DEFINE_CTL_TABLE_POLL(domainname_poll);
// Note: update 'enum uts_proc' to match any changes to this table
-static struct ctl_table uts_kern_table[] = {
+static const struct ctl_table uts_kern_table[] = {
{
.procname = "arch",
.data = init_uts_ns.name.machine,
@@ -129,7 +129,7 @@ static struct ctl_table uts_kern_table[] = {
*/
void uts_proc_notify(enum uts_proc proc)
{
- struct ctl_table *table = &uts_kern_table[proc];
+ const struct ctl_table *table = &uts_kern_table[proc];
proc_sys_poll_notify(table->poll);
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 41e0f7e9fa35..b2da7de39d06 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -190,7 +190,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
* with printk_cpu_sync_get_irqsave() that we can still at least
* get the message about the lockup out.
*/
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
+ pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu);
printk_cpu_sync_get_irqsave(flags);
print_modules();
@@ -1094,7 +1094,7 @@ static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
static const int sixty = 60;
-static struct ctl_table watchdog_sysctls[] = {
+static const struct ctl_table watchdog_sysctls[] = {
{
.procname = "watchdog",
.data = &watchdog_user_enabled,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33a23c7b2274..3c2c45313c88 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2180,7 +2180,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
diff --git a/lib/Kconfig b/lib/Kconfig
index 5a318f753b2f..dccb61b7d698 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -156,13 +156,18 @@ config CRC16
config CRC_T10DIF
tristate "CRC calculation for the T10 Data Integrity Field"
- select CRYPTO
- select CRYPTO_CRCT10DIF
help
This option is only needed if a module that's not in the
kernel tree needs to calculate CRC checks for use with the
SCSI data integrity subsystem.
+config ARCH_HAS_CRC_T10DIF
+ bool
+
+config CRC_T10DIF_ARCH
+ tristate
+ default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
+
config CRC64_ROCKSOFT
tristate "CRC calculation for the Rocksoft model CRC64"
select CRC64
@@ -190,61 +195,12 @@ config CRC32
the kernel tree does. Such modules that use library CRC32/CRC32c
functions require M here.
-config CRC32_SELFTEST
- tristate "CRC32 perform self test on init"
- depends on CRC32
- help
- This option enables the CRC32 library functions to perform a
- self test on initialization. The self test computes crc32_le
- and crc32_be over byte strings with random alignment and length
- and computes the total elapsed time and number of bytes processed.
-
-choice
- prompt "CRC32 implementation"
- depends on CRC32
- default CRC32_SLICEBY8
- help
- This option allows a kernel builder to override the default choice
- of CRC32 algorithm. Choose the default ("slice by 8") unless you
- know that you need one of the others.
-
-config CRC32_SLICEBY8
- bool "Slice by 8 bytes"
- help
- Calculate checksum 8 bytes at a time with a clever slicing algorithm.
- This is the fastest algorithm, but comes with a 8KiB lookup table.
- Most modern processors have enough cache to hold this table without
- thrashing the cache.
-
- This is the default implementation choice. Choose this one unless
- you have a good reason not to.
-
-config CRC32_SLICEBY4
- bool "Slice by 4 bytes"
- help
- Calculate checksum 4 bytes at a time with a clever slicing algorithm.
- This is a bit slower than slice by 8, but has a smaller 4KiB lookup
- table.
-
- Only choose this option if you know what you are doing.
-
-config CRC32_SARWATE
- bool "Sarwate's Algorithm (one byte at a time)"
- help
- Calculate checksum a byte at a time using Sarwate's algorithm. This
- is not particularly fast, but has a small 256 byte lookup table.
-
- Only choose this option if you know what you are doing.
-
-config CRC32_BIT
- bool "Classic Algorithm (one bit at a time)"
- help
- Calculate checksum one bit at a time. This is VERY slow, but has
- no lookup table. This is provided as a debugging option.
-
- Only choose this option if you are debugging crc32.
+config ARCH_HAS_CRC32
+ bool
-endchoice
+config CRC32_ARCH
+ tristate
+ default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
config CRC64
tristate "CRC64 functions"
@@ -272,14 +228,10 @@ config CRC7
config LIBCRC32C
tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
- select CRYPTO
- select CRYPTO_CRC32C
+ select CRC32
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC32c functions, but a module built outside the
- kernel tree does. Such modules that use library CRC32c functions
- require M here. See Castagnoli93.
- Module will be libcrc32c.
+ This option just selects CRC32 and is provided for compatibility
+ purposes until the users are updated to select CRC32 directly.
config CRC8
tristate "CRC8 function"
@@ -288,6 +240,17 @@ config CRC8
when they need to do cyclic redundancy checks according to the CRC8
algorithm. Module will be called crc8.
+config CRC_OPTIMIZATIONS
+ bool "Enable optimized CRC implementations" if EXPERT
+ default y
+ help
+ Disabling this option reduces code size slightly by disabling the
+ architecture-optimized implementations of any CRC variants that are
+ enabled. CRC checksumming performance may get much slower.
+
+ Keep this enabled unless you're really trying to minimize the size of
+ the kernel.
+
config XXHASH
tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a9cfddc990b7..1af972a92d06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2269,7 +2269,6 @@ config TEST_LIST_SORT
config TEST_MIN_HEAP
tristate "Min heap test"
depends on DEBUG_KERNEL || m
- select MIN_HEAP
help
Enable this to turn on min heap function tests. This test is
executed only once during system boot (so affects only boot time),
@@ -2479,6 +2478,17 @@ config TEST_RHASHTABLE
config TEST_IDA
tristate "Perform selftest on IDA functions"
+config TEST_MISC_MINOR
+ tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ KUnit test for the misc minor.
+ It tests misc minor functions for dynamic and misc dynamic minor,
+ including the misc_xxx functions.
+
+ If unsure, say N.
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
depends on PARMAN
@@ -2838,6 +2848,26 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
+config CRC_KUNIT_TEST
+ tristate "KUnit tests for CRC functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select CRC16
+ select CRC_T10DIF
+ select CRC32
+ select CRC64
+ help
+ Unit tests for the CRC library functions.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config CRC_BENCHMARK
+ bool "Benchmark for the CRC functions"
+ depends on CRC_KUNIT_TEST
+ help
+ Include benchmarks in the KUnit test suite for the CRC functions.
+
config SIPHASH_KUNIT_TEST
tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2858,15 +2888,6 @@ config USERCOPY_KUNIT_TEST
on the copy_to/from_user infrastructure, making sure basic
user/kernel boundary testing is working.
-config CRC16_KUNIT_TEST
- tristate "KUnit tests for CRC16"
- depends on KUNIT
- default KUNIT_ALL_TESTS
- select CRC16
- help
- Enable this option to run unit tests for the kernel's CRC16
- implementation (<linux/crc16.h>).
-
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -3161,6 +3182,21 @@ config INT_POW_TEST
If unsure, say N
+config INT_SQRT_KUNIT_TEST
+ tristate "Integer square root test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_sqrt() function,
+ which performs integer square root calculation. The tests cover
+ various scenarios, including edge cases, to ensure the accuracy and
+ reliability of the function.
+
+ If unsure, say N
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index a8155c972f02..d5cfc7afbbb8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -164,10 +164,8 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC64) += crc64.o
-obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
-obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
obj-$(CONFIG_XXHASH) += xxhash.o
@@ -392,9 +390,9 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
-obj-$(CONFIG_CRC16_KUNIT_TEST) += crc16_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 65e706e1bc19..19b45617bdcf 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
+EXPORT_SYMBOL(mem_alloc_profiling_key);
+
DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
@@ -423,8 +425,8 @@ static int vm_module_tags_populate(void)
unsigned long nr;
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
- nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
- NUMA_NO_NODE, more_pages, next_page);
+ nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages, next_page);
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1e..1a72bba36d24 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_read);
@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
v->counter = i;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
} \
EXPORT_SYMBOL(generic_atomic64_##op);
@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = (v->counter c_op a); \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = v->counter; \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val == o)
v->counter = n;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
v->counter = new;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val != u)
v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
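The generic atomic64 implementation above protects each 64-bit counter with one lock out of a small, cache-line-padded array, selected by hashing the counter's address in lock_addr(); the conversion keeps that layout while switching the lock type and making the IRQ save/restore explicit around arch_spin_lock()/arch_spin_unlock(). A userspace sketch of the hashed-lock-array idea, assuming pthreads; NR_LOCKS, the hash and the names are illustrative.

/* Userspace sketch of a hashed lock array guarding 64-bit counters. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS	16
#define CACHE_LINE	64

typedef struct { int64_t counter; } atomic64_t;

/* Pad each lock to its own cache line to avoid false sharing. */
static union {
	pthread_spinlock_t lock;
	char pad[CACHE_LINE];
} atomic64_lock[NR_LOCKS];

static pthread_spinlock_t *lock_addr(const atomic64_t *v)
{
	uintptr_t addr = (uintptr_t)v;

	addr >>= 6;			/* drop cache-line offset bits */
	return &atomic64_lock[addr % NR_LOCKS].lock;
}

static int64_t atomic64_add_return(int64_t a, atomic64_t *v)
{
	pthread_spinlock_t *lock = lock_addr(v);
	int64_t val;

	pthread_spin_lock(lock);
	val = (v->counter += a);
	pthread_spin_unlock(lock);
	return val;
}

int main(void)
{
	atomic64_t x = { 0 };

	for (int i = 0; i < NR_LOCKS; i++)
		pthread_spin_init(&atomic64_lock[i].lock, PTHREAD_PROCESS_PRIVATE);

	printf("%lld\n", (long long)atomic64_add_return(5, &x));
	return 0;
}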
diff --git a/lib/cpumask.c b/lib/cpumask.c
index e77ee9d46f71..57274ba8b6d9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -83,10 +83,7 @@ EXPORT_SYMBOL(alloc_cpumask_var_node);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
- if (!*mask)
- panic("%s: Failed to allocate %u bytes\n", __func__,
- cpumask_size());
+ *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
}
/**
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ed2ed487097..311c2ab829f1 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -9,123 +9,57 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <crypto/hash.h>
-#include <crypto/algapi.h>
-#include <linux/static_key.h>
-#include <linux/notifier.h>
-static struct crypto_shash __rcu *crct10dif_tfm;
-static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
-static DEFINE_MUTEX(crc_t10dif_mutex);
-static struct work_struct crct10dif_rehash_work;
-
-static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- struct crypto_alg *alg = data;
-
- if (val != CRYPTO_MSG_ALG_LOADED ||
- strcmp(alg->cra_name, CRC_T10DIF_STRING))
- return NOTIFY_DONE;
-
- schedule_work(&crct10dif_rehash_work);
- return NOTIFY_OK;
-}
-
-static void crc_t10dif_rehash(struct work_struct *work)
-{
- struct crypto_shash *new, *old;
-
- mutex_lock(&crc_t10dif_mutex);
- old = rcu_dereference_protected(crct10dif_tfm,
- lockdep_is_held(&crc_t10dif_mutex));
- new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
- if (IS_ERR(new)) {
- mutex_unlock(&crc_t10dif_mutex);
- return;
- }
- rcu_assign_pointer(crct10dif_tfm, new);
- mutex_unlock(&crc_t10dif_mutex);
-
- if (old) {
- synchronize_rcu();
- crypto_free_shash(old);
- } else {
- static_branch_disable(&crct10dif_fallback);
- }
-}
-
-static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_notify,
+/*
+ * Table generated using the following polynomial:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const u16 t10_dif_crc_table[256] = {
+ 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+ 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+ 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+ 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+ 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+ 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+ 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+ 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+ 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+ 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+ 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+ 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+ 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+ 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+ 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+ 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+ 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+ 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+ 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+ 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+ 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+ 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+ 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+ 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+ 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+ 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+ 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+ 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+ 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+ 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+ 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+ 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};
-__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
-{
- struct {
- struct shash_desc shash;
- __u16 crc;
- } desc;
- int err;
-
- if (static_branch_unlikely(&crct10dif_fallback))
- return crc_t10dif_generic(crc, buffer, len);
-
- rcu_read_lock();
- desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- desc.crc = crc;
- err = crypto_shash_update(&desc.shash, buffer, len);
- rcu_read_unlock();
-
- BUG_ON(err);
-
- return desc.crc;
-}
-EXPORT_SYMBOL(crc_t10dif_update);
-
-__u16 crc_t10dif(const unsigned char *buffer, size_t len)
-{
- return crc_t10dif_update(0, buffer, len);
-}
-EXPORT_SYMBOL(crc_t10dif);
-
-static int __init crc_t10dif_mod_init(void)
-{
- INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
- crypto_register_notifier(&crc_t10dif_nb);
- crc_t10dif_rehash(&crct10dif_rehash_work);
- return 0;
-}
-
-static void __exit crc_t10dif_mod_fini(void)
-{
- crypto_unregister_notifier(&crc_t10dif_nb);
- cancel_work_sync(&crct10dif_rehash_work);
- crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
-}
-
-module_init(crc_t10dif_mod_init);
-module_exit(crc_t10dif_mod_fini);
-
-static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+u16 crc_t10dif_generic(u16 crc, const u8 *p, size_t len)
{
- struct crypto_shash *tfm;
- int len;
+ size_t i;
- if (static_branch_unlikely(&crct10dif_fallback))
- return sprintf(buffer, "fallback\n");
+ for (i = 0; i < len; i++)
+ crc = (crc << 8) ^ t10_dif_crc_table[(crc >> 8) ^ p[i]];
- rcu_read_lock();
- tfm = rcu_dereference(crct10dif_tfm);
- len = snprintf(buffer, PAGE_SIZE, "%s\n",
- crypto_shash_driver_name(tfm));
- rcu_read_unlock();
-
- return len;
+ return crc;
}
+EXPORT_SYMBOL(crc_t10dif_generic);
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
-
-MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
+MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crct10dif");
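The rewritten lib/crc-t10dif.c above drops the crypto-API indirection and keeps only a byte-at-a-time (Sarwate-style) table lookup, the same loop structure crc32_le_base()/crc32c_le_base() take further down. The 256-entry table is simply the bitwise CRC of each possible input byte under the 0x8BB7 polynomial. Below is a userspace sketch that derives such a table and checks the table-driven loop against a bit-by-bit computation; this is illustrative code, not the kernel implementation.

/* Build a CRC16 T10-DIF table from the 0x8BB7 polynomial and verify
 * that table-driven and bit-by-bit computations agree.
 */
#include <stdint.h>
#include <stdio.h>

#define T10DIF_POLY	0x8bb7

static uint16_t table[256];

static void build_table(void)
{
	for (int byte = 0; byte < 256; byte++) {
		uint16_t crc = (uint16_t)(byte << 8);

		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ T10DIF_POLY)
					     : (uint16_t)(crc << 1);
		table[byte] = crc;
	}
}

static uint16_t crc_table_driven(uint16_t crc, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++)
		crc = (uint16_t)((crc << 8) ^ table[(crc >> 8) ^ p[i]]);
	return crc;
}

static uint16_t crc_bitwise(uint16_t crc, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)(p[i] << 8);
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ T10DIF_POLY)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	build_table();
	printf("table[1] = 0x%04X (0x8BB7 in the table above)\n", (unsigned)table[1]);
	printf("table-driven 0x%04X, bitwise 0x%04X\n",
	       (unsigned)crc_table_driven(0, msg, 9), (unsigned)crc_bitwise(0, msg, 9));
	return 0;
}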
diff --git a/lib/crc16_kunit.c b/lib/crc16_kunit.c
deleted file mode 100644
index 0918c98a96d2..000000000000
--- a/lib/crc16_kunit.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KUnits tests for CRC16.
- *
- * Copyright (C) 2024, LKCAMP
- * Author: Vinicius Peixoto <vpeixoto@lkcamp.dev>
- * Author: Fabricio Gasperin <fgasperin@lkcamp.dev>
- * Author: Enzo Bertoloti <ebertoloti@lkcamp.dev>
- */
-#include <kunit/test.h>
-#include <linux/crc16.h>
-#include <linux/prandom.h>
-
-#define CRC16_KUNIT_DATA_SIZE 4096
-#define CRC16_KUNIT_TEST_SIZE 100
-#define CRC16_KUNIT_SEED 0x12345678
-
-/**
- * struct crc16_test - CRC16 test data
- * @crc: initial input value to CRC16
- * @start: Start index within the data buffer
- * @length: Length of the data
- */
-static struct crc16_test {
- u16 crc;
- u16 start;
- u16 length;
-} tests[CRC16_KUNIT_TEST_SIZE];
-
-u8 data[CRC16_KUNIT_DATA_SIZE];
-
-
-/* Naive implementation of CRC16 for validation purposes */
-static inline u16 _crc16_naive_byte(u16 crc, u8 data)
-{
- u8 i = 0;
-
- crc ^= (u16) data;
- for (i = 0; i < 8; i++) {
- if (crc & 0x01)
- crc = (crc >> 1) ^ 0xa001;
- else
- crc = crc >> 1;
- }
-
- return crc;
-}
-
-
-static inline u16 _crc16_naive(u16 crc, u8 *buffer, size_t len)
-{
- while (len--)
- crc = _crc16_naive_byte(crc, *buffer++);
- return crc;
-}
-
-
-/* Small helper for generating pseudorandom 16-bit data */
-static inline u16 _rand16(void)
-{
- static u32 rand = CRC16_KUNIT_SEED;
-
- rand = next_pseudo_random32(rand);
- return rand & 0xFFFF;
-}
-
-
-static int crc16_init_test_data(struct kunit_suite *suite)
-{
- size_t i;
-
- /* Fill the data buffer with random bytes */
- for (i = 0; i < CRC16_KUNIT_DATA_SIZE; i++)
- data[i] = _rand16() & 0xFF;
-
- /* Generate random test data while ensuring the random
- * start + length values won't overflow the 4096-byte
- * buffer (0x7FF * 2 = 0xFFE < 0x1000)
- */
- for (size_t i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- tests[i].crc = _rand16();
- tests[i].start = _rand16() & 0x7FF;
- tests[i].length = _rand16() & 0x7FF;
- }
-
- return 0;
-}
-
-static void crc16_test_empty(struct kunit *test)
-{
- u16 crc;
-
- /* The result for empty data should be the same as the
- * initial crc
- */
- crc = crc16(0x00, data, 0);
- KUNIT_EXPECT_EQ(test, crc, 0);
- crc = crc16(0xFF, data, 0);
- KUNIT_EXPECT_EQ(test, crc, 0xFF);
-}
-
-static void crc16_test_correctness(struct kunit *test)
-{
- size_t i;
- u16 crc, crc_naive;
-
- for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- /* Compare results with the naive crc16 implementation */
- crc = crc16(tests[i].crc, data + tests[i].start,
- tests[i].length);
- crc_naive = _crc16_naive(tests[i].crc, data + tests[i].start,
- tests[i].length);
- KUNIT_EXPECT_EQ(test, crc, crc_naive);
- }
-}
-
-
-static void crc16_test_combine(struct kunit *test)
-{
- size_t i, j;
- u16 crc, crc_naive;
-
- /* Make sure that combining two consecutive crc16 calculations
- * yields the same result as calculating the crc16 for the whole thing
- */
- for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- crc_naive = crc16(tests[i].crc, data + tests[i].start, tests[i].length);
- for (j = 0; j < tests[i].length; j++) {
- crc = crc16(tests[i].crc, data + tests[i].start, j);
- crc = crc16(crc, data + tests[i].start + j, tests[i].length - j);
- KUNIT_EXPECT_EQ(test, crc, crc_naive);
- }
- }
-}
-
-
-static struct kunit_case crc16_test_cases[] = {
- KUNIT_CASE(crc16_test_empty),
- KUNIT_CASE(crc16_test_combine),
- KUNIT_CASE(crc16_test_correctness),
- {},
-};
-
-static struct kunit_suite crc16_test_suite = {
- .name = "crc16",
- .test_cases = crc16_test_cases,
- .suite_init = crc16_init_test_data,
-};
-kunit_test_suite(crc16_test_suite);
-
-MODULE_AUTHOR("Fabricio Gasperin <fgasperin@lkcamp.dev>");
-MODULE_AUTHOR("Vinicius Peixoto <vpeixoto@lkcamp.dev>");
-MODULE_AUTHOR("Enzo Bertoloti <ebertoloti@lkcamp.dev>");
-MODULE_DESCRIPTION("Unit tests for crc16");
-MODULE_LICENSE("GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
index ff587fee3893..ede6131f66fc 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -30,20 +30,6 @@
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/sched.h>
-#include "crc32defs.h"
-
-#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) cpu_to_le32(x))
-#else
-# define tole(x) (x)
-#endif
-
-#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) cpu_to_be32(x))
-#else
-# define tobe(x) (x)
-#endif
#include "crc32table.h"
@@ -51,166 +37,21 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
-
-/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32 __pure
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
+u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
{
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
-# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
- t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
-# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
- t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
-# else
-# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
- t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
-# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
- t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
-# endif
- const u32 *b;
- size_t rem_len;
-# ifdef CONFIG_X86
- size_t i;
-# endif
- const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
-# if CRC_LE_BITS != 32
- const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
-# endif
- u32 q;
-
- /* Align it */
- if (unlikely((long)buf & 3 && len)) {
- do {
- DO_CRC(*buf++);
- } while ((--len) && ((long)buf)&3);
- }
-
-# if CRC_LE_BITS == 32
- rem_len = len & 3;
- len = len >> 2;
-# else
- rem_len = len & 7;
- len = len >> 3;
-# endif
-
- b = (const u32 *)buf;
-# ifdef CONFIG_X86
- --b;
- for (i = 0; i < len; i++) {
-# else
- for (--b; len; --len) {
-# endif
- q = crc ^ *++b; /* use pre increment for speed */
-# if CRC_LE_BITS == 32
- crc = DO_CRC4;
-# else
- crc = DO_CRC8;
- q = *++b;
- crc ^= DO_CRC4;
-# endif
- }
- len = rem_len;
- /* And the last few bytes */
- if (len) {
- u8 *p = (u8 *)(b + 1) - 1;
-# ifdef CONFIG_X86
- for (i = 0; i < len; i++)
- DO_CRC(*++p); /* use pre increment for speed */
-# else
- do {
- DO_CRC(*++p); /* use pre increment for speed */
- } while (--len);
-# endif
- }
+ while (len--)
+ crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
return crc;
-#undef DO_CRC
-#undef DO_CRC4
-#undef DO_CRC8
}
-#endif
-
+EXPORT_SYMBOL(crc32_le_base);
-/**
- * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
- * CRC32/CRC32C
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
- * uses, or the previous crc32/crc32c value if computing incrementally.
- * @p: pointer to buffer over which CRC32/CRC32C is run
- * @len: length of buffer @p
- * @tab: little-endian Ethernet table
- * @polynomial: CRC32/CRC32c LE polynomial
- */
-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
+u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
{
-#if CRC_LE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- }
-# elif CRC_LE_BITS == 2
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- }
-# elif CRC_LE_BITS == 4
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 4) ^ tab[0][crc & 15];
- crc = (crc >> 4) ^ tab[0][crc & 15];
- }
-# elif CRC_LE_BITS == 8
- /* aka Sarwate algorithm */
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 8) ^ tab[0][crc & 255];
- }
-# else
- crc = (__force u32) __cpu_to_le32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __le32_to_cpu((__force __le32)crc);
-#endif
+ while (len--)
+ crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
return crc;
}
-
-#if CRC_LE_BITS == 1
-u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
-}
-u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
-}
-#else
-u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
-}
-u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
-}
-#endif
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(__crc32c_le);
-
-u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
-EXPORT_SYMBOL(crc32_le_base);
-
-u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_base);
-
-u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
+EXPORT_SYMBOL(crc32c_le_base);
/*
* This multiplies the polynomials x and y modulo the given modulus.
@@ -285,64 +126,10 @@ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
EXPORT_SYMBOL(crc32_le_shift);
EXPORT_SYMBOL(__crc32c_le_shift);
-/**
- * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC32 is run
- * @len: length of buffer @p
- * @tab: big-endian Ethernet table
- * @polynomial: CRC32 BE polynomial
- */
-static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
+u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
{
-#if CRC_BE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++ << 24;
- for (i = 0; i < 8; i++)
- crc =
- (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
- 0);
- }
-# elif CRC_BE_BITS == 2
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- }
-# elif CRC_BE_BITS == 4
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 4) ^ tab[0][crc >> 28];
- crc = (crc << 4) ^ tab[0][crc >> 28];
- }
-# elif CRC_BE_BITS == 8
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 8) ^ tab[0][crc >> 24];
- }
-# else
- crc = (__force u32) __cpu_to_be32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __be32_to_cpu((__force __be32)crc);
-# endif
+ while (len--)
+ crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
return crc;
}
-
-#if CRC_BE_BITS == 1
-u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
-}
-#else
-u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
-}
-#endif
-EXPORT_SYMBOL(crc32_be);
+EXPORT_SYMBOL(crc32_be_base);
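
[Editorial illustration, not part of the patch: the new crc32_le_base(), crc32c_le_base() and crc32_be_base() above are plain byte-at-a-time table lookups, one 256-entry table per polynomial. The standalone userspace sketch below shows the same little-endian recurrence; it builds its own table from the bit-reversed CRC-32 polynomial 0xEDB88320 instead of relying on the kernel's generated crc32table.h, and prints the conventional check value so the loop can be verified independently.]

/*
 * Standalone sketch (hosted C, not kernel code): build a 256-entry
 * little-endian CRC-32 table and run the same byte-at-a-time loop
 * that crc32_le_base() uses above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t table[256];

static void build_table(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t crc = i;

		for (int j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
		table[i] = crc;
	}
}

static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = (crc >> 8) ^ table[(crc & 0xff) ^ *p++];
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	uint32_t crc;

	build_table();
	/* Seed with ~0 and invert at the end, as Ethernet-style callers do. */
	crc = ~crc32_le_sketch(~0u, (const uint8_t *)msg, strlen(msg));
	/* Expected CRC-32 check value for "123456789": cbf43926 */
	printf("%08x\n", (unsigned int)crc);
	return 0;
}
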
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
deleted file mode 100644
index 0c8fb5923e7e..000000000000
--- a/lib/crc32defs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Try to choose an implementation variant via Kconfig */
-#ifdef CONFIG_CRC32_SLICEBY8
-# define CRC_LE_BITS 64
-# define CRC_BE_BITS 64
-#endif
-#ifdef CONFIG_CRC32_SLICEBY4
-# define CRC_LE_BITS 32
-# define CRC_BE_BITS 32
-#endif
-#ifdef CONFIG_CRC32_SARWATE
-# define CRC_LE_BITS 8
-# define CRC_BE_BITS 8
-#endif
-#ifdef CONFIG_CRC32_BIT
-# define CRC_LE_BITS 1
-# define CRC_BE_BITS 1
-#endif
-
-/*
- * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
- * For less performance-sensitive, use 4 or 8 to save table size.
- * For larger systems choose same as CPU architecture as default.
- * This works well on X86_64, SPARC64 systems. This may require some
- * elaboration after experiments with other architectures.
- */
-#ifndef CRC_LE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_LE_BITS 64
-# else
-# define CRC_LE_BITS 32
-# endif
-#endif
-#ifndef CRC_BE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_BE_BITS 64
-# else
-# define CRC_BE_BITS 32
-# endif
-#endif
-
-/*
- * Little-endian CRC computation. Used with serial bit streams sent
- * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
- */
-#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
- CRC_LE_BITS & CRC_LE_BITS-1
-# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
-
-/*
- * Big-endian CRC computation. Used with serial bit streams sent
- * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
- */
-#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
- CRC_BE_BITS & CRC_BE_BITS-1
-# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
diff --git a/lib/crc32test.c b/lib/crc32test.c
deleted file mode 100644
index 9b4af79412c4..000000000000
--- a/lib/crc32test.c
+++ /dev/null
@@ -1,852 +0,0 @@
-/*
- * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
- * cleaned up code to current version of sparse and added the slicing-by-8
- * algorithm to the closely similar existing slicing-by-4 algorithm.
- *
- * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
- * Code was from the public domain, copyright abandoned. Code was
- * subsequently included in the kernel, thus was re-licensed under the
- * GNU GPL v2.
- *
- * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Same crc32 function was used in 5 other places in the kernel.
- * I made one version, and deleted the others.
- * There are various incantations of crc32(). Some use a seed of 0 or ~0.
- * Some xor at the end with ~0. The generic crc32() function takes
- * seed as an argument, and doesn't xor at the end. Then individual
- * users can do whatever they need.
- * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
- * fs/jffs2 uses seed 0, doesn't xor with ~0.
- * fs/partitions/efi.c uses seed ~0, xor's with ~0.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-
-#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-
-#include "crc32defs.h"
-
-/* 4096 random bytes */
-static u8 const __aligned(8) test_buf[] __initconst =
-{
- 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
- 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
- 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
- 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
- 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
- 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
- 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
- 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
- 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
- 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
- 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
- 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
- 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
- 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
- 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
- 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
- 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
- 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
- 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
- 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
- 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
- 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
- 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
- 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
- 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
- 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
- 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
- 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
- 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
- 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
- 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
- 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
- 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
- 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
- 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
- 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
- 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
- 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
- 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
- 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
- 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
- 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
- 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
- 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
- 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
- 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
- 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
- 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
- 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
- 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
- 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
- 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
- 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
- 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
- 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
- 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
- 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
- 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
- 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
- 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
- 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
- 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
- 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
- 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
- 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
- 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
- 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
- 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
- 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
- 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
- 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
- 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
- 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
- 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
- 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
- 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
- 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
- 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
- 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
- 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
- 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
- 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
- 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
- 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
- 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
- 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
- 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
- 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
- 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
- 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
- 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
- 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
- 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
- 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
- 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
- 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
- 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
- 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
- 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
- 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
- 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
- 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
- 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
- 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
- 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
- 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
- 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
- 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
- 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
- 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
- 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
- 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
- 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
- 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
- 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
- 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
- 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
- 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
- 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
- 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
- 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
- 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
- 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
- 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
- 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
- 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
- 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
- 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
- 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
- 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
- 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
- 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
- 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
- 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
- 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
- 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
- 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
- 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
- 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
- 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
- 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
- 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
- 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
- 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
- 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
- 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
- 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
- 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
- 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
- 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
- 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
- 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
- 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
- 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
- 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
- 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
- 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
- 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
- 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
- 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
- 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
- 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
- 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
- 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
- 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
- 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
- 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
- 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
- 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
- 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
- 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
- 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
- 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
- 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
- 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
- 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
- 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
- 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
- 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
- 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
- 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
- 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
- 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
- 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
- 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
- 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
- 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
- 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
- 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
- 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
- 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
- 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
- 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
- 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
- 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
- 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
- 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
- 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
- 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
- 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
- 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
- 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
- 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
- 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
- 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
- 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
- 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
- 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
- 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
- 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
- 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
- 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
- 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
- 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
- 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
- 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
- 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
- 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
- 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
- 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
- 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
- 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
- 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
- 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
- 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
- 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
- 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
- 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
- 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
- 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
- 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
- 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
- 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
- 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
- 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
- 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
- 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
- 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
- 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
- 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
- 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
- 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
- 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
- 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
- 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
- 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
- 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
- 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
- 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
- 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
- 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
- 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
- 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
- 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
- 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
- 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
- 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
- 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
- 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
- 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
- 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
- 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
- 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
- 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
- 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
- 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
- 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
- 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
- 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
- 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
- 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
- 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
- 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
- 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
- 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
- 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
- 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
- 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
- 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
- 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
- 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
- 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
- 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
- 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
- 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
- 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
- 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
- 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
- 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
- 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
- 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
- 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
- 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
- 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
- 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
- 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
- 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
- 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
- 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
- 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
- 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
- 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
- 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
- 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
- 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
- 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
- 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
- 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
- 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
- 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
- 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
- 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
- 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
- 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
- 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
- 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
- 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
- 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
- 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
- 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
- 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
- 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
- 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
- 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
- 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
- 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
- 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
- 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
- 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
- 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
- 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
- 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
- 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
- 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
- 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
- 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
- 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
- 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
- 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
- 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
- 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
- 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
- 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
- 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
- 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
- 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
- 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
- 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
- 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
- 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
- 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
- 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
- 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
- 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
- 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
- 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
- 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
- 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
- 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
- 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
- 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
- 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
- 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
- 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
- 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
- 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
- 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
- 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
- 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
- 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
- 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
- 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
- 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
- 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
- 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
- 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
- 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
- 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
- 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
- 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
- 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
- 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
- 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
- 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
- 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
- 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
- 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
- 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
- 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
- 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
- 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
- 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
- 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
- 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
- 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
- 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
- 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
- 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
- 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
- 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
- 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
- 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
- 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
- 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
- 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
- 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
- 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
- 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
- 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
- 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
- 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
- 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
- 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
- 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
- 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
- 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
- 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
- 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
- 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
- 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
- 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
- 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
- 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
- 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
- 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
- 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
- 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
- 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
- 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
- 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
- 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
- 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
- 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
- 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
- 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
- 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
- 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
- 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
- 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
- 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
- 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
- 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
- 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
- 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
- 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
- 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
- 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
- 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
- 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
- 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
- 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
- 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
- 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
- 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
- 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
- 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
- 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
- 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
- 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
- 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
- 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
- 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
- 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
- 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
- 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
- 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
- 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
- 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
- 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
- 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
- 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
- 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
- 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
- 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
- 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
- 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
- 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
- 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
- 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
- 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
- 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
- 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
- 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
- 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
- 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
- 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
- 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
- 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
- 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
- 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
- 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
- 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
- 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
- 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
- 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
- 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
- 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
- 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
- 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
- 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
- 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
- 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
- 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
- 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
- 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
- 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
- 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
- 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
- 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
- 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
- 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
- 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
-};
-
-/* 100 test cases */
-static struct crc_test {
- u32 crc; /* random starting crc */
- u32 start; /* random 6 bit offset in buf */
- u32 length; /* random 11 bit length of test */
- u32 crc_le; /* expected crc32_le result */
- u32 crc_be; /* expected crc32_be result */
- u32 crc32c_le; /* expected crc32c_le result */
-} const test[] __initconst =
-{
- {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
- {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
- {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
- {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
- {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
- {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
- {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
- {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
- {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
- {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
- {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
- {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
- {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
- {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
- {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
- {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
- {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
- {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
- {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
- {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
- {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
- {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
- {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
- {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
- {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
- {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
- {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
- {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
- {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
- {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
- {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
- {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
- {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
- {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
- {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
- {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
- {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
- {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
- {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
- {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
- {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
- {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
- {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
- {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
- {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
- {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
- {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
- {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
- {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
- {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
- {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
- {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
- {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
- {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
- {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
- {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
- {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
- {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
- {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
- {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
- {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
- {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
- {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
- {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
- {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
- {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
- {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
- {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
- {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
- {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
- {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
- {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
- {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
- {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
- {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
- {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
- {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
- {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
- {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
- {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
- {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
- {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
- {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
- {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
- {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
- {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
- {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
- {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
- {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
- {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
- {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
- {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
- {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
- {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
- {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
- {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
- {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
- {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
- {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
- {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
-};
-
-#include <linux/time.h>
-
-static int __init crc32c_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += test[i].length;
-
- crc ^= __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
-
- pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
-
- if (errors)
- pr_warn("crc32c: %d self tests failed\n", errors);
- else {
- pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32c_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = __crc32c_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = __crc32c_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc32c_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32c_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
-
- crc ^= crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
-
- if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
-
- pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
- CRC_LE_BITS, CRC_BE_BITS);
-
- if (errors)
- pr_warn("crc32: %d self tests failed\n", errors);
- else {
- pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = crc32_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = crc32_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32test_init(void)
-{
- crc32_test();
- crc32c_test();
-
- crc32_combine_test();
- crc32c_combine_test();
-
- return 0;
-}
-
-static void __exit crc32_exit(void)
-{
-}
-
-module_init(crc32test_init);
-module_exit(crc32_exit);
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
-MODULE_DESCRIPTION("CRC32 selftest");
-MODULE_LICENSE("GPL");
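
[Editorial illustration, not part of the patch: the crc32_combine_test() and crc32c_combine_test() functions deleted above, and the crc_combine_test() added in the new lib/crc_kunit.c below, all check the same algebraic property: the CRC of a concatenation can be derived from the CRCs of the two pieces plus the length of the second piece. The sketch below demonstrates that property with zlib's crc32() and crc32_combine(), which use the same CRC-32 polynomial; note that zlib applies the ~0 pre/post conditioning internally, whereas the kernel's crc32_le() and crc32_le_combine() leave that to the caller. Build with: cc combine.c -lz]

#include <stdio.h>
#include <zlib.h>

int main(void)
{
	const unsigned char buf[] = "an arbitrary test message";
	uInt len = sizeof(buf) - 1, len1 = 10, len2 = len - len1;

	/* CRC of the whole buffer in one pass. */
	uLong full = crc32(crc32(0L, Z_NULL, 0), buf, len);
	/* CRCs of the two halves, the second one seeded with 0. */
	uLong crc1 = crc32(crc32(0L, Z_NULL, 0), buf, len1);
	uLong crc2 = crc32(crc32(0L, Z_NULL, 0), buf + len1, len2);

	/* Prints the same value twice if the combine identity holds. */
	printf("%08lx %08lx\n", full, crc32_combine(crc1, crc2, len2));
	return 0;
}
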
diff --git a/lib/crc_kunit.c b/lib/crc_kunit.c
new file mode 100644
index 000000000000..6a61d4b5fd45
--- /dev/null
+++ b/lib/crc_kunit.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Unit tests and benchmarks for the CRC library functions
+ *
+ * Copyright 2024 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+#include <kunit/test.h>
+#include <linux/crc16.h>
+#include <linux/crc-t10dif.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/crc64.h>
+#include <linux/prandom.h>
+#include <linux/vmalloc.h>
+
+#define CRC_KUNIT_SEED 42
+#define CRC_KUNIT_MAX_LEN 16384
+#define CRC_KUNIT_NUM_TEST_ITERS 1000
+
+static struct rnd_state rng;
+static u8 *test_buffer;
+static size_t test_buflen;
+
+/**
+ * struct crc_variant - describes a CRC variant
+ * @bits: Number of bits in the CRC, 1 <= @bits <= 64.
+ * @le: true if it's a "little endian" CRC (reversed mapping between bits and
+ * polynomial coefficients in each byte), false if it's a "big endian" CRC
+ * (natural mapping between bits and polynomial coefficients in each byte)
+ * @poly: The generator polynomial with the highest-order term omitted.
+ * Bit-reversed if @le is true.
+ * @func: The function to compute a CRC. The type signature uses u64 so that it
+ * can fit any CRC up to CRC-64.
+ * @combine_func: Optional function to combine two CRCs.
+ */
+struct crc_variant {
+ int bits;
+ bool le;
+ u64 poly;
+ u64 (*func)(u64 crc, const u8 *p, size_t len);
+ u64 (*combine_func)(u64 crc1, u64 crc2, size_t len2);
+};
+
+static u32 rand32(void)
+{
+ return prandom_u32_state(&rng);
+}
+
+static u64 rand64(void)
+{
+ u32 n = rand32();
+
+ return ((u64)n << 32) | rand32();
+}
+
+static u64 crc_mask(const struct crc_variant *v)
+{
+ return (u64)-1 >> (64 - v->bits);
+}
+
+/* Reference implementation of any CRC variant */
+static u64 crc_ref(const struct crc_variant *v,
+ u64 crc, const u8 *p, size_t len)
+{
+ size_t i, j;
+
+ for (i = 0; i < len; i++) {
+ for (j = 0; j < 8; j++) {
+ if (v->le) {
+ crc ^= (p[i] >> j) & 1;
+ crc = (crc >> 1) ^ ((crc & 1) ? v->poly : 0);
+ } else {
+ crc ^= (u64)((p[i] >> (7 - j)) & 1) <<
+ (v->bits - 1);
+ if (crc & (1ULL << (v->bits - 1)))
+ crc = ((crc << 1) ^ v->poly) &
+ crc_mask(v);
+ else
+ crc <<= 1;
+ }
+ }
+ }
+ return crc;
+}
+
+static int crc_suite_init(struct kunit_suite *suite)
+{
+ /*
+ * Allocate the test buffer using vmalloc() with a page-aligned length
+ * so that it is immediately followed by a guard page. This allows
+ * buffer overreads to be detected, even in assembly code.
+ */
+ test_buflen = round_up(CRC_KUNIT_MAX_LEN, PAGE_SIZE);
+ test_buffer = vmalloc(test_buflen);
+ if (!test_buffer)
+ return -ENOMEM;
+
+ prandom_seed_state(&rng, CRC_KUNIT_SEED);
+ prandom_bytes_state(&rng, test_buffer, test_buflen);
+ return 0;
+}
+
+static void crc_suite_exit(struct kunit_suite *suite)
+{
+ vfree(test_buffer);
+ test_buffer = NULL;
+}
+
+/* Generate a random initial CRC. */
+static u64 generate_random_initial_crc(const struct crc_variant *v)
+{
+ switch (rand32() % 4) {
+ case 0:
+ return 0;
+ case 1:
+ return crc_mask(v); /* All 1 bits */
+ default:
+ return rand64() & crc_mask(v);
+ }
+}
+
+/* Generate a random length, preferring small lengths. */
+static size_t generate_random_length(size_t max_length)
+{
+ size_t len;
+
+ switch (rand32() % 3) {
+ case 0:
+ len = rand32() % 128;
+ break;
+ case 1:
+ len = rand32() % 3072;
+ break;
+ default:
+ len = rand32();
+ break;
+ }
+ return len % (max_length + 1);
+}
+
+/* Test that v->func gives the same CRCs as a reference implementation. */
+static void crc_main_test(struct kunit *test, const struct crc_variant *v)
+{
+ size_t i;
+
+ for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
+ u64 init_crc, expected_crc, actual_crc;
+ size_t len, offset;
+ bool nosimd;
+
+ init_crc = generate_random_initial_crc(v);
+ len = generate_random_length(CRC_KUNIT_MAX_LEN);
+
+ /* Generate a random offset. */
+ if (rand32() % 2 == 0) {
+ /* Use a random alignment mod 64 */
+ offset = rand32() % 64;
+ offset = min(offset, CRC_KUNIT_MAX_LEN - len);
+ } else {
+ /* Go up to the guard page, to catch buffer overreads */
+ offset = test_buflen - len;
+ }
+
+ if (rand32() % 8 == 0)
+ /* Refresh the data occasionally. */
+ prandom_bytes_state(&rng, &test_buffer[offset], len);
+
+ nosimd = rand32() % 8 == 0;
+
+ /*
+ * Compute the CRC, and verify that it equals the CRC computed
+ * by a simple bit-at-a-time reference implementation.
+ */
+ expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_disable();
+ actual_crc = v->func(init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_enable();
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "Wrong result with len=%zu offset=%zu nosimd=%d",
+ len, offset, nosimd);
+ }
+}
+
+/* Test that CRC(concat(A, B)) == combine_CRCs(CRC(A), CRC(B), len(B)). */
+static void crc_combine_test(struct kunit *test, const struct crc_variant *v)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ u64 init_crc = generate_random_initial_crc(v);
+ size_t len1 = generate_random_length(CRC_KUNIT_MAX_LEN);
+ size_t len2 = generate_random_length(CRC_KUNIT_MAX_LEN - len1);
+ u64 crc1, crc2, expected_crc, actual_crc;
+
+ prandom_bytes_state(&rng, test_buffer, len1 + len2);
+ crc1 = v->func(init_crc, test_buffer, len1);
+ crc2 = v->func(0, &test_buffer[len1], len2);
+ expected_crc = v->func(init_crc, test_buffer, len1 + len2);
+ actual_crc = v->combine_func(crc1, crc2, len2);
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "CRC combination gave wrong result with len1=%zu len2=%zu\n",
+ len1, len2);
+ }
+}
+
+static void crc_test(struct kunit *test, const struct crc_variant *v)
+{
+ crc_main_test(test, v);
+ if (v->combine_func)
+ crc_combine_test(test, v);
+}
+
+static __always_inline void
+crc_benchmark(struct kunit *test,
+ u64 (*crc_func)(u64 crc, const u8 *p, size_t len))
+{
+ static const size_t lens_to_test[] = {
+ 1, 16, 64, 127, 128, 200, 256, 511, 512, 1024, 3173, 4096, 16384,
+ };
+ size_t len, i, j, num_iters;
+ /*
+ * Some of the CRC library functions are marked as __pure, so use
+ * volatile to ensure that all calls are really made as intended.
+ */
+ volatile u64 crc = 0;
+ u64 t;
+
+ if (!IS_ENABLED(CONFIG_CRC_BENCHMARK))
+ kunit_skip(test, "not enabled");
+
+ /* warm-up */
+ for (i = 0; i < 10000000; i += CRC_KUNIT_MAX_LEN)
+ crc = crc_func(crc, test_buffer, CRC_KUNIT_MAX_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
+ len = lens_to_test[i];
+ KUNIT_ASSERT_LE(test, len, CRC_KUNIT_MAX_LEN);
+ num_iters = 10000000 / (len + 128);
+ preempt_disable();
+ t = ktime_get_ns();
+ for (j = 0; j < num_iters; j++)
+ crc = crc_func(crc, test_buffer, len);
+ t = ktime_get_ns() - t;
+ preempt_enable();
+ kunit_info(test, "len=%zu: %llu MB/s\n",
+ len, div64_u64((u64)len * num_iters * 1000, t));
+ }
+}
+
+/* crc16 */
+
+static u64 crc16_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc16(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc16 = {
+ .bits = 16,
+ .le = true,
+ .poly = 0xa001,
+ .func = crc16_wrapper,
+};
+
+static void crc16_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc16);
+}
+
+static void crc16_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc16_wrapper);
+}
+
+/* crc_t10dif */
+
+static u64 crc_t10dif_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc_t10dif_update(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc_t10dif = {
+ .bits = 16,
+ .le = false,
+ .poly = 0x8bb7,
+ .func = crc_t10dif_wrapper,
+};
+
+static void crc_t10dif_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc_t10dif);
+}
+
+static void crc_t10dif_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc_t10dif_wrapper);
+}
+
+/* crc32_le */
+
+static u64 crc32_le_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_le(crc, p, len);
+}
+
+static u64 crc32_le_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return crc32_le_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32_le = {
+ .bits = 32,
+ .le = true,
+ .poly = 0xedb88320,
+ .func = crc32_le_wrapper,
+ .combine_func = crc32_le_combine_wrapper,
+};
+
+static void crc32_le_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_le);
+}
+
+static void crc32_le_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_le_wrapper);
+}
+
+/* crc32_be */
+
+static u64 crc32_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc32_be = {
+ .bits = 32,
+ .le = false,
+ .poly = 0x04c11db7,
+ .func = crc32_be_wrapper,
+};
+
+static void crc32_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_be);
+}
+
+static void crc32_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_be_wrapper);
+}
+
+/* crc32c */
+
+static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32c(crc, p, len);
+}
+
+static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return __crc32c_le_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32c = {
+ .bits = 32,
+ .le = true,
+ .poly = 0x82f63b78,
+ .func = crc32c_wrapper,
+ .combine_func = crc32c_combine_wrapper,
+};
+
+static void crc32c_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32c);
+}
+
+static void crc32c_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32c_wrapper);
+}
+
+/* crc64_be */
+
+static u64 crc64_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc64_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_be = {
+ .bits = 64,
+ .le = false,
+ .poly = 0x42f0e1eba9ea3693,
+ .func = crc64_be_wrapper,
+};
+
+static void crc64_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_be);
+}
+
+static void crc64_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_be_wrapper);
+}
+
+static struct kunit_case crc_test_cases[] = {
+ KUNIT_CASE(crc16_test),
+ KUNIT_CASE(crc16_benchmark),
+ KUNIT_CASE(crc_t10dif_test),
+ KUNIT_CASE(crc_t10dif_benchmark),
+ KUNIT_CASE(crc32_le_test),
+ KUNIT_CASE(crc32_le_benchmark),
+ KUNIT_CASE(crc32_be_test),
+ KUNIT_CASE(crc32_be_benchmark),
+ KUNIT_CASE(crc32c_test),
+ KUNIT_CASE(crc32c_benchmark),
+ KUNIT_CASE(crc64_be_test),
+ KUNIT_CASE(crc64_be_benchmark),
+ {},
+};
+
+static struct kunit_suite crc_test_suite = {
+ .name = "crc",
+ .test_cases = crc_test_cases,
+ .suite_init = crc_suite_init,
+ .suite_exit = crc_suite_exit,
+};
+kunit_test_suite(crc_test_suite);
+
+MODULE_DESCRIPTION("Unit tests and benchmarks for the CRC library functions");
+MODULE_LICENSE("GPL");
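
[Editorial illustration, not part of the patch: crc_ref() in the new test above is a bit-at-a-time reference that every tested CRC variant is checked against. The sketch below specializes its little-endian branch to the crc_variant_crc16 parameters (bits = 16, poly = 0xa001, zero initial CRC); with those inputs it reproduces the well-known CRC-16/ARC check value 0xbb3d for the ASCII string "123456789".]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian bit-at-a-time CRC, same recurrence as crc_ref() above. */
static uint64_t crc_ref_le(uint64_t crc, uint64_t poly, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		for (int j = 0; j < 8; j++) {
			crc ^= (p[i] >> j) & 1;
			crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
		}
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";

	/* Expected output: bb3d */
	printf("%04llx\n", (unsigned long long)
	       crc_ref_le(0, 0xa001, (const uint8_t *)msg, strlen(msg)));
	return 0;
}
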
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 6bba6473fdf3..902e49410aaf 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -697,7 +697,7 @@ static int __init libaesgcm_init(void)
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
- u8 buf[sizeof(ptext12)];
+ static u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
index 8f8c45e0cdcf..fbe72cb3453a 100644
--- a/lib/crypto/gf128mul.c
+++ b/lib/crypto/gf128mul.c
@@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b)
}
EXPORT_SYMBOL(gf128mul_lle);
-void gf128mul_bbe(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_bbe(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[7]);
- if (ch & 0x40)
- be128_xor(r, r, &p[6]);
- if (ch & 0x20)
- be128_xor(r, r, &p[5]);
- if (ch & 0x10)
- be128_xor(r, r, &p[4]);
- if (ch & 0x08)
- be128_xor(r, r, &p[3]);
- if (ch & 0x04)
- be128_xor(r, r, &p[2]);
- if (ch & 0x02)
- be128_xor(r, r, &p[1]);
- if (ch & 0x01)
- be128_xor(r, r, &p[0]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_bbe(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_bbe);
-
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
@@ -380,28 +342,6 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_bbe);
-
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
@@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 0;
-
- *r = t->t[ap[0]];
- while (++i < 16) {
- gf128mul_x8_bbe(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_bbe);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 52eb6ba29698..999053fa133e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
@@ -13,6 +13,24 @@
#include <linux/fault-inject.h>
/*
+ * The should_fail() functions use prandom instead of the normal Linux RNG
+ * since they don't need cryptographically secure random numbers.
+ */
+static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state);
+
+static u32 fault_prandom_u32_below_100(void)
+{
+ struct rnd_state *state;
+ u32 res;
+
+ state = &get_cpu_var(fault_rnd_state);
+ res = prandom_u32_state(state);
+ put_cpu_var(fault_rnd_state);
+
+ return res % 100;
+}
+
+/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
@@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str)
return 0;
}
+ prandom_init_once(&fault_rnd_state);
+
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
@@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
return false;
}
- if (attr->probability <= get_random_u32_below(100))
+ if (attr->probability <= fault_prandom_u32_below_100())
return false;
fail:
@@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name,
if (IS_ERR(dir))
return dir;
+ prandom_init_once(&fault_rnd_state);
+
debugfs_create_ul("probability", mode, dir, &attr->probability);
debugfs_create_ul("interval", mode, dir, &attr->interval);
debugfs_create_atomic_t("times", mode, dir, &attr->times);
@@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = {
void fault_config_init(struct fault_config *config, const char *name)
{
+ prandom_init_once(&fault_rnd_state);
+
config_group_init_type_name(&config->group, name, &fault_config_type);
}
EXPORT_SYMBOL_GPL(fault_config_init);
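
For orientation, a sketch of the caller side that ends up in should_fail_ex() above. The attribute and function names here are hypothetical; DECLARE_FAULT_ATTR() and should_fail() are the existing fault-injection API.

/* Hypothetical subsystem hook, for illustration only. */
#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_example_alloc);

static bool example_should_fail_alloc(size_t size)
{
	/* Evaluates probability/interval/times and, with this patch,
	 * draws its random percentage from the per-CPU prandom state. */
	return should_fail(&fail_example_alloc, size);
}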
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index f755b997b967..6d03425b849e 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -2,30 +2,11 @@
#include <stdio.h>
#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
-#include "crc32defs.h"
#include <inttypes.h>
-#define ENTRIES_PER_LINE 4
-
-#if CRC_LE_BITS > 8
-# define LE_TABLE_ROWS (CRC_LE_BITS/8)
-# define LE_TABLE_SIZE 256
-#else
-# define LE_TABLE_ROWS 1
-# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
-#endif
-
-#if CRC_BE_BITS > 8
-# define BE_TABLE_ROWS (CRC_BE_BITS/8)
-# define BE_TABLE_SIZE 256
-#else
-# define BE_TABLE_ROWS 1
-# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
-#endif
-
-static uint32_t crc32table_le[LE_TABLE_ROWS][256];
-static uint32_t crc32table_be[BE_TABLE_ROWS][256];
-static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
+static uint32_t crc32table_le[256];
+static uint32_t crc32table_be[256];
+static uint32_t crc32ctable_le[256];
/**
* crc32init_le() - allocate and initialize LE table data
@@ -34,25 +15,17 @@ static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
* fact that crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
-static void crc32init_le_generic(const uint32_t polynomial,
- uint32_t (*tab)[256])
+static void crc32init_le_generic(const uint32_t polynomial, uint32_t tab[256])
{
unsigned i, j;
uint32_t crc = 1;
- tab[0][0] = 0;
+ tab[0] = 0;
- for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
+ for (i = 128; i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
- tab[0][i + j] = crc ^ tab[0][j];
- }
- for (i = 0; i < LE_TABLE_SIZE; i++) {
- crc = tab[0][i];
- for (j = 1; j < LE_TABLE_ROWS; j++) {
- crc = tab[0][crc & 0xff] ^ (crc >> 8);
- tab[j][i] = crc;
- }
+ for (j = 0; j < 256; j += 2 * i)
+ tab[i + j] = crc ^ tab[j];
}
}
@@ -74,34 +47,22 @@ static void crc32init_be(void)
unsigned i, j;
uint32_t crc = 0x80000000;
- crc32table_be[0][0] = 0;
+ crc32table_be[0] = 0;
- for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+ for (i = 1; i < 256; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
- crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
- }
- for (i = 0; i < BE_TABLE_SIZE; i++) {
- crc = crc32table_be[0][i];
- for (j = 1; j < BE_TABLE_ROWS; j++) {
- crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
- crc32table_be[j][i] = crc;
- }
+ crc32table_be[i + j] = crc ^ crc32table_be[j];
}
}
-static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
+static void output_table(const uint32_t table[256])
{
- int i, j;
-
- for (j = 0 ; j < rows; j++) {
- printf("{");
- for (i = 0; i < len - 1; i++) {
- if (i % ENTRIES_PER_LINE == 0)
- printf("\n");
- printf("%s(0x%8.8xL), ", trans, table[j][i]);
- }
- printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
+ int i;
+
+ for (i = 0; i < 256; i += 4) {
+ printf("\t0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
+ table[i], table[i + 1], table[i + 2], table[i + 3]);
}
}
@@ -109,34 +70,20 @@ int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
- if (CRC_LE_BITS > 1) {
- crc32init_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32table_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_le();
+ printf("static const u32 ____cacheline_aligned crc32table_le[256] = {\n");
+ output_table(crc32table_le);
+ printf("};\n");
- if (CRC_BE_BITS > 1) {
- crc32init_be();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_be[%d][%d] = {",
- BE_TABLE_ROWS, BE_TABLE_SIZE);
- output_table(crc32table_be, LE_TABLE_ROWS,
- BE_TABLE_SIZE, "tobe");
- printf("};\n");
- }
- if (CRC_LE_BITS > 1) {
- crc32cinit_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32ctable_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32ctable_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_be();
+ printf("static const u32 ____cacheline_aligned crc32table_be[256] = {\n");
+ output_table(crc32table_be);
+ printf("};\n");
+
+ crc32cinit_le();
+ printf("static const u32 ____cacheline_aligned crc32ctable_le[256] = {\n");
+ output_table(crc32ctable_le);
+ printf("};\n");
return 0;
}
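
As background for why single 256-entry tables suffice here, a sketch of the standard byte-at-a-time (Sarwate) update loop that consumes such a little-endian table. This loop is illustrative only and not part of the patch.

/* Illustrative only: classic one-byte-per-step CRC update using a
 * single 256-entry little-endian table like the ones generated above. */
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_le_bytewise(uint32_t crc, const uint8_t *p, size_t len,
				  const uint32_t tab[256])
{
	while (len--)
		crc = tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}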
diff --git a/lib/inflate.c b/lib/inflate.c
index fbaf03c1748d..eab886baa1b4 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -1257,8 +1257,6 @@ static int INIT gunzip(void)
/* Decompress */
if ((res = inflate())) {
switch (res) {
- case 0:
- break;
case 1:
error("invalid compressed format (err=1)");
break;
diff --git a/lib/kobject.c b/lib/kobject.c
index 72fa20f405f1..abe5f5b856ce 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1096,30 +1096,6 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
}
EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
-const void *kobj_ns_initial(enum kobj_ns_type type)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->initial_ns();
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 34d7242d526d..a97897edd964 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -81,4 +81,16 @@ config KUNIT_DEFAULT_ENABLED
In most cases this should be left as Y. Only if additional opt-in
behavior is needed should this be set to N.
+config KUNIT_AUTORUN_ENABLED
+ bool "Default value of kunit.autorun"
+ default y
+ help
+ Sets the default value of kunit.autorun. If set to N then KUnit
+ tests will not run after initialization unless kunit.autorun=1 is
+ passed on the kernel command line. Tests can still be run manually
+ via the debugfs interface.

+
+ In most cases this should be left as Y. Only if additional opt-in
+ behavior is needed should this be set to N.
+
endif # KUNIT
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index af71911f4a07..9c326f1837bd 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -145,7 +145,7 @@ static ssize_t debugfs_run(struct file *file,
struct inode *f_inode = file->f_inode;
struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private;
- __kunit_test_suites_init(&suite, 1);
+ __kunit_test_suites_init(&suite, 1, true);
return count;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 34b7b6833df3..3f39955cb0f1 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -29,6 +29,22 @@ const char *kunit_action(void)
return action_param;
}
+/*
+ * Run KUnit tests after initialization
+ */
+#ifdef CONFIG_KUNIT_AUTORUN_ENABLED
+static bool autorun_param = true;
+#else
+static bool autorun_param;
+#endif
+module_param_named(autorun, autorun_param, bool, 0);
+MODULE_PARM_DESC(autorun, "Run KUnit tests after initialization");
+
+bool kunit_autorun(void)
+{
+ return autorun_param;
+}
+
static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
@@ -260,13 +276,14 @@ free_copy:
void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
{
size_t num_suites = suite_set->end - suite_set->start;
+ bool autorun = kunit_autorun();
- if (builtin || num_suites) {
+ if (autorun && (builtin || num_suites)) {
pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);
}
- __kunit_test_suites_init(suite_set->start, num_suites);
+ __kunit_test_suites_init(suite_set->start, num_suites, autorun);
}
void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 089c832e3cdb..146d1b48a096 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -708,7 +708,8 @@ bool kunit_enabled(void)
return enable_param;
}
-int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites,
+ bool run_tests)
{
unsigned int i;
@@ -731,7 +732,8 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
- kunit_run_tests(suites[i]);
+ if (run_tests)
+ kunit_run_tests(suites[i]);
}
static_branch_dec(&kunit_running);
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 10a560feb66e..48342736d016 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -57,15 +57,12 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
*ppages = pages;
- got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
if (got != npages) {
release_pages(pages, got);
KUNIT_ASSERT_EQ(test, got, npages);
}
- for (int i = 0; i < npages; i++)
- pages[i]->index = i;
-
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
deleted file mode 100644
index 649e687413a0..000000000000
--- a/lib/libcrc32c.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CRC32C
- *@Article{castagnoli-crc,
- * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
- * title = {{Optimization of Cyclic Redundancy-Check Codes with 24
- * and 32 Parity Bits}},
- * journal = IEEE Transactions on Communication,
- * year = {1993},
- * volume = {41},
- * number = {6},
- * pages = {},
- * month = {June},
- *}
- * Used by the iSCSI driver, possibly others, and derived from
- * the iscsi-crc.c module of the linux-iscsi driver at
- * http://linux-iscsi.sourceforge.net.
- *
- * Following the example of lib/crc32, this function is intended to be
- * flexible and useful for all users. Modules that currently have their
- * own crc32c, but hopefully may be able to use this one are:
- * net/sctp (please add all your doco to here if you change to
- * use this one!)
- * <endoflist>
- *
- * Copyright (c) 2004 Cisco Systems, Inc.
- */
-
-#include <crypto/hash.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/crc32c.h>
-
-static struct crypto_shash *tfm;
-
-u32 crc32c(u32 crc, const void *address, unsigned int length)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
- u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
- int err;
-
- shash->tfm = tfm;
- *ctx = crc;
-
- err = crypto_shash_update(shash, address, length);
- BUG_ON(err);
-
- ret = *ctx;
- barrier_data(ctx);
- return ret;
-}
-
-EXPORT_SYMBOL(crc32c);
-
-static int __init libcrc32c_mod_init(void)
-{
- tfm = crypto_alloc_shash("crc32c", 0, 0);
- return PTR_ERR_OR_ZERO(tfm);
-}
-
-static void __exit libcrc32c_mod_fini(void)
-{
- crypto_free_shash(tfm);
-}
-
-module_init(libcrc32c_mod_init);
-module_exit(libcrc32c_mod_fini);
-
-MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
-MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
-MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32c");
diff --git a/lib/list_debug.c b/lib/list_debug.c
index db602417febf..ee7eeeb8f92c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -22,17 +22,17 @@ __list_valid_slowpath
bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(prev == NULL,
+ if (CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_add corruption. prev is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next == NULL,
+ CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_add corruption. next is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next->prev != prev,
+ CHECK_DATA_CORRUPTION(next->prev != prev, next,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
- CHECK_DATA_CORRUPTION(prev->next != next,
+ CHECK_DATA_CORRUPTION(prev->next != next, prev,
"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(new == prev || new == next,
+ CHECK_DATA_CORRUPTION(new == prev || new == next, NULL,
"list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
@@ -49,20 +49,20 @@ bool __list_del_entry_valid_or_report(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == NULL,
+ if (CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_del corruption, %px->next is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(prev == NULL,
+ CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_del corruption, %px->prev is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1, next,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
- CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2, prev,
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
- CHECK_DATA_CORRUPTION(prev->next != entry,
+ CHECK_DATA_CORRUPTION(prev->next != entry, prev,
"list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
entry, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(next->prev != entry,
+ CHECK_DATA_CORRUPTION(next->prev != entry, next,
"list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
entry, next->prev, next))
return false;
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 8d3f623536fe..a310ecb7ccc0 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* and list_sort is a stable sort, so it is not necessary to distinguish
* the @a < @b and @a == @b cases.
*
+ * The comparison function must adhere to specific mathematical properties
+ * to ensure correct and stable sorting:
+ * - Antisymmetry: cmp(@a, @b) must return the opposite sign of
+ * cmp(@b, @a).
+ * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then
+ * cmp(@a, @c) <= 0.
+ *
* This is compatible with two styles of @cmp function:
* - The traditional style which returns <0 / =0 / >0, or
* - Returning a boolean 0/1.
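
A minimal sketch of a comparison callback that satisfies the antisymmetry and transitivity requirements documented above. The element type and field names are hypothetical assumptions; the callback signature is list_sort()'s.

/* Hypothetical element type, for illustration only. */
struct example_item {
	struct list_head list;
	int key;
};

/* Antisymmetric and transitive: the result depends only on key order. */
static int example_item_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	const struct example_item *ia = list_entry(a, struct example_item, list);
	const struct example_item *ib = list_entry(b, struct example_item, list);

	return (ia->key > ib->key) - (ia->key < ib->key);
}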
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index b0bbeeb74b9e..2a397bb2c661 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0e31e6da5ce7..3a2cd9acada4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index cb358d6bde5a..17277ec16919 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/string.h> /* memset, memcpy */
+#include <linux/lz4.h>
#define FORCE_INLINE __always_inline
@@ -92,8 +93,7 @@ typedef uintptr_t uptrval;
#define MB (1 << 20)
#define GB (1U << 30)
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define MAX_DISTANCE LZ4_DISTANCE_MAX
#define STEPSIZE sizeof(size_t)
#define ML_BITS 4
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index bc45594ad2a8..91936dc3d14b 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -34,7 +34,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 047397136f15..f7153ade1be5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
- struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+ struct maple_big_node *bn, unsigned char *mid_split)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
- unsigned char slot_min, slot_count = mt_slots[bn->type];
+ unsigned char slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
@@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas,
split = b_end / 3;
*mid_split = split * 2;
} else {
- slot_min = mt_min_slots[bn->type];
-
*mid_split = 0;
- /*
- * Avoid having a range less than the slot count unless it
- * causes one node to be deficient.
- * NOTE: mt_min_slots is 1 based, b_end and split are zero.
- */
- while ((split < slot_count - 1) &&
- ((bn->pivot[split] - min) < slot_count - 1) &&
- (b_end - split > slot_min))
- split++;
}
/* Avoid ending a node on a NULL entry */
@@ -2377,7 +2366,7 @@ static inline struct maple_enode
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
- unsigned char *mid_split, unsigned long min)
+ unsigned char *mid_split)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
@@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
- split = mab_calc_split(mas, b_node, mid_split, min);
+ split = mab_calc_split(mas, b_node, mid_split);
*right = mas_new_ma_node(mas, b_node);
}
@@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
- &mid_split, mast->orig_l->min);
+ &mid_split);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
@@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
if (mas_push_data(mas, height, &mast, false))
break;
- split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
@@ -4746,29 +4735,6 @@ again:
}
/*
- * mas_next_entry() - Internal function to get the next entry.
- * @mas: The maple state
- * @limit: The maximum range start.
- *
- * Set the @mas->node to the next entry and the range_start to
- * the beginning value for the entry. Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
- * @mas->last on overflow.
- * Restarts on dead nodes.
- *
- * Return: the next entry or %NULL.
- */
-static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
-{
- if (mas->last >= limit) {
- mas->status = ma_overflow;
- return NULL;
- }
-
- return mas_next_slot(mas, limit, false);
-}
-
-/*
* mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
* highest gap address of a given size in a given node and descend.
* @mas: The maple state
@@ -4903,15 +4869,14 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
if (gap >= size) {
if (ma_is_leaf(type)) {
found = true;
- goto done;
- }
- if (mas->index <= pivot) {
- mas->node = mas_slot(mas, slots, offset);
- mas->min = min;
- mas->max = pivot;
- offset = 0;
break;
}
+
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ break;
}
next_slot:
min = pivot + 1;
@@ -4921,9 +4886,6 @@ next_slot:
}
}
- if (mte_is_root(mas->node))
- found = true;
-done:
mas->offset = offset;
return found;
}
@@ -5027,8 +4989,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size)
* There are 4 options:
* go to child (descend)
* go back to parent (ascend)
- * no gap found. (return, slot == MAPLE_NODE_SLOTS)
- * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ * no gap found. (return, error == -EBUSY)
+ * found the gap. (return)
*/
while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
if (last == mas->node)
@@ -5113,9 +5075,6 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
return xa_err(mas->node);
offset = mas->offset;
- if (unlikely(offset == MAPLE_NODE_SLOTS))
- return -EBUSY;
-
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
@@ -6938,7 +6897,7 @@ retry:
goto unlock;
while (mas_is_active(&mas) && (mas.last < max)) {
- entry = mas_next_entry(&mas, max);
+ entry = mas_next_slot(&mas, max, false);
if (likely(entry && !xa_is_zero(entry)))
break;
}
@@ -7597,7 +7556,7 @@ void mt_validate(struct maple_tree *mt)
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
- (mas.max != ULONG_MAX))) {
+ (!mte_is_root(mas.node)))) {
pr_err("Invalid size %u of " PTR_FMT "\n",
end, mas_mn(&mas));
}
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 3ef11305f8d2..853f023ae537 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o
\ No newline at end of file
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
index 6a169123320a..e1a79f093b2d 100644
--- a/lib/math/tests/Makefile
+++ b/lib/math/tests/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c
new file mode 100644
index 000000000000..1798e1312eb7
--- /dev/null
+++ b/lib/math/tests/int_sqrt_kunit.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+struct test_case_params {
+ unsigned long x;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 0, 0, "edge case: square root of 0" },
+ { 1, 1, "perfect square: square root of 1" },
+ { 2, 1, "non-perfect square: square root of 2" },
+ { 3, 1, "non-perfect square: square root of 3" },
+ { 4, 2, "perfect square: square root of 4" },
+ { 5, 2, "non-perfect square: square root of 5" },
+ { 6, 2, "non-perfect square: square root of 6" },
+ { 7, 2, "non-perfect square: square root of 7" },
+ { 8, 2, "non-perfect square: square root of 8" },
+ { 9, 3, "perfect square: square root of 9" },
+ { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" },
+ { 16, 4, "perfect square: square root of 16" },
+ { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" },
+ { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" },
+ { 81, 9, "perfect square: square root of 81" },
+ { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" },
+ { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" },
+ { 256, 16, "perfect square: square root of 256" },
+ { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" },
+ { 2147483648, 46340, "large input: square root of 2147483648" },
+ { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc);
+
+static void int_sqrt_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x));
+}
+
+static struct kunit_case math_int_sqrt_test_cases[] = {
+ KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params),
+ {}
+};
+
+static struct kunit_suite int_sqrt_test_suite = {
+ .name = "math-int_sqrt",
+ .test_cases = math_int_sqrt_test_cases,
+};
+
+kunit_test_suites(&int_sqrt_test_suite);
+
+MODULE_DESCRIPTION("math.int_sqrt KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b..3e555d012ed6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
@@ -665,7 +669,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* structure outside the hash table.
*
* This function may be called from any process context, including
- * non-preemptable context, but cannot be called from softirq or
+ * non-preemptible context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
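
For context, a sketch of a caller-visible insert path that reaches rhashtable_try_insert() above. The object layout and parameters are hypothetical; rhashtable_lookup_insert_fast() is the existing API.

/* Hypothetical hashed object and parameters, for illustration only. */
#include <linux/rhashtable.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_obj, key),
	.head_offset	= offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};

static int example_insert(struct rhashtable *ht, struct example_obj *obj)
{
	/* With this patch, nelems is bumped and table growth is scheduled
	 * only after a confirmed insertion, outside the bucket lock. */
	return rhashtable_lookup_insert_fast(ht, &obj->node, example_params);
}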
diff --git a/lib/sort.c b/lib/sort.c
index 048b7a6ef967..8e73dc55476b 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* copy (e.g. fix up pointers or auxiliary data), but the built-in swap
* avoids a slow retpoline and so is significantly faster.
*
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
* Sorting time is O(n log n) both on average and worst-case. While
* quicksort is slightly faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
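
Similarly, a minimal comparison callback for sort() that satisfies the properties listed above. The integer element type is an assumption; the signature is sort()'s cmp_func_t.

/* Illustrative only: antisymmetric, transitive comparator for an int array. */
static int example_cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}

/* Usage sketch: sort(array, n, sizeof(*array), example_cmp_int, NULL); */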
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index c40818ec9c18..135322592faf 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -47,10 +47,12 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
+#define DO_NOTHING_TYPE_UNION(var_type) void
#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
#define DO_NOTHING_RETURN_STRING(ptr) /**/
#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+#define DO_NOTHING_RETURN_UNION(ptr) /**/
#define DO_NOTHING_CALL_SCALAR(var, name) \
(var) = do_nothing_ ## name(&(var))
@@ -58,10 +60,13 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
do_nothing_ ## name(var)
#define DO_NOTHING_CALL_STRUCT(var, name) \
do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_UNION(var, name) \
+ do_nothing_ ## name(&(var))
#define FETCH_ARG_SCALAR(var) &var
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+#define FETCH_ARG_UNION(var) &var
/*
* On m68k, if the leaf function test variable is longer than 8 bytes,
@@ -70,13 +75,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
*/
#ifdef CONFIG_M68K
#define FILL_SIZE_STRING 8
+#define FILL_SIZE_ARRAY 2
#else
#define FILL_SIZE_STRING 16
+#define FILL_SIZE_ARRAY 8
#endif
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
+#define INIT_CLONE_UNION /**/
#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
@@ -92,6 +100,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
zero.three = 0; \
zero.four = 0; \
} while (0)
+#define ZERO_CLONE_UNION(zero) ZERO_CLONE_STRUCT(zero)
#define INIT_SCALAR_none(var_type) /**/
#define INIT_SCALAR_zero(var_type) = 0
@@ -101,6 +110,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_none(var_type) /**/
#define INIT_STRUCT_zero(var_type) = { }
+#define INIT_STRUCT_old_zero(var_type) = { 0 }
#define __static_partial { .two = 0, }
@@ -146,6 +156,34 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_assigned_copy(var_type) \
; var = *(arg)
+/* Union initialization is the same as structs. */
+#define INIT_UNION_none(var_type) INIT_STRUCT_none(var_type)
+#define INIT_UNION_zero(var_type) INIT_STRUCT_zero(var_type)
+#define INIT_UNION_old_zero(var_type) INIT_STRUCT_old_zero(var_type)
+
+#define INIT_UNION_static_partial(var_type) \
+ INIT_STRUCT_static_partial(var_type)
+#define INIT_UNION_static_all(var_type) \
+ INIT_STRUCT_static_all(var_type)
+#define INIT_UNION_dynamic_partial(var_type) \
+ INIT_STRUCT_dynamic_partial(var_type)
+#define INIT_UNION_dynamic_all(var_type) \
+ INIT_STRUCT_dynamic_all(var_type)
+#define INIT_UNION_runtime_partial(var_type) \
+ INIT_STRUCT_runtime_partial(var_type)
+#define INIT_UNION_runtime_all(var_type) \
+ INIT_STRUCT_runtime_all(var_type)
+#define INIT_UNION_assigned_static_partial(var_type) \
+ INIT_STRUCT_assigned_static_partial(var_type)
+#define INIT_UNION_assigned_static_all(var_type) \
+ INIT_STRUCT_assigned_static_all(var_type)
+#define INIT_UNION_assigned_dynamic_partial(var_type) \
+ INIT_STRUCT_assigned_dynamic_partial(var_type)
+#define INIT_UNION_assigned_dynamic_all(var_type) \
+ INIT_STRUCT_assigned_dynamic_all(var_type)
+#define INIT_UNION_assigned_copy(var_type) \
+ INIT_STRUCT_assigned_copy(var_type)
+
/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
@@ -294,6 +332,33 @@ struct test_user {
unsigned long four;
};
+/* No padding: all members are the same size. */
+union test_same_sizes {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Mismatched sizes, with one and two being small */
+union test_small_start {
+ char one:1;
+ char two;
+ short three;
+ unsigned long four;
+ struct big_struct {
+ unsigned long array[FILL_SIZE_ARRAY];
+ } big;
+};
+
+/* Mismatched sizes, with three and four being small */
+union test_small_end {
+ short one;
+ unsigned long two;
+ char three:1;
+ char four;
+};
+
#define ALWAYS_PASS WANT_SUCCESS
#define ALWAYS_FAIL XFAIL
@@ -332,6 +397,11 @@ struct test_user {
struct test_ ## name, STRUCT, init, \
xfail)
+#define DEFINE_UNION_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ union test_ ## name, STRUCT, init, \
+ xfail)
+
#define DEFINE_STRUCT_TESTS(init, xfail) \
DEFINE_STRUCT_TEST(small_hole, init, xfail); \
DEFINE_STRUCT_TEST(big_hole, init, xfail); \
@@ -343,9 +413,22 @@ struct test_user {
xfail); \
DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+#define DEFINE_UNION_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_UNION_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_UNION_TESTS(base ## _ ## all, xfail)
+
+#define DEFINE_UNION_TESTS(init, xfail) \
+ DEFINE_UNION_TEST(same_sizes, init, xfail); \
+ DEFINE_UNION_TEST(small_start, init, xfail); \
+ DEFINE_UNION_TEST(small_end, init, xfail);
+
/* These should be fully initialized all the time! */
DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(old_zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(old_zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
@@ -353,6 +436,12 @@ DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+DEFINE_UNION_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_UNION_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
DEFINE_SCALAR_TESTS(none, STRONG_PASS);
DEFINE_STRUCT_TESTS(none, BYREF_PASS);
@@ -436,13 +525,23 @@ DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
KUNIT_CASE(test_trailing_hole_ ## init),\
KUNIT_CASE(test_packed_ ## init) \
+#define KUNIT_test_unions(init) \
+ KUNIT_CASE(test_same_sizes_ ## init), \
+ KUNIT_CASE(test_small_start_ ## init), \
+ KUNIT_CASE(test_small_end_ ## init) \
+
static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
KUNIT_test_scalars(zero),
KUNIT_test_structs(zero),
+ KUNIT_test_structs(old_zero),
+ KUNIT_test_unions(zero),
+ KUNIT_test_unions(old_zero),
/* Padding here appears to be accidentally always initialized? */
KUNIT_test_structs(dynamic_partial),
KUNIT_test_structs(assigned_dynamic_partial),
+ KUNIT_test_unions(dynamic_partial),
+ KUNIT_test_unions(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
KUNIT_test_structs(static_partial),
KUNIT_test_structs(static_all),
@@ -452,8 +551,17 @@ static struct kunit_case stackinit_test_cases[] = {
KUNIT_test_structs(assigned_static_partial),
KUNIT_test_structs(assigned_static_all),
KUNIT_test_structs(assigned_dynamic_all),
+ KUNIT_test_unions(static_partial),
+ KUNIT_test_unions(static_all),
+ KUNIT_test_unions(dynamic_all),
+ KUNIT_test_unions(runtime_partial),
+ KUNIT_test_unions(runtime_all),
+ KUNIT_test_unions(assigned_static_partial),
+ KUNIT_test_unions(assigned_static_all),
+ KUNIT_test_unions(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
KUNIT_test_structs(assigned_copy),
+ KUNIT_test_unions(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
KUNIT_test_scalars(none),
KUNIT_CASE(test_switch_1_none),
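
To make concrete what the new union cases probe, a reduced sketch of the property under test: with "= { }" C only guarantees that the first named member is initialized, so whether the rest of the union's stack storage ends up zeroed is exactly what the test observes. The helper below is illustrative and not part of the test macros.

/* Illustrative only: checks whether the trailing bytes of a union whose
 * first member is smaller than the union are zeroed by "= { }". */
static bool example_union_tail_zeroed(void)
{
	union test_small_end var = { };
	const unsigned char *p = (const unsigned char *)&var;
	size_t i;

	for (i = 0; i < sizeof(var); i++)
		if (p[i])
			return false;
	return true;
}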
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2eed1ad958e9..af0041df2b72 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -478,7 +478,7 @@ static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
* to overflow the field size of the native instruction, triggering
* a branch conversion mechanism in some JITs.
*/
-static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu32)
{
struct bpf_insn *insns;
int len = S16_MAX + 5;
@@ -501,7 +501,7 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
};
int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
- if (i & 1)
+ if ((i & 1) || alu32)
insns[i++] = BPF_ALU32_REG(op, R0, R1);
else
insns[i++] = BPF_ALU64_REG(op, R0, R1);
@@ -516,27 +516,47 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
}
/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1, true);
+}
+
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1, false);
}
/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0, true);
+}
+
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0, false);
}
/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0, true);
+}
+
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0, false);
}
/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0, true);
+}
+
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0, false);
}
/* ALU result computation used in tests */
@@ -14234,6 +14254,38 @@ static struct bpf_test tests[] = {
},
/* Conditional branch conversions */
{
+ "Long conditional jump: taken at runtime (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken_32,
+ },
+ {
+ "Long conditional jump: not taken at runtime (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken_32,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken_32,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken_32,
+ },
+ {
"Long conditional jump: taken at runtime",
{ },
INTERNAL | FLAG_NO_DATA,
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 704cb1093ae8..13e2a10d7554 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init check_deficient_node(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ int count;
+
+ mas_lock(&mas);
+ for (count = 0; count < 10; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 20; count < 39; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 10; count < 12; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+ mas_unlock(&mas);
+ mt_validate(mt);
+}
+
static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
@@ -3714,6 +3738,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
}
mtree_destroy(mt);
+
+ /*
+ * Issue with reverse search was discovered
+ * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/
+ * Exhausting the allocation area and forcing the search to wrap needs a
+ * mas_reset() in mas_alloc_cyclic().
+ */
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 1023; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+ mtree_erase(mt, 123);
+ MT_BUG_ON(mt, mtree_load(mt, 123) != NULL);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 123 != location);
+ MT_BUG_ON(mt, 124 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_erase(mt, 100);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 100 != location);
+ MT_BUG_ON(mt, 101 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_destroy(mt);
+
/* Overflow test */
next = ULONG_MAX - 1;
ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
@@ -3797,6 +3849,10 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_deficient_node(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_store_null(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index e6fbb798558b..a9c4a74d3898 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap,
int last;
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
while (heap->nr > 0) {
if (min_heap) {
if (last > values[0]) {
@@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap,
}
}
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
}
return err;
}
@@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap)
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
@@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap)
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &data[i], &funcs, NULL);
+ min_heap_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap)
/* Fill values with data to pop and replace. */
temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_pop_push(&heap, &data[i], &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
temp = get_random_u32();
- min_heap_pop_push(&heap, &temp, &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap)
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap;
- min_heap_init(&heap, values, ARRAY_SIZE(values));
+ min_heap_init_inline(&heap, values, ARRAY_SIZE(values));
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
@@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap)
heap.nr = ARRAY_SIZE(values);
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index b6696fa1d426..4249e0cc8aaf 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -71,7 +71,7 @@ static struct test_sysctl_data test_data = {
};
/* These are all under /proc/sys/debug/test_sysctl/ */
-static struct ctl_table test_table[] = {
+static const struct ctl_table test_table[] = {
{
.procname = "int_0001",
.data = &test_data.int_0001,
@@ -177,7 +177,7 @@ static int test_sysctl_setup_node_tests(void)
}
/* Used to test that unregister actually removes the directory */
-static struct ctl_table test_table_unregister[] = {
+static const struct ctl_table test_table_unregister[] = {
{
.procname = "unregister_error",
.data = &test_data.int_0001,
@@ -220,7 +220,7 @@ static int test_sysctl_run_register_mount_point(void)
return 0;
}
-static struct ctl_table test_table_empty[] = { };
+static const struct ctl_table test_table_empty[] = { };
static int test_sysctl_run_register_empty(void)
{
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4ddf769861ff..f585949ff696 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -373,7 +373,7 @@ vm_map_ram_test(void)
if (!pages)
return -1;
- nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+ nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
if (nr_allocated != map_nr_pages)
goto cleanup;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d5c5cbba33ed..6932a26f4927 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1448,6 +1448,41 @@ static noinline void check_pause(struct xarray *xa)
XA_BUG_ON(xa, count != order_limit);
xa_destroy(xa);
+
+ index = 0;
+ for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ index = 0;
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (XA_CHUNK_SHIFT - count);
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+ index = 0;
+ count = 0;
+ xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (XA_CHUNK_SHIFT - count);
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+ xa_destroy(xa);
+
}
static noinline void check_move_tiny(struct xarray *xa)
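
The added loop exercises the common pattern of pausing an iteration to drop the RCU lock and resuming it later; a generic sketch of that pattern follows. The walk function and processing step are hypothetical, while XA_STATE, xas_for_each(), xas_retry() and xas_pause() are the existing advanced API.

/* Illustrative only: the pause/resume iteration pattern being tested. */
#include <linux/xarray.h>

static void example_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		/* ... process entry ... */
		if (need_resched()) {
			xas_pause(&xas);	/* index handling fixed above */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}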
diff --git a/lib/xarray.c b/lib/xarray.c
index 32d4bac8c94c..116e9286c64e 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -125,19 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
*/
static void xas_squash_marks(const struct xa_state *xas)
{
- unsigned int mark = 0;
+ xa_mark_t mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
- if (!xas->xa_sibs)
- return;
+ for (;;) {
+ unsigned long *marks = node_marks(xas->xa_node, mark);
- do {
- unsigned long *marks = xas->xa_node->marks[mark];
- if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
- continue;
- __set_bit(xas->xa_offset, marks);
- bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
- } while (mark++ != (__force unsigned)XA_MARK_MAX);
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
}
/* extracts the offset within this node from the index */
@@ -435,6 +436,11 @@ static unsigned long max_index(void *entry)
return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
+static inline void *xa_zero_to_null(void *entry)
+{
+ return xa_is_zero(entry) ? NULL : entry;
+}
+
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
@@ -451,8 +457,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
- if (xa_is_zero(entry) && xa_zero_busy(xa))
- entry = NULL;
+ if (xa_zero_busy(xa))
+ entry = xa_zero_to_null(entry);
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@@ -1022,7 +1028,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
- if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
@@ -1147,6 +1153,7 @@ void xas_pause(struct xa_state *xas)
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
+ xas->xa_index &= ~0UL << node->shift;
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
@@ -1382,6 +1389,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
continue;
+ if (xa_is_sibling(entry))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1474,9 +1483,7 @@ void *xa_load(struct xarray *xa, unsigned long index)
rcu_read_lock();
do {
- entry = xas_load(&xas);
- if (xa_is_zero(entry))
- entry = NULL;
+ entry = xa_zero_to_null(xas_load(&xas));
} while (xas_retry(&xas, entry));
rcu_read_unlock();
@@ -1486,8 +1493,6 @@ EXPORT_SYMBOL(xa_load);
static void *xas_result(struct xa_state *xas, void *curr)
{
- if (xa_is_zero(curr))
- return NULL;
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@@ -1508,7 +1513,7 @@ static void *xas_result(struct xa_state *xas, void *curr)
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
- return xas_result(&xas, xas_store(&xas, NULL));
+ return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
@@ -1567,7 +1572,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
- return xas_result(&xas, curr);
+ return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
@@ -1600,6 +1605,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(xa_store);
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp);
+
/**
* __xa_cmpxchg() - Store this entry in the XArray.
* @xa: XArray.
@@ -1619,6 +1627,13 @@ EXPORT_SYMBOL(xa_store);
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
+ return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
XA_STATE(xas, xa, index);
void *curr;
@@ -1636,7 +1651,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
return xas_result(&xas, curr);
}
-EXPORT_SYMBOL(__xa_cmpxchg);
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
@@ -1656,26 +1670,16 @@ EXPORT_SYMBOL(__xa_cmpxchg);
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
- XA_STATE(xas, xa, index);
void *curr;
+ int errno;
- if (WARN_ON_ONCE(xa_is_advanced(entry)))
- return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
-
- do {
- curr = xas_load(&xas);
- if (!curr) {
- xas_store(&xas, entry);
- if (xa_track_free(xa))
- xas_clear_mark(&xas, XA_FREE_MARK);
- } else {
- xas_set_err(&xas, -EBUSY);
- }
- } while (__xas_nomem(&xas, gfp));
-
- return xas_error(&xas);
+ curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
+ errno = xa_err(curr);
+ if (errno)
+ return errno;
+ return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
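
For orientation, a sketch of the caller-visible semantics that the xa_zero_to_null() conversions above preserve: a reserved (zero) entry reads back as NULL through the public API. The function name and index are hypothetical; xa_insert(), xa_load(), xa_store() and xa_err() are the existing API.

/* Hypothetical caller, for illustration only. */
#include <linux/xarray.h>

static int example_reserve_then_store(struct xarray *xa, void *p)
{
	/* Inserting NULL stores XA_ZERO_ENTRY internally (a reservation). */
	int err = xa_insert(xa, 1, NULL, GFP_KERNEL);

	if (err)
		return err;

	/* The reservation is invisible to readers: it loads as NULL. */
	WARN_ON(xa_load(xa, 1) != NULL);

	/* Replace the reservation with a real entry. */
	return xa_err(xa_store(xa, 1, p, GFP_KERNEL));
}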
diff --git a/mm/Kconfig b/mm/Kconfig
index 84000b016808..1b501db06417 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -550,20 +550,63 @@ menuconfig MEMORY_HOTPLUG
if MEMORY_HOTPLUG
-config MEMORY_HOTPLUG_DEFAULT_ONLINE
- bool "Online the newly added memory blocks by default"
- depends on MEMORY_HOTPLUG
+choice
+ prompt "Memory Hotplug Default Online Type"
+ default MHP_DEFAULT_ONLINE_TYPE_OFFLINE
help
+ Default memory type for hotplugged memory.
+
This option sets the default policy setting for memory hotplug
onlining policy (/sys/devices/system/memory/auto_online_blocks) which
determines what happens to newly added memory regions. Policy setting
can always be changed at runtime.
+
+ The default is 'offline'.
+
+ Select offline to defer onlining to drivers and user policy.
+ Select auto to let the kernel choose what zones to utilize.
+ Select online_kernel to generally allow kernel usage of this memory.
+ Select online_movable to generally disallow kernel usage of this memory.
+
+ Example kernel usage would be page structs and page tables.
+
See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
- Say Y here if you want all hot-plugged memory blocks to appear in
- 'online' state by default.
- Say N here if you want the default policy to keep all hot-plugged
- memory blocks in 'offline' state.
+config MHP_DEFAULT_ONLINE_TYPE_OFFLINE
+ bool "offline"
+ help
+ Hotplugged memory will not be onlined by default.
+ Choose this for systems with drivers and user policy that
+ handle the onlining of hotplugged memory.
+
+config MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO
+ bool "auto"
+ help
+ Select this if you want the kernel to automatically online
+ hotplugged memory into the zone it thinks is reasonable.
+ This memory may be utilized for kernel data.
+
+config MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL
+ bool "kernel"
+ help
+ Select this if you want the kernel to automatically online
+ hotplugged memory into a zone capable of being used for kernel
+ data. This typically means ZONE_NORMAL.
+
+config MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE
+ bool "movable"
+ help
+ Select this if you want the kernel to automatically online
+ hotplug memory into ZONE_MOVABLE. This memory will generally
+ not be utilized for kernel data.
+
+ This should only be used when the admin knows sufficient
+ ZONE_NORMAL memory is available to describe hotplug memory,
+ otherwise hotplug memory may fail to online. For example,
+ sufficient kernel-capable memory (ZONE_NORMAL) must be
+ available to allocate page structs to describe ZONE_MOVABLE.
+
+endchoice
config MEMORY_HOTREMOVE
bool "Allow for memory hot remove"
@@ -1301,6 +1344,21 @@ config ARCH_HAS_USER_SHADOW_STACK
The architecture has hardware support for userspace shadow call
stacks (eg, x86 CET, arm64 GCS or RISC-V Zicfiss).
+config ARCH_SUPPORTS_PT_RECLAIM
+ def_bool n
+
+config PT_RECLAIM
+ bool "reclaim empty user page table pages"
+ default y
+ depends on ARCH_SUPPORTS_PT_RECLAIM && MMU && SMP
+ select MMU_GATHER_RCU_TABLE_FREE
+ help
+ Try to reclaim empty user page table pages in paths other than the
+ munmap and exit_mmap paths.
+
+ Note: currently only empty user PTE page table pages are reclaimed.
+
source "mm/damon/Kconfig"
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index dba52bb0da8a..850386a67b3e 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -146,3 +146,4 @@ obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
obj-$(CONFIG_EXECMEM) += execmem.o
obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
+obj-$(CONFIG_PT_RECLAIM) += pt_reclaim.o
diff --git a/mm/cma.h b/mm/cma.h
index ad61cc6dd439..8485ef893e99 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -36,7 +36,7 @@ struct cma {
};
extern struct cma cma_areas[MAX_CMA_AREAS];
-extern unsigned cma_area_count;
+extern unsigned int cma_area_count;
static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
diff --git a/mm/compaction.c b/mm/compaction.c
index a31c0f5758cf..12ed8425fa17 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -83,6 +83,7 @@ static inline bool is_via_compact_memory(int order) { return false; }
static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
{
post_alloc_hook(page, order, __GFP_MOVABLE);
+ set_page_refcounted(page);
return page;
}
#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
@@ -630,7 +631,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (PageCompound(page)) {
const unsigned int order = compound_order(page);
- if (blockpfn + (1UL << order) <= end_pfn) {
+ if ((order <= MAX_PAGE_ORDER) &&
+ (blockpfn + (1UL << order) <= end_pfn)) {
blockpfn += (1UL << order) - 1;
page += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
@@ -1868,6 +1870,7 @@ again:
dst = (struct folio *)freepage;
post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+ set_page_refcounted(&dst->page);
if (order)
prep_compound_page(&dst->page, order);
cc->nr_freepages -= 1 << order;
@@ -2488,7 +2491,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
*/
static enum compact_result
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
- int highest_zoneidx, unsigned int alloc_flags)
+ int highest_zoneidx, unsigned int alloc_flags,
+ bool async)
{
unsigned long watermark;
@@ -2497,6 +2501,23 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order,
alloc_flags))
return COMPACT_SUCCESS;
+ /*
+ * For unmovable allocations (without ALLOC_CMA), check if there is enough
+ * free memory in the non-CMA pageblocks. Otherwise compaction could form
+ * the high-order page in CMA pageblocks, which would not help the
+ * allocation to succeed. However, limit the check to costly order async
+ * compaction (such as opportunistic THP attempts) because there is the
+ * possibility that compaction would migrate pages from non-CMA to CMA
+ * pageblock.
+ */
+ if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
+ !(alloc_flags & ALLOC_CMA)) {
+ watermark = low_wmark_pages(zone) + compact_gap(order);
+ if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+ 0, zone_page_state(zone, NR_FREE_PAGES)))
+ return COMPACT_SKIPPED;
+ }
+
if (!compaction_suitable(zone, order, highest_zoneidx))
return COMPACT_SKIPPED;
@@ -2532,7 +2553,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
if (!is_via_compact_memory(cc->order)) {
ret = compaction_suit_allocation_order(cc->zone, cc->order,
cc->highest_zoneidx,
- cc->alloc_flags);
+ cc->alloc_flags,
+ cc->mode == MIGRATE_ASYNC);
if (ret != COMPACT_CONTINUE)
return ret;
}
@@ -3035,7 +3057,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
ret = compaction_suit_allocation_order(zone,
pgdat->kcompactd_max_order,
- highest_zoneidx, ALLOC_WMARK_MIN);
+ highest_zoneidx, ALLOC_WMARK_MIN,
+ false);
if (ret == COMPACT_CONTINUE)
return true;
}
@@ -3076,7 +3099,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
continue;
ret = compaction_suit_allocation_order(zone,
- cc.order, zoneid, ALLOC_WMARK_MIN);
+ cc.order, zoneid, ALLOC_WMARK_MIN,
+ false);
if (ret != COMPACT_CONTINUE)
continue;
@@ -3269,7 +3293,7 @@ static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
return ret;
}
-static struct ctl_table vm_compaction[] = {
+static const struct ctl_table vm_compaction[] = {
{
.procname = "compact_memory",
.data = &sysctl_compact_memory,
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index d0357f3e9372..c213cf8b5638 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -71,36 +71,6 @@ config DAMON_SYSFS_KUNIT_TEST
If unsure, say N.
-config DAMON_DBGFS_DEPRECATED
- bool "DAMON debugfs interface (DEPRECATED!)"
- depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS
- help
- This builds the debugfs interface for DAMON. The user space admins
- can use the interface for arbitrary data access monitoring.
-
- If unsure, say N.
-
- This is deprecated, so users should move to the sysfs interface
- (DAMON_SYSFS). If you depend on this and cannot move, please report
- your usecase to damon@lists.linux.dev and linux-mm@kvack.org.
-
-config DAMON_DBGFS
- bool
- default y
- depends on DAMON_DBGFS_DEPRECATED
-
-config DAMON_DBGFS_KUNIT_TEST
- bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
- depends on DAMON_DBGFS && KUNIT=y
- default KUNIT_ALL_TESTS
- help
- This builds the DAMON debugfs interface Kunit test suite.
-
- For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation.
-
- If unsure, say N.
-
config DAMON_RECLAIM
bool "Build DAMON-based reclaim (DAMON_RECLAIM)"
depends on DAMON_PADDR
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
index f7add3f4aa79..8b49012ba8c3 100644
--- a/mm/damon/Makefile
+++ b/mm/damon/Makefile
@@ -4,6 +4,5 @@ obj-y := core.o
obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o
obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
obj-$(CONFIG_DAMON_SYSFS) += sysfs-common.o sysfs-schemes.o sysfs.o
-obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
obj-$(CONFIG_DAMON_RECLAIM) += modules-common.o reclaim.o
obj-$(CONFIG_DAMON_LRU_SORT) += modules-common.o lru_sort.o
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 0776452a1abb..c7b981308862 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -14,6 +14,7 @@
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>
@@ -266,7 +267,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
}
struct damos_filter *damos_new_filter(enum damos_filter_type type,
- bool matching)
+ bool matching, bool allow)
{
struct damos_filter *filter;
@@ -275,6 +276,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type,
return NULL;
filter->type = type;
filter->matching = matching;
+ filter->allow = allow;
INIT_LIST_HEAD(&filter->list);
return filter;
}
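A hedged usage sketch of the new third argument (not part of the series): filters are evaluated in installation order and the first match decides, so an allow filter installed before a reject filter carves out an exception. The helper name and the caller-supplied scheme pointer below are assumptions for illustration.

#include <linux/damon.h>

/* Illustrative only: let young folios pass, skip the remaining anonymous folios. */
static int install_young_exception(struct damos *scheme)
{
	struct damos_filter *keep_young, *drop_anon;

	keep_young = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, true);
	if (!keep_young)
		return -ENOMEM;
	drop_anon = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!drop_anon) {
		damos_destroy_filter(keep_young);
		return -ENOMEM;
	}
	/* First match wins: young folios are allowed, other anon folios are rejected. */
	damos_add_filter(scheme, keep_young);
	damos_add_filter(scheme, drop_anon);
	return 0;
}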
@@ -504,6 +506,8 @@ struct damon_ctx *damon_new_ctx(void)
ctx->next_ops_update_sis = 0;
mutex_init(&ctx->kdamond_lock);
+ mutex_init(&ctx->call_control_lock);
+ mutex_init(&ctx->walk_control_lock);
ctx->attrs.min_nr_regions = 10;
ctx->attrs.max_nr_regions = 1000;
@@ -803,7 +807,8 @@ static int damos_commit_filters(struct damos *dst, struct damos *src)
continue;
new_filter = damos_new_filter(
- src_filter->type, src_filter->matching);
+ src_filter->type, src_filter->matching,
+ src_filter->allow);
if (!new_filter)
return -ENOMEM;
damos_commit_filter_arg(new_filter, src_filter);
@@ -1162,6 +1167,94 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
return err;
}
+static bool damon_is_running(struct damon_ctx *ctx)
+{
+ bool running;
+
+ mutex_lock(&ctx->kdamond_lock);
+ running = ctx->kdamond != NULL;
+ mutex_unlock(&ctx->kdamond_lock);
+ return running;
+}
+
+/**
+ * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
+ * @ctx: DAMON context to call the function for.
+ * @control: Control variable of the call request.
+ *
+ * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
+ * argument, passed via &damon_call_control->fn and &damon_call_control->data
+ * of @control respectively, and wait until the kdamond finishes
+ * handling of the request.
+ *
+ * The kdamond executes the function with the argument in the main loop, just
+ * after a sampling of the iteration is finished. The function can hence
+ * safely access the internal data of the &struct damon_ctx without additional
+ * synchronization. The return value of the function will be saved in
+ * &damon_call_control->return_code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
+{
+ init_completion(&control->completion);
+ control->canceled = false;
+
+ mutex_lock(&ctx->call_control_lock);
+ if (ctx->call_control) {
+ mutex_unlock(&ctx->call_control_lock);
+ return -EBUSY;
+ }
+ ctx->call_control = control;
+ mutex_unlock(&ctx->call_control_lock);
+ if (!damon_is_running(ctx))
+ return -EINVAL;
+ wait_for_completion(&control->completion);
+ if (control->canceled)
+ return -ECANCELED;
+ return 0;
+}
+
+/**
+ * damos_walk() - Invoke a given function while DAMOS walks regions.
+ * @ctx: DAMON context to call the functions for.
+ * @control: Control variable of the walk request.
+ *
+ * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
+ * that the kdamond will apply DAMOS action to, and wait until the kdamond
+ * finishes handling of the request.
+ *
+ * The kdamond executes the given function in the main loop, for each region
+ * just after it applied any DAMOS actions of @ctx to it. The invocation is
+ * made only within one &damos->apply_interval_us since damos_walk()
+ * invocation, for each scheme. The given callback function can hence safely
+ * access the internal data of &struct damon_ctx and of the &struct damon_region
+ * that each scheme will apply its action to in the next interval, without
+ * additional synchronization against the kdamond. If every scheme of @ctx
+ * passed at least one &damos->apply_interval_us, kdamond marks the request as
+ * completed so that damos_walk() can wake up and return.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
+{
+ init_completion(&control->completion);
+ control->canceled = false;
+ mutex_lock(&ctx->walk_control_lock);
+ if (ctx->walk_control) {
+ mutex_unlock(&ctx->walk_control_lock);
+ return -EBUSY;
+ }
+ ctx->walk_control = control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!damon_is_running(ctx))
+ return -EINVAL;
+ wait_for_completion(&control->completion);
+ if (control->canceled)
+ return -ECANCELED;
+ return 0;
+}
+
/*
* Reset the aggregated monitoring results ('nr_accesses' of each region).
*/
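A hedged sketch (not part of the patch) of how a caller such as the sysfs interface might use the new damon_call() API to read kdamond-internal state; the helper names and the way the context is carried through ->data are assumptions for illustration.

#include <linux/damon.h>

struct nr_schemes_request {
	struct damon_ctx *ctx;
	int nr_schemes;
};

/* Runs on the kdamond thread, so no extra locking of ctx internals is needed. */
static int count_schemes_fn(void *data)
{
	struct nr_schemes_request *req = data;
	struct damos *s;

	damon_for_each_scheme(s, req->ctx)
		req->nr_schemes++;
	return 0;
}

static int count_schemes(struct damon_ctx *ctx, int *nr_schemes)
{
	struct nr_schemes_request req = { .ctx = ctx, .nr_schemes = 0 };
	struct damon_call_control control = {
		.fn = count_schemes_fn,
		.data = &req,
	};
	int err = damon_call(ctx, &control);

	if (err)
		return err;
	*nr_schemes = req.nr_schemes;
	return 0;
}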
@@ -1272,16 +1365,18 @@ static bool damos_skip_charged_region(struct damon_target *t,
}
static void damos_update_stat(struct damos *s,
- unsigned long sz_tried, unsigned long sz_applied)
+ unsigned long sz_tried, unsigned long sz_applied,
+ unsigned long sz_ops_filter_passed)
{
s->stat.nr_tried++;
s->stat.sz_tried += sz_tried;
if (sz_applied)
s->stat.nr_applied++;
s->stat.sz_applied += sz_applied;
+ s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
}
-static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
+static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
struct damon_region *r, struct damos_filter *filter)
{
bool matched = false;
@@ -1335,12 +1430,98 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
struct damos_filter *filter;
damos_for_each_filter(filter, s) {
- if (__damos_filter_out(ctx, t, r, filter))
- return true;
+ if (damos_filter_match(ctx, t, r, filter))
+ return !filter->allow;
}
return false;
}
+/*
+ * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
+ * @ctx: The context of &damon_ctx->walk_control.
+ * @t: The monitoring target of @r that @s will be applied.
+ * @r: The region of @t that @s will be applied.
+ * @s: The scheme of @ctx that will be applied to @r.
+ *
+ * This function is called from kdamond whenever it asked the operation set to
+ * apply a DAMOS scheme action to a region. If a DAMOS walk request is
+ * installed by damos_walk() and not yet uninstalled, invoke it.
+ */
+static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
+ struct damon_region *r, struct damos *s,
+ unsigned long sz_filter_passed)
+{
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!control)
+ return;
+ control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
+}
+
+/*
+ * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
+ * @ctx: The context of &damon_ctx->walk_control.
+ * @s: A scheme of @ctx that all walks are now done.
+ *
+ * This function is called when kdamond finished applying the action of a DAMOS
+ * scheme to all regions that are eligible for the given &damos->apply_interval_us.
+ * If every scheme of @ctx including @s now finished walking for at least one
+ * &damos->apply_interval_us, this function marks the handling of the given
+ * DAMOS walk request as done, so that damos_walk() can wake up and return.
+ */
+static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
+{
+ struct damos *siter;
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+ if (!control)
+ return;
+
+ s->walk_completed = true;
+ /* if all schemes completed, signal completion to walker */
+ damon_for_each_scheme(siter, ctx) {
+ if (!siter->walk_completed)
+ return;
+ }
+ complete(&control->completion);
+ mutex_lock(&ctx->walk_control_lock);
+ ctx->walk_control = NULL;
+ mutex_unlock(&ctx->walk_control_lock);
+}
+
+/*
+ * damos_walk_cancel() - Cancel the current DAMOS walk request.
+ * @ctx: The context of &damon_ctx->walk_control.
+ *
+ * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
+ * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
+ * is already out of the main loop and therefore going to be terminated, and hence
+ * cannot continue the walks. This function therefore marks the walk request
+ * as canceled, so that damos_walk() can wake up and return.
+ */
+static void damos_walk_cancel(struct damon_ctx *ctx)
+{
+ struct damos_walk_control *control;
+
+ mutex_lock(&ctx->walk_control_lock);
+ control = ctx->walk_control;
+ mutex_unlock(&ctx->walk_control_lock);
+
+ if (!control)
+ return;
+ control->canceled = true;
+ complete(&control->completion);
+ mutex_lock(&ctx->walk_control_lock);
+ ctx->walk_control = NULL;
+ mutex_unlock(&ctx->walk_control_lock);
+}
+
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
struct damon_region *r, struct damos *s)
{
@@ -1348,6 +1529,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
unsigned long sz = damon_sz_region(r);
struct timespec64 begin, end;
unsigned long sz_applied = 0;
+ unsigned long sz_ops_filter_passed = 0;
int err = 0;
/*
* We plan to support multiple context per kdamond, as DAMON sysfs
@@ -1393,8 +1575,10 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
if (!err) {
trace_damos_before_apply(cidx, sidx, tidx, r,
damon_nr_regions(t), do_trace);
- sz_applied = c->ops.apply_scheme(c, t, r, s);
+ sz_applied = c->ops.apply_scheme(c, t, r, s,
+ &sz_ops_filter_passed);
}
+ damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
ktime_get_coarse_ts64(&end);
quota->total_charged_ns += timespec64_to_ns(&end) -
timespec64_to_ns(&begin);
@@ -1408,7 +1592,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
r->age = 0;
update_stat:
- damos_update_stat(s, sz, sz_applied);
+ damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}
static void damon_do_apply_schemes(struct damon_ctx *c,
@@ -1550,7 +1734,7 @@ static unsigned long damos_quota_score(struct damos_quota *quota)
static void damos_set_effective_quota(struct damos_quota *quota)
{
unsigned long throughput;
- unsigned long esz;
+ unsigned long esz = ULONG_MAX;
if (!quota->ms && list_empty(&quota->goals)) {
quota->esz = quota->sz;
@@ -1572,10 +1756,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
quota->total_charged_ns;
else
throughput = PAGE_SIZE * 1024;
- if (!list_empty(&quota->goals))
- esz = min(throughput * quota->ms, esz);
- else
- esz = throughput * quota->ms;
+ esz = min(throughput * quota->ms, esz);
}
if (quota->sz && quota->sz < esz)
@@ -1666,6 +1847,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
damon_for_each_scheme(s, c) {
if (c->passed_sample_intervals < s->next_apply_sis)
continue;
+ damos_walk_complete(c, s);
s->next_apply_sis = c->passed_sample_intervals +
(s->apply_interval_us ? s->apply_interval_us :
c->attrs.aggr_interval) / sample_interval;
@@ -1894,9 +2076,8 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
if (scheme->wmarks.activated)
pr_debug("deactivate a scheme (%d) for %s wmark\n",
- scheme->action,
- metric > scheme->wmarks.high ?
- "high" : "low");
+ scheme->action,
+ str_high_low(metric > scheme->wmarks.high));
scheme->wmarks.activated = false;
return scheme->wmarks.interval;
}
@@ -1920,6 +2101,39 @@ static void kdamond_usleep(unsigned long usecs)
usleep_range_idle(usecs, usecs + 1);
}
+/*
+ * kdamond_call() - handle damon_call_control.
+ * @ctx: The &struct damon_ctx of the kdamond.
+ * @cancel: Whether to cancel the invocation of the function.
+ *
+ * If there is a &struct damon_call_control request that was registered via
+ * &damon_call() on @ctx, do or cancel the invocation of the function depending
+ * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS
+ * watermarks, or the kdamond is already out of the main loop and therefore
+ * will be terminated.
+ */
+static void kdamond_call(struct damon_ctx *ctx, bool cancel)
+{
+ struct damon_call_control *control;
+ int ret = 0;
+
+ mutex_lock(&ctx->call_control_lock);
+ control = ctx->call_control;
+ mutex_unlock(&ctx->call_control_lock);
+ if (!control)
+ return;
+ if (cancel) {
+ control->canceled = true;
+ } else {
+ ret = control->fn(control->data);
+ control->return_code = ret;
+ }
+ complete(&control->completion);
+ mutex_lock(&ctx->call_control_lock);
+ ctx->call_control = NULL;
+ mutex_unlock(&ctx->call_control_lock);
+}
+
/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
@@ -1944,6 +2158,8 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
if (ctx->callback.after_wmarks_check &&
ctx->callback.after_wmarks_check(ctx))
break;
+ kdamond_call(ctx, true);
+ damos_walk_cancel(ctx);
}
return -EBUSY;
}
@@ -2014,6 +2230,7 @@ static int kdamond_fn(void *data)
if (ctx->callback.after_sampling &&
ctx->callback.after_sampling(ctx))
break;
+ kdamond_call(ctx, false);
kdamond_usleep(sample_interval);
ctx->passed_sample_intervals++;
@@ -2036,6 +2253,8 @@ static int kdamond_fn(void *data)
*/
if (!list_empty(&ctx->schemes))
kdamond_apply_schemes(ctx);
+ else
+ damos_walk_cancel(ctx);
sample_interval = ctx->attrs.sample_interval ?
ctx->attrs.sample_interval : 1;
@@ -2075,6 +2294,9 @@ done:
ctx->kdamond = NULL;
mutex_unlock(&ctx->kdamond_lock);
+ kdamond_call(ctx, true);
+ damos_walk_cancel(ctx);
+
mutex_lock(&damon_lock);
nr_running_ctxs--;
if (!nr_running_ctxs && running_exclusive_ctxs)
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
deleted file mode 100644
index b4213bc47e44..000000000000
--- a/mm/damon/dbgfs.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DAMON Debugfs Interface
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#define pr_fmt(fmt) "damon-dbgfs: " fmt
-
-#include <linux/damon.h>
-#include <linux/debugfs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/page_idle.h>
-#include <linux/slab.h>
-
-#define DAMON_DBGFS_DEPRECATION_NOTICE \
- "DAMON debugfs interface is deprecated, so users should move " \
- "to DAMON_SYSFS. If you cannot, please report your usecase to " \
- "damon@lists.linux.dev and linux-mm@kvack.org.\n"
-
-static struct damon_ctx **dbgfs_ctxs;
-static int dbgfs_nr_ctxs;
-static struct dentry **dbgfs_dirs;
-static DEFINE_MUTEX(damon_dbgfs_lock);
-
-static void damon_dbgfs_warn_deprecation(void)
-{
- pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE);
-}
-
-/*
- * Returns non-empty string on success, negative error code otherwise.
- */
-static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
-{
- char *kbuf;
- ssize_t ret;
-
- /* We do not accept continuous write */
- if (*ppos)
- return ERR_PTR(-EINVAL);
-
- kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
- if (!kbuf)
- return ERR_PTR(-ENOMEM);
-
- ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
- if (ret != count) {
- kfree(kbuf);
- return ERR_PTR(-EIO);
- }
- kbuf[ret] = '\0';
-
- return kbuf;
-}
-
-static ssize_t dbgfs_attrs_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char kbuf[128];
- int ret;
-
- mutex_lock(&ctx->kdamond_lock);
- ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
- ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
- ctx->attrs.ops_update_interval,
- ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
- mutex_unlock(&ctx->kdamond_lock);
-
- return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
-}
-
-static ssize_t dbgfs_attrs_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- struct damon_attrs attrs;
- char *kbuf;
- ssize_t ret;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
- &attrs.sample_interval, &attrs.aggr_interval,
- &attrs.ops_update_interval,
- &attrs.min_nr_regions,
- &attrs.max_nr_regions) != 5) {
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond) {
- ret = -EBUSY;
- goto unlock_out;
- }
-
- ret = damon_set_attrs(ctx, &attrs);
- if (!ret)
- ret = count;
-unlock_out:
- mutex_unlock(&ctx->kdamond_lock);
-out:
- kfree(kbuf);
- return ret;
-}
-
-/*
- * Return corresponding dbgfs' scheme action value (int) for the given
- * damos_action if the given damos_action value is valid and supported by
- * dbgfs, negative error code otherwise.
- */
-static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
-{
- switch (action) {
- case DAMOS_WILLNEED:
- return 0;
- case DAMOS_COLD:
- return 1;
- case DAMOS_PAGEOUT:
- return 2;
- case DAMOS_HUGEPAGE:
- return 3;
- case DAMOS_NOHUGEPAGE:
- return 4;
- case DAMOS_STAT:
- return 5;
- default:
- return -EINVAL;
- }
-}
-
-static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
-{
- struct damos *s;
- int written = 0;
- int rc;
-
- damon_for_each_scheme(s, c) {
- rc = scnprintf(&buf[written], len - written,
- "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
- s->pattern.min_sz_region,
- s->pattern.max_sz_region,
- s->pattern.min_nr_accesses,
- s->pattern.max_nr_accesses,
- s->pattern.min_age_region,
- s->pattern.max_age_region,
- damos_action_to_dbgfs_scheme_action(s->action),
- s->quota.ms, s->quota.sz,
- s->quota.reset_interval,
- s->quota.weight_sz,
- s->quota.weight_nr_accesses,
- s->quota.weight_age,
- s->wmarks.metric, s->wmarks.interval,
- s->wmarks.high, s->wmarks.mid, s->wmarks.low,
- s->stat.nr_tried, s->stat.sz_tried,
- s->stat.nr_applied, s->stat.sz_applied,
- s->stat.qt_exceeds);
- if (!rc)
- return -ENOMEM;
-
- written += rc;
- }
- return written;
-}
-
-static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char *kbuf;
- ssize_t len;
-
- kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
- if (!kbuf)
- return -ENOMEM;
-
- mutex_lock(&ctx->kdamond_lock);
- len = sprint_schemes(ctx, kbuf, count);
- mutex_unlock(&ctx->kdamond_lock);
- if (len < 0)
- goto out;
- len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
-
-out:
- kfree(kbuf);
- return len;
-}
-
-static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
-{
- ssize_t i;
-
- for (i = 0; i < nr_schemes; i++)
- kfree(schemes[i]);
- kfree(schemes);
-}
-
-/*
- * Return corresponding damos_action for the given dbgfs input for a scheme
- * action if the input is valid, negative error code otherwise.
- */
-static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
-{
- switch (dbgfs_action) {
- case 0:
- return DAMOS_WILLNEED;
- case 1:
- return DAMOS_COLD;
- case 2:
- return DAMOS_PAGEOUT;
- case 3:
- return DAMOS_HUGEPAGE;
- case 4:
- return DAMOS_NOHUGEPAGE;
- case 5:
- return DAMOS_STAT;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Converts a string into an array of struct damos pointers
- *
- * Returns an array of struct damos pointers that converted if the conversion
- * success, or NULL otherwise.
- */
-static struct damos **str_to_schemes(const char *str, ssize_t len,
- ssize_t *nr_schemes)
-{
- struct damos *scheme, **schemes;
- const int max_nr_schemes = 256;
- int pos = 0, parsed, ret;
- unsigned int action_input;
- enum damos_action action;
-
- schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
- GFP_KERNEL);
- if (!schemes)
- return NULL;
-
- *nr_schemes = 0;
- while (pos < len && *nr_schemes < max_nr_schemes) {
- struct damos_access_pattern pattern = {};
- struct damos_quota quota = {};
- struct damos_watermarks wmarks;
-
- ret = sscanf(&str[pos],
- "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
- &pattern.min_sz_region, &pattern.max_sz_region,
- &pattern.min_nr_accesses,
- &pattern.max_nr_accesses,
- &pattern.min_age_region,
- &pattern.max_age_region,
- &action_input, &quota.ms,
- &quota.sz, &quota.reset_interval,
- &quota.weight_sz, &quota.weight_nr_accesses,
- &quota.weight_age, &wmarks.metric,
- &wmarks.interval, &wmarks.high, &wmarks.mid,
- &wmarks.low, &parsed);
- if (ret != 18)
- break;
- action = dbgfs_scheme_action_to_damos_action(action_input);
- if ((int)action < 0)
- goto fail;
-
- if (pattern.min_sz_region > pattern.max_sz_region ||
- pattern.min_nr_accesses > pattern.max_nr_accesses ||
- pattern.min_age_region > pattern.max_age_region)
- goto fail;
-
- if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
- wmarks.mid < wmarks.low)
- goto fail;
-
- pos += parsed;
- scheme = damon_new_scheme(&pattern, action, 0, &quota,
- &wmarks, NUMA_NO_NODE);
- if (!scheme)
- goto fail;
-
- schemes[*nr_schemes] = scheme;
- *nr_schemes += 1;
- }
- return schemes;
-fail:
- free_schemes_arr(schemes, *nr_schemes);
- return NULL;
-}
-
-static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char *kbuf;
- struct damos **schemes;
- ssize_t nr_schemes = 0, ret;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- schemes = str_to_schemes(kbuf, count, &nr_schemes);
- if (!schemes) {
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond) {
- ret = -EBUSY;
- goto unlock_out;
- }
-
- damon_set_schemes(ctx, schemes, nr_schemes);
- ret = count;
- nr_schemes = 0;
-
-unlock_out:
- mutex_unlock(&ctx->kdamond_lock);
- free_schemes_arr(schemes, nr_schemes);
-out:
- kfree(kbuf);
- return ret;
-}
-
-static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
-{
- struct damon_target *t;
- int id;
- int written = 0;
- int rc;
-
- damon_for_each_target(t, ctx) {
- if (damon_target_has_pid(ctx))
- /* Show pid numbers to debugfs users */
- id = pid_vnr(t->pid);
- else
- /* Show 42 for physical address space, just for fun */
- id = 42;
-
- rc = scnprintf(&buf[written], len - written, "%d ", id);
- if (!rc)
- return -ENOMEM;
- written += rc;
- }
- if (written)
- written -= 1;
- written += scnprintf(&buf[written], len - written, "\n");
- return written;
-}
-
-static ssize_t dbgfs_target_ids_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- ssize_t len;
- char ids_buf[320];
-
- mutex_lock(&ctx->kdamond_lock);
- len = sprint_target_ids(ctx, ids_buf, 320);
- mutex_unlock(&ctx->kdamond_lock);
- if (len < 0)
- return len;
-
- return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
-}
-
-/*
- * Converts a string into an integers array
- *
- * Returns an array of integers array if the conversion success, or NULL
- * otherwise.
- */
-static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
-{
- int *array;
- const int max_nr_ints = 32;
- int nr;
- int pos = 0, parsed, ret;
-
- *nr_ints = 0;
- array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
- if (!array)
- return NULL;
- while (*nr_ints < max_nr_ints && pos < len) {
- ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
- pos += parsed;
- if (ret != 1)
- break;
- array[*nr_ints] = nr;
- *nr_ints += 1;
- }
-
- return array;
-}
-
-static void dbgfs_put_pids(struct pid **pids, int nr_pids)
-{
- int i;
-
- for (i = 0; i < nr_pids; i++)
- put_pid(pids[i]);
-}
-
-/*
- * Converts a string into an struct pid pointers array
- *
- * Returns an array of struct pid pointers if the conversion success, or NULL
- * otherwise.
- */
-static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
-{
- int *ints;
- ssize_t nr_ints;
- struct pid **pids;
-
- *nr_pids = 0;
-
- ints = str_to_ints(str, len, &nr_ints);
- if (!ints)
- return NULL;
-
- pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
- if (!pids)
- goto out;
-
- for (; *nr_pids < nr_ints; (*nr_pids)++) {
- pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
- if (!pids[*nr_pids]) {
- dbgfs_put_pids(pids, *nr_pids);
- kfree(ints);
- kfree(pids);
- return NULL;
- }
- }
-
-out:
- kfree(ints);
- return pids;
-}
-
-/*
- * dbgfs_set_targets() - Set monitoring targets.
- * @ctx: monitoring context
- * @nr_targets: number of targets
- * @pids: array of target pids (size is same to @nr_targets)
- *
- * This function should not be called while the kdamond is running. @pids is
- * ignored if the context is not configured to have pid in each target. On
- * failure, reference counts of all pids in @pids are decremented.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
- struct pid **pids)
-{
- ssize_t i;
- struct damon_target *t, *next;
-
- damon_for_each_target_safe(t, next, ctx) {
- if (damon_target_has_pid(ctx))
- put_pid(t->pid);
- damon_destroy_target(t);
- }
-
- for (i = 0; i < nr_targets; i++) {
- t = damon_new_target();
- if (!t) {
- damon_for_each_target_safe(t, next, ctx)
- damon_destroy_target(t);
- if (damon_target_has_pid(ctx))
- dbgfs_put_pids(pids, nr_targets);
- return -ENOMEM;
- }
- if (damon_target_has_pid(ctx))
- t->pid = pids[i];
- damon_add_target(ctx, t);
- }
-
- return 0;
-}
-
-static ssize_t dbgfs_target_ids_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- bool id_is_pid = true;
- char *kbuf;
- struct pid **target_pids = NULL;
- ssize_t nr_targets;
- ssize_t ret;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- if (!strncmp(kbuf, "paddr\n", count)) {
- id_is_pid = false;
- nr_targets = 1;
- }
-
- if (id_is_pid) {
- target_pids = str_to_pids(kbuf, count, &nr_targets);
- if (!target_pids) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond) {
- if (id_is_pid)
- dbgfs_put_pids(target_pids, nr_targets);
- ret = -EBUSY;
- goto unlock_out;
- }
-
- /* remove previously set targets */
- dbgfs_set_targets(ctx, 0, NULL);
- if (!nr_targets) {
- ret = count;
- goto unlock_out;
- }
-
- /* Configure the context for the address space type */
- if (id_is_pid)
- ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
- else
- ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
- if (ret)
- goto unlock_out;
-
- ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
- if (!ret)
- ret = count;
-
-unlock_out:
- mutex_unlock(&ctx->kdamond_lock);
- kfree(target_pids);
-out:
- kfree(kbuf);
- return ret;
-}
-
-static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
-{
- struct damon_target *t;
- struct damon_region *r;
- int target_idx = 0;
- int written = 0;
- int rc;
-
- damon_for_each_target(t, c) {
- damon_for_each_region(r, t) {
- rc = scnprintf(&buf[written], len - written,
- "%d %lu %lu\n",
- target_idx, r->ar.start, r->ar.end);
- if (!rc)
- return -ENOMEM;
- written += rc;
- }
- target_idx++;
- }
- return written;
-}
-
-static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char *kbuf;
- ssize_t len;
-
- kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
- if (!kbuf)
- return -ENOMEM;
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond) {
- mutex_unlock(&ctx->kdamond_lock);
- len = -EBUSY;
- goto out;
- }
-
- len = sprint_init_regions(ctx, kbuf, count);
- mutex_unlock(&ctx->kdamond_lock);
- if (len < 0)
- goto out;
- len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
-
-out:
- kfree(kbuf);
- return len;
-}
-
-static int add_init_region(struct damon_ctx *c, int target_idx,
- struct damon_addr_range *ar)
-{
- struct damon_target *t;
- struct damon_region *r, *prev;
- unsigned long idx = 0;
- int rc = -EINVAL;
-
- if (ar->start >= ar->end)
- return -EINVAL;
-
- damon_for_each_target(t, c) {
- if (idx++ == target_idx) {
- r = damon_new_region(ar->start, ar->end);
- if (!r)
- return -ENOMEM;
- damon_add_region(r, t);
- if (damon_nr_regions(t) > 1) {
- prev = damon_prev_region(r);
- if (prev->ar.end > r->ar.start) {
- damon_destroy_region(r, t);
- return -EINVAL;
- }
- }
- rc = 0;
- }
- }
- return rc;
-}
-
-static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
-{
- struct damon_target *t;
- struct damon_region *r, *next;
- int pos = 0, parsed, ret;
- int target_idx;
- struct damon_addr_range ar;
- int err;
-
- damon_for_each_target(t, c) {
- damon_for_each_region_safe(r, next, t)
- damon_destroy_region(r, t);
- }
-
- while (pos < len) {
- ret = sscanf(&str[pos], "%d %lu %lu%n",
- &target_idx, &ar.start, &ar.end, &parsed);
- if (ret != 3)
- break;
- err = add_init_region(c, target_idx, &ar);
- if (err)
- goto fail;
- pos += parsed;
- }
-
- return 0;
-
-fail:
- damon_for_each_target(t, c) {
- damon_for_each_region_safe(r, next, t)
- damon_destroy_region(r, t);
- }
- return err;
-}
-
-static ssize_t dbgfs_init_regions_write(struct file *file,
- const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char *kbuf;
- ssize_t ret = count;
- int err;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond) {
- ret = -EBUSY;
- goto unlock_out;
- }
-
- err = set_init_regions(ctx, kbuf, ret);
- if (err)
- ret = err;
-
-unlock_out:
- mutex_unlock(&ctx->kdamond_lock);
- kfree(kbuf);
- return ret;
-}
-
-static ssize_t dbgfs_kdamond_pid_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- struct damon_ctx *ctx = file->private_data;
- char *kbuf;
- ssize_t len;
-
- kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
- if (!kbuf)
- return -ENOMEM;
-
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond)
- len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
- else
- len = scnprintf(kbuf, count, "none\n");
- mutex_unlock(&ctx->kdamond_lock);
- if (!len)
- goto out;
- len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
-
-out:
- kfree(kbuf);
- return len;
-}
-
-static int damon_dbgfs_open(struct inode *inode, struct file *file)
-{
- damon_dbgfs_warn_deprecation();
-
- file->private_data = inode->i_private;
-
- return nonseekable_open(inode, file);
-}
-
-static const struct file_operations attrs_fops = {
- .open = damon_dbgfs_open,
- .read = dbgfs_attrs_read,
- .write = dbgfs_attrs_write,
-};
-
-static const struct file_operations schemes_fops = {
- .open = damon_dbgfs_open,
- .read = dbgfs_schemes_read,
- .write = dbgfs_schemes_write,
-};
-
-static const struct file_operations target_ids_fops = {
- .open = damon_dbgfs_open,
- .read = dbgfs_target_ids_read,
- .write = dbgfs_target_ids_write,
-};
-
-static const struct file_operations init_regions_fops = {
- .open = damon_dbgfs_open,
- .read = dbgfs_init_regions_read,
- .write = dbgfs_init_regions_write,
-};
-
-static const struct file_operations kdamond_pid_fops = {
- .open = damon_dbgfs_open,
- .read = dbgfs_kdamond_pid_read,
-};
-
-static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
-{
- const char * const file_names[] = {"attrs", "schemes", "target_ids",
- "init_regions", "kdamond_pid"};
- const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
- &target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(file_names); i++)
- debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
-}
-
-static void dbgfs_before_terminate(struct damon_ctx *ctx)
-{
- struct damon_target *t, *next;
-
- if (!damon_target_has_pid(ctx))
- return;
-
- mutex_lock(&ctx->kdamond_lock);
- damon_for_each_target_safe(t, next, ctx) {
- put_pid(t->pid);
- damon_destroy_target(t);
- }
- mutex_unlock(&ctx->kdamond_lock);
-}
-
-static struct damon_ctx *dbgfs_new_ctx(void)
-{
- struct damon_ctx *ctx;
-
- ctx = damon_new_ctx();
- if (!ctx)
- return NULL;
-
- if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
- damon_select_ops(ctx, DAMON_OPS_PADDR)) {
- damon_destroy_ctx(ctx);
- return NULL;
- }
- ctx->callback.before_terminate = dbgfs_before_terminate;
- return ctx;
-}
-
-static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
-{
- damon_destroy_ctx(ctx);
-}
-
-static ssize_t damon_dbgfs_deprecated_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE;
-
- return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf));
-}
-
-/*
- * Make a context of @name and create a debugfs directory for it.
- *
- * This function should be called while holding damon_dbgfs_lock.
- *
- * Returns 0 on success, negative error code otherwise.
- */
-static int dbgfs_mk_context(char *name)
-{
- struct dentry *root, **new_dirs, *new_dir;
- struct damon_ctx **new_ctxs, *new_ctx;
-
- if (damon_nr_running_ctxs())
- return -EBUSY;
-
- new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
- (dbgfs_nr_ctxs + 1), GFP_KERNEL);
- if (!new_ctxs)
- return -ENOMEM;
- dbgfs_ctxs = new_ctxs;
-
- new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
- (dbgfs_nr_ctxs + 1), GFP_KERNEL);
- if (!new_dirs)
- return -ENOMEM;
- dbgfs_dirs = new_dirs;
-
- root = dbgfs_dirs[0];
- if (!root)
- return -ENOENT;
-
- new_dir = debugfs_create_dir(name, root);
- /* Below check is required for a potential duplicated name case */
- if (IS_ERR(new_dir))
- return PTR_ERR(new_dir);
- dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
-
- new_ctx = dbgfs_new_ctx();
- if (!new_ctx) {
- debugfs_remove(new_dir);
- dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
- return -ENOMEM;
- }
-
- dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
- dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
- dbgfs_ctxs[dbgfs_nr_ctxs]);
- dbgfs_nr_ctxs++;
-
- return 0;
-}
-
-static ssize_t dbgfs_mk_context_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- char *kbuf;
- char *ctx_name;
- ssize_t ret;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
- ctx_name = kmalloc(count + 1, GFP_KERNEL);
- if (!ctx_name) {
- kfree(kbuf);
- return -ENOMEM;
- }
-
- /* Trim white space */
- if (sscanf(kbuf, "%s", ctx_name) != 1) {
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&damon_dbgfs_lock);
- ret = dbgfs_mk_context(ctx_name);
- if (!ret)
- ret = count;
- mutex_unlock(&damon_dbgfs_lock);
-
-out:
- kfree(kbuf);
- kfree(ctx_name);
- return ret;
-}
-
-/*
- * Remove a context of @name and its debugfs directory.
- *
- * This function should be called while holding damon_dbgfs_lock.
- *
- * Return 0 on success, negative error code otherwise.
- */
-static int dbgfs_rm_context(char *name)
-{
- struct dentry *root, *dir, **new_dirs;
- struct inode *inode;
- struct damon_ctx **new_ctxs;
- int i, j;
- int ret = 0;
-
- if (damon_nr_running_ctxs())
- return -EBUSY;
-
- root = dbgfs_dirs[0];
- if (!root)
- return -ENOENT;
-
- dir = debugfs_lookup(name, root);
- if (!dir)
- return -ENOENT;
-
- inode = d_inode(dir);
- if (!S_ISDIR(inode->i_mode)) {
- ret = -EINVAL;
- goto out_dput;
- }
-
- new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
- GFP_KERNEL);
- if (!new_dirs) {
- ret = -ENOMEM;
- goto out_dput;
- }
-
- new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
- GFP_KERNEL);
- if (!new_ctxs) {
- ret = -ENOMEM;
- goto out_new_dirs;
- }
-
- for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
- if (dbgfs_dirs[i] == dir) {
- debugfs_remove(dbgfs_dirs[i]);
- dbgfs_destroy_ctx(dbgfs_ctxs[i]);
- continue;
- }
- new_dirs[j] = dbgfs_dirs[i];
- new_ctxs[j++] = dbgfs_ctxs[i];
- }
-
- kfree(dbgfs_dirs);
- kfree(dbgfs_ctxs);
-
- dbgfs_dirs = new_dirs;
- dbgfs_ctxs = new_ctxs;
- dbgfs_nr_ctxs--;
-
- goto out_dput;
-
-out_new_dirs:
- kfree(new_dirs);
-out_dput:
- dput(dir);
- return ret;
-}
-
-static ssize_t dbgfs_rm_context_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- char *kbuf;
- ssize_t ret;
- char *ctx_name;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
- ctx_name = kmalloc(count + 1, GFP_KERNEL);
- if (!ctx_name) {
- kfree(kbuf);
- return -ENOMEM;
- }
-
- /* Trim white space */
- if (sscanf(kbuf, "%s", ctx_name) != 1) {
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&damon_dbgfs_lock);
- ret = dbgfs_rm_context(ctx_name);
- if (!ret)
- ret = count;
- mutex_unlock(&damon_dbgfs_lock);
-
-out:
- kfree(kbuf);
- kfree(ctx_name);
- return ret;
-}
-
-static ssize_t dbgfs_monitor_on_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- char monitor_on_buf[5];
- bool monitor_on = damon_nr_running_ctxs() != 0;
- int len;
-
- len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
-
- return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
-}
-
-static ssize_t dbgfs_monitor_on_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- ssize_t ret;
- char *kbuf;
-
- kbuf = user_input_str(buf, count, ppos);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- /* Remove white space */
- if (sscanf(kbuf, "%s", kbuf) != 1) {
- kfree(kbuf);
- return -EINVAL;
- }
-
- mutex_lock(&damon_dbgfs_lock);
- if (!strncmp(kbuf, "on", count)) {
- int i;
-
- for (i = 0; i < dbgfs_nr_ctxs; i++) {
- if (damon_targets_empty(dbgfs_ctxs[i])) {
- kfree(kbuf);
- mutex_unlock(&damon_dbgfs_lock);
- return -EINVAL;
- }
- }
- ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
- } else if (!strncmp(kbuf, "off", count)) {
- ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
- } else {
- ret = -EINVAL;
- }
- mutex_unlock(&damon_dbgfs_lock);
-
- if (!ret)
- ret = count;
- kfree(kbuf);
- return ret;
-}
-
-static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
-{
- damon_dbgfs_warn_deprecation();
- return nonseekable_open(inode, file);
-}
-
-static const struct file_operations deprecated_fops = {
- .read = damon_dbgfs_deprecated_read,
-};
-
-static const struct file_operations mk_contexts_fops = {
- .open = damon_dbgfs_static_file_open,
- .write = dbgfs_mk_context_write,
-};
-
-static const struct file_operations rm_contexts_fops = {
- .open = damon_dbgfs_static_file_open,
- .write = dbgfs_rm_context_write,
-};
-
-static const struct file_operations monitor_on_fops = {
- .open = damon_dbgfs_static_file_open,
- .read = dbgfs_monitor_on_read,
- .write = dbgfs_monitor_on_write,
-};
-
-static int __init __damon_dbgfs_init(void)
-{
- struct dentry *dbgfs_root;
- const char * const file_names[] = {"mk_contexts", "rm_contexts",
- "monitor_on_DEPRECATED", "DEPRECATED"};
- const struct file_operations *fops[] = {&mk_contexts_fops,
- &rm_contexts_fops, &monitor_on_fops, &deprecated_fops};
- int i;
-
- dbgfs_root = debugfs_create_dir("damon", NULL);
-
- for (i = 0; i < ARRAY_SIZE(file_names); i++)
- debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
- fops[i]);
- dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
-
- dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
- if (!dbgfs_dirs) {
- debugfs_remove(dbgfs_root);
- return -ENOMEM;
- }
- dbgfs_dirs[0] = dbgfs_root;
-
- return 0;
-}
-
-/*
- * Functions for the initialization
- */
-
-static int __init damon_dbgfs_init(void)
-{
- int rc = -ENOMEM;
-
- mutex_lock(&damon_dbgfs_lock);
- dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
- if (!dbgfs_ctxs)
- goto out;
- dbgfs_ctxs[0] = dbgfs_new_ctx();
- if (!dbgfs_ctxs[0]) {
- kfree(dbgfs_ctxs);
- goto out;
- }
- dbgfs_nr_ctxs = 1;
-
- rc = __damon_dbgfs_init();
- if (rc) {
- kfree(dbgfs_ctxs[0]);
- kfree(dbgfs_ctxs);
- pr_err("%s: dbgfs init failed\n", __func__);
- }
-
-out:
- mutex_unlock(&damon_dbgfs_lock);
- return rc;
-}
-
-module_init(damon_dbgfs_init);
-
-#include "tests/dbgfs-kunit.h"
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index a9ff35341d65..0f9ae14f884d 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -198,7 +198,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
-static bool __damos_pa_filter_out(struct damos_filter *filter,
+static bool damos_pa_filter_match(struct damos_filter *filter,
struct folio *folio)
{
bool matched = false;
@@ -237,13 +237,14 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
struct damos_filter *filter;
damos_for_each_filter(filter, scheme) {
- if (__damos_pa_filter_out(filter, folio))
- return true;
+ if (damos_pa_filter_match(filter, folio))
+ return !filter->allow;
}
return false;
}
-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
+static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
{
unsigned long addr, applied;
LIST_HEAD(folio_list);
@@ -258,7 +259,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
}
}
if (install_young_filter) {
- filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true);
+ filter = damos_new_filter(
+ DAMOS_FILTER_TYPE_YOUNG, true, false);
if (!filter)
return 0;
damos_add_filter(s, filter);
@@ -272,6 +274,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
if (damos_pa_filter_out(s, folio))
goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
folio_clear_referenced(folio);
folio_test_clear_young(folio);
@@ -292,7 +296,8 @@ put_folio:
}
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
- struct damon_region *r, struct damos *s, bool mark_accessed)
+ struct damon_region *r, struct damos *s, bool mark_accessed,
+ unsigned long *sz_filter_passed)
{
unsigned long addr, applied = 0;
@@ -304,6 +309,8 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (damos_pa_filter_out(s, folio))
goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
if (mark_accessed)
folio_mark_accessed(folio);
@@ -317,15 +324,17 @@ put_folio:
}
static unsigned long damon_pa_mark_accessed(struct damon_region *r,
- struct damos *s)
+ struct damos *s, unsigned long *sz_filter_passed)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, true);
+ return damon_pa_mark_accessed_or_deactivate(r, s, true,
+ sz_filter_passed);
}
static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
- struct damos *s)
+ struct damos *s, unsigned long *sz_filter_passed)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, false);
+ return damon_pa_mark_accessed_or_deactivate(r, s, false,
+ sz_filter_passed);
}
static unsigned int __damon_pa_migrate_folio_list(
@@ -449,7 +458,8 @@ static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
return nr_migrated;
}
-static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
+static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
{
unsigned long addr, applied;
LIST_HEAD(folio_list);
@@ -462,6 +472,8 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
if (damos_pa_filter_out(s, folio))
goto put_folio;
+ else
+ *sz_filter_passed += folio_size(folio);
if (!folio_isolate_lru(folio))
goto put_folio;
@@ -474,23 +486,57 @@ put_folio:
return applied * PAGE_SIZE;
}
+static bool damon_pa_scheme_has_filter(struct damos *s)
+{
+ struct damos_filter *f;
+
+ damos_for_each_filter(f, s)
+ return true;
+ return false;
+}
+
+static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
+ unsigned long *sz_filter_passed)
+{
+ unsigned long addr;
+ LIST_HEAD(folio_list);
+
+ if (!damon_pa_scheme_has_filter(s))
+ return 0;
+
+ addr = r->ar.start;
+ while (addr < r->ar.end) {
+ struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+ if (!folio) {
+ addr += PAGE_SIZE;
+ continue;
+ }
+
+ if (!damos_pa_filter_out(s, folio))
+ *sz_filter_passed += folio_size(folio);
+ addr += folio_size(folio);
+ folio_put(folio);
+ }
+ return 0;
+}
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
+ struct damos *scheme, unsigned long *sz_filter_passed)
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pa_pageout(r, scheme);
+ return damon_pa_pageout(r, scheme, sz_filter_passed);
case DAMOS_LRU_PRIO:
- return damon_pa_mark_accessed(r, scheme);
+ return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
case DAMOS_LRU_DEPRIO:
- return damon_pa_deactivate_pages(r, scheme);
+ return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
case DAMOS_MIGRATE_HOT:
case DAMOS_MIGRATE_COLD:
- return damon_pa_migrate(r, scheme);
+ return damon_pa_migrate(r, scheme, sz_filter_passed);
case DAMOS_STAT:
- break;
+ return damon_pa_stat(r, scheme, sz_filter_passed);
default:
/* DAMOS actions that not yet supported by 'paddr'. */
break;
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 9e0077a9404e..a675150965e0 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -221,7 +221,7 @@ static int damon_reclaim_apply_parameters(void)
}
if (skip_anon) {
- filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
if (!filter)
goto out;
damos_add_filter(scheme, filter);
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index 9a18f3c535d3..70d84bdc9f5f 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -45,19 +45,13 @@ void damon_sysfs_schemes_update_stats(
struct damon_sysfs_schemes *sysfs_schemes,
struct damon_ctx *ctx);
-int damon_sysfs_schemes_update_regions_start(
- struct damon_sysfs_schemes *sysfs_schemes,
- struct damon_ctx *ctx, bool total_bytes_only);
-
-void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx);
-
-bool damos_sysfs_regions_upd_done(void);
-
-int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
+void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx, struct damon_target *t,
+ struct damon_region *r, struct damos *s,
+ bool total_bytes_only, unsigned long sz_filter_passed);
int damon_sysfs_schemes_clear_regions(
- struct damon_sysfs_schemes *sysfs_schemes,
- struct damon_ctx *ctx);
+ struct damon_sysfs_schemes *sysfs_schemes);
int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
struct damon_ctx *ctx);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index b095457380b5..98f93ae9f59e 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -19,6 +19,7 @@ struct damon_sysfs_scheme_region {
struct damon_addr_range ar;
unsigned int nr_accesses;
unsigned int age;
+ unsigned long sz_filter_passed;
struct list_head list;
};
@@ -74,6 +75,15 @@ static ssize_t age_show(struct kobject *kobj, struct kobj_attribute *attr,
return sysfs_emit(buf, "%u\n", region->age);
}
+static ssize_t sz_filter_passed_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_region *region = container_of(kobj,
+ struct damon_sysfs_scheme_region, kobj);
+
+ return sysfs_emit(buf, "%lu\n", region->sz_filter_passed);
+}
+
static void damon_sysfs_scheme_region_release(struct kobject *kobj)
{
struct damon_sysfs_scheme_region *region = container_of(kobj,
@@ -95,11 +105,15 @@ static struct kobj_attribute damon_sysfs_scheme_region_nr_accesses_attr =
static struct kobj_attribute damon_sysfs_scheme_region_age_attr =
__ATTR_RO_MODE(age, 0400);
+static struct kobj_attribute damon_sysfs_scheme_region_sz_filter_passed_attr =
+ __ATTR_RO_MODE(sz_filter_passed, 0400);
+
static struct attribute *damon_sysfs_scheme_region_attrs[] = {
&damon_sysfs_scheme_region_start_attr.attr,
&damon_sysfs_scheme_region_end_attr.attr,
&damon_sysfs_scheme_region_nr_accesses_attr.attr,
&damon_sysfs_scheme_region_age_attr.attr,
+ &damon_sysfs_scheme_region_sz_filter_passed_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_region);
@@ -114,55 +128,11 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
* scheme regions directory
*/
-/*
- * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update
- * status
- * @DAMOS_TRIED_REGIONS_UPD_IDLE: Waiting for next request.
- * @DAMOS_TRIED_REGIONS_UPD_STARTED: Update started.
- * @DAMOS_TRIED_REGIONS_UPD_FINISHED: Update finished.
- *
- * Each DAMON-based operation scheme (&struct damos) has its own apply
- * interval, and we need to expose the scheme tried regions based on only
- * single snapshot. For this, we keep the tried regions update status for each
- * scheme. The status becomes 'idle' at the beginning.
- *
- * Once the tried regions update request is received, the request handling
- * start function (damon_sysfs_scheme_update_regions_start()) sets the status
- * of all schemes as 'idle' again, and register ->before_damos_apply()
- * callback.
- *
- * Then, the first followup ->before_damos_apply() callback
- * (damon_sysfs_before_damos_apply()) sets the status 'started'. The first
- * ->after_sampling() or ->after_aggregation() callback
- * (damon_sysfs_cmd_request_callback()) after the call is called only after
- * the scheme is completely applied to the given snapshot. Hence the callback
- * knows the situation by showing 'started' status, and sets the status as
- * 'finished'. Then, damon_sysfs_before_damos_apply() understands the
- * situation by showing the 'finished' status and do nothing.
- *
- * If DAMOS is not applied to any region due to any reasons including the
- * access pattern, the watermarks, the quotas, and the filters,
- * ->before_damos_apply() will not be called back. Until the situation is
- * changed, the update will not be finished. To avoid this,
- * damon_sysfs_after_sampling() set the status as 'finished' if more than two
- * apply intervals of the scheme is passed while the state is 'idle'.
- *
- * Finally, the tried regions request handling finisher function
- * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
- */
-enum damos_sysfs_regions_upd_status {
- DAMOS_TRIED_REGIONS_UPD_IDLE,
- DAMOS_TRIED_REGIONS_UPD_STARTED,
- DAMOS_TRIED_REGIONS_UPD_FINISHED,
-};
-
struct damon_sysfs_scheme_regions {
struct kobject kobj;
struct list_head regions_list;
int nr_regions;
unsigned long total_bytes;
- enum damos_sysfs_regions_upd_status upd_status;
- unsigned long upd_timeout_jiffies;
};
static struct damon_sysfs_scheme_regions *
@@ -178,7 +148,6 @@ damon_sysfs_scheme_regions_alloc(void)
INIT_LIST_HEAD(&regions->regions_list);
regions->nr_regions = 0;
regions->total_bytes = 0;
- regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
return regions;
}
@@ -233,6 +202,7 @@ struct damon_sysfs_stats {
unsigned long sz_tried;
unsigned long nr_applied;
unsigned long sz_applied;
+ unsigned long sz_ops_filter_passed;
unsigned long qt_exceeds;
};
@@ -277,6 +247,15 @@ static ssize_t sz_applied_show(struct kobject *kobj,
return sysfs_emit(buf, "%lu\n", stats->sz_applied);
}
+static ssize_t sz_ops_filter_passed_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_stats *stats = container_of(kobj,
+ struct damon_sysfs_stats, kobj);
+
+ return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed);
+}
+
static ssize_t qt_exceeds_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -303,6 +282,9 @@ static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
__ATTR_RO_MODE(sz_applied, 0400);
+static struct kobj_attribute damon_sysfs_stats_sz_ops_filter_passed_attr =
+ __ATTR_RO_MODE(sz_ops_filter_passed, 0400);
+
static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
__ATTR_RO_MODE(qt_exceeds, 0400);
@@ -311,6 +293,7 @@ static struct attribute *damon_sysfs_stats_attrs[] = {
&damon_sysfs_stats_sz_tried_attr.attr,
&damon_sysfs_stats_nr_applied_attr.attr,
&damon_sysfs_stats_sz_applied_attr.attr,
+ &damon_sysfs_stats_sz_ops_filter_passed_attr.attr,
&damon_sysfs_stats_qt_exceeds_attr.attr,
NULL,
};
@@ -330,6 +313,7 @@ struct damon_sysfs_scheme_filter {
struct kobject kobj;
enum damos_filter_type type;
bool matching;
+ bool allow;
char *memcg_path;
struct damon_addr_range addr_range;
int target_idx;
@@ -402,6 +386,30 @@ static ssize_t matching_store(struct kobject *kobj,
return count;
}
+static ssize_t allow_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%c\n", filter->allow ? 'Y' : 'N');
+}
+
+static ssize_t allow_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ bool allow;
+ int err = kstrtobool(buf, &allow);
+
+ if (err)
+ return err;
+
+ filter->allow = allow;
+ return count;
+}
+
static ssize_t memcg_path_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -499,6 +507,9 @@ static struct kobj_attribute damon_sysfs_scheme_filter_type_attr =
static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr =
__ATTR_RW_MODE(matching, 0600);
+static struct kobj_attribute damon_sysfs_scheme_filter_allow_attr =
+ __ATTR_RW_MODE(allow, 0600);
+
static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr =
__ATTR_RW_MODE(memcg_path, 0600);
@@ -514,6 +525,7 @@ static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr =
static struct attribute *damon_sysfs_scheme_filter_attrs[] = {
&damon_sysfs_scheme_filter_type_attr.attr,
&damon_sysfs_scheme_filter_matching_attr.attr,
+ &damon_sysfs_scheme_filter_allow_attr.attr,
&damon_sysfs_scheme_filter_memcg_path_attr.attr,
&damon_sysfs_scheme_filter_addr_start_attr.attr,
&damon_sysfs_scheme_filter_addr_end_attr.attr,
@@ -1918,7 +1930,8 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme,
sysfs_filters->filters_arr[i];
struct damos_filter *filter =
damos_new_filter(sysfs_filter->type,
- sysfs_filter->matching);
+ sysfs_filter->matching,
+ sysfs_filter->allow);
int err;
if (!filter)
@@ -2122,32 +2135,32 @@ void damon_sysfs_schemes_update_stats(
sysfs_stats->sz_tried = scheme->stat.sz_tried;
sysfs_stats->nr_applied = scheme->stat.nr_applied;
sysfs_stats->sz_applied = scheme->stat.sz_applied;
+ sysfs_stats->sz_ops_filter_passed =
+ scheme->stat.sz_ops_filter_passed;
sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
}
}
-/*
- * damon_sysfs_schemes that need to update its schemes regions dir. Protected
- * by damon_sysfs_lock
- */
-static struct damon_sysfs_schemes *damon_sysfs_schemes_for_damos_callback;
-static int damon_sysfs_schemes_region_idx;
-static bool damos_regions_upd_total_bytes_only;
-
-/*
- * DAMON callback that called before damos apply. While this callback is
- * registered, damon_sysfs_lock should be held to ensure the regions
- * directories exist.
+/**
+ * damos_sysfs_populate_region_dir() - Populate a schemes tried region dir.
+ * @sysfs_schemes: Schemes directory to populate regions directory.
+ * @ctx: Corresponding DAMON context.
+ * @t: DAMON target of @r.
+ * @r: DAMON region to populate the directory for.
+ * @s: Corresponding scheme.
+ * @total_bytes_only: Whether the request is for bytes update only.
+ * @sz_filter_passed: Bytes of @r that passed filters of @s.
+ *
+ * Called from DAMOS walk callback while holding damon_sysfs_lock.
*/
-static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- struct damos *s)
+void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx, struct damon_target *t,
+ struct damon_region *r, struct damos *s, bool total_bytes_only,
+ unsigned long sz_filter_passed)
{
struct damos *scheme;
struct damon_sysfs_scheme_regions *sysfs_regions;
struct damon_sysfs_scheme_region *region;
- struct damon_sysfs_schemes *sysfs_schemes =
- damon_sysfs_schemes_for_damos_callback;
int schemes_idx = 0;
damon_for_each_scheme(scheme, ctx) {
@@ -2158,152 +2171,40 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
/* user could have removed the scheme sysfs dir */
if (schemes_idx >= sysfs_schemes->nr)
- return 0;
+ return;
sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
- if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED)
- return 0;
- if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE)
- sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED;
sysfs_regions->total_bytes += r->ar.end - r->ar.start;
- if (damos_regions_upd_total_bytes_only)
- return 0;
+ if (total_bytes_only)
+ return;
region = damon_sysfs_scheme_region_alloc(r);
if (!region)
- return 0;
+ return;
+ region->sz_filter_passed = sz_filter_passed;
list_add_tail(&region->list, &sysfs_regions->regions_list);
sysfs_regions->nr_regions++;
if (kobject_init_and_add(&region->kobj,
&damon_sysfs_scheme_region_ktype,
&sysfs_regions->kobj, "%d",
- damon_sysfs_schemes_region_idx++)) {
+ sysfs_regions->nr_regions++)) {
kobject_put(&region->kobj);
}
- return 0;
-}
-
-/*
- * DAMON callback that called after each accesses sampling. While this
- * callback is registered, damon_sysfs_lock should be held to ensure the
- * regions directories exist.
- */
-void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx)
-{
- struct damon_sysfs_schemes *sysfs_schemes =
- damon_sysfs_schemes_for_damos_callback;
- struct damon_sysfs_scheme_regions *sysfs_regions;
- int i;
-
- for (i = 0; i < sysfs_schemes->nr; i++) {
- sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
- if (sysfs_regions->upd_status ==
- DAMOS_TRIED_REGIONS_UPD_STARTED ||
- time_after(jiffies,
- sysfs_regions->upd_timeout_jiffies))
- sysfs_regions->upd_status =
- DAMOS_TRIED_REGIONS_UPD_FINISHED;
- }
}
/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_clear_regions(
- struct damon_sysfs_schemes *sysfs_schemes,
- struct damon_ctx *ctx)
+ struct damon_sysfs_schemes *sysfs_schemes)
{
- struct damos *scheme;
- int schemes_idx = 0;
+ int i;
- damon_for_each_scheme(scheme, ctx) {
+ for (i = 0; i < sysfs_schemes->nr; i++) {
struct damon_sysfs_scheme *sysfs_scheme;
- /* user could have removed the scheme sysfs dir */
- if (schemes_idx >= sysfs_schemes->nr)
- break;
-
- sysfs_scheme = sysfs_schemes->schemes_arr[schemes_idx++];
+ sysfs_scheme = sysfs_schemes->schemes_arr[i];
damon_sysfs_scheme_regions_rm_dirs(
sysfs_scheme->tried_regions);
sysfs_scheme->tried_regions->total_bytes = 0;
}
return 0;
}
-
-static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
-{
- struct damos *scheme;
- int i = 0;
-
- damon_for_each_scheme(scheme, ctx) {
- if (i == n)
- return scheme;
- i++;
- }
- return NULL;
-}
-
-static void damos_tried_regions_init_upd_status(
- struct damon_sysfs_schemes *sysfs_schemes,
- struct damon_ctx *ctx)
-{
- int i;
- struct damos *scheme;
- struct damon_sysfs_scheme_regions *sysfs_regions;
-
- for (i = 0; i < sysfs_schemes->nr; i++) {
- sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
- scheme = damos_sysfs_nth_scheme(i, ctx);
- if (!scheme) {
- sysfs_regions->upd_status =
- DAMOS_TRIED_REGIONS_UPD_FINISHED;
- continue;
- }
- sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
- sysfs_regions->upd_timeout_jiffies = jiffies +
- 2 * usecs_to_jiffies(scheme->apply_interval_us ?
- scheme->apply_interval_us :
- ctx->attrs.aggr_interval);
- }
-}
-
-/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
-int damon_sysfs_schemes_update_regions_start(
- struct damon_sysfs_schemes *sysfs_schemes,
- struct damon_ctx *ctx, bool total_bytes_only)
-{
- damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
- damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
- damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
- damos_regions_upd_total_bytes_only = total_bytes_only;
- ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
- return 0;
-}
-
-bool damos_sysfs_regions_upd_done(void)
-{
- struct damon_sysfs_schemes *sysfs_schemes =
- damon_sysfs_schemes_for_damos_callback;
- struct damon_sysfs_scheme_regions *sysfs_regions;
- int i;
-
- for (i = 0; i < sysfs_schemes->nr; i++) {
- sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
- if (sysfs_regions->upd_status !=
- DAMOS_TRIED_REGIONS_UPD_FINISHED)
- return false;
- }
- return true;
-}
-
-/*
- * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock. Caller
- * should unlock damon_sysfs_lock which held before
- * damon_sysfs_schemes_update_regions_start()
- */
-int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
-{
- damon_sysfs_schemes_for_damos_callback = NULL;
- ctx->callback.before_damos_apply = NULL;
- damon_sysfs_schemes_region_idx = 0;
- return 0;
-}
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 58145d59881d..deeab04d3b46 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1181,25 +1181,9 @@ static int damon_sysfs_add_targets(struct damon_ctx *ctx,
return 0;
}
-static bool damon_sysfs_schemes_regions_updating;
-
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
- struct damon_sysfs_kdamond *kdamond;
- enum damon_sysfs_cmd cmd;
-
- /* damon_sysfs_schemes_update_regions_stop() might not yet called */
- kdamond = damon_sysfs_cmd_request.kdamond;
- cmd = damon_sysfs_cmd_request.cmd;
- if (kdamond && ctx == kdamond->damon_ctx &&
- (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
- cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
- damon_sysfs_schemes_regions_updating) {
- damon_sysfs_schemes_update_regions_stop(ctx);
- damon_sysfs_schemes_regions_updating = false;
- mutex_unlock(&damon_sysfs_lock);
- }
if (!damon_target_has_pid(ctx))
return;
@@ -1214,57 +1198,24 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
/*
* damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
- * @kdamond: The kobject wrapper that associated to the kdamond thread.
+ * @data: The kobject wrapper that is associated with the kdamond thread.
*
* This function reads the schemes stats of specific kdamond and update the
* related values for sysfs files. This function should be called from DAMON
- * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON
- * contexts-internal data and DAMON sysfs variables.
+ * worker thread, to safely access the DAMON contexts-internal data. The
+ * caller should also hold ``damon_sysfs_lock`` and ensure ->damon_ctx of
+ * @data is a valid, non-NULL pointer, to safely access DAMON sysfs variables.
*/
-static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
+static int damon_sysfs_upd_schemes_stats(void *data)
{
+ struct damon_sysfs_kdamond *kdamond = data;
struct damon_ctx *ctx = kdamond->damon_ctx;
- if (!ctx)
- return -EINVAL;
damon_sysfs_schemes_update_stats(
kdamond->contexts->contexts_arr[0]->schemes, ctx);
return 0;
}
-static int damon_sysfs_upd_schemes_regions_start(
- struct damon_sysfs_kdamond *kdamond, bool total_bytes_only)
-{
- struct damon_ctx *ctx = kdamond->damon_ctx;
-
- if (!ctx)
- return -EINVAL;
- return damon_sysfs_schemes_update_regions_start(
- kdamond->contexts->contexts_arr[0]->schemes, ctx,
- total_bytes_only);
-}
-
-static int damon_sysfs_upd_schemes_regions_stop(
- struct damon_sysfs_kdamond *kdamond)
-{
- struct damon_ctx *ctx = kdamond->damon_ctx;
-
- if (!ctx)
- return -EINVAL;
- return damon_sysfs_schemes_update_regions_stop(ctx);
-}
-
-static int damon_sysfs_clear_schemes_regions(
- struct damon_sysfs_kdamond *kdamond)
-{
- struct damon_ctx *ctx = kdamond->damon_ctx;
-
- if (!ctx)
- return -EINVAL;
- return damon_sysfs_schemes_clear_regions(
- kdamond->contexts->contexts_arr[0]->schemes, ctx);
-}
-
static inline bool damon_sysfs_kdamond_running(
struct damon_sysfs_kdamond *kdamond)
{
@@ -1318,9 +1269,9 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
return err;
}
-static int damon_sysfs_commit_schemes_quota_goals(
- struct damon_sysfs_kdamond *sysfs_kdamond)
+static int damon_sysfs_commit_schemes_quota_goals(void *data)
{
+ struct damon_sysfs_kdamond *sysfs_kdamond = data;
struct damon_ctx *ctx;
struct damon_sysfs_context *sysfs_ctx;
@@ -1338,20 +1289,18 @@ static int damon_sysfs_commit_schemes_quota_goals(
/*
* damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
* sysfs files.
- * @kdamond: The kobject wrapper that associated to the kdamond thread.
+ * @data: The kobject wrapper that is associated with the kdamond thread.
*
* This function reads the schemes' effective quotas of specific kdamond and
* update the related values for sysfs files. This function should be called
* from DAMON callbacks while holding ``damon_syfs_lock``, to safely access the
* DAMON contexts-internal data and DAMON sysfs variables.
*/
-static int damon_sysfs_upd_schemes_effective_quotas(
- struct damon_sysfs_kdamond *kdamond)
+static int damon_sysfs_upd_schemes_effective_quotas(void *data)
{
+ struct damon_sysfs_kdamond *kdamond = data;
struct damon_ctx *ctx = kdamond->damon_ctx;
- if (!ctx)
- return -EINVAL;
damos_sysfs_update_effective_quotas(
kdamond->contexts->contexts_arr[0]->schemes, ctx);
return 0;
@@ -1371,67 +1320,27 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active,
bool after_aggregation)
{
struct damon_sysfs_kdamond *kdamond;
- bool total_bytes_only = false;
int err = 0;
/* avoid deadlock due to concurrent state_store('off') */
- if (!damon_sysfs_schemes_regions_updating &&
- !mutex_trylock(&damon_sysfs_lock))
+ if (!mutex_trylock(&damon_sysfs_lock))
return 0;
kdamond = damon_sysfs_cmd_request.kdamond;
if (!kdamond || kdamond->damon_ctx != c)
goto out;
switch (damon_sysfs_cmd_request.cmd) {
- case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
- err = damon_sysfs_upd_schemes_stats(kdamond);
- break;
case DAMON_SYSFS_CMD_COMMIT:
if (!after_aggregation)
goto out;
err = damon_sysfs_commit_input(kdamond);
break;
- case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
- err = damon_sysfs_commit_schemes_quota_goals(kdamond);
- break;
- case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
- total_bytes_only = true;
- fallthrough;
- case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
- if (!damon_sysfs_schemes_regions_updating) {
- err = damon_sysfs_upd_schemes_regions_start(kdamond,
- total_bytes_only);
- if (!err) {
- damon_sysfs_schemes_regions_updating = true;
- goto keep_lock_out;
- }
- } else {
- damos_sysfs_mark_finished_regions_updates(c);
- /*
- * Continue regions updating if DAMON is till
- * active and the update for all schemes is not
- * finished.
- */
- if (active && !damos_sysfs_regions_upd_done())
- goto keep_lock_out;
- err = damon_sysfs_upd_schemes_regions_stop(kdamond);
- damon_sysfs_schemes_regions_updating = false;
- }
- break;
- case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
- err = damon_sysfs_clear_schemes_regions(kdamond);
- break;
- case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
- err = damon_sysfs_upd_schemes_effective_quotas(kdamond);
- break;
default:
break;
}
/* Mark the request as invalid now. */
damon_sysfs_cmd_request.kdamond = NULL;
out:
- if (!damon_sysfs_schemes_regions_updating)
- mutex_unlock(&damon_sysfs_lock);
-keep_lock_out:
+ mutex_unlock(&damon_sysfs_lock);
return err;
}
@@ -1525,6 +1434,58 @@ static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
*/
}
+static int damon_sysfs_damon_call(int (*fn)(void *data),
+ struct damon_sysfs_kdamond *kdamond)
+{
+ struct damon_call_control call_control = {};
+
+ if (!kdamond->damon_ctx)
+ return -EINVAL;
+ call_control.fn = fn;
+ call_control.data = kdamond;
+ return damon_call(kdamond->damon_ctx, &call_control);
+}
+
+struct damon_sysfs_schemes_walk_data {
+ struct damon_sysfs_kdamond *sysfs_kdamond;
+ bool total_bytes_only;
+};
+
+/* populate the region directory */
+static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *s, unsigned long sz_filter_passed)
+{
+ struct damon_sysfs_schemes_walk_data *walk_data = data;
+ struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond;
+
+ damos_sysfs_populate_region_dir(
+ sysfs_kdamond->contexts->contexts_arr[0]->schemes,
+ ctx, t, r, s, walk_data->total_bytes_only,
+ sz_filter_passed);
+}
+
+static int damon_sysfs_update_schemes_tried_regions(
+ struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only)
+{
+ struct damon_sysfs_schemes_walk_data walk_data = {
+ .sysfs_kdamond = sysfs_kdamond,
+ .total_bytes_only = total_bytes_only,
+ };
+ struct damos_walk_control control = {
+ .walk_fn = damon_sysfs_schemes_tried_regions_upd_one,
+ .data = &walk_data,
+ };
+ struct damon_ctx *ctx = sysfs_kdamond->damon_ctx;
+
+ if (!ctx)
+ return -EINVAL;
+
+ damon_sysfs_schemes_clear_regions(
+ sysfs_kdamond->contexts->contexts_arr[0]->schemes);
+ return damos_walk(ctx, &control);
+}
+
/*
* damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
* @cmd: The command to handle.
@@ -1543,12 +1504,29 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
{
bool need_wait = true;
- /* Handle commands that doesn't access DAMON context-internal data */
switch (cmd) {
case DAMON_SYSFS_CMD_ON:
return damon_sysfs_turn_damon_on(kdamond);
case DAMON_SYSFS_CMD_OFF:
return damon_sysfs_turn_damon_off(kdamond);
+ case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
+ return damon_sysfs_damon_call(
+ damon_sysfs_commit_schemes_quota_goals,
+ kdamond);
+ case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
+ return damon_sysfs_damon_call(
+ damon_sysfs_upd_schemes_stats, kdamond);
+ case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
+ return damon_sysfs_update_schemes_tried_regions(kdamond, true);
+ case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
+ return damon_sysfs_update_schemes_tried_regions(kdamond, false);
+ case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
+ return damon_sysfs_schemes_clear_regions(
+ kdamond->contexts->contexts_arr[0]->schemes);
+ case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
+ return damon_sysfs_damon_call(
+ damon_sysfs_upd_schemes_effective_quotas,
+ kdamond);
default:
break;
}
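A minimal sketch, not part of the patch, of the dispatch pattern the hunk above moves to: a read-only handler is packed into a damon_call_control by damon_sysfs_damon_call() and run by the kdamond worker, rather than being deferred to damon_sysfs_cmd_request_callback(). The handler and command names below are invented for illustration; the wrapper and the stats updater are the ones shown in this patch.

/*
 * Sketch only: 'damon_sysfs_upd_example' is an invented handler name.
 */
static int damon_sysfs_upd_example(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	/* Runs in kdamond context, so ctx-internal data is safe to read. */
	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}

static int damon_sysfs_handle_example_cmd(struct damon_sysfs_kdamond *kdamond)
{
	/* Reuses the damon_call() wrapper introduced above. */
	return damon_sysfs_damon_call(damon_sysfs_upd_example, kdamond);
}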
diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig
index a73be044fc9b..36a450f57b58 100644
--- a/mm/damon/tests/.kunitconfig
+++ b/mm/damon/tests/.kunitconfig
@@ -13,10 +13,3 @@ CONFIG_DAMON_VADDR_KUNIT_TEST=y
CONFIG_SYSFS=y
CONFIG_DAMON_SYSFS=y
CONFIG_DAMON_SYSFS_KUNIT_TEST=y
-
-# for DAMON debugfs interface
-CONFIG_DEBUG_FS=y
-CONFIG_DAMON_PADDR=y
-CONFIG_DAMON_DBGFS_DEPRECATED=y
-CONFIG_DAMON_DBGFS=y
-CONFIG_DAMON_DBGFS_KUNIT_TEST=y
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index cf22e09a3507..532c6a6f21f9 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -411,7 +411,7 @@ static void damos_test_new_filter(struct kunit *test)
{
struct damos_filter *filter;
- filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
KUNIT_EXPECT_EQ(test, filter->matching, true);
KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
@@ -425,7 +425,7 @@ static void damos_test_filter_out(struct kunit *test)
struct damon_region *r, *r2;
struct damos_filter *f;
- f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
+ f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
f->addr_range = (struct damon_addr_range){
.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
@@ -434,25 +434,25 @@ static void damos_test_filter_out(struct kunit *test)
damon_add_region(r, t);
/* region in the range */
- KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region before the range */
r->ar.start = DAMON_MIN_REGION * 1;
r->ar.end = DAMON_MIN_REGION * 2;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region after the range */
r->ar.start = DAMON_MIN_REGION * 6;
r->ar.end = DAMON_MIN_REGION * 8;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region started before the range */
r->ar.start = DAMON_MIN_REGION * 1;
r->ar.end = DAMON_MIN_REGION * 4;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f));
/* filter should have split the region */
KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
@@ -465,7 +465,7 @@ static void damos_test_filter_out(struct kunit *test)
/* region started in the range */
r->ar.start = DAMON_MIN_REGION * 2;
r->ar.end = DAMON_MIN_REGION * 8;
- KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f));
/* filter should have split the region */
KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
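A hedged sketch of installing a filter with the extended damos_new_filter() signature exercised by the updated test above: matching=true with allow=false keeps the pre-existing reject-on-match behaviour, while allow=true would let matching memory pass instead. damos_add_filter() is assumed from the existing DAMON core API; the helper name is invented.

static int example_install_anon_reject_filter(struct damos *scheme)
{
	struct damos_filter *filter;

	/* matching=true, allow=false: memory that matches is filtered out. */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!filter)
		return -ENOMEM;
	damos_add_filter(scheme, filter);	/* assumed core API */
	return 0;
}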
diff --git a/mm/damon/tests/dbgfs-kunit.h b/mm/damon/tests/dbgfs-kunit.h
deleted file mode 100644
index 087e53f641a8..000000000000
--- a/mm/damon/tests/dbgfs-kunit.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DAMON Debugfs Interface Unit Tests
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
-
-#ifndef _DAMON_DBGFS_TEST_H
-#define _DAMON_DBGFS_TEST_H
-
-#include <kunit/test.h>
-
-static void damon_dbgfs_test_str_to_ints(struct kunit *test)
-{
- char *question;
- int *answers;
- int expected[] = {12, 35, 46};
- ssize_t nr_integers = 0, i;
-
- question = "123";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
- KUNIT_EXPECT_EQ(test, 123, answers[0]);
- kfree(answers);
-
- question = "123abc";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
- KUNIT_EXPECT_EQ(test, 123, answers[0]);
- kfree(answers);
-
- question = "a123";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-
- question = "12 35";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
- for (i = 0; i < nr_integers; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "12 35 46";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
- for (i = 0; i < nr_integers; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "12 35 abc 46";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
- for (i = 0; i < 2; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-
- question = "\n";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-}
-
-static void damon_dbgfs_test_set_targets(struct kunit *test)
-{
- struct damon_ctx *ctx = dbgfs_new_ctx();
- char buf[64];
-
- if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
- dbgfs_destroy_ctx(ctx);
- kunit_skip(test, "PADDR not registered");
- }
-
- /* Make DAMON consider target has no pid */
- damon_select_ops(ctx, DAMON_OPS_PADDR);
-
- dbgfs_set_targets(ctx, 0, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
-
- dbgfs_set_targets(ctx, 1, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "42\n");
-
- dbgfs_set_targets(ctx, 0, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
-
- dbgfs_destroy_ctx(ctx);
-}
-
-static void damon_dbgfs_test_set_init_regions(struct kunit *test)
-{
- struct damon_ctx *ctx = damon_new_ctx();
- /* Each line represents one region in ``<target idx> <start> <end>`` */
- char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45",
- "1 10 20\n",
- "1 10 20\n0 39 59\n0 70 134\n 1 20 25\n",
- ""};
- /* Reading the file again will show sorted, clean output */
- char * const valid_expects[] = {"1 10 20\n1 20 30\n1 35 45\n",
- "1 10 20\n",
- "0 39 59\n0 70 134\n1 10 20\n1 20 25\n",
- ""};
- char * const invalid_inputs[] = {"3 10 20\n", /* target not exists */
- "1 10 20\n 1 14 26\n", /* regions overlap */
- "0 10 20\n1 30 40\n 0 5 8"}; /* not sorted by address */
- char *input, *expect;
- int i, rc;
- char buf[256];
-
- if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
- damon_destroy_ctx(ctx);
- kunit_skip(test, "PADDR not registered");
- }
-
- damon_select_ops(ctx, DAMON_OPS_PADDR);
-
- dbgfs_set_targets(ctx, 3, NULL);
-
- /* Put valid inputs and check the results */
- for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) {
- input = valid_inputs[i];
- expect = valid_expects[i];
-
- rc = set_init_regions(ctx, input, strnlen(input, 256));
- KUNIT_EXPECT_EQ(test, rc, 0);
-
- memset(buf, 0, 256);
- sprint_init_regions(ctx, buf, 256);
-
- KUNIT_EXPECT_STREQ(test, (char *)buf, expect);
- }
- /* Put invalid inputs and check the return error code */
- for (i = 0; i < ARRAY_SIZE(invalid_inputs); i++) {
- input = invalid_inputs[i];
- pr_info("input: %s\n", input);
- rc = set_init_regions(ctx, input, strnlen(input, 256));
- KUNIT_EXPECT_EQ(test, rc, -EINVAL);
-
- memset(buf, 0, 256);
- sprint_init_regions(ctx, buf, 256);
-
- KUNIT_EXPECT_STREQ(test, (char *)buf, "");
- }
-
- dbgfs_set_targets(ctx, 0, NULL);
- damon_destroy_ctx(ctx);
-}
-
-static struct kunit_case damon_test_cases[] = {
- KUNIT_CASE(damon_dbgfs_test_str_to_ints),
- KUNIT_CASE(damon_dbgfs_test_set_targets),
- KUNIT_CASE(damon_dbgfs_test_set_init_regions),
- {},
-};
-
-static struct kunit_suite damon_test_suite = {
- .name = "damon-dbgfs",
- .test_cases = damon_test_cases,
-};
-kunit_test_suite(damon_test_suite);
-
-#endif /* _DAMON_DBGFS_TEST_H */
-
-#endif /* CONFIG_DAMON_KUNIT_TEST */
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index b9fe3bc8472b..7cd944266a92 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -68,7 +68,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
static struct mm_struct mm;
struct damon_addr_range regions[3] = {0};
/* 10-20-25, 200-210-220, 300-305, 307-330 */
- struct vm_area_struct vmas[] = {
+ static struct vm_area_struct vmas[] = {
(struct vm_area_struct) {.vm_start = 10, .vm_end = 20},
(struct vm_area_struct) {.vm_start = 20, .vm_end = 25},
(struct vm_area_struct) {.vm_start = 200, .vm_end = 210},
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index b9eaa20b73b9..a6174f725bd7 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -655,7 +655,7 @@ static unsigned long damos_madvise(struct damon_target *target,
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
+ struct damos *scheme, unsigned long *sz_filter_passed)
{
int madv_action;
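A sketch, with invented names, of how an operations-set apply callback is expected to use the new out-parameter added above: it reports the bytes of the region that passed its ops-layer filters, which the core then surfaces as sz_filter_passed. Only the signature matches the patch; the filter check is a placeholder.

static bool example_region_passes(struct damos *scheme, struct damon_region *r)
{
	/* Placeholder for the real ops-layer filter evaluation. */
	return true;
}

static unsigned long example_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	unsigned long sz = r->ar.end - r->ar.start;

	if (!example_region_passes(scheme, r))
		return 0;
	*sz_filter_passed += sz;	/* bytes that survived ops-layer filters */
	return sz;			/* bytes the scheme action touched */
}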
diff --git a/mm/debug.c b/mm/debug.c
index 95b6ab809c0e..8d2acf432385 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -249,6 +249,77 @@ void dump_mm(const struct mm_struct *mm)
}
EXPORT_SYMBOL(dump_mm);
+void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
+{
+ if (reason)
+ pr_warn("vmg %px dumped because: %s\n", vmg, reason);
+
+ if (!vmg) {
+ pr_warn("vmg %px state: (NULL)\n", vmg);
+ return;
+ }
+
+ pr_warn("vmg %px state: mm %px pgoff %lx\n"
+ "vmi %px [%lx,%lx)\n"
+ "prev %px next %px vma %px\n"
+ "start %lx end %lx flags %lx\n"
+ "file %px anon_vma %px policy %px\n"
+ "uffd_ctx %px\n"
+ "anon_name %px\n"
+ "merge_flags %x state %x\n",
+ vmg, vmg->mm, vmg->pgoff,
+ vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
+ vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
+ vmg->prev, vmg->next, vmg->vma,
+ vmg->start, vmg->end, vmg->flags,
+ vmg->file, vmg->anon_vma, vmg->policy,
+#ifdef CONFIG_USERFAULTFD
+ vmg->uffd_ctx.ctx,
+#else
+ (void *)0,
+#endif
+ vmg->anon_name,
+ (int)vmg->merge_flags, (int)vmg->state);
+
+ if (vmg->mm) {
+ pr_warn("vmg %px mm:\n", vmg);
+ dump_mm(vmg->mm);
+ } else {
+ pr_warn("vmg %px mm: (NULL)\n", vmg);
+ }
+
+ if (vmg->vma) {
+ pr_warn("vmg %px vma:\n", vmg);
+ dump_vma(vmg->vma);
+ } else {
+ pr_warn("vmg %px vma: (NULL)\n", vmg);
+ }
+
+ if (vmg->prev) {
+ pr_warn("vmg %px prev:\n", vmg);
+ dump_vma(vmg->prev);
+ } else {
+ pr_warn("vmg %px prev: (NULL)\n", vmg);
+ }
+
+ if (vmg->next) {
+ pr_warn("vmg %px next:\n", vmg);
+ dump_vma(vmg->next);
+ } else {
+ pr_warn("vmg %px next: (NULL)\n", vmg);
+ }
+
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+ if (vmg->vmi) {
+ pr_warn("vmg %px vmi:\n", vmg);
+ vma_iter_dump_tree(vmg->vmi);
+ } else {
+ pr_warn("vmg %px vmi: (NULL)\n", vmg);
+ }
+#endif
+}
+EXPORT_SYMBOL(dump_vmg);
+
static bool page_init_poisoning __read_mostly = true;
static int __init setup_vm_debug(char *str)
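A usage sketch for the new dump_vmg() helper, not part of the patch: dump the full merge state when a merge invariant looks broken. The checking helper is an invented name; the fields and the dump call match the code above.

static void example_check_vmg(const struct vma_merge_struct *vmg)
{
	/* Illustrative only: report a nonsensical merge range with full state. */
	if (WARN_ON_ONCE(vmg->start >= vmg->end))
		dump_vmg(vmg, "start >= end");
}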
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index ce06b2884789..ff35b84a7b50 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -245,7 +245,10 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size,
#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
-void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
+/*
+ * Return 0 on success, or -ENOMEM if early_memremap() finds no empty slot.
+ */
+int __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
unsigned long slop, clen;
char *p;
@@ -256,12 +259,15 @@ void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
if (clen > MAX_MAP_CHUNK - slop)
clen = MAX_MAP_CHUNK - slop;
p = early_memremap(src & PAGE_MASK, clen + slop);
+ if (!p)
+ return -ENOMEM;
memcpy(dest, p + slop, clen);
early_memunmap(p, clen + slop);
dest += clen;
src += clen;
size -= clen;
}
+ return 0;
}
#else /* CONFIG_MMU */
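A caller-side sketch with an invented function name: now that copy_from_early_mem() returns an int, an early_memremap() failure must be checked rather than silently ignored.

static int __init example_copy_early_blob(void *dst, phys_addr_t src,
					  unsigned long size)
{
	int err = copy_from_early_mem(dst, src, size);

	if (err)
		pr_err("early copy of %lu bytes failed: %d\n", size, err);
	return err;
}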
diff --git a/mm/filemap.c b/mm/filemap.c
index 440922a7d8f1..804d7365680c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -47,6 +47,7 @@
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
+#include <linux/fsnotify.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -441,6 +442,24 @@ int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
EXPORT_SYMBOL(filemap_fdatawrite_range);
/**
+ * filemap_fdatawrite_range_kick - start writeback on a range
+ * @mapping: target address_space
+ * @start: index to start writeback on
+ * @end: last (non-inclusive) index for writeback
+ *
+ * This is a non-integrity writeback helper, to start writing back folios
+ * for the indicated range.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
+ loff_t end)
+{
+ return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);
+}
+EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick);
+
+/**
* filemap_flush - mostly a non-blocking flush
* @mapping: target address_space
*
@@ -1464,25 +1483,6 @@ static int folio_put_wait_locked(struct folio *folio, int state)
}
/**
- * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
- * @folio: Folio defining the wait queue of interest
- * @waiter: Waiter to add to the queue
- *
- * Add an arbitrary @waiter to the wait queue for the nominated @folio.
- */
-void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
-{
- wait_queue_head_t *q = folio_waitqueue(folio);
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue_entry_tail(q, waiter);
- folio_set_waiters(folio);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL_GPL(folio_add_wait_queue);
-
-/**
* folio_unlock - Unlock a locked folio.
* @folio: The folio.
*
@@ -1590,6 +1590,27 @@ int folio_wait_private_2_killable(struct folio *folio)
}
EXPORT_SYMBOL(folio_wait_private_2_killable);
+/*
+ * If the folio was marked as dropbehind, its pages should be dropped when
+ * writeback completes. Do that now. If we fail, it's likely because of a big
+ * folio - just reset dropbehind for that case and later completions should invalidate.
+ */
+static void folio_end_dropbehind_write(struct folio *folio)
+{
+ /*
+ * Hitting !in_task() should not happen off RWF_DONTCACHE writeback,
+ * but can happen if normal writeback just happens to find dirty folios
+ * that were created as part of uncached writeback, and that writeback
+ * would otherwise not need non-IRQ handling. Just skip the
+ * invalidation in that case.
+ */
+ if (in_task() && folio_trylock(folio)) {
+ if (folio->mapping)
+ folio_unmap_invalidate(folio->mapping, folio, 0);
+ folio_unlock(folio);
+ }
+}
+
/**
* folio_end_writeback - End writeback against a folio.
* @folio: The folio.
@@ -1600,6 +1621,8 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
*/
void folio_end_writeback(struct folio *folio)
{
+ bool folio_dropbehind = false;
+
VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
/*
@@ -1621,9 +1644,14 @@ void folio_end_writeback(struct folio *folio)
* reused before the folio_wake_bit().
*/
folio_get(folio);
+ if (!folio_test_dirty(folio))
+ folio_dropbehind = folio_test_clear_dropbehind(folio);
if (__folio_end_writeback(folio))
folio_wake_bit(folio, PG_writeback);
acct_reclaim_writeback(folio);
+
+ if (folio_dropbehind)
+ folio_end_dropbehind_write(folio);
folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);
@@ -1946,6 +1974,8 @@ no_page:
/* Init accessed so avoid atomic mark_page_accessed later */
if (fgp_flags & FGP_ACCESSED)
__folio_set_referenced(folio);
+ if (fgp_flags & FGP_DONTCACHE)
+ __folio_set_dropbehind(folio);
err = filemap_add_folio(mapping, folio, index, gfp);
if (!err)
@@ -1968,6 +1998,9 @@ no_page:
if (!folio)
return ERR_PTR(-ENOENT);
+ /* not an uncached lookup, clear uncached if set */
+ if (folio_test_dropbehind(folio) && !(fgp_flags & FGP_DONTCACHE))
+ folio_clear_dropbehind(folio);
return folio;
}
EXPORT_SYMBOL(__filemap_get_folio);
@@ -2450,18 +2483,22 @@ unlock_mapping:
return error;
}
-static int filemap_create_folio(struct file *file,
- struct address_space *mapping, loff_t pos,
- struct folio_batch *fbatch)
+static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)
{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
struct folio *folio;
int error;
unsigned int min_order = mapping_min_folio_order(mapping);
pgoff_t index;
+ if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
+ return -EAGAIN;
+
folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
if (!folio)
return -ENOMEM;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ __folio_set_dropbehind(folio);
/*
* Protect against truncate / hole punch. Grabbing invalidate_lock
@@ -2477,7 +2514,7 @@ static int filemap_create_folio(struct file *file,
* well to keep locking rules simple.
*/
filemap_invalidate_lock_shared(mapping);
- index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
+ index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order;
error = filemap_add_folio(mapping, folio, index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
if (error == -EEXIST)
@@ -2485,7 +2522,8 @@ static int filemap_create_folio(struct file *file,
if (error)
goto error;
- error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
+ error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
+ folio);
if (error)
goto error;
@@ -2506,6 +2544,8 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file,
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ ractl.dropbehind = 1;
page_cache_async_ra(&ractl, folio, last_index - folio->index);
return 0;
}
@@ -2515,7 +2555,6 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
{
struct file *filp = iocb->ki_filp;
struct address_space *mapping = filp->f_mapping;
- struct file_ra_state *ra = &filp->f_ra;
pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
pgoff_t last_index;
struct folio *folio;
@@ -2530,20 +2569,21 @@ retry:
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
if (!folio_batch_count(fbatch)) {
+ DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index);
+
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
if (iocb->ki_flags & IOCB_NOWAIT)
flags = memalloc_noio_save();
- page_cache_sync_readahead(mapping, ra, filp, index,
- last_index - index);
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ ractl.dropbehind = 1;
+ page_cache_sync_ra(&ractl, last_index - index);
if (iocb->ki_flags & IOCB_NOWAIT)
memalloc_noio_restore(flags);
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
}
if (!folio_batch_count(fbatch)) {
- if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
- return -EAGAIN;
- err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
+ err = filemap_create_folio(iocb, fbatch);
if (err == AOP_TRUNCATED_PAGE)
goto retry;
return err;
@@ -2584,6 +2624,20 @@ static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
return (pos1 >> shift == pos2 >> shift);
}
+static void filemap_end_dropbehind_read(struct address_space *mapping,
+ struct folio *folio)
+{
+ if (!folio_test_dropbehind(folio))
+ return;
+ if (folio_test_writeback(folio) || folio_test_dirty(folio))
+ return;
+ if (folio_trylock(folio)) {
+ if (folio_test_clear_dropbehind(folio))
+ folio_unmap_invalidate(mapping, folio, 0);
+ folio_unlock(folio);
+ }
+}
+
/**
* filemap_read - Read data from the page cache.
* @iocb: The iocb to read.
@@ -2697,8 +2751,12 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
}
}
put_folios:
- for (i = 0; i < folio_batch_count(&fbatch); i++)
- folio_put(fbatch.folios[i]);
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ filemap_end_dropbehind_read(mapping, folio);
+ folio_put(folio);
+ }
folio_batch_init(&fbatch);
} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
@@ -3141,6 +3199,14 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
unsigned long vm_flags = vmf->vma->vm_flags;
unsigned int mmap_miss;
+ /*
+ * If we have pre-content watches we need to disable readahead to make
+ * sure that we don't populate our mapping with 0 filled pages that we
+ * never emitted an event for.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode)))
+ return fpin;
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Use the readahead code, even if readahead is disabled */
if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
@@ -3209,6 +3275,10 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
struct file *fpin = NULL;
unsigned int mmap_miss;
+ /* See comment in do_sync_mmap_readahead. */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode)))
+ return fpin;
+
/* If we don't want any read-ahead, don't bother */
if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
return fpin;
@@ -3268,6 +3338,48 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
}
/**
+ * filemap_fsnotify_fault - maybe emit a pre-content event.
+ * @vmf: struct vm_fault containing details of the fault.
+ *
+ * If we have a pre-content watch on this file we will emit an event for this
+ * range. If this returns a non-zero value, the fault caller should return
+ * immediately; we return VM_FAULT_RETRY if we had to emit an event, which
+ * triggers the fault again so the fault handler runs a second time through.
+ *
+ * Return: a bitwise-OR of %VM_FAULT_ codes, 0 if nothing happened.
+ */
+vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf)
+{
+ struct file *fpin = NULL;
+ int mask = (vmf->flags & FAULT_FLAG_WRITE) ? MAY_WRITE : MAY_ACCESS;
+ loff_t pos = vmf->pgoff >> PAGE_SHIFT;
+ size_t count = PAGE_SIZE;
+ int err;
+
+ /*
+ * We already did this and now we're retrying with everything locked,
+ * don't emit the event and continue.
+ */
+ if (vmf->flags & FAULT_FLAG_TRIED)
+ return 0;
+
+ /* No watches, we're done. */
+ if (likely(!FMODE_FSNOTIFY_HSM(vmf->vma->vm_file->f_mode)))
+ return 0;
+
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ if (!fpin)
+ return VM_FAULT_SIGBUS;
+
+ err = fsnotify_file_area_perm(fpin, mask, &pos, count);
+ fput(fpin);
+ if (err)
+ return VM_FAULT_SIGBUS;
+ return VM_FAULT_RETRY;
+}
+EXPORT_SYMBOL_GPL(filemap_fsnotify_fault);
+
+/**
* filemap_fault - read in file data for page fault handling
* @vmf: struct vm_fault containing details of the fault
*
@@ -3371,6 +3483,37 @@ retry_find:
*/
if (unlikely(!folio_test_uptodate(folio))) {
/*
+ * If this is a pre-content file, we can now emit an event to
+ * try to populate the folio.
+ */
+ if (!(vmf->flags & FAULT_FLAG_TRIED) &&
+ unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ loff_t pos = folio_pos(folio);
+ size_t count = folio_size(folio);
+
+ /* We're NOWAIT, we have to retry. */
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) {
+ folio_unlock(folio);
+ goto out_retry;
+ }
+
+ if (mapping_locked)
+ filemap_invalidate_unlock_shared(mapping);
+ mapping_locked = false;
+
+ folio_unlock(folio);
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ if (!fpin)
+ goto out_retry;
+
+ error = fsnotify_file_area_perm(fpin, MAY_ACCESS, &pos,
+ count);
+ if (error)
+ ret = VM_FAULT_SIGBUS;
+ goto out_retry;
+ }
+
+ /*
* If the invalidate lock is not held, the folio was in cache
* and uptodate and now it is not. Strange but possible since we
* didn't hold the page lock all the time. Let's drop
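A userspace-side sketch of driving the dropbehind/uncached read path wired up above: an uncached buffered read brings folios in, and they are invalidated again after being consumed. RWF_DONTCACHE and its fallback value below are assumptions based on this series; prefer the uapi definition when available.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* assumed value, see uapi headers */
#endif

int example_uncached_read(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t ret = preadv2(fd, &iov, 1, off, RWF_DONTCACHE);

	if (ret < 0)
		perror("preadv2(RWF_DONTCACHE)");
	return ret < 0 ? -1 : (int)ret;
}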
diff --git a/mm/gup.c b/mm/gup.c
index 87a9c2026e4d..3883b307780e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -596,6 +596,33 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs,
}
#endif /* CONFIG_HAVE_GUP_FAST */
+/* Common code for can_follow_write_* */
+static inline bool can_follow_write_common(struct page *page,
+ struct vm_area_struct *vma, unsigned int flags)
+{
+ /* Maybe FOLL_FORCE is set to override it? */
+ if (!(flags & FOLL_FORCE))
+ return false;
+
+ /* But FOLL_FORCE has no effect on shared mappings */
+ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+ return false;
+
+ /* ... or read-only private ones */
+ if (!(vma->vm_flags & VM_MAYWRITE))
+ return false;
+
+ /* ... or already writable ones that just need to take a write fault */
+ if (vma->vm_flags & VM_WRITE)
+ return false;
+
+ /*
+ * See can_change_pte_writable(): we broke COW and could map the page
+ * writable if we have an exclusive anonymous page ...
+ */
+ return page && PageAnon(page) && PageAnonExclusive(page);
+}
+
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags, unsigned long address)
{
@@ -622,6 +649,18 @@ static struct page *no_page_table(struct vm_area_struct *vma,
}
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+/* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */
+static inline bool can_follow_write_pud(pud_t pud, struct page *page,
+ struct vm_area_struct *vma,
+ unsigned int flags)
+{
+ /* If the pud is writable, we can write to the page. */
+ if (pud_write(pud))
+ return true;
+
+ return can_follow_write_common(page, vma, flags);
+}
+
static struct page *follow_huge_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp,
int flags, struct follow_page_context *ctx)
@@ -634,10 +673,11 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
assert_spin_locked(pud_lockptr(mm, pudp));
- if ((flags & FOLL_WRITE) && !pud_write(pud))
+ if (!pud_present(pud))
return NULL;
- if (!pud_present(pud))
+ if ((flags & FOLL_WRITE) &&
+ !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags))
return NULL;
pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
@@ -686,27 +726,7 @@ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
if (pmd_write(pmd))
return true;
- /* Maybe FOLL_FORCE is set to override it? */
- if (!(flags & FOLL_FORCE))
- return false;
-
- /* But FOLL_FORCE has no effect on shared mappings */
- if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
- return false;
-
- /* ... or read-only private ones */
- if (!(vma->vm_flags & VM_MAYWRITE))
- return false;
-
- /* ... or already writable ones that just need to take a write fault */
- if (vma->vm_flags & VM_WRITE)
- return false;
-
- /*
- * See can_change_pte_writable(): we broke COW and could map the page
- * writable if we have an exclusive anonymous page ...
- */
- if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ if (!can_follow_write_common(page, vma, flags))
return false;
/* ... and a write-fault isn't required for other reasons. */
@@ -807,27 +827,7 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page,
if (pte_write(pte))
return true;
- /* Maybe FOLL_FORCE is set to override it? */
- if (!(flags & FOLL_FORCE))
- return false;
-
- /* But FOLL_FORCE has no effect on shared mappings */
- if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
- return false;
-
- /* ... or read-only private ones */
- if (!(vma->vm_flags & VM_MAYWRITE))
- return false;
-
- /* ... or already writable ones that just need to take a write fault */
- if (vma->vm_flags & VM_WRITE)
- return false;
-
- /*
- * See can_change_pte_writable(): we broke COW and could map the page
- * writable if we have an exclusive anonymous page ...
- */
- if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ if (!can_follow_write_common(page, vma, flags))
return false;
/* ... and a write-fault isn't required for other reasons. */
@@ -1294,9 +1294,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
- /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
- if (is_vm_hugetlb_page(vma))
- return -EFAULT;
/*
* We used to let the write,force case do COW in a
* VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
@@ -2323,13 +2320,13 @@ static void pofs_unpin(struct pages_or_folios *pofs)
/*
* Returns the number of collected folios. Return value is always >= 0.
*/
-static unsigned long collect_longterm_unpinnable_folios(
+static void collect_longterm_unpinnable_folios(
struct list_head *movable_folio_list,
struct pages_or_folios *pofs)
{
- unsigned long i, collected = 0;
struct folio *prev_folio = NULL;
bool drain_allow = true;
+ unsigned long i;
for (i = 0; i < pofs->nr_entries; i++) {
struct folio *folio = pofs_get_folio(pofs, i);
@@ -2341,13 +2338,11 @@ static unsigned long collect_longterm_unpinnable_folios(
if (folio_is_longterm_pinnable(folio))
continue;
- collected++;
-
if (folio_is_device_coherent(folio))
continue;
if (folio_test_hugetlb(folio)) {
- isolate_hugetlb(folio, movable_folio_list);
+ folio_isolate_hugetlb(folio, movable_folio_list);
continue;
}
@@ -2364,8 +2359,6 @@ static unsigned long collect_longterm_unpinnable_folios(
NR_ISOLATED_ANON + folio_is_file_lru(folio),
folio_nr_pages(folio));
}
-
- return collected;
}
/*
@@ -2442,11 +2435,9 @@ static long
check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
{
LIST_HEAD(movable_folio_list);
- unsigned long collected;
- collected = collect_longterm_unpinnable_folios(&movable_folio_list,
- pofs);
- if (!collected)
+ collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
+ if (list_empty(&movable_folio_list))
return 0;
return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index db64116a4f84..3d3ebdc002d5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -617,6 +617,8 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
#ifdef CONFIG_SHMEM
@@ -637,6 +639,8 @@ static struct attribute *anon_stats_attrs[] = {
#ifndef CONFIG_SHMEM
&zswpout_attr.attr,
&swpin_attr.attr,
+ &swpin_fallback_attr.attr,
+ &swpin_fallback_charge_attr.attr,
&swpout_attr.attr,
&swpout_fallback_attr.attr,
#endif
@@ -669,6 +673,8 @@ static struct attribute *any_stats_attrs[] = {
#ifdef CONFIG_SHMEM
&zswpout_attr.attr,
&swpin_attr.attr,
+ &swpin_fallback_attr.attr,
+ &swpin_fallback_charge_attr.attr,
&swpout_attr.attr,
&swpout_fallback_attr.attr,
#endif
@@ -2003,7 +2009,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
spin_unlock(vmf->ptl);
writable = false;
- if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+ if (!migrate_misplaced_folio(folio, target_nid)) {
flags |= TNF_MIGRATED;
nid = target_nid;
task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
@@ -3284,7 +3290,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
lruvec = folio_lruvec_lock(folio);
- ClearPageHasHWPoisoned(head);
+ folio_clear_has_hwpoisoned(folio);
for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
struct folio *tail;
@@ -4175,20 +4181,21 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
if (input_buf[0] == '/') {
char *tok;
- char *buf = input_buf;
+ char *tok_buf = input_buf;
char file_path[MAX_INPUT_BUF_SZ];
pgoff_t off_start = 0, off_end = 0;
size_t input_len = strlen(input_buf);
- tok = strsep(&buf, ",");
- if (tok && buf) {
+ tok = strsep(&tok_buf, ",");
+ if (tok && tok_buf) {
strscpy(file_path, tok);
} else {
ret = -EINVAL;
goto out;
}
- ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
+ ret = sscanf(tok_buf, "0x%lx,0x%lx,%d", &off_start,
+ &off_end, &new_order);
if (ret != 2 && ret != 3) {
ret = -EINVAL;
goto out;
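An illustrative userspace helper, not from the patch, matching the input format the fixed sscanf() above parses: "<file path>,0x<off_start>,0x<off_end>[,<new_order>]", written to the long-standing <debugfs>/split_huge_pages interface.

#include <stdio.h>

static int example_split_file_thps(const char *path, unsigned long off_start,
				   unsigned long off_end, int new_order)
{
	FILE *f = fopen("/sys/kernel/debug/split_huge_pages", "w");
	int ret;

	if (!f)
		return -1;
	ret = fprintf(f, "%s,0x%lx,0x%lx,%d", path, off_start, off_end,
		      new_order);
	if (fclose(f) || ret < 0)
		return -1;
	return 0;
}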
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eaaec19caa7c..65068671e460 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -48,6 +48,7 @@
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
+#include <linux/page-isolation.h>
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
@@ -1246,69 +1247,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
hugetlb_dup_vma_private(vma);
}
-/* Returns true if the VMA has associated reserve pages */
-static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
-{
- if (vma->vm_flags & VM_NORESERVE) {
- /*
- * This address is already reserved by other process(chg == 0),
- * so, we should decrement reserved count. Without decrementing,
- * reserve count remains after releasing inode, because this
- * allocated page will go into page cache and is regarded as
- * coming from reserved pool in releasing step. Currently, we
- * don't have any other solution to deal with this situation
- * properly, so add work-around here.
- */
- if (vma->vm_flags & VM_MAYSHARE && chg == 0)
- return true;
- else
- return false;
- }
-
- /* Shared mappings always use reserves */
- if (vma->vm_flags & VM_MAYSHARE) {
- /*
- * We know VM_NORESERVE is not set. Therefore, there SHOULD
- * be a region map for all pages. The only situation where
- * there is no region map is if a hole was punched via
- * fallocate. In this case, there really are no reserves to
- * use. This situation is indicated if chg != 0.
- */
- if (chg)
- return false;
- else
- return true;
- }
-
- /*
- * Only the process that called mmap() has reserves for
- * private mappings.
- */
- if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
- /*
- * Like the shared case above, a hole punch or truncate
- * could have been performed on the private mapping.
- * Examine the value of chg to determine if reserves
- * actually exist or were previously consumed.
- * Very Subtle - The value of chg comes from a previous
- * call to vma_needs_reserves(). The reserve map for
- * private mappings has different (opposite) semantics
- * than that of shared mappings. vma_needs_reserves()
- * has already taken this difference in semantics into
- * account. Therefore, the meaning of chg is the same
- * as in the shared case above. Code could easily be
- * combined, but keeping it separate draws attention to
- * subtle differences.
- */
- if (chg)
- return false;
- else
- return true;
- }
-
- return false;
-}
-
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
int nid = folio_nid(folio);
@@ -1336,6 +1274,9 @@ static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
if (folio_test_hwpoison(folio))
continue;
+ if (is_migrate_isolate_page(&folio->page))
+ continue;
+
list_move(&folio->lru, &h->hugepage_activelist);
folio_ref_unfreeze(folio, 1);
folio_clear_hugetlb_freed(folio);
@@ -1394,8 +1335,7 @@ static unsigned long available_huge_pages(struct hstate *h)
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma,
- unsigned long address, int avoid_reserve,
- long chg)
+ unsigned long address, long gbl_chg)
{
struct folio *folio = NULL;
struct mempolicy *mpol;
@@ -1404,15 +1344,10 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
int nid;
/*
- * A child process with MAP_PRIVATE mappings created by their parent
- * have no page reserves. This check ensures that reservations are
- * not "stolen". The child may still get SIGKILLed
+ * gbl_chg==1 means the allocation requires a new page that was not
+ * reserved before. Make sure there's at least one free page.
*/
- if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
- goto err;
-
- /* If reserves cannot be used, ensure enough pages are in the pool */
- if (avoid_reserve && !available_huge_pages(h))
+ if (gbl_chg && !available_huge_pages(h))
goto err;
gfp_mask = htlb_alloc_mask(h);
@@ -1430,11 +1365,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
nid, nodemask);
- if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
- folio_set_hugetlb_restore_reserve(folio);
- h->resv_huge_pages--;
- }
-
mpol_cond_put(mpol);
return folio;
@@ -2463,7 +2393,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
long needed, allocated;
bool alloc_ok = true;
int node;
- nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
+ nodemask_t *mbind_nodemask, alloc_nodemask;
+
+ mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
+ if (mbind_nodemask)
+ nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
+ else
+ alloc_nodemask = cpuset_current_mems_allowed;
lockdep_assert_held(&hugetlb_lock);
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
@@ -2479,8 +2415,16 @@ retry:
spin_unlock_irq(&hugetlb_lock);
for (i = 0; i < needed; i++) {
folio = NULL;
- for_each_node_mask(node, cpuset_current_mems_allowed) {
- if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) {
+
+ /* Prioritize current node */
+ if (node_isset(numa_mem_id(), alloc_nodemask))
+ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+ numa_mem_id(), NULL);
+
+ if (!folio) {
+ for_each_node_mask(node, alloc_nodemask) {
+ if (node == numa_mem_id())
+ continue;
folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
node, NULL);
if (folio)
@@ -2868,7 +2812,7 @@ retry:
* Fail with -EBUSY if not possible.
*/
spin_unlock_irq(&hugetlb_lock);
- isolated = isolate_hugetlb(old_folio, list);
+ isolated = folio_isolate_hugetlb(old_folio, list);
ret = isolated ? 0 : -EBUSY;
spin_lock_irq(&hugetlb_lock);
goto free_new;
@@ -2953,7 +2897,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
if (hstate_is_gigantic(h))
return -ENOMEM;
- if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
+ if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
ret = 0;
else if (!folio_ref_count(folio))
ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -2961,69 +2905,129 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
return ret;
}
+/*
+ * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
+ * range with new folios.
+ * @start_pfn: start pfn of the given pfn range
+ * @end_pfn: end pfn of the given pfn range
+ * Returns 0 on success, otherwise a negative error code.
+ */
+int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct hstate *h;
+ struct folio *folio;
+ int ret = 0;
+
+ LIST_HEAD(isolate_list);
+
+ while (start_pfn < end_pfn) {
+ folio = pfn_folio(start_pfn);
+ if (folio_test_hugetlb(folio)) {
+ h = folio_hstate(folio);
+ } else {
+ start_pfn++;
+ continue;
+ }
+
+ if (!folio_ref_count(folio)) {
+ ret = alloc_and_dissolve_hugetlb_folio(h, folio,
+ &isolate_list);
+ if (ret)
+ break;
+
+ putback_movable_pages(&isolate_list);
+ }
+ start_pfn++;
+ }
+
+ return ret;
+}
+
+typedef enum {
+ /*
+ * For either 0/1: we checked the per-vma resv map, and one resv
+ * count can either be reused (0), or an extra one is needed (1).
+ */
+ MAP_CHG_REUSE = 0,
+ MAP_CHG_NEEDED = 1,
+ /*
+ * The per-vma resv count cannot be used, hence a new resv
+ * count is enforced.
+ *
+ * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
+ * that currently vma_needs_reservation() has an unwanted side
+ * effect of requiring either end() or commit() to complete the
+ * transaction. Hence it needs to be differentiated from NEEDED.
+ */
+ MAP_CHG_ENFORCED = 2,
+} map_chg_state;
+
+/*
+ * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
+ * faults of hugetlb private mappings on top of a non-page-cache folio (in
+ * which case even if there's a private vma resv map it won't cover such
+ * an allocation). New call sites should (probably) never set it to true!!
+ * When it's set, the allocation will bypass all vma level reservations.
+ */
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve)
+ unsigned long addr, bool cow_from_owner)
{
struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma);
struct folio *folio;
- long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
- long gbl_chg;
- int memcg_charge_ret, ret, idx;
+ long retval, gbl_chg;
+ map_chg_state map_chg;
+ int ret, idx;
struct hugetlb_cgroup *h_cg = NULL;
- struct mem_cgroup *memcg;
- bool deferred_reserve;
gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
- memcg = get_mem_cgroup_from_current();
- memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
- if (memcg_charge_ret == -ENOMEM) {
- mem_cgroup_put(memcg);
- return ERR_PTR(-ENOMEM);
- }
-
idx = hstate_index(h);
- /*
- * Examine the region/reserve map to determine if the process
- * has a reservation for the page to be allocated. A return
- * code of zero indicates a reservation exists (no change).
- */
- map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
- if (map_chg < 0) {
- if (!memcg_charge_ret)
- mem_cgroup_cancel_charge(memcg, nr_pages);
- mem_cgroup_put(memcg);
- return ERR_PTR(-ENOMEM);
+
+ /* Do we need a separate per-vma reservation? */
+ if (cow_from_owner) {
+ /*
+ * Special case! Since it's a CoW on top of a reserved
+ * page, the private resv map doesn't count. So it cannot
+ * consume the per-vma resv map even if it's reserved.
+ */
+ map_chg = MAP_CHG_ENFORCED;
+ } else {
+ /*
+ * Examine the region/reserve map to determine if the process
+ * has a reservation for the page to be allocated. A return
+ * code of zero indicates a reservation exists (no change).
+ */
+ retval = vma_needs_reservation(h, vma, addr);
+ if (retval < 0)
+ return ERR_PTR(-ENOMEM);
+ map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
}
/*
+ * Do we need a separate global reservation?
+ *
* Processes that did not create the mapping will have no
* reserves as indicated by the region/reserve map. Check
* that the allocation will not exceed the subpool limit.
- * Allocations for MAP_NORESERVE mappings also need to be
- * checked against any subpool limit.
+ * Or if it can get one from the pool reservation directly.
*/
- if (map_chg || avoid_reserve) {
+ if (map_chg) {
gbl_chg = hugepage_subpool_get_pages(spool, 1);
if (gbl_chg < 0)
goto out_end_reservation;
-
+ } else {
/*
- * Even though there was no reservation in the region/reserve
- * map, there could be reservations associated with the
- * subpool that can be used. This would be indicated if the
- * return value of hugepage_subpool_get_pages() is zero.
- * However, if avoid_reserve is specified we still avoid even
- * the subpool reservations.
+ * If we have the vma reservation ready, no need for extra
+ * global reservation.
*/
- if (avoid_reserve)
- gbl_chg = 1;
+ gbl_chg = 0;
}
- /* If this allocation is not consuming a reservation, charge it now.
+ /*
+ * If this allocation is not consuming a per-vma reservation,
+ * charge the hugetlb cgroup now.
*/
- deferred_reserve = map_chg || avoid_reserve;
- if (deferred_reserve) {
+ if (map_chg) {
ret = hugetlb_cgroup_charge_cgroup_rsvd(
idx, pages_per_huge_page(h), &h_cg);
if (ret)
@@ -3040,27 +3044,32 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
* from the global free pool (global change). gbl_chg == 0 indicates
* a reservation exists for the allocation.
*/
- folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+ folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
if (!folio) {
spin_unlock_irq(&hugetlb_lock);
folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
if (!folio)
goto out_uncharge_cgroup;
spin_lock_irq(&hugetlb_lock);
- if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
- folio_set_hugetlb_restore_reserve(folio);
- h->resv_huge_pages--;
- }
list_add(&folio->lru, &h->hugepage_activelist);
folio_ref_unfreeze(folio, 1);
/* Fall through */
}
+ /*
+ * Whether dequeued or buddy-allocated, the folio needs a special
+ * mark when it consumes a global reservation.
+ */
+ if (!gbl_chg) {
+ folio_set_hugetlb_restore_reserve(folio);
+ h->resv_huge_pages--;
+ }
+
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
/* If allocation is not consuming a reservation, also store the
* hugetlb_cgroup pointer on the page.
*/
- if (deferred_reserve) {
+ if (map_chg) {
hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
h_cg, folio);
}
@@ -3069,50 +3078,61 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
hugetlb_set_folio_subpool(folio, spool);
- map_commit = vma_commit_reservation(h, vma, addr);
- if (unlikely(map_chg > map_commit)) {
+ if (map_chg != MAP_CHG_ENFORCED) {
+ /* commit() is only needed if the map_chg is not enforced */
+ retval = vma_commit_reservation(h, vma, addr);
/*
+ * Check for possible race conditions. When one happens:
* The page was added to the reservation map between
* vma_needs_reservation and vma_commit_reservation.
* This indicates a race with hugetlb_reserve_pages.
* Adjust for the subpool count incremented above AND
- * in hugetlb_reserve_pages for the same page. Also,
+ * in hugetlb_reserve_pages for the same page. Also,
* the reservation count added in hugetlb_reserve_pages
* no longer applies.
*/
- long rsv_adjust;
+ if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
+ long rsv_adjust;
- rsv_adjust = hugepage_subpool_put_pages(spool, 1);
- hugetlb_acct_memory(h, -rsv_adjust);
- if (deferred_reserve) {
- spin_lock_irq(&hugetlb_lock);
- hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
- pages_per_huge_page(h), folio);
- spin_unlock_irq(&hugetlb_lock);
+ rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+ hugetlb_acct_memory(h, -rsv_adjust);
+ if (map_chg) {
+ spin_lock_irq(&hugetlb_lock);
+ hugetlb_cgroup_uncharge_folio_rsvd(
+ hstate_index(h), pages_per_huge_page(h),
+ folio);
+ spin_unlock_irq(&hugetlb_lock);
+ }
}
}
- if (!memcg_charge_ret)
- mem_cgroup_commit_charge(folio, memcg);
+ ret = mem_cgroup_charge_hugetlb(folio, gfp);
+ /*
+ * Unconditionally increment NR_HUGETLB here. If it turns out that
+ * mem_cgroup_charge_hugetlb failed, then immediately free the page and
+ * decrement NR_HUGETLB.
+ */
lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
- mem_cgroup_put(memcg);
+
+ if (ret == -ENOMEM) {
+ free_huge_folio(folio);
+ return ERR_PTR(-ENOMEM);
+ }
return folio;
out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
- if (deferred_reserve)
+ if (map_chg)
hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
h_cg);
out_subpool_put:
- if (map_chg || avoid_reserve)
+ if (map_chg)
hugepage_subpool_put_pages(spool, 1);
out_end_reservation:
- vma_end_reservation(h, vma, addr);
- if (!memcg_charge_ret)
- mem_cgroup_cancel_charge(memcg, nr_pages);
- mem_cgroup_put(memcg);
+ if (map_chg != MAP_CHG_ENFORCED)
+ vma_end_reservation(h, vma, addr);
return ERR_PTR(-ENOSPC);
}
@@ -3289,7 +3309,7 @@ static void __init gather_bootmem_prealloc(void)
.thread_fn = gather_bootmem_prealloc_parallel,
.fn_arg = NULL,
.start = 0,
- .size = num_node_state(N_MEMORY),
+ .size = nr_node_ids,
.align = 1,
.min_chunk = 1,
.max_threads = num_node_state(N_MEMORY),
@@ -3806,13 +3826,15 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
struct page *page = folio_page(folio, i);
+ /* Careful: see __split_huge_page_tail() */
+ struct folio *new_folio = (struct folio *)page;
- page->mapping = NULL;
clear_compound_head(page);
prep_compound_page(page, dst->order);
- init_new_hugetlb_folio(dst, page_folio(page));
- list_add(&page->lru, &dst_list);
+ new_folio->mapping = NULL;
+ init_new_hugetlb_folio(dst, new_folio);
+ list_add(&new_folio->lru, &dst_list);
}
}
@@ -4845,7 +4867,7 @@ out:
return ret;
}
-static struct ctl_table hugetlb_table[] = {
+static const struct ctl_table hugetlb_table[] = {
{
.procname = "nr_hugepages",
.data = NULL,
@@ -5141,12 +5163,12 @@ const struct vm_operations_struct hugetlb_vm_ops = {
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
- int writable)
+ bool try_mkwrite)
{
pte_t entry;
unsigned int shift = huge_page_shift(hstate_vma(vma));
- if (writable) {
+ if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
vma->vm_page_prot)));
} else {
@@ -5169,6 +5191,13 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
update_mmu_cache(vma, address, ptep);
}
+static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ if (vma->vm_flags & VM_WRITE)
+ set_huge_ptep_writable(vma, address, ptep);
+}
+
bool is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;
@@ -5199,7 +5228,7 @@ static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
struct folio *new_folio, pte_t old, unsigned long sz)
{
- pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
+ pte_t newpte = make_huge_pte(vma, &new_folio->page, true);
__folio_mark_uptodate(new_folio);
hugetlb_add_new_anon_rmap(new_folio, vma, addr);
@@ -5333,7 +5362,7 @@ again:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
/* Do not use reserve as it's private owned */
- new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
+ new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
if (IS_ERR(new_folio)) {
folio_put(pte_folio);
ret = PTR_ERR(new_folio);
@@ -5799,7 +5828,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
struct hstate *h = hstate_vma(vma);
struct folio *old_folio;
struct folio *new_folio;
- int outside_reserve = 0;
+ bool cow_from_owner = 0;
vm_fault_t ret = 0;
struct mmu_notifier_range range;
@@ -5814,13 +5843,6 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
if (!unshare && huge_pte_uffd_wp(pte))
return 0;
- /*
- * hugetlb does not support FOLL_FORCE-style write faults that keep the
- * PTE mapped R/O such as maybe_mkwrite() would do.
- */
- if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
- return VM_FAULT_SIGSEGV;
-
/* Let's take out MAP_SHARED mappings first. */
if (vma->vm_flags & VM_MAYSHARE) {
set_huge_ptep_writable(vma, vmf->address, vmf->pte);
@@ -5849,7 +5871,8 @@ retry_avoidcopy:
SetPageAnonExclusive(&old_folio->page);
}
if (likely(!unshare))
- set_huge_ptep_writable(vma, vmf->address, vmf->pte);
+ set_huge_ptep_maybe_writable(vma, vmf->address,
+ vmf->pte);
delayacct_wpcopy_end();
return 0;
@@ -5868,7 +5891,7 @@ retry_avoidcopy:
*/
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_folio != pagecache_folio)
- outside_reserve = 1;
+ cow_from_owner = true;
folio_get(old_folio);
@@ -5877,7 +5900,7 @@ retry_avoidcopy:
* be acquired again before returning to the caller, as expected.
*/
spin_unlock(vmf->ptl);
- new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
+ new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
if (IS_ERR(new_folio)) {
/*
@@ -5887,7 +5910,7 @@ retry_avoidcopy:
* reliability, unmap the page from child processes. The child
* may get SIGKILLed if it later faults.
*/
- if (outside_reserve) {
+ if (cow_from_owner) {
struct address_space *mapping = vma->vm_file->f_mapping;
pgoff_t idx;
u32 hash;
@@ -6138,7 +6161,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
goto out;
}
- folio = alloc_hugetlb_folio(vma, vmf->address, 0);
+ folio = alloc_hugetlb_folio(vma, vmf->address, false);
if (IS_ERR(folio)) {
/*
* Returning error will result in faulting task being
@@ -6235,8 +6258,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
else
hugetlb_add_file_rmap(folio);
- new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
- && (vma->vm_flags & VM_SHARED)));
+ new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED);
/*
* If this pte was previously wr-protected, keep it wr-protected even
* if populated.
@@ -6568,7 +6590,6 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
spinlock_t *ptl;
int ret = -ENOMEM;
struct folio *folio;
- int writable;
bool folio_in_pagecache = false;
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
@@ -6606,7 +6627,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out;
}
- folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+ folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
if (IS_ERR(folio)) {
ret = -ENOMEM;
goto out;
@@ -6648,7 +6669,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out;
}
- folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+ folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
if (IS_ERR(folio)) {
folio_put(*foliop);
ret = -ENOMEM;
@@ -6722,12 +6743,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
* with wp flag set, don't set pte write bit.
*/
- if (wp_enabled || (is_continue && !vm_shared))
- writable = 0;
- else
- writable = dst_vma->vm_flags & VM_WRITE;
-
- _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
+ _dst_pte = make_huge_pte(dst_vma, &folio->page,
+ !wp_enabled && !(is_continue && !vm_shared));
/*
* Always mark UFFDIO_COPY page dirty; note that this may not be
* extremely important for hugetlbfs for now since swapping is not
@@ -7406,7 +7423,24 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-bool isolate_hugetlb(struct folio *folio, struct list_head *list)
+/**
+ * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
+ * @folio: the folio to isolate
+ * @list: the list to add the folio to on success
+ *
+ * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
+ * isolated/non-migratable, and moving it from the active list to the
+ * given list.
+ *
+ * Isolation will fail if @folio is not an allocated hugetlb folio, or if
+ * it is already isolated/non-migratable.
+ *
+ * On success, an additional folio reference is taken that must be dropped
+ * using folio_putback_hugetlb() to undo the isolation.
+ *
+ * Return: True if isolation worked, otherwise False.
+ */
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
bool ret = true;
@@ -7454,7 +7488,18 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
return ret;
}
-void folio_putback_active_hugetlb(struct folio *folio)
+/**
+ * folio_putback_hugetlb - unisolate a hugetlb folio
+ * @folio: the isolated hugetlb folio
+ *
+ * Putback/un-isolate the hugetlb folio that was previously isolated using
+ * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
+ * back onto the active list.
+ *
+ * Will drop the additional folio reference obtained through
+ * folio_isolate_hugetlb().
+ */
+void folio_putback_hugetlb(struct folio *folio)
{
spin_lock_irq(&hugetlb_lock);
folio_set_hugetlb_migratable(folio);
@@ -7501,6 +7546,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
}
spin_unlock_irq(&hugetlb_lock);
}
+
+ /*
+ * Our old folio is isolated and has "migratable" cleared until it
+ * is put back. As migration succeeded, mark the new folio "migratable"
+ * and add it to the active list.
+ */
+ spin_lock_irq(&hugetlb_lock);
+ folio_set_hugetlb_migratable(new_folio);
+ list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
+ spin_unlock_irq(&hugetlb_lock);
}
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index e716c4671a15..bb9578bd99f9 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -195,24 +195,23 @@ static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
* cannot fail.
*/
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
unsigned int nr_pages;
struct page_counter *counter;
- struct hugetlb_cgroup *page_hcg;
+ struct hugetlb_cgroup *hcg;
struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
- struct folio *folio = page_folio(page);
- page_hcg = hugetlb_cgroup_from_folio(folio);
+ hcg = hugetlb_cgroup_from_folio(folio);
/*
* We can have pages in active list without any cgroup
* ie, hugepage with less than 3 pages. We can safely
* ignore those pages.
*/
- if (!page_hcg || page_hcg != h_cg)
+ if (!hcg || hcg != h_cg)
goto out;
- nr_pages = compound_nr(page);
+ nr_pages = folio_nr_pages(folio);
if (!parent) {
parent = root_h_cgroup;
/* root has no limit */
@@ -235,13 +234,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
struct hstate *h;
- struct page *page;
+ struct folio *folio;
do {
for_each_hstate(h) {
spin_lock_irq(&hugetlb_lock);
- list_for_each_entry(page, &h->hugepage_activelist, lru)
- hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);
+ list_for_each_entry(folio, &h->hugepage_activelist, lru)
+ hugetlb_cgroup_move_parent(hstate_index(h), h_cg, folio);
spin_unlock_irq(&hugetlb_lock);
}
@@ -917,7 +916,6 @@ void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
list_move(&new_folio->lru, &h->hugepage_activelist);
spin_unlock_irq(&hugetlb_lock);
- return;
}
static struct cftype hugetlb_files[] = {
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 57b7f591eee8..7735972add01 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -693,7 +693,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
free_vmemmap_page_list(&vmemmap_pages);
}
-static struct ctl_table hugetlb_vmemmap_sysctls[] = {
+static const struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
.data = &vmemmap_optimize_enabled,
diff --git a/mm/internal.h b/mm/internal.h
index 9826f7dce607..109ef30fee11 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+ gfp_t gfp);
void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
unsigned int order);
@@ -735,15 +737,30 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
extern void prep_compound_page(struct page *page, unsigned int order);
-extern void post_alloc_hook(struct page *page, unsigned int order,
- gfp_t gfp_flags);
+void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
-void free_unref_page(struct page *page, unsigned int order);
+struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
+ nodemask_t *);
+#define __alloc_frozen_pages(...) \
+ alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
+void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
+{
+ return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
+#define alloc_frozen_pages(...) \
+ alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
+
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
@@ -824,10 +841,6 @@ int
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
-int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end,
- int migratetype);
-
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);
@@ -1440,22 +1453,6 @@ void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority);
-#ifdef CONFIG_64BIT
-static inline int can_do_mseal(unsigned long flags)
-{
- if (flags)
- return -EINVAL;
-
- return 0;
-}
-
-#else
-static inline int can_do_mseal(unsigned long flags)
-{
- return -EPERM;
-}
-#endif
-
#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
struct shrinker *shrinker, const char *fmt, va_list ap)
@@ -1530,4 +1527,23 @@ int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
+/* pt_reclaim.c */
+bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
+void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
+ pmd_t pmdval);
+void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
+ struct mmu_gather *tlb);
+
+#ifdef CONFIG_PT_RECLAIM
+bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
+ struct zap_details *details);
+#else
+static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
+ struct zap_details *details)
+{
+ return false;
+}
+#endif /* CONFIG_PT_RECLAIM */
+
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 8b9e348113b1..d54e89f8c3e7 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -524,7 +524,11 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+/*
+ * This function avoids dynamic memory allocations and thus can be called from
+ * contexts that do not allow allocating memory.
+ */
+void kasan_record_aux_stack(void *addr)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
@@ -541,17 +545,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
- return __kasan_record_aux_stack(addr, 0);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index ccd66c7a4081..9a6927394b54 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -263,8 +264,8 @@ void __init kasan_init_hw_tags(void)
pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
kasan_mode_info(),
- kasan_vmalloc_enabled() ? "on" : "off",
- kasan_stack_collection_enabled() ? "on" : "off");
+ str_on_off(kasan_vmalloc_enabled()),
+ str_on_off(kasan_stack_collection_enabled()));
}
#ifdef CONFIG_KASAN_VMALLOC
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b7e4b81421b3..129178be5e64 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -501,18 +501,18 @@ static inline bool kasan_byte_accessible(const void *addr)
/**
* kasan_poison - mark the memory range as inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, must be aligned to KASAN_GRANULE_SIZE
- * @value - value that's written to metadata for the range
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, must be aligned to KASAN_GRANULE_SIZE
+ * @value: value that's written to metadata for the range
+ * @init: whether to initialize the memory range (only for hardware tag-based)
*/
void kasan_poison(const void *addr, size_t size, u8 value, bool init);
/**
* kasan_unpoison - mark the memory range as accessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, can be unaligned
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, can be unaligned
+ * @init: whether to initialize the memory range (only for hardware tag-based)
*
* For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
* marking the range.
@@ -530,8 +530,8 @@ bool kasan_byte_accessible(const void *addr);
/**
* kasan_poison_last_granule - mark the last granule of the memory range as
* inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size
*
* This function is only available for the generic mode, as it's the only mode
* that has partially poisoned memory granules.
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 99d4ff0ed57a..59d673400085 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -47,8 +47,8 @@ static struct {
* Some tests use these global variables to store return values from function
* calls that could otherwise be eliminated by the compiler as dead code.
*/
-void *kasan_ptr_result;
-int kasan_int_result;
+static volatile void *kasan_ptr_result;
+static volatile int kasan_int_result;
/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 220b5d4c6876..b9382b5b6a37 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
@@ -45,7 +46,7 @@ void __init kasan_init_sw_tags(void)
kasan_init_tags();
pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
- kasan_stack_collection_enabled() ? "on" : "off");
+ str_on_off(kasan_stack_collection_enabled()));
}
/*
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 67fc321db79b..102048821c22 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -21,6 +21,7 @@
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
+#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
@@ -1084,6 +1085,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
* properties (e.g. reside in DMAable memory).
*/
if ((flags & GFP_ZONEMASK) ||
+ ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
(s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
return NULL;
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index f65fb182466d..00034e37bc9f 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>
@@ -88,7 +89,7 @@ struct expect_report {
static const char *get_access_type(const struct expect_report *r)
{
- return r->is_write ? "write" : "read";
+ return str_write_read(r->is_write);
}
/* Check observed report matches information in @r. */
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 6370c5207d1a..10e6802a2edf 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -16,6 +16,7 @@
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/sched/clock.h>
#include <trace/events/error_report.h>
@@ -184,7 +185,7 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show,
static const char *get_access_type(bool is_write)
{
- return is_write ? "write" : "read";
+ return str_write_read(is_write);
}
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index bad1e130eda8..5f0be134141e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -948,17 +948,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_SUCCEED;
}
-static int find_pmd_or_thp_or_none(struct mm_struct *mm,
- unsigned long address,
- pmd_t **pmd)
+static inline int check_pmd_state(pmd_t *pmd)
{
- pmd_t pmde;
+ pmd_t pmde = pmdp_get_lockless(pmd);
- *pmd = mm_find_pmd(mm, address);
- if (!*pmd)
- return SCAN_PMD_NULL;
-
- pmde = pmdp_get_lockless(*pmd);
if (pmd_none(pmde))
return SCAN_PMD_NONE;
if (!pmd_present(pmde))
@@ -972,6 +965,17 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
return SCAN_SUCCEED;
}
+static int find_pmd_or_thp_or_none(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t **pmd)
+{
+ *pmd = mm_find_pmd(mm, address);
+ if (!*pmd)
+ return SCAN_PMD_NULL;
+
+ return check_pmd_state(*pmd);
+}
+
static int check_pmd_still_valid(struct mm_struct *mm,
unsigned long address,
pmd_t *pmd)
@@ -1721,7 +1725,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
pmd_t *pmd, pgt_pmd;
spinlock_t *pml;
spinlock_t *ptl;
- bool skipped_uffd = false;
+ bool success = false;
/*
* Check vma->anon_vma to exclude MAP_PRIVATE mappings that
@@ -1758,6 +1762,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
mmu_notifier_invalidate_range_start(&range);
pml = pmd_lock(mm, pmd);
+ /*
+ * The lock of new_folio is still held, so we will be blocked in
+ * the page fault path, which prevents the pte entries from
+ * being set again. So even though the old empty PTE page may be
+ * concurrently freed and a new PTE page is filled into the pmd
+ * entry, it is still empty and can be removed.
+ *
+ * So here we only need to recheck if the state of the pmd entry
+ * still meets our requirements, rather than checking pmd_same()
+ * like elsewhere.
+ */
+ if (check_pmd_state(pmd) != SCAN_SUCCEED)
+ goto drop_pml;
ptl = pte_lockptr(mm, pmd);
if (ptl != pml)
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
@@ -1771,20 +1788,20 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* repeating the anon_vma check protects from one category,
* and repeating the userfaultfd_wp() check from another.
*/
- if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
- skipped_uffd = true;
- } else {
+ if (likely(!vma->anon_vma && !userfaultfd_wp(vma))) {
pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
pmdp_get_lockless_sync();
+ success = true;
}
if (ptl != pml)
spin_unlock(ptl);
+drop_pml:
spin_unlock(pml);
mmu_notifier_invalidate_range_end(&range);
- if (!skipped_uffd) {
+ if (success) {
mm_dec_nr_ptes(mm);
page_table_check_pte_clear_range(mm, addr, pgt_pmd);
pte_free_defer(mm, pmd_pgtable(pgt_pmd));
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 820ba3b5cbfc..c6ed68604136 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1689,7 +1689,7 @@ static void kmemleak_scan(void)
unsigned long phys = object->pointer;
if (PHYS_PFN(phys) < min_low_pfn ||
- PHYS_PFN(phys + object->size) >= max_low_pfn)
+ PHYS_PFN(phys + object->size) > max_low_pfn)
__paint_it(object, KMEMLEAK_BLACK);
}
@@ -1855,7 +1855,7 @@ static int kmemleak_scan_thread(void *arg)
* Wait before the first scan to allow the system to fully initialize.
*/
if (first_run) {
- signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
+ signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
first_run = 0;
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -2241,7 +2241,7 @@ void __init kmemleak_init(void)
return;
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
- jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
+ jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);
object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index 9c58f081d84f..1bb505a08415 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -280,12 +280,8 @@ void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
start = (void *)PAGE_ALIGN_DOWN((u64)start);
size = PAGE_ALIGN((u64)end - (u64)start);
- shadow = memblock_alloc(size, PAGE_SIZE);
- origin = memblock_alloc(size, PAGE_SIZE);
-
- if (!shadow || !origin)
- panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
- __func__, size);
+ shadow = memblock_alloc_or_panic(size, PAGE_SIZE);
+ origin = memblock_alloc_or_panic(size, PAGE_SIZE);
for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
page = virt_to_page_or_null((char *)start + addr);
diff --git a/mm/ksm.c b/mm/ksm.c
index 31a9bc365437..8be2b144fefd 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3262,6 +3262,25 @@ static void wait_while_offlining(void)
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_PROC_FS
+/*
+ * The process is mergeable only if at least one of its VMAs is
+ * currently applicable to KSM.
+ *
+ * The mmap lock must be held in read mode.
+ */
+bool ksm_process_mergeable(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
+ mmap_assert_locked(mm);
+ VMA_ITERATOR(vmi, mm, 0);
+ for_each_vma(vmi, vma)
+ if (vma->vm_flags & VM_MERGEABLE)
+ return true;
+
+ return false;
+}
+
long ksm_process_profit(struct mm_struct *mm)
{
return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
diff --git a/mm/madvise.c b/mm/madvise.c
index 0ceae57da7da..49f3a75046f6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -851,7 +851,12 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- zap_page_range_single(vma, start, end - start, NULL);
+ struct zap_details details = {
+ .reclaim_pt = true,
+ .even_cows = true,
+ };
+
+ zap_page_range_single(vma, start, end - start, &details);
return 0;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 095c18b5c430..95af35fd1389 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1692,6 +1692,26 @@ void * __init memblock_alloc_try_nid(
}
/**
+ * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @func: caller func name
+ *
+ * This function attempts to allocate memory using memblock_alloc,
+ * and in case of failure, it calls panic() with a formatted message.
+ * This function should not be used directly; use the macro memblock_alloc_or_panic instead.
+ */
+void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
+ const char *func)
+{
+ void *addr = memblock_alloc(size, align);
+
+ if (unlikely(!addr))
+ panic("%s: Failed to allocate %pap bytes\n", func, &size);
+ return addr;
+}
+
+/**
* memblock_free_late - free pages directly to buddy allocator
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index a071fa43d479..2be6b9112808 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -899,7 +899,7 @@ static void memcg_event_remove(struct work_struct *work)
*
* Called with wqh->lock held and interrupts disabled.
*/
-static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
+static int memcg_event_wake(wait_queue_entry_t *wait, unsigned int mode,
int sync, void *key)
{
struct mem_cgroup_event *event =
@@ -1040,13 +1040,13 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
} else if (!strcmp(name, "memory.oom_control")) {
pr_warn_once("oom_control is deprecated and will be removed. "
"Please report your usecase to linux-mm-@kvack.org"
- " if you depend on this functionality. \n");
+ " if you depend on this functionality.\n");
event->register_event = mem_cgroup_oom_register_event;
event->unregister_event = mem_cgroup_oom_unregister_event;
} else if (!strcmp(name, "memory.pressure_level")) {
pr_warn_once("pressure_level is deprecated and will be removed. "
"Please report your usecase to linux-mm-@kvack.org "
- "if you depend on this functionality. \n");
+ "if you depend on this functionality.\n");
event->register_event = vmpressure_register_event;
event->unregister_event = vmpressure_unregister_event;
} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
@@ -1134,8 +1134,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
failed = iter;
mem_cgroup_iter_break(memcg, iter);
break;
- } else
- iter->oom_lock = true;
+ }
+ iter->oom_lock = true;
}
if (failed) {
@@ -1202,7 +1202,7 @@ struct oom_wait_info {
};
static int memcg_oom_wake_function(wait_queue_entry_t *wait,
- unsigned mode, int sync, void *arg)
+ unsigned int mode, int sync, void *arg)
{
struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
struct mem_cgroup *oom_wait_memcg;
@@ -1644,7 +1644,7 @@ static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
unsigned long nr = 0;
enum lru_list lru;
- VM_BUG_ON((unsigned)nid >= nr_node_ids);
+ VM_BUG_ON((unsigned int)nid >= nr_node_ids);
for_each_lru(lru) {
if (!(BIT(lru) & lru_mask))
@@ -1881,7 +1881,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
pr_warn_once("oom_control is deprecated and will be removed. "
"Please report your usecase to linux-mm-@kvack.org if you "
- "depend on this functionality. \n");
+ "depend on this functionality.\n");
/* cannot set to root cgroup and only 0 and 1 are allowed */
if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7b3503d12aaf..46f8b372d212 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1161,6 +1161,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
{
struct mem_cgroup *iter;
int ret = 0;
+ int i = 0;
BUG_ON(mem_cgroup_is_root(memcg));
@@ -1169,8 +1170,12 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
struct task_struct *task;
css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
- while (!ret && (task = css_task_iter_next(&it)))
+ while (!ret && (task = css_task_iter_next(&it))) {
+ /* Avoid potential softlockup warning */
+ if ((++i & 1023) == 0)
+ cond_resched();
ret = fn(task, arg);
+ }
css_task_iter_end(&it);
if (ret) {
mem_cgroup_iter_break(memcg, iter);
@@ -1448,6 +1453,18 @@ unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
memcg_page_state_output_unit(item);
}
+#ifdef CONFIG_HUGETLB_PAGE
+static bool memcg_accounts_hugetlb(void)
+{
+ return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
+}
+#else /* CONFIG_HUGETLB_PAGE */
+static bool memcg_accounts_hugetlb(void)
+{
+ return false;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
int i;
@@ -1469,7 +1486,7 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
#ifdef CONFIG_HUGETLB_PAGE
if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
- !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
+ !memcg_accounts_hugetlb())
continue;
#endif
size = memcg_page_state_output(memcg, memory_stats[i].idx);
@@ -2371,21 +2388,6 @@ done_restock:
return 0;
}
-/**
- * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
- * @memcg: memcg previously charged.
- * @nr_pages: number of pages previously charged.
- */
-void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
-{
- if (mem_cgroup_is_root(memcg))
- return;
-
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
-}
-
static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
@@ -2399,18 +2401,6 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
folio->memcg_data = (unsigned long)memcg;
}
-/**
- * mem_cgroup_commit_charge - commit a previously successful try_charge().
- * @folio: folio to commit the charge to.
- * @memcg: memcg previously charged.
- */
-void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
-{
- css_get(&memcg->css);
- commit_charge(folio, memcg);
- memcg1_commit_charge(folio, memcg);
-}
-
static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
struct pglist_data *pgdat,
enum node_stat_item idx, int nr)
@@ -4498,7 +4488,9 @@ static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
if (ret)
goto out;
- mem_cgroup_commit_charge(folio, memcg);
+ css_get(&memcg->css);
+ commit_charge(folio, memcg);
+ memcg1_commit_charge(folio, memcg);
out:
return ret;
}
@@ -4516,38 +4508,37 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
}
/**
- * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
- * @memcg: memcg to charge.
- * @gfp: reclaim mode.
- * @nr_pages: number of pages to charge.
- *
- * This function is called when allocating a huge page folio to determine if
- * the memcg has the capacity for it. It does not commit the charge yet,
- * as the hugetlb folio itself has not been obtained from the hugetlb pool.
+ * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
+ * @folio: folio being charged
+ * @gfp: reclaim mode
*
- * Once we have obtained the hugetlb folio, we can call
- * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
- * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
- * of try_charge().
+ * This function is called when allocating a huge page folio, after the page has
+ * already been obtained and charged to the appropriate hugetlb cgroup
+ * controller (if it is enabled).
*
- * Returns 0 on success. Otherwise, an error code is returned.
+ * Returns -ENOMEM if the memcg is already full.
+ * Returns 0 if the charge was successful or if charging was skipped.
*/
-int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
- long nr_pages)
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
+ struct mem_cgroup *memcg = get_mem_cgroup_from_current();
+ int ret = 0;
+
/*
- * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
- * but do not attempt to commit charge later (or cancel on error) either.
+ * Even if memcg does not account for hugetlb, we still want to update
+ * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
+ * charging the memcg.
*/
- if (mem_cgroup_disabled() || !memcg ||
- !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
- !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
- return -EOPNOTSUPP;
+ if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
+ !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ goto out;
- if (try_charge(memcg, gfp, nr_pages))
- return -ENOMEM;
+ if (charge_memcg(folio, memcg, gfp))
+ ret = -ENOMEM;
- return 0;
+out:
+ mem_cgroup_put(memcg);
+ return ret;
}
/**
@@ -4609,7 +4600,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
* correspond 1:1 to page and swap slot lifetimes: we charge the
* page to memory here, and uncharge swap when the slot is freed.
*/
- if (!mem_cgroup_disabled() && do_memsw_account()) {
+ if (do_memsw_account()) {
/*
* The swap entry might not get freed for a long time,
* let's not wait for it. The page already received a
@@ -4973,7 +4964,6 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
- unsigned short oldid;
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
@@ -5000,11 +4990,10 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
/* Get references for the tail pages, too */
if (nr_entries > 1)
mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
- oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
- nr_entries);
- VM_BUG_ON_FOLIO(oldid, folio);
mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
+ swap_cgroup_record(folio, entry);
+
folio_unqueue_deferred_split(folio);
folio->memcg_data = 0;
@@ -5035,7 +5024,6 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
unsigned int nr_pages = folio_nr_pages(folio);
struct page_counter *counter;
struct mem_cgroup *memcg;
- unsigned short oldid;
if (do_memsw_account())
return 0;
@@ -5064,10 +5052,10 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
/* Get references for the tail pages, too */
if (nr_pages > 1)
mem_cgroup_id_get_many(memcg, nr_pages - 1);
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
- VM_BUG_ON_FOLIO(oldid, folio);
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
+ swap_cgroup_record(folio, entry);
+
return 0;
}
@@ -5081,7 +5069,7 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
struct mem_cgroup *memcg;
unsigned short id;
- id = swap_cgroup_record(entry, 0, nr_pages);
+ id = swap_cgroup_clear(entry, nr_pages);
rcu_read_lock();
memcg = mem_cgroup_from_id(id);
if (memcg) {
diff --git a/mm/memfd.c b/mm/memfd.c
index 35a370d75c9a..37f7be57c2f5 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
return error;
}
-unsigned int *memfd_file_seals_ptr(struct file *file)
+static unsigned int *memfd_file_seals_ptr(struct file *file)
{
if (shmem_file(file))
return &SHMEM_I(file_inode(file))->seals;
@@ -327,15 +327,51 @@ static int check_sysctl_memfd_noexec(unsigned int *flags)
return 0;
}
-SYSCALL_DEFINE2(memfd_create,
- const char __user *, uname,
- unsigned int, flags)
+static inline bool is_write_sealed(unsigned int seals)
{
- unsigned int *file_seals;
- struct file *file;
- int fd, error;
- char *name;
- long len;
+ return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
+}
+
+static int check_write_seal(unsigned long *vm_flags_ptr)
+{
+ unsigned long vm_flags = *vm_flags_ptr;
+ unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE);
+
+ /* If a private mapping then writability is irrelevant. */
+ if (!(mask & VM_SHARED))
+ return 0;
+
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * write seals are active.
+ */
+ if (mask & VM_WRITE)
+ return -EPERM;
+
+ /*
+ * This is a read-only mapping, disallow mprotect() from making a
+ * write-sealed mapping writable in future.
+ */
+ *vm_flags_ptr &= ~VM_MAYWRITE;
+
+ return 0;
+}
+
+int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr)
+{
+ int err = 0;
+ unsigned int *seals_ptr = memfd_file_seals_ptr(file);
+ unsigned int seals = seals_ptr ? *seals_ptr : 0;
+
+ if (is_write_sealed(seals))
+ err = check_write_seal(vm_flags_ptr);
+
+ return err;
+}
+
+static int sanitize_flags(unsigned int *flags_ptr)
+{
+ unsigned int flags = *flags_ptr;
if (!(flags & MFD_HUGETLB)) {
if (flags & ~(unsigned int)MFD_ALL_FLAGS)
@@ -351,50 +387,52 @@ SYSCALL_DEFINE2(memfd_create,
if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL))
return -EINVAL;
- error = check_sysctl_memfd_noexec(&flags);
- if (error < 0)
- return error;
+ return check_sysctl_memfd_noexec(flags_ptr);
+}
- /* length includes terminating zero */
- len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
- if (len <= 0)
- return -EFAULT;
- if (len > MFD_NAME_MAX_LEN + 1)
- return -EINVAL;
+static char *alloc_name(const char __user *uname)
+{
+ int error;
+ char *name;
+ long len;
- name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
+ name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
if (!name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
strcpy(name, MFD_NAME_PREFIX);
- if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
+ /* returned length does not include terminating zero */
+ len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1);
+ if (len < 0) {
error = -EFAULT;
goto err_name;
- }
-
- /* terminating-zero may have changed after strnlen_user() returned */
- if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
- error = -EFAULT;
+ } else if (len > MFD_NAME_MAX_LEN) {
+ error = -EINVAL;
goto err_name;
}
- fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
- if (fd < 0) {
- error = fd;
- goto err_name;
- }
+ return name;
+
+err_name:
+ kfree(name);
+ return ERR_PTR(error);
+}
+
+static struct file *alloc_file(const char *name, unsigned int flags)
+{
+ unsigned int *file_seals;
+ struct file *file;
if (flags & MFD_HUGETLB) {
file = hugetlb_file_setup(name, 0, VM_NORESERVE,
HUGETLB_ANONHUGE_INODE,
(flags >> MFD_HUGE_SHIFT) &
MFD_HUGE_MASK);
- } else
+ } else {
file = shmem_file_setup(name, 0, VM_NORESERVE);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_fd;
}
+ if (IS_ERR(file))
+ return file;
file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
file->f_flags |= O_LARGEFILE;
@@ -414,6 +452,37 @@ SYSCALL_DEFINE2(memfd_create,
*file_seals &= ~F_SEAL_SEAL;
}
+ return file;
+}
+
+SYSCALL_DEFINE2(memfd_create,
+ const char __user *, uname,
+ unsigned int, flags)
+{
+ struct file *file;
+ int fd, error;
+ char *name;
+
+ error = sanitize_flags(&flags);
+ if (error < 0)
+ return error;
+
+ name = alloc_name(uname);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
+ if (fd < 0) {
+ error = fd;
+ goto err_name;
+ }
+
+ file = alloc_file(name, flags);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto err_fd;
+ }
+
fd_install(fd, file);
kfree(name);
return fd;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a7b8ccd29b6f..995a15eb67e2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -124,7 +124,7 @@ const struct attribute_group memory_failure_attr_group = {
.attrs = memory_failure_attr,
};
-static struct ctl_table memory_failure_table[] = {
+static const struct ctl_table memory_failure_table[] = {
{
.procname = "memory_failure_early_kill",
.data = &sysctl_memory_failure_early_kill,
diff --git a/mm/memory.c b/mm/memory.c
index 398c031be9ba..539c0f7c6d54 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -76,6 +76,7 @@
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
+#include <linux/fsnotify.h>
#include <trace/events/kmem.h>
@@ -1436,7 +1437,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
static inline bool should_zap_cows(struct zap_details *details)
{
/* By default, zap all pages */
- if (!details)
+ if (!details || details->reclaim_pt)
return true;
/* Or, we zap COWed pages only if the caller wants to */
@@ -1466,34 +1467,42 @@ static inline bool zap_drop_markers(struct zap_details *details)
/*
* This function makes sure that we'll replace the none pte with an uffd-wp
* swap special pte marker when necessary. Must be with the pgtable lock held.
+ *
+ * Returns true if any uffd-wp pte was installed, false otherwise.
*/
-static inline void
+static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte, int nr,
struct zap_details *details, pte_t pteval)
{
+ bool was_installed = false;
+
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
/* Zap on anonymous always means dropping everything */
if (vma_is_anonymous(vma))
- return;
+ return false;
if (zap_drop_markers(details))
- return;
+ return false;
for (;;) {
/* the PFN in the PTE is irrelevant. */
- pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
+ if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
+ was_installed = true;
if (--nr == 0)
break;
pte++;
addr += PAGE_SIZE;
}
+#endif
+ return was_installed;
}
static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
struct vm_area_struct *vma, struct folio *folio,
struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
unsigned long addr, struct zap_details *details, int *rss,
- bool *force_flush, bool *force_break)
+ bool *force_flush, bool *force_break, bool *any_skipped)
{
struct mm_struct *mm = tlb->mm;
bool delay_rmap = false;
@@ -1519,8 +1528,8 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entries(tlb, pte, nr, addr);
if (unlikely(userfaultfd_pte_wp(vma, ptent)))
- zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
- ptent);
+ *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
+ nr, details, ptent);
if (!delay_rmap) {
folio_remove_rmap_ptes(folio, page, nr, vma);
@@ -1544,7 +1553,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
unsigned int max_nr, unsigned long addr,
struct zap_details *details, int *rss, bool *force_flush,
- bool *force_break)
+ bool *force_break, bool *any_skipped)
{
const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
struct mm_struct *mm = tlb->mm;
@@ -1559,15 +1568,17 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
if (userfaultfd_pte_wp(vma, ptent))
- zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
- details, ptent);
+ *any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
+ pte, 1, details, ptent);
ksm_might_unmap_zero_page(mm, ptent);
return 1;
}
folio = page_folio(page);
- if (unlikely(!should_zap_folio(details, folio)))
+ if (unlikely(!should_zap_folio(details, folio))) {
+ *any_skipped = true;
return 1;
+ }
/*
* Make sure that the common "small folio" case is as fast as possible
@@ -1579,14 +1590,121 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
addr, details, rss, force_flush,
- force_break);
+ force_break, any_skipped);
return nr;
}
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
- details, rss, force_flush, force_break);
+ details, rss, force_flush, force_break, any_skipped);
return 1;
}
+static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
+ unsigned int max_nr, unsigned long addr,
+ struct zap_details *details, int *rss, bool *any_skipped)
+{
+ swp_entry_t entry;
+ int nr = 1;
+
+ *any_skipped = true;
+ entry = pte_to_swp_entry(ptent);
+ if (is_device_private_entry(entry) ||
+ is_device_exclusive_entry(entry)) {
+ struct page *page = pfn_swap_entry_to_page(entry);
+ struct folio *folio = page_folio(page);
+
+ if (unlikely(!should_zap_folio(details, folio)))
+ return 1;
+ /*
+ * Both device private/exclusive mappings should only
+ * work with anonymous pages so far, so we don't need to
+ * consider the uffd-wp bit when zapping. For more information,
+ * see zap_install_uffd_wp_if_needed().
+ */
+ WARN_ON_ONCE(!vma_is_anonymous(vma));
+ rss[mm_counter(folio)]--;
+ if (is_device_private_entry(entry))
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
+ } else if (!non_swap_entry(entry)) {
+ /* Genuine swap entries, hence private anon pages */
+ if (!should_zap_cows(details))
+ return 1;
+
+ nr = swap_pte_batch(pte, max_nr, ptent);
+ rss[MM_SWAPENTS] -= nr;
+ free_swap_and_cache_nr(entry, nr);
+ } else if (is_migration_entry(entry)) {
+ struct folio *folio = pfn_swap_entry_folio(entry);
+
+ if (!should_zap_folio(details, folio))
+ return 1;
+ rss[mm_counter(folio)]--;
+ } else if (pte_marker_entry_uffd_wp(entry)) {
+ /*
+ * For anon: always drop the marker; for file: only
+ * drop the marker if explicitly requested.
+ */
+ if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
+ return 1;
+ } else if (is_guard_swp_entry(entry)) {
+ /*
+ * Ordinary zapping should not remove guard PTE
+ * markers. Only do so if we should remove PTE markers
+ * in general.
+ */
+ if (!zap_drop_markers(details))
+ return 1;
+ } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
+ if (!should_zap_cows(details))
+ return 1;
+ } else {
+ /* We should have covered all the swap entry types */
+ pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
+ WARN_ON_ONCE(1);
+ }
+ clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
+ *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
+
+ return nr;
+}
+
+static inline int do_zap_pte_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pte_t *pte,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details, int *rss,
+ bool *force_flush, bool *force_break,
+ bool *any_skipped)
+{
+ pte_t ptent = ptep_get(pte);
+ int max_nr = (end - addr) / PAGE_SIZE;
+ int nr = 0;
+
+ /* Skip all consecutive none ptes */
+ if (pte_none(ptent)) {
+ for (nr = 1; nr < max_nr; nr++) {
+ ptent = ptep_get(pte + nr);
+ if (!pte_none(ptent))
+ break;
+ }
+ max_nr -= nr;
+ if (!max_nr)
+ return nr;
+ pte += nr;
+ addr += nr * PAGE_SIZE;
+ }
+
+ if (pte_present(ptent))
+ nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
+ details, rss, force_flush, force_break,
+ any_skipped);
+ else
+ nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
+ details, rss, any_skipped);
+
+ return nr;
+}
+
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
@@ -1598,9 +1716,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
- swp_entry_t entry;
+ pmd_t pmdval;
+ unsigned long start = addr;
+ bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
+ bool direct_reclaim = false;
int nr;
+retry:
tlb_change_page_size(tlb, PAGE_SIZE);
init_rss_vec(rss);
start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1610,90 +1732,24 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
- pte_t ptent = ptep_get(pte);
- struct folio *folio;
- struct page *page;
- int max_nr;
-
- nr = 1;
- if (pte_none(ptent))
- continue;
+ bool any_skipped = false;
if (need_resched())
break;
- if (pte_present(ptent)) {
- max_nr = (end - addr) / PAGE_SIZE;
- nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
- addr, details, rss, &force_flush,
- &force_break);
- if (unlikely(force_break)) {
- addr += nr * PAGE_SIZE;
- break;
- }
- continue;
- }
-
- entry = pte_to_swp_entry(ptent);
- if (is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
- folio = page_folio(page);
- if (unlikely(!should_zap_folio(details, folio)))
- continue;
- /*
- * Both device private/exclusive mappings should only
- * work with anonymous page so far, so we don't need to
- * consider uffd-wp bit when zap. For more information,
- * see zap_install_uffd_wp_if_needed().
- */
- WARN_ON_ONCE(!vma_is_anonymous(vma));
- rss[mm_counter(folio)]--;
- if (is_device_private_entry(entry))
- folio_remove_rmap_pte(folio, page, vma);
- folio_put(folio);
- } else if (!non_swap_entry(entry)) {
- max_nr = (end - addr) / PAGE_SIZE;
- nr = swap_pte_batch(pte, max_nr, ptent);
- /* Genuine swap entries, hence a private anon pages */
- if (!should_zap_cows(details))
- continue;
- rss[MM_SWAPENTS] -= nr;
- free_swap_and_cache_nr(entry, nr);
- } else if (is_migration_entry(entry)) {
- folio = pfn_swap_entry_folio(entry);
- if (!should_zap_folio(details, folio))
- continue;
- rss[mm_counter(folio)]--;
- } else if (pte_marker_entry_uffd_wp(entry)) {
- /*
- * For anon: always drop the marker; for file: only
- * drop the marker if explicitly requested.
- */
- if (!vma_is_anonymous(vma) &&
- !zap_drop_markers(details))
- continue;
- } else if (is_guard_swp_entry(entry)) {
- /*
- * Ordinary zapping should not remove guard PTE
- * markers. Only do so if we should remove PTE markers
- * in general.
- */
- if (!zap_drop_markers(details))
- continue;
- } else if (is_hwpoison_entry(entry) ||
- is_poisoned_swp_entry(entry)) {
- if (!should_zap_cows(details))
- continue;
- } else {
- /* We should have covered all the swap entry types */
- pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
- WARN_ON_ONCE(1);
+ nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
+ &force_flush, &force_break, &any_skipped);
+ if (any_skipped)
+ can_reclaim_pt = false;
+ if (unlikely(force_break)) {
+ addr += nr * PAGE_SIZE;
+ break;
}
- clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
- zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
+ if (can_reclaim_pt && addr == end)
+ direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
+
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
@@ -1713,6 +1769,20 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (force_flush)
tlb_flush_mmu(tlb);
+ if (addr != end) {
+ cond_resched();
+ force_flush = false;
+ force_break = false;
+ goto retry;
+ }
+
+ if (can_reclaim_pt) {
+ if (direct_reclaim)
+ free_pte(mm, start, tlb, pmdval);
+ else
+ try_to_free_pte(mm, pmd, start, tlb);
+ }
+
return addr;
}
@@ -1935,7 +2005,6 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
struct mmu_notifier_range range;
struct mmu_gather tlb;
- lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
hugetlb_zap_begin(vma, &range.start, &range.end);
@@ -3013,7 +3082,6 @@ int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
{
return __apply_to_page_range(mm, addr, size, fn, data, false);
}
-EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
/*
* handle_pte_fault chooses page fault handler according to an entry which was
@@ -4189,8 +4257,10 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
gfp, entry))
return folio;
+ count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
folio_put(folio);
}
+ count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
order = next_order(&orders, order);
}
@@ -5625,7 +5695,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
ignore_writable = true;
/* Migrate to the requested node */
- if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+ if (!migrate_misplaced_folio(folio, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
task_numa_fault(last_cpupid, nid, nr_pages, flags);
@@ -5662,8 +5732,17 @@ out_map:
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+
if (vma_is_anonymous(vma))
return do_huge_pmd_anonymous_page(vmf);
+ /*
+ * Currently we just emit PAGE_SIZE for our fault events, so don't allow
+ * a huge fault if we have a pre-content watch on this file. This would
+ * be trivial to support, but there would need to be tests to ensure
+ * this works properly and those don't exist currently.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
+ return VM_FAULT_FALLBACK;
if (vma->vm_ops->huge_fault)
return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
return VM_FAULT_FALLBACK;
@@ -5687,6 +5766,9 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
}
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
+ /* See comment in create_huge_pmd. */
+ if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
+ goto split;
if (vma->vm_ops->huge_fault) {
ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
@@ -5709,6 +5791,9 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vma))
return VM_FAULT_FALLBACK;
+ /* See comment in create_huge_pmd. */
+ if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
+ return VM_FAULT_FALLBACK;
if (vma->vm_ops->huge_fault)
return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -5726,6 +5811,9 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
if (vma_is_anonymous(vma))
goto split;
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
+ /* See comment in create_huge_pmd. */
+ if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode)))
+ goto split;
if (vma->vm_ops->huge_fault) {
ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
@@ -6069,7 +6157,8 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
}
/*
- * By the time we get here, we already hold the mm semaphore
+ * By the time we get here, we already hold either the VMA lock or the
+ * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
*
* The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __folio_lock_or_retry().
@@ -6185,7 +6274,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r
/*
* Helper for page fault handling.
*
- * This is kind of equivalend to "mmap_read_lock()" followed
+ * This is kind of equivalent to "mmap_read_lock()" followed
* by "find_extend_vma()", except it's a lot more careful about
* the locking (and will drop the lock on failure).
*
@@ -6961,7 +7050,8 @@ bool ptlock_alloc(struct ptdesc *ptdesc)
void ptlock_free(struct ptdesc *ptdesc)
{
- kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
+ if (ptdesc->ptl)
+ kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif
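
A standalone illustration — editor-added, not kernel code — of the batching pattern do_zap_pte_range() introduces above: skip a leading run of empty (pte_none) slots in one pass, then hand the remainder to the present/non-present worker, so the caller's loop advances in batches rather than one entry at a time. All names below are illustrative.

#include <stdio.h>

/* Process one batch starting at slot[0]; returns how many slots were consumed. */
static int process_batch(const int *slot, int max_nr)
{
	int nr = 0;

	/* Skip all consecutive empty slots first (the pte_none() run). */
	while (nr < max_nr && slot[nr] == 0)
		nr++;
	max_nr -= nr;
	if (!max_nr)
		return nr;	/* the whole batch was empty */

	/* Hand exactly one populated slot to the worker in this toy version. */
	printf("zapping slot value %d\n", slot[nr]);
	return nr + 1;
}

int main(void)
{
	int slots[] = { 0, 0, 0, 7, 0, 0, 3 };
	int n = (int)(sizeof(slots) / sizeof(slots[0]));
	int i = 0;

	while (i < n)
		i += process_batch(&slots[i], n - i);
	return 0;
}
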
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c43b4e7fb298..e3655f07dd6e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -219,11 +219,30 @@ void put_online_mems(void)
bool movable_node_enabled = false;
-#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
-int mhp_default_online_type = MMOP_OFFLINE;
-#else
-int mhp_default_online_type = MMOP_ONLINE;
-#endif
+static int mhp_default_online_type = -1;
+int mhp_get_default_online_type(void)
+{
+ if (mhp_default_online_type >= 0)
+ return mhp_default_online_type;
+
+ if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE))
+ mhp_default_online_type = MMOP_OFFLINE;
+ else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO))
+ mhp_default_online_type = MMOP_ONLINE;
+ else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL))
+ mhp_default_online_type = MMOP_ONLINE_KERNEL;
+ else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE))
+ mhp_default_online_type = MMOP_ONLINE_MOVABLE;
+ else
+ mhp_default_online_type = MMOP_OFFLINE;
+
+ return mhp_default_online_type;
+}
+
+void mhp_set_default_online_type(int online_type)
+{
+ mhp_default_online_type = online_type;
+}
static int __init setup_memhp_default_state(char *str)
{
@@ -650,6 +669,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* this and the first chunk to online will be pageblock_nr_pages.
*/
for (pfn = start_pfn; pfn < end_pfn;) {
+ struct page *page = pfn_to_page(pfn);
int order;
/*
@@ -664,7 +684,14 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
else
order = MAX_PAGE_ORDER;
- (*online_page_callback)(pfn_to_page(pfn), order);
+ /*
+ * Exposing the page to the buddy by freeing can cause
+ * issues with debug_pagealloc enabled: some archs don't
+ * like double-unmappings. So treat them like any pages that
+ * were allocated from the buddy.
+ */
+ debug_pagealloc_map_pages(page, 1 << order);
+ (*online_page_callback)(page, order);
pfn += (1UL << order);
}
@@ -1320,7 +1347,7 @@ static int check_hotplug_memory_range(u64 start, u64 size)
static int online_memory_block(struct memory_block *mem, void *arg)
{
- mem->online_type = mhp_default_online_type;
+ mem->online_type = mhp_get_default_online_type();
return device_online(&mem->dev);
}
@@ -1567,7 +1594,7 @@ int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
merge_system_ram_resource(res);
/* online pages if requested */
- if (mhp_default_online_type != MMOP_OFFLINE)
+ if (mhp_get_default_online_type() != MMOP_OFFLINE)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
@@ -1830,7 +1857,7 @@ put_folio:
nodemask_t nmask = node_states[N_MEMORY];
struct migration_target_control mtc = {
.nmask = &nmask,
- .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .gfp_mask = GFP_KERNEL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
.reason = MR_MEMORY_HOTPLUG,
};
int ret;
@@ -1992,8 +2019,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE,
- MEMORY_OFFLINE | REPORT_FAILURE,
- GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
+ MEMORY_OFFLINE | REPORT_FAILURE);
if (ret) {
reason = "failure to isolate range";
goto failed_removal_pcplists_disabled;
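
For illustration — not part of the patch — how an outside user is expected to set the default now that mhp_default_online_type is static: the auto_online_blocks sysfs store (sketched from memory; the real call site lives in drivers/base/memory.c and may differ in detail) goes through the new setter rather than writing the variable directly.

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	/* Was: mhp_default_online_type = online_type; */
	mhp_set_default_online_type(online_type);
	return count;
}
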
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 162407fbf2bc..bbaadbeeb291 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -647,7 +647,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
*/
if ((flags & MPOL_MF_MOVE_ALL) ||
(!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
- if (!isolate_hugetlb(folio, qp->pagelist))
+ if (!folio_isolate_hugetlb(folio, qp->pagelist))
qp->nr_failed++;
unlock:
spin_unlock(ptl);
@@ -2205,9 +2205,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
*/
preferred_gfp = gfp | __GFP_NOWARN;
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
+ page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask);
if (!page)
- page = __alloc_pages_noprof(gfp, order, nid, NULL);
+ page = __alloc_frozen_pages_noprof(gfp, order, nid, NULL);
return page;
}
@@ -2222,7 +2222,7 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
*
* Return: The page on success or NULL if allocation fails.
*/
-struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
+static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
struct mempolicy *pol, pgoff_t ilx, int nid)
{
nodemask_t *nodemask;
@@ -2253,8 +2253,9 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
*/
- page = __alloc_pages_node_noprof(nid,
- gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+ page = __alloc_frozen_pages_noprof(
+ gfp | __GFP_THISNODE | __GFP_NORETRY, order,
+ nid, NULL);
if (page || !(gfp & __GFP_DIRECT_RECLAIM))
return page;
/*
@@ -2266,7 +2267,7 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
}
}
- page = __alloc_pages_noprof(gfp, order, nid, nodemask);
+ page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
if (unlikely(pol->mode == MPOL_INTERLEAVE ||
pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) {
@@ -2285,8 +2286,13 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *pol, pgoff_t ilx, int nid)
{
- return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP,
- order, pol, ilx, nid));
+ struct page *page = alloc_pages_mpol(gfp | __GFP_COMP, order, pol,
+ ilx, nid);
+ if (!page)
+ return NULL;
+
+ set_page_refcounted(page);
+ return page_rmappable_folio(page);
}
/**
@@ -2300,7 +2306,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
* NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
* VMA to prevent it from going away. Should be used for all allocations
* for folios that will be mapped into user space, excepting hugetlbfs, and
- * excepting where direct use of alloc_pages_mpol() is more appropriate.
+ * excepting where direct use of folio_alloc_mpol() is more appropriate.
*
* Return: The folio on success or NULL if allocation fails.
*/
@@ -2321,6 +2327,21 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct
}
EXPORT_SYMBOL(vma_alloc_folio_noprof);
+struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order)
+{
+ struct mempolicy *pol = &default_policy;
+
+ /*
+ * No reference counting needed for current->mempolicy
+ * nor system default_policy
+ */
+ if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+ pol = get_task_policy(current);
+
+ return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
+ numa_node_id());
+}
+
/**
* alloc_pages - Allocate pages.
* @gfp: GFP flags.
@@ -2337,17 +2358,11 @@ EXPORT_SYMBOL(vma_alloc_folio_noprof);
*/
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
{
- struct mempolicy *pol = &default_policy;
+ struct page *page = alloc_frozen_pages_noprof(gfp, order);
- /*
- * No reference counting needed for current->mempolicy
- * nor system default_policy
- */
- if (!in_interrupt() && !(gfp & __GFP_THISNODE))
- pol = get_task_policy(current);
-
- return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
- numa_node_id());
+ if (page)
+ set_page_refcounted(page);
+ return page;
}
EXPORT_SYMBOL(alloc_pages_noprof);
@@ -2357,7 +2372,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
}
EXPORT_SYMBOL(folio_alloc_noprof);
-static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_interleave(gfp_t gfp,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
@@ -2376,13 +2391,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
if (delta) {
nr_allocated = alloc_pages_bulk_noprof(gfp,
interleave_nodes(pol), NULL,
- nr_pages_per_node + 1, NULL,
+ nr_pages_per_node + 1,
page_array);
delta--;
} else {
nr_allocated = alloc_pages_bulk_noprof(gfp,
interleave_nodes(pol), NULL,
- nr_pages_per_node, NULL, page_array);
+ nr_pages_per_node, page_array);
}
page_array += nr_allocated;
@@ -2392,7 +2407,7 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
return total_allocated;
}
-static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
+static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
@@ -2431,7 +2446,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
if (weight && node_isset(node, nodes)) {
node_pages = min(rem_pages, weight);
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
- NULL, page_array);
+ page_array);
page_array += nr_allocated;
total_allocated += nr_allocated;
/* if that's all the pages, no need to interleave */
@@ -2494,7 +2509,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
if (!node_pages)
break;
nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
- NULL, page_array);
+ page_array);
page_array += nr_allocated;
total_allocated += nr_allocated;
if (total_allocated == nr_pages)
@@ -2507,7 +2522,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
return total_allocated;
}
-static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
+static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
@@ -2518,11 +2533,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
- nr_pages, NULL, page_array);
+ nr_pages, page_array);
if (nr_allocated < nr_pages)
nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
- nr_pages - nr_allocated, NULL,
+ nr_pages - nr_allocated,
page_array + nr_allocated);
return nr_allocated;
}
@@ -2533,7 +2548,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
* It can accelerate memory allocation, especially for interleaved
* allocations.
*/
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
unsigned long nr_pages, struct page **page_array)
{
struct mempolicy *pol = &default_policy;
@@ -2544,21 +2559,21 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
pol = get_task_policy(current);
if (pol->mode == MPOL_INTERLEAVE)
- return alloc_pages_bulk_array_interleave(gfp, pol,
+ return alloc_pages_bulk_interleave(gfp, pol,
nr_pages, page_array);
if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
- return alloc_pages_bulk_array_weighted_interleave(
+ return alloc_pages_bulk_weighted_interleave(
gfp, pol, nr_pages, page_array);
if (pol->mode == MPOL_PREFERRED_MANY)
- return alloc_pages_bulk_array_preferred_many(gfp,
+ return alloc_pages_bulk_preferred_many(gfp,
numa_node_id(), pol, nr_pages, page_array);
nid = numa_node_id();
nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
return alloc_pages_bulk_noprof(gfp, nid, nodemask,
- nr_pages, NULL, page_array);
+ nr_pages, page_array);
}
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/mm/migrate.c b/mm/migrate.c
index cc68583c86f9..fb19a18892c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -68,10 +68,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
if (!folio)
goto out;
- if (unlikely(folio_test_slab(folio)))
- goto out_putfolio;
- /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
- smp_rmb();
/*
* Check movable flag before taking the page lock because
* we use non-atomic bitops on newly allocated page flags so
@@ -79,10 +75,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
*/
if (unlikely(!__folio_test_movable(folio)))
goto out_putfolio;
- /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
- smp_rmb();
- if (unlikely(folio_test_slab(folio)))
- goto out_putfolio;
/*
* As movable pages are not isolated from LRU lists, concurrent
@@ -136,7 +128,7 @@ static void putback_movable_folio(struct folio *folio)
*
* This function shall be used whenever the isolated pageset has been
* built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
- * and isolate_hugetlb().
+ * and folio_isolate_hugetlb().
*/
void putback_movable_pages(struct list_head *l)
{
@@ -145,7 +137,7 @@ void putback_movable_pages(struct list_head *l)
list_for_each_entry_safe(folio, folio2, l, lru) {
if (unlikely(folio_test_hugetlb(folio))) {
- folio_putback_active_hugetlb(folio);
+ folio_putback_hugetlb(folio);
continue;
}
list_del(&folio->lru);
@@ -177,7 +169,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
bool isolated, lru;
if (folio_test_hugetlb(folio))
- return isolate_hugetlb(folio, list);
+ return folio_isolate_hugetlb(folio, list);
lru = !__folio_test_movable(folio);
if (lru)
@@ -1459,7 +1451,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
- folio_putback_active_hugetlb(src);
+ folio_putback_hugetlb(src);
return MIGRATEPAGE_SUCCESS;
}
@@ -1542,19 +1534,19 @@ out_unlock:
folio_unlock(src);
out:
if (rc == MIGRATEPAGE_SUCCESS)
- folio_putback_active_hugetlb(src);
+ folio_putback_hugetlb(src);
else if (rc != -EAGAIN)
list_move_tail(&src->lru, ret);
/*
- * If migration was not successful and there's a freeing callback, use
- * it. Otherwise, put_page() will drop the reference grabbed during
- * isolation.
+ * If migration was not successful and there's a freeing callback,
+ * return the folio to that special allocator. Otherwise, simply drop
+ * our additional reference.
*/
if (put_new_folio)
put_new_folio(dst, private);
else
- folio_putback_active_hugetlb(dst);
+ folio_put(dst);
return rc;
}
@@ -1695,6 +1687,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
return nr_failed;
}
+static void migrate_folios_move(struct list_head *src_folios,
+ struct list_head *dst_folios,
+ free_folio_t put_new_folio, unsigned long private,
+ enum migrate_mode mode, int reason,
+ struct list_head *ret_folios,
+ struct migrate_pages_stats *stats,
+ int *retry, int *thp_retry, int *nr_failed,
+ int *nr_retry_pages)
+{
+ struct folio *folio, *folio2, *dst, *dst2;
+ bool is_thp;
+ int nr_pages;
+ int rc;
+
+ dst = list_first_entry(dst_folios, struct folio, lru);
+ dst2 = list_next_entry(dst, lru);
+ list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+ is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+ nr_pages = folio_nr_pages(folio);
+
+ cond_resched();
+
+ rc = migrate_folio_move(put_new_folio, private,
+ folio, dst, mode,
+ reason, ret_folios);
+ /*
+ * The rules are:
+ * Success: folio will be freed
+ * -EAGAIN: stay on the unmap_folios list
+ * Other errno: put on ret_folios list
+ */
+ switch (rc) {
+ case -EAGAIN:
+ *retry += 1;
+ *thp_retry += is_thp;
+ *nr_retry_pages += nr_pages;
+ break;
+ case MIGRATEPAGE_SUCCESS:
+ stats->nr_succeeded += nr_pages;
+ stats->nr_thp_succeeded += is_thp;
+ break;
+ default:
+ *nr_failed += 1;
+ stats->nr_thp_failed += is_thp;
+ stats->nr_failed_pages += nr_pages;
+ break;
+ }
+ dst = dst2;
+ dst2 = list_next_entry(dst, lru);
+ }
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+ struct list_head *dst_folios,
+ free_folio_t put_new_folio, unsigned long private,
+ struct list_head *ret_folios)
+{
+ struct folio *folio, *folio2, *dst, *dst2;
+
+ dst = list_first_entry(dst_folios, struct folio, lru);
+ dst2 = list_next_entry(dst, lru);
+ list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+
+ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+ anon_vma, true, ret_folios);
+ list_del(&dst->lru);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
+ dst = dst2;
+ dst2 = list_next_entry(dst, lru);
+ }
+}
+
/*
* migrate_pages_batch() first unmaps folios in the from list as many as
* possible, then move the unmapped folios.
@@ -1717,7 +1784,7 @@ static int migrate_pages_batch(struct list_head *from,
int pass = 0;
bool is_thp = false;
bool is_large = false;
- struct folio *folio, *folio2, *dst = NULL, *dst2;
+ struct folio *folio, *folio2, *dst = NULL;
int rc, rc_saved = 0, nr_pages;
LIST_HEAD(unmap_folios);
LIST_HEAD(dst_folios);
@@ -1888,42 +1955,11 @@ move:
thp_retry = 0;
nr_retry_pages = 0;
- dst = list_first_entry(&dst_folios, struct folio, lru);
- dst2 = list_next_entry(dst, lru);
- list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
- is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
- nr_pages = folio_nr_pages(folio);
-
- cond_resched();
-
- rc = migrate_folio_move(put_new_folio, private,
- folio, dst, mode,
- reason, ret_folios);
- /*
- * The rules are:
- * Success: folio will be freed
- * -EAGAIN: stay on the unmap_folios list
- * Other errno: put on ret_folios list
- */
- switch(rc) {
- case -EAGAIN:
- retry++;
- thp_retry += is_thp;
- nr_retry_pages += nr_pages;
- break;
- case MIGRATEPAGE_SUCCESS:
- stats->nr_succeeded += nr_pages;
- stats->nr_thp_succeeded += is_thp;
- break;
- default:
- nr_failed++;
- stats->nr_thp_failed += is_thp;
- stats->nr_failed_pages += nr_pages;
- break;
- }
- dst = dst2;
- dst2 = list_next_entry(dst, lru);
- }
+ /* Move the unmapped folios */
+ migrate_folios_move(&unmap_folios, &dst_folios,
+ put_new_folio, private, mode, reason,
+ ret_folios, stats, &retry, &thp_retry,
+ &nr_failed, &nr_retry_pages);
}
nr_failed += retry;
stats->nr_thp_failed += thp_retry;
@@ -1932,20 +1968,8 @@ move:
rc = rc_saved ? : nr_failed;
out:
/* Cleanup remaining folios */
- dst = list_first_entry(&dst_folios, struct folio, lru);
- dst2 = list_next_entry(dst, lru);
- list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
- int old_page_state = 0;
- struct anon_vma *anon_vma = NULL;
-
- __migrate_folio_extract(dst, &old_page_state, &anon_vma);
- migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
- anon_vma, true, ret_folios);
- list_del(&dst->lru);
- migrate_folio_undo_dst(dst, true, put_new_folio, private);
- dst = dst2;
- dst2 = list_next_entry(dst, lru);
- }
+ migrate_folios_undo(&unmap_folios, &dst_folios,
+ put_new_folio, private, ret_folios);
return rc;
}
@@ -2208,7 +2232,7 @@ static int __add_folio_for_migration(struct folio *folio, int node,
return -EACCES;
if (folio_test_hugetlb(folio)) {
- if (isolate_hugetlb(folio, pagelist))
+ if (folio_isolate_hugetlb(folio, pagelist))
return 1;
} else if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, pagelist);
@@ -2683,8 +2707,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
* elevated reference count on the folio. This function will un-isolate the
* folio, dereferencing the folio before returning.
*/
-int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
- int node)
+int migrate_misplaced_folio(struct folio *folio, int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int nr_remaining;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 24b68b425afb..2630cc30147e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1585,13 +1585,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
{
void *ptr;
+ /*
+ * Kmemleak will explicitly scan mem_map by traversing all valid
+ * `struct page`, so memblock does not need to be added to the scan list.
+ */
if (exact_nid)
ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
else
ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
if (ptr && size > 0)
diff --git a/mm/mmap.c b/mm/mmap.c
index aec208f90337..cda01071c7b1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -111,8 +111,7 @@ static int check_brk_limits(unsigned long addr, unsigned long len)
return mlock_future_ok(current->mm, current->mm->def_flags, len)
? 0 : -EAGAIN;
}
-static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
- unsigned long addr, unsigned long request, unsigned long flags);
+
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long newbrk, oldbrk, origbrk;
@@ -278,8 +277,62 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
return true;
}
-/*
+/**
+ * do_mmap() - Perform a userland memory mapping into the current process
+ * address space of length @len with protection bits @prot, mmap flags @flags
+ * (from which VMA flags will be inferred), and any additional VMA flags to
+ * apply @vm_flags. If this is a file-backed mapping then the file is specified
+ * in @file and page offset into the file via @pgoff.
+ *
+ * This function does not perform security checks on the file and assumes, if
+ * @uf is non-NULL, the caller has provided a list head to track unmap events
+ * for userfaultfd @uf.
+ *
+ * It also simply indicates whether memory population is required by setting
+ * @populate, which must be non-NULL, expecting the caller to actually perform
+ * this task itself if appropriate.
+ *
+ * This function will invoke architecture-specific (and if provided and
+ * relevant, file system-specific) logic to determine the most appropriate
+ * unmapped area in which to place the mapping if not MAP_FIXED.
+ *
+ * Callers which require userland mmap() behaviour should invoke vm_mmap(),
+ * which is also exported for module use.
+ *
+ * Callers which require this behaviour minus the security checks, userfaultfd
+ * and populate handling, and which manage the mmap write lock themselves,
+ * should call this function.
+ *
+ * Note that the returned address may reside within a merged VMA if an
+ * appropriate merge were to take place, so it doesn't necessarily specify the
+ * start of a VMA, rather only the start of a valid mapped range of length
+ * @len bytes, rounded down to the nearest page size.
+ *
* The caller must write-lock current->mm->mmap_lock.
+ *
+ * @file: An optional struct file pointer describing the file which is to be
+ * mapped, if a file-backed mapping.
+ * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
+ * address at which to perform this mapping. See mmap (2) for details. Must be
+ * page-aligned.
+ * @len: The length of the mapping. Will be page-aligned and must be at least 1
+ * page in size.
+ * @prot: Protection bits describing access required to the mapping. See mmap
+ * (2) for details.
+ * @flags: Flags specifying how the mapping should be performed, see mmap (2)
+ * for details.
+ * @vm_flags: VMA flags which should be set by default, or 0 otherwise.
+ * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
+ * @populate: A pointer to a value which will be set to 0 if no population of
+ * the range is required, or the number of bytes to populate if it is. Must be
+ * non-NULL. See mmap (2) for details as to under what circumstances population
+ * of the range occurs.
+ * @uf: An optional pointer to a list head to track userfaultfd unmap events
+ * should unmapping events arise. If provided, it is up to the caller to manage
+ * this.
+ *
+ * Returns: Either an error, or the address at which the requested mapping has
+ * been performed.
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
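
A sketch of the caller contract the new kernel-doc above describes, modelled on vm_mmap_pgoff() (simplified; the security check is elided): the caller takes the mmap write lock, lets do_mmap() report how much to populate, and performs the population itself after dropping the lock.

static unsigned long mmap_sketch(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long prot,
				 unsigned long flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	unsigned long populate = 0;
	unsigned long ret;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (!IS_ERR_VALUE(ret) && populate)
		mm_populate(ret, populate);
	return ret;
}
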
@@ -292,6 +345,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
*populate = 0;
+ mmap_assert_write_locked(mm);
+
if (!len)
return -EINVAL;
@@ -369,8 +424,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (file) {
struct inode *inode = file_inode(file);
- unsigned int seals = memfd_file_seals(file);
unsigned long flags_mask;
+ int err;
if (!file_mmap_ok(file, inode, pgoff, len))
return -EOVERFLOW;
@@ -410,8 +465,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
- else if (is_readonly_sealed(seals, vm_flags))
- vm_flags &= ~VM_MAYWRITE;
fallthrough;
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
@@ -431,6 +484,14 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
default:
return -EINVAL;
}
+
+ /*
+ * Check to see if we are violating any seals and update VMA
+ * flags if necessary to avoid future seal violations.
+ */
+ err = memfd_check_seals_mmap(file, &vm_flags);
+ if (err)
+ return (unsigned long)err;
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
@@ -581,115 +642,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
-/**
- * unmapped_area() - Find an area between the low_limit and the high_limit with
- * the correct alignment and offset, all from @info. Note: current->mm is used
- * for the search.
- *
- * @info: The unmapped area information including the range [low_limit -
- * high_limit), the alignment offset and mask.
- *
- * Return: A memory address or -ENOMEM.
- */
-static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
-{
- unsigned long length, gap;
- unsigned long low_limit, high_limit;
- struct vm_area_struct *tmp;
- VMA_ITERATOR(vmi, current->mm, 0);
-
- /* Adjust search length to account for worst case alignment overhead */
- length = info->length + info->align_mask + info->start_gap;
- if (length < info->length)
- return -ENOMEM;
-
- low_limit = info->low_limit;
- if (low_limit < mmap_min_addr)
- low_limit = mmap_min_addr;
- high_limit = info->high_limit;
-retry:
- if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
- return -ENOMEM;
-
- /*
- * Adjust for the gap first so it doesn't interfere with the
- * later alignment. The first step is the minimum needed to
- * fulill the start gap, the next steps is the minimum to align
- * that. It is the minimum needed to fulill both.
- */
- gap = vma_iter_addr(&vmi) + info->start_gap;
- gap += (info->align_offset - gap) & info->align_mask;
- tmp = vma_next(&vmi);
- if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
- if (vm_start_gap(tmp) < gap + length - 1) {
- low_limit = tmp->vm_end;
- vma_iter_reset(&vmi);
- goto retry;
- }
- } else {
- tmp = vma_prev(&vmi);
- if (tmp && vm_end_gap(tmp) > gap) {
- low_limit = vm_end_gap(tmp);
- vma_iter_reset(&vmi);
- goto retry;
- }
- }
-
- return gap;
-}
-
-/**
- * unmapped_area_topdown() - Find an area between the low_limit and the
- * high_limit with the correct alignment and offset at the highest available
- * address, all from @info. Note: current->mm is used for the search.
- *
- * @info: The unmapped area information including the range [low_limit -
- * high_limit), the alignment offset and mask.
- *
- * Return: A memory address or -ENOMEM.
- */
-static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
-{
- unsigned long length, gap, gap_end;
- unsigned long low_limit, high_limit;
- struct vm_area_struct *tmp;
- VMA_ITERATOR(vmi, current->mm, 0);
-
- /* Adjust search length to account for worst case alignment overhead */
- length = info->length + info->align_mask + info->start_gap;
- if (length < info->length)
- return -ENOMEM;
-
- low_limit = info->low_limit;
- if (low_limit < mmap_min_addr)
- low_limit = mmap_min_addr;
- high_limit = info->high_limit;
-retry:
- if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
- return -ENOMEM;
-
- gap = vma_iter_end(&vmi) - info->length;
- gap -= (gap - info->align_offset) & info->align_mask;
- gap_end = vma_iter_end(&vmi);
- tmp = vma_next(&vmi);
- if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
- if (vm_start_gap(tmp) < gap_end) {
- high_limit = vm_start_gap(tmp);
- vma_iter_reset(&vmi);
- goto retry;
- }
- } else {
- tmp = vma_prev(&vmi);
- if (tmp && vm_end_gap(tmp) > gap) {
- high_limit = tmp->vm_start;
- vma_iter_reset(&vmi);
- goto retry;
- }
- }
-
- return gap;
-}
-
/*
* Determine if the allocation needs to ensure that there is no
* existing mapping within it's guard gaps, for use as start_gap.
@@ -989,211 +941,6 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
-/*
- * Verify that the stack growth is acceptable and
- * update accounting. This is shared with both the
- * grow-up and grow-down cases.
- */
-static int acct_stack_growth(struct vm_area_struct *vma,
- unsigned long size, unsigned long grow)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long new_start;
-
- /* address space limit tests */
- if (!may_expand_vm(mm, vma->vm_flags, grow))
- return -ENOMEM;
-
- /* Stack limit test */
- if (size > rlimit(RLIMIT_STACK))
- return -ENOMEM;
-
- /* mlock limit tests */
- if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
- return -ENOMEM;
-
- /* Check to ensure the stack will not grow into a hugetlb-only region */
- new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
- vma->vm_end - size;
- if (is_hugepage_only_range(vma->vm_mm, new_start, size))
- return -EFAULT;
-
- /*
- * Overcommit.. This must be the final test, as it will
- * update security statistics.
- */
- if (security_vm_enough_memory_mm(mm, grow))
- return -ENOMEM;
-
- return 0;
-}
-
-#if defined(CONFIG_STACK_GROWSUP)
-/*
- * PA-RISC uses this for its stack.
- * vma is the last one with address > vma->vm_end. Have to extend vma.
- */
-static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *next;
- unsigned long gap_addr;
- int error = 0;
- VMA_ITERATOR(vmi, mm, vma->vm_start);
-
- if (!(vma->vm_flags & VM_GROWSUP))
- return -EFAULT;
-
- mmap_assert_write_locked(mm);
-
- /* Guard against exceeding limits of the address space. */
- address &= PAGE_MASK;
- if (address >= (TASK_SIZE & PAGE_MASK))
- return -ENOMEM;
- address += PAGE_SIZE;
-
- /* Enforce stack_guard_gap */
- gap_addr = address + stack_guard_gap;
-
- /* Guard against overflow */
- if (gap_addr < address || gap_addr > TASK_SIZE)
- gap_addr = TASK_SIZE;
-
- next = find_vma_intersection(mm, vma->vm_end, gap_addr);
- if (next && vma_is_accessible(next)) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- /* Check that both stack segments have the same anon_vma? */
- }
-
- if (next)
- vma_iter_prev_range_limit(&vmi, address);
-
- vma_iter_config(&vmi, vma->vm_start, address);
- if (vma_iter_prealloc(&vmi, vma))
- return -ENOMEM;
-
- /* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma))) {
- vma_iter_free(&vmi);
- return -ENOMEM;
- }
-
- /* Lock the VMA before expanding to prevent concurrent page faults */
- vma_start_write(vma);
- /* We update the anon VMA tree. */
- anon_vma_lock_write(vma->anon_vma);
-
- /* Somebody else might have raced and expanded it already */
- if (address > vma->vm_end) {
- unsigned long size, grow;
-
- size = address - vma->vm_start;
- grow = (address - vma->vm_end) >> PAGE_SHIFT;
-
- error = -ENOMEM;
- if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, grow);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_end = address;
- /* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
- anon_vma_interval_tree_post_update_vma(vma);
-
- perf_event_mmap(vma);
- }
- }
- }
- anon_vma_unlock_write(vma->anon_vma);
- vma_iter_free(&vmi);
- validate_mm(mm);
- return error;
-}
-#endif /* CONFIG_STACK_GROWSUP */
-
-/*
- * vma is the first one with address < vma->vm_start. Have to extend vma.
- * mmap_lock held for writing.
- */
-int expand_downwards(struct vm_area_struct *vma, unsigned long address)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *prev;
- int error = 0;
- VMA_ITERATOR(vmi, mm, vma->vm_start);
-
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return -EFAULT;
-
- mmap_assert_write_locked(mm);
-
- address &= PAGE_MASK;
- if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
- return -EPERM;
-
- /* Enforce stack_guard_gap */
- prev = vma_prev(&vmi);
- /* Check that both stack segments have the same anon_vma? */
- if (prev) {
- if (!(prev->vm_flags & VM_GROWSDOWN) &&
- vma_is_accessible(prev) &&
- (address - prev->vm_end < stack_guard_gap))
- return -ENOMEM;
- }
-
- if (prev)
- vma_iter_next_range_limit(&vmi, vma->vm_start);
-
- vma_iter_config(&vmi, address, vma->vm_end);
- if (vma_iter_prealloc(&vmi, vma))
- return -ENOMEM;
-
- /* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma))) {
- vma_iter_free(&vmi);
- return -ENOMEM;
- }
-
- /* Lock the VMA before expanding to prevent concurrent page faults */
- vma_start_write(vma);
- /* We update the anon VMA tree. */
- anon_vma_lock_write(vma->anon_vma);
-
- /* Somebody else might have raced and expanded it already */
- if (address < vma->vm_start) {
- unsigned long size, grow;
-
- size = vma->vm_end - address;
- grow = (vma->vm_start - address) >> PAGE_SHIFT;
-
- error = -ENOMEM;
- if (grow <= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, grow);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- /* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
- anon_vma_interval_tree_post_update_vma(vma);
-
- perf_event_mmap(vma);
- }
- }
- }
- anon_vma_unlock_write(vma->anon_vma);
- vma_iter_free(&vmi);
- validate_mm(mm);
- return error;
-}
-
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
@@ -1325,58 +1072,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
return do_vmi_munmap(&vmi, mm, start, len, uf, false);
}
-unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
- struct list_head *uf)
-{
- unsigned long ret;
- bool writable_file_mapping = false;
-
- /* Check to see if MDWE is applicable. */
- if (map_deny_write_exec(vm_flags, vm_flags))
- return -EACCES;
-
- /* Allow architectures to sanity-check the vm_flags. */
- if (!arch_validate_flags(vm_flags))
- return -EINVAL;
-
- /* Map writable and ensure this isn't a sealed memfd. */
- if (file && is_shared_maywrite(vm_flags)) {
- int error = mapping_map_writable(file->f_mapping);
-
- if (error)
- return error;
- writable_file_mapping = true;
- }
-
- ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
-
- /* Clear our write mapping regardless of error. */
- if (writable_file_mapping)
- mapping_unmap_writable(file->f_mapping);
-
- validate_mm(current->mm);
- return ret;
-}
-
-static int __vm_munmap(unsigned long start, size_t len, bool unlock)
-{
- int ret;
- struct mm_struct *mm = current->mm;
- LIST_HEAD(uf);
- VMA_ITERATOR(vmi, mm, start);
-
- if (mmap_write_lock_killable(mm))
- return -EINTR;
-
- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
- if (ret || !unlock)
- mmap_write_unlock(mm);
-
- userfaultfd_unmap_complete(mm, &uf);
- return ret;
-}
-
int vm_munmap(unsigned long start, size_t len)
{
return __vm_munmap(start, len, false);
@@ -1512,88 +1207,6 @@ out:
return ret;
}
-/*
- * do_brk_flags() - Increase the brk vma if the flags match.
- * @vmi: The vma iterator
- * @addr: The start address
- * @len: The length of the increase
- * @vma: The vma,
- * @flags: The VMA Flags
- *
- * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
- * do not match then create a new anonymous VMA. Eventually we may be able to
- * do some brk-specific accounting here.
- */
-static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, unsigned long len, unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
-
- /*
- * Check against address space limits by the changed size
- * Note: This happens *after* clearing old mappings in some code paths.
- */
- flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
- return -ENOMEM;
-
- if (mm->map_count > sysctl_max_map_count)
- return -ENOMEM;
-
- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
- return -ENOMEM;
-
- /*
- * Expand the existing vma if possible; Note that singular lists do not
- * occur after forking, so the expand will only happen on new VMAs.
- */
- if (vma && vma->vm_end == addr) {
- VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
-
- vmg.prev = vma;
- /* vmi is positioned at prev, which this mode expects. */
- vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
-
- if (vma_merge_new_range(&vmg))
- goto out;
- else if (vmg_nomem(&vmg))
- goto unacct_fail;
- }
-
- if (vma)
- vma_iter_next_range(vmi);
- /* create a vma struct for an anonymous mapping */
- vma = vm_area_alloc(mm);
- if (!vma)
- goto unacct_fail;
-
- vma_set_anonymous(vma);
- vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
- vm_flags_init(vma, flags);
- vma->vm_page_prot = vm_get_page_prot(flags);
- vma_start_write(vma);
- if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
- goto mas_store_fail;
-
- mm->map_count++;
- validate_mm(mm);
- ksm_add_vma(vma);
-out:
- perf_event_mmap(vma);
- mm->total_vm += len >> PAGE_SHIFT;
- mm->data_vm += len >> PAGE_SHIFT;
- if (flags & VM_LOCKED)
- mm->locked_vm += (len >> PAGE_SHIFT);
- vm_flags_set(vma, VM_SOFTDIRTY);
- return 0;
-
-mas_store_fail:
- vm_area_free(vma);
-unacct_fail:
- vm_unacct_memory(len >> PAGE_SHIFT);
- return -ENOMEM;
-}
-
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
@@ -1664,7 +1277,6 @@ void exit_mmap(struct mm_struct *mm)
goto destroy;
}
- lru_add_drain();
flush_cache_mm(mm);
tlb_gather_mmu_fullmm(&tlb, mm);
/* update_hiwater_rss(mm) here? but nobody should be looking */
@@ -2107,7 +1719,6 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
vma, new_start, length, false, true))
return -ENOMEM;
- lru_add_drain();
tlb_gather_mmu(&tlb, mm);
next = vma_next(&vmi);
if (new_end > old_start) {
@@ -2132,3 +1743,55 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
/* Shrink the vma to just the new range */
return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}
+
+#ifdef CONFIG_MMU
+/*
+ * Obtain a read lock on mm->mmap_lock. If the specified address is below the
+ * start of the VMA, the intent is to perform a write, and it is a
+ * downward-growing stack, then attempt to expand the stack to contain it.
+ *
+ * This function is intended only for obtaining an argument page from an ELF
+ * image, and is almost certainly NOT what you want to use for any other
+ * purpose.
+ *
+ * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
+ * VMA referenced must not be linked in any user-visible tree, i.e. it must be a
+ * new VMA being mapped.
+ *
+ * The function assumes that addr is either contained within the VMA or below
+ * it, and makes no attempt to validate this value beyond that.
+ *
+ * Returns true if the read lock was obtained and a stack was perhaps expanded,
+ * false if the stack expansion failed.
+ *
+ * On stack expansion the function temporarily acquires an mmap write lock
+ * before downgrading it.
+ */
+bool mmap_read_lock_maybe_expand(struct mm_struct *mm,
+ struct vm_area_struct *new_vma,
+ unsigned long addr, bool write)
+{
+ if (!write || addr >= new_vma->vm_start) {
+ mmap_read_lock(mm);
+ return true;
+ }
+
+ if (!(new_vma->vm_flags & VM_GROWSDOWN))
+ return false;
+
+ mmap_write_lock(mm);
+ if (expand_downwards(new_vma, addr)) {
+ mmap_write_unlock(mm);
+ return false;
+ }
+
+ mmap_write_downgrade(mm);
+ return true;
+}
+#else
+bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, bool write)
+{
+ return false;
+}
+#endif
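
A hedged sketch of the intended caller pattern for mmap_read_lock_maybe_expand(), modelled on argument-page setup during exec (local names are illustrative): the helper takes the read lock, first expanding the not-yet-visible stack VMA downwards if the target address lies below it.

static struct page *get_stack_page_sketch(struct mm_struct *mm,
					  struct vm_area_struct *stack_vma,
					  unsigned long pos, bool write)
{
	struct page *page;
	long ret;

	if (!mmap_read_lock_maybe_expand(mm, stack_vma, pos, write))
		return NULL;

	ret = get_user_pages_remote(mm, pos, 1, write ? FOLL_WRITE : 0,
				    &page, NULL);
	mmap_read_unlock(mm);
	return ret <= 0 ? NULL : page;
}
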
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index f186d57df2c6..e7dbaf96aa17 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -17,51 +17,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
-#ifdef CONFIG_MEMCG
-
-/*
- * Size of the buffer for memcg path names. Ignoring stack trace support,
- * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
- */
-#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
-
-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
- do { \
- if (trace_mmap_lock_##type##_enabled()) { \
- char buf[MEMCG_PATH_BUF_SIZE]; \
- get_mm_memcg_path(mm, buf, sizeof(buf)); \
- trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
- } \
- } while (0)
-
-#else /* !CONFIG_MEMCG */
-
-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
- trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)
-
-#endif /* CONFIG_MEMCG */
-
#ifdef CONFIG_TRACING
-#ifdef CONFIG_MEMCG
-/*
- * Write the given mm_struct's memcg path to a buffer. If the path cannot be
- * determined, empty string is written.
- */
-static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
-{
- struct mem_cgroup *memcg;
-
- buf[0] = '\0';
- memcg = get_mem_cgroup_from_mm(mm);
- if (memcg == NULL)
- return;
- if (memcg->css.cgroup)
- cgroup_path(memcg->css.cgroup, buf, buflen);
- css_put(&memcg->css);
-}
-
-#endif /* CONFIG_MEMCG */
-
/*
* Trace calls must be in a separate file, as otherwise there's a circular
* dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
@@ -69,20 +25,20 @@ static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
- TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
+ trace_mmap_lock_start_locking(mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
bool success)
{
- TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
+ trace_mmap_lock_acquire_returned(mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
- TRACE_MMAP_LOCK_EVENT(released, mm, write);
+ trace_mmap_lock_released(mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
#endif /* CONFIG_TRACING */
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 99b3e9408aa0..7aa6f18c500b 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -311,11 +311,34 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
}
}
-static void tlb_remove_table_one(void *table)
+#ifdef CONFIG_PT_RECLAIM
+static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
+{
+ struct ptdesc *ptdesc;
+
+ ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
+ __tlb_remove_table(ptdesc);
+}
+
+static inline void __tlb_remove_table_one(void *table)
+{
+ struct ptdesc *ptdesc;
+
+ ptdesc = table;
+ call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
+}
+#else
+static inline void __tlb_remove_table_one(void *table)
{
tlb_remove_table_sync_one();
__tlb_remove_table(table);
}
+#endif /* CONFIG_PT_RECLAIM */
+
+static void tlb_remove_table_one(void *table)
+{
+ __tlb_remove_table_one(table);
+}
static void tlb_table_flush(struct mmu_gather *tlb)
{
diff --git a/mm/mseal.c b/mm/mseal.c
index 81d6e980e8a9..c27197ac04e8 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -217,9 +217,9 @@ int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
unsigned long end;
struct mm_struct *mm = current->mm;
- ret = can_do_mseal(flags);
- if (ret)
- return ret;
+ /* Verify flags not set. */
+ if (flags)
+ return -EINVAL;
start = untagged_addr(start);
if (!PAGE_ALIGNED(start))
diff --git a/mm/nommu.c b/mm/nommu.c
index 9cb6e99215e2..baa79abdaf03 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1613,6 +1613,13 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
}
EXPORT_SYMBOL(remap_vmalloc_range);
+vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf)
+{
+ BUG();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(filemap_fsnotify_fault);
+
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
BUG();
diff --git a/mm/numa.c b/mm/numa.c
index e2eec07707d1..f1787d7713a6 100644
--- a/mm/numa.c
+++ b/mm/numa.c
@@ -37,13 +37,7 @@ void __init alloc_node_data(int nid)
void __init alloc_offline_node_data(int nid)
{
pg_data_t *pgdat;
-
- pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
- if (!pgdat)
- panic("Cannot allocate %zuB for node %d.\n",
- sizeof(*pgdat), nid);
-
- node_data[nid] = pgdat;
+ node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES);
}
/* Stub functions: */
diff --git a/mm/numa_emulation.c b/mm/numa_emulation.c
index 031fb9961bf7..9d55679d99ce 100644
--- a/mm/numa_emulation.c
+++ b/mm/numa_emulation.c
@@ -8,11 +8,12 @@
#include <linux/memblock.h>
#include <linux/numa_memblks.h>
#include <asm/numa.h>
+#include <acpi/acpi_numa.h>
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
-static int emu_nid_to_phys[MAX_NUMNODES];
+int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;
int __init numa_emu_cmdline(char *str)
@@ -379,6 +380,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
int max_emu_nid, dfl_phys_nid;
int i, j, ret;
+ nodemask_t physnode_mask = numa_nodes_parsed;
if (!emu_cmdline)
goto no_emu;
@@ -395,7 +397,6 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
* split the system RAM into N fake nodes.
*/
if (strchr(emu_cmdline, 'U')) {
- nodemask_t physnode_mask = numa_nodes_parsed;
unsigned long n;
int nid = 0;
@@ -465,9 +466,6 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
*/
max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid);
- /* commit */
- *numa_meminfo = ei;
-
/* Make sure numa_nodes_parsed only contains emulated nodes */
nodes_clear(numa_nodes_parsed);
for (i = 0; i < ARRAY_SIZE(ei.blk); i++)
@@ -475,10 +473,21 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
ei.blk[i].nid != NUMA_NO_NODE)
node_set(ei.blk[i].nid, numa_nodes_parsed);
- numa_emu_update_cpu_to_node(emu_nid_to_phys, ARRAY_SIZE(emu_nid_to_phys));
+ /*
+ * Fix pxm_to_node_map[] and node_to_pxm_map[] to avoid collision
+ * with faked numa nodes, particularly during later memory hotplug
+ * handling, and also update numa_nodes_parsed accordingly.
+ */
+ ret = fix_pxm_node_maps(max_emu_nid);
+ if (ret < 0)
+ goto no_emu;
+
+ /* commit */
+ *numa_meminfo = ei;
+
+ numa_emu_update_cpu_to_node(emu_nid_to_phys, max_emu_nid + 1);
/* make sure all emulated nodes are mapped to a physical node */
- for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+ for (i = 0; i < max_emu_nid + 1; i++)
if (emu_nid_to_phys[i] == NUMA_NO_NODE)
emu_nid_to_phys[i] = dfl_phys_nid;
@@ -501,12 +510,34 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
numa_set_distance(i, j, dist);
}
}
+ for (i = 0; i < numa_distance_cnt; i++) {
+ for (j = 0; j < numa_distance_cnt; j++) {
+ int physi, physj;
+ u8 dist;
+
+ /* distance between fake nodes is already ok */
+ if (emu_nid_to_phys[i] != NUMA_NO_NODE &&
+ emu_nid_to_phys[j] != NUMA_NO_NODE)
+ continue;
+ if (emu_nid_to_phys[i] != NUMA_NO_NODE)
+ physi = emu_nid_to_phys[i];
+ else
+ physi = i - max_emu_nid;
+ if (emu_nid_to_phys[j] != NUMA_NO_NODE)
+ physj = emu_nid_to_phys[j];
+ else
+ physj = j - max_emu_nid;
+ dist = phys_dist[physi * numa_dist_cnt + physj];
+ numa_set_distance(i, j, dist);
+ }
+ }
/* free the copied physical distance table */
memblock_free(phys_dist, phys_size);
return;
no_emu:
+ numa_nodes_parsed = physnode_mask;
/* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */
for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
emu_nid_to_phys[i] = i;
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
index a3877e9bc878..ff4054f4334d 100644
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -7,7 +7,7 @@
#include <linux/numa.h>
#include <linux/numa_memblks.h>
-static int numa_distance_cnt;
+int numa_distance_cnt;
static u8 *numa_distance;
nodemask_t numa_nodes_parsed __initdata;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1c485beb0b93..1cf121ad7085 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,6 +44,7 @@
#include <linux/init.h>
#include <linux/mmu_notifier.h>
#include <linux/cred.h>
+#include <linux/nmi.h>
#include <asm/tlb.h>
#include "internal.h"
@@ -430,10 +431,15 @@ static void dump_tasks(struct oom_control *oc)
mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
else {
struct task_struct *p;
+ int i = 0;
rcu_read_lock();
- for_each_process(p)
+ for_each_process(p) {
+ /* Avoid potential softlockup warning */
+ if ((++i & 1023) == 0)
+ touch_softlockup_watchdog();
dump_task(p, oc);
+ }
rcu_read_unlock();
}
}
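The hunk above uses the standard anti-softlockup pattern for long RCU-protected task walks: poke the watchdog every so many iterations. A minimal sketch of the same pattern outside dump_tasks(); the function name and walk body are hypothetical, not part of this patch:

#include <linux/nmi.h>
#include <linux/sched/signal.h>

static void example_walk_all_tasks(void)
{
	struct task_struct *p;
	int i = 0;

	rcu_read_lock();
	for_each_process(p) {
		/* Kick the watchdog periodically so huge task counts don't trip it */
		if ((++i & 1023) == 0)
			touch_softlockup_watchdog();
		/* ... per-task work goes here ... */
	}
	rcu_read_unlock();
}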
@@ -699,7 +705,7 @@ static void queue_oom_reaper(struct task_struct *tsk)
}
#ifdef CONFIG_SYSCTL
-static struct ctl_table vm_oom_kill_table[] = {
+static const struct ctl_table vm_oom_kill_table[] = {
{
.procname = "panic_on_oom",
.data = &sysctl_panic_on_oom,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d9861e42b2bd..eb55ece39c56 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -942,26 +942,25 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
- wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
- if (wb_thresh > wb_max_thresh)
- wb_thresh = wb_max_thresh;
/*
- * With strictlimit flag, the wb_thresh is treated as
- * a hard limit in balance_dirty_pages() and wb_position_ratio().
- * It's possible that wb_thresh is close to zero, not because
- * the device is slow, but because it has been inactive.
- * To prevent occasional writes from being blocked, we raise wb_thresh.
+ * It's very possible that wb_thresh is close to 0 not because the
+ * device is slow, but because it has remained inactive for a long time.
+ * Honour such devices with a reasonably good (hopefully IO-efficient)
+ * threshold, so that occasional writes won't be blocked and active
+ * writes can ramp up the threshold quickly.
*/
- if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
- unsigned long limit = hard_dirty_limit(dom, dtc->thresh);
- u64 wb_scale_thresh = 0;
-
- if (limit > dtc->dirty)
- wb_scale_thresh = (limit - dtc->dirty) / 100;
- wb_thresh = max(wb_thresh, min(wb_scale_thresh, wb_max_thresh / 4));
+ if (thresh > dtc->dirty) {
+ if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
+ wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
+ else
+ wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
}
+ wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
+ if (wb_thresh > wb_max_thresh)
+ wb_thresh = wb_max_thresh;
+
return wb_thresh;
}
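As a rough worked example of the reordered logic above (numbers purely illustrative): with thresh = 800 pages and dtc->dirty = 0, a long-idle wb whose proportional share is near zero now gets a floor of (800 - 0) / 8 = 100 pages in the normal case, or (800 - 0) / 100 = 8 pages under BDI_CAP_STRICTLIMIT; only afterwards is the result clamped to wb_max_thresh, so the max-ratio cap is applied after the bump rather than before it.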
@@ -969,6 +968,7 @@ unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
+ domain_dirty_avail(&gdtc, true);
return __wb_calc_thresh(&gdtc, thresh);
}
@@ -1145,12 +1145,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
long long wb_pos_ratio;
- if (dtc->wb_dirty < 8) {
- dtc->pos_ratio = min_t(long long, pos_ratio * 2,
- 2 << RATELIMIT_CALC_SHIFT);
- return;
- }
-
if (dtc->wb_dirty >= wb_thresh)
return;
@@ -1222,14 +1216,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
if (unlikely(wb_thresh > dtc->thresh))
wb_thresh = dtc->thresh;
/*
- * It's very possible that wb_thresh is close to 0 not because the
- * device is slow, but that it has remained inactive for long time.
- * Honour such devices a reasonable good (hopefully IO efficient)
- * threshold, so that the occasional writes won't be blocked and active
- * writes can rampup the threshold quickly.
- */
- wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
- /*
* scale global setpoint to wb's:
* wb_setpoint = setpoint * wb_thresh / thresh
*/
@@ -1484,17 +1470,10 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
* balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
* Hence, to calculate "step" properly, we have to use wb_dirty as
* "dirty" and wb_setpoint as "setpoint".
- *
- * We rampup dirty_ratelimit forcibly if wb_dirty is low because
- * it's possible that wb_thresh is close to zero due to inactivity
- * of backing device.
*/
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
dirty = dtc->wb_dirty;
- if (dtc->wb_dirty < 8)
- setpoint = dtc->wb_dirty + 1;
- else
- setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
+ setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
}
if (dirty < setpoint) {
@@ -2319,7 +2298,7 @@ static int page_writeback_cpu_online(unsigned int cpu)
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-static struct ctl_table vm_page_writeback_sysctls[] = {
+static const struct ctl_table vm_page_writeback_sysctls[] = {
{
.procname = "dirty_background_ratio",
.data = &dirty_background_ratio,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 01eab25edf89..579789600a3c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1295,12 +1295,6 @@ void __meminit __free_pages_core(struct page *page, unsigned int order,
set_page_count(p, 0);
}
- /*
- * Freeing the page with debug_pagealloc enabled will try to
- * unmap it; some archs don't like double-unmappings, so
- * map it first.
- */
- debug_pagealloc_map_pages(page, nr_pages);
adjust_managed_page_count(page, nr_pages);
} else {
for (loop = 0; loop < nr_pages; loop++, p++) {
@@ -1508,7 +1502,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
int i;
set_page_private(page, 0);
- set_page_refcounted(page);
arch_alloc_page(page, order);
debug_pagealloc_map_pages(page, 1 << order);
@@ -1856,6 +1849,14 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
if (order >= pageblock_order)
return true;
+ /*
+ * Movable pages won't cause permanent fragmentation, so a small movable
+ * allocation only needs to temporarily steal unmovable or reclaimable
+ * pages that are closest to the requested size. Memory compaction may
+ * later form large contiguous pages again, so the next movable
+ * allocation may not need to steal at all. Unmovable and reclaimable
+ * allocations, however, need to actually steal pages.
+ */
if (order >= pageblock_order / 2 ||
start_mt == MIGRATE_RECLAIMABLE ||
start_mt == MIGRATE_UNMOVABLE ||
@@ -2592,9 +2593,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
return high;
}
-static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
- struct page *page, int migratetype,
- unsigned int order)
+static void free_frozen_page_commit(struct zone *zone,
+ struct per_cpu_pages *pcp, struct page *page, int migratetype,
+ unsigned int order)
{
int high, batch;
int pindex;
@@ -2643,7 +2644,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
/*
* Free a pcp page
*/
-void free_unref_page(struct page *page, unsigned int order)
+void free_frozen_pages(struct page *page, unsigned int order)
{
unsigned long __maybe_unused UP_flags;
struct per_cpu_pages *pcp;
@@ -2666,20 +2667,20 @@ void free_unref_page(struct page *page, unsigned int order)
* get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
+ zone = page_zone(page);
migratetype = get_pfnblock_migratetype(page, pfn);
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
- free_one_page(page_zone(page), page, pfn, order, FPI_NONE);
+ free_one_page(zone, page, pfn, order, FPI_NONE);
return;
}
migratetype = MIGRATE_MOVABLE;
}
- zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
- free_unref_page_commit(zone, pcp, page, migratetype, order);
+ free_frozen_page_commit(zone, pcp, page, migratetype, order);
pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, FPI_NONE);
@@ -2743,7 +2744,7 @@ void free_unref_folios(struct folio_batch *folios)
/*
* Free isolated pages directly to the
- * allocator, see comment in free_unref_page.
+ * allocator, see comment in free_frozen_pages.
*/
if (is_migrate_isolate(migratetype)) {
free_one_page(zone, &folio->page, pfn,
@@ -2774,7 +2775,7 @@ void free_unref_folios(struct folio_batch *folios)
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(&folio->page);
- free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+ free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
order);
}
@@ -3567,7 +3568,6 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
if (!page)
page = get_page_from_freelist(gfp_mask, order,
alloc_flags, ac);
-
return page;
}
@@ -4531,28 +4531,23 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
}
/*
- * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to an array
* @gfp: GFP flags for the allocation
* @preferred_nid: The preferred NUMA node ID to allocate from
* @nodemask: Set of nodes to allocate from, may be NULL
- * @nr_pages: The number of pages desired on the list or array
- * @page_list: Optional list to store the allocated pages
- * @page_array: Optional array to store the pages
+ * @nr_pages: The number of pages desired in the array
+ * @page_array: Array to store the pages
*
* This is a batched version of the page allocator that attempts to
- * allocate nr_pages quickly. Pages are added to page_list if page_list
- * is not NULL, otherwise it is assumed that the page_array is valid.
+ * allocate nr_pages quickly. Pages are added to the page_array.
*
- * For lists, nr_pages is the number of pages that should be allocated.
- *
- * For arrays, only NULL elements are populated with pages and nr_pages
+ * Note that only NULL elements are populated with pages and nr_pages
* is the maximum number of pages that will be stored in the array.
*
- * Returns the number of pages on the list or array.
+ * Returns the number of pages in the array.
*/
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
- struct list_head *page_list,
struct page **page_array)
{
struct page *page;
@@ -4570,7 +4565,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
*/
- while (page_array && nr_populated < nr_pages && page_array[nr_populated])
+ while (nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
/* No pages requested? */
@@ -4578,7 +4573,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
goto out;
/* Already populated array? */
- if (unlikely(page_array && nr_pages - nr_populated == 0))
+ if (unlikely(nr_pages - nr_populated == 0))
goto out;
/* Bulk allocator does not support memcg accounting. */
@@ -4660,7 +4655,7 @@ retry_this_zone:
while (nr_populated < nr_pages) {
/* Skip existing pages */
- if (page_array && page_array[nr_populated]) {
+ if (page_array[nr_populated]) {
nr_populated++;
continue;
}
@@ -4678,11 +4673,8 @@ retry_this_zone:
nr_account++;
prep_new_page(page, 0, gfp, 0);
- if (page_list)
- list_add(&page->lru, page_list);
- else
- page_array[nr_populated] = page;
- nr_populated++;
+ set_page_refcounted(page);
+ page_array[nr_populated++] = page;
}
pcp_spin_unlock(pcp);
@@ -4699,14 +4691,8 @@ failed_irq:
failed:
page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
- if (page) {
- if (page_list)
- list_add(&page->lru, page_list);
- else
- page_array[nr_populated] = page;
- nr_populated++;
- }
-
+ if (page)
+ page_array[nr_populated++] = page;
goto out;
}
EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
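A hedged usage sketch of the array-only convention documented above, calling the internal alloc_pages_bulk_noprof() entry point exactly as declared in this hunk; real callers normally go through the alloc_pages_bulk*() wrappers, and the batch size below is made up:

#include <linux/gfp.h>
#include <linux/mm.h>

#define EXAMPLE_NR_PAGES 16	/* hypothetical batch size */

static int example_bulk_alloc(struct page **pages)
{
	unsigned long got;

	/* Only NULL slots in 'pages' are filled; populated slots are skipped. */
	got = alloc_pages_bulk_noprof(GFP_KERNEL, NUMA_NO_NODE, NULL,
				      EXAMPLE_NR_PAGES, pages);
	return got == EXAMPLE_NR_PAGES ? 0 : -ENOMEM;
}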
@@ -4714,8 +4700,8 @@ EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
/*
* This is the 'heart' of the zoned buddy allocator.
*/
-struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
- int preferred_nid, nodemask_t *nodemask)
+struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
+ int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4768,7 +4754,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
out:
if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
- __free_pages(page, order);
+ free_frozen_pages(page, order);
page = NULL;
}
@@ -4777,6 +4763,18 @@ out:
return page;
}
+EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
+
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
+ int preferred_nid, nodemask_t *nodemask)
+{
+ struct page *page;
+
+ page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
+ if (page)
+ set_page_refcounted(page);
+ return page;
+}
EXPORT_SYMBOL(__alloc_pages_noprof);
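A minimal sketch of the frozen/refcounted split introduced above, assuming an mm-internal caller that manages page lifetime without per-page refcounts (the helper names below are hypothetical): frozen pages come back with refcount zero and must be returned with free_frozen_pages() rather than put_page() or __free_pages().

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_get_frozen(unsigned int order)
{
	/* Refcount stays at zero; the caller is the sole owner. */
	return __alloc_frozen_pages_noprof(GFP_KERNEL, order, NUMA_NO_NODE, NULL);
}

static void example_put_frozen(struct page *page, unsigned int order)
{
	/* Pairs with the frozen allocation above; no put_page() involved. */
	free_frozen_pages(page, order);
}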
struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
@@ -4837,11 +4835,11 @@ void __free_pages(struct page *page, unsigned int order)
struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
- free_unref_page(page, order);
+ free_frozen_pages(page, order);
else if (!head) {
pgalloc_tag_sub_pages(tag, (1 << order) - 1);
while (order-- > 0)
- free_unref_page(page + (1 << order), order);
+ free_frozen_pages(page + (1 << order), order);
}
}
EXPORT_SYMBOL(__free_pages);
@@ -5161,13 +5159,6 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
zonerefs->zone_idx = 0;
}
-/*
- * Build zonelists ordered by zone and nodes within zones.
- * This results in conserving DMA zone[s] until all Normal memory is
- * exhausted, but results in overflowing to remote node while memory
- * may still exist in local DMA zone.
- */
-
static void build_zonelists(pg_data_t *pgdat)
{
static int node_order[MAX_NUMNODES];
@@ -6175,7 +6166,7 @@ out:
return ret;
}
-static struct ctl_table page_alloc_sysctl_table[] = {
+static const struct ctl_table page_alloc_sysctl_table[] = {
{
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
@@ -6270,9 +6261,8 @@ static void alloc_contig_dump_pages(struct list_head *page_list)
* @migratetype: using migratetype to filter the type of migration in
* trace_mm_alloc_contig_migrate_range_info.
*/
-int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end,
- int migratetype)
+static int __alloc_contig_migrate_range(struct compact_control *cc,
+ unsigned long start, unsigned long end, int migratetype)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
@@ -6281,7 +6271,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
int ret = 0;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
- .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .gfp_mask = cc->gfp_mask,
.reason = MR_CONTIG_RANGE,
};
struct page *page;
@@ -6351,7 +6341,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
return (ret < 0) ? ret : 0;
}
-static void split_free_pages(struct list_head *list)
+static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
{
int order;
@@ -6362,7 +6352,8 @@ static void split_free_pages(struct list_head *list)
list_for_each_entry_safe(page, next, &list[order], lru) {
int i;
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ post_alloc_hook(page, order, gfp_mask);
+ set_page_refcounted(page);
if (!order)
continue;
@@ -6376,6 +6367,40 @@ static void split_free_pages(struct list_head *list)
}
}
+static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
+{
+ const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+ const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
+ const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+ /*
+ * We are given the range to allocate; node, mobility and placement
+ * hints are irrelevant at this point. We'll simply ignore them.
+ */
+ gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
+ __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
+
+ /*
+ * We only support most reclaim flags (but not NOFAIL/NORETRY), and
+ * selected action flags.
+ */
+ if (gfp_mask & ~(reclaim_mask | action_mask))
+ return -EINVAL;
+
+ /*
+ * Flags to control page compaction/migration/reclaim, to free up our
+ * page range. Migratable pages are movable, __GFP_MOVABLE is implied
+ * for them.
+ *
+ * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that
+ * to not degrade callers.
+ */
+ *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
+ __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+ return 0;
+}
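To make the masking above concrete, two illustrative calls (the helper is static to page_alloc.c, so this only sketches the flag arithmetic; cc_gfp is a local):

gfp_t cc_gfp;

/*
 * GFP_KERNEL is __GFP_RECLAIM | __GFP_IO | __GFP_FS, which lies entirely
 * inside reclaim_mask, so this returns 0 and cc_gfp ends up as GFP_KERNEL
 * plus __GFP_MOVABLE and __GFP_RETRY_MAYFAIL.
 */
__alloc_contig_verify_gfp_mask(GFP_KERNEL, &cc_gfp);

/*
 * GFP_ATOMIC contains __GFP_HIGH, which is neither a reclaim flag nor an
 * allowed action flag, so the whole request is rejected with -EINVAL.
 */
__alloc_contig_verify_gfp_mask(GFP_ATOMIC, &cc_gfp);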
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -6384,7 +6409,9 @@ static void split_free_pages(struct list_head *list)
* #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
* in range must have the same migratetype and it must
* be either of the two.
- * @gfp_mask: GFP mask to use during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
*
* The PFN range does not have to be pageblock aligned. The PFN range must
* belong to a single zone.
@@ -6410,11 +6437,14 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
- .gfp_mask = current_gfp_context(gfp_mask),
.alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
+ gfp_mask = current_gfp_context(gfp_mask);
+ if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
+ return -EINVAL;
+
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblock and max order pages may
@@ -6436,7 +6466,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
+ ret = start_isolate_page_range(start, end, migratetype, 0);
if (ret)
goto done;
@@ -6455,7 +6485,17 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
if (ret && ret != -EBUSY)
goto done;
- ret = 0;
+
+ /*
+ * When in-use hugetlb pages are migrated, they may simply be released
+ * back into the free hugepage pool instead of being returned to the
+ * buddy system. After the migration of in-use huge pages is completed,
+ * we will invoke replace_free_hugepage_folios() to ensure that these
+ * hugepages are properly released to the buddy system.
+ */
+ ret = replace_free_hugepage_folios(start, end);
+ if (ret)
+ goto done;
/*
* Pages from [start, end) are within a pageblock_nr_pages
@@ -6489,7 +6529,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
}
if (!(gfp_mask & __GFP_COMP)) {
- split_free_pages(cc.freepages);
+ split_free_pages(cc.freepages, gfp_mask);
/* Free head and tail (if any) */
if (start != outer_start)
@@ -6502,6 +6542,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
check_new_pages(head, order);
prep_new_page(head, order, gfp_mask, 0);
+ set_page_refcounted(head);
} else {
ret = -EINVAL;
WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
@@ -6556,7 +6597,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
/**
* alloc_contig_pages() -- tries to find and allocate contiguous range of pages
* @nr_pages: Number of contiguous pages to allocate
- * @gfp_mask: GFP mask to limit search and used during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
* @nid: Target node
* @nodemask: Mask for other possible nodes
*
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 3f7a203d35c6..d2423f30577e 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
if (page_ref_sub_and_test(page, count))
- free_unref_page(page, compound_order(page));
+ free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);
@@ -138,7 +138,7 @@ refill:
goto refill;
if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
- free_unref_page(page,
+ free_frozen_pages(page,
encoded_page_decode_order(encoded_page));
goto refill;
}
@@ -166,6 +166,6 @@ void page_frag_free(void *addr)
struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page)))
- free_unref_page(page, compound_order(page));
+ free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 41ea77f22011..947c7c7a3728 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -112,7 +112,7 @@ static void page_idle_clear_pte_refs(struct folio *folio)
}
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
u64 *out = (u64 *)buf;
@@ -157,7 +157,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
}
static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
const u64 *in = (u64 *)buf;
@@ -193,17 +193,17 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
return (char *)in - buf;
}
-static struct bin_attribute page_idle_bitmap_attr =
+static const struct bin_attribute page_idle_bitmap_attr =
__BIN_ATTR(bitmap, 0600,
page_idle_bitmap_read, page_idle_bitmap_write, 0);
-static struct bin_attribute *page_idle_bin_attrs[] = {
+static const struct bin_attribute *const page_idle_bin_attrs[] = {
&page_idle_bitmap_attr,
NULL,
};
static const struct attribute_group page_idle_attr_group = {
- .bin_attrs = page_idle_bin_attrs,
+ .bin_attrs_new = page_idle_bin_attrs,
.name = "page_idle",
};
diff --git a/mm/page_io.c b/mm/page_io.c
index 4b4ea8e49cf6..9b983de351f9 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -163,7 +163,6 @@ reprobe:
page_no = 1; /* force Empty message */
sis->max = page_no;
sis->pages = page_no - 1;
- sis->highest_bit = page_no - 1;
out:
return ret;
bad_bmap:
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7e04047977cf..c608e9d72865 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -286,7 +286,6 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
* @flags: isolation flags
- * @gfp_flags: GFP flags used for migrating pages
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
@@ -306,8 +305,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* the in-use page then splitting the free page.
*/
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
- int migratetype)
+ bool isolate_before, bool skip_isolation, int migratetype)
{
unsigned long start_pfn;
unsigned long isolate_pageblock;
@@ -444,8 +442,6 @@ failed:
* and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to
* isolate the range
- * @gfp_flags: GFP flags used for migrating pages that sit across the
- * range boundaries.
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -478,7 +474,7 @@ failed:
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags)
+ int migratetype, int flags)
{
unsigned long pfn;
struct page *page;
@@ -489,7 +485,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
+ ret = isolate_single_pageblock(isolate_start, flags, false,
skip_isolation, migratetype);
if (ret)
return ret;
@@ -498,7 +494,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
+ ret = isolate_single_pageblock(isolate_end, flags, true,
skip_isolation, migratetype);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
diff --git a/mm/percpu.c b/mm/percpu.c
index d8dd31a2e407..ac61e3fc5f15 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1359,10 +1359,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
/* allocate chunk */
alloc_size = struct_size(chunk, populated,
BITS_TO_LONGS(region_size >> PAGE_SHIFT));
- chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!chunk)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ chunk = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
INIT_LIST_HEAD(&chunk->list);
@@ -1374,24 +1371,14 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
region_bits = pcpu_chunk_map_bits(chunk);
alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
- chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!chunk->alloc_map)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ chunk->alloc_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size =
BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
- chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!chunk->bound_map)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ chunk->bound_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
- chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!chunk->md_blocks)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
-
+ chunk->md_blocks = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
#ifdef NEED_PCPUOBJ_EXT
/* first chunk is free to use */
chunk->obj_exts = NULL;
@@ -2595,28 +2582,16 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
/* process group information and build config tables accordingly */
alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
- group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!group_offsets)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ group_offsets = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
- group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!group_sizes)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ group_sizes = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
- unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!unit_map)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ unit_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
- unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
- if (!unit_off)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- alloc_size);
+ unit_off = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -2685,12 +2660,9 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
pcpu_free_slot = pcpu_sidelined_slot + 1;
pcpu_to_depopulate_slot = pcpu_free_slot + 1;
pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
- pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
+ pcpu_chunk_lists = memblock_alloc_or_panic(pcpu_nr_slots *
sizeof(pcpu_chunk_lists[0]),
SMP_CACHE_BYTES);
- if (!pcpu_chunk_lists)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
@@ -3155,25 +3127,19 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
pmd_t *pmd;
if (pgd_none(*pgd)) {
- p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
- if (!p4d)
- goto err_alloc;
+ p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
pgd_populate(&init_mm, pgd, p4d);
}
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
- pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
- if (!pud)
- goto err_alloc;
+ pud = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
p4d_populate(&init_mm, p4d, pud);
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
- pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
- if (!pmd)
- goto err_alloc;
+ pmd = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
pud_populate(&init_mm, pud, pmd);
}
@@ -3181,16 +3147,11 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
if (!pmd_present(*pmd)) {
pte_t *new;
- new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
- if (!new)
- goto err_alloc;
+ new = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
return;
-
-err_alloc:
- panic("%s: Failed to allocate memory\n", __func__);
}
/**
@@ -3237,10 +3198,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
- pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
- if (!pages)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- pages_size);
+ pages = memblock_alloc_or_panic(pages_size, SMP_CACHE_BYTES);
/* allocate pages */
j = 0;
diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c
new file mode 100644
index 000000000000..7e9455a18aae
--- /dev/null
+++ b/mm/pt_reclaim.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/hugetlb.h>
+#include <asm-generic/tlb.h>
+#include <asm/pgalloc.h>
+
+#include "internal.h"
+
+bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
+ struct zap_details *details)
+{
+ return details && details->reclaim_pt && (end - start >= PMD_SIZE);
+}
+
+bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval)
+{
+ spinlock_t *pml = pmd_lockptr(mm, pmd);
+
+ if (!spin_trylock(pml))
+ return false;
+
+ *pmdval = pmdp_get_lockless(pmd);
+ pmd_clear(pmd);
+ spin_unlock(pml);
+
+ return true;
+}
+
+void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
+ pmd_t pmdval)
+{
+ pte_free_tlb(tlb, pmd_pgtable(pmdval), addr);
+ mm_dec_nr_ptes(mm);
+}
+
+void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
+ struct mmu_gather *tlb)
+{
+ pmd_t pmdval;
+ spinlock_t *pml, *ptl = NULL;
+ pte_t *start_pte, *pte;
+ int i;
+
+ pml = pmd_lock(mm, pmd);
+ start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
+ if (!start_pte)
+ goto out_ptl;
+ if (ptl != pml)
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+
+ /* Check whether this is an empty PTE page */
+ for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
+ if (!pte_none(ptep_get(pte)))
+ goto out_ptl;
+ }
+ pte_unmap(start_pte);
+
+ pmd_clear(pmd);
+
+ if (ptl != pml)
+ spin_unlock(ptl);
+ spin_unlock(pml);
+
+ free_pte(mm, addr, tlb, pmdval);
+
+ return;
+out_ptl:
+ if (start_pte)
+ pte_unmap_unlock(start_pte, ptl);
+ if (ptl != pml)
+ spin_unlock(pml);
+}
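A rough sketch of how a zap-path caller is expected to use these helpers; the surrounding zap machinery and the details->reclaim_pt opt-in are assumed from context rather than shown in this file:

/* Illustrative only; the real caller lives in the generic zap path. */
static void example_zap_and_reclaim(struct mmu_gather *tlb, struct mm_struct *mm,
				    pmd_t *pmd, unsigned long addr,
				    unsigned long end, struct zap_details *details)
{
	/* ... zap the PTEs in [addr, end) first ... */

	/*
	 * Only callers that opted in (details->reclaim_pt) and cover at
	 * least a full PMD try to drop the now-empty PTE page; the empty
	 * check and the locking are handled inside try_to_free_pte().
	 */
	if (reclaim_pt_is_enabled(addr, end, details))
		try_to_free_pte(mm, pmd, addr, tlb);
}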
diff --git a/mm/readahead.c b/mm/readahead.c
index e151f4b13ca4..220155a5c964 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -128,6 +128,7 @@
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
+#include <linux/fsnotify.h>
#include "internal.h"
@@ -158,20 +159,10 @@ static void read_pages(struct readahead_control *rac)
if (aops->readahead) {
aops->readahead(rac);
- /*
- * Clean up the remaining folios. The sizes in ->ra
- * may be used to size the next readahead, so make sure
- * they accurately reflect what happened.
- */
+ /* Clean up the remaining folios. */
while ((folio = readahead_folio(rac)) != NULL) {
- unsigned long nr = folio_nr_pages(folio);
-
folio_get(folio);
- rac->ra->size -= nr;
- if (rac->ra->async_size >= nr) {
- rac->ra->async_size -= nr;
- filemap_remove_folio(folio);
- }
+ filemap_remove_folio(folio);
folio_unlock(folio);
folio_put(folio);
}
@@ -188,6 +179,18 @@ static void read_pages(struct readahead_control *rac)
BUG_ON(readahead_count(rac));
}
+static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
+ gfp_t gfp_mask, unsigned int order)
+{
+ struct folio *folio;
+
+ folio = filemap_alloc_folio(gfp_mask, order);
+ if (folio && ractl->dropbehind)
+ __folio_set_dropbehind(folio);
+
+ return folio;
+}
+
/**
* page_cache_ra_unbounded - Start unchecked readahead.
* @ractl: Readahead control.
@@ -265,8 +268,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
continue;
}
- folio = filemap_alloc_folio(gfp_mask,
- mapping_min_folio_order(mapping));
+ folio = ractl_alloc_folio(ractl, gfp_mask,
+ mapping_min_folio_order(mapping));
if (!folio)
break;
@@ -436,7 +439,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
pgoff_t mark, unsigned int order, gfp_t gfp)
{
int err;
- struct folio *folio = filemap_alloc_folio(gfp, order);
+ struct folio *folio = ractl_alloc_folio(ractl, gfp, order);
if (!folio)
return -ENOMEM;
@@ -458,7 +461,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
struct file_ra_state *ra, unsigned int new_order)
{
struct address_space *mapping = ractl->mapping;
- pgoff_t index = readahead_index(ractl);
+ pgoff_t start = readahead_index(ractl);
+ pgoff_t index = start;
unsigned int min_order = mapping_min_folio_order(mapping);
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
@@ -516,12 +520,18 @@ void page_cache_ra_order(struct readahead_control *ractl,
/*
* If there were already pages in the page cache, then we may have
* left some gaps. Let the regular readahead code take care of this
- * situation.
+ * situation below.
*/
if (!err)
return;
fallback:
- do_page_cache_ra(ractl, ra->size, ra->async_size);
+ /*
+ * ->readahead() may have updated the readahead window size, so we have
+ * to check that there's still something left to read.
+ */
+ if (ra->size > index - start)
+ do_page_cache_ra(ractl, ra->size - (index - start),
+ ra->async_size);
}
static unsigned long ractl_max_pages(struct readahead_control *ractl,
@@ -549,6 +559,15 @@ void page_cache_sync_ra(struct readahead_control *ractl,
pgoff_t prev_index, miss;
/*
+ * If we have pre-content watches we need to disable readahead to make
+ * sure that we don't find zero-filled pages in the cache that we never
+ * emitted events for. Filesystems supporting HSM must make sure not to
+ * call this function with ractl->file unset for files handled by HSM.
+ */
+ if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
+ return;
+
+ /*
* Even if readahead is disabled, issue this request as readahead
* as we'll need it to satisfy the requested range. The forced
* readahead will do the right thing and limit the read to just the
@@ -626,6 +645,10 @@ void page_cache_async_ra(struct readahead_control *ractl,
if (!ra->ra_pages)
return;
+ /* See the comment in page_cache_sync_ra. */
+ if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
+ return;
+
/*
* Same bit is used for PG_readahead and PG_reclaim.
*/
@@ -754,7 +777,7 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, min_order);
+ folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
if (!folio)
return;
@@ -783,7 +806,7 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, min_order);
+ folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
if (!folio)
return;
diff --git a/mm/rodata_test.c b/mm/rodata_test.c
index 6d783436951f..e7173fcd210c 100644
--- a/mm/rodata_test.c
+++ b/mm/rodata_test.c
@@ -12,7 +12,8 @@
#include <linux/mm.h>
#include <asm/sections.h>
-static const int rodata_test_data = 0xC3;
+#define TEST_VALUE 0xC3
+static const int rodata_test_data = TEST_VALUE;
void rodata_test(void)
{
@@ -20,7 +21,7 @@ void rodata_test(void)
/* test 1: read the value */
/* If this test fails, some previous testrun has clobbered the state */
- if (!rodata_test_data) {
+ if (unlikely(READ_ONCE(rodata_test_data) != TEST_VALUE)) {
pr_err("test 1 fails (start data)\n");
return;
}
@@ -33,7 +34,7 @@ void rodata_test(void)
}
/* test 3: check the value hasn't changed */
- if (rodata_test_data == zero) {
+ if (unlikely(READ_ONCE(rodata_test_data) != TEST_VALUE)) {
pr_err("test data was changed\n");
return;
}
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 399552814fd0..1b0a214ee558 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -195,14 +195,13 @@ static struct file *secretmem_file_create(unsigned long flags)
struct file *file;
struct inode *inode;
const char *anon_name = "[secretmem]";
- const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
int err;
inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
- err = security_inode_init_security_anon(inode, &qname, NULL);
+ err = security_inode_init_security_anon(inode, &QSTR(anon_name), NULL);
if (err) {
file = ERR_PTR(err);
goto err_free_inode;
diff --git a/mm/shmem.c b/mm/shmem.c
index 532afd8e049c..4ea6109a8043 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -553,38 +553,105 @@ static bool shmem_confirm_swap(struct address_space *mapping,
/* ifdef here to avoid bloating shmem.o when not necessary */
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+static int tmpfs_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+/**
+ * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
+ * @mapping: Target address_space.
+ * @index: The page index.
+ * @write_end: End of a write; may extend the inode size.
+ *
+ * This returns huge orders for folios (when supported) based on the file size
+ * which the mapping currently allows at the given index. The index is relevant
+ * due to alignment considerations the mapping might have. The highest order
+ * returned may be smaller than the order implied by the size passed in.
+ *
+ * Return: The orders.
+ */
+static inline unsigned int
+shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
{
+ unsigned int order;
+ size_t size;
+
+ if (!mapping_large_folio_support(mapping) || !write_end)
+ return 0;
+
+ /* Calculate the write size based on the write_end */
+ size = write_end - (index << PAGE_SHIFT);
+ order = filemap_get_order(size);
+ if (!order)
+ return 0;
+
+ /* If we're not aligned, allocate a smaller folio */
+ if (index & ((1UL << order) - 1))
+ order = __ffs(index);
+
+ order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
+ return order > 0 ? BIT(order + 1) - 1 : 0;
+}
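Worked example for the helper above, assuming filemap_get_order() maps a 2MB write to order 9 on a 4K-page system: a write ending 2MB into the file at index 0 gives size = 2MB and order = 9, the index is aligned, so the function returns BIT(10) - 1, i.e. every order from 0 through 9 is allowed; the same write landing at index 3 is misaligned for any large order, __ffs(3) is 0, and the function returns 0, meaning order-0 only.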
+
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
+ 0 : BIT(HPAGE_PMD_ORDER);
+ unsigned long within_size_orders;
+ unsigned int order;
+ pgoff_t aligned_index;
loff_t i_size;
- if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
- return false;
if (!S_ISREG(inode->i_mode))
- return false;
+ return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
- return false;
+ return 0;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
- return true;
+ return maybe_pmd_order;
+ /*
+ * The huge order allocation for anon shmem is controlled through
+ * the mTHP interface, so we still use PMD-sized huge order to
+ * check whether global control is enabled.
+ *
+ * For tmpfs mmap()'s huge order, we still use PMD-sized order to
+ * allocate huge pages due to lack of a write size hint.
+ *
+ * Otherwise, tmpfs derives a highest-order hint from the size seen on
+ * the write and fallocate paths, and then tries each allowable huge
+ * order.
+ */
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
- return true;
+ if (vma)
+ return maybe_pmd_order;
+
+ return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
case SHMEM_HUGE_WITHIN_SIZE:
- index = round_up(index + 1, HPAGE_PMD_NR);
- i_size = max(write_end, i_size_read(inode));
- i_size = round_up(i_size, PAGE_SIZE);
- if (i_size >> PAGE_SHIFT >= index)
- return true;
+ if (vma)
+ within_size_orders = maybe_pmd_order;
+ else
+ within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
+ index, write_end);
+
+ order = highest_order(within_size_orders);
+ while (within_size_orders) {
+ aligned_index = round_up(index + 1, 1 << order);
+ i_size = max(write_end, i_size_read(inode));
+ i_size = round_up(i_size, PAGE_SIZE);
+ if (i_size >> PAGE_SHIFT >= aligned_index)
+ return within_size_orders;
+
+ order = next_order(&within_size_orders, order);
+ }
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
- return true;
+ return maybe_pmd_order;
fallthrough;
default:
- return false;
+ return 0;
}
}
@@ -779,11 +846,12 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
return 0;
}
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
- return false;
+ return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1180,7 +1248,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
STATX_ATTR_NODUMP);
generic_fillattr(idmap, request_mask, inode, stat);
- if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1690,22 +1758,18 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
pgoff_t aligned_index;
- bool global_huge;
+ unsigned int global_orders;
loff_t i_size;
int order;
if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
return 0;
- global_huge = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vm_flags);
- if (!vma || !vma_is_anon_shmem(vma)) {
- /*
- * For tmpfs, we now only support PMD sized THP if huge page
- * is enabled, otherwise fallback to order 0.
- */
- return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
- }
+ global_orders = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
+ /* Tmpfs huge pages allocation */
+ if (!vma || !vma_is_anon_shmem(vma))
+ return global_orders;
/*
* Following the 'deny' semantics of the top level, force the huge
@@ -1737,7 +1801,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
- if (global_huge)
+ if (global_orders > 0)
mask |= READ_ONCE(huge_shmem_orders_inherit);
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
@@ -1903,6 +1967,65 @@ unlock:
return ERR_PTR(error);
}
+static struct folio *shmem_swap_alloc_folio(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ swp_entry_t entry, int order, gfp_t gfp)
+{
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ struct folio *new;
+ void *shadow;
+ int nr_pages;
+
+ /*
+ * We have arrived here because our zones are constrained, so don't
+ * limit chance of success with further cpuset and node constraints.
+ */
+ gfp &= ~GFP_CONSTRAINT_MASK;
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
+ gfp_t huge_gfp = vma_thp_gfp_mask(vma);
+
+ gfp = limit_gfp_mask(huge_gfp, gfp);
+ }
+
+ new = shmem_alloc_folio(gfp, order, info, index);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ nr_pages = folio_nr_pages(new);
+ if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
+ gfp, entry)) {
+ folio_put(new);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Prevent parallel swapin from proceeding with the swap cache flag.
+ *
+ * There is one other possible concurrent scenario: the swap cache flag
+ * of a large folio has already been set by swapcache_prepare(), while
+ * another thread may have already split the large swap entry stored in
+ * the shmem mapping. In that case, shmem_add_to_page_cache() will
+ * detect the concurrent swapin and return -EEXIST.
+ */
+ if (swapcache_prepare(entry, nr_pages)) {
+ folio_put(new);
+ return ERR_PTR(-EEXIST);
+ }
+
+ __folio_set_locked(new);
+ __folio_set_swapbacked(new);
+ new->swap = entry;
+
+ mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
+ shadow = get_shadow_from_swap_cache(entry);
+ if (shadow)
+ workingset_refault(new, shadow);
+ folio_add_lru(new);
+ swap_read_folio(new, NULL);
+ return new;
+}
+
/*
* When a page is moved from swapcache to shmem filecache (either by the
* usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
@@ -2006,7 +2129,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
}
static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
- struct folio *folio, swp_entry_t swap)
+ struct folio *folio, swp_entry_t swap,
+ bool skip_swapcache)
{
struct address_space *mapping = inode->i_mapping;
swp_entry_t swapin_error;
@@ -2022,7 +2146,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
nr_pages = folio_nr_pages(folio);
folio_wait_writeback(folio);
- delete_from_swap_cache(folio);
+ if (!skip_swapcache)
+ delete_from_swap_cache(folio);
/*
* Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
* won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
@@ -2126,6 +2251,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct shmem_inode_info *info = SHMEM_I(inode);
struct swap_info_struct *si;
struct folio *folio = NULL;
+ bool skip_swapcache = false;
swp_entry_t swap;
int error, nr_pages;
@@ -2147,6 +2273,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
if (!folio) {
+ int order = xa_get_order(&mapping->i_pages, index);
+ bool fallback_order0 = false;
int split_order;
/* Or update major stats only when swapin succeeds?? */
@@ -2157,6 +2285,33 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
/*
+ * If uffd is active for the vma, we need per-page fault
+ * fidelity to maintain the uffd semantics, so fall back to
+ * swapping in an order-0 folio; the same applies when zswap
+ * is enabled.
+ */
+ if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
+ !zswap_never_enabled()))
+ fallback_order0 = true;
+
+ /* Skip swapcache for synchronous device. */
+ if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
+ folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
+ if (!IS_ERR(folio)) {
+ skip_swapcache = true;
+ goto alloced;
+ }
+
+ /*
+ * Fall back to swapping in an order-0 folio unless the swap entry
+ * already exists.
+ */
+ error = PTR_ERR(folio);
+ folio = NULL;
+ if (error == -EEXIST)
+ goto failed;
+ }
+
+ /*
* Now swap device can only swap in order 0 folio, then we
* should split the large swap entry stored in the pagecache
* if necessary.
@@ -2186,9 +2341,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
}
+alloced:
/* We have to do this with folio locked to prevent races */
folio_lock(folio);
- if (!folio_test_swapcache(folio) ||
+ if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
folio->swap.val != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST;
@@ -2224,7 +2380,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
- delete_from_swap_cache(folio);
+ if (skip_swapcache) {
+ folio->swap.val = 0;
+ swapcache_clear(si, swap, nr_pages);
+ } else {
+ delete_from_swap_cache(folio);
+ }
folio_mark_dirty(folio);
swap_free_nr(swap, nr_pages);
put_swap_device(si);
@@ -2235,8 +2396,11 @@ failed:
if (!shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
if (error == -EIO)
- shmem_set_folio_swapin_error(inode, index, folio, swap);
+ shmem_set_folio_swapin_error(inode, index, folio, swap,
+ skip_swapcache);
unlock:
+ if (skip_swapcache)
+ swapcache_clear(si, swap, folio_nr_pages(folio));
if (folio) {
folio_unlock(folio);
folio_put(folio);
@@ -2752,12 +2916,6 @@ out_nomem:
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
- struct shmem_inode_info *info = SHMEM_I(inode);
- int ret;
-
- ret = seal_check_write(info->seals, vma);
- if (ret)
- return ret;
file_accessed(file);
/* This is anonymous shared memory if it is unlinked at the time of mmap */
@@ -4585,48 +4743,37 @@ bad_value:
return invalfc(fc, "Bad value for '%s'", param->key);
}
-static int shmem_parse_options(struct fs_context *fc, void *data)
+static char *shmem_next_opt(char **s)
{
- char *options = data;
+ char *sbegin = *s;
+ char *p;
- if (options) {
- int err = security_sb_eat_lsm_opts(options, &fc->security);
- if (err)
- return err;
- }
+ if (sbegin == NULL)
+ return NULL;
- while (options != NULL) {
- char *this_char = options;
- for (;;) {
- /*
- * NUL-terminate this option: unfortunately,
- * mount options form a comma-separated list,
- * but mpol's nodelist may also contain commas.
- */
- options = strchr(options, ',');
- if (options == NULL)
- break;
- options++;
- if (!isdigit(*options)) {
- options[-1] = '\0';
- break;
- }
- }
- if (*this_char) {
- char *value = strchr(this_char, '=');
- size_t len = 0;
- int err;
-
- if (value) {
- *value++ = '\0';
- len = strlen(value);
- }
- err = vfs_parse_fs_string(fc, this_char, value, len);
- if (err < 0)
- return err;
+ /*
+ * NUL-terminate this option: unfortunately,
+ * mount options form a comma-separated list,
+ * but mpol's nodelist may also contain commas.
+ */
+ for (;;) {
+ p = strchr(*s, ',');
+ if (p == NULL)
+ break;
+ *s = p + 1;
+ if (!isdigit(*(p+1))) {
+ *p = '\0';
+ return sbegin;
}
}
- return 0;
+
+ *s = NULL;
+ return sbegin;
+}
+
+static int shmem_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, shmem_next_opt);
}
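To see why the digit check matters, take the (illustrative) mount data string "mpol=bind:0,2,size=1g": the comma after "0" is followed by the digit '2', so it is left in place and the first token remains "mpol=bind:0,2"; the comma before "size" is followed by 's', so it terminates the token, and vfs_parse_monolithic_sep() ends up handing "mpol=bind:0,2" and "size=1g" to the option parser separately.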
/*
@@ -4893,7 +5040,12 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
sbinfo->gid = ctx->gid;
sbinfo->full_inums = ctx->full_inums;
sbinfo->mode = ctx->mode;
- sbinfo->huge = ctx->huge;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (ctx->seen & SHMEM_SEEN_HUGE)
+ sbinfo->huge = ctx->huge;
+ else
+ sbinfo->huge = tmpfs_huge;
+#endif
sbinfo->mpol = ctx->mpol;
ctx->mpol = NULL;
@@ -4971,7 +5123,7 @@ static const struct fs_context_operations shmem_fs_context_ops = {
.free = shmem_free_fc,
.get_tree = shmem_get_tree,
#ifdef CONFIG_TMPFS
- .parse_monolithic = shmem_parse_options,
+ .parse_monolithic = shmem_parse_monolithic,
.parse_param = shmem_parse_one,
.reconfigure = shmem_reconfigure,
#endif
@@ -5444,6 +5596,21 @@ static int __init setup_transparent_hugepage_shmem(char *str)
}
__setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
+static int __init setup_transparent_hugepage_tmpfs(char *str)
+{
+ int huge;
+
+ huge = shmem_parse_huge(str);
+ if (huge < 0) {
+ pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n");
+ return huge;
+ }
+
+ tmpfs_huge = huge;
+ return 1;
+}
+__setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs);
+
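For reference, the new early parameter is used on the kernel command line in the same way as the transparent_hugepage_shmem= parameter above; the accepted keywords are whatever shmem_parse_huge() accepts, so for example (illustrative):

	transparent_hugepage_tmpfs=within_size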
static char str_dup[PAGE_SIZE] __initdata;
static int __init setup_thp_shmem(char *str)
{
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 4a85b94d12ce..794bd433cce0 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -195,8 +195,6 @@ int shrinker_debugfs_add(struct shrinker *shrinker)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
- struct dentry *entry;
- char buf[128];
const char *new, *old;
va_list ap;
int ret = 0;
@@ -213,18 +211,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
old = shrinker->name;
shrinker->name = new;
- if (shrinker->debugfs_entry) {
- snprintf(buf, sizeof(buf), "%s-%d", shrinker->name,
- shrinker->debugfs_id);
-
- entry = debugfs_rename(shrinker_debugfs_root,
- shrinker->debugfs_entry,
- shrinker_debugfs_root, buf);
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- shrinker->debugfs_entry = entry;
- }
+ ret = debugfs_change_name(shrinker->debugfs_entry, "%s-%d",
+ shrinker->name, shrinker->debugfs_id);
mutex_unlock(&shrinker_mutex);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 69f2d19010de..4030907b6b7d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1944,7 +1944,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
return;
}
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);
diff --git a/mm/slub.c b/mm/slub.c
index c2151c9fee22..1f50129dcfb3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2311,7 +2311,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
* We have to do this manually because the rcu_head is
* not located inside the object.
*/
- kasan_record_aux_stack_noalloc(x);
+ kasan_record_aux_stack(x);
delayed_free->object = x;
call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
@@ -2420,17 +2420,15 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
unsigned int order = oo_order(oo);
if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_pages(flags, order);
+ folio = (struct folio *)alloc_frozen_pages(flags, order);
else
- folio = (struct folio *)__alloc_pages_node(node, flags, order);
+ folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
if (!folio)
return NULL;
slab = folio_slab(folio);
__folio_set_slab(folio);
- /* Make the flag visible before any changes to folio->mapping */
- smp_wmb();
if (folio_is_pfmemalloc(folio))
slab_set_pfmemalloc(slab);
@@ -2651,12 +2649,10 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
__slab_clear_pfmemalloc(slab);
folio->mapping = NULL;
- /* Make the mapping reset visible before clearing the flag */
- smp_wmb();
__folio_clear_slab(folio);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
- __free_pages(&folio->page, order);
+ free_frozen_pages(&folio->page, order);
}
static void rcu_free_slab(struct rcu_head *h)
@@ -7513,10 +7509,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
return -ENOMEM;
}
- if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
- alloc = TRACK_ALLOC;
- else
- alloc = TRACK_FREE;
+ alloc = debugfs_get_aux_num(filep);
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
bitmap_free(obj_map);
@@ -7572,11 +7565,11 @@ static void debugfs_slab_add(struct kmem_cache *s)
slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
- debugfs_create_file("alloc_traces", 0400,
- slab_cache_dir, s, &slab_debugfs_fops);
+ debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
+ TRACK_ALLOC, &slab_debugfs_fops);
- debugfs_create_file("free_traces", 0400,
- slab_cache_dir, s, &slab_debugfs_fops);
+ debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
+ TRACK_FREE, &slab_debugfs_fops);
}
void debugfs_slab_release(struct kmem_cache *s)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cec67c5f37d8..3287ebadd167 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -31,6 +31,8 @@
#include <asm/dma.h>
#include <asm/pgalloc.h>
+#include "internal.h"
+
/*
* Allocate a block of memory to be used to back the virtual memory map
* or to back the page tables that are used to create the mapping.
@@ -42,8 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return memblock_alloc_try_nid_raw(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ return memmap_alloc(size, align, goal, node, false);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
diff --git a/mm/sparse.c b/mm/sparse.c
index 13b6624d3562..133b033d0cba 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -257,10 +257,7 @@ static void __init memblocks_present(void)
size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT);
- mem_section = memblock_alloc(size, align);
- if (!mem_section)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, align);
+ mem_section = memblock_alloc_or_panic(size, align);
}
#endif
diff --git a/mm/swap.c b/mm/swap.c
index 10decd9dffa1..fc8281ef4241 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -109,7 +109,7 @@ void __folio_put(struct folio *folio)
page_cache_release(folio);
folio_unqueue_deferred_split(folio);
mem_cgroup_uncharge(folio);
- free_unref_page(&folio->page, folio_order(folio));
+ free_frozen_pages(&folio->page, folio_order(folio));
}
EXPORT_SYMBOL(__folio_put);
@@ -379,37 +379,58 @@ static void __lru_cache_activate_folio(struct folio *folio)
}
#ifdef CONFIG_LRU_GEN
-static void folio_inc_refs(struct folio *folio)
+
+static void lru_gen_inc_refs(struct folio *folio)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
if (folio_test_unevictable(folio))
return;
+ /* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio)) {
- folio_set_referenced(folio);
- return;
- }
-
- if (!folio_test_workingset(folio)) {
- folio_set_workingset(folio);
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
return;
}
- /* see the comment on MAX_NR_TIERS */
do {
- new_flags = old_flags & LRU_REFS_MASK;
- if (new_flags == LRU_REFS_MASK)
- break;
+ if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) {
+ if (!folio_test_workingset(folio))
+ folio_set_workingset(folio);
+ return;
+ }
- new_flags += BIT(LRU_REFS_PGOFF);
- new_flags |= old_flags & ~LRU_REFS_MASK;
+ new_flags = old_flags + BIT(LRU_REFS_PGOFF);
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}
-#else
-static void folio_inc_refs(struct folio *folio)
+
+static bool lru_gen_clear_refs(struct folio *folio)
+{
+ struct lru_gen_folio *lrugen;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+
+ if (gen < 0)
+ return true;
+
+ set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
+
+ lrugen = &folio_lruvec(folio)->lrugen;
+ /* whether we can skip shuffling under the LRU lock */
+ return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static void lru_gen_inc_refs(struct folio *folio)
+{
+}
+
+static bool lru_gen_clear_refs(struct folio *folio)
{
+ return false;
}
+
#endif /* CONFIG_LRU_GEN */
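To make the reworked counting above concrete, here is a minimal userspace sketch of the saturating-increment pattern that lru_gen_inc_refs() follows: bump a small counter embedded in a flags word with a cmpxchg loop, and stop once it saturates (at which point the kernel code sets PG_workingset instead). The constants and the harness are illustrative, not the kernel's LRU_REFS_* definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REFS_SHIFT	2			/* illustrative bit offset of the counter */
#define REFS_MASK	(0x3u << REFS_SHIFT)	/* two-bit saturating counter field */

/* Sketch: bump the counter field without disturbing the other flag bits. */
static bool inc_refs(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_load(flags), new;

	do {
		if ((old & REFS_MASK) == REFS_MASK)
			return false;		/* saturated: the kernel sets PG_workingset here */
		new = old + (1u << REFS_SHIFT);	/* add one step, other bits carried over unchanged */
	} while (!atomic_compare_exchange_weak(flags, &old, new));

	return true;
}

int main(void)
{
	_Atomic unsigned int flags = 0;

	for (int i = 0; i < 5; i++)
		printf("inc %d -> %s\n", i, inc_refs(&flags) ? "bumped" : "saturated");
	return 0;
}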
/**
@@ -427,8 +448,10 @@ static void folio_inc_refs(struct folio *folio)
*/
void folio_mark_accessed(struct folio *folio)
{
+ if (folio_test_dropbehind(folio))
+ return;
if (lru_gen_enabled()) {
- folio_inc_refs(folio);
+ lru_gen_inc_refs(folio);
return;
}
@@ -474,7 +497,7 @@ void folio_add_lru(struct folio *folio)
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- /* see the comment in lru_gen_add_folio() */
+ /* see the comment in lru_gen_folio_seq() */
if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
@@ -524,7 +547,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
*/
static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
- bool active = folio_test_active(folio);
+ bool active = folio_test_active(folio) || lru_gen_enabled();
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio))
@@ -589,7 +612,10 @@ static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
- folio_clear_referenced(folio);
+ if (lru_gen_enabled())
+ lru_gen_clear_refs(folio);
+ else
+ folio_clear_referenced(folio);
/*
* Lazyfree folios are clean anonymous folios. They have
* the swapbacked flag cleared, to distinguish them from normal
@@ -657,6 +683,9 @@ void deactivate_file_folio(struct folio *folio)
if (folio_test_unevictable(folio))
return;
+ if (lru_gen_enabled() && lru_gen_clear_refs(folio))
+ return;
+
folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
@@ -670,7 +699,10 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ if (folio_test_unevictable(folio))
+ return;
+
+ if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate, true);
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index da1278f0563b..be39078f255b 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -6,149 +6,106 @@
#include <linux/swapops.h> /* depends on mm.h include */
static DEFINE_MUTEX(swap_cgroup_mutex);
-struct swap_cgroup_ctrl {
- struct page **map;
- unsigned long length;
- spinlock_t lock;
-};
-
-static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
+/* Pack the cgroup ids (unsigned short) of two entries into one swap_cgroup (atomic_t) */
+#define ID_PER_SC (sizeof(struct swap_cgroup) / sizeof(unsigned short))
+#define ID_SHIFT (BITS_PER_TYPE(unsigned short))
+#define ID_MASK (BIT(ID_SHIFT) - 1)
struct swap_cgroup {
- unsigned short id;
+ atomic_t ids;
};
-#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
-
-/*
- * SwapCgroup implements "lookup" and "exchange" operations.
- * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
- * against SwapCache. At swap_free(), this is accessed directly from swap.
- *
- * This means,
- * - we have no race in "exchange" when we're accessed via SwapCache because
- * SwapCache(and its swp_entry) is under lock.
- * - When called via swap_free(), there is no user of this entry and no race.
- * Then, we don't need lock around "exchange".
- *
- * TODO: we can push these buffers out to HIGHMEM.
- */
-
-/*
- * allocate buffer for swap_cgroup.
- */
-static int swap_cgroup_prepare(int type)
-{
- struct page *page;
- struct swap_cgroup_ctrl *ctrl;
- unsigned long idx, max;
-
- ctrl = &swap_cgroup_ctrl[type];
-
- for (idx = 0; idx < ctrl->length; idx++) {
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
- goto not_enough_page;
- ctrl->map[idx] = page;
- if (!(idx % SWAP_CLUSTER_MAX))
- cond_resched();
- }
- return 0;
-not_enough_page:
- max = idx;
- for (idx = 0; idx < max; idx++)
- __free_page(ctrl->map[idx]);
+struct swap_cgroup_ctrl {
+ struct swap_cgroup *map;
+};
- return -ENOMEM;
-}
+static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
-static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
- pgoff_t offset)
+static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map,
+ pgoff_t offset)
{
- struct page *mappage;
- struct swap_cgroup *sc;
+ unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
+ unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids);
+
+ BUILD_BUG_ON(!is_power_of_2(ID_PER_SC));
+ BUILD_BUG_ON(sizeof(struct swap_cgroup) != sizeof(atomic_t));
- mappage = ctrl->map[offset / SC_PER_PAGE];
- sc = page_address(mappage);
- return sc + offset % SC_PER_PAGE;
+ return (old_ids >> shift) & ID_MASK;
}
-static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
- struct swap_cgroup_ctrl **ctrlp)
+static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
+ pgoff_t offset,
+ unsigned short new_id)
{
- pgoff_t offset = swp_offset(ent);
- struct swap_cgroup_ctrl *ctrl;
-
- ctrl = &swap_cgroup_ctrl[swp_type(ent)];
- if (ctrlp)
- *ctrlp = ctrl;
- return __lookup_swap_cgroup(ctrl, offset);
+ unsigned short old_id;
+ struct swap_cgroup *sc = &map[offset / ID_PER_SC];
+ unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
+ unsigned int new_ids, old_ids = atomic_read(&sc->ids);
+
+ do {
+ old_id = (old_ids >> shift) & ID_MASK;
+ new_ids = (old_ids & ~(ID_MASK << shift));
+ new_ids |= ((unsigned int)new_id) << shift;
+ } while (!atomic_try_cmpxchg(&sc->ids, &old_ids, new_ids));
+
+ return old_id;
}
/**
- * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
- * @ent: swap entry to be cmpxchged
- * @old: old id
- * @new: new id
+ * swap_cgroup_record - record mem_cgroup for a set of swap entries.
+ * These entries must all belong to a single folio, that folio
+ * must be in the process of being charged for swap space (swap out),
+ * and these entries must not have been charged already.
*
- * Returns old id at success, 0 at failure.
- * (There is no mem_cgroup using 0 as its id)
+ * @folio: the folio that the swap entry belongs to
+ * @ent: the first swap entry to be recorded
*/
-unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
- unsigned short old, unsigned short new)
+void swap_cgroup_record(struct folio *folio, swp_entry_t ent)
{
- struct swap_cgroup_ctrl *ctrl;
- struct swap_cgroup *sc;
- unsigned long flags;
- unsigned short retval;
-
- sc = lookup_swap_cgroup(ent, &ctrl);
-
- spin_lock_irqsave(&ctrl->lock, flags);
- retval = sc->id;
- if (retval == old)
- sc->id = new;
- else
- retval = 0;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- return retval;
+ unsigned int nr_ents = folio_nr_pages(folio);
+ struct swap_cgroup *map;
+ pgoff_t offset, end;
+ unsigned short old;
+
+ offset = swp_offset(ent);
+ end = offset + nr_ents;
+ map = swap_cgroup_ctrl[swp_type(ent)].map;
+
+ do {
+ old = __swap_cgroup_id_xchg(map, offset,
+ mem_cgroup_id(folio_memcg(folio)));
+ VM_BUG_ON(old);
+ } while (++offset != end);
}
/**
- * swap_cgroup_record - record mem_cgroup for a set of swap entries
+ * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
+ * These entries must be in the process of being uncharged from swap.
+ * They either belong to a single folio in the swap cache (swap in for
+ * cgroup v1), or no longer have any users (slot freeing).
+ *
* @ent: the first swap entry to be recorded into
- * @id: mem_cgroup to be recorded
* @nr_ents: number of swap entries to be recorded
*
- * Returns old value at success, 0 at failure.
- * (Of course, old value can be 0.)
+ * Returns the existing old value.
*/
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents)
+unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
{
- struct swap_cgroup_ctrl *ctrl;
- struct swap_cgroup *sc;
- unsigned short old;
- unsigned long flags;
pgoff_t offset = swp_offset(ent);
pgoff_t end = offset + nr_ents;
+ struct swap_cgroup *map;
+ unsigned short old, iter = 0;
- sc = lookup_swap_cgroup(ent, &ctrl);
-
- spin_lock_irqsave(&ctrl->lock, flags);
- old = sc->id;
- for (;;) {
- VM_BUG_ON(sc->id != old);
- sc->id = id;
- offset++;
- if (offset == end)
- break;
- if (offset % SC_PER_PAGE)
- sc++;
- else
- sc = __lookup_swap_cgroup(ctrl, offset);
- }
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ offset = swp_offset(ent);
+ end = offset + nr_ents;
+ map = swap_cgroup_ctrl[swp_type(ent)].map;
+
+ do {
+ old = __swap_cgroup_id_xchg(map, offset, 0);
+ if (!iter)
+ iter = old;
+ VM_BUG_ON(iter != old);
+ } while (++offset != end);
return old;
}
@@ -161,39 +118,33 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
*/
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
+ struct swap_cgroup_ctrl *ctrl;
+
if (mem_cgroup_disabled())
return 0;
- return lookup_swap_cgroup(ent, NULL)->id;
+
+ ctrl = &swap_cgroup_ctrl[swp_type(ent)];
+ return __swap_cgroup_id_lookup(ctrl->map, swp_offset(ent));
}
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
- void *array;
- unsigned long length;
+ struct swap_cgroup *map;
struct swap_cgroup_ctrl *ctrl;
if (mem_cgroup_disabled())
return 0;
- length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
-
- array = vcalloc(length, sizeof(void *));
- if (!array)
+ BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC !=
+ sizeof(struct swap_cgroup));
+ map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) *
+ sizeof(struct swap_cgroup));
+ if (!map)
goto nomem;
ctrl = &swap_cgroup_ctrl[type];
mutex_lock(&swap_cgroup_mutex);
- ctrl->length = length;
- ctrl->map = array;
- spin_lock_init(&ctrl->lock);
- if (swap_cgroup_prepare(type)) {
- /* memory shortage */
- ctrl->map = NULL;
- ctrl->length = 0;
- mutex_unlock(&swap_cgroup_mutex);
- vfree(array);
- goto nomem;
- }
+ ctrl->map = map;
mutex_unlock(&swap_cgroup_mutex);
return 0;
@@ -205,8 +156,7 @@ nomem:
void swap_cgroup_swapoff(int type)
{
- struct page **map;
- unsigned long i, length;
+ struct swap_cgroup *map;
struct swap_cgroup_ctrl *ctrl;
if (mem_cgroup_disabled())
@@ -215,19 +165,8 @@ void swap_cgroup_swapoff(int type)
mutex_lock(&swap_cgroup_mutex);
ctrl = &swap_cgroup_ctrl[type];
map = ctrl->map;
- length = ctrl->length;
ctrl->map = NULL;
- ctrl->length = 0;
mutex_unlock(&swap_cgroup_mutex);
- if (map) {
- for (i = 0; i < length; i++) {
- struct page *page = map[i];
- if (page)
- __free_page(page);
- if (!(i % SWAP_CLUSTER_MAX))
- cond_resched();
- }
- vfree(map);
- }
+ vfree(map);
}
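To summarize the new layout, here is a minimal userspace sketch of the packing scheme introduced above: two 16-bit cgroup ids share one 32-bit atomic word, and a single lane is read or replaced with a shift/mask plus a cmpxchg loop. The names and the harness are illustrative, not the kernel helpers.

#include <stdatomic.h>
#include <stdio.h>

#define ID_BITS		16
#define ID_MASK		0xffffu
#define IDS_PER_WORD	2	/* two unsigned short ids per atomic word, as in struct swap_cgroup */

/* Sketch of __swap_cgroup_id_lookup(): extract one 16-bit lane from the packed word. */
static unsigned short id_lookup(_Atomic unsigned int *map, unsigned long offset)
{
	unsigned int shift = (offset % IDS_PER_WORD) * ID_BITS;
	unsigned int ids = atomic_load(&map[offset / IDS_PER_WORD]);

	return (ids >> shift) & ID_MASK;
}

/* Sketch of __swap_cgroup_id_xchg(): replace one lane, return the id it held before. */
static unsigned short id_xchg(_Atomic unsigned int *map, unsigned long offset,
			      unsigned short new_id)
{
	_Atomic unsigned int *word = &map[offset / IDS_PER_WORD];
	unsigned int shift = (offset % IDS_PER_WORD) * ID_BITS;
	unsigned int old = atomic_load(word), new;

	do {
		new = (old & ~(ID_MASK << shift)) | ((unsigned int)new_id << shift);
	} while (!atomic_compare_exchange_weak(word, &old, new));

	return (old >> shift) & ID_MASK;
}

int main(void)
{
	_Atomic unsigned int map[1] = { 0 };

	id_xchg(map, 0, 7);	/* entry 0 -> cgroup id 7 */
	id_xchg(map, 1, 9);	/* entry 1 -> cgroup id 9, same word, other lane */
	printf("%u %u\n", (unsigned)id_lookup(map, 0), (unsigned)id_lookup(map, 1));	/* 7 9 */
	return 0;
}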
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 13ab3b771409..9c7c171df7ba 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -43,17 +43,15 @@ static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
-static void __drain_swap_slots_cache(unsigned int type);
+static void __drain_swap_slots_cache(void);
#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
-#define SLOTS_CACHE 0x1
-#define SLOTS_CACHE_RET 0x2
static void deactivate_swap_slots_cache(void)
{
mutex_lock(&swap_slots_cache_mutex);
swap_slot_cache_active = false;
- __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
+ __drain_swap_slots_cache();
mutex_unlock(&swap_slots_cache_mutex);
}
@@ -72,7 +70,7 @@ void disable_swap_slots_cache_lock(void)
if (swap_slot_cache_initialized) {
/* serialize with cpu hotplug operations */
cpus_read_lock();
- __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
+ __drain_swap_slots_cache();
cpus_read_unlock();
}
}
@@ -113,7 +111,7 @@ out:
static int alloc_swap_slot_cache(unsigned int cpu)
{
struct swap_slots_cache *cache;
- swp_entry_t *slots, *slots_ret;
+ swp_entry_t *slots;
/*
* Do allocation outside swap_slots_cache_mutex
@@ -125,28 +123,19 @@ static int alloc_swap_slot_cache(unsigned int cpu)
if (!slots)
return -ENOMEM;
- slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
- GFP_KERNEL);
- if (!slots_ret) {
- kvfree(slots);
- return -ENOMEM;
- }
-
mutex_lock(&swap_slots_cache_mutex);
cache = &per_cpu(swp_slots, cpu);
- if (cache->slots || cache->slots_ret) {
+ if (cache->slots) {
/* cache already allocated */
mutex_unlock(&swap_slots_cache_mutex);
kvfree(slots);
- kvfree(slots_ret);
return 0;
}
if (!cache->lock_initialized) {
mutex_init(&cache->alloc_lock);
- spin_lock_init(&cache->free_lock);
cache->lock_initialized = true;
}
cache->nr = 0;
@@ -160,19 +149,16 @@ static int alloc_swap_slot_cache(unsigned int cpu)
*/
mb();
cache->slots = slots;
- cache->slots_ret = slots_ret;
mutex_unlock(&swap_slots_cache_mutex);
return 0;
}
-static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
- bool free_slots)
+static void drain_slots_cache_cpu(unsigned int cpu, bool free_slots)
{
struct swap_slots_cache *cache;
- swp_entry_t *slots = NULL;
cache = &per_cpu(swp_slots, cpu);
- if ((type & SLOTS_CACHE) && cache->slots) {
+ if (cache->slots) {
mutex_lock(&cache->alloc_lock);
swapcache_free_entries(cache->slots + cache->cur, cache->nr);
cache->cur = 0;
@@ -183,20 +169,9 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
}
mutex_unlock(&cache->alloc_lock);
}
- if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
- spin_lock_irq(&cache->free_lock);
- swapcache_free_entries(cache->slots_ret, cache->n_ret);
- cache->n_ret = 0;
- if (free_slots && cache->slots_ret) {
- slots = cache->slots_ret;
- cache->slots_ret = NULL;
- }
- spin_unlock_irq(&cache->free_lock);
- kvfree(slots);
- }
}
-static void __drain_swap_slots_cache(unsigned int type)
+static void __drain_swap_slots_cache(void)
{
unsigned int cpu;
@@ -224,13 +199,13 @@ static void __drain_swap_slots_cache(unsigned int type)
* There are no slots on such cpu that need to be drained.
*/
for_each_online_cpu(cpu)
- drain_slots_cache_cpu(cpu, type, false);
+ drain_slots_cache_cpu(cpu, false);
}
static int free_slot_cache(unsigned int cpu)
{
mutex_lock(&swap_slots_cache_mutex);
- drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
+ drain_slots_cache_cpu(cpu, true);
mutex_unlock(&swap_slots_cache_mutex);
return 0;
}
@@ -269,39 +244,6 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
return cache->nr;
}
-void free_swap_slot(swp_entry_t entry)
-{
- struct swap_slots_cache *cache;
-
- /* Large folio swap slot is not covered. */
- zswap_invalidate(entry);
-
- cache = raw_cpu_ptr(&swp_slots);
- if (likely(use_swap_slot_cache && cache->slots_ret)) {
- spin_lock_irq(&cache->free_lock);
- /* Swap slots cache may be deactivated before acquiring lock */
- if (!use_swap_slot_cache || !cache->slots_ret) {
- spin_unlock_irq(&cache->free_lock);
- goto direct_free;
- }
- if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
- /*
- * Return slots to global pool.
- * The current swap_map value is SWAP_HAS_CACHE.
- * Set it to 0 to indicate it is available for
- * allocation in global pool
- */
- swapcache_free_entries(cache->slots_ret, cache->n_ret);
- cache->n_ret = 0;
- }
- cache->slots_ret[cache->n_ret++] = entry;
- spin_unlock_irq(&cache->free_lock);
- } else {
-direct_free:
- swapcache_free_entries(&entry, 1);
- }
-}
-
swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e0c0321b8ff7..ca42b2be64d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -317,7 +317,6 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
struct folio_batch folios;
unsigned int refs[PAGEVEC_SIZE];
- lru_add_drain();
folio_batch_init(&folios);
for (int i = 0; i < nr; i++) {
struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b0a9071cfe1d..ba19430dd4ea 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -53,15 +53,15 @@
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
-static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
- unsigned int nr_pages);
-static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
+static void swap_entry_range_free(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ swp_entry_t entry, unsigned int nr_pages);
+static void swap_range_alloc(struct swap_info_struct *si,
unsigned int nr_entries);
static bool folio_swapcache_freeable(struct folio *folio);
-static struct swap_cluster_info *lock_cluster_or_swap_info(
- struct swap_info_struct *si, unsigned long offset);
-static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
- struct swap_cluster_info *ci);
+static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
+ unsigned long offset);
+static inline void unlock_cluster(struct swap_cluster_info *ci);
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -129,6 +129,26 @@ static inline unsigned char swap_count(unsigned char ent)
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
}
+/*
+ * Use the second highest bit of the inuse_pages counter to indicate
+ * whether a swap device is on the available plist, so the atomic can
+ * still be updated arithmetically while having special data embedded.
+ *
+ * The inuse_pages counter is the only thing indicating whether a device
+ * should be on the avail_lists or not (except swapon / swapoff). By
+ * embedding the off-list bit in the atomic counter, updates no longer
+ * need any lock to check the list status.
+ *
+ * This bit will be set if the device is not on the plist and not
+ * usable, and will be cleared if the device is on the plist.
+ */
+#define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
+#define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
+static long swap_usage_in_pages(struct swap_info_struct *si)
+{
+ return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
+}
+
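A minimal userspace sketch of the idea documented above: the usage count lives in the low bits of a single atomic word, a high bit marks the device as off the plist, and the return value of a plain add or sub tells the caller when a list transition may be needed. The real del_from_avail_list()/add_to_avail_list() additionally use a cmpxchg so only one updater wins the transition; all names here are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OFFLIST_BIT	(1ul << 30)		/* plays the role of SWAP_USAGE_OFFLIST_BIT */
#define COUNTER_MASK	(~OFFLIST_BIT)

static _Atomic unsigned long inuse;		/* plays the role of si->inuse_pages */
static const unsigned long capacity = 8;	/* plays the role of si->pages */

static unsigned long usage(void)
{
	return atomic_load(&inuse) & COUNTER_MASK;
}

/* Returns true when this add made the device full: the caller would take it off the plist. */
static bool usage_add(unsigned long nr)
{
	return atomic_fetch_add(&inuse, nr) + nr == capacity;
}

/* Returns true when the off-list bit is set while freeing: the caller would re-add it. */
static bool usage_sub(unsigned long nr)
{
	return (atomic_fetch_sub(&inuse, nr) - nr) & OFFLIST_BIT;
}

int main(void)
{
	if (usage_add(8))				/* device becomes full */
		atomic_fetch_or(&inuse, OFFLIST_BIT);	/* taken off the available plist */
	printf("full, usage=%lu\n", usage());		/* flag bit never leaks into the count */
	if (usage_sub(2))				/* slots freed while off-list */
		printf("freed, usage=%lu, back on the plist\n", usage());
	return 0;
}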
/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY 0x1
/*
@@ -222,9 +242,9 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
* swap_map is HAS_CACHE only, which means the slots have no page table
* reference or pending writeback, and can't be allocated to others.
*/
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
need_reclaim = swap_is_has_cache(si, offset, nr_pages);
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
if (!need_reclaim)
goto out_unlock;
@@ -242,12 +262,9 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
folio_ref_sub(folio, nr_pages);
folio_set_dirty(folio);
- spin_lock(&si->lock);
- /* Only sinple page folio can be backed by zswap */
- if (nr_pages == 1)
- zswap_invalidate(entry);
- swap_entry_range_free(si, entry, nr_pages);
- spin_unlock(&si->lock);
+ ci = lock_cluster(si, offset);
+ swap_entry_range_free(si, ci, entry, nr_pages);
+ unlock_cluster(ci);
ret = nr_pages;
out_unlock:
folio_unlock(folio);
@@ -382,9 +399,23 @@ static void discard_swap_cluster(struct swap_info_struct *si,
#endif
#define LATENCY_LIMIT 256
-static inline bool cluster_is_free(struct swap_cluster_info *info)
+static inline bool cluster_is_empty(struct swap_cluster_info *info)
{
- return info->flags & CLUSTER_FLAG_FREE;
+ return info->count == 0;
+}
+
+static inline bool cluster_is_discard(struct swap_cluster_info *info)
+{
+ return info->flags == CLUSTER_FLAG_DISCARD;
+}
+
+static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
+{
+ if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
+ return false;
+ if (!order)
+ return true;
+ return cluster_is_empty(ci) || order == ci->order;
}
static inline unsigned int cluster_index(struct swap_info_struct *si,
@@ -393,6 +424,12 @@ static inline unsigned int cluster_index(struct swap_info_struct *si,
return ci - si->cluster_info;
}
+static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si,
+ unsigned long offset)
+{
+ return &si->cluster_info[offset / SWAPFILE_CLUSTER];
+}
+
static inline unsigned int cluster_offset(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
@@ -404,45 +441,38 @@ static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si
{
struct swap_cluster_info *ci;
- ci = si->cluster_info;
- if (ci) {
- ci += offset / SWAPFILE_CLUSTER;
- spin_lock(&ci->lock);
- }
+ ci = offset_to_cluster(si, offset);
+ spin_lock(&ci->lock);
+
return ci;
}
static inline void unlock_cluster(struct swap_cluster_info *ci)
{
- if (ci)
- spin_unlock(&ci->lock);
+ spin_unlock(&ci->lock);
}
-/*
- * Determine the locking method in use for this device. Return
- * swap_cluster_info if SSD-style cluster-based locking is in place.
- */
-static inline struct swap_cluster_info *lock_cluster_or_swap_info(
- struct swap_info_struct *si, unsigned long offset)
+static void move_cluster(struct swap_info_struct *si,
+ struct swap_cluster_info *ci, struct list_head *list,
+ enum swap_cluster_flags new_flags)
{
- struct swap_cluster_info *ci;
+ VM_WARN_ON(ci->flags == new_flags);
- /* Try to use fine-grained SSD-style locking if available: */
- ci = lock_cluster(si, offset);
- /* Otherwise, fall back to traditional, coarse locking: */
- if (!ci)
- spin_lock(&si->lock);
-
- return ci;
-}
+ BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
+ lockdep_assert_held(&ci->lock);
-static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
- struct swap_cluster_info *ci)
-{
- if (ci)
- unlock_cluster(ci);
+ spin_lock(&si->lock);
+ if (ci->flags == CLUSTER_FLAG_NONE)
+ list_add_tail(&ci->list, list);
else
- spin_unlock(&si->lock);
+ list_move_tail(&ci->list, list);
+ spin_unlock(&si->lock);
+
+ if (ci->flags == CLUSTER_FLAG_FRAG)
+ atomic_long_dec(&si->frag_cluster_nr[ci->order]);
+ else if (new_flags == CLUSTER_FLAG_FRAG)
+ atomic_long_inc(&si->frag_cluster_nr[ci->order]);
+ ci->flags = new_flags;
}
/* Add a cluster to discard list and schedule it to do discard */
@@ -458,51 +488,98 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
*/
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
-
- VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
- list_move_tail(&ci->list, &si->discard_clusters);
- ci->flags = 0;
+ VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
+ move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
schedule_work(&si->discard_work);
}
static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
- lockdep_assert_held(&si->lock);
lockdep_assert_held(&ci->lock);
-
- if (ci->flags)
- list_move_tail(&ci->list, &si->free_clusters);
- else
- list_add_tail(&ci->list, &si->free_clusters);
- ci->flags = CLUSTER_FLAG_FREE;
+ move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
ci->order = 0;
}
/*
+ * Isolate and lock the first cluster that is not contended on a list,
+ * clearing its flag before it is taken off the list. The cluster flag must
+ * be kept in sync with the list status, so cluster updaters can always
+ * know the cluster's list status without touching the si lock.
+ *
+ * Note it's possible that all clusters on a list are contended, so
+ * this can return NULL for a non-empty list.
+ */
+static struct swap_cluster_info *isolate_lock_cluster(
+ struct swap_info_struct *si, struct list_head *list)
+{
+ struct swap_cluster_info *ci, *ret = NULL;
+
+ spin_lock(&si->lock);
+
+ if (unlikely(!(si->flags & SWP_WRITEOK)))
+ goto out;
+
+ list_for_each_entry(ci, list, list) {
+ if (!spin_trylock(&ci->lock))
+ continue;
+
+ /* We may only isolate and clear the flags of the following lists */
+ VM_BUG_ON(!ci->flags);
+ VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
+ ci->flags != CLUSTER_FLAG_FULL);
+
+ list_del(&ci->list);
+ ci->flags = CLUSTER_FLAG_NONE;
+ ret = ci;
+ break;
+ }
+out:
+ spin_unlock(&si->lock);
+
+ return ret;
+}
+
+/*
* Doing discard actually. After a cluster discard is finished, the cluster
- * will be added to free cluster list. caller should hold si->lock.
-*/
-static void swap_do_scheduled_discard(struct swap_info_struct *si)
+ * will be added to the free cluster list. Discard clusters are a bit special:
+ * they don't participate in allocation or reclaim, so clusters marked as
+ * CLUSTER_FLAG_DISCARD must remain off-list or on the discard list.
+ */
+static bool swap_do_scheduled_discard(struct swap_info_struct *si)
{
struct swap_cluster_info *ci;
+ bool ret = false;
unsigned int idx;
+ spin_lock(&si->lock);
while (!list_empty(&si->discard_clusters)) {
ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
+ /*
+ * Delete the cluster from the list to prepare for discard, but keep
+ * the CLUSTER_FLAG_DISCARD flag: a percpu_cluster could still be
+ * pointing to it, or relocate_cluster could run into it.
+ */
list_del(&ci->list);
idx = cluster_index(si, ci);
spin_unlock(&si->lock);
-
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
SWAPFILE_CLUSTER);
- spin_lock(&si->lock);
spin_lock(&ci->lock);
- __free_cluster(si, ci);
+ /*
+ * Discard is done, clear its flags as it's off-list, then
+ * return the cluster to allocation list.
+ */
+ ci->flags = CLUSTER_FLAG_NONE;
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
+ __free_cluster(si, ci);
spin_unlock(&ci->lock);
+ ret = true;
+ spin_lock(&si->lock);
}
+ spin_unlock(&si->lock);
+ return ret;
}
static void swap_discard_work(struct work_struct *work)
@@ -511,9 +588,7 @@ static void swap_discard_work(struct work_struct *work)
si = container_of(work, struct swap_info_struct, discard_work);
- spin_lock(&si->lock);
swap_do_scheduled_discard(si);
- spin_unlock(&si->lock);
}
static void swap_users_ref_free(struct percpu_ref *ref)
@@ -524,15 +599,16 @@ static void swap_users_ref_free(struct percpu_ref *ref)
complete(&si->comp);
}
+/*
+ * Must be called after freeing if ci->count == 0; moves the cluster to the
+ * free or discard list.
+ */
static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
VM_BUG_ON(ci->count != 0);
- lockdep_assert_held(&si->lock);
+ VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
lockdep_assert_held(&ci->lock);
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
-
/*
* If the swap is discardable, prepare discard the cluster
* instead of free it immediately. The cluster will be freed
@@ -548,6 +624,48 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *
}
/*
+ * Must be called after freeing if ci->count != 0; moves the cluster to the
+ * nonfull list.
+ */
+static void partial_free_cluster(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
+{
+ VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
+ lockdep_assert_held(&ci->lock);
+
+ if (ci->flags != CLUSTER_FLAG_NONFULL)
+ move_cluster(si, ci, &si->nonfull_clusters[ci->order],
+ CLUSTER_FLAG_NONFULL);
+}
+
+/*
+ * Must be called after allocation; moves the cluster to the full or frag list.
+ * Note: allocation doesn't acquire the si lock, and may drop the ci lock for
+ * reclaim, so the cluster could be anywhere when this is called.
+ */
+static void relocate_cluster(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
+{
+ lockdep_assert_held(&ci->lock);
+
+ /* Discard cluster must remain off-list or on discard list */
+ if (cluster_is_discard(ci))
+ return;
+
+ if (!ci->count) {
+ free_cluster(si, ci);
+ } else if (ci->count != SWAPFILE_CLUSTER) {
+ if (ci->flags != CLUSTER_FLAG_FRAG)
+ move_cluster(si, ci, &si->frag_clusters[ci->order],
+ CLUSTER_FLAG_FRAG);
+ } else {
+ if (ci->flags != CLUSTER_FLAG_FULL)
+ move_cluster(si, ci, &si->full_clusters,
+ CLUSTER_FLAG_FULL);
+ }
+}
+
+/*
* The cluster corresponding to page_nr will be used. The cluster will not be
* added to free cluster list and its usage counter will be increased by 1.
* Only used for initialization.
@@ -558,9 +676,6 @@ static void inc_cluster_info_page(struct swap_info_struct *si,
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
struct swap_cluster_info *ci;
- if (!cluster_info)
- return;
-
ci = cluster_info + idx;
ci->count++;
@@ -568,63 +683,33 @@ static void inc_cluster_info_page(struct swap_info_struct *si,
VM_BUG_ON(ci->flags);
}
-/*
- * The cluster ci decreases @nr_pages usage. If the usage counter becomes 0,
- * which means no page in the cluster is in use, we can optionally discard
- * the cluster and add it to free cluster list.
- */
-static void dec_cluster_info_page(struct swap_info_struct *si,
- struct swap_cluster_info *ci, int nr_pages)
-{
- if (!si->cluster_info)
- return;
-
- VM_BUG_ON(ci->count < nr_pages);
- VM_BUG_ON(cluster_is_free(ci));
- lockdep_assert_held(&si->lock);
- lockdep_assert_held(&ci->lock);
- ci->count -= nr_pages;
-
- if (!ci->count) {
- free_cluster(si, ci);
- return;
- }
-
- if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
- VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
- list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
- ci->flags = CLUSTER_FLAG_NONFULL;
- }
-}
-
static bool cluster_reclaim_range(struct swap_info_struct *si,
struct swap_cluster_info *ci,
unsigned long start, unsigned long end)
{
unsigned char *map = si->swap_map;
- unsigned long offset;
+ unsigned long offset = start;
+ int nr_reclaim;
spin_unlock(&ci->lock);
- spin_unlock(&si->lock);
-
- for (offset = start; offset < end; offset++) {
+ do {
switch (READ_ONCE(map[offset])) {
case 0:
- continue;
+ offset++;
+ break;
case SWAP_HAS_CACHE:
- if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
- continue;
- goto out;
+ nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
+ if (nr_reclaim > 0)
+ offset += nr_reclaim;
+ else
+ goto out;
+ break;
default:
goto out;
}
- }
+ } while (offset < end);
out:
- spin_lock(&si->lock);
spin_lock(&ci->lock);
-
/*
* Recheck the range no matter reclaim succeeded or not, the slot
* could have been freed while we are not holding the lock.
@@ -638,11 +723,11 @@ out:
static bool cluster_scan_range(struct swap_info_struct *si,
struct swap_cluster_info *ci,
- unsigned long start, unsigned int nr_pages)
+ unsigned long start, unsigned int nr_pages,
+ bool *need_reclaim)
{
unsigned long offset, end = start + nr_pages;
unsigned char *map = si->swap_map;
- bool need_reclaim = false;
for (offset = start; offset < end; offset++) {
switch (READ_ONCE(map[offset])) {
@@ -651,16 +736,13 @@ static bool cluster_scan_range(struct swap_info_struct *si,
case SWAP_HAS_CACHE:
if (!vm_swap_full())
return false;
- need_reclaim = true;
+ *need_reclaim = true;
continue;
default:
return false;
}
}
- if (need_reclaim)
- return cluster_reclaim_range(si, ci, start, end);
-
return true;
}
@@ -670,73 +752,79 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
{
unsigned int nr_pages = 1 << order;
+ lockdep_assert_held(&ci->lock);
+
if (!(si->flags & SWP_WRITEOK))
return false;
- if (cluster_is_free(ci)) {
- if (nr_pages < SWAPFILE_CLUSTER) {
- list_move_tail(&ci->list, &si->nonfull_clusters[order]);
- ci->flags = CLUSTER_FLAG_NONFULL;
- }
+ /*
+ * The first allocation in a cluster makes the
+ * cluster exclusive to this order
+ */
+ if (cluster_is_empty(ci))
ci->order = order;
- }
memset(si->swap_map + start, usage, nr_pages);
- swap_range_alloc(si, start, nr_pages);
+ swap_range_alloc(si, nr_pages);
ci->count += nr_pages;
- if (ci->count == SWAPFILE_CLUSTER) {
- VM_BUG_ON(!(ci->flags &
- (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
- list_move_tail(&ci->list, &si->full_clusters);
- ci->flags = CLUSTER_FLAG_FULL;
- }
-
return true;
}
-static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
- unsigned int *foundp, unsigned int order,
+/* Try to use a new cluster for the current CPU and allocate from it. */
+static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ unsigned long offset,
+ unsigned int order,
unsigned char usage)
{
- unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1);
+ unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
+ unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
unsigned int nr_pages = 1 << order;
- struct swap_cluster_info *ci;
+ bool need_reclaim, ret;
- if (end < nr_pages)
- return SWAP_NEXT_INVALID;
- end -= nr_pages;
+ lockdep_assert_held(&ci->lock);
- ci = lock_cluster(si, offset);
- if (ci->count + nr_pages > SWAPFILE_CLUSTER) {
- offset = SWAP_NEXT_INVALID;
- goto done;
- }
+ if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
+ goto out;
- while (offset <= end) {
- if (cluster_scan_range(si, ci, offset, nr_pages)) {
- if (!cluster_alloc_range(si, ci, offset, usage, order)) {
- offset = SWAP_NEXT_INVALID;
- goto done;
- }
- *foundp = offset;
- if (ci->count == SWAPFILE_CLUSTER) {
- offset = SWAP_NEXT_INVALID;
- goto done;
- }
- offset += nr_pages;
- break;
+ for (end -= nr_pages; offset <= end; offset += nr_pages) {
+ need_reclaim = false;
+ if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
+ continue;
+ if (need_reclaim) {
+ ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
+ /*
+ * Reclaim drops ci->lock and the cluster could be used
+ * by another order. We don't check the flag here, as an
+ * off-list cluster has no flag set, and a change of list
+ * won't cause fragmentation.
+ */
+ if (!cluster_is_usable(ci, order))
+ goto out;
+ if (cluster_is_empty(ci))
+ offset = start;
+ /* Reclaim failed but cluster is usable, try next */
+ if (!ret)
+ continue;
}
+ if (!cluster_alloc_range(si, ci, offset, usage, order))
+ break;
+ found = offset;
offset += nr_pages;
+ if (ci->count < SWAPFILE_CLUSTER && offset <= end)
+ next = offset;
+ break;
}
- if (offset > end)
- offset = SWAP_NEXT_INVALID;
-done:
+out:
+ relocate_cluster(si, ci);
unlock_cluster(ci);
- return offset;
+ if (si->flags & SWP_SOLIDSTATE)
+ __this_cpu_write(si->percpu_cluster->next[order], next);
+ else
+ si->global_cluster->next[order] = next;
+ return found;
}
/* Return true if reclaimed a whole cluster */
@@ -749,20 +837,19 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
int nr_reclaim;
if (force)
- to_scan = si->inuse_pages / SWAPFILE_CLUSTER;
+ to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;
- while (!list_empty(&si->full_clusters)) {
- ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
- list_move_tail(&ci->list, &si->full_clusters);
+ while ((ci = isolate_lock_cluster(si, &si->full_clusters))) {
offset = cluster_offset(si, ci);
end = min(si->max, offset + SWAPFILE_CLUSTER);
to_scan--;
- spin_unlock(&si->lock);
while (offset < end) {
if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
+ spin_unlock(&ci->lock);
nr_reclaim = __try_to_reclaim_swap(si, offset,
TTRS_ANYWAY | TTRS_DIRECT);
+ spin_lock(&ci->lock);
if (nr_reclaim) {
offset += abs(nr_reclaim);
continue;
@@ -770,8 +857,8 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
}
offset++;
}
- spin_lock(&si->lock);
+ unlock_cluster(ci);
if (to_scan <= 0)
break;
}
@@ -783,9 +870,7 @@ static void swap_reclaim_work(struct work_struct *work)
si = container_of(work, struct swap_info_struct, reclaim_work);
- spin_lock(&si->lock);
swap_reclaim_full_clusters(si, true);
- spin_unlock(&si->lock);
}
/*
@@ -796,29 +881,41 @@ static void swap_reclaim_work(struct work_struct *work)
static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
unsigned char usage)
{
- struct percpu_cluster *cluster;
struct swap_cluster_info *ci;
unsigned int offset, found = 0;
-new_cluster:
- lockdep_assert_held(&si->lock);
- cluster = this_cpu_ptr(si->percpu_cluster);
- offset = cluster->next[order];
+ if (si->flags & SWP_SOLIDSTATE) {
+ /* Fast path using per CPU cluster */
+ local_lock(&si->percpu_cluster->lock);
+ offset = __this_cpu_read(si->percpu_cluster->next[order]);
+ } else {
+ /* Serialize HDD SWAP allocation for each device. */
+ spin_lock(&si->global_cluster_lock);
+ offset = si->global_cluster->next[order];
+ }
+
if (offset) {
- offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
+ ci = lock_cluster(si, offset);
+ /* Cluster could have been used by another order */
+ if (cluster_is_usable(ci, order)) {
+ if (cluster_is_empty(ci))
+ offset = cluster_offset(si, ci);
+ found = alloc_swap_scan_cluster(si, ci, offset,
+ order, usage);
+ } else {
+ unlock_cluster(ci);
+ }
if (found)
goto done;
}
- if (!list_empty(&si->free_clusters)) {
- ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
- offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
- /*
- * Either we didn't touch the cluster due to swapoff,
- * or the allocation must success.
- */
- VM_BUG_ON((si->flags & SWP_WRITEOK) && !found);
- goto done;
+new_cluster:
+ ci = isolate_lock_cluster(si, &si->free_clusters);
+ if (ci) {
+ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
+ order, usage);
+ if (found)
+ goto done;
}
/* Try reclaim from full clusters if free clusters list is drained */
@@ -826,56 +923,42 @@ new_cluster:
swap_reclaim_full_clusters(si, false);
if (order < PMD_ORDER) {
- unsigned int frags = 0;
-
- while (!list_empty(&si->nonfull_clusters[order])) {
- ci = list_first_entry(&si->nonfull_clusters[order],
- struct swap_cluster_info, list);
- list_move_tail(&ci->list, &si->frag_clusters[order]);
- ci->flags = CLUSTER_FLAG_FRAG;
- si->frag_cluster_nr[order]++;
- offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
- &found, order, usage);
- frags++;
+ unsigned int frags = 0, frags_existing;
+
+ while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
+ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
+ order, usage);
if (found)
- break;
+ goto done;
+ /* Clusters that failed to allocate are moved to frag_clusters */
+ frags++;
}
- if (!found) {
+ frags_existing = atomic_long_read(&si->frag_cluster_nr[order]);
+ while (frags < frags_existing &&
+ (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) {
+ atomic_long_dec(&si->frag_cluster_nr[order]);
/*
- * Nonfull clusters are moved to frag tail if we reached
- * here, count them too, don't over scan the frag list.
+ * Rotate the frag list to iterate: these clusters all failed
+ * high-order allocation or were moved here due to per-CPU usage,
+ * but they could still contain newly released reclaimable
+ * (e.g. lazy-freed swap cache) slots.
*/
- while (frags < si->frag_cluster_nr[order]) {
- ci = list_first_entry(&si->frag_clusters[order],
- struct swap_cluster_info, list);
- /*
- * Rotate the frag list to iterate, they were all failing
- * high order allocation or moved here due to per-CPU usage,
- * this help keeping usable cluster ahead.
- */
- list_move_tail(&ci->list, &si->frag_clusters[order]);
- offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
- &found, order, usage);
- frags++;
- if (found)
- break;
- }
+ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
+ order, usage);
+ if (found)
+ goto done;
+ frags++;
}
}
- if (found)
- goto done;
-
- if (!list_empty(&si->discard_clusters)) {
- /*
- * we don't have free cluster but have some clusters in
- * discarding, do discard now and reclaim them, then
- * reread cluster_next_cpu since we dropped si->lock
- */
- swap_do_scheduled_discard(si);
+ /*
+ * We don't have free cluster but have some clusters in
+ * discarding, do discard now and reclaim them, then
+ * reread cluster_next_cpu since we dropped si->lock
+ */
+ if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
goto new_cluster;
- }
if (order)
goto done;
@@ -886,74 +969,151 @@ new_cluster:
* Clusters here have at least one usable slots and can't fail order 0
* allocation, but reclaim may drop si->lock and race with another user.
*/
- while (!list_empty(&si->frag_clusters[o])) {
- ci = list_first_entry(&si->frag_clusters[o],
- struct swap_cluster_info, list);
- offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
- &found, 0, usage);
+ while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
+ atomic_long_dec(&si->frag_cluster_nr[o]);
+ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
+ 0, usage);
if (found)
goto done;
}
- while (!list_empty(&si->nonfull_clusters[o])) {
- ci = list_first_entry(&si->nonfull_clusters[o],
- struct swap_cluster_info, list);
- offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
- &found, 0, usage);
+ while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
+ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
+ 0, usage);
if (found)
goto done;
}
}
-
done:
- cluster->next[order] = offset;
+ if (si->flags & SWP_SOLIDSTATE)
+ local_unlock(&si->percpu_cluster->lock);
+ else
+ spin_unlock(&si->global_cluster_lock);
return found;
}
-static void __del_from_avail_list(struct swap_info_struct *si)
+/* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
+static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
{
int nid;
+ unsigned long pages;
+
+ spin_lock(&swap_avail_lock);
+
+ if (swapoff) {
+ /*
+ * Forcefully remove it. Clear the SWP_WRITEOK flag for
+ * swapoff here so it's synchronized by both si->lock and
+ * swap_avail_lock, to ensure the result can be seen by
+ * add_to_avail_list.
+ */
+ lockdep_assert_held(&si->lock);
+ si->flags &= ~SWP_WRITEOK;
+ atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
+ } else {
+ /*
+ * If not called by swapoff, take it off-list only if it's
+ * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly
+ * si->inuse_pages == pages); any concurrent slot freeing, or the
+ * device already having been removed from the plist by someone
+ * else, will make the cmpxchg below fail.
+ */
+ pages = si->pages;
+ if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
+ pages | SWAP_USAGE_OFFLIST_BIT))
+ goto skip;
+ }
- assert_spin_locked(&si->lock);
for_each_node(nid)
plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
+
+skip:
+ spin_unlock(&swap_avail_lock);
}
-static void del_from_avail_list(struct swap_info_struct *si)
+/* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
+static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
{
+ int nid;
+ long val;
+ unsigned long pages;
+
spin_lock(&swap_avail_lock);
- __del_from_avail_list(si);
+
+ /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
+ if (swapon) {
+ lockdep_assert_held(&si->lock);
+ si->flags |= SWP_WRITEOK;
+ } else {
+ if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
+ goto skip;
+ }
+
+ if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
+ goto skip;
+
+ val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
+
+ /*
+ * When the device is full and on the plist, only one updater will
+ * see (inuse_pages == si->pages) and call del_from_avail_list. If
+ * that updater happens to be here, just skip adding.
+ */
+ pages = si->pages;
+ if (val == pages) {
+ /* Just like the cmpxchg in del_from_avail_list */
+ if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
+ pages | SWAP_USAGE_OFFLIST_BIT))
+ goto skip;
+ }
+
+ for_each_node(nid)
+ plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
+
+skip:
spin_unlock(&swap_avail_lock);
}
-static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
- unsigned int nr_entries)
+/*
+ * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
+ * within each cluster, so the total contribution to the global counter should
+ * always be positive and cannot exceed the total number of usable slots.
+ */
+static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
{
- unsigned int end = offset + nr_entries - 1;
-
- if (offset == si->lowest_bit)
- si->lowest_bit += nr_entries;
- if (end == si->highest_bit)
- WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
- WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
- if (si->inuse_pages == si->pages) {
- si->lowest_bit = si->max;
- si->highest_bit = 0;
- del_from_avail_list(si);
+ long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);
- if (si->cluster_info && vm_swap_full())
- schedule_work(&si->reclaim_work);
+ /*
+ * If the device is full, and SWAP_USAGE_OFFLIST_BIT is not set,
+ * remove it from the plist.
+ */
+ if (unlikely(val == si->pages)) {
+ del_from_avail_list(si, false);
+ return true;
}
+
+ return false;
}
-static void add_to_avail_list(struct swap_info_struct *si)
+static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
{
- int nid;
+ long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);
- spin_lock(&swap_avail_lock);
- for_each_node(nid)
- plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
- spin_unlock(&swap_avail_lock);
+ /*
+ * If the device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
+ * put it back on the plist.
+ */
+ if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
+ add_to_avail_list(si, false);
+}
+
+static void swap_range_alloc(struct swap_info_struct *si,
+ unsigned int nr_entries)
+{
+ if (swap_usage_add(si, nr_entries)) {
+ if (vm_swap_full())
+ schedule_work(&si->reclaim_work);
+ }
}
static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
@@ -968,18 +1128,11 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
* Use atomic clear_bit operations only on zeromap instead of non-atomic
* bitmap_clear to prevent adjacent bits corruption due to simultaneous writes.
*/
- for (i = 0; i < nr_entries; i++)
+ for (i = 0; i < nr_entries; i++) {
clear_bit(offset + i, si->zeromap);
-
- if (offset < si->lowest_bit)
- si->lowest_bit = offset;
- if (end > si->highest_bit) {
- bool was_full = !si->highest_bit;
-
- WRITE_ONCE(si->highest_bit, end);
- if (was_full && (si->flags & SWP_WRITEOK))
- add_to_avail_list(si);
+ zswap_invalidate(swp_entry(si->type, offset + i));
}
+
if (si->flags & SWP_BLKDEV)
swap_slot_free_notify =
si->bdev->bd_disk->fops->swap_slot_free_notify;
@@ -999,50 +1152,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
*/
smp_wmb();
atomic_long_add(nr_entries, &nr_swap_pages);
- WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
-}
-
-static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
-{
- unsigned long prev;
-
- if (!(si->flags & SWP_SOLIDSTATE)) {
- si->cluster_next = next;
- return;
- }
-
- prev = this_cpu_read(*si->cluster_next_cpu);
- /*
- * Cross the swap address space size aligned trunk, choose
- * another trunk randomly to avoid lock contention on swap
- * address space if possible.
- */
- if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
- (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
- /* No free swap slots available */
- if (si->highest_bit <= si->lowest_bit)
- return;
- next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
- next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
- next = max_t(unsigned int, next, si->lowest_bit);
- }
- this_cpu_write(*si->cluster_next_cpu, next);
-}
-
-static bool swap_offset_available_and_locked(struct swap_info_struct *si,
- unsigned long offset)
-{
- if (data_race(!si->swap_map[offset])) {
- spin_lock(&si->lock);
- return true;
- }
-
- if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
- spin_lock(&si->lock);
- return true;
- }
-
- return false;
+ swap_usage_sub(si, nr_entries);
}
static int cluster_alloc_swap(struct swap_info_struct *si,
@@ -1051,10 +1161,6 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
{
int n_ret = 0;
- VM_BUG_ON(!si->cluster_info);
-
- si->flags += SWP_SCANNING;
-
while (n_ret < nr) {
unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
@@ -1063,8 +1169,6 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
slots[n_ret++] = swp_entry(si->type, offset);
}
- si->flags -= SWP_SCANNING;
-
return n_ret;
}
@@ -1072,13 +1176,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[], int order)
{
- unsigned long offset;
- unsigned long scan_base;
- unsigned long last_in_cluster = 0;
- int latency_ration = LATENCY_LIMIT;
unsigned int nr_pages = 1 << order;
- int n_ret = 0;
- bool scanned_many = false;
/*
* We try to cluster swap pages by allocating them sequentially
@@ -1090,7 +1188,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
* But we do now try to find an empty cluster. -Andrea
* And we let swap pages go all over an SSD partition. Hugh
*/
-
if (order > 0) {
/*
* Should not even be attempting large allocations when huge
@@ -1103,165 +1200,30 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
}
/*
- * Swapfile is not block device or not using clusters so unable
+ * Swapfile is not block device so unable
* to allocate large entries.
*/
- if (!(si->flags & SWP_BLKDEV) || !si->cluster_info)
+ if (!(si->flags & SWP_BLKDEV))
return 0;
}
- if (si->cluster_info)
- return cluster_alloc_swap(si, usage, nr, slots, order);
-
- si->flags += SWP_SCANNING;
-
- /* For HDD, sequential access is more important. */
- scan_base = si->cluster_next;
- offset = scan_base;
-
- if (unlikely(!si->cluster_nr--)) {
- if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
- si->cluster_nr = SWAPFILE_CLUSTER - 1;
- goto checks;
- }
-
- spin_unlock(&si->lock);
-
- /*
- * If seek is expensive, start searching for new cluster from
- * start of partition, to minimize the span of allocated swap.
- */
- scan_base = offset = si->lowest_bit;
- last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
-
- /* Locate the first empty (unaligned) cluster */
- for (; last_in_cluster <= READ_ONCE(si->highest_bit); offset++) {
- if (si->swap_map[offset])
- last_in_cluster = offset + SWAPFILE_CLUSTER;
- else if (offset == last_in_cluster) {
- spin_lock(&si->lock);
- offset -= SWAPFILE_CLUSTER - 1;
- si->cluster_next = offset;
- si->cluster_nr = SWAPFILE_CLUSTER - 1;
- goto checks;
- }
- if (unlikely(--latency_ration < 0)) {
- cond_resched();
- latency_ration = LATENCY_LIMIT;
- }
- }
-
- offset = scan_base;
- spin_lock(&si->lock);
- si->cluster_nr = SWAPFILE_CLUSTER - 1;
- }
-
-checks:
- if (!(si->flags & SWP_WRITEOK))
- goto no_page;
- if (!si->highest_bit)
- goto no_page;
- if (offset > si->highest_bit)
- scan_base = offset = si->lowest_bit;
-
- /* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- int swap_was_freed;
- spin_unlock(&si->lock);
- swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
- spin_lock(&si->lock);
- /* entry was freed successfully, try to use this again */
- if (swap_was_freed > 0)
- goto checks;
- goto scan; /* check next one */
- }
-
- if (si->swap_map[offset]) {
- if (!n_ret)
- goto scan;
- else
- goto done;
- }
- memset(si->swap_map + offset, usage, nr_pages);
-
- swap_range_alloc(si, offset, nr_pages);
- slots[n_ret++] = swp_entry(si->type, offset);
-
- /* got enough slots or reach max slots? */
- if ((n_ret == nr) || (offset >= si->highest_bit))
- goto done;
-
- /* search for next available slot */
-
- /* time to take a break? */
- if (unlikely(--latency_ration < 0)) {
- if (n_ret)
- goto done;
- spin_unlock(&si->lock);
- cond_resched();
- spin_lock(&si->lock);
- latency_ration = LATENCY_LIMIT;
- }
-
- if (si->cluster_nr && !si->swap_map[++offset]) {
- /* non-ssd case, still more slots in cluster? */
- --si->cluster_nr;
- goto checks;
- }
+ return cluster_alloc_swap(si, usage, nr, slots, order);
+}
+static bool get_swap_device_info(struct swap_info_struct *si)
+{
+ if (!percpu_ref_tryget_live(&si->users))
+ return false;
/*
- * Even if there's no free clusters available (fragmented),
- * try to scan a little more quickly with lock held unless we
- * have scanned too many slots already.
+ * Guarantee the si->users are checked before accessing other
+ * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is
+ * up to date.
+ *
+ * Paired with the spin_unlock() after setup_swap_info() in
+ * enable_swap_info(), and smp_wmb() in swapoff.
*/
- if (!scanned_many) {
- unsigned long scan_limit;
-
- if (offset < scan_base)
- scan_limit = scan_base;
- else
- scan_limit = si->highest_bit;
- for (; offset <= scan_limit && --latency_ration > 0;
- offset++) {
- if (!si->swap_map[offset])
- goto checks;
- }
- }
-
-done:
- if (order == 0)
- set_cluster_next(si, offset + 1);
- si->flags -= SWP_SCANNING;
- return n_ret;
-
-scan:
- VM_WARN_ON(order > 0);
- spin_unlock(&si->lock);
- while (++offset <= READ_ONCE(si->highest_bit)) {
- if (unlikely(--latency_ration < 0)) {
- cond_resched();
- latency_ration = LATENCY_LIMIT;
- scanned_many = true;
- }
- if (swap_offset_available_and_locked(si, offset))
- goto checks;
- }
- offset = si->lowest_bit;
- while (offset < scan_base) {
- if (unlikely(--latency_ration < 0)) {
- cond_resched();
- latency_ration = LATENCY_LIMIT;
- scanned_many = true;
- }
- if (swap_offset_available_and_locked(si, offset))
- goto checks;
- offset++;
- }
- spin_lock(&si->lock);
-
-no_page:
- si->flags -= SWP_SCANNING;
- return n_ret;
+ smp_rmb();
+ return true;
}
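The new helper concentrates the pin-and-check pattern that the callers below repeat: take a live reference, rely on the smp_rmb() for ordering, then drop the reference when done. A minimal sketch of the intended caller shape (illustrative only; it mirrors get_swap_pages() further down, with locals abbreviated):

    if (get_swap_device_info(si)) {
            /* si->flags, si->swap_map, etc. are now safe to read */
            if (si->flags & SWP_WRITEOK)
                    n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
                                                n_goal, swp_entries, order);
            put_swap_device(si);    /* drop the percpu_ref */
    }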
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
@@ -1291,32 +1253,15 @@ start_over:
/* requeue si to after same-priority siblings */
plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
spin_unlock(&swap_avail_lock);
- spin_lock(&si->lock);
- if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
- spin_lock(&swap_avail_lock);
- if (plist_node_empty(&si->avail_lists[node])) {
- spin_unlock(&si->lock);
- goto nextsi;
- }
- WARN(!si->highest_bit,
- "swap_info %d in list but !highest_bit\n",
- si->type);
- WARN(!(si->flags & SWP_WRITEOK),
- "swap_info %d in list but !SWP_WRITEOK\n",
- si->type);
- __del_from_avail_list(si);
- spin_unlock(&si->lock);
- goto nextsi;
+ if (get_swap_device_info(si)) {
+ n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
+ n_goal, swp_entries, order);
+ put_swap_device(si);
+ if (n_ret || size > 1)
+ goto check_out;
}
- n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
- n_goal, swp_entries, order);
- spin_unlock(&si->lock);
- if (n_ret || size > 1)
- goto check_out;
- cond_resched();
spin_lock(&swap_avail_lock);
-nextsi:
/*
* if we got here, it's likely that si was almost full before,
* and since scan_swap_map_slots() can drop the si->lock,
@@ -1376,22 +1321,6 @@ out:
return NULL;
}
-static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
- struct swap_info_struct *q)
-{
- struct swap_info_struct *p;
-
- p = _swap_info_get(entry);
-
- if (p != q) {
- if (q != NULL)
- spin_unlock(&q->lock);
- if (p != NULL)
- spin_lock(&p->lock);
- }
- return p;
-}
-
static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
unsigned long offset,
unsigned char usage)
@@ -1481,16 +1410,8 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
si = swp_swap_info(entry);
if (!si)
goto bad_nofile;
- if (!percpu_ref_tryget_live(&si->users))
+ if (!get_swap_device_info(si))
goto out;
- /*
- * Guarantee the si->users are checked before accessing other
- * fields of swap_info_struct.
- *
- * Paired with the spin_unlock() after setup_swap_info() in
- * enable_swap_info().
- */
- smp_rmb();
offset = swp_offset(entry);
if (offset >= si->max)
goto put_out;
@@ -1513,11 +1434,11 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si,
unsigned long offset = swp_offset(entry);
unsigned char usage;
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
usage = __swap_entry_free_locked(si, offset, 1);
- unlock_cluster_or_swap_info(si, ci);
if (!usage)
- free_swap_slot(entry);
+ swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+ unlock_cluster(ci);
return usage;
}
@@ -1538,22 +1459,17 @@ static bool __swap_entries_free(struct swap_info_struct *si,
if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
goto fallback;
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
if (!swap_is_last_map(si, offset, nr, &has_cache)) {
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
goto fallback;
}
for (i = 0; i < nr; i++)
WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
- unlock_cluster_or_swap_info(si, ci);
+ if (!has_cache)
+ swap_entry_range_free(si, ci, entry, nr);
+ unlock_cluster(ci);
- if (!has_cache) {
- for (i = 0; i < nr; i++)
- zswap_invalidate(swp_entry(si->type, offset + i));
- spin_lock(&si->lock);
- swap_entry_range_free(si, entry, nr);
- spin_unlock(&si->lock);
- }
return has_cache;
fallback:
@@ -1573,24 +1489,32 @@ fallback:
* Drop the last HAS_CACHE flag of swap entries, caller have to
* ensure all entries belong to the same cgroup.
*/
-static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
- unsigned int nr_pages)
+static void swap_entry_range_free(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ swp_entry_t entry, unsigned int nr_pages)
{
unsigned long offset = swp_offset(entry);
unsigned char *map = si->swap_map + offset;
unsigned char *map_end = map + nr_pages;
- struct swap_cluster_info *ci;
- ci = lock_cluster(si, offset);
+ /* It should never free entries across different clusters */
+ VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
+ VM_BUG_ON(cluster_is_empty(ci));
+ VM_BUG_ON(ci->count < nr_pages);
+
+ ci->count -= nr_pages;
do {
VM_BUG_ON(*map != SWAP_HAS_CACHE);
*map = 0;
} while (++map < map_end);
- dec_cluster_info_page(si, ci, nr_pages);
- unlock_cluster(ci);
mem_cgroup_uncharge_swap(entry, nr_pages);
swap_range_free(si, offset, nr_pages);
+
+ if (!ci->count)
+ free_cluster(si, ci);
+ else
+ partial_free_cluster(si, ci);
}
static void cluster_swap_free_nr(struct swap_info_struct *si,
@@ -1598,29 +1522,14 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
unsigned char usage)
{
struct swap_cluster_info *ci;
- DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
- int i, nr;
+ unsigned long end = offset + nr_pages;
- ci = lock_cluster_or_swap_info(si, offset);
- while (nr_pages) {
- nr = min(BITS_PER_LONG, nr_pages);
- for (i = 0; i < nr; i++) {
- if (!__swap_entry_free_locked(si, offset + i, usage))
- bitmap_set(to_free, i, 1);
- }
- if (!bitmap_empty(to_free, BITS_PER_LONG)) {
- unlock_cluster_or_swap_info(si, ci);
- for_each_set_bit(i, to_free, BITS_PER_LONG)
- free_swap_slot(swp_entry(si->type, offset + i));
- if (nr == nr_pages)
- return;
- bitmap_clear(to_free, 0, BITS_PER_LONG);
- ci = lock_cluster_or_swap_info(si, offset);
- }
- offset += nr;
- nr_pages -= nr;
- }
- unlock_cluster_or_swap_info(si, ci);
+ ci = lock_cluster(si, offset);
+ do {
+ if (!__swap_entry_free_locked(si, offset, usage))
+ swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+ } while (++offset < end);
+ unlock_cluster(ci);
}
/*
@@ -1659,59 +1568,35 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
if (!si)
return;
- ci = lock_cluster_or_swap_info(si, offset);
- if (size > 1 && swap_is_has_cache(si, offset, size)) {
- unlock_cluster_or_swap_info(si, ci);
- spin_lock(&si->lock);
- swap_entry_range_free(si, entry, size);
- spin_unlock(&si->lock);
- return;
- }
- for (int i = 0; i < size; i++, entry.val++) {
- if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
- unlock_cluster_or_swap_info(si, ci);
- free_swap_slot(entry);
- if (i == size - 1)
- return;
- lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
+ if (swap_is_has_cache(si, offset, size))
+ swap_entry_range_free(si, ci, entry, size);
+ else {
+ for (int i = 0; i < size; i++, entry.val++) {
+ if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
+ swap_entry_range_free(si, ci, entry, 1);
}
}
- unlock_cluster_or_swap_info(si, ci);
-}
-
-static int swp_entry_cmp(const void *ent1, const void *ent2)
-{
- const swp_entry_t *e1 = ent1, *e2 = ent2;
-
- return (int)swp_type(*e1) - (int)swp_type(*e2);
+ unlock_cluster(ci);
}
void swapcache_free_entries(swp_entry_t *entries, int n)
{
- struct swap_info_struct *p, *prev;
int i;
+ struct swap_cluster_info *ci;
+ struct swap_info_struct *si = NULL;
if (n <= 0)
return;
- prev = NULL;
- p = NULL;
-
- /*
- * Sort swap entries by swap device, so each lock is only taken once.
- * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
- * so low that it isn't necessary to optimize further.
- */
- if (nr_swapfiles > 1)
- sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
for (i = 0; i < n; ++i) {
- p = swap_info_get_cont(entries[i], prev);
- if (p)
- swap_entry_range_free(p, entries[i], 1);
- prev = p;
+ si = _swap_info_get(entries[i]);
+ if (si) {
+ ci = lock_cluster(si, swp_offset(entries[i]));
+ swap_entry_range_free(si, ci, entries[i], 1);
+ unlock_cluster(ci);
+ }
}
- if (p)
- spin_unlock(&p->lock);
}
int __swap_count(swp_entry_t entry)
@@ -1733,9 +1618,9 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
struct swap_cluster_info *ci;
int count;
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
count = swap_count(si->swap_map[offset]);
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
return count;
}
@@ -1758,7 +1643,7 @@ int swp_swapcount(swp_entry_t entry)
offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
count = swap_count(si->swap_map[offset]);
if (!(count & COUNT_CONTINUED))
@@ -1781,7 +1666,7 @@ int swp_swapcount(swp_entry_t entry)
n *= (SWAP_CONT_MAX + 1);
} while (tmp_count & COUNT_CONTINUED);
out:
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
return count;
}
@@ -1796,8 +1681,8 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
int i;
bool ret = false;
- ci = lock_cluster_or_swap_info(si, offset);
- if (!ci || nr_pages == 1) {
+ ci = lock_cluster(si, offset);
+ if (nr_pages == 1) {
if (swap_count(map[roffset]))
ret = true;
goto unlock_out;
@@ -1809,7 +1694,7 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
}
}
unlock_out:
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
return ret;
}
@@ -1963,10 +1848,11 @@ swp_entry_t get_swap_page_of_type(int type)
goto fail;
/* This is called for allocating swap entry, not cache */
- spin_lock(&si->lock);
- if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
- atomic_long_dec(&nr_swap_pages);
- spin_unlock(&si->lock);
+ if (get_swap_device_info(si)) {
+ if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
+ atomic_long_dec(&nr_swap_pages);
+ put_swap_device(si);
+ }
fail:
return entry;
}
@@ -2057,7 +1943,7 @@ unsigned int count_swap_pages(int type, int free)
if (sis->flags & SWP_WRITEOK) {
n = sis->pages;
if (free)
- n -= sis->inuse_pages;
+ n -= swap_usage_in_pages(sis);
}
spin_unlock(&sis->lock);
}
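swap_usage_in_pages() replaces direct reads of si->inuse_pages throughout this patch, but its definition is outside the hunks shown here. Given that alloc_swap_info() below initialises inuse_pages to SWAP_USAGE_OFFLIST_BIT, a plausible sketch is a counter that reserves one high bit as an "off the avail list" flag; the exact bit position and mask are assumptions, not taken from the patch:

    /* sketch only: usage count in the low bits, off-list flag in the top bit */
    #define SWAP_USAGE_OFFLIST_BIT  (1UL << (BITS_PER_LONG - 1))

    static long swap_usage_in_pages(struct swap_info_struct *si)
    {
            return atomic_long_read(&si->inuse_pages) & ~SWAP_USAGE_OFFLIST_BIT;
    }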
@@ -2392,7 +2278,7 @@ static int try_to_unuse(unsigned int type)
swp_entry_t entry;
unsigned int i;
- if (!READ_ONCE(si->inuse_pages))
+ if (!swap_usage_in_pages(si))
goto success;
retry:
@@ -2405,7 +2291,7 @@ retry:
spin_lock(&mmlist_lock);
p = &init_mm.mmlist;
- while (READ_ONCE(si->inuse_pages) &&
+ while (swap_usage_in_pages(si) &&
!signal_pending(current) &&
(p = p->next) != &init_mm.mmlist) {
@@ -2433,7 +2319,7 @@ retry:
mmput(prev_mm);
i = 0;
- while (READ_ONCE(si->inuse_pages) &&
+ while (swap_usage_in_pages(si) &&
!signal_pending(current) &&
(i = find_next_to_unuse(si, i)) != 0) {
@@ -2468,7 +2354,7 @@ retry:
* folio_alloc_swap(), temporarily hiding that swap. It's easy
* and robust (though cpu-intensive) just to keep retrying.
*/
- if (READ_ONCE(si->inuse_pages)) {
+ if (swap_usage_in_pages(si)) {
if (!signal_pending(current))
goto retry;
return -EINTR;
@@ -2495,7 +2381,7 @@ static void drain_mmlist(void)
unsigned int type;
for (type = 0; type < nr_swapfiles; type++)
- if (swap_info[type]->inuse_pages)
+ if (swap_usage_in_pages(swap_info[type]))
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
@@ -2674,7 +2560,6 @@ static void setup_swap_info(struct swap_info_struct *si, int prio,
static void _enable_swap_info(struct swap_info_struct *si)
{
- si->flags |= SWP_WRITEOK;
atomic_long_add(si->pages, &nr_swap_pages);
total_swap_pages += si->pages;
@@ -2691,9 +2576,8 @@ static void _enable_swap_info(struct swap_info_struct *si)
*/
plist_add(&si->list, &swap_active_head);
- /* add to available list iff swap device is not full */
- if (si->highest_bit)
- add_to_avail_list(si);
+ /* Add back to available list */
+ add_to_avail_list(si, true);
}
static void enable_swap_info(struct swap_info_struct *si, int prio,
@@ -2742,6 +2626,25 @@ bool has_usable_swap(void)
return ret;
}
+/*
+ * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range()
+ * sees the updated flags, so there will be no more allocations.
+ */
+static void wait_for_allocation(struct swap_info_struct *si)
+{
+ unsigned long offset;
+ unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
+ struct swap_cluster_info *ci;
+
+ BUG_ON(si->flags & SWP_WRITEOK);
+
+ for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
+ ci = lock_cluster(si, offset);
+ unlock_cluster(ci);
+ }
+}
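wait_for_allocation() works because any allocator racing with swapoff either already holds a cluster lock (which the lock/unlock pair above waits on) or takes the lock afterwards and then observes that SWP_WRITEOK has been cleared. The allocation-side check lives in cluster_alloc_range(), which is not part of these hunks; a hypothetical distillation of that handshake (the function name is invented for illustration):

    /* illustrative only: test SWP_WRITEOK while holding the cluster lock */
    static bool cluster_may_alloc(struct swap_info_struct *si,
                                  struct swap_cluster_info *ci)
    {
            lockdep_assert_held(&ci->lock);
            /* swapoff clears SWP_WRITEOK before calling wait_for_allocation() */
            return si->flags & SWP_WRITEOK;
    }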
+
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
@@ -2791,7 +2694,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
goto out_dput;
}
spin_lock(&p->lock);
- del_from_avail_list(p);
+ del_from_avail_list(p, true);
if (p->prio < 0) {
struct swap_info_struct *si = p;
int nid;
@@ -2809,10 +2712,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
plist_del(&p->list, &swap_active_head);
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
- p->flags &= ~SWP_WRITEOK;
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
+ wait_for_allocation(p);
+
disable_swap_slots_cache_lock();
set_current_oom_origin();
@@ -2855,16 +2759,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_lock(&p->lock);
drain_mmlist();
- /* wait for anyone still in scan_swap_map_slots */
- p->highest_bit = 0; /* cuts scans short */
- while (p->flags >= SWP_SCANNING) {
- spin_unlock(&p->lock);
- spin_unlock(&swap_lock);
- schedule_timeout_uninterruptible(1);
- spin_lock(&swap_lock);
- spin_lock(&p->lock);
- }
-
swap_file = p->swap_file;
p->swap_file = NULL;
p->max = 0;
@@ -2881,8 +2775,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_unlock(&swapon_mutex);
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;
- free_percpu(p->cluster_next_cpu);
- p->cluster_next_cpu = NULL;
+ kfree(p->global_cluster);
+ p->global_cluster = NULL;
vfree(swap_map);
kvfree(zeromap);
kvfree(cluster_info);
@@ -2992,7 +2886,7 @@ static int swap_show(struct seq_file *swap, void *v)
}
bytes = K(si->pages);
- inuse = K(READ_ONCE(si->inuse_pages));
+ inuse = K(swap_usage_in_pages(si));
file = si->swap_file;
len = seq_file_path(swap, file, " \t\n\\");
@@ -3109,6 +3003,7 @@ static struct swap_info_struct *alloc_swap_info(void)
}
spin_lock_init(&p->lock);
spin_lock_init(&p->cont_lock);
+ atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
init_completion(&p->comp);
return p;
@@ -3193,10 +3088,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si,
return 0;
}
- si->lowest_bit = 1;
- si->cluster_next = 1;
- si->cluster_nr = 0;
-
maxpages = swapfile_maximum_size;
last_page = swap_header->info.last_page;
if (!last_page) {
@@ -3213,7 +3104,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si,
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
- si->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
@@ -3281,7 +3171,6 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
unsigned long maxpages)
{
unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
struct swap_cluster_info *cluster_info;
unsigned long i, j, k, idx;
int cpu, err = -ENOMEM;
@@ -3293,25 +3182,27 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
for (i = 0; i < nr_clusters; i++)
spin_lock_init(&cluster_info[i].lock);
- si->cluster_next_cpu = alloc_percpu(unsigned int);
- if (!si->cluster_next_cpu)
- goto err_free;
-
- /* Random start position to help with wear leveling */
- for_each_possible_cpu(cpu)
- per_cpu(*si->cluster_next_cpu, cpu) =
- get_random_u32_inclusive(1, si->highest_bit);
+ if (si->flags & SWP_SOLIDSTATE) {
+ si->percpu_cluster = alloc_percpu(struct percpu_cluster);
+ if (!si->percpu_cluster)
+ goto err_free;
- si->percpu_cluster = alloc_percpu(struct percpu_cluster);
- if (!si->percpu_cluster)
- goto err_free;
+ for_each_possible_cpu(cpu) {
+ struct percpu_cluster *cluster;
- for_each_possible_cpu(cpu) {
- struct percpu_cluster *cluster;
-
- cluster = per_cpu_ptr(si->percpu_cluster, cpu);
+ cluster = per_cpu_ptr(si->percpu_cluster, cpu);
+ for (i = 0; i < SWAP_NR_ORDERS; i++)
+ cluster->next[i] = SWAP_ENTRY_INVALID;
+ local_lock_init(&cluster->lock);
+ }
+ } else {
+ si->global_cluster = kmalloc(sizeof(*si->global_cluster),
+ GFP_KERNEL);
+ if (!si->global_cluster)
+ goto err_free;
for (i = 0; i < SWAP_NR_ORDERS; i++)
- cluster->next[i] = SWAP_NEXT_INVALID;
+ si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
+ spin_lock_init(&si->global_cluster_lock);
}
/*
@@ -3335,7 +3226,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
for (i = 0; i < SWAP_NR_ORDERS; i++) {
INIT_LIST_HEAD(&si->nonfull_clusters[i]);
INIT_LIST_HEAD(&si->frag_clusters[i]);
- si->frag_cluster_nr[i] = 0;
+ atomic_long_set(&si->frag_cluster_nr[i], 0);
}
/*
@@ -3343,7 +3234,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
* sharing same address space.
*/
for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
- j = (k + col) % SWAP_CLUSTER_COLS;
+ j = k % SWAP_CLUSTER_COLS;
for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
struct swap_cluster_info *ci;
idx = i * SWAP_CLUSTER_COLS + j;
@@ -3493,18 +3384,18 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (si->bdev && bdev_nonrot(si->bdev)) {
si->flags |= SWP_SOLIDSTATE;
-
- cluster_info = setup_clusters(si, swap_header, maxpages);
- if (IS_ERR(cluster_info)) {
- error = PTR_ERR(cluster_info);
- cluster_info = NULL;
- goto bad_swap_unlock_inode;
- }
} else {
atomic_inc(&nr_rotate_swap);
inced_nr_rotate_swap = true;
}
+ cluster_info = setup_clusters(si, swap_header, maxpages);
+ if (IS_ERR(cluster_info)) {
+ error = PTR_ERR(cluster_info);
+ cluster_info = NULL;
+ goto bad_swap_unlock_inode;
+ }
+
if ((swap_flags & SWAP_FLAG_DISCARD) &&
si->bdev && bdev_max_discard_sectors(si->bdev)) {
/*
@@ -3585,8 +3476,8 @@ bad_swap_unlock_inode:
bad_swap:
free_percpu(si->percpu_cluster);
si->percpu_cluster = NULL;
- free_percpu(si->cluster_next_cpu);
- si->cluster_next_cpu = NULL;
+ kfree(si->global_cluster);
+ si->global_cluster = NULL;
inode = NULL;
destroy_swap_extents(si);
swap_cgroup_swapoff(si->type);
@@ -3623,7 +3514,7 @@ void si_swapinfo(struct sysinfo *val)
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
- nr_to_be_unused += READ_ONCE(si->inuse_pages);
+ nr_to_be_unused += swap_usage_in_pages(si);
}
val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
@@ -3655,7 +3546,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
offset = swp_offset(entry);
VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
VM_WARN_ON(usage == 1 && nr > 1);
- ci = lock_cluster_or_swap_info(si, offset);
+ ci = lock_cluster(si, offset);
err = 0;
for (i = 0; i < nr; i++) {
@@ -3710,7 +3601,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
}
unlock_out:
- unlock_cluster_or_swap_info(si, ci);
+ unlock_cluster(ci);
return err;
}
@@ -3819,7 +3710,6 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
*/
goto outer;
}
- spin_lock(&si->lock);
offset = swp_offset(entry);
@@ -3884,7 +3774,6 @@ out_unlock_cont:
spin_unlock(&si->cont_lock);
out:
unlock_cluster(ci);
- spin_unlock(&si->lock);
put_swap_device(si);
outer:
if (page)
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c304d2f0052..e2e115adfbc5 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -525,6 +525,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
}
EXPORT_SYMBOL(invalidate_mapping_pages);
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+ if (!folio_test_dirty(folio))
+ return 0;
+ if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+ return 0;
+ return mapping->a_ops->launder_folio(folio);
+}
+
/*
* This is like mapping_evict_folio(), except it ignores the folio's
* refcount. We do this because invalidate_inode_pages2() needs stronger
@@ -532,14 +541,26 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
* shrink_folio_list() has a temp ref on them, or because they're transiently
* sitting in the folio_add_lru() caches.
*/
-static int invalidate_complete_folio2(struct address_space *mapping,
- struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+ gfp_t gfp)
{
- if (folio->mapping != mapping)
- return 0;
+ int ret;
+
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- if (!filemap_release_folio(folio, GFP_KERNEL))
+ if (folio_test_dirty(folio))
return 0;
+ if (folio_mapped(folio))
+ unmap_mapping_folio(folio);
+ BUG_ON(folio_mapped(folio));
+
+ ret = folio_launder(mapping, folio);
+ if (ret)
+ return ret;
+ if (folio->mapping != mapping)
+ return -EBUSY;
+ if (!filemap_release_folio(folio, gfp))
+ return -EBUSY;
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
failed:
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&mapping->host->i_lock);
- return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
- if (!folio_test_dirty(folio))
- return 0;
- if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
- return 0;
- return mapping->a_ops->launder_folio(folio);
+ return -EBUSY;
}
/**
@@ -631,16 +643,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
folio_wait_writeback(folio);
-
- if (folio_mapped(folio))
- unmap_mapping_folio(folio);
- BUG_ON(folio_mapped(folio));
-
- ret2 = folio_launder(mapping, folio);
- if (ret2 == 0) {
- if (!invalidate_complete_folio2(mapping, folio))
- ret2 = -EBUSY;
- }
+ ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
if (ret2 < 0)
ret = ret2;
folio_unlock(folio);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 60a0be33766f..af3dfc3633db 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1020,6 +1020,14 @@ void double_pt_unlock(spinlock_t *ptl1,
__release(ptl2);
}
+static inline bool is_pte_pages_stable(pte_t *dst_pte, pte_t *src_pte,
+ pte_t orig_dst_pte, pte_t orig_src_pte,
+ pmd_t *dst_pmd, pmd_t dst_pmdval)
+{
+ return pte_same(ptep_get(src_pte), orig_src_pte) &&
+ pte_same(ptep_get(dst_pte), orig_dst_pte) &&
+ pmd_same(dst_pmdval, pmdp_get_lockless(dst_pmd));
+}
static int move_present_pte(struct mm_struct *mm,
struct vm_area_struct *dst_vma,
@@ -1027,6 +1035,7 @@ static int move_present_pte(struct mm_struct *mm,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
+ pmd_t *dst_pmd, pmd_t dst_pmdval,
spinlock_t *dst_ptl, spinlock_t *src_ptl,
struct folio *src_folio)
{
@@ -1034,8 +1043,8 @@ static int move_present_pte(struct mm_struct *mm,
double_pt_lock(dst_ptl, src_ptl);
- if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
- !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
+ if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte,
+ dst_pmd, dst_pmdval)) {
err = -EAGAIN;
goto out;
}
@@ -1071,6 +1080,7 @@ static int move_swap_pte(struct mm_struct *mm,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
+ pmd_t *dst_pmd, pmd_t dst_pmdval,
spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
if (!pte_swp_exclusive(orig_src_pte))
@@ -1078,8 +1088,8 @@ static int move_swap_pte(struct mm_struct *mm,
double_pt_lock(dst_ptl, src_ptl);
- if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
- !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
+ if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte,
+ dst_pmd, dst_pmdval)) {
double_pt_unlock(dst_ptl, src_ptl);
return -EAGAIN;
}
@@ -1097,13 +1107,14 @@ static int move_zeropage_pte(struct mm_struct *mm,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
+ pmd_t *dst_pmd, pmd_t dst_pmdval,
spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
pte_t zero_pte;
double_pt_lock(dst_ptl, src_ptl);
- if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
- !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
+ if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte,
+ dst_pmd, dst_pmdval)) {
double_pt_unlock(dst_ptl, src_ptl);
return -EAGAIN;
}
@@ -1136,6 +1147,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
pte_t *src_pte = NULL;
pte_t *dst_pte = NULL;
pmd_t dummy_pmdval;
+ pmd_t dst_pmdval;
struct folio *src_folio = NULL;
struct anon_vma *src_anon_vma = NULL;
struct mmu_notifier_range range;
@@ -1148,11 +1160,11 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
retry:
/*
* Use the maywrite version to indicate that dst_pte will be modified,
- * but since we will use pte_same() to detect the change of the pte
- * entry, there is no need to get pmdval, so just pass a dummy variable
- * to it.
+ * since dst_pte needs to be none, the subsequent pte_same() check
+ * cannot prevent the dst_pte page from being freed concurrently, so we
+ * also need to obtain dst_pmdval and recheck pmd_same() later.
*/
- dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dummy_pmdval,
+ dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dst_pmdval,
&dst_ptl);
/* Retry if a huge pmd materialized from under us */
@@ -1161,7 +1173,11 @@ retry:
goto out;
}
- /* same as dst_pte */
+ /*
+ * Unlike dst_pte, the subsequent pte_same() check can ensure the
+ * stability of the src_pte page, so there is no need to get pmdval,
+ * just pass a dummy variable to it.
+ */
src_pte = pte_offset_map_rw_nolock(mm, src_pmd, src_addr, &dummy_pmdval,
&src_ptl);
@@ -1177,8 +1193,8 @@ retry:
}
/* Sanity checks before the operation */
- if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
- WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
+ if (pmd_none(*dst_pmd) || pmd_none(*src_pmd) ||
+ pmd_trans_huge(*dst_pmd) || pmd_trans_huge(*src_pmd)) {
err = -EINVAL;
goto out;
}
@@ -1213,7 +1229,7 @@ retry:
err = move_zeropage_pte(mm, dst_vma, src_vma,
dst_addr, src_addr, dst_pte, src_pte,
orig_dst_pte, orig_src_pte,
- dst_ptl, src_ptl);
+ dst_pmd, dst_pmdval, dst_ptl, src_ptl);
goto out;
}
@@ -1303,8 +1319,8 @@ retry:
err = move_present_pte(mm, dst_vma, src_vma,
dst_addr, src_addr, dst_pte, src_pte,
- orig_dst_pte, orig_src_pte,
- dst_ptl, src_ptl, src_folio);
+ orig_dst_pte, orig_src_pte, dst_pmd,
+ dst_pmdval, dst_ptl, src_ptl, src_folio);
} else {
entry = pte_to_swp_entry(orig_src_pte);
if (non_swap_entry(entry)) {
@@ -1319,10 +1335,9 @@ retry:
goto out;
}
- err = move_swap_pte(mm, dst_addr, src_addr,
- dst_pte, src_pte,
- orig_dst_pte, orig_src_pte,
- dst_ptl, src_ptl);
+ err = move_swap_pte(mm, dst_addr, src_addr, dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte, dst_pmd,
+ dst_pmdval, dst_ptl, src_ptl);
}
out:
diff --git a/mm/util.c b/mm/util.c
index 60aa40f612b8..b6b9684a1438 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -582,6 +582,23 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
return ret;
}
+/*
+ * Perform a userland memory mapping into the current process address space. See
+ * the comment for do_mmap() for more details on this operation in general.
+ *
+ * This differs from do_mmap() in that:
+ *
+ * a. An offset parameter is provided rather than pgoff, which is checked
+ * both for overflow and for page alignment.
+ * b. mmap locking is performed on the caller's behalf.
+ * c. Userfaultfd unmap events and memory population are handled.
+ *
+ * This means that this function performs essentially the same work as if
+ * userland were invoking mmap (2).
+ *
+ * Returns either an error, or the address at which the requested mapping has
+ * been performed.
+ */
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
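As the new comment notes, vm_mmap() behaves like a kernel-side mmap(2). An illustrative in-kernel caller (not from this patch; filp stands for some struct file *) mapping one page of a file read/write into the current process would look roughly like:

    unsigned long addr;

    addr = vm_mmap(filp, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
                   MAP_SHARED, 0);
    if (IS_ERR_VALUE(addr))
            return addr;    /* negative errno encoded in the address */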
diff --git a/mm/vma.c b/mm/vma.c
index bb2119e5a0d0..af1d549b179c 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -203,6 +203,38 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
}
/*
+ * vma has some anon_vma assigned, and is already inserted on that
+ * anon_vma's interval trees.
+ *
+ * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
+ * vma must be removed from the anon_vma's interval trees using
+ * anon_vma_interval_tree_pre_update_vma().
+ *
+ * After the update, the vma will be reinserted using
+ * anon_vma_interval_tree_post_update_vma().
+ *
+ * The entire update must be protected by exclusive mmap_lock and by
+ * the root anon_vma's mutex.
+ */
+static void
+anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
+}
+
+static void
+anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
+}
+
+/*
* vma_prepare() - Helper function for handling locking VMAs prior to altering
* @vp: The initialized vma_prepare struct
*/
@@ -398,7 +430,6 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
- lru_add_drain();
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
@@ -415,8 +446,9 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
* has already been checked or doesn't make sense to fail.
* VMA Iterator will point to the original VMA.
*/
-static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
+static __must_check int
+__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
{
struct vma_prepare vp;
struct vm_area_struct *new;
@@ -511,38 +543,6 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
}
/*
- * vma has some anon_vma assigned, and is already inserted on that
- * anon_vma's interval trees.
- *
- * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
- * vma must be removed from the anon_vma's interval trees using
- * anon_vma_interval_tree_pre_update_vma().
- *
- * After the update, the vma will be reinserted using
- * anon_vma_interval_tree_post_update_vma().
- *
- * The entire update must be protected by exclusive mmap_lock and by
- * the root anon_vma's mutex.
- */
-void
-anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
-}
-
-void
-anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
-}
-
-/*
* dup_anon_vma() - Helper function to duplicate anon_vma
* @dst: The destination VMA
* @src: The source VMA
@@ -710,7 +710,8 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
* - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
*/
-static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
+static __must_check struct vm_area_struct *vma_merge_existing_range(
+ struct vma_merge_struct *vmg)
{
struct vm_area_struct *vma = vmg->vma;
struct vm_area_struct *prev = vmg->prev;
@@ -728,19 +729,20 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
bool expanded;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
- VM_WARN_ON(vmg->next); /* We set this. */
- VM_WARN_ON(prev && start <= prev->vm_start);
- VM_WARN_ON(start >= end);
+ VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
+ VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
+ VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
+ VM_WARN_ON_VMG(start >= end, vmg);
+
/*
* If vma == prev, then we are offset into a VMA. Otherwise, if we are
* not, we must span a portion of the VMA.
*/
- VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
- vmg->end > vma->vm_end));
+ VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
+ vmg->end > vma->vm_end), vmg);
/* The vmi must be positioned within vmg->vma. */
- VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
- vma_iter_addr(vmg->vmi) < vma->vm_end));
+ VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
+ vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);
vmg->state = VMA_MERGE_NOMERGE;
@@ -857,9 +859,9 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *
pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
- VM_WARN_ON(!merge_right);
+ VM_WARN_ON_VMG(!merge_right, vmg);
/* If we are offset into a VMA, then prev must be vma. */
- VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
+ VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);
if (merge_will_delete_vma) {
vmg->vma = next;
@@ -971,9 +973,9 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON(vmg->vma);
+ VM_WARN_ON_VMG(vmg->vma, vmg);
/* vmi must point at or before the gap. */
- VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
+ VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
vmg->state = VMA_MERGE_NOMERGE;
@@ -1055,7 +1057,7 @@ int vma_expand(struct vma_merge_struct *vmg)
remove_next = true;
/* This should already have been checked by this point. */
- VM_WARN_ON(!can_merge_remove_vma(next));
+ VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
vma_start_write(next);
ret = dup_anon_vma(vma, next, &anon_dup);
if (ret)
@@ -1063,10 +1065,10 @@ int vma_expand(struct vma_merge_struct *vmg)
}
/* Not merging but overwriting any part of next is not handled. */
- VM_WARN_ON(next && !remove_next &&
- next != vma && vmg->end > next->vm_start);
+ VM_WARN_ON_VMG(next && !remove_next &&
+ next != vma && vmg->end > next->vm_start, vmg);
/* Only handles expanding */
- VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
+ VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);
if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
goto nomem;
@@ -1130,7 +1132,6 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
* were isolated before we downgraded mmap_lock.
*/
mas_set(mas_detach, 1);
- lru_add_drain();
tlb_gather_mmu(&tlb, vms->vma->vm_mm);
update_hiwater_rss(vms->vma->vm_mm);
unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
@@ -2430,7 +2431,7 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
vma_set_page_prot(vma);
}
-unsigned long __mmap_region(struct file *file, unsigned long addr,
+static unsigned long __mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf)
{
@@ -2481,3 +2482,476 @@ abort_munmap:
vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
return error;
}
+
+/**
+ * mmap_region() - Actually perform the userland mapping of a VMA into
+ * current->mm with known, aligned and overflow-checked @addr and @len, and
+ * correctly determined VMA flags @vm_flags and page offset @pgoff.
+ *
+ * This is an internal memory management function, and should not be used
+ * directly.
+ *
+ * The caller must write-lock current->mm->mmap_lock.
+ *
+ * @file: If a file-backed mapping, a pointer to the struct file describing the
+ * file to be mapped, otherwise NULL.
+ * @addr: The page-aligned address at which to perform the mapping.
+ * @len: The page-aligned, non-zero, length of the mapping.
+ * @vm_flags: The VMA flags which should be applied to the mapping.
+ * @pgoff: If @file is specified, the page offset into the file, if not then
+ * the virtual page offset in memory of the anonymous mapping.
+ * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
+ * events.
+ *
+ * Returns: Either an error, or the address at which the requested mapping has
+ * been performed.
+ */
+unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+ struct list_head *uf)
+{
+ unsigned long ret;
+ bool writable_file_mapping = false;
+
+ mmap_assert_write_locked(current->mm);
+
+ /* Check to see if MDWE is applicable. */
+ if (map_deny_write_exec(vm_flags, vm_flags))
+ return -EACCES;
+
+ /* Allow architectures to sanity-check the vm_flags. */
+ if (!arch_validate_flags(vm_flags))
+ return -EINVAL;
+
+ /* Map writable and ensure this isn't a sealed memfd. */
+ if (file && is_shared_maywrite(vm_flags)) {
+ int error = mapping_map_writable(file->f_mapping);
+
+ if (error)
+ return error;
+ writable_file_mapping = true;
+ }
+
+ ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
+
+ /* Clear our write mapping regardless of error. */
+ if (writable_file_mapping)
+ mapping_unmap_writable(file->f_mapping);
+
+ validate_mm(current->mm);
+ return ret;
+}
+
+/*
+ * do_brk_flags() - Increase the brk vma if the flags match.
+ * @vmi: The vma iterator
+ * @addr: The start address
+ * @len: The length of the increase
+ * @vma: The vma,
+ * @flags: The VMA Flags
+ *
+ * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
+ * do not match then create a new anonymous VMA. Eventually we may be able to
+ * do some brk-specific accounting here.
+ */
+int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long len, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+
+ /*
+ * Check against address space limits by the changed size
+ * Note: This happens *after* clearing old mappings in some code paths.
+ */
+ flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+ if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+ if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+ /*
+ * Expand the existing vma if possible; Note that singular lists do not
+ * occur after forking, so the expand will only happen on new VMAs.
+ */
+ if (vma && vma->vm_end == addr) {
+ VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
+
+ vmg.prev = vma;
+ /* vmi is positioned at prev, which this mode expects. */
+ vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+
+ if (vma_merge_new_range(&vmg))
+ goto out;
+ else if (vmg_nomem(&vmg))
+ goto unacct_fail;
+ }
+
+ if (vma)
+ vma_iter_next_range(vmi);
+ /* create a vma struct for an anonymous mapping */
+ vma = vm_area_alloc(mm);
+ if (!vma)
+ goto unacct_fail;
+
+ vma_set_anonymous(vma);
+ vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
+ vm_flags_init(vma, flags);
+ vma->vm_page_prot = vm_get_page_prot(flags);
+ vma_start_write(vma);
+ if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
+ goto mas_store_fail;
+
+ mm->map_count++;
+ validate_mm(mm);
+ ksm_add_vma(vma);
+out:
+ perf_event_mmap(vma);
+ mm->total_vm += len >> PAGE_SHIFT;
+ mm->data_vm += len >> PAGE_SHIFT;
+ if (flags & VM_LOCKED)
+ mm->locked_vm += (len >> PAGE_SHIFT);
+ vm_flags_set(vma, VM_SOFTDIRTY);
+ return 0;
+
+mas_store_fail:
+ vm_area_free(vma);
+unacct_fail:
+ vm_unacct_memory(len >> PAGE_SHIFT);
+ return -ENOMEM;
+}
+
+/**
+ * unmapped_area() - Find an area between the low_limit and the high_limit with
+ * the correct alignment and offset, all from @info. Note: current->mm is used
+ * for the search.
+ *
+ * @info: The unmapped area information including the range [low_limit -
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
+unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+{
+ unsigned long length, gap;
+ unsigned long low_limit, high_limit;
+ struct vm_area_struct *tmp;
+ VMA_ITERATOR(vmi, current->mm, 0);
+
+ /* Adjust search length to account for worst case alignment overhead */
+ length = info->length + info->align_mask + info->start_gap;
+ if (length < info->length)
+ return -ENOMEM;
+
+ low_limit = info->low_limit;
+ if (low_limit < mmap_min_addr)
+ low_limit = mmap_min_addr;
+ high_limit = info->high_limit;
+retry:
+ if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
+ return -ENOMEM;
+
+ /*
+ * Adjust for the gap first so it doesn't interfere with the
+ * later alignment. The first step is the minimum needed to
+ * fulfill the start gap, the next step is the minimum to align
+ * that. It is the minimum needed to fulfill both.
+ */
+ gap = vma_iter_addr(&vmi) + info->start_gap;
+ gap += (info->align_offset - gap) & info->align_mask;
+ tmp = vma_next(&vmi);
+ if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+ if (vm_start_gap(tmp) < gap + length - 1) {
+ low_limit = tmp->vm_end;
+ vma_iter_reset(&vmi);
+ goto retry;
+ }
+ } else {
+ tmp = vma_prev(&vmi);
+ if (tmp && vm_end_gap(tmp) > gap) {
+ low_limit = vm_end_gap(tmp);
+ vma_iter_reset(&vmi);
+ goto retry;
+ }
+ }
+
+ return gap;
+}
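The two gap adjustments above are the subtle part: the first bump satisfies start_gap, the second rounds up to the requested alignment. A worked example with illustrative values, where the iterator reports a free area at 0x103000, info->start_gap = 0, info->align_mask = 0x1fffff (2 MiB alignment) and info->align_offset = 0:

    gap  = 0x103000
    gap += (0x0 - 0x103000) & 0x1fffff    /* += 0xfd000 */
    gap  = 0x200000                       /* next 2 MiB boundary */

The following vm_start_gap()/vm_end_gap() checks then confirm the aligned address still leaves room before the neighbouring VMAs, retrying with a raised low_limit if not.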
+
+/**
+ * unmapped_area_topdown() - Find an area between the low_limit and the
+ * high_limit with the correct alignment and offset at the highest available
+ * address, all from @info. Note: current->mm is used for the search.
+ *
+ * @info: The unmapped area information including the range [low_limit -
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+{
+ unsigned long length, gap, gap_end;
+ unsigned long low_limit, high_limit;
+ struct vm_area_struct *tmp;
+ VMA_ITERATOR(vmi, current->mm, 0);
+
+ /* Adjust search length to account for worst case alignment overhead */
+ length = info->length + info->align_mask + info->start_gap;
+ if (length < info->length)
+ return -ENOMEM;
+
+ low_limit = info->low_limit;
+ if (low_limit < mmap_min_addr)
+ low_limit = mmap_min_addr;
+ high_limit = info->high_limit;
+retry:
+ if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
+ return -ENOMEM;
+
+ gap = vma_iter_end(&vmi) - info->length;
+ gap -= (gap - info->align_offset) & info->align_mask;
+ gap_end = vma_iter_end(&vmi);
+ tmp = vma_next(&vmi);
+ if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
+ if (vm_start_gap(tmp) < gap_end) {
+ high_limit = vm_start_gap(tmp);
+ vma_iter_reset(&vmi);
+ goto retry;
+ }
+ } else {
+ tmp = vma_prev(&vmi);
+ if (tmp && vm_end_gap(tmp) > gap) {
+ high_limit = tmp->vm_start;
+ vma_iter_reset(&vmi);
+ goto retry;
+ }
+ }
+
+ return gap;
+}
+
+/*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+ * grow-up and grow-down cases.
+ */
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long new_start;
+
+ /* address space limit tests */
+ if (!may_expand_vm(mm, vma->vm_flags, grow))
+ return -ENOMEM;
+
+ /* Stack limit test */
+ if (size > rlimit(RLIMIT_STACK))
+ return -ENOMEM;
+
+ /* mlock limit tests */
+ if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
+ return -ENOMEM;
+
+ /* Check to ensure the stack will not grow into a hugetlb-only region */
+ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
+ vma->vm_end - size;
+ if (is_hugepage_only_range(vma->vm_mm, new_start, size))
+ return -EFAULT;
+
+ /*
+ * Overcommit.. This must be the final test, as it will
+ * update security statistics.
+ */
+ if (security_vm_enough_memory_mm(mm, grow))
+ return -ENOMEM;
+
+ return 0;
+}
+
+#if defined(CONFIG_STACK_GROWSUP)
+/*
+ * PA-RISC uses this for its stack.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
+ */
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
+ int error = 0;
+ VMA_ITERATOR(vmi, mm, vma->vm_start);
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+ mmap_assert_write_locked(mm);
+
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= (TASK_SIZE & PAGE_MASK))
+ return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = find_vma_intersection(mm, vma->vm_end, gap_addr);
+ if (next && vma_is_accessible(next)) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ if (next)
+ vma_iter_prev_range_limit(&vmi, address);
+
+ vma_iter_config(&vmi, vma->vm_start, address);
+ if (vma_iter_prealloc(&vmi, vma))
+ return -ENOMEM;
+
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma))) {
+ vma_iter_free(&vmi);
+ return -ENOMEM;
+ }
+
+ /* Lock the VMA before expanding to prevent concurrent page faults */
+ vma_start_write(vma);
+ /* We update the anon VMA tree. */
+ anon_vma_lock_write(vma->anon_vma);
+
+ /* Somebody else might have raced and expanded it already */
+ if (address > vma->vm_end) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+ grow = (address - vma->vm_end) >> PAGE_SHIFT;
+
+ error = -ENOMEM;
+ if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ if (vma->vm_flags & VM_LOCKED)
+ mm->locked_vm += grow;
+ vm_stat_account(mm, vma->vm_flags, grow);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ vma->vm_end = address;
+ /* Overwrite old entry in mtree. */
+ vma_iter_store(&vmi, vma);
+ anon_vma_interval_tree_post_update_vma(vma);
+
+ perf_event_mmap(vma);
+ }
+ }
+ }
+ anon_vma_unlock_write(vma->anon_vma);
+ vma_iter_free(&vmi);
+ validate_mm(mm);
+ return error;
+}
+#endif /* CONFIG_STACK_GROWSUP */
+
+/*
+ * vma is the first one with address < vma->vm_start. Have to extend vma.
+ * mmap_lock held for writing.
+ */
+int expand_downwards(struct vm_area_struct *vma, unsigned long address)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ int error = 0;
+ VMA_ITERATOR(vmi, mm, vma->vm_start);
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return -EFAULT;
+
+ mmap_assert_write_locked(mm);
+
+ address &= PAGE_MASK;
+ if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
+ return -EPERM;
+
+ /* Enforce stack_guard_gap */
+ prev = vma_prev(&vmi);
+ /* Check that both stack segments have the same anon_vma? */
+ if (prev) {
+ if (!(prev->vm_flags & VM_GROWSDOWN) &&
+ vma_is_accessible(prev) &&
+ (address - prev->vm_end < stack_guard_gap))
+ return -ENOMEM;
+ }
+
+ if (prev)
+ vma_iter_next_range_limit(&vmi, vma->vm_start);
+
+ vma_iter_config(&vmi, address, vma->vm_end);
+ if (vma_iter_prealloc(&vmi, vma))
+ return -ENOMEM;
+
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma))) {
+ vma_iter_free(&vmi);
+ return -ENOMEM;
+ }
+
+ /* Lock the VMA before expanding to prevent concurrent page faults */
+ vma_start_write(vma);
+ /* We update the anon VMA tree. */
+ anon_vma_lock_write(vma->anon_vma);
+
+ /* Somebody else might have raced and expanded it already */
+ if (address < vma->vm_start) {
+ unsigned long size, grow;
+
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+ error = -ENOMEM;
+ if (grow <= vma->vm_pgoff) {
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ if (vma->vm_flags & VM_LOCKED)
+ mm->locked_vm += grow;
+ vm_stat_account(mm, vma->vm_flags, grow);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ /* Overwrite old entry in mtree. */
+ vma_iter_store(&vmi, vma);
+ anon_vma_interval_tree_post_update_vma(vma);
+
+ perf_event_mmap(vma);
+ }
+ }
+ }
+ anon_vma_unlock_write(vma->anon_vma);
+ vma_iter_free(&vmi);
+ validate_mm(mm);
+ return error;
+}
+
+int __vm_munmap(unsigned long start, size_t len, bool unlock)
+{
+ int ret;
+ struct mm_struct *mm = current->mm;
+ LIST_HEAD(uf);
+ VMA_ITERATOR(vmi, mm, start);
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ if (ret || !unlock)
+ mmap_write_unlock(mm);
+
+ userfaultfd_unmap_complete(mm, &uf);
+ return ret;
+}
diff --git a/mm/vma.h b/mm/vma.h
index 388d34748674..a2e8710b8c47 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -139,15 +139,10 @@ void validate_mm(struct mm_struct *mm);
#define validate_mm(mm) do { } while (0)
#endif
-/* Required for expand_downwards(). */
-void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);
-
-/* Required for expand_downwards(). */
-void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);
-
-int vma_expand(struct vma_merge_struct *vmg);
-int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff);
+__must_check int vma_expand(struct vma_merge_struct *vmg);
+__must_check int vma_shrink(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
@@ -180,13 +175,14 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct vm_area_struct *next);
/* We are about to modify the VMA's flags. */
-struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
+__must_check struct vm_area_struct
+*vma_modify_flags(struct vma_iterator *vmi,
struct vm_area_struct *prev, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long new_flags);
/* We are about to modify the VMA's flags and/or anon_name. */
-struct vm_area_struct
+__must_check struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
@@ -196,7 +192,7 @@ struct vm_area_struct
struct anon_vma_name *new_name);
/* We are about to modify the VMA's memory policy. */
-struct vm_area_struct
+__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
@@ -204,7 +200,7 @@ struct vm_area_struct
struct mempolicy *new_pol);
/* We are about to modify the VMA's flags and/or uffd context. */
-struct vm_area_struct
+__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
struct vm_area_struct *prev,
struct vm_area_struct *vma,
@@ -212,11 +208,13 @@ struct vm_area_struct
unsigned long new_flags,
struct vm_userfaultfd_ctx new_ctx);
-struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
+__must_check struct vm_area_struct
+*vma_merge_new_range(struct vma_merge_struct *vmg);
-struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
- struct vm_area_struct *vma,
- unsigned long delta);
+__must_check struct vm_area_struct
+*vma_merge_extend(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long delta);
void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
@@ -243,10 +241,16 @@ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
-unsigned long __mmap_region(struct file *file, unsigned long addr,
+unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf);
+int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
+ unsigned long addr, unsigned long request, unsigned long flags);
+
+unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
/*
@@ -472,4 +476,12 @@ static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
#endif
+#if defined(CONFIG_STACK_GROWSUP)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
+
+int expand_downwards(struct vm_area_struct *vma, unsigned long address);
+
+int __vm_munmap(unsigned long start, size_t len, bool unlock);
+
#endif /* __MM_VMA_H */
diff --git a/mm/vma_internal.h b/mm/vma_internal.h
index fc5f172a36bd..2f05735ff190 100644
--- a/mm/vma_internal.h
+++ b/mm/vma_internal.h
@@ -35,6 +35,7 @@
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
+#include <linux/personality.h>
#include <linux/pfn.h>
#include <linux/rcupdate.h>
#include <linux/rmap.h>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5c88d0e90c20..a6e7acebe9ad 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3562,11 +3562,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* but mempolicy wants to alloc memory by interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
- nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
+ nr = alloc_pages_bulk_mempolicy_noprof(gfp,
nr_pages_request,
pages + nr_allocated);
else
- nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
+ nr = alloc_pages_bulk_node_noprof(gfp, nid,
nr_pages_request,
pages + nr_allocated);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 867a2554434a..c767d71c43d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -862,6 +862,31 @@ enum folio_references {
FOLIOREF_ACTIVATE,
};
+#ifdef CONFIG_LRU_GEN
+/*
+ * Only used on a mapped folio in the eviction (rmap walk) path, where promotion
+ * needs to be done by taking the folio off the LRU list and then adding it back
+ * with PG_active set. In contrast, the aging (page table walk) path uses
+ * folio_update_gen().
+ */
+static bool lru_gen_set_refs(struct folio *folio)
+{
+ /* see the comment on LRU_REFS_FLAGS */
+ if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ return false;
+ }
+
+ set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
+ return true;
+}
+#else
+static bool lru_gen_set_refs(struct folio *folio)
+{
+ return false;
+}
+#endif /* CONFIG_LRU_GEN */
+
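lru_gen_set_refs() above leans on set_mask_bits() to clear one mask of folio flags and set another in a single atomic read-modify-write. A rough userspace analogue of that clear-then-set retry loop using C11 atomics (illustrative only, not the kernel macro):

#include <stdatomic.h>
#include <stdio.h>

/* atomically clear 'mask' and set 'bits' in *p */
static void set_mask_bits_demo(_Atomic unsigned long *p,
			       unsigned long mask, unsigned long bits)
{
	unsigned long old = atomic_load(p);

	while (!atomic_compare_exchange_weak(p, &old, (old & ~mask) | bits))
		;	/* retry with the refreshed 'old' on contention */
}

int main(void)
{
	_Atomic unsigned long flags = 0xf0;

	set_mask_bits_demo(&flags, 0xff, 0x05);	/* clear the low byte, then set 0x05 */
	printf("flags=%#lx\n", (unsigned long)atomic_load(&flags));	/* prints 0x5 */
	return 0;
}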
static enum folio_references folio_check_references(struct folio *folio,
struct scan_control *sc)
{
@@ -870,7 +895,6 @@ static enum folio_references folio_check_references(struct folio *folio,
referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
&vm_flags);
- referenced_folio = folio_test_clear_referenced(folio);
/*
* The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
@@ -888,6 +912,15 @@ static enum folio_references folio_check_references(struct folio *folio,
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
+ if (lru_gen_enabled()) {
+ if (!referenced_ptes)
+ return FOLIOREF_RECLAIM;
+
+ return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP;
+ }
+
+ referenced_folio = folio_test_clear_referenced(folio);
+
if (referenced_ptes) {
/*
* All mapped folios start out with page table
@@ -1053,7 +1086,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
struct folio_batch free_folios;
LIST_HEAD(ret_folios);
LIST_HEAD(demote_folios);
- unsigned int nr_reclaimed = 0;
+ unsigned int nr_reclaimed = 0, nr_demoted = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
struct swap_iocb *plug = NULL;
@@ -1092,11 +1125,6 @@ retry:
if (!sc->may_unmap && folio_mapped(folio))
goto keep_locked;
- /* folio_update_gen() tried to promote this page? */
- if (lru_gen_enabled() && !ignore_references &&
- folio_mapped(folio) && folio_test_referenced(folio))
- goto keep_locked;
-
/*
* The number of dirty pages determines if a node is marked
* reclaim_congested. kswapd will stall and start writing
@@ -1522,8 +1550,9 @@ keep:
/* 'folio_list' is always empty here */
/* Migrate folios selected for demotion */
- stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
- nr_reclaimed += stat->nr_demoted;
+ nr_demoted = demote_folio_list(&demote_folios, pgdat);
+ nr_reclaimed += nr_demoted;
+ stat->nr_demoted += nr_demoted;
/* Folios that could not be demoted are still in @demote_folios */
if (!list_empty(&demote_folios)) {
/* Folios which weren't demoted go back on @folio_list */
@@ -1664,6 +1693,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long skipped = 0;
unsigned long scan, total_scan, nr_pages;
+ unsigned long max_nr_skipped = 0;
LIST_HEAD(folios_skipped);
total_scan = 0;
@@ -1678,9 +1708,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
- if (folio_zonenum(folio) > sc->reclaim_idx) {
+ /* use max_nr_skipped to prevent a hard lockup */
+ if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
+ (folio_zonenum(folio) > sc->reclaim_idx)) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
+ max_nr_skipped++;
goto move;
}
@@ -2619,11 +2652,17 @@ static bool should_clear_pmd_young(void)
READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
}
+#define evictable_min_seq(min_seq, swappiness) \
+ min((min_seq)[!(swappiness)], (min_seq)[(swappiness) <= MAX_SWAPPINESS])
+
#define for_each_gen_type_zone(gen, type, zone) \
for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
+#define for_each_evictable_type(type, swappiness) \
+ for ((type) = !(swappiness); (type) <= ((swappiness) <= MAX_SWAPPINESS); (type)++)
+
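for_each_evictable_type() walks only the LRU types the given swappiness allows evicting: 0 means file only, 1..MAX_SWAPPINESS means anon and file, and the special MAX_SWAPPINESS + 1 means anon only. A standalone sketch of the bound arithmetic, assuming MAX_SWAPPINESS is 200 and anon/file are types 0/1 as in current kernels:

#include <stdio.h>

#define MAX_SWAPPINESS	200
#define LRU_GEN_ANON	0
#define LRU_GEN_FILE	1

#define for_each_evictable_type(type, swappiness)			\
	for ((type) = !(swappiness);					\
	     (type) <= ((swappiness) <= MAX_SWAPPINESS); (type)++)

int main(void)
{
	int vals[] = { 0, 1, 60, MAX_SWAPPINESS, MAX_SWAPPINESS + 1 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(*vals); i++) {
		int swappiness = vals[i], type;

		printf("swappiness=%3d:", swappiness);
		for_each_evictable_type(type, swappiness)
			printf(" %s", type == LRU_GEN_ANON ? "anon" : "file");
		printf("\n");
	}
	return 0;
}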
#define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
#define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
@@ -2669,10 +2708,16 @@ static int get_nr_gens(struct lruvec *lruvec, int type)
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
- /* see the comment on lru_gen_folio */
- return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
- get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
- get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
+ int type;
+
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ int n = get_nr_gens(lruvec, type);
+
+ if (n < MIN_NR_GENS || n > MAX_NR_GENS)
+ return false;
+ }
+
+ return true;
}
/******************************************************************************
@@ -3073,16 +3118,20 @@ struct ctrl_pos {
static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
struct ctrl_pos *pos)
{
+ int i;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
- pos->refaulted = lrugen->avg_refaulted[type][tier] +
- atomic_long_read(&lrugen->refaulted[hist][type][tier]);
- pos->total = lrugen->avg_total[type][tier] +
- atomic_long_read(&lrugen->evicted[hist][type][tier]);
- if (tier)
- pos->total += lrugen->protected[hist][type][tier - 1];
pos->gain = gain;
+ pos->refaulted = pos->total = 0;
+
+ for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) {
+ pos->refaulted += lrugen->avg_refaulted[type][i] +
+ atomic_long_read(&lrugen->refaulted[hist][type][i]);
+ pos->total += lrugen->avg_total[type][i] +
+ lrugen->protected[hist][type][i] +
+ atomic_long_read(&lrugen->evicted[hist][type][i]);
+ }
}
static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
@@ -3108,17 +3157,15 @@ static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
sum = lrugen->avg_total[type][tier] +
+ lrugen->protected[hist][type][tier] +
atomic_long_read(&lrugen->evicted[hist][type][tier]);
- if (tier)
- sum += lrugen->protected[hist][type][tier - 1];
WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
}
if (clear) {
atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
- if (tier)
- WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
+ WRITE_ONCE(lrugen->protected[hist][type][tier], 0);
}
}
}
@@ -3145,16 +3192,19 @@ static int folio_update_gen(struct folio *folio, int gen)
VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+ /* see the comment on LRU_REFS_FLAGS */
+ if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ return -1;
+ }
+
do {
/* lru_gen_del_folio() has isolated this page? */
- if (!(old_flags & LRU_GEN_MASK)) {
- /* for shrink_folio_list() */
- new_flags = old_flags | BIT(PG_referenced);
- continue;
- }
+ if (!(old_flags & LRU_GEN_MASK))
+ return -1;
- new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
- new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
+ new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
@@ -3178,7 +3228,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
new_gen = (old_gen + 1) % MAX_NR_GENS;
- new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
/* for folio_end_writeback() */
if (reclaiming)
@@ -3253,7 +3303,7 @@ static int should_skip_vma(unsigned long start, unsigned long end, struct mm_wal
return true;
if (vma_is_anonymous(vma))
- return !walk->can_swap;
+ return !walk->swappiness;
if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
return true;
@@ -3263,7 +3313,10 @@ static int should_skip_vma(unsigned long start, unsigned long end, struct mm_wal
return true;
if (shmem_mapping(mapping))
- return !walk->can_swap;
+ return !walk->swappiness;
+
+ if (walk->swappiness > MAX_SWAPPINESS)
+ return true;
/* to exclude special mappings like dax, etc. */
return !mapping->a_ops->read_folio;
@@ -3351,19 +3404,17 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
}
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
- struct pglist_data *pgdat, bool can_swap)
+ struct pglist_data *pgdat)
{
- struct folio *folio;
+ struct folio *folio = pfn_folio(pfn);
- folio = pfn_folio(pfn);
- if (folio_nid(folio) != pgdat->node_id)
+ if (folio_lru_gen(folio) < 0)
return NULL;
- if (folio_memcg(folio) != memcg)
+ if (folio_nid(folio) != pgdat->node_id)
return NULL;
- /* file VMAs can contain anon pages from COW */
- if (!folio_is_file_lru(folio) && !can_swap)
+ if (folio_memcg(folio) != memcg)
return NULL;
return folio;
@@ -3377,29 +3428,55 @@ static bool suitable_to_scan(int total, int young)
return young * n >= total;
}
+static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio,
+ int new_gen, bool dirty)
+{
+ int old_gen;
+
+ if (!folio)
+ return;
+
+ if (dirty && !folio_test_dirty(folio) &&
+ !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)))
+ folio_mark_dirty(folio);
+
+ if (walk) {
+ old_gen = folio_update_gen(folio, new_gen);
+ if (old_gen >= 0 && old_gen != new_gen)
+ update_batch_size(walk, folio, old_gen, new_gen);
+ } else if (lru_gen_set_refs(folio)) {
+ old_gen = folio_lru_gen(folio);
+ if (old_gen >= 0 && old_gen != new_gen)
+ folio_activate(folio);
+ }
+}
+
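walk_update_folio() lets the page-table walkers batch their work: the generation bump and dirty mark for a folio are deferred until the walk reaches a different folio, so consecutive PTEs mapping the same (large) folio collapse into one update. A small userspace sketch of that flush-when-the-key-changes pattern (toy types, not the kernel helpers):

#include <stdio.h>

struct folio_demo { int id; int updates; };

static void flush_pending(struct folio_demo *last, int dirty)
{
	if (!last)
		return;
	last->updates++;		/* one batched update per distinct folio */
	if (dirty)
		printf("folio %d marked dirty\n", last->id);
}

int main(void)
{
	struct folio_demo folios[2] = { { .id = 0 }, { .id = 1 } };
	/* PTE walk: four entries map folio 0, two map folio 1 */
	struct { struct folio_demo *folio; int dirty; } ptes[] = {
		{ &folios[0], 0 }, { &folios[0], 1 }, { &folios[0], 0 },
		{ &folios[0], 0 }, { &folios[1], 0 }, { &folios[1], 0 },
	};
	struct folio_demo *last = NULL;
	int dirty = 0;

	for (unsigned int i = 0; i < sizeof(ptes) / sizeof(*ptes); i++) {
		if (last != ptes[i].folio) {
			flush_pending(last, dirty);	/* folio changed: flush */
			last = ptes[i].folio;
			dirty = 0;
		}
		if (ptes[i].dirty)
			dirty = 1;
	}
	flush_pending(last, dirty);			/* final flush */

	printf("folio 0: %d update(s), folio 1: %d update(s)\n",
	       folios[0].updates, folios[1].updates);
	return 0;
}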
static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct mm_walk *args)
{
int i;
+ bool dirty;
pte_t *pte;
spinlock_t *ptl;
unsigned long addr;
int total = 0;
int young = 0;
+ struct folio *last = NULL;
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
DEFINE_MAX_SEQ(walk->lruvec);
- int old_gen, new_gen = lru_gen_from_seq(max_seq);
+ int gen = lru_gen_from_seq(max_seq);
pmd_t pmdval;
- pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval,
- &ptl);
+ pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
if (!pte)
return false;
+
if (!spin_trylock(ptl)) {
pte_unmap(pte);
- return false;
+ return true;
}
if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
@@ -3421,26 +3498,30 @@ restart:
if (pfn == -1)
continue;
- folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
+ folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
continue;
if (!ptep_clear_young_notify(args->vma, addr, pte + i))
continue;
- young++;
- walk->mm_stats[MM_LEAF_YOUNG]++;
+ if (last != folio) {
+ walk_update_folio(walk, last, gen, dirty);
- if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
- !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio)))
- folio_mark_dirty(folio);
+ last = folio;
+ dirty = false;
+ }
- old_gen = folio_update_gen(folio, new_gen);
- if (old_gen >= 0 && old_gen != new_gen)
- update_batch_size(walk, folio, old_gen, new_gen);
+ if (pte_dirty(ptent))
+ dirty = true;
+
+ young++;
+ walk->mm_stats[MM_LEAF_YOUNG]++;
}
+ walk_update_folio(walk, last, gen, dirty);
+ last = NULL;
+
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
goto restart;
@@ -3454,13 +3535,15 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
{
int i;
+ bool dirty;
pmd_t *pmd;
spinlock_t *ptl;
+ struct folio *last = NULL;
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
DEFINE_MAX_SEQ(walk->lruvec);
- int old_gen, new_gen = lru_gen_from_seq(max_seq);
+ int gen = lru_gen_from_seq(max_seq);
VM_WARN_ON_ONCE(pud_leaf(*pud));
@@ -3506,27 +3589,30 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
if (pfn == -1)
goto next;
- folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
+ folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
goto next;
if (!pmdp_clear_young_notify(vma, addr, pmd + i))
goto next;
- walk->mm_stats[MM_LEAF_YOUNG]++;
+ if (last != folio) {
+ walk_update_folio(walk, last, gen, dirty);
+
+ last = folio;
+ dirty = false;
+ }
- if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
- !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio)))
- folio_mark_dirty(folio);
+ if (pmd_dirty(pmd[i]))
+ dirty = true;
- old_gen = folio_update_gen(folio, new_gen);
- if (old_gen >= 0 && old_gen != new_gen)
- update_batch_size(walk, folio, old_gen, new_gen);
+ walk->mm_stats[MM_LEAF_YOUNG]++;
next:
i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
} while (i <= MIN_LRU_BATCH);
+ walk_update_folio(walk, last, gen, dirty);
+
arch_leave_lazy_mmu_mode();
spin_unlock(ptl);
done:
@@ -3718,22 +3804,25 @@ static void clear_mm_walk(void)
kfree(walk);
}
-static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
{
int zone;
int remaining = MAX_LRU_BATCH;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
- if (type == LRU_GEN_ANON && !can_swap)
+ if (type ? swappiness > MAX_SWAPPINESS : !swappiness)
goto done;
- /* prevent cold/hot inversion if force_scan is true */
+ /* prevent cold/hot inversion if the type is evictable */
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
struct list_head *head = &lrugen->folios[old_gen][type][zone];
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
+ int refs = folio_lru_refs(folio);
+ bool workingset = folio_test_workingset(folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
@@ -3743,6 +3832,15 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
new_gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
+ /* don't count the workingset being lazily promoted */
+ if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
+ int tier = lru_tier_from_refs(refs, workingset);
+ int delta = folio_nr_pages(folio);
+
+ WRITE_ONCE(lrugen->protected[hist][type][tier],
+ lrugen->protected[hist][type][tier] + delta);
+ }
+
if (!--remaining)
return false;
}
@@ -3754,7 +3852,7 @@ done:
return true;
}
-static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
+static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness)
{
int gen, type, zone;
bool success = false;
@@ -3764,7 +3862,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
/* find the oldest populated generation */
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ for_each_evictable_type(type, swappiness) {
while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
gen = lru_gen_from_seq(min_seq[type]);
@@ -3780,13 +3878,17 @@ next:
}
/* see the comment on lru_gen_folio */
- if (can_swap) {
- min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
- min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
+ if (swappiness && swappiness <= MAX_SWAPPINESS) {
+ unsigned long seq = lrugen->max_seq - MIN_NR_GENS;
+
+ if (min_seq[LRU_GEN_ANON] > seq && min_seq[LRU_GEN_FILE] < seq)
+ min_seq[LRU_GEN_ANON] = seq;
+ else if (min_seq[LRU_GEN_FILE] > seq && min_seq[LRU_GEN_ANON] < seq)
+ min_seq[LRU_GEN_FILE] = seq;
}
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
- if (min_seq[type] == lrugen->min_seq[type])
+ for_each_evictable_type(type, swappiness) {
+ if (min_seq[type] <= lrugen->min_seq[type])
continue;
reset_ctrl_pos(lruvec, type, true);
@@ -3797,8 +3899,7 @@ next:
return success;
}
-static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq,
- bool can_swap, bool force_scan)
+static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
{
bool success;
int prev, next;
@@ -3816,13 +3917,11 @@ restart:
if (!success)
goto unlock;
- for (type = ANON_AND_FILE - 1; type >= 0; type--) {
+ for (type = 0; type < ANON_AND_FILE; type++) {
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
- VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
-
- if (inc_min_seq(lruvec, type, can_swap))
+ if (inc_min_seq(lruvec, type, swappiness))
continue;
spin_unlock_irq(&lruvec->lru_lock);
@@ -3866,7 +3965,7 @@ unlock:
}
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
- bool can_swap, bool force_scan)
+ int swappiness, bool force_scan)
{
bool success;
struct lru_gen_mm_walk *walk;
@@ -3877,7 +3976,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq));
if (!mm_state)
- return inc_max_seq(lruvec, seq, can_swap, force_scan);
+ return inc_max_seq(lruvec, seq, swappiness);
/* see the comment in iterate_mm_list() */
if (seq <= READ_ONCE(mm_state->seq))
@@ -3902,7 +4001,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
walk->lruvec = lruvec;
walk->seq = seq;
- walk->can_swap = can_swap;
+ walk->swappiness = swappiness;
walk->force_scan = force_scan;
do {
@@ -3912,7 +4011,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
} while (mm);
done:
if (success) {
- success = inc_max_seq(lruvec, seq, can_swap, force_scan);
+ success = inc_max_seq(lruvec, seq, swappiness);
WARN_ON_ONCE(!success);
}
@@ -3953,13 +4052,13 @@ static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
unsigned long total = 0;
- bool can_swap = get_swappiness(lruvec, sc);
+ int swappiness = get_swappiness(lruvec, sc);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ for_each_evictable_type(type, swappiness) {
unsigned long seq;
for (seq = min_seq[type]; seq <= max_seq; seq++) {
@@ -3979,6 +4078,7 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc
{
int gen;
unsigned long birth;
+ int swappiness = get_swappiness(lruvec, sc);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MIN_SEQ(lruvec);
@@ -3988,8 +4088,7 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc
if (!lruvec_is_sizable(lruvec, sc))
return false;
- /* see the comment on lru_gen_folio */
- gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+ gen = lru_gen_from_seq(evictable_min_seq(min_seq, swappiness));
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
return time_is_before_jiffies(birth + min_ttl);
@@ -4048,21 +4147,22 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
int i;
+ bool dirty;
unsigned long start;
unsigned long end;
struct lru_gen_mm_walk *walk;
+ struct folio *last = NULL;
int young = 1;
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
struct vm_area_struct *vma = pvmw->vma;
struct folio *folio = pfn_folio(pvmw->pfn);
- bool can_swap = !folio_is_file_lru(folio);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
DEFINE_MAX_SEQ(lruvec);
- int old_gen, new_gen = lru_gen_from_seq(max_seq);
+ int gen = lru_gen_from_seq(max_seq);
lockdep_assert_held(pvmw->ptl);
VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
@@ -4109,37 +4209,28 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (pfn == -1)
continue;
- folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
+ folio = get_pfn_folio(pfn, memcg, pgdat);
if (!folio)
continue;
if (!ptep_clear_young_notify(vma, addr, pte + i))
continue;
- young++;
-
- if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
- !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio)))
- folio_mark_dirty(folio);
+ if (last != folio) {
+ walk_update_folio(walk, last, gen, dirty);
- if (walk) {
- old_gen = folio_update_gen(folio, new_gen);
- if (old_gen >= 0 && old_gen != new_gen)
- update_batch_size(walk, folio, old_gen, new_gen);
-
- continue;
+ last = folio;
+ dirty = false;
}
- old_gen = folio_lru_gen(folio);
- if (old_gen < 0)
- folio_set_referenced(folio);
- else if (old_gen != new_gen) {
- folio_clear_lru_refs(folio);
- folio_activate(folio);
- }
+ if (pte_dirty(ptent))
+ dirty = true;
+
+ young++;
}
+ walk_update_folio(walk, last, gen, dirty);
+
arch_leave_lazy_mmu_mode();
/* feedback from rmap walkers to page table walkers */
@@ -4297,7 +4388,8 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
- int tier = lru_tier_from_refs(refs);
+ bool workingset = folio_test_workingset(folio);
+ int tier = lru_tier_from_refs(refs, workingset);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
@@ -4319,14 +4411,17 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* protected */
- if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) {
- int hist = lru_hist_from_seq(lrugen->min_seq[type]);
-
+ if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
+
+ /* don't count the workingset being lazily promoted */
+ if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
- WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
- lrugen->protected[hist][type][tier - 1] + delta);
+ WRITE_ONCE(lrugen->protected[hist][type][tier],
+ lrugen->protected[hist][type][tier] + delta);
+ }
return true;
}
@@ -4346,8 +4441,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* waiting for writeback */
- if (folio_test_locked(folio) || writeback ||
- (type == LRU_GEN_FILE && dirty)) {
+ if (writeback || (type == LRU_GEN_FILE && dirty)) {
gen = folio_inc_gen(lruvec, folio, true);
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
@@ -4376,13 +4470,12 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
return false;
}
- /* see the comment on MAX_NR_TIERS */
+ /* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio))
- folio_clear_lru_refs(folio);
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, 0);
/* for shrink_folio_list() */
folio_clear_reclaim(folio);
- folio_clear_referenced(folio);
success = lru_gen_del_folio(lruvec, folio, true);
VM_WARN_ON_ONCE_FOLIO(!success, folio);
@@ -4478,13 +4571,13 @@ static int get_tier_idx(struct lruvec *lruvec, int type)
struct ctrl_pos sp, pv;
/*
- * To leave a margin for fluctuations, use a larger gain factor (1:2).
+ * To leave a margin for fluctuations, use a larger gain factor (2:3).
* This value is chosen because any other tier would have at least twice
* as many refaults as the first tier.
*/
- read_ctrl_pos(lruvec, type, 0, 1, &sp);
+ read_ctrl_pos(lruvec, type, 0, 2, &sp);
for (tier = 1; tier < MAX_NR_TIERS; tier++) {
- read_ctrl_pos(lruvec, type, tier, 2, &pv);
+ read_ctrl_pos(lruvec, type, tier, 3, &pv);
if (!positive_ctrl_err(&sp, &pv))
break;
}
@@ -4492,79 +4585,45 @@ static int get_tier_idx(struct lruvec *lruvec, int type)
return tier - 1;
}
-static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
+static int get_type_to_scan(struct lruvec *lruvec, int swappiness)
{
- int type, tier;
struct ctrl_pos sp, pv;
- int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness };
+ if (swappiness <= MIN_SWAPPINESS + 1)
+ return LRU_GEN_FILE;
+
+ if (swappiness >= MAX_SWAPPINESS)
+ return LRU_GEN_ANON;
/*
- * Compare the first tier of anon with that of file to determine which
- * type to scan. Also need to compare other tiers of the selected type
- * with the first tier of the other type to determine the last tier (of
- * the selected type) to evict.
+ * Compare the sum of all tiers of anon with that of file to determine
+ * which type to scan.
*/
- read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
- read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
- type = positive_ctrl_err(&sp, &pv);
-
- read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
- for (tier = 1; tier < MAX_NR_TIERS; tier++) {
- read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
- if (!positive_ctrl_err(&sp, &pv))
- break;
- }
+ read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp);
+ read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv);
- *tier_idx = tier - 1;
-
- return type;
+ return positive_ctrl_err(&sp, &pv);
}
static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
int *type_scanned, struct list_head *list)
{
int i;
- int type;
- int scanned;
- int tier = -1;
- DEFINE_MIN_SEQ(lruvec);
+ int type = get_type_to_scan(lruvec, swappiness);
- /*
- * Try to make the obvious choice first, and if anon and file are both
- * available from the same generation,
- * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon
- * first.
- * 2. If !__GFP_IO, file first since clean pagecache is more likely to
- * exist than clean swapcache.
- */
- if (!swappiness)
- type = LRU_GEN_FILE;
- else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
- type = LRU_GEN_ANON;
- else if (swappiness == 1)
- type = LRU_GEN_FILE;
- else if (swappiness == MAX_SWAPPINESS)
- type = LRU_GEN_ANON;
- else if (!(sc->gfp_mask & __GFP_IO))
- type = LRU_GEN_FILE;
- else
- type = get_type_to_scan(lruvec, swappiness, &tier);
+ for_each_evictable_type(i, swappiness) {
+ int scanned;
+ int tier = get_tier_idx(lruvec, type);
- for (i = !swappiness; i < ANON_AND_FILE; i++) {
- if (tier < 0)
- tier = get_tier_idx(lruvec, type);
+ *type_scanned = type;
scanned = scan_folios(lruvec, sc, type, tier, list);
if (scanned)
- break;
+ return scanned;
type = !type;
- tier = -1;
}
- *type_scanned = type;
-
- return scanned;
+ return 0;
}
static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
@@ -4580,6 +4639,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
struct reclaim_stat stat;
struct lru_gen_mm_walk *walk;
bool skip_retry = false;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -4589,7 +4649,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
scanned += try_to_inc_min_seq(lruvec, swappiness);
- if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
+ if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
scanned = 0;
spin_unlock_irq(&lruvec->lru_lock);
@@ -4605,31 +4665,24 @@ retry:
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
+ DEFINE_MIN_SEQ(lruvec);
+
if (!folio_evictable(folio)) {
list_del(&folio->lru);
folio_putback_lru(folio);
continue;
}
- if (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio))) {
- /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
- if (folio_test_workingset(folio))
- folio_set_referenced(folio);
- continue;
- }
-
- if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) ||
- folio_mapped(folio) || folio_test_locked(folio) ||
- folio_test_dirty(folio) || folio_test_writeback(folio)) {
- /* don't add rejected folios to the oldest generation */
- set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
- BIT(PG_active));
+ /* retry folios that may have missed folio_rotate_reclaimable() */
+ if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) &&
+ !folio_test_dirty(folio) && !folio_test_writeback(folio)) {
+ list_move(&folio->lru, &clean);
continue;
}
- /* retry folios that may have missed folio_rotate_reclaimable() */
- list_move(&folio->lru, &clean);
+ /* don't add rejected folios to the oldest generation */
+ if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
+ set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active));
}
spin_lock_irq(&lruvec->lru_lock);
@@ -4664,63 +4717,32 @@ retry:
}
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
- bool can_swap, unsigned long *nr_to_scan)
+ int swappiness, unsigned long *nr_to_scan)
{
int gen, type, zone;
- unsigned long old = 0;
- unsigned long young = 0;
- unsigned long total = 0;
+ unsigned long size = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
- /* whether this lruvec is completely out of cold folios */
- if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
- *nr_to_scan = 0;
+ *nr_to_scan = 0;
+ /* have to run aging, since eviction is not possible anymore */
+ if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
- }
- for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ for_each_evictable_type(type, swappiness) {
unsigned long seq;
for (seq = min_seq[type]; seq <= max_seq; seq++) {
- unsigned long size = 0;
-
gen = lru_gen_from_seq(seq);
for (zone = 0; zone < MAX_NR_ZONES; zone++)
size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
- total += size;
- if (seq == max_seq)
- young += size;
- else if (seq + MIN_NR_GENS == max_seq)
- old += size;
}
}
- *nr_to_scan = total;
-
- /*
- * The aging tries to be lazy to reduce the overhead, while the eviction
- * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
- * ideal number of generations is MIN_NR_GENS+1.
- */
- if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
- return false;
-
- /*
- * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
- * of the total number of pages for each generation. A reasonable range
- * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
- * aging cares about the upper bound of hot pages, while the eviction
- * cares about the lower bound of cold pages.
- */
- if (young * MIN_NR_GENS > total)
- return true;
- if (old * (MIN_NR_GENS + 2) < total)
- return true;
-
- return false;
+ *nr_to_scan = size;
+ /* better to run aging even though eviction is still possible */
+ return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
/*
@@ -4728,7 +4750,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
-static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap)
+static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
bool success;
unsigned long nr_to_scan;
@@ -4738,7 +4760,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
return -1;
- success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan);
+ success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
/* try to scrape all its memory if this memcg was deleted */
if (nr_to_scan && !mem_cgroup_online(memcg))
@@ -4749,7 +4771,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
return nr_to_scan >> sc->priority;
/* stop scanning this lruvec as it's low on cold folios */
- return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0;
+ return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
}
static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
@@ -5293,8 +5315,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
s = "rep";
n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
- if (tier)
- n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
+ n[2] = READ_ONCE(lrugen->protected[hist][type][tier]);
}
for (i = 0; i < 3; i++)
@@ -5349,7 +5370,7 @@ static int lru_gen_seq_show(struct seq_file *m, void *v)
seq_printf(m, " node %5d\n", nid);
if (!full)
- seq = min_seq[LRU_GEN_ANON];
+ seq = evictable_min_seq(min_seq, MAX_SWAPPINESS / 2);
else if (max_seq >= MAX_NR_GENS)
seq = max_seq - MAX_NR_GENS + 1;
else
@@ -5389,23 +5410,14 @@ static const struct seq_operations lru_gen_seq_ops = {
};
static int run_aging(struct lruvec *lruvec, unsigned long seq,
- bool can_swap, bool force_scan)
+ int swappiness, bool force_scan)
{
DEFINE_MAX_SEQ(lruvec);
- DEFINE_MIN_SEQ(lruvec);
-
- if (seq < max_seq)
- return 0;
if (seq > max_seq)
return -EINVAL;
- if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
- return -ERANGE;
-
- try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan);
-
- return 0;
+ return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST;
}
static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
@@ -5421,7 +5433,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
while (!signal_pending(current)) {
DEFINE_MIN_SEQ(lruvec);
- if (seq < min_seq[!swappiness])
+ if (seq < evictable_min_seq(min_seq, swappiness))
return 0;
if (sc->nr_reclaimed >= nr_to_reclaim)
@@ -5466,7 +5478,7 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
if (swappiness < MIN_SWAPPINESS)
swappiness = get_swappiness(lruvec, sc);
- else if (swappiness > MAX_SWAPPINESS)
+ else if (swappiness > MAX_SWAPPINESS + 1)
goto done;
switch (cmd) {
diff --git a/mm/workingset.c b/mm/workingset.c
index a4705e196545..4841ae8af411 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -239,7 +239,8 @@ static void *lru_gen_eviction(struct folio *folio)
int type = folio_is_file_lru(folio);
int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
- int tier = lru_tier_from_refs(refs);
+ bool workingset = folio_test_workingset(folio);
+ int tier = lru_tier_from_refs(refs, workingset);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
@@ -253,18 +254,18 @@ static void *lru_gen_eviction(struct folio *folio)
hist = lru_hist_from_seq(min_seq);
atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);
- return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
+ return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
}
/*
* Tests if the shadow entry is for a folio that was recently evicted.
* Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
*/
-static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
unsigned long *token, bool *workingset)
{
int memcg_id;
- unsigned long min_seq;
+ unsigned long max_seq;
struct mem_cgroup *memcg;
struct pglist_data *pgdat;
@@ -273,8 +274,10 @@ static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
memcg = mem_cgroup_from_id(memcg_id);
*lruvec = mem_cgroup_lruvec(memcg, pgdat);
- min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
- return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
+ max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);
+ max_seq &= EVICTION_MASK >> LRU_REFS_WIDTH;
+
+ return abs_diff(max_seq, *token >> LRU_REFS_WIDTH) < MAX_NR_GENS;
}
static void lru_gen_refault(struct folio *folio, void *shadow)
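The reworked lru_gen_test_recent() treats a refault as recent when the truncated max_seq and the sequence packed into the shadow entry lie within MAX_NR_GENS of each other. A standalone sketch of that windowed comparison; MAX_NR_GENS is assumed to be 4 and the stored-sequence width below is made up:

#include <stdio.h>

#define MAX_NR_GENS	4UL			/* assumed kernel default */
#define SEQ_MASK	((1UL << 20) - 1)	/* width of the stored seq; illustrative */

static unsigned long abs_diff_demo(unsigned long a, unsigned long b)
{
	return a > b ? a - b : b - a;
}

/* "recent" if the evicted generation is within MAX_NR_GENS of max_seq */
static int test_recent(unsigned long max_seq, unsigned long token_seq)
{
	return abs_diff_demo(max_seq & SEQ_MASK, token_seq & SEQ_MASK) < MAX_NR_GENS;
}

int main(void)
{
	unsigned long max_seq = 1000;

	printf("evicted at 998: %s\n", test_recent(max_seq, 998) ? "recent" : "stale");
	printf("evicted at 990: %s\n", test_recent(max_seq, 990) ? "recent" : "stale");
	return 0;
}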
@@ -290,7 +293,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
rcu_read_lock();
- recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
+ recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);
if (lruvec != folio_lruvec(folio))
goto unlock;
@@ -302,24 +305,20 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
lrugen = &lruvec->lrugen;
hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
- /* see the comment in folio_lru_refs() */
- refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
- tier = lru_tier_from_refs(refs);
+ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1;
+ tier = lru_tier_from_refs(refs, workingset);
atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
- mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);
- /*
- * Count the following two cases as stalls:
- * 1. For pages accessed through page tables, hotter pages pushed out
- * hot pages which refaulted immediately.
- * 2. For pages accessed multiple times through file descriptors,
- * they would have been protected by sort_folio().
- */
- if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
- set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
+ /* see folio_add_lru() where folio_set_active() will be called */
+ if (lru_gen_in_fault())
+ mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);
+
+ if (workingset) {
+ folio_set_workingset(folio);
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
- }
+ } else
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF);
unlock:
rcu_read_unlock();
}
@@ -331,7 +330,7 @@ static void *lru_gen_eviction(struct folio *folio)
return NULL;
}
-static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,
unsigned long *token, bool *workingset)
{
return false;
@@ -428,17 +427,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset,
struct pglist_data *pgdat;
unsigned long eviction;
- rcu_read_lock();
-
if (lru_gen_enabled()) {
- bool recent = lru_gen_test_recent(shadow, file,
- &eviction_lruvec, &eviction, workingset);
+ bool recent;
+ rcu_read_lock();
+ recent = lru_gen_test_recent(shadow, &eviction_lruvec, &eviction, workingset);
rcu_read_unlock();
return recent;
}
-
+ rcu_read_lock();
unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
eviction <<= bucket_order;
@@ -459,14 +457,12 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset,
* configurations instead.
*/
eviction_memcg = mem_cgroup_from_id(memcgid);
- if (!mem_cgroup_disabled() &&
- (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
- rcu_read_unlock();
- return false;
- }
-
+ if (!mem_cgroup_tryget(eviction_memcg))
+ eviction_memcg = NULL;
rcu_read_unlock();
+ if (!mem_cgroup_disabled() && !eviction_memcg)
+ return false;
/*
* Flush stats (and potentially sleep) outside the RCU read section.
*
@@ -544,6 +540,8 @@ void workingset_refault(struct folio *folio, void *shadow)
bool workingset;
long nr;
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
if (lru_gen_enabled()) {
lru_gen_refault(folio, shadow);
return;
@@ -558,7 +556,6 @@ void workingset_refault(struct folio *folio, void *shadow)
* is actually experiencing the refault event. Make sure the folio is
* locked to guarantee folio_memcg() stability throughout.
*/
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
nr = folio_nr_pages(folio);
memcg = folio_memcg(folio);
pgdat = folio_pgdat(folio);
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
new file mode 100644
index 000000000000..fa47fece2237
--- /dev/null
+++ b/mm/zpdesc.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* zpdesc.h: zswap.zpool memory descriptor
+ *
+ * Written by Alex Shi <alexs@kernel.org>
+ * Hyeonggon Yoo <42.hyeyoo@gmail.com>
+ */
+#ifndef __MM_ZPDESC_H__
+#define __MM_ZPDESC_H__
+
+/*
+ * struct zpdesc - Memory descriptor for zpool memory.
+ * @flags: Page flags, mostly unused by zsmalloc.
+ * @lru: Indirectly used by page migration.
+ * @movable_ops: Used by page migration.
+ * @next: Next zpdesc in a zspage in zsmalloc zpool.
+ * @handle: For huge zspage in zsmalloc zpool.
+ * @zspage: Points to the zspage this zpdesc is a part of.
+ * @first_obj_offset: First object offset in zsmalloc zpool.
+ * @_refcount: The number of references to this zpdesc.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues. In particular, do not expand into the overlap
+ * with memcg_data.
+ *
+ * Page flags used:
+ * * PG_private identifies the first component page.
+ * * PG_locked is used by page migration code.
+ */
+struct zpdesc {
+ unsigned long flags;
+ struct list_head lru;
+ unsigned long movable_ops;
+ union {
+ struct zpdesc *next;
+ unsigned long handle;
+ };
+ struct zspage *zspage;
+ /*
+ * Only the lower 24 bits are available for offset, limiting a page
+ * to 16 MiB. The upper 8 bits are reserved for PGTY_zsmalloc.
+ *
+ * Do not access this field directly.
+ * Instead, use {get,set}_first_obj_offset() helpers.
+ */
+ unsigned int first_obj_offset;
+ atomic_t _refcount;
+};
+#define ZPDESC_MATCH(pg, zp) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct zpdesc, zp))
+
+ZPDESC_MATCH(flags, flags);
+ZPDESC_MATCH(lru, lru);
+ZPDESC_MATCH(mapping, movable_ops);
+ZPDESC_MATCH(index, next);
+ZPDESC_MATCH(index, handle);
+ZPDESC_MATCH(private, zspage);
+ZPDESC_MATCH(page_type, first_obj_offset);
+ZPDESC_MATCH(_refcount, _refcount);
+#undef ZPDESC_MATCH
+static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
+
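The ZPDESC_MATCH() asserts are what make the overlay legal: every zpdesc field must land at the same offset as the struct page field it aliases, and the descriptor must not be larger than struct page. The same technique with two toy structs, compilable on its own (field names invented for illustration):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct base_desc {			/* stand-in for struct page */
	unsigned long flags;
	void *mapping;
	unsigned long index;
	int _refcount;
};

struct overlay_desc {			/* stand-in for struct zpdesc */
	unsigned long flags;
	void *movable_ops;		/* aliases base_desc.mapping */
	unsigned long handle;		/* aliases base_desc.index */
	int _refcount;
};

#define DESC_MATCH(b, o) \
	static_assert(offsetof(struct base_desc, b) == offsetof(struct overlay_desc, o), \
		      "offset mismatch: " #b " vs " #o)

DESC_MATCH(flags, flags);
DESC_MATCH(mapping, movable_ops);
DESC_MATCH(index, handle);
DESC_MATCH(_refcount, _refcount);
static_assert(sizeof(struct overlay_desc) <= sizeof(struct base_desc),
	      "overlay too large");

int main(void)
{
	printf("overlay layout verified at compile time\n");
	return 0;
}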
+/*
+ * zpdesc_page - The first struct page allocated for a zpdesc
+ * @zp: The zpdesc.
+ *
+ * A convenience wrapper for converting zpdesc to the first struct page of the
+ * underlying folio, to communicate with code not yet converted to folio or
+ * struct zpdesc.
+ *
+ */
+#define zpdesc_page(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct page *)(zp), \
+ struct zpdesc *: (struct page *)(zp)))
+
+/**
+ * zpdesc_folio - The folio allocated for a zpdesc
+ * @zp: The zpdesc.
+ *
+ * Zpdescs are descriptors for zpool memory. The zpool memory itself is
+ * allocated as folios that contain the zpool objects, and zpdesc uses specific
+ * fields in the first struct page of the folio - those fields are now accessed
+ * by struct zpdesc.
+ *
+ * It is occasionally necessary to convert back to a folio in order to
+ * communicate with the rest of the mm. Please use this helper function
+ * instead of casting yourself, as the implementation may change in the future.
+ */
+#define zpdesc_folio(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct folio *)(zp), \
+ struct zpdesc *: (struct folio *)(zp)))
+/**
+ * page_zpdesc - Converts from first struct page to zpdesc.
+ * @p: The first (either head of compound or single) page of zpdesc.
+ *
+ * A temporary wrapper to convert struct page to struct zpdesc in situations
+ * where we know the page is the compound head, or single order-0 page.
+ *
+ * Long-term ideally everything would work with struct zpdesc directly or go
+ * through folio to struct zpdesc.
+ *
+ * Return: The zpdesc which contains this page
+ */
+#define page_zpdesc(p) (_Generic((p), \
+ const struct page *: (const struct zpdesc *)(p), \
+ struct page *: (struct zpdesc *)(p)))
+
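zpdesc_page(), zpdesc_folio() and page_zpdesc() use _Generic so the conversion preserves const-ness and rejects unrelated pointer types at compile time. A small standalone sketch of that pattern with toy types:

#include <stdio.h>

struct page_demo { unsigned long flags; };
struct desc_demo { unsigned long flags; };	/* overlays page_demo */

/* const in -> const out; any other pointer type fails to compile */
#define desc_page(d) (_Generic((d),					\
	const struct desc_demo *: (const struct page_demo *)(d),	\
	struct desc_demo *:       (struct page_demo *)(d)))

int main(void)
{
	struct desc_demo d = { .flags = 1 };
	const struct desc_demo *cd = &d;

	struct page_demo *p = desc_page(&d);		/* non-const stays non-const */
	const struct page_demo *cp = desc_page(cd);	/* const stays const */

	printf("same object: %s\n", (const struct page_demo *)p == cp ? "yes" : "no");
	return 0;
}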
+static inline void zpdesc_lock(struct zpdesc *zpdesc)
+{
+ folio_lock(zpdesc_folio(zpdesc));
+}
+
+static inline bool zpdesc_trylock(struct zpdesc *zpdesc)
+{
+ return folio_trylock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_unlock(struct zpdesc *zpdesc)
+{
+ folio_unlock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_wait_locked(struct zpdesc *zpdesc)
+{
+ folio_wait_locked(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_get(struct zpdesc *zpdesc)
+{
+ folio_get(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_put(struct zpdesc *zpdesc)
+{
+ folio_put(zpdesc_folio(zpdesc));
+}
+
+static inline void *kmap_local_zpdesc(struct zpdesc *zpdesc)
+{
+ return kmap_local_page(zpdesc_page(zpdesc));
+}
+
+static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc)
+{
+ return page_to_pfn(zpdesc_page(zpdesc));
+}
+
+static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
+{
+ return page_zpdesc(pfn_to_page(pfn));
+}
+
+static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
+ const struct movable_operations *mops)
+{
+ __SetPageMovable(zpdesc_page(zpdesc), mops);
+}
+
+static inline void __zpdesc_set_zsmalloc(struct zpdesc *zpdesc)
+{
+ __SetPageZsmalloc(zpdesc_page(zpdesc));
+}
+
+static inline void __zpdesc_clear_zsmalloc(struct zpdesc *zpdesc)
+{
+ __ClearPageZsmalloc(zpdesc_page(zpdesc));
+}
+
+static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc)
+{
+ return PageIsolated(zpdesc_page(zpdesc));
+}
+
+static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
+{
+ return page_zone(zpdesc_page(zpdesc));
+}
+
+static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
+{
+ return folio_test_locked(zpdesc_folio(zpdesc));
+}
+#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 64b66a4d3e6e..6d0e47f7ae33 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -13,24 +13,6 @@
* Released under the terms of GNU General Public License Version 2.0
*/
-/*
- * Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
- *
- * Usage of struct page fields:
- * page->private: points to zspage
- * page->index: links together all component pages of a zspage
- * For the huge page, this is always 0, so we use this field
- * to store handle.
- * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
- * offset in a subpage of a zspage
- *
- * Usage of struct page flags:
- * PG_private: identifies the first component page
- * PG_owner_priv_1: identifies the huge component page
- *
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/*
@@ -67,6 +49,7 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/local_lock.h>
+#include "zpdesc.h"
#define ZSPAGE_MAGIC 0x58
@@ -245,6 +228,35 @@ struct zs_pool {
atomic_t compaction_in_progress;
};
+static inline void zpdesc_set_first(struct zpdesc *zpdesc)
+{
+ SetPagePrivate(zpdesc_page(zpdesc));
+}
+
+static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
+{
+ inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
+}
+
+static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
+{
+ dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
+}
+
+static inline struct zpdesc *alloc_zpdesc(gfp_t gfp)
+{
+ struct page *page = alloc_page(gfp);
+
+ return page_zpdesc(page);
+}
+
+static inline void free_zpdesc(struct zpdesc *zpdesc)
+{
+ struct page *page = zpdesc_page(zpdesc);
+
+ __free_page(page);
+}
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -254,7 +266,7 @@ struct zspage {
};
unsigned int inuse;
unsigned int freeobj;
- struct page *first_page;
+ struct zpdesc *first_zpdesc;
struct list_head list; /* fullness list */
struct zs_pool *pool;
rwlock_t lock;
@@ -440,9 +452,9 @@ static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
.lock = INIT_LOCAL_LOCK(lock),
};
-static __maybe_unused int is_first_page(struct page *page)
+static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
{
- return PagePrivate(page);
+ return PagePrivate(zpdesc_page(zpdesc));
}
/* Protected by class->lock */
@@ -451,36 +463,35 @@ static inline int get_zspage_inuse(struct zspage *zspage)
return zspage->inuse;
}
-
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
zspage->inuse += val;
}
-static inline struct page *get_first_page(struct zspage *zspage)
+static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
{
- struct page *first_page = zspage->first_page;
+ struct zpdesc *first_zpdesc = zspage->first_zpdesc;
- VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- return first_page;
+ VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc));
+ return first_zpdesc;
}
#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
-static inline unsigned int get_first_obj_offset(struct page *page)
+static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc)
{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
+ VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
+ return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
-static inline void set_first_obj_offset(struct page *page, unsigned int offset)
+static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset)
{
/* With 24 bits available, we can support offsets into 16 MiB pages. */
BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
+ VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
- page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
- page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
+ zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK;
+ zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}
static inline unsigned int get_freeobj(struct zspage *zspage)
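get_first_obj_offset()/set_first_obj_offset() keep the first-object offset in the low 24 bits of the word and leave the upper 8 bits (the page-type tag) untouched. A tiny standalone sketch of that packing; the mask matches the hunk above, the tag value is invented:

#include <assert.h>
#include <stdio.h>

#define FIRST_OBJ_MASK	0xffffffu	/* low 24 bits: offset */
#define TYPE_TAG	0xab000000u	/* high 8 bits: made-up type tag */

static unsigned int set_first_obj_offset_demo(unsigned int word, unsigned int offset)
{
	assert(!(offset & ~FIRST_OBJ_MASK));	/* offset must fit in 24 bits */
	word &= ~FIRST_OBJ_MASK;
	word |= offset & FIRST_OBJ_MASK;
	return word;
}

static unsigned int get_first_obj_offset_demo(unsigned int word)
{
	return word & FIRST_OBJ_MASK;
}

int main(void)
{
	unsigned int word = TYPE_TAG;

	word = set_first_obj_offset_demo(word, 0x1234);
	printf("word=%#x offset=%#x tag preserved=%s\n", word,
	       get_first_obj_offset_demo(word),
	       (word & ~FIRST_OBJ_MASK) == TYPE_TAG ? "yes" : "no");
	return 0;
}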
@@ -733,52 +744,52 @@ out:
return newfg;
}
-static struct zspage *get_zspage(struct page *page)
+static struct zspage *get_zspage(struct zpdesc *zpdesc)
{
- struct zspage *zspage = (struct zspage *)page_private(page);
+ struct zspage *zspage = zpdesc->zspage;
BUG_ON(zspage->magic != ZSPAGE_MAGIC);
return zspage;
}
-static struct page *get_next_page(struct page *page)
+static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
{
- struct zspage *zspage = get_zspage(page);
+ struct zspage *zspage = get_zspage(zpdesc);
if (unlikely(ZsHugePage(zspage)))
return NULL;
- return (struct page *)page->index;
+ return zpdesc->next;
}
/**
- * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
* @obj: the encoded object value
- * @page: page object resides in zspage
+ * @zpdesc: zpdesc object resides in zspage
* @obj_idx: object index
*/
-static void obj_to_location(unsigned long obj, struct page **page,
+static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
unsigned int *obj_idx)
{
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
*obj_idx = (obj & OBJ_INDEX_MASK);
}
-static void obj_to_page(unsigned long obj, struct page **page)
+static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc)
{
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
}
/**
- * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
- * @page: page object resides in zspage
+ * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>)
+ * @zpdesc: zpdesc object resides in zspage
* @obj_idx: object index
*/
-static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
+static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx)
{
unsigned long obj;
- obj = page_to_pfn(page) << OBJ_INDEX_BITS;
+ obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS;
obj |= obj_idx & OBJ_INDEX_MASK;
return obj;
@@ -789,15 +800,15 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static inline bool obj_allocated(struct page *page, void *obj,
+static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
unsigned long *phandle)
{
unsigned long handle;
- struct zspage *zspage = get_zspage(page);
+ struct zspage *zspage = get_zspage(zpdesc);
if (unlikely(ZsHugePage(zspage))) {
- VM_BUG_ON_PAGE(!is_first_page(page), page);
- handle = page->index;
+ VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
+ handle = zpdesc->handle;
} else
handle = *(unsigned long *)obj;
@@ -809,22 +820,24 @@ static inline bool obj_allocated(struct page *page, void *obj,
return true;
}
-static void reset_page(struct page *page)
+static void reset_zpdesc(struct zpdesc *zpdesc)
{
+ struct page *page = zpdesc_page(zpdesc);
+
__ClearPageMovable(page);
ClearPagePrivate(page);
- set_page_private(page, 0);
- page->index = 0;
+ zpdesc->zspage = NULL;
+ zpdesc->next = NULL;
__ClearPageZsmalloc(page);
}
static int trylock_zspage(struct zspage *zspage)
{
- struct page *cursor, *fail;
+ struct zpdesc *cursor, *fail;
- for (cursor = get_first_page(zspage); cursor != NULL; cursor =
- get_next_page(cursor)) {
- if (!trylock_page(cursor)) {
+ for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor =
+ get_next_zpdesc(cursor)) {
+ if (!zpdesc_trylock(cursor)) {
fail = cursor;
goto unlock;
}
@@ -832,9 +845,9 @@ static int trylock_zspage(struct zspage *zspage)
return 1;
unlock:
- for (cursor = get_first_page(zspage); cursor != fail; cursor =
- get_next_page(cursor))
- unlock_page(cursor);
+ for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor =
+ get_next_zpdesc(cursor))
+ zpdesc_unlock(cursor);
return 0;
}
@@ -842,23 +855,23 @@ unlock:
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
struct zspage *zspage)
{
- struct page *page, *next;
+ struct zpdesc *zpdesc, *next;
assert_spin_locked(&class->lock);
VM_BUG_ON(get_zspage_inuse(zspage));
VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
- next = page = get_first_page(zspage);
+ next = zpdesc = get_first_zpdesc(zspage);
do {
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- next = get_next_page(page);
- reset_page(page);
- unlock_page(page);
- dec_zone_page_state(page, NR_ZSPAGES);
- put_page(page);
- page = next;
- } while (page != NULL);
+ VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
+ next = get_next_zpdesc(zpdesc);
+ reset_zpdesc(zpdesc);
+ zpdesc_unlock(zpdesc);
+ zpdesc_dec_zone_page_state(zpdesc);
+ zpdesc_put(zpdesc);
+ zpdesc = next;
+ } while (zpdesc != NULL);
cache_free_zspage(pool, zspage);
@@ -891,16 +904,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
{
unsigned int freeobj = 1;
unsigned long off = 0;
- struct page *page = get_first_page(zspage);
+ struct zpdesc *zpdesc = get_first_zpdesc(zspage);
- while (page) {
- struct page *next_page;
+ while (zpdesc) {
+ struct zpdesc *next_zpdesc;
struct link_free *link;
void *vaddr;
- set_first_obj_offset(page, off);
+ set_first_obj_offset(zpdesc, off);
- vaddr = kmap_local_page(page);
+ vaddr = kmap_local_zpdesc(zpdesc);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
@@ -913,8 +926,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
* page, which must point to the first object on the next
* page (if present)
*/
- next_page = get_next_page(page);
- if (next_page) {
+ next_zpdesc = get_next_zpdesc(zpdesc);
+ if (next_zpdesc) {
link->next = freeobj++ << OBJ_TAG_BITS;
} else {
/*
@@ -924,7 +937,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
link->next = -1UL << OBJ_TAG_BITS;
}
kunmap_local(vaddr);
- page = next_page;
+ zpdesc = next_zpdesc;
off %= PAGE_SIZE;
}
@@ -932,35 +945,35 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
}
static void create_page_chain(struct size_class *class, struct zspage *zspage,
- struct page *pages[])
+ struct zpdesc *zpdescs[])
{
int i;
- struct page *page;
- struct page *prev_page = NULL;
- int nr_pages = class->pages_per_zspage;
+ struct zpdesc *zpdesc;
+ struct zpdesc *prev_zpdesc = NULL;
+ int nr_zpdescs = class->pages_per_zspage;
/*
* Allocate individual pages and link them together as:
- * 1. all pages are linked together using page->index
- * 2. each sub-page point to zspage using page->private
+ * 1. all pages are linked together using zpdesc->next
+ * 2. each sub-page points to zspage using zpdesc->zspage
*
- * we set PG_private to identify the first page (i.e. no other sub-page
+ * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
* has this flag set).
*/
- for (i = 0; i < nr_pages; i++) {
- page = pages[i];
- set_page_private(page, (unsigned long)zspage);
- page->index = 0;
+ for (i = 0; i < nr_zpdescs; i++) {
+ zpdesc = zpdescs[i];
+ zpdesc->zspage = zspage;
+ zpdesc->next = NULL;
if (i == 0) {
- zspage->first_page = page;
- SetPagePrivate(page);
+ zspage->first_zpdesc = zpdesc;
+ zpdesc_set_first(zpdesc);
if (unlikely(class->objs_per_zspage == 1 &&
class->pages_per_zspage == 1))
SetZsHugePage(zspage);
} else {
- prev_page->index = (unsigned long)page;
+ prev_zpdesc->next = zpdesc;
}
- prev_page = page;
+ prev_zpdesc = zpdesc;
}
}
@@ -972,7 +985,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
gfp_t gfp)
{
int i;
- struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+ struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
struct zspage *zspage = cache_alloc_zspage(pool, gfp);
if (!zspage)
@@ -982,25 +995,25 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
migrate_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
+ struct zpdesc *zpdesc;
- page = alloc_page(gfp);
- if (!page) {
+ zpdesc = alloc_zpdesc(gfp);
+ if (!zpdesc) {
while (--i >= 0) {
- dec_zone_page_state(pages[i], NR_ZSPAGES);
- __ClearPageZsmalloc(pages[i]);
- __free_page(pages[i]);
+ zpdesc_dec_zone_page_state(zpdescs[i]);
+ __zpdesc_clear_zsmalloc(zpdescs[i]);
+ free_zpdesc(zpdescs[i]);
}
cache_free_zspage(pool, zspage);
return NULL;
}
- __SetPageZsmalloc(page);
+ __zpdesc_set_zsmalloc(zpdesc);
- inc_zone_page_state(page, NR_ZSPAGES);
- pages[i] = page;
+ zpdesc_inc_zone_page_state(zpdesc);
+ zpdescs[i] = zpdesc;
}
- create_page_chain(class, zspage, pages);
+ create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
zspage->pool = pool;
zspage->class = class->index;
@@ -1044,7 +1057,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
}
static void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
size_t sizes[2];
char *buf = area->vm_buf;
@@ -1060,14 +1073,14 @@ static void *__zs_map_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy object to per-cpu buffer */
- memcpy_from_page(buf, pages[0], off, sizes[0]);
- memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
+ memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]);
+ memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]);
out:
return area->vm_buf;
}
static void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
+ struct zpdesc *zpdescs[2], int off, int size)
{
size_t sizes[2];
char *buf;
@@ -1085,8 +1098,8 @@ static void __zs_unmap_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy per-cpu buffer to object */
- memcpy_to_page(pages[0], off, buf, sizes[0]);
- memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);
+ memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]);
+ memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]);
out:
/* enable page faults to match kunmap_local() return conditions */
@@ -1176,13 +1189,13 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
struct size_class *class;
struct mapping_area *area;
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
void *ret;
/*
@@ -1195,8 +1208,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
/* It guarantees it can get zspage from handle safely */
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc);
/*
* migration cannot move any zpages in this zspage. Here, class->lock
@@ -1215,17 +1228,17 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_local_page(page);
+ area->vm_addr = kmap_local_zpdesc(zpdesc);
ret = area->vm_addr + off;
goto out;
}
/* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- ret = __zs_map_object(area, pages, off, class->size);
+ ret = __zs_map_object(area, zpdescs, off, class->size);
out:
if (likely(!ZsHugePage(zspage)))
ret += ZS_HANDLE_SIZE;
@@ -1237,7 +1250,7 @@ EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct zspage *zspage;
- struct page *page;
+ struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
@@ -1245,8 +1258,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
struct mapping_area *area;
obj = handle_to_obj(handle);
- obj_to_location(obj, &page, &obj_idx);
- zspage = get_zspage(page);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc);
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1254,13 +1267,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
if (off + class->size <= PAGE_SIZE)
kunmap_local(area->vm_addr);
else {
- struct page *pages[2];
+ struct zpdesc *zpdescs[2];
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ zpdescs[0] = zpdesc;
+ zpdescs[1] = get_next_zpdesc(zpdesc);
+ BUG_ON(!zpdescs[1]);
- __zs_unmap_object(area, pages, off, class->size);
+ __zs_unmap_object(area, zpdescs, off, class->size);
}
local_unlock(&zs_map_area.lock);
@@ -1290,12 +1303,12 @@ EXPORT_SYMBOL_GPL(zs_huge_class_size);
static unsigned long obj_malloc(struct zs_pool *pool,
struct zspage *zspage, unsigned long handle)
{
- int i, nr_page, offset;
+ int i, nr_zpdesc, offset;
unsigned long obj;
struct link_free *link;
struct size_class *class;
- struct page *m_page;
+ struct zpdesc *m_zpdesc;
unsigned long m_offset;
void *vaddr;
@@ -1303,27 +1316,26 @@ static unsigned long obj_malloc(struct zs_pool *pool,
obj = get_freeobj(zspage);
offset = obj * class->size;
- nr_page = offset >> PAGE_SHIFT;
+ nr_zpdesc = offset >> PAGE_SHIFT;
m_offset = offset_in_page(offset);
- m_page = get_first_page(zspage);
+ m_zpdesc = get_first_zpdesc(zspage);
- for (i = 0; i < nr_page; i++)
- m_page = get_next_page(m_page);
+ for (i = 0; i < nr_zpdesc; i++)
+ m_zpdesc = get_next_zpdesc(m_zpdesc);
- vaddr = kmap_local_page(m_page);
+ vaddr = kmap_local_zpdesc(m_zpdesc);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage)))
/* record handle in the header of allocated chunk */
link->handle = handle | OBJ_ALLOCATED_TAG;
else
- /* record handle to page->index */
- zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
+ zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;
kunmap_local(vaddr);
mod_zspage_inuse(zspage, 1);
- obj = location_to_obj(m_page, obj);
+ obj = location_to_obj(m_zpdesc, obj);
record_obj(handle, obj);
return obj;
@@ -1402,23 +1414,24 @@ static void obj_free(int class_size, unsigned long obj)
{
struct link_free *link;
struct zspage *zspage;
- struct page *f_page;
+ struct zpdesc *f_zpdesc;
unsigned long f_offset;
unsigned int f_objidx;
void *vaddr;
- obj_to_location(obj, &f_page, &f_objidx);
+
+ obj_to_location(obj, &f_zpdesc, &f_objidx);
f_offset = offset_in_page(class_size * f_objidx);
- zspage = get_zspage(f_page);
+ zspage = get_zspage(f_zpdesc);
- vaddr = kmap_local_page(f_page);
+ vaddr = kmap_local_zpdesc(f_zpdesc);
link = (struct link_free *)(vaddr + f_offset);
/* Insert this object in containing zspage's freelist */
if (likely(!ZsHugePage(zspage)))
link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
else
- f_page->index = 0;
+ f_zpdesc->handle = 0;
set_freeobj(zspage, f_objidx);
kunmap_local(vaddr);
@@ -1428,7 +1441,7 @@ static void obj_free(int class_size, unsigned long obj)
void zs_free(struct zs_pool *pool, unsigned long handle)
{
struct zspage *zspage;
- struct page *f_page;
+ struct zpdesc *f_zpdesc;
unsigned long obj;
struct size_class *class;
int fullness;
@@ -1442,8 +1455,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
*/
read_lock(&pool->migrate_lock);
obj = handle_to_obj(handle);
- obj_to_page(obj, &f_page);
- zspage = get_zspage(f_page);
+ obj_to_zpdesc(obj, &f_zpdesc);
+ zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
read_unlock(&pool->migrate_lock);
@@ -1463,7 +1476,7 @@ EXPORT_SYMBOL_GPL(zs_free);
static void zs_object_copy(struct size_class *class, unsigned long dst,
unsigned long src)
{
- struct page *s_page, *d_page;
+ struct zpdesc *s_zpdesc, *d_zpdesc;
unsigned int s_objidx, d_objidx;
unsigned long s_off, d_off;
void *s_addr, *d_addr;
@@ -1472,8 +1485,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
s_size = d_size = class->size;
- obj_to_location(src, &s_page, &s_objidx);
- obj_to_location(dst, &d_page, &d_objidx);
+ obj_to_location(src, &s_zpdesc, &s_objidx);
+ obj_to_location(dst, &d_zpdesc, &d_objidx);
s_off = offset_in_page(class->size * s_objidx);
d_off = offset_in_page(class->size * d_objidx);
@@ -1484,8 +1497,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (d_off + class->size > PAGE_SIZE)
d_size = PAGE_SIZE - d_off;
- s_addr = kmap_local_page(s_page);
- d_addr = kmap_local_page(d_page);
+ s_addr = kmap_local_zpdesc(s_zpdesc);
+ d_addr = kmap_local_zpdesc(d_zpdesc);
while (1) {
size = min(s_size, d_size);
@@ -1510,17 +1523,17 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (s_off >= PAGE_SIZE) {
kunmap_local(d_addr);
kunmap_local(s_addr);
- s_page = get_next_page(s_page);
- s_addr = kmap_local_page(s_page);
- d_addr = kmap_local_page(d_page);
+ s_zpdesc = get_next_zpdesc(s_zpdesc);
+ s_addr = kmap_local_zpdesc(s_zpdesc);
+ d_addr = kmap_local_zpdesc(d_zpdesc);
s_size = class->size - written;
s_off = 0;
}
if (d_off >= PAGE_SIZE) {
kunmap_local(d_addr);
- d_page = get_next_page(d_page);
- d_addr = kmap_local_page(d_page);
+ d_zpdesc = get_next_zpdesc(d_zpdesc);
+ d_addr = kmap_local_zpdesc(d_zpdesc);
d_size = class->size - written;
d_off = 0;
}
@@ -1535,18 +1548,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
* return handle.
*/
static unsigned long find_alloced_obj(struct size_class *class,
- struct page *page, int *obj_idx)
+ struct zpdesc *zpdesc, int *obj_idx)
{
unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
- void *addr = kmap_local_page(page);
+ void *addr = kmap_local_zpdesc(zpdesc);
- offset = get_first_obj_offset(page);
+ offset = get_first_obj_offset(zpdesc);
offset += class->size * index;
while (offset < PAGE_SIZE) {
- if (obj_allocated(page, addr + offset, &handle))
+ if (obj_allocated(zpdesc, addr + offset, &handle))
break;
offset += class->size;
@@ -1566,14 +1579,14 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
unsigned long used_obj, free_obj;
unsigned long handle;
int obj_idx = 0;
- struct page *s_page = get_first_page(src_zspage);
+ struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage);
struct size_class *class = pool->size_class[src_zspage->class];
while (1) {
- handle = find_alloced_obj(class, s_page, &obj_idx);
+ handle = find_alloced_obj(class, s_zpdesc, &obj_idx);
if (!handle) {
- s_page = get_next_page(s_page);
- if (!s_page)
+ s_zpdesc = get_next_zpdesc(s_zpdesc);
+ if (!s_zpdesc)
break;
obj_idx = 0;
continue;
@@ -1653,7 +1666,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
*/
static void lock_zspage(struct zspage *zspage)
{
- struct page *curr_page, *page;
+ struct zpdesc *curr_zpdesc, *zpdesc;
/*
* Pages we haven't locked yet can be migrated off the list while we're
@@ -1665,24 +1678,24 @@ static void lock_zspage(struct zspage *zspage)
*/
while (1) {
migrate_read_lock(zspage);
- page = get_first_page(zspage);
- if (trylock_page(page))
+ zpdesc = get_first_zpdesc(zspage);
+ if (zpdesc_trylock(zpdesc))
break;
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
}
- curr_page = page;
- while ((page = get_next_page(curr_page))) {
- if (trylock_page(page)) {
- curr_page = page;
+ curr_zpdesc = zpdesc;
+ while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
+ if (zpdesc_trylock(zpdesc)) {
+ curr_zpdesc = zpdesc;
} else {
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
migrate_read_lock(zspage);
}
}
@@ -1720,26 +1733,28 @@ static void migrate_write_unlock(struct zspage *zspage)
static const struct movable_operations zsmalloc_mops;
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
- struct page *newpage, struct page *oldpage)
+ struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
{
- struct page *page;
- struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ struct zpdesc *zpdesc;
+ struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ unsigned int first_obj_offset;
int idx = 0;
- page = get_first_page(zspage);
+ zpdesc = get_first_zpdesc(zspage);
do {
- if (page == oldpage)
- pages[idx] = newpage;
+ if (zpdesc == oldzpdesc)
+ zpdescs[idx] = newzpdesc;
else
- pages[idx] = page;
+ zpdescs[idx] = zpdesc;
idx++;
- } while ((page = get_next_page(page)) != NULL);
+ } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
- create_page_chain(class, zspage, pages);
- set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+ create_page_chain(class, zspage, zpdescs);
+ first_obj_offset = get_first_obj_offset(oldzpdesc);
+ set_first_obj_offset(newzpdesc, first_obj_offset);
if (unlikely(ZsHugePage(zspage)))
- newpage->index = oldpage->index;
- __SetPageMovable(newpage, &zsmalloc_mops);
+ newzpdesc->handle = oldzpdesc->handle;
+ __zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
}
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
@@ -1759,20 +1774,22 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct zs_pool *pool;
struct size_class *class;
struct zspage *zspage;
- struct page *dummy;
+ struct zpdesc *dummy;
+ struct zpdesc *newzpdesc = page_zpdesc(newpage);
+ struct zpdesc *zpdesc = page_zpdesc(page);
void *s_addr, *d_addr, *addr;
unsigned int offset;
unsigned long handle;
unsigned long old_obj, new_obj;
unsigned int obj_idx;
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
/* We're committed, tell the world that this is a Zsmalloc page. */
- __SetPageZsmalloc(newpage);
+ __zpdesc_set_zsmalloc(newzpdesc);
/* The page is locked, so this pointer must remain valid */
- zspage = get_zspage(page);
+ zspage = get_zspage(zpdesc);
pool = zspage->pool;
/*
@@ -1789,30 +1806,29 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/* the migrate_write_lock protects zpage access via zs_map_object */
migrate_write_lock(zspage);
- offset = get_first_obj_offset(page);
- s_addr = kmap_local_page(page);
+ offset = get_first_obj_offset(zpdesc);
+ s_addr = kmap_local_zpdesc(zpdesc);
/*
* Here, any user cannot access all objects in the zspage so let's move.
*/
- d_addr = kmap_local_page(newpage);
+ d_addr = kmap_local_zpdesc(newzpdesc);
copy_page(d_addr, s_addr);
kunmap_local(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
- if (obj_allocated(page, addr, &handle)) {
+ if (obj_allocated(zpdesc, addr, &handle)) {
old_obj = handle_to_obj(handle);
obj_to_location(old_obj, &dummy, &obj_idx);
- new_obj = (unsigned long)location_to_obj(newpage,
- obj_idx);
+ new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx);
record_obj(handle, new_obj);
}
}
kunmap_local(s_addr);
- replace_sub_page(class, zspage, newpage, page);
+ replace_sub_page(class, zspage, newzpdesc, zpdesc);
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
@@ -1821,14 +1837,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
spin_unlock(&class->lock);
migrate_write_unlock(zspage);
- get_page(newpage);
- if (page_zone(newpage) != page_zone(page)) {
- dec_zone_page_state(page, NR_ZSPAGES);
- inc_zone_page_state(newpage, NR_ZSPAGES);
+ zpdesc_get(newzpdesc);
+ if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
+ zpdesc_dec_zone_page_state(zpdesc);
+ zpdesc_inc_zone_page_state(newzpdesc);
}
- reset_page(page);
- put_page(page);
+ reset_zpdesc(zpdesc);
+ zpdesc_put(zpdesc);
return MIGRATEPAGE_SUCCESS;
}
@@ -1897,13 +1913,13 @@ static void init_deferred_free(struct zs_pool *pool)
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
- struct page *page = get_first_page(zspage);
+ struct zpdesc *zpdesc = get_first_zpdesc(zspage);
do {
- WARN_ON(!trylock_page(page));
- __SetPageMovable(page, &zsmalloc_mops);
- unlock_page(page);
- } while ((page = get_next_page(page)) != NULL);
+ WARN_ON(!zpdesc_trylock(zpdesc));
+ __zpdesc_set_movable(zpdesc, &zsmalloc_mops);
+ zpdesc_unlock(zpdesc);
+ } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
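The zsmalloc hunks above replace the struct page plumbing with zpdesc descriptors: sub-pages are chained through zpdesc->next, every descriptor carries a back-pointer to its zspage, and only the head descriptor is flagged as first. A rough standalone illustration of that chaining pattern, using hypothetical simplified types rather than the kernel's struct zpdesc:

#include <stddef.h>

/*
 * Hypothetical stand-ins for struct zpdesc / struct zspage; only the
 * fields needed to show the chaining are modelled.
 */
struct demo_zpdesc {
	struct demo_zpdesc *next;	/* sub-page chain, NULL-terminated */
	void *zspage;			/* back-pointer to the owning zspage */
	int first;			/* set only on the head descriptor */
};

/* Same shape as the create_page_chain() loop in the hunk above. */
static void demo_create_chain(void *zspage, struct demo_zpdesc **descs, int n)
{
	struct demo_zpdesc *prev = NULL;
	int i;

	for (i = 0; i < n; i++) {
		descs[i]->zspage = zspage;
		descs[i]->next = NULL;
		descs[i]->first = (i == 0);
		if (prev)
			prev->next = descs[i];
		prev = descs[i];
	}
}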
diff --git a/mm/zswap.c b/mm/zswap.c
index b84c20d889b1..6504174fbc6a 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1192,7 +1192,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
/*
* It's safe to drop the lock here because we return either
- * LRU_REMOVED_RETRY or LRU_RETRY.
+ * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
*/
spin_unlock(&l->lock);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 49f97d4138ea..46ea0bee2259 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -710,12 +710,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
{
switch (chan->scid) {
case L2CAP_CID_ATT:
- if (mtu < L2CAP_LE_MIN_MTU)
+ if (mtu && mtu < L2CAP_LE_MIN_MTU)
return false;
break;
default:
- if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU)
return false;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 09bac3c9c2d5..f53304cb09db 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -210,7 +210,7 @@ static const u16 mgmt_untrusted_events[] = {
MGMT_EV_EXP_FEATURE_CHANGED,
};
-#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+#define CACHE_TIMEOUT secs_to_jiffies(2)
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00"
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 9ae2a7f1738b..8f6f7db48d4e 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1018,6 +1018,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
is_direct_pkt_access = true;
break;
default:
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 2f4ed83a75ae..7d41cde1bcca 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -50,15 +50,16 @@ void bpf_sk_storage_free(struct sock *sk)
{
struct bpf_local_storage *sk_storage;
+ migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
- if (!sk_storage) {
- rcu_read_unlock();
- return;
- }
+ if (!sk_storage)
+ goto out;
bpf_local_storage_destroy(sk_storage);
+out:
rcu_read_unlock();
+ migrate_enable();
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
@@ -160,6 +161,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
+ migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
@@ -212,6 +214,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
out:
rcu_read_unlock();
+ migrate_enable();
/* In case of an error, don't free anything explicitly here, the
* caller is responsible to call bpf_sk_storage_free.
diff --git a/net/core/dev.c b/net/core/dev.c
index afa2282f2604..b91658e8aedb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6708,7 +6708,7 @@ void napi_resume_irqs(unsigned int napi_id)
static void __napi_hash_add_with_id(struct napi_struct *napi,
unsigned int napi_id)
{
- napi->napi_id = napi_id;
+ WRITE_ONCE(napi->napi_id, napi_id);
hlist_add_head_rcu(&napi->napi_hash_node,
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
}
@@ -9924,6 +9924,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Program bound to different device");
return -EINVAL;
}
+ if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
+ NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
+ return -EINVAL;
+ }
if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
return -EINVAL;
@@ -10260,37 +10264,14 @@ static bool from_cleanup_net(void)
#endif
}
-static void rtnl_drop_if_cleanup_net(void)
-{
- if (from_cleanup_net())
- __rtnl_unlock();
-}
-
-static void rtnl_acquire_if_cleanup_net(void)
-{
- if (from_cleanup_net())
- rtnl_lock();
-}
-
/* Delayed registration/unregisteration */
LIST_HEAD(net_todo_list);
-static LIST_HEAD(net_todo_list_for_cleanup_net);
-
-/* TODO: net_todo_list/net_todo_list_for_cleanup_net should probably
- * be provided by callers, instead of being static, rtnl protected.
- */
-static struct list_head *todo_list(void)
-{
- return from_cleanup_net() ? &net_todo_list_for_cleanup_net :
- &net_todo_list;
-}
-
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
- list_add_tail(&dev->todo_list, todo_list());
+ list_add_tail(&dev->todo_list, &net_todo_list);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -11140,7 +11121,7 @@ void netdev_run_todo(void)
#endif
/* Snapshot list, allow later requests */
- list_replace_init(todo_list(), &list);
+ list_replace_init(&net_todo_list, &list);
__rtnl_unlock();
@@ -11305,6 +11286,20 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
const struct net_device_core_stats __percpu *p;
+ /*
+ * IPv{4,6} and udp tunnels share common stat helpers and use
+ * different stat type (NETDEV_PCPU_STAT_TSTATS vs
+ * NETDEV_PCPU_STAT_DSTATS). Ensure the accounting is consistent.
+ */
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_bytes) !=
+ offsetof(struct pcpu_dstats, rx_bytes));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, rx_packets) !=
+ offsetof(struct pcpu_dstats, rx_packets));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_bytes) !=
+ offsetof(struct pcpu_dstats, tx_bytes));
+ BUILD_BUG_ON(offsetof(struct pcpu_sw_netstats, tx_packets) !=
+ offsetof(struct pcpu_dstats, tx_packets));
+
if (ops->ndo_get_stats64) {
memset(storage, 0, sizeof(*storage));
ops->ndo_get_stats64(dev, storage);
@@ -11785,11 +11780,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
netdev_unlock(dev);
}
-
- rtnl_drop_if_cleanup_net();
flush_all_backlogs();
+
synchronize_net();
- rtnl_acquire_if_cleanup_net();
list_for_each_entry(dev, head, unreg_list) {
struct sk_buff *skb = NULL;
@@ -11849,9 +11842,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
#endif
}
- rtnl_drop_if_cleanup_net();
synchronize_net();
- rtnl_acquire_if_cleanup_net();
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
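The dev_get_stats() hunk adds compile-time checks that the tstats and dstats counters sit at identical offsets, so the shared tunnel stat helpers can treat either per-CPU layout the same way. A rough userspace analogue of that check, with hypothetical demo structs standing in for pcpu_sw_netstats/pcpu_dstats:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Two hypothetical per-CPU stat layouts that must stay field-compatible. */
struct demo_tstats { uint64_t rx_bytes, rx_packets, tx_bytes, tx_packets; };
struct demo_dstats { uint64_t rx_bytes, rx_packets, tx_bytes, tx_packets, rx_drops; };

/* Same spirit as the BUILD_BUG_ON()s: fail the build if the offsets drift. */
static_assert(offsetof(struct demo_tstats, rx_bytes) ==
	      offsetof(struct demo_dstats, rx_bytes), "rx_bytes offset mismatch");
static_assert(offsetof(struct demo_tstats, tx_packets) ==
	      offsetof(struct demo_dstats, tx_packets), "tx_packets offset mismatch");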
diff --git a/net/core/filter.c b/net/core/filter.c
index 5b5996901ccc..2ec162dd83c4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7651,7 +7651,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index a3de752c5178..f5e908c9e7ad 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -536,12 +536,11 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
if (unlikely(pool->alloc.count > 0))
return pool->alloc.cache[--pool->alloc.count];
- /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+ /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array_node(gfp,
- pool->p.nid, bulk,
- (struct page **)pool->alloc.cache);
+ nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
+ (struct page **)pool->alloc.cache);
if (unlikely(!nr_pages))
return 0;
@@ -1147,7 +1146,9 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
+ mutex_lock(&page_pools_lock);
WRITE_ONCE(pool->p.napi, NULL);
+ mutex_unlock(&page_pools_lock);
}
EXPORT_SYMBOL(page_pool_disable_direct_recycling);
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 57439787b9c2..2fb06d5f6d55 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -7,6 +7,8 @@
#include "netmem_priv.h"
+extern struct mutex page_pools_lock;
+
s32 page_pool_inflight(const struct page_pool *pool, bool strict);
int page_pool_list(struct page_pool *pool);
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 48335766c1bf..6677e0c2e256 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -3,6 +3,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
+#include <net/busy_poll.h>
#include <net/net_debug.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
@@ -14,10 +15,11 @@
#include "netdev-genl-gen.h"
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
-/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
+/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
+ * pool->user.
* Ordering: inside rtnl_lock
*/
-static DEFINE_MUTEX(page_pools_lock);
+DEFINE_MUTEX(page_pools_lock);
/* Page pools are only reachable from user space (via netlink) if they are
* linked to a netdev at creation time. Following page pool "visibility"
@@ -216,6 +218,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
{
struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
size_t inflight, refsz;
+ unsigned int napi_id;
void *hdr;
hdr = genlmsg_iput(rsp, info);
@@ -229,8 +232,10 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
pool->slow.netdev->ifindex))
goto err_cancel;
- if (pool->user.napi_id &&
- nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
+
+ napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
+ if (napi_id >= MIN_NAPI_ID &&
+ nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
goto err_cancel;
inflight = page_pool_inflight(pool, false);
@@ -319,8 +324,6 @@ int page_pool_list(struct page_pool *pool)
if (pool->slow.netdev) {
hlist_add_head(&pool->user.list,
&pool->slow.netdev->page_pools);
- pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
-
netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 5a7c0e565a89..e827775baf2e 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -1367,7 +1367,7 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
return dsa_switch_parse_ports_of(ds, dn);
}
-static int dev_is_class(struct device *dev, void *class)
+static int dev_is_class(struct device *dev, const void *class)
{
if (dev->class != NULL && !strcmp(dev->class->name, class))
return 1;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7bb94875a7ec..7609ce2b2c5e 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -993,12 +993,12 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
return rc;
/* Nonzero ring with RSS only makes sense if NIC adds them together */
- if (cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS &&
+ if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS &&
!ops->cap_rss_rxnfc_adds &&
ethtool_get_flow_spec_ring(info.fs.ring_cookie))
return -EINVAL;
- if (ops->get_rxfh) {
+ if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
struct ethtool_rxfh_param rxfh = {};
rc = ops->get_rxfh(dev, &rxfh);
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 7cb106b590ab..58df9ad02ce8 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -107,6 +107,8 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
u32 total_size, indir_bytes;
u8 *rss_config;
+ data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
+
ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
if (!ctx)
return -ENOENT;
@@ -153,7 +155,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
return -EOPNOTSUPP;
- data->no_key_fields = !ops->rxfh_per_ctx_key;
return rss_prepare_ctx(request, dev, data, info);
}
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 1a195efc79cd..5b2cfac3b2ba 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -57,14 +57,11 @@ DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
void hsr_debugfs_rename(struct net_device *dev)
{
struct hsr_priv *priv = netdev_priv(dev);
- struct dentry *d;
+ int err;
- d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
- hsr_debugfs_root_dir, dev->name);
- if (IS_ERR(d))
+ err = debugfs_change_name(priv->node_tbl_root, "%s", dev->name);
+ if (err)
netdev_warn(dev, "failed to rename\n");
- else
- priv->node_tbl_root = d;
}
/* hsr_debugfs_init - create hsr node_table file for dumping
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 87bb3a91598e..a4bacf198555 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -700,9 +700,12 @@ static int fill_frame_info(struct hsr_frame_info *frame,
frame->is_vlan = true;
if (frame->is_vlan) {
- if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
+ /* Note: skb->mac_len might be wrong here. */
+ if (!pskb_may_pull(skb,
+ skb_mac_offset(skb) +
+ offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
return -EINVAL;
- vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
+ vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b0fbf804bbba..0e4076866c0a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -279,7 +279,7 @@ static void esp_output_done(void *data, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb->sk, skb, err);
+ xfrm_output_resume(skb_to_full_sk(skb), skb, err);
}
}
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 03b6eee407a2..28d77d454d44 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -330,9 +330,6 @@ next_entry:
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
if (e < s_e)
goto next_entry2;
- if (filter->dev &&
- !mr_mfc_uses_dev(mrt, mfc, filter->dev))
- goto next_entry2;
err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0e5b9a654254..bc95d2a5924f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -265,11 +265,14 @@ static u16 tcp_select_window(struct sock *sk)
u32 cur_win, new_win;
/* Make the window 0 if we failed to queue the data because we
- * are out of memory. The window is temporary, so we don't store
- * it on the socket.
+ * are out of memory.
*/
- if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
+ if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
+ tp->pred_flags = 0;
+ tp->rcv_wnd = 0;
+ tp->rcv_wup = tp->rcv_nxt;
return 0;
+ }
cur_win = tcp_receive_window(tp);
new_win = __tcp_select_window(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c472c9a57cf6..a9bb9ce5438e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1141,9 +1141,9 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize) {
+ if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
kfree_skb(skb);
- return -EINVAL;
+ return -EMSGSIZE;
}
if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
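The udp_send_skb() change rejects a GSO send only when the piece that must fit in a single fragment, the headers plus min(datalen, gso_size), exceeds the fragment size, and reports it as -EMSGSIZE instead of -EINVAL. A rough sketch of that size check as a hypothetical standalone helper:

#include <errno.h>

/* Hypothetical helper mirroring the hlen + min(datalen, gso_size) test. */
static int demo_udp_gso_size_check(int hlen, int datalen, int gso_size, int fragsize)
{
	int per_seg = datalen < gso_size ? datalen : gso_size;

	if (hlen + per_seg > fragsize)
		return -EMSGSIZE;	/* one segment would not fit in a fragment */
	return 0;
}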
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5f3d0cc1555a..9e73944e3b53 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -315,7 +315,7 @@ static void esp_output_done(void *data, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb->sk, skb, err);
+ xfrm_output_resume(skb_to_full_sk(skb), skb, err);
}
}
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 28e5a89dc255..2c383c12a431 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -336,7 +336,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb), *cache_dst;
+ struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
struct in6_addr orig_daddr;
struct ioam6_lwt *ilwt;
int err = -EINVAL;
@@ -407,13 +407,15 @@ do_encap:
cache_dst = ip6_route_output(net, NULL, &fl6);
if (cache_dst->error) {
err = cache_dst->error;
- dst_release(cache_dst);
goto drop;
}
- local_bh_disable();
- dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
- local_bh_enable();
+ /* cache only if we don't create a dst reference loop */
+ if (dst->lwtstate != cache_dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
+ local_bh_enable();
+ }
err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev));
if (unlikely(err))
@@ -426,8 +428,10 @@ do_encap:
return dst_output(net, sk, skb);
}
out:
+ dst_release(cache_dst);
return dst->lwtstate->orig_output(net, sk, skb);
drop:
+ dst_release(cache_dst);
kfree_skb(skb);
return err;
}
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 7ba22d2f2bfe..0ac4283acdf2 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -232,13 +232,15 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
err = dst->error;
- dst_release(dst);
goto drop;
}
- local_bh_disable();
- dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
- local_bh_enable();
+ /* cache only if we don't create a dst reference loop */
+ if (orig_dst->lwtstate != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
@@ -251,6 +253,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return dst_output(net, sk, skb);
drop:
+ dst_release(dst);
kfree_skb(skb);
return err;
}
@@ -269,8 +272,10 @@ static int rpl_input(struct sk_buff *skb)
local_bh_enable();
err = rpl_do_srh(skb, rlwt, dst);
- if (unlikely(err))
+ if (unlikely(err)) {
+ dst_release(dst);
goto drop;
+ }
if (!dst) {
ip6_route_input(skb);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 4bf937bfc263..33833b2064c0 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -482,8 +482,10 @@ static int seg6_input_core(struct net *net, struct sock *sk,
local_bh_enable();
err = seg6_do_srh(skb, dst);
- if (unlikely(err))
+ if (unlikely(err)) {
+ dst_release(dst);
goto drop;
+ }
if (!dst) {
ip6_route_input(skb);
@@ -571,13 +573,15 @@ static int seg6_output_core(struct net *net, struct sock *sk,
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
err = dst->error;
- dst_release(dst);
goto drop;
}
- local_bh_disable();
- dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
- local_bh_enable();
+ /* cache only if we don't create a dst reference loop */
+ if (orig_dst->lwtstate != dst->lwtstate) {
+ local_bh_disable();
+ dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+ local_bh_enable();
+ }
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
@@ -593,6 +597,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
return dst_output(net, sk, skb);
drop:
+ dst_release(dst);
kfree_skb(skb);
return err;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6671daa67f4f..c6ea438b5c75 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1389,9 +1389,9 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize) {
+ if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
kfree_skb(skb);
- return -EINVAL;
+ return -EMSGSIZE;
}
if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5f7b1fdbffe6..b3d5d1f266ee 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -82,14 +82,14 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
toobig = skb->len > mtu && !skb_is_gso(skb);
- if (toobig && xfrm6_local_dontfrag(skb->sk)) {
+ if (toobig && xfrm6_local_dontfrag(sk)) {
xfrm6_local_rxpmtu(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
} else if (toobig && xfrm6_noneed_fragment(skb)) {
skb->ignore_df = 1;
goto skip_frag;
- } else if (!skb->ignore_df && toobig && skb->sk) {
+ } else if (!skb->ignore_df && toobig && sk) {
xfrm_local_error(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e7687a7b1683..54c479910d05 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -1025,16 +1025,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
- struct dentry *dir;
- char buf[10 + IFNAMSIZ];
-
- dir = sdata->vif.debugfs_dir;
-
- if (IS_ERR_OR_NULL(dir))
- return;
-
- sprintf(buf, "netdev:%s", sdata->name);
- debugfs_rename(dir->d_parent, dir, dir->d_parent, buf);
+ debugfs_change_name(sdata->vif.debugfs_dir, "netdev:%s", sdata->name);
}
void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 3999e0ba2c35..2dd81e6c26bd 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -418,9 +418,9 @@ void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
subflow->mpc_drop = 1;
mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
- } else {
- subflow->mpc_drop = 0;
}
+ } else if (ssk->sk_state == TCP_SYN_SENT) {
+ subflow->mpc_drop = 0;
}
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 123f3f297284..fd2de185bc93 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -108,7 +108,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_DSS;
mp_opt->use_map = 1;
mp_opt->mpc_map = 1;
- mp_opt->use_ack = 0;
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
}
@@ -157,11 +156,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
pr_debug("DSS\n");
ptr++;
- /* we must clear 'mpc_map' be able to detect MP_CAPABLE
- * map vs DSS map in mptcp_incoming_options(), and reconstruct
- * map info accordingly
- */
- mp_opt->mpc_map = 0;
flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
@@ -369,8 +363,11 @@ void mptcp_get_options(const struct sk_buff *skb,
const unsigned char *ptr;
int length;
- /* initialize option status */
- mp_opt->suboptions = 0;
+ /* Ensure that casting the whole status to u32 is efficient and safe */
+ BUILD_BUG_ON(sizeof_field(struct mptcp_options_received, status) != sizeof(u32));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct mptcp_options_received, status),
+ sizeof(u32)));
+ *(u32 *)&mp_opt->status = 0;
length = (th->doff * 4) - sizeof(struct tcphdr);
ptr = (const unsigned char *)(th + 1);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 98ac73938bd8..572d160edca3 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -2020,7 +2020,8 @@ int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
- (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+ (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL |
+ MPTCP_PM_ADDR_FLAG_IMPLICIT))) {
spin_unlock_bh(&pernet->lock);
GENL_SET_ERR_MSG(info, "invalid addr flags");
return -EINVAL;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c44c89ecaca6..6bd819047470 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1767,8 +1767,10 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
* see mptcp_disconnect().
* Attempt it again outside the problematic scope.
*/
- if (!mptcp_disconnect(sk, 0))
+ if (!mptcp_disconnect(sk, 0)) {
+ sk->sk_disconnects++;
sk->sk_socket->state = SS_UNCONNECTED;
+ }
}
inet_clear_bit(DEFER_CONNECT, sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 0174a5aad279..f6a207958459 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -149,22 +149,24 @@ struct mptcp_options_received {
u32 subflow_seq;
u16 data_len;
__sum16 csum;
- u16 suboptions;
+ struct_group(status,
+ u16 suboptions;
+ u16 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ reset_reason:4,
+ reset_transient:1,
+ echo:1,
+ backup:1,
+ deny_join_id0:1,
+ __unused:2;
+ );
+ u8 join_id;
u32 token;
u32 nonce;
- u16 use_map:1,
- dsn64:1,
- data_fin:1,
- use_ack:1,
- ack64:1,
- mpc_map:1,
- reset_reason:4,
- reset_transient:1,
- echo:1,
- backup:1,
- deny_join_id0:1,
- __unused:2;
- u8 join_id;
u64 thmac;
u8 hmac[MPTCPOPT_HMAC_LEN];
struct mptcp_addr_info addr;
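Grouping the parsed-option status fields (suboptions plus the flag bits) lets mptcp_get_options() reset the whole block with a single aligned 32-bit store, which the BUILD_BUG_ON()s in the options.c hunk verify. A rough illustration of the same idea using a hypothetical union in place of the kernel's struct_group() helper:

#include <stdint.h>

/* Hypothetical parsed-option status; the real code uses struct_group(). */
struct demo_opt_status {
	union {
		uint32_t raw;			/* whole status viewed as one word */
		struct {
			uint16_t suboptions;
			uint16_t use_map:1, dsn64:1, data_fin:1, use_ack:1,
				 mpc_map:1, echo:1, backup:1, __unused:9;
		};
	};
};

static inline void demo_reset_status(struct demo_opt_status *s)
{
	s->raw = 0;	/* clears suboptions and every flag bit in one store */
}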
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index bf276eaf9330..7891a537bddd 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -1385,6 +1385,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_package;
break;
case ncsi_dev_state_probe_package:
+ if (ndp->package_probe_id >= 8) {
+ /* Last package probed, finishing */
+ ndp->flags |= NCSI_DEV_PROBED;
+ break;
+ }
+
ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_SP;
@@ -1501,13 +1507,8 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
if (ret)
goto error;
- /* Probe next package */
+ /* Probe next package after receiving response */
ndp->package_probe_id++;
- if (ndp->package_probe_id >= 8) {
- /* Probe finished */
- ndp->flags |= NCSI_DEV_PROBED;
- break;
- }
nd->state = ncsi_dev_state_probe_package;
ndp->active_package = NULL;
break;
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 14bd66909ca4..4a8ce2949fae 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -1089,14 +1089,12 @@ static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
{
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct sockaddr *saddr = &ndp->pending_mac;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_gmcma_pkt *rsp;
- struct sockaddr saddr;
- int ret = -1;
int i;
rsp = (struct ncsi_rsp_gmcma_pkt *)skb_network_header(nr->rsp);
- saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev_info(ndev, "NCSI: Received %d provisioned MAC addresses\n",
@@ -1108,20 +1106,20 @@ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
rsp->addresses[i][4], rsp->addresses[i][5]);
}
+ saddr->sa_family = ndev->type;
for (i = 0; i < rsp->address_count; i++) {
- memcpy(saddr.sa_data, &rsp->addresses[i], ETH_ALEN);
- ret = ndev->netdev_ops->ndo_set_mac_address(ndev, &saddr);
- if (ret < 0) {
+ if (!is_valid_ether_addr(rsp->addresses[i])) {
netdev_warn(ndev, "NCSI: Unable to assign %pM to device\n",
- saddr.sa_data);
+ rsp->addresses[i]);
continue;
}
- netdev_warn(ndev, "NCSI: Set MAC address to %pM\n", saddr.sa_data);
+ memcpy(saddr->sa_data, rsp->addresses[i], ETH_ALEN);
+ netdev_warn(ndev, "NCSI: Will set MAC address to %pM\n", saddr->sa_data);
break;
}
- ndp->gma_flag = ret == 0;
- return ret;
+ ndp->gma_flag = 1;
+ return 0;
}
static struct ncsi_rsp_handler {
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 4cc97f971264..7c6f7c9f7332 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -39,20 +39,15 @@ static const char *const sctp_conntrack_names[] = {
[SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
};
-#define SECS * HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS * 24 HOURS
-
static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
- [SCTP_CONNTRACK_CLOSED] = 10 SECS,
- [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
- [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
- [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
- [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
- [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ [SCTP_CONNTRACK_CLOSED] = secs_to_jiffies(10),
+ [SCTP_CONNTRACK_COOKIE_WAIT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_ESTABLISHED] = secs_to_jiffies(210),
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = secs_to_jiffies(3),
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = secs_to_jiffies(30),
};
#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 667459256e4c..a34de9c17cf1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5078,7 +5078,7 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
static int nft_set_desc_concat(struct nft_set_desc *desc,
const struct nlattr *nla)
{
- u32 num_regs = 0, key_num_regs = 0;
+ u32 len = 0, num_regs;
struct nlattr *attr;
int rem, err, i;
@@ -5092,12 +5092,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
}
for (i = 0; i < desc->field_count; i++)
- num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+ len += round_up(desc->field_len[i], sizeof(u32));
- key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
- if (key_num_regs != num_regs)
+ if (len != desc->klen)
return -EINVAL;
+ num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
if (num_regs > NFT_REG32_COUNT)
return -E2BIG;
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index de175318a3a0..082ab66f120b 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -542,6 +542,8 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
pr_debug("pipe created=%d\n", pipe);
+ if (pipe >= NCI_HCI_MAX_PIPES)
+ pipe = NCI_HCI_INVALID_PIPE;
return pipe;
}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 72c65d938a15..a4a668b88a8f 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -701,11 +701,9 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
+ int err = -EINVAL;
int n;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
-
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
@@ -718,8 +716,15 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
- if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
- return -EADDRNOTAVAIL;
+ lock_sock(sk);
+
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out_release;
+
+ err = -EADDRNOTAVAIL;
+ dev = rose_dev_get(&addr->srose_addr);
+ if (!dev)
+ goto out_release;
source = &addr->srose_call;
@@ -730,7 +735,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
- return -EACCES;
+ err = -EACCES;
+ goto out_release;
}
rose->source_call = *source;
}
@@ -753,8 +759,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
-
- return 0;
+ err = 0;
+out_release:
+ release_sock(sk);
+ return err;
}
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index f06ddbed3fed..1525773e94aa 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -122,6 +122,10 @@ static void rose_heartbeat_expiry(struct timer_list *t)
struct rose_sock *rose = rose_sk(sk);
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ/20);
+ goto out;
+ }
switch (rose->state) {
case ROSE_STATE_0:
/* Magic here: If we listen() and a new link dies before it
@@ -152,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
}
rose_start_heartbeat(sk);
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -162,6 +167,10 @@ static void rose_timer_expiry(struct timer_list *t)
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &rose->timer, jiffies + HZ/20);
+ goto out;
+ }
switch (rose->state) {
case ROSE_STATE_1: /* T1 */
case ROSE_STATE_4: /* T2 */
@@ -182,6 +191,7 @@ static void rose_timer_expiry(struct timer_list *t)
}
break;
}
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -192,6 +202,10 @@ static void rose_idletimer_expiry(struct timer_list *t)
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &rose->idletimer, jiffies + HZ/20);
+ goto out;
+ }
rose_clear_queues(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
@@ -207,6 +221,7 @@ static void rose_idletimer_expiry(struct timer_list *t)
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
+out:
bh_unlock_sock(sk);
sock_put(sk);
}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 718193df9d2e..f251845fe532 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -582,6 +582,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */
RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */
RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */
+ RXRPC_CALL_CONN_CHALLENGING, /* The connection is being challenged */
};
/*
@@ -602,7 +603,6 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
- RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 5a543c3f6fb0..c4c8b46a68c6 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -22,7 +22,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
[RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
- [RXRPC_CALL_SERVER_SECURING] = "SvSecure",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
@@ -453,17 +452,16 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
call->cong_tstamp = skb->tstamp;
__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+ rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
spin_lock(&conn->state_lock);
switch (conn->state) {
case RXRPC_CONN_SERVICE_UNSECURED:
case RXRPC_CONN_SERVICE_CHALLENGING:
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+ __set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags);
break;
case RXRPC_CONN_SERVICE:
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
break;
case RXRPC_CONN_ABORTED:
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 713e04394ceb..4d9c5e21ba78 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -228,10 +228,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
- if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
- rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
rxrpc_notify_socket(call);
- }
}
/*
@@ -272,6 +270,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
* we've already received the packet, put it on the
* front of the queue.
*/
+ sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured);
skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
skb_queue_head(&conn->local->rx_queue, skb);
@@ -437,14 +436,16 @@ void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
rxrpc_abort_calls(conn);
- switch (skb->mark) {
- case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
- if (conn->state != RXRPC_CONN_SERVICE)
- break;
+ if (skb) {
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+ if (conn->state != RXRPC_CONN_SERVICE)
+ break;
- for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
- rxrpc_call_is_secure(conn->channels[loop].call);
- break;
+ for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+ rxrpc_call_is_secure(conn->channels[loop].call);
+ break;
+ }
}
/* Process delayed ACKs whose time has come. */
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 7eba4d7d9a38..2f1fd1e2e7e4 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -67,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
+ INIT_LIST_HEAD(&conn->attend_link);
mutex_init(&conn->security_lock);
mutex_init(&conn->tx_data_alloc_lock);
skb_queue_head_init(&conn->rx_queue);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 4974b5accafa..9047ba13bd31 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -448,11 +448,19 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
- skb_queue_tail(&call->recvmsg_queue, skb);
+ spin_lock_irq(&call->recvmsg_queue.lock);
+
+ __skb_queue_tail(&call->recvmsg_queue, skb);
rxrpc_input_update_ack_window(call, window, wtop);
trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
if (last)
+ /* Change the state inside the lock so that recvmsg syncs
+ * correctly with it and using sendmsg() to send a reply
+ * doesn't race.
+ */
rxrpc_end_rx_phase(call, sp->hdr.serial);
+
+ spin_unlock_irq(&call->recvmsg_queue.lock);
}
/*
@@ -657,7 +665,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
rxrpc_propose_delay_ACK(call, sp->hdr.serial,
rxrpc_propose_ack_input_data);
}
- if (notify) {
+ if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags)) {
trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
rxrpc_notify_socket(call);
}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index d82e44a3901b..e874c31fa901 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -246,7 +246,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
bool use;
int slot;
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
while (!list_empty(collector)) {
peer = list_entry(collector->next,
@@ -257,7 +257,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
continue;
use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (use) {
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
@@ -277,17 +277,17 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
*/
slot += cursor;
slot &= mask;
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
}
rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
}
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
}
/*
@@ -317,7 +317,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
* second; the bucket at cursor + 1 goes at now + 1s and so
* on...
*/
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
list_splice_init(&rxnet->peer_keepalive_new, &collector);
stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
@@ -329,7 +329,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
}
base = now;
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index e1c63129586b..0fcc87f0409f 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -325,10 +325,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
rxrpc_init_peer(local, peer, hash_key);
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
}
/*
@@ -360,7 +360,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
return NULL;
}
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
@@ -373,7 +373,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
&rxnet->peer_keepalive_new);
}
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
rxrpc_free_peer(candidate);
@@ -423,10 +423,10 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock(&rxnet->peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
- spin_unlock(&rxnet->peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
rxrpc_free_peer(peer);
}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 0e8da909d4f2..584397aba4a0 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -707,7 +707,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
} else {
switch (rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_AWAIT_CONN:
- case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
if (p.command == RXRPC_CMD_SEND_ABORT)
break;
fallthrough;
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f80bc05d4c5a..516038a44163 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -91,6 +91,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
struct ets_sched *q = qdisc_priv(sch);
+ if (arg == 0 || arg > q->nbands)
+ return NULL;
return &q->classes[arg - 1];
}
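
The check above guards a 1-based class index against both 0 and values past the configured band count before it is used to index a 0-based array. A hedged standalone sketch of the same guard, with made-up types:

#include <stddef.h>

struct band_table {
	unsigned int nbands;	/* number of valid entries, assumed <= 16 here */
	int bands[16];
};

/* 'arg' is a 1-based identifier supplied from outside; reject 0 and
 * anything beyond nbands before converting it to a 0-based index. */
static int *band_from_arg(struct band_table *t, unsigned long arg)
{
	if (arg == 0 || arg > t->nbands)
		return NULL;	/* caller must handle the missing class */
	return &t->bands[arg - 1];
}
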
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index b50b2c2cc09b..e6bfd39ff339 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -40,6 +40,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
+ if (unlikely(READ_ONCE(sch->limit) == 0))
+ return qdisc_drop(skb, sch, to_free);
+
if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch);
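
With a limit of zero, the existing `qlen < limit` test can never admit a packet, so without the new check the code would fall through and try to make room by dropping from an already-empty queue. A small sketch of the resulting decision logic (standalone, illustrative return codes):

/* Decision logic only; 0 = enqueue, 1 = tail-drop then enqueue,
 * -1 = drop the incoming packet outright. */
static int fifo_enqueue_decision(unsigned int qlen, unsigned int limit)
{
	if (limit == 0)
		return -1;	/* a zero-length queue can never accept */
	if (qlen < limit)
		return 0;
	return 1;
}
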
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 71ec9986ed37..fdd79d3ccd8c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -749,9 +749,9 @@ deliver:
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1, pkt_len);
sch->qstats.backlog -= pkt_len;
sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
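
The hunk above only reorders the accounting: the qdisc's own qlen and backlog are corrected before qdisc_tree_reduce_backlog() walks up to the ancestors. A plausible reason (an assumption here, not stated in the hunk itself) is that the upward walk may look back at the child's counters, so the local state has to be consistent first. A standalone sketch of the principle:

struct counted_queue {
	unsigned int qlen;
	unsigned int backlog;
	struct counted_queue *parent;
};

/* Correct the local counters first, then propagate the reduction to
 * the ancestors, so nothing walking the hierarchy sees stale local
 * state part-way through. */
static void account_drop(struct counted_queue *q, unsigned int len)
{
	q->backlog -= len;
	q->qlen--;
	for (struct counted_queue *p = q->parent; p; p = p->parent) {
		p->backlog -= len;
		p->qlen--;
	}
}
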
diff --git a/net/socket.c b/net/socket.c
index 262a28b59c7f..28bae5a94234 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -479,6 +479,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
sock->file = file;
file->private_data = sock;
stream_open(SOCK_INODE(sock), file);
+ /*
+ * Disable permission and pre-content events, but enable legacy
+ * inotify events for legacy users.
+ */
+ file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
return file;
}
EXPORT_SYMBOL(sock_alloc_file);
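
The added call opts socket files out of the blocking fsnotify paths while keeping ordinary events. A rough sketch of the policy described by the comment, using made-up flag names rather than the kernel's fsnotify constants:

#define EV_LEGACY	0x1	/* non-blocking inotify-style notifications */
#define EV_PERMISSION	0x2	/* blocking permission checks */
#define EV_PRECONTENT	0x4	/* blocking pre-content population */

/* Sockets keep the cheap legacy notifications but never take the
 * blocking permission/pre-content paths. */
static unsigned int socket_event_mask(unsigned int requested)
{
	return requested & ~(EV_PERMISSION | EV_PRECONTENT);
}
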
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index ad1736d93b76..452f67deebc6 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
-auth_rpcgss-y := auth_gss.o gss_generic_token.o \
+auth_rpcgss-y := auth_gss.o \
gss_mech_switch.o svcauth_gss.o \
gss_rpc_upcall.o gss_rpc_xdr.o trace.o
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
deleted file mode 100644
index 4a4082bb22ad..000000000000
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * linux/net/sunrpc/gss_generic_token.c
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/generic/util_token.c
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1993 by OpenVision Technologies, Inc.
- *
- * Permission to use, copy, modify, distribute, and sell this software
- * and its documentation for any purpose is hereby granted without fee,
- * provided that the above copyright notice appears in all copies and
- * that both that copyright notice and this permission notice appear in
- * supporting documentation, and that the name of OpenVision not be used
- * in advertising or publicity pertaining to distribution of the software
- * without specific, written prior permission. OpenVision makes no
- * representations about the suitability of this software for any
- * purpose. It is provided "as is" without express or implied warranty.
- *
- * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
- * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/sunrpc/sched.h>
-#include <linux/sunrpc/gss_asn1.h>
-
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-
-/* TWRITE_STR from gssapiP_generic.h */
-#define TWRITE_STR(ptr, str, len) \
- memcpy((ptr), (char *) (str), (len)); \
- (ptr) += (len);
-
-/* XXXX this code currently makes the assumption that a mech oid will
- never be longer than 127 bytes. This assumption is not inherent in
- the interfaces, so the code can be fixed if the OSI namespace
- balloons unexpectedly. */
-
-/* Each token looks like this:
-
-0x60 tag for APPLICATION 0, SEQUENCE
- (constructed, definite-length)
- <length> possible multiple bytes, need to parse/generate
- 0x06 tag for OBJECT IDENTIFIER
- <moid_length> compile-time constant string (assume 1 byte)
- <moid_bytes> compile-time constant string
- <inner_bytes> the ANY containing the application token
- bytes 0,1 are the token type
- bytes 2,n are the token data
-
-For the purposes of this abstraction, the token "header" consists of
-the sequence tag and length octets, the mech OID DER encoding, and the
-first two inner bytes, which indicate the token type. The token
-"body" consists of everything else.
-
-*/
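
The removed comment above describes the DER definite-length encoding that der_length_size() and der_write_length() below implement: lengths under 128 take one byte, longer lengths take a 0x80|n prefix followed by n big-endian length bytes. A small standalone program working through one case (300 encodes as 82 01 2c):

#include <stdio.h>

/* DER definite-length encoding: short form below 128, long form
 * (0x80 | byte count, then the big-endian length bytes) otherwise. */
static int der_encode_length(unsigned char *out, unsigned int length)
{
	int n = 0;

	if (length < 0x80) {
		out[n++] = (unsigned char)length;	/* short form */
		return n;
	}

	unsigned int tmp = length;
	int bytes = 0;

	while (tmp) {
		bytes++;
		tmp >>= 8;
	}
	out[n++] = (unsigned char)(0x80 | bytes);
	for (int i = bytes - 1; i >= 0; i--)
		out[n++] = (unsigned char)(length >> (8 * i));
	return n;
}

int main(void)
{
	unsigned char buf[8];
	int n = der_encode_length(buf, 300);	/* prints: 82 01 2c */

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
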
-
-static int
-der_length_size( int length)
-{
- if (length < (1<<7))
- return 1;
- else if (length < (1<<8))
- return 2;
-#if (SIZEOF_INT == 2)
- else
- return 3;
-#else
- else if (length < (1<<16))
- return 3;
- else if (length < (1<<24))
- return 4;
- else
- return 5;
-#endif
-}
-
-static void
-der_write_length(unsigned char **buf, int length)
-{
- if (length < (1<<7)) {
- *(*buf)++ = (unsigned char) length;
- } else {
- *(*buf)++ = (unsigned char) (der_length_size(length)+127);
-#if (SIZEOF_INT > 2)
- if (length >= (1<<24))
- *(*buf)++ = (unsigned char) (length>>24);
- if (length >= (1<<16))
- *(*buf)++ = (unsigned char) ((length>>16)&0xff);
-#endif
- if (length >= (1<<8))
- *(*buf)++ = (unsigned char) ((length>>8)&0xff);
- *(*buf)++ = (unsigned char) (length&0xff);
- }
-}
-
-/* returns decoded length, or < 0 on failure. Advances buf and
- decrements bufsize */
-
-static int
-der_read_length(unsigned char **buf, int *bufsize)
-{
- unsigned char sf;
- int ret;
-
- if (*bufsize < 1)
- return -1;
- sf = *(*buf)++;
- (*bufsize)--;
- if (sf & 0x80) {
- if ((sf &= 0x7f) > ((*bufsize)-1))
- return -1;
- if (sf > SIZEOF_INT)
- return -1;
- ret = 0;
- for (; sf; sf--) {
- ret = (ret<<8) + (*(*buf)++);
- (*bufsize)--;
- }
- } else {
- ret = sf;
- }
-
- return ret;
-}
-
-/* returns the length of a token, given the mech oid and the body size */
-
-int
-g_token_size(struct xdr_netobj *mech, unsigned int body_size)
-{
- /* set body_size to sequence contents size */
- body_size += 2 + (int) mech->len; /* NEED overflow check */
- return 1 + der_length_size(body_size) + body_size;
-}
-
-EXPORT_SYMBOL_GPL(g_token_size);
-
-/* fills in a buffer with the token header. The buffer is assumed to
- be the right size. buf is advanced past the token header */
-
-void
-g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
-{
- *(*buf)++ = 0x60;
- der_write_length(buf, 2 + mech->len + body_size);
- *(*buf)++ = 0x06;
- *(*buf)++ = (unsigned char) mech->len;
- TWRITE_STR(*buf, mech->data, ((int) mech->len));
-}
-
-EXPORT_SYMBOL_GPL(g_make_token_header);
-
-/*
- * Given a buffer containing a token, reads and verifies the token,
- * leaving buf advanced past the token header, and setting body_size
- * to the number of remaining bytes. Returns 0 on success,
- * G_BAD_TOK_HEADER for a variety of errors, and G_WRONG_MECH if the
- * mechanism in the token does not match the mech argument. buf and
- * *body_size are left unmodified on error.
- */
-u32
-g_verify_token_header(struct xdr_netobj *mech, int *body_size,
- unsigned char **buf_in, int toksize)
-{
- unsigned char *buf = *buf_in;
- int seqsize;
- struct xdr_netobj toid;
- int ret = 0;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- if (*buf++ != 0x60)
- return G_BAD_TOK_HEADER;
-
- if ((seqsize = der_read_length(&buf, &toksize)) < 0)
- return G_BAD_TOK_HEADER;
-
- if (seqsize != toksize)
- return G_BAD_TOK_HEADER;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- if (*buf++ != 0x06)
- return G_BAD_TOK_HEADER;
-
- if ((toksize-=1) < 0)
- return G_BAD_TOK_HEADER;
- toid.len = *buf++;
-
- if ((toksize-=toid.len) < 0)
- return G_BAD_TOK_HEADER;
- toid.data = buf;
- buf+=toid.len;
-
- if (! g_OID_equal(&toid, mech))
- ret = G_WRONG_MECH;
-
- /* G_WRONG_MECH is not returned immediately because it's more important
- to return G_BAD_TOK_HEADER if the token header is in fact bad */
-
- if ((toksize-=2) < 0)
- return G_BAD_TOK_HEADER;
-
- if (ret)
- return ret;
-
- *buf_in = buf;
- *body_size = toksize;
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(g_verify_token_header);
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d2b02710ab07..9a27201638e2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -442,35 +442,6 @@ encryptor(struct scatterlist *sg, void *data)
return 0;
}
-int
-gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
- int offset, struct page **pages)
-{
- int ret;
- struct encryptor_desc desc;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
- memset(desc.iv, 0, sizeof(desc.iv));
- desc.req = req;
- desc.pos = offset;
- desc.outbuf = buf;
- desc.pages = pages;
- desc.fragno = 0;
- desc.fraglen = 0;
-
- sg_init_table(desc.infrags, 4);
- sg_init_table(desc.outfrags, 4);
-
- ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
- skcipher_request_zero(req);
- return ret;
-}
-
struct decryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct skcipher_request *req;
@@ -525,32 +496,6 @@ decryptor(struct scatterlist *sg, void *data)
return 0;
}
-int
-gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
- int offset)
-{
- int ret;
- struct decryptor_desc desc;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
-
- /* XXXJBF: */
- BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
- memset(desc.iv, 0, sizeof(desc.iv));
- desc.req = req;
- desc.fragno = 0;
- desc.fraglen = 0;
-
- sg_init_table(desc.frags, 4);
-
- ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
- skcipher_request_zero(req);
- return ret;
-}
-
/*
* This function makes the assumption that it was ultimately called
* from gss_wrap().
diff --git a/net/sunrpc/auth_gss/gss_krb5_internal.h b/net/sunrpc/auth_gss/gss_krb5_internal.h
index 3afd4065bf3d..a47e9ec228a5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_internal.h
+++ b/net/sunrpc/auth_gss/gss_krb5_internal.h
@@ -172,13 +172,6 @@ u32 krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
int xdr_extend_head(struct xdr_buf *buf, unsigned int base,
unsigned int shiftlen);
-int gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
- struct xdr_buf *outbuf, int offset,
- struct page **pages);
-
-int gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
- struct xdr_buf *inbuf, int offset);
-
u32 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages);
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index fae632da1058..c84d0cf61980 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/oid_registry.h>
#include <linux/sunrpc/msg_prot.h>
-#include <linux/sunrpc/gss_asn1.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 059f6ef1ad18..cb279eb9ac4b 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -281,21 +281,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
return rv;
}
-/*
- * This is the generic cache management routine for all
- * the authentication caches.
- * It checks the currency of a cache item and will (later)
- * initiate an upcall to fill it if needed.
- *
- *
- * Returns 0 if the cache_head can be used, or cache_puts it and returns
- * -EAGAIN if upcall is pending and request has been queued
- * -ETIMEDOUT if upcall failed or request could not be queue or
- * upcall completed but item is still invalid (implying that
- * the cache item has been replaced with a newer one).
- * -ENOENT if cache entry was negative
- */
-int cache_check(struct cache_detail *detail,
+int cache_check_rcu(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp)
{
int rv;
@@ -336,6 +322,31 @@ int cache_check(struct cache_detail *detail,
rv = -ETIMEDOUT;
}
}
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(cache_check_rcu);
+
+/*
+ * This is the generic cache management routine for all
+ * the authentication caches.
+ * It checks the currency of a cache item and will (later)
+ * initiate an upcall to fill it if needed.
+ *
+ *
+ * Returns 0 if the cache_head can be used, or cache_puts it and returns
+ * -EAGAIN if upcall is pending and request has been queued
+ * -ETIMEDOUT if upcall failed or request could not be queue or
+ * upcall completed but item is still invalid (implying that
+ * the cache item has been replaced with a newer one).
+ * -ENOENT if cache entry was negative
+ */
+int cache_check(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp)
+{
+ int rv;
+
+ rv = cache_check_rcu(detail, h, rqstp);
if (rv)
cache_put(h, detail);
return rv;
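
The refactor splits the validity test (cache_check_rcu) out of cache_check, so callers that already hold an RCU-protected entry, such as the c_show() path further down, can test it without taking and dropping a reference. A sketch of the wrapper shape, with illustrative names rather than the sunrpc types:

struct entry;

int entry_check_rcu(struct entry *e);	/* 0 if usable, negative errno if not */
void entry_put(struct entry *e);

/* The plain variant keeps the historical contract: on failure the
 * caller's reference is dropped on its behalf. */
static int entry_check(struct entry *e)
{
	int rv = entry_check_rcu(e);

	if (rv)
		entry_put(e);
	return rv;
}
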
@@ -1427,17 +1438,11 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
convert_to_wallclock(cp->expiry_time),
kref_read(&cp->ref), cp->flags);
- if (!cache_get_rcu(cp))
- return 0;
- if (cache_check(cd, cp, NULL))
- /* cache_check does a cache_put on failure */
+ if (cache_check_rcu(cd, cp, NULL))
+ seq_puts(m, "# ");
+ else if (cache_is_expired(cd, cp))
seq_puts(m, "# ");
- else {
- if (cache_is_expired(cd, cp))
- seq_puts(m, "# ");
- cache_put(cp, cd);
- }
return cd->cache_show(m, cd, cp);
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 0090162ee8c3..2fe88ea79a70 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -958,12 +958,17 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
trace_rpc_clnt_shutdown(clnt);
+ clnt->cl_shutdown = 1;
while (!list_empty(&clnt->cl_tasks)) {
rpc_killall_tasks(clnt);
wait_event_timeout(destroy_wait,
list_empty(&clnt->cl_tasks), 1*HZ);
}
+ /* wait for tasks still in workqueue or waitqueue */
+ wait_event_timeout(destroy_wait,
+ atomic_read(&clnt->cl_task_count) == 0, 1 * HZ);
+
rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
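
These hunks add a per-client task counter alongside the cl_tasks list: it is bumped when a task binds to the client, dropped on release, and shutdown now also waits for it to reach zero, covering tasks that were never added to cl_tasks. A standalone sketch of that bookkeeping using C11 atomics:

#include <stdatomic.h>

struct client {
	atomic_int task_count;
};

static void task_bind(struct client *c)
{
	atomic_fetch_add(&c->task_count, 1);
}

static void task_release(struct client *c)
{
	atomic_fetch_sub(&c->task_count, 1);
	/* a real implementation would also wake anyone waiting in shutdown */
}

static int shutdown_can_finish(struct client *c)
{
	return atomic_load(&c->task_count) == 0;
}
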
@@ -1139,6 +1144,7 @@ void rpc_task_release_client(struct rpc_task *task)
list_del(&task->tk_task);
spin_unlock(&clnt->cl_lock);
task->tk_client = NULL;
+ atomic_dec(&clnt->cl_task_count);
rpc_release_client(clnt);
}
@@ -1189,10 +1195,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_flags |= RPC_TASK_TIMEOUT;
if (clnt->cl_noretranstimeo)
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
- /* Add to the client's list of all tasks */
- spin_lock(&clnt->cl_lock);
- list_add_tail(&task->tk_task, &clnt->cl_tasks);
- spin_unlock(&clnt->cl_lock);
+ atomic_inc(&clnt->cl_task_count);
}
static void
@@ -1787,9 +1790,14 @@ call_reserveresult(struct rpc_task *task)
if (status >= 0) {
if (task->tk_rqstp) {
task->tk_action = call_refresh;
+
+ /* Add to the client's list of all tasks */
+ spin_lock(&task->tk_client->cl_lock);
+ if (list_empty(&task->tk_task))
+ list_add_tail(&task->tk_task, &task->tk_client->cl_tasks);
+ spin_unlock(&task->tk_client->cl_lock);
return;
}
-
rpc_call_rpcerror(task, -EIO);
return;
}
@@ -1854,13 +1862,13 @@ call_refreshresult(struct rpc_task *task)
fallthrough;
case -EAGAIN:
status = -EACCES;
- fallthrough;
- case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
trace_rpc_retry_refresh_status(task);
return;
+ case -EKEYEXPIRED:
+ break;
case -ENOMEM:
rpc_delay(task, HZ >> 4);
return;
@@ -3319,8 +3327,11 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-static void rpc_show_header(void)
+static void rpc_show_header(struct rpc_clnt *clnt)
{
+ printk(KERN_INFO "clnt[%pISpc] RPC tasks[%d]\n",
+ (struct sockaddr *)&clnt->cl_xprt->addr,
+ atomic_read(&clnt->cl_task_count));
printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
"-timeout ---ops--\n");
}
@@ -3352,7 +3363,7 @@ void rpc_show_tasks(struct net *net)
spin_lock(&clnt->cl_lock);
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
if (!header) {
- rpc_show_header();
+ rpc_show_header(clnt);
header++;
}
rpc_show_task(clnt, task);
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index a176d5a0b0ee..32417db340de 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -74,6 +74,9 @@ tasks_stop(struct seq_file *f, void *v)
{
struct rpc_clnt *clnt = f->private;
spin_unlock(&clnt->cl_lock);
+ seq_printf(f, "clnt[%pISpc] RPC tasks[%d]\n",
+ (struct sockaddr *)&clnt->cl_xprt->addr,
+ atomic_read(&clnt->cl_task_count));
}
static const struct seq_operations tasks_seq_operations = {
@@ -179,6 +182,18 @@ xprt_info_show(struct seq_file *f, void *v)
seq_printf(f, "addr: %s\n", xprt->address_strings[RPC_DISPLAY_ADDR]);
seq_printf(f, "port: %s\n", xprt->address_strings[RPC_DISPLAY_PORT]);
seq_printf(f, "state: 0x%lx\n", xprt->state);
+ seq_printf(f, "netns: %u\n", xprt->xprt_net->ns.inum);
+
+ if (xprt->ops->get_srcaddr) {
+ int ret, buflen;
+ char buf[INET6_ADDRSTRLEN];
+
+ buflen = ARRAY_SIZE(buf);
+ ret = xprt->ops->get_srcaddr(xprt, buf, buflen);
+ if (ret < 0)
+ ret = sprintf(buf, "<closed>");
+ seq_printf(f, "saddr: %.*s\n", ret, buf);
+ }
return 0;
}
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7ce3721c06ca..eadc00410ebc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -630,7 +630,7 @@ static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
const char *name)
{
- struct qstr q = QSTR_INIT(name, strlen(name));
+ struct qstr q = QSTR(name);
struct dentry *dentry = d_hash_and_lookup(parent, &q);
if (!dentry) {
dentry = d_alloc(parent, &q);
@@ -1190,8 +1190,7 @@ static const struct rpc_filelist files[] = {
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
- struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
- return d_hash_and_lookup(sb->s_root, &dir);
+ return d_hash_and_lookup(sb->s_root, &QSTR(dir_name));
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
@@ -1300,11 +1299,9 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
struct dentry *gssd_dentry;
struct dentry *clnt_dentry = NULL;
struct dentry *pipe_dentry = NULL;
- struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
- strlen(files[RPCAUTH_gssd].name));
/* We should never get this far if "gssd" doesn't exist */
- gssd_dentry = d_hash_and_lookup(root, &q);
+ gssd_dentry = d_hash_and_lookup(root, &QSTR(files[RPCAUTH_gssd].name));
if (!gssd_dentry)
return ERR_PTR(-ENOENT);
@@ -1314,9 +1311,8 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
goto out;
}
- q.name = gssd_dummy_clnt_dir[0].name;
- q.len = strlen(gssd_dummy_clnt_dir[0].name);
- clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
+ clnt_dentry = d_hash_and_lookup(gssd_dentry,
+ &QSTR(gssd_dummy_clnt_dir[0].name));
if (!clnt_dentry) {
__rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 79879b7d39cb..e7f9c295d13c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -651,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
if (pages > RPCSVC_MAXPAGES)
pages = RPCSVC_MAXPAGES;
- ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
- rqstp->rq_pages);
+ ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+ rqstp->rq_pages);
return ret == pages;
}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 43c57124de52..ae25405d8bd2 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -606,7 +606,8 @@ int svc_port_is_privileged(struct sockaddr *sin)
}
/*
- * Make sure that we don't have too many active connections. If we have,
+ * Make sure that we don't have too many connections that have not yet
+ * demonstrated that they have access to the NFS server. If we have,
* something must be dropped. It's not clear what will happen if we allow
* "too many" connections, but when dealing with network-facing software,
* we have to code defensively. Here we do that by imposing hard limits.
@@ -618,34 +619,26 @@ int svc_port_is_privileged(struct sockaddr *sin)
* The only somewhat efficient mechanism would be if drop old
* connections from the same IP first. But right now we don't even
* record the client IP in svc_sock.
- *
- * single-threaded services that expect a lot of clients will probably
- * need to set sv_maxconn to override the default value which is based
- * on the number of threads
*/
static void svc_check_conn_limits(struct svc_serv *serv)
{
- unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
- (serv->sv_nrthreads+3) * 20;
-
- if (serv->sv_tmpcnt > limit) {
- struct svc_xprt *xprt = NULL;
+ if (serv->sv_tmpcnt > XPT_MAX_TMP_CONN) {
+ struct svc_xprt *xprt = NULL, *xprti;
spin_lock_bh(&serv->sv_lock);
if (!list_empty(&serv->sv_tempsocks)) {
- /* Try to help the admin */
- net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
- serv->sv_name, serv->sv_maxconn ?
- "max number of connections" :
- "number of threads");
/*
* Always select the oldest connection. It's not fair,
- * but so is life
+ * but nor is life.
*/
- xprt = list_entry(serv->sv_tempsocks.prev,
- struct svc_xprt,
- xpt_list);
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_xprt_get(xprt);
+ list_for_each_entry_reverse(xprti, &serv->sv_tempsocks,
+ xpt_list) {
+ if (!test_bit(XPT_PEER_VALID, &xprti->xpt_flags)) {
+ xprt = xprti;
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_xprt_get(xprt);
+ break;
+ }
+ }
}
spin_unlock_bh(&serv->sv_lock);
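
The rewritten limit check no longer sacrifices the oldest temporary connection unconditionally; it walks from oldest to newest and closes the first one that has not yet set XPT_PEER_VALID, leaving validated peers alone. A standalone sketch of that selection rule over an oldest-first array:

#include <stdbool.h>
#include <stddef.h>

struct conn {
	bool peer_valid;	/* has this peer proven itself? */
};

/* Oldest entries first.  Pick the oldest connection that has not yet
 * validated its peer; validated connections are never sacrificed. */
static struct conn *pick_victim(struct conn *oldest_first, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (!oldest_first[i].peer_valid)
			return &oldest_first[i];
	return NULL;	/* everything is validated: drop nothing */
}
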
@@ -671,8 +664,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
for (filled = 0; filled < pages; filled = ret) {
- ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
- rqstp->rq_pages);
+ ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages);
if (ret > filled)
/* Made progress, don't sleep yet */
continue;
@@ -1039,7 +1031,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ if (test_bit(XPT_TEMP, &xprt->xpt_flags) &&
+ !test_bit(XPT_PEER_VALID, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 95397677673b..cb3bd12f5818 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1083,9 +1083,6 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
/* If we have more data, signal svc_xprt_enqueue() to try again */
svsk->sk_tcplen = 0;
svsk->sk_marker = xdr_zero;
-
- smp_wmb();
- tcp_set_rcvlowat(svsk->sk_sk, 1);
}
/**
@@ -1175,17 +1172,10 @@ err_incomplete:
goto err_delete;
if (len == want)
svc_tcp_fragment_received(svsk);
- else {
- /* Avoid more ->sk_data_ready() calls until the rest
- * of the message has arrived. This reduces service
- * thread wake-ups on large incoming messages. */
- tcp_set_rcvlowat(svsk->sk_sk,
- svc_sock_reclen(svsk) - svsk->sk_tcplen);
-
+ else
trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
svc_sock_reclen(svsk),
svsk->sk_tcplen - sizeof(rpc_fraghdr));
- }
goto err_noclose;
error:
if (len != -EAGAIN)
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 62e07c330a66..4e003cb516fe 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1097,6 +1097,12 @@ out_overflow:
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
* adjust the length of the current kvec.
+ *
+ * The returned pointer is valid only until the next call to
+ * xdr_reserve_space() or xdr_commit_encode() on @xdr. The current
+ * implementation of this API guarantees that space reserved for a
+ * four-byte data item remains valid until @xdr is destroyed, but
+ * that might not always be true in the future.
*/
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
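
The added kernel-doc warns that the pointer returned by xdr_reserve_space() only stays valid until the next reserve or commit on the stream. A hedged sketch of the safe calling pattern follows: write through the pointer immediately and reserve again for each item rather than caching pointers (illustrative encoder, not taken from the patch).

#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>

static int encode_two_words(struct xdr_stream *xdr, u32 a, u32 b)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(a);	/* consumed before the next reserve */

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(b);
	return 0;
}
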
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 720d3ba742ec..7e98d4dd9f10 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -603,23 +603,6 @@ struct rpc_xprt *xprt_iter_get_helper(struct rpc_xprt_iter *xpi,
}
/**
- * xprt_iter_get_xprt - Returns the rpc_xprt pointed to by the cursor
- * @xpi: pointer to rpc_xprt_iter
- *
- * Returns a reference to the struct rpc_xprt that is currently
- * pointed to by the cursor.
- */
-struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi)
-{
- struct rpc_xprt *xprt;
-
- rcu_read_lock();
- xprt = xprt_iter_get_helper(xpi, xprt_iter_ops(xpi)->xpi_xprt);
- rcu_read_unlock();
- return xprt;
-}
-
-/**
* xprt_iter_get_next - Returns the next rpc_xprt following the cursor
* @xpi: pointer to rpc_xprt_iter
*
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index fa9d1b49599b..075695173648 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -337,7 +337,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
void vsock_remove_sock(struct vsock_sock *vsk)
{
- vsock_remove_bound(vsk);
+ /* Transport reassignment must not remove the binding. */
+ if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
+ vsock_remove_bound(vsk);
+
vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);
@@ -821,12 +824,13 @@ static void __vsock_release(struct sock *sk, int level)
*/
lock_sock_nested(sk, level);
+ sock_orphan(sk);
+
if (vsk->transport)
vsk->transport->release(vsk);
else if (sock_type_connectible(sk->sk_type))
vsock_remove_sock(vsk);
- sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
skb_queue_purge(&sk->sk_receive_queue);
@@ -1519,6 +1523,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
if (err < 0)
goto out;
+ /* sk_err might have been set as a result of an earlier
+ * (failed) connect attempt.
+ */
+ sk->sk_err = 0;
+
/* Mark sock as connecting and set the error code to in
* progress in case this is a non-blocking connect.
*/
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 56c232cf5b0f..31342ab502b4 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -13,12 +13,12 @@
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
/* Older (VMBUS version 'VERSION_WIN10' or before) Windows hosts have some
* stricter requirements on the hv_sock ring buffer size of six 4K pages.
- * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
- * limitation; but, keep the defaults the same for compat.
+ * HV_HYP_PAGE_SIZE is defined as 4K. Newer hosts don't have this limitation;
+ * but, keep the defaults the same for compat.
*/
#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 70857018f020..12b780de8779 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -143,10 +143,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
if (result)
return result;
- if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
- debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
- rdev->wiphy.debugfsdir,
- rdev->wiphy.debugfsdir->d_parent, newname);
+ debugfs_change_name(rdev->wiphy.debugfsdir, "%s", newname);
nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 3bb04b05c5ce..bea70eb6f034 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -640,10 +640,8 @@ EXPORT_SYMBOL(wireless_send_event);
#ifdef CONFIG_CFG80211_WEXT
static void wireless_warn_cfg80211_wext(void)
{
- char name[sizeof(current->comm)];
-
pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n",
- get_task_comm(name, current));
+ current->comm);
}
#endif
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 98f1e2b67c76..c397eb99d867 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -506,7 +506,7 @@ xmit:
skb_dst_set(skb, dst);
skb->dev = tdev;
- err = dst_output(xi->net, skb->sk, skb);
+ err = dst_output(xi->net, skb_to_full_sk(skb), skb);
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index b5025cf6136e..f7abd42c077d 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -802,7 +802,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
!skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
skb->protocol = htons(ETH_P_IP);
- if (skb->sk)
+ if (skb->sk && sk_fullsock(skb->sk))
xfrm_local_error(skb, mtu);
else
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -838,6 +838,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
struct dst_entry *dst = skb_dst(skb);
+ struct sock *sk = skb_to_full_sk(skb);
if (skb->ignore_df)
goto out;
@@ -852,9 +853,9 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
skb->dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
- if (xfrm6_local_dontfrag(skb->sk))
+ if (xfrm6_local_dontfrag(sk))
ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
- else if (skb->sk)
+ else if (sk)
xfrm_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9e510021ee91..6551e588fe52 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2964,7 +2964,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- dst_output(net, skb->sk, skb);
+ dst_output(net, skb_to_full_sk(skb), skb);
}
out:
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index e500aebbad22..dbdf8a39dffe 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -714,10 +714,12 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
oseq += skb_shinfo(skb)->gso_segs;
}
- if (unlikely(xo->seq.low < replay_esn->oseq)) {
- XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
- xo->seq.hi = oseq_hi;
- replay_esn->oseq_hi = oseq_hi;
+ if (unlikely(oseq < replay_esn->oseq)) {
+ replay_esn->oseq_hi = ++oseq_hi;
+ if (xo->seq.low < replay_esn->oseq) {
+ XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
+ xo->seq.hi = oseq_hi;
+ }
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 34067cb8a479..ad2202fa82f3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -34,6 +34,8 @@
#define xfrm_state_deref_prot(table, net) \
rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+#define xfrm_state_deref_check(table, net) \
+ rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
static void xfrm_state_gc_task(struct work_struct *work);
@@ -62,6 +64,8 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
u32 reqid,
unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}
@@ -70,6 +74,8 @@ static inline unsigned int xfrm_src_hash(struct net *net,
const xfrm_address_t *saddr,
unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
@@ -77,11 +83,15 @@ static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
__be32 spi, u8 proto, unsigned short family)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
+ lockdep_assert_held(&net->xfrm.xfrm_state_lock);
+
return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}
@@ -1108,16 +1118,38 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
x->props.family = tmpl->encap_family;
}
-static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
+struct xfrm_hash_state_ptrs {
+ const struct hlist_head *bydst;
+ const struct hlist_head *bysrc;
+ const struct hlist_head *byspi;
+ unsigned int hmask;
+};
+
+static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
+{
+ unsigned int sequence;
+
+ do {
+ sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+
+ ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
+ ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
+ ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
+ ptrs->hmask = net->xfrm.state_hmask;
+ } while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
+}
+
+static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family,
struct xfrm_dev_offload *xdo)
{
- unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
+ unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
+ hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
@@ -1151,15 +1183,16 @@ static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
return NULL;
}
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family)
{
- unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
+ unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
+ hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
@@ -1181,11 +1214,11 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
__be32 spi, u8 proto,
unsigned short family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct hlist_head *state_cache_input;
struct xfrm_state *x = NULL;
- int cpu = get_cpu();
- state_cache_input = per_cpu_ptr(net->xfrm.state_cache_input, cpu);
+ state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
rcu_read_lock();
hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
@@ -1202,7 +1235,9 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
goto out;
}
- x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
if (x && x->km.state == XFRM_STATE_VALID) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -1217,20 +1252,20 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
out:
rcu_read_unlock();
- put_cpu();
return x;
}
EXPORT_SYMBOL(xfrm_input_state_lookup);
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
+ u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
- unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
+ unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
struct xfrm_state *x;
- hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
+ hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -1250,14 +1285,17 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct net *net = xs_net(x);
u32 mark = x->mark.v & x->mark.m;
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
if (use_spi)
- return __xfrm_state_lookup(net, mark, &x->id.daddr,
+ return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
x->id.spi, x->id.proto, family);
else
- return __xfrm_state_lookup_byaddr(net, mark,
+ return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
&x->id.daddr,
&x->props.saddr,
x->id.proto, family);
@@ -1331,6 +1369,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family, u32 if_id)
{
static xfrm_address_t saddr_wildcard = { };
+ struct xfrm_hash_state_ptrs state_ptrs;
struct net *net = xp_net(pol);
unsigned int h, h_wildcard;
struct xfrm_state *x, *x0, *to_put;
@@ -1395,8 +1434,10 @@ cached:
else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
WARN_ON(1);
- h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
+ hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
@@ -1429,8 +1470,9 @@ cached:
if (best || acquire_in_progress)
goto found;
- h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
+ h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
+ encap_family, state_ptrs.hmask);
+ hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
@@ -1468,7 +1510,7 @@ found:
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
- (x0 = __xfrm_state_lookup_all(net, mark, daddr,
+ (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
tmpl->id.spi, tmpl->id.proto,
encap_family,
&pol->xdo)) != NULL) {
@@ -2253,10 +2295,13 @@ struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct xfrm_state *x;
rcu_read_lock();
- x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
rcu_read_unlock();
return x;
}
@@ -2267,10 +2312,14 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
+ struct xfrm_hash_state_ptrs state_ptrs;
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
+
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
+ x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
}
diff --git a/rust/Makefile b/rust/Makefile
index 71a05a3c895a..ea3849eb78f6 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -144,7 +144,7 @@ rusttestlib-kernel: private rustc_target_flags = --extern ffi \
--extern bindings --extern uapi
rusttestlib-kernel: $(src)/kernel/lib.rs \
rusttestlib-bindings rusttestlib-uapi rusttestlib-build_error \
- $(obj)/libmacros.so $(obj)/bindings.o FORCE
+ $(obj)/$(libmacros_name) $(obj)/bindings.o FORCE
+$(call if_changed,rustc_test_library)
rusttestlib-bindings: private rustc_target_flags = --extern ffi
@@ -240,6 +240,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
-fstrict-flex-arrays=% -fmin-function-alignment=% \
+ -fzero-init-padding-bits=% \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
@@ -331,10 +332,11 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ;
$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
+rust_exports = $(NM) -p --defined-only $(1) | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ && $$3!~/__odr_asan/ { printf $(2),$$3 }'
+
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
- $(NM) -p --defined-only $< \
- | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ $(call rust_exports,$<,"EXPORT_SYMBOL_RUST_GPL(%s);\n") > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
@@ -403,11 +405,36 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
__ashlti3 __lshrti3
endif
+ifdef CONFIG_MODVERSIONS
+cmd_gendwarfksyms = $(if $(skip_gendwarfksyms),, \
+ $(call rust_exports,$@,"%s\n") | \
+ scripts/gendwarfksyms/gendwarfksyms \
+ $(if $(KBUILD_GENDWARFKSYMS_STABLE), --stable) \
+ $(if $(KBUILD_SYMTYPES), --symtypes $(@:.o=.symtypes),) \
+ $@ >> $(dot-target).cmd)
+endif
+
define rule_rustc_library
$(call cmd_and_fixdep,rustc_library)
$(call cmd,gen_objtooldep)
+ $(call cmd,gendwarfksyms)
endef
+define rule_rust_cc_library
+ $(call if_changed_rule,cc_o_c)
+ $(call cmd,force_checksrc)
+ $(call cmd,gendwarfksyms)
+endef
+
+# helpers.o uses the same export mechanism as Rust libraries, so ensure symbol
+# versions are calculated for the helpers too.
+$(obj)/helpers/helpers.o: $(src)/helpers/helpers.c $(recordmcount_source) FORCE
+ +$(call if_changed_rule,rust_cc_library)
+
+# Disable symbol versioning for exports.o to avoid conflicts with the actual
+# symbol versions generated from Rust objects.
+$(obj)/exports.o: private skip_gendwarfksyms = 1
+
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
@@ -419,13 +446,16 @@ ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
+$(obj)/compiler_builtins.o: private skip_gendwarfksyms = 1
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+$(call if_changed_rule,rustc_library)
+$(obj)/build_error.o: private skip_gendwarfksyms = 1
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
+$(call if_changed_rule,rustc_library)
+$(obj)/ffi.o: private skip_gendwarfksyms = 1
$(obj)/ffi.o: $(src)/ffi.rs $(obj)/compiler_builtins.o FORCE
+$(call if_changed_rule,rustc_library)
@@ -437,6 +467,7 @@ $(obj)/bindings.o: $(src)/bindings/lib.rs \
+$(call if_changed_rule,rustc_library)
$(obj)/uapi.o: private rustc_target_flags = --extern ffi
+$(obj)/uapi.o: private skip_gendwarfksyms = 1
$(obj)/uapi.o: $(src)/uapi/lib.rs \
$(obj)/ffi.o \
$(obj)/uapi/uapi_generated.rs FORCE
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 5c4dfe22f41a..55354e4dec14 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -20,9 +20,13 @@
#include <linux/jump_label.h>
#include <linux/mdio.h>
#include <linux/miscdevice.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/pid_namespace.h>
+#include <linux/platform_device.h>
#include <linux/poll.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/security.h>
diff --git a/rust/helpers/device.c b/rust/helpers/device.c
new file mode 100644
index 000000000000..b2135c6686b0
--- /dev/null
+++ b/rust/helpers/device.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/device.h>
+
+int rust_helper_devm_add_action(struct device *dev,
+ void (*action)(void *),
+ void *data)
+{
+ return devm_add_action(dev, action, data);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index dcf827a61b52..0640b7e115be 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -12,14 +12,19 @@
#include "build_assert.c"
#include "build_bug.c"
#include "cred.c"
+#include "device.c"
#include "err.c"
#include "fs.c"
+#include "io.c"
#include "jump_label.c"
#include "kunit.c"
#include "mutex.c"
#include "page.c"
+#include "platform.c"
+#include "pci.c"
#include "pid_namespace.c"
#include "rbtree.c"
+#include "rcu.c"
#include "refcount.c"
#include "security.c"
#include "signal.c"
diff --git a/rust/helpers/io.c b/rust/helpers/io.c
new file mode 100644
index 000000000000..4c2401ccd720
--- /dev/null
+++ b/rust/helpers/io.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/io.h>
+
+void __iomem *rust_helper_ioremap(phys_addr_t offset, size_t size)
+{
+ return ioremap(offset, size);
+}
+
+void rust_helper_iounmap(volatile void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+u8 rust_helper_readb(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+
+u16 rust_helper_readw(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+
+u32 rust_helper_readl(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#endif
+
+void rust_helper_writeb(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+
+void rust_helper_writew(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+
+void rust_helper_writel(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq(u64 value, volatile void __iomem *addr)
+{
+ writeq(value, addr);
+}
+#endif
+
+u8 rust_helper_readb_relaxed(const volatile void __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+u16 rust_helper_readw_relaxed(const volatile void __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+u32 rust_helper_readl_relaxed(const volatile void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq_relaxed(const volatile void __iomem *addr)
+{
+ return readq_relaxed(addr);
+}
+#endif
+
+void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ writeb_relaxed(value, addr);
+}
+
+void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ writew_relaxed(value, addr);
+}
+
+void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ writel_relaxed(value, addr);
+}
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ writeq_relaxed(value, addr);
+}
+#endif
diff --git a/rust/helpers/pci.c b/rust/helpers/pci.c
new file mode 100644
index 000000000000..8ba22f911459
--- /dev/null
+++ b/rust/helpers/pci.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pci.h>
+
+void rust_helper_pci_set_drvdata(struct pci_dev *pdev, void *data)
+{
+ pci_set_drvdata(pdev, data);
+}
+
+void *rust_helper_pci_get_drvdata(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+resource_size_t rust_helper_pci_resource_len(struct pci_dev *pdev, int bar)
+{
+ return pci_resource_len(pdev, bar);
+}
diff --git a/rust/helpers/platform.c b/rust/helpers/platform.c
new file mode 100644
index 000000000000..ab9b9f317301
--- /dev/null
+++ b/rust/helpers/platform.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/platform_device.h>
+
+void *rust_helper_platform_get_drvdata(const struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+
+void rust_helper_platform_set_drvdata(struct platform_device *pdev, void *data)
+{
+ platform_set_drvdata(pdev, data);
+}
diff --git a/rust/helpers/rcu.c b/rust/helpers/rcu.c
new file mode 100644
index 000000000000..f1cec6583513
--- /dev/null
+++ b/rust/helpers/rcu.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rcupdate.h>
+
+void rust_helper_rcu_read_lock(void)
+{
+ rcu_read_lock();
+}
+
+void rust_helper_rcu_read_unlock(void)
+{
+ rcu_read_unlock();
+}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index d5e6a19ff6b7..db2d9658ba47 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -6,6 +6,7 @@
use crate::{
bindings,
+ str::CStr,
types::{ARef, Opaque},
};
use core::{fmt, ptr};
@@ -180,6 +181,12 @@ impl Device {
)
};
}
+
+ /// Checks if property is present or not.
+ pub fn property_present(&self, name: &CStr) -> bool {
+ // SAFETY: By the invariant of `CStr`, `name` is null-terminated.
+ unsafe { bindings::device_property_present(self.as_raw().cast_const(), name.as_char_ptr()) }
+ }
}
// SAFETY: Instances of `Device` are always reference-counted.
diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs
new file mode 100644
index 000000000000..e5859217a579
--- /dev/null
+++ b/rust/kernel/device_id.rs
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic implementation of device IDs.
+//!
+//! Each bus / subsystem that matches device and driver through a bus / subsystem specific ID is
+//! expected to implement [`RawDeviceId`].
+
+use core::mem::MaybeUninit;
+
+/// Marker trait to indicate a Rust device ID type represents a corresponding C device ID type.
+///
+/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to
+/// guarantee (at compile-time) zero-termination of device id tables provided by drivers.
+///
+/// # Safety
+///
+/// Implementers must ensure that:
+/// - `Self` is layout-compatible with [`RawDeviceId::RawType`]; i.e. it's safe to transmute to
+/// `RawDeviceId`.
+///
+/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building
+/// the ID table.
+///
+/// Ideally, this should be achieved using a const function that does conversion instead of
+/// transmute; however, const trait functions relies on `const_trait_impl` unstable feature,
+/// which is broken/gone in Rust 1.73.
+///
+/// - `DRIVER_DATA_OFFSET` is the offset of context/data field of the device ID (usually named
+/// `driver_data`) of the device ID, the field is suitable sized to write a `usize` value.
+///
+/// Similar to the previous requirement, the data should ideally be added during `Self` to
+/// `RawType` conversion, but there's currently no way to do it when using traits in const.
+pub unsafe trait RawDeviceId {
+ /// The raw type that holds the device id.
+ ///
+ /// Id tables created from [`Self`] are going to hold this type in its zero-terminated array.
+ type RawType: Copy;
+
+ /// The offset to the context/data field.
+ const DRIVER_DATA_OFFSET: usize;
+
+ /// The index stored at `DRIVER_DATA_OFFSET` of the implementor of the [`RawDeviceId`] trait.
+ fn index(&self) -> usize;
+}
+
+/// A zero-terminated device id array.
+#[repr(C)]
+pub struct RawIdArray<T: RawDeviceId, const N: usize> {
+ ids: [T::RawType; N],
+ sentinel: MaybeUninit<T::RawType>,
+}
+
+impl<T: RawDeviceId, const N: usize> RawIdArray<T, N> {
+ #[doc(hidden)]
+ pub const fn size(&self) -> usize {
+ core::mem::size_of::<Self>()
+ }
+}
+
+/// A zero-terminated device id array, followed by context data.
+#[repr(C)]
+pub struct IdArray<T: RawDeviceId, U, const N: usize> {
+ raw_ids: RawIdArray<T, N>,
+ id_infos: [U; N],
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
+ /// Creates a new instance of the array.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ pub const fn new(ids: [(T, U); N]) -> Self {
+ let mut raw_ids = [const { MaybeUninit::<T::RawType>::uninit() }; N];
+ let mut infos = [const { MaybeUninit::uninit() }; N];
+
+ let mut i = 0usize;
+ while i < N {
+ // SAFETY: by the safety requirement of `RawDeviceId`, we're guaranteed that `T` is
+ // layout-wise compatible with `RawType`.
+ raw_ids[i] = unsafe { core::mem::transmute_copy(&ids[i].0) };
+ // SAFETY: by the safety requirement of `RawDeviceId`, this would be effectively
+ // `raw_ids[i].driver_data = i;`.
+ unsafe {
+ raw_ids[i]
+ .as_mut_ptr()
+ .byte_offset(T::DRIVER_DATA_OFFSET as _)
+ .cast::<usize>()
+ .write(i);
+ }
+
+ // SAFETY: this is effectively a move: `infos[i] = ids[i].1`. We make a copy here but
+ // later forget `ids`.
+ infos[i] = MaybeUninit::new(unsafe { core::ptr::read(&ids[i].1) });
+ i += 1;
+ }
+
+ core::mem::forget(ids);
+
+ Self {
+ raw_ids: RawIdArray {
+ // SAFETY: this is effectively `array_assume_init`, which is unstable, so we use
+ // `transmute_copy` instead. We have initialized all elements of `raw_ids` so this
+ // `array_assume_init` is safe.
+ ids: unsafe { core::mem::transmute_copy(&raw_ids) },
+ sentinel: MaybeUninit::zeroed(),
+ },
+ // SAFETY: We have initialized all elements of `infos` so this `array_assume_init` is
+ // safe.
+ id_infos: unsafe { core::mem::transmute_copy(&infos) },
+ }
+ }
+
+ /// Reference to the contained [`RawIdArray`].
+ pub const fn raw_ids(&self) -> &RawIdArray<T, N> {
+ &self.raw_ids
+ }
+}
+
+/// A device id table.
+///
+/// This trait is only implemented by `IdArray`.
+///
+/// The purpose of this trait is to allow `&'static dyn IdArray<T, U>` to be in context when `N` in
+/// `IdArray` doesn't matter.
+pub trait IdTable<T: RawDeviceId, U> {
+ /// Obtain the pointer to the ID table.
+ fn as_ptr(&self) -> *const T::RawType;
+
+ /// Obtain the pointer to the bus specific device ID from an index.
+ fn id(&self, index: usize) -> &T::RawType;
+
+ /// Obtain the pointer to the driver-specific information from an index.
+ fn info(&self, index: usize) -> &U;
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdTable<T, U> for IdArray<T, U, N> {
+ fn as_ptr(&self) -> *const T::RawType {
+ // This cannot be `self.raw_ids.ids.as_ptr()`, as the returned pointer must have correct
+ // provenance to access the sentinel.
+ (self as *const Self).cast()
+ }
+
+ fn id(&self, index: usize) -> &T::RawType {
+ &self.raw_ids.ids[index]
+ }
+
+ fn info(&self, index: usize) -> &U {
+ &self.id_infos[index]
+ }
+}
+
+/// Create device table alias for modpost.
+#[macro_export]
+macro_rules! module_device_table {
+ ($table_type: literal, $module_table_name:ident, $table_name:ident) => {
+ #[rustfmt::skip]
+ #[export_name =
+ concat!("__mod_device_table__", $table_type,
+ "__", module_path!(),
+ "_", line!(),
+ "_", stringify!($table_name))
+ ]
+ static $module_table_name: [core::mem::MaybeUninit<u8>; $table_name.raw_ids().size()] =
+ unsafe { core::mem::transmute_copy($table_name.raw_ids()) };
+ };
+}
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
new file mode 100644
index 000000000000..942376f6f3af
--- /dev/null
+++ b/rust/kernel/devres.rs
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Devres abstraction
+//!
+//! [`Devres`] represents an abstraction for the kernel devres (device resource management)
+//! implementation.
+
+use crate::{
+ alloc::Flags,
+ bindings,
+ device::Device,
+ error::{Error, Result},
+ ffi::c_void,
+ prelude::*,
+ revocable::Revocable,
+ sync::Arc,
+ types::ARef,
+};
+
+use core::ops::Deref;
+
+#[pin_data]
+struct DevresInner<T> {
+ dev: ARef<Device>,
+ callback: unsafe extern "C" fn(*mut c_void),
+ #[pin]
+ data: Revocable<T>,
+}
+
+/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
+/// manage their lifetime.
+///
+/// [`Device`] bound resources should be freed either when the resource goes out of scope or when
+/// the [`Device`] is unbound, whichever happens first.
+///
+/// To achieve that [`Devres`] registers a devres callback on creation, which is called once the
+/// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]).
+///
+/// After the [`Device`] has been unbound it is not possible to access the encapsulated resource
+/// anymore.
+///
+/// [`Devres`] users should make sure to simply free the corresponding backing resource in `T`'s
+/// [`Drop`] implementation.
+///
+/// # Example
+///
+/// ```no_run
+/// # use kernel::{bindings, c_str, device::Device, devres::Devres, io::{Io, IoRaw}};
+/// # use core::ops::Deref;
+///
+/// // See also [`pci::Bar`] for a real example.
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+///
+/// impl<const SIZE: usize> IoMem<SIZE> {
+/// /// # Safety
+/// ///
+/// /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
+/// /// virtual address space.
+/// unsafe fn new(paddr: usize) -> Result<Self> {
+/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+/// // valid for `ioremap`.
+/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// if addr.is_null() {
+/// return Err(ENOMEM);
+/// }
+///
+/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
+/// fn drop(&mut self) {
+/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
+/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
+/// type Target = Io<SIZE>;
+///
+/// fn deref(&self) -> &Self::Target {
+/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
+/// unsafe { Io::from_raw(&self.0) }
+/// }
+/// }
+/// # fn no_run() -> Result<(), Error> {
+/// # // SAFETY: Invalid usage; just for the example to get an `ARef<Device>` instance.
+/// # let dev = unsafe { Device::get_device(core::ptr::null_mut()) };
+///
+/// // SAFETY: Invalid usage for example purposes.
+/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+/// let devres = Devres::new(&dev, iomem, GFP_KERNEL)?;
+///
+/// let res = devres.try_access().ok_or(ENXIO)?;
+/// res.writel(0x42, 0x0);
+/// # Ok(())
+/// # }
+/// ```
+pub struct Devres<T>(Arc<DevresInner<T>>);
+
+impl<T> DevresInner<T> {
+ fn new(dev: &Device, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> {
+ let inner = Arc::pin_init(
+ pin_init!( DevresInner {
+ dev: dev.into(),
+ callback: Self::devres_callback,
+ data <- Revocable::new(data),
+ }),
+ flags,
+ )?;
+
+ // Convert `Arc<DevresInner>` into a raw pointer and make devres own this reference until
+ // `Self::devres_callback` is called.
+ let data = inner.clone().into_raw();
+
+ // SAFETY: `devm_add_action` guarantees to call `Self::devres_callback` once `dev` is
+ // detached.
+ let ret =
+ unsafe { bindings::devm_add_action(dev.as_raw(), Some(inner.callback), data as _) };
+
+ if ret != 0 {
+ // SAFETY: We just created another reference to `inner` in order to pass it to
+ // `bindings::devm_add_action`. If `bindings::devm_add_action` fails, we have to drop
+ // this reference accordingly.
+ let _ = unsafe { Arc::from_raw(data) };
+ return Err(Error::from_errno(ret));
+ }
+
+ Ok(inner)
+ }
+
+ fn as_ptr(&self) -> *const Self {
+ self as _
+ }
+
+ fn remove_action(this: &Arc<Self>) {
+ // SAFETY:
+ // - `self.inner.dev` is a valid `Device`,
+ // - the `action` and `data` pointers are the exact same ones as given to devm_add_action()
+ // previously,
+ // - `self` is always valid, even if the action has been released already.
+ let ret = unsafe {
+ bindings::devm_remove_action_nowarn(
+ this.dev.as_raw(),
+ Some(this.callback),
+ this.as_ptr() as _,
+ )
+ };
+
+ if ret == 0 {
+ // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
+ // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
+ // of this reference.
+ let _ = unsafe { Arc::from_raw(this.as_ptr()) };
+ }
+ }
+
+ #[allow(clippy::missing_safety_doc)]
+ unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) {
+ let ptr = ptr as *mut DevresInner<T>;
+ // Devres owned this memory; now that we received the callback, drop the `Arc` and hence the
+ // reference.
+ // SAFETY: Safe, since we leaked an `Arc` reference to devm_add_action() in
+ // `DevresInner::new`.
+ let inner = unsafe { Arc::from_raw(ptr) };
+
+ inner.data.revoke();
+ }
+}
+
+impl<T> Devres<T> {
+ /// Creates a new [`Devres`] instance of the given `data`. The `data` encapsulated within the
+ /// returned `Devres` instance will be revoked once the device is detached.
+ pub fn new(dev: &Device, data: T, flags: Flags) -> Result<Self> {
+ let inner = DevresInner::new(dev, data, flags)?;
+
+ Ok(Devres(inner))
+ }
+
+ /// Same as [`Devres::new`], but does not return a `Devres` instance. Instead the given `data`
+ /// is owned by devres and will be revoked / dropped, once the device is detached.
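+ ///
+ /// # Examples
+ ///
+ /// A sketch reusing the `IoMem` type and the `dev` handle from the type-level example above;
+ /// devres drops the mapping once the device is detached, without keeping a `Devres` handle
+ /// around:
+ ///
+ /// ```ignore
+ /// // SAFETY: Invalid usage for example purposes.
+ /// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+ /// Devres::new_foreign_owned(&dev, iomem, GFP_KERNEL)?;
+ /// ```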
+ pub fn new_foreign_owned(dev: &Device, data: T, flags: Flags) -> Result {
+ let _ = DevresInner::new(dev, data, flags)?;
+
+ Ok(())
+ }
+}
+
+impl<T> Deref for Devres<T> {
+ type Target = Revocable<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0.data
+ }
+}
+
+impl<T> Drop for Devres<T> {
+ fn drop(&mut self) {
+ DevresInner::remove_action(&self.0);
+ }
+}
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
new file mode 100644
index 000000000000..2a16d5e64e6c
--- /dev/null
+++ b/rust/kernel/driver.rs
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
+//!
+//! Each bus / subsystem is expected to implement [`RegistrationOps`], which allows drivers to
+//! register using the [`Registration`] class.
+
+use crate::error::{Error, Result};
+use crate::{device, init::PinInit, of, str::CStr, try_pin_init, types::Opaque, ThisModule};
+use core::pin::Pin;
+use macros::{pin_data, pinned_drop};
+
+/// The [`RegistrationOps`] trait serves as generic interface for subsystems (e.g., PCI, Platform,
+/// Amba, etc.) to provide the corresponding subsystem specific implementation to register /
+/// unregister a driver of the particular type (`RegType`).
+///
+/// For instance, the PCI subsystem would set `RegType` to `bindings::pci_driver` and call
+/// `bindings::__pci_register_driver` from `RegistrationOps::register` and
+/// `bindings::pci_unregister_driver` from `RegistrationOps::unregister`.
+///
+/// # Safety
+///
+/// A call to [`RegistrationOps::unregister`] for a given instance of `RegType` is only valid if a
+/// preceding call to [`RegistrationOps::register`] has been successful.
+pub unsafe trait RegistrationOps {
+ /// The type that holds information about the registration. This is typically a struct defined
+ /// by the C portion of the kernel.
+ type RegType: Default;
+
+ /// Registers a driver.
+ ///
+ /// # Safety
+ ///
+ /// On success, `reg` must remain pinned and valid until the matching call to
+ /// [`RegistrationOps::unregister`].
+ unsafe fn register(
+ reg: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result;
+
+ /// Unregisters a driver previously registered with [`RegistrationOps::register`].
+ ///
+ /// # Safety
+ ///
+ /// Must only be called after a preceding successful call to [`RegistrationOps::register`] for
+ /// the same `reg`.
+ unsafe fn unregister(reg: &Opaque<Self::RegType>);
+}
+
+/// A [`Registration`] is a generic type that represents the registration of some driver type (e.g.
+/// `bindings::pci_driver`). Therefore a [`Registration`] must be initialized with a type that
+/// implements the [`RegistrationOps`] trait, such that the generic `T::register` and
+/// `T::unregister` calls result in the subsystem specific registration calls.
+///
+/// Once the `Registration` structure is dropped, the driver is unregistered.
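+///
+/// # Examples
+///
+/// A sketch of pin-initializing a registration inside a module structure; `MyAdapter` stands in
+/// for any type implementing [`RegistrationOps`], and the `module_driver!` macro below generates
+/// essentially this:
+///
+/// ```ignore
+/// #[pin_data]
+/// struct MyModule {
+///     #[pin]
+///     _driver: Registration<MyAdapter>,
+/// }
+///
+/// // In the module's `init`:
+/// // _driver <- Registration::new(<Self as ModuleMetadata>::NAME, module),
+/// ```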
+#[pin_data(PinnedDrop)]
+pub struct Registration<T: RegistrationOps> {
+ #[pin]
+ reg: Opaque<T::RegType>,
+}
+
+// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
+// share references to it with multiple threads as nothing can be done.
+unsafe impl<T: RegistrationOps> Sync for Registration<T> {}
+
+// SAFETY: Both registration and unregistration are implemented in C and safe to be performed from
+// any thread, so `Registration` is `Send`.
+unsafe impl<T: RegistrationOps> Send for Registration<T> {}
+
+impl<T: RegistrationOps> Registration<T> {
+ /// Creates a new instance of the registration object.
+ pub fn new(name: &'static CStr, module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ reg <- Opaque::try_ffi_init(|ptr: *mut T::RegType| {
+ // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write.
+ unsafe { ptr.write(T::RegType::default()) };
+
+ // SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write, and it has
+ // just been initialised above, so it's also valid for read.
+ let drv = unsafe { &*(ptr as *const Opaque<T::RegType>) };
+
+ // SAFETY: `drv` is guaranteed to be pinned until `T::unregister`.
+ unsafe { T::register(drv, name, module) }
+ }),
+ })
+ }
+}
+
+#[pinned_drop]
+impl<T: RegistrationOps> PinnedDrop for Registration<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: The existence of `self` guarantees that `self.reg` has previously been
+ // successfully registered with `T::register`
+ unsafe { T::unregister(&self.reg) };
+ }
+}
+
+/// Declares a kernel module that exposes a single driver.
+///
+/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
+/// macros.
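+///
+/// # Examples
+///
+/// A sketch of how a bus abstraction might wrap it (`sample::Adapter` is a made-up adapter type);
+/// this is essentially what `module_pci_driver!` and `module_platform_driver!` do:
+///
+/// ```ignore
+/// #[macro_export]
+/// macro_rules! module_sample_driver {
+///     ($($f:tt)*) => {
+///         $crate::module_driver!(<T>, $crate::sample::Adapter<T>, { $($f)* });
+///     };
+/// }
+/// ```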
+#[macro_export]
+macro_rules! module_driver {
+ (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
+ type Ops<$gen_type> = $driver_ops;
+
+ #[$crate::prelude::pin_data]
+ struct DriverModule {
+ #[pin]
+ _driver: $crate::driver::Registration<Ops<$type>>,
+ }
+
+ impl $crate::InPlaceModule for DriverModule {
+ fn init(
+ module: &'static $crate::ThisModule
+ ) -> impl $crate::init::PinInit<Self, $crate::error::Error> {
+ $crate::try_pin_init!(Self {
+ _driver <- $crate::driver::Registration::new(
+ <Self as $crate::ModuleMetadata>::NAME,
+ module,
+ ),
+ })
+ }
+ }
+
+ $crate::prelude::module! {
+ type: DriverModule,
+ $($f)*
+ }
+ }
+}
+
+/// The bus independent adapter to match drivers and devices.
+///
+/// This trait should be implemented by the bus specific adapter, which represents the connection
+/// of a device and a driver.
+///
+/// It provides bus independent functions for device / driver interactions.
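+///
+/// # Examples
+///
+/// A sketch of a bus adapter forwarding its driver type's OF id table; this mirrors what the
+/// platform bus implementation does:
+///
+/// ```ignore
+/// impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
+///     type IdInfo = T::IdInfo;
+///
+///     fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
+///         T::OF_ID_TABLE
+///     }
+/// }
+/// ```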
+pub trait Adapter {
+ /// The type holding driver private data about each device id supported by the driver.
+ type IdInfo: 'static;
+
+ /// The [`of::IdTable`] of the corresponding driver.
+ fn of_id_table() -> Option<of::IdTable<Self::IdInfo>>;
+
+ /// Returns the driver's private data from the matching entry in the [`of::IdTable`], if any.
+ ///
+ /// If this returns `None`, it means there is no match with an entry in the [`of::IdTable`].
+ #[cfg(CONFIG_OF)]
+ fn of_id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ let table = Self::of_id_table()?;
+
+ // SAFETY:
+ // - `table` has static lifetime, hence it's valid for read,
+ // - `dev` is guaranteed to be valid while it's alive, and so is `dev.as_raw()`.
+ let raw_id = unsafe { bindings::of_match_device(table.as_ptr(), dev.as_raw()) };
+
+ if raw_id.is_null() {
+ None
+ } else {
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*raw_id.cast::<of::DeviceId>() };
+
+ Some(table.info(<of::DeviceId as crate::device_id::RawDeviceId>::index(id)))
+ }
+ }
+
+ #[cfg(not(CONFIG_OF))]
+ #[allow(missing_docs)]
+ fn of_id_info(_dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ None
+ }
+
+ /// Returns the driver's private data from the matching entry of any of the ID tables, if any.
+ ///
+ /// If this returns `None`, it means that there is no match in any of the ID tables directly
+ /// associated with a [`device::Device`].
+ fn id_info(dev: &device::Device) -> Option<&'static Self::IdInfo> {
+ let id = Self::of_id_info(dev);
+ if id.is_some() {
+ return id;
+ }
+
+ None
+ }
+}
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 3f9236c1c9d5..7fd1ea8265a5 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -870,7 +870,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
/// use kernel::{types::Opaque, init::pin_init_from_closure};
/// #[repr(C)]
/// struct RawFoo([u8; 16]);
- /// extern {
+ /// extern "C" {
/// fn init_foo(_: *mut RawFoo);
/// }
///
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
new file mode 100644
index 000000000000..d4a73e52e3ee
--- /dev/null
+++ b/rust/kernel/io.rs
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory-mapped IO.
+//!
+//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)
+
+use crate::error::{code::EINVAL, Result};
+use crate::{bindings, build_assert};
+
+/// Raw representation of an MMIO region.
+///
+/// By itself, the existence of an instance of this structure does not provide any guarantees that
+/// the represented MMIO region exists or is properly mapped.
+///
+/// Instead, the bus specific MMIO implementation must convert this raw representation into an `Io`
+/// instance providing the actual memory accessors. Guarantees are only given once this conversion
+/// into an `Io` structure has happened.
+pub struct IoRaw<const SIZE: usize = 0> {
+ addr: usize,
+ maxsize: usize,
+}
+
+impl<const SIZE: usize> IoRaw<SIZE> {
+ /// Returns a new `IoRaw` instance on success, an error otherwise.
+ pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
+ if maxsize < SIZE {
+ return Err(EINVAL);
+ }
+
+ Ok(Self { addr, maxsize })
+ }
+
+ /// Returns the base address of the MMIO region.
+ #[inline]
+ pub fn addr(&self) -> usize {
+ self.addr
+ }
+
+ /// Returns the maximum size of the MMIO region.
+ #[inline]
+ pub fn maxsize(&self) -> usize {
+ self.maxsize
+ }
+}
+
+/// IO-mapped memory, starting at the base address @addr and spanning @maxsize bytes.
+///
+/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
+/// mapping, performing an additional region request etc.
+///
+/// # Invariant
+///
+/// `addr` is the start of a valid I/O mapped memory region of size `maxsize`.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use kernel::{bindings, io::{Io, IoRaw}};
+/// # use core::ops::Deref;
+///
+/// // See also [`pci::Bar`] for a real example.
+/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+///
+/// impl<const SIZE: usize> IoMem<SIZE> {
+/// /// # Safety
+/// ///
+/// /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
+/// /// virtual address space.
+/// unsafe fn new(paddr: usize) -> Result<Self> {
+/// // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
+/// // valid for `ioremap`.
+/// let addr = unsafe { bindings::ioremap(paddr as _, SIZE as _) };
+/// if addr.is_null() {
+/// return Err(ENOMEM);
+/// }
+///
+/// Ok(IoMem(IoRaw::new(addr as _, SIZE)?))
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
+/// fn drop(&mut self) {
+/// // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
+/// unsafe { bindings::iounmap(self.0.addr() as _); };
+/// }
+/// }
+///
+/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
+/// type Target = Io<SIZE>;
+///
+/// fn deref(&self) -> &Self::Target {
+/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
+/// unsafe { Io::from_raw(&self.0) }
+/// }
+/// }
+///
+/// # fn no_run() -> Result<(), Error> {
+/// // SAFETY: Invalid usage for example purposes.
+/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
+/// iomem.writel(0x42, 0x0);
+/// assert!(iomem.try_writel(0x42, 0x0).is_ok());
+/// assert!(iomem.try_writel(0x42, 0x4).is_err());
+/// # Ok(())
+/// # }
+/// ```
+#[repr(transparent)]
+pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
+
+macro_rules! define_read {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ /// Read IO data from a given offset known at compile time.
+ ///
+ /// Bound checks are performed at compile time, hence if the offset is not known at compile
+ /// time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, offset: usize) -> $type_name {
+ let addr = self.io_addr_assert::<$type_name>(offset);
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$name(addr as _) }
+ }
+
+ /// Read IO data from a given offset.
+ ///
+ /// Bound checks are performed at runtime; it fails if the offset (plus the type size) is
+ /// out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+ let addr = self.io_addr::<$type_name>(offset)?;
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ Ok(unsafe { bindings::$name(addr as _) })
+ }
+ };
+}
+
+macro_rules! define_write {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ /// Write IO data to a given offset known at compile time.
+ ///
+ /// Bound checks are performed at compile time, hence if the offset is not known at compile
+ /// time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, value: $type_name, offset: usize) {
+ let addr = self.io_addr_assert::<$type_name>(offset);
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$name(value, addr as _) }
+ }
+
+ /// Write IO data to a given offset.
+ ///
+ /// Bound checks are performed at runtime; it fails if the offset (plus the type size) is
+ /// out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+ let addr = self.io_addr::<$type_name>(offset)?;
+
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$name(value, addr as _) }
+ Ok(())
+ }
+ };
+}
+
+impl<const SIZE: usize> Io<SIZE> {
+ /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
+ /// `maxsize`.
+ pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
+ // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
+ unsafe { &*core::ptr::from_ref(raw).cast() }
+ }
+
+ /// Returns the base address of this mapping.
+ #[inline]
+ pub fn addr(&self) -> usize {
+ self.0.addr()
+ }
+
+ /// Returns the maximum size of this mapping.
+ #[inline]
+ pub fn maxsize(&self) -> usize {
+ self.0.maxsize()
+ }
+
+ #[inline]
+ const fn offset_valid<U>(offset: usize, size: usize) -> bool {
+ let type_size = core::mem::size_of::<U>();
+ if let Some(end) = offset.checked_add(type_size) {
+ end <= size && offset % type_size == 0
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ fn io_addr<U>(&self, offset: usize) -> Result<usize> {
+ if !Self::offset_valid::<U>(offset, self.maxsize()) {
+ return Err(EINVAL);
+ }
+
+ // Probably no need to check, since the safety requirements of `Self::new` guarantee that
+ // this can't overflow.
+ self.addr().checked_add(offset).ok_or(EINVAL)
+ }
+
+ #[inline]
+ fn io_addr_assert<U>(&self, offset: usize) -> usize {
+ build_assert!(Self::offset_valid::<U>(offset, SIZE));
+
+ self.addr() + offset
+ }
+
+ define_read!(readb, try_readb, u8);
+ define_read!(readw, try_readw, u16);
+ define_read!(readl, try_readl, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq,
+ try_readq,
+ u64
+ );
+
+ define_read!(readb_relaxed, try_readb_relaxed, u8);
+ define_read!(readw_relaxed, try_readw_relaxed, u16);
+ define_read!(readl_relaxed, try_readl_relaxed, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq_relaxed,
+ try_readq_relaxed,
+ u64
+ );
+
+ define_write!(writeb, try_writeb, u8);
+ define_write!(writew, try_writew, u16);
+ define_write!(writel, try_writel, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq,
+ try_writeq,
+ u64
+ );
+
+ define_write!(writeb_relaxed, try_writeb_relaxed, u8);
+ define_write!(writew_relaxed, try_writew_relaxed, u16);
+ define_write!(writel_relaxed, try_writel_relaxed, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq_relaxed,
+ try_writeq_relaxed,
+ u64
+ );
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 545d1170ee63..496ed32b0911 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -19,6 +19,11 @@
#![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(unsize))]
#![feature(inline_const)]
#![feature(lint_reasons)]
+// Stable in Rust 1.83
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_mut_refs)]
+#![feature(const_ptr_write)]
+#![feature(const_refs_to_cell)]
// Ensure conditional compilation based on the kernel configuration works;
// otherwise we may silently break things like initcall handling.
@@ -37,11 +42,15 @@ pub mod block;
pub mod build_assert;
pub mod cred;
pub mod device;
+pub mod device_id;
+pub mod devres;
+pub mod driver;
pub mod error;
#[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
pub mod firmware;
pub mod fs;
pub mod init;
+pub mod io;
pub mod ioctl;
pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
@@ -50,11 +59,16 @@ pub mod list;
pub mod miscdevice;
#[cfg(CONFIG_NET)]
pub mod net;
+pub mod of;
pub mod page;
+#[cfg(CONFIG_PCI)]
+pub mod pci;
pub mod pid_namespace;
+pub mod platform;
pub mod prelude;
pub mod print;
pub mod rbtree;
+pub mod revocable;
pub mod security;
pub mod seq_file;
pub mod sizes;
@@ -115,6 +129,12 @@ impl<T: Module> InPlaceModule for T {
}
}
+/// Metadata attached to a [`Module`] or [`InPlaceModule`].
+pub trait ModuleMetadata {
+ /// The name of the module as specified in the `module!` macro.
+ const NAME: &'static crate::str::CStr;
+}
+
/// Equivalent to `THIS_MODULE` in the C API.
///
/// C header: [`include/linux/init.h`](srctree/include/linux/init.h)
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index b3a6cc50b240..e14433b2ab9d 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -10,9 +10,12 @@
use crate::{
bindings,
+ device::Device,
error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_int, c_long, c_uint, c_ulong},
+ fs::File,
prelude::*,
+ seq_file::SeqFile,
str::CStr,
types::{ForeignOwnable, Opaque},
};
@@ -80,6 +83,16 @@ impl<T: MiscDevice> MiscDeviceRegistration<T> {
pub fn as_raw(&self) -> *mut bindings::miscdevice {
self.inner.get()
}
+
+ /// Access the `this_device` field.
+ pub fn device(&self) -> &Device {
+ // SAFETY: This can only be called after a successful register(), which always
+ // initialises `this_device` with a valid device. Furthermore, the signature of this
+ // function tells the borrow-checker that the `&Device` reference must not outlive the
+ // `&MiscDeviceRegistration<T>` used to obtain it, so the last use of the reference must be
+ // before the underlying `struct miscdevice` is destroyed.
+ unsafe { Device::as_ref((*self.as_raw()).this_device) }
+ }
}
#[pinned_drop]
@@ -92,17 +105,17 @@ impl<T> PinnedDrop for MiscDeviceRegistration<T> {
/// Trait implemented by the private data of an open misc device.
#[vtable]
-pub trait MiscDevice {
+pub trait MiscDevice: Sized {
/// What kind of pointer should `Self` be wrapped in.
type Ptr: ForeignOwnable + Send + Sync;
/// Called when the misc device is opened.
///
/// The returned pointer will be stored as the private data for the file.
- fn open() -> Result<Self::Ptr>;
+ fn open(_file: &File, _misc: &MiscDeviceRegistration<Self>) -> Result<Self::Ptr>;
/// Called when the misc device is released.
- fn release(device: Self::Ptr) {
+ fn release(device: Self::Ptr, _file: &File) {
drop(device);
}
@@ -113,6 +126,7 @@ pub trait MiscDevice {
/// [`kernel::ioctl`]: mod@crate::ioctl
fn ioctl(
_device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &File,
_cmd: u32,
_arg: usize,
) -> Result<isize> {
@@ -129,11 +143,21 @@ pub trait MiscDevice {
#[cfg(CONFIG_COMPAT)]
fn compat_ioctl(
_device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &File,
_cmd: u32,
_arg: usize,
) -> Result<isize> {
build_error!(VTABLE_DEFAULT_ERROR)
}
+
+ /// Show info for this fd.
+ fn show_fdinfo(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _m: &SeqFile,
+ _file: &File,
+ ) {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
}
const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations {
@@ -161,6 +185,7 @@ const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations {
} else {
None
},
+ show_fdinfo: maybe_fn(T::HAS_SHOW_FDINFO, fops_show_fdinfo::<T>),
// SAFETY: All zeros is a valid value for `bindings::file_operations`.
..unsafe { MaybeUninit::zeroed().assume_init() }
};
@@ -175,21 +200,38 @@ const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations {
/// The file must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn fops_open<T: MiscDevice>(
inode: *mut bindings::inode,
- file: *mut bindings::file,
+ raw_file: *mut bindings::file,
) -> c_int {
// SAFETY: The pointers are valid and for a file being opened.
- let ret = unsafe { bindings::generic_file_open(inode, file) };
+ let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
if ret != 0 {
return ret;
}
- let ptr = match T::open() {
+ // SAFETY: The open call of a file can access the private data.
+ let misc_ptr = unsafe { (*raw_file).private_data };
+
+ // SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
+ // associated `struct miscdevice` before calling into this method. Furthermore, `misc_open()`
+ // ensures that the miscdevice can't be unregistered and freed during this call to `fops_open`.
+ let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
+
+ // SAFETY:
+ // * This underlying file is valid for (much longer than) the duration of `T::open`.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(raw_file) };
+
+ let ptr = match T::open(file, misc) {
Ok(ptr) => ptr,
Err(err) => return err.to_errno(),
};
- // SAFETY: The open call of a file owns the private data.
- unsafe { (*file).private_data = ptr.into_foreign() };
+ // This overwrites the private data with the value specified by the user, changing the type of
+ // this file's private data. All future accesses to the private data are performed by other
+ // fops_* methods in this file, which all correctly cast the private data to the new type.
+ //
+ // SAFETY: The open call of a file can access the private data.
+ unsafe { (*raw_file).private_data = ptr.into_foreign() };
0
}
@@ -207,7 +249,10 @@ unsafe extern "C" fn fops_release<T: MiscDevice>(
// SAFETY: The release call of a file owns the private data.
let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
- T::release(ptr);
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ T::release(ptr, unsafe { File::from_raw_file(file) });
0
}
@@ -225,7 +270,12 @@ unsafe extern "C" fn fops_ioctl<T: MiscDevice>(
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
- match T::ioctl(device, cmd, arg) {
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::ioctl(device, file, cmd, arg) {
Ok(ret) => ret as c_long,
Err(err) => err.to_errno() as c_long,
}
@@ -245,8 +295,36 @@ unsafe extern "C" fn fops_compat_ioctl<T: MiscDevice>(
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
- match T::compat_ioctl(device, cmd, arg) {
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::compat_ioctl(device, file, cmd, arg) {
Ok(ret) => ret as c_long,
Err(err) => err.to_errno() as c_long,
}
}
+
+/// # Safety
+///
+/// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+/// - `seq_file` must be a valid `struct seq_file` that we can write to.
+unsafe extern "C" fn fops_show_fdinfo<T: MiscDevice>(
+ seq_file: *mut bindings::seq_file,
+ file: *mut bindings::file,
+) {
+ // SAFETY: The show_fdinfo call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(seq_file) };
+
+ T::show_fdinfo(device, m, file);
+}
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
new file mode 100644
index 000000000000..04f2d8ef29cb
--- /dev/null
+++ b/rust/kernel/of.rs
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Device Tree / Open Firmware abstractions.
+
+use crate::{bindings, device_id::RawDeviceId, prelude::*};
+
+/// IdTable type for OF drivers.
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// An open firmware device id.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::of_device_id);
+
+// SAFETY:
+// * `DeviceId` is a `#[repr(transparent)]` wrapper of `struct of_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+// * `DRIVER_DATA_OFFSET` is the offset to the `data` field.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::of_device_id;
+
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::of_device_id, data);
+
+ fn index(&self) -> usize {
+ self.0.data as _
+ }
+}
+
+impl DeviceId {
+ /// Create a new device id from an OF 'compatible' string.
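+ ///
+ /// # Examples
+ ///
+ /// A sketch with a made-up compatible string:
+ ///
+ /// ```ignore
+ /// let id = kernel::of::DeviceId::new(kernel::c_str!("test,device"));
+ /// ```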
+ pub const fn new(compatible: &'static CStr) -> Self {
+ let src = compatible.as_bytes_with_nul();
+ // Replace with `bindings::of_device_id::default()` once stabilized for `const`.
+ // SAFETY: FFI type is valid to be zero-initialized.
+ let mut of: bindings::of_device_id = unsafe { core::mem::zeroed() };
+
+ // TODO: Use `clone_from_slice` once the corresponding types do match.
+ let mut i = 0;
+ while i < src.len() {
+ of.compatible[i] = src[i] as _;
+ i += 1;
+ }
+
+ Self(of)
+ }
+}
+
+/// Create an OF `IdTable` with an "alias" for modpost.
+#[macro_export]
+macro_rules! of_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::of::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("of", $module_table_name, $table_name);
+ };
+}
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
new file mode 100644
index 000000000000..4c98b5b9aa1e
--- /dev/null
+++ b/rust/kernel/pci.rs
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for the PCI bus.
+//!
+//! C header: [`include/linux/pci.h`](srctree/include/linux/pci.h)
+
+use crate::{
+ alloc::flags::*,
+ bindings, container_of, device,
+ device_id::RawDeviceId,
+ devres::Devres,
+ driver,
+ error::{to_result, Result},
+ io::Io,
+ io::IoRaw,
+ str::CStr,
+ types::{ARef, ForeignOwnable, Opaque},
+ ThisModule,
+};
+use core::{ops::Deref, ptr::addr_of_mut};
+use kernel::prelude::*;
+
+/// An adapter for the registration of PCI drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::pci_driver;
+
+ unsafe fn register(
+ pdrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: It's safe to set the fields of `struct pci_driver` on initialization.
+ unsafe {
+ (*pdrv.get()).name = name.as_char_ptr();
+ (*pdrv.get()).probe = Some(Self::probe_callback);
+ (*pdrv.get()).remove = Some(Self::remove_callback);
+ (*pdrv.get()).id_table = T::ID_TABLE.as_ptr();
+ }
+
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe {
+ bindings::__pci_register_driver(pdrv.get(), module.0, name.as_char_ptr())
+ })
+ }
+
+ unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::pci_unregister_driver(pdrv.get()) }
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(
+ pdev: *mut bindings::pci_dev,
+ id: *const bindings::pci_device_id,
+ ) -> kernel::ffi::c_int {
+ // SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
+ // `struct pci_dev`.
+ let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) };
+ // SAFETY: `dev` is guaranteed to be embedded in a valid `struct pci_dev` by the call
+ // above.
+ let mut pdev = unsafe { Device::from_dev(dev) };
+
+ // SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `struct pci_device_id` and
+ // does not add additional invariants, so it's safe to transmute.
+ let id = unsafe { &*id.cast::<DeviceId>() };
+ let info = T::ID_TABLE.info(id.index());
+
+ match T::probe(&mut pdev, info) {
+ Ok(data) => {
+ // Let the `struct pci_dev` own a reference of the driver's private data.
+ // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
+ // `struct pci_dev`.
+ unsafe { bindings::pci_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
+ }
+ Err(err) => return Error::to_errno(err),
+ }
+
+ 0
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::pci_dev) {
+ // SAFETY: The PCI bus only ever calls the remove callback with a valid pointer to a
+ // `struct pci_dev`.
+ let ptr = unsafe { bindings::pci_get_drvdata(pdev) };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
+ // `KBox<T>` pointer created through `KBox::into_foreign`.
+ let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ }
+}
+
+/// Declares a kernel module that exposes a single PCI driver.
+///
+/// # Example
+///
+/// ```ignore
+/// kernel::module_pci_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// author: "Author name",
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_pci_driver {
+($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::pci::Adapter<T>, { $($f)* });
+};
+}
+
+/// Abstraction for bindings::pci_device_id.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct DeviceId(bindings::pci_device_id);
+
+impl DeviceId {
+ const PCI_ANY_ID: u32 = !0;
+
+ /// Equivalent to C's `PCI_DEVICE` macro.
+ ///
+ /// Create a new `pci::DeviceId` from a vendor and device ID number.
+ pub const fn from_id(vendor: u32, device: u32) -> Self {
+ Self(bindings::pci_device_id {
+ vendor,
+ device,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class: 0,
+ class_mask: 0,
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
+
+ /// Equivalent to C's `PCI_DEVICE_CLASS` macro.
+ ///
+ /// Create a new `pci::DeviceId` from a class number and mask.
+ pub const fn from_class(class: u32, class_mask: u32) -> Self {
+ Self(bindings::pci_device_id {
+ vendor: DeviceId::PCI_ANY_ID,
+ device: DeviceId::PCI_ANY_ID,
+ subvendor: DeviceId::PCI_ANY_ID,
+ subdevice: DeviceId::PCI_ANY_ID,
+ class,
+ class_mask,
+ driver_data: 0,
+ override_only: 0,
+ })
+ }
+}
+
+// SAFETY:
+// * `DeviceId` is a `#[repr(transparent)]` wrapper of `pci_device_id` and does not add
+// additional invariants, so it's safe to transmute to `RawType`.
+// * `DRIVER_DATA_OFFSET` is the offset to the `driver_data` field.
+unsafe impl RawDeviceId for DeviceId {
+ type RawType = bindings::pci_device_id;
+
+ const DRIVER_DATA_OFFSET: usize = core::mem::offset_of!(bindings::pci_device_id, driver_data);
+
+ fn index(&self) -> usize {
+ self.0.driver_data as _
+ }
+}
+
+/// IdTable type for PCI
+pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
+
+/// Create a PCI `IdTable` with its alias for modpost.
+#[macro_export]
+macro_rules! pci_device_table {
+ ($table_name:ident, $module_table_name:ident, $id_info_type: ty, $table_data: expr) => {
+ const $table_name: $crate::device_id::IdArray<
+ $crate::pci::DeviceId,
+ $id_info_type,
+ { $table_data.len() },
+ > = $crate::device_id::IdArray::new($table_data);
+
+ $crate::module_device_table!("pci", $module_table_name, $table_name);
+ };
+}
+
+/// The PCI driver trait.
+///
+/// # Example
+///
+/// ```
+/// # use kernel::{bindings, pci};
+///
+/// struct MyDriver;
+///
+/// kernel::pci_device_table!(
+/// PCI_TABLE,
+/// MODULE_PCI_TABLE,
+/// <MyDriver as pci::Driver>::IdInfo,
+/// [
+/// (pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as _), ())
+/// ]
+/// );
+///
+/// impl pci::Driver for MyDriver {
+/// type IdInfo = ();
+/// const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
+///
+/// fn probe(
+/// _pdev: &mut pci::Device,
+/// _id_info: &Self::IdInfo,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// Err(ENODEV)
+/// }
+/// }
+/// ```
+/// Drivers must implement this trait in order to get a PCI driver registered. Please refer to the
+/// `Adapter` documentation for an example.
+pub trait Driver {
+ /// The type holding information about each device id supported by the driver.
+ ///
+ /// TODO: Use associated_type_defaults once stabilized:
+ ///
+ /// type IdInfo: 'static = ();
+ type IdInfo: 'static;
+
+ /// The table of device ids supported by the driver.
+ const ID_TABLE: IdTable<Self::IdInfo>;
+
+ /// PCI driver probe.
+ ///
+ /// Called when a new PCI device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &mut Device, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The PCI device representation.
+///
+/// A PCI device is based on an always reference counted `device::Device` instance. Cloning a PCI
+/// device hence also increments the base device's reference count.
+///
+/// # Invariants
+///
+/// `Device` holds a valid reference of `ARef<device::Device>` whose underlying `struct device` is a
+/// member of a `struct pci_dev`.
+#[derive(Clone)]
+pub struct Device(ARef<device::Device>);
+
+/// A PCI BAR to perform I/O operations on.
+///
+/// # Invariants
+///
+/// `Bar` always holds an `IoRaw` instance that holds a valid pointer to the start of the I/O
+/// memory mapped PCI bar and its size.
+pub struct Bar<const SIZE: usize = 0> {
+ pdev: Device,
+ io: IoRaw<SIZE>,
+ num: i32,
+}
+
+impl<const SIZE: usize> Bar<SIZE> {
+ fn new(pdev: Device, num: u32, name: &CStr) -> Result<Self> {
+ let len = pdev.resource_len(num)?;
+ if len == 0 {
+ return Err(ENOMEM);
+ }
+
+ // Convert to `i32`, since that's what all the C bindings use.
+ let num = i32::try_from(num)?;
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ret = unsafe { bindings::pci_request_region(pdev.as_raw(), num, name.as_char_ptr()) };
+ if ret != 0 {
+ return Err(EBUSY);
+ }
+
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ // `name` is always valid.
+ let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
+ if ioptr == 0 {
+ // SAFETY:
+ // `pdev` valid by the invariants of `Device`.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
+ return Err(ENOMEM);
+ }
+
+ let io = match IoRaw::new(ioptr, len as usize) {
+ Ok(io) => io,
+ Err(err) => {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
+ // `num` is checked for validity by a previous call to `Device::resource_len`.
+ unsafe { Self::do_release(&pdev, ioptr, num) };
+ return Err(err);
+ }
+ };
+
+ Ok(Bar { pdev, io, num })
+ }
+
+ /// # Safety
+ ///
+ /// `ioptr` must be a valid pointer to the memory mapped PCI bar number `num`.
+ unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
+ // SAFETY:
+ // `pdev` is valid by the invariants of `Device`.
+ // `ioptr` is valid by the safety requirements.
+ // `num` is valid by the safety requirements.
+ unsafe {
+ bindings::pci_iounmap(pdev.as_raw(), ioptr as _);
+ bindings::pci_release_region(pdev.as_raw(), num);
+ }
+ }
+
+ fn release(&self) {
+ // SAFETY: The safety requirements are guaranteed by the type invariant of `self.pdev`.
+ unsafe { Self::do_release(&self.pdev, self.io.addr(), self.num) };
+ }
+}
+
+impl Bar {
+ fn index_is_valid(index: u32) -> bool {
+ // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
+ index < bindings::PCI_NUM_RESOURCES
+ }
+}
+
+impl<const SIZE: usize> Drop for Bar<SIZE> {
+ fn drop(&mut self) {
+ self.release();
+ }
+}
+
+impl<const SIZE: usize> Deref for Bar<SIZE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
+ unsafe { Io::from_raw(&self.io) }
+ }
+}
+
+impl Device {
+ /// Create a PCI Device instance from an existing `device::Device`.
+ ///
+ /// # Safety
+ ///
+ /// `dev` must be an `ARef<device::Device>` whose underlying `bindings::device` is a member of
+ /// a `bindings::pci_dev`.
+ pub unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
+ Self(dev)
+ }
+
+ fn as_raw(&self) -> *mut bindings::pci_dev {
+ // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
+ // embedded in `struct pci_dev`.
+ unsafe { container_of!(self.0.as_raw(), bindings::pci_dev, dev) as _ }
+ }
+
+ /// Returns the PCI vendor ID.
+ pub fn vendor_id(&self) -> u16 {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ unsafe { (*self.as_raw()).vendor }
+ }
+
+ /// Returns the PCI device ID.
+ pub fn device_id(&self) -> u16 {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ unsafe { (*self.as_raw()).device }
+ }
+
+ /// Enable memory resources for this device.
+ pub fn enable_device_mem(&self) -> Result {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ let ret = unsafe { bindings::pci_enable_device_mem(self.as_raw()) };
+ if ret != 0 {
+ Err(Error::from_errno(ret))
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Enable bus-mastering for this device.
+ pub fn set_master(&self) {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ unsafe { bindings::pci_set_master(self.as_raw()) };
+ }
+
+ /// Returns the size of the given PCI bar resource.
+ pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
+ if !Bar::index_is_valid(bar) {
+ return Err(EINVAL);
+ }
+
+ // SAFETY:
+ // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
+ // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
+ Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
+ }
+
+ /// Maps an entire PCI BAR after performing a region request on it. I/O operation bound checks
+ /// can be performed at compile time for offsets (plus the requested type size) < SIZE.
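+ ///
+ /// # Examples
+ ///
+ /// A sketch from a driver's `probe` context; the BAR index, the size and the region name are
+ /// made up:
+ ///
+ /// ```ignore
+ /// let bar = pdev.iomap_region_sized::<0x100>(0, c_str!("my_driver"))?;
+ /// let bar = bar.try_access().ok_or(ENXIO)?;
+ /// let val = bar.readl(0x0);
+ /// ```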
+ pub fn iomap_region_sized<const SIZE: usize>(
+ &self,
+ bar: u32,
+ name: &CStr,
+ ) -> Result<Devres<Bar<SIZE>>> {
+ let bar = Bar::<SIZE>::new(self.clone(), bar, name)?;
+ let devres = Devres::new(self.as_ref(), bar, GFP_KERNEL)?;
+
+ Ok(devres)
+ }
+
+ /// Maps an entire PCI BAR after performing a region request on it.
+ pub fn iomap_region(&self, bar: u32, name: &CStr) -> Result<Devres<Bar>> {
+ self.iomap_region_sized::<0>(bar, name)
+ }
+}
+
+impl AsRef<device::Device> for Device {
+ fn as_ref(&self) -> &device::Device {
+ &self.0
+ }
+}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
new file mode 100644
index 000000000000..50e6b0421813
--- /dev/null
+++ b/rust/kernel/platform.rs
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstractions for the platform bus.
+//!
+//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
+
+use crate::{
+ bindings, container_of, device, driver,
+ error::{to_result, Result},
+ of,
+ prelude::*,
+ str::CStr,
+ types::{ARef, ForeignOwnable, Opaque},
+ ThisModule,
+};
+
+use core::ptr::addr_of_mut;
+
+/// An adapter for the registration of platform drivers.
+pub struct Adapter<T: Driver>(T);
+
+// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// a preceding call to `register` has been successful.
+unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
+ type RegType = bindings::platform_driver;
+
+ unsafe fn register(
+ pdrv: &Opaque<Self::RegType>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ let of_table = match T::OF_ID_TABLE {
+ Some(table) => table.as_ptr(),
+ None => core::ptr::null(),
+ };
+
+ // SAFETY: It's safe to set the fields of `struct platform_driver` on initialization.
+ unsafe {
+ (*pdrv.get()).driver.name = name.as_char_ptr();
+ (*pdrv.get()).probe = Some(Self::probe_callback);
+ (*pdrv.get()).remove = Some(Self::remove_callback);
+ (*pdrv.get()).driver.of_match_table = of_table;
+ }
+
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ to_result(unsafe { bindings::__platform_driver_register(pdrv.get(), module.0) })
+ }
+
+ unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe { bindings::platform_driver_unregister(pdrv.get()) };
+ }
+}
+
+impl<T: Driver + 'static> Adapter<T> {
+ extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> kernel::ffi::c_int {
+ // SAFETY: The platform bus only ever calls the probe callback with a valid `pdev`.
+ let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) };
+ // SAFETY: `dev` is guaranteed to be embedded in a valid `struct platform_device` by the
+ // call above.
+ let mut pdev = unsafe { Device::from_dev(dev) };
+
+ let info = <Self as driver::Adapter>::id_info(pdev.as_ref());
+ match T::probe(&mut pdev, info) {
+ Ok(data) => {
+ // Let the `struct platform_device` own a reference of the driver's private data.
+ // SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
+ // `struct platform_device`.
+ unsafe { bindings::platform_set_drvdata(pdev.as_raw(), data.into_foreign() as _) };
+ }
+ Err(err) => return Error::to_errno(err),
+ }
+
+ 0
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::platform_device) {
+ // SAFETY: `pdev` is a valid pointer to a `struct platform_device`.
+ let ptr = unsafe { bindings::platform_get_drvdata(pdev) };
+
+ // SAFETY: `remove_callback` is only ever called after a successful call to
+ // `probe_callback`, hence it's guaranteed that `ptr` points to a valid and initialized
+ // `KBox<T>` pointer created through `KBox::into_foreign`.
+ let _ = unsafe { KBox::<T>::from_foreign(ptr) };
+ }
+}
+
+impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
+ type IdInfo = T::IdInfo;
+
+ fn of_id_table() -> Option<of::IdTable<Self::IdInfo>> {
+ T::OF_ID_TABLE
+ }
+}
+
+/// Declares a kernel module that exposes a single platform driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::module_platform_driver! {
+/// type: MyDriver,
+/// name: "Module name",
+/// author: "Author name",
+/// description: "Description",
+/// license: "GPL v2",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_platform_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+ };
+}
+
+/// The platform driver trait.
+///
+/// Drivers must implement this trait in order to get a platform driver registered.
+///
+/// # Example
+///
+/// ```
+/// # use kernel::{bindings, c_str, of, platform};
+///
+/// struct MyDriver;
+///
+/// kernel::of_device_table!(
+/// OF_TABLE,
+/// MODULE_OF_TABLE,
+/// <MyDriver as platform::Driver>::IdInfo,
+/// [
+/// (of::DeviceId::new(c_str!("test,device")), ())
+/// ]
+/// );
+///
+/// impl platform::Driver for MyDriver {
+/// type IdInfo = ();
+/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+///
+/// fn probe(
+/// _pdev: &mut platform::Device,
+/// _id_info: Option<&Self::IdInfo>,
+/// ) -> Result<Pin<KBox<Self>>> {
+/// Err(ENODEV)
+/// }
+/// }
+/// ```
+pub trait Driver {
+ /// The type holding driver private data about each device id supported by the driver.
+ ///
+ /// TODO: Use associated_type_defaults once stabilized:
+ ///
+ /// type IdInfo: 'static = ();
+ type IdInfo: 'static;
+
+ /// The table of OF device ids supported by the driver.
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>>;
+
+ /// Platform driver probe.
+ ///
+ /// Called when a new platform device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>>;
+}
+
+/// The platform device representation.
+///
+/// A platform device is based on an always reference counted `device::Device` instance. Cloning a
+/// platform device hence also increments the base device's reference count.
+///
+/// # Invariants
+///
+/// `Device` holds a valid reference of `ARef<device::Device>` whose underlying `struct device` is a
+/// member of a `struct platform_device`.
+#[derive(Clone)]
+pub struct Device(ARef<device::Device>);
+
+impl Device {
+ /// Convert a raw kernel device into a `Device`
+ ///
+ /// # Safety
+ ///
+ /// `dev` must be an `ARef<device::Device>` whose underlying `bindings::device` is a member of a
+ /// `bindings::platform_device`.
+ unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
+ Self(dev)
+ }
+
+ fn as_raw(&self) -> *mut bindings::platform_device {
+ // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
+ // embedded in `struct platform_device`.
+ unsafe { container_of!(self.0.as_raw(), bindings::platform_device, dev) }.cast_mut()
+ }
+}
+
+impl AsRef<device::Device> for Device {
+ fn as_ref(&self) -> &device::Device {
+ &self.0
+ }
+}
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
new file mode 100644
index 000000000000..1e5a9d25c21b
--- /dev/null
+++ b/rust/kernel/revocable.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Revocable objects.
+//!
+//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
+//! of a [`RevocableGuard`] ensures that objects remain valid.
+
+use crate::{bindings, prelude::*, sync::rcu, types::Opaque};
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::drop_in_place,
+ sync::atomic::{AtomicBool, Ordering},
+};
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Same example as above, but explicitly using the RCU read side lock.
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+/// use kernel::sync::rcu;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = rcu::read_lock();
+/// let e = v.try_access_with_guard(&guard)?;
+/// Some(e.a + e.b)
+/// }
+///
+/// let v = KBox::pin_init(Revocable::new(Example { a: 10, b: 20 }), GFP_KERNEL).unwrap();
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+#[pin_data(PinnedDrop)]
+pub struct Revocable<T> {
+ is_available: AtomicBool,
+ #[pin]
+ data: Opaque<T>,
+}
+
+// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
+// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
+// this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for Revocable<T> {}
+
+// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
+// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
+// implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
+
+impl<T> Revocable<T> {
+ /// Creates a new revocable instance of the given data.
+ pub fn new(data: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ is_available: AtomicBool::new(true),
+ data <- Opaque::pin_init(data),
+ })
+ }
+
+ /// Tries to access the revocable wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
+ /// because another CPU may be waiting to complete the revocation of this object.
+ pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+ let guard = rcu::read_lock();
+ if self.is_available.load(Ordering::Relaxed) {
+ // Since `self.is_available` is true, data is initialised and has to remain valid
+ // because the RCU read side lock prevents it from being dropped.
+ Some(RevocableGuard::new(self.data.get(), guard))
+ } else {
+ None
+ }
+ }
+
+ /// Tries to access the revocable wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a shared reference to the object otherwise; the object is guaranteed to
+ /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
+ /// allowed to sleep because another CPU may be waiting to complete the revocation of this
+ /// object.
+ pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { &*self.data.get() })
+ } else {
+ None
+ }
+ }
+
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+ unsafe fn revoke_internal<const SYNC: bool>(&self) {
+ if self.is_available.swap(false, Ordering::Relaxed) {
+ if SYNC {
+ // SAFETY: Just an FFI call, there are no further requirements.
+ unsafe { bindings::synchronize_rcu() };
+ }
+
+ // SAFETY: We know `self.data` is valid because only one CPU can succeed the
+ // `swap` above that takes `is_available` from `true` to `false`.
+ unsafe { drop_in_place(self.data.get()) };
+ }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
+ /// expecting that there are no concurrent users of the object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+ pub unsafe fn revoke_nosync(&self) {
+ // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
+ // accessing the data anymore and hence we don't have to wait for the grace period to
+ // finish.
+ unsafe { self.revoke_internal::<false>() }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`].
+ ///
+ /// If there are concurrent users of the object (i.e., ones that called
+ /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
+ /// function waits for the concurrent access to complete before dropping the wrapped object.
+ pub fn revoke(&self) {
+ // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
+ // finish.
+ unsafe { self.revoke_internal::<true>() }
+ }
+}
+
+#[pinned_drop]
+impl<T> PinnedDrop for Revocable<T> {
+ fn drop(self: Pin<&mut Self>) {
+ // Drop only if the data hasn't been revoked yet (in which case it has already been
+ // dropped).
+ // SAFETY: We are not moving out of `p`, only dropping in place
+ let p = unsafe { self.get_unchecked_mut() };
+ if *p.is_available.get_mut() {
+ // SAFETY: We know `self.data` is valid because no other CPU has changed
+ // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
+ // holds the only reference (mutable) to `self` now.
+ unsafe { drop_in_place(p.data.get()) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
+/// holding the RCU read-side lock.
+///
+/// # Invariants
+///
+/// The RCU read-side lock is held while the guard is alive.
+pub struct RevocableGuard<'a, T> {
+ data_ref: *const T,
+ _rcu_guard: rcu::Guard,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<T> RevocableGuard<'_, T> {
+ fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
+ Self {
+ data_ref,
+ _rcu_guard: rcu_guard,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T> Deref for RevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is
+ // guaranteed to remain valid.
+ unsafe { &*self.data_ref }
+ }
+}
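For readers more used to the C side, the following illustrative sketch (not part of the patch; names hypothetical) shows the plain RCU pattern that `Revocable` packages behind a safe interface: readers work under the RCU read-side lock and must not sleep, while the revoker unpublishes the pointer, waits for a grace period, and only then frees the object.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example { int a, b; };
static struct example __rcu *gp;	/* hypothetical RCU-protected pointer */

/* Analogous to Revocable::try_access(): read under the RCU lock, no sleeping. */
static int add_two(void)
{
	struct example *p;
	int sum = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		sum = p->a + p->b;
	rcu_read_unlock();
	return sum;
}

/* Analogous to Revocable::revoke(): unpublish, wait for readers, then free. */
static void revoke_example(void)
{
	struct example *p = rcu_replace_pointer(gp, NULL, true);

	synchronize_rcu();	/* wait for concurrent readers to finish */
	kfree(p);
}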
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index dffdaad972ce..3498fb344dc9 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -12,6 +12,7 @@ mod condvar;
pub mod lock;
mod locked_by;
pub mod poll;
+pub mod rcu;
pub use arc::{Arc, ArcBorrow, UniqueArc};
pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs
new file mode 100644
index 000000000000..b51d9150ffe2
--- /dev/null
+++ b/rust/kernel/sync/rcu.rs
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! RCU support.
+//!
+//! C header: [`include/linux/rcupdate.h`](srctree/include/linux/rcupdate.h)
+
+use crate::{bindings, types::NotThreadSafe};
+
+/// Evidence that the RCU read side lock is held on the current thread/CPU.
+///
+/// The type is explicitly not `Send` because this property is per-thread/CPU.
+///
+/// # Invariants
+///
+/// The RCU read side lock is actually held while instances of this guard exist.
+pub struct Guard(NotThreadSafe);
+
+impl Guard {
+ /// Acquires the RCU read side lock and returns a guard.
+ pub fn new() -> Self {
+ // SAFETY: An FFI call with no additional requirements.
+ unsafe { bindings::rcu_read_lock() };
+ // INVARIANT: The RCU read side lock was just acquired above.
+ Self(NotThreadSafe)
+ }
+
+ /// Explicitly releases the RCU read side lock.
+ pub fn unlock(self) {}
+}
+
+impl Default for Guard {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Drop for Guard {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, the RCU read side is locked, so it is ok to unlock it.
+ unsafe { bindings::rcu_read_unlock() };
+ }
+}
+
+/// Acquires the RCU read side lock.
+pub fn read_lock() -> Guard {
+ Guard::new()
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 0dfaf45a755c..2bbaab83b9d6 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -326,6 +326,17 @@ impl<T> Opaque<T> {
}
}
+ /// Create an opaque pin-initializer from the given pin-initializer.
+ pub fn pin_init(slot: impl PinInit<T>) -> impl PinInit<Self> {
+ Self::ffi_init(|ptr: *mut T| {
+ // SAFETY:
+ // - `ptr` is a valid pointer to uninitialized memory,
+ // - `slot` is not accessed on error; the call is infallible,
+ // - `slot` is pinned in memory.
+ let _ = unsafe { init::PinInit::<T>::__pinned_init(slot, ptr) };
+ })
+ }
+
/// Creates a pin-initializer from the given initializer closure.
///
/// The returned initializer calls the given closure with the pointer to the inner `T` of this
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 2587f41b0d39..cdf94f4982df 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -228,6 +228,10 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
kernel::ThisModule::from_ptr(core::ptr::null_mut())
}};
+ impl kernel::ModuleMetadata for {type_} {{
+ const NAME: &'static kernel::str::CStr = kernel::c_str!(\"{name}\");
+ }}
+
// Double nested modules, since then nobody can access the public items inside.
mod __module_init {{
mod __module_init {{
diff --git a/samples/Kconfig b/samples/Kconfig
index b288d9991d27..820e00b2ed68 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -291,8 +291,19 @@ config SAMPLE_CGROUP
help
Build samples that demonstrate the usage of the cgroup API.
+config SAMPLE_CHECK_EXEC
+ bool "Exec secure bits examples"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build a tool to easily configure SECBIT_EXEC_RESTRICT_FILE and
+ SECBIT_EXEC_DENY_INTERACTIVE, and a simple script interpreter to
+ demonstrate how they should be used with execveat(2) +
+ AT_EXECVE_CHECK.
+
source "samples/rust/Kconfig"
+source "samples/damon/Kconfig"
+
endif # SAMPLES
config HAVE_SAMPLE_FTRACE_DIRECT
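To make the help text above concrete, here is a minimal sketch (not part of the patch; error handling elided, function name hypothetical) of the core check an interpreter performs: it asks the kernel, via execveat(2) with AT_EXECVE_CHECK, whether an already-open script file would be allowed to execute, and then enforces the result according to the securebits it inherited.

#define _GNU_SOURCE
#include <linux/fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 0 if the kernel would allow executing the already-open script. */
static int check_script_fd(int script_fd, char *const argv[], char *const envp[])
{
	return syscall(__NR_execveat, script_fd, "", argv, envp,
		       AT_EMPTY_PATH | AT_EXECVE_CHECK);
}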
diff --git a/samples/Makefile b/samples/Makefile
index b85fa64390c5..f24cd0d72dd0 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -3,6 +3,7 @@
subdir-$(CONFIG_SAMPLE_AUXDISPLAY) += auxdisplay
subdir-$(CONFIG_SAMPLE_ANDROID_BINDERFS) += binderfs
+subdir-$(CONFIG_SAMPLE_CHECK_EXEC) += check-exec
subdir-$(CONFIG_SAMPLE_CGROUP) += cgroup
obj-$(CONFIG_SAMPLE_CONFIGFS) += configfs/
obj-$(CONFIG_SAMPLE_CONNECTOR) += connector/
@@ -39,3 +40,5 @@ obj-$(CONFIG_SAMPLE_KMEMLEAK) += kmemleak/
obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/
obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
obj-$(CONFIG_SAMPLES_RUST) += rust/
+obj-$(CONFIG_SAMPLE_DAMON_WSSE) += damon/
+obj-$(CONFIG_SAMPLE_DAMON_PRCL) += damon/
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 96a05e70ace3..dd9944a97b7e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -123,7 +123,7 @@ always-y += ibumad_kern.o
always-y += hbm_out_kern.o
always-y += hbm_edt_kern.o
-TPROGS_CFLAGS = $(TPROGS_USER_CFLAGS)
+COMMON_CFLAGS = $(TPROGS_USER_CFLAGS)
TPROGS_LDFLAGS = $(TPROGS_USER_LDFLAGS)
ifeq ($(ARCH), arm)
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
index af29a1bde4e4..3c36c25d9902 100644
--- a/samples/bpf/xdp2skb_meta_kern.c
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -63,7 +63,6 @@ SEC("tc_mark")
int _tc_mark(struct __sk_buff *ctx)
{
void *data = (void *)(unsigned long)ctx->data;
- void *data_end = (void *)(unsigned long)ctx->data_end;
void *data_meta = (void *)(unsigned long)ctx->data_meta;
struct meta_info *meta = data_meta;
diff --git a/samples/check-exec/.gitignore b/samples/check-exec/.gitignore
new file mode 100644
index 000000000000..cd759a19dacd
--- /dev/null
+++ b/samples/check-exec/.gitignore
@@ -0,0 +1,2 @@
+/inc
+/set-exec
diff --git a/samples/check-exec/Makefile b/samples/check-exec/Makefile
new file mode 100644
index 000000000000..c4f08ad0f8e3
--- /dev/null
+++ b/samples/check-exec/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+userprogs-always-y := \
+ inc \
+ set-exec
+
+userccflags += -I usr/include
+
+.PHONY: all clean
+
+all:
+ $(MAKE) -C ../.. samples/check-exec/
+
+clean:
+ $(MAKE) -C ../.. M=samples/check-exec/ clean
diff --git a/samples/check-exec/inc.c b/samples/check-exec/inc.c
new file mode 100644
index 000000000000..7f6ef06a2f06
--- /dev/null
+++ b/samples/check-exec/inc.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Very simple script interpreter that can evaluate two different commands (one
+ * per line):
+ * - "?" to initialize a counter from user's input;
+ * - "+" to increment the counter (which is set to 0 by default).
+ *
+ * See tools/testing/selftests/exec/check-exec-tests.sh and
+ * Documentation/userspace-api/check_exec.rst
+ *
+ * Copyright © 2024 Microsoft Corporation
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/fcntl.h>
+#include <linux/prctl.h>
+#include <linux/securebits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+static int sys_execveat(int dirfd, const char *pathname, char *const argv[],
+ char *const envp[], int flags)
+{
+ return syscall(__NR_execveat, dirfd, pathname, argv, envp, flags);
+}
+
+/* Returns 1 on error, 0 otherwise. */
+static int interpret_buffer(char *buffer, size_t buffer_size)
+{
+ char *line, *saveptr = NULL;
+ long long number = 0;
+
+ /* Each command is the first character of a line. */
+ saveptr = NULL;
+ line = strtok_r(buffer, "\n", &saveptr);
+ while (line) {
+ if (*line != '#' && strlen(line) != 1) {
+ fprintf(stderr, "# ERROR: Unknown string\n");
+ return 1;
+ }
+ switch (*line) {
+ case '#':
+ /* Skips shebang and comments. */
+ break;
+ case '+':
+ /* Increments and prints the number. */
+ number++;
+ printf("%lld\n", number);
+ break;
+ case '?':
+ /* Reads integer from stdin. */
+ fprintf(stderr, "> Enter new number: \n");
+ if (scanf("%lld", &number) != 1) {
+ fprintf(stderr,
+ "# WARNING: Failed to read number from stdin\n");
+ }
+ break;
+ default:
+ fprintf(stderr, "# ERROR: Unknown character '%c'\n",
+ *line);
+ return 1;
+ }
+ line = strtok_r(NULL, "\n", &saveptr);
+ }
+ return 0;
+}
+
+/* Returns 1 on error, 0 otherwise. */
+static int interpret_stream(FILE *script, char *const script_name,
+ char *const *const envp, const bool restrict_stream)
+{
+ int err;
+ char *const script_argv[] = { script_name, NULL };
+ char buf[128] = {};
+ size_t buf_size = sizeof(buf);
+
+ /*
+ * We pass a valid argv and envp to the kernel to emulate a native
+ * script execution. We must use the script file descriptor instead of
+ * the script path name to avoid race conditions.
+ */
+ err = sys_execveat(fileno(script), "", script_argv, envp,
+ AT_EMPTY_PATH | AT_EXECVE_CHECK);
+ if (err && restrict_stream) {
+ perror("ERROR: Script execution check");
+ return 1;
+ }
+
+ /* Reads script. */
+ buf_size = fread(buf, 1, buf_size - 1, script);
+ return interpret_buffer(buf, buf_size);
+}
+
+static void print_usage(const char *argv0)
+{
+ fprintf(stderr, "usage: %s <script.inc> | -i | -c <command>\n\n",
+ argv0);
+ fprintf(stderr, "Example:\n");
+ fprintf(stderr, " ./set-exec -fi -- ./inc -i < script-exec.inc\n");
+}
+
+int main(const int argc, char *const argv[], char *const *const envp)
+{
+ int opt;
+ char *cmd = NULL;
+ char *script_name = NULL;
+ bool interpret_stdin = false;
+ FILE *script_file = NULL;
+ int secbits;
+ bool deny_interactive, restrict_file;
+ size_t arg_nb;
+
+ secbits = prctl(PR_GET_SECUREBITS);
+ if (secbits == -1) {
+ /*
+ * This should never happen, except with a buggy seccomp
+ * filter.
+ */
+ perror("ERROR: Failed to get securebits");
+ return 1;
+ }
+
+ deny_interactive = !!(secbits & SECBIT_EXEC_DENY_INTERACTIVE);
+ restrict_file = !!(secbits & SECBIT_EXEC_RESTRICT_FILE);
+
+ while ((opt = getopt(argc, argv, "c:i")) != -1) {
+ switch (opt) {
+ case 'c':
+ if (cmd) {
+ fprintf(stderr, "ERROR: Command already set");
+ return 1;
+ }
+ cmd = optarg;
+ break;
+ case 'i':
+ interpret_stdin = true;
+ break;
+ default:
+ print_usage(argv[0]);
+ return 1;
+ }
+ }
+
+ /* Checks that only one argument is used, or that stdin is read. */
+ arg_nb = !!cmd + !!interpret_stdin;
+ if (arg_nb == 0 && argc == 2) {
+ script_name = argv[1];
+ } else if (arg_nb != 1) {
+ print_usage(argv[0]);
+ return 1;
+ }
+
+ if (cmd) {
+ /*
+ * Other kinds of interactive interpretation should be denied
+ * as well (e.g. CLI arguments passing script snippets,
+ * environment variables interpreted as scripts). However, any
+ * way to pass script files should only be restricted according
+ * to restrict_file.
+ */
+ if (deny_interactive) {
+ fprintf(stderr,
+ "ERROR: Interactive interpretation denied.\n");
+ return 1;
+ }
+
+ return interpret_buffer(cmd, strlen(cmd));
+ }
+
+ if (interpret_stdin && !script_name) {
+ script_file = stdin;
+ /*
+ * As for any execve(2) call, this path may be logged by the
+ * kernel.
+ */
+ script_name = "/proc/self/fd/0";
+ /*
+ * When stdin is used, it can point to a regular file or a
+ * pipe. Restrict stdin execution according to
+ * SECBIT_EXEC_DENY_INTERACTIVE but always allow executable
+ * files (which are not considered as interactive inputs).
+ */
+ return interpret_stream(script_file, script_name, envp,
+ deny_interactive);
+ } else if (script_name && !interpret_stdin) {
+ /*
+ * In this sample, we don't pass any argument to scripts, but
+ * otherwise we would have to forge an argv with such
+ * arguments.
+ */
+ script_file = fopen(script_name, "r");
+ if (!script_file) {
+ perror("ERROR: Failed to open script");
+ return 1;
+ }
+ /*
+ * Restricts file execution according to
+ * SECBIT_EXEC_RESTRICT_FILE.
+ */
+ return interpret_stream(script_file, script_name, envp,
+ restrict_file);
+ }
+
+ print_usage(argv[0]);
+ return 1;
+}
diff --git a/samples/check-exec/run-script-ask.inc b/samples/check-exec/run-script-ask.inc
new file mode 100755
index 000000000000..8ef0fdc37266
--- /dev/null
+++ b/samples/check-exec/run-script-ask.inc
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+# SPDX-License-Identifier: BSD-3-Clause
+
+DIR="$(dirname -- "$0")"
+
+PATH="${PATH}:${DIR}"
+
+set -x
+"${DIR}/script-ask.inc"
diff --git a/samples/check-exec/script-ask.inc b/samples/check-exec/script-ask.inc
new file mode 100755
index 000000000000..720a8e649225
--- /dev/null
+++ b/samples/check-exec/script-ask.inc
@@ -0,0 +1,5 @@
+#!/usr/bin/env inc
+# SPDX-License-Identifier: BSD-3-Clause
+
+?
++
diff --git a/samples/check-exec/script-exec.inc b/samples/check-exec/script-exec.inc
new file mode 100755
index 000000000000..3245cb9d8dd1
--- /dev/null
+++ b/samples/check-exec/script-exec.inc
@@ -0,0 +1,4 @@
+#!/usr/bin/env inc
+# SPDX-License-Identifier: BSD-3-Clause
+
++
diff --git a/samples/check-exec/script-noexec.inc b/samples/check-exec/script-noexec.inc
new file mode 100644
index 000000000000..3245cb9d8dd1
--- /dev/null
+++ b/samples/check-exec/script-noexec.inc
@@ -0,0 +1,4 @@
+#!/usr/bin/env inc
+# SPDX-License-Identifier: BSD-3-Clause
+
++
diff --git a/samples/check-exec/set-exec.c b/samples/check-exec/set-exec.c
new file mode 100644
index 000000000000..ba86a60a20dd
--- /dev/null
+++ b/samples/check-exec/set-exec.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Simple tool to set SECBIT_EXEC_RESTRICT_FILE and/or SECBIT_EXEC_DENY_INTERACTIVE
+ * before executing a command.
+ *
+ * Copyright © 2024 Microsoft Corporation
+ */
+
+#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__
+#include <errno.h>
+#include <linux/prctl.h>
+#include <linux/securebits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+static void print_usage(const char *argv0)
+{
+ fprintf(stderr, "usage: %s -f|-i -- <cmd> [args]...\n\n", argv0);
+ fprintf(stderr, "Execute a command with\n");
+ fprintf(stderr, "- SECBIT_EXEC_RESTRICT_FILE set: -f\n");
+ fprintf(stderr, "- SECBIT_EXEC_DENY_INTERACTIVE set: -i\n");
+}
+
+int main(const int argc, char *const argv[], char *const *const envp)
+{
+ const char *cmd_path;
+ char *const *cmd_argv;
+ int opt, secbits_cur, secbits_new;
+ bool has_policy = false;
+
+ secbits_cur = prctl(PR_GET_SECUREBITS);
+ if (secbits_cur == -1) {
+ /*
+ * This should never happen, except with a buggy seccomp
+ * filter.
+ */
+ perror("ERROR: Failed to get securebits");
+ return 1;
+ }
+
+ secbits_new = secbits_cur;
+ while ((opt = getopt(argc, argv, "fi")) != -1) {
+ switch (opt) {
+ case 'f':
+ secbits_new |= SECBIT_EXEC_RESTRICT_FILE |
+ SECBIT_EXEC_RESTRICT_FILE_LOCKED;
+ has_policy = true;
+ break;
+ case 'i':
+ secbits_new |= SECBIT_EXEC_DENY_INTERACTIVE |
+ SECBIT_EXEC_DENY_INTERACTIVE_LOCKED;
+ has_policy = true;
+ break;
+ default:
+ print_usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (!argv[optind] || !has_policy) {
+ print_usage(argv[0]);
+ return 1;
+ }
+
+ if (secbits_cur != secbits_new &&
+ prctl(PR_SET_SECUREBITS, secbits_new)) {
+ perror("Failed to set secure bit(s).");
+ fprintf(stderr,
+ "Hint: The running kernel may not support this feature.\n");
+ return 1;
+ }
+
+ cmd_path = argv[optind];
+ cmd_argv = argv + optind;
+ fprintf(stderr, "Executing command...\n");
+ execvpe(cmd_path, cmd_argv, envp);
+ fprintf(stderr, "Failed to execute \"%s\": %s\n", cmd_path,
+ strerror(errno));
+ return 1;
+}
diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig
new file mode 100644
index 000000000000..63f6dcd71daa
--- /dev/null
+++ b/samples/damon/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DAMON Samples"
+
+config SAMPLE_DAMON_WSSE
+ bool "DAMON sameple module for working set size estimation"
+ depends on DAMON && DAMON_VADDR
+ help
+ This builds DAMON sample module for working set size estimation.
+
+ The module receives a pid, monitors accesses to the virtual address
+ space of the process, estimates the working set size of the process,
+ and repeatedly prints the size to the kernel log.
+
+ If unsure, say N.
+
+config SAMPLE_DAMON_PRCL
+ bool "DAMON sameple module for access-aware proactive reclamation"
+ depends on DAMON && DAMON_VADDR
+ help
+ This builds DAMON sample module for access-aware proactive
+ reclamation.
+
+ The module receives a pid, monitors accesses to the virtual address
+ space of the process, finds memory regions that are not accessed, and
+ proactively reclaims those regions.
+
+ If unsure, say N.
+
+endmenu
diff --git a/samples/damon/Makefile b/samples/damon/Makefile
new file mode 100644
index 000000000000..7f155143f237
--- /dev/null
+++ b/samples/damon/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SAMPLE_DAMON_WSSE) += wsse.o
+obj-$(CONFIG_SAMPLE_DAMON_PRCL) += prcl.o
diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c
new file mode 100644
index 000000000000..c3acbdab7a62
--- /dev/null
+++ b/samples/damon/prcl.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * proactive reclamation: monitor the access pattern of a given process, find
+ * regions that seem unaccessed, and proactively page out those regions.
+ */
+
+#define pr_fmt(fmt) "damon_sample_prcl: " fmt
+
+#include <linux/damon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static int target_pid __read_mostly;
+module_param(target_pid, int, 0600);
+
+static int damon_sample_prcl_enable_store(
+ const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops enable_param_ops = {
+ .set = damon_sample_prcl_enable_store,
+ .get = param_get_bool,
+};
+
+static bool enable __read_mostly;
+module_param_cb(enable, &enable_param_ops, &enable, 0600);
+MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_PRCL");
+
+static struct damon_ctx *ctx;
+static struct pid *target_pidp;
+
+static int damon_sample_prcl_after_aggregate(struct damon_ctx *c)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, c) {
+ struct damon_region *r;
+ unsigned long wss = 0;
+
+ damon_for_each_region(r, t) {
+ if (r->nr_accesses > 0)
+ wss += r->ar.end - r->ar.start;
+ }
+ pr_info("wss: %lu\n", wss);
+ }
+ return 0;
+}
+
+static int damon_sample_prcl_start(void)
+{
+ struct damon_target *target;
+ struct damos *scheme;
+
+ pr_info("start\n");
+
+ ctx = damon_new_ctx();
+ if (!ctx)
+ return -ENOMEM;
+ if (damon_select_ops(ctx, DAMON_OPS_VADDR)) {
+ damon_destroy_ctx(ctx);
+ return -EINVAL;
+ }
+
+ target = damon_new_target();
+ if (!target) {
+ damon_destroy_ctx(ctx);
+ return -ENOMEM;
+ }
+ damon_add_target(ctx, target);
+ target_pidp = find_get_pid(target_pid);
+ if (!target_pidp) {
+ damon_destroy_ctx(ctx);
+ return -EINVAL;
+ }
+ target->pid = target_pidp;
+
+ ctx->callback.after_aggregation = damon_sample_prcl_after_aggregate;
+
+ scheme = damon_new_scheme(
+ &(struct damos_access_pattern) {
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ .min_nr_accesses = 0,
+ .max_nr_accesses = 0,
+ .min_age_region = 50,
+ .max_age_region = UINT_MAX},
+ DAMOS_PAGEOUT,
+ 0,
+ &(struct damos_quota){},
+ &(struct damos_watermarks){},
+ NUMA_NO_NODE);
+ if (!scheme) {
+ damon_destroy_ctx(ctx);
+ return -ENOMEM;
+ }
+ damon_set_schemes(ctx, &scheme, 1);
+
+ return damon_start(&ctx, 1, true);
+}
+
+static void damon_sample_prcl_stop(void)
+{
+ pr_info("stop\n");
+ if (ctx) {
+ damon_stop(&ctx, 1);
+ damon_destroy_ctx(ctx);
+ }
+ if (target_pidp)
+ put_pid(target_pidp);
+}
+
+static int damon_sample_prcl_enable_store(
+ const char *val, const struct kernel_param *kp)
+{
+ bool enabled = enable;
+ int err;
+
+ err = kstrtobool(val, &enable);
+ if (err)
+ return err;
+
+ if (enable == enabled)
+ return 0;
+
+ if (enable)
+ return damon_sample_prcl_start();
+ damon_sample_prcl_stop();
+ return 0;
+}
+
+static int __init damon_sample_prcl_init(void)
+{
+ return 0;
+}
+
+module_init(damon_sample_prcl_init);
diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c
new file mode 100644
index 000000000000..11be25803274
--- /dev/null
+++ b/samples/damon/wsse.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * working set size estimation: monitor the access pattern of a given process
+ * and print the estimated working set size (total size of regions showing some
+ * access).
+ */
+
+#define pr_fmt(fmt) "damon_sample_wsse: " fmt
+
+#include <linux/damon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static int target_pid __read_mostly;
+module_param(target_pid, int, 0600);
+
+static int damon_sample_wsse_enable_store(
+ const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops enable_param_ops = {
+ .set = damon_sample_wsse_enable_store,
+ .get = param_get_bool,
+};
+
+static bool enable __read_mostly;
+module_param_cb(enable, &enable_param_ops, &enable, 0600);
+MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_WSSE");
+
+static struct damon_ctx *ctx;
+static struct pid *target_pidp;
+
+static int damon_sample_wsse_after_aggregate(struct damon_ctx *c)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, c) {
+ struct damon_region *r;
+ unsigned long wss = 0;
+
+ damon_for_each_region(r, t) {
+ if (r->nr_accesses > 0)
+ wss += r->ar.end - r->ar.start;
+ }
+ pr_info("wss: %lu\n", wss);
+ }
+ return 0;
+}
+
+static int damon_sample_wsse_start(void)
+{
+ struct damon_target *target;
+
+ pr_info("start\n");
+
+ ctx = damon_new_ctx();
+ if (!ctx)
+ return -ENOMEM;
+ if (damon_select_ops(ctx, DAMON_OPS_VADDR)) {
+ damon_destroy_ctx(ctx);
+ return -EINVAL;
+ }
+
+ target = damon_new_target();
+ if (!target) {
+ damon_destroy_ctx(ctx);
+ return -ENOMEM;
+ }
+ damon_add_target(ctx, target);
+ target_pidp = find_get_pid(target_pid);
+ if (!target_pidp) {
+ damon_destroy_ctx(ctx);
+ return -EINVAL;
+ }
+ target->pid = target_pidp;
+
+ ctx->callback.after_aggregation = damon_sample_wsse_after_aggregate;
+ return damon_start(&ctx, 1, true);
+}
+
+static void damon_sample_wsse_stop(void)
+{
+ pr_info("stop\n");
+ if (ctx) {
+ damon_stop(&ctx, 1);
+ damon_destroy_ctx(ctx);
+ }
+ if (target_pidp)
+ put_pid(target_pidp);
+}
+
+static int damon_sample_wsse_enable_store(
+ const char *val, const struct kernel_param *kp)
+{
+ bool enabled = enable;
+ int err;
+
+ err = kstrtobool(val, &enable);
+ if (err)
+ return err;
+
+ if (enable == enabled)
+ return 0;
+
+ if (enable)
+ return damon_sample_wsse_start();
+ damon_sample_wsse_stop();
+ return 0;
+}
+
+static int __init damon_sample_wsse_init(void)
+{
+ return 0;
+}
+
+module_init(damon_sample_wsse_init);
diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
index 57565dfd74a2..07fab2ef534e 100644
--- a/samples/landlock/sandboxer.c
+++ b/samples/landlock/sandboxer.c
@@ -91,6 +91,9 @@ static int parse_path(char *env_path, const char ***const path_list)
}
}
*path_list = malloc(num_paths * sizeof(**path_list));
+ if (!*path_list)
+ return -1;
+
for (i = 0; i < num_paths; i++)
(*path_list)[i] = strsep(&env_path, ENV_DELIMITER);
@@ -127,6 +130,10 @@ static int populate_ruleset_fs(const char *const env_var, const int ruleset_fd,
env_path_name = strdup(env_path_name);
unsetenv(env_var);
num_paths = parse_path(env_path_name, &path_list);
+ if (num_paths < 0) {
+ fprintf(stderr, "Failed to allocate memory\n");
+ goto out_free_name;
+ }
if (num_paths == 1 && path_list[0][0] == '\0') {
/*
* Allows to not use all possible restrictions (e.g. use
diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c
index 378e2d40271a..69105596e72e 100644
--- a/samples/livepatch/livepatch-callbacks-busymod.c
+++ b/samples/livepatch/livepatch-callbacks-busymod.c
@@ -44,8 +44,7 @@ static void busymod_work_func(struct work_struct *work)
static int livepatch_callbacks_mod_init(void)
{
pr_info("%s\n", __func__);
- schedule_delayed_work(&work,
- msecs_to_jiffies(1000 * 0));
+ schedule_delayed_work(&work, 0);
return 0;
}
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index 6701641bf12d..f3f153895d6c 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -72,8 +72,7 @@ static struct dummy *livepatch_fix1_dummy_alloc(void)
if (!d)
return NULL;
- d->jiffies_expire = jiffies +
- msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+ d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD);
/*
* Patch: save the extra memory location into a SV_LEAK shadow
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
index 7e753b0d2fa6..5d83ad5a8118 100644
--- a/samples/livepatch/livepatch-shadow-mod.c
+++ b/samples/livepatch/livepatch-shadow-mod.c
@@ -101,8 +101,7 @@ static __used noinline struct dummy *dummy_alloc(void)
if (!d)
return NULL;
- d->jiffies_expire = jiffies +
- msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+ d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD);
/* Oops, forgot to save leak! */
leak = kzalloc(sizeof(*leak), GFP_KERNEL);
@@ -152,8 +151,7 @@ static void alloc_work_func(struct work_struct *work)
list_add(&d->list, &dummy_list);
mutex_unlock(&dummy_list_mutex);
- schedule_delayed_work(&alloc_dwork,
- msecs_to_jiffies(1000 * ALLOC_PERIOD));
+ schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD));
}
/*
@@ -184,16 +182,13 @@ static void cleanup_work_func(struct work_struct *work)
}
mutex_unlock(&dummy_list_mutex);
- schedule_delayed_work(&cleanup_dwork,
- msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+ schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD));
}
static int livepatch_shadow_mod_init(void)
{
- schedule_delayed_work(&alloc_dwork,
- msecs_to_jiffies(1000 * ALLOC_PERIOD));
- schedule_delayed_work(&cleanup_dwork,
- msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+ schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD));
+ schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD));
return 0;
}
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
index b0f74a81c8f9..918dbead2c0b 100644
--- a/samples/rust/Kconfig
+++ b/samples/rust/Kconfig
@@ -20,6 +20,16 @@ config SAMPLE_RUST_MINIMAL
If unsure, say N.
+config SAMPLE_RUST_MISC_DEVICE
+ tristate "Misc device"
+ help
+ This option builds the Rust misc device.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_misc_device.
+
+ If unsure, say N.
+
config SAMPLE_RUST_PRINT
tristate "Printing macros"
help
@@ -30,6 +40,27 @@ config SAMPLE_RUST_PRINT
If unsure, say N.
+config SAMPLE_RUST_DRIVER_PCI
+ tristate "PCI Driver"
+ depends on PCI
+ help
+ This option builds the Rust PCI driver sample.
+
+ To compile this as a module, choose M here:
+ the module will be called driver_pci.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_DRIVER_PLATFORM
+ tristate "Platform Driver"
+ help
+ This option builds the Rust Platform driver sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_driver_platform.
+
+ If unsure, say N.
+
config SAMPLE_RUST_HOSTPROGS
bool "Host programs"
help
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
index c1a5c1655395..5a8ab0df0567 100644
--- a/samples/rust/Makefile
+++ b/samples/rust/Makefile
@@ -2,7 +2,10 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o
+obj-$(CONFIG_SAMPLE_RUST_MISC_DEVICE) += rust_misc_device.o
obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o
+obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI) += rust_driver_pci.o
+obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM) += rust_driver_platform.o
rust_print-y := rust_print_main.o rust_print_events.o
diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
new file mode 100644
index 000000000000..1fb6e44f3395
--- /dev/null
+++ b/samples/rust/rust_driver_pci.rs
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust PCI driver sample (based on QEMU's `pci-testdev`).
+//!
+//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
+
+use kernel::{bindings, c_str, devres::Devres, pci, prelude::*};
+
+struct Regs;
+
+impl Regs {
+ const TEST: usize = 0x0;
+ const OFFSET: usize = 0x4;
+ const DATA: usize = 0x8;
+ const COUNT: usize = 0xC;
+ const END: usize = 0x10;
+}
+
+type Bar0 = pci::Bar<{ Regs::END }>;
+
+#[derive(Debug)]
+struct TestIndex(u8);
+
+impl TestIndex {
+ const NO_EVENTFD: Self = Self(0);
+}
+
+struct SampleDriver {
+ pdev: pci::Device,
+ bar: Devres<Bar0>,
+}
+
+kernel::pci_device_table!(
+ PCI_TABLE,
+ MODULE_PCI_TABLE,
+ <SampleDriver as pci::Driver>::IdInfo,
+ [(
+ pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5),
+ TestIndex::NO_EVENTFD
+ )]
+);
+
+impl SampleDriver {
+ fn testdev(index: &TestIndex, bar: &Bar0) -> Result<u32> {
+ // Select the test.
+ bar.writeb(index.0, Regs::TEST);
+
+ let offset = u32::from_le(bar.readl(Regs::OFFSET)) as usize;
+ let data = bar.readb(Regs::DATA);
+
+ // Write `data` to `offset` to increase `count` by one.
+ //
+ // Note that we need `try_writeb`, since `offset` can't be checked at compile-time.
+ bar.try_writeb(data, offset)?;
+
+ Ok(bar.readl(Regs::COUNT))
+ }
+}
+
+impl pci::Driver for SampleDriver {
+ type IdInfo = TestIndex;
+
+ const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
+
+ fn probe(pdev: &mut pci::Device, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ dev_dbg!(
+ pdev.as_ref(),
+ "Probe Rust PCI driver sample (PCI ID: 0x{:x}, 0x{:x}).\n",
+ pdev.vendor_id(),
+ pdev.device_id()
+ );
+
+ pdev.enable_device_mem()?;
+ pdev.set_master();
+
+ let bar = pdev.iomap_region_sized::<{ Regs::END }>(0, c_str!("rust_driver_pci"))?;
+
+ let drvdata = KBox::new(
+ Self {
+ pdev: pdev.clone(),
+ bar,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let bar = drvdata.bar.try_access().ok_or(ENXIO)?;
+
+ dev_info!(
+ pdev.as_ref(),
+ "pci-testdev data-match count: {}\n",
+ Self::testdev(info, &bar)?
+ );
+
+ Ok(drvdata.into())
+ }
+}
+
+impl Drop for SampleDriver {
+ fn drop(&mut self) {
+ dev_dbg!(self.pdev.as_ref(), "Remove Rust PCI driver sample.\n");
+ }
+}
+
+kernel::module_pci_driver! {
+ type: SampleDriver,
+ name: "rust_driver_pci",
+ author: "Danilo Krummrich",
+ description: "Rust PCI driver",
+ license: "GPL v2",
+}
diff --git a/samples/rust/rust_driver_platform.rs b/samples/rust/rust_driver_platform.rs
new file mode 100644
index 000000000000..8120609e2940
--- /dev/null
+++ b/samples/rust/rust_driver_platform.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust Platform driver sample.
+
+use kernel::{c_str, of, platform, prelude::*};
+
+struct SampleDriver {
+ pdev: platform::Device,
+}
+
+struct Info(u32);
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <SampleDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("test,rust-device")), Info(42))]
+);
+
+impl platform::Driver for SampleDriver {
+ type IdInfo = Info;
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(pdev: &mut platform::Device, info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>> {
+ dev_dbg!(pdev.as_ref(), "Probe Rust Platform driver sample.\n");
+
+ if let Some(info) = info {
+ dev_info!(pdev.as_ref(), "Probed with info: '{}'.\n", info.0);
+ }
+
+ let drvdata = KBox::new(Self { pdev: pdev.clone() }, GFP_KERNEL)?;
+
+ Ok(drvdata.into())
+ }
+}
+
+impl Drop for SampleDriver {
+ fn drop(&mut self) {
+ dev_dbg!(self.pdev.as_ref(), "Remove Rust Platform driver sample.\n");
+ }
+}
+
+kernel::module_platform_driver! {
+ type: SampleDriver,
+ name: "rust_driver_platform",
+ author: "Danilo Krummrich",
+ description: "Rust Platform driver",
+ license: "GPL v2",
+}
diff --git a/samples/rust/rust_misc_device.rs b/samples/rust/rust_misc_device.rs
new file mode 100644
index 000000000000..40ad7266c225
--- /dev/null
+++ b/samples/rust/rust_misc_device.rs
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Rust misc device sample.
+
+/// Below is an example userspace C program that exercises this sample's functionality.
+///
+/// ```c
+/// #include <stdio.h>
+/// #include <stdlib.h>
+/// #include <errno.h>
+/// #include <fcntl.h>
+/// #include <unistd.h>
+/// #include <sys/ioctl.h>
+///
+/// #define RUST_MISC_DEV_FAIL _IO('|', 0)
+/// #define RUST_MISC_DEV_HELLO _IO('|', 0x80)
+/// #define RUST_MISC_DEV_GET_VALUE _IOR('|', 0x81, int)
+/// #define RUST_MISC_DEV_SET_VALUE _IOW('|', 0x82, int)
+///
+/// int main() {
+/// int value, new_value;
+/// int fd, ret;
+///
+/// // Open the device file
+/// printf("Opening /dev/rust-misc-device for reading and writing\n");
+/// fd = open("/dev/rust-misc-device", O_RDWR);
+/// if (fd < 0) {
+/// perror("open");
+/// return errno;
+/// }
+///
+/// // Make call into driver to say "hello"
+/// printf("Calling Hello\n");
+/// ret = ioctl(fd, RUST_MISC_DEV_HELLO, NULL);
+/// if (ret < 0) {
+/// perror("ioctl: Failed to call into Hello");
+/// close(fd);
+/// return errno;
+/// }
+///
+/// // Get initial value
+/// printf("Fetching initial value\n");
+/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &value);
+/// if (ret < 0) {
+/// perror("ioctl: Failed to fetch the initial value");
+/// close(fd);
+/// return errno;
+/// }
+///
+/// value++;
+///
+/// // Set value to something different
+/// printf("Submitting new value (%d)\n", value);
+/// ret = ioctl(fd, RUST_MISC_DEV_SET_VALUE, &value);
+/// if (ret < 0) {
+/// perror("ioctl: Failed to submit new value");
+/// close(fd);
+/// return errno;
+/// }
+///
+/// // Ensure new value was applied
+/// printf("Fetching new value\n");
+/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &new_value);
+/// if (ret < 0) {
+/// perror("ioctl: Failed to fetch the new value");
+/// close(fd);
+/// return errno;
+/// }
+///
+/// if (value != new_value) {
+/// printf("Failed: Committed and retrieved values are different (%d - %d)\n", value, new_value);
+/// close(fd);
+/// return -1;
+/// }
+///
+/// // Call the unsuccessful ioctl
+/// printf("Attempting to call in to an non-existent IOCTL\n");
+/// ret = ioctl(fd, RUST_MISC_DEV_FAIL, NULL);
+/// if (ret < 0) {
+/// perror("ioctl: Succeeded to fail - this was expected");
+/// } else {
+/// printf("ioctl: Failed to fail\n");
+/// close(fd);
+/// return -1;
+/// }
+///
+/// // Close the device file
+/// printf("Closing /dev/rust-misc-device\n");
+/// close(fd);
+///
+/// printf("Success\n");
+/// return 0;
+/// }
+/// ```
+use core::pin::Pin;
+
+use kernel::{
+ c_str,
+ device::Device,
+ fs::File,
+ ioctl::{_IO, _IOC_SIZE, _IOR, _IOW},
+ miscdevice::{MiscDevice, MiscDeviceOptions, MiscDeviceRegistration},
+ new_mutex,
+ prelude::*,
+ sync::Mutex,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader, UserSliceWriter},
+};
+
+const RUST_MISC_DEV_HELLO: u32 = _IO('|' as u32, 0x80);
+const RUST_MISC_DEV_GET_VALUE: u32 = _IOR::<i32>('|' as u32, 0x81);
+const RUST_MISC_DEV_SET_VALUE: u32 = _IOW::<i32>('|' as u32, 0x82);
+
+module! {
+ type: RustMiscDeviceModule,
+ name: "rust_misc_device",
+ author: "Lee Jones",
+ description: "Rust misc device sample",
+ license: "GPL",
+}
+
+#[pin_data]
+struct RustMiscDeviceModule {
+ #[pin]
+ _miscdev: MiscDeviceRegistration<RustMiscDevice>,
+}
+
+impl kernel::InPlaceModule for RustMiscDeviceModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Initialising Rust Misc Device Sample\n");
+
+ let options = MiscDeviceOptions {
+ name: c_str!("rust-misc-device"),
+ };
+
+ try_pin_init!(Self {
+ _miscdev <- MiscDeviceRegistration::register(options),
+ })
+ }
+}
+
+struct Inner {
+ value: i32,
+}
+
+#[pin_data(PinnedDrop)]
+struct RustMiscDevice {
+ #[pin]
+ inner: Mutex<Inner>,
+ dev: ARef<Device>,
+}
+
+#[vtable]
+impl MiscDevice for RustMiscDevice {
+ type Ptr = Pin<KBox<Self>>;
+
+ fn open(_file: &File, misc: &MiscDeviceRegistration<Self>) -> Result<Pin<KBox<Self>>> {
+ let dev = ARef::from(misc.device());
+
+ dev_info!(dev, "Opening Rust Misc Device Sample\n");
+
+ KBox::try_pin_init(
+ try_pin_init! {
+ RustMiscDevice {
+ inner <- new_mutex!( Inner{ value: 0_i32 } ),
+ dev: dev,
+ }
+ },
+ GFP_KERNEL,
+ )
+ }
+
+ fn ioctl(me: Pin<&RustMiscDevice>, _file: &File, cmd: u32, arg: usize) -> Result<isize> {
+ dev_info!(me.dev, "IOCTLing Rust Misc Device Sample\n");
+
+ let size = _IOC_SIZE(cmd);
+
+ match cmd {
+ RUST_MISC_DEV_GET_VALUE => me.get_value(UserSlice::new(arg, size).writer())?,
+ RUST_MISC_DEV_SET_VALUE => me.set_value(UserSlice::new(arg, size).reader())?,
+ RUST_MISC_DEV_HELLO => me.hello()?,
+ _ => {
+ dev_err!(me.dev, "-> IOCTL not recognised: {}\n", cmd);
+ return Err(ENOTTY);
+ }
+ };
+
+ Ok(0)
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for RustMiscDevice {
+ fn drop(self: Pin<&mut Self>) {
+ dev_info!(self.dev, "Exiting the Rust Misc Device Sample\n");
+ }
+}
+
+impl RustMiscDevice {
+ fn set_value(&self, mut reader: UserSliceReader) -> Result<isize> {
+ let new_value = reader.read::<i32>()?;
+ let mut guard = self.inner.lock();
+
+ dev_info!(
+ self.dev,
+ "-> Copying data from userspace (value: {})\n",
+ new_value
+ );
+
+ guard.value = new_value;
+ Ok(0)
+ }
+
+ fn get_value(&self, mut writer: UserSliceWriter) -> Result<isize> {
+ let guard = self.inner.lock();
+ let value = guard.value;
+
+ // Drop the lock and use our locally cached value from here on
+ drop(guard);
+
+ dev_info!(
+ self.dev,
+ "-> Copying data to userspace (value: {})\n",
+ &value
+ );
+
+ writer.write::<i32>(&value)?;
+ Ok(0)
+ }
+
+ fn hello(&self) -> Result<isize> {
+ dev_info!(self.dev, "-> Hello from the Rust Misc Device\n");
+
+ Ok(0)
+ }
+}
diff --git a/scripts/Makefile b/scripts/Makefile
index 546e8175e1c4..46f860529df5 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -53,7 +53,8 @@ hostprogs += unifdef gen_packed_field_checks
targets += module.lds
subdir-$(CONFIG_GCC_PLUGINS) += gcc-plugins
-subdir-$(CONFIG_MODVERSIONS) += genksyms
+subdir-$(CONFIG_GENKSYMS) += genksyms
+subdir-$(CONFIG_GENDWARFKSYMS) += gendwarfksyms
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_IPE) += ipe
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index c16e4cf54d77..993708d11874 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -107,13 +107,24 @@ cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $<
$(obj)/%.i: $(obj)/%.c FORCE
$(call if_changed_dep,cpp_i_c)
+getexportsymbols = $(NM) $@ | sed -n 's/.* __export_symbol_\(.*\)/$(1)/p'
+
+gendwarfksyms = $(objtree)/scripts/gendwarfksyms/gendwarfksyms \
+ $(if $(KBUILD_SYMTYPES), --symtypes $(@:.o=.symtypes)) \
+ $(if $(KBUILD_GENDWARFKSYMS_STABLE), --stable)
+
genksyms = $(objtree)/scripts/genksyms/genksyms \
$(if $(KBUILD_SYMTYPES), -T $(@:.o=.symtypes)) \
$(if $(KBUILD_PRESERVE), -p) \
$(addprefix -r , $(wildcard $(@:.o=.symref)))
# These mirror gensymtypes_S and co below, keep them in synch.
+ifdef CONFIG_GENDWARFKSYMS
+cmd_gensymtypes_c = $(if $(skip_gendwarfksyms),, \
+ $(call getexportsymbols,\1) | $(gendwarfksyms) $@)
+else
cmd_gensymtypes_c = $(CPP) -D__GENKSYMS__ $(c_flags) $< | $(genksyms)
+endif # CONFIG_GENDWARFKSYMS
# LLVM assembly
# Generate .ll files from .c
@@ -183,7 +194,9 @@ endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
is-standard-object = $(if $(filter-out y%, $(OBJECT_FILES_NON_STANDARD_$(target-stem).o)$(OBJECT_FILES_NON_STANDARD)n),$(is-kernel-object))
+ifdef CONFIG_OBJTOOL
$(obj)/%.o: private objtool-enabled = $(if $(is-standard-object),$(if $(delay-objtool),$(is-single-obj-m),y))
+endif
ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),)
cmd_warn_shared_object = $(if $(word 2, $(modname-multi)),$(warning $(kbuild-file): $*.o is added to multiple modules: $(modname-multi)))
@@ -286,14 +299,26 @@ $(obj)/%.rs: $(obj)/%.rs.S FORCE
# This is convoluted. The .S file must first be preprocessed to run guards and
# expand names, then the resulting exports must be constructed into plain
# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed
-# to make the genksyms input.
+# to make the genksyms input or compiled into an object for gendwarfksyms.
#
# These mirror gensymtypes_c and co above, keep them in synch.
-cmd_gensymtypes_S = \
- { echo "\#include <linux/kernel.h>" ; \
- echo "\#include <asm/asm-prototypes.h>" ; \
- $(NM) $@ | sed -n 's/.* __export_symbol_\(.*\)/EXPORT_SYMBOL(\1);/p' ; } | \
- $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | $(genksyms)
+getasmexports = \
+ { echo "\#include <linux/kernel.h>" ; \
+ echo "\#include <linux/string.h>" ; \
+ echo "\#include <asm/asm-prototypes.h>" ; \
+ $(call getexportsymbols,EXPORT_SYMBOL(\1);) ; }
+
+ifdef CONFIG_GENDWARFKSYMS
+cmd_gensymtypes_S = \
+ $(getasmexports) | \
+ $(CC) $(c_flags) -c -o $(@:.o=.gendwarfksyms.o) -xc -; \
+ $(call getexportsymbols,\1) | \
+ $(gendwarfksyms) $(@:.o=.gendwarfksyms.o)
+else
+cmd_gensymtypes_S = \
+ $(getasmexports) | \
+ $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | $(genksyms)
+endif # CONFIG_GENDWARFKSYMS
quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $<
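Concretely, for an assembly file exporting a hypothetical my_asm_func, the getasmexports recipe above produces a dummy translation unit along these lines, which is then either preprocessed for genksyms or compiled into an object for gendwarfksyms:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-prototypes.h>
EXPORT_SYMBOL(my_asm_func);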
diff --git a/scripts/Makefile.defconf b/scripts/Makefile.defconf
index 226ea3df3b4b..a44307f08e9d 100644
--- a/scripts/Makefile.defconf
+++ b/scripts/Makefile.defconf
@@ -1,6 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# Configuration helpers
+cmd_merge_fragments = \
+ $(srctree)/scripts/kconfig/merge_config.sh \
+ $4 -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$2 \
+ $(foreach config,$3,$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
+
# Creates 'merged defconfigs'
# ---------------------------------------------------------------------------
# Usage:
@@ -8,9 +13,7 @@
#
# Input config fragments without '.config' suffix
define merge_into_defconfig
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
- -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
- $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
+ $(call cmd,merge_fragments,$1,$2)
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
endef
@@ -22,8 +25,6 @@ endef
#
# Input config fragments without '.config' suffix
define merge_into_defconfig_override
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
- -Q -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
- $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
+ $(call cmd,merge_fragments,$1,$2,-Q)
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
endef
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 1d13cecc7cc7..dc081cf46d21 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -31,6 +31,11 @@ KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
ifdef CONFIG_CC_IS_CLANG
# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
KBUILD_CFLAGS += -Wno-gnu
+
+# Clang checks for overflow/truncation with '%p', while GCC does not:
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
+KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
else
# gcc inanely warns about local variables called 'main'
@@ -77,6 +82,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# Warn if there is an enum types mismatch
KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
+# Explicitly clear padding bits during variable initialization
+KBUILD_CFLAGS += $(call cc-option,-fzero-init-padding-bits=all)
+
KBUILD_CFLAGS += -Wextra
KBUILD_CFLAGS += -Wunused
@@ -102,11 +110,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
-else
-# Clang checks for overflow/truncation with '%p', while GCC does not:
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
-KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
endif
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
@@ -130,7 +133,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
KBUILD_CFLAGS += -Wno-enum-compare-conditional
-KBUILD_CFLAGS += -Wno-enum-enum-conversion
endif
endif
@@ -154,6 +156,10 @@ KBUILD_CFLAGS += -Wno-missing-field-initializers
KBUILD_CFLAGS += -Wno-type-limits
KBUILD_CFLAGS += -Wno-shift-negative-value
+ifdef CONFIG_CC_IS_CLANG
+KBUILD_CFLAGS += -Wno-enum-enum-conversion
+endif
+
ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -Wno-maybe-uninitialized
endif
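The newly added -fzero-init-padding-bits=all option addresses cases like the following sketch (illustrative only): C guarantees that unnamed members of a brace-initialized object are zeroed, but says nothing about padding bytes, which may therefore carry stale stack data unless the compiler clears them.

struct s {
	char c;		/* typically followed by 3 bytes of padding */
	int i;
};

void init_example(void)
{
	struct s v = { .c = 1 };
	/*
	 * .i is zero-initialized by the language; the padding after .c is
	 * not, unless -fzero-init-padding-bits=all tells the compiler to
	 * clear it, preventing stale bytes from leaking if `v` is later
	 * copied out wholesale.
	 */
	(void)v;
}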
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 7395200538da..cad20f0e66ee 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -287,6 +287,8 @@ delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT))
cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool-args) $@)
cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+objtool-enabled := y
+
endif # CONFIG_OBJTOOL
# Useful for describing the dependency of composite objects
@@ -302,11 +304,11 @@ endef
# ===========================================================================
# These are shared by some Makefile.* files.
-objtool-enabled := y
-
ifdef CONFIG_LTO_CLANG
-# objtool cannot process LLVM IR. Make $(LD) covert LLVM IR to ELF here.
-cmd_ld_single = $(if $(objtool-enabled), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
+# Run $(LD) here to convert LLVM IR to ELF in the following cases:
+# - when this object needs objtool processing, as objtool cannot process LLVM IR
+# - when this is a single-object module, as modpost cannot process LLVM IR
+cmd_ld_single = $(if $(objtool-enabled)$(is-single-obj-m), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
endif
quiet_cmd_cc_o_c = CC $(quiet_modtag) $@
@@ -374,6 +376,9 @@ quiet_cmd_ar = AR $@
quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
+quiet_cmd_strip_relocs = RSTRIP $@
+cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' $@
+
# Gzip
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index f97c9926ed31..1628198f3e83 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -105,7 +105,7 @@ else
sig-key := $(CONFIG_MODULE_SIG_KEY)
endif
quiet_cmd_sign = SIGN $@
- cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" certs/signing_key.x509 $@ \
+ cmd_sign = $(objtree)/scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" $(objtree)/certs/signing_key.x509 $@ \
$(if $(KBUILD_EXTMOD),|| true)
ifeq ($(sign-only),)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index ab0e94ea6249..d7d45067d08b 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -43,6 +43,8 @@ MODPOST = $(objtree)/scripts/mod/modpost
modpost-args = \
$(if $(CONFIG_MODULES),-M) \
$(if $(CONFIG_MODVERSIONS),-m) \
+ $(if $(CONFIG_BASIC_MODVERSIONS),-b) \
+ $(if $(CONFIG_EXTENDED_MODVERSIONS),-x) \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
$(if $(KBUILD_MODPOST_WARN),-w) \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index fe71e6a65eb9..7b28ad331742 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -834,16 +834,6 @@ foreach my $entry (@mode_permission_funcs) {
$mode_perms_search = "(?:${mode_perms_search})";
our %deprecated_apis = (
- "synchronize_rcu_bh" => "synchronize_rcu",
- "synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited",
- "call_rcu_bh" => "call_rcu",
- "rcu_barrier_bh" => "rcu_barrier",
- "synchronize_sched" => "synchronize_rcu",
- "synchronize_sched_expedited" => "synchronize_rcu_expedited",
- "call_rcu_sched" => "call_rcu",
- "rcu_barrier_sched" => "rcu_barrier",
- "get_state_synchronize_sched" => "get_state_synchronize_rcu",
- "cond_synchronize_sched" => "cond_synchronize_rcu",
"kmap" => "kmap_local_page",
"kunmap" => "kunmap_local",
"kmap_atomic" => "kmap_local_page",
@@ -2875,7 +2865,7 @@ sub process {
if ($realfile =~ m@^include/asm/@) {
ERROR("MODIFIED_INCLUDE_ASM",
- "do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n");
+ "do not modify files in include/asm, change architecture specific files in arch/<architecture>/include/asm\n" . "$here$rawline\n");
}
$found_file = 1;
}
@@ -3237,12 +3227,12 @@ sub process {
my ($cid, $ctitle) = git_commit_info($orig_commit, $id,
$title);
- if ($ctitle ne $title || $tag_case || $tag_space ||
- $id_length || $id_case || !$title_has_quotes) {
+ if (defined($cid) && ($ctitle ne $title || $tag_case || $tag_space || $id_length || $id_case || !$title_has_quotes)) {
+ my $fixed = "Fixes: $cid (\"$ctitle\")";
if (WARN("BAD_FIXES_TAG",
- "Please use correct Fixes: style 'Fixes: <12+ chars of sha1> (\"<title line>\")' - ie: 'Fixes: $cid (\"$ctitle\")'\n" . $herecurr) &&
+ "Please use correct Fixes: style 'Fixes: <12+ chars of sha1> (\"<title line>\")' - ie: '$fixed'\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] = "Fixes: $cid (\"$ctitle\")";
+ $fixed[$fixlinenr] = $fixed;
}
}
}
@@ -5513,9 +5503,9 @@ sub process {
}
}
-# check for unnecessary parentheses around comparisons in if uses
-# when !drivers/staging or command-line uses --strict
- if (($realfile !~ m@^(?:drivers/staging/)@ || $check_orig) &&
+# check for unnecessary parentheses around comparisons
+# except in drivers/staging
+ if (($realfile !~ m@^(?:drivers/staging/)@) &&
$perl_version_ok && defined($stat) &&
$stat =~ /(^.\s*if\s*($balanced_parens))/) {
my $if_stat = $1;
diff --git a/scripts/coccinelle/misc/secs_to_jiffies.cocci b/scripts/coccinelle/misc/secs_to_jiffies.cocci
new file mode 100644
index 000000000000..8bbb2884ea5d
--- /dev/null
+++ b/scripts/coccinelle/misc/secs_to_jiffies.cocci
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+///
+/// Find usages of:
+/// - msecs_to_jiffies(value*1000)
+/// - msecs_to_jiffies(value*MSEC_PER_SEC)
+///
+// Confidence: High
+// Copyright: (C) 2024 Easwar Hariharan, Microsoft
+// Keywords: secs, seconds, jiffies
+//
+
+virtual patch
+
+@depends on patch@ constant C; @@
+
+- msecs_to_jiffies(C * 1000)
++ secs_to_jiffies(C)
+
+@depends on patch@ constant C; @@
+
+- msecs_to_jiffies(C * MSEC_PER_SEC)
++ secs_to_jiffies(C)
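The semantic patch above rewrites call sites of the following shape (sketch; the delay value and work item are hypothetical), matching the livepatch sample conversions earlier in this series:

/* before */
schedule_delayed_work(&dwork, msecs_to_jiffies(5 * 1000));

/* after */
schedule_delayed_work(&dwork, secs_to_jiffies(5));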
diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
index 2f11c4f9c345..13eb8b3901b8 100644
--- a/scripts/gdb/linux/cpus.py
+++ b/scripts/gdb/linux/cpus.py
@@ -167,7 +167,7 @@ def get_current_task(cpu):
var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
return per_cpu(var_ptr, cpu).dereference()
elif utils.is_target_arch("aarch64"):
- current_task_addr = gdb.parse_and_eval("$SP_EL0")
+ current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
if (current_task_addr >> 63) != 0:
current_task = current_task_addr.cast(task_ptr_type)
return current_task.dereference()
diff --git a/scripts/gendwarfksyms/.gitignore b/scripts/gendwarfksyms/.gitignore
new file mode 100644
index 000000000000..0927f8d3cd96
--- /dev/null
+++ b/scripts/gendwarfksyms/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+/gendwarfksyms
diff --git a/scripts/gendwarfksyms/Makefile b/scripts/gendwarfksyms/Makefile
new file mode 100644
index 000000000000..6334c7d3c4d5
--- /dev/null
+++ b/scripts/gendwarfksyms/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+hostprogs-always-y += gendwarfksyms
+
+gendwarfksyms-objs += gendwarfksyms.o
+gendwarfksyms-objs += cache.o
+gendwarfksyms-objs += die.o
+gendwarfksyms-objs += dwarf.o
+gendwarfksyms-objs += kabi.o
+gendwarfksyms-objs += symbols.o
+gendwarfksyms-objs += types.o
+
+HOSTLDLIBS_gendwarfksyms := -ldw -lelf -lz
diff --git a/scripts/gendwarfksyms/cache.c b/scripts/gendwarfksyms/cache.c
new file mode 100644
index 000000000000..c9c19b86a686
--- /dev/null
+++ b/scripts/gendwarfksyms/cache.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include "gendwarfksyms.h"
+
+struct cache_item {
+ unsigned long key;
+ int value;
+ struct hlist_node hash;
+};
+
+void cache_set(struct cache *cache, unsigned long key, int value)
+{
+ struct cache_item *ci;
+
+ ci = xmalloc(sizeof(struct cache_item));
+ ci->key = key;
+ ci->value = value;
+ hash_add(cache->cache, &ci->hash, hash_32(key));
+}
+
+int cache_get(struct cache *cache, unsigned long key)
+{
+ struct cache_item *ci;
+
+ hash_for_each_possible(cache->cache, ci, hash, hash_32(key)) {
+ if (ci->key == key)
+ return ci->value;
+ }
+
+ return -1;
+}
+
+void cache_init(struct cache *cache)
+{
+ hash_init(cache->cache);
+}
+
+void cache_free(struct cache *cache)
+{
+ struct hlist_node *tmp;
+ struct cache_item *ci;
+
+ hash_for_each_safe(cache->cache, ci, tmp, hash) {
+ free(ci);
+ }
+
+ hash_init(cache->cache);
+}
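Going only by the functions added above, a caller would use the cache roughly like this (sketch, not part of the patch):

struct cache c;

cache_init(&c);
cache_set(&c, 0xdeadbeef, 42);

if (cache_get(&c, 0xdeadbeef) == 42)
	;	/* hit; cache_get() returns -1 on a miss */

cache_free(&c);	/* frees all items and reinitializes the hash table */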
diff --git a/scripts/gendwarfksyms/die.c b/scripts/gendwarfksyms/die.c
new file mode 100644
index 000000000000..66bd4c9bc952
--- /dev/null
+++ b/scripts/gendwarfksyms/die.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <string.h>
+#include "gendwarfksyms.h"
+
+#define DIE_HASH_BITS 15
+
+/* {die->addr, state} -> struct die * */
+static HASHTABLE_DEFINE(die_map, 1 << DIE_HASH_BITS);
+
+static unsigned int map_hits;
+static unsigned int map_misses;
+
+static inline unsigned int die_hash(uintptr_t addr, enum die_state state)
+{
+ return hash_32(addr_hash(addr) ^ (unsigned int)state);
+}
+
+static void init_die(struct die *cd)
+{
+ cd->state = DIE_INCOMPLETE;
+ cd->mapped = false;
+ cd->fqn = NULL;
+ cd->tag = -1;
+ cd->addr = 0;
+ INIT_LIST_HEAD(&cd->fragments);
+}
+
+static struct die *create_die(Dwarf_Die *die, enum die_state state)
+{
+ struct die *cd;
+
+ cd = xmalloc(sizeof(struct die));
+ init_die(cd);
+ cd->addr = (uintptr_t)die->addr;
+
+ hash_add(die_map, &cd->hash, die_hash(cd->addr, state));
+ return cd;
+}
+
+int __die_map_get(uintptr_t addr, enum die_state state, struct die **res)
+{
+ struct die *cd;
+
+ hash_for_each_possible(die_map, cd, hash, die_hash(addr, state)) {
+ if (cd->addr == addr && cd->state == state) {
+ *res = cd;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+struct die *die_map_get(Dwarf_Die *die, enum die_state state)
+{
+ struct die *cd;
+
+ if (__die_map_get((uintptr_t)die->addr, state, &cd) == 0) {
+ map_hits++;
+ return cd;
+ }
+
+ map_misses++;
+ return create_die(die, state);
+}
+
+static void reset_die(struct die *cd)
+{
+ struct die_fragment *tmp;
+ struct die_fragment *df;
+
+ list_for_each_entry_safe(df, tmp, &cd->fragments, list) {
+ if (df->type == FRAGMENT_STRING)
+ free(df->data.str);
+ free(df);
+ }
+
+ if (cd->fqn && *cd->fqn)
+ free(cd->fqn);
+ init_die(cd);
+}
+
+void die_map_for_each(die_map_callback_t func, void *arg)
+{
+ struct hlist_node *tmp;
+ struct die *cd;
+
+ hash_for_each_safe(die_map, cd, tmp, hash) {
+ func(cd, arg);
+ }
+}
+
+void die_map_free(void)
+{
+ struct hlist_node *tmp;
+ unsigned int stats[DIE_LAST + 1];
+ struct die *cd;
+ int i;
+
+ memset(stats, 0, sizeof(stats));
+
+ hash_for_each_safe(die_map, cd, tmp, hash) {
+ stats[cd->state]++;
+ reset_die(cd);
+ free(cd);
+ }
+ hash_init(die_map);
+
+ if (map_hits + map_misses > 0)
+ debug("hits %u, misses %u (hit rate %.02f%%)", map_hits,
+ map_misses,
+ (100.0f * map_hits) / (map_hits + map_misses));
+
+ for (i = 0; i <= DIE_LAST; i++)
+ debug("%s: %u entries", die_state_name(i), stats[i]);
+}
+
+static struct die_fragment *append_item(struct die *cd)
+{
+ struct die_fragment *df;
+
+ df = xmalloc(sizeof(struct die_fragment));
+ df->type = FRAGMENT_EMPTY;
+ list_add_tail(&df->list, &cd->fragments);
+ return df;
+}
+
+void die_map_add_string(struct die *cd, const char *str)
+{
+ struct die_fragment *df;
+
+ if (!cd)
+ return;
+
+ df = append_item(cd);
+ df->data.str = xstrdup(str);
+ df->type = FRAGMENT_STRING;
+}
+
+void die_map_add_linebreak(struct die *cd, int linebreak)
+{
+ struct die_fragment *df;
+
+ if (!cd)
+ return;
+
+ df = append_item(cd);
+ df->data.linebreak = linebreak;
+ df->type = FRAGMENT_LINEBREAK;
+}
+
+void die_map_add_die(struct die *cd, struct die *child)
+{
+ struct die_fragment *df;
+
+ if (!cd)
+ return;
+
+ df = append_item(cd);
+ df->data.addr = child->addr;
+ df->type = FRAGMENT_DIE;
+}
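
A sketch of how the fragment helpers above are typically driven while formatting a type string (die and child_cache stand in for values obtained elsewhere, e.g. via die_map_get()):

    struct die *cd = die_map_get(die, DIE_COMPLETE);

    die_map_add_string(cd, "pointer_type {");
    die_map_add_linebreak(cd, 1);       /* indent nested output by one level */
    die_map_add_die(cd, child_cache);   /* record a reference to another cached DIE */
    die_map_add_linebreak(cd, -1);
    die_map_add_string(cd, "}");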
diff --git a/scripts/gendwarfksyms/dwarf.c b/scripts/gendwarfksyms/dwarf.c
new file mode 100644
index 000000000000..534d9aa7c114
--- /dev/null
+++ b/scripts/gendwarfksyms/dwarf.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include "gendwarfksyms.h"
+
+/* See get_union_kabi_status */
+#define KABI_PREFIX "__kabi_"
+#define KABI_PREFIX_LEN (sizeof(KABI_PREFIX) - 1)
+#define KABI_RESERVED_PREFIX "reserved"
+#define KABI_RESERVED_PREFIX_LEN (sizeof(KABI_RESERVED_PREFIX) - 1)
+#define KABI_RENAMED_PREFIX "renamed"
+#define KABI_RENAMED_PREFIX_LEN (sizeof(KABI_RENAMED_PREFIX) - 1)
+#define KABI_IGNORED_PREFIX "ignored"
+#define KABI_IGNORED_PREFIX_LEN (sizeof(KABI_IGNORED_PREFIX) - 1)
+
+static inline bool is_kabi_prefix(const char *name)
+{
+ return name && !strncmp(name, KABI_PREFIX, KABI_PREFIX_LEN);
+}
+
+enum kabi_status {
+ /* >0 to stop DIE processing */
+ KABI_NORMAL = 1,
+ KABI_RESERVED,
+ KABI_IGNORED,
+};
+
+static bool do_linebreak;
+static int indentation_level;
+
+/* Line breaks and indentation for pretty-printing */
+static void process_linebreak(struct die *cache, int n)
+{
+ indentation_level += n;
+ do_linebreak = true;
+ die_map_add_linebreak(cache, n);
+}
+
+#define DEFINE_GET_ATTR(attr, type) \
+ static bool get_##attr##_attr(Dwarf_Die *die, unsigned int id, \
+ type *value) \
+ { \
+ Dwarf_Attribute da; \
+ return dwarf_attr(die, id, &da) && \
+ !dwarf_form##attr(&da, value); \
+ }
+
+DEFINE_GET_ATTR(flag, bool)
+DEFINE_GET_ATTR(udata, Dwarf_Word)
+
+static bool get_ref_die_attr(Dwarf_Die *die, unsigned int id, Dwarf_Die *value)
+{
+ Dwarf_Attribute da;
+
+ /* dwarf_formref_die returns a pointer instead of an error value. */
+ return dwarf_attr(die, id, &da) && dwarf_formref_die(&da, value);
+}
+
+#define DEFINE_GET_STRING_ATTR(attr) \
+ static const char *get_##attr##_attr(Dwarf_Die *die) \
+ { \
+ Dwarf_Attribute da; \
+ if (dwarf_attr(die, DW_AT_##attr, &da)) \
+ return dwarf_formstring(&da); \
+ return NULL; \
+ }
+
+DEFINE_GET_STRING_ATTR(name)
+DEFINE_GET_STRING_ATTR(linkage_name)
+
+static const char *get_symbol_name(Dwarf_Die *die)
+{
+ const char *name;
+
+ /* rustc uses DW_AT_linkage_name for exported symbols */
+ name = get_linkage_name_attr(die);
+ if (!name)
+ name = get_name_attr(die);
+
+ return name;
+}
+
+static bool match_export_symbol(struct state *state, Dwarf_Die *die)
+{
+ Dwarf_Die *source = die;
+ Dwarf_Die origin;
+
+ /* If the DIE has an abstract origin, use it for type information. */
+ if (get_ref_die_attr(die, DW_AT_abstract_origin, &origin))
+ source = &origin;
+
+ state->sym = symbol_get(get_symbol_name(die));
+
+ /* Look up using the origin name if there are no matches. */
+ if (!state->sym && source != die)
+ state->sym = symbol_get(get_symbol_name(source));
+
+ state->die = *source;
+ return !!state->sym;
+}
+
+/* DW_AT_decl_file -> struct srcfile */
+static struct cache srcfile_cache;
+
+static bool is_definition_private(Dwarf_Die *die)
+{
+ Dwarf_Word filenum;
+ Dwarf_Files *files;
+ Dwarf_Die cudie;
+ const char *s;
+ int res;
+
+ /*
+ * Definitions in .c files cannot change the public ABI,
+ * so consider them private.
+ */
+ if (!get_udata_attr(die, DW_AT_decl_file, &filenum))
+ return false;
+
+ res = cache_get(&srcfile_cache, filenum);
+ if (res >= 0)
+ return !!res;
+
+ if (!dwarf_cu_die(die->cu, &cudie, NULL, NULL, NULL, NULL, NULL, NULL))
+ error("dwarf_cu_die failed: '%s'", dwarf_errmsg(-1));
+
+ if (dwarf_getsrcfiles(&cudie, &files, NULL))
+ error("dwarf_getsrcfiles failed: '%s'", dwarf_errmsg(-1));
+
+ s = dwarf_filesrc(files, filenum, NULL, NULL);
+ if (!s)
+ error("dwarf_filesrc failed: '%s'", dwarf_errmsg(-1));
+
+ s = strrchr(s, '.');
+ res = s && !strcmp(s, ".c");
+ cache_set(&srcfile_cache, filenum, res);
+
+ return !!res;
+}
+
+static bool is_kabi_definition(struct die *cache, Dwarf_Die *die)
+{
+ bool value;
+
+ if (get_flag_attr(die, DW_AT_declaration, &value) && value)
+ return false;
+
+ if (kabi_is_declonly(cache->fqn))
+ return false;
+
+ return !is_definition_private(die);
+}
+
+/*
+ * Type string processing
+ */
+static void process(struct die *cache, const char *s)
+{
+ s = s ?: "<null>";
+
+ if (dump_dies && do_linebreak) {
+ fputs("\n", stderr);
+ for (int i = 0; i < indentation_level; i++)
+ fputs(" ", stderr);
+ do_linebreak = false;
+ }
+ if (dump_dies)
+ fputs(s, stderr);
+
+ if (cache)
+ die_debug_r("cache %p string '%s'", cache, s);
+ die_map_add_string(cache, s);
+}
+
+#define MAX_FMT_BUFFER_SIZE 128
+
+static void process_fmt(struct die *cache, const char *fmt, ...)
+{
+ char buf[MAX_FMT_BUFFER_SIZE];
+ va_list args;
+
+ va_start(args, fmt);
+
+ if (checkp(vsnprintf(buf, sizeof(buf), fmt, args)) >= sizeof(buf))
+ error("vsnprintf overflow: increase MAX_FMT_BUFFER_SIZE");
+
+ process(cache, buf);
+ va_end(args);
+}
+
+#define MAX_FQN_SIZE 64
+
+/* Get a fully qualified name from DWARF scopes */
+static char *get_fqn(Dwarf_Die *die)
+{
+ const char *list[MAX_FQN_SIZE];
+ Dwarf_Die *scopes = NULL;
+ bool has_name = false;
+ char *fqn = NULL;
+ char *p;
+ int count = 0;
+ int len = 0;
+ int res;
+ int i;
+
+ res = checkp(dwarf_getscopes_die(die, &scopes));
+ if (!res) {
+ list[count] = get_name_attr(die);
+
+ if (!list[count])
+ return NULL;
+
+ len += strlen(list[count]);
+ count++;
+
+ goto done;
+ }
+
+ for (i = res - 1; i >= 0 && count < MAX_FQN_SIZE; i--) {
+ if (dwarf_tag(&scopes[i]) == DW_TAG_compile_unit)
+ continue;
+
+ list[count] = get_name_attr(&scopes[i]);
+
+ if (list[count]) {
+ has_name = true;
+ } else {
+ list[count] = "<anonymous>";
+ has_name = false;
+ }
+
+ len += strlen(list[count]);
+ count++;
+
+ if (i > 0) {
+ list[count++] = "::";
+ len += 2;
+ }
+ }
+
+ free(scopes);
+
+ if (count == MAX_FQN_SIZE)
+ warn("increase MAX_FQN_SIZE: reached the maximum");
+
+ /* Consider the DIE unnamed if the last scope doesn't have a name */
+ if (!has_name)
+ return NULL;
+done:
+ fqn = xmalloc(len + 1);
+ *fqn = '\0';
+
+ p = fqn;
+ for (i = 0; i < count; i++)
+ p = stpcpy(p, list[i]);
+
+ return fqn;
+}
+
+static void update_fqn(struct die *cache, Dwarf_Die *die)
+{
+ if (!cache->fqn)
+ cache->fqn = get_fqn(die) ?: "";
+}
+
+static void process_fqn(struct die *cache, Dwarf_Die *die)
+{
+ update_fqn(cache, die);
+ if (*cache->fqn)
+ process(cache, " ");
+ process(cache, cache->fqn);
+}
+
+#define DEFINE_PROCESS_UDATA_ATTRIBUTE(attribute) \
+ static void process_##attribute##_attr(struct die *cache, \
+ Dwarf_Die *die) \
+ { \
+ Dwarf_Word value; \
+ if (get_udata_attr(die, DW_AT_##attribute, &value)) \
+ process_fmt(cache, " " #attribute "(%" PRIu64 ")", \
+ value); \
+ }
+
+DEFINE_PROCESS_UDATA_ATTRIBUTE(accessibility)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(alignment)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(bit_size)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(byte_size)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(encoding)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(data_bit_offset)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(data_member_location)
+DEFINE_PROCESS_UDATA_ATTRIBUTE(discr_value)
+
+/* Match functions -- die_match_callback_t */
+#define DEFINE_MATCH(type) \
+ static bool match_##type##_type(Dwarf_Die *die) \
+ { \
+ return dwarf_tag(die) == DW_TAG_##type##_type; \
+ }
+
+DEFINE_MATCH(enumerator)
+DEFINE_MATCH(formal_parameter)
+DEFINE_MATCH(member)
+DEFINE_MATCH(subrange)
+
+bool match_all(Dwarf_Die *die)
+{
+ return true;
+}
+
+int process_die_container(struct state *state, struct die *cache,
+ Dwarf_Die *die, die_callback_t func,
+ die_match_callback_t match)
+{
+ Dwarf_Die current;
+ int res;
+
+ /* Track the first item in lists. */
+ if (state)
+ state->first_list_item = true;
+
+ res = checkp(dwarf_child(die, &current));
+ while (!res) {
+ if (match(&current)) {
+ /* <0 = error, 0 = continue, >0 = stop */
+ res = checkp(func(state, cache, &current));
+ if (res)
+ goto out;
+ }
+
+ res = checkp(dwarf_siblingof(&current, &current));
+ }
+
+ res = 0;
+out:
+ if (state)
+ state->first_list_item = false;
+
+ return res;
+}
+
+static int process_type(struct state *state, struct die *parent,
+ Dwarf_Die *die);
+
+static void process_type_attr(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ Dwarf_Die type;
+
+ if (get_ref_die_attr(die, DW_AT_type, &type)) {
+ check(process_type(state, cache, &type));
+ return;
+ }
+
+ /* Compilers can omit DW_AT_type -- print out 'void' to clarify */
+ process(cache, "base_type void");
+}
+
+static void process_list_comma(struct state *state, struct die *cache)
+{
+ if (state->first_list_item) {
+ state->first_list_item = false;
+ } else {
+ process(cache, " ,");
+ process_linebreak(cache, 0);
+ }
+}
+
+/* Comma-separated with DW_AT_type */
+static void __process_list_type(struct state *state, struct die *cache,
+ Dwarf_Die *die, const char *type)
+{
+ const char *name = get_name_attr(die);
+
+ if (stable) {
+ if (is_kabi_prefix(name))
+ name = NULL;
+ state->kabi.orig_name = NULL;
+ }
+
+ process_list_comma(state, cache);
+ process(cache, type);
+ process_type_attr(state, cache, die);
+
+ if (stable && state->kabi.orig_name)
+ name = state->kabi.orig_name;
+ if (name) {
+ process(cache, " ");
+ process(cache, name);
+ }
+
+ process_accessibility_attr(cache, die);
+ process_bit_size_attr(cache, die);
+ process_data_bit_offset_attr(cache, die);
+ process_data_member_location_attr(cache, die);
+}
+
+#define DEFINE_PROCESS_LIST_TYPE(type) \
+ static void process_##type##_type(struct state *state, \
+ struct die *cache, Dwarf_Die *die) \
+ { \
+ __process_list_type(state, cache, die, #type " "); \
+ }
+
+DEFINE_PROCESS_LIST_TYPE(formal_parameter)
+DEFINE_PROCESS_LIST_TYPE(member)
+
+/* Container types with DW_AT_type */
+static void __process_type(struct state *state, struct die *cache,
+ Dwarf_Die *die, const char *type)
+{
+ process(cache, type);
+ process_fqn(cache, die);
+ process(cache, " {");
+ process_linebreak(cache, 1);
+ process_type_attr(state, cache, die);
+ process_linebreak(cache, -1);
+ process(cache, "}");
+ process_byte_size_attr(cache, die);
+ process_alignment_attr(cache, die);
+}
+
+#define DEFINE_PROCESS_TYPE(type) \
+ static void process_##type##_type(struct state *state, \
+ struct die *cache, Dwarf_Die *die) \
+ { \
+ __process_type(state, cache, die, #type "_type"); \
+ }
+
+DEFINE_PROCESS_TYPE(atomic)
+DEFINE_PROCESS_TYPE(const)
+DEFINE_PROCESS_TYPE(immutable)
+DEFINE_PROCESS_TYPE(packed)
+DEFINE_PROCESS_TYPE(pointer)
+DEFINE_PROCESS_TYPE(reference)
+DEFINE_PROCESS_TYPE(restrict)
+DEFINE_PROCESS_TYPE(rvalue_reference)
+DEFINE_PROCESS_TYPE(shared)
+DEFINE_PROCESS_TYPE(template_type_parameter)
+DEFINE_PROCESS_TYPE(volatile)
+DEFINE_PROCESS_TYPE(typedef)
+
+static void process_subrange_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ Dwarf_Word count = 0;
+
+ if (get_udata_attr(die, DW_AT_count, &count))
+ process_fmt(cache, "[%" PRIu64 "]", count);
+ else if (get_udata_attr(die, DW_AT_upper_bound, &count))
+ process_fmt(cache, "[%" PRIu64 "]", count + 1);
+ else
+ process(cache, "[]");
+}
+
+static void process_array_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ process(cache, "array_type");
+ /* Array size */
+ check(process_die_container(state, cache, die, process_type,
+ match_subrange_type));
+ process(cache, " {");
+ process_linebreak(cache, 1);
+ process_type_attr(state, cache, die);
+ process_linebreak(cache, -1);
+ process(cache, "}");
+}
+
+static void __process_subroutine_type(struct state *state, struct die *cache,
+ Dwarf_Die *die, const char *type)
+{
+ process(cache, type);
+ process(cache, " (");
+ process_linebreak(cache, 1);
+ /* Parameters */
+ check(process_die_container(state, cache, die, process_type,
+ match_formal_parameter_type));
+ process_linebreak(cache, -1);
+ process(cache, ")");
+ process_linebreak(cache, 0);
+ /* Return type */
+ process(cache, "-> ");
+ process_type_attr(state, cache, die);
+}
+
+static void process_subroutine_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ __process_subroutine_type(state, cache, die, "subroutine_type");
+}
+
+static void process_variant_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ process_list_comma(state, cache);
+ process(cache, "variant {");
+ process_linebreak(cache, 1);
+ check(process_die_container(state, cache, die, process_type,
+ match_member_type));
+ process_linebreak(cache, -1);
+ process(cache, "}");
+ process_discr_value_attr(cache, die);
+}
+
+static void process_variant_part_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ process_list_comma(state, cache);
+ process(cache, "variant_part {");
+ process_linebreak(cache, 1);
+ check(process_die_container(state, cache, die, process_type,
+ match_all));
+ process_linebreak(cache, -1);
+ process(cache, "}");
+}
+
+static int get_kabi_status(Dwarf_Die *die, const char **suffix)
+{
+ const char *name = get_name_attr(die);
+
+ if (suffix)
+ *suffix = NULL;
+
+ if (is_kabi_prefix(name)) {
+ name += KABI_PREFIX_LEN;
+
+ if (!strncmp(name, KABI_RESERVED_PREFIX,
+ KABI_RESERVED_PREFIX_LEN))
+ return KABI_RESERVED;
+ if (!strncmp(name, KABI_IGNORED_PREFIX,
+ KABI_IGNORED_PREFIX_LEN))
+ return KABI_IGNORED;
+
+ if (!strncmp(name, KABI_RENAMED_PREFIX,
+ KABI_RENAMED_PREFIX_LEN)) {
+ if (suffix) {
+ name += KABI_RENAMED_PREFIX_LEN;
+ *suffix = name;
+ }
+ return KABI_RESERVED;
+ }
+ }
+
+ return KABI_NORMAL;
+}
+
+static int check_struct_member_kabi_status(struct state *state,
+ struct die *__unused, Dwarf_Die *die)
+{
+ int res;
+
+ assert(dwarf_tag(die) == DW_TAG_member_type);
+
+ /*
+ * If the union member is a struct, expect the __kabi field to
+ * be the first member of the structure, i.e.:
+ *
+ * union {
+ * type new_member;
+ * struct {
+ * type __kabi_field;
+ * }
+ * };
+ */
+ res = get_kabi_status(die, &state->kabi.orig_name);
+
+ if (res == KABI_RESERVED &&
+ !get_ref_die_attr(die, DW_AT_type, &state->kabi.placeholder))
+ error("structure member missing a type?");
+
+ return res;
+}
+
+static int check_union_member_kabi_status(struct state *state,
+ struct die *__unused, Dwarf_Die *die)
+{
+ Dwarf_Die type;
+ int res;
+
+ assert(dwarf_tag(die) == DW_TAG_member_type);
+
+ if (!get_ref_die_attr(die, DW_AT_type, &type))
+ error("union member missing a type?");
+
+ /*
+ * We expect a union with two members. Check if either of them
+ * has a __kabi name prefix, i.e.:
+ *
+ * union {
+ * ...
+ * type memberN; // <- type, N = {0,1}
+ * ...
+ * };
+ *
+ * The member can also be a structure type, in which case we'll
+ * check the first structure member.
+ *
+ * In any case, stop processing after we've seen two members.
+ */
+ res = get_kabi_status(die, &state->kabi.orig_name);
+
+ if (res == KABI_RESERVED)
+ state->kabi.placeholder = type;
+ if (res != KABI_NORMAL)
+ return res;
+
+ if (dwarf_tag(&type) == DW_TAG_structure_type)
+ res = checkp(process_die_container(
+ state, NULL, &type, check_struct_member_kabi_status,
+ match_member_type));
+
+ if (res <= KABI_NORMAL && ++state->kabi.members < 2)
+ return 0; /* Continue */
+
+ return res;
+}
+
+static int get_union_kabi_status(Dwarf_Die *die, Dwarf_Die *placeholder,
+ const char **orig_name)
+{
+ struct state state;
+ int res;
+
+ if (!stable)
+ return KABI_NORMAL;
+
+ /*
+ * To maintain a stable kABI, distributions may choose to reserve
+ * space in structs for later use by adding placeholder members,
+ * for example:
+ *
+ * struct s {
+ * u32 a;
+ * // an 8-byte placeholder for future use
+ * u64 __kabi_reserved_0;
+ * };
+ *
+ * When the reserved member is taken into use, the type change
+ * would normally cause the symbol version to change as well, but
+ * if the replacement uses the following convention, gendwarfksyms
+ * continues to use the placeholder type for versioning instead,
+ * thus maintaining the same symbol version:
+ *
+ * struct s {
+ * u32 a;
+ * union {
+ * // placeholder replaced with a new member `b`
+ * struct t b;
+ * struct {
+ * // the placeholder type that is still
+ * // used for versioning
+ * u64 __kabi_reserved_0;
+ * };
+ * };
+ * };
+ *
+ * I.e., as long as the replaced member is in a union, and the
+ * placeholder has a __kabi_reserved name prefix, we'll continue
+ * to use the placeholder type (here u64) for version calculation
+ * instead of the union type.
+ *
+ * It's also possible to ignore new members from versioning if
+ * they've been added to alignment holes, for example, by
+ * including them in a union with another member that uses the
+ * __kabi_ignored name prefix:
+ *
+ * struct s {
+ * u32 a;
+ * // an alignment hole is used to add `n`
+ * union {
+ * u32 n;
+ * // hide the entire union member from versioning
+ * u8 __kabi_ignored_0;
+ * };
+ * u64 b;
+ * };
+ *
+ * Note that the user of this feature is responsible for ensuring
+ * that the structure actually remains ABI compatible.
+ */
+ memset(&state.kabi, 0, sizeof(struct kabi_state));
+
+ res = checkp(process_die_container(&state, NULL, die,
+ check_union_member_kabi_status,
+ match_member_type));
+
+ if (res == KABI_RESERVED) {
+ if (placeholder)
+ *placeholder = state.kabi.placeholder;
+ if (orig_name)
+ *orig_name = state.kabi.orig_name;
+ }
+
+ return res;
+}
+
+static bool is_kabi_ignored(Dwarf_Die *die)
+{
+ Dwarf_Die type;
+
+ if (!stable)
+ return false;
+
+ if (!get_ref_die_attr(die, DW_AT_type, &type))
+ error("member missing a type?");
+
+ return dwarf_tag(&type) == DW_TAG_union_type &&
+ checkp(get_union_kabi_status(&type, NULL, NULL)) == KABI_IGNORED;
+}
+
+static int ___process_structure_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ switch (dwarf_tag(die)) {
+ case DW_TAG_member:
+ if (is_kabi_ignored(die))
+ return 0;
+ return check(process_type(state, cache, die));
+ case DW_TAG_variant_part:
+ return check(process_type(state, cache, die));
+ case DW_TAG_class_type:
+ case DW_TAG_enumeration_type:
+ case DW_TAG_structure_type:
+ case DW_TAG_template_type_parameter:
+ case DW_TAG_union_type:
+ case DW_TAG_subprogram:
+ /* Skip non-member types, including member functions */
+ return 0;
+ default:
+ error("unexpected structure_type child: %x", dwarf_tag(die));
+ }
+}
+
+static void __process_structure_type(struct state *state, struct die *cache,
+ Dwarf_Die *die, const char *type,
+ die_callback_t process_func,
+ die_match_callback_t match_func)
+{
+ bool expand;
+
+ process(cache, type);
+ process_fqn(cache, die);
+ process(cache, " {");
+ process_linebreak(cache, 1);
+
+ expand = state->expand.expand && is_kabi_definition(cache, die);
+
+ if (expand) {
+ state->expand.current_fqn = cache->fqn;
+ check(process_die_container(state, cache, die, process_func,
+ match_func));
+ }
+
+ process_linebreak(cache, -1);
+ process(cache, "}");
+
+ if (expand) {
+ process_byte_size_attr(cache, die);
+ process_alignment_attr(cache, die);
+ }
+}
+
+#define DEFINE_PROCESS_STRUCTURE_TYPE(structure) \
+ static void process_##structure##_type( \
+ struct state *state, struct die *cache, Dwarf_Die *die) \
+ { \
+ __process_structure_type(state, cache, die, \
+ #structure "_type", \
+ ___process_structure_type, \
+ match_all); \
+ }
+
+DEFINE_PROCESS_STRUCTURE_TYPE(class)
+DEFINE_PROCESS_STRUCTURE_TYPE(structure)
+
+static void process_union_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ Dwarf_Die placeholder;
+
+ int res = checkp(get_union_kabi_status(die, &placeholder,
+ &state->kabi.orig_name));
+
+ if (res == KABI_RESERVED)
+ check(process_type(state, cache, &placeholder));
+ if (res > KABI_NORMAL)
+ return;
+
+ __process_structure_type(state, cache, die, "union_type",
+ ___process_structure_type, match_all);
+}
+
+static void process_enumerator_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ bool overridden = false;
+ Dwarf_Word value;
+
+ if (stable) {
+ /* Get the fqn before we process anything */
+ update_fqn(cache, die);
+
+ if (kabi_is_enumerator_ignored(state->expand.current_fqn,
+ cache->fqn))
+ return;
+
+ overridden = kabi_get_enumerator_value(
+ state->expand.current_fqn, cache->fqn, &value);
+ }
+
+ process_list_comma(state, cache);
+ process(cache, "enumerator");
+ process_fqn(cache, die);
+
+ if (overridden || get_udata_attr(die, DW_AT_const_value, &value)) {
+ process(cache, " = ");
+ process_fmt(cache, "%" PRIu64, value);
+ }
+}
+
+static void process_enumeration_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ __process_structure_type(state, cache, die, "enumeration_type",
+ process_type, match_enumerator_type);
+}
+
+static void process_base_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ process(cache, "base_type");
+ process_fqn(cache, die);
+ process_byte_size_attr(cache, die);
+ process_encoding_attr(cache, die);
+ process_alignment_attr(cache, die);
+}
+
+static void process_unspecified_type(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ /*
+ * These can be emitted for stand-alone assembly code, which means we
+ * might run into them in vmlinux.o.
+ */
+ process(cache, "unspecified_type");
+}
+
+static void process_cached(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ struct die_fragment *df;
+ Dwarf_Die child;
+
+ list_for_each_entry(df, &cache->fragments, list) {
+ switch (df->type) {
+ case FRAGMENT_STRING:
+ die_debug_b("cache %p STRING '%s'", cache,
+ df->data.str);
+ process(NULL, df->data.str);
+ break;
+ case FRAGMENT_LINEBREAK:
+ process_linebreak(NULL, df->data.linebreak);
+ break;
+ case FRAGMENT_DIE:
+ if (!dwarf_die_addr_die(dwarf_cu_getdwarf(die->cu),
+ (void *)df->data.addr, &child))
+ error("dwarf_die_addr_die failed");
+ die_debug_b("cache %p DIE addr %" PRIxPTR " tag %x",
+ cache, df->data.addr, dwarf_tag(&child));
+ check(process_type(state, NULL, &child));
+ break;
+ default:
+ error("empty die_fragment");
+ }
+ }
+}
+
+static void state_init(struct state *state)
+{
+ state->expand.expand = true;
+ state->expand.current_fqn = NULL;
+ cache_init(&state->expansion_cache);
+}
+
+static void expansion_state_restore(struct expansion_state *state,
+ struct expansion_state *saved)
+{
+ state->expand = saved->expand;
+ state->current_fqn = saved->current_fqn;
+}
+
+static void expansion_state_save(struct expansion_state *state,
+ struct expansion_state *saved)
+{
+ expansion_state_restore(saved, state);
+}
+
+static bool is_expanded_type(int tag)
+{
+ return tag == DW_TAG_class_type || tag == DW_TAG_structure_type ||
+ tag == DW_TAG_union_type || tag == DW_TAG_enumeration_type;
+}
+
+#define PROCESS_TYPE(type) \
+ case DW_TAG_##type##_type: \
+ process_##type##_type(state, cache, die); \
+ break;
+
+static int process_type(struct state *state, struct die *parent, Dwarf_Die *die)
+{
+ enum die_state want_state = DIE_COMPLETE;
+ struct die *cache;
+ struct expansion_state saved;
+ int tag = dwarf_tag(die);
+
+ expansion_state_save(&state->expand, &saved);
+
+ /*
+ * Structures and enumeration types are expanded only once per
+ * exported symbol. This is sufficient for detecting ABI changes
+ * within the structure.
+ */
+ if (is_expanded_type(tag)) {
+ if (cache_was_expanded(&state->expansion_cache, die->addr))
+ state->expand.expand = false;
+
+ if (state->expand.expand)
+ cache_mark_expanded(&state->expansion_cache, die->addr);
+ else
+ want_state = DIE_UNEXPANDED;
+ }
+
+ /*
+ * If we have want_state already cached, use it instead of walking
+ * through DWARF.
+ */
+ cache = die_map_get(die, want_state);
+
+ if (cache->state == want_state) {
+ die_debug_g("cached addr %p tag %x -- %s", die->addr, tag,
+ die_state_name(cache->state));
+
+ process_cached(state, cache, die);
+ die_map_add_die(parent, cache);
+
+ expansion_state_restore(&state->expand, &saved);
+ return 0;
+ }
+
+ die_debug_g("addr %p tag %x -- %s -> %s", die->addr, tag,
+ die_state_name(cache->state), die_state_name(want_state));
+
+ switch (tag) {
+ /* Type modifiers */
+ PROCESS_TYPE(atomic)
+ PROCESS_TYPE(const)
+ PROCESS_TYPE(immutable)
+ PROCESS_TYPE(packed)
+ PROCESS_TYPE(pointer)
+ PROCESS_TYPE(reference)
+ PROCESS_TYPE(restrict)
+ PROCESS_TYPE(rvalue_reference)
+ PROCESS_TYPE(shared)
+ PROCESS_TYPE(volatile)
+ /* Container types */
+ PROCESS_TYPE(class)
+ PROCESS_TYPE(structure)
+ PROCESS_TYPE(union)
+ PROCESS_TYPE(enumeration)
+ /* Subtypes */
+ PROCESS_TYPE(enumerator)
+ PROCESS_TYPE(formal_parameter)
+ PROCESS_TYPE(member)
+ PROCESS_TYPE(subrange)
+ PROCESS_TYPE(template_type_parameter)
+ PROCESS_TYPE(variant)
+ PROCESS_TYPE(variant_part)
+ /* Other types */
+ PROCESS_TYPE(array)
+ PROCESS_TYPE(base)
+ PROCESS_TYPE(subroutine)
+ PROCESS_TYPE(typedef)
+ PROCESS_TYPE(unspecified)
+ default:
+ error("unexpected type: %x", tag);
+ }
+
+ die_debug_r("parent %p cache %p die addr %p tag %x", parent, cache,
+ die->addr, tag);
+
+ /* Update cache state and append to the parent (if any) */
+ cache->tag = tag;
+ cache->state = want_state;
+ die_map_add_die(parent, cache);
+
+ expansion_state_restore(&state->expand, &saved);
+ return 0;
+}
+
+/*
+ * Exported symbol processing
+ */
+static struct die *get_symbol_cache(struct state *state, Dwarf_Die *die)
+{
+ struct die *cache;
+
+ cache = die_map_get(die, DIE_SYMBOL);
+
+ if (cache->state != DIE_INCOMPLETE)
+ return NULL; /* We already processed a symbol for this DIE */
+
+ cache->tag = dwarf_tag(die);
+ return cache;
+}
+
+static void process_symbol(struct state *state, Dwarf_Die *die,
+ die_callback_t process_func)
+{
+ struct die *cache;
+
+ symbol_set_die(state->sym, die);
+
+ cache = get_symbol_cache(state, die);
+ if (!cache)
+ return;
+
+ debug("%s", state->sym->name);
+ check(process_func(state, cache, die));
+ cache->state = DIE_SYMBOL;
+ if (dump_dies)
+ fputs("\n", stderr);
+}
+
+static int __process_subprogram(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ __process_subroutine_type(state, cache, die, "subprogram");
+ return 0;
+}
+
+static void process_subprogram(struct state *state, Dwarf_Die *die)
+{
+ process_symbol(state, die, __process_subprogram);
+}
+
+static int __process_variable(struct state *state, struct die *cache,
+ Dwarf_Die *die)
+{
+ process(cache, "variable ");
+ process_type_attr(state, cache, die);
+ return 0;
+}
+
+static void process_variable(struct state *state, Dwarf_Die *die)
+{
+ process_symbol(state, die, __process_variable);
+}
+
+static void save_symbol_ptr(struct state *state)
+{
+ Dwarf_Die ptr_type;
+ Dwarf_Die type;
+
+ if (!get_ref_die_attr(&state->die, DW_AT_type, &ptr_type) ||
+ dwarf_tag(&ptr_type) != DW_TAG_pointer_type)
+ error("%s must be a pointer type!",
+ get_symbol_name(&state->die));
+
+ if (!get_ref_die_attr(&ptr_type, DW_AT_type, &type))
+ error("%s pointer missing a type attribute?",
+ get_symbol_name(&state->die));
+
+ /*
+ * Save the symbol pointer DIE in case the actual symbol is
+ * missing from the DWARF. Clang, for example, intentionally
+ * omits external symbols from the debugging information.
+ */
+ if (dwarf_tag(&type) == DW_TAG_subroutine_type)
+ symbol_set_ptr(state->sym, &type);
+ else
+ symbol_set_ptr(state->sym, &ptr_type);
+}
+
+static int process_exported_symbols(struct state *unused, struct die *cache,
+ Dwarf_Die *die)
+{
+ int tag = dwarf_tag(die);
+
+ switch (tag) {
+ /* Possible containers of exported symbols */
+ case DW_TAG_namespace:
+ case DW_TAG_class_type:
+ case DW_TAG_structure_type:
+ return check(process_die_container(
+ NULL, cache, die, process_exported_symbols, match_all));
+
+ /* Possible exported symbols */
+ case DW_TAG_subprogram:
+ case DW_TAG_variable: {
+ struct state state;
+
+ if (!match_export_symbol(&state, die))
+ return 0;
+
+ state_init(&state);
+
+ if (is_symbol_ptr(get_symbol_name(&state.die)))
+ save_symbol_ptr(&state);
+ else if (tag == DW_TAG_subprogram)
+ process_subprogram(&state, &state.die);
+ else
+ process_variable(&state, &state.die);
+
+ cache_free(&state.expansion_cache);
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static void process_symbol_ptr(struct symbol *sym, void *arg)
+{
+ struct state state;
+ Dwarf *dwarf = arg;
+
+ if (sym->state != SYMBOL_UNPROCESSED || !sym->ptr_die_addr)
+ return;
+
+ debug("%s", sym->name);
+ state_init(&state);
+ state.sym = sym;
+
+ if (!dwarf_die_addr_die(dwarf, (void *)sym->ptr_die_addr, &state.die))
+ error("dwarf_die_addr_die failed for symbol ptr: '%s'",
+ sym->name);
+
+ if (dwarf_tag(&state.die) == DW_TAG_subroutine_type)
+ process_subprogram(&state, &state.die);
+ else
+ process_variable(&state, &state.die);
+
+ cache_free(&state.expansion_cache);
+}
+
+void process_cu(Dwarf_Die *cudie)
+{
+ check(process_die_container(NULL, NULL, cudie, process_exported_symbols,
+ match_all));
+
+ symbol_for_each(process_symbol_ptr, dwarf_cu_getdwarf(cudie->cu));
+
+ cache_free(&srcfile_cache);
+}
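
For reference, each PROCESS_TYPE(...) entry in the switch statement of process_type() above is just a generated case label; PROCESS_TYPE(pointer), for example, expands to:

    case DW_TAG_pointer_type:
            process_pointer_type(state, cache, die);
            break;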
diff --git a/scripts/gendwarfksyms/examples/kabi.h b/scripts/gendwarfksyms/examples/kabi.h
new file mode 100644
index 000000000000..97a5669b083d
--- /dev/null
+++ b/scripts/gendwarfksyms/examples/kabi.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Google LLC
+ *
+ * Example macros for maintaining kABI stability.
+ *
+ * This file is based on android_kabi.h, which has the following notice:
+ *
+ * Heavily influenced by rh_kabi.h which came from the RHEL/CENTOS kernel
+ * and was:
+ * Copyright (c) 2014 Don Zickus
+ * Copyright (c) 2015-2018 Jiri Benc
+ * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa
+ * Copyright (c) 2016-2018 Prarit Bhargava
+ * Copyright (c) 2017 Paolo Abeni, Larry Woodman
+ */
+
+#ifndef __KABI_H__
+#define __KABI_H__
+
+/* Kernel macros for userspace testing. */
+#ifndef __aligned
+#define __aligned(x) __attribute__((__aligned__(x)))
+#endif
+#ifndef __used
+#define __used __attribute__((__used__))
+#endif
+#ifndef __section
+#define __section(section) __attribute__((__section__(section)))
+#endif
+#ifndef __PASTE
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+#endif
+#ifndef __stringify
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+#endif
+
+#define __KABI_RULE(hint, target, value) \
+ static const char __PASTE(__gendwarfksyms_rule_, \
+ __COUNTER__)[] __used __aligned(1) \
+ __section(".discard.gendwarfksyms.kabi_rules") = \
+ "1\0" #hint "\0" #target "\0" #value
+
+#define __KABI_NORMAL_SIZE_ALIGN(_orig, _new) \
+ union { \
+ _Static_assert( \
+ sizeof(struct { _new; }) <= sizeof(struct { _orig; }), \
+ __FILE__ ":" __stringify(__LINE__) ": " __stringify( \
+ _new) " is larger than " __stringify(_orig)); \
+ _Static_assert( \
+ __alignof__(struct { _new; }) <= \
+ __alignof__(struct { _orig; }), \
+ __FILE__ ":" __stringify(__LINE__) ": " __stringify( \
+ _orig) " is not aligned the same as " __stringify(_new)); \
+ }
+
+#define __KABI_REPLACE(_orig, _new) \
+ union { \
+ _new; \
+ struct { \
+ _orig; \
+ }; \
+ __KABI_NORMAL_SIZE_ALIGN(_orig, _new); \
+ }
+
+/*
+ * KABI_DECLONLY(fqn)
+ * Treat the struct/union/enum fqn as a declaration, i.e. even if
+ * a definition is available, don't expand the contents.
+ */
+#define KABI_DECLONLY(fqn) __KABI_RULE(declonly, fqn, )
+
+/*
+ * KABI_ENUMERATOR_IGNORE(fqn, field)
+ * When expanding enum fqn, skip the provided field. This makes it
+ * possible to hide added enum fields from versioning.
+ */
+#define KABI_ENUMERATOR_IGNORE(fqn, field) \
+ __KABI_RULE(enumerator_ignore, fqn field, )
+
+/*
+ * KABI_ENUMERATOR_VALUE(fqn, field, value)
+ * When expanding enum fqn, use the provided value for the
+ * specified field. This makes it possible to override enumerator
+ * values when calculating versions.
+ */
+#define KABI_ENUMERATOR_VALUE(fqn, field, value) \
+ __KABI_RULE(enumerator_value, fqn field, value)
+
+/*
+ * KABI_RESERVE
+ * Reserve some "padding" in a structure for use by LTS backports.
+ * This is normally placed at the end of a structure.
+ * n: the "number" of the padding variable in the structure. Start with
+ * 1 and go up.
+ */
+#define KABI_RESERVE(n) unsigned long __kabi_reserved##n
+
+/*
+ * KABI_RESERVE_ARRAY
+ * Same as KABI_RESERVE but allocates an array with the specified
+ * size in bytes.
+ */
+#define KABI_RESERVE_ARRAY(n, s) \
+ unsigned char __aligned(8) __kabi_reserved##n[s]
+
+/*
+ * KABI_IGNORE
+ * Add a new field that's ignored in versioning.
+ */
+#define KABI_IGNORE(n, _new) \
+ union { \
+ _new; \
+ unsigned char __kabi_ignored##n; \
+ }
+
+/*
+ * KABI_REPLACE
+ * Replace a field with a compatible new field.
+ */
+#define KABI_REPLACE(_oldtype, _oldname, _new) \
+ __KABI_REPLACE(_oldtype __kabi_renamed##_oldname, struct { _new; })
+
+/*
+ * KABI_USE(number, _new)
+ * Use a previous padding entry that was defined with KABI_RESERVE
+ * number: the previous "number" of the padding variable
+ * _new: the variable to use now instead of the padding variable
+ */
+#define KABI_USE(number, _new) __KABI_REPLACE(KABI_RESERVE(number), _new)
+
+/*
+ * KABI_USE2(number, _new1, _new2)
+ * Use a previous padding entry that was defined with KABI_RESERVE for
+ * two new variables that fit into 64 bits. This is useful when you do not
+ * want to "burn" a full 64-bit padding variable for a smaller variable if it
+ * is not needed.
+ */
+#define KABI_USE2(number, _new1, _new2) \
+ __KABI_REPLACE( \
+ KABI_RESERVE(number), struct { \
+ _new1; \
+ _new2; \
+ })
+/*
+ * KABI_USE_ARRAY(number, bytes, _new)
+ * Use a previous padding entry that was defined with KABI_RESERVE_ARRAY
+ * number: the previous "number" of the padding variable
+ * bytes: the size in bytes reserved for the array
+ * _new: the variable to use now instead of the padding variable
+ */
+#define KABI_USE_ARRAY(number, bytes, _new) \
+ __KABI_REPLACE(KABI_RESERVE_ARRAY(number, bytes), _new)
+
+#endif /* __KABI_H__ */
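
As a rough illustration of the macros above, KABI_USE(0, void *p) (as used in the examples that follow) expands, ignoring the _Static_assert size and alignment checks, to approximately:

    union {
            void *p;
            struct {
                    unsigned long __kabi_reserved0;
            };
            /* __KABI_NORMAL_SIZE_ALIGN() size/alignment checks omitted */
    };

so the new pointer and the original reserved field share the same storage, and gendwarfksyms keeps using the reserved field's type for versioning.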
diff --git a/scripts/gendwarfksyms/examples/kabi_ex.c b/scripts/gendwarfksyms/examples/kabi_ex.c
new file mode 100644
index 000000000000..0b7ffd830541
--- /dev/null
+++ b/scripts/gendwarfksyms/examples/kabi_ex.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kabi_ex.c
+ *
+ * Copyright (C) 2024 Google LLC
+ *
+ * Examples for kABI stability features with --stable. See kabi_ex.h
+ * for details.
+ */
+
+#include "kabi_ex.h"
+
+struct s e0;
+enum e e1;
+
+struct ex0a ex0a;
+struct ex0b ex0b;
+struct ex0c ex0c;
+
+struct ex1a ex1a;
+struct ex1b ex1b;
+struct ex1c ex1c;
+
+struct ex2a ex2a;
+struct ex2b ex2b;
+struct ex2c ex2c;
+
+struct ex3a ex3a;
+struct ex3b ex3b;
+struct ex3c ex3c;
diff --git a/scripts/gendwarfksyms/examples/kabi_ex.h b/scripts/gendwarfksyms/examples/kabi_ex.h
new file mode 100644
index 000000000000..1736e0f65208
--- /dev/null
+++ b/scripts/gendwarfksyms/examples/kabi_ex.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * kabi_ex.h
+ *
+ * Copyright (C) 2024 Google LLC
+ *
+ * Examples for kABI stability features with --stable.
+ */
+
+/*
+ * The comments below each example contain the expected gendwarfksyms
+ * output, which can be verified using LLVM's FileCheck tool:
+ *
+ * https://llvm.org/docs/CommandGuide/FileCheck.html
+ *
+ * Usage:
+ *
+ * $ gcc -g -c examples/kabi_ex.c -o examples/kabi_ex.o
+ *
+ * $ nm examples/kabi_ex.o | awk '{ print $NF }' | \
+ * ./gendwarfksyms --stable --dump-dies \
+ * examples/kabi_ex.o 2>&1 >/dev/null | \
+ * FileCheck examples/kabi_ex.h --check-prefix=STABLE
+ */
+
+#ifndef __KABI_EX_H__
+#define __KABI_EX_H__
+
+#include "kabi.h"
+
+/*
+ * Example: kABI rules
+ */
+
+struct s {
+ int a;
+};
+
+KABI_DECLONLY(s);
+
+/*
+ * STABLE: variable structure_type s {
+ * STABLE-NEXT: }
+ */
+
+enum e {
+ A,
+ B,
+ C,
+ D,
+};
+
+KABI_ENUMERATOR_IGNORE(e, B);
+KABI_ENUMERATOR_IGNORE(e, C);
+KABI_ENUMERATOR_VALUE(e, D, 123456789);
+
+/*
+ * STABLE: variable enumeration_type e {
+ * STABLE-NEXT: enumerator A = 0 ,
+ * STABLE-NEXT: enumerator D = 123456789
+ * STABLE-NEXT: } byte_size(4)
+*/
+
+/*
+ * Example: Reserved fields
+ */
+struct ex0a {
+ int a;
+ KABI_RESERVE(0);
+ KABI_RESERVE(1);
+};
+
+/*
+ * STABLE: variable structure_type ex0a {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG:long unsigned int|unsigned long]] byte_size(8) encoding(7) data_member_location(8) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) data_member_location(16)
+ * STABLE-NEXT: } byte_size(24)
+ */
+
+struct ex0b {
+ int a;
+ KABI_RESERVE(0);
+ KABI_USE2(1, int b, int c);
+};
+
+/*
+ * STABLE: variable structure_type ex0b {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) data_member_location(8) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) data_member_location(16)
+ * STABLE-NEXT: } byte_size(24)
+ */
+
+struct ex0c {
+ int a;
+ KABI_USE(0, void *p);
+ KABI_USE2(1, int b, int c);
+};
+
+/*
+ * STABLE: variable structure_type ex0c {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) data_member_location(8) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) data_member_location(16)
+ * STABLE-NEXT: } byte_size(24)
+ */
+
+/*
+ * Example: A reserved array
+ */
+
+struct ex1a {
+ unsigned int a;
+ KABI_RESERVE_ARRAY(0, 64);
+};
+
+/*
+ * STABLE: variable structure_type ex1a {
+ * STABLE-NEXT: member base_type unsigned int byte_size(4) encoding(7) a data_member_location(0) ,
+ * STABLE-NEXT: member array_type[64] {
+ * STABLE-NEXT: base_type unsigned char byte_size(1) encoding(8)
+ * STABLE-NEXT: } data_member_location(8)
+ * STABLE-NEXT: } byte_size(72)
+ */
+
+struct ex1b {
+ unsigned int a;
+ KABI_USE_ARRAY(
+ 0, 64, struct {
+ void *p;
+ KABI_RESERVE_ARRAY(1, 56);
+ });
+};
+
+/*
+ * STABLE: variable structure_type ex1b {
+ * STABLE-NEXT: member base_type unsigned int byte_size(4) encoding(7) a data_member_location(0) ,
+ * STABLE-NEXT: member array_type[64] {
+ * STABLE-NEXT: base_type unsigned char byte_size(1) encoding(8)
+ * STABLE-NEXT: } data_member_location(8)
+ * STABLE-NEXT: } byte_size(72)
+ */
+
+struct ex1c {
+ unsigned int a;
+ KABI_USE_ARRAY(0, 64, void *p[8]);
+};
+
+/*
+ * STABLE: variable structure_type ex1c {
+ * STABLE-NEXT: member base_type unsigned int byte_size(4) encoding(7) a data_member_location(0) ,
+ * STABLE-NEXT: member array_type[64] {
+ * STABLE-NEXT: base_type unsigned char byte_size(1) encoding(8)
+ * STABLE-NEXT: } data_member_location(8)
+ * STABLE-NEXT: } byte_size(72)
+ */
+
+/*
+ * Example: An ignored field added to an alignment hole
+ */
+
+struct ex2a {
+ int a;
+ unsigned long b;
+ int c;
+ unsigned long d;
+};
+
+/*
+ * STABLE: variable structure_type ex2a {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG:long unsigned int|unsigned long]] byte_size(8) encoding(7) b data_member_location(8)
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) c data_member_location(16) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) d data_member_location(24)
+ * STABLE-NEXT: } byte_size(32)
+ */
+
+struct ex2b {
+ int a;
+ KABI_IGNORE(0, unsigned int n);
+ unsigned long b;
+ int c;
+ unsigned long d;
+};
+
+_Static_assert(sizeof(struct ex2a) == sizeof(struct ex2b), "ex2a size doesn't match ex2b");
+
+/*
+ * STABLE: variable structure_type ex2b {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) b data_member_location(8)
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) c data_member_location(16) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) d data_member_location(24)
+ * STABLE-NEXT: } byte_size(32)
+ */
+
+struct ex2c {
+ int a;
+ KABI_IGNORE(0, unsigned int n);
+ unsigned long b;
+ int c;
+ KABI_IGNORE(1, unsigned int m);
+ unsigned long d;
+};
+
+_Static_assert(sizeof(struct ex2a) == sizeof(struct ex2c), "ex2a size doesn't match ex2c");
+
+/*
+ * STABLE: variable structure_type ex2c {
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) a data_member_location(0) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) b data_member_location(8)
+ * STABLE-NEXT: member base_type int byte_size(4) encoding(5) c data_member_location(16) ,
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) d data_member_location(24)
+ * STABLE-NEXT: } byte_size(32)
+ */
+
+
+/*
+ * Example: A replaced field
+ */
+
+struct ex3a {
+ unsigned long a;
+ unsigned long unused;
+};
+
+/*
+ * STABLE: variable structure_type ex3a {
+ * STABLE-NEXT: member base_type [[ULONG:long unsigned int|unsigned long]] byte_size(8) encoding(7) a data_member_location(0)
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) unused data_member_location(8)
+ * STABLE-NEXT: } byte_size(16)
+ */
+
+struct ex3b {
+ unsigned long a;
+ KABI_REPLACE(unsigned long, unused, unsigned long renamed);
+};
+
+_Static_assert(sizeof(struct ex3a) == sizeof(struct ex3b), "ex3a size doesn't match ex3b");
+
+/*
+ * STABLE: variable structure_type ex3b {
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) a data_member_location(0)
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) unused data_member_location(8)
+ * STABLE-NEXT: } byte_size(16)
+ */
+
+struct ex3c {
+ unsigned long a;
+ KABI_REPLACE(unsigned long, unused, long replaced);
+};
+
+_Static_assert(sizeof(struct ex3a) == sizeof(struct ex3c), "ex3a size doesn't match ex3c");
+
+/*
+ * STABLE: variable structure_type ex3c {
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) a data_member_location(0)
+ * STABLE-NEXT: member base_type [[ULONG]] byte_size(8) encoding(7) unused data_member_location(8)
+ * STABLE-NEXT: } byte_size(16)
+ */
+
+#endif /* __KABI_EX_H__ */
diff --git a/scripts/gendwarfksyms/examples/symbolptr.c b/scripts/gendwarfksyms/examples/symbolptr.c
new file mode 100644
index 000000000000..88bc1bd60da8
--- /dev/null
+++ b/scripts/gendwarfksyms/examples/symbolptr.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ *
+ * Example for symbol pointers. When compiled with Clang, gendwarfksyms
+ * uses a symbol pointer for `f`.
+ *
+ * $ clang -g -c examples/symbolptr.c -o examples/symbolptr.o
+ * $ echo -e "f\ng\np" | ./gendwarfksyms -d examples/symbolptr.o
+ */
+
+/* Kernel macros for userspace testing. */
+#ifndef __used
+#define __used __attribute__((__used__))
+#endif
+#ifndef __section
+#define __section(section) __attribute__((__section__(section)))
+#endif
+
+#define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
+
+extern void f(unsigned int arg);
+void g(int *arg);
+void g(int *arg) {}
+
+struct s;
+extern struct s *p;
+
+__GENDWARFKSYMS_EXPORT(f);
+__GENDWARFKSYMS_EXPORT(g);
+__GENDWARFKSYMS_EXPORT(p);
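
For reference, __GENDWARFKSYMS_EXPORT(g) above expands to:

    static typeof(g) *__gendwarfksyms_ptr_g __used
            __section(".discard.gendwarfksyms") = &g;

i.e. a pointer to the exported symbol placed in a discarded section, which lets gendwarfksyms recover the symbol's type even when the symbol itself is omitted from the DWARF.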
diff --git a/scripts/gendwarfksyms/gendwarfksyms.c b/scripts/gendwarfksyms/gendwarfksyms.c
new file mode 100644
index 000000000000..08ae61eb327e
--- /dev/null
+++ b/scripts/gendwarfksyms/gendwarfksyms.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include "gendwarfksyms.h"
+
+/*
+ * Options
+ */
+
+/* Print debugging information to stderr */
+int debug;
+/* Dump DIE contents */
+int dump_dies;
+/* Print debugging information about die_map changes */
+int dump_die_map;
+/* Print out type strings (i.e. type_map) */
+int dump_types;
+/* Print out expanded type strings used for symbol versions */
+int dump_versions;
+/* Support kABI stability features */
+int stable;
+/* Write a symtypes file */
+int symtypes;
+static const char *symtypes_file;
+
+static void usage(void)
+{
+ fputs("Usage: gendwarfksyms [options] elf-object-file ... < symbol-list\n\n"
+ "Options:\n"
+ " -d, --debug Print debugging information\n"
+ " --dump-dies Dump DWARF DIE contents\n"
+ " --dump-die-map Print debugging information about die_map changes\n"
+ " --dump-types Dump type strings\n"
+ " --dump-versions Dump expanded type strings used for symbol versions\n"
+ " -s, --stable Support kABI stability features\n"
+ " -T, --symtypes file Write a symtypes file\n"
+ " -h, --help Print this message\n"
+ "\n",
+ stderr);
+}
+
+static int process_module(Dwfl_Module *mod, void **userdata, const char *name,
+ Dwarf_Addr base, void *arg)
+{
+ Dwarf_Addr dwbias;
+ Dwarf_Die cudie;
+ Dwarf_CU *cu = NULL;
+ Dwarf *dbg;
+ FILE *symfile = arg;
+ int res;
+
+ debug("%s", name);
+ dbg = dwfl_module_getdwarf(mod, &dwbias);
+
+ /*
+ * Look for exported symbols in each CU, follow the DIE tree, and add
+ * the entries to die_map.
+ */
+ do {
+ res = dwarf_get_units(dbg, cu, &cu, NULL, NULL, &cudie, NULL);
+ if (res < 0)
+ error("dwarf_get_units failed: no debugging information?");
+ if (res == 1)
+ break; /* No more units */
+
+ process_cu(&cudie);
+ } while (cu);
+
+ /*
+ * Use die_map to expand type strings, write them to `symfile`, and
+ * calculate symbol versions.
+ */
+ generate_symtypes_and_versions(symfile);
+ die_map_free();
+
+ return DWARF_CB_OK;
+}
+
+static const Dwfl_Callbacks callbacks = {
+ .section_address = dwfl_offline_section_address,
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+};
+
+int main(int argc, char **argv)
+{
+ FILE *symfile = NULL;
+ unsigned int n;
+ int opt;
+
+ static const struct option opts[] = {
+ { "debug", 0, NULL, 'd' },
+ { "dump-dies", 0, &dump_dies, 1 },
+ { "dump-die-map", 0, &dump_die_map, 1 },
+ { "dump-types", 0, &dump_types, 1 },
+ { "dump-versions", 0, &dump_versions, 1 },
+ { "stable", 0, NULL, 's' },
+ { "symtypes", 1, NULL, 'T' },
+ { "help", 0, NULL, 'h' },
+ { 0, 0, NULL, 0 }
+ };
+
+ while ((opt = getopt_long(argc, argv, "dsT:h", opts, NULL)) != EOF) {
+ switch (opt) {
+ case 0:
+ break;
+ case 'd':
+ debug = 1;
+ break;
+ case 's':
+ stable = 1;
+ break;
+ case 'T':
+ symtypes = 1;
+ symtypes_file = optarg;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ default:
+ usage();
+ return 1;
+ }
+ }
+
+ if (dump_die_map)
+ dump_dies = 1;
+
+ if (optind >= argc) {
+ usage();
+ error("no input files?");
+ }
+
+ symbol_read_exports(stdin);
+
+ if (symtypes_file) {
+ symfile = fopen(symtypes_file, "w");
+ if (!symfile)
+ error("fopen failed for '%s': %s", symtypes_file,
+ strerror(errno));
+ }
+
+ for (n = optind; n < argc; n++) {
+ Dwfl *dwfl;
+ int fd;
+
+ fd = open(argv[n], O_RDONLY);
+ if (fd == -1)
+ error("open failed for '%s': %s", argv[n],
+ strerror(errno));
+
+ symbol_read_symtab(fd);
+ kabi_read_rules(fd);
+
+ dwfl = dwfl_begin(&callbacks);
+ if (!dwfl)
+ error("dwfl_begin failed for '%s': %s", argv[n],
+ dwarf_errmsg(-1));
+
+ if (!dwfl_report_offline(dwfl, argv[n], argv[n], fd))
+ error("dwfl_report_offline failed for '%s': %s",
+ argv[n], dwarf_errmsg(-1));
+
+ dwfl_report_end(dwfl, NULL, NULL);
+
+ if (dwfl_getmodules(dwfl, &process_module, symfile, 0))
+ error("dwfl_getmodules failed for '%s'", argv[n]);
+
+ dwfl_end(dwfl);
+ kabi_free();
+ }
+
+ if (symfile)
+ check(fclose(symfile));
+
+ symbol_print_versions();
+ symbol_free();
+
+ return 0;
+}
diff --git a/scripts/gendwarfksyms/gendwarfksyms.h b/scripts/gendwarfksyms/gendwarfksyms.h
new file mode 100644
index 000000000000..197a1a8123c6
--- /dev/null
+++ b/scripts/gendwarfksyms/gendwarfksyms.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <hash.h>
+#include <hashtable.h>
+#include <xalloc.h>
+
+#ifndef __GENDWARFKSYMS_H
+#define __GENDWARFKSYMS_H
+
+/*
+ * Options -- in gendwarfksyms.c
+ */
+extern int debug;
+extern int dump_dies;
+extern int dump_die_map;
+extern int dump_types;
+extern int dump_versions;
+extern int stable;
+extern int symtypes;
+
+/*
+ * Output helpers
+ */
+#define __PREFIX "gendwarfksyms: "
+#define __println(prefix, format, ...) \
+ fprintf(stderr, prefix __PREFIX "%s: " format "\n", __func__, \
+ ##__VA_ARGS__)
+
+#define debug(format, ...) \
+ do { \
+ if (debug) \
+ __println("", format, ##__VA_ARGS__); \
+ } while (0)
+
+#define warn(format, ...) __println("warning: ", format, ##__VA_ARGS__)
+#define error(format, ...) \
+ do { \
+ __println("error: ", format, ##__VA_ARGS__); \
+ exit(1); \
+ } while (0)
+
+#define __die_debug(color, format, ...) \
+ do { \
+ if (dump_dies && dump_die_map) \
+ fprintf(stderr, \
+ "\033[" #color "m<" format ">\033[39m", \
+ __VA_ARGS__); \
+ } while (0)
+
+#define die_debug_r(format, ...) __die_debug(91, format, __VA_ARGS__)
+#define die_debug_g(format, ...) __die_debug(92, format, __VA_ARGS__)
+#define die_debug_b(format, ...) __die_debug(94, format, __VA_ARGS__)
+
+/*
+ * Error handling helpers
+ */
+#define __check(expr, test) \
+ ({ \
+ int __res = expr; \
+ if (test) \
+ error("`%s` failed: %d", #expr, __res); \
+ __res; \
+ })
+
+/* Error == non-zero values */
+#define check(expr) __check(expr, __res)
+/* Error == negative values */
+#define checkp(expr) __check(expr, __res < 0)
+
+/* Consistent aliases (DW_TAG_<type>_type) for DWARF tags */
+#define DW_TAG_enumerator_type DW_TAG_enumerator
+#define DW_TAG_formal_parameter_type DW_TAG_formal_parameter
+#define DW_TAG_member_type DW_TAG_member
+#define DW_TAG_template_type_parameter_type DW_TAG_template_type_parameter
+#define DW_TAG_typedef_type DW_TAG_typedef
+#define DW_TAG_variant_part_type DW_TAG_variant_part
+#define DW_TAG_variant_type DW_TAG_variant
+
+/*
+ * symbols.c
+ */
+
+/* See symbols.c:is_symbol_ptr */
+#define SYMBOL_PTR_PREFIX "__gendwarfksyms_ptr_"
+#define SYMBOL_PTR_PREFIX_LEN (sizeof(SYMBOL_PTR_PREFIX) - 1)
+
+static inline unsigned int addr_hash(uintptr_t addr)
+{
+ return hash_ptr((const void *)addr);
+}
+
+enum symbol_state {
+ SYMBOL_UNPROCESSED,
+ SYMBOL_MAPPED,
+ SYMBOL_PROCESSED
+};
+
+struct symbol_addr {
+ uint32_t section;
+ Elf64_Addr address;
+};
+
+struct symbol {
+ const char *name;
+ struct symbol_addr addr;
+ struct hlist_node addr_hash;
+ struct hlist_node name_hash;
+ enum symbol_state state;
+ uintptr_t die_addr;
+ uintptr_t ptr_die_addr;
+ unsigned long crc;
+};
+
+typedef void (*symbol_callback_t)(struct symbol *, void *arg);
+
+bool is_symbol_ptr(const char *name);
+void symbol_read_exports(FILE *file);
+void symbol_read_symtab(int fd);
+struct symbol *symbol_get(const char *name);
+void symbol_set_ptr(struct symbol *sym, Dwarf_Die *ptr);
+void symbol_set_die(struct symbol *sym, Dwarf_Die *die);
+void symbol_set_crc(struct symbol *sym, unsigned long crc);
+void symbol_for_each(symbol_callback_t func, void *arg);
+void symbol_print_versions(void);
+void symbol_free(void);
+
+/*
+ * die.c
+ */
+
+enum die_state {
+ DIE_INCOMPLETE,
+ DIE_UNEXPANDED,
+ DIE_COMPLETE,
+ DIE_SYMBOL,
+ DIE_LAST = DIE_SYMBOL
+};
+
+enum die_fragment_type {
+ FRAGMENT_EMPTY,
+ FRAGMENT_STRING,
+ FRAGMENT_LINEBREAK,
+ FRAGMENT_DIE
+};
+
+struct die_fragment {
+ enum die_fragment_type type;
+ union {
+ char *str;
+ int linebreak;
+ uintptr_t addr;
+ } data;
+ struct list_head list;
+};
+
+#define CASE_CONST_TO_STR(name) \
+ case name: \
+ return #name;
+
+static inline const char *die_state_name(enum die_state state)
+{
+ switch (state) {
+ CASE_CONST_TO_STR(DIE_INCOMPLETE)
+ CASE_CONST_TO_STR(DIE_UNEXPANDED)
+ CASE_CONST_TO_STR(DIE_COMPLETE)
+ CASE_CONST_TO_STR(DIE_SYMBOL)
+ }
+
+ error("unexpected die_state: %d", state);
+}
+
+struct die {
+ enum die_state state;
+ bool mapped;
+ char *fqn;
+ int tag;
+ uintptr_t addr;
+ struct list_head fragments;
+ struct hlist_node hash;
+};
+
+typedef void (*die_map_callback_t)(struct die *, void *arg);
+
+int __die_map_get(uintptr_t addr, enum die_state state, struct die **res);
+struct die *die_map_get(Dwarf_Die *die, enum die_state state);
+void die_map_add_string(struct die *pd, const char *str);
+void die_map_add_linebreak(struct die *pd, int linebreak);
+void die_map_for_each(die_map_callback_t func, void *arg);
+void die_map_add_die(struct die *pd, struct die *child);
+void die_map_free(void);
+
+/*
+ * cache.c
+ */
+
+#define CACHE_HASH_BITS 10
+
+/* A cache for addresses we've already seen. */
+struct cache {
+ HASHTABLE_DECLARE(cache, 1 << CACHE_HASH_BITS);
+};
+
+void cache_set(struct cache *cache, unsigned long key, int value);
+int cache_get(struct cache *cache, unsigned long key);
+void cache_init(struct cache *cache);
+void cache_free(struct cache *cache);
+
+static inline void __cache_mark_expanded(struct cache *cache, uintptr_t addr)
+{
+ cache_set(cache, addr, 1);
+}
+
+static inline bool __cache_was_expanded(struct cache *cache, uintptr_t addr)
+{
+ return cache_get(cache, addr) == 1;
+}
+
+static inline void cache_mark_expanded(struct cache *cache, void *addr)
+{
+ __cache_mark_expanded(cache, (uintptr_t)addr);
+}
+
+static inline bool cache_was_expanded(struct cache *cache, void *addr)
+{
+ return __cache_was_expanded(cache, (uintptr_t)addr);
+}
+
+/*
+ * dwarf.c
+ */
+
+struct expansion_state {
+ bool expand;
+ const char *current_fqn;
+};
+
+struct kabi_state {
+ int members;
+ Dwarf_Die placeholder;
+ const char *orig_name;
+};
+
+struct state {
+ struct symbol *sym;
+ Dwarf_Die die;
+
+ /* List expansion */
+ bool first_list_item;
+
+ /* Structure expansion */
+ struct expansion_state expand;
+ struct cache expansion_cache;
+
+ /* Reserved or ignored members */
+ struct kabi_state kabi;
+};
+
+typedef int (*die_callback_t)(struct state *state, struct die *cache,
+ Dwarf_Die *die);
+typedef bool (*die_match_callback_t)(Dwarf_Die *die);
+bool match_all(Dwarf_Die *die);
+
+int process_die_container(struct state *state, struct die *cache,
+ Dwarf_Die *die, die_callback_t func,
+ die_match_callback_t match);
+
+void process_cu(Dwarf_Die *cudie);
+
+/*
+ * types.c
+ */
+
+void generate_symtypes_and_versions(FILE *file);
+
+/*
+ * kabi.c
+ */
+
+bool kabi_is_enumerator_ignored(const char *fqn, const char *field);
+bool kabi_get_enumerator_value(const char *fqn, const char *field,
+ unsigned long *value);
+bool kabi_is_declonly(const char *fqn);
+
+void kabi_read_rules(int fd);
+void kabi_free(void);
+
+#endif /* __GENDWARFKSYMS_H */
diff --git a/scripts/gendwarfksyms/kabi.c b/scripts/gendwarfksyms/kabi.c
new file mode 100644
index 000000000000..66f01fcd1607
--- /dev/null
+++ b/scripts/gendwarfksyms/kabi.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+
+#include "gendwarfksyms.h"
+
+#define KABI_RULE_SECTION ".discard.gendwarfksyms.kabi_rules"
+#define KABI_RULE_VERSION "1"
+
+/*
+ * The rule section consists of four null-terminated strings per
+ * entry:
+ *
+ * 1. version
+ * Entry format version. Must match KABI_RULE_VERSION.
+ *
+ * 2. type
+ * Type of the kABI rule. Must be one of the tags defined below.
+ *
+ * 3. target
+ * Rule-dependent target, typically the fully qualified name of
+ * the target DIE.
+ *
+ * 4. value
+ * Rule-dependent value.
+ */
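+/*
+ * For example, a single entry with an empty value would be laid out as four
+ * consecutive strings (the target here is just a placeholder):
+ *
+ *   "1\0" "declonly\0" "<target fqn>\0" "\0"
+ */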
+#define KABI_RULE_MIN_ENTRY_SIZE \
+ (/* version\0 */ 2 + /* type\0 */ 2 + /* target\0 */ 1 + \
+ /* value\0 */ 1)
+#define KABI_RULE_EMPTY_VALUE ""
+
+/*
+ * Rule: declonly
+ * - For the struct/enum/union in the target field, treat it as a
+ * declaration only even if a definition is available.
+ */
+#define KABI_RULE_TAG_DECLONLY "declonly"
+
+/*
+ * Rule: enumerator_ignore
+ * - For the enum_field in the target field, ignore the enumerator.
+ */
+#define KABI_RULE_TAG_ENUMERATOR_IGNORE "enumerator_ignore"
+
+/*
+ * Rule: enumerator_value
+ * - For the fqn_field in the target field, set the value to the
+ * unsigned integer in the value field.
+ */
+#define KABI_RULE_TAG_ENUMERATOR_VALUE "enumerator_value"
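+
+/*
+ * For the enumerator rules above, the target is the fully qualified name of
+ * the enum and the enumerator name separated by a single space (see
+ * get_enumerator_target()); for enumerator_value, the value field holds the
+ * replacement as a decimal unsigned integer.
+ */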
+
+enum kabi_rule_type {
+ KABI_RULE_TYPE_UNKNOWN,
+ KABI_RULE_TYPE_DECLONLY,
+ KABI_RULE_TYPE_ENUMERATOR_IGNORE,
+ KABI_RULE_TYPE_ENUMERATOR_VALUE,
+};
+
+#define RULE_HASH_BITS 7
+
+struct rule {
+ enum kabi_rule_type type;
+ const char *target;
+ const char *value;
+ struct hlist_node hash;
+};
+
+/* { type, target } -> struct rule */
+static HASHTABLE_DEFINE(rules, 1 << RULE_HASH_BITS);
+
+static inline unsigned int rule_values_hash(enum kabi_rule_type type,
+ const char *target)
+{
+ return hash_32(type) ^ hash_str(target);
+}
+
+static inline unsigned int rule_hash(const struct rule *rule)
+{
+ return rule_values_hash(rule->type, rule->target);
+}
+
+static inline const char *get_rule_field(const char **pos, ssize_t *left)
+{
+ const char *start = *pos;
+ size_t len;
+
+ if (*left <= 0)
+ error("unexpected end of kABI rules");
+
+ len = strnlen(start, *left) + 1;
+ *pos += len;
+ *left -= len;
+
+ return start;
+}
+
+void kabi_read_rules(int fd)
+{
+ GElf_Shdr shdr_mem;
+ GElf_Shdr *shdr;
+ Elf_Data *rule_data = NULL;
+ Elf_Scn *scn;
+ Elf *elf;
+ size_t shstrndx;
+ const char *rule_str;
+ ssize_t left;
+ int i;
+
+ const struct {
+ enum kabi_rule_type type;
+ const char *tag;
+ } rule_types[] = {
+ {
+ .type = KABI_RULE_TYPE_DECLONLY,
+ .tag = KABI_RULE_TAG_DECLONLY,
+ },
+ {
+ .type = KABI_RULE_TYPE_ENUMERATOR_IGNORE,
+ .tag = KABI_RULE_TAG_ENUMERATOR_IGNORE,
+ },
+ {
+ .type = KABI_RULE_TYPE_ENUMERATOR_VALUE,
+ .tag = KABI_RULE_TAG_ENUMERATOR_VALUE,
+ },
+ };
+
+ if (!stable)
+ return;
+
+ if (elf_version(EV_CURRENT) != EV_CURRENT)
+ error("elf_version failed: %s", elf_errmsg(-1));
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf)
+ error("elf_begin failed: %s", elf_errmsg(-1));
+
+ if (elf_getshdrstrndx(elf, &shstrndx) < 0)
+ error("elf_getshdrstrndx failed: %s", elf_errmsg(-1));
+
+ scn = elf_nextscn(elf, NULL);
+
+ while (scn) {
+ const char *sname;
+
+ shdr = gelf_getshdr(scn, &shdr_mem);
+ if (!shdr)
+ error("gelf_getshdr failed: %s", elf_errmsg(-1));
+
+ sname = elf_strptr(elf, shstrndx, shdr->sh_name);
+ if (!sname)
+ error("elf_strptr failed: %s", elf_errmsg(-1));
+
+ if (!strcmp(sname, KABI_RULE_SECTION)) {
+ rule_data = elf_getdata(scn, NULL);
+ if (!rule_data)
+ error("elf_getdata failed: %s", elf_errmsg(-1));
+ break;
+ }
+
+ scn = elf_nextscn(elf, scn);
+ }
+
+ if (!rule_data) {
+ debug("kABI rules not found");
+ check(elf_end(elf));
+ return;
+ }
+
+ rule_str = rule_data->d_buf;
+ left = shdr->sh_size;
+
+ if (left < KABI_RULE_MIN_ENTRY_SIZE)
+ error("kABI rule section too small: %zd bytes", left);
+
+ if (rule_str[left - 1] != '\0')
+ error("kABI rules are not null-terminated");
+
+ while (left > KABI_RULE_MIN_ENTRY_SIZE) {
+ enum kabi_rule_type type = KABI_RULE_TYPE_UNKNOWN;
+ const char *field;
+ struct rule *rule;
+
+ /* version */
+ field = get_rule_field(&rule_str, &left);
+
+ if (strcmp(field, KABI_RULE_VERSION))
+ error("unsupported kABI rule version: '%s'", field);
+
+ /* type */
+ field = get_rule_field(&rule_str, &left);
+
+ for (i = 0; i < ARRAY_SIZE(rule_types); i++) {
+ if (!strcmp(field, rule_types[i].tag)) {
+ type = rule_types[i].type;
+ break;
+ }
+ }
+
+ if (type == KABI_RULE_TYPE_UNKNOWN)
+ error("unsupported kABI rule type: '%s'", field);
+
+ rule = xmalloc(sizeof(struct rule));
+
+ rule->type = type;
+ rule->target = xstrdup(get_rule_field(&rule_str, &left));
+ rule->value = xstrdup(get_rule_field(&rule_str, &left));
+
+ hash_add(rules, &rule->hash, rule_hash(rule));
+
+ debug("kABI rule: type: '%s', target: '%s', value: '%s'", field,
+ rule->target, rule->value);
+ }
+
+ if (left > 0)
+ warn("unexpected data at the end of the kABI rules section");
+
+ check(elf_end(elf));
+}
+
+bool kabi_is_declonly(const char *fqn)
+{
+ struct rule *rule;
+
+ if (!stable)
+ return false;
+ if (!fqn || !*fqn)
+ return false;
+
+ hash_for_each_possible(rules, rule, hash,
+ rule_values_hash(KABI_RULE_TYPE_DECLONLY, fqn)) {
+ if (rule->type == KABI_RULE_TYPE_DECLONLY &&
+ !strcmp(fqn, rule->target))
+ return true;
+ }
+
+ return false;
+}
+
+static char *get_enumerator_target(const char *fqn, const char *field)
+{
+ char *target = NULL;
+
+ if (asprintf(&target, "%s %s", fqn, field) < 0)
+ error("asprintf failed for '%s %s'", fqn, field);
+
+ return target;
+}
+
+static unsigned long get_ulong_value(const char *value)
+{
+ unsigned long result = 0;
+ char *endptr = NULL;
+
+ errno = 0;
+ result = strtoul(value, &endptr, 10);
+
+ if (errno || *endptr)
+ error("invalid unsigned value '%s'", value);
+
+ return result;
+}
+
+bool kabi_is_enumerator_ignored(const char *fqn, const char *field)
+{
+ bool match = false;
+ struct rule *rule;
+ char *target;
+
+ if (!stable)
+ return false;
+ if (!fqn || !*fqn || !field || !*field)
+ return false;
+
+ target = get_enumerator_target(fqn, field);
+
+ hash_for_each_possible(
+ rules, rule, hash,
+ rule_values_hash(KABI_RULE_TYPE_ENUMERATOR_IGNORE, target)) {
+ if (rule->type == KABI_RULE_TYPE_ENUMERATOR_IGNORE &&
+ !strcmp(target, rule->target)) {
+ match = true;
+ break;
+ }
+ }
+
+ free(target);
+ return match;
+}
+
+bool kabi_get_enumerator_value(const char *fqn, const char *field,
+ unsigned long *value)
+{
+ bool match = false;
+ struct rule *rule;
+ char *target;
+
+ if (!stable)
+ return false;
+ if (!fqn || !*fqn || !field || !*field)
+ return false;
+
+ target = get_enumerator_target(fqn, field);
+
+ hash_for_each_possible(rules, rule, hash,
+ rule_values_hash(KABI_RULE_TYPE_ENUMERATOR_VALUE,
+ target)) {
+ if (rule->type == KABI_RULE_TYPE_ENUMERATOR_VALUE &&
+ !strcmp(target, rule->target)) {
+ *value = get_ulong_value(rule->value);
+ match = true;
+ break;
+ }
+ }
+
+ free(target);
+ return match;
+}
+
+void kabi_free(void)
+{
+ struct hlist_node *tmp;
+ struct rule *rule;
+
+ hash_for_each_safe(rules, rule, tmp, hash) {
+ free((void *)rule->target);
+ free((void *)rule->value);
+ free(rule);
+ }
+
+ hash_init(rules);
+}
diff --git a/scripts/gendwarfksyms/symbols.c b/scripts/gendwarfksyms/symbols.c
new file mode 100644
index 000000000000..327f87389c34
--- /dev/null
+++ b/scripts/gendwarfksyms/symbols.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include "gendwarfksyms.h"
+
+#define SYMBOL_HASH_BITS 12
+
+/* struct symbol_addr -> struct symbol */
+static HASHTABLE_DEFINE(symbol_addrs, 1 << SYMBOL_HASH_BITS);
+/* name -> struct symbol */
+static HASHTABLE_DEFINE(symbol_names, 1 << SYMBOL_HASH_BITS);
+
+static inline unsigned int symbol_addr_hash(const struct symbol_addr *addr)
+{
+ return hash_32(addr->section ^ addr_hash(addr->address));
+}
+
+static unsigned int __for_each_addr(struct symbol *sym, symbol_callback_t func,
+ void *data)
+{
+ struct hlist_node *tmp;
+ struct symbol *match = NULL;
+ unsigned int processed = 0;
+
+ hash_for_each_possible_safe(symbol_addrs, match, tmp, addr_hash,
+ symbol_addr_hash(&sym->addr)) {
+ if (match == sym)
+ continue; /* Already processed */
+
+ if (match->addr.section == sym->addr.section &&
+ match->addr.address == sym->addr.address) {
+ func(match, data);
+ ++processed;
+ }
+ }
+
+ return processed;
+}
+
+/*
+ * For symbols without debugging information (e.g. symbols defined in other
+ * TUs), we also match __gendwarfksyms_ptr_<symbol_name> symbols, which the
+ * kernel uses to ensure type information is present in the TU that exports
+ * the symbol. A __gendwarfksyms_ptr pointer must have the same type as the
+ * exported symbol, e.g.:
+ *
+ * typeof(symname) *__gendwarfksyms_ptr_symname = &symname;
+ */
+bool is_symbol_ptr(const char *name)
+{
+ return name && !strncmp(name, SYMBOL_PTR_PREFIX, SYMBOL_PTR_PREFIX_LEN);
+}
+
+static unsigned int for_each(const char *name, symbol_callback_t func,
+ void *data)
+{
+ struct hlist_node *tmp;
+ struct symbol *match;
+
+ if (!name || !*name)
+ return 0;
+ if (is_symbol_ptr(name))
+ name += SYMBOL_PTR_PREFIX_LEN;
+
+ hash_for_each_possible_safe(symbol_names, match, tmp, name_hash,
+ hash_str(name)) {
+ if (strcmp(match->name, name))
+ continue;
+
+ /* Call func for the match, and all address matches */
+ if (func)
+ func(match, data);
+
+ if (match->addr.section != SHN_UNDEF)
+ return __for_each_addr(match, func, data) + 1;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void set_crc(struct symbol *sym, void *data)
+{
+ unsigned long *crc = data;
+
+ if (sym->state == SYMBOL_PROCESSED && sym->crc != *crc)
+ warn("overriding version for symbol %s (crc %lx vs. %lx)",
+ sym->name, sym->crc, *crc);
+
+ sym->state = SYMBOL_PROCESSED;
+ sym->crc = *crc;
+}
+
+void symbol_set_crc(struct symbol *sym, unsigned long crc)
+{
+ if (for_each(sym->name, set_crc, &crc) == 0)
+ error("no matching symbols: '%s'", sym->name);
+}
+
+static void set_ptr(struct symbol *sym, void *data)
+{
+ sym->ptr_die_addr = (uintptr_t)((Dwarf_Die *)data)->addr;
+}
+
+void symbol_set_ptr(struct symbol *sym, Dwarf_Die *ptr)
+{
+ if (for_each(sym->name, set_ptr, ptr) == 0)
+ error("no matching symbols: '%s'", sym->name);
+}
+
+static void set_die(struct symbol *sym, void *data)
+{
+ sym->die_addr = (uintptr_t)((Dwarf_Die *)data)->addr;
+ sym->state = SYMBOL_MAPPED;
+}
+
+void symbol_set_die(struct symbol *sym, Dwarf_Die *die)
+{
+ if (for_each(sym->name, set_die, die) == 0)
+ error("no matching symbols: '%s'", sym->name);
+}
+
+static bool is_exported(const char *name)
+{
+ return for_each(name, NULL, NULL) > 0;
+}
+
+void symbol_read_exports(FILE *file)
+{
+ struct symbol *sym;
+ char *line = NULL;
+ char *name = NULL;
+ size_t size = 0;
+ int nsym = 0;
+
+ while (getline(&line, &size, file) > 0) {
+ if (sscanf(line, "%ms\n", &name) != 1)
+ error("malformed input line: %s", line);
+
+ if (is_exported(name)) {
+ /* Ignore duplicates */
+ free(name);
+ continue;
+ }
+
+ sym = xcalloc(1, sizeof(struct symbol));
+ sym->name = name;
+ sym->addr.section = SHN_UNDEF;
+ sym->state = SYMBOL_UNPROCESSED;
+
+ hash_add(symbol_names, &sym->name_hash, hash_str(sym->name));
+ ++nsym;
+
+ debug("%s", sym->name);
+ }
+
+ free(line);
+ debug("%d exported symbols", nsym);
+}
+
+static void get_symbol(struct symbol *sym, void *arg)
+{
+ struct symbol **res = arg;
+
+ if (sym->state == SYMBOL_UNPROCESSED)
+ *res = sym;
+}
+
+struct symbol *symbol_get(const char *name)
+{
+ struct symbol *sym = NULL;
+
+ for_each(name, get_symbol, &sym);
+ return sym;
+}
+
+void symbol_for_each(symbol_callback_t func, void *arg)
+{
+ struct hlist_node *tmp;
+ struct symbol *sym;
+
+ hash_for_each_safe(symbol_names, sym, tmp, name_hash) {
+ func(sym, arg);
+ }
+}
+
+typedef void (*elf_symbol_callback_t)(const char *name, GElf_Sym *sym,
+ Elf32_Word xndx, void *arg);
+
+static void elf_for_each_global(int fd, elf_symbol_callback_t func, void *arg)
+{
+ size_t sym_size;
+ GElf_Shdr shdr_mem;
+ GElf_Shdr *shdr;
+ Elf_Data *xndx_data = NULL;
+ Elf_Scn *scn;
+ Elf *elf;
+
+ if (elf_version(EV_CURRENT) != EV_CURRENT)
+ error("elf_version failed: %s", elf_errmsg(-1));
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf)
+ error("elf_begin failed: %s", elf_errmsg(-1));
+
+ scn = elf_nextscn(elf, NULL);
+
+ while (scn) {
+ shdr = gelf_getshdr(scn, &shdr_mem);
+ if (!shdr)
+ error("gelf_getshdr failed: %s", elf_errmsg(-1));
+
+ if (shdr->sh_type == SHT_SYMTAB_SHNDX) {
+ xndx_data = elf_getdata(scn, NULL);
+ if (!xndx_data)
+ error("elf_getdata failed: %s", elf_errmsg(-1));
+ break;
+ }
+
+ scn = elf_nextscn(elf, scn);
+ }
+
+ sym_size = gelf_fsize(elf, ELF_T_SYM, 1, EV_CURRENT);
+ scn = elf_nextscn(elf, NULL);
+
+ while (scn) {
+ shdr = gelf_getshdr(scn, &shdr_mem);
+ if (!shdr)
+ error("gelf_getshdr failed: %s", elf_errmsg(-1));
+
+ if (shdr->sh_type == SHT_SYMTAB) {
+ unsigned int nsyms;
+ unsigned int n;
+ Elf_Data *data = elf_getdata(scn, NULL);
+
+ if (!data)
+ error("elf_getdata failed: %s", elf_errmsg(-1));
+
+ if (shdr->sh_entsize != sym_size)
+ error("expected sh_entsize (%lu) to be %zu",
+ shdr->sh_entsize, sym_size);
+
+ nsyms = shdr->sh_size / shdr->sh_entsize;
+
+ for (n = 1; n < nsyms; ++n) {
+ const char *name = NULL;
+ Elf32_Word xndx = 0;
+ GElf_Sym sym_mem;
+ GElf_Sym *sym;
+
+ sym = gelf_getsymshndx(data, xndx_data, n,
+ &sym_mem, &xndx);
+ if (!sym)
+ error("gelf_getsymshndx failed: %s",
+ elf_errmsg(-1));
+
+ if (GELF_ST_BIND(sym->st_info) == STB_LOCAL)
+ continue;
+
+ if (sym->st_shndx != SHN_XINDEX)
+ xndx = sym->st_shndx;
+
+ name = elf_strptr(elf, shdr->sh_link,
+ sym->st_name);
+ if (!name)
+ error("elf_strptr failed: %s",
+ elf_errmsg(-1));
+
+ /* Skip empty symbol names */
+ if (*name)
+ func(name, sym, xndx, arg);
+ }
+ }
+
+ scn = elf_nextscn(elf, scn);
+ }
+
+ check(elf_end(elf));
+}
+
+static void set_symbol_addr(struct symbol *sym, void *arg)
+{
+ struct symbol_addr *addr = arg;
+
+ if (sym->addr.section == SHN_UNDEF) {
+ sym->addr = *addr;
+ hash_add(symbol_addrs, &sym->addr_hash,
+ symbol_addr_hash(&sym->addr));
+
+ debug("%s -> { %u, %lx }", sym->name, sym->addr.section,
+ sym->addr.address);
+ } else if (sym->addr.section != addr->section ||
+ sym->addr.address != addr->address) {
+ warn("multiple addresses for symbol %s?", sym->name);
+ }
+}
+
+static void elf_set_symbol_addr(const char *name, GElf_Sym *sym,
+ Elf32_Word xndx, void *arg)
+{
+ struct symbol_addr addr = { .section = xndx, .address = sym->st_value };
+
+ /* Set addresses for exported symbols */
+ if (addr.section != SHN_UNDEF)
+ for_each(name, set_symbol_addr, &addr);
+}
+
+void symbol_read_symtab(int fd)
+{
+ elf_for_each_global(fd, elf_set_symbol_addr, NULL);
+}
+
+void symbol_print_versions(void)
+{
+ struct hlist_node *tmp;
+ struct symbol *sym;
+
+ hash_for_each_safe(symbol_names, sym, tmp, name_hash) {
+ if (sym->state != SYMBOL_PROCESSED)
+ warn("no information for symbol %s", sym->name);
+
+ printf("#SYMVER %s 0x%08lx\n", sym->name, sym->crc);
+ }
+}
+
+void symbol_free(void)
+{
+ struct hlist_node *tmp;
+ struct symbol *sym;
+
+ hash_for_each_safe(symbol_names, sym, tmp, name_hash) {
+ free((void *)sym->name);
+ free(sym);
+ }
+
+ hash_init(symbol_addrs);
+ hash_init(symbol_names);
+}
diff --git a/scripts/gendwarfksyms/types.c b/scripts/gendwarfksyms/types.c
new file mode 100644
index 000000000000..6c03265f4d10
--- /dev/null
+++ b/scripts/gendwarfksyms/types.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#define _GNU_SOURCE
+#include <inttypes.h>
+#include <stdio.h>
+#include <zlib.h>
+
+#include "gendwarfksyms.h"
+
+static struct cache expansion_cache;
+
+/*
+ * A simple linked list of shared or owned strings to avoid copying strings
+ * around when not necessary.
+ */
+struct type_list_entry {
+ const char *str;
+ void *owned;
+ struct list_head list;
+};
+
+static void type_list_free(struct list_head *list)
+{
+ struct type_list_entry *entry;
+ struct type_list_entry *tmp;
+
+ list_for_each_entry_safe(entry, tmp, list, list) {
+ if (entry->owned)
+ free(entry->owned);
+ free(entry);
+ }
+
+ INIT_LIST_HEAD(list);
+}
+
+static int type_list_append(struct list_head *list, const char *s, void *owned)
+{
+ struct type_list_entry *entry;
+
+ if (!s)
+ return 0;
+
+ entry = xmalloc(sizeof(struct type_list_entry));
+ entry->str = s;
+ entry->owned = owned;
+ list_add_tail(&entry->list, list);
+
+ return strlen(entry->str);
+}
+
+static void type_list_write(struct list_head *list, FILE *file)
+{
+ struct type_list_entry *entry;
+
+ list_for_each_entry(entry, list, list) {
+ if (entry->str)
+ checkp(fputs(entry->str, file));
+ }
+}
+
+/*
+ * An expanded type string in symtypes format.
+ */
+struct type_expansion {
+ char *name;
+ size_t len;
+ struct list_head expanded;
+ struct hlist_node hash;
+};
+
+static void type_expansion_init(struct type_expansion *type)
+{
+ type->name = NULL;
+ type->len = 0;
+ INIT_LIST_HEAD(&type->expanded);
+}
+
+static inline void type_expansion_free(struct type_expansion *type)
+{
+ free(type->name);
+ type->name = NULL;
+ type->len = 0;
+ type_list_free(&type->expanded);
+}
+
+static void type_expansion_append(struct type_expansion *type, const char *s,
+ void *owned)
+{
+ type->len += type_list_append(&type->expanded, s, owned);
+}
+
+/*
+ * type_map -- the longest expansions for each type.
+ *
+ * const char *name -> struct type_expansion *
+ */
+#define TYPE_HASH_BITS 12
+static HASHTABLE_DEFINE(type_map, 1 << TYPE_HASH_BITS);
+
+static int type_map_get(const char *name, struct type_expansion **res)
+{
+ struct type_expansion *e;
+
+ hash_for_each_possible(type_map, e, hash, hash_str(name)) {
+ if (!strcmp(name, e->name)) {
+ *res = e;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void type_map_add(const char *name, struct type_expansion *type)
+{
+ struct type_expansion *e;
+
+ if (type_map_get(name, &e)) {
+ e = xmalloc(sizeof(struct type_expansion));
+ type_expansion_init(e);
+ e->name = xstrdup(name);
+
+ hash_add(type_map, &e->hash, hash_str(e->name));
+
+ if (dump_types)
+ debug("adding %s", e->name);
+ } else {
+ /* Use the longest available expansion */
+ if (type->len <= e->len)
+ return;
+
+ type_list_free(&e->expanded);
+
+ if (dump_types)
+ debug("replacing %s", e->name);
+ }
+
+ /* Take ownership of type->expanded */
+ list_replace_init(&type->expanded, &e->expanded);
+ e->len = type->len;
+
+ if (dump_types) {
+ checkp(fputs(e->name, stderr));
+ checkp(fputs(" ", stderr));
+ type_list_write(&e->expanded, stderr);
+ checkp(fputs("\n", stderr));
+ }
+}
+
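+/*
+ * Write the type_map contents in symtypes format: for each type, the
+ * reference name, a single space, and the expanded type string, followed
+ * by a newline.
+ */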
+static void type_map_write(FILE *file)
+{
+ struct type_expansion *e;
+ struct hlist_node *tmp;
+
+ if (!file)
+ return;
+
+ hash_for_each_safe(type_map, e, tmp, hash) {
+ checkp(fputs(e->name, file));
+ checkp(fputs(" ", file));
+ type_list_write(&e->expanded, file);
+ checkp(fputs("\n", file));
+ }
+}
+
+static void type_map_free(void)
+{
+ struct type_expansion *e;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(type_map, e, tmp, hash) {
+ type_expansion_free(e);
+ free(e);
+ }
+
+ hash_init(type_map);
+}
+
+/*
+ * CRC for a type, with an optional fully expanded type string for
+ * debugging.
+ */
+struct version {
+ struct type_expansion type;
+ unsigned long crc;
+};
+
+static void version_init(struct version *version)
+{
+ version->crc = crc32(0, NULL, 0);
+ type_expansion_init(&version->type);
+}
+
+static void version_free(struct version *version)
+{
+ type_expansion_free(&version->type);
+}
+
+static void version_add(struct version *version, const char *s)
+{
+ version->crc = crc32(version->crc, (void *)s, strlen(s));
+ if (dump_versions)
+ type_expansion_append(&version->type, s, NULL);
+}
+
+/*
+ * Type reference format: <prefix>#<name>, where prefix:
+ * s -> structure
+ * u -> union
+ * e -> enum
+ * t -> typedef
+ *
+ * Names with spaces are additionally wrapped in single quotes.
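+ *
+ * For example (names are illustrative only): s#task_struct, u#sigval,
+ * e#pid_type.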
+ */
+static inline bool is_type_prefix(const char *s)
+{
+ return (s[0] == 's' || s[0] == 'u' || s[0] == 'e' || s[0] == 't') &&
+ s[1] == '#';
+}
+
+static char get_type_prefix(int tag)
+{
+ switch (tag) {
+ case DW_TAG_class_type:
+ case DW_TAG_structure_type:
+ return 's';
+ case DW_TAG_union_type:
+ return 'u';
+ case DW_TAG_enumeration_type:
+ return 'e';
+ case DW_TAG_typedef_type:
+ return 't';
+ default:
+ return 0;
+ }
+}
+
+static char *get_type_name(struct die *cache)
+{
+ const char *quote;
+ char prefix;
+ char *name;
+
+ if (cache->state == DIE_INCOMPLETE) {
+ warn("found incomplete cache entry: %p", cache);
+ return NULL;
+ }
+ if (cache->state == DIE_SYMBOL)
+ return NULL;
+ if (!cache->fqn || !*cache->fqn)
+ return NULL;
+
+ prefix = get_type_prefix(cache->tag);
+ if (!prefix)
+ return NULL;
+
+ /* Wrap names with spaces in single quotes */
+ quote = strstr(cache->fqn, " ") ? "'" : "";
+
+ /* <prefix>#<type_name>\0 */
+ if (asprintf(&name, "%c#%s%s%s", prefix, quote, cache->fqn, quote) < 0)
+ error("asprintf failed for '%s'", cache->fqn);
+
+ return name;
+}
+
+static void __calculate_version(struct version *version, struct list_head *list)
+{
+ struct type_list_entry *entry;
+ struct type_expansion *e;
+
+ /* Calculate a CRC over an expanded type string */
+ list_for_each_entry(entry, list, list) {
+ if (is_type_prefix(entry->str)) {
+ check(type_map_get(entry->str, &e));
+
+ /*
+ * It's sufficient to expand each type reference just
+ * once to detect changes.
+ */
+ if (cache_was_expanded(&expansion_cache, e)) {
+ version_add(version, entry->str);
+ } else {
+ cache_mark_expanded(&expansion_cache, e);
+ __calculate_version(version, &e->expanded);
+ }
+ } else {
+ version_add(version, entry->str);
+ }
+ }
+}
+
+static void calculate_version(struct version *version, struct list_head *list)
+{
+ version_init(version);
+ __calculate_version(version, list);
+ cache_free(&expansion_cache);
+}
+
+static void __type_expand(struct die *cache, struct type_expansion *type,
+ bool recursive);
+
+static void type_expand_child(struct die *cache, struct type_expansion *type,
+ bool recursive)
+{
+ struct type_expansion child;
+ char *name;
+
+ name = get_type_name(cache);
+ if (!name) {
+ __type_expand(cache, type, recursive);
+ return;
+ }
+
+ if (recursive && !__cache_was_expanded(&expansion_cache, cache->addr)) {
+ __cache_mark_expanded(&expansion_cache, cache->addr);
+ type_expansion_init(&child);
+ __type_expand(cache, &child, true);
+ type_map_add(name, &child);
+ type_expansion_free(&child);
+ }
+
+ type_expansion_append(type, name, name);
+}
+
+static void __type_expand(struct die *cache, struct type_expansion *type,
+ bool recursive)
+{
+ struct die_fragment *df;
+ struct die *child;
+
+ list_for_each_entry(df, &cache->fragments, list) {
+ switch (df->type) {
+ case FRAGMENT_STRING:
+ type_expansion_append(type, df->data.str, NULL);
+ break;
+ case FRAGMENT_DIE:
+ /* Use a complete die_map expansion if available */
+ if (__die_map_get(df->data.addr, DIE_COMPLETE,
+ &child) &&
+ __die_map_get(df->data.addr, DIE_UNEXPANDED,
+ &child))
+ error("unknown child: %" PRIxPTR,
+ df->data.addr);
+
+ type_expand_child(child, type, recursive);
+ break;
+ case FRAGMENT_LINEBREAK:
+ /*
+ * Keep whitespace in the symtypes format, but avoid
+ * repeated spaces.
+ */
+ if (list_is_last(&df->list, &cache->fragments) ||
+ list_next_entry(df, list)->type !=
+ FRAGMENT_LINEBREAK)
+ type_expansion_append(type, " ", NULL);
+ break;
+ default:
+ error("empty die_fragment in %p", cache);
+ }
+ }
+}
+
+static void type_expand(struct die *cache, struct type_expansion *type,
+ bool recursive)
+{
+ type_expansion_init(type);
+ __type_expand(cache, type, recursive);
+ cache_free(&expansion_cache);
+}
+
+static void expand_type(struct die *cache, void *arg)
+{
+ struct type_expansion type;
+ char *name;
+
+ if (cache->mapped)
+ return;
+
+ cache->mapped = true;
+
+ /*
+ * Skip unexpanded die_map entries if there's a complete
+ * expansion available for this DIE.
+ */
+ if (cache->state == DIE_UNEXPANDED &&
+ !__die_map_get(cache->addr, DIE_COMPLETE, &cache)) {
+ if (cache->mapped)
+ return;
+
+ cache->mapped = true;
+ }
+
+ name = get_type_name(cache);
+ if (!name)
+ return;
+
+ debug("%s", name);
+ type_expand(cache, &type, true);
+ type_map_add(name, &type);
+
+ type_expansion_free(&type);
+ free(name);
+}
+
+static void expand_symbol(struct symbol *sym, void *arg)
+{
+ struct type_expansion type;
+ struct version version;
+ struct die *cache;
+
+ /*
+ * No need to expand again unless we want a symtypes file entry
+ * for the symbol. Note that this means `sym` has the same address
+ * as another symbol that was already processed.
+ */
+ if (!symtypes && sym->state == SYMBOL_PROCESSED)
+ return;
+
+ if (__die_map_get(sym->die_addr, DIE_SYMBOL, &cache))
+ return; /* We'll warn about missing CRCs later. */
+
+ type_expand(cache, &type, false);
+
+ /* If the symbol already has a version, don't calculate it again. */
+ if (sym->state != SYMBOL_PROCESSED) {
+ calculate_version(&version, &type.expanded);
+ symbol_set_crc(sym, version.crc);
+ debug("%s = %lx", sym->name, version.crc);
+
+ if (dump_versions) {
+ checkp(fputs(sym->name, stderr));
+ checkp(fputs(" ", stderr));
+ type_list_write(&version.type.expanded, stderr);
+ checkp(fputs("\n", stderr));
+ }
+
+ version_free(&version);
+ }
+
+ /* These aren't needed in type_map unless we want a symtypes file. */
+ if (symtypes)
+ type_map_add(sym->name, &type);
+
+ type_expansion_free(&type);
+}
+
+void generate_symtypes_and_versions(FILE *file)
+{
+ cache_init(&expansion_cache);
+
+ /*
+ * die_map processing:
+ *
+ * 1. die_map contains all types referenced in exported symbol
+ * signatures, but can contain duplicates just like the original
+ * DWARF, and some references may not be fully expanded depending
+ * on how far we processed the DIE tree for that specific symbol.
+ *
+ * For each die_map entry, find the longest available expansion,
+ * and add it to type_map.
+ */
+ die_map_for_each(expand_type, NULL);
+
+ /*
+ * 2. For each exported symbol, expand the die_map type, and use
+ * type_map expansions to calculate a symbol version from the
+ * fully expanded type string.
+ */
+ symbol_for_each(expand_symbol, NULL);
+
+ /*
+ * 3. If a symtypes file is requested, write type_map contents to
+ * the file.
+ */
+ type_map_write(file);
+ type_map_free();
+}
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 0d00ac3723b5..4fd6b6ab3e32 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -165,6 +165,18 @@ impl KernelConfig {
let option = "CONFIG_".to_owned() + option;
self.0.contains_key(&option)
}
+
+ /// Is the rustc version at least `major.minor.patch`?
+ fn rustc_version_atleast(&self, major: u32, minor: u32, patch: u32) -> bool {
+ let check_version = 100000 * major + 100 * minor + patch;
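+ // For example, rustc 1.86.0 is encoded as 108600 under this scheme.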
+ let actual_version = self
+ .0
+ .get("CONFIG_RUSTC_VERSION")
+ .unwrap()
+ .parse::<u32>()
+ .unwrap();
+ check_version <= actual_version
+ }
}
fn main() {
@@ -182,6 +194,9 @@ fn main() {
}
} else if cfg.has("X86_64") {
ts.push("arch", "x86_64");
+ if cfg.rustc_version_atleast(1, 86, 0) {
+ ts.push("rustc-abi", "x86-softfloat");
+ }
ts.push(
"data-layout",
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
@@ -215,6 +230,9 @@ fn main() {
panic!("32-bit x86 only works under UML");
}
ts.push("arch", "x86");
+ if cfg.rustc_version_atleast(1, 86, 0) {
+ ts.push("rustc-abi", "x86-softfloat");
+ }
ts.push(
"data-layout",
"e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
diff --git a/scripts/genksyms/Makefile b/scripts/genksyms/Makefile
index 312edccda736..4350311fb7b3 100644
--- a/scripts/genksyms/Makefile
+++ b/scripts/genksyms/Makefile
@@ -4,24 +4,6 @@ hostprogs-always-y += genksyms
genksyms-objs := genksyms.o parse.tab.o lex.lex.o
-# FIXME: fix the ambiguous grammar in parse.y and delete this hack
-#
-# Suppress shift/reduce, reduce/reduce conflicts warnings
-# unless W=1 is specified.
-#
-# Just in case, run "$(YACC) --version" without suppressing stderr
-# so that 'bison: not found' will be displayed if it is missing.
-ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
-
-quiet_cmd_bison_no_warn = $(quiet_cmd_bison)
- cmd_bison_no_warn = $(YACC) --version >/dev/null; \
- $(cmd_bison) 2>/dev/null
-
-$(obj)/pars%.tab.c $(obj)/pars%.tab.h: $(src)/pars%.y FORCE
- $(call if_changed,bison_no_warn)
-
-endif
-
# -I needed for generated C source to include headers in source tree
HOSTCFLAGS_parse.tab.o := -I $(src)
HOSTCFLAGS_lex.lex.o := -I $(src)
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 07f9b8cfb233..8b0d7ac73dbb 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -12,18 +12,19 @@
#include <stdio.h>
#include <string.h>
+#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <stdarg.h>
#include <getopt.h>
+#include <hashtable.h>
+
#include "genksyms.h"
/*----------------------------------------------------------------------*/
-#define HASH_BUCKETS 4096
-
-static struct symbol *symtab[HASH_BUCKETS];
+static HASHTABLE_DEFINE(symbol_hashtable, 1U << 12);
static FILE *debugfile;
int cur_line = 1;
@@ -60,7 +61,7 @@ static void print_type_name(enum symbol_type type, const char *name);
/*----------------------------------------------------------------------*/
-static const unsigned int crctab32[] = {
+static const uint32_t crctab32[] = {
0x00000000U, 0x77073096U, 0xee0e612cU, 0x990951baU, 0x076dc419U,
0x706af48fU, 0xe963a535U, 0x9e6495a3U, 0x0edb8832U, 0x79dcb8a4U,
0xe0d5e91eU, 0x97d2d988U, 0x09b64c2bU, 0x7eb17cbdU, 0xe7b82d07U,
@@ -115,19 +116,19 @@ static const unsigned int crctab32[] = {
0x2d02ef8dU
};
-static unsigned long partial_crc32_one(unsigned char c, unsigned long crc)
+static uint32_t partial_crc32_one(uint8_t c, uint32_t crc)
{
return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8);
}
-static unsigned long partial_crc32(const char *s, unsigned long crc)
+static uint32_t partial_crc32(const char *s, uint32_t crc)
{
while (*s)
crc = partial_crc32_one(*s++, crc);
return crc;
}
-static unsigned long crc32(const char *s)
+static uint32_t crc32(const char *s)
{
return partial_crc32(s, 0xffffffff) ^ 0xffffffff;
}
@@ -151,14 +152,14 @@ static enum symbol_type map_to_ns(enum symbol_type t)
struct symbol *find_symbol(const char *name, enum symbol_type ns, int exact)
{
- unsigned long h = crc32(name) % HASH_BUCKETS;
struct symbol *sym;
- for (sym = symtab[h]; sym; sym = sym->hash_next)
+ hash_for_each_possible(symbol_hashtable, sym, hnode, crc32(name)) {
if (map_to_ns(sym->type) == map_to_ns(ns) &&
strcmp(name, sym->name) == 0 &&
sym->is_declared)
break;
+ }
if (exact && sym && sym->type != ns)
return NULL;
@@ -224,64 +225,56 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
return NULL;
}
- h = crc32(name) % HASH_BUCKETS;
- for (sym = symtab[h]; sym; sym = sym->hash_next) {
- if (map_to_ns(sym->type) == map_to_ns(type) &&
- strcmp(name, sym->name) == 0) {
- if (is_reference)
- /* fall through */ ;
- else if (sym->type == type &&
- equal_list(sym->defn, defn)) {
- if (!sym->is_declared && sym->is_override) {
- print_location();
- print_type_name(type, name);
- fprintf(stderr, " modversion is "
- "unchanged\n");
- }
- sym->is_declared = 1;
- return sym;
- } else if (!sym->is_declared) {
- if (sym->is_override && flag_preserve) {
- print_location();
- fprintf(stderr, "ignoring ");
- print_type_name(type, name);
- fprintf(stderr, " modversion change\n");
- sym->is_declared = 1;
- return sym;
- } else {
- status = is_unknown_symbol(sym) ?
- STATUS_DEFINED : STATUS_MODIFIED;
- }
- } else {
- error_with_pos("redefinition of %s", name);
- return sym;
+ h = crc32(name);
+ hash_for_each_possible(symbol_hashtable, sym, hnode, h) {
+ if (map_to_ns(sym->type) != map_to_ns(type) ||
+ strcmp(name, sym->name))
+ continue;
+
+ if (is_reference) {
+ break;
+ } else if (sym->type == type && equal_list(sym->defn, defn)) {
+ if (!sym->is_declared && sym->is_override) {
+ print_location();
+ print_type_name(type, name);
+ fprintf(stderr, " modversion is unchanged\n");
}
+ sym->is_declared = 1;
+ } else if (sym->is_declared) {
+ error_with_pos("redefinition of %s", name);
+ } else if (sym->is_override && flag_preserve) {
+ print_location();
+ fprintf(stderr, "ignoring ");
+ print_type_name(type, name);
+ fprintf(stderr, " modversion change\n");
+ sym->is_declared = 1;
+ } else {
+ status = is_unknown_symbol(sym) ?
+ STATUS_DEFINED : STATUS_MODIFIED;
break;
}
+ free_list(defn, NULL);
+ return sym;
}
if (sym) {
- struct symbol **psym;
+ hash_del(&sym->hnode);
- for (psym = &symtab[h]; *psym; psym = &(*psym)->hash_next) {
- if (*psym == sym) {
- *psym = sym->hash_next;
- break;
- }
- }
+ free_list(sym->defn, NULL);
+ free(sym->name);
+ free(sym);
--nsyms;
}
sym = xmalloc(sizeof(*sym));
- sym->name = name;
+ sym->name = xstrdup(name);
sym->type = type;
sym->defn = defn;
sym->expansion_trail = NULL;
sym->visited = NULL;
sym->is_extern = is_extern;
- sym->hash_next = symtab[h];
- symtab[h] = sym;
+ hash_add(symbol_hashtable, &sym->hnode, h);
sym->is_declared = !is_reference;
sym->status = status;
@@ -480,7 +473,7 @@ static void read_reference(FILE *f)
defn = def;
def = read_node(f);
}
- subsym = add_reference_symbol(xstrdup(sym->string), sym->tag,
+ subsym = add_reference_symbol(sym->string, sym->tag,
defn, is_extern);
subsym->is_override = is_override;
free_node(sym);
@@ -525,7 +518,7 @@ static void print_list(FILE * f, struct string_list *list)
}
}
-static unsigned long expand_and_crc_sym(struct symbol *sym, unsigned long crc)
+static uint32_t expand_and_crc_sym(struct symbol *sym, uint32_t crc)
{
struct string_list *list = sym->defn;
struct string_list **e, **b;
@@ -632,7 +625,7 @@ static unsigned long expand_and_crc_sym(struct symbol *sym, unsigned long crc)
void export_symbol(const char *name)
{
struct symbol *sym;
- unsigned long crc;
+ uint32_t crc;
int has_changed = 0;
sym = find_symbol(name, SYM_NORMAL, 0);
@@ -680,7 +673,7 @@ void export_symbol(const char *name)
if (flag_dump_defs)
fputs(">\n", debugfile);
- printf("#SYMVER %s 0x%08lx\n", name, crc);
+ printf("#SYMVER %s 0x%08lx\n", name, (unsigned long)crc);
}
/*----------------------------------------------------------------------*/
@@ -832,9 +825,9 @@ int main(int argc, char **argv)
}
if (flag_debug) {
- fprintf(debugfile, "Hash table occupancy %d/%d = %g\n",
- nsyms, HASH_BUCKETS,
- (double)nsyms / (double)HASH_BUCKETS);
+ fprintf(debugfile, "Hash table occupancy %d/%zd = %g\n",
+ nsyms, HASH_SIZE(symbol_hashtable),
+ (double)nsyms / HASH_SIZE(symbol_hashtable));
}
if (dumpfile)
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index 21ed2ec2d98c..0c355075f0e6 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -12,8 +12,11 @@
#ifndef MODUTILS_GENKSYMS_H
#define MODUTILS_GENKSYMS_H 1
+#include <stdbool.h>
#include <stdio.h>
+#include <list_types.h>
+
enum symbol_type {
SYM_NORMAL, SYM_TYPEDEF, SYM_ENUM, SYM_STRUCT, SYM_UNION,
SYM_ENUM_CONST
@@ -31,8 +34,8 @@ struct string_list {
};
struct symbol {
- struct symbol *hash_next;
- const char *name;
+ struct hlist_node hnode;
+ char *name;
enum symbol_type type;
struct string_list *defn;
struct symbol *expansion_trail;
@@ -64,6 +67,8 @@ struct string_list *copy_list_range(struct string_list *start,
int yylex(void);
int yyparse(void);
+extern bool dont_want_type_specifier;
+
void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));
/*----------------------------------------------------------------------*/
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l
index a4d7495eaf75..22aeb57649d9 100644
--- a/scripts/genksyms/lex.l
+++ b/scripts/genksyms/lex.l
@@ -12,6 +12,7 @@
%{
#include <limits.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
@@ -50,6 +51,7 @@ MC_TOKEN ([~%^&*+=|<>/-]=)|(&&)|("||")|(->)|(<<)|(>>)
%%
+u?int(8|16|32|64)x(1|2|4|8|16)_t return BUILTIN_INT_KEYW;
/* Keep track of our location in the original source files. */
^#[ \t]+{INT}[ \t]+\"[^\"\n]+\".*\n return FILENAME;
@@ -113,6 +115,12 @@ MC_TOKEN ([~%^&*+=|<>/-]=)|(&&)|("||")|(->)|(<<)|(>>)
/* The second stage lexer. Here we incorporate knowledge of the state
of the parser to tailor the tokens that are returned. */
+/*
+ * The lexer cannot distinguish whether a typedef'ed string is a TYPE or an
+ * IDENT. We need a hint from the parser to handle this accurately.
+ */
+bool dont_want_type_specifier;
+
int
yylex(void)
{
@@ -207,7 +215,7 @@ repeat:
goto repeat;
}
}
- if (!suppress_type_lookup)
+ if (!suppress_type_lookup && !dont_want_type_specifier)
{
if (find_symbol(yytext, SYM_TYPEDEF, 1))
token = TYPE;
@@ -431,7 +439,12 @@ fini:
if (suppress_type_lookup > 0)
--suppress_type_lookup;
- if (dont_want_brace_phrase > 0)
+
+ /*
+ * __attribute__() can be placed immediately after the 'struct' keyword.
+ * e.g., struct __attribute__((__packed__)) foo { ... };
+ */
+ if (token != ATTRIBUTE_PHRASE && dont_want_brace_phrase > 0)
--dont_want_brace_phrase;
yylval = &next_node->next;
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index 8e9b5e69e8f0..ee600a804fa1 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -12,6 +12,7 @@
%{
#include <assert.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "genksyms.h"
@@ -148,32 +149,45 @@ simple_declaration:
current_name = NULL;
}
$$ = $3;
+ dont_want_type_specifier = false;
}
;
init_declarator_list_opt:
- /* empty */ { $$ = NULL; }
- | init_declarator_list
+ /* empty */ { $$ = NULL; }
+ | init_declarator_list { free_list(decl_spec, NULL); $$ = $1; }
;
init_declarator_list:
init_declarator
{ struct string_list *decl = *$1;
*$1 = NULL;
+
+ /* avoid sharing among multiple init_declarators */
+ if (decl_spec)
+ decl_spec = copy_list_range(decl_spec, NULL);
+
add_symbol(current_name,
is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
current_name = NULL;
$$ = $1;
+ dont_want_type_specifier = true;
}
- | init_declarator_list ',' init_declarator
- { struct string_list *decl = *$3;
- *$3 = NULL;
+ | init_declarator_list ',' attribute_opt init_declarator
+ { struct string_list *decl = *$4;
+ *$4 = NULL;
free_list(*$2, NULL);
*$2 = decl_spec;
+
+ /* avoid sharing among multiple init_declarators */
+ if (decl_spec)
+ decl_spec = copy_list_range(decl_spec, NULL);
+
add_symbol(current_name,
is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
current_name = NULL;
- $$ = $3;
+ $$ = $4;
+ dont_want_type_specifier = true;
}
;
@@ -189,8 +203,9 @@ decl_specifier_seq_opt:
;
decl_specifier_seq:
- decl_specifier { decl_spec = *$1; }
+ attribute_opt decl_specifier { decl_spec = *$2; }
| decl_specifier_seq decl_specifier { decl_spec = *$2; }
+ | decl_specifier_seq ATTRIBUTE_PHRASE { decl_spec = *$2; }
;
decl_specifier:
@@ -200,7 +215,8 @@ decl_specifier:
remove_node($1);
$$ = $1;
}
- | type_specifier
+ | type_specifier { dont_want_type_specifier = true; $$ = $1; }
+ | type_qualifier
;
storage_class_specifier:
@@ -213,24 +229,23 @@ storage_class_specifier:
type_specifier:
simple_type_specifier
- | cvar_qualifier
| TYPEOF_KEYW '(' parameter_declaration ')'
| TYPEOF_PHRASE
/* References to s/u/e's defined elsewhere. Rearrange things
so that it is easier to expand the definition fully later. */
- | STRUCT_KEYW IDENT
- { remove_node($1); (*$2)->tag = SYM_STRUCT; $$ = $2; }
- | UNION_KEYW IDENT
- { remove_node($1); (*$2)->tag = SYM_UNION; $$ = $2; }
+ | STRUCT_KEYW attribute_opt IDENT
+ { remove_node($1); (*$3)->tag = SYM_STRUCT; $$ = $3; }
+ | UNION_KEYW attribute_opt IDENT
+ { remove_node($1); (*$3)->tag = SYM_UNION; $$ = $3; }
| ENUM_KEYW IDENT
{ remove_node($1); (*$2)->tag = SYM_ENUM; $$ = $2; }
/* Full definitions of an s/u/e. Record it. */
- | STRUCT_KEYW IDENT class_body
- { record_compound($1, $2, $3, SYM_STRUCT); $$ = $3; }
- | UNION_KEYW IDENT class_body
- { record_compound($1, $2, $3, SYM_UNION); $$ = $3; }
+ | STRUCT_KEYW attribute_opt IDENT class_body
+ { record_compound($1, $3, $4, SYM_STRUCT); $$ = $4; }
+ | UNION_KEYW attribute_opt IDENT class_body
+ { record_compound($1, $3, $4, SYM_UNION); $$ = $4; }
| ENUM_KEYW IDENT enum_body
{ record_compound($1, $2, $3, SYM_ENUM); $$ = $3; }
/*
@@ -239,8 +254,8 @@ type_specifier:
| ENUM_KEYW enum_body
{ add_symbol(NULL, SYM_ENUM, NULL, 0); $$ = $2; }
/* Anonymous s/u definitions. Nothing needs doing. */
- | STRUCT_KEYW class_body { $$ = $2; }
- | UNION_KEYW class_body { $$ = $2; }
+ | STRUCT_KEYW attribute_opt class_body { $$ = $3; }
+ | UNION_KEYW attribute_opt class_body { $$ = $3; }
;
simple_type_specifier:
@@ -260,22 +275,24 @@ simple_type_specifier:
;
ptr_operator:
- '*' cvar_qualifier_seq_opt
+ '*' type_qualifier_seq_opt
{ $$ = $2 ? $2 : $1; }
;
-cvar_qualifier_seq_opt:
+type_qualifier_seq_opt:
/* empty */ { $$ = NULL; }
- | cvar_qualifier_seq
+ | type_qualifier_seq
;
-cvar_qualifier_seq:
- cvar_qualifier
- | cvar_qualifier_seq cvar_qualifier { $$ = $2; }
+type_qualifier_seq:
+ type_qualifier
+ | ATTRIBUTE_PHRASE
+ | type_qualifier_seq type_qualifier { $$ = $2; }
+ | type_qualifier_seq ATTRIBUTE_PHRASE { $$ = $2; }
;
-cvar_qualifier:
- CONST_KEYW | VOLATILE_KEYW | ATTRIBUTE_PHRASE
+type_qualifier:
+ CONST_KEYW | VOLATILE_KEYW
| RESTRICT_KEYW
{ /* restrict has no effect in prototypes so ignore it */
remove_node($1);
@@ -297,15 +314,7 @@ direct_declarator:
current_name = (*$1)->string;
$$ = $1;
}
- }
- | TYPE
- { if (current_name != NULL) {
- error_with_pos("unexpected second declaration name");
- YYERROR;
- } else {
- current_name = (*$1)->string;
- $$ = $1;
- }
+ dont_want_type_specifier = false;
}
| direct_declarator '(' parameter_declaration_clause ')'
{ $$ = $4; }
@@ -325,16 +334,19 @@ nested_declarator:
;
direct_nested_declarator:
- IDENT
- | TYPE
- | direct_nested_declarator '(' parameter_declaration_clause ')'
+ direct_nested_declarator1
+ | direct_nested_declarator1 '(' parameter_declaration_clause ')'
{ $$ = $4; }
- | direct_nested_declarator '(' error ')'
+ ;
+
+direct_nested_declarator1:
+ IDENT { $$ = $1; dont_want_type_specifier = false; }
+ | direct_nested_declarator1 '(' error ')'
{ $$ = $4; }
- | direct_nested_declarator BRACKET_PHRASE
+ | direct_nested_declarator1 BRACKET_PHRASE
{ $$ = $2; }
- | '(' nested_declarator ')'
- { $$ = $3; }
+ | '(' attribute_opt nested_declarator ')'
+ { $$ = $4; }
| '(' error ')'
{ $$ = $3; }
;
@@ -352,45 +364,57 @@ parameter_declaration_list_opt:
parameter_declaration_list:
parameter_declaration
+ { $$ = $1; dont_want_type_specifier = false; }
| parameter_declaration_list ',' parameter_declaration
- { $$ = $3; }
+ { $$ = $3; dont_want_type_specifier = false; }
;
parameter_declaration:
- decl_specifier_seq m_abstract_declarator
+ decl_specifier_seq abstract_declarator_opt
{ $$ = $2 ? $2 : $1; }
;
-m_abstract_declarator:
- ptr_operator m_abstract_declarator
+abstract_declarator_opt:
+ /* empty */ { $$ = NULL; }
+ | abstract_declarator
+ ;
+
+abstract_declarator:
+ ptr_operator
+ | ptr_operator abstract_declarator
{ $$ = $2 ? $2 : $1; }
- | direct_m_abstract_declarator
+ | direct_abstract_declarator attribute_opt
+ { $$ = $2; dont_want_type_specifier = false; }
;
-direct_m_abstract_declarator:
- /* empty */ { $$ = NULL; }
- | IDENT
+direct_abstract_declarator:
+ direct_abstract_declarator1
+ | direct_abstract_declarator1 open_paren parameter_declaration_clause ')'
+ { $$ = $4; }
+ | open_paren parameter_declaration_clause ')'
+ { $$ = $3; }
+ ;
+
+direct_abstract_declarator1:
+ IDENT
{ /* For version 2 checksums, we don't want to remember
private parameter names. */
remove_node($1);
$$ = $1;
}
- /* This wasn't really a typedef name but an identifier that
- shadows one. */
- | TYPE
- { remove_node($1);
- $$ = $1;
- }
- | direct_m_abstract_declarator '(' parameter_declaration_clause ')'
- { $$ = $4; }
- | direct_m_abstract_declarator '(' error ')'
+ | direct_abstract_declarator1 open_paren error ')'
{ $$ = $4; }
- | direct_m_abstract_declarator BRACKET_PHRASE
+ | direct_abstract_declarator1 BRACKET_PHRASE
{ $$ = $2; }
- | '(' m_abstract_declarator ')'
- { $$ = $3; }
- | '(' error ')'
+ | open_paren attribute_opt abstract_declarator ')'
+ { $$ = $4; }
+ | open_paren error ')'
{ $$ = $3; }
+ | BRACKET_PHRASE
+ ;
+
+open_paren:
+ '(' { $$ = $1; dont_want_type_specifier = false; }
;
function_definition:
@@ -430,9 +454,9 @@ member_specification:
member_declaration:
decl_specifier_seq_opt member_declarator_list_opt ';'
- { $$ = $3; }
+ { $$ = $3; dont_want_type_specifier = false; }
| error ';'
- { $$ = $2; }
+ { $$ = $2; dont_want_type_specifier = false; }
;
member_declarator_list_opt:
@@ -442,7 +466,9 @@ member_declarator_list_opt:
member_declarator_list:
member_declarator
- | member_declarator_list ',' member_declarator { $$ = $3; }
+ { $$ = $1; dont_want_type_specifier = true; }
+ | member_declarator_list ',' member_declarator
+ { $$ = $3; dont_want_type_specifier = true; }
;
member_declarator:
@@ -457,7 +483,7 @@ member_bitfield_declarator:
attribute_opt:
/* empty */ { $$ = NULL; }
- | attribute_opt ATTRIBUTE_PHRASE
+ | attribute_opt ATTRIBUTE_PHRASE { $$ = $2; }
;
enum_body:
@@ -472,12 +498,12 @@ enumerator_list:
enumerator:
IDENT
{
- const char *name = strdup((*$1)->string);
+ const char *name = (*$1)->string;
add_symbol(name, SYM_ENUM_CONST, NULL, 0);
}
| IDENT '=' EXPRESSION_PHRASE
{
- const char *name = strdup((*$1)->string);
+ const char *name = (*$1)->string;
struct string_list *expr = copy_list_range(*$3, *$2);
add_symbol(name, SYM_ENUM_CONST, expr, 0);
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index a0a0be38cbdc..fb50bd4f4103 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -105,9 +105,11 @@ configfiles = $(wildcard $(srctree)/kernel/configs/$(1) $(srctree)/arch/$(SRCARC
all-config-fragments = $(call configfiles,*.config)
config-fragments = $(call configfiles,$@)
+cmd_merge_fragments = $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
+
%.config: $(obj)/conf
$(if $(config-fragments),, $(error $@ fragment does not exists on this architecture))
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
+ $(call cmd,merge_fragments)
$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
PHONY += tinyconfig
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 4286d5e7f95d..3b55e7a4131d 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -360,10 +360,12 @@ int conf_read_simple(const char *name, int def)
*p = '\0';
- in = zconf_fopen(env);
+ name = env;
+
+ in = zconf_fopen(name);
if (in) {
conf_message("using defaults found in %s",
- env);
+ name);
goto load;
}
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 6c92ef1e16ef..eaa465b0ccf9 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1464,8 +1464,8 @@ void ConfigMainWindow::loadConfig(void)
{
QString str;
- str = QFileDialog::getOpenFileName(this, "", configname);
- if (str.isNull())
+ str = QFileDialog::getOpenFileName(this, QString(), configname);
+ if (str.isEmpty())
return;
if (conf_read(str.toLocal8Bit().constData()))
@@ -1491,8 +1491,8 @@ void ConfigMainWindow::saveConfigAs(void)
{
QString str;
- str = QFileDialog::getSaveFileName(this, "", configname);
- if (str.isNull())
+ str = QFileDialog::getSaveFileName(this, QString(), configname);
+ if (str.isEmpty())
return;
if (conf_write(str.toLocal8Bit().constData())) {
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 89b84bf8e21f..7beb59dec5a0 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -388,6 +388,7 @@ static void sym_warn_unmet_dep(const struct symbol *sym)
" Selected by [m]:\n");
fputs(str_get(&gs), stderr);
+ str_free(&gs);
sym_warnings++;
}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index d853ddb3b28c..56a077d204cf 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -283,7 +283,11 @@ vmlinux_link vmlinux
# fill in BTF IDs
if is_enabled CONFIG_DEBUG_INFO_BTF; then
info BTFIDS vmlinux
- ${RESOLVE_BTFIDS} vmlinux
+ RESOLVE_BTFIDS_ARGS=""
+ if is_enabled CONFIG_WERROR; then
+ RESOLVE_BTFIDS_ARGS=" --fatal_warnings "
+ fi
+ ${RESOLVE_BTFIDS} ${RESOLVE_BTFIDS_ARGS} vmlinux
fi
mksysmap vmlinux System.map
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 9c7b404defbd..d3d00e85edf7 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -237,7 +237,6 @@ int main(void)
DEVID(typec_device_id);
DEVID_FIELD(typec_device_id, svid);
- DEVID_FIELD(typec_device_id, mode);
DEVID(tee_client_device_id);
DEVID_FIELD(tee_client_device_id, uuid);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 19ec72a69e90..00586119a25b 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1219,17 +1219,12 @@ static void do_tbsvc_entry(struct module *mod, void *symval)
module_alias_printf(mod, true, "tbsvc:%s", alias);
}
-/* Looks like: typec:idNmN */
+/* Looks like: typec:idN */
static void do_typec_entry(struct module *mod, void *symval)
{
- char alias[256] = {};
-
DEF_FIELD(symval, typec_device_id, svid);
- DEF_FIELD(symval, typec_device_id, mode);
-
- ADD(alias, "m", mode != TYPEC_ANY_MODE, mode);
- module_alias_printf(mod, false, "typec:id%04X%s", svid, alias);
+ module_alias_printf(mod, false, "typec:id%04X", svid);
}
/* Looks like: tee:uuid */
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 7ea59dc4926b..36b28987a2f0 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -33,6 +33,10 @@ static bool module_enabled;
static bool modversions;
/* Is CONFIG_MODULE_SRCVERSION_ALL set? */
static bool all_versions;
+/* Is CONFIG_BASIC_MODVERSIONS set? */
+static bool basic_modversions;
+/* Is CONFIG_EXTENDED_MODVERSIONS set? */
+static bool extended_modversions;
/* If we are modposting external module set to 1 */
static bool external_module;
/* Only warn about unresolved symbols */
@@ -503,6 +507,9 @@ static int parse_elf(struct elf_info *info, const char *filename)
info->modinfo_len = sechdrs[i].sh_size;
} else if (!strcmp(secname, ".export_symbol")) {
info->export_symbol_secndx = i;
+ } else if (!strcmp(secname, ".no_trim_symbol")) {
+ info->no_trim_symbol = (void *)hdr + sechdrs[i].sh_offset;
+ info->no_trim_symbol_len = sechdrs[i].sh_size;
}
if (sechdrs[i].sh_type == SHT_SYMTAB) {
@@ -1562,6 +1569,14 @@ static void read_symbols(const char *modname)
/* strip trailing .o */
mod = new_module(modname, strlen(modname) - strlen(".o"));
+ /* save .no_trim_symbol section for later use */
+ if (info.no_trim_symbol_len) {
+ mod->no_trim_symbol = xmalloc(info.no_trim_symbol_len);
+ memcpy(mod->no_trim_symbol, info.no_trim_symbol,
+ info.no_trim_symbol_len);
+ mod->no_trim_symbol_len = info.no_trim_symbol_len;
+ }
+
if (!mod->is_vmlinux) {
license = get_modinfo(&info, "license");
if (!license)
@@ -1724,6 +1739,28 @@ static void handle_white_list_exports(const char *white_list)
free(buf);
}
+/*
+ * Keep symbols recorded in the .no_trim_symbol section. This is necessary to
+ * prevent CONFIG_TRIM_UNUSED_KSYMS from dropping their EXPORT_SYMBOL()s,
+ * because symbol_get() relies on the symbol being present in the ksymtab
+ * for lookups.
+ */
+static void keep_no_trim_symbols(struct module *mod)
+{
+ unsigned long size = mod->no_trim_symbol_len;
+
+ for (char *s = mod->no_trim_symbol; s; s = next_string(s, &size)) {
+ struct symbol *sym;
+
+ /*
+ * If find_symbol() returns NULL, this symbol is not provided
+ * by any module, and symbol_get() will fail.
+ */
+ sym = find_symbol(s);
+ if (sym)
+ sym->used = true;
+ }
+}
+
static void check_modname_len(struct module *mod)
{
const char *mod_name;
@@ -1806,13 +1843,56 @@ static void add_exported_symbols(struct buffer *buf, struct module *mod)
}
/**
+ * Record CRCs for unresolved symbols, supporting long names
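+ *
+ * Each entry in the __version_ext_crcs table pairs with the null-terminated
+ * name at the same index in __version_ext_names, so the two loops below must
+ * skip exactly the same symbols.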
+ */
+static void add_extended_versions(struct buffer *b, struct module *mod)
+{
+ struct symbol *s;
+
+ if (!extended_modversions)
+ return;
+
+ buf_printf(b, "\n");
+ buf_printf(b, "static const u32 ____version_ext_crcs[]\n");
+ buf_printf(b, "__used __section(\"__version_ext_crcs\") = {\n");
+ list_for_each_entry(s, &mod->unresolved_symbols, list) {
+ if (!s->module)
+ continue;
+ if (!s->crc_valid) {
+ warn("\"%s\" [%s.ko] has no CRC!\n",
+ s->name, mod->name);
+ continue;
+ }
+ buf_printf(b, "\t0x%08x,\n", s->crc);
+ }
+ buf_printf(b, "};\n");
+
+ buf_printf(b, "static const char ____version_ext_names[]\n");
+ buf_printf(b, "__used __section(\"__version_ext_names\") =\n");
+ list_for_each_entry(s, &mod->unresolved_symbols, list) {
+ if (!s->module)
+ continue;
+ if (!s->crc_valid)
+ /*
+ * We already warned about this symbol when producing the
+ * CRC table. Skip its name too, so that the indexes in
+ * both tables stay aligned.
+ */
+ continue;
+ buf_printf(b, "\t\"%s\\0\"\n", s->name);
+ }
+ buf_printf(b, ";\n");
+}
+
+/**
* Record CRCs for unresolved symbols
**/
static void add_versions(struct buffer *b, struct module *mod)
{
struct symbol *s;
- if (!modversions)
+ if (!basic_modversions)
return;
buf_printf(b, "\n");
@@ -1828,11 +1908,16 @@ static void add_versions(struct buffer *b, struct module *mod)
continue;
}
if (strlen(s->name) >= MODULE_NAME_LEN) {
- error("too long symbol \"%s\" [%s.ko]\n",
- s->name, mod->name);
- break;
+ if (extended_modversions) {
+ /* this symbol will only be in the extended info */
+ continue;
+ } else {
+ error("too long symbol \"%s\" [%s.ko]\n",
+ s->name, mod->name);
+ break;
+ }
}
- buf_printf(b, "\t{ %#8x, \"%s\" },\n",
+ buf_printf(b, "\t{ 0x%08x, \"%s\" },\n",
s->crc, s->name);
}
@@ -1961,6 +2046,7 @@ static void write_mod_c_file(struct module *mod)
add_header(&buf, mod);
add_exported_symbols(&buf, mod);
add_versions(&buf, mod);
+ add_extended_versions(&buf, mod);
add_depends(&buf, mod);
buf_printf(&buf, "\n");
@@ -2126,7 +2212,7 @@ int main(int argc, char **argv)
LIST_HEAD(dump_lists);
struct dump_list *dl, *dl2;
- while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:")) != -1) {
+ while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:xb")) != -1) {
switch (opt) {
case 'e':
external_module = true;
@@ -2175,6 +2261,12 @@ int main(int argc, char **argv)
case 'd':
missing_namespace_deps = optarg;
break;
+ case 'b':
+ basic_modversions = true;
+ break;
+ case 'x':
+ extended_modversions = true;
+ break;
default:
exit(1);
}
@@ -2195,6 +2287,8 @@ int main(int argc, char **argv)
read_symbols_from_files(files_source);
list_for_each_entry(mod, &modules, list) {
+ keep_no_trim_symbols(mod);
+
if (mod->dump_file || mod->is_vmlinux)
continue;
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index ffd0a52a606e..59366f456b76 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -111,6 +111,8 @@ struct module_alias {
*
* @dump_file: path to the .symvers file if loaded from a file
* @aliases: list head for module_aliases
+ * @no_trim_symbol: .no_trim_symbol section data
+ * @no_trim_symbol_len: length of the .no_trim_symbol section
*/
struct module {
struct list_head list;
@@ -128,6 +130,8 @@ struct module {
// Actual imported namespaces
struct list_head imported_namespaces;
struct list_head aliases;
+ char *no_trim_symbol;
+ unsigned int no_trim_symbol_len;
char name[];
};
@@ -141,6 +145,8 @@ struct elf_info {
char *strtab;
char *modinfo;
unsigned int modinfo_len;
+ char *no_trim_symbol;
+ unsigned int no_trim_symbol_len;
/* support for 32bit section numbers */
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index c2f80f9141d4..450f1088d5fd 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -16,6 +16,7 @@ SECTIONS {
*(.discard)
*(.discard.*)
*(.export_symbol)
+ *(.no_trim_symbol)
}
__ksymtab 0 : ALIGN(8) { *(SORT(___ksymtab+*)) }
diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
index dca706617adc..0cf3a55b05e1 100644
--- a/scripts/package/PKGBUILD
+++ b/scripts/package/PKGBUILD
@@ -22,7 +22,6 @@ license=(GPL-2.0-only)
makedepends=(
bc
bison
- cpio
flex
gettext
kmod
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index ad7aba0f268e..3627ca227e5a 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -5,10 +5,12 @@
#
# Simple script to generate a deb package for a Linux kernel. All the
# complexity of what to do with a kernel after it is installed or removed
-# is left to other scripts and packages: they can install scripts in the
-# /etc/kernel/{pre,post}{inst,rm}.d/ directories (or an alternative location
-# specified in KDEB_HOOKDIR) that will be called on package install and
-# removal.
+# is left to other scripts and packages. Scripts can be placed into the
+# preinst, postinst, prerm and postrm directories in /etc/kernel or
+# /usr/share/kernel. A different list of search directories can be given
+# via KDEB_HOOKDIR. Scripts in directories earlier in the list will
+# override scripts of the same name in later directories. The scripts will
+# be called on package installation and removal.
set -eu
@@ -74,10 +76,8 @@ install_maint_scripts () {
# kernel packages, as well as kernel packages built using make-kpkg.
# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
# so do we; recent versions of dracut and initramfs-tools will obey this.
- debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
+ debhookdir=${KDEB_HOOKDIR:-/etc/kernel /usr/share/kernel}
for script in postinst postrm preinst prerm; do
- mkdir -p "${pdir}${debhookdir}/${script}.d"
-
mkdir -p "${pdir}/DEBIAN"
cat <<-EOF > "${pdir}/DEBIAN/${script}"
#!/bin/sh
@@ -90,7 +90,15 @@ install_maint_scripts () {
# Tell initramfs builder whether it's wanted
export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
- test -d ${debhookdir}/${script}.d && run-parts --arg="${KERNELRELEASE}" --arg="/${installed_image_path}" ${debhookdir}/${script}.d
+ # run-parts will error out if one of its directory arguments does not
+ # exist, so filter the list of hook directories accordingly.
+ hookdirs=
+ for dir in ${debhookdir}; do
+ test -d "\$dir/${script}.d" || continue
+ hookdirs="\$hookdirs \$dir/${script}.d"
+ done
+ hookdirs="\${hookdirs# }"
+ test -n "\$hookdirs" && run-parts --arg="${KERNELRELEASE}" --arg="/${installed_image_path}" \$hookdirs
exit 0
EOF
chmod 755 "${pdir}/DEBIAN/${script}"
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index d3c5b104c063..b724626ea0ca 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -49,17 +49,10 @@ mkdir -p "${destdir}"
# This caters to host programs that participate in Kbuild. objtool and
# resolve_btfids are out of scope.
if [ "${CC}" != "${HOSTCC}" ]; then
- echo "Rebuilding host programs with ${CC}..."
-
- # This leverages external module building.
- # - Clear sub_make_done to allow the top-level Makefile to redo sub-make.
- # - Filter out --no-print-directory to print "Entering directory" logs
- # when Make changes the working directory.
- unset sub_make_done
- MAKEFLAGS=$(echo "${MAKEFLAGS}" | sed s/--no-print-directory//)
-
- cat <<-'EOF' > "${destdir}/Kbuild"
- subdir-y := scripts
+ cat "${destdir}/scripts/Makefile" - <<-'EOF' > "${destdir}/scripts/Kbuild"
+ subdir-y += basic
+ hostprogs-always-y += mod/modpost
+ mod/modpost-objs := $(addprefix mod/, modpost.o file2alias.o sumversion.o symsearch.o)
EOF
# HOSTCXX is not overridden. The C++ compiler is used to build:
@@ -67,20 +60,12 @@ if [ "${CC}" != "${HOSTCC}" ]; then
# - GCC plugins, which will not work on the installed system even after
# being rebuilt.
#
- # Use the single-target build to avoid the modpost invocation, which
- # would overwrite Module.symvers.
- "${MAKE}" HOSTCC="${CC}" KBUILD_OUTPUT=. KBUILD_EXTMOD="${destdir}" scripts/
-
- cat <<-'EOF' > "${destdir}/scripts/Kbuild"
- subdir-y := basic
- hostprogs-always-y := mod/modpost
- mod/modpost-objs := $(addprefix mod/, modpost.o file2alias.o sumversion.o symsearch.o)
- EOF
-
- # Run once again to rebuild scripts/basic/ and scripts/mod/modpost.
- "${MAKE}" HOSTCC="${CC}" KBUILD_OUTPUT=. KBUILD_EXTMOD="${destdir}" scripts/
+ # Clear VPATH and srcroot because the source files reside in the output
+ # directory.
+ # shellcheck disable=SC2016 # $(MAKE), $(CC), and $(build) will be expanded by Make
+ "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC="$(CC)" VPATH= srcroot=. $(build)='"${destdir}"/scripts
- rm -f "${destdir}/Kbuild" "${destdir}/scripts/Kbuild"
+ rm -f "${destdir}/scripts/Kbuild"
fi
find "${destdir}" \( -name '.*.cmd' -o -name '*.o' \) -delete
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index b038a1380b8a..b6dd98ca860b 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -205,7 +205,7 @@ Priority: optional
Maintainer: $maintainer
Rules-Requires-Root: no
Build-Depends: debhelper-compat (= 12)
-Build-Depends-Arch: bc, bison, cpio, flex,
+Build-Depends-Arch: bc, bison, flex,
gcc-${host_gnu} <!pkg.${sourcename}.nokernelheaders>,
kmod, libelf-dev:native,
libssl-dev:native, libssl-dev <!pkg.${sourcename}.nokernelheaders>,
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 8b8fb115fc81..8d608f61bf37 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -214,9 +214,15 @@ class id_parser(object):
# Remove trailing xml comment closure
if line.strip().endswith('-->'):
expr = expr.rstrip('-->').strip()
+ # Remove trailing Jinja2 comment closure
+ if line.strip().endswith('#}'):
+ expr = expr.rstrip('#}').strip()
# Special case for SH magic boot code files
if line.startswith('LIST \"'):
expr = expr.rstrip('\"').strip()
+ # Remove j2 comment closure
+ if line.startswith('{#'):
+ expr = expr.rstrip('#}').strip()
self.parse(expr)
self.spdx_valid += 1
#
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 05bd9ca1fbfa..a290db720b0f 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -222,6 +222,7 @@ autonymous||autonomous
auxillary||auxiliary
auxilliary||auxiliary
avaiable||available
+avaialable||available
avaible||available
availabe||available
availabled||available
@@ -267,6 +268,7 @@ broadcase||broadcast
broadcat||broadcast
bufer||buffer
bufferred||buffered
+bufferur||buffer
bufufer||buffer
cacluated||calculated
caculate||calculate
@@ -405,6 +407,7 @@ configutation||configuration
congiuration||configuration
conider||consider
conjuction||conjunction
+connction||connection
connecetd||connected
connectinos||connections
connetor||connector
@@ -413,6 +416,7 @@ connnections||connections
consistancy||consistency
consistant||consistent
consits||consists
+constructred||constructed
containes||contains
containts||contains
contaisn||contains
@@ -450,6 +454,7 @@ creationg||creating
cryptocraphic||cryptographic
cummulative||cumulative
cunter||counter
+curent||current
curently||currently
cylic||cyclic
dafault||default
@@ -461,6 +466,7 @@ decendant||descendant
decendants||descendants
decompres||decompress
decsribed||described
+decrese||decrease
decription||description
detault||default
dectected||detected
@@ -485,6 +491,7 @@ delare||declare
delares||declares
delaring||declaring
delemiter||delimiter
+deley||delay
delibrately||deliberately
delievered||delivered
demodualtor||demodulator
@@ -551,6 +558,7 @@ disgest||digest
disired||desired
dispalying||displaying
dissable||disable
+dissapeared||disappeared
diplay||display
directon||direction
direcly||directly
@@ -606,6 +614,7 @@ eigth||eight
elementry||elementary
eletronic||electronic
embeded||embedded
+emtpy||empty
enabledi||enabled
enbale||enable
enble||enable
@@ -669,6 +678,7 @@ exmaple||example
expecially||especially
experies||expires
explicite||explicit
+explicity||explicitly
explicitely||explicitly
explict||explicit
explictely||explicitly
@@ -723,10 +733,12 @@ followign||following
followings||following
follwing||following
fonud||found
+forcebly||forcibly
forseeable||foreseeable
forse||force
fortan||fortran
forwardig||forwarding
+forwared||forwarded
frambuffer||framebuffer
framming||framing
framwork||framework
@@ -767,6 +779,7 @@ grahpical||graphical
granularty||granularity
grapic||graphic
grranted||granted
+grups||groups
guage||gauge
guarenteed||guaranteed
guarentee||guarantee
@@ -780,6 +793,7 @@ hardare||hardware
harware||hardware
hardward||hardware
havind||having
+heigth||height
heirarchically||hierarchically
heirarchy||hierarchy
heirachy||hierarchy
@@ -788,9 +802,11 @@ hearbeat||heartbeat
heterogenous||heterogeneous
hexdecimal||hexadecimal
hybernate||hibernate
+hiearchy||hierarchy
hierachy||hierarchy
hierarchie||hierarchy
homogenous||homogeneous
+horizental||horizontal
howver||however
hsould||should
hypervior||hypervisor
@@ -842,6 +858,7 @@ independed||independent
indiate||indicate
indicat||indicate
inexpect||inexpected
+infalte||inflate
inferface||interface
infinit||infinite
infomation||information
@@ -861,6 +878,7 @@ initators||initiators
initialiazation||initialization
initializationg||initialization
initializiation||initialization
+initializtion||initialization
initialze||initialize
initialzed||initialized
initialzing||initializing
@@ -877,6 +895,7 @@ instanciate||instantiate
instanciated||instantiated
instuments||instruments
insufficent||insufficient
+intead||instead
inteface||interface
integreated||integrated
integrety||integrity
@@ -1081,6 +1100,7 @@ notications||notifications
notifcations||notifications
notifed||notified
notity||notify
+notfify||notify
nubmer||number
numebr||number
numer||number
@@ -1122,6 +1142,7 @@ orientatied||orientated
orientied||oriented
orignal||original
originial||original
+orphanded||orphaned
otherise||otherwise
ouput||output
oustanding||outstanding
@@ -1184,9 +1205,11 @@ peroid||period
persistance||persistence
persistant||persistent
phoneticly||phonetically
+pipline||pipeline
plaform||platform
plalform||platform
platfoem||platform
+platfomr||platform
platfrom||platform
plattform||platform
pleaes||please
@@ -1211,6 +1234,7 @@ preceeding||preceding
preceed||precede
precendence||precedence
precission||precision
+predicition||prediction
preemptable||preemptible
prefered||preferred
prefferably||preferably
@@ -1289,6 +1313,7 @@ querrying||querying
queus||queues
randomally||randomly
raoming||roaming
+readyness||readiness
reasearcher||researcher
reasearchers||researchers
reasearch||research
@@ -1305,8 +1330,10 @@ recieves||receives
recieving||receiving
recogniced||recognised
recognizeable||recognizable
+recompte||recompute
recommanded||recommended
recyle||recycle
+redect||reject
redircet||redirect
redirectrion||redirection
redundacy||redundancy
@@ -1314,6 +1341,7 @@ reename||rename
refcounf||refcount
refence||reference
refered||referred
+referencce||reference
referenace||reference
refererence||reference
refering||referring
@@ -1348,11 +1376,13 @@ replys||replies
reponse||response
representaion||representation
repsonse||response
+reqested||requested
reqeust||request
reqister||register
requed||requeued
requestied||requested
requiere||require
+requieres||requires
requirment||requirement
requred||required
requried||required
@@ -1440,6 +1470,7 @@ sequencial||sequential
serivce||service
serveral||several
servive||service
+sesion||session
setts||sets
settting||setting
shapshot||snapshot
@@ -1602,11 +1633,13 @@ trys||tries
thses||these
tiggers||triggers
tiggered||triggered
+tiggerring||triggering
tipically||typically
timeing||timing
timming||timing
timout||timeout
tmis||this
+tolarance||tolerance
toogle||toggle
torerable||tolerable
torlence||tolerance
@@ -1633,6 +1666,7 @@ trasfer||transfer
trasmission||transmission
trasmitter||transmitter
treshold||threshold
+trigged||triggered
triggerd||triggered
trigerred||triggered
trigerring||triggering
@@ -1648,6 +1682,7 @@ uknown||unknown
usccess||success
uncommited||uncommitted
uncompatible||incompatible
+uncomressed||uncompressed
unconditionaly||unconditionally
undeflow||underflow
undelying||underlying
@@ -1715,6 +1750,7 @@ utitity||utility
utitlty||utility
vaid||valid
vaild||valid
+validationg||validating
valide||valid
variantions||variations
varible||variable
@@ -1724,6 +1760,7 @@ verbse||verbose
veify||verify
verfication||verification
veriosn||version
+versoin||version
verisons||versions
verison||version
veritical||vertical
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 7939aea731f1..45eaf35f5bff 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -188,6 +188,7 @@ regex_c=(
'/^PCI_OP_WRITE([[:space:]]*\(\w*\).*[1-4])/pci_bus_write_config_\1/'
'/\<DEFINE_\(RT_MUTEX\|MUTEX\|SEMAPHORE\|SPINLOCK\)([[:space:]]*\([[:alnum:]_]*\)/\2/v/'
'/\<DEFINE_\(RAW_SPINLOCK\|RWLOCK\|SEQLOCK\)([[:space:]]*\([[:alnum:]_]*\)/\2/v/'
+ '/\<DEFINE_TIMER(\([^,)]*\),/\1/'
'/\<DECLARE_\(RWSEM\|COMPLETION\)([[:space:]]*\([[:alnum:]_]\+\)/\2/v/'
'/\<DECLARE_BITMAP([[:space:]]*\([[:alnum:]_]\+\)/\1/v/'
'/\(^\|\s\)\(\|L\|H\)LIST_HEAD([[:space:]]*\([[:alnum:]_]*\)/\3/v/'
@@ -267,7 +268,8 @@ exuberant()
# identifiers to ignore by ctags
local ign=(
ACPI_EXPORT_SYMBOL
- DEFINE_{TRACE,MUTEX}
+ DECLARE_BITMAP
+ DEFINE_{TRACE,MUTEX,TIMER}
EXPORT_SYMBOL EXPORT_SYMBOL_GPL
EXPORT_TRACEPOINT_SYMBOL EXPORT_TRACEPOINT_SYMBOL_GPL
____cacheline_aligned ____cacheline_aligned_in_smp
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c9d5ca3d8d08..b56e001e0c6a 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -127,6 +127,7 @@ choice
repeating for all types and padding except float and double
which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF
repeating for all types and padding.
+ GCC uses 0xFE repeating for all types, and zero for padding.
config INIT_STACK_ALL_ZERO
bool "zero-init everything (strongest and safest)"
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 1edc12862a7d..9b6c2f157f83 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -2038,7 +2038,7 @@ static int apparmor_dointvec(const struct ctl_table *table, int write,
return proc_dointvec(table, write, buffer, lenp, ppos);
}
-static struct ctl_table apparmor_sysctl_table[] = {
+static const struct ctl_table apparmor_sysctl_table[] = {
#ifdef CONFIG_USER_NS
{
.procname = "unprivileged_userns_apparmor_policy",
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
index 3663aec7bcbd..db759025abe1 100644
--- a/security/bpf/hooks.c
+++ b/security/bpf/hooks.c
@@ -13,7 +13,6 @@ static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = {
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
- LSM_HOOK_INIT(task_free, bpf_task_storage_free),
};
static const struct lsm_id bpf_lsmid = {
diff --git a/security/commoncap.c b/security/commoncap.c
index cefad323a0b1..58a0c1c3e409 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,9 @@
#include <linux/mnt_idmapping.h>
#include <uapi/linux/lsm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/capability.h>
+
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -50,24 +53,24 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
}
/**
- * cap_capable - Determine whether a task has a particular effective capability
+ * cap_capable_helper - Determine whether a task has a particular effective
+ * capability.
* @cred: The credentials to use
- * @targ_ns: The user namespace in which we need the capability
+ * @target_ns: The user namespace of the resource being accessed
+ * @cred_ns: The user namespace of the credentials
* @cap: The capability to check for
- * @opts: Bitmask of options defined in include/linux/security.h
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
- * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
- * and has_capability() functions. That is, it has the reverse semantics:
- * cap_has_capability() returns 0 when a task has a capability, but the
- * kernel's capable() and has_capability() returns 1 for this case.
+ * See cap_capable for more details.
*/
-int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
- int cap, unsigned int opts)
+static inline int cap_capable_helper(const struct cred *cred,
+ struct user_namespace *target_ns,
+ const struct user_namespace *cred_ns,
+ int cap)
{
- struct user_namespace *ns = targ_ns;
+ struct user_namespace *ns = target_ns;
/* See if cred has the capability in the target user namespace
* by examining the target user namespace and all of the target
@@ -75,21 +78,21 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
*/
for (;;) {
/* Do we have the necessary capabilities? */
- if (ns == cred->user_ns)
+ if (likely(ns == cred_ns))
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/*
* If we're already at a lower level than we're looking for,
* we're done searching.
*/
- if (ns->level <= cred->user_ns->level)
+ if (ns->level <= cred_ns->level)
return -EPERM;
/*
* The owner of the user namespace in the parent of the
* user namespace has all caps.
*/
- if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ if ((ns->parent == cred_ns) && uid_eq(ns->owner, cred->euid))
return 0;
/*
@@ -103,6 +106,31 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
}
/**
+ * cap_capable - Determine whether a task has a particular effective capability
+ * @cred: The credentials to use
+ * @target_ns: The user namespace of the resource being accessed
+ * @cap: The capability to check for
+ * @opts: Bitmask of options defined in include/linux/security.h (unused)
+ *
+ * Determine whether the nominated task has the specified capability amongst
+ * its effective set, returning 0 if it does, -ve if it does not.
+ *
+ * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
+ * and has_capability() functions. That is, it has the reverse semantics:
+ * cap_has_capability() returns 0 when a task has a capability, but the
+ * kernel's capable() and has_capability() return 1 for this case.
+ */
+int cap_capable(const struct cred *cred, struct user_namespace *target_ns,
+ int cap, unsigned int opts)
+{
+ const struct user_namespace *cred_ns = cred->user_ns;
+ int ret = cap_capable_helper(cred, target_ns, cred_ns, cap);
+
+ trace_cap_capable(cred, target_ns, cred_ns, cap, ret);
+ return ret;
+}
+
+/**
* cap_settime - Determine whether the current process may set the system clock
* @ts: The time to set
* @tz: The timezone to set
@@ -1302,21 +1330,38 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
& (old->securebits ^ arg2)) /*[1]*/
|| ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
- || (cap_capable(current_cred(),
- current_cred()->user_ns,
- CAP_SETPCAP,
- CAP_OPT_NONE) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
* [3] no setting of unsupported bits
- * [4] doing anything requires privilege (go read about
- * the "sendmail capabilities bug")
*/
)
/* cannot change a locked bit */
return -EPERM;
+ /*
+ * Doing anything requires privilege (go read about the
+ * "sendmail capabilities bug"), except for unprivileged bits.
+ * Indeed, the SECURE_ALL_UNPRIVILEGED bits are not
+ * restrictions enforced by the kernel but by user space on
+ * itself.
+ */
+ if (cap_capable(current_cred(), current_cred()->user_ns,
+ CAP_SETPCAP, CAP_OPT_NONE) != 0) {
+ const unsigned long unpriv_and_locks =
+ SECURE_ALL_UNPRIVILEGED |
+ SECURE_ALL_UNPRIVILEGED << 1;
+ const unsigned long changed = old->securebits ^ arg2;
+
+ /* For legacy reasons, deny a no-op change. */
+ if (!changed)
+ return -EPERM;
+
+ /* Denies privileged changes. */
+ if (changed & ~unpriv_and_locks)
+ return -EPERM;
+ }
+
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -1428,12 +1473,6 @@ int cap_mmap_addr(unsigned long addr)
return ret;
}
-int cap_mmap_file(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags)
-{
- return 0;
-}
-
#ifdef CONFIG_SECURITY
static const struct lsm_id capability_lsmid = {
@@ -1453,7 +1492,6 @@ static struct security_hook_list capability_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
- LSM_HOOK_INIT(mmap_file, cap_mmap_file),
LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
LSM_HOOK_INIT(task_prctl, cap_task_prctl),
LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
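As the restored kerneldoc above points out, cap_capable() keeps the inverse convention of capable(): 0 means the capability is present, a negative errno means it is not. A minimal sketch of a caller, just to illustrate the convention (cred_has_net_admin() is a made-up helper, not part of the patch):

/* Sketch only: shows the 0 / -EPERM convention of cap_capable(), as opposed
 * to the boolean convention of capable(). */
static bool cred_has_net_admin(const struct cred *cred,
                               struct user_namespace *ns)
{
        /* 0 means the capability is present, -EPERM means it is not */
        return cap_capable(cred, ns, CAP_NET_ADMIN, CAP_OPT_NONE) == 0;
}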
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 884a3533f7af..f435eff4667f 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
+#include <linux/binfmts.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/magic.h>
@@ -469,6 +470,17 @@ int ima_check_blacklist(struct ima_iint_cache *iint,
return rc;
}
+static bool is_bprm_creds_for_exec(enum ima_hooks func, struct file *file)
+{
+ struct linux_binprm *bprm;
+
+ if (func == BPRM_CHECK) {
+ bprm = container_of(&file, struct linux_binprm, file);
+ return bprm->is_check;
+ }
+ return false;
+}
+
/*
* ima_appraise_measurement - appraise file measurement
*
@@ -483,6 +495,7 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
int xattr_len, const struct modsig *modsig)
{
static const char op[] = "appraise_data";
+ int audit_msgno = AUDIT_INTEGRITY_DATA;
const char *cause = "unknown";
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_backing_inode(dentry);
@@ -494,6 +507,16 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
return INTEGRITY_UNKNOWN;
+ /*
+ * Unlike any of the other LSM hooks where the kernel enforces file
+ * integrity, enforcing file integrity for the bprm_creds_for_exec()
+ * LSM hook with the AT_EXECVE_CHECK flag is left up to the discretion
+ * of the script interpreter (userspace). Differentiate kernel- and
+ * userspace-enforced integrity audit messages.
+ */
+ if (is_bprm_creds_for_exec(func, file))
+ audit_msgno = AUDIT_INTEGRITY_USERSPACE;
+
/* If reading the xattr failed and there's no modsig, error out. */
if (rc <= 0 && !try_modsig) {
if (rc && rc != -ENODATA)
@@ -569,7 +592,7 @@ out:
(iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) {
status = INTEGRITY_FAIL;
cause = "unverifiable-signature";
- integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ integrity_audit_msg(audit_msgno, inode, filename,
op, cause, rc, 0);
} else if (status != INTEGRITY_PASS) {
/* Fix mode, but don't replace file signatures. */
@@ -589,7 +612,7 @@ out:
status = INTEGRITY_PASS;
}
- integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ integrity_audit_msg(audit_msgno, inode, filename,
op, cause, rc, 0);
} else {
ima_cache_flags(iint, func);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9b87556b03a7..9f9897a7c217 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -555,6 +555,34 @@ static int ima_bprm_check(struct linux_binprm *bprm)
}
/**
+ * ima_bprm_creds_for_exec - collect/store/appraise measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure
+ * and appraise the integrity of a file to be executed by script interpreters.
+ * Unlike any of the other LSM hooks where the kernel enforces file integrity,
+ * enforcing file integrity is left up to the discretion of the script
+ * interpreter (userspace).
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+static int ima_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * As security_bprm_check() is called multiple times, both
+ * the script and the shebang interpreter are measured, appraised,
+ * and audited. Limit usage of this LSM hook to just measuring,
+ * appraising, and auditing the indirect script execution
+ * (e.g. sh example.sh).
+ */
+ if (!bprm->is_check)
+ return 0;
+
+ return ima_bprm_check(bprm);
+}
+
+/**
* ima_file_check - based on policy, collect/store measurement.
* @file: pointer to the file to be measured
* @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
@@ -1174,6 +1202,7 @@ static int __init init_ima(void)
static struct security_hook_list ima_hooks[] __ro_after_init = {
LSM_HOOK_INIT(bprm_check_security, ima_bprm_check),
+ LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec),
LSM_HOOK_INIT(file_post_open, ima_file_check),
LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile),
LSM_HOOK_INIT(file_release, ima_file_free),
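The new ima_bprm_creds_for_exec() hook only acts when bprm->is_check is set, i.e. when userspace issued execveat(2) with AT_EXECVE_CHECK. A rough userspace sketch of how a script interpreter might use that flag (assumes a libc that exposes execveat() and uapi headers providing AT_EXECVE_CHECK; the function name, argv/envp and error handling are simplified):

#include <fcntl.h>
#include <unistd.h>

/* Sketch: ask the kernel whether a script would be allowed to execute,
 * without actually executing it. */
static int script_would_be_allowed(int script_fd)
{
        char *const argv[] = { "script", NULL };
        char *const envp[] = { NULL };

        /* With AT_EXECVE_CHECK the file is never executed: the call returns
         * 0 if execution would be permitted, -1 with errno set otherwise. */
        return execveat(script_fd, "", argv, envp,
                        AT_EMPTY_PATH | AT_EXECVE_CHECK) == 0;
}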
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index 91f000eef3ad..cde08c478f32 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -9,7 +9,7 @@
#include <linux/sysctl.h>
#include "internal.h"
-static struct ctl_table key_sysctls[] = {
+static const struct ctl_table key_sysctls[] = {
{
.procname = "maxkeys",
.data = &key_quota_maxkeys,
diff --git a/security/keys/trusted-keys/trusted_dcp.c b/security/keys/trusted-keys/trusted_dcp.c
index e908c53a803c..7b6eb655df0c 100644
--- a/security/keys/trusted-keys/trusted_dcp.c
+++ b/security/keys/trusted-keys/trusted_dcp.c
@@ -201,12 +201,16 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
{
struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
int blen, ret;
- u8 plain_blob_key[AES_KEYSIZE_128];
+ u8 *plain_blob_key;
blen = calc_blob_len(p->key_len);
if (blen > MAX_BLOB_SIZE)
return -E2BIG;
+ plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
+ if (!plain_blob_key)
+ return -ENOMEM;
+
b->fmt_version = DCP_BLOB_VERSION;
get_random_bytes(b->nonce, AES_KEYSIZE_128);
get_random_bytes(plain_blob_key, AES_KEYSIZE_128);
@@ -229,7 +233,8 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
ret = 0;
out:
- memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
+ memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
+ kfree(plain_blob_key);
return ret;
}
@@ -238,7 +243,7 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
{
struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
int blen, ret;
- u8 plain_blob_key[AES_KEYSIZE_128];
+ u8 *plain_blob_key = NULL;
if (b->fmt_version != DCP_BLOB_VERSION) {
pr_err("DCP blob has bad version: %i, expected %i\n",
@@ -256,6 +261,12 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
goto out;
}
+ plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
+ if (!plain_blob_key) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = decrypt_blob_key(b->blob_key, plain_blob_key);
if (ret) {
pr_err("Unable to decrypt blob key: %i\n", ret);
@@ -271,7 +282,10 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
ret = 0;
out:
- memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
+ if (plain_blob_key) {
+ memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
+ kfree(plain_blob_key);
+ }
return ret;
}
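The trusted_dcp change above moves transient key material off the stack and wipes it before freeing. The general pattern, as a sketch (do_crypto_with() is a placeholder, not a real API):

/* Sketch: heap-allocate secret material, wipe it with memzero_explicit(),
 * which the compiler cannot optimize away, then free it. */
static int handle_transient_key(size_t len)
{
        u8 *key = kmalloc(len, GFP_KERNEL);
        int ret;

        if (!key)
                return -ENOMEM;

        ret = do_crypto_with(key, len);     /* placeholder helper */

        memzero_explicit(key, len);
        kfree(key);
        return ret;
}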
diff --git a/security/landlock/access.h b/security/landlock/access.h
new file mode 100644
index 000000000000..74fd8f399fbd
--- /dev/null
+++ b/security/landlock/access.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock LSM - Access types and helpers
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_ACCESS_H
+#define _SECURITY_LANDLOCK_ACCESS_H
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <uapi/linux/landlock.h>
+
+#include "limits.h"
+
+/*
+ * All access rights that are denied by default whether they are handled or not
+ * by a ruleset/layer. This must be ORed with all ruleset->access_masks[]
+ * entries when we need to get the absolute handled access masks, see
+ * landlock_upgrade_handled_access_masks().
+ */
+/* clang-format off */
+#define _LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \
+ LANDLOCK_ACCESS_FS_REFER)
+/* clang-format on */
+
+typedef u16 access_mask_t;
+
+/* Makes sure all filesystem access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
+/* Makes sure all network access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
+/* Makes sure all scoped rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE);
+/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
+static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
+
+/* Ruleset access masks. */
+struct access_masks {
+ access_mask_t fs : LANDLOCK_NUM_ACCESS_FS;
+ access_mask_t net : LANDLOCK_NUM_ACCESS_NET;
+ access_mask_t scope : LANDLOCK_NUM_SCOPE;
+};
+
+union access_masks_all {
+ struct access_masks masks;
+ u32 all;
+};
+
+/* Makes sure all fields are covered. */
+static_assert(sizeof(typeof_member(union access_masks_all, masks)) ==
+ sizeof(typeof_member(union access_masks_all, all)));
+
+typedef u16 layer_mask_t;
+
+/* Makes sure all layers can be checked. */
+static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
+
+/* Upgrades with all initially denied by default access rights. */
+static inline struct access_masks
+landlock_upgrade_handled_access_masks(struct access_masks access_masks)
+{
+ /*
+ * All access rights that are denied by default whether they are
+ * explicitly handled or not.
+ */
+ if (access_masks.fs)
+ access_masks.fs |= _LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+
+ return access_masks;
+}
+
+#endif /* _SECURITY_LANDLOCK_ACCESS_H */
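The access_masks bitfields in the new header are sized so they pack into a single u32, and union access_masks_all makes that explicit, so the whole set can be tested or compared in one operation. A tiny sketch of the idea (handles_anything() is illustrative, not part of the patch):

/* Sketch: use the union to ask "does this ruleset handle anything at all?"
 * with one integer comparison instead of checking each bitfield. */
static bool handles_anything(const struct access_masks masks)
{
        const union access_masks_all all = { .masks = masks };

        return all.all != 0;
}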
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index e31b97a9f175..71b9dc331aae 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -36,6 +36,7 @@
#include <uapi/linux/fiemap.h>
#include <uapi/linux/landlock.h>
+#include "access.h"
#include "common.h"
#include "cred.h"
#include "fs.h"
@@ -388,14 +389,6 @@ static bool is_nouser_or_private(const struct dentry *dentry)
unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}
-static access_mask_t
-get_handled_fs_accesses(const struct landlock_ruleset *const domain)
-{
- /* Handles all initially denied by default access rights. */
- return landlock_union_access_masks(domain).fs |
- LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
-}
-
static const struct access_masks any_fs = {
.fs = ~0,
};
@@ -572,6 +565,12 @@ static void test_no_more_access(struct kunit *const test)
#undef NMA_TRUE
#undef NMA_FALSE
+static bool is_layer_masks_allowed(
+ layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+{
+ return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
+}
+
/*
* Removes @layer_masks accesses that are not requested.
*
@@ -589,7 +588,8 @@ scope_to_request(const access_mask_t access_request,
for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
(*layer_masks)[access_bit] = 0;
- return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
+
+ return is_layer_masks_allowed(layer_masks);
}
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
@@ -778,16 +778,21 @@ static bool is_access_to_paths_allowed(
if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
return false;
+ allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1);
+
if (unlikely(layer_masks_parent2)) {
if (WARN_ON_ONCE(!dentry_child1))
return false;
+
+ allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2);
+
/*
* For a double request, first check for potential privilege
* escalation by looking at domain handled accesses (which are
* a superset of the meaningful requested accesses).
*/
access_masked_parent1 = access_masked_parent2 =
- get_handled_fs_accesses(domain);
+ landlock_union_access_masks(domain).fs;
is_dom_check = true;
} else {
if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
@@ -847,15 +852,6 @@ static bool is_access_to_paths_allowed(
child1_is_directory, layer_masks_parent2,
layer_masks_child2,
child2_is_directory))) {
- allowed_parent1 = scope_to_request(
- access_request_parent1, layer_masks_parent1);
- allowed_parent2 = scope_to_request(
- access_request_parent2, layer_masks_parent2);
-
- /* Stops when all accesses are granted. */
- if (allowed_parent1 && allowed_parent2)
- break;
-
/*
* Now, downgrades the remaining checks from domain
* handled accesses to requested accesses.
@@ -863,15 +859,32 @@ static bool is_access_to_paths_allowed(
is_dom_check = false;
access_masked_parent1 = access_request_parent1;
access_masked_parent2 = access_request_parent2;
+
+ allowed_parent1 =
+ allowed_parent1 ||
+ scope_to_request(access_masked_parent1,
+ layer_masks_parent1);
+ allowed_parent2 =
+ allowed_parent2 ||
+ scope_to_request(access_masked_parent2,
+ layer_masks_parent2);
+
+ /* Stops when all accesses are granted. */
+ if (allowed_parent1 && allowed_parent2)
+ break;
}
rule = find_rule(domain, walker_path.dentry);
- allowed_parent1 = landlock_unmask_layers(
- rule, access_masked_parent1, layer_masks_parent1,
- ARRAY_SIZE(*layer_masks_parent1));
- allowed_parent2 = landlock_unmask_layers(
- rule, access_masked_parent2, layer_masks_parent2,
- ARRAY_SIZE(*layer_masks_parent2));
+ allowed_parent1 = allowed_parent1 ||
+ landlock_unmask_layers(
+ rule, access_masked_parent1,
+ layer_masks_parent1,
+ ARRAY_SIZE(*layer_masks_parent1));
+ allowed_parent2 = allowed_parent2 ||
+ landlock_unmask_layers(
+ rule, access_masked_parent2,
+ layer_masks_parent2,
+ ARRAY_SIZE(*layer_masks_parent2));
/* Stops when a rule from each layer grants access. */
if (allowed_parent1 && allowed_parent2)
@@ -895,8 +908,10 @@ jump_up:
* access to internal filesystems (e.g. nsfs, which is
* reachable through /proc/<pid>/ns/<namespace>).
*/
- allowed_parent1 = allowed_parent2 =
- !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
+ if (walker_path.mnt->mnt_flags & MNT_INTERNAL) {
+ allowed_parent1 = true;
+ allowed_parent2 = true;
+ }
break;
}
parent_dentry = dget_parent(walker_path.dentry);
@@ -908,39 +923,29 @@ jump_up:
return allowed_parent1 && allowed_parent2;
}
-static int check_access_path(const struct landlock_ruleset *const domain,
- const struct path *const path,
- access_mask_t access_request)
-{
- layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
-
- access_request = landlock_init_layer_masks(
- domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
- if (is_access_to_paths_allowed(domain, path, access_request,
- &layer_masks, NULL, 0, NULL, NULL))
- return 0;
- return -EACCES;
-}
-
static int current_check_access_path(const struct path *const path,
- const access_mask_t access_request)
+ access_mask_t access_request)
{
const struct landlock_ruleset *const dom = get_current_fs_domain();
+ layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
if (!dom)
return 0;
- return check_access_path(dom, path, access_request);
+
+ access_request = landlock_init_layer_masks(
+ dom, access_request, &layer_masks, LANDLOCK_KEY_INODE);
+ if (is_access_to_paths_allowed(dom, path, access_request, &layer_masks,
+ NULL, 0, NULL, NULL))
+ return 0;
+
+ return -EACCES;
}
-static access_mask_t get_mode_access(const umode_t mode)
+static __attribute_const__ access_mask_t get_mode_access(const umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFLNK:
return LANDLOCK_ACCESS_FS_MAKE_SYM;
- case 0:
- /* A zero mode translates to S_IFREG. */
- case S_IFREG:
- return LANDLOCK_ACCESS_FS_MAKE_REG;
case S_IFDIR:
return LANDLOCK_ACCESS_FS_MAKE_DIR;
case S_IFCHR:
@@ -951,9 +956,12 @@ static access_mask_t get_mode_access(const umode_t mode)
return LANDLOCK_ACCESS_FS_MAKE_FIFO;
case S_IFSOCK:
return LANDLOCK_ACCESS_FS_MAKE_SOCK;
+ case S_IFREG:
+ case 0:
+ /* A zero mode translates to S_IFREG. */
default:
- WARN_ON_ONCE(1);
- return 0;
+ /* Treats weird files as regular files. */
+ return LANDLOCK_ACCESS_FS_MAKE_REG;
}
}
@@ -1414,11 +1422,7 @@ static int hook_path_mknod(const struct path *const dir,
struct dentry *const dentry, const umode_t mode,
const unsigned int dev)
{
- const struct landlock_ruleset *const dom = get_current_fs_domain();
-
- if (!dom)
- return 0;
- return check_access_path(dom, dir, get_mode_access(mode));
+ return current_check_access_path(dir, get_mode_access(mode));
}
static int hook_path_symlink(const struct path *const dir,
diff --git a/security/landlock/fs.h b/security/landlock/fs.h
index 1487e1f023a1..d445f411c26a 100644
--- a/security/landlock/fs.h
+++ b/security/landlock/fs.h
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/rcupdate.h>
+#include "access.h"
#include "ruleset.h"
#include "setup.h"
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index a93bdbf52fff..241ce44375b6 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -8,11 +8,13 @@
#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
@@ -20,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "access.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
@@ -384,7 +387,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
err = -EINVAL;
goto out_unlock;
}
- dst->access_masks[dst->num_layers - 1] = src->access_masks[0];
+ dst->access_masks[dst->num_layers - 1] =
+ landlock_upgrade_handled_access_masks(src->access_masks[0]);
/* Merges the @src inode tree. */
err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
@@ -537,7 +541,7 @@ struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
struct landlock_ruleset *const ruleset)
{
- struct landlock_ruleset *new_dom;
+ struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
u32 num_layers;
int err;
@@ -557,29 +561,25 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent,
new_dom = create_ruleset(num_layers);
if (IS_ERR(new_dom))
return new_dom;
+
new_dom->hierarchy =
kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
- if (!new_dom->hierarchy) {
- err = -ENOMEM;
- goto out_put_dom;
- }
+ if (!new_dom->hierarchy)
+ return ERR_PTR(-ENOMEM);
+
refcount_set(&new_dom->hierarchy->usage, 1);
/* ...as a child of @parent... */
err = inherit_ruleset(parent, new_dom);
if (err)
- goto out_put_dom;
+ return ERR_PTR(err);
/* ...and including @ruleset. */
err = merge_ruleset(new_dom, ruleset);
if (err)
- goto out_put_dom;
-
- return new_dom;
+ return ERR_PTR(err);
-out_put_dom:
- landlock_put_ruleset(new_dom);
- return ERR_PTR(err);
+ return no_free_ptr(new_dom);
}
/*
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index 631e24d4ffe9..52f4f0af6ab0 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -9,58 +9,17 @@
#ifndef _SECURITY_LANDLOCK_RULESET_H
#define _SECURITY_LANDLOCK_RULESET_H
-#include <linux/bitops.h>
-#include <linux/build_bug.h>
-#include <linux/kernel.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
-#include <uapi/linux/landlock.h>
+#include "access.h"
#include "limits.h"
#include "object.h"
-/*
- * All access rights that are denied by default whether they are handled or not
- * by a ruleset/layer. This must be ORed with all ruleset->access_masks[]
- * entries when we need to get the absolute handled access masks.
- */
-/* clang-format off */
-#define LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \
- LANDLOCK_ACCESS_FS_REFER)
-/* clang-format on */
-
-typedef u16 access_mask_t;
-/* Makes sure all filesystem access rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
-/* Makes sure all network access rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
-/* Makes sure all scoped rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE);
-/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
-static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
-
-/* Ruleset access masks. */
-struct access_masks {
- access_mask_t fs : LANDLOCK_NUM_ACCESS_FS;
- access_mask_t net : LANDLOCK_NUM_ACCESS_NET;
- access_mask_t scope : LANDLOCK_NUM_SCOPE;
-};
-
-union access_masks_all {
- struct access_masks masks;
- u32 all;
-};
-
-/* Makes sure all fields are covered. */
-static_assert(sizeof(typeof_member(union access_masks_all, masks)) ==
- sizeof(typeof_member(union access_masks_all, all)));
-
-typedef u16 layer_mask_t;
-/* Makes sure all layers can be checked. */
-static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
-
/**
* struct landlock_layer - Access rights for a given layer
*/
@@ -252,6 +211,9 @@ landlock_create_ruleset(const access_mask_t access_mask_fs,
void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
+DEFINE_FREE(landlock_put_ruleset, struct landlock_ruleset *,
+ if (!IS_ERR_OR_NULL(_T)) landlock_put_ruleset(_T))
+
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
const struct landlock_id id,
const access_mask_t access);
@@ -366,7 +328,7 @@ landlock_get_fs_access_mask(const struct landlock_ruleset *const ruleset,
{
/* Handles all initially denied by default access rights. */
return ruleset->access_masks[layer_level].fs |
- LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+ _LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
}
static inline access_mask_t
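The ruleset and syscall paths now lean on the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() ties a cleanup action to a pointer type, __free() in a declaration arranges for it to run when the variable leaves scope, and no_free_ptr() hands ownership back out on the success path. The general shape, as a sketch (struct foo, alloc_foo(), configure_foo() and put_foo() are placeholders, not real kernel APIs):

/* Sketch of the scope-based cleanup pattern used above. */
DEFINE_FREE(put_foo, struct foo *, if (!IS_ERR_OR_NULL(_T)) put_foo(_T))

static struct foo *setup_foo(void)
{
        struct foo *f __free(put_foo) = alloc_foo();

        if (IS_ERR(f))
                return f;                /* cleanup is a no-op for ERR_PTR */

        if (configure_foo(f))
                return ERR_PTR(-EINVAL); /* put_foo(f) runs automatically */

        return no_free_ptr(f);           /* success path keeps the reference */
}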
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 4ed8e70c25ed..a9760d252fc2 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -10,6 +10,7 @@
#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/capability.h>
+#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
@@ -398,8 +399,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
const enum landlock_rule_type, rule_type,
const void __user *const, rule_attr, const __u32, flags)
{
- struct landlock_ruleset *ruleset;
- int err;
+ struct landlock_ruleset *ruleset __free(landlock_put_ruleset) = NULL;
if (!is_initialized())
return -EOPNOTSUPP;
@@ -415,17 +415,12 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
switch (rule_type) {
case LANDLOCK_RULE_PATH_BENEATH:
- err = add_rule_path_beneath(ruleset, rule_attr);
- break;
+ return add_rule_path_beneath(ruleset, rule_attr);
case LANDLOCK_RULE_NET_PORT:
- err = add_rule_net_port(ruleset, rule_attr);
- break;
+ return add_rule_net_port(ruleset, rule_attr);
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
- landlock_put_ruleset(ruleset);
- return err;
}
/* Enforcement */
@@ -456,10 +451,10 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
flags)
{
- struct landlock_ruleset *new_dom, *ruleset;
+ struct landlock_ruleset *new_dom,
+ *ruleset __free(landlock_put_ruleset) = NULL;
struct cred *new_cred;
struct landlock_cred_security *new_llcred;
- int err;
if (!is_initialized())
return -EOPNOTSUPP;
@@ -483,10 +478,9 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
/* Prepares new credentials. */
new_cred = prepare_creds();
- if (!new_cred) {
- err = -ENOMEM;
- goto out_put_ruleset;
- }
+ if (!new_cred)
+ return -ENOMEM;
+
new_llcred = landlock_cred(new_cred);
/*
@@ -495,21 +489,12 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
*/
new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset);
if (IS_ERR(new_dom)) {
- err = PTR_ERR(new_dom);
- goto out_put_creds;
+ abort_creds(new_cred);
+ return PTR_ERR(new_dom);
}
/* Replaces the old (prepared) domain. */
landlock_put_ruleset(new_llcred->domain);
new_llcred->domain = new_dom;
-
- landlock_put_ruleset(ruleset);
return commit_creds(new_cred);
-
-out_put_creds:
- abort_creds(new_cred);
-
-out_put_ruleset:
- landlock_put_ruleset(ruleset);
- return err;
}
diff --git a/security/security.c b/security/security.c
index 39df4451455b..143561ebc3e8 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1248,6 +1248,12 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
* to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm
* contains the linux_binprm structure.
*
+ * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is
+ * set. The result must be the same as without this flag even if the execution
+ * will never really happen and @bprm will always be dropped.
+ *
+ * This hook must not change current->cred, only @bprm->cred.
+ *
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -3097,6 +3103,10 @@ int security_file_receive(struct file *file)
* Save open-time permission checking state for later use upon file_permission,
* and recheck access if anything has changed since inode_permission.
*
+ * We can check whether a file is opened for execution (e.g. an execve(2)
+ * call), either directly or indirectly (e.g. by ELF's ld.so), by checking
+ * file->f_flags & __FMODE_EXEC.
+ *
* Return: Returns 0 if permission is granted.
*/
int security_file_open(struct file *file)
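The added paragraph above notes that an LSM can tell from file->f_flags whether a file is being opened for execution. A minimal sketch of a file_open hook using that check (the hook and check_exec_policy() are hypothetical, not an existing LSM):

/* Sketch of the pattern described above. */
static int example_file_open(struct file *file)
{
        if (file->f_flags & __FMODE_EXEC) {
                /* opened for execution: execve(2) itself or e.g. ld.so */
                return check_exec_policy(file);  /* placeholder helper */
        }
        return 0;
}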
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 22fd7436f372..7b867dfec88b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3404,7 +3404,8 @@ static int selinux_path_notify(const struct path *path, u64 mask,
perm |= FILE__WATCH_WITH_PERM;
/* watches on read-like events need the file:watch_reads permission */
- if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
+ if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_PRE_ACCESS |
+ FS_CLOSE_NOWRITE))
perm |= FILE__WATCH_READS;
return path_has_perm(current_cred(), path, perm);
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 5c7b059a332a..d9fa69632147 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2024,6 +2024,36 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
if (!buffer)
return;
snprintf(buffer, len - 1, "%s", cp);
+ if (*cp == 'f' && strchr(buffer, ':')) {
+ /* Automatically replace 2 or more digits with \$ pattern. */
+ char *cp2;
+
+ /* e.g. file read proc:/$PID/stat */
+ cp = strstr(buffer, " proc:/");
+ if (cp && simple_strtoul(cp + 7, &cp2, 10) >= 10 && *cp2 == '/') {
+ *(cp + 7) = '\\';
+ *(cp + 8) = '$';
+ memmove(cp + 9, cp2, strlen(cp2) + 1);
+ goto ok;
+ }
+ /* e.g. file ioctl pipe:[$INO] $CMD */
+ cp = strstr(buffer, " pipe:[");
+ if (cp && simple_strtoul(cp + 7, &cp2, 10) >= 10 && *cp2 == ']') {
+ *(cp + 7) = '\\';
+ *(cp + 8) = '$';
+ memmove(cp + 9, cp2, strlen(cp2) + 1);
+ goto ok;
+ }
+ /* e.g. file ioctl socket:[$INO] $CMD */
+ cp = strstr(buffer, " socket:[");
+ if (cp && simple_strtoul(cp + 9, &cp2, 10) >= 10 && *cp2 == ']') {
+ *(cp + 9) = '\\';
+ *(cp + 10) = '$';
+ memmove(cp + 11, cp2, strlen(cp2) + 1);
+ goto ok;
+ }
+ }
+ok:
if (realpath)
tomoyo_addprintf(buffer, len, " exec.%s", realpath);
if (argv0)
@@ -2665,7 +2695,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
if (head->w.avail >= head->writebuf_size - 1) {
const int len = head->writebuf_size * 2;
- char *cp = kzalloc(len, GFP_NOFS);
+ char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN);
if (!cp) {
error = -ENOMEM;
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index aed9e3ef2c9e..3a7b0874cf44 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -722,10 +722,17 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->bprm = bprm;
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
- /* Get symlink's pathname of program. */
+ /*
+ * Get the symlink's pathname of the program, but fall back to realpath
+ * if the symlink's pathname does not exist or refers to the proc
+ * filesystem (e.g. /dev/fd/<num> or /proc/self/fd/<num>).
+ */
exename.name = tomoyo_realpath_nofollow(original_name);
+ if (exename.name && !strncmp(exename.name, "proc:/", 6)) {
+ kfree(exename.name);
+ exename.name = NULL;
+ }
if (!exename.name) {
- /* Fallback to realpath if symlink's pathname does not exist. */
exename.name = tomoyo_realpath_from_path(&bprm->file->f_path);
if (!exename.name)
goto out;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index e1a5e13ea269..1971710620c1 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -76,7 +76,6 @@ static void report_access(const char *access, struct task_struct *target,
struct task_struct *agent)
{
struct access_report_info *info;
- char agent_comm[sizeof(agent->comm)];
assert_spin_locked(&target->alloc_lock); /* for target->comm */
@@ -86,8 +85,7 @@ static void report_access(const char *access, struct task_struct *target,
*/
pr_notice_ratelimited(
"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
- access, target->comm, target->pid,
- get_task_comm(agent_comm, agent), agent->pid);
+ access, target->comm, target->pid, agent->comm, agent->pid);
return;
}
@@ -454,7 +452,7 @@ static int yama_dointvec_minmax(const struct ctl_table *table, int write,
static int max_scope = YAMA_SCOPE_NO_ATTACH;
-static struct ctl_table yama_sysctl_table[] = {
+static const struct ctl_table yama_sysctl_table[] = {
{
.procname = "ptrace_scope",
.data = &ptrace_scope,
diff --git a/sound/core/memory.c b/sound/core/memory.c
index 2d2d0094c897..d683442b4c97 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -27,38 +27,43 @@ int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size
if (import_ubuf(ITER_DEST, dst, count, &iter))
return -EFAULT;
- return copy_to_iter_fromio(&iter, (const void __iomem *)src, count);
+ if (copy_to_iter_fromio((const void __iomem *)src, count, &iter) != count)
+ return -EFAULT;
+ return 0;
}
EXPORT_SYMBOL(copy_to_user_fromio);
/**
* copy_to_iter_fromio - copy data from mmio-space to iov_iter
- * @dst: the destination iov_iter
* @src: the source pointer on mmio
* @count: the data size to copy in bytes
+ * @dst: the destination iov_iter
*
* Copies the data from mmio-space to iov_iter.
*
- * Return: Zero if successful, or non-zero on failure.
+ * Return: the number of bytes copied
*/
-int copy_to_iter_fromio(struct iov_iter *dst, const void __iomem *src,
- size_t count)
+size_t copy_to_iter_fromio(const void __iomem *src, size_t count,
+ struct iov_iter *dst)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
- return copy_to_iter((const void __force *)src, count, dst) == count ? 0 : -EFAULT;
+ return copy_to_iter((const void __force *)src, count, dst);
#else
char buf[256];
+ size_t res = 0;
+
while (count) {
size_t c = count;
if (c > sizeof(buf))
c = sizeof(buf);
memcpy_fromio(buf, (void __iomem *)src, c);
if (copy_to_iter(buf, c, dst) != c)
- return -EFAULT;
+ return res;
count -= c;
src += c;
+ res += c;
}
- return 0;
+ return res;
#endif
}
EXPORT_SYMBOL(copy_to_iter_fromio);
@@ -79,37 +84,43 @@ int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size
if (import_ubuf(ITER_SOURCE, (void __user *)src, count, &iter))
return -EFAULT;
- return copy_from_iter_toio((void __iomem *)dst, &iter, count);
+ if (copy_from_iter_toio((void __iomem *)dst, count, &iter) != count)
+ return -EFAULT;
+ return 0;
}
EXPORT_SYMBOL(copy_from_user_toio);
/**
* copy_from_iter_toio - copy data from iov_iter to mmio-space
* @dst: the destination pointer on mmio-space
- * @src: the source iov_iter
* @count: the data size to copy in bytes
+ * @src: the source iov_iter
*
* Copies the data from iov_iter to mmio-space.
*
- * Return: Zero if successful, or non-zero on failure.
+ * Return: the number of bytes copied
*/
-int copy_from_iter_toio(void __iomem *dst, struct iov_iter *src, size_t count)
+size_t copy_from_iter_toio(void __iomem *dst, size_t count,
+ struct iov_iter *src)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
- return copy_from_iter((void __force *)dst, count, src) == count ? 0 : -EFAULT;
+ return copy_from_iter((void __force *)dst, count, src);
#else
char buf[256];
+ size_t res = 0;
+
while (count) {
size_t c = count;
if (c > sizeof(buf))
c = sizeof(buf);
if (copy_from_iter(buf, c, src) != c)
- return -EFAULT;
+ return res;
memcpy_toio(dst, buf, c);
count -= c;
dst += c;
+ res += c;
}
- return 0;
+ return res;
#endif
}
EXPORT_SYMBOL(copy_from_iter_toio);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8a3384342e8d..6c2b6a62d9d2 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3245,7 +3245,7 @@ static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
return -EFAULT;
- bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
+ bufs = memdup_array_user(xfern.bufs, runtime->channels, sizeof(void *));
if (IS_ERR(bufs))
return PTR_ERR(bufs);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
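The switch from memdup_user(..., sizeof(void *) * channels) to memdup_array_user(..., channels, sizeof(void *)) above moves the size multiplication into a helper that checks it for overflow. The general pattern, as a sketch (the surrounding function is illustrative, not part of the patch):

/* Sketch: duplicate a user-supplied array of n elements without open-coding
 * the size multiplication, which could overflow. */
static int use_user_bufs(void __user *uptr, unsigned int n)
{
        void **bufs = memdup_array_user(uptr, n, sizeof(*bufs));

        if (IS_ERR(bufs))
                return PTR_ERR(bufs);   /* -EFAULT, -EOVERFLOW or -ENOMEM */

        /* ... use bufs[0] .. bufs[n - 1] ... */

        kfree(bufs);
        return 0;
}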
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 348ce1b7725e..70a958ac1112 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -629,12 +629,15 @@ static int snd_rawmidi_info(struct snd_rawmidi_substream *substream,
info->subdevice = substream->number;
info->stream = substream->stream;
info->flags = rmidi->info_flags;
+ if (substream->inactive)
+ info->flags |= SNDRV_RAWMIDI_INFO_STREAM_INACTIVE;
strcpy(info->id, rmidi->id);
strcpy(info->name, rmidi->name);
strcpy(info->subname, substream->name);
info->subdevices_count = substream->pstr->substream_count;
info->subdevices_avail = (substream->pstr->substream_count -
substream->pstr->substream_opened);
+ info->tied_device = rmidi->tied_device;
return 0;
}
diff --git a/sound/core/seq/Kconfig b/sound/core/seq/Kconfig
index 0374bbf51cd4..e4f58cb985d4 100644
--- a/sound/core/seq/Kconfig
+++ b/sound/core/seq/Kconfig
@@ -62,7 +62,7 @@ config SND_SEQ_VIRMIDI
config SND_SEQ_UMP
bool "Support for UMP events"
- default y if SND_SEQ_UMP_CLIENT
+ default SND_UMP
help
Say Y here to enable the support for handling UMP (Universal MIDI
Packet) events via ALSA sequencer infrastructure, which is an
@@ -71,6 +71,6 @@ config SND_SEQ_UMP
among legacy and UMP clients.
config SND_SEQ_UMP_CLIENT
- def_tristate SND_UMP
+ def_tristate SND_UMP && SND_SEQ_UMP
endif # SND_SEQUENCER
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index 98dd20b42976..6163a00bc8de 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -55,7 +55,6 @@ struct seq_oss_chinfo {
struct seq_oss_synthinfo {
struct snd_seq_oss_arg arg;
struct seq_oss_chinfo *ch;
- struct seq_oss_synth_sysex *sysex;
int nr_voices;
int opened;
int is_midi;
@@ -157,8 +156,4 @@ snd_seq_oss_fill_addr(struct seq_oss_devinfo *dp, struct snd_seq_event *ev,
ev->dest.port = dest_port;
}
-
-/* misc. functions for proc interface */
-char *enabled_str(bool b);
-
#endif /* __SEQ_OSS_DEVICE_H */
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 676246fa02f1..e6d7d83ed0e7 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -111,7 +111,7 @@ snd_seq_oss_create_client(void)
/*
- * receive annoucement from system port, and check the midi device
+ * receive announcement from system port, and check the midi device
*/
static int
receive_announce(struct snd_seq_event *ev, int direct, void *private, int atomic, int hop)
@@ -449,12 +449,6 @@ snd_seq_oss_reset(struct seq_oss_devinfo *dp)
/*
* misc. functions for proc interface
*/
-char *
-enabled_str(bool b)
-{
- return b ? "enabled" : "disabled";
-}
-
static const char *
filemode_str(int val)
{
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 51ee4c00a843..9de47e098b29 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,13 +26,6 @@
* definition of synth info records
*/
-/* sysex buffer */
-struct seq_oss_synth_sysex {
- int len;
- int skip;
- unsigned char buf[MAX_SYSEX_BUFLEN];
-};
-
/* synth info */
struct seq_oss_synth {
int seq_device;
@@ -66,7 +59,6 @@ static struct seq_oss_synth midi_synth_dev = {
};
static DEFINE_SPINLOCK(register_lock);
-static DEFINE_MUTEX(sysex_mutex);
/*
* prototypes
@@ -319,8 +311,6 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
}
snd_use_lock_free(&rec->use_lock);
}
- kfree(info->sysex);
- info->sysex = NULL;
kfree(info->ch);
info->ch = NULL;
}
@@ -396,8 +386,6 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
info = get_synthinfo_nospec(dp, dev);
if (!info || !info->opened)
return;
- if (info->sysex)
- info->sysex->len = 0; /* reset sysex */
reset_channels(info);
if (info->is_midi) {
if (midi_synth_dev.opened <= 0)
@@ -409,8 +397,6 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
dp->file_mode) < 0) {
midi_synth_dev.opened--;
info->opened = 0;
- kfree(info->sysex);
- info->sysex = NULL;
kfree(info->ch);
info->ch = NULL;
}
@@ -483,64 +469,26 @@ snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
/*
* receive OSS 6 byte sysex packet:
- * the full sysex message will be sent if it reaches to the end of data
- * (0xff).
+ * the event is filled and prepared for sending immediately
+ * (i.e. sysex messages are fragmented)
*/
int
snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, struct snd_seq_event *ev)
{
- int i, send;
- unsigned char *dest;
- struct seq_oss_synth_sysex *sysex;
- struct seq_oss_synthinfo *info;
+ unsigned char *p;
+ int len = 6;
- info = snd_seq_oss_synth_info(dp, dev);
- if (!info)
- return -ENXIO;
+ p = memchr(buf, 0xff, 6);
+ if (p)
+ len = p - buf + 1;
- guard(mutex)(&sysex_mutex);
- sysex = info->sysex;
- if (sysex == NULL) {
- sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
- if (sysex == NULL)
- return -ENOMEM;
- info->sysex = sysex;
- }
-
- send = 0;
- dest = sysex->buf + sysex->len;
- /* copy 6 byte packet to the buffer */
- for (i = 0; i < 6; i++) {
- if (buf[i] == 0xff) {
- send = 1;
- break;
- }
- dest[i] = buf[i];
- sysex->len++;
- if (sysex->len >= MAX_SYSEX_BUFLEN) {
- sysex->len = 0;
- sysex->skip = 1;
- break;
- }
- }
-
- if (sysex->len && send) {
- if (sysex->skip) {
- sysex->skip = 0;
- sysex->len = 0;
- return -EINVAL; /* skip */
- }
- /* copy the data to event record and send it */
- ev->flags = SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
- if (snd_seq_oss_synth_addr(dp, dev, ev))
- return -EINVAL;
- ev->data.ext.len = sysex->len;
- ev->data.ext.ptr = sysex->buf;
- sysex->len = 0;
- return 0;
- }
-
- return -EINVAL; /* skip */
+ /* copy the data to event record and send it */
+ if (snd_seq_oss_synth_addr(dp, dev, ev))
+ return -EINVAL;
+ ev->flags = SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
+ ev->data.ext.len = len;
+ ev->data.ext.ptr = buf;
+ return 0;
}
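
The rewritten function above no longer accumulates sysex bytes; each 6-byte OSS packet becomes its own variable-length event, with 0xff marking unused trailing bytes. A worked illustration of the length computation, using the exact expression from the hunk:

/* Illustration only, mirroring `len = p - buf + 1` from the code above. */
unsigned char full6[6]  = { 0xf0, 0x7e, 0x7f, 0x09, 0x01, 0xf7 };
	/* no 0xff present: memchr() returns NULL, len stays 6 */
unsigned char short6[6] = { 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xff };
	/* first 0xff at index 2: len = 2 + 1 = 3 */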
/*
@@ -660,8 +608,8 @@ snd_seq_oss_synth_info_read(struct snd_info_buffer *buf)
rec->synth_type, rec->synth_subtype,
rec->nr_voices);
snd_iprintf(buf, " capabilities : ioctl %s / load_patch %s\n",
- enabled_str((long)rec->oper.ioctl),
- enabled_str((long)rec->oper.load_patch));
+ str_enabled_disabled((long)rec->oper.ioctl),
+ str_enabled_disabled((long)rec->oper.load_patch));
snd_use_lock_free(&rec->use_lock);
}
}
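
This hunk, and several later ones in sound/pci and sound/hda, replaces open-coded ternaries with the string helpers from <linux/string_choices.h>. The sketch below shows their intended semantics, not the kernel's exact implementation:

static inline const char *str_enabled_disabled(bool v) { return v ? "enabled" : "disabled"; }
static inline const char *str_enable_disable(bool v)   { return v ? "enable" : "disable"; }
static inline const char *str_on_off(bool v)           { return v ? "on" : "off"; }
static inline const char *str_yes_no(bool v)           { return v ? "yes" : "no"; }

Because the parameter is bool, an expression such as str_on_off(reg & SOME_MASK) collapses any non-zero mask result to "on".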
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 77b6ac9b5c11..073b56dc2225 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1296,6 +1296,10 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
client->midi_version = client_info->midi_version;
memcpy(client->event_filter, client_info->event_filter, 32);
client->group_filter = client_info->group_filter;
+
+ /* notify the change */
+ snd_seq_system_client_ev_client_change(client->number);
+
return 0;
}
@@ -1419,6 +1423,9 @@ static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
if (port) {
snd_seq_set_port_info(port, info);
snd_seq_port_unlock(port);
+ /* notify the change */
+ snd_seq_system_client_ev_port_change(info->addr.client,
+ info->addr.port);
}
return 0;
}
@@ -1475,7 +1482,7 @@ int snd_seq_client_notify_subscription(int client, int port,
event.data.connect.dest = info->dest;
event.data.connect.sender = info->sender;
- return snd_seq_system_notify(client, port, &event); /* non-atomic */
+ return snd_seq_system_notify(client, port, &event, false); /* non-atomic */
}
@@ -2229,6 +2236,16 @@ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller,
error:
mutex_unlock(&cptr->ioctl_mutex);
snd_seq_client_unlock(cptr);
+ if (!err && cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO) {
+ if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT)
+ snd_seq_system_ump_notify(client, 0,
+ SNDRV_SEQ_EVENT_UMP_EP_CHANGE,
+ false);
+ else
+ snd_seq_system_ump_notify(client, type - 1,
+ SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE,
+ false);
+ }
return err;
}
#endif
diff --git a/sound/core/seq/seq_system.c b/sound/core/seq/seq_system.c
index 80267290190d..853920f79016 100644
--- a/sound/core/seq/seq_system.c
+++ b/sound/core/seq/seq_system.c
@@ -49,12 +49,14 @@ static int sysclient = -1;
/* port id numbers for this client */
static int announce_port = -1;
+/* number of subscriptions to announce port */
+static int announce_subscribed;
/* fill standard header data, source port & channel are filled in */
static int setheader(struct snd_seq_event * ev, int client, int port)
{
- if (announce_port < 0)
+ if (announce_port < 0 || !announce_subscribed)
return -ENODEV;
memset(ev, 0, sizeof(struct snd_seq_event));
@@ -76,26 +78,27 @@ static int setheader(struct snd_seq_event * ev, int client, int port)
/* entry points for broadcasting system events */
-void snd_seq_system_broadcast(int client, int port, int type)
+void snd_seq_system_broadcast(int client, int port, int type, bool atomic)
{
struct snd_seq_event ev;
if (setheader(&ev, client, port) < 0)
return;
ev.type = type;
- snd_seq_kernel_client_dispatch(sysclient, &ev, 0, 0);
+ snd_seq_kernel_client_dispatch(sysclient, &ev, atomic, 0);
}
EXPORT_SYMBOL_GPL(snd_seq_system_broadcast);
/* entry points for broadcasting system events */
-int snd_seq_system_notify(int client, int port, struct snd_seq_event *ev)
+int snd_seq_system_notify(int client, int port, struct snd_seq_event *ev,
+ bool atomic)
{
ev->flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
ev->source.client = sysclient;
ev->source.port = announce_port;
ev->dest.client = client;
ev->dest.port = port;
- return snd_seq_kernel_client_dispatch(sysclient, ev, 0, 0);
+ return snd_seq_kernel_client_dispatch(sysclient, ev, atomic, 0);
}
/* call-back handler for timer events */
@@ -104,6 +107,22 @@ static int event_input_timer(struct snd_seq_event * ev, int direct, void *privat
return snd_seq_control_queue(ev, atomic, hop);
}
+static int sys_announce_subscribe(void *private_data,
+ struct snd_seq_port_subscribe *info)
+{
+ announce_subscribed++;
+ return 0;
+}
+
+static int sys_announce_unsubscribe(void *private_data,
+ struct snd_seq_port_subscribe *info)
+{
+ if (snd_BUG_ON(!announce_subscribed))
+ return 0;
+ announce_subscribed--;
+ return 0;
+}
+
/* register our internal client */
int __init snd_seq_system_client_init(void)
{
@@ -143,7 +162,10 @@ int __init snd_seq_system_client_init(void)
/* register announcement port */
strcpy(port->name, "Announce");
port->capability = SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ; /* for broadcast only */
- port->kernel = NULL;
+ pcallbacks.event_input = NULL;
+ pcallbacks.subscribe = sys_announce_subscribe;
+ pcallbacks.unsubscribe = sys_announce_unsubscribe;
+ port->kernel = &pcallbacks;
port->type = 0;
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
diff --git a/sound/core/seq/seq_system.h b/sound/core/seq/seq_system.h
index 4fe88ad40346..62e513f74871 100644
--- a/sound/core/seq/seq_system.h
+++ b/sound/core/seq/seq_system.h
@@ -10,16 +10,31 @@
/* entry points for broadcasting system events */
-void snd_seq_system_broadcast(int client, int port, int type);
-
-#define snd_seq_system_client_ev_client_start(client) snd_seq_system_broadcast(client, 0, SNDRV_SEQ_EVENT_CLIENT_START)
-#define snd_seq_system_client_ev_client_exit(client) snd_seq_system_broadcast(client, 0, SNDRV_SEQ_EVENT_CLIENT_EXIT)
-#define snd_seq_system_client_ev_client_change(client) snd_seq_system_broadcast(client, 0, SNDRV_SEQ_EVENT_CLIENT_CHANGE)
-#define snd_seq_system_client_ev_port_start(client, port) snd_seq_system_broadcast(client, port, SNDRV_SEQ_EVENT_PORT_START)
-#define snd_seq_system_client_ev_port_exit(client, port) snd_seq_system_broadcast(client, port, SNDRV_SEQ_EVENT_PORT_EXIT)
-#define snd_seq_system_client_ev_port_change(client, port) snd_seq_system_broadcast(client, port, SNDRV_SEQ_EVENT_PORT_CHANGE)
-
-int snd_seq_system_notify(int client, int port, struct snd_seq_event *ev);
+void snd_seq_system_broadcast(int client, int port, int type, bool atomic);
+
+/* normal system notification event broadcast */
+#define notify_event(client, port, type) \
+ snd_seq_system_broadcast(client, port, type, false)
+
+/* notify UMP EP/FB change event */
+static inline void snd_seq_system_ump_notify(int client, int block, int type,
+ bool atomic)
+{
+ /* reuse the existing snd_seq_system_broadcast():
+ * struct snd_seq_ev_ump_notify is compatible with struct snd_seq_addr
+ */
+ snd_seq_system_broadcast(client, block, type, atomic);
+}
+
+#define snd_seq_system_client_ev_client_start(client) notify_event(client, 0, SNDRV_SEQ_EVENT_CLIENT_START)
+#define snd_seq_system_client_ev_client_exit(client) notify_event(client, 0, SNDRV_SEQ_EVENT_CLIENT_EXIT)
+#define snd_seq_system_client_ev_client_change(client) notify_event(client, 0, SNDRV_SEQ_EVENT_CLIENT_CHANGE)
+#define snd_seq_system_client_ev_port_start(client, port) notify_event(client, port, SNDRV_SEQ_EVENT_PORT_START)
+#define snd_seq_system_client_ev_port_exit(client, port) notify_event(client, port, SNDRV_SEQ_EVENT_PORT_EXIT)
+#define snd_seq_system_client_ev_port_change(client, port) notify_event(client, port, SNDRV_SEQ_EVENT_PORT_CHANGE)
+
+int snd_seq_system_notify(int client, int port, struct snd_seq_event *ev,
+ bool atomic);
/* register our internal client */
int snd_seq_system_client_init(void);
diff --git a/sound/core/seq/seq_ump_client.c b/sound/core/seq/seq_ump_client.c
index e956f17f3792..1255351b59ce 100644
--- a/sound/core/seq/seq_ump_client.c
+++ b/sound/core/seq/seq_ump_client.c
@@ -272,8 +272,6 @@ static void update_port_infos(struct seq_ump_client *client)
new);
if (err < 0)
continue;
- /* notify to system port */
- snd_seq_system_client_ev_port_change(client->seq_client, i);
}
}
@@ -390,6 +388,33 @@ static void handle_group_notify(struct work_struct *work)
setup_client_group_filter(client);
}
+/* UMP EP change notification */
+static int seq_ump_notify_ep_change(struct snd_ump_endpoint *ump)
+{
+ struct seq_ump_client *client = ump->seq_client;
+ struct snd_seq_client *cptr;
+ int client_id;
+
+ if (!client)
+ return -ENODEV;
+ client_id = client->seq_client;
+ cptr = snd_seq_kernel_client_get(client_id);
+ if (!cptr)
+ return -ENODEV;
+
+ snd_seq_system_ump_notify(client_id, 0, SNDRV_SEQ_EVENT_UMP_EP_CHANGE,
+ true);
+
+ /* update sequencer client name if needed */
+ if (*ump->core.name && strcmp(ump->core.name, cptr->name)) {
+ strscpy(cptr->name, ump->core.name, sizeof(cptr->name));
+ snd_seq_system_client_ev_client_change(client_id);
+ }
+
+ snd_seq_kernel_client_put(cptr);
+ return 0;
+}
+
/* UMP FB change notification */
static int seq_ump_notify_fb_change(struct snd_ump_endpoint *ump,
struct snd_ump_block *fb)
@@ -399,20 +424,29 @@ static int seq_ump_notify_fb_change(struct snd_ump_endpoint *ump,
if (!client)
return -ENODEV;
schedule_work(&client->group_notify_work);
+ snd_seq_system_ump_notify(client->seq_client, fb->info.block_id,
+ SNDRV_SEQ_EVENT_UMP_BLOCK_CHANGE,
+ true);
return 0;
}
/* UMP protocol change notification; just update the midi_version field */
static int seq_ump_switch_protocol(struct snd_ump_endpoint *ump)
{
- if (!ump->seq_client)
+ struct seq_ump_client *client = ump->seq_client;
+
+ if (!client)
return -ENODEV;
- setup_client_midi_version(ump->seq_client);
+ setup_client_midi_version(client);
+ snd_seq_system_ump_notify(client->seq_client, 0,
+ SNDRV_SEQ_EVENT_UMP_EP_CHANGE,
+ true);
return 0;
}
static const struct snd_seq_ump_ops seq_ump_ops = {
.input_receive = seq_ump_input_receive,
+ .notify_ep_change = seq_ump_notify_ep_change,
.notify_fb_change = seq_ump_notify_fb_change,
.switch_protocol = seq_ump_switch_protocol,
};
diff --git a/sound/core/ump.c b/sound/core/ump.c
index 9198bff4768c..8d8681a42ca5 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -37,6 +37,7 @@ static int process_legacy_output(struct snd_ump_endpoint *ump,
u32 *buffer, int count);
static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src,
int words);
+static void ump_legacy_set_rawmidi_name(struct snd_ump_endpoint *ump);
static void update_legacy_names(struct snd_ump_endpoint *ump);
#else
static inline int process_legacy_output(struct snd_ump_endpoint *ump,
@@ -48,11 +49,42 @@ static inline void process_legacy_input(struct snd_ump_endpoint *ump,
const u32 *src, int words)
{
}
+static inline void ump_legacy_set_rawmidi_name(struct snd_ump_endpoint *ump)
+{
+}
static inline void update_legacy_names(struct snd_ump_endpoint *ump)
{
}
#endif
+/* copy a string safely with stripping non-printable letters */
+static void safe_copy_string(void *dst, size_t max_dst_size,
+ const void *src, size_t max_src_size)
+{
+ const unsigned char *s = src;
+ unsigned char *d = dst;
+
+ if (!max_dst_size--)
+ return;
+ for (s = src; max_dst_size && *s && max_src_size--; s++) {
+ if (!isascii(*s) || !isprint(*s))
+ continue;
+ *d++ = *s;
+ max_dst_size--;
+ }
+ *d = 0;
+}
+
+/* append a string safely with stripping non-printable letters */
+static void safe_append_string(void *dst, size_t max_dst_size,
+ const void *src, size_t max_src_size)
+{
+ unsigned char *d = dst;
+ size_t len = strlen(d);
+
+ safe_copy_string(d + len, max_dst_size - len, src, max_src_size);
+}
+
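
The two helpers above are used later in this file to build group and rawmidi names from UMP strings that may carry stray control bytes. A small illustration of the resulting behaviour, worked out from the code above rather than taken from a running kernel:

char dst[8];
const char src[] = "MIDI\x01 2.0 Endpoint";   /* contains one control byte */

safe_copy_string(dst, sizeof(dst), src, sizeof(src));
/* dst == "MIDI 2." : the 0x01 byte is dropped, and copying stops once the
 * 8-byte destination only has room left for the terminating NUL. */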
static const struct snd_rawmidi_global_ops snd_ump_rawmidi_ops = {
.dev_register = snd_ump_dev_register,
.dev_unregister = snd_ump_dev_unregister,
@@ -565,16 +597,10 @@ void snd_ump_update_group_attrs(struct snd_ump_endpoint *ump)
}
if (!*fb->info.name)
continue;
- if (!*group->name) {
- /* store the first matching name */
- strscpy(group->name, fb->info.name,
- sizeof(group->name));
- } else {
- /* when overlapping, concat names */
+ if (*group->name)
strlcat(group->name, ", ", sizeof(group->name));
- strlcat(group->name, fb->info.name,
- sizeof(group->name));
- }
+ safe_append_string(group->name, sizeof(group->name),
+ fb->info.name, sizeof(fb->info.name));
}
}
}
@@ -669,6 +695,15 @@ static void choose_default_protocol(struct snd_ump_endpoint *ump)
ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
}
+/* notify the EP info/name change to sequencer */
+static void seq_notify_ep_change(struct snd_ump_endpoint *ump)
+{
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
+ if (ump->parsed && ump->seq_ops && ump->seq_ops->notify_ep_change)
+ ump->seq_ops->notify_ep_change(ump);
+#endif
+}
+
/* handle EP info stream message; update the UMP attributes */
static int ump_handle_ep_info_msg(struct snd_ump_endpoint *ump,
const union snd_ump_stream_msg *buf)
@@ -693,6 +728,7 @@ static int ump_handle_ep_info_msg(struct snd_ump_endpoint *ump,
ump->info.protocol &= ump->info.protocol_caps;
choose_default_protocol(ump);
+ seq_notify_ep_change(ump);
return 1; /* finished */
}
@@ -715,24 +751,46 @@ static int ump_handle_device_info_msg(struct snd_ump_endpoint *ump,
ump->info.family_id,
ump->info.model_id,
ump->info.sw_revision);
+ seq_notify_ep_change(ump);
return 1; /* finished */
}
+/* set up the core rawmidi name from UMP EP name string */
+static void ump_set_rawmidi_name(struct snd_ump_endpoint *ump)
+{
+ safe_copy_string(ump->core.name, sizeof(ump->core.name),
+ ump->info.name, sizeof(ump->info.name));
+}
+
/* handle EP name stream message; update the UMP name string */
static int ump_handle_ep_name_msg(struct snd_ump_endpoint *ump,
const union snd_ump_stream_msg *buf)
{
- return ump_append_string(ump, ump->info.name, sizeof(ump->info.name),
- buf->raw, 2);
+ int ret;
+
+ ret = ump_append_string(ump, ump->info.name, sizeof(ump->info.name),
+ buf->raw, 2);
+ if (ret && ump->parsed) {
+ ump_set_rawmidi_name(ump);
+ ump_legacy_set_rawmidi_name(ump);
+ seq_notify_ep_change(ump);
+ }
+
+ return ret;
}
/* handle EP product id stream message; update the UMP product_id string */
static int ump_handle_product_id_msg(struct snd_ump_endpoint *ump,
const union snd_ump_stream_msg *buf)
{
- return ump_append_string(ump, ump->info.product_id,
- sizeof(ump->info.product_id),
- buf->raw, 2);
+ int ret;
+
+ ret = ump_append_string(ump, ump->info.product_id,
+ sizeof(ump->info.product_id),
+ buf->raw, 2);
+ if (ret)
+ seq_notify_ep_change(ump);
+ return ret;
}
/* notify the protocol change to sequencer */
@@ -1045,6 +1103,8 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
if (err < 0)
ump_dbg(ump, "Unable to get UMP EP name string\n");
+ ump_set_rawmidi_name(ump);
+
/* Request Endpoint Product ID */
err = ump_req_msg(ump, msg, UMP_STREAM_MSG_REQUEST_PRODUCT_ID,
UMP_STREAM_MSG_STATUS_PRODUCT_ID);
@@ -1250,8 +1310,8 @@ static int fill_legacy_mapping(struct snd_ump_endpoint *ump)
return num;
}
-static void fill_substream_names(struct snd_ump_endpoint *ump,
- struct snd_rawmidi *rmidi, int dir)
+static void update_legacy_substreams(struct snd_ump_endpoint *ump,
+ struct snd_rawmidi *rmidi, int dir)
{
struct snd_rawmidi_substream *s;
const char *name;
@@ -1261,10 +1321,11 @@ static void fill_substream_names(struct snd_ump_endpoint *ump,
idx = ump->legacy_mapping[s->number];
name = ump->groups[idx].name;
if (!*name)
- name = ump->info.name;
+ name = ump->core.name;
scnprintf(s->name, sizeof(s->name), "Group %d (%.16s)%s",
idx + 1, name,
ump->groups[idx].active ? "" : " [Inactive]");
+ s->inactive = !ump->groups[idx].active;
}
}
@@ -1272,8 +1333,16 @@ static void update_legacy_names(struct snd_ump_endpoint *ump)
{
struct snd_rawmidi *rmidi = ump->legacy_rmidi;
- fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
- fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
+ update_legacy_substreams(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
+ update_legacy_substreams(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
+}
+
+static void ump_legacy_set_rawmidi_name(struct snd_ump_endpoint *ump)
+{
+ struct snd_rawmidi *rmidi = ump->legacy_rmidi;
+
+ snprintf(rmidi->name, sizeof(rmidi->name), "%.68s (MIDI 1.0)",
+ ump->core.name);
}
int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
@@ -1306,14 +1375,15 @@ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
if (output)
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
&snd_ump_legacy_output_ops);
- snprintf(rmidi->name, sizeof(rmidi->name), "%.68s (MIDI 1.0)",
- ump->info.name);
rmidi->info_flags = ump->core.info_flags & ~SNDRV_RAWMIDI_INFO_UMP;
rmidi->ops = &snd_ump_legacy_ops;
rmidi->private_data = ump;
ump->legacy_rmidi = rmidi;
+ ump_legacy_set_rawmidi_name(ump);
update_legacy_names(ump);
+ snd_rawmidi_tie_devices(rmidi, &ump->core);
+
ump_dbg(ump, "Created a legacy rawmidi #%d (%s)\n", device, id);
return 0;
}
diff --git a/sound/firewire/fireface/ff-protocol-former.c b/sound/firewire/fireface/ff-protocol-former.c
index efd59e9d9935..0907d0a2296f 100644
--- a/sound/firewire/fireface/ff-protocol-former.c
+++ b/sound/firewire/fireface/ff-protocol-former.c
@@ -135,13 +135,13 @@ static void dump_clock_config(struct snd_ff *ff, struct snd_info_buffer *buffer)
snd_iprintf(buffer, "Output S/PDIF format: %s (Emphasis: %s)\n",
(data & 0x00000020) ? "Professional" : "Consumer",
- (data & 0x00000040) ? "on" : "off");
+ str_on_off(data & 0x00000040));
snd_iprintf(buffer, "Optical output interface format: %s\n",
(data & 0x00000100) ? "S/PDIF" : "ADAT");
snd_iprintf(buffer, "Word output single speed: %s\n",
- (data & 0x00002000) ? "on" : "off");
+ str_on_off(data & 0x00002000));
snd_iprintf(buffer, "S/PDIF input interface: %s\n",
(data & 0x00000200) ? "Optical" : "Coaxial");
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index bb37e7e0bd79..9c82a2864a2f 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/component.h>
+#include <linux/string_choices.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_component.h>
@@ -42,8 +43,7 @@ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
if (!acomp->ops->codec_wake_override)
return 0;
- dev_dbg(bus->dev, "%s codec wakeup\n",
- enable ? "enable" : "disable");
+ dev_dbg(bus->dev, "%s codec wakeup\n", str_enable_disable(enable));
acomp->ops->codec_wake_override(acomp->dev, enable);
@@ -67,8 +67,7 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
{
struct drm_audio_component *acomp = bus->audio_component;
- dev_dbg(bus->dev, "display power %s\n",
- enable ? "enable" : "disable");
+ dev_dbg(bus->dev, "display power %s\n", str_enable_disable(enable));
mutex_lock(&bus->lock);
if (enable)
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 2670792f43b4..4e85a838ad7e 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -492,32 +492,21 @@ static int setup_bdle(struct hdac_bus *bus,
}
/**
- * snd_hdac_stream_setup_periods - set up BDL entries
+ * snd_hdac_stream_setup_bdle - set up BDL entries
* @azx_dev: HD-audio core stream to set up
+ * @dmab: allocated DMA buffer
+ * @runtime: substream runtime, optional
*
* Set up the buffer descriptor table of the given stream based on the
* period and buffer sizes of the assigned PCM substream.
*/
-int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev)
+static int snd_hdac_stream_setup_bdle(struct hdac_stream *azx_dev, struct snd_dma_buffer *dmab,
+ struct snd_pcm_runtime *runtime)
{
struct hdac_bus *bus = azx_dev->bus;
- struct snd_pcm_substream *substream = azx_dev->substream;
- struct snd_compr_stream *cstream = azx_dev->cstream;
- struct snd_pcm_runtime *runtime = NULL;
- struct snd_dma_buffer *dmab;
- __le32 *bdl;
int i, ofs, periods, period_bytes;
int pos_adj, pos_align;
-
- if (substream) {
- runtime = substream->runtime;
- dmab = snd_pcm_get_dma_buf(substream);
- } else if (cstream) {
- dmab = snd_pcm_get_dma_buf(cstream);
- } else {
- WARN(1, "No substream or cstream assigned\n");
- return -EINVAL;
- }
+ __le32 *bdl;
/* reset BDL address */
snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0);
@@ -571,6 +560,33 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev)
azx_dev->bufsize, period_bytes);
return -EINVAL;
}
+
+/**
+ * snd_hdac_stream_setup_periods - set up BDL entries
+ * @azx_dev: HD-audio core stream to set up
+ *
+ * Set up the buffer descriptor table of the given stream based on the
+ * period and buffer sizes of the assigned PCM substream.
+ */
+int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev)
+{
+ struct snd_pcm_substream *substream = azx_dev->substream;
+ struct snd_compr_stream *cstream = azx_dev->cstream;
+ struct snd_pcm_runtime *runtime = NULL;
+ struct snd_dma_buffer *dmab;
+
+ if (substream) {
+ runtime = substream->runtime;
+ dmab = snd_pcm_get_dma_buf(substream);
+ } else if (cstream) {
+ dmab = snd_pcm_get_dma_buf(cstream);
+ } else {
+ WARN(1, "No substream or cstream assigned\n");
+ return -EINVAL;
+ }
+
+ return snd_hdac_stream_setup_bdle(azx_dev, dmab, runtime);
+}
EXPORT_SYMBOL_GPL(snd_hdac_stream_setup_periods);
/**
@@ -923,7 +939,6 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
unsigned int byte_size, struct snd_dma_buffer *bufp)
{
struct hdac_bus *bus = azx_dev->bus;
- __le32 *bdl;
int err;
snd_hdac_dsp_lock(azx_dev);
@@ -943,18 +958,14 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
azx_dev->substream = NULL;
azx_dev->bufsize = byte_size;
- azx_dev->period_bytes = byte_size;
+ /* It is recommended to transfer the firmware in two or more chunks. */
+ azx_dev->period_bytes = byte_size / 2;
azx_dev->format_val = format;
+ azx_dev->no_period_wakeup = 1;
snd_hdac_stream_reset(azx_dev);
- /* reset BDL address */
- snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0);
- snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0);
-
- azx_dev->frags = 0;
- bdl = (__le32 *)azx_dev->bdl.area;
- err = setup_bdle(bus, bufp, azx_dev, &bdl, 0, byte_size, 0);
+ err = snd_hdac_stream_setup_bdle(azx_dev, bufp, NULL);
if (err < 0)
goto error;
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 071ba85f76bb..7034072c80d4 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -1157,8 +1158,8 @@ static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
((p->acc_rates & SNDRV_SB_CSP_RATE_44100) ? "44100Hz" : ""));
}
if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
- snd_iprintf(buffer, "QSound decoder %sabled\n",
- p->q_enabled ? "en" : "dis");
+ snd_iprintf(buffer, "QSound decoder %s\n",
+ str_enabled_disabled(p->q_enabled));
} else {
snd_iprintf(buffer, "PCM format ID: 0x%x (%s/%s) [%s/%s] [%s/%s]\n",
p->acc_format,
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 25f93e56cfc7..6e710dce5c60 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1864,7 +1864,7 @@ void snd_ac97_get_name(struct snd_ac97 *ac97, unsigned int id, char *name, int m
strcat(name, " ");
strcat(name, pid->name);
if (pid->mask != 0xffffffff)
- sprintf(name + strlen(name), " rev %d", id & ~pid->mask);
+ sprintf(name + strlen(name), " rev %u", id & ~pid->mask);
if (ac97 && pid->patch) {
if ((modem && (pid->flags & AC97_MODEM_PATCH)) ||
(! modem && ! (pid->flags & AC97_MODEM_PATCH)))
diff --git a/sound/pci/ac97/ac97_proc.c b/sound/pci/ac97/ac97_proc.c
index 5426f7bc9884..518834964b7b 100644
--- a/sound/pci/ac97/ac97_proc.c
+++ b/sound/pci/ac97/ac97_proc.c
@@ -161,12 +161,12 @@ static void snd_ac97_proc_read_main(struct snd_ac97 *ac97, struct snd_info_buffe
"Mic select : %s\n"
"ADC/DAC loopback : %s\n",
val & 0x8000 ? "post" : "pre",
- val & 0x4000 ? "on" : "off",
- val & 0x2000 ? "on" : "off",
- val & 0x1000 ? "on" : "off",
+ str_on_off(val & 0x4000),
+ str_on_off(val & 0x2000),
+ str_on_off(val & 0x1000),
val & 0x0200 ? "Mic" : "MIX",
val & 0x0100 ? "Mic2" : "Mic1",
- val & 0x0080 ? "on" : "off");
+ str_on_off(val & 0x0080));
if (ac97->ext_id & AC97_EI_DRA)
snd_iprintf(buffer, "Double rate slots: %s\n",
double_rate_slots[(val >> 10) & 3]);
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 50e30704bf6f..9ed778b6b03c 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -626,7 +626,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
reg = ad1889_readw(chip, AD_DS_WSMC);
snd_iprintf(buffer, "Wave output: %s\n",
- (reg & AD_DS_WSMC_WAEN) ? "enabled" : "disabled");
+ str_enabled_disabled(reg & AD_DS_WSMC_WAEN));
snd_iprintf(buffer, "Wave Channels: %s\n",
(reg & AD_DS_WSMC_WAST) ? "stereo" : "mono");
snd_iprintf(buffer, "Wave Quality: %d-bit linear\n",
@@ -642,7 +642,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
snd_iprintf(buffer, "Synthesis output: %s\n",
- reg & AD_DS_WSMC_SYEN ? "enabled" : "disabled");
+ str_enabled_disabled(reg & AD_DS_WSMC_SYEN));
/* SYRQ is at offset 4 */
tmp = (reg & AD_DS_WSMC_SYRQ) ?
@@ -654,7 +654,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
reg = ad1889_readw(chip, AD_DS_RAMC);
snd_iprintf(buffer, "ADC input: %s\n",
- (reg & AD_DS_RAMC_ADEN) ? "enabled" : "disabled");
+ str_enabled_disabled(reg & AD_DS_RAMC_ADEN));
snd_iprintf(buffer, "ADC Channels: %s\n",
(reg & AD_DS_RAMC_ADST) ? "stereo" : "mono");
snd_iprintf(buffer, "ADC Quality: %d-bit linear\n",
@@ -669,7 +669,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
(reg & AD_DS_RAMC_ADST) ? "stereo" : "mono");
snd_iprintf(buffer, "Resampler input: %s\n",
- reg & AD_DS_RAMC_REEN ? "enabled" : "disabled");
+ str_enabled_disabled(reg & AD_DS_RAMC_REEN));
/* RERQ is at offset 12 */
tmp = (reg & AD_DS_RAMC_RERQ) ?
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index e3cac73517d6..cb8593c376ee 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3084,7 +3084,7 @@ static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
}
}
}
- sprintf(card->shortname, "C-Media CMI%d", val);
+ sprintf(card->shortname, "C-Media CMI%u", val);
if (cm->chip_version < 68)
scnprintf(modelstr, sizeof(modelstr),
" (model %d)", cm->chip_version);
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index 83aaf9441ef3..9993b02d2968 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -211,52 +211,30 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input)
return 0;
}
-static int dao_clear_left_input(struct dao *dao)
+static int dao_clear_input(struct dao *dao, unsigned int start, unsigned int end)
{
- struct imapper *entry;
- struct daio *daio = &dao->daio;
- int i;
+ unsigned int i;
- if (!dao->imappers[0])
+ if (!dao->imappers[start])
return 0;
-
- entry = dao->imappers[0];
- dao->mgr->imap_delete(dao->mgr, entry);
- /* Program conjugate resources */
- for (i = 1; i < daio->rscl.msr; i++) {
- entry = dao->imappers[i];
- dao->mgr->imap_delete(dao->mgr, entry);
+ for (i = start; i < end; i++) {
+ dao->mgr->imap_delete(dao->mgr, dao->imappers[i]);
dao->imappers[i] = NULL;
}
- kfree(dao->imappers[0]);
- dao->imappers[0] = NULL;
-
return 0;
}
-static int dao_clear_right_input(struct dao *dao)
-{
- struct imapper *entry;
- struct daio *daio = &dao->daio;
- int i;
- if (!dao->imappers[daio->rscl.msr])
- return 0;
-
- entry = dao->imappers[daio->rscl.msr];
- dao->mgr->imap_delete(dao->mgr, entry);
- /* Program conjugate resources */
- for (i = 1; i < daio->rscr.msr; i++) {
- entry = dao->imappers[daio->rscl.msr + i];
- dao->mgr->imap_delete(dao->mgr, entry);
- dao->imappers[daio->rscl.msr + i] = NULL;
- }
-
- kfree(dao->imappers[daio->rscl.msr]);
- dao->imappers[daio->rscl.msr] = NULL;
+static int dao_clear_left_input(struct dao *dao)
+{
+ return dao_clear_input(dao, 0, dao->daio.rscl.msr);
+}
- return 0;
+static int dao_clear_right_input(struct dao *dao)
+{
+ return dao_clear_input(dao, dao->daio.rscl.msr,
+ dao->daio.rscl.msr + dao->daio.rscr.msr);
}
static const struct dao_rsc_ops dao_ops = {
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index 737c28d31b41..bd4734dc04cd 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/string_choices.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
#include "p16v.h"
@@ -32,9 +33,9 @@ static void snd_emu10k1_proc_spdif_status(struct snd_emu10k1 * emu,
snd_iprintf(buffer, "\n%s\n", title);
if (status != 0xffffffff) {
- snd_iprintf(buffer, "Professional Mode : %s\n", (status & SPCS_PROFESSIONAL) ? "yes" : "no");
- snd_iprintf(buffer, "Not Audio Data : %s\n", (status & SPCS_NOTAUDIODATA) ? "yes" : "no");
- snd_iprintf(buffer, "Copyright : %s\n", (status & SPCS_COPYRIGHT) ? "yes" : "no");
+ snd_iprintf(buffer, "Professional Mode : %s\n", str_yes_no(status & SPCS_PROFESSIONAL));
+ snd_iprintf(buffer, "Not Audio Data : %s\n", str_yes_no(status & SPCS_NOTAUDIODATA));
+ snd_iprintf(buffer, "Copyright : %s\n", str_yes_no(status & SPCS_COPYRIGHT));
snd_iprintf(buffer, "Emphasis : %s\n", emphasis[(status & SPCS_EMPHASISMASK) >> 3]);
snd_iprintf(buffer, "Mode : %i\n", (status & SPCS_MODEMASK) >> 6);
snd_iprintf(buffer, "Category Code : 0x%x\n", (status & SPCS_CATEGORYCODEMASK) >> 8);
@@ -46,9 +47,9 @@ static void snd_emu10k1_proc_spdif_status(struct snd_emu10k1 * emu,
if (rate_reg > 0) {
rate = snd_emu10k1_ptr_read(emu, rate_reg, 0);
- snd_iprintf(buffer, "S/PDIF Valid : %s\n", rate & SRCS_SPDIFVALID ? "on" : "off");
- snd_iprintf(buffer, "S/PDIF Locked : %s\n", rate & SRCS_SPDIFLOCKED ? "on" : "off");
- snd_iprintf(buffer, "Rate Locked : %s\n", rate & SRCS_RATELOCKED ? "on" : "off");
+ snd_iprintf(buffer, "S/PDIF Valid : %s\n", str_on_off(rate & SRCS_SPDIFVALID));
+ snd_iprintf(buffer, "S/PDIF Locked : %s\n", str_on_off(rate & SRCS_SPDIFLOCKED));
+ snd_iprintf(buffer, "Rate Locked : %s\n", str_on_off(rate & SRCS_RATELOCKED));
/* From ((Rate * 48000 ) / 262144); */
snd_iprintf(buffer, "Estimated Sample Rate : %d\n", ((rate & 0xFFFFF ) * 375) >> 11);
}
@@ -208,7 +209,7 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
#if 0
val = snd_emu10k1_ptr_read(emu, ZVSRCS, 0);
snd_iprintf(buffer, "\nZoomed Video\n");
- snd_iprintf(buffer, "Rate Locked : %s\n", val & SRCS_RATELOCKED ? "on" : "off");
+ snd_iprintf(buffer, "Rate Locked : %s\n", str_on_off(val & SRCS_RATELOCKED));
snd_iprintf(buffer, "Estimated Sample Rate : 0x%x\n", val & SRCS_ESTSAMPLERATE);
#endif
}
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 18928b905939..71c89e4e3090 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1850,12 +1850,12 @@ static void snd_ensoniq_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "Ensoniq AudioPCI " CHIP_NAME "\n\n");
snd_iprintf(buffer, "Joystick enable : %s\n",
- ensoniq->ctrl & ES_JYSTK_EN ? "on" : "off");
+ str_on_off(ensoniq->ctrl & ES_JYSTK_EN));
#ifdef CHIP1370
snd_iprintf(buffer, "MIC +5V bias : %s\n",
- ensoniq->ctrl & ES_1370_XCTL1 ? "on" : "off");
+ str_on_off(ensoniq->ctrl & ES_1370_XCTL1));
snd_iprintf(buffer, "Line In to AOUT : %s\n",
- ensoniq->ctrl & ES_1370_XCTL0 ? "on" : "off");
+ str_on_off(ensoniq->ctrl & ES_1370_XCTL0));
#else
snd_iprintf(buffer, "Joystick port : 0x%x\n",
(ES_1371_JOY_ASELI(ensoniq->ctrl) * 8) + 0x200);
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 68f1eee9e5c9..e393578cbe68 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -206,6 +206,20 @@ config SND_HDA_SCODEC_TAS2781_I2C
comment "Set to Y if you want auto-loading the side codec driver"
depends on SND_HDA=y && SND_HDA_SCODEC_TAS2781_I2C=m
+config SND_HDA_SCODEC_TAS2781_SPI
+ tristate "Build TAS2781 HD-audio side codec support for SPI Bus"
+ depends on SPI_MASTER
+ depends on ACPI
+ depends on EFI
+ depends on SND_SOC
+ select CRC32
+ help
+ Say Y or M here to include TAS2781 SPI HD-audio side codec support
+ in the snd-hda-intel driver, for codecs such as ALC287.
+
+comment "Set to Y if you want auto-loading the side codec driver"
+ depends on SND_HDA=y && SND_HDA_SCODEC_TAS2781_SPI=m
+
config SND_HDA_CODEC_REALTEK
tristate "Build Realtek HD-audio codec support"
select SND_HDA_GENERIC
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index 80210f845df2..210c406dfbc5 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -40,6 +40,7 @@ snd-hda-scodec-cs35l56-spi-y := cs35l56_hda_spi.o
snd-hda-cs-dsp-ctls-y := hda_cs_dsp_ctl.o
snd-hda-scodec-component-y := hda_component.o
snd-hda-scodec-tas2781-i2c-y := tas2781_hda_i2c.o
+snd-hda-scodec-tas2781-spi-y := tas2781_hda_spi.o tas2781_spi_fwlib.o
# common driver
obj-$(CONFIG_SND_HDA) := snd-hda-codec.o
@@ -72,6 +73,7 @@ obj-$(CONFIG_SND_HDA_SCODEC_CS35L56_SPI) += snd-hda-scodec-cs35l56-spi.o
obj-$(CONFIG_SND_HDA_CS_DSP_CONTROLS) += snd-hda-cs-dsp-ctls.o
obj-$(CONFIG_SND_HDA_SCODEC_COMPONENT) += snd-hda-scodec-component.o
obj-$(CONFIG_SND_HDA_SCODEC_TAS2781_I2C) += snd-hda-scodec-tas2781-i2c.o
+obj-$(CONFIG_SND_HDA_SCODEC_TAS2781_SPI) += snd-hda-scodec-tas2781-spi.o
# this must be the last entry after codec drivers;
# otherwise the codec patches won't be hooked before the PCI probe
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 84393f4f429d..8923813ce424 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -80,7 +80,11 @@ static int compare_input_type(const void *ap, const void *bp)
/* In case one has boost and the other one has not,
pick the one with boost first. */
- return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
+ if (a->has_boost_on_pin != b->has_boost_on_pin)
+ return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
+
+ /* Keep the original order */
+ return a->order - b->order;
}
/* Reorder the surround channels
@@ -400,6 +404,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
reorder_outputs(cfg->speaker_outs, cfg->speaker_pins);
/* sort inputs in the order of AUTO_PIN_* type */
+ for (i = 0; i < cfg->num_inputs; i++)
+ cfg->inputs[i].order = i;
sort(cfg->inputs, cfg->num_inputs, sizeof(cfg->inputs[0]),
compare_input_type, NULL);
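
The kernel's sort() gives no stability guarantee, so two inputs with the same boost property could previously swap places. Recording the original index and using it as the final tie-break makes the comparator a total order and preserves the incoming order for equal keys. A minimal sketch of the pattern; the names are illustrative, not from hda_auto_parser.c:

struct item {
	int key;    /* primary sort key */
	int order;  /* original index, recorded before sort() */
};

static int cmp_stable(const void *ap, const void *bp)
{
	const struct item *a = ap, *b = bp;

	if (a->key != b->key)
		return a->key - b->key;
	return a->order - b->order;   /* tie-break keeps the original order */
}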
diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h
index 579b11beac71..87af3d8c02f7 100644
--- a/sound/pci/hda/hda_auto_parser.h
+++ b/sound/pci/hda/hda_auto_parser.h
@@ -37,6 +37,7 @@ struct auto_pin_cfg_item {
unsigned int is_headset_mic:1;
unsigned int is_headphone_mic:1; /* Mic-only in headphone jack */
unsigned int has_boost_on_pin:1;
+ int order;
};
struct auto_pin_cfg;
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 727f39acedfc..9325e5c3cbe6 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -84,10 +84,8 @@ static int hda_hwdep_ioctl_compat(struct snd_hwdep *hw, struct file *file,
static int hda_hwdep_open(struct snd_hwdep *hw, struct file *file)
{
-#ifndef CONFIG_SND_DEBUG_VERBOSE
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
-#endif
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4a62440adfaf..7d7f9aac50a9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2738,9 +2738,9 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
/* Loongson HDAudio*/
{ PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA),
- .driver_data = AZX_DRIVER_LOONGSON },
+ .driver_data = AZX_DRIVER_LOONGSON | AZX_DCAPS_NO_TCSEL },
{ PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI),
- .driver_data = AZX_DRIVER_LOONGSON },
+ .driver_data = AZX_DRIVER_LOONGSON | AZX_DCAPS_NO_TCSEL },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 265fd4737893..140e24bf4d7f 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -648,7 +648,7 @@ static const struct hda_patch_item patch_items[NUM_LINE_MODES] = {
},
};
-/* check the line starting with '[' -- change the parser mode accodingly */
+/* check the line starting with '[' -- change the parser mode accordingly */
static int parse_line_mode(char *buf, struct hda_bus *bus)
{
int i;
diff --git a/sound/pci/hda/ideapad_hotkey_led_helper.c b/sound/pci/hda/ideapad_hotkey_led_helper.c
new file mode 100644
index 000000000000..c10d97964d49
--- /dev/null
+++ b/sound/pci/hda/ideapad_hotkey_led_helper.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ideapad helper functions for Lenovo Ideapad LED control;
+ * it should be included from the codec driver.
+ */
+
+#if IS_ENABLED(CONFIG_IDEAPAD_LAPTOP)
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+
+static bool is_ideapad(struct hda_codec *codec)
+{
+ return (codec->core.subsystem_id >> 16 == 0x17aa) &&
+ (acpi_dev_found("LHK2019") || acpi_dev_found("VPC2004"));
+}
+
+static void hda_fixup_ideapad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ if (!is_ideapad(codec))
+ return;
+ snd_hda_gen_add_mute_led_cdev(codec, NULL);
+ snd_hda_gen_add_micmute_led_cdev(codec, NULL);
+ }
+}
+
+#else /* CONFIG_IDEAPAD_LAPTOP */
+
+static void hda_fixup_ideapad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+}
+
+#endif /* CONFIG_IDEAPAD_LAPTOP */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 538c37a78a56..4985e72b9094 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -291,6 +291,7 @@ enum {
CXT_FIXUP_GPIO1,
CXT_FIXUP_ASPIRE_DMIC,
CXT_FIXUP_THINKPAD_ACPI,
+ CXT_FIXUP_LENOVO_XPAD_ACPI,
CXT_FIXUP_OLPC_XO,
CXT_FIXUP_CAP_MIX_AMP,
CXT_FIXUP_TOSHIBA_P105,
@@ -313,6 +314,9 @@ enum {
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
+/* for hda_fixup_ideapad_acpi() */
+#include "ideapad_hotkey_led_helper.c"
+
static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -928,6 +932,12 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = hda_fixup_thinkpad_acpi,
},
+ [CXT_FIXUP_LENOVO_XPAD_ACPI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = hda_fixup_ideapad_acpi,
+ .chained = true,
+ .chain_id = CXT_FIXUP_THINKPAD_ACPI,
+ },
[CXT_FIXUP_OLPC_XO] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_olpc_xo,
@@ -1119,7 +1129,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad/Ideapad", CXT_FIXUP_LENOVO_XPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
HDA_CODEC_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
@@ -1133,6 +1143,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HEADPHONE_MIC_PIN, .name = "headphone-mic-pin" },
{ .id = CXT_PINCFG_LENOVO_TP410, .name = "tp410" },
{ .id = CXT_FIXUP_THINKPAD_ACPI, .name = "thinkpad" },
+ { .id = CXT_FIXUP_LENOVO_XPAD_ACPI, .name = "thinkpad-ideapad" },
{ .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ad66378d7321..8192be394d0d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -890,9 +890,7 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports)
}
}
-/*
- */
-
+/* inverted digital-mic */
static void alc_fixup_inv_dmic(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -5902,7 +5900,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
}
codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
- is_ctia ? "yes" : "no");
+ str_yes_no(is_ctia));
spec->current_headset_type = is_ctia ? ALC_HEADSET_TYPE_CTIA : ALC_HEADSET_TYPE_OMTP;
}
@@ -6934,6 +6932,15 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
hda_fixup_thinkpad_acpi(codec, fix, action);
}
+/* for hda_fixup_ideapad_acpi() */
+#include "ideapad_hotkey_led_helper.c"
+
+static void alc_fixup_ideapad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ hda_fixup_ideapad_acpi(codec, fix, action);
+}
+
/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
const struct hda_fixup *fix,
@@ -7128,6 +7135,11 @@ static void tas2781_fixup_i2c(struct hda_codec *cdc,
comp_generic_fixup(cdc, action, "i2c", "TIAS2781", "-%s:00", 1);
}
+static void tas2781_fixup_spi(struct hda_codec *cdc, const struct hda_fixup *fix, int action)
+{
+ comp_generic_fixup(cdc, action, "spi", "TXNW2781", "-%s:00-tas2781-hda.%d", 2);
+}
+
static void yoga7_14arb7_fixup_i2c(struct hda_codec *cdc,
const struct hda_fixup *fix, int action)
{
@@ -7485,6 +7497,16 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec,
spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
}
+/*
+ * Clear COEF 0x0d (PCBEEP passthrough) bit 0x40 where BIOS sets it wrongly
+ * at PM resume
+ */
+static void alc283_fixup_dell_hp_resume(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc_write_coef_idx(codec, 0xd, 0x2800);
+}
enum {
ALC269_FIXUP_GPIO2,
@@ -7556,6 +7578,7 @@ enum {
ALC290_FIXUP_SUBWOOFER,
ALC290_FIXUP_SUBWOOFER_HSJACK,
ALC269_FIXUP_THINKPAD_ACPI,
+ ALC269_FIXUP_LENOVO_XPAD_ACPI,
ALC269_FIXUP_DMIC_THINKPAD_ACPI,
ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13,
ALC269VC_FIXUP_INFINIX_Y4_MAX,
@@ -7762,6 +7785,7 @@ enum {
ALC236_FIXUP_DELL_DUAL_CODECS,
ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
ALC287_FIXUP_TAS2781_I2C,
+ ALC245_FIXUP_TAS2781_SPI_2,
ALC287_FIXUP_YOGA7_14ARB7_I2C,
ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
ALC245_FIXUP_HP_X360_MUTE_LEDS,
@@ -7785,6 +7809,7 @@ enum {
ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
ALC233_FIXUP_MEDION_MTL_SPK,
ALC294_FIXUP_BASS_SPEAKER_15,
+ ALC283_FIXUP_DELL_HP_RESUME,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -8327,6 +8352,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_SKU_IGNORE,
},
+ [ALC269_FIXUP_LENOVO_XPAD_ACPI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_ideapad_acpi,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
[ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic,
@@ -9986,6 +10017,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
},
+ [ALC245_FIXUP_TAS2781_SPI_2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = tas2781_fixup_spi,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_GPIO_LED,
+ },
[ALC287_FIXUP_YOGA7_14ARB7_I2C] = {
.type = HDA_FIXUP_FUNC,
.v.func = yoga7_14arb7_fixup_i2c,
@@ -10117,6 +10154,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_bass_speaker_15,
},
+ [ALC283_FIXUP_DELL_HP_RESUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc283_fixup_dell_hp_resume,
+ },
};
static const struct hda_quirk alc269_fixup_tbl[] = {
@@ -10158,6 +10199,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1360, "Acer Aspire A115", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
@@ -10176,6 +10218,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0604, "Dell Venue 11 Pro 7130", ALC283_FIXUP_DELL_HP_RESUME),
SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
@@ -10388,6 +10431,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x887c, "HP Laptop 14s-fq1xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
@@ -10548,6 +10592,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8de8, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8de9, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2),
SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
@@ -10888,7 +10934,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
HDA_CODEC_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x386e, "Yoga Pro 7 14ARP8", ALC285_FIXUP_SPEAKER2_TO_DAC1),
- HDA_CODEC_QUIRK(0x17aa, 0x386f, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C),
+ HDA_CODEC_QUIRK(0x17aa, 0x38a8, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C), /* this must match before PCI SSID 17aa:386f below */
SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10963,6 +11009,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1849, 0x0269, "Positivo Master C6400", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL),
@@ -11069,7 +11116,7 @@ static const struct hda_quirk alc269_fixup_vendor_tbl[] = {
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
- SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo XPAD", ALC269_FIXUP_LENOVO_XPAD_ACPI),
SND_PCI_QUIRK_VENDOR(0x19e5, "Huawei Matebook", ALC255_FIXUP_MIC_MUTE_LED),
{}
};
@@ -11134,6 +11181,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC290_FIXUP_MONO_SPEAKERS_HSJACK, .name = "mono-speakers"},
{.id = ALC290_FIXUP_SUBWOOFER_HSJACK, .name = "alc290-subwoofer"},
{.id = ALC269_FIXUP_THINKPAD_ACPI, .name = "thinkpad"},
+ {.id = ALC269_FIXUP_LENOVO_XPAD_ACPI, .name = "lenovo-xpad-led"},
{.id = ALC269_FIXUP_DMIC_THINKPAD_ACPI, .name = "dmic-thinkpad"},
{.id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE, .name = "alc255-acer"},
{.id = ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, .name = "alc255-asus"},
diff --git a/sound/pci/hda/tas2781-spi.h b/sound/pci/hda/tas2781-spi.h
new file mode 100644
index 000000000000..ecfc3c8bb821
--- /dev/null
+++ b/sound/pci/hda/tas2781-spi.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2024 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for TAS2781 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS2781_SPI_H__
+#define __TAS2781_SPI_H__
+
+#define TASDEVICE_RATES \
+ (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_88200)
+
+#define TASDEVICE_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TASDEVICE_MAX_BOOK_NUM 256
+#define TASDEVICE_MAX_PAGE 256
+
+#define TASDEVICE_MAX_SIZE (TASDEVICE_MAX_BOOK_NUM * TASDEVICE_MAX_PAGE)
+
+/* PAGE Control Register (available in page0 of each book) */
+#define TASDEVICE_PAGE_SELECT 0x00
+#define TASDEVICE_BOOKCTL_PAGE 0x00
+#define TASDEVICE_BOOKCTL_REG GENMASK(7, 1)
+#define TASDEVICE_BOOK_ID(reg) (((reg) & GENMASK(24, 16)) >> 16)
+#define TASDEVICE_PAGE_ID(reg) (((reg) & GENMASK(15, 8)) >> 8)
+#define TASDEVICE_REG_ID(reg) (((reg) & GENMASK(7, 1)) >> 1)
+#define TASDEVICE_PAGE_REG(reg) ((reg) & GENMASK(15, 1))
+#define TASDEVICE_REG(book, page, reg) \
+ (((book) << 16) | ((page) << 8) | ((reg) << 1))
+
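
A worked example of the register encoding defined above may help when reading the SPI access helpers later in this patch; the read-flag remark is an inference from this patch, not a datasheet statement.

/*
 * TASDEVICE_REG(0x8c, 0x2d, 0x58) = (0x8c << 16) | (0x2d << 8) | (0x58 << 1)
 *                                 = 0x8c2db0
 * TASDEVICE_BOOK_ID(0x8c2db0)     = 0x8c
 * TASDEVICE_PAGE_ID(0x8c2db0)     = 0x2d
 * TASDEVICE_REG_ID(0x8c2db0)      = 0x58
 * Bit 0 of the low byte stays clear; tasdevice_spi_dev_read() below ORs
 * it in (TASDEVICE_PAGE_REG(reg) | 1), which suggests it carries the SPI
 * read flag.
 */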
+/* Software Reset */
+#define TAS2781_REG_SWRESET TASDEVICE_REG(0x0, 0x0, 0x01)
+#define TAS2781_REG_SWRESET_RESET BIT(0)
+
+/* System Reset Check Register */
+#define TAS2781_REG_CLK_CONFIG TASDEVICE_REG(0x0, 0x0, 0x5c)
+#define TAS2781_REG_CLK_CONFIG_RESET (0x19)
+#define TAS2781_PRE_POST_RESET_CFG 3
+
+/* Block Checksum */
+#define TASDEVICE_CHECKSUM TASDEVICE_REG(0x0, 0x0, 0x7e)
+
+/* Volume control */
+#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1a)
+#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
+#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
+
+#define TASDEVICE_CMD_SING_W 0x1
+#define TASDEVICE_CMD_BURST 0x2
+#define TASDEVICE_CMD_DELAY 0x3
+#define TASDEVICE_CMD_FIELD_W 0x4
+
+#define TAS2781_SPI_MAX_FREQ (4 * HZ_PER_MHZ)
+
+#define TASDEVICE_CRC8_POLYNOMIAL 0x4d
+#define TASDEVICE_SPEAKER_CALIBRATION_SIZE 20
+
+/* Flag for calibration register addresses. */
+#define TASDEVICE_CALIBRATION_REG_ADDRESS BIT(7)
+
+#define TASDEVICE_CALIBRATION_DATA_NAME L"CALI_DATA"
+#define TASDEVICE_CALIBRATION_DATA_SIZE 256
+
+enum calib_data {
+ R0_VAL = 0,
+ INV_R0,
+ R0LOW,
+ POWER,
+ TLIM,
+ CALIB_MAX
+};
+
+struct tasdevice_priv {
+ struct tasdevice_fw *cali_data_fmw;
+ struct tasdevice_rca rcabin;
+ struct tasdevice_fw *fmw;
+ struct gpio_desc *reset;
+ struct mutex codec_lock;
+ struct regmap *regmap;
+ struct device *dev;
+ struct tm tm;
+
+ unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
+ unsigned char coef_binaryname[64];
+ unsigned char rca_binaryname[64];
+ unsigned char dev_name[32];
+
+ bool force_fwload_status;
+ bool playback_started;
+ bool is_loading;
+ bool is_loaderr;
+ unsigned int cali_reg_array[CALIB_MAX];
+ unsigned int cali_data[CALIB_MAX];
+ unsigned int err_code;
+ void *codec;
+ int cur_book;
+ int cur_prog;
+ int cur_conf;
+ int fw_state;
+ int index;
+ int irq;
+
+ int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
+ const struct firmware *fmw,
+ int offset);
+ int (*fw_parse_program_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
+ int (*fw_parse_configuration_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw,
+ int offset);
+ int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block);
+
+ int (*save_calibration)(struct tasdevice_priv *tas_priv);
+ void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+};
+
+int tasdevice_spi_dev_read(struct tasdevice_priv *tas_priv,
+ unsigned int reg, unsigned int *value);
+int tasdevice_spi_dev_write(struct tasdevice_priv *tas_priv,
+ unsigned int reg, unsigned int value);
+int tasdevice_spi_dev_bulk_write(struct tasdevice_priv *tas_priv,
+ unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
+int tasdevice_spi_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
+int tasdevice_spi_dev_update_bits(struct tasdevice_priv *tasdevice,
+ unsigned int reg, unsigned int mask,
+ unsigned int value);
+
+void tasdevice_spi_select_cfg_blk(void *context, int conf_no,
+ unsigned char block_type);
+void tasdevice_spi_config_info_remove(void *context);
+int tasdevice_spi_dsp_parser(void *context);
+int tasdevice_spi_rca_parser(void *context, const struct firmware *fmw);
+void tasdevice_spi_dsp_remove(void *context);
+void tasdevice_spi_calbin_remove(void *context);
+int tasdevice_spi_select_tuningprm_cfg(void *context, int prm, int cfg_no,
+ int rca_conf_no);
+int tasdevice_spi_prmg_load(void *context, int prm_no);
+int tasdevice_spi_prmg_calibdata_load(void *context, int prm_no);
+void tasdevice_spi_tuning_switch(void *context, int state);
+int tas2781_spi_load_calibration(void *context, char *file_name,
+ unsigned short i);
+#endif /* __TAS2781_SPI_H__ */
diff --git a/sound/pci/hda/tas2781_hda_spi.c b/sound/pci/hda/tas2781_hda_spi.c
new file mode 100644
index 000000000000..a42fa990e7b9
--- /dev/null
+++ b/sound/pci/hda/tas2781_hda_spi.c
@@ -0,0 +1,1265 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// TAS2781 HDA SPI driver
+//
+// Copyright 2024 Texas Instruments, Inc.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/crc32.h>
+#include <linux/efi.h>
+#include <linux/firmware.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <sound/hda_codec.h>
+#include <sound/soc.h>
+#include <sound/tas2781-dsp.h>
+#include <sound/tlv.h>
+#include <sound/tas2781-tlv.h>
+
+#include "tas2781-spi.h"
+
+#include "hda_local.h"
+#include "hda_auto_parser.h"
+#include "hda_component.h"
+#include "hda_jack.h"
+#include "hda_generic.h"
+
+/*
+ * There are no standard control callbacks for SNDRV_CTL_ELEM_IFACE_CARD,
+ * so define two custom controls: one providing the volume control
+ * callbacks, the other providing the flag-setting control callbacks.
+ */
+
+/* Volume control callbacks for tas2781 */
+#define ACARD_SINGLE_RANGE_EXT_TLV(xname, xreg, xshift, xmin, xmax, xinvert, \
+ xhandler_get, xhandler_put, tlv_array) { \
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_range, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) { \
+ .reg = xreg, .rreg = xreg, \
+ .shift = xshift, .rshift = xshift,\
+ .min = xmin, .max = xmax, .invert = xinvert, \
+ } \
+}
+
+/* Flag control callbacks for tas2781 */
+#define ACARD_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) { \
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
+ .name = xname, \
+ .info = snd_ctl_boolean_mono_info, \
+ .get = xhandler_get, \
+ .put = xhandler_put, \
+ .private_value = xdata, \
+}
+
+struct tas2781_hda {
+ struct tasdevice_priv *priv;
+ struct acpi_device *dacpi;
+ struct snd_kcontrol *dsp_prog_ctl;
+ struct snd_kcontrol *dsp_conf_ctl;
+ struct snd_kcontrol *snd_ctls[3];
+ struct snd_kcontrol *prof_ctl;
+};
+
+static const struct regmap_range_cfg tasdevice_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = TASDEVICE_MAX_SIZE,
+ .selector_reg = TASDEVICE_PAGE_SELECT,
+ .selector_mask = GENMASK(7, 0),
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = TASDEVICE_MAX_PAGE,
+ },
+};
+
+static const struct regmap_config tasdevice_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .zero_flag_mask = true,
+ .cache_type = REGCACHE_NONE,
+ .ranges = tasdevice_ranges,
+ .num_ranges = ARRAY_SIZE(tasdevice_ranges),
+ .max_register = TASDEVICE_MAX_SIZE,
+};
+
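+/*
+ * Select the register book that contains @reg, if it is not the currently
+ * selected one, by writing the book id to the book control register and
+ * caching it in cur_book.
+ */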
+static int tasdevice_spi_switch_book(struct tasdevice_priv *tas_priv, int reg)
+{
+ struct regmap *map = tas_priv->regmap;
+
+ if (tas_priv->cur_book != TASDEVICE_BOOK_ID(reg)) {
+ int ret = regmap_write(map, TASDEVICE_BOOKCTL_REG,
+ TASDEVICE_BOOK_ID(reg));
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "Switch Book E=%d\n", ret);
+ return ret;
+ }
+ tas_priv->cur_book = TASDEVICE_BOOK_ID(reg);
+ }
+ return 0;
+}
+
+int tasdevice_spi_dev_read(struct tasdevice_priv *tas_priv,
+ unsigned int reg,
+ unsigned int *val)
+{
+ struct regmap *map = tas_priv->regmap;
+ int ret;
+
+ ret = tasdevice_spi_switch_book(tas_priv, reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In TAS2781 SPI mode, reads from any book other than book 0, or from
+ * a page number greater than 1 within book 0, return one extra leading
+ * byte; this first byte is a dummy and must be discarded.
+ */
+ if ((TASDEVICE_BOOK_ID(reg) > 0) || (TASDEVICE_PAGE_ID(reg) > 1)) {
+ unsigned char data[2];
+
+ ret = regmap_bulk_read(map, TASDEVICE_PAGE_REG(reg) | 1,
+ data, sizeof(data));
+ *val = data[1];
+ } else {
+ ret = regmap_read(map, TASDEVICE_PAGE_REG(reg) | 1, val);
+ }
+ if (ret < 0)
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+
+ return ret;
+}
+
+int tasdevice_spi_dev_write(struct tasdevice_priv *tas_priv,
+ unsigned int reg,
+ unsigned int value)
+{
+ struct regmap *map = tas_priv->regmap;
+ int ret;
+
+ ret = tasdevice_spi_switch_book(tas_priv, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(map, TASDEVICE_PAGE_REG(reg), value);
+ if (ret < 0)
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+
+ return ret;
+}
+
+int tasdevice_spi_dev_bulk_write(struct tasdevice_priv *tas_priv,
+ unsigned int reg,
+ unsigned char *data,
+ unsigned int len)
+{
+ struct regmap *map = tas_priv->regmap;
+ int ret;
+
+ ret = tasdevice_spi_switch_book(tas_priv, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_write(map, TASDEVICE_PAGE_REG(reg), data, len);
+ if (ret < 0)
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+
+ return ret;
+}
+
+int tasdevice_spi_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned int reg,
+ unsigned char *data,
+ unsigned int len)
+{
+ struct regmap *map = tas_priv->regmap;
+ int ret;
+
+ ret = tasdevice_spi_switch_book(tas_priv, reg);
+ if (ret < 0)
+ return ret;
+
+ if (len > TASDEVICE_MAX_PAGE)
+ len = TASDEVICE_MAX_PAGE;
+ /*
+ * In TAS2781 SPI mode, reads from any book other than book 0, or from
+ * a page number greater than 1 within book 0, return one extra leading
+ * byte; this first byte is a dummy and must be discarded.
+ */
+ if ((TASDEVICE_BOOK_ID(reg) > 0) || (TASDEVICE_PAGE_ID(reg) > 1)) {
+ unsigned char buf[TASDEVICE_MAX_PAGE+1];
+
+ ret = regmap_bulk_read(map, TASDEVICE_PAGE_REG(reg) | 1, buf,
+ len + 1);
+ memcpy(data, buf + 1, len);
+ } else {
+ ret = regmap_bulk_read(map, TASDEVICE_PAGE_REG(reg) | 1, data,
+ len);
+ }
+ if (ret < 0)
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+
+ return ret;
+}
+
+int tasdevice_spi_dev_update_bits(struct tasdevice_priv *tas_priv,
+ unsigned int reg,
+ unsigned int mask,
+ unsigned int value)
+{
+ struct regmap *map = tas_priv->regmap;
+ int ret, val;
+
+ /*
+ * In TAS2781 SPI mode the read/write flag is encoded in the last bit of
+ * the register address, which prevents regmap_update_bits() from working
+ * as expected, so do the read-modify-write manually.
+ */
+ ret = tasdevice_spi_dev_read(tas_priv, reg, &val);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+ return ret;
+ }
+ ret = regmap_write(map, TASDEVICE_PAGE_REG(reg),
+ (val & ~mask) | (mask & value));
+ if (ret < 0)
+ dev_err(tas_priv->dev, "%s, E=%d\n", __func__, ret);
+
+ return ret;
+}
+
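+/*
+ * Reset the amplifier: toggle the shared RESET GPIO if it is available,
+ * then issue a software reset and wait for the device to settle.
+ */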
+static void tas2781_spi_reset(struct tasdevice_priv *tas_dev)
+{
+ int ret;
+
+ if (tas_dev->reset) {
+ gpiod_set_value_cansleep(tas_dev->reset, 0);
+ fsleep(800);
+ gpiod_set_value_cansleep(tas_dev->reset, 1);
+ }
+ ret = tasdevice_spi_dev_write(tas_dev, TAS2781_REG_SWRESET,
+ TAS2781_REG_SWRESET_RESET);
+ if (ret < 0)
+ dev_err(tas_dev->dev, "dev sw-reset fail, %d\n", ret);
+ fsleep(1000);
+}
+
+static int tascodec_spi_init(struct tasdevice_priv *tas_priv,
+ void *codec, struct module *module,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ int ret;
+
+ /*
+ * Hold the codec lock to ensure that codec_probe and the firmware
+ * parsing/loading do not execute simultaneously.
+ */
+ guard(mutex)(&tas_priv->codec_lock);
+
+ scnprintf(tas_priv->rca_binaryname,
+ sizeof(tas_priv->rca_binaryname), "%sRCA%d.bin",
+ tas_priv->dev_name, tas_priv->index);
+ crc8_populate_msb(tas_priv->crc8_lkp_tbl, TASDEVICE_CRC8_POLYNOMIAL);
+ tas_priv->codec = codec;
+ ret = request_firmware_nowait(module, FW_ACTION_UEVENT,
+ tas_priv->rca_binaryname, tas_priv->dev, GFP_KERNEL, tas_priv,
+ cont);
+ if (ret)
+ dev_err(tas_priv->dev, "request_firmware_nowait err:0x%08x\n",
+ ret);
+
+ return ret;
+}
+
+static void tasdevice_spi_init(struct tasdevice_priv *tas_priv)
+{
+ tas_priv->cur_book = -1;
+ tas_priv->cur_prog = -1;
+ tas_priv->cur_conf = -1;
+
+ /* Store the default register addresses for calibration data. */
+ tas_priv->cali_reg_array[0] = TASDEVICE_REG(0, 0x17, 0x74);
+ tas_priv->cali_reg_array[1] = TASDEVICE_REG(0, 0x18, 0x0c);
+ tas_priv->cali_reg_array[2] = TASDEVICE_REG(0, 0x18, 0x14);
+ tas_priv->cali_reg_array[3] = TASDEVICE_REG(0, 0x13, 0x70);
+ tas_priv->cali_reg_array[4] = TASDEVICE_REG(0, 0x18, 0x7c);
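+ /*
+ * Each TASDEVICE_REG(book, page, offset) entry above encodes the book,
+ * page and register offset of one calibration value; the defaults may
+ * be overridden by a V3 calibration node in EFI, see
+ * tas2781_save_calibration().
+ */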
+
+ mutex_init(&tas_priv->codec_lock);
+}
+
+static int tasdevice_spi_amp_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol,
+ struct soc_mixer_control *mc)
+{
+ unsigned int invert = mc->invert;
+ unsigned char mask;
+ int max = mc->max;
+ int val, ret;
+
+ mask = rounddown_pow_of_two(max);
+ mask <<= mc->shift;
+ val = clamp(invert ? max - ucontrol->value.integer.value[0] :
+ ucontrol->value.integer.value[0], 0, max);
+ ret = tasdevice_spi_dev_update_bits(tas_priv,
+ mc->reg, mask, (unsigned int)(val << mc->shift));
+ if (ret)
+ dev_err(tas_priv->dev, "set AMP vol error in dev %d\n",
+ tas_priv->index);
+
+ return ret;
+}
+
+static int tasdevice_spi_amp_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol,
+ struct soc_mixer_control *mc)
+{
+ unsigned int invert = mc->invert;
+ unsigned char mask = 0;
+ int max = mc->max;
+ int ret, val;
+
+ /* Read the primary device */
+ ret = tasdevice_spi_dev_read(tas_priv, mc->reg, &val);
+ if (ret) {
+ dev_err(tas_priv->dev, "%s, get AMP vol error\n", __func__);
+ return ret;
+ }
+
+ mask = rounddown_pow_of_two(max);
+ mask <<= mc->shift;
+ val = (val & mask) >> mc->shift;
+ val = clamp(invert ? max - val : val, 0, max);
+ ucontrol->value.integer.value[0] = val;
+
+ return ret;
+}
+
+static int tasdevice_spi_digital_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol,
+ struct soc_mixer_control *mc)
+{
+ unsigned int invert = mc->invert;
+ int max = mc->max;
+ int val, ret;
+
+ val = clamp(invert ? max - ucontrol->value.integer.value[0] :
+ ucontrol->value.integer.value[0], 0, max);
+ ret = tasdevice_spi_dev_write(tas_priv, mc->reg, (unsigned int)val);
+ if (ret)
+ dev_err(tas_priv->dev, "set digital vol err in dev %d\n",
+ tas_priv->index);
+
+ return ret;
+}
+
+static int tasdevice_spi_digital_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol,
+ struct soc_mixer_control *mc)
+{
+ unsigned int invert = mc->invert;
+ int max = mc->max;
+ int ret, val;
+
+ /* Read from the primary device */
+ ret = tasdevice_spi_dev_read(tas_priv, mc->reg, &val);
+ if (ret) {
+ dev_err(tas_priv->dev, "%s, get digital vol err\n", __func__);
+ return ret;
+ }
+
+ val = clamp(invert ? max - val : val, 0, max);
+ ucontrol->value.integer.value[0] = val;
+
+ return ret;
+}
+
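+/*
+ * Find the ACPI companion device for this amplifier, resolve its index
+ * from the "ti,dev-index" property of the physical node and, for the
+ * first device only, claim the RESET GPIO that is shared by all amps.
+ */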
+static int tas2781_read_acpi(struct tas2781_hda *tas_hda,
+ const char *hid,
+ int id)
+{
+ struct tasdevice_priv *p = tas_hda->priv;
+ struct acpi_device *adev;
+ struct device *physdev;
+ u32 values[HDA_MAX_COMPONENTS];
+ const char *property;
+ size_t nval;
+ int ret, i;
+
+ adev = acpi_dev_get_first_match_dev(hid, NULL, -1);
+ if (!adev) {
+ dev_err(p->dev, "Failed to find ACPI device: %s\n", hid);
+ return -ENODEV;
+ }
+
+ strscpy(p->dev_name, hid, sizeof(p->dev_name));
+ tas_hda->dacpi = adev;
+ physdev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+
+ property = "ti,dev-index";
+ ret = device_property_count_u32(physdev, property);
+ if (ret <= 0 || ret > ARRAY_SIZE(values)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ nval = ret;
+
+ ret = device_property_read_u32_array(physdev, property, values, nval);
+ if (ret)
+ goto err;
+
+ p->index = U8_MAX;
+ for (i = 0; i < nval; i++) {
+ if (values[i] == id) {
+ p->index = i;
+ break;
+ }
+ }
+ if (p->index == U8_MAX) {
+ dev_dbg(p->dev, "No index found in %s\n", property);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (p->index == 0) {
+ /* All of the amps share the same RESET pin. */
+ p->reset = devm_gpiod_get_index_optional(physdev, "reset",
+ p->index, GPIOD_OUT_LOW);
+ if (IS_ERR(p->reset)) {
+ ret = PTR_ERR(p->reset);
+ dev_err_probe(p->dev, ret, "Failed on reset GPIO\n");
+ goto err;
+ }
+ }
+ put_device(physdev);
+
+ return 0;
+err:
+ dev_err(p->dev, "read acpi error, ret: %d\n", ret);
+ put_device(physdev);
+ acpi_dev_put(adev);
+
+ return ret;
+}
+
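+/*
+ * Keep the amplifier runtime-resumed while a PCM stream is open and
+ * switch the DSP tuning accordingly; allow autosuspend again once the
+ * stream is closed.
+ */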
+static void tas2781_hda_playback_hook(struct device *dev, int action)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+ if (action == HDA_GEN_PCM_ACT_OPEN) {
+ pm_runtime_get_sync(dev);
+ guard(mutex)(&tas_hda->priv->codec_lock);
+ tasdevice_spi_tuning_switch(tas_hda->priv, 0);
+ } else if (action == HDA_GEN_PCM_ACT_CLOSE) {
+ guard(mutex)(&tas_hda->priv->codec_lock);
+ tasdevice_spi_tuning_switch(tas_hda->priv, 1);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+}
+
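+/*
+ * ALSA control callbacks for selecting the regbin profile, the DSP
+ * program and the DSP configuration of each amplifier.
+ */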
+static int tasdevice_info_profile(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = tas_priv->rcabin.ncfgs - 1;
+
+ return 0;
+}
+
+static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = tas_priv->rcabin.profile_cfg_id;
+
+ return 0;
+}
+
+static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ int max = tas_priv->rcabin.ncfgs - 1;
+ int val;
+
+ val = clamp(ucontrol->value.integer.value[0], 0, max);
+ if (tas_priv->rcabin.profile_cfg_id != val) {
+ tas_priv->rcabin.profile_cfg_id = val;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int tasdevice_info_programs(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = tas_priv->fmw->nr_programs - 1;
+
+ return 0;
+}
+
+static int tasdevice_info_config(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = tas_priv->fmw->nr_configurations - 1;
+
+ return 0;
+}
+
+static int tasdevice_program_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = tas_priv->cur_prog;
+
+ return 0;
+}
+
+static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ int nr_program = ucontrol->value.integer.value[0];
+ int max = tas_priv->fmw->nr_programs - 1;
+ int val;
+
+ val = clamp(nr_program, 0, max);
+
+ if (tas_priv->cur_prog != val) {
+ tas_priv->cur_prog = val;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int tasdevice_config_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = tas_priv->cur_conf;
+
+ return 0;
+}
+
+static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ int max = tas_priv->fmw->nr_configurations - 1;
+ int val;
+
+ val = clamp(ucontrol->value.integer.value[0], 0, max);
+
+ if (tas_priv->cur_conf != val) {
+ tas_priv->cur_conf = val;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * tas2781_digital_getvol - get the digital volume control
+ * @kcontrol: control pointer
+ * @ucontrol: user data
+ *
+ * The custom kcontrols for the tas2781 are primarily about regmap book
+ * handling; paging relies on the internal regmap range mechanism.
+ * The tas2781 has a two-level book/page register map: switching books
+ * writes the register BXXP00R7F, and once the correct book is selected
+ * the regmap paging mechanism is used to access the register.
+ *
+ * Return 0 on success.
+ */
+static int tas2781_digital_getvol(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ return tasdevice_spi_digital_getvol(tas_priv, ucontrol, mc);
+}
+
+static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ return tasdevice_spi_amp_getvol(tas_priv, ucontrol, mc);
+}
+
+static int tas2781_digital_putvol(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ /* The check of the given value is in tasdevice_spi_digital_putvol. */
+ return tasdevice_spi_digital_putvol(tas_priv, ucontrol, mc);
+}
+
+static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ /* The check of the given value is in tasdevice_spi_amp_putvol. */
+ return tasdevice_spi_amp_putvol(tas_priv, ucontrol, mc);
+}
+
+static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = (int)tas_priv->force_fwload_status;
+ dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
+ str_on_off(tas_priv->force_fwload_status));
+
+ return 0;
+}
+
+static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ bool change, val = (bool)ucontrol->value.integer.value[0];
+
+ if (tas_priv->force_fwload_status == val) {
+ change = false;
+ } else {
+ change = true;
+ tas_priv->force_fwload_status = val;
+ }
+ dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
+ str_on_off(tas_priv->force_fwload_status));
+
+ return change;
+}
+
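+/*
+ * Per-amplifier mixer controls: entries 0-2 belong to the first device,
+ * entries 3-5 to the second (see the offset computed in tasdev_fw_ready()).
+ */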
+static const struct snd_kcontrol_new tas2781_snd_controls[] = {
+ ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain 0", TAS2781_AMP_LEVEL,
+ 1, 0, 20, 0, tas2781_amp_getvol,
+ tas2781_amp_putvol, amp_vol_tlv),
+ ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain 0", TAS2781_DVC_LVL,
+ 0, 0, 200, 1, tas2781_digital_getvol,
+ tas2781_digital_putvol, dvc_tlv),
+ ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load 0", 0,
+ tas2781_force_fwload_get, tas2781_force_fwload_put),
+ ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain 1", TAS2781_AMP_LEVEL,
+ 1, 0, 20, 0, tas2781_amp_getvol,
+ tas2781_amp_putvol, amp_vol_tlv),
+ ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain 1", TAS2781_DVC_LVL,
+ 0, 0, 200, 1, tas2781_digital_getvol,
+ tas2781_digital_putvol, dvc_tlv),
+ ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load 1", 0,
+ tas2781_force_fwload_get, tas2781_force_fwload_put),
+};
+
+static const struct snd_kcontrol_new tas2781_prof_ctrl[] = {
+{
+ .name = "Speaker Profile Id - 0",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_profile,
+ .get = tasdevice_get_profile_id,
+ .put = tasdevice_set_profile_id,
+},
+{
+ .name = "Speaker Profile Id - 1",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_profile,
+ .get = tasdevice_get_profile_id,
+ .put = tasdevice_set_profile_id,
+},
+};
+static const struct snd_kcontrol_new tas2781_dsp_prog_ctrl[] = {
+{
+ .name = "Speaker Program Id 0",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_programs,
+ .get = tasdevice_program_get,
+ .put = tasdevice_program_put,
+},
+{
+ .name = "Speaker Program Id 1",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_programs,
+ .get = tasdevice_program_get,
+ .put = tasdevice_program_put,
+},
+};
+
+static const struct snd_kcontrol_new tas2781_dsp_conf_ctrl[] = {
+{
+ .name = "Speaker Config Id 0",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_config,
+ .get = tasdevice_config_get,
+ .put = tasdevice_config_put,
+},
+{
+ .name = "Speaker Config Id 1",
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = tasdevice_info_config,
+ .get = tasdevice_config_get,
+ .put = tasdevice_config_put,
+},
+};
+
+static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
+{
+ int i, rc;
+
+ /*
+ * If no calibration data exists in tasdevice_priv *tas_priv, applying
+ * the calibration is skipped and the default values in the firmware
+ * binary, loaded during firmware download, are used instead.
+ */
+ if (tas_priv->cali_data[0] == 0)
+ return;
+ /*
+ * Calibration data is stored in tasdevice_priv *tas_priv as:
+ * unsigned int cali_data[CALIB_MAX];
+ * Each entry (4 bytes) is written to a register in book 0; the page and
+ * register offset for each entry come from cali_reg_array[].
+ */
+ for (i = 0; i < CALIB_MAX; i++) {
+ rc = tasdevice_spi_dev_bulk_write(tas_priv,
+ tas_priv->cali_reg_array[i],
+ (unsigned char *)&tas_priv->cali_data[i], 4);
+ if (rc < 0)
+ dev_err(tas_priv->dev,
+ "chn %d calib %d bulk_wr err = %d\n",
+ tas_priv->index, i, rc);
+ }
+}
+
+/*
+ * Update the calibration data, including speaker impedance, f0, etc.,
+ * into the algorithm. Calibration is performed by the manufacturer in
+ * the factory. The algorithm uses these data to calculate the speaker
+ * temperature, membrane excursion and f0 in real time during playback.
+ * The calibration data format in EFI has been V2 since 2024.
+ */
+static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
+{
+ /*
+ * The GUID is used for data access in the BIOS and is provided by the
+ * board manufacturer, e.g. HP: "{02f9af02-7734-4233-b43d-93fe5aa35db3}"
+ */
+ efi_guid_t efi_guid =
+ EFI_GUID(0x02f9af02, 0x7734, 0x4233,
+ 0xb4, 0x3d, 0x93, 0xfe, 0x5a, 0xa3, 0x5d, 0xb3);
+ static efi_char16_t efi_name[] = TASDEVICE_CALIBRATION_DATA_NAME;
+ unsigned char data[TASDEVICE_CALIBRATION_DATA_SIZE], *buf;
+ unsigned int attr, crc, offset, *tmp_val;
+ struct tm *tm = &tas_priv->tm;
+ unsigned long total_sz = 0;
+ efi_status_t status;
+
+ tas_priv->cali_data[0] = 0;
+ status = efi.get_variable(efi_name, &efi_guid, &attr, &total_sz, data);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ if (total_sz > TASDEVICE_CALIBRATION_DATA_SIZE)
+ return -ENOMEM;
+ /* Get variable contents into buffer */
+ status = efi.get_variable(efi_name, &efi_guid, &attr,
+ &total_sz, data);
+ }
+ if (status != EFI_SUCCESS)
+ return status;
+
+ tmp_val = (unsigned int *)data;
+ if (tmp_val[0] == 2781) {
+ /*
+ * New features added in calibration data V3:
+ * 1. Calibration register addresses are defined in
+ * a node, marked with Device id == 0x80.
+ * New features added in calibration data V2:
+ * 1. Added fields to store the link_id and
+ * unique_id for multi-link solutions.
+ * 2. Support a flexible number of devices instead of
+ * the fixed one in V1.
+ * Layout of calibration data V2 in UEFI (256 bytes in total):
+ * ChipID (2781, 4 bytes)
+ * Device-Sum (4 bytes)
+ * TimeStamp of Calibration (4 bytes)
+ * for (i = 0; i < Device-Sum; i++) {
+ * Device #i index_info {
+ * SDW link id (2 bytes)
+ * SDW unique_id (2 bytes)
+ * } // a Device id of 0x80 means this node holds
+ * // the calibration register addresses.
+ * Calibrated Data of Device #i (20 bytes)
+ * }
+ * CRC (4 bytes)
+ * Reserved (the rest)
+ */
+ crc = crc32(~0, data, (3 + tmp_val[1] * 6) * 4) ^ ~0;
+
+ if (crc != tmp_val[3 + tmp_val[1] * 6])
+ return 0;
+
+ time64_to_tm(tmp_val[2], 0, tm);
+ for (int j = 0; j < tmp_val[1]; j++) {
+ offset = j * 6 + 3;
+ if (tmp_val[offset] == tas_priv->index) {
+ for (int i = 0; i < CALIB_MAX; i++)
+ tas_priv->cali_data[i] =
+ tmp_val[offset + i + 1];
+ } else if (tmp_val[offset] ==
+ TASDEVICE_CALIBRATION_REG_ADDRESS) {
+ for (int i = 0; i < CALIB_MAX; i++) {
+ buf = &data[(offset + i + 1) * 4];
+ tas_priv->cali_reg_array[i] =
+ TASDEVICE_REG(buf[1], buf[2],
+ buf[3]);
+ }
+ }
+ tas_priv->apply_calibration(tas_priv);
+ }
+ } else {
+ /*
+ * Calibration data is in V1 format.
+ * struct cali_data {
+ * char cali_data[20];
+ * }
+ *
+ * struct {
+ * struct cali_data cali_data[4];
+ * int TimeStamp of Calibration (4 bytes)
+ * int CRC (4 bytes)
+ * } ueft;
+ */
+ crc = crc32(~0, data, 84) ^ ~0;
+ if (crc == tmp_val[21]) {
+ time64_to_tm(tmp_val[20], 0, tm);
+ for (int i = 0; i < CALIB_MAX; i++)
+ tas_priv->cali_data[i] =
+ tmp_val[tas_priv->index * 5 + i];
+ tas_priv->apply_calibration(tas_priv);
+ }
+ }
+
+ return 0;
+}
+
+static void tas2781_hda_remove_controls(struct tas2781_hda *tas_hda)
+{
+ struct hda_codec *codec = tas_hda->priv->codec;
+
+ snd_ctl_remove(codec->card, tas_hda->dsp_prog_ctl);
+
+ snd_ctl_remove(codec->card, tas_hda->dsp_conf_ctl);
+
+ for (int i = ARRAY_SIZE(tas_hda->snd_ctls) - 1; i >= 0; i--)
+ snd_ctl_remove(codec->card, tas_hda->snd_ctls[i]);
+
+ snd_ctl_remove(codec->card, tas_hda->prof_ctl);
+}
+
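+/*
+ * Firmware request completion callback: parse the regbin, register the
+ * ALSA controls, parse and download the DSP firmware, reset the amp and
+ * finally restore the calibration data from EFI.
+ */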
+static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+{
+ struct tasdevice_priv *tas_priv = context;
+ struct tas2781_hda *tas_hda = dev_get_drvdata(tas_priv->dev);
+ struct hda_codec *codec = tas_priv->codec;
+ int i, j, ret;
+
+ pm_runtime_get_sync(tas_priv->dev);
+ guard(mutex)(&tas_priv->codec_lock);
+
+ ret = tasdevice_spi_rca_parser(tas_priv, fmw);
+ if (ret)
+ goto out;
+
+ /* Add control one time only. */
+ tas_hda->prof_ctl = snd_ctl_new1(&tas2781_prof_ctrl[tas_priv->index],
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->prof_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev, "Failed to add KControl %s = %d\n",
+ tas2781_prof_ctrl[tas_priv->index].name, ret);
+ goto out;
+ }
+ j = tas_priv->index * ARRAY_SIZE(tas2781_snd_controls) / 2;
+ for (i = 0; i < 3; i++) {
+ tas_hda->snd_ctls[i] = snd_ctl_new1(&tas2781_snd_controls[i+j],
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->snd_ctls[i]);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+ tas2781_snd_controls[i+tas_priv->index*3].name,
+ ret);
+ goto out;
+ }
+ }
+
+ tasdevice_spi_dsp_remove(tas_priv);
+
+ tas_priv->fw_state = TASDEVICE_DSP_FW_PENDING;
+ scnprintf(tas_priv->coef_binaryname, 64, "TAS2XXX%08X-%01d.bin",
+ codec->core.subsystem_id, tas_priv->index);
+ ret = tasdevice_spi_dsp_parser(tas_priv);
+ if (ret) {
+ dev_err(tas_priv->dev, "dspfw load %s error\n",
+ tas_priv->coef_binaryname);
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ goto out;
+ }
+
+ /* Add control one time only. */
+ tas_hda->dsp_prog_ctl =
+ snd_ctl_new1(&tas2781_dsp_prog_ctrl[tas_priv->index],
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->dsp_prog_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+ tas2781_dsp_prog_ctrl[tas_priv->index].name, ret);
+ goto out;
+ }
+
+ tas_hda->dsp_conf_ctl =
+ snd_ctl_new1(&tas2781_dsp_conf_ctrl[tas_priv->index],
+ tas_priv);
+ ret = snd_ctl_add(codec->card, tas_hda->dsp_conf_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev, "Failed to add KControl %s = %d\n",
+ tas2781_dsp_conf_ctrl[tas_priv->index].name, ret);
+ goto out;
+ }
+
+ /* Perform AMP reset before firmware download. */
+ tas_priv->rcabin.profile_cfg_id = TAS2781_PRE_POST_RESET_CFG;
+ tasdevice_spi_tuning_switch(tas_priv, 0);
+ tas2781_spi_reset(tas_priv);
+ tas_priv->rcabin.profile_cfg_id = 0;
+ tasdevice_spi_tuning_switch(tas_priv, 1);
+
+ tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK;
+ ret = tasdevice_spi_prmg_load(tas_priv, 0);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "FW download failed = %d\n", ret);
+ goto out;
+ }
+ if (tas_priv->fmw->nr_programs > 0)
+ tas_priv->cur_prog = 0;
+ if (tas_priv->fmw->nr_configurations > 0)
+ tas_priv->cur_conf = 0;
+
+ /*
+ * If the calibration data is invalid, the DSP still works with the
+ * default calibration data built into the algorithm.
+ */
+ tas_priv->save_calibration(tas_priv);
+
+out:
+ if (fmw)
+ release_firmware(fmw);
+ pm_runtime_mark_last_busy(tas_hda->priv->dev);
+ pm_runtime_put_autosuspend(tas_hda->priv->dev);
+}
+
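+/*
+ * Component bind callback: attach this amplifier to the HDA codec and
+ * start the asynchronous firmware load; the playback hook is installed
+ * once the firmware request has been queued successfully.
+ */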
+static int tas2781_hda_bind(struct device *dev, struct device *master,
+ void *master_data)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct hda_component_parent *parent = master_data;
+ struct hda_component *comp;
+ struct hda_codec *codec;
+ int ret;
+
+ comp = hda_component_from_index(parent, tas_hda->priv->index);
+ if (!comp)
+ return -EINVAL;
+
+ if (comp->dev)
+ return -EBUSY;
+
+ codec = parent->codec;
+
+ pm_runtime_get_sync(dev);
+
+ comp->dev = dev;
+
+ strscpy(comp->name, dev_name(dev), sizeof(comp->name));
+
+ ret = tascodec_spi_init(tas_hda->priv, codec, THIS_MODULE,
+ tasdev_fw_ready);
+ if (!ret)
+ comp->playback_hook = tas2781_hda_playback_hook;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static void tas2781_hda_unbind(struct device *dev, struct device *master,
+ void *master_data)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct hda_component_parent *parent = master_data;
+ struct hda_component *comp;
+
+ comp = hda_component_from_index(parent, tas_hda->priv->index);
+ if (comp && (comp->dev == dev)) {
+ comp->dev = NULL;
+ memset(comp->name, 0, sizeof(comp->name));
+ comp->playback_hook = NULL;
+ }
+
+ tas2781_hda_remove_controls(tas_hda);
+
+ tasdevice_spi_config_info_remove(tas_hda->priv);
+ tasdevice_spi_dsp_remove(tas_hda->priv);
+
+ tas_hda->priv->fw_state = TASDEVICE_DSP_FW_PENDING;
+}
+
+static const struct component_ops tas2781_hda_comp_ops = {
+ .bind = tas2781_hda_bind,
+ .unbind = tas2781_hda_unbind,
+};
+
+static void tas2781_hda_remove(struct device *dev)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+ component_del(tas_hda->priv->dev, &tas2781_hda_comp_ops);
+
+ pm_runtime_get_sync(tas_hda->priv->dev);
+ pm_runtime_disable(tas_hda->priv->dev);
+
+ pm_runtime_put_noidle(tas_hda->priv->dev);
+
+ mutex_destroy(&tas_hda->priv->codec_lock);
+}
+
+static int tas2781_hda_spi_probe(struct spi_device *spi)
+{
+ struct tasdevice_priv *tas_priv;
+ struct tas2781_hda *tas_hda;
+ const char *device_name;
+ int ret = 0;
+
+ tas_hda = devm_kzalloc(&spi->dev, sizeof(*tas_hda), GFP_KERNEL);
+ if (!tas_hda)
+ return -ENOMEM;
+
+ spi->max_speed_hz = TAS2781_SPI_MAX_FREQ;
+
+ tas_priv = devm_kzalloc(&spi->dev, sizeof(*tas_priv), GFP_KERNEL);
+ if (!tas_priv)
+ return -ENOMEM;
+ tas_priv->dev = &spi->dev;
+ tas_hda->priv = tas_priv;
+ tas_priv->regmap = devm_regmap_init_spi(spi, &tasdevice_regmap);
+ if (IS_ERR(tas_priv->regmap)) {
+ ret = PTR_ERR(tas_priv->regmap);
+ dev_err(tas_priv->dev, "Failed to allocate regmap: %d\n",
+ ret);
+ return ret;
+ }
+ if (strstr(dev_name(&spi->dev), "TXNW2781")) {
+ device_name = "TXNW2781";
+ tas_priv->save_calibration = tas2781_save_calibration;
+ tas_priv->apply_calibration = tas2781_apply_calib;
+ } else {
+ dev_err(tas_priv->dev, "Unmatched spi dev %s\n",
+ dev_name(&spi->dev));
+ return -ENODEV;
+ }
+
+ tas_priv->irq = spi->irq;
+ dev_set_drvdata(&spi->dev, tas_hda);
+ ret = tas2781_read_acpi(tas_hda, device_name,
+ spi_get_chipselect(spi, 0));
+ if (ret)
+ return dev_err_probe(tas_priv->dev, ret,
+ "Platform not supported\n");
+
+ tasdevice_spi_init(tas_priv);
+
+ ret = component_add(tas_priv->dev, &tas2781_hda_comp_ops);
+ if (ret) {
+ dev_err(tas_priv->dev, "Register component fail: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_set_autosuspend_delay(tas_priv->dev, 3000);
+ pm_runtime_use_autosuspend(tas_priv->dev);
+ pm_runtime_mark_last_busy(tas_priv->dev);
+ pm_runtime_set_active(tas_priv->dev);
+ pm_runtime_get_noresume(tas_priv->dev);
+ pm_runtime_enable(tas_priv->dev);
+
+ pm_runtime_put_autosuspend(tas_priv->dev);
+
+ return 0;
+}
+
+static void tas2781_hda_spi_remove(struct spi_device *spi)
+{
+ tas2781_hda_remove(&spi->dev);
+}
+
+static int tas2781_runtime_suspend(struct device *dev)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+ guard(mutex)(&tas_hda->priv->codec_lock);
+
+ tasdevice_spi_tuning_switch(tas_hda->priv, 1);
+
+ tas_hda->priv->cur_book = -1;
+ tas_hda->priv->cur_conf = -1;
+
+ return 0;
+}
+
+static int tas2781_runtime_resume(struct device *dev)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+ guard(mutex)(&tas_hda->priv->codec_lock);
+
+ tasdevice_spi_tuning_switch(tas_hda->priv, 0);
+
+ return 0;
+}
+
+static int tas2781_system_suspend(struct device *dev)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ /* Shutdown chip before system suspend */
+ tasdevice_spi_tuning_switch(tas_hda->priv, 1);
+ tas2781_spi_reset(tas_hda->priv);
+ /*
+ * Reset GPIO may be shared, so cannot reset here.
+ * However beyond this point, amps may be powered down.
+ */
+ return 0;
+}
+
+static int tas2781_system_resume(struct device *dev)
+{
+ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ int ret, val;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&tas_hda->priv->codec_lock);
+ ret = tasdevice_spi_dev_read(tas_hda->priv, TAS2781_REG_CLK_CONFIG,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == TAS2781_REG_CLK_CONFIG_RESET) {
+ tas_hda->priv->cur_book = -1;
+ tas_hda->priv->cur_conf = -1;
+ tas_hda->priv->cur_prog = -1;
+
+ ret = tasdevice_spi_prmg_load(tas_hda->priv, 0);
+ if (ret < 0) {
+ dev_err(tas_hda->priv->dev,
+ "FW download failed = %d\n", ret);
+ return ret;
+ }
+
+ if (tas_hda->priv->playback_started)
+ tasdevice_spi_tuning_switch(tas_hda->priv, 0);
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops tas2781_hda_pm_ops = {
+ RUNTIME_PM_OPS(tas2781_runtime_suspend, tas2781_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(tas2781_system_suspend, tas2781_system_resume)
+};
+
+static const struct spi_device_id tas2781_hda_spi_id[] = {
+ { "tas2781-hda", },
+ {}
+};
+
+static const struct acpi_device_id tas2781_acpi_hda_match[] = {
+ {"TXNW2781", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, tas2781_acpi_hda_match);
+
+static struct spi_driver tas2781_hda_spi_driver = {
+ .driver = {
+ .name = "tas2781-hda",
+ .acpi_match_table = tas2781_acpi_hda_match,
+ .pm = &tas2781_hda_pm_ops,
+ },
+ .id_table = tas2781_hda_spi_id,
+ .probe = tas2781_hda_spi_probe,
+ .remove = tas2781_hda_spi_remove,
+};
+module_spi_driver(tas2781_hda_spi_driver);
+
+MODULE_DESCRIPTION("TAS2781 HDA SPI Driver");
+MODULE_AUTHOR("Baojun, Xu, <baojun.xug@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/pci/hda/tas2781_spi_fwlib.c b/sound/pci/hda/tas2781_spi_fwlib.c
new file mode 100644
index 000000000000..0e2acbc3c900
--- /dev/null
+++ b/sound/pci/hda/tas2781_spi_fwlib.c
@@ -0,0 +1,2006 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// TAS2781 HDA SPI driver
+//
+// Copyright 2024 Texas Instruments, Inc.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+
+#include <linux/crc8.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tas2781-dsp.h>
+#include <sound/tlv.h>
+
+#include "tas2781-spi.h"
+
+#define OFFSET_ERROR_BIT BIT(31)
+
+#define ERROR_PRAM_CRCCHK 0x0000000
+#define ERROR_YRAM_CRCCHK 0x0000001
+#define PPC_DRIVER_CRCCHK 0x00000200
+
+#define TAS2781_SA_COEFF_SWAP_REG TASDEVICE_REG(0, 0x35, 0x2c)
+#define TAS2781_YRAM_BOOK1 140
+#define TAS2781_YRAM1_PAGE 42
+#define TAS2781_YRAM1_START_REG 88
+
+#define TAS2781_YRAM2_START_PAGE 43
+#define TAS2781_YRAM2_END_PAGE 49
+#define TAS2781_YRAM2_START_REG 8
+#define TAS2781_YRAM2_END_REG 127
+
+#define TAS2781_YRAM3_PAGE 50
+#define TAS2781_YRAM3_START_REG 8
+#define TAS2781_YRAM3_END_REG 27
+
+/* should not include B0_P53_R44-R47 */
+#define TAS2781_YRAM_BOOK2 0
+#define TAS2781_YRAM4_START_PAGE 50
+#define TAS2781_YRAM4_END_PAGE 60
+
+#define TAS2781_YRAM5_PAGE 61
+#define TAS2781_YRAM5_START_REG TAS2781_YRAM3_START_REG
+#define TAS2781_YRAM5_END_REG TAS2781_YRAM3_END_REG
+
+#define TASDEVICE_MAXPROGRAM_NUM_KERNEL 5
+#define TASDEVICE_MAXCONFIG_NUM_KERNEL_MULTIPLE_AMPS 64
+#define TASDEVICE_MAXCONFIG_NUM_KERNEL 10
+#define MAIN_ALL_DEVICES_1X 0x01
+#define MAIN_DEVICE_A_1X 0x02
+#define MAIN_DEVICE_B_1X 0x03
+#define MAIN_DEVICE_C_1X 0x04
+#define MAIN_DEVICE_D_1X 0x05
+#define COEFF_DEVICE_A_1X 0x12
+#define COEFF_DEVICE_B_1X 0x13
+#define COEFF_DEVICE_C_1X 0x14
+#define COEFF_DEVICE_D_1X 0x15
+#define PRE_DEVICE_A_1X 0x22
+#define PRE_DEVICE_B_1X 0x23
+#define PRE_DEVICE_C_1X 0x24
+#define PRE_DEVICE_D_1X 0x25
+#define PRE_SOFTWARE_RESET_DEVICE_A 0x41
+#define PRE_SOFTWARE_RESET_DEVICE_B 0x42
+#define PRE_SOFTWARE_RESET_DEVICE_C 0x43
+#define PRE_SOFTWARE_RESET_DEVICE_D 0x44
+#define POST_SOFTWARE_RESET_DEVICE_A 0x45
+#define POST_SOFTWARE_RESET_DEVICE_B 0x46
+#define POST_SOFTWARE_RESET_DEVICE_C 0x47
+#define POST_SOFTWARE_RESET_DEVICE_D 0x48
+
+struct tas_crc {
+ unsigned char offset;
+ unsigned char len;
+};
+
+struct blktyp_devidx_map {
+ unsigned char blktyp;
+ unsigned char dev_idx;
+};
+
+/* Fixes an m68k compile issue: a mapping table saves code space. */
+static const struct blktyp_devidx_map ppc3_tas2781_mapping_table[] = {
+ { MAIN_ALL_DEVICES_1X, 0x80 },
+ { MAIN_DEVICE_A_1X, 0x81 },
+ { COEFF_DEVICE_A_1X, 0x81 },
+ { PRE_DEVICE_A_1X, 0x81 },
+ { PRE_SOFTWARE_RESET_DEVICE_A, 0xC1 },
+ { POST_SOFTWARE_RESET_DEVICE_A, 0xC1 },
+ { MAIN_DEVICE_B_1X, 0x82 },
+ { COEFF_DEVICE_B_1X, 0x82 },
+ { PRE_DEVICE_B_1X, 0x82 },
+ { PRE_SOFTWARE_RESET_DEVICE_B, 0xC2 },
+ { POST_SOFTWARE_RESET_DEVICE_B, 0xC2 },
+ { MAIN_DEVICE_C_1X, 0x83 },
+ { COEFF_DEVICE_C_1X, 0x83 },
+ { PRE_DEVICE_C_1X, 0x83 },
+ { PRE_SOFTWARE_RESET_DEVICE_C, 0xC3 },
+ { POST_SOFTWARE_RESET_DEVICE_C, 0xC3 },
+ { MAIN_DEVICE_D_1X, 0x84 },
+ { COEFF_DEVICE_D_1X, 0x84 },
+ { PRE_DEVICE_D_1X, 0x84 },
+ { PRE_SOFTWARE_RESET_DEVICE_D, 0xC4 },
+ { POST_SOFTWARE_RESET_DEVICE_D, 0xC4 },
+};
+
+static const struct blktyp_devidx_map ppc3_mapping_table[] = {
+ { MAIN_ALL_DEVICES_1X, 0x80 },
+ { MAIN_DEVICE_A_1X, 0x81 },
+ { COEFF_DEVICE_A_1X, 0xC1 },
+ { PRE_DEVICE_A_1X, 0xC1 },
+ { MAIN_DEVICE_B_1X, 0x82 },
+ { COEFF_DEVICE_B_1X, 0xC2 },
+ { PRE_DEVICE_B_1X, 0xC2 },
+ { MAIN_DEVICE_C_1X, 0x83 },
+ { COEFF_DEVICE_C_1X, 0xC3 },
+ { PRE_DEVICE_C_1X, 0xC3 },
+ { MAIN_DEVICE_D_1X, 0x84 },
+ { COEFF_DEVICE_D_1X, 0xC4 },
+ { PRE_DEVICE_D_1X, 0xC4 },
+};
+
+static const struct blktyp_devidx_map non_ppc3_mapping_table[] = {
+ { MAIN_ALL_DEVICES, 0x80 },
+ { MAIN_DEVICE_A, 0x81 },
+ { COEFF_DEVICE_A, 0xC1 },
+ { PRE_DEVICE_A, 0xC1 },
+ { MAIN_DEVICE_B, 0x82 },
+ { COEFF_DEVICE_B, 0xC2 },
+ { PRE_DEVICE_B, 0xC2 },
+ { MAIN_DEVICE_C, 0x83 },
+ { COEFF_DEVICE_C, 0xC3 },
+ { PRE_DEVICE_C, 0xC3 },
+ { MAIN_DEVICE_D, 0x84 },
+ { COEFF_DEVICE_D, 0xC4 },
+ { PRE_DEVICE_D, 0xC4 },
+};
+
+/*
+ * The device supports different configurations for different scenes,
+ * such as voice, music and calibration, which are stored in the regbin
+ * file. They are kept in tas_priv after the regbin has been loaded.
+ */
+static struct tasdevice_config_info *tasdevice_add_config(
+ struct tasdevice_priv *tas_priv, unsigned char *config_data,
+ unsigned int config_size, int *status)
+{
+ struct tasdevice_config_info *cfg_info;
+ struct tasdev_blk_data **bk_da;
+ unsigned int config_offset = 0;
+ unsigned int i;
+
+ /*
+ * Most projects have many audio cases, such as music, handsfree,
+ * receiver, games, audio-to-haptics, PMIC record, bypass mode,
+ * portrait, landscape, etc. Even among multiple audio cases, only
+ * one or two of the chips may serve a special case, such as an
+ * ultrasonic application. To support this variable number of audio
+ * cases, flexible configs have been introduced in the DSP firmware.
+ */
+ cfg_info = kzalloc(sizeof(*cfg_info), GFP_KERNEL);
+ if (!cfg_info) {
+ *status = -ENOMEM;
+ return NULL;
+ }
+
+ if (tas_priv->rcabin.fw_hdr.binary_version_num >= 0x105) {
+ if ((config_offset + 64) > config_size) {
+ *status = -EINVAL;
+ dev_err(tas_priv->dev, "add conf: Out of boundary\n");
+ goto config_err;
+ }
+ config_offset += 64;
+ }
+
+ if ((config_offset + 4) > config_size) {
+ *status = -EINVAL;
+ dev_err(tas_priv->dev, "add config: Out of boundary\n");
+ goto config_err;
+ }
+
+ /*
+ * Convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host byte order.
+ */
+ cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ /*
+ * Several kinds of DSP/algorithm firmware can run on the tas2781;
+ * the number and size of the blocks are not fixed and differ among
+ * these firmwares.
+ */
+ bk_da = cfg_info->blk_data = kcalloc(cfg_info->nblocks,
+ sizeof(*bk_da), GFP_KERNEL);
+ if (!bk_da) {
+ *status = -ENOMEM;
+ goto config_err;
+ }
+ cfg_info->real_nblocks = 0;
+ for (i = 0; i < cfg_info->nblocks; i++) {
+ if (config_offset + 12 > config_size) {
+ *status = -EINVAL;
+ dev_err(tas_priv->dev,
+ "%s: Out of boundary: i = %d nblocks = %u!\n",
+ __func__, i, cfg_info->nblocks);
+ goto block_err;
+ }
+ bk_da[i] = kzalloc(sizeof(*bk_da[i]), GFP_KERNEL);
+ if (!bk_da[i]) {
+ *status = -ENOMEM;
+ goto block_err;
+ }
+
+ bk_da[i]->dev_idx = config_data[config_offset];
+ config_offset++;
+
+ bk_da[i]->block_type = config_data[config_offset];
+ config_offset++;
+
+ bk_da[i]->yram_checksum =
+ get_unaligned_be16(&config_data[config_offset]);
+ config_offset += 2;
+ bk_da[i]->block_size =
+ get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ bk_da[i]->n_subblks =
+ get_unaligned_be32(&config_data[config_offset]);
+
+ config_offset += 4;
+
+ if (config_offset + bk_da[i]->block_size > config_size) {
+ *status = -EINVAL;
+ dev_err(tas_priv->dev,
+ "%s: Out of boundary: i = %d blks = %u!\n",
+ __func__, i, cfg_info->nblocks);
+ goto block_err;
+ }
+ /* instead of kzalloc+memcpy */
+ bk_da[i]->regdata = kmemdup(&config_data[config_offset],
+ bk_da[i]->block_size, GFP_KERNEL);
+ if (!bk_da[i]->regdata) {
+ *status = -ENOMEM;
+ i++;
+ goto block_err;
+ }
+
+ config_offset += bk_da[i]->block_size;
+ cfg_info->real_nblocks += 1;
+ }
+
+ return cfg_info;
+block_err:
+ for (int j = 0; j < i; j++)
+ kfree(bk_da[j]);
+ kfree(bk_da);
+config_err:
+ kfree(cfg_info);
+ return NULL;
+}
+
+/* Regbin file parser function. */
+int tasdevice_spi_rca_parser(void *context, const struct firmware *fmw)
+{
+ struct tasdevice_priv *tas_priv = context;
+ struct tasdevice_config_info **cfg_info;
+ struct tasdevice_rca_hdr *fw_hdr;
+ struct tasdevice_rca *rca;
+ unsigned int total_config_sz = 0;
+ int offset = 0, ret = 0, i;
+ unsigned char *buf;
+
+ rca = &tas_priv->rcabin;
+ fw_hdr = &rca->fw_hdr;
+ if (!fmw || !fmw->data) {
+ dev_err(tas_priv->dev, "Failed to read %s\n",
+ tas_priv->rca_binaryname);
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -EINVAL;
+ }
+ buf = (unsigned char *)fmw->data;
+ fw_hdr->img_sz = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_hdr->img_sz != fmw->size) {
+ dev_err(tas_priv->dev,
+ "File size not match, %d %u", (int)fmw->size,
+ fw_hdr->img_sz);
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -EINVAL;
+ }
+
+ fw_hdr->checksum = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->binary_version_num < 0x103) {
+ dev_err(tas_priv->dev, "File version 0x%04x is too low",
+ fw_hdr->binary_version_num);
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -EINVAL;
+ }
+ offset += 4;
+ fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+ fw_hdr->plat_type = buf[offset++];
+ fw_hdr->dev_family = buf[offset++];
+ fw_hdr->reserve = buf[offset++];
+ fw_hdr->ndev = buf[offset++];
+ if (offset + TASDEVICE_DEVICE_SUM > fw_hdr->img_sz) {
+ dev_err(tas_priv->dev, "rca_ready: Out of boundary!\n");
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++)
+ fw_hdr->devs[i] = buf[offset];
+
+ fw_hdr->nconfig = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) {
+ fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ total_config_sz += fw_hdr->config_size[i];
+ }
+
+ if (fw_hdr->img_sz - total_config_sz != (unsigned int)offset) {
+ dev_err(tas_priv->dev, "Bin file err %d - %d != %d!\n",
+ fw_hdr->img_sz, total_config_sz, (int)offset);
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -EINVAL;
+ }
+
+ cfg_info = kcalloc(fw_hdr->nconfig, sizeof(*cfg_info), GFP_KERNEL);
+ if (!cfg_info) {
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return -ENOMEM;
+ }
+ rca->cfg_info = cfg_info;
+ rca->ncfgs = 0;
+ for (i = 0; i < (int)fw_hdr->nconfig; i++) {
+ rca->ncfgs += 1;
+ cfg_info[i] = tasdevice_add_config(tas_priv, &buf[offset],
+ fw_hdr->config_size[i], &ret);
+ if (ret) {
+ tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ return ret;
+ }
+ offset += (int)fw_hdr->config_size[i];
+ }
+
+ return ret;
+}
+
+/* Fixes an m68k compile issue: a mapping table saves code space. */
+static unsigned char map_dev_idx(struct tasdevice_fw *tas_fmw,
+ struct tasdev_blk *block)
+{
+ struct blktyp_devidx_map *p =
+ (struct blktyp_devidx_map *)non_ppc3_mapping_table;
+ struct tasdevice_dspfw_hdr *fw_hdr = &tas_fmw->fw_hdr;
+ struct tasdevice_fw_fixed_hdr *fw_fixed_hdr = &fw_hdr->fixed_hdr;
+ int i, n = ARRAY_SIZE(non_ppc3_mapping_table);
+ unsigned char dev_idx = 0;
+
+ if (fw_fixed_hdr->ppcver >= PPC3_VERSION_TAS2781) {
+ p = (struct blktyp_devidx_map *)ppc3_tas2781_mapping_table;
+ n = ARRAY_SIZE(ppc3_tas2781_mapping_table);
+ } else if (fw_fixed_hdr->ppcver >= PPC3_VERSION) {
+ p = (struct blktyp_devidx_map *)ppc3_mapping_table;
+ n = ARRAY_SIZE(ppc3_mapping_table);
+ }
+
+ for (i = 0; i < n; i++) {
+ if (block->type == p[i].blktyp) {
+ dev_idx = p[i].dev_idx;
+ break;
+ }
+ }
+
+ return dev_idx;
+}
+
+/* Block parser function. */
+static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
+ struct tasdev_blk *block, const struct firmware *fmw, int offset)
+{
+ const unsigned char *data = fmw->data;
+
+ if (offset + 16 > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host byte order.
+ */
+ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ block->is_pchksum_present = data[offset++];
+ block->pchksum = data[offset++];
+ block->is_ychksum_present = data[offset++];
+ block->ychksum = data[offset++];
+ block->blk_size = get_unaligned_be32(&data[offset]);
+ offset += 4;
+ block->nr_subblocks = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ /*
+ * Fixes an m68k compile issue:
+ * 1. A mapping table saves code space.
+ * 2. Storing dev_idx as a member of the block avoids the unnecessary
+ * time and resource consumption of mapping dev_idx every time the
+ * block data is written to the DSP.
+ */
+ block->dev_idx = map_dev_idx(tas_fmw, block);
+
+ if (offset + block->blk_size > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: nSublocks error\n", __func__);
+ return -EINVAL;
+ }
+ /* instead of kzalloc+memcpy */
+ block->data = kmemdup(&data[offset], block->blk_size, GFP_KERNEL);
+ if (!block->data)
+ return -ENOMEM;
+
+ offset += block->blk_size;
+
+ return offset;
+}
+
+/* Data of block parser function. */
+static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw,
+ struct tasdevice_data *img_data, const struct firmware *fmw,
+ int offset)
+{
+ const unsigned char *data = fmw->data;
+ struct tasdev_blk *blk;
+ unsigned int i;
+
+ if (offset + 4 > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+ img_data->nr_blk = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+ sizeof(struct tasdev_blk), GFP_KERNEL);
+ if (!img_data->dev_blks)
+ return -ENOMEM;
+
+ for (i = 0; i < img_data->nr_blk; i++) {
+ blk = &img_data->dev_blks[i];
+ offset = fw_parse_block_data_kernel(
+ tas_fmw, blk, fmw, offset);
+ if (offset < 0) {
+ kfree(img_data->dev_blks);
+ return -EINVAL;
+ }
+ }
+
+ return offset;
+}
+
+/* Data of DSP program parser function. */
+static int fw_parse_program_data_kernel(
+ struct tasdevice_priv *tas_priv, struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset)
+{
+ struct tasdevice_prog *program;
+ unsigned int i;
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ program = &tas_fmw->programs[i];
+ if (offset + 72 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: mpName error\n", __func__);
+ return -EINVAL;
+ }
+ /* skip 72 unused bytes */
+ offset += 72;
+
+ offset = fw_parse_data_kernel(tas_fmw, &program->dev_data,
+ fmw, offset);
+ if (offset < 0)
+ break;
+ }
+
+ return offset;
+}
+
+/* Data of DSP configurations parser function. */
+static int fw_parse_configuration_data_kernel(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw, const struct firmware *fmw, int offset)
+{
+ const unsigned char *data = fmw->data;
+ struct tasdevice_config *config;
+ unsigned int i;
+
+ for (i = 0; i < tas_fmw->nr_configurations; i++) {
+ config = &tas_fmw->configs[i];
+ if (offset + 80 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: mpName error\n", __func__);
+ return -EINVAL;
+ }
+ memcpy(config->name, &data[offset], 64);
+ /* skip extra 16 bytes */
+ offset += 80;
+
+ offset = fw_parse_data_kernel(tas_fmw, &config->dev_data,
+ fmw, offset);
+ if (offset < 0)
+ break;
+ }
+
+ return offset;
+}
+
+/* DSP firmware file header parser function for early PPC3 firmware binary. */
+static int fw_parse_variable_header_kernel(struct tasdevice_priv *tas_priv,
+ const struct firmware *fmw, int offset)
+{
+ struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+ struct tasdevice_dspfw_hdr *fw_hdr = &tas_fmw->fw_hdr;
+ struct tasdevice_config *config;
+ struct tasdevice_prog *program;
+ const unsigned char *buf = fmw->data;
+ unsigned short max_confs;
+ unsigned int i;
+
+ if (offset + 12 + 4 * TASDEVICE_MAXPROGRAM_NUM_KERNEL > fmw->size) {
+ dev_err(tas_priv->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+ fw_hdr->device_family = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s:not TAS device\n", __func__);
+ return -EINVAL;
+ }
+ offset += 2;
+ fw_hdr->device = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+ return -EINVAL;
+ }
+ offset += 2;
+
+ tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ if (tas_fmw->nr_programs == 0 ||
+ tas_fmw->nr_programs > TASDEVICE_MAXPROGRAM_NUM_KERNEL) {
+ dev_err(tas_priv->dev, "mnPrograms is invalid\n");
+ return -EINVAL;
+ }
+
+ tas_fmw->programs = kcalloc(tas_fmw->nr_programs,
+ sizeof(*tas_fmw->programs), GFP_KERNEL);
+ if (!tas_fmw->programs)
+ return -ENOMEM;
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ program = &tas_fmw->programs[i];
+ program->prog_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+ /* Skip the unused prog_size */
+ offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs);
+
+ tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ /*
+ * The maximum number of configs differs between firmware built for
+ * more than four tas2781 devices and firmware built for four or fewer.
+ */
+ max_confs = TASDEVICE_MAXCONFIG_NUM_KERNEL;
+ if (tas_fmw->nr_configurations == 0 ||
+ tas_fmw->nr_configurations > max_confs) {
+ dev_err(tas_priv->dev, "%s: Conf is invalid\n", __func__);
+ kfree(tas_fmw->programs);
+ return -EINVAL;
+ }
+
+ if (offset + 4 * max_confs > fmw->size) {
+ dev_err(tas_priv->dev, "%s: mpConfigurations err\n", __func__);
+ kfree(tas_fmw->programs);
+ return -EINVAL;
+ }
+
+ tas_fmw->configs = kcalloc(tas_fmw->nr_configurations,
+ sizeof(*tas_fmw->configs), GFP_KERNEL);
+ if (!tas_fmw->configs) {
+ kfree(tas_fmw->programs);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ config = &tas_fmw->configs[i];
+ config->cfg_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+ /* Skip the unused configs */
+ offset += 4 * (max_confs - tas_fmw->nr_configurations);
+
+ return offset;
+}
+
+/*
+ * Sub-block data comes in four types:
+ * 1. Single-byte write.
+ * 2. Multi-byte write.
+ * 3. Delay.
+ * 4. Bits update.
+ * This function performs single-byte writes to the device.
+ */
+static int tasdevice_single_byte_wr(void *context, int dev_idx,
+ unsigned char *data, int sublocksize)
+{
+ struct tasdevice_priv *tas_priv = context;
+ unsigned short len = get_unaligned_be16(&data[2]);
+ int i, subblk_offset, rc;
+
+ subblk_offset = 4;
+ if (subblk_offset + 4 * len > sublocksize) {
+ dev_err(tas_priv->dev, "process_block: Out of boundary\n");
+ return 0;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (dev_idx == (tas_priv->index + 1) || dev_idx == 0) {
+ rc = tasdevice_spi_dev_write(tas_priv,
+ TASDEVICE_REG(data[subblk_offset],
+ data[subblk_offset + 1],
+ data[subblk_offset + 2]),
+ data[subblk_offset + 3]);
+ if (rc < 0) {
+ dev_err(tas_priv->dev,
+ "process_block: single write error\n");
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ }
+ subblk_offset += 4;
+ }
+
+ return subblk_offset;
+}
+
+/*
+ * Sub-block data comes in four types:
+ * 1. Single-byte write.
+ * 2. Multi-byte write.
+ * 3. Delay.
+ * 4. Bits update.
+ * This function performs multi-byte writes to the device.
+ */
+static int tasdevice_burst_wr(void *context, int dev_idx, unsigned char *data,
+ int sublocksize)
+{
+ struct tasdevice_priv *tas_priv = context;
+ unsigned short len = get_unaligned_be16(&data[2]);
+ int subblk_offset, rc;
+
+ subblk_offset = 4;
+ if (subblk_offset + 4 + len > sublocksize) {
+ dev_err(tas_priv->dev, "%s: BST Out of boundary\n", __func__);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ if (len % 4) {
+ dev_err(tas_priv->dev, "%s:Bst-len(%u)not div by 4\n",
+ __func__, len);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+
+ if (dev_idx == (tas_priv->index + 1) || dev_idx == 0) {
+ rc = tasdevice_spi_dev_bulk_write(tas_priv,
+ TASDEVICE_REG(data[subblk_offset],
+ data[subblk_offset + 1],
+ data[subblk_offset + 2]),
+ &data[subblk_offset + 4], len);
+ if (rc < 0) {
+ dev_err(tas_priv->dev, "%s: bulk_write error = %d\n",
+ __func__, rc);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ }
+ subblk_offset += (len + 4);
+
+ return subblk_offset;
+}
+
+/* Just delay for the given number of milliseconds. */
+static int tasdevice_delay(void *context, int dev_idx, unsigned char *data,
+ int sublocksize)
+{
+ struct tasdevice_priv *tas_priv = context;
+ unsigned int sleep_time, subblk_offset = 2;
+
+ if (subblk_offset + 2 > sublocksize) {
+ dev_err(tas_priv->dev, "%s: delay Out of boundary\n",
+ __func__);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ if (dev_idx == (tas_priv->index + 1) || dev_idx == 0) {
+ sleep_time = get_unaligned_be16(&data[2]) * 1000;
+ fsleep(sleep_time);
+ }
+ subblk_offset += 2;
+
+ return subblk_offset;
+}
+
+/*
+ * Sub-block data comes in four types:
+ * 1. Single-byte write.
+ * 2. Multi-byte write.
+ * 3. Delay.
+ * 4. Bits update.
+ * This function performs bits updates.
+ */
+static int tasdevice_field_wr(void *context, int dev_idx, unsigned char *data,
+ int sublocksize)
+{
+ struct tasdevice_priv *tas_priv = context;
+ int rc, subblk_offset = 2;
+
+ if (subblk_offset + 6 > sublocksize) {
+ dev_err(tas_priv->dev, "%s: bit write Out of boundary\n",
+ __func__);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ if (dev_idx == (tas_priv->index + 1) || dev_idx == 0) {
+ rc = tasdevice_spi_dev_update_bits(tas_priv,
+ TASDEVICE_REG(data[subblk_offset + 2],
+ data[subblk_offset + 3],
+ data[subblk_offset + 4]),
+ data[subblk_offset + 1],
+ data[subblk_offset + 5]);
+ if (rc < 0) {
+ dev_err(tas_priv->dev, "%s: update_bits error = %d\n",
+ __func__, rc);
+ subblk_offset |= OFFSET_ERROR_BIT;
+ }
+ }
+ subblk_offset += 6;
+
+ return subblk_offset;
+}
+
+/* Data block process function: dispatch one sub-block to the matching handler. */
+static int tasdevice_process_block(void *context, unsigned char *data,
+ unsigned char dev_idx, int sublocksize)
+{
+ struct tasdevice_priv *tas_priv = context;
+ int blktyp = dev_idx & 0xC0, subblk_offset;
+ unsigned char subblk_typ = data[1];
+
+ switch (subblk_typ) {
+ case TASDEVICE_CMD_SING_W:
+ subblk_offset = tasdevice_single_byte_wr(tas_priv,
+ dev_idx & 0x4f, data, sublocksize);
+ break;
+ case TASDEVICE_CMD_BURST:
+ subblk_offset = tasdevice_burst_wr(tas_priv,
+ dev_idx & 0x4f, data, sublocksize);
+ break;
+ case TASDEVICE_CMD_DELAY:
+ subblk_offset = tasdevice_delay(tas_priv,
+ dev_idx & 0x4f, data, sublocksize);
+ break;
+ case TASDEVICE_CMD_FIELD_W:
+ subblk_offset = tasdevice_field_wr(tas_priv,
+ dev_idx & 0x4f, data, sublocksize);
+ break;
+ default:
+ subblk_offset = 2;
+ break;
+ }
+ if (((subblk_offset & OFFSET_ERROR_BIT) != 0) && blktyp != 0) {
+ if (blktyp == 0x80) {
+ tas_priv->cur_prog = -1;
+ tas_priv->cur_conf = -1;
+ } else
+ tas_priv->cur_conf = -1;
+ }
+ subblk_offset &= ~OFFSET_ERROR_BIT;
+
+ return subblk_offset;
+}
+
+/*
+ * The device supports different configurations for different scenes;
+ * this function is used to choose among them.
+ */
+void tasdevice_spi_select_cfg_blk(void *pContext, int conf_no,
+ unsigned char block_type)
+{
+ struct tasdevice_priv *tas_priv = pContext;
+ struct tasdevice_rca *rca = &tas_priv->rcabin;
+ struct tasdevice_config_info **cfg_info = rca->cfg_info;
+ struct tasdev_blk_data **blk_data;
+ unsigned int j, k;
+
+ if (conf_no >= rca->ncfgs || conf_no < 0 || !cfg_info) {
+ dev_err(tas_priv->dev, "conf_no should be not more than %u\n",
+ rca->ncfgs);
+ return;
+ }
+ blk_data = cfg_info[conf_no]->blk_data;
+
+ for (j = 0; j < cfg_info[conf_no]->real_nblocks; j++) {
+ unsigned int length = 0, rc = 0;
+
+ if (block_type > 5 || block_type < 2) {
+ dev_err(tas_priv->dev,
+ "block_type should be in range from 2 to 5\n");
+ break;
+ }
+ if (block_type != blk_data[j]->block_type)
+ continue;
+
+ for (k = 0; k < blk_data[j]->n_subblks; k++) {
+ tas_priv->is_loading = true;
+
+ rc = tasdevice_process_block(tas_priv,
+ blk_data[j]->regdata + length,
+ blk_data[j]->dev_idx,
+ blk_data[j]->block_size - length);
+ length += rc;
+ if (blk_data[j]->block_size < length) {
+ dev_err(tas_priv->dev,
+ "%s: %u %u out of boundary\n",
+ __func__, length,
+ blk_data[j]->block_size);
+ break;
+ }
+ }
+ if (length != blk_data[j]->block_size)
+ dev_err(tas_priv->dev, "%s: %u %u size is not same\n",
+ __func__, length, blk_data[j]->block_size);
+ }
+}
+
+/* Block process function. */
+static int tasdevice_load_block_kernel(
+ struct tasdevice_priv *tasdevice, struct tasdev_blk *block)
+{
+ const unsigned int blk_size = block->blk_size;
+ unsigned char *data = block->data;
+ unsigned int i, length;
+
+ for (i = 0, length = 0; i < block->nr_subblocks; i++) {
+ int rc = tasdevice_process_block(tasdevice, data + length,
+ block->dev_idx, blk_size - length);
+ if (rc < 0) {
+ dev_err(tasdevice->dev,
+ "%s: %u %u sublock write error\n",
+ __func__, length, blk_size);
+ return rc;
+ }
+ length += rc;
+ if (blk_size < length) {
+ dev_err(tasdevice->dev, "%s: %u %u out of boundary\n",
+ __func__, length, blk_size);
+ rc = -ENOMEM;
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* DSP firmware file header parser function. */
+static int fw_parse_variable_hdr(struct tasdevice_priv *tas_priv,
+ struct tasdevice_dspfw_hdr *fw_hdr,
+ const struct firmware *fmw, int offset)
+{
+ const unsigned char *buf = fmw->data;
+ int len = strlen((char *)&buf[offset]);
+
+ len++;
+
+ if (offset + len + 8 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+
+ offset += len;
+
+ fw_hdr->device_family = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s: not TAS device\n", __func__);
+ return -EINVAL;
+ }
+ offset += 4;
+
+ fw_hdr->device = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+ return -EINVAL;
+ }
+ offset += 4;
+ fw_hdr->ndev = 1;
+
+ return offset;
+}
+
+/* DSP firmware file header parser function for the variable-size header. */
+static int fw_parse_variable_header_git(struct tasdevice_priv
+ *tas_priv, const struct firmware *fmw, int offset)
+{
+ struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+ struct tasdevice_dspfw_hdr *fw_hdr = &tas_fmw->fw_hdr;
+
+ offset = fw_parse_variable_hdr(tas_priv, fw_hdr, fmw, offset);
+
+ return offset;
+}
+
+/* DSP firmware file block parser function. */
+static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
+ struct tasdev_blk *block, const struct firmware *fmw, int offset)
+{
+ unsigned char *data = (unsigned char *)fmw->data;
+ int n;
+
+ if (offset + 8 > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: Type error\n", __func__);
+ return -EINVAL;
+ }
+ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) {
+ if (offset + 8 > fmw->size) {
+ dev_err(tas_fmw->dev, "PChkSumPresent error\n");
+ return -EINVAL;
+ }
+ block->is_pchksum_present = data[offset];
+ offset++;
+
+ block->pchksum = data[offset];
+ offset++;
+
+ block->is_ychksum_present = data[offset];
+ offset++;
+
+ block->ychksum = data[offset];
+ offset++;
+ } else {
+ block->is_pchksum_present = 0;
+ block->is_ychksum_present = 0;
+ }
+
+ block->nr_cmds = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ n = block->nr_cmds * 4;
+ if (offset + n > fmw->size) {
+ dev_err(tas_fmw->dev,
+ "%s: File Size(%lu) error offset = %d n = %d\n",
+ __func__, (unsigned long)fmw->size, offset, n);
+ return -EINVAL;
+ }
+ /* kmemdup() instead of kzalloc() + memcpy() */
+ block->data = kmemdup(&data[offset], n, GFP_KERNEL);
+ if (!block->data)
+ return -ENOMEM;
+
+ offset += n;
+
+ return offset;
+}
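
The allocation above uses kmemdup() so the command payload is duplicated in a
single call instead of a kzalloc() followed by memcpy(). A rough userspace
analogue of that convenience (illustrative only; the kernel helper also takes
a GFP mask and the caller still checks for NULL as above):

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t n)
{
        void *p = malloc(n);

        if (p)
                memcpy(p, src, n);      /* one allocation, one copy */
        return p;
}

int main(void)
{
        char *copy = memdup("payload", 8);

        free(copy);
        return 0;
}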
+
+/*
+ * When a parsing error occurs, all memory resources are released
+ * at the end of tasdevice_rca_ready.
+ */
+static int fw_parse_data(struct tasdevice_fw *tas_fmw,
+ struct tasdevice_data *img_data, const struct firmware *fmw,
+ int offset)
+{
+ const unsigned char *data = (unsigned char *)fmw->data;
+ struct tasdev_blk *blk;
+ unsigned int i, n;
+
+ if (offset + 64 > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: Name error\n", __func__);
+ return -EINVAL;
+ }
+ memcpy(img_data->name, &data[offset], 64);
+ offset += 64;
+
+ n = strlen((char *)&data[offset]);
+ n++;
+ if (offset + n + 2 > fmw->size) {
+ dev_err(tas_fmw->dev, "%s: Description error\n", __func__);
+ return -EINVAL;
+ }
+ offset += n;
+ img_data->nr_blk = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+ sizeof(*img_data->dev_blks), GFP_KERNEL);
+ if (!img_data->dev_blks)
+ return -ENOMEM;
+
+ for (i = 0; i < img_data->nr_blk; i++) {
+ blk = &img_data->dev_blks[i];
+ offset = fw_parse_block_data(tas_fmw, blk, fmw, offset);
+ if (offset < 0)
+ return -EINVAL;
+ }
+
+ return offset;
+}
+
+/*
+ * When a parsing error occurs, all memory resources are released
+ * at the end of tasdevice_rca_ready.
+ */
+static int fw_parse_program_data(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw, const struct firmware *fmw, int offset)
+{
+ unsigned char *buf = (unsigned char *)fmw->data;
+ struct tasdevice_prog *program;
+ int i;
+
+ if (offset + 2 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+ tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_programs == 0) {
+ /* Not an error for a calibration data file, return directly */
+ dev_dbg(tas_priv->dev, "%s: No Programs data, maybe calbin\n",
+ __func__);
+ return offset;
+ }
+
+ tas_fmw->programs =
+ kcalloc(tas_fmw->nr_programs, sizeof(*tas_fmw->programs),
+ GFP_KERNEL);
+ if (!tas_fmw->programs)
+ return -ENOMEM;
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ int n = 0;
+
+ program = &tas_fmw->programs[i];
+ if (offset + 64 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: mpName error\n", __func__);
+ return -EINVAL;
+ }
+ offset += 64;
+
+ n = strlen((char *)&buf[offset]);
+ /* skip '\0' and 5 unused bytes */
+ n += 6;
+ if (offset + n > fmw->size) {
+ dev_err(tas_priv->dev, "Description err\n");
+ return -EINVAL;
+ }
+
+ offset += n;
+
+ offset = fw_parse_data(tas_fmw, &program->dev_data, fmw,
+ offset);
+ if (offset < 0)
+ return offset;
+ }
+
+ return offset;
+}
+
+/*
+ * When a parsing error occurs, all memory resources are released
+ * at the end of tasdevice_rca_ready.
+ */
+static int fw_parse_configuration_data(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw, const struct firmware *fmw, int offset)
+{
+ unsigned char *data = (unsigned char *)fmw->data;
+ struct tasdevice_config *config;
+ unsigned int i, n;
+
+ if (offset + 2 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: File Size error\n", __func__);
+ return -EINVAL;
+ }
+ tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_configurations == 0) {
+ dev_err(tas_priv->dev, "%s: Conf is zero\n", __func__);
+ /* Not an error for a calibration data file, return directly */
+ return offset;
+ }
+ tas_fmw->configs = kcalloc(tas_fmw->nr_configurations,
+ sizeof(*tas_fmw->configs), GFP_KERNEL);
+ if (!tas_fmw->configs)
+ return -ENOMEM;
+ for (i = 0; i < tas_fmw->nr_configurations; i++) {
+ config = &tas_fmw->configs[i];
+ if (offset + 64 > fmw->size) {
+ dev_err(tas_priv->dev, "File Size err\n");
+ return -EINVAL;
+ }
+ memcpy(config->name, &data[offset], 64);
+ offset += 64;
+
+ n = strlen((char *)&data[offset]);
+ n += 15;
+ if (offset + n > fmw->size) {
+ dev_err(tas_priv->dev, "Description err\n");
+ return -EINVAL;
+ }
+ offset += n;
+ offset = fw_parse_data(tas_fmw, &config->dev_data,
+ fmw, offset);
+ if (offset < 0)
+ break;
+ }
+
+ return offset;
+}
+
+/* yram5 page check. */
+static bool check_inpage_yram_rg(struct tas_crc *cd,
+ unsigned char reg, unsigned char len)
+{
+ bool in = false;
+
+ if (reg <= TAS2781_YRAM5_END_REG &&
+ reg >= TAS2781_YRAM5_START_REG) {
+ if (reg + len > TAS2781_YRAM5_END_REG)
+ cd->len = TAS2781_YRAM5_END_REG - reg + 1;
+ else
+ cd->len = len;
+ cd->offset = reg;
+ in = true;
+ } else if (reg < TAS2781_YRAM5_START_REG) {
+ if (reg + len > TAS2781_YRAM5_START_REG) {
+ cd->offset = TAS2781_YRAM5_START_REG;
+ cd->len = len - TAS2781_YRAM5_START_REG + reg;
+ in = true;
+ }
+ }
+
+ return in;
+}
+
+/* DSP firmware yram block check. */
+static bool check_inpage_yram_bk1(struct tas_crc *cd,
+ unsigned char page, unsigned char reg, unsigned char len)
+{
+ bool in = false;
+
+ if (page == TAS2781_YRAM1_PAGE) {
+ if (reg >= TAS2781_YRAM1_START_REG) {
+ cd->offset = reg;
+ cd->len = len;
+ in = true;
+ } else if (reg + len > TAS2781_YRAM1_START_REG) {
+ cd->offset = TAS2781_YRAM1_START_REG;
+ cd->len = len - TAS2781_YRAM1_START_REG + reg;
+ in = true;
+ }
+ } else if (page == TAS2781_YRAM3_PAGE) {
+ in = check_inpage_yram_rg(cd, reg, len);
+ }
+
+ return in;
+}
+
+/*
+ * Return Code:
+ * true -- the registers are in the inpage yram
+ * false -- the registers are NOT in the inpage yram
+ */
+static bool check_inpage_yram(struct tas_crc *cd, unsigned char book,
+ unsigned char page, unsigned char reg, unsigned char len)
+{
+ bool in = false;
+
+ if (book == TAS2781_YRAM_BOOK1)
+ in = check_inpage_yram_bk1(cd, page, reg, len);
+ else if (book == TAS2781_YRAM_BOOK2 && page == TAS2781_YRAM5_PAGE)
+ in = check_inpage_yram_rg(cd, reg, len);
+
+ return in;
+}
+
+/* yram4 page check. */
+static bool check_inblock_yram_bk(struct tas_crc *cd,
+ unsigned char page, unsigned char reg, unsigned char len)
+{
+ bool in = false;
+
+ if ((page >= TAS2781_YRAM4_START_PAGE &&
+ page <= TAS2781_YRAM4_END_PAGE) ||
+ (page >= TAS2781_YRAM2_START_PAGE &&
+ page <= TAS2781_YRAM2_END_PAGE)) {
+ if (reg <= TAS2781_YRAM2_END_REG &&
+ reg >= TAS2781_YRAM2_START_REG) {
+ cd->offset = reg;
+ cd->len = len;
+ in = true;
+ } else if (reg < TAS2781_YRAM2_START_REG) {
+ if (reg + len - 1 >= TAS2781_YRAM2_START_REG) {
+ cd->offset = TAS2781_YRAM2_START_REG;
+ cd->len = reg + len - TAS2781_YRAM2_START_REG;
+ in = true;
+ }
+ }
+ }
+
+ return in;
+}
+
+/*
+ * Return Code:
+ * true -- the registers are in the inblock yram
+ * false -- the registers are NOT in the inblock yram
+ */
+static bool check_inblock_yram(struct tas_crc *cd, unsigned char book,
+ unsigned char page, unsigned char reg, unsigned char len)
+{
+ bool in = false;
+
+ if (book == TAS2781_YRAM_BOOK1 || book == TAS2781_YRAM_BOOK2)
+ in = check_inblock_yram_bk(cd, page, reg, len);
+
+ return in;
+}
+
+/* yram page check. */
+static bool check_yram(struct tas_crc *cd, unsigned char book,
+ unsigned char page, unsigned char reg, unsigned char len)
+{
+ bool in;
+
+ in = check_inpage_yram(cd, book, page, reg, len);
+ if (!in)
+ in = check_inblock_yram(cd, book, page, reg, len);
+
+ return in;
+}
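
The check_* helpers above all perform the same basic operation: intersect the
written register span [reg, reg + len) with a protected YRAM window and, when
they overlap, record the clamped offset and length for the later checksum
pass. A standalone sketch of that clamping with hypothetical window bounds
(the real TAS2781_YRAM* limits are defined in the driver header):

#include <stdbool.h>

#define WIN_START 0x0d                  /* hypothetical window bounds */
#define WIN_END   0x3f

struct span { unsigned int offset, len; };

static bool clamp_to_window(unsigned int reg, unsigned int len, struct span *s)
{
        unsigned int lo = reg > WIN_START ? reg : WIN_START;
        unsigned int hi = reg + len - 1 < WIN_END ? reg + len - 1 : WIN_END;

        if (len == 0 || lo > hi)
                return false;           /* no overlap with the window */
        s->offset = lo;
        s->len = hi - lo + 1;
        return true;
}

int main(void)
{
        struct span s;

        return clamp_to_window(0x08, 0x10, &s) ? 0 : 1;
}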
+
+/* Checksum for data block. */
+static int tasdev_multibytes_chksum(struct tasdevice_priv *tasdevice,
+ unsigned char book, unsigned char page,
+ unsigned char reg, unsigned int len)
+{
+ struct tas_crc crc_data;
+ unsigned char crc_chksum = 0;
+ unsigned char nBuf1[128];
+ int ret = 0, i;
+ bool in;
+
+ if ((reg + len - 1) > 127) {
+ ret = -EINVAL;
+ dev_err(tasdevice->dev, "firmware error\n");
+ goto end;
+ }
+
+ if ((book == TASDEVICE_BOOK_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (page == TASDEVICE_PAGE_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (reg == TASDEVICE_REG_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (len == 4)) {
+ /* DSP swap command, pass */
+ ret = 0;
+ goto end;
+ }
+
+ in = check_yram(&crc_data, book, page, reg, len);
+ if (!in)
+ goto end;
+
+ if (len == 1) {
+ dev_err(tasdevice->dev, "firmware error\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = tasdevice_spi_dev_bulk_read(tasdevice,
+ TASDEVICE_REG(book, page, crc_data.offset),
+ nBuf1, crc_data.len);
+ if (ret < 0)
+ goto end;
+
+ for (i = 0; i < crc_data.len; i++) {
+ if ((book == TASDEVICE_BOOK_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (page == TASDEVICE_PAGE_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ ((i + crc_data.offset) >=
+ TASDEVICE_REG_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ ((i + crc_data.offset) <=
+ (TASDEVICE_REG_ID(TAS2781_SA_COEFF_SWAP_REG) + 4)))
+ /* DSP swap command, bypass */
+ continue;
+ else
+ crc_chksum += crc8(tasdevice->crc8_lkp_tbl, &nBuf1[i],
+ 1, 0);
+ }
+
+ ret = crc_chksum;
+
+end:
+ return ret;
+}
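
The running checksum above is built by feeding the kernel crc8() helper one
byte at a time with the driver's pre-computed lookup table, which amounts to a
plain byte-at-a-time CRC-8. A standalone, table-free sketch of the same kind
of computation (it assumes an MSB-first table, and the polynomial below is a
placeholder, not necessarily the one the driver uses):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint8_t crc8_msb(uint8_t poly, const uint8_t *data, size_t len,
                        uint8_t crc)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= data[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x80) ? (crc << 1) ^ poly : crc << 1;
        }
        return crc;
}

int main(void)
{
        const uint8_t msg[] = { 0x12, 0x34, 0x56 };

        printf("crc = 0x%02x\n", crc8_msb(0x4d, msg, sizeof(msg), 0));
        return 0;
}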
+
+/* Checksum for single register. */
+static int do_singlereg_checksum(struct tasdevice_priv *tasdevice,
+ unsigned char book, unsigned char page,
+ unsigned char reg, unsigned char val)
+{
+ struct tas_crc crc_data;
+ unsigned int nData1;
+ int ret = 0;
+ bool in;
+
+ /* DSP swap command, pass */
+ if ((book == TASDEVICE_BOOK_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (page == TASDEVICE_PAGE_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (reg >= TASDEVICE_REG_ID(TAS2781_SA_COEFF_SWAP_REG)) &&
+ (reg <= (TASDEVICE_REG_ID(TAS2781_SA_COEFF_SWAP_REG) + 4)))
+ return 0;
+
+ in = check_yram(&crc_data, book, page, reg, 1);
+ if (!in)
+ return 0;
+ ret = tasdevice_spi_dev_read(tasdevice,
+ TASDEVICE_REG(book, page, reg), &nData1);
+ if (ret < 0)
+ return ret;
+
+ if (nData1 != val) {
+ dev_err(tasdevice->dev,
+ "B[0x%x]P[0x%x]R[0x%x] W[0x%x], R[0x%x]\n",
+ book, page, reg, val, nData1);
+ tasdevice->err_code |= ERROR_YRAM_CRCCHK;
+ return -EAGAIN;
+ }
+
+ ret = crc8(tasdevice->crc8_lkp_tbl, &val, 1, 0);
+
+ return ret;
+}
+
+/* Invalidate the current program or config depending on the block type. */
+static void set_err_prg_cfg(unsigned int type, struct tasdevice_priv *p)
+{
+ if ((type == MAIN_ALL_DEVICES) || (type == MAIN_DEVICE_A) ||
+ (type == MAIN_DEVICE_B) || (type == MAIN_DEVICE_C) ||
+ (type == MAIN_DEVICE_D))
+ p->cur_prog = -1;
+ else
+ p->cur_conf = -1;
+}
+
+/* Checksum for data bytes. */
+static int tasdev_bytes_chksum(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block, unsigned char book,
+ unsigned char page, unsigned char reg, unsigned int len,
+ unsigned char val, unsigned char *crc_chksum)
+{
+ int ret;
+
+ if (len > 1)
+ ret = tasdev_multibytes_chksum(tas_priv, book, page, reg,
+ len);
+ else
+ ret = do_singlereg_checksum(tas_priv, book, page, reg, val);
+
+ if (ret > 0) {
+ *crc_chksum += ret;
+ goto end;
+ }
+
+ if (ret != -EAGAIN)
+ goto end;
+
+ block->nr_retry--;
+ if (block->nr_retry > 0)
+ goto end;
+
+ set_err_prg_cfg(block->type, tas_priv);
+
+end:
+ return ret;
+}
+
+/* Multi-data byte write. */
+static int tasdev_multibytes_wr(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block, unsigned char book,
+ unsigned char page, unsigned char reg, unsigned char *data,
+ unsigned int len, unsigned int *nr_cmds,
+ unsigned char *crc_chksum)
+{
+ int ret;
+
+ if (len > 1) {
+ ret = tasdevice_spi_dev_bulk_write(tas_priv,
+ TASDEVICE_REG(book, page, reg), data + 3, len);
+ if (ret < 0)
+ return ret;
+ if (block->is_ychksum_present)
+ ret = tasdev_bytes_chksum(tas_priv, block,
+ book, page, reg, len, 0, crc_chksum);
+ } else {
+ ret = tasdevice_spi_dev_write(tas_priv,
+ TASDEVICE_REG(book, page, reg), data[3]);
+ if (ret < 0)
+ return ret;
+ if (block->is_ychksum_present)
+ ret = tasdev_bytes_chksum(tas_priv, block, book,
+ page, reg, 1, data[3], crc_chksum);
+ }
+
+ if (!block->is_ychksum_present || ret >= 0) {
+ *nr_cmds += 1;
+ if (len >= 2)
+ *nr_cmds += ((len - 2) / 4) + 1;
+ }
+
+ return ret;
+}
+
+/* Checksum for block. */
+static int tasdev_block_chksum(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block)
+{
+ unsigned int nr_value;
+ int ret;
+
+ ret = tasdevice_spi_dev_read(tas_priv, TASDEVICE_CHECKSUM, &nr_value);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "%s: read error %d.\n", __func__, ret);
+ set_err_prg_cfg(block->type, tas_priv);
+ return ret;
+ }
+
+ if ((nr_value & 0xff) != block->pchksum) {
+ dev_err(tas_priv->dev, "%s: PChkSum err %d ", __func__, ret);
+ dev_err(tas_priv->dev, "PChkSum = 0x%x, Reg = 0x%x\n",
+ block->pchksum, (nr_value & 0xff));
+ tas_priv->err_code |= ERROR_PRAM_CRCCHK;
+ ret = -EAGAIN;
+ block->nr_retry--;
+
+ if (block->nr_retry <= 0)
+ set_err_prg_cfg(block->type, tas_priv);
+ } else {
+ tas_priv->err_code &= ~ERROR_PRAM_CRCCHK;
+ }
+
+ return ret;
+}
+
+/* Firmware block load function. */
+static int tasdev_load_blk(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block)
+{
+ unsigned int sleep_time, len, nr_cmds;
+ unsigned char offset, book, page, val;
+ unsigned char *data = block->data;
+ unsigned char crc_chksum = 0;
+ int ret = 0;
+
+ while (block->nr_retry > 0) {
+ if (block->is_pchksum_present) {
+ ret = tasdevice_spi_dev_write(tas_priv,
+ TASDEVICE_CHECKSUM, 0);
+ if (ret < 0)
+ break;
+ }
+
+ if (block->is_ychksum_present)
+ crc_chksum = 0;
+
+ nr_cmds = 0;
+
+ while (nr_cmds < block->nr_cmds) {
+ data = block->data + nr_cmds * 4;
+
+ book = data[0];
+ page = data[1];
+ offset = data[2];
+ val = data[3];
+
+ nr_cmds++;
+ /* Single byte write */
+ if (offset <= 0x7F) {
+ ret = tasdevice_spi_dev_write(tas_priv,
+ TASDEVICE_REG(book, page, offset),
+ val);
+ if (ret < 0)
+ break;
+ if (block->is_ychksum_present) {
+ ret = tasdev_bytes_chksum(tas_priv,
+ block, book, page, offset,
+ 1, val, &crc_chksum);
+ if (ret < 0)
+ break;
+ }
+ continue;
+ }
+ /* sleep command */
+ if (offset == 0x81) {
+ /* book -- data[0] page -- data[1] */
+ sleep_time = ((book << 8) + page) * 1000;
+ fsleep(sleep_time);
+ continue;
+ }
+ /* Multiple bytes write */
+ if (offset == 0x85) {
+ data += 4;
+ len = (book << 8) + page;
+ book = data[0];
+ page = data[1];
+ offset = data[2];
+ ret = tasdev_multibytes_wr(tas_priv,
+ block, book, page, offset, data,
+ len, &nr_cmds, &crc_chksum);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret == -EAGAIN) {
+ if (block->nr_retry > 0)
+ continue;
+ } else if (ret < 0) {
+ /* err in current device, skip it */
+ break;
+ }
+
+ if (block->is_pchksum_present) {
+ ret = tasdev_block_chksum(tas_priv, block);
+ if (ret == -EAGAIN) {
+ if (block->nr_retry > 0)
+ continue;
+ } else if (ret < 0) {
+ /* err in current device, skip it */
+ break;
+ }
+ }
+
+ if (block->is_ychksum_present) {
+ /* TBD, open it when FW ready */
+ dev_err(tas_priv->dev,
+ "Blk YChkSum: FW = 0x%x, YCRC = 0x%x\n",
+ block->ychksum, crc_chksum);
+
+ tas_priv->err_code &= ~ERROR_YRAM_CRCCHK;
+ ret = 0;
+ }
+ /* skip current blk */
+ break;
+ }
+
+ return ret;
+}
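
The loop above interprets the block's command stream as 4-byte records of the
form [book, page, offset, value]: offsets up to 0x7F are single register
writes, 0x81 encodes a sleep of (book << 8 | page) milliseconds, and 0x85
introduces a multi-byte write whose length is (book << 8 | page) and whose
target register sits in the following record. A decode-only standalone sketch
of that walk (assumes a well-formed stream; no device I/O or checksumming):

#include <stdint.h>
#include <stdio.h>

static void dump_cmds(const uint8_t *d, unsigned int nr_cmds)
{
        unsigned int i = 0;

        while (i < nr_cmds) {
                const uint8_t *c = d + i * 4;
                unsigned int arg = ((unsigned int)c[0] << 8) | c[1];

                i++;
                if (c[2] <= 0x7f)
                        printf("write B%02x P%02x R%02x = 0x%02x\n",
                               c[0], c[1], c[2], c[3]);
                else if (c[2] == 0x81)
                        printf("sleep %u ms\n", arg);
                else if (c[2] == 0x85) {
                        /* next record holds the target; payload follows it */
                        printf("burst of %u bytes to B%02x P%02x R%02x\n",
                               arg, c[4], c[5], c[6]);
                        i += 1 + (arg >= 2 ? (arg - 2) / 4 + 1 : 0);
                }
        }
}

int main(void)
{
        /* one single-register write, then a 100 ms sleep */
        const uint8_t cmds[8] = { 0x00, 0x01, 0x02, 0x5a,
                                  0x00, 0x64, 0x81, 0x00 };

        dump_cmds(cmds, 2);
        return 0;
}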
+
+/* Firmware block load function. */
+static int tasdevice_load_block(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block)
+{
+ int ret = 0;
+
+ block->nr_retry = 6;
+ if (tas_priv->is_loading == false)
+ return 0;
+ ret = tasdev_load_blk(tas_priv, block);
+ if (ret < 0)
+ dev_err(tas_priv->dev, "Blk (%d) load error\n", block->type);
+
+ return ret;
+}
+
+/*
+ * Select firmware binary parser & load callback functions by ppc3 version
+ * and firmware binary version.
+ */
+static int dspfw_default_callback(struct tasdevice_priv *tas_priv,
+ unsigned int drv_ver, unsigned int ppcver)
+{
+ int rc = 0;
+
+ if (drv_ver == 0x100) {
+ if (ppcver >= PPC3_VERSION) {
+ tas_priv->fw_parse_variable_header =
+ fw_parse_variable_header_kernel;
+ tas_priv->fw_parse_program_data =
+ fw_parse_program_data_kernel;
+ tas_priv->fw_parse_configuration_data =
+ fw_parse_configuration_data_kernel;
+ tas_priv->tasdevice_load_block =
+ tasdevice_load_block_kernel;
+ } else if (ppcver == 0x00) {
+ tas_priv->fw_parse_variable_header =
+ fw_parse_variable_header_git;
+ tas_priv->fw_parse_program_data =
+ fw_parse_program_data;
+ tas_priv->fw_parse_configuration_data =
+ fw_parse_configuration_data;
+ tas_priv->tasdevice_load_block =
+ tasdevice_load_block;
+ } else {
+ dev_err(tas_priv->dev,
+ "Wrong PPCVer :0x%08x\n", ppcver);
+ rc = -EINVAL;
+ }
+ } else {
+ dev_err(tas_priv->dev, "Wrong DrvVer : 0x%02x\n", drv_ver);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* DSP firmware binary file header parser function. */
+static int fw_parse_header(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw, const struct firmware *fmw, int offset)
+{
+ struct tasdevice_dspfw_hdr *fw_hdr = &tas_fmw->fw_hdr;
+ struct tasdevice_fw_fixed_hdr *fw_fixed_hdr = &fw_hdr->fixed_hdr;
+ static const unsigned char magic_number[] = {0x35, 0x35, 0x35, 0x32, };
+ const unsigned char *buf = (unsigned char *)fmw->data;
+
+ if (offset + 92 > fmw->size) {
+ dev_err(tas_priv->dev, "%s: File Size error\n", __func__);
+ offset = -EINVAL;
+ goto out;
+ }
+ if (memcmp(&buf[offset], magic_number, 4)) {
+ dev_err(tas_priv->dev, "%s: Magic num NOT match\n", __func__);
+ offset = -EINVAL;
+ goto out;
+ }
+ offset += 4;
+
+ /*
+ * Convert buf[offset], buf[offset + 1], buf[offset + 2] and
+ * buf[offset + 3] to a host-endian 32-bit value.
+ */
+ fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_fixed_hdr->fwsize != fmw->size) {
+ dev_err(tas_priv->dev, "File size not match, %lu %u",
+ (unsigned long)fmw->size, fw_fixed_hdr->fwsize);
+ offset = -EINVAL;
+ goto out;
+ }
+ offset += 4;
+ fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+ fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]);
+ offset += 72;
+
+out:
+ return offset;
+}
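
The fixed header parsed above begins with the four ASCII bytes "5552"
(0x35 0x35 0x35 0x32) followed by big-endian 32-bit words, the first of which
is the total firmware size that must match the file length. A standalone
sketch of the magic check and one such field read:

#include <stdint.h>
#include <string.h>

static uint32_t be32_at(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

/* Returns the firmware-size field, or 0 if the magic is missing. */
static uint32_t parse_fixed_header(const uint8_t *buf, size_t size)
{
        static const uint8_t magic[4] = { 0x35, 0x35, 0x35, 0x32 };

        if (size < 8 || memcmp(buf, magic, 4))
                return 0;
        return be32_at(&buf[4]);
}

int main(void)
{
        const uint8_t hdr[8] = { 0x35, 0x35, 0x35, 0x32, 0x00, 0x00, 0x01, 0x00 };

        return parse_fixed_header(hdr, sizeof(hdr)) == 0x100 ? 0 : 1;
}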
+
+/* DSP firmware binary file parser function. */
+static int tasdevice_dspfw_ready(const struct firmware *fmw, void *context)
+{
+ struct tasdevice_priv *tas_priv = context;
+ struct tasdevice_fw_fixed_hdr *fw_fixed_hdr;
+ struct tasdevice_fw *tas_fmw;
+ int offset = 0, ret = 0;
+
+ if (!fmw || !fmw->data) {
+ dev_err(tas_priv->dev, "%s: Failed to read firmware %s\n",
+ __func__, tas_priv->coef_binaryname);
+ return -EINVAL;
+ }
+
+ tas_priv->fmw = kzalloc(sizeof(*tas_priv->fmw), GFP_KERNEL);
+ if (!tas_priv->fmw)
+ return -ENOMEM;
+ tas_fmw = tas_priv->fmw;
+ tas_fmw->dev = tas_priv->dev;
+ offset = fw_parse_header(tas_priv, tas_fmw, fmw, offset);
+
+ if (offset == -EINVAL)
+ return -EINVAL;
+
+ fw_fixed_hdr = &tas_fmw->fw_hdr.fixed_hdr;
+ /* Support different versions of firmware */
+ switch (fw_fixed_hdr->drv_ver) {
+ case 0x301:
+ case 0x302:
+ case 0x502:
+ case 0x503:
+ tas_priv->fw_parse_variable_header =
+ fw_parse_variable_header_kernel;
+ tas_priv->fw_parse_program_data =
+ fw_parse_program_data_kernel;
+ tas_priv->fw_parse_configuration_data =
+ fw_parse_configuration_data_kernel;
+ tas_priv->tasdevice_load_block =
+ tasdevice_load_block_kernel;
+ break;
+ case 0x202:
+ case 0x400:
+ tas_priv->fw_parse_variable_header =
+ fw_parse_variable_header_git;
+ tas_priv->fw_parse_program_data =
+ fw_parse_program_data;
+ tas_priv->fw_parse_configuration_data =
+ fw_parse_configuration_data;
+ tas_priv->tasdevice_load_block =
+ tasdevice_load_block;
+ break;
+ default:
+ ret = dspfw_default_callback(tas_priv,
+ fw_fixed_hdr->drv_ver, fw_fixed_hdr->ppcver);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ offset = tas_priv->fw_parse_variable_header(tas_priv, fmw, offset);
+ if (offset < 0)
+ return offset;
+
+ offset = tas_priv->fw_parse_program_data(tas_priv, tas_fmw, fmw,
+ offset);
+ if (offset < 0)
+ return offset;
+
+ offset = tas_priv->fw_parse_configuration_data(tas_priv,
+ tas_fmw, fmw, offset);
+ if (offset < 0)
+ ret = offset;
+
+ return ret;
+}
+
+/* DSP firmware binary file request-and-parse entry function. */
+int tasdevice_spi_dsp_parser(void *context)
+{
+ struct tasdevice_priv *tas_priv = context;
+ const struct firmware *fw_entry;
+ int ret;
+
+ ret = request_firmware(&fw_entry, tas_priv->coef_binaryname,
+ tas_priv->dev);
+ if (ret) {
+ dev_err(tas_priv->dev, "%s: load %s error\n", __func__,
+ tas_priv->coef_binaryname);
+ return ret;
+ }
+
+ ret = tasdevice_dspfw_ready(fw_entry, tas_priv);
+ release_firmware(fw_entry);
+ fw_entry = NULL;
+
+ return ret;
+}
+
+/* DSP firmware program block data remove function. */
+static void tasdev_dsp_prog_blk_remove(struct tasdevice_prog *prog)
+{
+ struct tasdevice_data *tas_dt;
+ struct tasdev_blk *blk;
+ unsigned int i;
+
+ if (!prog)
+ return;
+
+ tas_dt = &prog->dev_data;
+
+ if (!tas_dt->dev_blks)
+ return;
+
+ for (i = 0; i < tas_dt->nr_blk; i++) {
+ blk = &tas_dt->dev_blks[i];
+ kfree(blk->data);
+ }
+ kfree(tas_dt->dev_blks);
+}
+
+/* DSP firmware program data remove function. */
+static void tasdev_dsp_prog_remove(struct tasdevice_prog *prog,
+ unsigned short nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ tasdev_dsp_prog_blk_remove(&prog[i]);
+ kfree(prog);
+}
+
+/* DSP firmware config block data remove function. */
+static void tasdev_dsp_cfg_blk_remove(struct tasdevice_config *cfg)
+{
+ struct tasdevice_data *tas_dt;
+ struct tasdev_blk *blk;
+ unsigned int i;
+
+ if (cfg) {
+ tas_dt = &cfg->dev_data;
+
+ if (!tas_dt->dev_blks)
+ return;
+
+ for (i = 0; i < tas_dt->nr_blk; i++) {
+ blk = &tas_dt->dev_blks[i];
+ kfree(blk->data);
+ }
+ kfree(tas_dt->dev_blks);
+ }
+}
+
+/* DSP firmware config remove function. */
+static void tasdev_dsp_cfg_remove(struct tasdevice_config *config,
+ unsigned short nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ tasdev_dsp_cfg_blk_remove(&config[i]);
+ kfree(config);
+}
+
+/* DSP firmware remove function. */
+void tasdevice_spi_dsp_remove(void *context)
+{
+ struct tasdevice_priv *tas_dev = context;
+
+ if (!tas_dev->fmw)
+ return;
+
+ if (tas_dev->fmw->programs)
+ tasdev_dsp_prog_remove(tas_dev->fmw->programs,
+ tas_dev->fmw->nr_programs);
+ if (tas_dev->fmw->configs)
+ tasdev_dsp_cfg_remove(tas_dev->fmw->configs,
+ tas_dev->fmw->nr_configurations);
+ kfree(tas_dev->fmw);
+ tas_dev->fmw = NULL;
+}
+
+/* DSP firmware calibration data remove function. */
+static void tas2781_clear_calfirmware(struct tasdevice_fw *tas_fmw)
+{
+ struct tasdevice_calibration *calibration;
+ struct tasdev_blk *block;
+ unsigned int blks;
+ int i;
+
+ if (!tas_fmw->calibrations)
+ goto out;
+
+ for (i = 0; i < tas_fmw->nr_calibrations; i++) {
+ calibration = &tas_fmw->calibrations[i];
+ if (!calibration)
+ continue;
+
+ if (!calibration->dev_data.dev_blks)
+ continue;
+
+ for (blks = 0; blks < calibration->dev_data.nr_blk; blks++) {
+ block = &calibration->dev_data.dev_blks[blks];
+ if (!block)
+ continue;
+ kfree(block->data);
+ }
+ kfree(calibration->dev_data.dev_blks);
+ }
+ kfree(tas_fmw->calibrations);
+out:
+ kfree(tas_fmw);
+}
+
+/* Calibration data from firmware remove function. */
+void tasdevice_spi_calbin_remove(void *context)
+{
+ struct tasdevice_priv *tas_priv = context;
+
+ if (tas_priv->cali_data_fmw) {
+ tas2781_clear_calfirmware(tas_priv->cali_data_fmw);
+ tas_priv->cali_data_fmw = NULL;
+ }
+}
+
+/* Configuration remove function. */
+void tasdevice_spi_config_info_remove(void *context)
+{
+ struct tasdevice_priv *tas_priv = context;
+ struct tasdevice_rca *rca = &tas_priv->rcabin;
+ struct tasdevice_config_info **ci = rca->cfg_info;
+ unsigned int i, j;
+
+ if (!ci)
+ return;
+ for (i = 0; i < rca->ncfgs; i++) {
+ if (!ci[i])
+ continue;
+ if (ci[i]->blk_data) {
+ for (j = 0; j < ci[i]->real_nblocks; j++) {
+ if (!ci[i]->blk_data[j])
+ continue;
+ kfree(ci[i]->blk_data[j]->regdata);
+ kfree(ci[i]->blk_data[j]);
+ }
+ kfree(ci[i]->blk_data);
+ }
+ kfree(ci[i]);
+ }
+ kfree(ci);
+}
+
+/* DSP firmware program block data load function. */
+static int tasdevice_load_data(struct tasdevice_priv *tas_priv,
+ struct tasdevice_data *dev_data)
+{
+ struct tasdev_blk *block;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < dev_data->nr_blk; i++) {
+ block = &dev_data->dev_blks[i];
+ ret = tas_priv->tasdevice_load_block(tas_priv, block);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/* DSP firmware program load interface function. */
+int tasdevice_spi_prmg_load(void *context, int prm_no)
+{
+ struct tasdevice_priv *tas_priv = context;
+ struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+ struct tasdevice_prog *program;
+ struct tasdevice_config *conf;
+ int ret = 0;
+
+ if (!tas_fmw) {
+ dev_err(tas_priv->dev, "%s: Firmware is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (prm_no >= 0 && prm_no <= tas_fmw->nr_programs) {
+ tas_priv->cur_conf = 0;
+ tas_priv->is_loading = true;
+ program = &tas_fmw->programs[prm_no];
+ ret = tasdevice_load_data(tas_priv, &program->dev_data);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "Program failed %d.\n", ret);
+ return ret;
+ }
+ tas_priv->cur_prog = prm_no;
+
+ conf = &tas_fmw->configs[tas_priv->cur_conf];
+ ret = tasdevice_load_data(tas_priv, &conf->dev_data);
+ if (ret < 0)
+ dev_err(tas_priv->dev, "Config failed %d.\n", ret);
+ } else {
+ dev_err(tas_priv->dev,
+ "%s: prm(%d) is not in range of Programs %u\n",
+ __func__, prm_no, tas_fmw->nr_programs);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* RCABIN configuration switch interface function. */
+void tasdevice_spi_tuning_switch(void *context, int state)
+{
+ struct tasdevice_priv *tas_priv = context;
+ int profile_cfg_id = tas_priv->rcabin.profile_cfg_id;
+
+ if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
+ dev_err(tas_priv->dev, "DSP bin file not loaded\n");
+ return;
+ }
+
+ if (state == 0)
+ tasdevice_spi_select_cfg_blk(tas_priv, profile_cfg_id,
+ TASDEVICE_BIN_BLK_PRE_POWER_UP);
+ else
+ tasdevice_spi_select_cfg_blk(tas_priv, profile_cfg_id,
+ TASDEVICE_BIN_BLK_PRE_SHUTDOWN);
+}
diff --git a/sound/pci/lola/lola_clock.c b/sound/pci/lola/lola_clock.c
index cafd30e30913..2e73fbf335ed 100644
--- a/sound/pci/lola/lola_clock.c
+++ b/sound/pci/lola/lola_clock.c
@@ -35,7 +35,7 @@ unsigned int lola_sample_rate_convert(unsigned int coded)
default: return 0; /* error */
}
- /* ajustement */
+ /* adjustment */
switch (coded & 0x60) {
case (0 << 5): break;
case (1 << 5): freq = (freq * 999) / 1000; break;
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 11ba7d4eac2a..44085237fb44 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -696,7 +696,9 @@ snd_nm256_playback_copy(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
struct nm256_stream *s = runtime->private_data;
- return copy_from_iter_toio(s->bufptr + pos, src, count);
+ if (copy_from_iter_toio(s->bufptr + pos, count, src) != count)
+ return -EFAULT;
+ return 0;
}
/*
@@ -710,7 +712,9 @@ snd_nm256_capture_copy(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
struct nm256_stream *s = runtime->private_data;
- return copy_to_iter_fromio(dst, s->bufptr + pos, count);
+ if (copy_to_iter_fromio(s->bufptr + pos, count, dst) != count)
+ return -EFAULT;
+ return 0;
}
#endif /* !__i386__ */
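
This hunk, like the rme32 and rme96 hunks below, adapts the copy callbacks to
helpers that report how many bytes they actually moved: a short copy is now
turned into -EFAULT rather than leaking out as a positive partial count. A
standalone sketch of the pattern (copy_bytes() is a stand-in for the real
iov_iter MMIO helper):

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for copy_from_iter_toio(): returns the number of bytes copied. */
static size_t copy_bytes(void *dst, const void *src, size_t count)
{
        memcpy(dst, src, count);
        return count;
}

static int playback_copy(void *hwbuf, const void *src, size_t count)
{
        /* anything less than the requested count is treated as a fault */
        if (copy_bytes(hwbuf, src, count) != count)
                return -EFAULT;
        return 0;
}

int main(void)
{
        char dst[4];
        const char src[4] = "abc";

        return playback_copy(dst, src, sizeof(src));
}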
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 02144bbee6d5..a8c2ceaadef5 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -256,8 +256,10 @@ static int snd_rme32_playback_copy(struct snd_pcm_substream *substream,
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- return copy_from_iter_toio(rme32->iobase + RME32_IO_DATA_BUFFER + pos,
- src, count);
+ if (copy_from_iter_toio(rme32->iobase + RME32_IO_DATA_BUFFER + pos,
+ count, src) != count)
+ return -EFAULT;
+ return 0;
}
/* copy callback for halfduplex mode */
@@ -267,9 +269,10 @@ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream,
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- return copy_to_iter_fromio(dst,
- rme32->iobase + RME32_IO_DATA_BUFFER + pos,
- count);
+ if (copy_to_iter_fromio(rme32->iobase + RME32_IO_DATA_BUFFER + pos,
+ count, dst) != count)
+ return -EFAULT;
+ return 0;
}
/*
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index d50ad25574ad..1265a7efac60 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -322,8 +322,10 @@ snd_rme96_playback_copy(struct snd_pcm_substream *substream,
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
- return copy_from_iter_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos,
- src, count);
+ if (copy_from_iter_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos,
+ count, src) != count)
+ return -EFAULT;
+ return 0;
}
static int
@@ -333,9 +335,10 @@ snd_rme96_capture_copy(struct snd_pcm_substream *substream,
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
- return copy_to_iter_fromio(dst,
- rme96->iobase + RME96_IO_REC_BUFFER + pos,
- count);
+ if (copy_to_iter_fromio(rme96->iobase + RME96_IO_REC_BUFFER + pos,
+ count, dst) != count)
+ return -EFAULT;
+ return 0;
}
/*
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 1c504a591948..fd3dfbad397a 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3444,7 +3444,7 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
snd_iprintf(buffer, "MIDI1 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn0));
snd_iprintf(buffer, "MIDI2 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut1));
snd_iprintf(buffer, "MIDI2 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn1));
- snd_iprintf(buffer, "Use Midi Tasklet: %s\n", hdsp->use_midi_work ? "on" : "off");
+ snd_iprintf(buffer, "Use Midi Tasklet: %s\n", str_on_off(hdsp->use_midi_work));
snd_iprintf(buffer, "\n");
@@ -3452,8 +3452,8 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
snd_iprintf(buffer, "Buffer Size (Latency): %d samples (2 periods of %lu bytes)\n", x, (unsigned long) hdsp->period_bytes);
snd_iprintf(buffer, "Hardware pointer (frames): %ld\n", hdsp_hw_pointer(hdsp));
- snd_iprintf(buffer, "Precise pointer: %s\n", hdsp->precise_ptr ? "on" : "off");
- snd_iprintf(buffer, "Line out: %s\n", (hdsp->control_register & HDSP_LineOut) ? "on" : "off");
+ snd_iprintf(buffer, "Precise pointer: %s\n", str_on_off(hdsp->precise_ptr));
+ snd_iprintf(buffer, "Line out: %s\n", str_on_off(hdsp->control_register & HDSP_LineOut));
snd_iprintf(buffer, "Firmware version: %d\n", (status2&HDSP_version0)|(status2&HDSP_version1)<<1|(status2&HDSP_version2)<<2);
@@ -3750,8 +3750,8 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
snd_iprintf(buffer, "Phones Gain : %s\n", tmp);
snd_iprintf(buffer, "XLR Breakout Cable : %s\n",
- hdsp_toggle_setting(hdsp, HDSP_XLRBreakoutCable) ?
- "yes" : "no");
+ str_yes_no(hdsp_toggle_setting(hdsp,
+ HDSP_XLRBreakoutCable)));
if (hdsp->control_register & HDSP_AnalogExtensionBoard)
snd_iprintf(buffer, "AEB : on (ADAT1 internal)\n");
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index d7290463d654..f89718b19d23 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4927,14 +4927,14 @@ snd_hdspm_proc_read_madi(struct snd_info_entry *entry,
x, (unsigned long) hdspm->period_bytes);
snd_iprintf(buffer, "Line out: %s\n",
- (hdspm->control_register & HDSPM_LineOut) ? "on " : "off");
+ str_on_off(hdspm->control_register & HDSPM_LineOut));
snd_iprintf(buffer,
"ClearTrackMarker = %s, Transmit in %s Channel Mode, "
"Auto Input %s\n",
- (hdspm->control_register & HDSPM_clr_tms) ? "on" : "off",
+ str_on_off(hdspm->control_register & HDSPM_clr_tms),
(hdspm->control_register & HDSPM_TX_64ch) ? "64" : "56",
- (hdspm->control_register & HDSPM_AutoInp) ? "on" : "off");
+ str_on_off(hdspm->control_register & HDSPM_AutoInp));
if (!(hdspm->control_register & HDSPM_ClockModeMaster))
@@ -5088,12 +5088,9 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
snd_iprintf(buffer,
"ClearTrackMarker %s, Emphasis %s, Dolby %s\n",
- (hdspm->
- control_register & HDSPM_clr_tms) ? "on" : "off",
- (hdspm->
- control_register & HDSPM_Emphasis) ? "on" : "off",
- (hdspm->
- control_register & HDSPM_Dolby) ? "on" : "off");
+ str_on_off(hdspm->control_register & HDSPM_clr_tms),
+ str_on_off(hdspm->control_register & HDSPM_Emphasis),
+ str_on_off(hdspm->control_register & HDSPM_Dolby));
pref_syncref = hdspm_pref_sync_ref(hdspm);
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index d066c70ae160..5b8dd7b0a02c 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -1561,8 +1561,7 @@ snd_rme9652_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
x, (unsigned long) rme9652->period_bytes);
snd_iprintf(buffer, "Hardware pointer (frames): %ld\n",
rme9652_hw_pointer(rme9652));
- snd_iprintf(buffer, "Passthru: %s\n",
- rme9652->passthru ? "yes" : "no");
+ snd_iprintf(buffer, "Passthru: %s\n", str_yes_no(rme9652->passthru));
if ((rme9652->control_register & (RME9652_Master | RME9652_wsel)) == 0) {
snd_iprintf(buffer, "Clock mode: autosync\n");
@@ -1685,7 +1684,7 @@ snd_rme9652_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
snd_iprintf(buffer, "\n");
snd_iprintf(buffer, "Timecode signal: %s\n",
- (status & RME9652_tc_valid) ? "yes" : "no");
+ str_yes_no(status & RME9652_tc_valid));
/* thru modes */
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index f91cbf6eeca0..c30eaf1038e7 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1118,7 +1118,7 @@ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry,
tmp = sonic->srs_space & 0x0f;
snd_iprintf(buffer, "SRS 3D : %s\n",
- sonic->srs_space & 0x80 ? "off" : "on");
+ str_off_on(sonic->srs_space & 0x80));
snd_iprintf(buffer, "SRS Space : %s\n",
tmp == 0x00 ? "100%" :
tmp == 0x01 ? "75%" :
@@ -1135,9 +1135,9 @@ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry,
tmp == 0x00 ? "on-board ROM" :
tmp == 0x01 ? "PCI bus" : "on-board ROM + PCI bus");
tmp = sonic->mpu_switch;
- snd_iprintf(buffer, "Onboard synth : %s\n", tmp & 0x01 ? "on" : "off");
- snd_iprintf(buffer, "Ext. Rx to synth : %s\n", tmp & 0x02 ? "on" : "off");
- snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", tmp & 0x04 ? "on" : "off");
+ snd_iprintf(buffer, "Onboard synth : %s\n", str_on_off(tmp & 0x01));
+ snd_iprintf(buffer, "Ext. Rx to synth : %s\n", str_on_off(tmp & 0x02));
+ snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", str_on_off(tmp & 0x04));
}
static void snd_sonicvibes_proc_init(struct sonicvibes *sonic)
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index e98eea1e6d81..8039f445bee2 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3278,9 +3278,9 @@ static void snd_trident_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "Spurious IRQs : %d\n", trident->spurious_irq_count);
snd_iprintf(buffer, "Spurious IRQ dlta: %d\n", trident->spurious_irq_max_delta);
if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018)
- snd_iprintf(buffer, "IEC958 Mixer Out : %s\n", trident->spdif_ctrl == 0x28 ? "on" : "off");
+ snd_iprintf(buffer, "IEC958 Mixer Out : %s\n", str_on_off(trident->spdif_ctrl == 0x28));
if (trident->device == TRIDENT_DEVICE_ID_NX) {
- snd_iprintf(buffer, "Rear Speakers : %s\n", trident->ac97_ctrl & 0x00000010 ? "on" : "off");
+ snd_iprintf(buffer, "Rear Speakers : %s\n", str_on_off(trident->ac97_ctrl & 0x00000010));
if (trident->tlb.entries) {
snd_iprintf(buffer,"\nVirtual Memory\n");
snd_iprintf(buffer, "Memory Maximum : %d\n", trident->tlb.memhdr->size);
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index c7590d4989bb..803521178279 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -105,7 +105,7 @@ config SND_SOC_AMD_ACP6x
config SND_SOC_AMD_YC_MACH
tristate "AMD YC support for DMIC"
select SND_SOC_DMIC
- depends on SND_SOC_AMD_ACP6x
+ depends on SND_SOC_AMD_ACP6x && ACPI
help
This option enables machine driver for Yellow Carp platform
using dmic. ACP IP has PDM Decoder block with DMA controller.
diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
index 1f59ee248771..89e99ed4275a 100644
--- a/sound/soc/amd/acp/acp-i2s.c
+++ b/sound/soc/amd/acp/acp-i2s.c
@@ -181,6 +181,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas
break;
default:
dev_err(dev, "Unknown chip revision %d\n", chip->acp_rev);
+ spin_unlock_irq(&adata->acp_lock);
return -EINVAL;
}
}
diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
index 4575326d0635..8b556950b855 100644
--- a/sound/soc/amd/ps/pci-ps.c
+++ b/sound/soc/amd/ps/pci-ps.c
@@ -83,6 +83,7 @@ static int acp63_init(void __iomem *acp_base, struct device *dev)
return ret;
}
acp63_enable_interrupts(acp_base);
+ writel(0, acp_base + ACP_ZSC_DSP_CTRL);
return 0;
}
@@ -97,6 +98,7 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
return ret;
}
writel(0, acp_base + ACP_CONTROL);
+ writel(1, acp_base + ACP_ZSC_DSP_CTRL);
return 0;
}
@@ -312,6 +314,7 @@ static struct snd_soc_acpi_mach *acp63_sdw_machine_select(struct device *dev)
if (mach && mach->link_mask) {
mach->mach_params.links = mach->links;
mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.subsystem_rev = acp_data->acp_rev;
return mach;
}
}
@@ -669,8 +672,10 @@ static int __maybe_unused snd_acp63_suspend(struct device *dev)
adata = dev_get_drvdata(dev);
if (adata->is_sdw_dev) {
adata->sdw_en_stat = check_acp_sdw_enable_status(adata);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(1, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
+ }
}
ret = acp63_deinit(adata->acp63_base, dev);
if (ret)
@@ -685,9 +690,10 @@ static int __maybe_unused snd_acp63_runtime_resume(struct device *dev)
int ret;
adata = dev_get_drvdata(dev);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(0, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
-
+ }
ret = acp63_init(adata->acp63_base, dev);
if (ret) {
dev_err(dev, "ACP init failed\n");
@@ -705,8 +711,10 @@ static int __maybe_unused snd_acp63_resume(struct device *dev)
int ret;
adata = dev_get_drvdata(dev);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(0, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
+ }
ret = acp63_init(adata->acp63_base, dev);
if (ret)
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index ecf57a6cb7c3..b16587d8f97a 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -307,6 +307,34 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0b9e87dc2b6c..ee35f3aa5521 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -692,7 +692,7 @@ config SND_SOC_AW88261
the input amplitude.
config SND_SOC_AW88081
- tristate "Soc Audio for awinic aw88081"
+ tristate "Soc Audio for awinic aw88081/aw88083"
depends on I2C
select REGMAP_I2C
select SND_SOC_AW88395_LIB
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f37e82ddb7a1..d7ad795603c1 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -80,7 +80,7 @@ snd-soc-cs35l56-shared-y := cs35l56-shared.o
snd-soc-cs35l56-i2c-y := cs35l56-i2c.o
snd-soc-cs35l56-spi-y := cs35l56-spi.o
snd-soc-cs35l56-sdw-y := cs35l56-sdw.o
-snd-soc-cs40l50-objs := cs40l50-codec.o
+snd-soc-cs40l50-y := cs40l50-codec.o
snd-soc-cs42l42-y := cs42l42.o
snd-soc-cs42l42-i2c-y := cs42l42-i2c.o
snd-soc-cs42l42-sdw-y := cs42l42-sdw.o
@@ -92,7 +92,7 @@ snd-soc-cs42l52-y := cs42l52.o
snd-soc-cs42l56-y := cs42l56.o
snd-soc-cs42l73-y := cs42l73.o
snd-soc-cs42l83-i2c-y := cs42l83-i2c.o
-snd-soc-cs42l84-objs := cs42l84.o
+snd-soc-cs42l84-y := cs42l84.o
snd-soc-cs4234-y := cs4234.o
snd-soc-cs4265-y := cs4265.o
snd-soc-cs4270-y := cs4270.o
@@ -334,8 +334,8 @@ snd-soc-wcd-classh-y := wcd-clsh-v2.o
snd-soc-wcd-mbhc-y := wcd-mbhc-v2.o
snd-soc-wcd9335-y := wcd9335.o
snd-soc-wcd934x-y := wcd934x.o
-snd-soc-wcd937x-objs := wcd937x.o
-snd-soc-wcd937x-sdw-objs := wcd937x-sdw.o
+snd-soc-wcd937x-y := wcd937x.o
+snd-soc-wcd937x-sdw-y := wcd937x-sdw.o
snd-soc-wcd938x-y := wcd938x.o
snd-soc-wcd938x-sdw-y := wcd938x-sdw.o
snd-soc-wcd939x-y := wcd939x.o
diff --git a/sound/soc/codecs/ad193x-i2c.c b/sound/soc/codecs/ad193x-i2c.c
index 15d74bb31c4c..6aa168e01fbb 100644
--- a/sound/soc/codecs/ad193x-i2c.c
+++ b/sound/soc/codecs/ad193x-i2c.c
@@ -23,7 +23,6 @@ MODULE_DEVICE_TABLE(i2c, ad193x_id);
static int ad193x_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(ad193x_id, client);
config = ad193x_regmap_config;
config.val_bits = 8;
@@ -31,7 +30,7 @@ static int ad193x_i2c_probe(struct i2c_client *client)
return ad193x_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- (enum ad193x_type)id->driver_data);
+ (uintptr_t)i2c_get_match_data(client));
}
static struct i2c_driver ad193x_i2c_driver = {
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index a554255186ae..eba7e4f42c78 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -14,12 +14,9 @@
#include "adau1761.h"
-static const struct i2c_device_id adau1761_i2c_ids[];
-
static int adau1761_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1761_i2c_ids, client);
config = adau1761_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1761_i2c_probe(struct i2c_client *client)
return adau1761_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static void adau1761_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 3a170fd78ff3..cb67fde8d9a8 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -14,12 +14,9 @@
#include "adau1781.h"
-static const struct i2c_device_id adau1781_i2c_ids[];
-
static int adau1781_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1781_i2c_ids, client);
config = adau1781_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1781_i2c_probe(struct i2c_client *client)
return adau1781_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static void adau1781_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/adau1977-i2c.c b/sound/soc/codecs/adau1977-i2c.c
index 24c7b9c84c19..441c8079246a 100644
--- a/sound/soc/codecs/adau1977-i2c.c
+++ b/sound/soc/codecs/adau1977-i2c.c
@@ -14,12 +14,9 @@
#include "adau1977.h"
-static const struct i2c_device_id adau1977_i2c_ids[];
-
static int adau1977_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1977_i2c_ids, client);
config = adau1977_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1977_i2c_probe(struct i2c_client *client)
return adau1977_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static const struct i2c_device_id adau1977_i2c_ids[] = {
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index b24c32206884..fbf723758079 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -987,9 +987,9 @@ static int alc5623_i2c_probe(struct i2c_client *client)
struct alc5623_priv *alc5623;
struct device_node *np;
unsigned int vid1, vid2;
+ unsigned int matched_id;
int ret;
u32 val32;
- const struct i2c_device_id *id;
alc5623 = devm_kzalloc(&client->dev, sizeof(struct alc5623_priv),
GFP_KERNEL);
@@ -1016,12 +1016,12 @@ static int alc5623_i2c_probe(struct i2c_client *client)
}
vid2 >>= 8;
- id = i2c_match_id(alc5623_i2c_table, client);
+ matched_id = (uintptr_t)i2c_get_match_data(client);
- if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
+ if ((vid1 != 0x10ec) || (vid2 != matched_id)) {
dev_err(&client->dev, "unknown or wrong codec\n");
- dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n",
- 0x10ec, id->driver_data,
+ dev_err(&client->dev, "Expected %x:%x, got %x:%x\n",
+ 0x10ec, matched_id,
vid1, vid2);
return -ENODEV;
}
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index d5021f266930..72f4622204ff 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1108,7 +1108,7 @@ static int alc5632_i2c_probe(struct i2c_client *client)
struct alc5632_priv *alc5632;
int ret, ret1, ret2;
unsigned int vid1, vid2;
- const struct i2c_device_id *id;
+ unsigned int matched_id;
alc5632 = devm_kzalloc(&client->dev,
sizeof(struct alc5632_priv), GFP_KERNEL);
@@ -1134,9 +1134,9 @@ static int alc5632_i2c_probe(struct i2c_client *client)
vid2 >>= 8;
- id = i2c_match_id(alc5632_i2c_table, client);
+ matched_id = (uintptr_t)i2c_get_match_data(client);
- if ((vid1 != 0x10EC) || (vid2 != id->driver_data)) {
+ if ((vid1 != 0x10EC) || (vid2 != matched_id)) {
dev_err(&client->dev,
"Device is not a ALC5632: VID1=0x%x, VID2=0x%x\n", vid1, vid2);
return -EINVAL;
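
The codec probe hunks above drop the forward-declared ID-table lookup through
i2c_match_id() in favour of i2c_get_match_data(), which resolves the per-chip
driver data from the I2C, ACPI or OF match tables and returns it as a
const void *; the integer value is recovered by casting through uintptr_t. A
standalone sketch of that pointer-sized cookie round-trip (get_match_data()
is a stand-in for the core helper, and the enum is illustrative):

#include <stdint.h>
#include <stdio.h>

enum chip_type { AD1936, AD1938 };

/* Stand-in for i2c_get_match_data(): driver data stored as a pointer. */
static const void *get_match_data(void)
{
        return (const void *)(uintptr_t)AD1938;
}

int main(void)
{
        enum chip_type type = (enum chip_type)(uintptr_t)get_match_data();

        printf("probed chip type %d\n", type);
        return 0;
}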
diff --git a/sound/soc/codecs/aw88081.c b/sound/soc/codecs/aw88081.c
index 58b8e002d76f..ad16ab6812cd 100644
--- a/sound/soc/codecs/aw88081.c
+++ b/sound/soc/codecs/aw88081.c
@@ -14,13 +14,18 @@
#include "aw88081.h"
#include "aw88395/aw88395_device.h"
+enum aw8808x_type {
+ AW88081,
+ AW88083,
+};
+
struct aw88081 {
struct aw_device *aw_pa;
struct mutex lock;
struct delayed_work start_work;
struct regmap *regmap;
struct aw_container *aw_cfg;
-
+ enum aw8808x_type devtype;
bool phase_sync;
};
@@ -32,6 +37,14 @@ static const struct regmap_config aw88081_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG,
};
+static const struct regmap_config aw88083_regmap_config = {
+ .val_bits = 16,
+ .reg_bits = 8,
+ .max_register = AW88083_REG_MAX,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
static int aw88081_dev_get_iis_status(struct aw_device *aw_dev)
{
unsigned int reg_val;
@@ -196,6 +209,41 @@ static void aw88081_dev_amppd(struct aw_device *aw_dev, bool amppd)
~AW88081_EN_PA_MASK, AW88081_EN_PA_WORKING_VALUE);
}
+static void aw88083_i2c_wen(struct aw88081 *aw88081, bool flag)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw88081->devtype != AW88083)
+ return;
+
+ if (flag)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_I2C_WEN_MASK, AW88083_I2C_WEN_ENABLE_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_I2C_WEN_MASK, AW88083_I2C_WEN_DISABLE_VALUE);
+}
+
+static void aw88083_dev_amppd(struct aw_device *aw_dev, bool amppd)
+{
+ if (amppd)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_AMPPD_MASK, AW88083_AMPPD_POWER_DOWN_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_AMPPD_MASK, AW88083_AMPPD_WORKING_VALUE);
+}
+
+static void aw88083_dev_pllpd(struct aw_device *aw_dev, bool pllpd)
+{
+ if (pllpd)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_PLL_PD_MASK, AW88083_PLL_PD_WORKING_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_PLL_PD_MASK, AW88083_PLL_PD_POWER_DOWN_VALUE);
+}
+
static void aw88081_dev_clear_int_status(struct aw_device *aw_dev)
{
unsigned int int_status;
@@ -284,12 +332,90 @@ static void aw88081_dev_uls_hmute(struct aw_device *aw_dev, bool uls_hmute)
AW88081_ULS_HMUTE_DISABLE_VALUE);
}
+static int aw88081_dev_reg_value_check(struct aw_device *aw_dev,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ unsigned int read_vol;
+
+ if (reg_addr == AW88081_SYSCTRL_REG) {
+ *reg_val &= ~(~AW88081_EN_PA_MASK |
+ ~AW88081_PWDN_MASK |
+ ~AW88081_HMUTE_MASK |
+ ~AW88081_ULS_HMUTE_MASK);
+
+ *reg_val |= AW88081_EN_PA_POWER_DOWN_VALUE |
+ AW88081_PWDN_POWER_DOWN_VALUE |
+ AW88081_HMUTE_ENABLE_VALUE |
+ AW88081_ULS_HMUTE_ENABLE_VALUE;
+ }
+
+ if (reg_addr == AW88081_SYSCTRL2_REG) {
+ read_vol = (*reg_val & (~AW88081_VOL_MASK)) >> AW88081_VOL_START_BIT;
+ aw_dev->volume_desc.init_volume = read_vol;
+ }
+
+ /* i2stxen */
+ if (reg_addr == AW88081_I2SCTRL3_REG) {
+ /* close tx */
+ *reg_val &= AW88081_I2STXEN_MASK;
+ *reg_val |= AW88081_I2STXEN_DISABLE_VALUE;
+ }
+
+ return 0;
+}
+
+static int aw88083_dev_reg_value_check(struct aw_device *aw_dev,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ unsigned int read_vol;
+
+ if (reg_addr == AW88081_SYSCTRL_REG) {
+ *reg_val &= ~(~AW88083_AMPPD_MASK |
+ ~AW88081_PWDN_MASK |
+ ~AW88081_HMUTE_MASK |
+ ~AW88083_I2C_WEN_MASK);
+
+ *reg_val |= AW88083_AMPPD_POWER_DOWN_VALUE |
+ AW88081_PWDN_POWER_DOWN_VALUE |
+ AW88081_HMUTE_ENABLE_VALUE |
+ AW88083_I2C_WEN_ENABLE_VALUE;
+ }
+
+ if (reg_addr == AW88081_SYSCTRL2_REG) {
+ read_vol = (*reg_val & (~AW88081_VOL_MASK)) >> AW88081_VOL_START_BIT;
+ aw_dev->volume_desc.init_volume = read_vol;
+ }
+
+ return 0;
+}
+
+static int aw88081_reg_value_check(struct aw88081 *aw88081,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_reg_value_check(aw_dev, reg_addr, reg_val);
+ break;
+ case AW88083:
+ ret = aw88083_dev_reg_value_check(aw_dev, reg_addr, reg_val);
+ break;
+ default:
+ dev_err(aw_dev->dev, "unsupported device\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int aw88081_dev_reg_update(struct aw88081 *aw88081,
unsigned char *data, unsigned int len)
{
struct aw_device *aw_dev = aw88081->aw_pa;
struct aw_volume_desc *vol_desc = &aw_dev->volume_desc;
- unsigned int read_vol;
int data_len, i, ret;
int16_t *reg_data;
u16 reg_val;
@@ -312,30 +438,9 @@ static int aw88081_dev_reg_update(struct aw88081 *aw88081,
reg_addr = reg_data[i];
reg_val = reg_data[i + 1];
- if (reg_addr == AW88081_SYSCTRL_REG) {
- reg_val &= ~(~AW88081_EN_PA_MASK |
- ~AW88081_PWDN_MASK |
- ~AW88081_HMUTE_MASK |
- ~AW88081_ULS_HMUTE_MASK);
-
- reg_val |= AW88081_EN_PA_POWER_DOWN_VALUE |
- AW88081_PWDN_POWER_DOWN_VALUE |
- AW88081_HMUTE_ENABLE_VALUE |
- AW88081_ULS_HMUTE_ENABLE_VALUE;
- }
-
- if (reg_addr == AW88081_SYSCTRL2_REG) {
- read_vol = (reg_val & (~AW88081_VOL_MASK)) >>
- AW88081_VOL_START_BIT;
- aw_dev->volume_desc.init_volume = read_vol;
- }
-
- /* i2stxen */
- if (reg_addr == AW88081_I2SCTRL3_REG) {
- /* close tx */
- reg_val &= AW88081_I2STXEN_MASK;
- reg_val |= AW88081_I2STXEN_DISABLE_VALUE;
- }
+ ret = aw88081_reg_value_check(aw88081, reg_addr, &reg_val);
+ if (ret)
+ return ret;
ret = regmap_write(aw_dev->regmap, reg_addr, reg_val);
if (ret)
@@ -474,8 +579,60 @@ pll_check_fail:
return ret;
}
-static int aw88081_dev_stop(struct aw_device *aw_dev)
+static int aw88083_dev_start(struct aw88081 *aw88081)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw_dev->status == AW88081_DEV_PW_ON) {
+ dev_dbg(aw_dev->dev, "already power on");
+ return 0;
+ }
+
+ aw88083_i2c_wen(aw88081, true);
+
+ /* power on */
+ aw88081_dev_pwd(aw_dev, false);
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
+
+ aw88083_dev_pllpd(aw_dev, true);
+ /* release amp power-down (amp on) */
+ aw88083_dev_amppd(aw_dev, false);
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 50);
+
+ /* unmute */
+ aw88081_dev_mute(aw_dev, false);
+
+ aw88083_i2c_wen(aw88081, false);
+
+ aw_dev->status = AW88081_DEV_PW_ON;
+
+ return 0;
+}
+
+static int aw88081_device_start(struct aw88081 *aw88081)
+{
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_start(aw88081);
+ break;
+ case AW88083:
+ ret = aw88083_dev_start(aw88081);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(aw88081->aw_pa->dev, "unsupported device\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int aw88081_dev_stop(struct aw88081 *aw88081)
{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
if (aw_dev->status == AW88081_DEV_PW_OFF) {
dev_dbg(aw_dev->dev, "already power off");
return 0;
@@ -503,6 +660,56 @@ static int aw88081_dev_stop(struct aw_device *aw_dev)
return 0;
}
+static int aw88083_dev_stop(struct aw88081 *aw88081)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw_dev->status == AW88081_DEV_PW_OFF) {
+ dev_dbg(aw_dev->dev, "already power off");
+ return 0;
+ }
+
+ aw_dev->status = AW88081_DEV_PW_OFF;
+
+ aw88083_i2c_wen(aw88081, true);
+ /* set mute */
+ aw88081_dev_mute(aw_dev, true);
+
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 100);
+
+ /* enable amppd */
+ aw88083_dev_amppd(aw_dev, true);
+
+ aw88083_dev_pllpd(aw_dev, false);
+
+ /* set power down */
+ aw88081_dev_pwd(aw_dev, true);
+
+ aw88083_i2c_wen(aw88081, false);
+
+ return 0;
+}
+
+static int aw88081_stop(struct aw88081 *aw88081)
+{
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_stop(aw88081);
+ break;
+ case AW88083:
+ ret = aw88083_dev_stop(aw88081);
+ break;
+ default:
+ dev_err(aw88081->aw_pa->dev, "unsupported device\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int aw88081_reg_update(struct aw88081 *aw88081, bool force)
{
struct aw_device *aw_dev = aw88081->aw_pa;
@@ -540,7 +747,7 @@ static void aw88081_start_pa(struct aw88081 *aw88081)
dev_err(aw88081->aw_pa->dev, "fw update failed, cnt:%d\n", i);
continue;
}
- ret = aw88081_dev_start(aw88081);
+ ret = aw88081_device_start(aw88081);
if (ret) {
dev_err(aw88081->aw_pa->dev, "aw88081 device start failed. retry = %d", i);
continue;
@@ -745,7 +952,7 @@ static int aw88081_profile_set(struct snd_kcontrol *kcontrol,
}
if (aw88081->aw_pa->status) {
- aw88081_dev_stop(aw88081->aw_pa);
+ aw88081_stop(aw88081);
aw88081_start(aw88081, AW88081_SYNC_START);
}
@@ -781,12 +988,16 @@ static int aw88081_volume_set(struct snd_kcontrol *kcontrol,
if (value < mc->min || value > mc->max)
return -EINVAL;
+ aw88083_i2c_wen(aw88081, true);
+
if (vol_desc->ctl_volume != value) {
vol_desc->ctl_volume = value;
aw88081_dev_set_volume(aw88081->aw_pa, vol_desc->ctl_volume);
return 1;
}
+ aw88083_i2c_wen(aw88081, false);
+
return 0;
}
@@ -860,13 +1071,19 @@ static int aw88081_init(struct aw88081 *aw88081, struct i2c_client *i2c, struct
dev_err(&i2c->dev, "%s read chipid error. ret = %d", __func__, ret);
return ret;
}
- if (chip_id != AW88081_CHIP_ID) {
+
+ switch (chip_id) {
+ case AW88081_CHIP_ID:
+ dev_dbg(&i2c->dev, "chip id = 0x%x\n", chip_id);
+ break;
+ case AW88083_CHIP_ID:
+ dev_dbg(&i2c->dev, "chip id = 0x%x\n", chip_id);
+ break;
+ default:
dev_err(&i2c->dev, "unsupported device");
return -ENXIO;
}
- dev_dbg(&i2c->dev, "chip id = %x\n", chip_id);
-
aw_dev = devm_kzalloc(&i2c->dev, sizeof(*aw_dev), GFP_KERNEL);
if (!aw_dev)
return -ENOMEM;
@@ -875,7 +1092,7 @@ static int aw88081_init(struct aw88081 *aw88081, struct i2c_client *i2c, struct
aw_dev->i2c = i2c;
aw_dev->regmap = regmap;
aw_dev->dev = &i2c->dev;
- aw_dev->chip_id = AW88081_CHIP_ID;
+ aw_dev->chip_id = chip_id;
aw_dev->acf = NULL;
aw_dev->prof_info.prof_desc = NULL;
aw_dev->prof_info.prof_type = AW88395_DEV_NONE_TYPE_ID;
@@ -912,21 +1129,8 @@ static int aw88081_dev_init(struct aw88081 *aw88081, struct aw_container *aw_cfg
return ret;
}
- aw88081_dev_clear_int_status(aw_dev);
-
- aw88081_dev_uls_hmute(aw_dev, true);
-
- aw88081_dev_mute(aw_dev, true);
-
- usleep_range(AW88081_5000_US, AW88081_5000_US + 10);
-
- aw88081_dev_i2s_tx_enable(aw_dev, false);
-
- usleep_range(AW88081_1000_US, AW88081_1000_US + 100);
-
- aw88081_dev_amppd(aw_dev, true);
-
- aw88081_dev_pwd(aw_dev, true);
+ aw_dev->status = AW88081_DEV_PW_ON;
+ aw88081_stop(aw88081);
return 0;
}
@@ -977,7 +1181,7 @@ static int aw88081_playback_event(struct snd_soc_dapm_widget *w,
aw88081_start(aw88081, AW88081_ASYNC_START);
break;
case SND_SOC_DAPM_POST_PMD:
- aw88081_dev_stop(aw88081->aw_pa);
+ aw88081_stop(aw88081);
break;
default:
break;
@@ -1036,8 +1240,17 @@ static const struct snd_soc_component_driver soc_codec_dev_aw88081 = {
.num_controls = ARRAY_SIZE(aw88081_controls),
};
+static const struct i2c_device_id aw88081_i2c_id[] = {
+ { AW88081_I2C_NAME, AW88081},
+ { AW88083_I2C_NAME, AW88083},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw88081_i2c_id);
+
static int aw88081_i2c_probe(struct i2c_client *i2c)
{
+ const struct regmap_config *regmap_config;
+ const struct i2c_device_id *id;
struct aw88081 *aw88081;
int ret;
@@ -1049,11 +1262,25 @@ static int aw88081_i2c_probe(struct i2c_client *i2c)
if (!aw88081)
return -ENOMEM;
+ id = i2c_match_id(aw88081_i2c_id, i2c);
+ aw88081->devtype = id->driver_data;
+
mutex_init(&aw88081->lock);
i2c_set_clientdata(i2c, aw88081);
- aw88081->regmap = devm_regmap_init_i2c(i2c, &aw88081_regmap_config);
+ switch (aw88081->devtype) {
+ case AW88081:
+ regmap_config = &aw88081_regmap_config;
+ break;
+ case AW88083:
+ regmap_config = &aw88083_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ aw88081->regmap = devm_regmap_init_i2c(i2c, regmap_config);
if (IS_ERR(aw88081->regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(aw88081->regmap),
"failed to init regmap\n");
@@ -1068,12 +1295,6 @@ static int aw88081_i2c_probe(struct i2c_client *i2c)
aw88081_dai, ARRAY_SIZE(aw88081_dai));
}
-static const struct i2c_device_id aw88081_i2c_id[] = {
- { AW88081_I2C_NAME },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, aw88081_i2c_id);
-
static struct i2c_driver aw88081_i2c_driver = {
.driver = {
.name = AW88081_I2C_NAME,
diff --git a/sound/soc/codecs/aw88081.h b/sound/soc/codecs/aw88081.h
index b4bf7288021a..7a4564270ab3 100644
--- a/sound/soc/codecs/aw88081.h
+++ b/sound/soc/codecs/aw88081.h
@@ -231,6 +231,49 @@
#define AW88081_CCO_MUX_BYPASS_VALUE \
(AW88081_CCO_MUX_BYPASS << AW88081_CCO_MUX_START_BIT)
+#define AW88083_I2C_WEN_START_BIT (14)
+#define AW88083_I2C_WEN_BITS_LEN (2)
+#define AW88083_I2C_WEN_MASK \
+ (~(((1<<AW88083_I2C_WEN_BITS_LEN)-1) << AW88083_I2C_WEN_START_BIT))
+
+#define AW88083_I2C_WEN_DISABLE (0)
+#define AW88083_I2C_WEN_DISABLE_VALUE \
+ (AW88083_I2C_WEN_DISABLE << AW88083_I2C_WEN_START_BIT)
+
+#define AW88083_I2C_WEN_ENABLE (2)
+#define AW88083_I2C_WEN_ENABLE_VALUE \
+ (AW88083_I2C_WEN_ENABLE << AW88083_I2C_WEN_START_BIT)
+
+#define AW88083_PLL_PD_START_BIT (2)
+#define AW88083_PLL_PD_BITS_LEN (1)
+#define AW88083_PLL_PD_MASK \
+ (~(((1<<AW88083_PLL_PD_BITS_LEN)-1) << AW88083_PLL_PD_START_BIT))
+
+#define AW88083_PLL_PD_POWER_DOWN (1)
+#define AW88083_PLL_PD_POWER_DOWN_VALUE \
+ (AW88083_PLL_PD_POWER_DOWN << AW88083_PLL_PD_START_BIT)
+
+#define AW88083_PLL_PD_WORKING (0)
+#define AW88083_PLL_PD_WORKING_VALUE \
+ (AW88083_PLL_PD_WORKING << AW88083_PLL_PD_START_BIT)
+
+#define AW88083_AMPPD_START_BIT (1)
+#define AW88083_AMPPD_BITS_LEN (1)
+#define AW88083_AMPPD_MASK \
+ (~(((1<<AW88083_AMPPD_BITS_LEN)-1) << AW88083_AMPPD_START_BIT))
+
+#define AW88083_AMPPD_WORKING (0)
+#define AW88083_AMPPD_WORKING_VALUE \
+ (AW88083_AMPPD_WORKING << AW88083_AMPPD_START_BIT)
+
+#define AW88083_AMPPD_POWER_DOWN (1)
+#define AW88083_AMPPD_POWER_DOWN_VALUE \
+ (AW88083_AMPPD_POWER_DOWN << AW88083_AMPPD_START_BIT)
+
+#define AW88083_REG_MAX (0x7D)
+#define AW88083_I2C_NAME "aw88083"
+#define AW88083_CHIP_ID 0x2407
+
#define AW88081_START_RETRIES (5)
#define AW88081_START_WORK_DELAY_MS (0)
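The new AW88083 field macros above keep the driver's existing convention: each bit field gets an inverted mask plus pre-shifted value macros, so a caller clears the field with the mask and then ORs in the wanted value. A minimal sketch of that read-modify-write pattern, assuming a regmap handle and a placeholder register define (AW88083_EXAMPLE_REG is illustrative, not a real header entry):

	unsigned int val;

	regmap_read(regmap, AW88083_EXAMPLE_REG, &val);
	val &= AW88083_I2C_WEN_MASK;		/* clear the 2-bit field at bits [15:14] */
	val |= AW88083_I2C_WEN_ENABLE_VALUE;	/* field value 2, shifted to bit 14 */
	regmap_write(regmap, AW88083_EXAMPLE_REG, val);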
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index ae045c88c48d..735a1e487c6f 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -646,6 +646,12 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
.rates = CS35L56_RATES,
.formats = CS35L56_RX_FORMATS,
},
+ .symmetric_rate = 1,
+ .ops = &cs35l56_sdw_dai_ops,
+ },
+ {
+ .name = "cs35l56-sdw1c",
+ .id = 2,
.capture = {
.stream_name = "SDW1 Capture",
.channels_min = 1,
@@ -655,7 +661,7 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
},
.symmetric_rate = 1,
.ops = &cs35l56_sdw_dai_ops,
- }
+ },
};
static int cs35l56_write_cal(struct cs35l56_private *cs35l56)
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 83c21c17fb80..d2a2daefc2ec 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -12,7 +12,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/find.h>
+#include <linux/bitmap.h>
#include <linux/gcd.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index e7cc50096297..f171bd66fcac 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -13,9 +13,9 @@
#include "cs42l51.h"
-static struct i2c_device_id cs42l51_i2c_id[] = {
- {"cs42l51"},
- {}
+static const struct i2c_device_id cs42l51_i2c_id[] = {
+ { "cs42l51" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id);
diff --git a/sound/soc/codecs/cs42l84.c b/sound/soc/codecs/cs42l84.c
index 17d5c96e334d..88cf3c03986e 100644
--- a/sound/soc/codecs/cs42l84.c
+++ b/sound/soc/codecs/cs42l84.c
@@ -1087,7 +1087,7 @@ static const struct of_device_id cs42l84_of_match[] = {
MODULE_DEVICE_TABLE(of, cs42l84_of_match);
static const struct i2c_device_id cs42l84_id[] = {
- {"cs42l84", 0},
+ { "cs42l84" },
{}
};
MODULE_DEVICE_TABLE(i2c, cs42l84_id);
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index ca4cc954efa8..eb97ac73ec06 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -2203,6 +2203,8 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
return ret;
}
+ mutex_init(&da7213->ctrl_lock);
+
pm_runtime_set_autosuspend_delay(&i2c->dev, 100);
pm_runtime_use_autosuspend(&i2c->dev);
pm_runtime_set_active(&i2c->dev);
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index f508df01145b..e7bd561a8f40 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -101,7 +101,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
ES8316_DAC_VOLR, 0, 0xc0, 1, dac_vol_tlv),
SOC_SINGLE("DAC Soft Ramp Switch", ES8316_DAC_SET1, 4, 1, 1),
- SOC_SINGLE("DAC Soft Ramp Rate", ES8316_DAC_SET1, 2, 4, 0),
+ SOC_SINGLE("DAC Soft Ramp Rate", ES8316_DAC_SET1, 2, 3, 0),
SOC_SINGLE("DAC Notch Filter Switch", ES8316_DAC_SET2, 6, 1, 0),
SOC_SINGLE("DAC Double Fs Switch", ES8316_DAC_SET2, 7, 1, 0),
SOC_SINGLE("DAC Stereo Enhancement", ES8316_DAC_SET3, 0, 7, 0),
diff --git a/sound/soc/codecs/es8323.c b/sound/soc/codecs/es8323.c
index 6f4fa36ea34d..a9822998199f 100644
--- a/sound/soc/codecs/es8323.c
+++ b/sound/soc/codecs/es8323.c
@@ -758,7 +758,7 @@ static int es8323_i2c_probe(struct i2c_client *i2c_client)
}
static const struct i2c_device_id es8323_i2c_id[] = {
- { "es8323", 0 },
+ { "es8323" },
{ }
};
MODULE_DEVICE_TABLE(i2c, es8323_i2c_id);
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index b06eead7e0f6..066d92b54312 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -911,7 +911,7 @@ static void es8326_jack_detect_handler(struct work_struct *work)
regmap_write(es8326->regmap, ES8326_INT_SOURCE,
(ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
- regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x0d);
queue_delayed_work(system_wq, &es8326->jack_detect_work,
msecs_to_jiffies(400));
es8326->hp = 1;
@@ -1023,7 +1023,7 @@ static void es8326_init(struct snd_soc_component *component)
struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
regmap_write(es8326->regmap, ES8326_RESET, 0x1f);
- regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
+ regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x3E);
regmap_write(es8326->regmap, ES8326_ANA_LP, 0xf0);
usleep_range(10000, 15000);
regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xd9);
diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c
index b24d6472ad5f..a840a2eb92b9 100644
--- a/sound/soc/codecs/madera.c
+++ b/sound/soc/codecs/madera.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
@@ -3965,7 +3966,7 @@ static int madera_enable_fll(struct madera_fll *fll)
}
madera_fll_dbg(fll, "Enabling FLL, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
if (fll->fout < MADERA_FLL_MIN_FOUT ||
fll->fout > MADERA_FLL_MAX_FOUT) {
@@ -4252,7 +4253,7 @@ static int madera_enable_fll_ao(struct madera_fll *fll,
pm_runtime_get_sync(madera->dev);
madera_fll_dbg(fll, "Enabling FLL_AO, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
/* FLL_AO_HOLD must be set before configuring any registers */
regmap_update_bits(fll->madera->regmap,
@@ -4576,7 +4577,7 @@ static int madera_fllhj_enable(struct madera_fll *fll)
pm_runtime_get_sync(madera->dev);
madera_fll_dbg(fll, "Enabling FLL, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
/* FLLn_HOLD must be set before configuring any registers */
regmap_update_bits(fll->madera->regmap,
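The madera hunks above replace open-coded "enabled"/"disabled" ternaries with str_enabled_disabled() from the newly included <linux/string_choices.h>; the helper is just a readability wrapper, essentially:

	static inline const char *str_enabled_disabled(bool v)
	{
		return v ? "enabled" : "disabled";
	}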
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 8915f5250695..37e61d8d4be6 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1731,7 +1731,6 @@ MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
static int max98088_i2c_probe(struct i2c_client *i2c)
{
struct max98088_priv *max98088;
- const struct i2c_device_id *id;
max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
GFP_KERNEL);
@@ -1747,8 +1746,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c)
if (PTR_ERR(max98088->mclk) == -EPROBE_DEFER)
return PTR_ERR(max98088->mclk);
- id = i2c_match_id(max98088_i2c_id, i2c);
- max98088->devtype = id->driver_data;
+ max98088->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98088);
max98088->pdata = i2c->dev.platform_data;
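The max98088 change above, and several conversions that follow, share the same shape: instead of searching the driver's own i2c_device_id table with i2c_match_id(), i2c_get_match_data() returns the per-device data from whichever table (OF, ACPI or I2C) actually matched the device. A condensed sketch of the resulting probe pattern (struct example_priv and its devtype field are illustrative):

	static int example_i2c_probe(struct i2c_client *i2c)
	{
		struct example_priv *priv;

		priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* cast the const void * match data back to an integer id */
		priv->devtype = (uintptr_t)i2c_get_match_data(i2c);
		i2c_set_clientdata(i2c, priv);

		return 0;
	}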
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 2adf744c6526..790e2ae6dc18 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2543,8 +2543,6 @@ MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
static int max98090_i2c_probe(struct i2c_client *i2c)
{
struct max98090_priv *max98090;
- const struct acpi_device_id *acpi_id;
- kernel_ulong_t driver_data = 0;
int ret;
pr_debug("max98090_i2c_probe\n");
@@ -2554,21 +2552,7 @@ static int max98090_i2c_probe(struct i2c_client *i2c)
if (max98090 == NULL)
return -ENOMEM;
- if (ACPI_HANDLE(&i2c->dev)) {
- acpi_id = acpi_match_device(i2c->dev.driver->acpi_match_table,
- &i2c->dev);
- if (!acpi_id) {
- dev_err(&i2c->dev, "No driver data\n");
- return -EINVAL;
- }
- driver_data = acpi_id->driver_data;
- } else {
- const struct i2c_device_id *i2c_id =
- i2c_match_id(max98090_i2c_id, i2c);
- driver_data = i2c_id->driver_data;
- }
-
- max98090->devtype = driver_data;
+ max98090->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98090);
max98090->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 7e525d49328d..cfb63fe69267 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2115,7 +2115,6 @@ static int max98095_i2c_probe(struct i2c_client *i2c)
{
struct max98095_priv *max98095;
int ret;
- const struct i2c_device_id *id;
max98095 = devm_kzalloc(&i2c->dev, sizeof(struct max98095_priv),
GFP_KERNEL);
@@ -2131,8 +2130,7 @@ static int max98095_i2c_probe(struct i2c_client *i2c)
return ret;
}
- id = i2c_match_id(max98095_i2c_id, i2c);
- max98095->devtype = id->driver_data;
+ max98095->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98095);
max98095->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 12540397fd4d..5aaf8c496300 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -368,13 +368,13 @@ static const struct snd_kcontrol_new nau8824_snd_controls[] = {
SOC_ENUM("DAC Oversampling Rate", nau8824_dac_oversampl_enum),
SOC_SINGLE_TLV("Speaker Right DACR Volume",
- NAU8824_REG_CLASSD_GAIN_1, 8, 0x1f, 0, spk_vol_tlv),
+ NAU8824_REG_CLASSD_GAIN_1, 8, 0x19, 0, spk_vol_tlv),
SOC_SINGLE_TLV("Speaker Left DACL Volume",
- NAU8824_REG_CLASSD_GAIN_2, 0, 0x1f, 0, spk_vol_tlv),
+ NAU8824_REG_CLASSD_GAIN_2, 0, 0x19, 0, spk_vol_tlv),
SOC_SINGLE_TLV("Speaker Left DACR Volume",
- NAU8824_REG_CLASSD_GAIN_1, 0, 0x1f, 0, spk_vol_tlv),
+ NAU8824_REG_CLASSD_GAIN_1, 0, 0x19, 0, spk_vol_tlv),
SOC_SINGLE_TLV("Speaker Right DACL Volume",
- NAU8824_REG_CLASSD_GAIN_2, 8, 0x1f, 0, spk_vol_tlv),
+ NAU8824_REG_CLASSD_GAIN_2, 8, 0x19, 0, spk_vol_tlv),
SOC_SINGLE_TLV("Headphone Right DACR Volume",
NAU8824_REG_ATT_PORT0, 8, 0x1f, 0, hp_vol_tlv),
diff --git a/sound/soc/codecs/ntp8835.c b/sound/soc/codecs/ntp8835.c
index 796e1410496f..2cc4c6395f55 100644
--- a/sound/soc/codecs/ntp8835.c
+++ b/sound/soc/codecs/ntp8835.c
@@ -454,7 +454,7 @@ static int ntp8835_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ntp8835_i2c_id[] = {
- { "ntp8835", 0 },
+ { "ntp8835" },
{}
};
MODULE_DEVICE_TABLE(i2c, ntp8835_i2c_id);
diff --git a/sound/soc/codecs/ntp8918.c b/sound/soc/codecs/ntp8918.c
index 0493ab6acbe4..a332893fc51d 100644
--- a/sound/soc/codecs/ntp8918.c
+++ b/sound/soc/codecs/ntp8918.c
@@ -371,7 +371,7 @@ static int ntp8918_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ntp8918_i2c_id[] = {
- { "ntp8918", 0 },
+ { "ntp8918" },
{}
};
MODULE_DEVICE_TABLE(i2c, ntp8918_i2c_id);
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
index a514ebd1b68a..a50f9f6e39c1 100644
--- a/sound/soc/codecs/pcm186x-i2c.c
+++ b/sound/soc/codecs/pcm186x-i2c.c
@@ -33,8 +33,7 @@ MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
static int pcm186x_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(pcm186x_i2c_id, i2c);
- const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+ const enum pcm186x_type type = (uintptr_t)i2c_get_match_data(i2c);
int irq = i2c->irq;
struct regmap *regmap;
diff --git a/sound/soc/codecs/pcm6240.c b/sound/soc/codecs/pcm6240.c
index 5d99877f8839..4ff39e0b95b2 100644
--- a/sound/soc/codecs/pcm6240.c
+++ b/sound/soc/codecs/pcm6240.c
@@ -2059,7 +2059,6 @@ static char *str_to_upper(char *str)
static int pcmdevice_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(pcmdevice_i2c_id, i2c);
struct pcmdevice_priv *pcm_dev;
struct device_node *np;
unsigned int dev_addrs[PCMDEVICE_MAX_I2C_DEVICES];
@@ -2069,7 +2068,7 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c)
if (!pcm_dev)
return -ENOMEM;
- pcm_dev->chip_id = (id != NULL) ? id->driver_data : 0;
+ pcm_dev->chip_id = (uintptr_t)i2c_get_match_data(i2c);
pcm_dev->dev = &i2c->dev;
pcm_dev->client = i2c;
diff --git a/sound/soc/codecs/peb2466.c b/sound/soc/codecs/peb2466.c
index bb9ca6354ae1..a989cfe058f0 100644
--- a/sound/soc/codecs/peb2466.c
+++ b/sound/soc/codecs/peb2466.c
@@ -26,8 +26,7 @@ struct peb2466_lookup {
unsigned int count;
};
-#define PEB2466_TLV_SIZE (sizeof((unsigned int []){TLV_DB_SCALE_ITEM(0, 0, 0)}) / \
- sizeof(unsigned int))
+#define PEB2466_TLV_SIZE ARRAY_SIZE(((unsigned int[]){TLV_DB_SCALE_ITEM(0, 0, 0)}))
struct peb2466_lkup_ctrl {
int reg;
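The PEB2466_TLV_SIZE rewrite keeps the same computation in a tidier form: ARRAY_SIZE() applied to a compound-literal array counts how many unsigned int words the TLV_DB_SCALE_ITEM() expansion occupies, which is what the old sizeof()/sizeof(unsigned int) division worked out by hand. As a generic illustration of the idiom (the macro name here is made up for the example):

	/* number of u32 words an initializer list expands to */
	#define WORDS_IN(...)	ARRAY_SIZE(((unsigned int[]){ __VA_ARGS__ }))

	/* WORDS_IN(1, 2, 3) == 3 */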
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 2b3c0f9e178c..9cb74962161a 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -1091,8 +1091,7 @@ static int rt5514_set_bias_level(struct snd_soc_component *component,
static int rt5514_probe(struct snd_soc_component *component)
{
struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
- struct platform_device *pdev = container_of(component->dev,
- struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(component->dev);
rt5514->mclk = devm_clk_get_optional(component->dev, "mclk");
if (IS_ERR(rt5514->mclk))
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index ff9e14fad0cd..a8820435d1e0 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -186,6 +186,12 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
return -ENODEV;
}
+ regmap_read(rt5682->regmap, RT5682_INT_DEVICE_ID, &val);
+ if (val == 0x6956) {
+ dev_dbg(&i2c->dev, "ALC5682I-VE device\n");
+ rt5682->ve_ic = true;
+ }
+
mutex_init(&rt5682->calibrate_mutex);
rt5682_calibrate(rt5682);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index aa163ec40862..b4d72fc4a44d 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -395,6 +395,7 @@ bool rt5682_volatile_register(struct device *dev, unsigned int reg)
case RT5682_4BTN_IL_CMD_1:
case RT5682_AJD1_CTRL:
case RT5682_HP_CALIB_CTRL_1:
+ case RT5682_INT_DEVICE_ID:
case RT5682_DEVICE_ID:
case RT5682_I2C_MODE:
case RT5682_HP_CALIB_CTRL_10:
@@ -419,6 +420,7 @@ bool rt5682_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT5682_RESET:
+ case RT5682_INT_DEVICE_ID:
case RT5682_VERSION_ID:
case RT5682_VENDOR_ID:
case RT5682_DEVICE_ID:
@@ -3139,7 +3141,10 @@ void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0100);
regmap_write(rt5682->regmap, RT5682_HP_IMP_SENS_CTRL_19, 0x3800);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x3000);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
+ if (rt5682->ve_ic)
+ regmap_write(rt5682->regmap, RT5682_CHOP_ADC, 0x7005);
+ else
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x686c);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0d0d);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_2, 0x0321);
@@ -3168,7 +3173,10 @@ void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x0000);
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
+ if (rt5682->ve_ic)
+ regmap_write(rt5682->regmap, RT5682_CHOP_ADC, 0x2005);
+ else
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0c0c);
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index b2d9e87af259..de43a5d99403 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -22,6 +22,7 @@
/* Info */
#define RT5682_RESET 0x0000
+#define RT5682_INT_DEVICE_ID 0x00f9
#define RT5682_VERSION_ID 0x00fd
#define RT5682_VENDOR_ID 0x00fe
#define RT5682_DEVICE_ID 0x00ff
@@ -1446,6 +1447,7 @@ struct rt5682_priv {
bool hw_init;
bool first_hw_init;
bool is_sdw;
+ bool ve_ic;
#ifdef CONFIG_COMMON_CLK
struct clk_hw dai_clks_hw[RT5682_DAI_NUM_CLKS];
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index ec255ada44e0..cd702574c84b 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -372,47 +372,6 @@ static const struct regmap_config rt715_sdw_regmap = {
.use_single_write = true,
};
-int hda_to_sdw(unsigned int nid, unsigned int verb, unsigned int payload,
- unsigned int *sdw_addr_h, unsigned int *sdw_data_h,
- unsigned int *sdw_addr_l, unsigned int *sdw_data_l)
-{
- unsigned int offset_h, offset_l, e_verb;
-
- if (((verb & 0xff) != 0) || verb == 0xf00) { /* 12 bits command */
- if (verb == 0x7ff) /* special case */
- offset_h = 0;
- else
- offset_h = 0x3000;
-
- if (verb & 0x800) /* get command */
- e_verb = (verb - 0xf00) | 0x80;
- else /* set command */
- e_verb = (verb - 0x700);
-
- *sdw_data_h = payload; /* 7 bits payload */
- *sdw_addr_l = *sdw_data_l = 0;
- } else { /* 4 bits command */
- if ((verb & 0x800) == 0x800) { /* read */
- offset_h = 0x9000;
- offset_l = 0xa000;
- } else { /* write */
- offset_h = 0x7000;
- offset_l = 0x8000;
- }
- e_verb = verb >> 8;
- *sdw_data_h = (payload >> 8); /* 16 bits payload [15:8] */
- *sdw_addr_l = (e_verb << 8) | nid | 0x80; /* 0x80: valid bit */
- *sdw_addr_l += offset_l;
- *sdw_data_l = payload & 0xff;
- }
-
- *sdw_addr_h = (e_verb << 8) | nid;
- *sdw_addr_h += offset_h;
-
- return 0;
-}
-EXPORT_SYMBOL(hda_to_sdw);
-
static int rt715_update_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
diff --git a/sound/soc/codecs/rt715.h b/sound/soc/codecs/rt715.h
index 6e37bf64e12f..a0c56aa1003a 100644
--- a/sound/soc/codecs/rt715.h
+++ b/sound/soc/codecs/rt715.h
@@ -220,8 +220,5 @@ int rt715_io_init(struct device *dev, struct sdw_slave *slave);
int rt715_init(struct device *dev, struct regmap *sdw_regmap,
struct regmap *regmap, struct sdw_slave *slave);
-int hda_to_sdw(unsigned int nid, unsigned int verb, unsigned int payload,
- unsigned int *sdw_addr_h, unsigned int *sdw_data_h,
- unsigned int *sdw_addr_l, unsigned int *sdw_data_l);
int rt715_clock_config(struct device *dev);
#endif /* __RT715_H__ */
diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c
index f2cea6186d98..480bcea48541 100644
--- a/sound/soc/codecs/sma1307.c
+++ b/sound/soc/codecs/sma1307.c
@@ -2011,8 +2011,8 @@ static void sma1307_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id sma1307_i2c_id[] = {
- { "sma1307a", 0 },
- { "sma1307aq", 0 },
+ { "sma1307a" },
+ { "sma1307aq" },
{ }
};
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index 596096466cd4..49c74cba17c7 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -13,8 +13,6 @@
#include "ssm2602.h"
-static const struct i2c_device_id ssm2602_i2c_id[];
-
/*
* ssm2602 2 wire address is determined by GPIO5
* state during powerup.
@@ -23,8 +21,7 @@ static const struct i2c_device_id ssm2602_i2c_id[];
*/
static int ssm2602_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_match_id(ssm2602_i2c_id, client);
- return ssm2602_probe(&client->dev, id->driver_data,
+ return ssm2602_probe(&client->dev, (uintptr_t)i2c_get_match_data(client),
devm_regmap_init_i2c(client, &ssm2602_regmap_config));
}
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index 54561ae598b8..fef7ce39f664 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -731,16 +731,14 @@ static int tas2562_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct tas2562_data *data;
int ret;
- const struct i2c_device_id *id;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- id = i2c_match_id(tas2562_id, client);
data->client = client;
data->dev = &client->dev;
- data->model_id = id->driver_data;
+ data->model_id = (uintptr_t)i2c_get_match_data(client);
tas2562_parse_dt(data);
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index 728bf78ae71f..a730ab6ad4e3 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -489,14 +489,11 @@ static int tas2563_calib_start_put(struct snd_kcontrol *kcontrol,
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(comp);
const int sum = ARRAY_SIZE(tas2563_cali_start_reg);
- int rc = 1;
int i, j;
guard(mutex)(&tas_priv->codec_lock);
- if (tas_priv->chip_id != TAS2563) {
- rc = -1;
- goto out;
- }
+ if (tas_priv->chip_id != TAS2563)
+ return -1;
for (i = 0; i < tas_priv->ndev; i++) {
struct tasdevice *tasdev = tas_priv->tasdevice;
@@ -523,8 +520,8 @@ static int tas2563_calib_start_put(struct snd_kcontrol *kcontrol,
q[j].val, 4);
}
}
-out:
- return rc;
+
+ return 1;
}
static void tas2563_calib_stop_put(struct tasdevice_priv *tas_priv)
@@ -576,7 +573,7 @@ static int tasdev_cali_data_put(struct snd_kcontrol *kcontrol,
struct cali_reg *p = &cali_data->cali_reg_array;
unsigned char *src = ucontrol->value.bytes.data;
unsigned char *dst = cali_data->data;
- int rc = 1, i = 0;
+ int i = 0;
int j;
guard(mutex)(&priv->codec_lock);
@@ -605,7 +602,7 @@ static int tasdev_cali_data_put(struct snd_kcontrol *kcontrol,
i += 3;
memcpy(dst, &src[i], cali_data->total_sz);
- return rc;
+ return 1;
}
static int tas2781_latch_reg_get(struct snd_kcontrol *kcontrol,
@@ -1115,25 +1112,21 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
char *conf_name, *prog_name;
int nr_controls = 4;
int mix_index = 0;
- int ret;
/* Alloc kcontrol via devm_kzalloc, which don't manually
* free the kcontrol
*/
dsp_ctrls = devm_kcalloc(tas_priv->dev, nr_controls,
sizeof(dsp_ctrls[0]), GFP_KERNEL);
- if (!dsp_ctrls) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!dsp_ctrls)
+ return -ENOMEM;
/* Create mixer items for selecting the active Program and Config */
prog_name = devm_kstrdup(tas_priv->dev, "Speaker Program Id",
GFP_KERNEL);
- if (!prog_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!prog_name)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = prog_name;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_programs;
@@ -1143,10 +1136,9 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
conf_name = devm_kstrdup(tas_priv->dev, "Speaker Config Id",
GFP_KERNEL);
- if (!conf_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!conf_name)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = conf_name;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_configurations;
@@ -1156,10 +1148,9 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
active_dev_num = devm_kstrdup(tas_priv->dev, "Activate Tasdevice Num",
GFP_KERNEL);
- if (!active_dev_num) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!active_dev_num)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = active_dev_num;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_active_num;
@@ -1168,21 +1159,17 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
mix_index++;
chip_id = devm_kstrdup(tas_priv->dev, "Tasdevice Chip Id", GFP_KERNEL);
- if (!chip_id) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!chip_id)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = chip_id;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_chip_id;
dsp_ctrls[mix_index].get = tasdevice_get_chip_id;
mix_index++;
- ret = snd_soc_add_component_controls(tas_priv->codec, dsp_ctrls,
+ return snd_soc_add_component_controls(tas_priv->codec, dsp_ctrls,
nr_controls < mix_index ? nr_controls : mix_index);
-
-out:
- return ret;
}
static int tasdevice_create_cali_ctrls(struct tasdevice_priv *priv)
@@ -1469,7 +1456,6 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
unsigned int slot_width;
unsigned int fsrate;
int bclk_rate;
- int rc = 0;
fsrate = params_rate(params);
switch (fsrate) {
@@ -1479,8 +1465,7 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
default:
dev_err(tas_priv->dev, "%s: incorrect sample rate = %u\n",
__func__, fsrate);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
slot_width = params_width(params);
@@ -1493,20 +1478,17 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
default:
dev_err(tas_priv->dev, "%s: incorrect slot width = %u\n",
__func__, slot_width);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
bclk_rate = snd_soc_params_to_bclk(params);
if (bclk_rate < 0) {
dev_err(tas_priv->dev, "%s: incorrect bclk rate = %d\n",
__func__, bclk_rate);
- rc = bclk_rate;
- goto out;
+ return bclk_rate;
}
-out:
- return rc;
+ return 0;
}
static int tasdevice_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -1663,7 +1645,6 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
static int tasdevice_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(tasdevice_id, i2c);
const struct acpi_device_id *acpi_id;
struct tasdevice_priv *tas_priv;
int ret;
@@ -1685,7 +1666,7 @@ static int tasdevice_i2c_probe(struct i2c_client *i2c)
tas_priv->chip_id = acpi_id->driver_data;
tas_priv->isacpi = true;
} else {
- tas_priv->chip_id = id ? id->driver_data : 0;
+ tas_priv->chip_id = (uintptr_t)i2c_get_match_data(i2c);
tas_priv->isacpi = false;
}
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index 6dd6c0896eff..f0361822061f 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -43,7 +43,6 @@ static const char * const tas5720_supply_names[] = {
struct tas5720_data {
struct snd_soc_component *component;
struct regmap *regmap;
- struct i2c_client *tas5720_client;
enum tas572x_type devtype;
struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
struct delayed_work fault_check_work;
@@ -729,7 +728,6 @@ static int tas5720_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct tas5720_data *data;
const struct regmap_config *regmap_config;
- const struct i2c_device_id *id;
int ret;
int i;
@@ -737,11 +735,9 @@ static int tas5720_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- id = i2c_match_id(tas5720_id, client);
- data->tas5720_client = client;
- data->devtype = id->driver_data;
+ data->devtype = (uintptr_t)i2c_get_match_data(client);
- switch (id->driver_data) {
+ switch (data->devtype) {
case TAS5720:
regmap_config = &tas5720_regmap_config;
break;
@@ -774,7 +770,7 @@ static int tas5720_probe(struct i2c_client *client)
dev_set_drvdata(dev, data);
- switch (id->driver_data) {
+ switch (data->devtype) {
case TAS5720:
ret = devm_snd_soc_register_component(&client->dev,
&soc_component_dev_tas5720,
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index 868e8a91e05b..1a50ff675244 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -1401,7 +1401,6 @@ static int adc3xxx_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct adc3xxx *adc3xxx = NULL;
- const struct i2c_device_id *id;
int ret;
adc3xxx = devm_kzalloc(dev, sizeof(struct adc3xxx), GFP_KERNEL);
@@ -1466,8 +1465,7 @@ static int adc3xxx_i2c_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, adc3xxx);
- id = i2c_match_id(adc3xxx_i2c_id, i2c);
- adc3xxx->type = id->driver_data;
+ adc3xxx->type = (uintptr_t)i2c_get_match_data(i2c);
/* Reset codec chip */
gpiod_set_value_cansleep(adc3xxx->rst_pin, 1);
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index d81ab9c25c29..4b3f9128ec37 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1736,12 +1736,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c)
{
struct aic31xx_priv *aic31xx;
unsigned int micbias_value = MICBIAS_2_0V;
- const struct i2c_device_id *id = i2c_match_id(aic31xx_i2c_id, i2c);
int i, ret;
- dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
- id->name, (int)id->driver_data);
-
aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
if (!aic31xx)
return -ENOMEM;
@@ -1758,7 +1754,7 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c)
aic31xx->dev = &i2c->dev;
aic31xx->irq = i2c->irq;
- aic31xx->codec_type = id->driver_data;
+ aic31xx->codec_type = (uintptr_t)i2c_get_match_data(i2c);
dev_set_drvdata(aic31xx->dev, aic31xx);
diff --git a/sound/soc/codecs/tlv320aic3x-i2c.c b/sound/soc/codecs/tlv320aic3x-i2c.c
index bb33fd3dfb4f..0b585925c1ac 100644
--- a/sound/soc/codecs/tlv320aic3x-i2c.c
+++ b/sound/soc/codecs/tlv320aic3x-i2c.c
@@ -31,14 +31,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c)
{
struct regmap *regmap;
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(aic3x_i2c_id, i2c);
config = aic3x_regmap;
config.reg_bits = 8;
config.val_bits = 8;
regmap = devm_regmap_init_i2c(i2c, &config);
- return aic3x_probe(&i2c->dev, regmap, id->driver_data);
+ return aic3x_probe(&i2c->dev, regmap, (uintptr_t)i2c_get_match_data(i2c));
}
static void aic3x_i2c_remove(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 5bc486283fde..b5472fa1bdda 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -222,7 +222,6 @@ static int tpa6130a2_probe(struct i2c_client *client)
struct tpa6130a2_data *data;
struct tpa6130a2_platform_data *pdata = client->dev.platform_data;
struct device_node *np = client->dev.of_node;
- const struct i2c_device_id *id;
const char *regulator;
unsigned int version;
int ret;
@@ -251,8 +250,7 @@ static int tpa6130a2_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
- id = i2c_match_id(tpa6130a2_id, client);
- data->id = id->driver_data;
+ data->id = (uintptr_t)i2c_get_match_data(client);
if (data->power_gpio >= 0) {
ret = devm_gpio_request(dev, data->power_gpio,
diff --git a/sound/soc/codecs/uda1342.c b/sound/soc/codecs/uda1342.c
index 3d49a7869948..b0b29012842d 100644
--- a/sound/soc/codecs/uda1342.c
+++ b/sound/soc/codecs/uda1342.c
@@ -319,7 +319,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(uda1342_pm_ops,
uda1342_suspend, uda1342_resume, NULL);
static const struct i2c_device_id uda1342_i2c_id[] = {
- { "uda1342", 0 },
+ { "uda1342" },
{ }
};
MODULE_DEVICE_TABLE(i2c, uda1342_i2c_id);
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index a2521e16c099..7cef43bb2a88 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -159,6 +159,8 @@
{"AMIC MUX" #id, "ADC5", "ADC5"}, \
{"AMIC MUX" #id, "ADC6", "ADC6"}
+#define NUM_CODEC_DAIS 7
+
enum {
WCD9335_RX0 = 0,
WCD9335_RX1,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 829bf055622a..aef82532f8cf 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2196,18 +2196,7 @@ static int wm8904_i2c_probe(struct i2c_client *i2c)
return ret;
}
- if (i2c->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(wm8904_of_match, i2c->dev.of_node);
- if (match == NULL)
- return -EINVAL;
- wm8904->devtype = (uintptr_t)match->data;
- } else {
- const struct i2c_device_id *id =
- i2c_match_id(wm8904_i2c_id, i2c);
- wm8904->devtype = id->driver_data;
- }
+ wm8904->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, wm8904);
wm8904->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 8606e0752a60..da00db5b0172 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1166,12 +1166,10 @@ static struct spi_driver wm8985_spi_driver = {
#endif
#if IS_ENABLED(CONFIG_I2C)
-static const struct i2c_device_id wm8985_i2c_id[];
static int wm8985_i2c_probe(struct i2c_client *i2c)
{
struct wm8985_priv *wm8985;
- const struct i2c_device_id *id = i2c_match_id(wm8985_i2c_id, i2c);
int ret;
wm8985 = devm_kzalloc(&i2c->dev, sizeof *wm8985, GFP_KERNEL);
@@ -1180,7 +1178,7 @@ static int wm8985_i2c_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, wm8985);
- wm8985->dev_type = id->driver_data;
+ wm8985->dev_type = (uintptr_t)i2c_get_match_data(i2c);
wm8985->regmap = devm_regmap_init_i2c(i2c, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 678540b78280..c4cf3cff58de 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -6,8 +6,11 @@ comment "Common SoC Audio options for Freescale CPUs:"
config SND_SOC_FSL_ASRC
tristate "Asynchronous Sample Rate Converter (ASRC) module support"
depends on HAS_DMA
+ select DMA_SHARED_BUFFER
select REGMAP_MMIO
select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_COMPRESS_ACCEL
+ select SND_COMPRESS_OFFLOAD
help
Say Y if you want to add Asynchronous Sample Rate Converter (ASRC)
support for the Freescale CPUs.
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index ad97244b5cc3..d656a9ab54e3 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SND_SOC_P1022_RDK) += snd-soc-p1022-rdk.o
# Freescale SSI/DMA/SAI/SPDIF Support
snd-soc-fsl-audmix-y := fsl_audmix.o
snd-soc-fsl-asoc-card-y := fsl-asoc-card.o
-snd-soc-fsl-asrc-y := fsl_asrc.o fsl_asrc_dma.o
+snd-soc-fsl-asrc-y := fsl_asrc.o fsl_asrc_dma.o fsl_asrc_m2m.o
snd-soc-fsl-lpc3xxx-y := lpc3xxx-pcm.o lpc3xxx-i2s.o
snd-soc-fsl-sai-y := fsl_sai.o
snd-soc-fsl-ssi-y := fsl_ssi.o
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 02e1594e8223..2bad9cb1daaf 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -932,7 +932,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
if (!asrc_pdev)
priv->card.num_dapm_routes /= 2;
- if (of_property_read_bool(np, "audio-routing")) {
+ if (of_property_present(np, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(&priv->card, "audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index bd5c46d763c0..677529916dc0 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1063,6 +1063,139 @@ static int fsl_asrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
return REG_ASRDx(dir, index);
}
+/* Get the number of samples left in the output FIFO */
+static unsigned int fsl_asrc_get_output_fifo_size(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 val;
+
+ regmap_read(asrc->regmap, REG_ASRFST(index), &val);
+
+ val &= ASRFSTi_OUTPUT_FIFO_MASK;
+
+ return val >> ASRFSTi_OUTPUT_FIFO_SHIFT;
+}
+
+static int fsl_asrc_m2m_prepare(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc_pair_priv *pair_priv = pair->private;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ struct asrc_config config;
+ int ret;
+
+ /* fill config */
+ config.pair = pair->index;
+ config.channel_num = pair->channels;
+ config.input_sample_rate = pair->rate[IN];
+ config.output_sample_rate = pair->rate[OUT];
+ config.input_format = pair->sample_format[IN];
+ config.output_format = pair->sample_format[OUT];
+ config.inclk = INCLK_NONE;
+ config.outclk = OUTCLK_ASRCK1_CLK;
+
+ pair_priv->config = &config;
+ ret = fsl_asrc_config_pair(pair, true);
+ if (ret) {
+ dev_err(dev, "failed to config pair: %d\n", ret);
+ return ret;
+ }
+
+ pair->first_convert = 1;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_start(struct fsl_asrc_pair *pair)
+{
+ if (pair->first_convert) {
+ fsl_asrc_start_pair(pair);
+ pair->first_convert = 0;
+ }
+ /*
+ * Clear DMA request during the stall state of ASRC:
+ * During the STALL state, the data remaining in the input FIFO never
+ * drops below the input threshold and the output FIFO never exceeds
+ * the output threshold, so both DMA requests are cleared.
+ */
+ fsl_asrc_set_watermarks(pair, ASRC_FIFO_THRESHOLD_MIN,
+ ASRC_FIFO_THRESHOLD_MAX);
+
+ /* Update the real input threshold to raise DMA request */
+ fsl_asrc_set_watermarks(pair, ASRC_M2M_INPUTFIFO_WML,
+ ASRC_M2M_OUTPUTFIFO_WML);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_stop(struct fsl_asrc_pair *pair)
+{
+ if (!pair->first_convert) {
+ fsl_asrc_stop_pair(pair);
+ pair->first_convert = 1;
+ }
+
+ return 0;
+}
+
+/* calculate the capture (output) data length from the input data length and the sample rates */
+static int fsl_asrc_m2m_calc_out_len(struct fsl_asrc_pair *pair, int input_buffer_length)
+{
+ unsigned int in_width, out_width;
+ unsigned int channels = pair->channels;
+ unsigned int in_samples, out_samples;
+ unsigned int out_length;
+
+ in_width = snd_pcm_format_physical_width(pair->sample_format[IN]) / 8;
+ out_width = snd_pcm_format_physical_width(pair->sample_format[OUT]) / 8;
+
+ in_samples = input_buffer_length / in_width / channels;
+ out_samples = pair->rate[OUT] * in_samples / pair->rate[IN];
+ out_length = (out_samples - ASRC_OUTPUT_LAST_SAMPLE) * out_width * channels;
+
+ return out_length;
+}
+
+static int fsl_asrc_m2m_get_maxburst(u8 dir, struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
+ int wml = (dir == IN) ? ASRC_M2M_INPUTFIFO_WML : ASRC_M2M_OUTPUTFIFO_WML;
+
+ if (!asrc_priv->soc->use_edma)
+ return wml * pair->channels;
+ else
+ return 1;
+}
+
+static int fsl_asrc_m2m_get_cap(struct fsl_asrc_m2m_cap *cap)
+{
+ cap->fmt_in = FSL_ASRC_FORMATS;
+ cap->fmt_out = FSL_ASRC_FORMATS | SNDRV_PCM_FMTBIT_S8;
+
+ cap->rate_in = supported_asrc_rate;
+ cap->rate_in_count = ARRAY_SIZE(supported_asrc_rate);
+ cap->rate_out = supported_asrc_rate;
+ cap->rate_out_count = ARRAY_SIZE(supported_asrc_rate);
+ cap->chan_min = 1;
+ cap->chan_max = 10;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_pair_resume(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ int i;
+
+ for (i = 0; i < pair->channels * 4; i++)
+ regmap_write(asrc->regmap, REG_ASRDI(pair->index), 0);
+
+ pair->first_convert = 1;
+ return 0;
+}
+
static int fsl_asrc_runtime_resume(struct device *dev);
static int fsl_asrc_runtime_suspend(struct device *dev);
@@ -1147,6 +1280,15 @@ static int fsl_asrc_probe(struct platform_device *pdev)
asrc->get_fifo_addr = fsl_asrc_get_fifo_addr;
asrc->pair_priv_size = sizeof(struct fsl_asrc_pair_priv);
+ asrc->m2m_prepare = fsl_asrc_m2m_prepare;
+ asrc->m2m_start = fsl_asrc_m2m_start;
+ asrc->m2m_stop = fsl_asrc_m2m_stop;
+ asrc->get_output_fifo_size = fsl_asrc_get_output_fifo_size;
+ asrc->m2m_calc_out_len = fsl_asrc_m2m_calc_out_len;
+ asrc->m2m_get_maxburst = fsl_asrc_m2m_get_maxburst;
+ asrc->m2m_pair_resume = fsl_asrc_m2m_pair_resume;
+ asrc->m2m_get_cap = fsl_asrc_m2m_get_cap;
+
if (of_device_is_compatible(np, "fsl,imx35-asrc")) {
asrc_priv->clk_map[IN] = input_clk_map_imx35;
asrc_priv->clk_map[OUT] = output_clk_map_imx35;
@@ -1242,6 +1384,12 @@ static int fsl_asrc_probe(struct platform_device *pdev)
goto err_pm_get_sync;
}
+ ret = fsl_asrc_m2m_init(asrc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init m2m device %d\n", ret);
+ return ret;
+ }
+
return 0;
err_pm_get_sync:
@@ -1254,6 +1402,10 @@ err_pm_disable:
static void fsl_asrc_remove(struct platform_device *pdev)
{
+ struct fsl_asrc *asrc = dev_get_drvdata(&pdev->dev);
+
+ fsl_asrc_m2m_exit(asrc);
+
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
fsl_asrc_runtime_suspend(&pdev->dev);
@@ -1355,10 +1507,29 @@ static int fsl_asrc_runtime_suspend(struct device *dev)
return 0;
}
+static int fsl_asrc_suspend(struct device *dev)
+{
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ int ret;
+
+ fsl_asrc_m2m_suspend(asrc);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_asrc_resume(struct device *dev)
+{
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ fsl_asrc_m2m_resume(asrc);
+ return ret;
+}
+
static const struct dev_pm_ops fsl_asrc_pm = {
- SET_RUNTIME_PM_OPS(fsl_asrc_runtime_suspend, fsl_asrc_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ RUNTIME_PM_OPS(fsl_asrc_runtime_suspend, fsl_asrc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fsl_asrc_suspend, fsl_asrc_resume)
};
static const struct fsl_asrc_soc_data fsl_asrc_imx35_data = {
@@ -1396,7 +1567,7 @@ static struct platform_driver fsl_asrc_driver = {
.driver = {
.name = "fsl-asrc",
.of_match_table = fsl_asrc_ids,
- .pm = &fsl_asrc_pm,
+ .pm = pm_ptr(&fsl_asrc_pm),
},
};
module_platform_driver(fsl_asrc_driver);
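To make the new fsl_asrc_m2m_calc_out_len() estimate concrete, a worked example with assumed numbers: for S16_LE stereo at 48 kHz in and 16 kHz out with a 3840-byte input buffer, in_width = out_width = 2 bytes, so in_samples = 3840 / 2 / 2 = 960, out_samples = 16000 * 960 / 48000 = 320, and the reserved capture length is (320 - ASRC_OUTPUT_LAST_SAMPLE) * 2 * 2 bytes. Any samples still sitting in the output FIFO when the DMA completes are drained separately by asrc_read_last_fifo() in the m2m code added below.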
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 86d2422ad606..1c492eb237f5 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -12,6 +12,8 @@
#include "fsl_asrc_common.h"
+#define ASRC_M2M_INPUTFIFO_WML 0x4
+#define ASRC_M2M_OUTPUTFIFO_WML 0x2
#define ASRC_DMA_BUFFER_NUM 2
#define ASRC_INPUTFIFO_THRESHOLD 32
#define ASRC_OUTPUTFIFO_THRESHOLD 32
diff --git a/sound/soc/fsl/fsl_asrc_common.h b/sound/soc/fsl/fsl_asrc_common.h
index 7e1c13ca37f1..0cd595b0f629 100644
--- a/sound/soc/fsl/fsl_asrc_common.h
+++ b/sound/soc/fsl/fsl_asrc_common.h
@@ -22,6 +22,26 @@ enum asrc_pair_index {
#define PAIR_CTX_NUM 0x4
/**
+ * struct fsl_asrc_m2m_cap - capability data
+ * @fmt_in: input sample format
+ * @fmt_out: output sample format
+ * @chan_min: minimum channel number
+ * @chan_max: maximum channel number
+ * @rate_in: table of supported input sample rates
+ * @rate_in_count: number of entries in @rate_in
+ * @rate_out: table of supported output sample rates
+ * @rate_out_count: number of entries in @rate_out
+ */
+struct fsl_asrc_m2m_cap {
+ u64 fmt_in;
+ u64 fmt_out;
+ int chan_min;
+ int chan_max;
+ const unsigned int *rate_in;
+ int rate_in_count;
+ const unsigned int *rate_out;
+ int rate_out_count;
+};
+
+/**
* fsl_asrc_pair: ASRC Pair common data
*
* @asrc: pointer to its parent module
@@ -34,6 +54,14 @@ enum asrc_pair_index {
* @pos: hardware pointer position
* @req_dma_chan: flag to release dev_to_dev chan
* @private: pair private area
+ * @complete: dma task complete
+ * @sample_format: format of m2m
+ * @rate: rate of m2m
+ * @buf_len: buffer length of m2m
+ * @dma_buffer: buffer pointers
+ * @first_convert: start of conversion
+ * @ratio_mod_flag: flag for new ratio modifier
+ * @ratio_mod: ratio modification
*/
struct fsl_asrc_pair {
struct fsl_asrc *asrc;
@@ -49,6 +77,16 @@ struct fsl_asrc_pair {
bool req_dma_chan;
void *private;
+
+ /* used for m2m */
+ struct completion complete[2];
+ snd_pcm_format_t sample_format[2];
+ unsigned int rate[2];
+ unsigned int buf_len[2];
+ struct snd_dma_buffer dma_buffer[2];
+ unsigned int first_convert;
+ bool ratio_mod_flag;
+ unsigned int ratio_mod;
};
/**
@@ -62,6 +100,7 @@ struct fsl_asrc_pair {
* @mem_clk: clock source to access register
* @ipg_clk: clock source to drive peripheral
* @spba_clk: SPBA clock (optional, depending on SoC design)
+ * @card: compress sound card
* @lock: spin lock for resource protection
* @pair: pair pointers
* @channel_avail: non-occupied channel numbers
@@ -72,6 +111,17 @@ struct fsl_asrc_pair {
* @request_pair: function pointer
* @release_pair: function pointer
* @get_fifo_addr: function pointer
+ * @m2m_get_cap: function pointer
+ * @m2m_prepare: function pointer
+ * @m2m_start: function pointer
+ * @m2m_unprepare: function pointer
+ * @m2m_stop: function pointer
+ * @m2m_calc_out_len: function pointer
+ * @m2m_get_maxburst: function pointer
+ * @m2m_pair_suspend: function pointer
+ * @m2m_pair_resume: function pointer
+ * @m2m_set_ratio_mod: function pointer
+ * @get_output_fifo_size: function pointer
* @pair_priv_size: size of pair private struct.
* @private: private data structure
*/
@@ -84,6 +134,7 @@ struct fsl_asrc {
struct clk *mem_clk;
struct clk *ipg_clk;
struct clk *spba_clk;
+ struct snd_card *card;
spinlock_t lock; /* spin lock for resource protection */
struct fsl_asrc_pair *pair[PAIR_CTX_NUM];
@@ -97,6 +148,20 @@ struct fsl_asrc {
int (*request_pair)(int channels, struct fsl_asrc_pair *pair);
void (*release_pair)(struct fsl_asrc_pair *pair);
int (*get_fifo_addr)(u8 dir, enum asrc_pair_index index);
+ int (*m2m_get_cap)(struct fsl_asrc_m2m_cap *cap);
+
+ int (*m2m_prepare)(struct fsl_asrc_pair *pair);
+ int (*m2m_start)(struct fsl_asrc_pair *pair);
+ int (*m2m_unprepare)(struct fsl_asrc_pair *pair);
+ int (*m2m_stop)(struct fsl_asrc_pair *pair);
+
+ int (*m2m_calc_out_len)(struct fsl_asrc_pair *pair, int input_buffer_length);
+ int (*m2m_get_maxburst)(u8 dir, struct fsl_asrc_pair *pair);
+ int (*m2m_pair_suspend)(struct fsl_asrc_pair *pair);
+ int (*m2m_pair_resume)(struct fsl_asrc_pair *pair);
+ int (*m2m_set_ratio_mod)(struct fsl_asrc_pair *pair, int val);
+
+ unsigned int (*get_output_fifo_size)(struct fsl_asrc_pair *pair);
size_t pair_priv_size;
void *private;
@@ -105,4 +170,9 @@ struct fsl_asrc {
#define DRV_NAME "fsl-asrc-dai"
extern struct snd_soc_component_driver fsl_asrc_component;
+int fsl_asrc_m2m_init(struct fsl_asrc *asrc);
+void fsl_asrc_m2m_exit(struct fsl_asrc *asrc);
+int fsl_asrc_m2m_resume(struct fsl_asrc *asrc);
+int fsl_asrc_m2m_suspend(struct fsl_asrc *asrc);
+
#endif /* _FSL_ASRC_COMMON_H */
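Taken together, the new m2m hooks give the common layer (fsl_asrc_m2m.c, added below) a fixed calling order per conversion. A simplified sketch of that flow, with error handling and the DMA plumbing omitted (the exact call sites live in the compress-accel callbacks):

	asrc->m2m_get_cap(&cap);		/* advertise supported rates/formats/channels */
	asrc->m2m_prepare(pair);		/* program the pair for the in/out rate and format */

	out_len = asrc->m2m_calc_out_len(pair, in_len);	/* size the capture DMA */
	/* submit input and output DMA descriptors ... */
	asrc->m2m_start(pair);			/* kick the hardware */
	/* wait for both DMA completions, then drain the FIFO leftovers */

	asrc->m2m_stop(pair);
	if (asrc->m2m_unprepare)
		asrc->m2m_unprepare(pair);	/* optional; not set by fsl_asrc */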
diff --git a/sound/soc/fsl/fsl_asrc_m2m.c b/sound/soc/fsl/fsl_asrc_m2m.c
new file mode 100644
index 000000000000..f46881f71e43
--- /dev/null
+++ b/sound/soc/fsl/fsl_asrc_m2m.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+// Copyright (C) 2019-2024 NXP
+//
+// Freescale ASRC Memory to Memory (M2M) driver
+
+#include <linux/dma/imx-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <sound/asound.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+
+#include "fsl_asrc_common.h"
+
+#define DIR_STR(dir) ((dir) == IN ? "in" : "out")
+
+#define ASRC_xPUT_DMA_CALLBACK(dir) \
+ (((dir) == IN) ? asrc_input_dma_callback \
+ : asrc_output_dma_callback)
+
+/* Maximum output and capture buffer size */
+#define ASRC_M2M_BUFFER_SIZE (512 * 1024)
+
+/* Maximum output and capture period size */
+#define ASRC_M2M_PERIOD_SIZE (48 * 1024)
+
+/* dma complete callback */
+static void asrc_input_dma_callback(void *data)
+{
+ struct fsl_asrc_pair *pair = (struct fsl_asrc_pair *)data;
+
+ complete(&pair->complete[IN]);
+}
+
+/* dma complete callback */
+static void asrc_output_dma_callback(void *data)
+{
+ struct fsl_asrc_pair *pair = (struct fsl_asrc_pair *)data;
+
+ complete(&pair->complete[OUT]);
+}
+
+/**
+ * asrc_read_last_fifo: read all the remaining data from FIFO
+ * @pair: Structure pointer of fsl_asrc_pair
+ * @dma_vaddr: virtual address of capture buffer
+ * @length: payload length of capture buffer
+ */
+static void asrc_read_last_fifo(struct fsl_asrc_pair *pair, void *dma_vaddr, u32 *length)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 i, reg, size, t_size = 0, width;
+ u32 *reg32 = NULL;
+ u16 *reg16 = NULL;
+ u8 *reg24 = NULL;
+
+ width = snd_pcm_format_physical_width(pair->sample_format[OUT]);
+ if (width == 32)
+ reg32 = dma_vaddr + *length;
+ else if (width == 16)
+ reg16 = dma_vaddr + *length;
+ else
+ reg24 = dma_vaddr + *length;
+retry:
+ size = asrc->get_output_fifo_size(pair);
+ if (size + *length > ASRC_M2M_BUFFER_SIZE)
+ goto end;
+
+ for (i = 0; i < size * pair->channels; i++) {
+ regmap_read(asrc->regmap, asrc->get_fifo_addr(OUT, index), &reg);
+ if (reg32) {
+ *reg32++ = reg;
+ } else if (reg16) {
+ *reg16++ = (u16)reg;
+ } else {
+ *reg24++ = (u8)reg;
+ *reg24++ = (u8)(reg >> 8);
+ *reg24++ = (u8)(reg >> 16);
+ }
+ }
+ t_size += size;
+
+ /* In case there is data left in FIFO */
+ if (size)
+ goto retry;
+end:
+ /* Update payload length */
+ if (reg32)
+ *length += t_size * pair->channels * 4;
+ else if (reg16)
+ *length += t_size * pair->channels * 2;
+ else
+ *length += t_size * pair->channels * 3;
+}
+
+/* config dma channel */
+static int asrc_dmaconfig(struct fsl_asrc_pair *pair,
+ struct dma_chan *chan,
+ u32 dma_addr, dma_addr_t buf_addr, u32 buf_len,
+ int dir, int width)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ struct dma_slave_config slave_config;
+ enum dma_slave_buswidth buswidth;
+ unsigned int sg_len, max_period_size;
+ struct scatterlist *sg;
+ int ret, i;
+
+ switch (width) {
+ case 8:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 24:
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ break;
+ case 32:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ dev_err(dev, "invalid word width\n");
+ return -EINVAL;
+ }
+
+ memset(&slave_config, 0, sizeof(slave_config));
+ if (dir == IN) {
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = dma_addr;
+ slave_config.dst_addr_width = buswidth;
+ slave_config.dst_maxburst = asrc->m2m_get_maxburst(IN, pair);
+ } else {
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = dma_addr;
+ slave_config.src_addr_width = buswidth;
+ slave_config.src_maxburst = asrc->m2m_get_maxburst(OUT, pair);
+ }
+
+ ret = dmaengine_slave_config(chan, &slave_config);
+ if (ret) {
+ dev_err(dev, "failed to config dmaengine for %s task: %d\n",
+ DIR_STR(dir), ret);
+ return -EINVAL;
+ }
+
+ max_period_size = rounddown(ASRC_M2M_PERIOD_SIZE, width * pair->channels / 8);
+ /* scatter gather mode */
+ sg_len = buf_len / max_period_size;
+ if (buf_len % max_period_size)
+ sg_len += 1;
+
+ sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ sg_init_table(sg, sg_len);
+ for (i = 0; i < (sg_len - 1); i++) {
+ sg_dma_address(&sg[i]) = buf_addr + i * max_period_size;
+ sg_dma_len(&sg[i]) = max_period_size;
+ }
+ sg_dma_address(&sg[i]) = buf_addr + i * max_period_size;
+ sg_dma_len(&sg[i]) = buf_len - i * max_period_size;
+
+ pair->desc[dir] = dmaengine_prep_slave_sg(chan, sg, sg_len,
+ slave_config.direction,
+ DMA_PREP_INTERRUPT);
+ kfree(sg);
+ if (!pair->desc[dir]) {
+ dev_err(dev, "failed to prepare dmaengine for %s task\n", DIR_STR(dir));
+ return -EINVAL;
+ }
+
+ pair->desc[dir]->callback = ASRC_xPUT_DMA_CALLBACK(dir);
+ pair->desc[dir]->callback_param = pair;
+
+ return 0;
+}
+
+/* main function of converter */
+static int asrc_m2m_device_run(struct fsl_asrc_pair *pair, struct snd_compr_task_runtime *task)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ enum asrc_pair_index index = pair->index;
+ struct snd_dma_buffer *src_buf, *dst_buf;
+ unsigned int in_buf_len;
+ unsigned int out_dma_len;
+ unsigned int width;
+ u32 fifo_addr;
+ int ret = 0;
+
+ /* set ratio mod */
+ if (asrc->m2m_set_ratio_mod) {
+ if (pair->ratio_mod_flag) {
+ asrc->m2m_set_ratio_mod(pair, pair->ratio_mod);
+ pair->ratio_mod_flag = false;
+ }
+ }
+
+ src_buf = &pair->dma_buffer[IN];
+ dst_buf = &pair->dma_buffer[OUT];
+
+ width = snd_pcm_format_physical_width(pair->sample_format[IN]);
+ fifo_addr = asrc->paddr + asrc->get_fifo_addr(IN, index);
+
+ in_buf_len = task->input_size;
+
+ if (in_buf_len < width * pair->channels / 8 ||
+ in_buf_len > ASRC_M2M_BUFFER_SIZE ||
+ in_buf_len % (width * pair->channels / 8)) {
+ dev_err(dev, "out buffer size is error: [%d]\n", in_buf_len);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* dma config for output dma channel */
+ ret = asrc_dmaconfig(pair,
+ pair->dma_chan[IN],
+ fifo_addr,
+ src_buf->addr,
+ in_buf_len, IN, width);
+ if (ret) {
+ dev_err(dev, "out dma config error\n");
+ goto end;
+ }
+
+ width = snd_pcm_format_physical_width(pair->sample_format[OUT]);
+ fifo_addr = asrc->paddr + asrc->get_fifo_addr(OUT, index);
+ out_dma_len = asrc->m2m_calc_out_len(pair, in_buf_len);
+ if (out_dma_len > 0 && out_dma_len <= ASRC_M2M_BUFFER_SIZE) {
+ /* dma config for capture dma channel */
+ ret = asrc_dmaconfig(pair,
+ pair->dma_chan[OUT],
+ fifo_addr,
+ dst_buf->addr,
+ out_dma_len, OUT, width);
+ if (ret) {
+ dev_err(dev, "cap dma config error\n");
+ goto end;
+ }
+ } else if (out_dma_len > ASRC_M2M_BUFFER_SIZE) {
+ dev_err(dev, "cap buffer size error\n");
+ ret = -EINVAL;
+ goto end;
+ }
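+ /* out_dma_len may be zero when the accumulated input is not yet enough for one output sample; the OUT DMA is skipped in that case */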
+
+ reinit_completion(&pair->complete[IN]);
+ reinit_completion(&pair->complete[OUT]);
+
+ /* Submit DMA request */
+ dmaengine_submit(pair->desc[IN]);
+ dma_async_issue_pending(pair->desc[IN]->chan);
+ if (out_dma_len > 0) {
+ dmaengine_submit(pair->desc[OUT]);
+ dma_async_issue_pending(pair->desc[OUT]->chan);
+ }
+
+ asrc->m2m_start(pair);
+
+ if (!wait_for_completion_interruptible_timeout(&pair->complete[IN], 10 * HZ)) {
+ dev_err(dev, "out DMA task timeout\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ if (out_dma_len > 0) {
+ if (!wait_for_completion_interruptible_timeout(&pair->complete[OUT], 10 * HZ)) {
+ dev_err(dev, "cap DMA task timeout\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+ }
+
+ /* Read the remaining words left in the output FIFO */
+ asrc_read_last_fifo(pair, dst_buf->area, &out_dma_len);
+ /* update payload length for capture */
+ task->output_size = out_dma_len;
+end:
+ return ret;
+}
+
+static int fsl_asrc_m2m_comp_open(struct snd_compr_stream *stream)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct device *dev = &asrc->pdev->dev;
+ struct fsl_asrc_pair *pair;
+ int size, ret;
+
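+ /* Allocate the pair together with its SoC-specific private data in one allocation */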
+ pair = kzalloc(sizeof(*pair) + asrc->pair_priv_size, GFP_KERNEL);
+ if (!pair)
+ return -ENOMEM;
+
+ pair->private = (void *)pair + sizeof(struct fsl_asrc_pair);
+ pair->asrc = asrc;
+
+ init_completion(&pair->complete[IN]);
+ init_completion(&pair->complete[OUT]);
+
+ runtime->private_data = pair;
+
+ size = ASRC_M2M_BUFFER_SIZE;
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &pair->dma_buffer[IN]);
+ if (ret)
+ goto error_alloc_in_buf;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &pair->dma_buffer[OUT]);
+ if (ret)
+ goto error_alloc_out_buf;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power up asrc\n");
+ goto err_pm_runtime;
+ }
+
+ return 0;
+
+err_pm_runtime:
+ snd_dma_free_pages(&pair->dma_buffer[OUT]);
+error_alloc_out_buf:
+ snd_dma_free_pages(&pair->dma_buffer[IN]);
+error_alloc_in_buf:
+ kfree(pair);
+ return ret;
+}
+
+static int fsl_asrc_m2m_comp_release(struct snd_compr_stream *stream)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct device *dev = &asrc->pdev->dev;
+
+ pm_runtime_put_sync(dev);
+
+ snd_dma_free_pages(&pair->dma_buffer[IN]);
+ snd_dma_free_pages(&pair->dma_buffer[OUT]);
+
+ kfree(runtime->private_data);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_comp_set_params(struct snd_compr_stream *stream,
+ struct snd_compr_params *params)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct fsl_asrc_m2m_cap cap;
+ int ret, i;
+
+ ret = asrc->m2m_get_cap(&cap);
+ if (ret)
+ return -EINVAL;
+
+ if (pcm_format_to_bits((__force snd_pcm_format_t)params->codec.format) & cap.fmt_in)
+ pair->sample_format[IN] = (__force snd_pcm_format_t)params->codec.format;
+ else
+ return -EINVAL;
+
+ if (pcm_format_to_bits((__force snd_pcm_format_t)params->codec.pcm_format) & cap.fmt_out)
+ pair->sample_format[OUT] = (__force snd_pcm_format_t)params->codec.pcm_format;
+ else
+ return -EINVAL;
+
+ /* check that the input rate is supported */
+ for (i = 0; i < cap.rate_in_count; i++)
+ if (params->codec.sample_rate == cap.rate_in[i]) {
+ pair->rate[IN] = params->codec.sample_rate;
+ break;
+ }
+ if (i == cap.rate_in_count)
+ return -EINVAL;
+
+ /* check that the output rate is supported */
+ for (i = 0; i < cap.rate_out_count; i++)
+ if (params->codec.options.src_d.out_sample_rate == cap.rate_out[i]) {
+ pair->rate[OUT] = params->codec.options.src_d.out_sample_rate;
+ break;
+ }
+ if (i == cap.rate_out_count)
+ return -EINVAL;
+
+ if (params->codec.ch_in != params->codec.ch_out ||
+ params->codec.ch_in < cap.chan_min ||
+ params->codec.ch_in > cap.chan_max)
+ return -EINVAL;
+
+ pair->channels = params->codec.ch_in;
+ pair->buf_len[IN] = params->buffer.fragment_size;
+ pair->buf_len[OUT] = params->buffer.fragment_size;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct snd_dma_buffer *dmab = dmabuf->priv;
+
+ return snd_dma_buffer_mmap(dmab, vma);
+}
+
+static struct sg_table *fsl_asrc_m2m_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct snd_dma_buffer *dmab = attachment->dmabuf->priv;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (dma_get_sgtable(attachment->dev, sgt, dmab->area, dmab->addr, dmab->bytes) < 0)
+ goto free;
+
+ if (dma_map_sgtable(attachment->dev, sgt, direction, 0))
+ goto free;
+
+ return sgt;
+
+free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+}
+
+static void fsl_asrc_m2m_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static void fsl_asrc_m2m_release(struct dma_buf *dmabuf)
+{
+ /* buffer is released by fsl_asrc_m2m_comp_release() */
+}
+
+static const struct dma_buf_ops fsl_asrc_m2m_dma_buf_ops = {
+ .mmap = fsl_asrc_m2m_mmap,
+ .map_dma_buf = fsl_asrc_m2m_map_dma_buf,
+ .unmap_dma_buf = fsl_asrc_m2m_unmap_dma_buf,
+ .release = fsl_asrc_m2m_release,
+};
+
+static int fsl_asrc_m2m_comp_task_create(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info_in);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info_out);
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct device *dev = &asrc->pdev->dev;
+ int ret;
+
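+ /* Export the preallocated input/output DMA buffers as dma-bufs so userspace can mmap them */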
+ exp_info_in.ops = &fsl_asrc_m2m_dma_buf_ops;
+ exp_info_in.size = ASRC_M2M_BUFFER_SIZE;
+ exp_info_in.flags = O_RDWR;
+ exp_info_in.priv = &pair->dma_buffer[IN];
+ task->input = dma_buf_export(&exp_info_in);
+ if (IS_ERR(task->input)) {
+ ret = PTR_ERR(task->input);
+ return ret;
+ }
+
+ exp_info_out.ops = &fsl_asrc_m2m_dma_buf_ops;
+ exp_info_out.size = ASRC_M2M_BUFFER_SIZE;
+ exp_info_out.flags = O_RDWR;
+ exp_info_out.priv = &pair->dma_buffer[OUT];
+ task->output = dma_buf_export(&exp_info_out);
+ if (IS_ERR(task->output)) {
+ ret = PTR_ERR(task->output);
+ return ret;
+ }
+
+ /* Request asrc pair/context */
+ ret = asrc->request_pair(pair->channels, pair);
+ if (ret) {
+ dev_err(dev, "failed to request pair: %d\n", ret);
+ goto err_request_pair;
+ }
+
+ ret = asrc->m2m_prepare(pair);
+ if (ret) {
+ dev_err(dev, "failed to start pair part one: %d\n", ret);
+ goto err_start_part_one;
+ }
+
+ /* Request dma channels */
+ pair->dma_chan[IN] = asrc->get_dma_channel(pair, IN);
+ if (!pair->dma_chan[IN]) {
+ dev_err(dev, "[ctx%d] failed to get input DMA channel\n", pair->index);
+ ret = -EBUSY;
+ goto err_dma_channel_in;
+ }
+
+ pair->dma_chan[OUT] = asrc->get_dma_channel(pair, OUT);
+ if (!pair->dma_chan[OUT]) {
+ dev_err(dev, "[ctx%d] failed to get output DMA channel\n", pair->index);
+ ret = -EBUSY;
+ goto err_dma_channel_out;
+ }
+
+ return 0;
+
+err_dma_channel_out:
+ dma_release_channel(pair->dma_chan[IN]);
+err_dma_channel_in:
+ if (asrc->m2m_unprepare)
+ asrc->m2m_unprepare(pair);
+err_start_part_one:
+ asrc->release_pair(pair);
+err_request_pair:
+ return ret;
+}
+
+static int fsl_asrc_m2m_comp_task_start(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+
+ return asrc_m2m_device_run(pair, task);
+}
+
+static int fsl_asrc_m2m_comp_task_stop(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ return 0;
+}
+
+static int fsl_asrc_m2m_comp_task_free(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+
+ /* Stop & release pair/context */
+ if (asrc->m2m_stop)
+ asrc->m2m_stop(pair);
+
+ if (asrc->m2m_unprepare)
+ asrc->m2m_unprepare(pair);
+ asrc->release_pair(pair);
+
+ /* Release DMA channels */
+ if (pair->dma_chan[IN])
+ dma_release_channel(pair->dma_chan[IN]);
+ if (pair->dma_chan[OUT])
+ dma_release_channel(pair->dma_chan[OUT]);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_get_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps)
+{
+ caps->num_codecs = 1;
+ caps->min_fragment_size = 4096;
+ caps->max_fragment_size = 4096;
+ caps->min_fragments = 1;
+ caps->max_fragments = 1;
+ caps->codecs[0] = SND_AUDIOCODEC_PCM;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_fill_codec_caps(struct fsl_asrc *asrc,
+ struct snd_compr_codec_caps *codec)
+{
+ struct fsl_asrc_m2m_cap cap;
+ snd_pcm_format_t k;
+ int j = 0;
+ int ret;
+
+ ret = asrc->m2m_get_cap(&cap);
+ if (ret)
+ return -EINVAL;
+
+ pcm_for_each_format(k) {
+ if (pcm_format_to_bits(k) & cap.fmt_in) {
+ codec->descriptor[j].max_ch = cap.chan_max;
+ memcpy(codec->descriptor[j].sample_rates,
+ cap.rate_in,
+ cap.rate_in_count * sizeof(__u32));
+ codec->descriptor[j].num_sample_rates = cap.rate_in_count;
+ codec->descriptor[j].formats = (__force __u32)k;
+ codec->descriptor[j].pcm_formats = cap.fmt_out;
+ codec->descriptor[j].src.out_sample_rate_min = cap.rate_out[0];
+ codec->descriptor[j].src.out_sample_rate_max =
+ cap.rate_out[cap.rate_out_count - 1];
+ j++;
+ }
+ }
+
+ codec->codec = SND_AUDIOCODEC_PCM;
+ codec->num_descriptors = j;
+ return 0;
+}
+
+static int fsl_asrc_m2m_get_codec_caps(struct snd_compr_stream *stream,
+ struct snd_compr_codec_caps *codec)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+
+ return fsl_asrc_m2m_fill_codec_caps(asrc, codec);
+}
+
+static struct snd_compr_ops fsl_asrc_m2m_compr_ops = {
+ .open = fsl_asrc_m2m_comp_open,
+ .free = fsl_asrc_m2m_comp_release,
+ .set_params = fsl_asrc_m2m_comp_set_params,
+ .get_caps = fsl_asrc_m2m_get_caps,
+ .get_codec_caps = fsl_asrc_m2m_get_codec_caps,
+ .task_create = fsl_asrc_m2m_comp_task_create,
+ .task_start = fsl_asrc_m2m_comp_task_start,
+ .task_stop = fsl_asrc_m2m_comp_task_stop,
+ .task_free = fsl_asrc_m2m_comp_task_free,
+};
+
+int fsl_asrc_m2m_suspend(struct fsl_asrc *asrc)
+{
+ struct fsl_asrc_pair *pair;
+ int i;
+
+ for (i = 0; i < PAIR_CTX_NUM; i++) {
+ pair = asrc->pair[i];
+ if (!pair || !pair->dma_buffer[IN].area || !pair->dma_buffer[OUT].area)
+ continue;
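+ /* Terminate any in-flight DMA and complete the waiters before suspending the pair */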
+ if (!completion_done(&pair->complete[IN])) {
+ if (pair->dma_chan[IN])
+ dmaengine_terminate_all(pair->dma_chan[IN]);
+ asrc_input_dma_callback((void *)pair);
+ }
+ if (!completion_done(&pair->complete[OUT])) {
+ if (pair->dma_chan[OUT])
+ dmaengine_terminate_all(pair->dma_chan[OUT]);
+ asrc_output_dma_callback((void *)pair);
+ }
+
+ if (asrc->m2m_pair_suspend)
+ asrc->m2m_pair_suspend(pair);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_suspend);
+
+int fsl_asrc_m2m_resume(struct fsl_asrc *asrc)
+{
+ struct fsl_asrc_pair *pair;
+ int i;
+
+ for (i = 0; i < PAIR_CTX_NUM; i++) {
+ pair = asrc->pair[i];
+ if (!pair)
+ continue;
+ if (asrc->m2m_pair_resume)
+ asrc->m2m_pair_resume(pair);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_resume);
+
+int fsl_asrc_m2m_init(struct fsl_asrc *asrc)
+{
+ struct device *dev = &asrc->pdev->dev;
+ struct snd_card *card;
+ struct snd_compr *compr;
+ int ret;
+
+ ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (ret < 0)
+ return ret;
+
+ strscpy(card->driver, "fsl-asrc-m2m", sizeof(card->driver));
+ strscpy(card->shortname, "ASRC-M2M", sizeof(card->shortname));
+ strscpy(card->longname, "ASRC-M2M", sizeof(card->longname));
+
+ asrc->card = card;
+
+ compr = devm_kzalloc(dev, sizeof(*compr), GFP_KERNEL);
+ if (!compr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ compr->ops = &fsl_asrc_m2m_compr_ops;
+ compr->private_data = asrc;
+
+ ret = snd_compress_new(card, 0, SND_COMPRESS_ACCEL, "ASRC M2M", compr);
+ if (ret < 0)
+ goto err;
+
+ ret = snd_card_register(card);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ snd_card_free(card);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_init);
+
+void fsl_asrc_m2m_exit(struct fsl_asrc *asrc)
+{
+ struct snd_card *card = asrc->card;
+
+ snd_card_free(card);
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_exit);
+
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_AUTHOR("Shengjiu Wang <Shengjiu.Wang@nxp.com>");
+MODULE_DESCRIPTION("Freescale ASRC M2M driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
index d22f0621c465..f404a39009e1 100644
--- a/sound/soc/fsl/fsl_easrc.c
+++ b/sound/soc/fsl/fsl_easrc.c
@@ -1861,6 +1861,224 @@ static int fsl_easrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
return REG_EASRC_FIFO(dir, index);
}
+/* Get the number of samples in the output FIFO */
+static unsigned int fsl_easrc_get_output_fifo_size(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 val;
+
+ regmap_read(asrc->regmap, REG_EASRC_SFS(index), &val);
+ val &= EASRC_SFS_NSGO_MASK;
+
+ return val >> EASRC_SFS_NSGO_SHIFT;
+}
+
+static int fsl_easrc_m2m_prepare(struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ int ret;
+
+ ctx_priv->in_params.sample_rate = pair->rate[IN];
+ ctx_priv->in_params.sample_format = pair->sample_format[IN];
+ ctx_priv->out_params.sample_rate = pair->rate[OUT];
+ ctx_priv->out_params.sample_format = pair->sample_format[OUT];
+
+ ctx_priv->in_params.fifo_wtmk = FSL_EASRC_INPUTFIFO_WML;
+ ctx_priv->out_params.fifo_wtmk = FSL_EASRC_OUTPUTFIFO_WML;
+ /* Fill the right half of the re-sampler with zeros */
+ ctx_priv->rs_init_mode = 0x2;
+ /* Zero fill the right half of the prefilter */
+ ctx_priv->pf_init_mode = 0x2;
+
+ ret = fsl_easrc_set_ctx_format(pair,
+ &ctx_priv->in_params.sample_format,
+ &ctx_priv->out_params.sample_format);
+ if (ret) {
+ dev_err(dev, "failed to set context format: %d\n", ret);
+ return ret;
+ }
+
+ ret = fsl_easrc_config_context(asrc, pair->index);
+ if (ret) {
+ dev_err(dev, "failed to config context %d\n", ret);
+ return ret;
+ }
+
+ ctx_priv->in_params.iterations = 1;
+ ctx_priv->in_params.group_len = pair->channels;
+ ctx_priv->in_params.access_len = pair->channels;
+ ctx_priv->out_params.iterations = 1;
+ ctx_priv->out_params.group_len = pair->channels;
+ ctx_priv->out_params.access_len = pair->channels;
+
+ ret = fsl_easrc_set_ctx_organziation(pair);
+ if (ret) {
+ dev_err(dev, "failed to set fifo organization\n");
+ return ret;
+ }
+
+ /* Mark the context to be started on the first conversion */
+ pair->first_convert = 1;
+ return 0;
+}
+
+static int fsl_easrc_m2m_start(struct fsl_asrc_pair *pair)
+{
+ /* start context once */
+ if (pair->first_convert) {
+ fsl_easrc_start_context(pair);
+ pair->first_convert = 0;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_stop(struct fsl_asrc_pair *pair)
+{
+ /* Stop pair/context */
+ if (!pair->first_convert) {
+ fsl_easrc_stop_context(pair);
+ pair->first_convert = 1;
+ }
+
+ return 0;
+}
+
+/* Calculate the output (capture) data length from the input data length and the sample rates */
+static int fsl_easrc_m2m_calc_out_len(struct fsl_asrc_pair *pair, int input_buffer_length)
+{
+ struct fsl_asrc *easrc = pair->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ unsigned int in_rate = ctx_priv->in_params.norm_rate;
+ unsigned int out_rate = ctx_priv->out_params.norm_rate;
+ unsigned int channels = pair->channels;
+ unsigned int in_samples, out_samples;
+ unsigned int in_width, out_width;
+ unsigned int out_length;
+ unsigned int frac_bits;
+ u64 val1, val2;
+
+ switch (easrc_priv->rs_num_taps) {
+ case EASRC_RS_32_TAPS:
+ /* integer bits = 5; */
+ frac_bits = 39;
+ break;
+ case EASRC_RS_64_TAPS:
+ /* integer bits = 6; */
+ frac_bits = 38;
+ break;
+ case EASRC_RS_128_TAPS:
+ /* integer bits = 7; */
+ frac_bits = 37;
+ break;
+ default:
+ return -EINVAL;
+ }
+
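+ /* val1 = in_rate / out_rate as a Q(frac_bits) fixed-point ratio, adjusted by the accumulated Q31 ratio modifier */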
+ val1 = (u64)in_rate << frac_bits;
+ do_div(val1, out_rate);
+ val1 += (s64)ctx_priv->ratio_mod << (frac_bits - 31);
+
+ in_width = snd_pcm_format_physical_width(ctx_priv->in_params.sample_format) / 8;
+ out_width = snd_pcm_format_physical_width(ctx_priv->out_params.sample_format) / 8;
+
+ ctx_priv->in_filled_len += input_buffer_length;
+ if (ctx_priv->in_filled_len <= ctx_priv->in_filled_sample * in_width * channels) {
+ out_length = 0;
+ } else {
+ in_samples = ctx_priv->in_filled_len / (in_width * channels) -
+ ctx_priv->in_filled_sample;
+
+ /* right shift by 12 bits to keep the ratio within 32-bit space */
+ val2 = (u64)in_samples << (frac_bits - 12);
+ val1 = val1 >> 12;
+ do_div(val2, val1);
+ out_samples = val2;
+
+ out_length = out_samples * out_width * channels;
+ ctx_priv->in_filled_len = ctx_priv->in_filled_sample * in_width * channels;
+ }
+
+ return out_length;
+}
+
+static int fsl_easrc_m2m_get_maxburst(u8 dir, struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+
+ if (dir == IN)
+ return ctx_priv->in_params.fifo_wtmk * pair->channels;
+ else
+ return ctx_priv->out_params.fifo_wtmk * pair->channels;
+}
+
+static int fsl_easrc_m2m_pair_suspend(struct fsl_asrc_pair *pair)
+{
+ fsl_easrc_stop_context(pair);
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_pair_resume(struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+
+ pair->first_convert = 1;
+ ctx_priv->in_filled_len = 0;
+
+ return 0;
+}
+
+/* val is Q31 */
+static int fsl_easrc_m2m_set_ratio_mod(struct fsl_asrc_pair *pair, int val)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ struct fsl_asrc *easrc = pair->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ unsigned int frac_bits;
+
+ ctx_priv->ratio_mod += val;
+
+ switch (easrc_priv->rs_num_taps) {
+ case EASRC_RS_32_TAPS:
+ /* integer bits = 5; */
+ frac_bits = 39;
+ break;
+ case EASRC_RS_64_TAPS:
+ /* integer bits = 6; */
+ frac_bits = 38;
+ break;
+ case EASRC_RS_128_TAPS:
+ /* integer bits = 7; */
+ frac_bits = 37;
+ break;
+ default:
+ return -EINVAL;
+ }
+
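+ /* convert the Q31 update into the resampler's Q(frac_bits) fixed-point format */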
+ val <<= (frac_bits - 31);
+ regmap_write(easrc->regmap, REG_EASRC_RUC(pair->index), EASRC_RSUC_RS_RM(val));
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_get_cap(struct fsl_asrc_m2m_cap *cap)
+{
+ cap->fmt_in = FSL_EASRC_FORMATS;
+ cap->fmt_out = FSL_EASRC_FORMATS | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
+ cap->rate_in = easrc_rates;
+ cap->rate_in_count = ARRAY_SIZE(easrc_rates);
+ cap->rate_out = easrc_rates;
+ cap->rate_out_count = ARRAY_SIZE(easrc_rates);
+ cap->chan_min = 1;
+ cap->chan_max = 32;
+ return 0;
+}
+
static const struct of_device_id fsl_easrc_dt_ids[] = {
{ .compatible = "fsl,imx8mn-easrc",},
{}
@@ -1926,6 +2144,16 @@ static int fsl_easrc_probe(struct platform_device *pdev)
easrc->release_pair = fsl_easrc_release_context;
easrc->get_fifo_addr = fsl_easrc_get_fifo_addr;
easrc->pair_priv_size = sizeof(struct fsl_easrc_ctx_priv);
+ easrc->m2m_prepare = fsl_easrc_m2m_prepare;
+ easrc->m2m_start = fsl_easrc_m2m_start;
+ easrc->m2m_stop = fsl_easrc_m2m_stop;
+ easrc->get_output_fifo_size = fsl_easrc_get_output_fifo_size;
+ easrc->m2m_calc_out_len = fsl_easrc_m2m_calc_out_len;
+ easrc->m2m_get_maxburst = fsl_easrc_m2m_get_maxburst;
+ easrc->m2m_pair_suspend = fsl_easrc_m2m_pair_suspend;
+ easrc->m2m_pair_resume = fsl_easrc_m2m_pair_resume;
+ easrc->m2m_set_ratio_mod = fsl_easrc_m2m_set_ratio_mod;
+ easrc->m2m_get_cap = fsl_easrc_m2m_get_cap;
easrc_priv->rs_num_taps = EASRC_RS_32_TAPS;
easrc_priv->const_coeff = 0x3FF0000000000000;
@@ -1976,6 +2204,12 @@ static int fsl_easrc_probe(struct platform_device *pdev)
goto err_pm_disable;
}
+ ret = fsl_asrc_m2m_init(easrc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init m2m device %d\n", ret);
+ return ret;
+ }
+
return 0;
err_pm_disable:
@@ -1985,6 +2219,10 @@ err_pm_disable:
static void fsl_easrc_remove(struct platform_device *pdev)
{
+ struct fsl_asrc *easrc = dev_get_drvdata(&pdev->dev);
+
+ fsl_asrc_m2m_exit(easrc);
+
pm_runtime_disable(&pdev->dev);
}
@@ -2085,10 +2323,29 @@ disable_mem_clk:
return ret;
}
+static int fsl_easrc_suspend(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ int ret;
+
+ fsl_asrc_m2m_suspend(easrc);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_easrc_resume(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ fsl_asrc_m2m_resume(easrc);
+ return ret;
+}
+
static const struct dev_pm_ops fsl_easrc_pm_ops = {
RUNTIME_PM_OPS(fsl_easrc_runtime_suspend, fsl_easrc_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SYSTEM_SLEEP_PM_OPS(fsl_easrc_suspend, fsl_easrc_resume)
};
static struct platform_driver fsl_easrc_driver = {
diff --git a/sound/soc/fsl/fsl_easrc.h b/sound/soc/fsl/fsl_easrc.h
index 7c70dac52713..c9f770862662 100644
--- a/sound/soc/fsl/fsl_easrc.h
+++ b/sound/soc/fsl/fsl_easrc.h
@@ -601,6 +601,8 @@ struct fsl_easrc_slot {
* @out_missed_sample: sample missed in output
* @st1_addexp: exponent added for stage1
* @st2_addexp: exponent added for stage2
+ * @ratio_mod: update ratio
+ * @in_filled_len: input filled length
*/
struct fsl_easrc_ctx_priv {
struct fsl_easrc_io_params in_params;
@@ -618,6 +620,8 @@ struct fsl_easrc_ctx_priv {
int out_missed_sample;
int st1_addexp;
int st2_addexp;
+ int ratio_mod;
+ unsigned int in_filled_len;
};
/**
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 8c15389c9a04..1075598a6647 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -35,6 +35,15 @@
#define MICFIL_AUDIO_PLL2 1
#define MICFIL_CLK_EXT3 2
+static const unsigned int fsl_micfil_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list fsl_micfil_rate_constraints = {
+ .count = ARRAY_SIZE(fsl_micfil_rates),
+ .list = fsl_micfil_rates,
+};
+
enum quality {
QUALITY_HIGH,
QUALITY_MEDIUM,
@@ -80,6 +89,7 @@ struct fsl_micfil_soc_data {
bool use_verid;
bool volume_sx;
u64 formats;
+ int fifo_offset;
};
static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
@@ -89,6 +99,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
.dataline = 0xf,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.volume_sx = true,
+ .fifo_offset = 0,
};
static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
@@ -98,6 +109,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
.dataline = 0xf,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.volume_sx = false,
+ .fifo_offset = 0,
};
static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
@@ -109,12 +121,26 @@ static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
.use_edma = true,
.use_verid = true,
.volume_sx = false,
+ .fifo_offset = 0,
+};
+
+static struct fsl_micfil_soc_data fsl_micfil_imx943 = {
+ .imx = true,
+ .fifos = 8,
+ .fifo_depth = 32,
+ .dataline = 0xf,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .use_edma = true,
+ .use_verid = true,
+ .volume_sx = false,
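+ /* data FIFO registers on i.MX943 sit 4 bytes below REG_MICFIL_DATACH0 */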
+ .fifo_offset = -4,
};
static const struct of_device_id fsl_micfil_dt_ids[] = {
{ .compatible = "fsl,imx8mm-micfil", .data = &fsl_micfil_imx8mm },
{ .compatible = "fsl,imx8mp-micfil", .data = &fsl_micfil_imx8mp },
{ .compatible = "fsl,imx93-micfil", .data = &fsl_micfil_imx93 },
+ { .compatible = "fsl,imx943-micfil", .data = &fsl_micfil_imx943 },
{}
};
MODULE_DEVICE_TABLE(of, fsl_micfil_dt_ids);
@@ -486,29 +512,12 @@ static int fsl_micfil_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
- unsigned int rates[MICFIL_NUM_RATES] = {8000, 11025, 16000, 22050, 32000, 44100, 48000};
- int i, j, k = 0;
- u64 clk_rate;
if (!micfil) {
dev_err(dai->dev, "micfil dai priv_data not set\n");
return -EINVAL;
}
- micfil->constraint_rates.list = micfil->constraint_rates_list;
- micfil->constraint_rates.count = 0;
-
- for (j = 0; j < MICFIL_NUM_RATES; j++) {
- for (i = 0; i < MICFIL_CLK_SRC_NUM; i++) {
- clk_rate = clk_get_rate(micfil->clk_src[i]);
- if (clk_rate != 0 && do_div(clk_rate, rates[j]) == 0) {
- micfil->constraint_rates_list[k++] = rates[j];
- micfil->constraint_rates.count++;
- break;
- }
- }
- }
-
if (micfil->constraint_rates.count > 0)
snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
@@ -801,7 +810,7 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
MICFIL_CTRL2_CLKDIV | MICFIL_CTRL2_CICOSR,
FIELD_PREP(MICFIL_CTRL2_CLKDIV, clk_div) |
- FIELD_PREP(MICFIL_CTRL2_CICOSR, 16 - osr));
+ FIELD_PREP(MICFIL_CTRL2_CICOSR, 32 - osr));
/* Configure CIC OSR in VADCICOSR */
regmap_update_bits(micfil->regmap, REG_MICFIL_VAD0_CTRL1,
@@ -940,9 +949,39 @@ static const struct reg_default fsl_micfil_reg_defaults[] = {
{REG_MICFIL_VAD0_ZCD, 0x00000004},
};
+static const struct reg_default fsl_micfil_reg_defaults_v2[] = {
+ {REG_MICFIL_CTRL1, 0x00000000},
+ {REG_MICFIL_CTRL2, 0x00000000},
+ {REG_MICFIL_STAT, 0x00000000},
+ {REG_MICFIL_FIFO_CTRL, 0x0000001F},
+ {REG_MICFIL_FIFO_STAT, 0x00000000},
+ {REG_MICFIL_DATACH0 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH1 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH2 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH3 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH4 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH5 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH6 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH7 - 0x4, 0x00000000},
+ {REG_MICFIL_DC_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL1, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL2, 0x000A0000},
+ {REG_MICFIL_VAD0_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_SCONFIG, 0x00000000},
+ {REG_MICFIL_VAD0_NCONFIG, 0x80000000},
+ {REG_MICFIL_VAD0_NDATA, 0x00000000},
+ {REG_MICFIL_VAD0_ZCD, 0x00000004},
+};
+
static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg)
{
struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ofs = micfil->soc->fifo_offset;
+
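+ /* Data channel registers shift with the SoC-specific FIFO offset */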
+ if (reg >= (REG_MICFIL_DATACH0 + ofs) && reg <= (REG_MICFIL_DATACH7 + ofs))
+ return true;
switch (reg) {
case REG_MICFIL_CTRL1:
@@ -950,14 +989,6 @@ static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg)
case REG_MICFIL_STAT:
case REG_MICFIL_FIFO_CTRL:
case REG_MICFIL_FIFO_STAT:
- case REG_MICFIL_DATACH0:
- case REG_MICFIL_DATACH1:
- case REG_MICFIL_DATACH2:
- case REG_MICFIL_DATACH3:
- case REG_MICFIL_DATACH4:
- case REG_MICFIL_DATACH5:
- case REG_MICFIL_DATACH6:
- case REG_MICFIL_DATACH7:
case REG_MICFIL_DC_CTRL:
case REG_MICFIL_OUT_CTRL:
case REG_MICFIL_OUT_STAT:
@@ -1011,17 +1042,15 @@ static bool fsl_micfil_writeable_reg(struct device *dev, unsigned int reg)
static bool fsl_micfil_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ofs = micfil->soc->fifo_offset;
+
+ if (reg >= (REG_MICFIL_DATACH0 + ofs) && reg <= (REG_MICFIL_DATACH7 + ofs))
+ return true;
+
switch (reg) {
case REG_MICFIL_STAT:
case REG_MICFIL_FIFO_STAT:
- case REG_MICFIL_DATACH0:
- case REG_MICFIL_DATACH1:
- case REG_MICFIL_DATACH2:
- case REG_MICFIL_DATACH3:
- case REG_MICFIL_DATACH4:
- case REG_MICFIL_DATACH5:
- case REG_MICFIL_DATACH6:
- case REG_MICFIL_DATACH7:
case REG_MICFIL_OUT_STAT:
case REG_MICFIL_VERID:
case REG_MICFIL_PARAM:
@@ -1047,6 +1076,20 @@ static const struct regmap_config fsl_micfil_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
+static const struct regmap_config fsl_micfil_regmap_config_v2 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .max_register = REG_MICFIL_VAD0_ZCD,
+ .reg_defaults = fsl_micfil_reg_defaults_v2,
+ .num_reg_defaults = ARRAY_SIZE(fsl_micfil_reg_defaults_v2),
+ .readable_reg = fsl_micfil_readable_reg,
+ .volatile_reg = fsl_micfil_volatile_reg,
+ .writeable_reg = fsl_micfil_writeable_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
+
/* END OF REGMAP */
static irqreturn_t micfil_isr(int irq, void *devid)
@@ -1239,14 +1282,26 @@ static int fsl_micfil_probe(struct platform_device *pdev)
if (IS_ERR(micfil->clk_src[MICFIL_CLK_EXT3]))
micfil->clk_src[MICFIL_CLK_EXT3] = NULL;
+ fsl_asoc_constrain_rates(&micfil->constraint_rates,
+ &fsl_micfil_rate_constraints,
+ micfil->clk_src[MICFIL_AUDIO_PLL1],
+ micfil->clk_src[MICFIL_AUDIO_PLL2],
+ micfil->clk_src[MICFIL_CLK_EXT3],
+ micfil->constraint_rates_list);
+
/* init regmap */
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
- micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
- regs,
- &fsl_micfil_regmap_config);
+ if (of_device_is_compatible(np, "fsl,imx943-micfil"))
+ micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
+ regs,
+ &fsl_micfil_regmap_config_v2);
+ else
+ micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
+ regs,
+ &fsl_micfil_regmap_config);
if (IS_ERR(micfil->regmap)) {
dev_err(&pdev->dev, "failed to init MICFIL regmap: %ld\n",
PTR_ERR(micfil->regmap));
@@ -1315,7 +1370,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
}
micfil->dma_params_rx.chan_name = "rx";
- micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0;
+ micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0 + micfil->soc->fifo_offset;
micfil->dma_params_rx.maxburst = MICFIL_DMA_MAXBURST_RX;
platform_set_drvdata(pdev, micfil);
diff --git a/sound/soc/fsl/fsl_micfil.h b/sound/soc/fsl/fsl_micfil.h
index b7798a7cbf2a..aa3661ea4ffc 100644
--- a/sound/soc/fsl/fsl_micfil.h
+++ b/sound/soc/fsl/fsl_micfil.h
@@ -62,7 +62,7 @@
#define MICFIL_QSEL_VLOW1_QUALITY 5
#define MICFIL_QSEL_VLOW2_QUALITY 4
-#define MICFIL_CTRL2_CICOSR GENMASK(19, 16)
+#define MICFIL_CTRL2_CICOSR GENMASK(20, 16)
#define MICFIL_CTRL2_CLKDIV GENMASK(7, 0)
/* MICFIL Status Register -- REG_MICFIL_STAT 0x08 */
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
index 0513e9e8402e..e34e5ea98de5 100644
--- a/sound/soc/fsl/fsl_mqs.c
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -410,12 +410,40 @@ static const struct fsl_mqs_soc_data fsl_mqs_imx95_netc_data = {
.div_shift = 9,
};
+static const struct fsl_mqs_soc_data fsl_mqs_imx943_aon_data = {
+ .type = TYPE_REG_SM,
+ .ctrl_off = 0x88,
+ .en_mask = BIT(1),
+ .en_shift = 1,
+ .rst_mask = BIT(2),
+ .rst_shift = 2,
+ .osr_mask = BIT(3),
+ .osr_shift = 3,
+ .div_mask = GENMASK(15, 8),
+ .div_shift = 8,
+};
+
+static const struct fsl_mqs_soc_data fsl_mqs_imx943_wakeup_data = {
+ .type = TYPE_REG_GPR,
+ .ctrl_off = 0x10,
+ .en_mask = BIT(1),
+ .en_shift = 1,
+ .rst_mask = BIT(2),
+ .rst_shift = 2,
+ .osr_mask = BIT(3),
+ .osr_shift = 3,
+ .div_mask = GENMASK(15, 8),
+ .div_shift = 8,
+};
+
static const struct of_device_id fsl_mqs_dt_ids[] = {
{ .compatible = "fsl,imx8qm-mqs", .data = &fsl_mqs_imx8qm_data },
{ .compatible = "fsl,imx6sx-mqs", .data = &fsl_mqs_imx6sx_data },
{ .compatible = "fsl,imx93-mqs", .data = &fsl_mqs_imx93_data },
{ .compatible = "fsl,imx95-aonmix-mqs", .data = &fsl_mqs_imx95_aon_data },
{ .compatible = "fsl,imx95-netcmix-mqs", .data = &fsl_mqs_imx95_netc_data },
+ { .compatible = "fsl,imx943-aonmix-mqs", .data = &fsl_mqs_imx943_aon_data },
+ { .compatible = "fsl,imx943-wakeupmix-mqs", .data = &fsl_mqs_imx943_wakeup_data },
{}
};
MODULE_DEVICE_TABLE(of, fsl_mqs_dt_ids);
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 634168d2bb6e..c4eb87c5d39e 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -885,7 +885,7 @@ static int fsl_sai_startup(struct snd_pcm_substream *substream,
sai->dma_params_rx.maxburst);
ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE, &fsl_sai_rate_constraints);
+ SNDRV_PCM_HW_PARAM_RATE, &sai->constraint_rates);
return ret;
}
@@ -1442,6 +1442,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
fsl_asoc_get_pll_clocks(&pdev->dev, &sai->pll8k_clk,
&sai->pll11k_clk);
+ fsl_asoc_constrain_rates(&sai->constraint_rates,
+ &fsl_sai_rate_constraints,
+ sai->pll8k_clk, sai->pll11k_clk, NULL,
+ sai->constraint_rates_list);
+
/* Use Multi FIFO mode depending on the support from SDMA script */
ret = of_property_read_u32_array(np, "dmas", dmas, 4);
if (!sai->soc_data->use_edma && !ret && dmas[2] == IMX_DMATYPE_MULTI_SAI)
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 9c4d19fe22c6..0e25e2fc7ce0 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -9,6 +9,7 @@
#include <linux/dma/imx-dma.h>
#include <sound/dmaengine_pcm.h>
+#define FAL_SAI_NUM_RATES 20
#define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE |\
@@ -309,6 +310,8 @@ struct fsl_sai {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_state;
struct sdma_peripheral_config audio_config[2];
+ struct snd_pcm_hw_constraint_list constraint_rates;
+ unsigned int constraint_rates_list[FAL_SAI_NUM_RATES];
};
#define TX 1
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index a5ab27c2f711..d69a6b9795bf 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -152,6 +152,51 @@ void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
}
EXPORT_SYMBOL(fsl_asoc_reparent_pll_clocks);
+/**
+ * fsl_asoc_constrain_rates - constrain rates according to clocks
+ *
+ * @target_constr: target constraint
+ * @original_constr: original constraint
+ * @pll8k_clk: PLL clock pointer for 8kHz
+ * @pll11k_clk: PLL clock pointer for 11kHz
+ * @ext_clk: External clock pointer
+ * @target_rates: target rates array
+ *
+ * This function constrains the supported rate list according to the available clocks.
+ */
+void fsl_asoc_constrain_rates(struct snd_pcm_hw_constraint_list *target_constr,
+ const struct snd_pcm_hw_constraint_list *original_constr,
+ struct clk *pll8k_clk, struct clk *pll11k_clk,
+ struct clk *ext_clk, int *target_rates)
+{
+ int i, j, k = 0;
+ u64 clk_rate[3];
+
+ *target_constr = *original_constr;
+ if (pll8k_clk || pll11k_clk || ext_clk) {
+ target_constr->list = target_rates;
+ target_constr->count = 0;
+ for (i = 0; i < original_constr->count; i++) {
+ clk_rate[0] = clk_get_rate(pll8k_clk);
+ clk_rate[1] = clk_get_rate(pll11k_clk);
+ clk_rate[2] = clk_get_rate(ext_clk);
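+ /* keep the rate if any of the clocks divides it evenly (do_div() returns the remainder) */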
+ for (j = 0; j < 3; j++) {
+ if (clk_rate[j] != 0 &&
+ do_div(clk_rate[j], original_constr->list[i]) == 0) {
+ target_rates[k++] = original_constr->list[i];
+ target_constr->count++;
+ break;
+ }
+ }
+ }
+
+ /* Fall back to the original constraints if no usable rate was found */
+ if (!target_constr->count)
+ *target_constr = *original_constr;
+ }
+}
+EXPORT_SYMBOL(fsl_asoc_constrain_rates);
+
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale ASoC utility code");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/fsl_utils.h b/sound/soc/fsl/fsl_utils.h
index 4d5f3d93bc81..21b25a11ecda 100644
--- a/sound/soc/fsl/fsl_utils.h
+++ b/sound/soc/fsl/fsl_utils.h
@@ -26,4 +26,9 @@ void fsl_asoc_get_pll_clocks(struct device *dev, struct clk **pll8k_clk,
void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
struct clk *pll8k_clk,
struct clk *pll11k_clk, u64 ratio);
+
+void fsl_asoc_constrain_rates(struct snd_pcm_hw_constraint_list *target_constr,
+ const struct snd_pcm_hw_constraint_list *original_constr,
+ struct clk *pll8k_clk, struct clk *pll11k_clk,
+ struct clk *ext_clk, int *target_rates);
#endif /* _FSL_UTILS_H */
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index 9c184ab73468..c59c1af5a98a 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -19,6 +19,7 @@
#include "imx-pcm.h"
#define FSL_XCVR_CAPDS_SIZE 256
+#define SPDIF_NUM_RATES 7
enum fsl_xcvr_pll_verison {
PLL_MX8MP,
@@ -37,6 +38,8 @@ struct fsl_xcvr {
const struct fsl_xcvr_soc_data *soc_data;
struct platform_device *pdev;
struct regmap *regmap;
+ struct regmap *regmap_phy;
+ struct regmap *regmap_pll;
struct clk *ipg_clk;
struct clk *pll_ipg_clk;
struct clk *phy_clk;
@@ -55,6 +58,8 @@ struct fsl_xcvr {
u8 cap_ds[FSL_XCVR_CAPDS_SIZE];
struct work_struct work_rst;
spinlock_t lock; /* Protect hw_reset and trigger */
+ struct snd_pcm_hw_constraint_list spdif_constr_rates;
+ u32 spdif_constr_rates_list[SPDIF_NUM_RATES];
};
static const struct fsl_xcvr_pll_conf {
@@ -257,7 +262,7 @@ static int fsl_xcvr_ai_write(struct fsl_xcvr *xcvr, u8 reg, u32 data, bool phy)
idx = BIT(phy ? 26 : 24);
tidx = BIT(phy ? 27 : 25);
- regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET, reg);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_WDATA, data);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_TOG, idx);
@@ -271,6 +276,59 @@ static int fsl_xcvr_ai_write(struct fsl_xcvr *xcvr, u8 reg, u32 data, bool phy)
return ret;
}
+static int fsl_xcvr_ai_read(struct fsl_xcvr *xcvr, u8 reg, u32 *data, bool phy)
+{
+ struct device *dev = &xcvr->pdev->dev;
+ u32 val, idx, tidx;
+ int ret;
+
+ idx = BIT(phy ? 26 : 24);
+ tidx = BIT(phy ? 27 : 25);
+
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET, reg | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_TOG, idx);
+
+ ret = regmap_read_poll_timeout(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL, val,
+ (val & idx) == ((val & tidx) >> 1),
+ 10, 10000);
+ if (ret)
+ dev_err(dev, "AI timeout: failed to read %s reg 0x%02x\n",
+ phy ? "PHY" : "PLL", reg);
+
+ regmap_read(xcvr->regmap, FSL_XCVR_PHY_AI_RDATA, data);
+
+ return ret;
+}
+
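+/* regmap accessors: PHY and PLL registers are reached through the indirect AI interface above */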
+static int fsl_xcvr_phy_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_read(xcvr, reg, val, 1);
+}
+
+static int fsl_xcvr_phy_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_write(xcvr, reg, val, 1);
+}
+
+static int fsl_xcvr_pll_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_read(xcvr, reg, val, 0);
+}
+
+static int fsl_xcvr_pll_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_write(xcvr, reg, val, 0);
+}
+
static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
{
struct device *dev = &xcvr->pdev->dev;
@@ -303,55 +361,55 @@ static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
switch (xcvr->soc_data->pll_ver) {
case PLL_MX8MP:
/* PLL: BANDGAP_SET: EN_VBG (enable bandgap) */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_BANDGAP_SET,
- FSL_XCVR_PLL_BANDGAP_EN_VBG, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_BANDGAP,
+ FSL_XCVR_PLL_BANDGAP_EN_VBG);
/* PLL: CTRL0: DIV_INTEGER */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0, fsl_xcvr_pll_cfg[i].mfi, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0, fsl_xcvr_pll_cfg[i].mfi);
/* PLL: NUMERATOR: MFN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_NUM, fsl_xcvr_pll_cfg[i].mfn, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_NUM, fsl_xcvr_pll_cfg[i].mfn);
/* PLL: DENOMINATOR: MFD */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_DEN, fsl_xcvr_pll_cfg[i].mfd, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_DEN, fsl_xcvr_pll_cfg[i].mfd);
/* PLL: CTRL0_SET: HOLD_RING_OFF, POWER_UP */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_HROFF | FSL_XCVR_PLL_CTRL0_PWP, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_HROFF | FSL_XCVR_PLL_CTRL0_PWP);
udelay(25);
/* PLL: CTRL0: Clear Hold Ring Off */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_CLR,
- FSL_XCVR_PLL_CTRL0_HROFF, 0);
+ regmap_clear_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_HROFF);
udelay(100);
if (tx) { /* TX is enabled for SPDIF only */
/* PLL: POSTDIV: PDIV0 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 0), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 0));
/* PLL: CTRL_SET: CLKMUX0_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM0_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM0_EN);
} else if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC RX */
/* PLL: POSTDIV: PDIV1 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 1), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 1));
/* PLL: CTRL_SET: CLKMUX1_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM1_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM1_EN);
} else { /* SPDIF / ARC RX */
/* PLL: POSTDIV: PDIV2 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 2), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 2));
/* PLL: CTRL_SET: CLKMUX2_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM2_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM2_EN);
}
break;
case PLL_MX95:
val = fsl_xcvr_pll_cfg[i].mfi << FSL_XCVR_GP_PLL_DIV_MFI_SHIFT | div;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_DIV, val, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_DIV, val);
val = fsl_xcvr_pll_cfg[i].mfn << FSL_XCVR_GP_PLL_NUMERATOR_MFN_SHIFT;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_NUMERATOR, val, 0);
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_DENOMINATOR,
- fsl_xcvr_pll_cfg[i].mfd, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_NUMERATOR, val);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_DENOMINATOR,
+ fsl_xcvr_pll_cfg[i].mfd);
val = FSL_XCVR_GP_PLL_CTRL_POWERUP | FSL_XCVR_GP_PLL_CTRL_CLKMUX_EN;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_CTRL, val, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_CTRL, val);
break;
default:
dev_err(dev, "Error for PLL version %d\n", xcvr->soc_data->pll_ver);
@@ -360,22 +418,22 @@ static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC mode */
/* PHY: CTRL_SET: TX_DIFF_OE, PHY_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TSDIFF_OE |
- FSL_XCVR_PHY_CTRL_PHY_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TSDIFF_OE |
+ FSL_XCVR_PHY_CTRL_PHY_EN);
/* PHY: CTRL2_SET: EARC_TX_MODE */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL2_SET,
- FSL_XCVR_PHY_CTRL2_EARC_TXMS, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL2,
+ FSL_XCVR_PHY_CTRL2_EARC_TXMS);
} else if (!tx) { /* SPDIF / ARC RX mode */
if (xcvr->mode == FSL_XCVR_MODE_SPDIF)
/* PHY: CTRL_SET: SPDIF_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_SPDIF_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_SPDIF_EN);
else /* PHY: CTRL_SET: ARC RX setup */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_PHY_EN |
- FSL_XCVR_PHY_CTRL_RX_CM_EN |
- fsl_xcvr_phy_arc_cfg[xcvr->arc_mode], 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_PHY_EN |
+ FSL_XCVR_PHY_CTRL_RX_CM_EN |
+ fsl_xcvr_phy_arc_cfg[xcvr->arc_mode]);
}
dev_dbg(dev, "PLL Fexp: %u, Fout: %u, mfi: %u, mfn: %u, mfd: %d, div: %u, pdiv0: %u\n",
@@ -416,17 +474,17 @@ static int fsl_xcvr_en_aud_pll(struct fsl_xcvr *xcvr, u32 freq)
if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC mode */
/* PHY: CTRL_SET: TX_DIFF_OE, PHY_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TSDIFF_OE |
- FSL_XCVR_PHY_CTRL_PHY_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TSDIFF_OE |
+ FSL_XCVR_PHY_CTRL_PHY_EN);
/* PHY: CTRL2_SET: EARC_TX_MODE */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL2_SET,
- FSL_XCVR_PHY_CTRL2_EARC_TXMS, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL2,
+ FSL_XCVR_PHY_CTRL2_EARC_TXMS);
} else { /* SPDIF mode */
/* PHY: CTRL_SET: TX_CLK_AUD_SS | SPDIF_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TX_CLK_AUD_SS |
- FSL_XCVR_PHY_CTRL_SPDIF_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TX_CLK_AUD_SS |
+ FSL_XCVR_PHY_CTRL_SPDIF_EN);
}
dev_dbg(dev, "PLL Fexp: %u\n", freq);
@@ -448,7 +506,7 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
if (xcvr->soc_data->spdif_only && tx) {
- ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL,
FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM,
FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM);
if (ret < 0) {
@@ -466,8 +524,8 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
return ret;
}
- ret = regmap_write(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
- FSL_XCVR_TX_DPTH_CTRL_FRM_FMT);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_FRM_FMT);
if (ret < 0) {
dev_err(dai->dev, "Failed to set TX_DPTH: %d\n", ret);
return ret;
@@ -484,11 +542,11 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
* Clear RX FIFO, flip RX FIFO bits,
* disable eARC related HW mode detects
*/
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_SET,
- FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
- FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO |
- FSL_XCVR_RX_DPTH_CTRL_COMP |
- FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
+ FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO |
+ FSL_XCVR_RX_DPTH_CTRL_COMP |
+ FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
if (ret < 0) {
dev_err(dai->dev, "Failed to set RX_DPTH: %d\n", ret);
return ret;
@@ -505,18 +563,18 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
case FSL_XCVR_MODE_EARC:
if (!tx) {
/** Clear RX FIFO, flip RX FIFO bits */
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_SET,
- FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
- FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
+ FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO);
if (ret < 0) {
dev_err(dai->dev, "Failed to set RX_DPTH: %d\n", ret);
return ret;
}
/** Enable eARC related HW mode detects */
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_CLR,
- FSL_XCVR_RX_DPTH_CTRL_COMP |
- FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
+ ret = regmap_clear_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_COMP |
+ FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
if (ret < 0) {
dev_err(dai->dev, "Failed to clr TX_DPTH: %d\n", ret);
return ret;
@@ -585,8 +643,12 @@ static int fsl_xcvr_startup(struct snd_pcm_substream *substream,
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
case FSL_XCVR_MODE_ARC:
- ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
- &fsl_xcvr_spdif_rates_constr);
+ if (xcvr->soc_data->spdif_only && tx)
+ ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
+ &xcvr->spdif_constr_rates);
+ else
+ ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
+ &fsl_xcvr_spdif_rates_constr);
break;
case FSL_XCVR_MODE_EARC:
ret = fsl_xcvr_constr(substream, &fsl_xcvr_earc_channels_constr,
@@ -696,9 +758,9 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
}
fallthrough;
case FSL_XCVR_MODE_SPDIF:
- ret = regmap_write(xcvr->regmap,
- FSL_XCVR_TX_DPTH_CTRL_SET,
- FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
+ ret = regmap_set_bits(xcvr->regmap,
+ FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
if (ret < 0) {
dev_err(dai->dev, "Failed to start DATA_TX: %d\n", ret);
goto release_lock;
@@ -754,9 +816,9 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
if (tx) {
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
- ret = regmap_write(xcvr->regmap,
- FSL_XCVR_TX_DPTH_CTRL_CLR,
- FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
+ ret = regmap_clear_bits(xcvr->regmap,
+ FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
if (ret < 0) {
dev_err(dai->dev, "Failed to stop DATA_TX: %d\n", ret);
goto release_lock;
@@ -1169,6 +1231,7 @@ static bool fsl_xcvr_writeable_reg(struct device *dev, unsigned int reg)
case FSL_XCVR_RX_DPTH_CNTR_CTRL_SET:
case FSL_XCVR_RX_DPTH_CNTR_CTRL_CLR:
case FSL_XCVR_RX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_TX_DPTH_CTRL:
case FSL_XCVR_TX_DPTH_CTRL_SET:
case FSL_XCVR_TX_DPTH_CTRL_CLR:
case FSL_XCVR_TX_DPTH_CTRL_TOG:
@@ -1190,7 +1253,49 @@ static bool fsl_xcvr_writeable_reg(struct device *dev, unsigned int reg)
static bool fsl_xcvr_volatile_reg(struct device *dev, unsigned int reg)
{
- return fsl_xcvr_readable_reg(dev, reg);
+ switch (reg) {
+ case FSL_XCVR_EXT_STATUS:
+ case FSL_XCVR_EXT_ISR:
+ case FSL_XCVR_EXT_ISR_SET:
+ case FSL_XCVR_EXT_ISR_CLR:
+ case FSL_XCVR_EXT_ISR_TOG:
+ case FSL_XCVR_ISR:
+ case FSL_XCVR_ISR_SET:
+ case FSL_XCVR_ISR_CLR:
+ case FSL_XCVR_ISR_TOG:
+ case FSL_XCVR_PHY_AI_CTRL:
+ case FSL_XCVR_PHY_AI_CTRL_SET:
+ case FSL_XCVR_PHY_AI_CTRL_CLR:
+ case FSL_XCVR_PHY_AI_CTRL_TOG:
+ case FSL_XCVR_PHY_AI_RDATA:
+ case FSL_XCVR_RX_CS_DATA_0:
+ case FSL_XCVR_RX_CS_DATA_1:
+ case FSL_XCVR_RX_CS_DATA_2:
+ case FSL_XCVR_RX_CS_DATA_3:
+ case FSL_XCVR_RX_CS_DATA_4:
+ case FSL_XCVR_RX_CS_DATA_5:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_SET:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_CLR:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_RX_DPTH_TSCR:
+ case FSL_XCVR_RX_DPTH_BCR:
+ case FSL_XCVR_RX_DPTH_BCTR:
+ case FSL_XCVR_RX_DPTH_BCRR:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_SET:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_CLR:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_TX_DPTH_TSCR:
+ case FSL_XCVR_TX_DPTH_BCR:
+ case FSL_XCVR_TX_DPTH_BCTR:
+ case FSL_XCVR_TX_DPTH_BCRR:
+ case FSL_XCVR_DEBUG_REG_0:
+ case FSL_XCVR_DEBUG_REG_1:
+ return true;
+ default:
+ return false;
+ }
}
static const struct regmap_config fsl_xcvr_regmap_cfg = {
@@ -1206,6 +1311,49 @@ static const struct regmap_config fsl_xcvr_regmap_cfg = {
.cache_type = REGCACHE_FLAT,
};
+static const struct reg_default fsl_xcvr_phy_reg_defaults[] = {
+ { FSL_XCVR_PHY_CTRL, 0x58200804 },
+ { FSL_XCVR_PHY_STATUS, 0x00000000 },
+ { FSL_XCVR_PHY_ANALOG_TRIM, 0x00260F13 },
+ { FSL_XCVR_PHY_SLEW_RATE_TRIM, 0x00000411 },
+ { FSL_XCVR_PHY_DATA_TEST_DELAY, 0x00990000 },
+ { FSL_XCVR_PHY_TEST_CTRL, 0x00000000 },
+ { FSL_XCVR_PHY_DIFF_CDR_CTRL, 0x016D0009 },
+ { FSL_XCVR_PHY_CTRL2, 0x80000000 },
+};
+
+static const struct regmap_config fsl_xcvr_regmap_phy_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_PHY_CTRL2_TOG,
+ .reg_defaults = fsl_xcvr_phy_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(fsl_xcvr_phy_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_phy_reg_read,
+ .reg_write = fsl_xcvr_phy_reg_write,
+};
+
+static const struct regmap_config fsl_xcvr_regmap_pllv0_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_PLL_STAT0_TOG,
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_pll_reg_read,
+ .reg_write = fsl_xcvr_pll_reg_write,
+};
+
+static const struct regmap_config fsl_xcvr_regmap_pllv1_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_GP_PLL_STATUS_TOG,
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_pll_reg_read,
+ .reg_write = fsl_xcvr_pll_reg_write,
+};
+
static void reset_rx_work(struct work_struct *work)
{
struct fsl_xcvr *xcvr = container_of(work, struct fsl_xcvr, work_rst);
@@ -1405,6 +1553,15 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
fsl_asoc_get_pll_clocks(dev, &xcvr->pll8k_clk,
&xcvr->pll11k_clk);
+ if (xcvr->soc_data->spdif_only) {
+ if (!(xcvr->pll8k_clk || xcvr->pll11k_clk))
+ xcvr->pll8k_clk = xcvr->phy_clk;
+ fsl_asoc_constrain_rates(&xcvr->spdif_constr_rates,
+ &fsl_xcvr_spdif_rates_constr,
+ xcvr->pll8k_clk, xcvr->pll11k_clk, NULL,
+ xcvr->spdif_constr_rates_list);
+ }
+
xcvr->ram_addr = devm_platform_ioremap_resource_byname(pdev, "ram");
if (IS_ERR(xcvr->ram_addr))
return PTR_ERR(xcvr->ram_addr);
@@ -1421,6 +1578,40 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
return PTR_ERR(xcvr->regmap);
}
+ if (xcvr->soc_data->use_phy) {
+ xcvr->regmap_phy = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_phy_cfg);
+ if (IS_ERR(xcvr->regmap_phy)) {
+ dev_err(dev, "failed to init XCVR PHY regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_phy));
+ return PTR_ERR(xcvr->regmap_phy);
+ }
+
+ switch (xcvr->soc_data->pll_ver) {
+ case PLL_MX8MP:
+ xcvr->regmap_pll = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_pllv0_cfg);
+ if (IS_ERR(xcvr->regmap_pll)) {
+ dev_err(dev, "failed to init XCVR PLL regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_pll));
+ return PTR_ERR(xcvr->regmap_pll);
+ }
+ break;
+ case PLL_MX95:
+ xcvr->regmap_pll = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_pllv1_cfg);
+ if (IS_ERR(xcvr->regmap_pll)) {
+ dev_err(dev, "failed to init XCVR PLL regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_pll));
+ return PTR_ERR(xcvr->regmap_pll);
+ }
+ break;
+ default:
+ dev_err(dev, "Error for PLL version %d\n", xcvr->soc_data->pll_ver);
+ return -EINVAL;
+ }
+ }
+
xcvr->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(xcvr->reset)) {
dev_err(dev, "failed to get XCVR reset control\n");
@@ -1454,6 +1645,10 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xcvr);
pm_runtime_enable(dev);
regcache_cache_only(xcvr->regmap, true);
+ if (xcvr->soc_data->use_phy) {
+ regcache_cache_only(xcvr->regmap_phy, true);
+ regcache_cache_only(xcvr->regmap_pll, true);
+ }
/*
* Register platform component before registering cpu dai for there
@@ -1492,7 +1687,8 @@ static int fsl_xcvr_runtime_suspend(struct device *dev)
struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
int ret;
- if (!xcvr->soc_data->spdif_only) {
+ if (!xcvr->soc_data->spdif_only &&
+ xcvr->mode == FSL_XCVR_MODE_EARC) {
/* Assert M0+ reset */
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
FSL_XCVR_EXT_CTRL_CORE_RESET,
@@ -1502,6 +1698,10 @@ static int fsl_xcvr_runtime_suspend(struct device *dev)
}
regcache_cache_only(xcvr->regmap, true);
+ if (xcvr->soc_data->use_phy) {
+ regcache_cache_only(xcvr->regmap_phy, true);
+ regcache_cache_only(xcvr->regmap_pll, true);
+ }
clk_disable_unprepare(xcvr->spba_clk);
clk_disable_unprepare(xcvr->phy_clk);
@@ -1546,6 +1746,12 @@ static int fsl_xcvr_runtime_resume(struct device *dev)
goto stop_phy_clk;
}
+ ret = reset_control_deassert(xcvr->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert M0+ reset.\n");
+ goto stop_spba_clk;
+ }
+
regcache_cache_only(xcvr->regmap, false);
regcache_mark_dirty(xcvr->regmap);
ret = regcache_sync(xcvr->regmap);
@@ -1555,31 +1761,49 @@ static int fsl_xcvr_runtime_resume(struct device *dev)
goto stop_spba_clk;
}
- if (xcvr->soc_data->spdif_only)
- return 0;
+ if (xcvr->soc_data->use_phy) {
+ ret = regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET,
+ FSL_XCVR_PHY_AI_CTRL_AI_RESETN);
+ if (ret < 0) {
+ dev_err(dev, "Error while release PHY reset: %d\n", ret);
+ goto stop_spba_clk;
+ }
- ret = reset_control_deassert(xcvr->reset);
- if (ret) {
- dev_err(dev, "failed to deassert M0+ reset.\n");
- goto stop_spba_clk;
- }
+ regcache_cache_only(xcvr->regmap_phy, false);
+ regcache_mark_dirty(xcvr->regmap_phy);
+ ret = regcache_sync(xcvr->regmap_phy);
+ if (ret) {
+ dev_err(dev, "failed to sync phy regcache.\n");
+ goto stop_spba_clk;
+ }
- ret = fsl_xcvr_load_firmware(xcvr);
- if (ret) {
- dev_err(dev, "failed to load firmware.\n");
- goto stop_spba_clk;
+ regcache_cache_only(xcvr->regmap_pll, false);
+ regcache_mark_dirty(xcvr->regmap_pll);
+ ret = regcache_sync(xcvr->regmap_pll);
+ if (ret) {
+ dev_err(dev, "failed to sync pll regcache.\n");
+ goto stop_spba_clk;
+ }
}
- /* Release M0+ reset */
- ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
- FSL_XCVR_EXT_CTRL_CORE_RESET, 0);
- if (ret < 0) {
- dev_err(dev, "M0+ core release failed: %d\n", ret);
- goto stop_spba_clk;
- }
+ if (xcvr->mode == FSL_XCVR_MODE_EARC) {
+ ret = fsl_xcvr_load_firmware(xcvr);
+ if (ret) {
+ dev_err(dev, "failed to load firmware.\n");
+ goto stop_spba_clk;
+ }
+
+ /* Release M0+ reset */
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+ FSL_XCVR_EXT_CTRL_CORE_RESET, 0);
+ if (ret < 0) {
+ dev_err(dev, "M0+ core release failed: %d\n", ret);
+ goto stop_spba_clk;
+ }
- /* Let M0+ core complete firmware initialization */
- msleep(50);
+ /* Let M0+ core complete firmware initialization */
+ msleep(50);
+ }
return 0;
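The suspend/resume hunks above follow the usual regmap cache pattern for devices that lose register state while powered down, now applied to all three regmaps (IP, PHY, PLL). A generic sketch of the idiom, not specific to this driver:

	/* suspend: hardware is going away, let writes land in the cache only */
	regcache_cache_only(map, true);

	/* resume: re-enable hardware access, then mark the whole cache dirty so
	 * regcache_sync() rewrites every cached register to the freshly powered
	 * (reset-default) hardware
	 */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	ret = regcache_sync(map);
	if (ret)
		dev_err(dev, "failed to sync regcache: %d\n", ret);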
diff --git a/sound/soc/fsl/fsl_xcvr.h b/sound/soc/fsl/fsl_xcvr.h
index c72cb05184df..dade3945cc0c 100644
--- a/sound/soc/fsl/fsl_xcvr.h
+++ b/sound/soc/fsl/fsl_xcvr.h
@@ -234,6 +234,7 @@
#define FSL_XCVR_TX_DPTH_CTRL_TM_NO_PRE_BME GENMASK(31, 30)
#define FSL_XCVR_PHY_AI_CTRL_AI_RESETN BIT(15)
+#define FSL_XCVR_PHY_AI_CTRL_AI_RWB BIT(31)
#define FSL_XCVR_PLL_CTRL0 0x00
#define FSL_XCVR_PLL_CTRL0_SET 0x04
@@ -241,13 +242,25 @@
#define FSL_XCVR_PLL_NUM 0x20
#define FSL_XCVR_PLL_DEN 0x30
#define FSL_XCVR_PLL_PDIV 0x40
+#define FSL_XCVR_PLL_BANDGAP 0x50
#define FSL_XCVR_PLL_BANDGAP_SET 0x54
+#define FSL_XCVR_PLL_STAT0 0x60
+#define FSL_XCVR_PLL_STAT0_TOG 0x6c
+
#define FSL_XCVR_PHY_CTRL 0x00
#define FSL_XCVR_PHY_CTRL_SET 0x04
#define FSL_XCVR_PHY_CTRL_CLR 0x08
+#define FSL_XCVR_PHY_CTRL_TOG 0x0c
+#define FSL_XCVR_PHY_STATUS 0x10
+#define FSL_XCVR_PHY_ANALOG_TRIM 0x20
+#define FSL_XCVR_PHY_SLEW_RATE_TRIM 0x30
+#define FSL_XCVR_PHY_DATA_TEST_DELAY 0x40
+#define FSL_XCVR_PHY_TEST_CTRL 0x50
+#define FSL_XCVR_PHY_DIFF_CDR_CTRL 0x60
#define FSL_XCVR_PHY_CTRL2 0x70
#define FSL_XCVR_PHY_CTRL2_SET 0x74
#define FSL_XCVR_PHY_CTRL2_CLR 0x78
+#define FSL_XCVR_PHY_CTRL2_TOG 0x7c
#define FSL_XCVR_PLL_BANDGAP_EN_VBG BIT(0)
#define FSL_XCVR_PLL_CTRL0_HROFF BIT(13)
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 43e14f2eca8d..cc2918ee2cf5 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -237,7 +237,7 @@ static int imx_audmux_parse_dt_defaults(struct platform_device *pdev,
child);
continue;
}
- if (!of_property_read_bool(child, "fsl,port-config")) {
+ if (!of_property_present(child, "fsl,port-config")) {
dev_warn(&pdev->dev, "child node \"%pOF\" does not have property fsl,port-config\n",
child);
continue;
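This conversion (repeated for imx-card and imx-rpmsg below) swaps of_property_read_bool() for of_property_present(): the former is meant for valueless boolean properties, while these callers only want to know whether a value-carrying property exists before parsing it. A small illustration using the property from the hunk above:

	/* existence check for a property that carries data */
	if (of_property_present(child, "fsl,port-config")) {
		/* the property exists -- go on and parse its cells */
	}

	/* of_property_read_bool() remains the right call for flag-style
	 * properties with no value at all, e.g. "wakeup-source;"
	 */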
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 95a57fda0250..ac043ad367ac 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -529,7 +529,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
}
/* DAPM routes */
- if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ if (of_property_present(dev->of_node, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
if (ret)
return ret;
diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c
index ce98d2288193..7cd3aa4c8706 100644
--- a/sound/soc/fsl/imx-rpmsg.c
+++ b/sound/soc/fsl/imx-rpmsg.c
@@ -218,7 +218,7 @@ static int imx_rpmsg_probe(struct platform_device *pdev)
if (ret)
goto fail;
- if (of_property_read_bool(np, "audio-routing")) {
+ if (of_property_present(np, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 7655425a3deb..7c422535b01a 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -81,18 +81,14 @@ static void graph_parse_convert(struct device *dev,
struct simple_util_data *adata)
{
struct device_node *top = dev->of_node;
- struct device_node *port = ep_to_port(ep);
- struct device_node *ports = port_to_ports(port);
- struct device_node *node = of_graph_get_port_parent(ep);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
simple_util_parse_convert(top, NULL, adata);
simple_util_parse_convert(ports, NULL, adata);
simple_util_parse_convert(port, NULL, adata);
simple_util_parse_convert(ep, NULL, adata);
-
- of_node_put(port);
- of_node_put(ports);
- of_node_put(node);
}
static int graph_parse_node(struct simple_util_priv *priv,
@@ -140,10 +136,10 @@ static int graph_link_init(struct simple_util_priv *priv,
struct device_node *top = dev->of_node;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct device_node *port_cpu = ep_to_port(ep_cpu);
- struct device_node *port_codec = ep_to_port(ep_codec);
- struct device_node *ports_cpu = port_to_ports(port_cpu);
- struct device_node *ports_codec = port_to_ports(port_codec);
+ struct device_node *port_cpu __free(device_node) = ep_to_port(ep_cpu);
+ struct device_node *port_codec __free(device_node) = ep_to_port(ep_codec);
+ struct device_node *ports_cpu __free(device_node) = port_to_ports(port_cpu);
+ struct device_node *ports_codec __free(device_node) = port_to_ports(port_codec);
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
bool playback_only = 0, capture_only = 0;
@@ -152,7 +148,7 @@ static int graph_link_init(struct simple_util_priv *priv,
ret = simple_util_parse_daifmt(dev, ep_cpu, ep_codec,
NULL, &dai_link->dai_fmt);
if (ret < 0)
- goto init_end;
+ return ret;
graph_util_parse_link_direction(top, &playback_only, &capture_only);
graph_util_parse_link_direction(port_cpu, &playback_only, &capture_only);
@@ -187,14 +183,7 @@ static int graph_link_init(struct simple_util_priv *priv,
if (priv->ops)
dai_link->ops = priv->ops;
- ret = simple_util_set_dailink_name(dev, dai_link, name);
-init_end:
- of_node_put(ports_cpu);
- of_node_put(ports_codec);
- of_node_put(port_cpu);
- of_node_put(port_codec);
-
- return ret;
+ return simple_util_set_dailink_name(dev, dai_link, name);
}
static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
@@ -250,8 +239,6 @@ static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
} else {
struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, 0);
- struct device_node *port;
- struct device_node *ports;
/* CPU is dummy */
@@ -267,14 +254,12 @@ static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
"be.%pOFP.%s", codecs->of_node, codecs->dai_name);
/* check "prefix" from top node */
- port = ep_to_port(ep);
- ports = port_to_ports(port);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node, "prefix");
-
- of_node_put(ports);
- of_node_put(port);
}
graph_parse_convert(dev, ep, &dai_props->adata);
@@ -361,8 +346,6 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct device_node *node = dev->of_node;
struct device_node *cpu_port;
- struct device_node *codec_ep;
- struct device_node *codec_port;
struct device_node *codec_port_old = NULL;
struct simple_util_data adata;
int rc, ret = 0;
@@ -374,8 +357,8 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
/* loop for all CPU endpoint */
for_each_of_graph_port_endpoint(cpu_port, cpu_ep) {
/* get codec */
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = ep_to_port(codec_ep);
+ struct device_node *codec_ep __free(device_node) = of_graph_get_remote_endpoint(cpu_ep);
+ struct device_node *codec_port __free(device_node) = ep_to_port(codec_ep);
/* get convert-xxx property */
memset(&adata, 0, sizeof(adata));
@@ -399,9 +382,6 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
ret = func_noml(priv, cpu_ep, codec_ep, li);
}
- of_node_put(codec_ep);
- of_node_put(codec_port);
-
if (ret < 0)
return ret;
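The audio-graph and simple-card changes in this series lean on the scope-based cleanup helpers from <linux/cleanup.h>; of.h defines (roughly) DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T)), so any pointer declared with __free(device_node) has of_node_put() run automatically when it leaves scope, on every return path. A minimal sketch, independent of these drivers:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_parse(struct device_node *ep)
{
	struct device_node *port __free(device_node) = of_get_parent(ep);

	if (!port)
		return -EINVAL;	/* no explicit of_node_put() needed here either */

	/* ... use port ... */
	return 0;		/* of_node_put(port) runs here automatically */
}

This is why the error paths above can drop their goto/of_node_put() ladders and simply return.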
diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
index 1f5c4e8ff1b9..ee94b256b770 100644
--- a/sound/soc/generic/audio-graph-card2.c
+++ b/sound/soc/generic/audio-graph-card2.c
@@ -331,10 +331,9 @@ static int graph_lnk_is_multi(struct device_node *lnk)
return __graph_get_type(lnk) == GRAPH_MULTI;
}
-static struct device_node *graph_get_next_multi_ep(struct device_node **port)
+static struct device_node *graph_get_next_multi_ep(struct device_node **port, int idx)
{
- struct device_node *ports = port_to_ports(*port);
- struct device_node *ep = NULL;
+ struct device_node *ports __free(device_node) = port_to_ports(*port);
struct device_node *rep = NULL;
/*
@@ -352,15 +351,22 @@ static struct device_node *graph_get_next_multi_ep(struct device_node **port)
* port@1 { rep1 };
* };
*/
- *port = of_graph_get_next_port(ports, *port);
+
+ /*
+ * Don't use of_graph_get_next_port() here
+ *
+ * In the overlay case, the "port" nodes are not necessarily in reg order, so we
+ * need to use of_graph_get_port_by_id() instead.
+ */
+ of_node_put(*port);
+
+ *port = of_graph_get_port_by_id(ports, idx);
if (*port) {
- ep = of_graph_get_next_port_endpoint(*port, NULL);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(*port, NULL);
+
rep = of_graph_get_remote_endpoint(ep);
}
- of_node_put(ep);
- of_node_put(ports);
-
return rep;
}
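The lookup-by-id rework above matters for overlays because the child port nodes under a ports node can be flattened out of reg order, so walking them with of_graph_get_next_port() does not guarantee port@1, port@2, ... sequencing; of_graph_get_port_by_id() resolves a port by its reg value regardless of tree order. A short sketch:

	/* ports {
	 *         port@2 { ... };   <-- an overlay may flatten this before port@1
	 *         port@1 { ... };
	 * };
	 */
	struct device_node *p1 __free(device_node) = of_graph_get_port_by_id(ports, 1);
	/* p1 is port@1 (reg = <1>) no matter where it appears in the tree */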
@@ -373,16 +379,13 @@ static const struct snd_soc_ops graph_ops = {
static void graph_parse_convert(struct device_node *ep,
struct simple_dai_props *props)
{
- struct device_node *port = ep_to_port(ep);
- struct device_node *ports = port_to_ports(port);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
struct simple_util_data *adata = &props->adata;
simple_util_parse_convert(ports, NULL, adata);
simple_util_parse_convert(port, NULL, adata);
simple_util_parse_convert(ep, NULL, adata);
-
- of_node_put(port);
- of_node_put(ports);
}
static int __graph_parse_node(struct simple_util_priv *priv,
@@ -471,14 +474,11 @@ static int __graph_parse_node(struct simple_util_priv *priv,
if (!is_cpu && gtype == GRAPH_DPCM) {
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, idx);
struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, idx);
- struct device_node *rport = ep_to_port(ep);
- struct device_node *rports = port_to_ports(rport);
+ struct device_node *rport __free(device_node) = ep_to_port(ep);
+ struct device_node *rports __free(device_node) = port_to_ports(rport);
snd_soc_of_parse_node_prefix(rports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(rport, cconf, codecs->of_node, "prefix");
-
- of_node_put(rport);
- of_node_put(rports);
}
if (is_cpu) {
@@ -526,25 +526,21 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
* };
* };
*/
- struct device_node *mcpu_ep = of_graph_get_next_port_endpoint(mcpu_port, NULL);
- struct device_node *mcpu_ports = port_to_ports(mcpu_port);
- struct device_node *mcpu_port_top = of_graph_get_next_port(mcpu_ports, NULL);
- struct device_node *mcpu_ep_top = of_graph_get_next_port_endpoint(mcpu_port_top, NULL);
- struct device_node *mcodec_ep_top = of_graph_get_remote_endpoint(mcpu_ep_top);
- struct device_node *mcodec_port_top = ep_to_port(mcodec_ep_top);
- struct device_node *mcodec_ports = port_to_ports(mcodec_port_top);
+ struct device_node *mcpu_ep __free(device_node) = of_graph_get_next_port_endpoint(mcpu_port, NULL);
+ struct device_node *mcpu_ports __free(device_node) = port_to_ports(mcpu_port);
+ struct device_node *mcpu_port_top __free(device_node) = of_graph_get_next_port(mcpu_ports, NULL);
+ struct device_node *mcpu_ep_top __free(device_node) = of_graph_get_next_port_endpoint(mcpu_port_top, NULL);
+ struct device_node *mcodec_ep_top __free(device_node) = of_graph_get_remote_endpoint(mcpu_ep_top);
+ struct device_node *mcodec_port_top __free(device_node) = ep_to_port(mcodec_ep_top);
+ struct device_node *mcodec_ports __free(device_node) = port_to_ports(mcodec_port_top);
int nm_max = max(dai_link->num_cpus, dai_link->num_codecs);
int ret = 0;
- if (cpu_idx > dai_link->num_cpus) {
- ret = -EINVAL;
- goto mcpu_err;
- }
+ if (cpu_idx > dai_link->num_cpus)
+ return -EINVAL;
for_each_of_graph_port_endpoint(mcpu_port, mcpu_ep_n) {
- struct device_node *mcodec_ep_n;
- struct device_node *mcodec_port;
- int codec_idx;
+ int codec_idx = 0;
/* ignore 1st ep which is for element */
if (mcpu_ep_n == mcpu_ep)
@@ -553,16 +549,13 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
if (*nm_idx > nm_max)
break;
- mcodec_ep_n = of_graph_get_remote_endpoint(mcpu_ep_n);
- mcodec_port = ep_to_port(mcodec_ep_n);
+ struct device_node *mcodec_ep_n __free(device_node) = of_graph_get_remote_endpoint(mcpu_ep_n);
+ struct device_node *mcodec_port __free(device_node) = ep_to_port(mcodec_ep_n);
- if (mcodec_ports != port_to_ports(mcodec_port)) {
- ret = -EINVAL;
- goto mcpu_err;
- }
-
- codec_idx = 0;
ret = -EINVAL;
+ if (mcodec_ports != port_to_ports(mcodec_port))
+ break;
+
for_each_of_graph_port(mcodec_ports, mcodec_port_i) {
/* ignore 1st port which is for pair connection */
@@ -582,18 +575,9 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
}
codec_idx++;
}
- of_node_put(mcodec_port);
- of_node_put(mcodec_ep_n);
if (ret < 0)
break;
}
-mcpu_err:
- of_node_put(mcpu_ep);
- of_node_put(mcpu_port_top);
- of_node_put(mcpu_ep_top);
- of_node_put(mcodec_ep_top);
- of_node_put(mcodec_port_top);
- of_node_put(mcodec_ports);
return ret;
}
@@ -605,7 +589,6 @@ static int graph_parse_node_multi(struct simple_util_priv *priv,
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct device *dev = simple_priv_to_dev(priv);
- struct device_node *ep;
int ret = -ENOMEM;
int nm_idx = 0;
int nm_max = max(dai_link->num_cpus, dai_link->num_codecs);
@@ -640,12 +623,11 @@ static int graph_parse_node_multi(struct simple_util_priv *priv,
* };
* };
*/
- ep = graph_get_next_multi_ep(&port);
+ struct device_node *ep __free(device_node) = graph_get_next_multi_ep(&port, idx + 1);
if (!ep)
break;
ret = __graph_parse_node(priv, gtype, ep, li, is_cpu, idx);
- of_node_put(ep);
if (ret < 0)
goto multi_err;
@@ -666,30 +648,26 @@ multi_err:
static int graph_parse_node_single(struct simple_util_priv *priv,
enum graph_type gtype,
- struct device_node *port,
+ struct device_node *ep,
struct link_info *li, int is_cpu)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(port, NULL);
- int ret = __graph_parse_node(priv, gtype, ep, li, is_cpu, 0);
-
- of_node_put(ep);
-
- return ret;
+ return __graph_parse_node(priv, gtype, ep, li, is_cpu, 0);
}
static int graph_parse_node(struct simple_util_priv *priv,
enum graph_type gtype,
- struct device_node *port,
+ struct device_node *ep,
struct link_info *li, int is_cpu)
{
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+
if (graph_lnk_is_multi(port))
return graph_parse_node_multi(priv, gtype, port, li, is_cpu);
else
- return graph_parse_node_single(priv, gtype, port, li, is_cpu);
+ return graph_parse_node_single(priv, gtype, ep, li, is_cpu);
}
-static void graph_parse_daifmt(struct device_node *node,
- unsigned int *daifmt, unsigned int *bit_frame)
+static void graph_parse_daifmt(struct device_node *node, unsigned int *daifmt)
{
unsigned int fmt;
@@ -714,16 +692,6 @@ static void graph_parse_daifmt(struct device_node *node,
* };
*/
- /*
- * clock_provider:
- *
- * It can be judged it is provider
- * if (A) or (B) or (C) has bitclock-master / frame-master flag.
- *
- * use "or"
- */
- *bit_frame |= snd_soc_daifmt_parse_clock_provider_as_bitmap(node, NULL);
-
#define update_daifmt(name) \
if (!(*daifmt & SND_SOC_DAIFMT_##name##_MASK) && \
(fmt & SND_SOC_DAIFMT_##name##_MASK)) \
@@ -741,51 +709,64 @@ static void graph_parse_daifmt(struct device_node *node,
update_daifmt(INV);
}
+static unsigned int graph_parse_bitframe(struct device_node *ep)
+{
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+
+ return snd_soc_daifmt_clock_provider_from_bitmap(
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(ep, NULL) |
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(port, NULL) |
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(ports, NULL));
+}
+
static void graph_link_init(struct simple_util_priv *priv,
struct device_node *lnk,
- struct device_node *port_cpu,
- struct device_node *port_codec,
+ struct device_node *ep_cpu,
+ struct device_node *ep_codec,
struct link_info *li,
int is_cpu_node)
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct device_node *ep_cpu, *ep_codec;
- struct device_node *ports_cpu, *ports_codec;
- unsigned int daifmt = 0, daiclk = 0;
+ struct device_node *port_cpu = ep_to_port(ep_cpu);
+ struct device_node *port_codec = ep_to_port(ep_codec);
+ struct device_node *multi_cpu_port = NULL, *multi_codec_port = NULL;
+ struct snd_soc_dai_link_component *dlc;
+ unsigned int daifmt = 0;
bool playback_only = 0, capture_only = 0;
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
- unsigned int bit_frame = 0;
+ int multi_cpu_port_idx = 1, multi_codec_port_idx = 1;
+ int i;
- of_node_get(port_cpu);
if (graph_lnk_is_multi(port_cpu)) {
- ep_cpu = graph_get_next_multi_ep(&port_cpu);
+ multi_cpu_port = port_cpu;
+ ep_cpu = graph_get_next_multi_ep(&multi_cpu_port, multi_cpu_port_idx++);
of_node_put(port_cpu);
port_cpu = ep_to_port(ep_cpu);
} else {
- ep_cpu = of_graph_get_next_port_endpoint(port_cpu, NULL);
+ of_node_get(ep_cpu);
}
- ports_cpu = port_to_ports(port_cpu);
+ struct device_node *ports_cpu __free(device_node) = port_to_ports(port_cpu);
- of_node_get(port_codec);
if (graph_lnk_is_multi(port_codec)) {
- ep_codec = graph_get_next_multi_ep(&port_codec);
+ multi_codec_port = port_codec;
+ ep_codec = graph_get_next_multi_ep(&multi_codec_port, multi_codec_port_idx++);
of_node_put(port_codec);
port_codec = ep_to_port(ep_codec);
} else {
- ep_codec = of_graph_get_next_port_endpoint(port_codec, NULL);
+ of_node_get(ep_codec);
}
- ports_codec = port_to_ports(port_codec);
+ struct device_node *ports_codec __free(device_node) = port_to_ports(port_codec);
-
- graph_parse_daifmt(ep_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(ep_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(port_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(port_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(ports_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(ports_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(lnk, &daifmt, &bit_frame);
+ graph_parse_daifmt(ep_cpu, &daifmt);
+ graph_parse_daifmt(ep_codec, &daifmt);
+ graph_parse_daifmt(port_cpu, &daifmt);
+ graph_parse_daifmt(port_codec, &daifmt);
+ graph_parse_daifmt(ports_cpu, &daifmt);
+ graph_parse_daifmt(ports_codec, &daifmt);
+ graph_parse_daifmt(lnk, &daifmt);
graph_util_parse_link_direction(lnk, &playback_only, &capture_only);
graph_util_parse_link_direction(ports_cpu, &playback_only, &capture_only);
@@ -811,14 +792,21 @@ static void graph_link_init(struct simple_util_priv *priv,
graph_util_parse_trigger_order(priv, ep_cpu, &trigger_start, &trigger_stop);
graph_util_parse_trigger_order(priv, ep_codec, &trigger_start, &trigger_stop);
- /*
- * convert bit_frame
- * We need to flip clock_provider if it was CPU node,
- * because it is Codec base.
- */
- daiclk = snd_soc_daifmt_clock_provider_from_bitmap(bit_frame);
- if (is_cpu_node)
- daiclk = snd_soc_daifmt_clock_provider_flipped(daiclk);
+ for_each_link_cpus(dai_link, i, dlc) {
+ dlc->ext_fmt = graph_parse_bitframe(ep_cpu);
+
+ if (multi_cpu_port)
+ ep_cpu = graph_get_next_multi_ep(&multi_cpu_port, multi_cpu_port_idx++);
+ }
+
+ for_each_link_codecs(dai_link, i, dlc) {
+ dlc->ext_fmt = graph_parse_bitframe(ep_codec);
+
+ if (multi_codec_port)
+ ep_codec = graph_get_next_multi_ep(&multi_codec_port, multi_codec_port_idx++);
+ }
+
+ /*** Don't use port_cpu / port_codec after here ***/
dai_link->playback_only = playback_only;
dai_link->capture_only = capture_only;
@@ -826,14 +814,12 @@ static void graph_link_init(struct simple_util_priv *priv,
dai_link->trigger_start = trigger_start;
dai_link->trigger_stop = trigger_stop;
- dai_link->dai_fmt = daifmt | daiclk;
+ dai_link->dai_fmt = daifmt;
dai_link->init = simple_util_dai_init;
dai_link->ops = &graph_ops;
if (priv->ops)
dai_link->ops = priv->ops;
- of_node_put(ports_cpu);
- of_node_put(ports_codec);
of_node_put(port_cpu);
of_node_put(port_codec);
of_node_put(ep_cpu);
@@ -845,8 +831,8 @@ int audio_graph2_link_normal(struct simple_util_priv *priv,
struct link_info *li)
{
struct device_node *cpu_port = lnk;
- struct device_node *cpu_ep = of_graph_get_next_port_endpoint(cpu_port, NULL);
- struct device_node *codec_port = of_graph_get_remote_port(cpu_ep);
+ struct device_node *cpu_ep __free(device_node) = of_graph_get_next_port_endpoint(cpu_port, NULL);
+ struct device_node *codec_ep __free(device_node) = of_graph_get_remote_endpoint(cpu_ep);
int ret;
/*
@@ -854,21 +840,18 @@ int audio_graph2_link_normal(struct simple_util_priv *priv,
* see
* __graph_parse_node() :: DAI Naming
*/
- ret = graph_parse_node(priv, GRAPH_NORMAL, codec_port, li, 0);
+ ret = graph_parse_node(priv, GRAPH_NORMAL, codec_ep, li, 0);
if (ret < 0)
- goto err;
+ return ret;
/*
* call CPU, and set DAI Name
*/
- ret = graph_parse_node(priv, GRAPH_NORMAL, cpu_port, li, 1);
+ ret = graph_parse_node(priv, GRAPH_NORMAL, cpu_ep, li, 1);
if (ret < 0)
- goto err;
+ return ret;
- graph_link_init(priv, lnk, cpu_port, codec_port, li, 1);
-err:
- of_node_put(codec_port);
- of_node_put(cpu_ep);
+ graph_link_init(priv, lnk, cpu_ep, codec_ep, li, 1);
return ret;
}
@@ -878,17 +861,17 @@ int audio_graph2_link_dpcm(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(lnk, NULL);
- struct device_node *rep = of_graph_get_remote_endpoint(ep);
- struct device_node *cpu_port = NULL;
- struct device_node *codec_port = NULL;
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(lnk, NULL);
+ struct device_node *rep __free(device_node) = of_graph_get_remote_endpoint(ep);
+ struct device_node *cpu_ep = NULL;
+ struct device_node *codec_ep = NULL;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
int is_cpu = graph_util_is_ports0(lnk);
int ret;
if (is_cpu) {
- cpu_port = of_graph_get_remote_port(ep); /* rport */
+ cpu_ep = rep;
/*
* dpcm {
@@ -917,12 +900,12 @@ int audio_graph2_link_dpcm(struct simple_util_priv *priv,
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
- ret = graph_parse_node(priv, GRAPH_DPCM, cpu_port, li, 1);
+ ret = graph_parse_node(priv, GRAPH_DPCM, cpu_ep, li, 1);
if (ret)
- goto err;
+ return ret;
} else {
- codec_port = of_graph_get_remote_port(ep); /* rport */
+ codec_ep = rep;
/*
* dpcm {
@@ -953,20 +936,15 @@ int audio_graph2_link_dpcm(struct simple_util_priv *priv,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = simple_util_be_hw_params_fixup;
- ret = graph_parse_node(priv, GRAPH_DPCM, codec_port, li, 0);
+ ret = graph_parse_node(priv, GRAPH_DPCM, codec_ep, li, 0);
if (ret < 0)
- goto err;
+ return ret;
}
graph_parse_convert(ep, dai_props); /* at node of <dpcm> */
graph_parse_convert(rep, dai_props); /* at node of <CPU/Codec> */
- graph_link_init(priv, lnk, cpu_port, codec_port, li, is_cpu);
-err:
- of_node_put(ep);
- of_node_put(rep);
- of_node_put(cpu_port);
- of_node_put(codec_port);
+ graph_link_init(priv, lnk, cpu_ep, codec_ep, li, is_cpu);
return ret;
}
@@ -977,9 +955,9 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
struct link_info *li)
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
- struct device_node *port0, *port1, *ports;
- struct device_node *codec0_port, *codec1_port;
- struct device_node *ep0, *ep1;
+ struct device_node *port0 = lnk;
+ struct device_node *ports __free(device_node) = port_to_ports(port0);
+ struct device_node *port1 __free(device_node) = of_graph_get_next_port(ports, port0);
u32 val = 0;
int ret = -EINVAL;
@@ -999,10 +977,6 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
* };
* };
*/
- of_node_get(lnk);
- port0 = lnk;
- ports = port_to_ports(port0);
- port1 = of_graph_get_next_port(ports, port0);
/*
* Card2 can use original Codec2Codec settings if DT has.
@@ -1019,7 +993,7 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
c2c_conf = devm_kzalloc(dev, sizeof(*c2c_conf), GFP_KERNEL);
if (!c2c_conf)
- goto err1;
+ return ret;
c2c_conf->formats = SNDRV_PCM_FMTBIT_S32_LE; /* update ME */
c2c_conf->rates = SNDRV_PCM_RATE_8000_384000;
@@ -1032,38 +1006,29 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
dai_link->num_c2c_params = 1;
}
- ep0 = of_graph_get_next_port_endpoint(port0, NULL);
- ep1 = of_graph_get_next_port_endpoint(port1, NULL);
+ struct device_node *ep0 __free(device_node) = of_graph_get_next_port_endpoint(port0, NULL);
+ struct device_node *ep1 __free(device_node) = of_graph_get_next_port_endpoint(port1, NULL);
- codec0_port = of_graph_get_remote_port(ep0);
- codec1_port = of_graph_get_remote_port(ep1);
+ struct device_node *codec0_ep __free(device_node) = of_graph_get_remote_endpoint(ep0);
+ struct device_node *codec1_ep __free(device_node) = of_graph_get_remote_endpoint(ep1);
/*
* call Codec first.
* see
* __graph_parse_node() :: DAI Naming
*/
- ret = graph_parse_node(priv, GRAPH_C2C, codec1_port, li, 0);
+ ret = graph_parse_node(priv, GRAPH_C2C, codec1_ep, li, 0);
if (ret < 0)
- goto err2;
+ return ret;
/*
* call CPU, and set DAI Name
*/
- ret = graph_parse_node(priv, GRAPH_C2C, codec0_port, li, 1);
+ ret = graph_parse_node(priv, GRAPH_C2C, codec0_ep, li, 1);
if (ret < 0)
- goto err2;
-
- graph_link_init(priv, lnk, codec0_port, codec1_port, li, 1);
-err2:
- of_node_put(ep0);
- of_node_put(ep1);
- of_node_put(codec0_port);
- of_node_put(codec1_port);
-err1:
- of_node_put(ports);
- of_node_put(port0);
- of_node_put(port1);
+ return ret;
+
+ graph_link_init(priv, lnk, codec0_ep, codec1_ep, li, 1);
return ret;
}
@@ -1153,8 +1118,8 @@ static int graph_count_normal(struct simple_util_priv *priv,
struct link_info *li)
{
struct device_node *cpu_port = lnk;
- struct device_node *cpu_ep = of_graph_get_next_port_endpoint(cpu_port, NULL);
- struct device_node *codec_port = of_graph_get_remote_port(cpu_ep);
+ struct device_node *cpu_ep __free(device_node) = of_graph_get_next_port_endpoint(cpu_port, NULL);
+ struct device_node *codec_port __free(device_node) = of_graph_get_remote_port(cpu_ep);
/*
* CPU {
@@ -1171,9 +1136,6 @@ static int graph_count_normal(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(codec_port);
- of_node_put(cpu_ep);
- of_node_put(codec_port);
-
return 0;
}
@@ -1181,8 +1143,8 @@ static int graph_count_dpcm(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(lnk, NULL);
- struct device_node *rport = of_graph_get_remote_port(ep);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(lnk, NULL);
+ struct device_node *rport __free(device_node) = of_graph_get_remote_port(ep);
/*
* dpcm {
@@ -1211,9 +1173,6 @@ static int graph_count_dpcm(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(rport); /* BE */
}
- of_node_put(ep);
- of_node_put(rport);
-
return 0;
}
@@ -1221,13 +1180,13 @@ static int graph_count_c2c(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ports = port_to_ports(lnk);
- struct device_node *port0 = lnk;
- struct device_node *port1 = of_graph_get_next_port(ports, of_node_get(port0));
- struct device_node *ep0 = of_graph_get_next_port_endpoint(port0, NULL);
- struct device_node *ep1 = of_graph_get_next_port_endpoint(port1, NULL);
- struct device_node *codec0 = of_graph_get_remote_port(ep0);
- struct device_node *codec1 = of_graph_get_remote_port(ep1);
+ struct device_node *ports __free(device_node) = port_to_ports(lnk);
+ struct device_node *port0 = of_node_get(lnk);
+ struct device_node *port1 = of_node_get(of_graph_get_next_port(ports, of_node_get(port0)));
+ struct device_node *ep0 __free(device_node) = of_graph_get_next_port_endpoint(port0, NULL);
+ struct device_node *ep1 __free(device_node) = of_graph_get_next_port_endpoint(port1, NULL);
+ struct device_node *codec0 __free(device_node) = of_graph_get_remote_port(ep0);
+ struct device_node *codec1 __free(device_node) = of_graph_get_remote_port(ep1);
/*
* codec2codec {
@@ -1247,13 +1206,6 @@ static int graph_count_c2c(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(codec1);
- of_node_put(ports);
- of_node_put(port1);
- of_node_put(ep0);
- of_node_put(ep1);
- of_node_put(codec0);
- of_node_put(codec1);
-
return 0;
}
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 24b371f32066..dd414634b4ac 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -139,10 +139,9 @@ int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np,
int n, i, ret;
u32 *p;
- if (!of_property_read_bool(np, "dai-tdm-slot-width-map"))
- return 0;
-
n = of_property_count_elems_of_size(np, "dai-tdm-slot-width-map", sizeof(u32));
+ if (n <= 0)
+ return 0;
if (n % 3) {
dev_err(dev, "Invalid number of cells for dai-tdm-slot-width-map\n");
return -EINVAL;
@@ -296,7 +295,7 @@ int simple_util_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct simple_util_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *props = simple_priv_to_props(priv, rtd->id);
+ struct simple_dai_props *props = runtime_simple_priv_to_props(priv, rtd);
struct simple_util_dai *dai;
unsigned int fixed_sysclk = 0;
int i1, i2, i;
@@ -357,7 +356,7 @@ void simple_util_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct simple_util_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *props = simple_priv_to_props(priv, rtd->id);
+ struct simple_dai_props *props = runtime_simple_priv_to_props(priv, rtd);
struct simple_util_dai *dai;
int i;
@@ -365,8 +364,7 @@ void simple_util_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, i);
if (props->mclk_fs && !dai->clk_fixed && !snd_soc_dai_active(cpu_dai))
- snd_soc_dai_set_sysclk(cpu_dai,
- 0, 0, SND_SOC_CLOCK_OUT);
+ snd_soc_dai_set_sysclk(cpu_dai, 0, 0, dai->clk_direction);
simple_clk_disable(dai);
}
@@ -374,8 +372,7 @@ void simple_util_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, i);
if (props->mclk_fs && !dai->clk_fixed && !snd_soc_dai_active(codec_dai))
- snd_soc_dai_set_sysclk(codec_dai,
- 0, 0, SND_SOC_CLOCK_IN);
+ snd_soc_dai_set_sysclk(codec_dai, 0, 0, dai->clk_direction);
simple_clk_disable(dai);
}
@@ -448,7 +445,7 @@ int simple_util_hw_params(struct snd_pcm_substream *substream,
struct simple_util_dai *pdai;
struct snd_soc_dai *sdai;
struct simple_util_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *props = simple_priv_to_props(priv, rtd->id);
+ struct simple_dai_props *props = runtime_simple_priv_to_props(priv, rtd);
unsigned int mclk, mclk_fs = 0;
int i, ret;
@@ -483,13 +480,15 @@ int simple_util_hw_params(struct snd_pcm_substream *substream,
}
for_each_rtd_codec_dais(rtd, i, sdai) {
- ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_IN);
+ pdai = simple_props_to_dai_codec(props, i);
+ ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, pdai->clk_direction);
if (ret && ret != -ENOTSUPP)
return ret;
}
for_each_rtd_cpu_dais(rtd, i, sdai) {
- ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_OUT);
+ pdai = simple_props_to_dai_cpu(props, i);
+ ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, pdai->clk_direction);
if (ret && ret != -ENOTSUPP)
return ret;
}
@@ -517,7 +516,7 @@ int simple_util_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
struct simple_util_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->id);
+ struct simple_dai_props *dai_props = runtime_simple_priv_to_props(priv, rtd);
struct simple_util_data *data = &dai_props->adata;
struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
@@ -628,7 +627,7 @@ static int simple_init_for_codec2codec(struct snd_soc_pcm_runtime *rtd,
int simple_util_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct simple_util_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *props = simple_priv_to_props(priv, rtd->id);
+ struct simple_dai_props *props = runtime_simple_priv_to_props(priv, rtd);
struct simple_util_dai *dai;
int i, ret;
@@ -713,7 +712,7 @@ int simple_util_parse_routing(struct snd_soc_card *card,
snprintf(prop, sizeof(prop), "%s%s", prefix, "routing");
- if (!of_property_read_bool(node, prop))
+ if (!of_property_present(node, prop))
return 0;
return snd_soc_of_parse_audio_routing(card, prop);
@@ -731,7 +730,7 @@ int simple_util_parse_widgets(struct snd_soc_card *card,
snprintf(prop, sizeof(prop), "%s%s", prefix, "widgets");
- if (of_property_read_bool(node, prop))
+ if (of_property_present(node, prop))
return snd_soc_of_parse_audio_simple_widgets(card, prop);
/* no widgets is not error */
@@ -1005,36 +1004,27 @@ EXPORT_SYMBOL_GPL(graph_util_card_probe);
int graph_util_is_ports0(struct device_node *np)
{
- struct device_node *port, *ports, *ports0, *top;
- int ret;
+ struct device_node *parent __free(device_node) = of_get_parent(np);
+ struct device_node *port;
/* np is "endpoint" or "port" */
- if (of_node_name_eq(np, "endpoint")) {
- port = of_get_parent(np);
- } else {
+ if (of_node_name_eq(np, "endpoint"))
+ port = parent;
+ else
port = np;
- of_node_get(port);
- }
-
- ports = of_get_parent(port);
- top = of_get_parent(ports);
- ports0 = of_get_child_by_name(top, "ports");
-
- ret = ports0 == ports;
- of_node_put(port);
- of_node_put(ports);
- of_node_put(ports0);
- of_node_put(top);
+ struct device_node *ports __free(device_node) = of_get_parent(port);
+ struct device_node *top __free(device_node) = of_get_parent(ports);
+ struct device_node *ports0 __free(device_node) = of_get_child_by_name(top, "ports");
- return ret;
+ return ports0 == ports;
}
EXPORT_SYMBOL_GPL(graph_util_is_ports0);
static int graph_get_dai_id(struct device_node *ep)
{
- struct device_node *node;
- struct device_node *endpoint;
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
+ struct device_node *port __free(device_node) = of_get_parent(ep);
struct of_endpoint info;
int i, id;
int ret;
@@ -1053,16 +1043,16 @@ static int graph_get_dai_id(struct device_node *ep)
* only of_graph_parse_endpoint().
* We need to check "reg" property
*/
- if (of_property_present(ep, "reg"))
- return info.id;
- node = of_get_parent(ep);
- ret = of_property_present(node, "reg");
- of_node_put(node);
+ /* check port first */
+ ret = of_property_present(port, "reg");
if (ret)
return info.port;
+
+ /* then check the endpoint as a fallback */
+ if (of_property_present(ep, "reg"))
+ return info.id;
}
- node = of_graph_get_port_parent(ep);
/*
* Non HDMI sound case, counting port/endpoint on its DT
@@ -1070,14 +1060,14 @@ static int graph_get_dai_id(struct device_node *ep)
*/
i = 0;
id = -1;
- for_each_endpoint_of_node(node, endpoint) {
- if (endpoint == ep)
+ for_each_of_graph_port(node, p) {
+ if (port == p) {
id = i;
+ break;
+ }
i++;
}
- of_node_put(node);
-
if (id < 0)
return -ENODEV;
@@ -1087,7 +1077,6 @@ static int graph_get_dai_id(struct device_node *ep)
int graph_util_parse_dai(struct device *dev, struct device_node *ep,
struct snd_soc_dai_link_component *dlc, int *is_single_link)
{
- struct device_node *node;
struct of_phandle_args args = {};
struct snd_soc_dai *dai;
int ret;
@@ -1095,7 +1084,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
if (!ep)
return 0;
- node = of_graph_get_port_parent(ep);
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
/*
* Try to find from DAI node
@@ -1136,10 +1125,8 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dlc(&args, dlc);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
return ret;
- }
parse_dai_end:
if (is_single_link)
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 76a1d05e2ebe..afe7e79ffdbd 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -120,14 +120,12 @@ static void simple_parse_convert(struct device *dev,
struct simple_util_data *adata)
{
struct device_node *top = dev->of_node;
- struct device_node *node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
simple_util_parse_convert(top, PREFIX, adata);
simple_util_parse_convert(node, PREFIX, adata);
simple_util_parse_convert(node, NULL, adata);
simple_util_parse_convert(np, NULL, adata);
-
- of_node_put(node);
}
static int simple_parse_node(struct simple_util_priv *priv,
@@ -176,7 +174,7 @@ static int simple_link_init(struct simple_util_priv *priv,
struct device_node *top = dev->of_node;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct device_node *node = of_get_parent(cpu);
+ struct device_node *node __free(device_node) = of_get_parent(cpu);
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
bool playback_only = 0, capture_only = 0;
@@ -185,7 +183,7 @@ static int simple_link_init(struct simple_util_priv *priv,
ret = simple_util_parse_daifmt(dev, node, codec,
prefix, &dai_link->dai_fmt);
if (ret < 0)
- goto init_end;
+ return ret;
graph_util_parse_link_direction(top, &playback_only, &capture_only);
graph_util_parse_link_direction(node, &playback_only, &capture_only);
@@ -215,11 +213,7 @@ static int simple_link_init(struct simple_util_priv *priv,
dai_link->init = simple_util_dai_init;
dai_link->ops = &simple_ops;
- ret = simple_util_set_dailink_name(dev, dai_link, name);
-init_end:
- of_node_put(node);
-
- return ret;
+ return simple_util_set_dailink_name(dev, dai_link, name);
}
static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
@@ -232,7 +226,7 @@ static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct device_node *top = dev->of_node;
- struct device_node *node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
char *prefix = "";
char dai_name[64];
int ret;
@@ -296,7 +290,6 @@ static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
out_put_node:
li->link++;
- of_node_put(node);
return ret;
}
@@ -312,15 +305,13 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, 0);
struct snd_soc_dai_link_component *platforms = snd_soc_link_to_platform(dai_link, 0);
struct device_node *cpu = NULL;
- struct device_node *node = NULL;
- struct device_node *plat = NULL;
char dai_name[64];
char prop[128];
char *prefix = "";
int ret, single_cpu = 0;
cpu = np;
- node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
dev_dbg(dev, "link_of (%pOF)\n", node);
@@ -329,7 +320,7 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
prefix = PREFIX;
snprintf(prop, sizeof(prop), "%splat", prefix);
- plat = of_get_child_by_name(node, prop);
+ struct device_node *plat __free(device_node) = of_get_child_by_name(node, prop);
ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
if (ret < 0)
@@ -352,9 +343,6 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
ret = simple_link_init(priv, cpu, codec, li, prefix, dai_name);
dai_link_of_err:
- of_node_put(plat);
- of_node_put(node);
-
li->link++;
return ret;
@@ -374,7 +362,6 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
struct device_node *node;
- struct device_node *add_devs;
uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
bool is_top = 0;
int ret = 0;
@@ -386,14 +373,11 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
is_top = 1;
}
- add_devs = of_get_child_by_name(top, PREFIX "additional-devs");
+ struct device_node *add_devs __free(device_node) = of_get_child_by_name(top, PREFIX "additional-devs");
/* loop for all dai-link */
do {
struct simple_util_data adata;
- struct device_node *codec;
- struct device_node *plat;
- struct device_node *np;
int num = of_get_child_count(node);
/* Skip additional-devs node */
@@ -403,26 +387,26 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
}
/* get codec */
- codec = of_get_child_by_name(node, is_top ?
- PREFIX "codec" : "codec");
+ struct device_node *codec __free(device_node) =
+ of_get_child_by_name(node, is_top ? PREFIX "codec" : "codec");
if (!codec) {
ret = -ENODEV;
goto error;
}
/* get platform */
- plat = of_get_child_by_name(node, is_top ?
- PREFIX "plat" : "plat");
+ struct device_node *plat __free(device_node) =
+ of_get_child_by_name(node, is_top ? PREFIX "plat" : "plat");
/* get convert-xxx property */
memset(&adata, 0, sizeof(adata));
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
if (np == add_devs)
continue;
simple_parse_convert(dev, np, &adata);
}
/* loop for all CPU/Codec node */
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
if (plat == np || add_devs == np)
continue;
/*
@@ -452,22 +436,16 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
ret = func_noml(priv, np, codec, li, is_top);
}
- if (ret < 0) {
- of_node_put(codec);
- of_node_put(plat);
- of_node_put(np);
+ if (ret < 0)
goto error;
- }
}
- of_node_put(codec);
- of_node_put(plat);
node = of_get_next_child(top, node);
} while (!is_top && node);
error:
- of_node_put(add_devs);
of_node_put(node);
+
return ret;
}
@@ -514,15 +492,13 @@ static void simple_depopulate_aux(void *data)
static int simple_populate_aux(struct simple_util_priv *priv)
{
struct device *dev = simple_priv_to_dev(priv);
- struct device_node *node;
+ struct device_node *node __free(device_node) = of_get_child_by_name(dev->of_node, PREFIX "additional-devs");
int ret;
- node = of_get_child_by_name(dev->of_node, PREFIX "additional-devs");
if (!node)
return 0;
ret = of_platform_populate(node, NULL, NULL, dev);
- of_node_put(node);
if (ret)
return ret;
diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c
index 27516ef57185..3dccf0a57a3a 100644
--- a/sound/soc/intel/avs/apl.c
+++ b/sound/soc/intel/avs/apl.c
@@ -12,6 +12,7 @@
#include "avs.h"
#include "messages.h"
#include "path.h"
+#include "registers.h"
#include "topology.h"
static irqreturn_t avs_apl_dsp_interrupt(struct avs_dev *adev)
@@ -125,7 +126,7 @@ int avs_apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
struct avs_apl_log_buffer_layout layout;
void __iomem *addr, *buf;
size_t dump_size;
- u16 offset = 0;
+ u32 offset = 0;
u8 *dump, *pos;
dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size;
diff --git a/sound/soc/intel/avs/cnl.c b/sound/soc/intel/avs/cnl.c
index bd3c4bb8bf5a..03f8fb0dc187 100644
--- a/sound/soc/intel/avs/cnl.c
+++ b/sound/soc/intel/avs/cnl.c
@@ -9,6 +9,7 @@
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
+#include "registers.h"
static void avs_cnl_ipc_interrupt(struct avs_dev *adev)
{
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
index 73d4bde9b2f7..0e750e9e01d9 100644
--- a/sound/soc/intel/avs/core.c
+++ b/sound/soc/intel/avs/core.c
@@ -829,10 +829,10 @@ static const struct avs_spec jsl_desc = {
.hipc = &cnl_hipc_spec,
};
-#define AVS_TGL_BASED_SPEC(sname) \
+#define AVS_TGL_BASED_SPEC(sname, min) \
static const struct avs_spec sname##_desc = { \
.name = #sname, \
- .min_fw_version = { 10, 29, 0, 5646 }, \
+ .min_fw_version = { 10, min, 0, 5646 }, \
.dsp_ops = &avs_tgl_dsp_ops, \
.core_init_mask = 1, \
.attributes = AVS_PLATATTR_IMR, \
@@ -840,11 +840,11 @@ static const struct avs_spec sname##_desc = { \
.hipc = &cnl_hipc_spec, \
}
-AVS_TGL_BASED_SPEC(lkf);
-AVS_TGL_BASED_SPEC(tgl);
-AVS_TGL_BASED_SPEC(ehl);
-AVS_TGL_BASED_SPEC(adl);
-AVS_TGL_BASED_SPEC(adl_n);
+AVS_TGL_BASED_SPEC(lkf, 28);
+AVS_TGL_BASED_SPEC(tgl, 29);
+AVS_TGL_BASED_SPEC(ehl, 30);
+AVS_TGL_BASED_SPEC(adl, 35);
+AVS_TGL_BASED_SPEC(adl_n, 35);
static const struct pci_device_id avs_ids[] = {
{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
@@ -902,3 +902,13 @@ MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("intel/skl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/apl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/cnl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/icl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/jsl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/lkf/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/tgl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/ehl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/adl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/adl_n/dsp_basefw.bin");
diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c
index 1767ded4d983..8c4edda97f75 100644
--- a/sound/soc/intel/avs/debugfs.c
+++ b/sound/soc/intel/avs/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/kfifo.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
+#include <linux/string_helpers.h>
#include <sound/soc.h>
#include "avs.h"
#include "messages.h"
diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
index 4fba46e77c47..08ed9d96738a 100644
--- a/sound/soc/intel/avs/ipc.c
+++ b/sound/soc/intel/avs/ipc.c
@@ -184,10 +184,11 @@ static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
struct avs_ipc *ipc = adev->ipc;
union avs_reply_msg msg = AVS_MSG(header);
- u64 reg;
+ u32 sts, lec;
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
- trace_avs_ipc_reply_msg(header, reg);
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ trace_avs_ipc_reply_msg(header, sts, lec);
ipc->rx.header = header;
/* Abort copying payload if request processing was unsuccessful. */
@@ -209,10 +210,11 @@ static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
union avs_notify_msg msg = AVS_MSG(header);
size_t data_size = 0;
void *data = NULL;
- u64 reg;
+ u32 sts, lec;
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
- trace_avs_ipc_notify_msg(header, reg);
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ trace_avs_ipc_notify_msg(header, sts, lec);
/* Ignore spurious notifications until handshake is established. */
if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
@@ -367,13 +369,16 @@ static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
const struct avs_spec *const spec = adev->spec;
- u64 reg = ULONG_MAX;
+ u32 sts = UINT_MAX;
+ u32 lec = UINT_MAX;
tx->header |= spec->hipc->req_busy_mask;
- if (read_fwregs)
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
+ if (read_fwregs) {
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ }
- trace_avs_request(tx, reg);
+ trace_avs_request(tx, sts, lec);
if (tx->size)
memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
index 890efd2f1fea..9ff7818395cd 100644
--- a/sound/soc/intel/avs/loader.c
+++ b/sound/soc/intel/avs/loader.c
@@ -167,7 +167,8 @@ int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
(reg & AVS_ROM_INIT_DONE) == AVS_ROM_INIT_DONE,
AVS_ROM_INIT_POLLING_US, SKL_ROM_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ dev_err(adev->dev, "rom init failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return ret;
}
@@ -180,7 +181,8 @@ int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
hda_cldma_stop(cl);
if (ret < 0) {
- dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ dev_err(adev->dev, "transfer fw failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return ret;
}
@@ -308,12 +310,13 @@ avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
}
/* await ROM init */
- ret = snd_hdac_adsp_readq_poll(adev, spec->sram->rom_status_offset, reg,
+ ret = snd_hdac_adsp_readl_poll(adev, spec->sram->rom_status_offset, reg,
(reg & 0xF) == AVS_ROM_INIT_DONE ||
(reg & 0xF) == APL_ROM_FW_ENTERED,
AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ dev_err(adev->dev, "rom init failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
goto err;
}
@@ -337,15 +340,15 @@ static int avs_imr_load_basefw(struct avs_dev *adev)
/* DMA id ignored when flashing from IMR as no transfer occurs. */
ret = avs_hda_init_rom(adev, 0, false);
- if (ret < 0) {
- dev_err(adev->dev, "rom init failed: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = wait_for_completion_timeout(&adev->fw_ready,
msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
if (!ret) {
- dev_err(adev->dev, "firmware ready timeout\n");
+ dev_err(adev->dev, "firmware ready timeout, status: 0x%08x, lec: 0x%08x\n",
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev)),
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return -ETIMEDOUT;
}
@@ -392,7 +395,7 @@ int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
ret = avs_hda_init_rom(adev, dma_id, true);
if (!ret)
break;
- dev_info(adev->dev, "#%d rom init fail: %d\n", i + 1, ret);
+ dev_info(adev->dev, "#%d rom init failed: %d\n", i + 1, ret);
}
if (ret < 0)
goto cleanup_resources;
@@ -404,7 +407,8 @@ int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
snd_hdac_dsp_trigger(hstream, false);
if (ret < 0) {
- dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ dev_err(adev->dev, "transfer fw failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
}
@@ -584,7 +588,9 @@ static int avs_dsp_load_basefw(struct avs_dev *adev)
ret = wait_for_completion_timeout(&adev->fw_ready,
msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
if (!ret) {
- dev_err(adev->dev, "firmware ready timeout\n");
+ dev_err(adev->dev, "firmware ready timeout, status: 0x%08x, lec: 0x%08x\n",
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev)),
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
ret = -ETIMEDOUT;
goto release_fw;
@@ -675,16 +681,12 @@ int avs_dsp_first_boot_firmware(struct avs_dev *adev)
}
ret = avs_ipc_get_hw_config(adev, &adev->hw_cfg);
- if (ret) {
- dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
+ if (ret)
return AVS_IPC_RET(ret);
- }
ret = avs_ipc_get_fw_config(adev, &adev->fw_cfg);
- if (ret) {
- dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
+ if (ret)
return AVS_IPC_RET(ret);
- }
adev->core_refs = devm_kcalloc(adev->dev, adev->hw_cfg.dsp_cores,
sizeof(*adev->core_refs), GFP_KERNEL);
diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c
index ec458bd51b10..30b666f8909b 100644
--- a/sound/soc/intel/avs/messages.c
+++ b/sound/soc/intel/avs/messages.c
@@ -400,10 +400,12 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
AVS_BASEFW_FIRMWARE_CONFIG, NULL, 0,
&payload, &payload_size);
if (ret)
- return ret;
+ goto err;
/* Non-zero payload expected for FIRMWARE_CONFIG. */
- if (!payload_size)
- return -EREMOTEIO;
+ if (!payload_size) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -502,6 +504,9 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
/* No longer needed, free it as it's owned by the get_large_config() caller. */
kfree(payload);
+err:
+ if (ret)
+ dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
return ret;
}
@@ -517,10 +522,12 @@ int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
AVS_BASEFW_HARDWARE_CONFIG, NULL, 0,
&payload, &payload_size);
if (ret)
- return ret;
+ goto err;
/* Non-zero payload expected for HARDWARE_CONFIG. */
- if (!payload_size)
- return -EREMOTEIO;
+ if (!payload_size) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -590,6 +597,9 @@ int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
exit:
/* No longer needed, free it as it's owned by the get_large_config() caller. */
kfree(payload);
+err:
+ if (ret)
+ dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
return ret;
}
diff --git a/sound/soc/intel/avs/messages.h b/sound/soc/intel/avs/messages.h
index d0bdb7d9266c..0378633c7f96 100644
--- a/sound/soc/intel/avs/messages.h
+++ b/sound/soc/intel/avs/messages.h
@@ -859,8 +859,7 @@ static_assert(sizeof(struct avs_aec_cfg) == 92);
struct avs_asrc_cfg {
struct avs_modcfg_base base;
u32 out_freq;
- u32 rsvd0:1;
- u32 mode:1;
+ u32 mode:2;
u32 rsvd2:2;
u32 disable_jitter_buffer:1;
u32 rsvd3:27;
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 945f9c0a6a54..4bfbcb5a5ae8 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -161,6 +161,7 @@ static int avs_dai_be_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dpcm *dpcm;
be = snd_soc_substream_to_rtd(substream);
+ /* dpcm_fe_dai_open() guarantees the list is not empty at this point. */
for_each_dpcm_fe(be, substream->stream, dpcm) {
fe = dpcm->fe;
fe_hw_params = &fe->dpcm[substream->stream].hw_params;
@@ -576,6 +577,7 @@ static int avs_dai_fe_hw_params(struct snd_pcm_substream *substream,
hdac_stream(host_stream)->format_val = 0;
fe = snd_soc_substream_to_rtd(substream);
+ /* dpcm_fe_dai_open() guarantees the list is not empty at this point. */
for_each_dpcm_be(fe, substream->stream, dpcm) {
be = dpcm->be;
be_hw_params = &be->dpcm[substream->stream].hw_params;
@@ -1564,6 +1566,7 @@ static int avs_component_hda_probe(struct snd_soc_component *component)
if (ret < 0) {
dev_err(component->dev, "create widgets failed: %d\n",
ret);
+ snd_soc_unregister_dai(dai);
goto exit;
}
}
@@ -1578,8 +1581,8 @@ exit:
static void avs_component_hda_remove(struct snd_soc_component *component)
{
- avs_component_hda_unregister_dais(component);
avs_component_remove(component);
+ avs_component_hda_unregister_dais(component);
}
static int avs_component_hda_open(struct snd_soc_component *component,
diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h
index f76e91cff2a9..368ede05f2cd 100644
--- a/sound/soc/intel/avs/registers.h
+++ b/sound/soc/intel/avs/registers.h
@@ -9,6 +9,8 @@
#ifndef __SOUND_SOC_INTEL_AVS_REGS_H
#define __SOUND_SOC_INTEL_AVS_REGS_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <linux/sizes.h>
#define AZX_PCIREG_PGCTL 0x44
@@ -74,7 +76,7 @@
/* Constants used when accessing SRAM, space shared with firmware */
#define AVS_FW_REG_BASE(adev) ((adev)->spec->sram->base_offset)
#define AVS_FW_REG_STATUS(adev) (AVS_FW_REG_BASE(adev) + 0x0)
-#define AVS_FW_REG_ERROR_CODE(adev) (AVS_FW_REG_BASE(adev) + 0x4)
+#define AVS_FW_REG_ERROR(adev) (AVS_FW_REG_BASE(adev) + 0x4)
#define AVS_WINDOW_CHUNK_SIZE SZ_4K
#define AVS_FW_REGS_SIZE AVS_WINDOW_CHUNK_SIZE
@@ -98,4 +100,47 @@
#define avs_downlink_addr(adev) \
avs_sram_addr(adev, AVS_DOWNLINK_WINDOW)
+#define snd_hdac_adsp_writeb(adev, reg, value) \
+ snd_hdac_reg_writeb(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readb(adev, reg) \
+ snd_hdac_reg_readb(&(adev)->base.core, (adev)->dsp_ba + (reg))
+#define snd_hdac_adsp_writew(adev, reg, value) \
+ snd_hdac_reg_writew(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readw(adev, reg) \
+ snd_hdac_reg_readw(&(adev)->base.core, (adev)->dsp_ba + (reg))
+#define snd_hdac_adsp_writel(adev, reg, value) \
+ snd_hdac_reg_writel(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readl(adev, reg) \
+ snd_hdac_reg_readl(&(adev)->base.core, (adev)->dsp_ba + (reg))
+#define snd_hdac_adsp_writeq(adev, reg, value) \
+ snd_hdac_reg_writeq(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readq(adev, reg) \
+ snd_hdac_reg_readq(&(adev)->base.core, (adev)->dsp_ba + (reg))
+
+#define snd_hdac_adsp_updateb(adev, reg, mask, val) \
+ snd_hdac_adsp_writeb(adev, reg, \
+ (snd_hdac_adsp_readb(adev, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatew(adev, reg, mask, val) \
+ snd_hdac_adsp_writew(adev, reg, \
+ (snd_hdac_adsp_readw(adev, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatel(adev, reg, mask, val) \
+ snd_hdac_adsp_writel(adev, reg, \
+ (snd_hdac_adsp_readl(adev, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updateq(adev, reg, mask, val) \
+ snd_hdac_adsp_writeq(adev, reg, \
+ (snd_hdac_adsp_readq(adev, reg) & ~(mask)) | (val))
+
+#define snd_hdac_adsp_readb_poll(adev, reg, val, cond, delay_us, timeout_us) \
+ readb_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readw_poll(adev, reg, val, cond, delay_us, timeout_us) \
+ readw_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readl_poll(adev, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readq_poll(adev, reg, val, cond, delay_us, timeout_us) \
+ readq_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+
#endif /* __SOUND_SOC_INTEL_AVS_REGS_H */
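Usage sketch for the helpers added above: a masked update followed by a poll until the hardware acknowledges, using the updatel/readl_poll wrappers exactly as declared. The register offset, bit mask and timings below are placeholders, not values from the driver:

#define EXAMPLE_REG	0x04		/* placeholder offset, not a real AVS register */
#define EXAMPLE_BIT	BIT(0)		/* placeholder bit */

static int example_set_and_wait(struct avs_dev *adev)
{
	u32 reg;

	/* request the state change... */
	snd_hdac_adsp_updatel(adev, EXAMPLE_REG, EXAMPLE_BIT, EXAMPLE_BIT);
	/* ...then poll until the hardware reports it, or time out */
	return snd_hdac_adsp_readl_poll(adev, EXAMPLE_REG, reg,
					reg & EXAMPLE_BIT,
					500 /* delay_us */, 10000 /* timeout_us */);
}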
diff --git a/sound/soc/intel/avs/skl.c b/sound/soc/intel/avs/skl.c
index 34f859d6e5a4..d66ef000de9e 100644
--- a/sound/soc/intel/avs/skl.c
+++ b/sound/soc/intel/avs/skl.c
@@ -12,6 +12,7 @@
#include "avs.h"
#include "cldma.h"
#include "messages.h"
+#include "registers.h"
void avs_skl_ipc_interrupt(struct avs_dev *adev)
{
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 5cda527020c7..d612f20ed989 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -1466,7 +1466,7 @@ avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *o
static const struct avs_tplg_token_parser mod_init_config_parsers[] = {
{
- .token = AVS_TKN_MOD_INIT_CONFIG_ID_U32,
+ .token = AVS_TKN_INIT_CONFIG_ID_U32,
.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
.offset = offsetof(struct avs_tplg_init_config, id),
.parse = avs_parse_word_token,
@@ -1519,7 +1519,7 @@ static int avs_tplg_parse_initial_configs(struct snd_soc_component *comp,
esize = le32_to_cpu(tuples->size) + le32_to_cpu(tmp->size);
ret = parse_dictionary_entries(comp, tuples, esize, config, 1, sizeof(*config),
- AVS_TKN_MOD_INIT_CONFIG_ID_U32,
+ AVS_TKN_INIT_CONFIG_ID_U32,
mod_init_config_parsers,
ARRAY_SIZE(mod_init_config_parsers));
diff --git a/sound/soc/intel/avs/trace.h b/sound/soc/intel/avs/trace.h
index c9eaa5a60ed3..f4288d0ad5ef 100644
--- a/sound/soc/intel/avs/trace.h
+++ b/sound/soc/intel/avs/trace.h
@@ -37,60 +37,62 @@ TRACE_EVENT(avs_dsp_core_op,
void trace_avs_msg_payload(const void *data, size_t size);
-#define trace_avs_request(msg, fwregs) \
+#define trace_avs_request(msg, sts, lec) \
({ \
- trace_avs_ipc_request_msg((msg)->header, fwregs); \
+ trace_avs_ipc_request_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
-#define trace_avs_reply(msg, fwregs) \
+#define trace_avs_reply(msg, sts, lec) \
({ \
- trace_avs_ipc_reply_msg((msg)->header, fwregs); \
+ trace_avs_ipc_reply_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
-#define trace_avs_notify(msg, fwregs) \
+#define trace_avs_notify(msg, sts, lec) \
({ \
- trace_avs_ipc_notify_msg((msg)->header, fwregs); \
+ trace_avs_ipc_notify_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
#endif
DECLARE_EVENT_CLASS(avs_ipc_msg_hdr,
- TP_PROTO(u64 header, u64 fwregs),
+ TP_PROTO(u64 header, u32 sts, u32 lec),
- TP_ARGS(header, fwregs),
+ TP_ARGS(header, sts, lec),
TP_STRUCT__entry(
__field(u64, header)
- __field(u64, fwregs)
+ __field(u32, sts)
+ __field(u32, lec)
),
TP_fast_assign(
__entry->header = header;
- __entry->fwregs = fwregs;
+ __entry->sts = sts;
+ __entry->lec = lec;
),
TP_printk("primary: 0x%08X, extension: 0x%08X,\n"
- "fwstatus: 0x%08X, fwerror: 0x%08X",
+ "status: 0x%08X, error: 0x%08X",
lower_32_bits(__entry->header), upper_32_bits(__entry->header),
- lower_32_bits(__entry->fwregs), upper_32_bits(__entry->fwregs))
+ __entry->sts, __entry->lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_request_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_reply_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_notify_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
TRACE_EVENT_CONDITION(avs_ipc_msg_payload,
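With the event class reworked above, callers pass the firmware status and last-error registers as two 32-bit values instead of one packed u64. A sketch of what a call site might look like, using the register helpers and the renamed AVS_FW_REG_ERROR() from registers.h; msg is the in-flight IPC message the macros expect, and the exact placement of these reads in the IPC path is an assumption of the sketch:

/* sketch: read the two SRAM registers separately and hand them to tracing */
u32 sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
u32 lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));

trace_avs_request(msg, sts, lec);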
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 9caa4407c1ca..6446cda0f857 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1132,7 +1132,22 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF2 |
BYT_RT5640_MCLK_EN),
},
- { /* Vexia Edu Atla 10 tablet */
+ {
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ { /* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 22668bac74a1..0554c7e2cb34 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -124,8 +124,6 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
return ret;
card->dev = &pdev->dev;
- if (!snd_soc_acpi_sof_parent(&pdev->dev))
- card->disable_route_checks = true;
if (mach->mach_params.dmic_num > 0) {
card->components = devm_kasprintf(card->dev, GFP_KERNEL,
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index c9f9c9b0de9b..b0d35fda7b17 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -22,6 +22,8 @@ static int quirk_override = -1;
module_param_named(quirk, quirk_override, int, 0444);
MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+#define DMIC_DEFAULT_CHANNELS 2
+
static void log_quirks(struct device *dev)
{
if (SOC_SDW_JACK_JDSRC(sof_sdw_quirk))
@@ -42,6 +44,8 @@ static void log_quirks(struct device *dev)
dev_dbg(dev, "quirk SOC_SDW_CODEC_SPKR enabled\n");
if (sof_sdw_quirk & SOC_SDW_SIDECAR_AMPS)
dev_dbg(dev, "quirk SOC_SDW_SIDECAR_AMPS enabled\n");
+ if (sof_sdw_quirk & SOC_SDW_CODEC_MIC)
+ dev_dbg(dev, "quirk SOC_SDW_CODEC_MIC enabled\n");
}
static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
@@ -608,25 +612,41 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3838")
+ DMI_MATCH(DMI_PRODUCT_NAME, "83JX")
},
- .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
},
{
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3832")
+ DMI_MATCH(DMI_PRODUCT_NAME, "83LC")
},
- .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
},
{
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "380E")
+ DMI_MATCH(DMI_PRODUCT_NAME, "83MC")
},
- .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ }, {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83NM")
+ },
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83HM")
+ },
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS |
+ SOC_SDW_CODEC_MIC),
},
{
.callback = sof_sdw_quirk_cb,
@@ -1127,22 +1147,24 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
/* enable dmic01 & dmic16k */
- if (sof_sdw_quirk & SOC_SDW_PCH_DMIC || mach_params->dmic_num) {
- if (ctx->ignore_internal_dmic)
- dev_warn(dev, "Ignoring PCH DMIC\n");
- else
- dmic_num = 2;
+ if (ctx->ignore_internal_dmic) {
+ dev_dbg(dev, "SoundWire DMIC is used, ignoring internal DMIC\n");
+ mach_params->dmic_num = 0;
+ } else if (mach_params->dmic_num) {
+ dmic_num = 2;
+ } else if (sof_sdw_quirk & SOC_SDW_PCH_DMIC) {
+ dmic_num = 2;
+ /*
+ * mach_params->dmic_num will be used to set the cfg-mics value of
+ * card->components string. Set it to the default value.
+ */
+ mach_params->dmic_num = DMIC_DEFAULT_CHANNELS;
}
- /*
- * mach_params->dmic_num will be used to set the cfg-mics value of card->components
- * string. Overwrite it to the actual number of PCH DMICs used in the device.
- */
- mach_params->dmic_num = dmic_num;
if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
bt_num = 1;
- dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
+ dev_dbg(dev, "DAI link numbers: sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
sdw_be_num, ssp_num, dmic_num,
intel_ctx->hdmi.idisp_codec ? hdmi_num : 0, bt_num);
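The reworked block above turns the DMIC setup into an explicit priority chain and stops unconditionally overwriting mach_params->dmic_num. Condensed summary of the three cases, as an illustrative comment rather than code from the patch:

/*
 * Resulting precedence:
 *   1. ctx->ignore_internal_dmic         -> no DMIC DAI links, report 0 mics
 *   2. mach_params->dmic_num from ACPI   -> 2 DMIC DAI links, keep the ACPI count
 *   3. SOC_SDW_PCH_DMIC quirk            -> 2 DMIC DAI links, report
 *                                           DMIC_DEFAULT_CHANNELS (2) mics
 */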
diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
index 24d850df77ca..32147dc9d2d6 100644
--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
@@ -138,7 +138,7 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r1_adr[] = {
},
};
-static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
+static const struct snd_soc_acpi_adr_device cs35l56_3_l3_adr[] = {
{
.adr = 0x00033301fa355601ull,
.num_endpoints = 1,
@@ -147,6 +147,24 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
},
};
+static const struct snd_soc_acpi_adr_device cs35l56_2_r3_adr[] = {
+ {
+ .adr = 0x00023301fa355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "AMP2"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
+ {
+ .adr = 0x00033101fa355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+};
+
static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = {
{ /* Jack Playback Endpoint */
.num = 0,
@@ -306,6 +324,25 @@ static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_2_l23[] = {
},
{
.mask = BIT(3),
+ .num_adr = ARRAY_SIZE(cs35l56_3_l3_adr),
+ .adr_d = cs35l56_3_l3_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_3_l23[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs42l43_0_adr),
+ .adr_d = cs42l43_0_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(cs35l56_2_r3_adr),
+ .adr_d = cs35l56_2_r3_adr,
+ },
+ {
+ .mask = BIT(3),
.num_adr = ARRAY_SIZE(cs35l56_3_l1_adr),
.adr_d = cs35l56_3_l1_adr,
},
@@ -407,6 +444,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
},
{
+ .link_mask = BIT(0) | BIT(2) | BIT(3),
+ .links = arl_cs42l43_l0_cs35l56_3_l23,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
+ },
+ {
.link_mask = BIT(0) | BIT(2),
.links = arl_cs42l43_l0_cs35l56_l2,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-lnl-match.c b/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
index 98a9c36d7a4c..0b4a9c27c47e 100644
--- a/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
@@ -91,6 +91,23 @@ static const struct snd_soc_acpi_endpoint rt722_endpoints[] = {
},
};
+static const struct snd_soc_acpi_endpoint jack_dmic_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
static const struct snd_soc_acpi_endpoint jack_amp_g1_dmic_endpoints_endpoints[] = {
/* Jack Endpoint */
{
@@ -295,6 +312,24 @@ static const struct snd_soc_acpi_adr_device rt1320_1_group1_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1320_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_3_group2_adr[] = {
+ {
+ .adr = 0x000330025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt713_0_adr[] = {
{
.adr = 0x000031025D071301ull,
@@ -304,6 +339,15 @@ static const struct snd_soc_acpi_adr_device rt713_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt713_vb_2_adr[] = {
+ {
+ .adr = 0x000230025d071301ull,
+ .num_endpoints = ARRAY_SIZE(jack_dmic_endpoints),
+ .endpoints = jack_dmic_endpoints,
+ .name_prefix = "rt713"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
{
.adr = 0x000030025D071401ull,
@@ -453,6 +497,25 @@ static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_l0_rt1318_l1[] = {
{}
};
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_vb_l2_rt1320_l13[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt713_vb_2_adr),
+ .adr_d = rt713_vb_2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group2_adr),
+ .adr_d = rt1320_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1320_3_group2_adr),
+ .adr_d = rt1320_3_group2_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr lnl_sdw_rt712_vb_l2_rt1320_l1[] = {
{
.mask = BIT(2),
@@ -550,6 +613,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[] = {
.machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
.sof_tplg_filename = "sof-lnl-rt712-l2-rt1320-l1.tplg"
},
+ {
+ .link_mask = BIT(1) | BIT(2) | BIT(3),
+ .links = lnl_sdw_rt713_vb_l2_rt1320_l13,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt713-l2-rt1320-l13.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_lnl_sdw_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
index 03fc5a187012..770e2194a283 100644
--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -441,6 +441,179 @@ static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = {
}
};
+/* CS42L43 - speaker DAI aggregated with 4 amps */
+static const struct snd_soc_acpi_endpoint cs42l43_4amp_spkagg_endpoints[] = {
+ { /* Jack Playback Endpoint */
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* DMIC Capture Endpoint */
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Jack Capture Endpoint */
+ .num = 2,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Speaker Playback Endpoint */
+ .num = 3,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+};
+
+/* CS42L43 on link3 aggregated with 4 amps */
+static const struct snd_soc_acpi_adr_device cs42l43_l3_4amp_spkagg_adr[] = {
+ {
+ .adr = 0x00033001FA424301ull,
+ .num_endpoints = ARRAY_SIZE(cs42l43_4amp_spkagg_endpoints),
+ .endpoints = cs42l43_4amp_spkagg_endpoints,
+ .name_prefix = "cs42l43"
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_l_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_r_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_2_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_3_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_4_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_5_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_6_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_7_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_0_adr[] = {
+ {
+ .adr = 0x00003301FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_2_endpoint,
+ .name_prefix = "AMP2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device cs35l56_1_adr[] = {
{
.adr = 0x00013701FA355601ull,
@@ -471,17 +644,71 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device cs35l56_0_fb_adr[] = {
+ {
+ .adr = 0x00003301FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00003101FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_4_fb_endpoints),
+ .endpoints = cs35l56_4_fb_endpoints,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00003001FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_6_fb_endpoints),
+ .endpoints = cs35l56_6_fb_endpoints,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_1_fb_adr[] = {
+ {
+ .adr = 0x00013701FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
+ .name_prefix = "AMP8"
+ },
+ {
+ .adr = 0x00013601FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
+ .name_prefix = "AMP7"
+ },
+ {
+ .adr = 0x00013501FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_5_fb_endpoints),
+ .endpoints = cs35l56_5_fb_endpoints,
+ .name_prefix = "AMP6"
+ },
+ {
+ .adr = 0x00013401FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_7_fb_endpoints),
+ .endpoints = cs35l56_7_fb_endpoints,
+ .name_prefix = "AMP5"
+ },
+};
+
static const struct snd_soc_acpi_adr_device cs35l56_2_r_adr[] = {
{
.adr = 0x00023201FA355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_r_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
.name_prefix = "AMP3"
},
{
.adr = 0x00023301FA355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_3_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
.name_prefix = "AMP4"
}
@@ -490,14 +717,14 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r_adr[] = {
static const struct snd_soc_acpi_adr_device cs35l56_3_l_adr[] = {
{
.adr = 0x00033001fa355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_l_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
.name_prefix = "AMP1"
},
{
.adr = 0x00033101fa355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_2_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
.name_prefix = "AMP2"
}
};
@@ -765,6 +992,40 @@ static const struct snd_soc_acpi_link_adr cs42l43_link0_cs35l56_link2_link3[] =
{}
};
+static const struct snd_soc_acpi_link_adr cs42l43_link3_cs35l56_x4_link0_link1_spkagg[] = {
+ /* Expected order: jack -> amp */
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(cs42l43_l3_4amp_spkagg_adr),
+ .adr_d = cs42l43_l3_4amp_spkagg_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = 2,
+ .adr_d = cs35l56_1_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = 2,
+ .adr_d = cs35l56_0_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr mtl_cs35l56_x8_link0_link1_fb[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56_1_fb_adr),
+ .adr_d = cs35l56_1_fb_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs35l56_0_fb_adr),
+ .adr_d = cs35l56_0_fb_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -842,12 +1103,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
.sof_tplg_filename = "sof-mtl-cs42l43-l0-cs35l56-l23.tplg",
},
{
+ .link_mask = BIT(0) | BIT(1) | BIT(3),
+ .links = cs42l43_link3_cs35l56_x4_link0_link1_spkagg,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-cs42l43-l3-cs35l56-l01-spkagg.tplg",
+ },
+ {
.link_mask = GENMASK(2, 0),
.links = mtl_cs42l43_cs35l56,
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-mtl-cs42l43-l0-cs35l56-l12.tplg",
},
{
+ .link_mask = BIT(0) | BIT(1),
+ .links = mtl_cs35l56_x8_link0_link1_fb,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-cs35l56-l01-fb8.tplg"
+ },
+ {
.link_mask = BIT(0),
.links = mtl_cs42l43_l0,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
index f1c0d7a02cda..9eb4a43e3e7a 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
@@ -8,6 +8,7 @@
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+#include "soc-acpi-intel-sdca-quirks.h"
#include "soc-acpi-intel-sdw-mockup-match.h"
#include <sound/soc-acpi-intel-ssp-common.h>
@@ -35,6 +36,20 @@ static const struct snd_soc_acpi_endpoint single_endpoint = {
.group_id = 0,
};
+static const struct snd_soc_acpi_endpoint spk_l_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+};
+
/*
* Multi-function codecs with three endpoints created for
* headset, amp and dmic functions.
@@ -60,6 +75,47 @@ static const struct snd_soc_acpi_endpoint rt_mf_endpoints[] = {
},
};
+static const struct snd_soc_acpi_endpoint jack_dmic_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint jack_amp_g1_dmic_endpoints_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* Amp Endpoint, work as spk_l_endpoint */
+ {
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 2,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
.adr = 0x000030025D071101ull,
@@ -69,6 +125,24 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt712_vb_2_group1_adr[] = {
+ {
+ .adr = 0x000230025D071201ull,
+ .num_endpoints = ARRAY_SIZE(jack_amp_g1_dmic_endpoints_endpoints),
+ .endpoints = jack_amp_g1_dmic_endpoints_endpoints,
+ .name_prefix = "rt712"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt713_vb_2_adr[] = {
+ {
+ .adr = 0x000230025d071301ull,
+ .num_endpoints = ARRAY_SIZE(jack_dmic_endpoints),
+ .endpoints = jack_dmic_endpoints,
+ .name_prefix = "rt713"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt721_3_single_adr[] = {
{
.adr = 0x000330025d072101ull,
@@ -114,6 +188,33 @@ static const struct snd_soc_acpi_adr_device rt722_3_single_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1320_1_group1_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_3_group2_adr[] = {
+ {
+ .adr = 0x000330025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-2"
+ }
+};
+
static const struct snd_soc_acpi_link_adr ptl_rt722_only[] = {
{
.mask = BIT(0),
@@ -150,6 +251,39 @@ static const struct snd_soc_acpi_link_adr ptl_rvp[] = {
{}
};
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_vb_l2_rt1320_l13[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt713_vb_2_adr),
+ .adr_d = rt713_vb_2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group2_adr),
+ .adr_d = rt1320_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1320_3_group2_adr),
+ .adr_d = rt1320_3_group2_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt712_vb_l2_rt1320_l1[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt712_vb_2_group1_adr),
+ .adr_d = rt712_vb_2_group1_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group1_adr),
+ .adr_d = rt1320_1_group1_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -201,6 +335,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-ptl-rt722.tplg",
},
+ {
+ .link_mask = BIT(1) | BIT(2),
+ .links = lnl_sdw_rt712_vb_l2_rt1320_l1,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt712-l2-rt1320-l1.tplg"
+ },
+ {
+ .link_mask = BIT(1) | BIT(2) | BIT(3),
+ .links = lnl_sdw_rt713_vb_l2_rt1320_l13,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt713-l2-rt1320-l13.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_ptl_sdw_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index 161ba532d270..6f8c06413665 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -536,6 +536,194 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_machines);
+static const struct snd_soc_acpi_endpoint cs35l56_l_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_r_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_2_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_3_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_4_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_5_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_6_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_7_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_sdw_eight_1_4_fb_adr[] = {
+ {
+ .adr = 0x00003301fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00003101fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_4_fb_endpoints),
+ .endpoints = cs35l56_4_fb_endpoints,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00003001fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_6_fb_endpoints),
+ .endpoints = cs35l56_6_fb_endpoints,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_sdw_eight_5_8_fb_adr[] = {
+ {
+ .adr = 0x00013701fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
+ .name_prefix = "AMP8"
+ },
+ {
+ .adr = 0x00013601fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
+ .name_prefix = "AMP7"
+ },
+ {
+ .adr = 0x00013501fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_5_fb_endpoints),
+ .endpoints = cs35l56_5_fb_endpoints,
+ .name_prefix = "AMP6"
+ },
+ {
+ .adr = 0x00013401fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_7_fb_endpoints),
+ .endpoints = cs35l56_7_fb_endpoints,
+ .name_prefix = "AMP5"
+ },
+};
+
+static const struct snd_soc_acpi_link_adr up_extreme_cs35l56_sdw_eight[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56_sdw_eight_5_8_fb_adr),
+ .adr_d = cs35l56_sdw_eight_5_8_fb_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs35l56_sdw_eight_1_4_fb_adr),
+ .adr_d = cs35l56_sdw_eight_1_4_fb_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -635,6 +823,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-tgl-rt711.tplg",
},
+ {
+ .link_mask = BIT(0) | BIT(1),
+ .links = up_extreme_cs35l56_sdw_eight,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-tgl-cs35l56-l01-fb8.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_sdw_machines);
diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
index aa5de167e790..4ed71d11ad77 100644
--- a/sound/soc/intel/keembay/kmb_platform.c
+++ b/sound/soc/intel/keembay/kmb_platform.c
@@ -869,7 +869,7 @@ static int kmb_plat_dai_probe(struct platform_device *pdev)
kmb_i2s->fifo_th = (1 << COMP1_FIFO_DEPTH(comp1_reg)) / 2;
- kmb_i2s->use_pio = !(of_property_read_bool(np, "dmas"));
+ kmb_i2s->use_pio = !of_property_present(np, "dmas");
if (kmb_i2s->use_pio) {
irq = platform_get_irq_optional(pdev, 0);
diff --git a/sound/soc/mediatek/common/mtk-soundcard-driver.c b/sound/soc/mediatek/common/mtk-soundcard-driver.c
index 3bbf42c42805..f4314dddc460 100644
--- a/sound/soc/mediatek/common/mtk-soundcard-driver.c
+++ b/sound/soc/mediatek/common/mtk-soundcard-driver.c
@@ -221,7 +221,7 @@ int mtk_soundcard_common_probe(struct platform_device *pdev)
card->name = pdata->card_name;
}
- needs_legacy_probe = !of_property_read_bool(pdev->dev.of_node, "audio-routing");
+ needs_legacy_probe = !of_property_present(pdev->dev.of_node, "audio-routing");
if (needs_legacy_probe) {
/*
* If we have no .soc_probe() callback there's no way of using
@@ -262,7 +262,7 @@ int mtk_soundcard_common_probe(struct platform_device *pdev)
adsp_node = NULL;
if (adsp_node) {
- if (of_property_read_bool(pdev->dev.of_node, "mediatek,dai-link")) {
+ if (of_property_present(pdev->dev.of_node, "mediatek,dai-link")) {
ret = mtk_sof_dailink_parse_of(card, pdev->dev.of_node,
"mediatek,dai-link",
card->dai_link, card->num_links);
diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
index 9b502f4cd6ea..80cda7bf5ccc 100644
--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
+++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
@@ -2158,27 +2158,26 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
{
struct mtk_base_afe *afe;
struct mt8192_afe_private *afe_priv;
- struct device *dev;
+ struct device *dev = &pdev->dev;
struct reset_control *rstc;
int i, ret, irq_id;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34));
if (ret)
return ret;
- afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
if (!afe)
return -ENOMEM;
platform_set_drvdata(pdev, afe);
- afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ afe->platform_priv = devm_kzalloc(dev, sizeof(*afe_priv),
GFP_KERNEL);
if (!afe->platform_priv)
return -ENOMEM;
afe_priv = afe->platform_priv;
- afe->dev = &pdev->dev;
- dev = afe->dev;
+ afe->dev = dev;
/* init audio related clock */
ret = mt8192_init_clock(afe);
@@ -2196,7 +2195,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to trigger audio reset\n");
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
@@ -2212,13 +2211,13 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
/* enable clock for regcache get default value from hw */
afe_priv->pm_runtime_bypass_reg_ctl = true;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
ret = regmap_reinit_cache(afe->regmap, &mt8192_afe_regmap_config);
if (ret)
return dev_err_probe(dev, ret, "regmap_reinit_cache fail\n");
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev);
afe_priv->pm_runtime_bypass_reg_ctl = false;
regcache_cache_only(afe->regmap, true);
@@ -2285,7 +2284,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
afe->runtime_suspend = mt8192_afe_runtime_suspend;
/* register platform */
- ret = devm_snd_soc_register_component(&pdev->dev,
+ ret = devm_snd_soc_register_component(dev,
&mtk_afe_pcm_platform,
afe->dai_drivers,
afe->num_dai_drivers);
diff --git a/sound/soc/mediatek/mt8365/Makefile b/sound/soc/mediatek/mt8365/Makefile
index 52ba45a8498a..b197025e34bb 100644
--- a/sound/soc/mediatek/mt8365/Makefile
+++ b/sound/soc/mediatek/mt8365/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# MTK Platform driver
-snd-soc-mt8365-pcm-objs := \
+snd-soc-mt8365-pcm-y := \
mt8365-afe-clk.o \
mt8365-afe-pcm.o \
mt8365-dai-adda.o \
diff --git a/sound/soc/mediatek/mt8365/mt8365-mt6357.c b/sound/soc/mediatek/mt8365/mt8365-mt6357.c
index d398e83ea052..9f28d6bf0323 100644
--- a/sound/soc/mediatek/mt8365/mt8365-mt6357.c
+++ b/sound/soc/mediatek/mt8365/mt8365-mt6357.c
@@ -6,12 +6,19 @@
* Authors: Nicolas Belin <nbelin@baylibre.com>
*/
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
#include <sound/soc.h>
#include <sound/pcm_params.h>
+
#include "mt8365-afe-common.h"
-#include <linux/pinctrl/consumer.h>
#include "../common/mtk-soc-card.h"
#include "../common/mtk-soundcard-driver.h"
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index 928cf5cb5999..7ee60a58a336 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -44,20 +44,20 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
return ret;
}
- if (of_property_read_bool(dev->of_node, "widgets")) {
+ if (of_property_present(dev->of_node, "widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(card, "widgets");
if (ret)
return ret;
}
/* DAPM routes */
- if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ if (of_property_present(dev->of_node, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
if (ret)
return ret;
}
/* Deprecated, only for compatibility with old device trees */
- if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
+ if (of_property_present(dev->of_node, "qcom,audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "qcom,audio-routing");
if (ret)
return ret;
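The kmb_platform.c, mtk-soundcard-driver.c and qcom conversions above all replace of_property_read_bool() with of_property_present(). of_property_read_bool() is documented for boolean (value-less) properties, so using it as a pure existence test on properties that carry values obscures intent; of_property_present() expresses exactly that existence test. A small contrast sketch, with "wakeup-source" as a standard boolean property and "audio-routing" as a property that carries data:

static bool example_of_checks(struct device_node *np)
{
	/* boolean property: presence *is* the value */
	if (of_property_read_bool(np, "wakeup-source"))
		return true;

	/* property that carries a string list: only its presence is tested
	 * here, parsing happens elsewhere */
	return of_property_present(np, "audio-routing");
}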
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index addd2c4bdd3e..9946f12254b3 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -1232,14 +1232,16 @@ static int lpass_platform_copy(struct snd_soc_component *component,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (is_cdc_dma_port(dai_id)) {
- ret = copy_from_iter_toio(dma_buf, buf, bytes);
+ if (copy_from_iter_toio(dma_buf, bytes, buf) != bytes)
+ ret = -EFAULT;
} else {
if (copy_from_iter((void __force *)dma_buf, bytes, buf) != bytes)
ret = -EFAULT;
}
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
if (is_cdc_dma_port(dai_id)) {
- ret = copy_to_iter_fromio(buf, dma_buf, bytes);
+ if (copy_to_iter_fromio(dma_buf, bytes, buf) != bytes)
+ ret = -EFAULT;
} else {
if (copy_to_iter((void __force *)dma_buf, bytes, buf) != bytes)
ret = -EFAULT;
diff --git a/sound/soc/qcom/sc7180.c b/sound/soc/qcom/sc7180.c
index bc030ce29680..d95710b1ea4e 100644
--- a/sound/soc/qcom/sc7180.c
+++ b/sound/soc/qcom/sc7180.c
@@ -513,7 +513,7 @@ static int sc7180_snd_platform_probe(struct platform_device *pdev)
card->controls = sc7180_snd_controls;
card->num_controls = ARRAY_SIZE(sc7180_snd_controls);
- if (of_property_read_bool(dev->of_node, "dmic-gpios")) {
+ if (of_property_present(dev->of_node, "dmic-gpios")) {
card->dapm_widgets = sc7180_snd_dual_mic_widgets,
card->num_dapm_widgets = ARRAY_SIZE(sc7180_snd_dual_mic_widgets),
card->controls = sc7180_snd_dual_mic_controls,
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index a479d7e5b7fb..fcc7df75346f 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -215,6 +215,7 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
ret = sdm845_slim_snd_hw_params(substream, params);
break;
case QUATERNARY_MI2S_RX:
+ case SECONDARY_MI2S_RX:
break;
default:
pr_err("%s: invalid dai id 0x%x\n", __func__, cpu_dai->id);
@@ -356,6 +357,7 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
snd_soc_dai_set_fmt(codec_dai, codec_dai_fmt);
break;
+ case SECONDARY_MI2S_RX:
case SECONDARY_MI2S_TX:
codec_dai_fmt |= SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S;
if (++(data->sec_mi2s_clk_count) == 1) {
@@ -371,8 +373,6 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT,
MI2S_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
snd_soc_dai_set_fmt(cpu_dai, fmt);
-
-
break;
case QUATERNARY_TDM_RX_0:
@@ -441,6 +441,7 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
}
break;
+ case SECONDARY_MI2S_RX:
case SECONDARY_MI2S_TX:
if (--(data->sec_mi2s_clk_count) == 0) {
snd_soc_dai_set_sysclk(cpu_dai,
diff --git a/sound/soc/renesas/Kconfig b/sound/soc/renesas/Kconfig
index 426632996a0a..cb01fb36355f 100644
--- a/sound/soc/renesas/Kconfig
+++ b/sound/soc/renesas/Kconfig
@@ -67,7 +67,7 @@ config SND_SH7760_AC97
config SND_SIU_MIGOR
tristate "SIU sound support on Migo-R"
- depends on SH_MIGOR && I2C
+ depends on SH_MIGOR && I2C && DMADEVICES
select SND_SOC_SH4_SIU
select SND_SOC_WM8978
help
diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
index 6efd017aaa7f..3a0af4ca7ab6 100644
--- a/sound/soc/renesas/rz-ssi.c
+++ b/sound/soc/renesas/rz-ssi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -71,7 +72,7 @@
#define PREALLOC_BUFFER (SZ_32K)
#define PREALLOC_BUFFER_MAX (SZ_32K)
-#define SSI_RATES SNDRV_PCM_RATE_8000_48000 /* 8k-44.1kHz */
+#define SSI_RATES SNDRV_PCM_RATE_8000_48000 /* 8k-48kHz */
#define SSI_FMTS SNDRV_PCM_FMTBIT_S16_LE
#define SSI_CHAN_MIN 2
#define SSI_CHAN_MAX 2
@@ -99,7 +100,6 @@ struct rz_ssi_stream {
struct rz_ssi_priv {
void __iomem *base;
- struct platform_device *pdev;
struct reset_control *rstc;
struct device *dev;
struct clk *sfr_clk;
@@ -163,16 +163,7 @@ static void rz_ssi_reg_mask_setl(struct rz_ssi_priv *priv, uint reg,
writel(val, (priv->base + reg));
}
-static inline struct snd_soc_dai *
-rz_ssi_get_dai(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
-
- return snd_soc_rtd_to_cpu(rtd, 0);
-}
-
-static inline bool rz_ssi_stream_is_play(struct rz_ssi_priv *ssi,
- struct snd_pcm_substream *substream)
+static inline bool rz_ssi_stream_is_play(struct snd_pcm_substream *substream)
{
return substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
}
@@ -244,22 +235,21 @@ static void rz_ssi_stream_init(struct rz_ssi_stream *strm,
static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi,
struct rz_ssi_stream *strm)
{
- struct snd_soc_dai *dai = rz_ssi_get_dai(strm->substream);
+ struct device *dev = ssi->dev;
rz_ssi_set_substream(strm, NULL);
if (strm->oerr_num > 0)
- dev_info(dai->dev, "overrun = %d\n", strm->oerr_num);
+ dev_info(dev, "overrun = %d\n", strm->oerr_num);
if (strm->uerr_num > 0)
- dev_info(dai->dev, "underrun = %d\n", strm->uerr_num);
+ dev_info(dev, "underrun = %d\n", strm->uerr_num);
}
static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
unsigned int channels)
{
- static s8 ckdv[16] = { 1, 2, 4, 8, 16, 32, 64, 128,
- 6, 12, 24, 48, 96, -1, -1, -1 };
+ static u8 ckdv[] = { 1, 2, 4, 8, 16, 32, 64, 128, 6, 12, 24, 48, 96 };
unsigned int channel_bits = 32; /* System Word Length */
unsigned long bclk_rate = rate * channels * channel_bits;
unsigned int div;
@@ -318,7 +308,8 @@ static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
{
- int timeout;
+ u32 tmp;
+ int ret;
/* Disable irqs */
rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TUIEN | SSICR_TOIEN |
@@ -331,15 +322,9 @@ static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
SSISR_RUIRQ), 0);
/* Wait for idle */
- timeout = 100;
- while (--timeout) {
- if (rz_ssi_reg_readl(ssi, SSISR) & SSISR_IIRQ)
- break;
- udelay(1);
- }
-
- if (!timeout)
- dev_info(ssi->dev, "timeout waiting for SSI idle\n");
+ ret = readl_poll_timeout_atomic(ssi->base + SSISR, tmp, (tmp & SSISR_IIRQ), 1, 100);
+ if (ret)
+ dev_warn_ratelimited(ssi->dev, "timeout waiting for SSI idle\n");
/* Hold FIFOs in reset */
rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_FIFO_RST);
@@ -347,7 +332,7 @@ static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
- bool is_play = rz_ssi_stream_is_play(ssi, strm->substream);
+ bool is_play = rz_ssi_stream_is_play(strm->substream);
bool is_full_duplex;
u32 ssicr, ssifcr;
@@ -403,6 +388,15 @@ static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
return 0;
}
+static int rz_ssi_swreset(struct rz_ssi_priv *ssi)
+{
+ u32 tmp;
+
+ rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_SSIRST);
+ rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_SSIRST, 0);
+ return readl_poll_timeout_atomic(ssi->base + SSIFCR, tmp, !(tmp & SSIFCR_SSIRST), 1, 5);
+}
+
static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
strm->running = 0;
@@ -415,8 +409,12 @@ static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
/* Cancel all remaining DMA transactions */
- if (rz_ssi_is_dma_enabled(ssi))
- dmaengine_terminate_async(strm->dma_ch);
+ if (rz_ssi_is_dma_enabled(ssi)) {
+ if (ssi->playback.dma_ch)
+ dmaengine_terminate_async(ssi->playback.dma_ch);
+ if (ssi->capture.dma_ch)
+ dmaengine_terminate_async(ssi->capture.dma_ch);
+ }
rz_ssi_set_idle(ssi);
@@ -523,6 +521,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
sample_space = strm->fifo_sample_size;
ssifsr = rz_ssi_reg_readl(ssi, SSIFSR);
sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK;
+ if (sample_space < 0)
+ return -EINVAL;
/* Only add full frames at a time */
while (frames_left && (sample_space >= runtime->channels)) {
@@ -680,7 +680,7 @@ static int rz_ssi_dma_transfer(struct rz_ssi_priv *ssi,
*/
return 0;
- dir = rz_ssi_stream_is_play(ssi, substream) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ dir = rz_ssi_stream_is_play(substream) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
/* Always transfer 1 period */
amount = runtime->period_size;
@@ -784,6 +784,32 @@ no_dma:
return -ENODEV;
}
+static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi)
+{
+ int ret;
+
+ if (rz_ssi_is_stream_running(&ssi->playback) ||
+ rz_ssi_is_stream_running(&ssi->capture))
+ return 0;
+
+ ret = rz_ssi_swreset(ssi);
+ if (ret)
+ return ret;
+
+ return rz_ssi_clk_setup(ssi, ssi->hw_params_cache.rate,
+ ssi->hw_params_cache.channels);
+}
+
+static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi)
+{
+ if (rz_ssi_is_stream_running(&ssi->playback) ||
+ rz_ssi_is_stream_running(&ssi->capture))
+ return;
+
+ ssi->playback.dma_buffer_pos = 0;
+ ssi->capture.dma_buffer_pos = 0;
+}
+
static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
@@ -792,21 +818,21 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int ret = 0, i, num_transfer = 1;
switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- /* Soft Reset */
- if (!rz_ssi_is_stream_running(&ssi->playback) &&
- !rz_ssi_is_stream_running(&ssi->capture)) {
- rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_SSIRST);
- rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_SSIRST, 0);
- udelay(5);
- }
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = rz_ssi_trigger_resume(ssi);
+ if (ret)
+ return ret;
- rz_ssi_stream_init(strm, substream);
+ fallthrough;
+
+ case SNDRV_PCM_TRIGGER_START:
+ if (cmd == SNDRV_PCM_TRIGGER_START)
+ rz_ssi_stream_init(strm, substream);
if (ssi->dma_rt) {
bool is_playback;
- is_playback = rz_ssi_stream_is_play(ssi, substream);
+ is_playback = rz_ssi_stream_is_play(substream);
ret = rz_ssi_dma_slave_config(ssi, ssi->playback.dma_ch,
is_playback);
/* Fallback to pio */
@@ -829,6 +855,12 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
ret = rz_ssi_start(ssi, strm);
break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ rz_ssi_stop(ssi, strm);
+ rz_ssi_streams_suspend(ssi);
+ break;
+
case SNDRV_PCM_TRIGGER_STOP:
rz_ssi_stop(ssi, strm);
rz_ssi_stream_quit(ssi, strm);
@@ -925,6 +957,7 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
+ int ret;
if (sample_bits != 16) {
dev_err(ssi->dev, "Unsupported sample width: %d\n",
@@ -951,6 +984,10 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
rz_ssi_cache_hw_params(ssi, rate, channels, strm->sample_width,
sample_bits);
+ ret = rz_ssi_swreset(ssi);
+ if (ret)
+ return ret;
+
return rz_ssi_clk_setup(ssi, rate, channels);
}
@@ -963,7 +1000,8 @@ static const struct snd_soc_dai_ops rz_ssi_dai_ops = {
static const struct snd_pcm_hardware rz_ssi_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID,
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_RESUME,
.buffer_bytes_max = PREALLOC_BUFFER,
.period_bytes_min = 32,
.period_bytes_max = 8192,
@@ -986,7 +1024,8 @@ static int rz_ssi_pcm_open(struct snd_soc_component *component,
static snd_pcm_uframes_t rz_ssi_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- struct snd_soc_dai *dai = rz_ssi_get_dai(substream);
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(rtd, 0);
struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);
@@ -1031,37 +1070,37 @@ static const struct snd_soc_component_driver rz_ssi_soc_component = {
static int rz_ssi_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rz_ssi_priv *ssi;
struct clk *audio_clk;
struct resource *res;
int ret;
- ssi = devm_kzalloc(&pdev->dev, sizeof(*ssi), GFP_KERNEL);
+ ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
if (!ssi)
return -ENOMEM;
- ssi->pdev = pdev;
- ssi->dev = &pdev->dev;
+ ssi->dev = dev;
ssi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ssi->base))
return PTR_ERR(ssi->base);
ssi->phys = res->start;
- ssi->clk = devm_clk_get(&pdev->dev, "ssi");
+ ssi->clk = devm_clk_get(dev, "ssi");
if (IS_ERR(ssi->clk))
return PTR_ERR(ssi->clk);
- ssi->sfr_clk = devm_clk_get(&pdev->dev, "ssi_sfr");
+ ssi->sfr_clk = devm_clk_get(dev, "ssi_sfr");
if (IS_ERR(ssi->sfr_clk))
return PTR_ERR(ssi->sfr_clk);
- audio_clk = devm_clk_get(&pdev->dev, "audio_clk1");
+ audio_clk = devm_clk_get(dev, "audio_clk1");
if (IS_ERR(audio_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
"no audio clk1");
ssi->audio_clk_1 = clk_get_rate(audio_clk);
- audio_clk = devm_clk_get(&pdev->dev, "audio_clk2");
+ audio_clk = devm_clk_get(dev, "audio_clk2");
if (IS_ERR(audio_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
"no audio clk2");
@@ -1074,13 +1113,13 @@ static int rz_ssi_probe(struct platform_device *pdev)
ssi->audio_mck = ssi->audio_clk_1 ? ssi->audio_clk_1 : ssi->audio_clk_2;
/* Detect DMA support */
- ret = rz_ssi_dma_request(ssi, &pdev->dev);
+ ret = rz_ssi_dma_request(ssi, dev);
if (ret < 0) {
- dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+ dev_warn(dev, "DMA not available, using PIO\n");
ssi->playback.transfer = rz_ssi_pio_send;
ssi->capture.transfer = rz_ssi_pio_recv;
} else {
- dev_info(&pdev->dev, "DMA enabled");
+ dev_info(dev, "DMA enabled");
ssi->playback.transfer = rz_ssi_dma_transfer;
ssi->capture.transfer = rz_ssi_dma_transfer;
}
@@ -1089,21 +1128,20 @@ static int rz_ssi_probe(struct platform_device *pdev)
ssi->capture.priv = ssi;
spin_lock_init(&ssi->lock);
- dev_set_drvdata(&pdev->dev, ssi);
+ dev_set_drvdata(dev, ssi);
/* Error Interrupt */
ssi->irq_int = platform_get_irq_byname(pdev, "int_req");
if (ssi->irq_int < 0) {
- rz_ssi_release_dma_channels(ssi);
- return ssi->irq_int;
+ ret = ssi->irq_int;
+ goto err_release_dma_chs;
}
- ret = devm_request_irq(&pdev->dev, ssi->irq_int, &rz_ssi_interrupt,
- 0, dev_name(&pdev->dev), ssi);
+ ret = devm_request_irq(dev, ssi->irq_int, &rz_ssi_interrupt,
+ 0, dev_name(dev), ssi);
if (ret < 0) {
- rz_ssi_release_dma_channels(ssi);
- return dev_err_probe(&pdev->dev, ret,
- "irq request error (int_req)\n");
+ dev_err_probe(dev, ret, "irq request error (int_req)\n");
+ goto err_release_dma_chs;
}
if (!rz_ssi_is_dma_enabled(ssi)) {
@@ -1115,11 +1153,11 @@ static int rz_ssi_probe(struct platform_device *pdev)
if (ssi->irq_rt < 0)
return ssi->irq_rt;
- ret = devm_request_irq(&pdev->dev, ssi->irq_rt,
+ ret = devm_request_irq(dev, ssi->irq_rt,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_rt)\n");
} else {
if (ssi->irq_tx < 0)
@@ -1128,52 +1166,48 @@ static int rz_ssi_probe(struct platform_device *pdev)
if (ssi->irq_rx < 0)
return ssi->irq_rx;
- ret = devm_request_irq(&pdev->dev, ssi->irq_tx,
+ ret = devm_request_irq(dev, ssi->irq_tx,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_tx)\n");
- ret = devm_request_irq(&pdev->dev, ssi->irq_rx,
+ ret = devm_request_irq(dev, ssi->irq_rx,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_rx)\n");
}
}
- ssi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ ssi->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(ssi->rstc)) {
ret = PTR_ERR(ssi->rstc);
- goto err_reset;
+ goto err_release_dma_chs;
}
- reset_control_deassert(ssi->rstc);
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ /* Default 0 for power saving. Can be overridden via sysfs. */
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
- goto err_pm;
+ dev_err(dev, "Failed to enable runtime PM!\n");
+ goto err_release_dma_chs;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
+ ret = devm_snd_soc_register_component(dev, &rz_ssi_soc_component,
rz_ssi_soc_dai,
ARRAY_SIZE(rz_ssi_soc_dai));
if (ret < 0) {
- dev_err(&pdev->dev, "failed to register snd component\n");
- goto err_snd_soc;
+ dev_err(dev, "failed to register snd component\n");
+ goto err_release_dma_chs;
}
return 0;
-err_snd_soc:
- pm_runtime_put(ssi->dev);
-err_pm:
- pm_runtime_disable(ssi->dev);
- reset_control_assert(ssi->rstc);
-err_reset:
+err_release_dma_chs:
rz_ssi_release_dma_channels(ssi);
return ret;
@@ -1185,8 +1219,6 @@ static void rz_ssi_remove(struct platform_device *pdev)
rz_ssi_release_dma_channels(ssi);
- pm_runtime_put(ssi->dev);
- pm_runtime_disable(ssi->dev);
reset_control_assert(ssi->rstc);
}
@@ -1196,10 +1228,30 @@ static const struct of_device_id rz_ssi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rz_ssi_of_match);
+static int rz_ssi_runtime_suspend(struct device *dev)
+{
+ struct rz_ssi_priv *ssi = dev_get_drvdata(dev);
+
+ return reset_control_assert(ssi->rstc);
+}
+
+static int rz_ssi_runtime_resume(struct device *dev)
+{
+ struct rz_ssi_priv *ssi = dev_get_drvdata(dev);
+
+ return reset_control_deassert(ssi->rstc);
+}
+
+static const struct dev_pm_ops rz_ssi_pm_ops = {
+ RUNTIME_PM_OPS(rz_ssi_runtime_suspend, rz_ssi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
static struct platform_driver rz_ssi_driver = {
.driver = {
.name = "rz-ssi-pcm-audio",
.of_match_table = rz_ssi_of_match,
+ .pm = pm_ptr(&rz_ssi_pm_ops),
},
.probe = rz_ssi_probe,
.remove = rz_ssi_remove,
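(For context on the runtime PM conversion above: devm_pm_runtime_enable() with a zero autosuspend delay means the device runtime-suspends, i.e. asserts its reset, as soon as the last reference is dropped; the delay can be raised at runtime through the device's power/autosuspend_delay_ms attribute in sysfs. The streaming paths are expected to bracket hardware access roughly as in the hedged sketch below; the function names are illustrative only and not part of the patch.)

/* Illustrative sketch only -- requires <linux/pm_runtime.h> */
static int rz_ssi_example_stream_start(struct rz_ssi_priv *ssi)
{
	/* Resume (deasserts the reset) and hold a reference while streaming */
	return pm_runtime_resume_and_get(ssi->dev);
}

static void rz_ssi_example_stream_stop(struct rz_ssi_priv *ssi)
{
	/* Drop the reference; with a 0 ms delay the reset is asserted soon after */
	pm_runtime_mark_last_busy(ssi->dev);
	pm_runtime_put_autosuspend(ssi->dev);
}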
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index d1f28699652f..7f5fcaecee4b 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -22,7 +22,6 @@
#define DRV_NAME "rockchip-i2s-tdm"
-#define DEFAULT_MCLK_FS 256
#define CH_GRP_MAX 4 /* The max channel 8 / 2 */
#define MULTIPLEX_CH_MAX 10
@@ -70,6 +69,8 @@ struct rk_i2s_tdm_dev {
bool has_playback;
bool has_capture;
struct snd_soc_dai_driver *dai;
+ unsigned int mclk_rx_freq;
+ unsigned int mclk_tx_freq;
};
static int to_ch_num(unsigned int val)
@@ -514,33 +515,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
I2S_XFER_RXS_START);
}
-static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
-{
- if (substream_capture) {
- switch (ch) {
- case I2S_CHN_4:
- return I2S_IO_6CH_OUT_4CH_IN;
- case I2S_CHN_6:
- return I2S_IO_4CH_OUT_6CH_IN;
- case I2S_CHN_8:
- return I2S_IO_2CH_OUT_8CH_IN;
- default:
- return I2S_IO_8CH_OUT_2CH_IN;
- }
- } else {
- switch (ch) {
- case I2S_CHN_4:
- return I2S_IO_4CH_OUT_6CH_IN;
- case I2S_CHN_6:
- return I2S_IO_6CH_OUT_4CH_IN;
- case I2S_CHN_8:
- return I2S_IO_8CH_OUT_2CH_IN;
- default:
- return I2S_IO_2CH_OUT_8CH_IN;
- }
- }
-}
-
static int rockchip_i2s_io_multiplex(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -577,7 +551,6 @@ static int rockchip_i2s_io_multiplex(struct snd_pcm_substream *substream,
return -EINVAL;
}
- rockchip_i2s_ch_to_io(val, true);
} else {
struct snd_pcm_str *capture_str =
&substream->pcm->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -645,6 +618,27 @@ static int rockchip_i2s_trcm_mode(struct snd_pcm_substream *substream,
return 0;
}
+static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
+ unsigned int freq, int dir)
+{
+ struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
+
+ if (i2s_tdm->clk_trcm) {
+ i2s_tdm->mclk_tx_freq = freq;
+ i2s_tdm->mclk_rx_freq = freq;
+ } else {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_tdm->mclk_tx_freq = freq;
+ else
+ i2s_tdm->mclk_rx_freq = freq;
+ }
+
+ dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
+ stream ? "rx" : "tx", freq);
+
+ return 0;
+}
+
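(With DEFAULT_MCLK_FS removed, hw_params below relies entirely on the rate stored by the new .set_sysclk callback, so a machine driver has to push the MCLK frequency in first. A hedged sketch of such a caller follows; the names and the 256*fs ratio are illustrative only, not taken from this patch.)

/* Hypothetical machine-driver hw_params -- illustrative only */
static int example_mach_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	unsigned int mclk = 256 * params_rate(params);

	/* Stored in mclk_tx_freq/mclk_rx_freq by rockchip_i2s_tdm_set_sysclk() */
	return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk, SND_SOC_CLOCK_OUT);
}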
static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -659,15 +653,19 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
if (i2s_tdm->clk_trcm == TRCM_TX) {
mclk = i2s_tdm->mclk_tx;
+ mclk_rate = i2s_tdm->mclk_tx_freq;
} else if (i2s_tdm->clk_trcm == TRCM_RX) {
mclk = i2s_tdm->mclk_rx;
+ mclk_rate = i2s_tdm->mclk_rx_freq;
} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
mclk = i2s_tdm->mclk_tx;
+ mclk_rate = i2s_tdm->mclk_tx_freq;
} else {
mclk = i2s_tdm->mclk_rx;
+ mclk_rate = i2s_tdm->mclk_rx_freq;
}
- err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+ err = clk_set_rate(mclk, mclk_rate);
if (err)
return err;
@@ -827,6 +825,7 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
.hw_params = rockchip_i2s_tdm_hw_params,
.set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
.set_fmt = rockchip_i2s_tdm_set_fmt,
+ .set_sysclk = rockchip_i2s_tdm_set_sysclk,
.set_tdm_slot = rockchip_dai_tdm_slot,
.trigger = rockchip_i2s_tdm_trigger,
};
diff --git a/sound/soc/sdca/Makefile b/sound/soc/sdca/Makefile
index c296bd5a0a7c..5d1ddbbfbf62 100644
--- a/sound/soc/sdca/Makefile
+++ b/sound/soc/sdca/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-snd-soc-sdca-objs := sdca_functions.o sdca_device.o
+snd-soc-sdca-y := sdca_functions.o sdca_device.o
obj-$(CONFIG_SND_SOC_SDCA) += snd-soc-sdca.o
diff --git a/sound/soc/sdca/sdca_device.c b/sound/soc/sdca/sdca_device.c
index 80d663777eb5..b6399b773986 100644
--- a/sound/soc/sdca/sdca_device.c
+++ b/sound/soc/sdca/sdca_device.c
@@ -7,6 +7,8 @@
*/
#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw.h>
#include <sound/sdca.h>
#include <sound/sdca_function.h>
diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c
index 652865329968..38071bc838b9 100644
--- a/sound/soc/sdca/sdca_functions.c
+++ b/sound/soc/sdca/sdca_functions.c
@@ -6,86 +6,75 @@
* https://www.mipi.org/mipi-sdca-v1-0-download
*/
+#define dev_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw.h>
+#include <linux/types.h>
#include <sound/sdca.h>
#include <sound/sdca_function.h>
-static int patch_sdca_function_type(struct device *dev,
- u32 interface_revision,
- u32 *function_type,
- const char **function_name)
+static int patch_sdca_function_type(u32 interface_revision, u32 *function_type)
{
- unsigned long function_type_patch = 0;
-
/*
* Unfortunately early SDCA specifications used different indices for Functions,
* for backwards compatibility we have to reorder the values found
*/
- if (interface_revision >= 0x0801)
- goto skip_early_draft_order;
-
- switch (*function_type) {
- case 1:
- function_type_patch = SDCA_FUNCTION_TYPE_SMART_AMP;
- break;
- case 2:
- function_type_patch = SDCA_FUNCTION_TYPE_SMART_MIC;
- break;
- case 3:
- function_type_patch = SDCA_FUNCTION_TYPE_SPEAKER_MIC;
- break;
- case 4:
- function_type_patch = SDCA_FUNCTION_TYPE_UAJ;
- break;
- case 5:
- function_type_patch = SDCA_FUNCTION_TYPE_RJ;
- break;
- case 6:
- function_type_patch = SDCA_FUNCTION_TYPE_HID;
- break;
- default:
- dev_warn(dev, "%s: SDCA version %#x unsupported function type %d, skipped\n",
- __func__, interface_revision, *function_type);
- return -EINVAL;
+ if (interface_revision < 0x0801) {
+ switch (*function_type) {
+ case 1:
+ *function_type = SDCA_FUNCTION_TYPE_SMART_AMP;
+ break;
+ case 2:
+ *function_type = SDCA_FUNCTION_TYPE_SMART_MIC;
+ break;
+ case 3:
+ *function_type = SDCA_FUNCTION_TYPE_SPEAKER_MIC;
+ break;
+ case 4:
+ *function_type = SDCA_FUNCTION_TYPE_UAJ;
+ break;
+ case 5:
+ *function_type = SDCA_FUNCTION_TYPE_RJ;
+ break;
+ case 6:
+ *function_type = SDCA_FUNCTION_TYPE_HID;
+ break;
+ default:
+ return -EINVAL;
+ }
}
-skip_early_draft_order:
- if (function_type_patch)
- *function_type = function_type_patch;
+ return 0;
+}
- /* now double-check the values */
- switch (*function_type) {
+static const char *get_sdca_function_name(u32 function_type)
+{
+ switch (function_type) {
case SDCA_FUNCTION_TYPE_SMART_AMP:
- *function_name = SDCA_FUNCTION_TYPE_SMART_AMP_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_SMART_AMP_NAME;
case SDCA_FUNCTION_TYPE_SMART_MIC:
- *function_name = SDCA_FUNCTION_TYPE_SMART_MIC_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_SMART_MIC_NAME;
case SDCA_FUNCTION_TYPE_UAJ:
- *function_name = SDCA_FUNCTION_TYPE_UAJ_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_UAJ_NAME;
case SDCA_FUNCTION_TYPE_HID:
- *function_name = SDCA_FUNCTION_TYPE_HID_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_HID_NAME;
case SDCA_FUNCTION_TYPE_SIMPLE_AMP:
+ return SDCA_FUNCTION_TYPE_SIMPLE_AMP_NAME;
case SDCA_FUNCTION_TYPE_SIMPLE_MIC:
+ return SDCA_FUNCTION_TYPE_SIMPLE_MIC_NAME;
case SDCA_FUNCTION_TYPE_SPEAKER_MIC:
+ return SDCA_FUNCTION_TYPE_SPEAKER_MIC_NAME;
case SDCA_FUNCTION_TYPE_RJ:
+ return SDCA_FUNCTION_TYPE_RJ_NAME;
case SDCA_FUNCTION_TYPE_IMP_DEF:
- dev_warn(dev, "%s: found unsupported SDCA function type %d, skipped\n",
- __func__, *function_type);
- return -EINVAL;
+ return SDCA_FUNCTION_TYPE_IMP_DEF_NAME;
default:
- dev_err(dev, "%s: found invalid SDCA function type %d, skipped\n",
- __func__, *function_type);
- return -EINVAL;
+ return NULL;
}
-
- dev_info(dev, "%s: found SDCA function %s (type %d)\n",
- __func__, *function_name, *function_type);
-
- return 0;
}
static int find_sdca_function(struct acpi_device *adev, void *data)
@@ -101,21 +90,16 @@ static int find_sdca_function(struct acpi_device *adev, void *data)
int ret;
if (sdca_data->num_functions >= SDCA_MAX_FUNCTION_COUNT) {
- dev_err(dev, "%s: maximum number of functions exceeded\n", __func__);
+ dev_err(dev, "maximum number of functions exceeded\n");
return -EINVAL;
}
- /*
- * The number of functions cannot exceed 8, we could use
- * acpi_get_local_address() but the value is stored as u64 so
- * we might as well avoid casts and intermediate levels
- */
ret = acpi_get_local_u64_address(adev->handle, &addr);
if (ret < 0)
return ret;
- if (!addr) {
- dev_err(dev, "%s: no addr\n", __func__);
+ if (!addr || addr > 0x7) {
+ dev_err(dev, "invalid addr: 0x%llx\n", addr);
return -ENODEV;
}
@@ -140,15 +124,25 @@ static int find_sdca_function(struct acpi_device *adev, void *data)
fwnode_handle_put(control5);
if (ret < 0) {
- dev_err(dev, "%s: the function type can only be determined from ACPI information\n",
- __func__);
+ dev_err(dev, "function type only supported as DisCo constant\n");
return ret;
}
- ret = patch_sdca_function_type(dev, sdca_data->interface_revision,
- &function_type, &function_name);
- if (ret < 0)
+ ret = patch_sdca_function_type(sdca_data->interface_revision, &function_type);
+ if (ret < 0) {
+ dev_err(dev, "SDCA version %#x invalid function type %d\n",
+ sdca_data->interface_revision, function_type);
return ret;
+ }
+
+ function_name = get_sdca_function_name(function_type);
+ if (!function_name) {
+ dev_err(dev, "invalid SDCA function type %d\n", function_type);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "SDCA function %s (type %d) at 0x%llx\n",
+ function_name, function_type, addr);
/* store results */
func_index = sdca_data->num_functions;
diff --git a/sound/soc/sdw_utils/soc_sdw_cs_amp.c b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
index a0bb626c5cb8..4b6181cf2971 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs_amp.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
@@ -15,6 +15,7 @@
#include <sound/soc_sdw_utils.h>
#define CODEC_NAME_SIZE 8
+#define CS_AMP_CHANNELS_PER_AMP 4
int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
{
@@ -48,6 +49,51 @@ int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai
}
EXPORT_SYMBOL_NS(asoc_sdw_cs_spk_rtd_init, "SND_SOC_SDW_UTILS");
+int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
+{
+ const struct snd_soc_dai_link *dai_link = rtd->dai_link;
+ const struct snd_soc_dai_link_ch_map *ch_map;
+ const struct snd_soc_dai_link_component *codec_dlc;
+ struct snd_soc_dai *codec_dai;
+ u8 ch_slot[8] = {};
+ unsigned int amps_per_bus, ch_per_amp, mask;
+ int i, ret;
+
+ WARN_ON(dai_link->num_cpus > ARRAY_SIZE(ch_slot));
+
+ /*
+ * CS35L56 has 4 TX channels. When the capture is aggregated the
+ * same bus slots will be allocated to all the amps on a bus. Only
+ * one amp on that bus can be transmitting in each slot so divide
+ * the available 4 slots between all the amps on a bus.
+ */
+ amps_per_bus = dai_link->num_codecs / dai_link->num_cpus;
+ if ((amps_per_bus == 0) || (amps_per_bus > CS_AMP_CHANNELS_PER_AMP)) {
+ dev_err(rtd->card->dev, "Illegal num_codecs:%u / num_cpus:%u\n",
+ dai_link->num_codecs, dai_link->num_cpus);
+ return -EINVAL;
+ }
+
+ ch_per_amp = CS_AMP_CHANNELS_PER_AMP / amps_per_bus;
+
+ for_each_rtd_ch_maps(rtd, i, ch_map) {
+ codec_dlc = snd_soc_link_to_codec(rtd->dai_link, i);
+ codec_dai = snd_soc_find_dai(codec_dlc);
+ mask = GENMASK(ch_per_amp - 1, 0) << ch_slot[ch_map->cpu];
+
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, mask, 4, 32);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to set TDM slot:%d\n", ret);
+ return ret;
+ }
+
+ ch_slot[ch_map->cpu] += ch_per_amp;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(asoc_sdw_cs_spk_feedback_rtd_init, "SND_SOC_SDW_UTILS");
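(To make the slot arithmetic in asoc_sdw_cs_spk_feedback_rtd_init() concrete, here is a worked example for a hypothetical link with four amps aggregated over two SoundWire buses, i.e. num_codecs = 4 and num_cpus = 2, assuming the ch_maps list the amps bus by bus.)

/*
 * amps_per_bus = 4 / 2 = 2, ch_per_amp = CS_AMP_CHANNELS_PER_AMP / 2 = 2
 *
 *   amp0 (bus0): mask = GENMASK(1, 0) << 0 = 0x3  -> TX slots 0,1
 *   amp1 (bus0): mask = GENMASK(1, 0) << 2 = 0xc  -> TX slots 2,3
 *   amp2 (bus1): mask = GENMASK(1, 0) << 0 = 0x3  -> TX slots 0,1
 *   amp3 (bus1): mask = GENMASK(1, 0) << 2 = 0xc  -> TX slots 2,3
 *
 * Amps on different buses may reuse the same slots because each bus
 * carries its own capture stream.
 */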
+
int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_links,
struct asoc_sdw_codec_info *info,
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index 937fa3ce59df..6ee7d30b8ece 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -488,10 +488,10 @@ struct asoc_sdw_codec_info codec_info_list[] = {
.part_id = 0x3556,
.dais = {
{
- .direction = {true, true},
+ .direction = {true, false},
.dai_name = "cs35l56-sdw1",
.dai_type = SOC_SDW_DAI_TYPE_AMP,
- .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID},
+ .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID},
.init = asoc_sdw_cs_amp_init,
.rtd_init = asoc_sdw_cs_spk_rtd_init,
.controls = generic_spk_controls,
@@ -499,8 +499,15 @@ struct asoc_sdw_codec_info codec_info_list[] = {
.widgets = generic_spk_widgets,
.num_widgets = ARRAY_SIZE(generic_spk_widgets),
},
+ {
+ .direction = {false, true},
+ .dai_name = "cs35l56-sdw1c",
+ .dai_type = SOC_SDW_DAI_TYPE_AMP,
+ .dailink = {SOC_SDW_UNUSED_DAI_ID, SOC_SDW_AMP_IN_DAI_ID},
+ .rtd_init = asoc_sdw_cs_spk_feedback_rtd_init,
+ },
},
- .dai_num = 1,
+ .dai_num = 2,
},
{
.part_id = 0x4242,
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
index 8e9546fe7428..e6eb71b3010a 100644
--- a/sound/soc/soc-card.c
+++ b/sound/soc/soc-card.c
@@ -219,7 +219,7 @@ int snd_soc_card_set_bias_level(struct snd_soc_card *card,
{
int ret = 0;
- if (card && card->set_bias_level)
+ if (card->set_bias_level)
ret = card->set_bias_level(card, dapm, level);
return soc_card_ret(card, ret);
@@ -231,7 +231,7 @@ int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
{
int ret = 0;
- if (card && card->set_bias_level_post)
+ if (card->set_bias_level_post)
ret = card->set_bias_level_post(card, dapm, level);
return soc_card_ret(card, ret);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a1dace4bb616..3c6d8aef4130 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1449,23 +1449,46 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
{
struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
+ unsigned int ext_fmt;
unsigned int i;
int ret;
if (!dai_fmt)
return 0;
+ /*
+ * dai_fmt has 4 types
+ * 1. SND_SOC_DAIFMT_FORMAT_MASK
+ * 2. SND_SOC_DAIFMT_CLOCK
+ * 3. SND_SOC_DAIFMT_INV
+ * 4. SND_SOC_DAIFMT_CLOCK_PROVIDER
+ *
+ * 4. CLOCK_PROVIDER is specified in dai_fmt from the Codec's perspective, so it is
+ * flipped when this function calls set_fmt() for the CPU (CBx_CFx is flipped); see below.
+ * This means, for example, that CPU and Codec cannot both be clock consumers.
+ * The new approach carries 4. in each dai's ext_fmt, which keeps backwards compatibility.
+ *
+ * Legacy
+ * dai_fmt includes 1, 2, 3, 4
+ *
+ * New idea
+ * dai_fmt includes 1, 2, 3
+ * ext_fmt includes 4
+ */
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
+ ext_fmt = rtd->dai_link->codecs[i].ext_fmt;
+ ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt | ext_fmt);
if (ret != 0 && ret != -ENOTSUPP)
return ret;
}
/* Flip the polarity for the "CPU" end of link */
+ /* Takes effect only for 4. SND_SOC_DAIFMT_CLOCK_PROVIDER */
dai_fmt = snd_soc_daifmt_clock_provider_flipped(dai_fmt);
for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt);
+ ext_fmt = rtd->dai_link->cpus[i].ext_fmt;
+ ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt | ext_fmt);
if (ret != 0 && ret != -ENOTSUPP)
return ret;
}
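(As an illustration of the split described in the comment above, a dai_link can now put the clock-provider bits into ext_fmt per component instead of dai_fmt, so both ends can be clock consumers. The sketch below is hypothetical -- component names invented, not a complete link definition -- and assumes ext_fmt takes the usual SND_SOC_DAIFMT_CB*_CF* flags.)

static struct snd_soc_dai_link_component example_cpus[] = {
	{ .dai_name = "cpu-dai", .ext_fmt = SND_SOC_DAIFMT_CBC_CFC },
};
static struct snd_soc_dai_link_component example_codecs[] = {
	{ .name = "codec.0", .dai_name = "codec-dai",
	  .ext_fmt = SND_SOC_DAIFMT_CBC_CFC },
};
static struct snd_soc_dai_link example_link = {
	.name = "example",
	/* 1.-3. stay in dai_fmt; 4. moves to ext_fmt above */
	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF,
	.cpus = example_cpus,
	.num_cpus = ARRAY_SIZE(example_cpus),
	.codecs = example_codecs,
	.num_codecs = ARRAY_SIZE(example_codecs),
};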
@@ -1644,18 +1667,8 @@ static int soc_probe_component(struct snd_soc_card *card,
ret = snd_soc_dapm_add_routes(dapm,
component->driver->dapm_routes,
component->driver->num_dapm_routes);
- if (ret < 0) {
- if (card->disable_route_checks) {
- dev_info(card->dev,
- "%s: disable_route_checks set, ignoring errors on add_routes\n",
- __func__);
- } else {
- dev_err(card->dev,
- "%s: snd_soc_dapm_add_routes failed: %d\n",
- __func__, ret);
- goto err_probe;
- }
- }
+ if (ret < 0)
+ goto err_probe;
/* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
@@ -2234,18 +2247,8 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- if (ret < 0) {
- if (card->disable_route_checks) {
- dev_info(card->dev,
- "%s: disable_route_checks set, ignoring errors on add_routes\n",
- __func__);
- } else {
- dev_err(card->dev,
- "%s: snd_soc_dapm_add_routes failed: %d\n",
- __func__, ret);
- goto probe_end;
- }
- }
+ if (ret < 0)
+ goto probe_end;
ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
card->num_of_dapm_routes);
@@ -3389,6 +3392,9 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
char prop[128];
unsigned int bit, frame;
+ if (!np)
+ return 0;
+
if (!prefix)
prefix = "";
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 34ba1a93a4c9..ca0308f6d41c 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -360,6 +360,22 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
+int snd_soc_dai_prepare(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ if (!snd_soc_dai_stream_valid(dai, substream->stream))
+ return 0;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->prepare)
+ ret = dai->driver->ops->prepare(substream, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_prepare);
+
/**
* snd_soc_dai_digital_mute - configure DAI system or master clock.
* @dai: DAI
@@ -577,14 +593,9 @@ int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream)
int i, ret;
for_each_rtd_dais(rtd, i, dai) {
- if (!snd_soc_dai_stream_valid(dai, substream->stream))
- continue;
- if (dai->driver->ops &&
- dai->driver->ops->prepare) {
- ret = dai->driver->ops->prepare(substream, dai);
- if (ret < 0)
- return soc_dai_ret(dai, ret);
- }
+ ret = snd_soc_dai_prepare(dai, substream);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 99521c784a9b..b5116b700d73 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -730,7 +730,7 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
if (ret != 0)
goto out;
- if (!card || dapm != &card->dapm)
+ if (dapm != &card->dapm)
ret = snd_soc_dapm_force_bias_level(dapm, level);
if (ret != 0)
@@ -4013,6 +4013,18 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_POST_PMU:
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
+
+ snd_soc_dai_prepare(source, substream);
+ }
+
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ snd_soc_dai_prepare(sink, substream);
+ }
+
snd_soc_dapm_widget_for_each_sink_path(w, path) {
sink = path->sink->priv;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 1150455619aa..88b3ad5a2552 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -38,7 +38,6 @@ static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd,
switch (ret) {
case -EPROBE_DEFER:
case -ENOTSUPP:
- case -EINVAL:
break;
default:
dev_err(rtd->dev,
@@ -986,7 +985,13 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
}
out:
- return soc_pcm_ret(rtd, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
/* PCM prepare ops for non-DPCM streams */
@@ -998,6 +1003,13 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dpcm_mutex_lock(rtd);
ret = __soc_pcm_prepare(rtd, substream);
snd_soc_dpcm_mutex_unlock(rtd);
+
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
return ret;
}
@@ -2539,7 +2551,13 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
}
- return soc_pcm_ret(fe, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
@@ -2579,7 +2597,13 @@ out:
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
snd_soc_dpcm_mutex_unlock(fe);
- return soc_pcm_ret(fe, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 43003d2d3666..9f4da061eff9 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1101,14 +1101,8 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
}
ret = snd_soc_dapm_add_routes(dapm, route, 1);
- if (ret) {
- if (!dapm->card->disable_route_checks) {
- dev_err(tplg->dev, "ASoC: dapm_add_routes failed: %d\n", ret);
- break;
- }
- dev_info(tplg->dev,
- "ASoC: disable_route_checks set, ignoring dapm_add_routes errors\n");
- }
+ if (ret)
+ break;
}
return ret;
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 0b85b29d1067..1e7bf00d7c46 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -175,8 +175,7 @@ static int imx8_run(struct snd_sof_dev *sdev)
static int imx8_probe(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev =
- container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *res_node;
struct resource *mmio;
@@ -607,10 +606,31 @@ static struct snd_sof_of_mach sof_imx8_machs[] = {
.drv_name = "asoc-audio-graph-card2",
},
{
+ .compatible = "fsl,imx8qxp-mek-wcpu",
+ .sof_tplg_filename = "sof-imx8-wm8962.tplg",
+ .drv_name = "asoc-audio-graph-card2",
+ },
+ {
.compatible = "fsl,imx8qm-mek",
.sof_tplg_filename = "sof-imx8-wm8960.tplg",
.drv_name = "asoc-audio-graph-card2",
},
+ {
+ .compatible = "fsl,imx8qm-mek-revd",
+ .sof_tplg_filename = "sof-imx8-wm8962.tplg",
+ .drv_name = "asoc-audio-graph-card2",
+ },
+ {
+ .compatible = "fsl,imx8qxp-mek-bb",
+ .sof_tplg_filename = "sof-imx8-cs42888.tplg",
+ .drv_name = "asoc-audio-graph-card2",
+ },
+ {
+ .compatible = "fsl,imx8qm-mek-bb",
+ .sof_tplg_filename = "sof-imx8-cs42888.tplg",
+ .drv_name = "asoc-audio-graph-card2",
+ },
+
{}
};
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index ff42743efa79..3cabdebac558 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -144,8 +144,7 @@ static int imx8m_reset(struct snd_sof_dev *sdev)
static int imx8m_probe(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev =
- container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *res_node;
struct resource *mmio;
@@ -295,6 +294,17 @@ static struct snd_soc_dai_driver imx8m_dai[] = {
},
},
{
+ .name = "sai2",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+{
.name = "sai3",
.playback = {
.channels_min = 1,
@@ -306,6 +316,39 @@ static struct snd_soc_dai_driver imx8m_dai[] = {
},
},
{
+ .name = "sai5",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+{
+ .name = "sai6",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+{
+ .name = "sai7",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 32,
+ },
+},
+{
.name = "micfil",
.capture = {
.channels_min = 1,
@@ -472,6 +515,11 @@ static const struct snd_sof_dsp_ops sof_imx8m_ops = {
static struct snd_sof_of_mach sof_imx8mp_machs[] = {
{
+ .compatible = "fsl,imx8mp-evk-revb4",
+ .sof_tplg_filename = "sof-imx8mp-wm8962.tplg",
+ .drv_name = "asoc-audio-graph-card2",
+ },
+ {
.compatible = "fsl,imx8mp-evk",
.sof_tplg_filename = "sof-imx8mp-wm8960.tplg",
.drv_name = "asoc-audio-graph-card2",
diff --git a/sound/soc/sof/imx/imx8ulp.c b/sound/soc/sof/imx/imx8ulp.c
index 6965791ab6ef..0704da27e69d 100644
--- a/sound/soc/sof/imx/imx8ulp.c
+++ b/sound/soc/sof/imx/imx8ulp.c
@@ -155,8 +155,7 @@ static int imx8ulp_reset(struct snd_sof_dev *sdev)
static int imx8ulp_probe(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev =
- container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *res_node;
struct resource *mmio;
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
index 30e981c558c6..0d364bcdcfa9 100644
--- a/sound/soc/sof/intel/atom.c
+++ b/sound/soc/sof/intel/atom.c
@@ -78,20 +78,20 @@ void atom_dump(struct snd_sof_dev *sdev, u32 flags)
imrd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRD);
dev_err(sdev->dev,
"error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
- (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
- (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ str_yes_no(panic & SHIM_IPCX_BUSY),
+ str_yes_no(panic & SHIM_IPCX_DONE), panic);
dev_err(sdev->dev,
"error: mask host: pending %s complete %s raw 0x%llx\n",
- (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
- (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ str_yes_no(imrx & SHIM_IMRX_BUSY),
+ str_yes_no(imrx & SHIM_IMRX_DONE), imrx);
dev_err(sdev->dev,
"error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
- (status & SHIM_IPCD_BUSY) ? "yes" : "no",
- (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ str_yes_no(status & SHIM_IPCD_BUSY),
+ str_yes_no(status & SHIM_IPCD_DONE), status);
dev_err(sdev->dev,
"error: mask DSP: pending %s complete %s raw 0x%llx\n",
- (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
- (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+ str_yes_no(imrd & SHIM_IMRD_BUSY),
+ str_yes_no(imrd & SHIM_IMRD_DONE), imrd);
}
EXPORT_SYMBOL_NS(atom_dump, "SND_SOC_SOF_INTEL_ATOM_HIFI_EP");
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index c4d92f3508b6..e1f0e38c2407 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -266,20 +266,20 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
dev_err(sdev->dev,
"error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
- (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
- (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ str_yes_no(panic & SHIM_IPCX_BUSY),
+ str_yes_no(panic & SHIM_IPCX_DONE), panic);
dev_err(sdev->dev,
"error: mask host: pending %s complete %s raw 0x%8.8x\n",
- (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
- (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ str_yes_no(imrx & SHIM_IMRX_BUSY),
+ str_yes_no(imrx & SHIM_IMRX_DONE), imrx);
dev_err(sdev->dev,
"error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
- (status & SHIM_IPCD_BUSY) ? "yes" : "no",
- (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ str_yes_no(status & SHIM_IPCD_BUSY),
+ str_yes_no(status & SHIM_IPCD_DONE), status);
dev_err(sdev->dev,
"error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
- (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
- (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+ str_yes_no(imrd & SHIM_IMRD_BUSY),
+ str_yes_no(imrd & SHIM_IMRD_DONE), imrd);
}
/*
@@ -410,8 +410,7 @@ static int bdw_probe(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
const struct sof_dev_desc *desc = pdata->desc;
- struct platform_device *pdev =
- container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
const struct sof_intel_dsp_desc *chip;
struct resource *mmio;
u32 base, size;
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index 536d4c89d2f0..cae7dc0036c6 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -109,8 +109,7 @@ static int byt_acpi_probe(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
const struct sof_dev_desc *desc = pdata->desc;
- struct platform_device *pdev =
- container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
const struct sof_intel_dsp_desc *chip;
struct resource *mmio;
u32 base, size;
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 0db2a3e554fb..da12aabc1bb8 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -503,6 +503,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
int ret;
int i;
+ if (!w) {
+ dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+ cpu_dai->name);
+ return -EINVAL;
+ }
+
ops = hda_dai_get_ops(substream, cpu_dai);
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
@@ -582,6 +588,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
*/
for_each_rtd_cpu_dais(rtd, i, dai) {
w = snd_soc_dai_get_widget(dai, substream->stream);
+ if (!w) {
+ dev_err(cpu_dai->dev,
+ "%s widget not found, check amp link num in the topology\n",
+ dai->name);
+ return -EINVAL;
+ }
ipc4_copier = widget_to_copier(w);
memcpy(&ipc4_copier->dma_config_tlv[cpu_dai_id], dma_config_tlv,
sizeof(*dma_config_tlv));
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 5b5e484f9acf..1dd8d2092c3b 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -37,6 +37,11 @@ static bool hda_disable_rewinds;
module_param_named(disable_rewinds, hda_disable_rewinds, bool, 0444);
MODULE_PARM_DESC(disable_rewinds, "SOF HDA disable rewinds");
+static int hda_force_pause_support = -1;
+module_param_named(force_pause_support, hda_force_pause_support, int, 0444);
+MODULE_PARM_DESC(force_pause_support,
+ "Pause support: -1: Use default, 0: Disable, 1: Enable (default -1)");
+
u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
{
switch (rate) {
@@ -240,6 +245,16 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
if (hda_always_enable_dmi_l1 && direction == SNDRV_PCM_STREAM_CAPTURE)
runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+ /*
+ * Do not advertise PAUSE support if it is forced off via the module
+ * parameter, or if pause_supported is false for the PCM
+ * device
+ */
+ if (hda_force_pause_support == 0 ||
+ (hda_force_pause_support == -1 &&
+ !spcm->stream[substream->stream].pause_supported))
+ runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+
if (hda_always_enable_dmi_l1 ||
direction == SNDRV_PCM_STREAM_PLAYBACK ||
spcm->stream[substream->stream].d0i3_compatible)
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index f991785f727e..be689f6e10c8 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -63,6 +63,11 @@ static int sdw_params_stream(struct device *dev,
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
+ if (!w) {
+ dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+ d->name);
+ return -EINVAL;
+ }
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
data.dai_node_id = data.dai_data;
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index b55eb977e443..c04c62478827 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -2827,7 +2827,7 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
msg->extension &= ~SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK;
- msg->extension |= ipc_size >> 2;
+ msg->extension |= SOF_IPC4_MOD_EXT_PARAM_SIZE(ipc_size >> 2);
msg->extension &= ~SOF_IPC4_MOD_EXT_PPL_ID_MASK;
msg->extension |= SOF_IPC4_MOD_EXT_PPL_ID(pipe_widget->instance_id);
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
index 9955dfa520ae..31437fdd4e92 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -238,7 +238,7 @@ static int mt8186_run(struct snd_sof_dev *sdev)
static int mt8186_dsp_probe(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct adsp_priv *priv;
int ret;
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
index 6032b566c679..371563d7ce79 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -228,7 +228,7 @@ static int mt8195_run(struct snd_sof_dev *sdev)
static int mt8195_dsp_probe(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct adsp_priv *priv;
int ret;
@@ -341,7 +341,7 @@ static int mt8195_dsp_shutdown(struct snd_sof_dev *sdev)
static void mt8195_dsp_remove(struct snd_sof_dev *sdev)
{
- struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
struct adsp_priv *priv = sdev->pdata->hw_pdata;
platform_device_unregister(priv->ipc_dev);
@@ -351,7 +351,7 @@ static void mt8195_dsp_remove(struct snd_sof_dev *sdev)
static int mt8195_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
- struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(sdev->dev);
int ret;
u32 reset_sw, dbg_pc;
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index 01b819dd8498..62f3c11a9216 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -332,6 +332,7 @@ struct snd_sof_pcm_stream {
struct work_struct period_elapsed_work;
struct snd_soc_dapm_widget_list *list; /* list of connected DAPM widgets */
bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ bool pause_supported; /* PCM device supports PAUSE operation */
unsigned int dsp_max_burst_size_in_ms; /* The maximum size of the host DMA burst in ms */
/*
* flag to indicate that the DSP pipelines should be kept
diff --git a/sound/soc/sof/sof-client-ipc-flood-test.c b/sound/soc/sof/sof-client-ipc-flood-test.c
index b35c98896968..11b6f7da2882 100644
--- a/sound/soc/sof/sof-client-ipc-flood-test.c
+++ b/sound/soc/sof/sof-client-ipc-flood-test.c
@@ -158,7 +158,6 @@ static ssize_t sof_ipc_flood_dfs_write(struct file *file, const char __user *buf
unsigned long ipc_duration_ms = 0;
bool flood_duration_test = false;
unsigned long ipc_count = 0;
- struct dentry *dentry;
int err;
char *string;
int ret;
@@ -182,14 +181,7 @@ static ssize_t sof_ipc_flood_dfs_write(struct file *file, const char __user *buf
* ipc_duration_ms test floods the DSP for the time specified
* in the debugfs entry.
*/
- dentry = file->f_path.dentry;
- if (strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) &&
- strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION))
+ if (debugfs_get_aux_num(file))
flood_duration_test = true;
/* test completion criterion */
@@ -252,22 +244,15 @@ static ssize_t sof_ipc_flood_dfs_read(struct file *file, char __user *buffer,
struct sof_ipc_flood_priv *priv = cdev->data;
size_t size_ret;
- struct dentry *dentry;
+ if (*ppos)
+ return 0;
- dentry = file->f_path.dentry;
- if (!strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_COUNT) ||
- !strcmp(dentry->d_name.name, DEBUGFS_IPC_FLOOD_DURATION)) {
- if (*ppos)
- return 0;
+ count = min_t(size_t, count, strlen(priv->buf));
+ size_ret = copy_to_user(buffer, priv->buf, count);
+ if (size_ret)
+ return -EFAULT;
- count = min_t(size_t, count, strlen(priv->buf));
- size_ret = copy_to_user(buffer, priv->buf, count);
- if (size_ret)
- return -EFAULT;
-
- *ppos += count;
- return count;
- }
+ *ppos += count;
return count;
}
@@ -320,12 +305,12 @@ static int sof_ipc_flood_probe(struct auxiliary_device *auxdev,
priv->dfs_root = debugfs_create_dir(dev_name(dev), debugfs_root);
if (!IS_ERR_OR_NULL(priv->dfs_root)) {
/* create read-write ipc_flood_count debugfs entry */
- debugfs_create_file(DEBUGFS_IPC_FLOOD_COUNT, 0644, priv->dfs_root,
- cdev, &sof_ipc_flood_fops);
+ debugfs_create_file_aux_num(DEBUGFS_IPC_FLOOD_COUNT, 0644,
+ priv->dfs_root, cdev, 0, &sof_ipc_flood_fops);
/* create read-write ipc_flood_duration_ms debugfs entry */
- debugfs_create_file(DEBUGFS_IPC_FLOOD_DURATION, 0644,
- priv->dfs_root, cdev, &sof_ipc_flood_fops);
+ debugfs_create_file_aux_num(DEBUGFS_IPC_FLOOD_DURATION, 0644,
+ priv->dfs_root, cdev, 1, &sof_ipc_flood_fops);
if (auxdev->id == 0) {
/*
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index 843be3b6415d..abbb5ee7e08c 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -76,14 +76,6 @@ bool sof_debug_check_flag(int mask);
#define SOF_IPC_DSP_REPLY 0
#define SOF_IPC_HOST_REPLY 1
-/* convenience constructor for DAI driver streams */
-#define SOF_DAI_STREAM(sname, scmin, scmax, srates, sfmt) \
- {.stream_name = sname, .channels_min = scmin, .channels_max = scmax, \
- .rates = srates, .formats = sfmt}
-
-#define SOF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT)
-
/* So far the primary core on all DSPs has ID 0 */
#define SOF_DSP_PRIMARY_CORE 0
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index b3fca5fd87d6..688cc7ac1714 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -407,6 +407,10 @@ static const struct sof_topology_token stream_tokens[] = {
offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible)},
{SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible)},
+ {SOF_TKN_STREAM_PLAYBACK_PAUSE_SUPPORTED, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].pause_supported)},
+ {SOF_TKN_STREAM_CAPTURE_PAUSE_SUPPORTED, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].pause_supported)},
};
/* Leds */
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 933a0913237c..886b3fa537d2 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -5,6 +5,7 @@
* Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
* Copyright 2015 Adam Sampson <ats@offog.org>
* Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright 2018 Mesih Kilinc <mesihkilinc@gmail.com>
*
* Based on the Allwinner SDK driver, released under the GPL.
*/
@@ -265,6 +266,64 @@
/* TODO H3 DAP (Digital Audio Processing) bits */
+#define SUN4I_DMA_MAX_BURST (8)
+
+/* suniv specific registers */
+
+#define SUNIV_DMA_MAX_BURST (4)
+
+/* Codec ADC digital controls and FIFO registers */
+#define SUNIV_CODEC_ADC_FIFOC (0x10)
+#define SUNIV_CODEC_ADC_FIFOC_EN_AD (28)
+#define SUNIV_CODEC_ADC_FIFOS (0x14)
+#define SUNIV_CODEC_ADC_RXDATA (0x18)
+
+/* Output mixer and gain controls */
+#define SUNIV_CODEC_OM_DACA_CTRL (0x20)
+#define SUNIV_CODEC_OM_DACA_CTRL_DACAREN (31)
+#define SUNIV_CODEC_OM_DACA_CTRL_DACALEN (30)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXEN (29)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXEN (28)
+#define SUNIV_CODEC_OM_DACA_CTRL_RHPPAMUTE (27)
+#define SUNIV_CODEC_OM_DACA_CTRL_LHPPAMUTE (26)
+#define SUNIV_CODEC_OM_DACA_CTRL_RHPIS (25)
+#define SUNIV_CODEC_OM_DACA_CTRL_LHPIS (24)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPCOM_CTL (22)
+#define SUNIV_CODEC_OM_DACA_CTRL_COMPTEN (21)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_MICIN (20)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LINEIN (19)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_FMIN (18)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_RDAC (17)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LDAC (16)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPPAEN (15)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_MICIN (12)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LINEIN (11)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_FMIN (10)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LDAC (9)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_RDAC (8)
+#define SUNIV_CODEC_OM_DACA_CTRL_LTLNMUTE (7)
+#define SUNIV_CODEC_OM_DACA_CTRL_RTLNMUTE (6)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPVOL (0)
+
+/* Analog Input Mixer controls */
+#define SUNIV_CODEC_ADC_ACTL (0x24)
+#define SUNIV_CODEC_ADC_ADCEN (31)
+#define SUNIV_CODEC_ADC_MICG (24)
+#define SUNIV_CODEC_ADC_LINEINVOL (21)
+#define SUNIV_CODEC_ADC_ADCG (16)
+#define SUNIV_CODEC_ADC_ADCMIX_MIC (13)
+#define SUNIV_CODEC_ADC_ADCMIX_FMINL (12)
+#define SUNIV_CODEC_ADC_ADCMIX_FMINR (11)
+#define SUNIV_CODEC_ADC_ADCMIX_LINEIN (10)
+#define SUNIV_CODEC_ADC_ADCMIX_LOUT (9)
+#define SUNIV_CODEC_ADC_ADCMIX_ROUT (8)
+#define SUNIV_CODEC_ADC_PASPEEDSELECT (7)
+#define SUNIV_CODEC_ADC_FMINVOL (4)
+#define SUNIV_CODEC_ADC_MICAMPEN (3)
+#define SUNIV_CODEC_ADC_MICBOOST (0)
+
+#define SUNIV_CODEC_ADC_DBG (0x4c)
+
struct sun4i_codec {
struct device *dev;
struct regmap *regmap;
@@ -1255,6 +1314,228 @@ static const struct snd_soc_component_driver sun8i_a23_codec_codec = {
.endianness = 1,
};
+/* suniv F1C100s codec */
+
+/* headphone controls */
+static const char * const suniv_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(suniv_codec_hp_src_enum,
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LHPIS,
+ SUNIV_CODEC_OM_DACA_CTRL_RHPIS,
+ suniv_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new suniv_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ suniv_codec_hp_src_enum),
+};
+
+/* mixer controls */
+static const struct snd_kcontrol_new suniv_codec_adc_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Right Out Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_ROUT, 1, 0),
+ SOC_DAPM_SINGLE("Left Out Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_LOUT, 1, 0),
+ SOC_DAPM_SINGLE("Line In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Right FM In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_FMINR, 1, 0),
+ SOC_DAPM_SINGLE("Left FM In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_FMINL, 1, 0),
+ SOC_DAPM_SINGLE("Mic Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_MIC, 1, 0),
+};
+
+static const struct snd_kcontrol_new suniv_codec_dac_lmixer_controls[] = {
+ SOC_DAPM_SINGLE("Right DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_RDAC, 1, 0),
+ SOC_DAPM_SINGLE("Left DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LDAC, 1, 0),
+ SOC_DAPM_SINGLE("FM In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_FMIN, 1, 0),
+ SOC_DAPM_SINGLE("Line In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Mic In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_MICIN, 1, 0),
+};
+
+static const struct snd_kcontrol_new suniv_codec_dac_rmixer_controls[] = {
+ SOC_DAPM_SINGLE("Left DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LDAC, 1, 0),
+ SOC_DAPM_SINGLE("Right DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_RDAC, 1, 0),
+ SOC_DAPM_SINGLE("FM In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_FMIN, 1, 0),
+ SOC_DAPM_SINGLE("Line In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Mic In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_MICIN, 1, 0),
+};
+
+static const DECLARE_TLV_DB_SCALE(suniv_codec_dvol_scale, -7308, 116, 0);
+static const DECLARE_TLV_DB_SCALE(suniv_codec_hp_vol_scale, -6300, 100, 1);
+static const DECLARE_TLV_DB_SCALE(suniv_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+
+static const DECLARE_TLV_DB_RANGE(suniv_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new suniv_codec_codec_widgets[] = {
+ SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+ suniv_codec_dvol_scale),
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPVOL, 0x3f, 0,
+ suniv_codec_hp_vol_scale),
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LHPPAMUTE,
+ SUNIV_CODEC_OM_DACA_CTRL_RHPPAMUTE, 1, 0),
+ SOC_SINGLE_TLV("Line In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_LINEINVOL,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("FM In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_FMINVOL,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_MICG,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gains */
+ SOC_SINGLE_TLV("Mic Boost Volume", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_MICBOOST, 0x7, 0,
+ suniv_codec_mic_gain_scale),
+ SOC_SINGLE_TLV("ADC Capture Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_ADCG,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget suniv_codec_codec_dapm_widgets[] = {
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC"),
+
+ /* Microphone Bias */
+ /* HBIAS and MBIAS bias widgets are intentionally omitted here */
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic Amplifier", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_MICAMPEN, 0, NULL, 0),
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* FM In */
+ SND_SOC_DAPM_INPUT("FMINR"),
+ SND_SOC_DAPM_INPUT("FMINL"),
+
+ /* Digital parts of the ADCs */
+ SND_SOC_DAPM_SUPPLY("ADC Enable", SUNIV_CODEC_ADC_FIFOC,
+ SUNIV_CODEC_ADC_FIFOC_EN_AD, 0,
+ NULL, 0),
+
+ /* Analog parts of the ADCs */
+ SND_SOC_DAPM_ADC("ADC", "Codec Capture", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCEN, 0),
+
+ /* ADC Mixers */
+ SOC_MIXER_ARRAY("ADC Mixer", SUNIV_CODEC_ADC_ACTL,
+ SND_SOC_NOPM, 0,
+ suniv_codec_adc_mixer_controls),
+
+ /* Digital parts of the DACs */
+ SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_EN_DA, 0,
+ NULL, 0),
+
+ /* Analog parts of the DACs */
+ SND_SOC_DAPM_DAC("Left DAC", "Codec Playback",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Codec Playback",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_DACAREN, 0),
+
+ /* Mixers */
+ SOC_MIXER_ARRAY("Left Mixer", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXEN, 0,
+ suniv_codec_dac_lmixer_controls),
+ SOC_MIXER_ARRAY("Right Mixer", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXEN, 0,
+ suniv_codec_dac_rmixer_controls),
+
+ /* Headphone output path */
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, suniv_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPPAEN, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_COMPTEN, 0, NULL, 0),
+ SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPCOM_CTL, 0x3, 0x3, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+};
+
+static const struct snd_soc_dapm_route suniv_codec_codec_dapm_routes[] = {
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+
+ /* Microphone Routes */
+ { "Mic Amplifier", NULL, "MIC"},
+
+ /* Left Mixer Routes */
+ { "Left Mixer", "Right DAC Playback Switch", "Right DAC" },
+ { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "FM In Playback Switch", "FMINL" },
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left Mixer", "Mic In Playback Switch", "Mic Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "Left DAC Playback Switch", "Left DAC" },
+ { "Right Mixer", "Right DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "FM In Playback Switch", "FMINR" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Mic In Playback Switch", "Mic Amplifier" },
+
+ /* ADC Mixer Routes */
+ { "ADC Mixer", "Right Out Capture Switch", "Right Mixer" },
+ { "ADC Mixer", "Left Out Capture Switch", "Left Mixer" },
+ { "ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "ADC Mixer", "Right FM In Capture Switch", "FMINR" },
+ { "ADC Mixer", "Left FM In Capture Switch", "FMINL" },
+ { "ADC Mixer", "Mic Capture Switch", "Mic Amplifier" },
+
+ /* Headphone Routes */
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HP", NULL, "Headphone Amp" },
+ { "HPCOM", NULL, "HPCOM Protection" },
+
+ /* ADC Routes */
+ { "ADC", NULL, "ADC Mixer" },
+ { "ADC", NULL, "ADC Enable" },
+};
+
+static const struct snd_soc_component_driver suniv_codec_codec = {
+ .controls = suniv_codec_codec_widgets,
+ .num_controls = ARRAY_SIZE(suniv_codec_codec_widgets),
+ .dapm_widgets = suniv_codec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(suniv_codec_codec_dapm_widgets),
+ .dapm_routes = suniv_codec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(suniv_codec_codec_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+};
+
static const struct snd_soc_component_driver sun4i_codec_component = {
.name = "sun4i-codec",
.legacy_dai_naming = 1,
@@ -1701,6 +1982,56 @@ static struct snd_soc_card *sun50i_h616_codec_create_card(struct device *dev)
return card;
};
+static const struct snd_soc_dapm_widget suniv_codec_card_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("Right FM In", NULL),
+ SND_SOC_DAPM_LINE("Left FM In", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+ SND_SOC_DAPM_SPK("Speaker", sun4i_codec_spk_event),
+};
+
+/* Connect digital side enables to analog side widgets */
+static const struct snd_soc_dapm_route suniv_codec_card_routes[] = {
+ /* ADC Routes */
+ { "ADC", NULL, "ADC Enable" },
+ { "Codec Capture", NULL, "ADC" },
+
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+ { "Left DAC", NULL, "Codec Playback" },
+ { "Right DAC", NULL, "Codec Playback" },
+};
+
+static struct snd_soc_card *suniv_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "F1C100s Audio Codec";
+ card->dapm_widgets = suniv_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(suniv_codec_card_dapm_widgets);
+ card->dapm_routes = suniv_codec_card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(suniv_codec_card_routes);
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
static const struct regmap_config sun4i_codec_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -1751,6 +2082,13 @@ static const struct regmap_config sun50i_h616_codec_regmap_config = {
.cache_type = REGCACHE_NONE,
};
+static const struct regmap_config suniv_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUNIV_CODEC_ADC_DBG,
+};
+
struct sun4i_codec_quirks {
const struct regmap_config *regmap_config;
const struct snd_soc_component_driver *codec;
@@ -1761,6 +2099,7 @@ struct sun4i_codec_quirks {
unsigned int reg_adc_rxdata; /* RX FIFO offset for DMA config */
bool has_reset;
bool playback_only;
+ u32 dma_max_burst;
};
static const struct sun4i_codec_quirks sun4i_codec_quirks = {
@@ -1771,6 +2110,7 @@ static const struct sun4i_codec_quirks sun4i_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
@@ -1782,6 +2122,7 @@ static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun7i_codec_quirks = {
@@ -1792,6 +2133,7 @@ static const struct sun4i_codec_quirks sun7i_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
@@ -1803,6 +2145,7 @@ static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
@@ -1819,6 +2162,7 @@ static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_v3s_codec_quirks = {
@@ -1834,6 +2178,7 @@ static const struct sun4i_codec_quirks sun8i_v3s_codec_quirks = {
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun50i_h616_codec_quirks = {
@@ -1843,6 +2188,19 @@ static const struct sun4i_codec_quirks sun50i_h616_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN50I_H616_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
+};
+
+static const struct sun4i_codec_quirks suniv_f1c100s_codec_quirks = {
+ .regmap_config = &suniv_codec_regmap_config,
+ .codec = &suniv_codec_codec,
+ .create_card = suniv_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUNIV_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUNIV_CODEC_ADC_RXDATA,
+ .has_reset = true,
+ .dma_max_burst = SUNIV_DMA_MAX_BURST,
};
static const struct of_device_id sun4i_codec_of_match[] = {
@@ -1874,6 +2232,10 @@ static const struct of_device_id sun4i_codec_of_match[] = {
.compatible = "allwinner,sun50i-h616-codec",
.data = &sun50i_h616_codec_quirks,
},
+ {
+ .compatible = "allwinner,suniv-f1c100s-codec",
+ .data = &suniv_f1c100s_codec_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
@@ -1911,7 +2273,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
/* Get the clocks from the DT */
- scodec->clk_apb = devm_clk_get(&pdev->dev, "apb");
+ scodec->clk_apb = devm_clk_get_enabled(&pdev->dev, "apb");
if (IS_ERR(scodec->clk_apb)) {
dev_err(&pdev->dev, "Failed to get the APB clock\n");
return PTR_ERR(scodec->clk_apb);
@@ -1924,8 +2286,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
if (quirks->has_reset) {
- scodec->rst = devm_reset_control_get_exclusive(&pdev->dev,
- NULL);
+ scodec->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(scodec->rst)) {
dev_err(&pdev->dev, "Failed to get reset control\n");
return PTR_ERR(scodec->rst);
@@ -1961,32 +2322,16 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return ret;
}
- /* Enable the bus clock */
- if (clk_prepare_enable(scodec->clk_apb)) {
- dev_err(&pdev->dev, "Failed to enable the APB clock\n");
- return -EINVAL;
- }
-
- /* Deassert the reset control */
- if (scodec->rst) {
- ret = reset_control_deassert(scodec->rst);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to deassert the reset control\n");
- goto err_clk_disable;
- }
- }
-
/* DMA configuration for TX FIFO */
scodec->playback_dma_data.addr = res->start + quirks->reg_dac_txdata;
- scodec->playback_dma_data.maxburst = 8;
+ scodec->playback_dma_data.maxburst = quirks->dma_max_burst;
scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
if (!quirks->playback_only) {
/* DMA configuration for RX FIFO */
scodec->capture_dma_data.addr = res->start +
quirks->reg_adc_rxdata;
- scodec->capture_dma_data.maxburst = 8;
+ scodec->capture_dma_data.maxburst = quirks->dma_max_burst;
scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
}
@@ -1994,7 +2339,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
&sun4i_codec_dai, 1);
if (ret) {
dev_err(&pdev->dev, "Failed to register our codec\n");
- goto err_assert_reset;
+ return ret;
}
ret = devm_snd_soc_register_component(&pdev->dev,
@@ -2002,20 +2347,20 @@ static int sun4i_codec_probe(struct platform_device *pdev)
&dummy_cpu_dai, 1);
if (ret) {
dev_err(&pdev->dev, "Failed to register our DAI\n");
- goto err_assert_reset;
+ return ret;
}
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "Failed to register against DMAEngine\n");
- goto err_assert_reset;
+ return ret;
}
card = quirks->create_card(&pdev->dev);
if (IS_ERR(card)) {
ret = PTR_ERR(card);
dev_err(&pdev->dev, "Failed to create our card\n");
- goto err_assert_reset;
+ return ret;
}
snd_soc_card_set_drvdata(card, scodec);
@@ -2023,28 +2368,17 @@ static int sun4i_codec_probe(struct platform_device *pdev)
ret = snd_soc_register_card(card);
if (ret) {
dev_err_probe(&pdev->dev, ret, "Failed to register our card\n");
- goto err_assert_reset;
+ return ret;
}
return 0;
-
-err_assert_reset:
- if (scodec->rst)
- reset_control_assert(scodec->rst);
-err_clk_disable:
- clk_disable_unprepare(scodec->clk_apb);
- return ret;
}
static void sun4i_codec_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- if (scodec->rst)
- reset_control_assert(scodec->rst);
- clk_disable_unprepare(scodec->clk_apb);
}
static struct platform_driver sun4i_codec_driver = {
@@ -2063,4 +2397,5 @@ MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
MODULE_AUTHOR("Ryan Walklin <ryan@testtoast.com");
+MODULE_AUTHOR("Mesih Kilinc <mesikilinc@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 0aa416423246..41caf1795d09 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -176,6 +176,7 @@ struct sun4i_spdif_quirks {
unsigned int reg_dac_txdata;
bool has_reset;
unsigned int val_fctl_ftx;
+ unsigned int mclk_multiplier;
};
struct sun4i_spdif_dev {
@@ -201,6 +202,10 @@ static void sun4i_spdif_configure(struct sun4i_spdif_dev *host)
regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
quirks->val_fctl_ftx, quirks->val_fctl_ftx);
+ /* Valid data at the MSB of TXFIFO Register */
+ regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
+ SUN4I_SPDIF_FCTL_TXIM, 0);
+
/* clear TX counter */
regmap_write(host->regmap, SUN4I_SPDIF_TXCNT, 0);
}
@@ -282,14 +287,17 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT16BIT;
+ host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT20BIT;
break;
case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT24BIT;
break;
default:
@@ -313,6 +321,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
default:
return -EINVAL;
}
+ mclk *= host->quirks->mclk_multiplier;
ret = clk_set_rate(host->spdif_clk, mclk);
if (ret < 0) {
@@ -321,9 +330,6 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
- SUN4I_SPDIF_FCTL_TXIM, SUN4I_SPDIF_FCTL_TXIM);
-
switch (rate) {
case 22050:
case 24000:
@@ -347,6 +353,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
default:
return -EINVAL;
}
+ mclk_div *= host->quirks->mclk_multiplier;
reg_val = 0;
reg_val |= SUN4I_SPDIF_TXCFG_ASS;
@@ -522,9 +529,10 @@ static const struct regmap_config sun4i_spdif_regmap_config = {
#define SUN4I_RATES SNDRV_PCM_RATE_8000_192000
-#define SUN4I_FORMATS (SNDRV_PCM_FORMAT_S16_LE | \
- SNDRV_PCM_FORMAT_S20_3LE | \
- SNDRV_PCM_FORMAT_S24_LE)
+#define SUN4I_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_driver sun4i_spdif_dai = {
.playback = {
@@ -540,24 +548,28 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = {
static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = {
.reg_dac_txdata = SUN4I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
+ .mclk_multiplier = 1,
};
static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = {
.reg_dac_txdata = SUN4I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 1,
};
static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = {
.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 4,
};
static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = {
.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN50I_H6_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 1,
};
static const struct of_device_id sun4i_spdif_of_match[] = {
diff --git a/sound/soc/xilinx/xlnx_spdif.c b/sound/soc/xilinx/xlnx_spdif.c
index 7febb3830dc2..017a64ab9f1e 100644
--- a/sound/soc/xilinx/xlnx_spdif.c
+++ b/sound/soc/xilinx/xlnx_spdif.c
@@ -248,41 +248,35 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ ctx->axi_clk = devm_clk_get_enabled(dev, "s_axi_aclk");
if (IS_ERR(ctx->axi_clk)) {
ret = PTR_ERR(ctx->axi_clk);
dev_err(dev, "failed to get s_axi_aclk(%d)\n", ret);
return ret;
}
- ret = clk_prepare_enable(ctx->axi_clk);
- if (ret) {
- dev_err(dev, "failed to enable s_axi_aclk(%d)\n", ret);
- return ret;
- }
ctx->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ctx->base)) {
- ret = PTR_ERR(ctx->base);
- goto clk_err;
- }
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+
ret = of_property_read_u32(node, "xlnx,spdif-mode", &ctx->mode);
if (ret < 0) {
dev_err(dev, "cannot get SPDIF mode\n");
- goto clk_err;
+ return ret;
}
if (ctx->mode) {
dai_drv = &xlnx_spdif_tx_dai;
} else {
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto clk_err;
+ return ret;
+
ret = devm_request_irq(dev, ret,
xlnx_spdifrx_irq_handler,
0, "XLNX_SPDIF_RX", ctx);
if (ret) {
dev_err(dev, "spdif rx irq request failed\n");
- ret = -ENODEV;
- goto clk_err;
+ return -ENODEV;
}
init_waitqueue_head(&ctx->chsts_q);
@@ -292,7 +286,7 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
ret = of_property_read_u32(node, "xlnx,aud_clk_i", &ctx->aclk);
if (ret < 0) {
dev_err(dev, "cannot get aud_clk_i value\n");
- goto clk_err;
+ return ret;
}
dev_set_drvdata(dev, ctx);
@@ -301,22 +295,13 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
dai_drv, 1);
if (ret) {
dev_err(dev, "SPDIF component registration failed\n");
- goto clk_err;
+ return ret;
}
writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
dev_info(dev, "%s DAI registered\n", dai_drv->name);
-clk_err:
- clk_disable_unprepare(ctx->axi_clk);
- return ret;
-}
-
-static void xlnx_spdif_remove(struct platform_device *pdev)
-{
- struct spdif_dev_data *ctx = dev_get_drvdata(&pdev->dev);
-
- clk_disable_unprepare(ctx->axi_clk);
+ return 0;
}
static struct platform_driver xlnx_spdif_driver = {
@@ -325,7 +310,6 @@ static struct platform_driver xlnx_spdif_driver = {
.of_match_table = xlnx_spdif_of_match,
},
.probe = xlnx_spdif_probe,
- .remove = xlnx_spdif_remove,
};
module_platform_driver(xlnx_spdif_driver);
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 0532499dbc6d..30bd5348477b 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -6,6 +6,7 @@
snd-usb-audio-y := card.o \
clock.o \
endpoint.o \
+ fcp.o \
format.o \
helper.o \
implicit.o \
diff --git a/sound/usb/fcp.c b/sound/usb/fcp.c
new file mode 100644
index 000000000000..7df65041ace5
--- /dev/null
+++ b/sound/usb/fcp.c
@@ -0,0 +1,1134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Focusrite Control Protocol Driver for ALSA
+ *
+ * Copyright (c) 2024-2025 by Geoffrey D. Bennett <g at b4.vu>
+ */
+/*
+ * DOC: Theory of Operation
+ *
+ * The Focusrite Control Protocol (FCP) driver provides a minimal
+ * kernel interface that allows a user-space driver (primarily
+ * fcp-server) to communicate with Focusrite USB audio interfaces
+ * using their vendor-specific protocol. This protocol is used by
+ * Scarlett 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, Clarett+, and
+ * Vocaster series devices.
+ *
+ * Unlike the existing scarlett2 driver which implements all controls
+ * in kernel space, this driver takes a lighter-weight approach by
+ * moving most functionality to user space. The only control
+ * implemented in kernel space is the Level Meter, since it requires
+ * frequent polling of volatile data.
+ *
+ * The driver provides an hwdep interface that allows the user-space
+ * driver to:
+ * - Initialise the protocol
+ * - Send arbitrary FCP commands to the device
+ * - Receive notifications from the device
+ * - Configure the Level Meter control
+ *
+ * Usage Flow
+ * ----------
+ * 1. Open the hwdep device (requires CAP_SYS_RAWIO)
+ * 2. Get protocol version using FCP_IOCTL_PVERSION
+ * 3. Initialise protocol using FCP_IOCTL_INIT
+ * 4. Send commands using FCP_IOCTL_CMD
+ * 5. Receive notifications using read()
+ * 6. Optionally set up the Level Meter control using
+ * FCP_IOCTL_SET_METER_MAP
+ * 7. Optionally add labels to the Level Meter control using
+ * FCP_IOCTL_SET_METER_LABELS
+ *
+ * Level Meter
+ * -----------
+ * The Level Meter is implemented as an ALSA control that provides
+ * real-time level monitoring. When the control is read, the driver
+ * requests the current meter levels from the device, translates the
+ * levels using the configured mapping, and returns the result to the
+ * user. The mapping between device meters and the ALSA control's
+ * channels is configured with FCP_IOCTL_SET_METER_MAP.
+ *
+ * Labels for the Level Meter channels can be set using
+ * FCP_IOCTL_SET_METER_LABELS and read by applications through the
+ * control's TLV data. The labels are transferred as a sequence of
+ * null-terminated strings.
+ */
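+/*
+ * Illustrative user-space sketch of the flow above. This is not part of
+ * the driver; the hwdep device path and the device-specific response
+ * sizes and opcodes (UPPER_CASE names) are assumptions made for the
+ * example, while the fcp_* structures and FCP_IOCTL_* requests come
+ * from <uapi/sound/fcp.h>:
+ *
+ *   int fd = open("/dev/snd/hwC0D0", O_RDWR);   // needs CAP_SYS_RAWIO
+ *
+ *   int ver;
+ *   ioctl(fd, FCP_IOCTL_PVERSION, &ver);        // protocol version
+ *
+ *   struct fcp_init *init = ...;                // sized for both responses
+ *   init->step0_resp_size = STEP0_SIZE;         // device-specific
+ *   init->step2_resp_size = STEP2_SIZE;         // device-specific
+ *   init->init1_opcode = INIT1_OPCODE;          // device-specific
+ *   init->init2_opcode = INIT2_OPCODE;          // device-specific
+ *   ioctl(fd, FCP_IOCTL_INIT, init);            // responses land in init->resp
+ *
+ *   // arbitrary FCP command: fill opcode, req_size, resp_size and data
+ *   ioctl(fd, FCP_IOCTL_CMD, cmd);
+ *
+ *   uint32_t event;
+ *   read(fd, &event, sizeof(event));            // blocks for notification bits
+ */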
+
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <sound/control.h>
+#include <sound/hwdep.h>
+#include <sound/tlv.h>
+
+#include <uapi/sound/fcp.h>
+
+#include "usbaudio.h"
+#include "mixer.h"
+#include "helper.h"
+
+#include "fcp.h"
+
+/* notification event bits waiting to be delivered to the hwdep *file */
+struct fcp_notify {
+ wait_queue_head_t queue;
+ u32 event;
+ spinlock_t lock;
+};
+
+struct fcp_data {
+ struct usb_mixer_interface *mixer;
+
+ struct mutex mutex; /* serialise access to the device */
+ struct completion cmd_done; /* wait for command completion */
+ struct file *file; /* hwdep file */
+
+ struct fcp_notify notify;
+
+ u8 bInterfaceNumber;
+ u8 bEndpointAddress;
+ u16 wMaxPacketSize;
+ u8 bInterval;
+
+ uint16_t step0_resp_size;
+ uint16_t step2_resp_size;
+ uint32_t init1_opcode;
+ uint32_t init2_opcode;
+
+ u8 init;
+ u16 seq;
+
+ u8 num_meter_slots;
+ s16 *meter_level_map;
+ __le32 *meter_levels;
+ struct snd_kcontrol *meter_ctl;
+
+ unsigned int *meter_labels_tlv;
+ int meter_labels_tlv_size;
+};
+
+/*** USB Interactions ***/
+
+/* FCP Command ACK notification bit */
+#define FCP_NOTIFY_ACK 1
+
+/* Vendor-specific USB control requests */
+#define FCP_USB_REQ_STEP0 0
+#define FCP_USB_REQ_CMD_TX 2
+#define FCP_USB_REQ_CMD_RX 3
+
+/* Focusrite Control Protocol opcodes that the kernel side needs to
+ * know about
+ */
+#define FCP_USB_REBOOT 0x00000003
+#define FCP_USB_GET_METER 0x00001001
+#define FCP_USB_FLASH_ERASE 0x00004002
+#define FCP_USB_FLASH_WRITE 0x00004004
+
+#define FCP_USB_METER_LEVELS_GET_MAGIC 1
+
+#define FCP_SEGMENT_APP_GOLD 0
+
+/* Forward declarations */
+static int fcp_init(struct usb_mixer_interface *mixer,
+ void *step0_resp, void *step2_resp);
+
+/* FCP command request/response format */
+struct fcp_usb_packet {
+ __le32 opcode;
+ __le16 size;
+ __le16 seq;
+ __le32 error;
+ __le32 pad;
+ u8 data[];
+};
+
+static void fcp_fill_request_header(struct fcp_data *private,
+ struct fcp_usb_packet *req,
+ u32 opcode, u16 req_size)
+{
+ /* sequence must go up by 1 for each request */
+ u16 seq = private->seq++;
+
+ req->opcode = cpu_to_le32(opcode);
+ req->size = cpu_to_le16(req_size);
+ req->seq = cpu_to_le16(seq);
+ req->error = 0;
+ req->pad = 0;
+}
+
+static int fcp_usb_tx(struct usb_device *dev, int interface,
+ void *buf, u16 size)
+{
+ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ FCP_USB_REQ_CMD_TX,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ 0, interface, buf, size);
+}
+
+static int fcp_usb_rx(struct usb_device *dev, int interface,
+ void *buf, u16 size)
+{
+ return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ FCP_USB_REQ_CMD_RX,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ 0, interface, buf, size);
+}
+
+/* Send an FCP command and get the response */
+static int fcp_usb(struct usb_mixer_interface *mixer, u32 opcode,
+ const void *req_data, u16 req_size,
+ void *resp_data, u16 resp_size)
+{
+ struct fcp_data *private = mixer->private_data;
+ struct usb_device *dev = mixer->chip->dev;
+ struct fcp_usb_packet *req __free(kfree) = NULL;
+ struct fcp_usb_packet *resp __free(kfree) = NULL;
+ size_t req_buf_size = struct_size(req, data, req_size);
+ size_t resp_buf_size = struct_size(resp, data, resp_size);
+ int retries = 0;
+ const int max_retries = 5;
+ int err;
+
+ if (!mixer->urb)
+ return -ENODEV;
+
+ req = kmalloc(req_buf_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kmalloc(resp_buf_size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* build request message */
+ fcp_fill_request_header(private, req, opcode, req_size);
+ if (req_size)
+ memcpy(req->data, req_data, req_size);
+
+ /* send the request and retry on EPROTO */
+retry:
+ err = fcp_usb_tx(dev, private->bInterfaceNumber, req, req_buf_size);
+ if (err == -EPROTO && ++retries <= max_retries) {
+ msleep(1 << (retries - 1));
+ goto retry;
+ }
+
+ if (err != req_buf_size) {
+ usb_audio_err(mixer->chip,
+ "FCP request %08x failed: %d\n", opcode, err);
+ return -EINVAL;
+ }
+
+ if (!wait_for_completion_timeout(&private->cmd_done,
+ msecs_to_jiffies(1000))) {
+ usb_audio_err(mixer->chip,
+ "FCP request %08x timed out\n", opcode);
+
+ return -ETIMEDOUT;
+ }
+
+ /* send a second message to get the response */
+ err = fcp_usb_rx(dev, private->bInterfaceNumber, resp, resp_buf_size);
+
+ /* validate the response */
+
+ if (err < 0) {
+
+ /* ESHUTDOWN and EPROTO are valid responses to a
+ * reboot request
+ */
+ if (opcode == FCP_USB_REBOOT &&
+ (err == -ESHUTDOWN || err == -EPROTO))
+ return 0;
+
+ usb_audio_err(mixer->chip,
+ "FCP read response %08x failed: %d\n",
+ opcode, err);
+ return -EINVAL;
+ }
+
+ if (err < sizeof(*resp)) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x too short: %d\n",
+ opcode, err);
+ return -EINVAL;
+ }
+
+ if (req->seq != resp->seq) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x seq mismatch %d/%d\n",
+ opcode,
+ le16_to_cpu(req->seq), le16_to_cpu(resp->seq));
+ return -EINVAL;
+ }
+
+ if (req->opcode != resp->opcode) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x opcode mismatch %08x\n",
+ opcode, le32_to_cpu(resp->opcode));
+ return -EINVAL;
+ }
+
+ if (resp->error) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x error %d\n",
+ opcode, le32_to_cpu(resp->error));
+ return -EINVAL;
+ }
+
+ if (err != resp_buf_size) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x buffer size mismatch %d/%zu\n",
+ opcode, err, resp_buf_size);
+ return -EINVAL;
+ }
+
+ if (resp_size != le16_to_cpu(resp->size)) {
+ usb_audio_err(mixer->chip,
+ "FCP response %08x size mismatch %d/%d\n",
+ opcode, resp_size, le16_to_cpu(resp->size));
+ return -EINVAL;
+ }
+
+ if (resp_data && resp_size > 0)
+ memcpy(resp_data, resp->data, resp_size);
+
+ return 0;
+}
+
+static int fcp_reinit(struct usb_mixer_interface *mixer)
+{
+ struct fcp_data *private = mixer->private_data;
+ void *step0_resp __free(kfree) = NULL;
+ void *step2_resp __free(kfree) = NULL;
+
+ if (mixer->urb)
+ return 0;
+
+ step0_resp = kmalloc(private->step0_resp_size, GFP_KERNEL);
+ if (!step0_resp)
+ return -ENOMEM;
+ step2_resp = kmalloc(private->step2_resp_size, GFP_KERNEL);
+ if (!step2_resp)
+ return -ENOMEM;
+
+ return fcp_init(mixer, step0_resp, step2_resp);
+}
+
+/*** Control Functions ***/
+
+/* helper function to create a new control */
+static int fcp_add_new_ctl(struct usb_mixer_interface *mixer,
+ const struct snd_kcontrol_new *ncontrol,
+ int index, int channels, const char *name,
+ struct snd_kcontrol **kctl_return)
+{
+ struct snd_kcontrol *kctl;
+ struct usb_mixer_elem_info *elem;
+ int err;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ /* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
+ * ignores them for resume and other operations.
+ * Also, the head.id field is set to 0, as we don't use this field.
+ */
+ elem->head.mixer = mixer;
+ elem->control = index;
+ elem->head.id = 0;
+ elem->channels = channels;
+ elem->val_type = USB_MIXER_BESPOKEN;
+
+ kctl = snd_ctl_new1(ncontrol, elem);
+ if (!kctl) {
+ kfree(elem);
+ return -ENOMEM;
+ }
+ kctl->private_free = snd_usb_mixer_elem_free;
+
+ strscpy(kctl->id.name, name, sizeof(kctl->id.name));
+
+ err = snd_usb_mixer_add_control(&elem->head, kctl);
+ if (err < 0)
+ return err;
+
+ if (kctl_return)
+ *kctl_return = kctl;
+
+ return 0;
+}
+
+/*** Level Meter Control ***/
+
+static int fcp_meter_ctl_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = elem->channels;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 4095;
+ uinfo->value.integer.step = 1;
+ return 0;
+}
+
+static int fcp_meter_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct fcp_data *private = mixer->private_data;
+ int num_meter_slots, resp_size;
+ __le32 *resp = private->meter_levels;
+ int i, err = 0;
+
+ struct {
+ __le16 pad;
+ __le16 num_meters;
+ __le32 magic;
+ } __packed req;
+
+ guard(mutex)(&private->mutex);
+
+ err = fcp_reinit(mixer);
+ if (err < 0)
+ return err;
+
+ num_meter_slots = private->num_meter_slots;
+ resp_size = num_meter_slots * sizeof(u32);
+
+ req.pad = 0;
+ req.num_meters = cpu_to_le16(num_meter_slots);
+ req.magic = cpu_to_le32(FCP_USB_METER_LEVELS_GET_MAGIC);
+ err = fcp_usb(mixer, FCP_USB_GET_METER,
+ &req, sizeof(req), resp, resp_size);
+ if (err < 0)
+ return err;
+
+ /* copy & translate from resp[] using meter_level_map[] */
+ for (i = 0; i < elem->channels; i++) {
+ int idx = private->meter_level_map[i];
+ int value = idx < 0 ? 0 : le32_to_cpu(resp[idx]);
+
+ ucontrol->value.integer.value[i] = value;
+ }
+
+ return 0;
+}
+
+static int fcp_meter_tlv_callback(struct snd_kcontrol *kctl,
+ int op_flag, unsigned int size,
+ unsigned int __user *tlv)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct fcp_data *private = mixer->private_data;
+
+ guard(mutex)(&private->mutex);
+
+ if (op_flag == SNDRV_CTL_TLV_OP_READ) {
+ if (private->meter_labels_tlv_size == 0)
+ return 0;
+
+ if (size > private->meter_labels_tlv_size)
+ size = private->meter_labels_tlv_size;
+
+ if (copy_to_user(tlv, private->meter_labels_tlv, size))
+ return -EFAULT;
+
+ return size;
+ }
+
+ return -EINVAL;
+}
+
+static const struct snd_kcontrol_new fcp_meter_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fcp_meter_ctl_info,
+ .get = fcp_meter_ctl_get,
+ .tlv = { .c = fcp_meter_tlv_callback },
+};
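+
+/*
+ * Once the control has been created via FCP_IOCTL_SET_METER_MAP, it can
+ * be read with the ordinary ALSA control API or tools, for example
+ * (card number assumed for the example):
+ *
+ *   amixer -c 0 cget iface=PCM,name='Level Meter'
+ */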
+
+/*** hwdep interface ***/
+
+/* FCP initialisation */
+static int fcp_ioctl_init(struct usb_mixer_interface *mixer,
+ struct fcp_init __user *arg)
+{
+ struct fcp_init init;
+ struct usb_device *dev = mixer->chip->dev;
+ struct fcp_data *private = mixer->private_data;
+ void *resp __free(kfree) = NULL;
+ void *step2_resp;
+ int err, buf_size;
+
+ if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
+ return -EINVAL;
+
+ /* Get initialisation parameters */
+ if (copy_from_user(&init, arg, sizeof(init)))
+ return -EFAULT;
+
+ /* Validate the response sizes */
+ if (init.step0_resp_size < 1 ||
+ init.step0_resp_size > 255 ||
+ init.step2_resp_size < 1 ||
+ init.step2_resp_size > 255)
+ return -EINVAL;
+
+ /* Allocate response buffer */
+ buf_size = init.step0_resp_size + init.step2_resp_size;
+
+ resp = kmalloc(buf_size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ private->step0_resp_size = init.step0_resp_size;
+ private->step2_resp_size = init.step2_resp_size;
+ private->init1_opcode = init.init1_opcode;
+ private->init2_opcode = init.init2_opcode;
+
+ step2_resp = resp + private->step0_resp_size;
+
+ err = fcp_init(mixer, resp, step2_resp);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(arg->resp, resp, buf_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Check that the command is allowed
+ * Don't permit erasing/writing segment 0 (App_Gold)
+ */
+static int fcp_validate_cmd(u32 opcode, void *data, u16 size)
+{
+ if (opcode == FCP_USB_FLASH_ERASE) {
+ struct {
+ __le32 segment_num;
+ __le32 pad;
+ } __packed *req = data;
+
+ if (size != sizeof(*req))
+ return -EINVAL;
+
+ if (le32_to_cpu(req->segment_num) == FCP_SEGMENT_APP_GOLD)
+ return -EPERM;
+
+ if (req->pad != 0)
+ return -EINVAL;
+
+ } else if (opcode == FCP_USB_FLASH_WRITE) {
+ struct {
+ __le32 segment_num;
+ __le32 offset;
+ __le32 pad;
+ u8 data[];
+ } __packed *req = data;
+
+ if (size < sizeof(*req))
+ return -EINVAL;
+
+ if (le32_to_cpu(req->segment_num) == FCP_SEGMENT_APP_GOLD)
+ return -EPERM;
+
+ if (req->pad != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Execute an FCP command specified by the user */
+static int fcp_ioctl_cmd(struct usb_mixer_interface *mixer,
+ struct fcp_cmd __user *arg)
+{
+ struct fcp_cmd cmd;
+ int err, buf_size;
+ void *data __free(kfree) = NULL;
+
+ /* get opcode and request/response size */
+ if (copy_from_user(&cmd, arg, sizeof(cmd)))
+ return -EFAULT;
+
+ /* validate request and response sizes */
+ if (cmd.req_size > 4096 || cmd.resp_size > 4096)
+ return -EINVAL;
+
+ /* reinit if needed */
+ err = fcp_reinit(mixer);
+ if (err < 0)
+ return err;
+
+ /* allocate request/response buffer */
+ buf_size = max(cmd.req_size, cmd.resp_size);
+
+ if (buf_size > 0) {
+ data = kmalloc(buf_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ /* copy request from user */
+ if (cmd.req_size > 0)
+ if (copy_from_user(data, arg->data, cmd.req_size))
+ return -EFAULT;
+
+ /* check that the command is allowed */
+ err = fcp_validate_cmd(cmd.opcode, data, cmd.req_size);
+ if (err < 0)
+ return err;
+
+ /* send request, get response */
+ err = fcp_usb(mixer, cmd.opcode,
+ data, cmd.req_size, data, cmd.resp_size);
+ if (err < 0)
+ return err;
+
+ /* copy response to user */
+ if (cmd.resp_size > 0)
+ if (copy_to_user(arg->data, data, cmd.resp_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Validate the Level Meter map passed by the user */
+static int validate_meter_map(const s16 *map, int map_size, int meter_slots)
+{
+ int i;
+
+ for (i = 0; i < map_size; i++)
+ if (map[i] < -1 || map[i] >= meter_slots)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set the Level Meter map and add the control */
+static int fcp_ioctl_set_meter_map(struct usb_mixer_interface *mixer,
+ struct fcp_meter_map __user *arg)
+{
+ struct fcp_meter_map map;
+ struct fcp_data *private = mixer->private_data;
+ s16 *tmp_map __free(kfree) = NULL;
+ int err;
+
+ if (copy_from_user(&map, arg, sizeof(map)))
+ return -EFAULT;
+
+ /* Don't allow changing the map size or meter slots once set */
+ if (private->meter_ctl) {
+ struct usb_mixer_elem_info *elem =
+ private->meter_ctl->private_data;
+
+ if (map.map_size != elem->channels ||
+ map.meter_slots != private->num_meter_slots)
+ return -EINVAL;
+ }
+
+ /* Validate the map size */
+ if (map.map_size < 1 || map.map_size > 255 ||
+ map.meter_slots < 1 || map.meter_slots > 255)
+ return -EINVAL;
+
+ /* Allocate and copy the map data */
+ tmp_map = kmalloc_array(map.map_size, sizeof(s16), GFP_KERNEL);
+ if (!tmp_map)
+ return -ENOMEM;
+
+ if (copy_from_user(tmp_map, arg->map, map.map_size * sizeof(s16)))
+ return -EFAULT;
+
+ err = validate_meter_map(tmp_map, map.map_size, map.meter_slots);
+ if (err < 0)
+ return err;
+
+ /* If the control doesn't exist, create it */
+ if (!private->meter_ctl) {
+ s16 *new_map __free(kfree) = NULL;
+ __le32 *meter_levels __free(kfree) = NULL;
+
+ /* Allocate buffer for the map */
+ new_map = kmalloc_array(map.map_size, sizeof(s16), GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ /* Allocate buffer for reading meter levels */
+ meter_levels = kmalloc_array(map.meter_slots, sizeof(__le32),
+ GFP_KERNEL);
+ if (!meter_levels)
+ return -ENOMEM;
+
+ /* Create the Level Meter control */
+ err = fcp_add_new_ctl(mixer, &fcp_meter_ctl, 0, map.map_size,
+ "Level Meter", &private->meter_ctl);
+ if (err < 0)
+ return err;
+
+ /* Success; save the pointers in private and don't free them */
+ private->meter_level_map = new_map;
+ private->meter_levels = meter_levels;
+ private->num_meter_slots = map.meter_slots;
+ new_map = NULL;
+ meter_levels = NULL;
+ }
+
+ /* Install the new map */
+ memcpy(private->meter_level_map, tmp_map, map.map_size * sizeof(s16));
+
+ return 0;
+}
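+
+/*
+ * For illustration (values invented for the example): with
+ * meter_slots = 8 and map = { 0, 1, -1, 5 }, the four channels of the
+ * "Level Meter" control report device meters 0 and 1, a constant zero,
+ * and device meter 5 respectively.
+ */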
+
+/* Set the Level Meter labels */
+static int fcp_ioctl_set_meter_labels(struct usb_mixer_interface *mixer,
+ struct fcp_meter_labels __user *arg)
+{
+ struct fcp_meter_labels labels;
+ struct fcp_data *private = mixer->private_data;
+ unsigned int *tlv_data;
+ unsigned int tlv_size, data_size;
+
+ if (copy_from_user(&labels, arg, sizeof(labels)))
+ return -EFAULT;
+
+ /* Remove existing labels if size is zero */
+ if (!labels.labels_size) {
+
+ /* Clear TLV read/callback bits if labels were present */
+ if (private->meter_labels_tlv) {
+ private->meter_ctl->vd[0].access &=
+ ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
+ snd_ctl_notify(mixer->chip->card,
+ SNDRV_CTL_EVENT_MASK_INFO,
+ &private->meter_ctl->id);
+ }
+
+ kfree(private->meter_labels_tlv);
+ private->meter_labels_tlv = NULL;
+ private->meter_labels_tlv_size = 0;
+
+ return 0;
+ }
+
+ /* Validate size */
+ if (labels.labels_size > 4096)
+ return -EINVAL;
+
+ /* Calculate padded data size */
+ data_size = ALIGN(labels.labels_size, sizeof(unsigned int));
+
+ /* Calculate total TLV size including header */
+ tlv_size = sizeof(unsigned int) * 2 + data_size;
+
+ /* Allocate, set up TLV header, and copy the labels data */
+ tlv_data = kzalloc(tlv_size, GFP_KERNEL);
+ if (!tlv_data)
+ return -ENOMEM;
+ tlv_data[0] = SNDRV_CTL_TLVT_FCP_CHANNEL_LABELS;
+ tlv_data[1] = data_size;
+ if (copy_from_user(&tlv_data[2], arg->labels, labels.labels_size)) {
+ kfree(tlv_data);
+ return -EFAULT;
+ }
+
+ /* Set TLV read/callback bits if labels weren't present */
+ if (!private->meter_labels_tlv) {
+ private->meter_ctl->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ snd_ctl_notify(mixer->chip->card,
+ SNDRV_CTL_EVENT_MASK_INFO,
+ &private->meter_ctl->id);
+ }
+
+ /* Swap in the new labels */
+ kfree(private->meter_labels_tlv);
+ private->meter_labels_tlv = tlv_data;
+ private->meter_labels_tlv_size = tlv_size;
+
+ return 0;
+}
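+
+/*
+ * For illustration (label strings invented for the example): passing
+ * labels_size = 12 with the bytes "Mic 1\0Mic 2\0" produces the TLV
+ * { SNDRV_CTL_TLVT_FCP_CHANNEL_LABELS, 12, "Mic 1\0Mic 2\0" }, which
+ * applications read back through the control's TLV callback.
+ */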
+
+static int fcp_hwdep_open(struct snd_hwdep *hw, struct file *file)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ struct fcp_data *private = mixer->private_data;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ private->file = file;
+
+ return 0;
+}
+
+static int fcp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ struct fcp_data *private = mixer->private_data;
+ void __user *argp = (void __user *)arg;
+
+ guard(mutex)(&private->mutex);
+
+ switch (cmd) {
+
+ case FCP_IOCTL_PVERSION:
+ return put_user(FCP_HWDEP_VERSION,
+ (int __user *)argp) ? -EFAULT : 0;
+ break;
+
+ case FCP_IOCTL_INIT:
+ return fcp_ioctl_init(mixer, argp);
+
+ case FCP_IOCTL_CMD:
+ if (!private->init)
+ return -EINVAL;
+ return fcp_ioctl_cmd(mixer, argp);
+
+ case FCP_IOCTL_SET_METER_MAP:
+ if (!private->init)
+ return -EINVAL;
+ return fcp_ioctl_set_meter_map(mixer, argp);
+
+ case FCP_IOCTL_SET_METER_LABELS:
+ if (!private->init)
+ return -EINVAL;
+ if (!private->meter_ctl)
+ return -EINVAL;
+ return fcp_ioctl_set_meter_labels(mixer, argp);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ /* not reached */
+}
+
+static long fcp_hwdep_read(struct snd_hwdep *hw, char __user *buf,
+ long count, loff_t *offset)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ struct fcp_data *private = mixer->private_data;
+ unsigned long flags;
+ long ret = 0;
+ u32 event;
+
+ if (count < sizeof(event))
+ return -EINVAL;
+
+ ret = wait_event_interruptible(private->notify.queue,
+ private->notify.event);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&private->notify.lock, flags);
+ event = private->notify.event;
+ private->notify.event = 0;
+ spin_unlock_irqrestore(&private->notify.lock, flags);
+
+ if (copy_to_user(buf, &event, sizeof(event)))
+ return -EFAULT;
+
+ return sizeof(event);
+}
+
+static __poll_t fcp_hwdep_poll(struct snd_hwdep *hw,
+ struct file *file,
+ poll_table *wait)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ struct fcp_data *private = mixer->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &private->notify.queue, wait);
+
+ if (private->notify.event)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+
+static int fcp_hwdep_release(struct snd_hwdep *hw, struct file *file)
+{
+ struct usb_mixer_interface *mixer = hw->private_data;
+ struct fcp_data *private = mixer->private_data;
+
+ if (!private)
+ return 0;
+
+ private->file = NULL;
+
+ return 0;
+}
+
+static int fcp_hwdep_init(struct usb_mixer_interface *mixer)
+{
+ struct snd_hwdep *hw;
+ int err;
+
+ err = snd_hwdep_new(mixer->chip->card, "Focusrite Control", 0, &hw);
+ if (err < 0)
+ return err;
+
+ hw->private_data = mixer;
+ hw->exclusive = 1;
+ hw->ops.open = fcp_hwdep_open;
+ hw->ops.ioctl = fcp_hwdep_ioctl;
+ hw->ops.ioctl_compat = fcp_hwdep_ioctl;
+ hw->ops.read = fcp_hwdep_read;
+ hw->ops.poll = fcp_hwdep_poll;
+ hw->ops.release = fcp_hwdep_release;
+
+ return 0;
+}
+
+/*** Cleanup ***/
+
+static void fcp_cleanup_urb(struct usb_mixer_interface *mixer)
+{
+ if (!mixer->urb)
+ return;
+
+ usb_kill_urb(mixer->urb);
+ kfree(mixer->urb->transfer_buffer);
+ usb_free_urb(mixer->urb);
+ mixer->urb = NULL;
+}
+
+static void fcp_private_free(struct usb_mixer_interface *mixer)
+{
+ struct fcp_data *private = mixer->private_data;
+
+ fcp_cleanup_urb(mixer);
+
+ kfree(private->meter_level_map);
+ kfree(private->meter_levels);
+ kfree(private->meter_labels_tlv);
+ kfree(private);
+ mixer->private_data = NULL;
+}
+
+static void fcp_private_suspend(struct usb_mixer_interface *mixer)
+{
+ fcp_cleanup_urb(mixer);
+}
+
+/*** Callbacks ***/
+
+static void fcp_notify(struct urb *urb)
+{
+ struct usb_mixer_interface *mixer = urb->context;
+ struct fcp_data *private = mixer->private_data;
+ int len = urb->actual_length;
+ int ustatus = urb->status;
+ u32 data;
+
+ if (ustatus != 0 || len != 8)
+ goto requeue;
+
+ data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
+
+ /* Handle command acknowledgement */
+ if (data & FCP_NOTIFY_ACK) {
+ complete(&private->cmd_done);
+ data &= ~FCP_NOTIFY_ACK;
+ }
+
+ if (data) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&private->notify.lock, flags);
+ private->notify.event |= data;
+ spin_unlock_irqrestore(&private->notify.lock, flags);
+
+ wake_up_interruptible(&private->notify.queue);
+ }
+
+requeue:
+ if (ustatus != -ENOENT &&
+ ustatus != -ECONNRESET &&
+ ustatus != -ESHUTDOWN) {
+ urb->dev = mixer->chip->dev;
+ usb_submit_urb(urb, GFP_ATOMIC);
+ } else {
+ complete(&private->cmd_done);
+ }
+}
+
+/* Submit a URB to receive notifications from the device */
+static int fcp_init_notify(struct usb_mixer_interface *mixer)
+{
+ struct usb_device *dev = mixer->chip->dev;
+ struct fcp_data *private = mixer->private_data;
+ unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
+ void *transfer_buffer;
+ int err;
+
+ /* Already set up */
+ if (mixer->urb)
+ return 0;
+
+ if (usb_pipe_type_check(dev, pipe))
+ return -EINVAL;
+
+ mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mixer->urb)
+ return -ENOMEM;
+
+ transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
+ if (!transfer_buffer) {
+ usb_free_urb(mixer->urb);
+ mixer->urb = NULL;
+ return -ENOMEM;
+ }
+
+ usb_fill_int_urb(mixer->urb, dev, pipe,
+ transfer_buffer, private->wMaxPacketSize,
+ fcp_notify, mixer, private->bInterval);
+
+ init_completion(&private->cmd_done);
+
+ err = usb_submit_urb(mixer->urb, GFP_KERNEL);
+ if (err) {
+ usb_audio_err(mixer->chip,
+ "%s: usb_submit_urb failed: %d\n",
+ __func__, err);
+ kfree(transfer_buffer);
+ usb_free_urb(mixer->urb);
+ mixer->urb = NULL;
+ }
+
+ return err;
+}
+
+/*** Initialisation ***/
+
+static int fcp_init(struct usb_mixer_interface *mixer,
+ void *step0_resp, void *step2_resp)
+{
+ struct fcp_data *private = mixer->private_data;
+ struct usb_device *dev = mixer->chip->dev;
+ int err;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ FCP_USB_REQ_STEP0,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ 0, private->bInterfaceNumber,
+ step0_resp, private->step0_resp_size);
+ if (err < 0)
+ return err;
+
+ err = fcp_init_notify(mixer);
+ if (err < 0)
+ return err;
+
+ private->seq = 0;
+ private->init = 1;
+
+ err = fcp_usb(mixer, private->init1_opcode, NULL, 0, NULL, 0);
+ if (err < 0)
+ return err;
+
+ err = fcp_usb(mixer, private->init2_opcode,
+ NULL, 0, step2_resp, private->step2_resp_size);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int fcp_init_private(struct usb_mixer_interface *mixer)
+{
+ struct fcp_data *private =
+ kzalloc(sizeof(struct fcp_data), GFP_KERNEL);
+
+ if (!private)
+ return -ENOMEM;
+
+ mutex_init(&private->mutex);
+ init_waitqueue_head(&private->notify.queue);
+ spin_lock_init(&private->notify.lock);
+
+ mixer->private_data = private;
+ mixer->private_free = fcp_private_free;
+ mixer->private_suspend = fcp_private_suspend;
+
+ private->mixer = mixer;
+
+ return 0;
+}
+
+/* Look through the interface descriptors for the Focusrite Control
+ * interface (bInterfaceClass = 255 Vendor Specific Class) and set
+ * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
+ * in private
+ */
+static int fcp_find_fc_interface(struct usb_mixer_interface *mixer)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ struct fcp_data *private = mixer->private_data;
+ struct usb_host_config *config = chip->dev->actconfig;
+ int i;
+
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ struct usb_interface *intf = config->interface[i];
+ struct usb_interface_descriptor *desc =
+ &intf->altsetting[0].desc;
+ struct usb_endpoint_descriptor *epd;
+
+ if (desc->bInterfaceClass != 255)
+ continue;
+
+ epd = get_endpoint(intf->altsetting, 0);
+ private->bInterfaceNumber = desc->bInterfaceNumber;
+ private->bEndpointAddress = epd->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK;
+ private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
+ private->bInterval = epd->bInterval;
+ return 0;
+ }
+
+ usb_audio_err(chip, "Focusrite vendor-specific interface not found\n");
+ return -EINVAL;
+}
+
+int snd_fcp_init(struct usb_mixer_interface *mixer)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
+
+ /* only use UAC_VERSION_2 */
+ if (!mixer->protocol)
+ return 0;
+
+ err = fcp_init_private(mixer);
+ if (err < 0)
+ return err;
+
+ err = fcp_find_fc_interface(mixer);
+ if (err < 0)
+ return err;
+
+ err = fcp_hwdep_init(mixer);
+ if (err < 0)
+ return err;
+
+ usb_audio_info(chip,
+ "Focusrite Control Protocol Driver ready (pid=0x%04x); "
+ "report any issues to "
+ "https://github.com/geoffreybennett/fcp-support/issues",
+ USB_ID_PRODUCT(chip->usb_id));
+
+ return err;
+}
diff --git a/sound/usb/fcp.h b/sound/usb/fcp.h
new file mode 100644
index 000000000000..d58cc7db75b1
--- /dev/null
+++ b/sound/usb/fcp.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __USBAUDIO_FCP_H
+#define __USBAUDIO_FCP_H
+
+int snd_fcp_init(struct usb_mixer_interface *mixer);
+
+#endif /* __USBAUDIO_FCP_H */
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index ca2c6f5de407..c073b38cd673 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -386,7 +386,7 @@ static int toneport_setup(struct usb_line6_toneport *toneport)
toneport_update_led(toneport);
schedule_delayed_work(&toneport->line6.startup_work,
- msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000));
+ secs_to_jiffies(TONEPORT_PCM_DELAY));
return 0;
}
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 23fcd680167d..ed6127b0389f 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -38,6 +38,7 @@
#include "mixer_us16x08.h"
#include "mixer_s1810c.h"
#include "helper.h"
+#include "fcp.h"
struct std_mono_table {
unsigned int unitid, control, cmask;
@@ -4090,6 +4091,12 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
err = snd_scarlett2_init(mixer);
break;
+ case USB_ID(0x1235, 0x821b): /* Focusrite Scarlett 16i16 4th Gen */
+ case USB_ID(0x1235, 0x821c): /* Focusrite Scarlett 18i16 4th Gen */
+ case USB_ID(0x1235, 0x821d): /* Focusrite Scarlett 18i20 4th Gen */
+ err = snd_fcp_init(mixer);
+ break;
+
case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
err = snd_soundblaster_e1_switch_create(mixer);
break;
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
index 7f595c1752a5..288d22e6a0b2 100644
--- a/sound/usb/mixer_scarlett2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -166,6 +166,7 @@
#include "helper.h"
#include "mixer_scarlett2.h"
+#include "fcp.h"
/* device_setup value to allow turning MSD mode back on */
#define SCARLETT2_MSD_ENABLE 0x02
@@ -173,6 +174,9 @@
/* device_setup value to disable this mixer driver */
#define SCARLETT2_DISABLE 0x04
+/* device_setup value to use the FCP driver instead */
+#define SCARLETT2_USE_FCP_DRIVER 0x08
+
/* some gui mixers can't handle negative ctl values */
#define SCARLETT2_VOLUME_BIAS 127
@@ -9702,6 +9706,10 @@ int snd_scarlett2_init(struct usb_mixer_interface *mixer)
if (!mixer->protocol)
return 0;
+ /* check if the user wants to use the FCP driver instead */
+ if (chip->setup & SCARLETT2_USE_FCP_DRIVER)
+ return snd_fcp_init(mixer);
+
/* find entry in scarlett2_devices */
entry = get_scarlett2_device_entry(mixer);
if (!entry) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 8ba0aff8be2e..a97efb7b131e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2239,6 +2239,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
QUIRK_FLAG_FIXED_RATE),
DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
@@ -2341,6 +2343,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 1334214546d7..100ad3dc091a 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -192,60 +192,77 @@ static int get_family_id(int sd)
}
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
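+/* taskstats reports delays in nanoseconds; convert to milliseconds */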
+#define delay_ms(t) (t / 1000000ULL)
static void print_delayacct(struct taskstats *t)
{
- printf("\n\nCPU %15s%15s%15s%15s%15s\n"
- " %15llu%15llu%15llu%15llu%15.3fms\n"
- "IO %15s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "SWAP %15s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "RECLAIM %12s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "THRASHING%12s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "COMPACT %12s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "WPCOPY %12s%15s%15s\n"
- " %15llu%15llu%15.3fms\n"
- "IRQ %15s%15s%15s\n"
- " %15llu%15llu%15.3fms\n",
+ printf("\n\nCPU %15s%15s%15s%15s%15s%15s\n"
+ " %15llu%15llu%15llu%15llu%15.3fms%13.6fms\n"
+ "IO %15s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "SWAP %15s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "RECLAIM %12s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "THRASHING%12s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "COMPACT %12s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "WPCOPY %12s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n"
+ "IRQ %15s%15s%15s%15s\n"
+ " %15llu%15llu%15.3fms%13.6fms\n",
"count", "real total", "virtual total",
- "delay total", "delay average",
+ "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->cpu_count,
(unsigned long long)t->cpu_run_real_total,
(unsigned long long)t->cpu_run_virtual_total,
(unsigned long long)t->cpu_delay_total,
average_ms((double)t->cpu_delay_total, t->cpu_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->cpu_delay_max),
+ delay_ms((double)t->cpu_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->blkio_count,
(unsigned long long)t->blkio_delay_total,
average_ms((double)t->blkio_delay_total, t->blkio_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->blkio_delay_max),
+ delay_ms((double)t->blkio_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->swapin_count,
(unsigned long long)t->swapin_delay_total,
average_ms((double)t->swapin_delay_total, t->swapin_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->swapin_delay_max),
+ delay_ms((double)t->swapin_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->freepages_count,
(unsigned long long)t->freepages_delay_total,
average_ms((double)t->freepages_delay_total, t->freepages_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->freepages_delay_max),
+ delay_ms((double)t->freepages_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->thrashing_count,
(unsigned long long)t->thrashing_delay_total,
average_ms((double)t->thrashing_delay_total, t->thrashing_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->thrashing_delay_max),
+ delay_ms((double)t->thrashing_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->compact_count,
(unsigned long long)t->compact_delay_total,
average_ms((double)t->compact_delay_total, t->compact_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->compact_delay_max),
+ delay_ms((double)t->compact_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->wpcopy_count,
(unsigned long long)t->wpcopy_delay_total,
average_ms((double)t->wpcopy_delay_total, t->wpcopy_count),
- "count", "delay total", "delay average",
+ delay_ms((double)t->wpcopy_delay_max),
+ delay_ms((double)t->wpcopy_delay_min),
+ "count", "delay total", "delay average", "delay max", "delay min",
(unsigned long long)t->irq_count,
(unsigned long long)t->irq_delay_total,
- average_ms((double)t->irq_delay_total, t->irq_count));
+ average_ms((double)t->irq_delay_total, t->irq_count),
+ delay_ms((double)t->irq_delay_max),
+ delay_ms((double)t->irq_delay_min));
}
static void task_context_switch_counts(struct taskstats *t)
diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c
index 90c4a37f53d9..e8dee05a6264 100644
--- a/tools/accounting/procacct.c
+++ b/tools/accounting/procacct.c
@@ -274,12 +274,11 @@ int main(int argc, char *argv[])
int maskset = 0;
char *logfile = NULL;
int cfd = 0;
- int forking = 0;
struct msgtemplate msg;
- while (!forking) {
- c = getopt(argc, argv, "m:vr:");
+ while (1) {
+ c = getopt(argc, argv, "m:vr:w:");
if (c < 0)
break;
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index cd8420e8c3ad..150416682e2c 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/stringify.h>
+#include <linux/kasan-tags.h>
#include <asm/gpr-num.h>
@@ -108,6 +109,9 @@
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x))
+/* Register-based PAN access, for save/restore purposes */
+#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3)
+
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -123,6 +127,37 @@
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
+#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0)
+#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0)
+#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1)
+
+#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1)
+#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3)
+#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5)
+
+#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1)
+#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3)
+#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5)
+
+#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1)
+
+#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1)
+#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3)
+#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5)
+
+#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1)
+#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3)
+#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5)
+
+#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1)
+#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
+#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
+
+/* Data cache zero operations */
+#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
+#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
+#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
+
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
@@ -162,6 +197,84 @@
#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
+#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0)
+#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
+#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1)
+#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
+#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2)
+#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2)
+
+#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0)
+#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1)
+#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0)
+
+#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
+#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
+#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3)))
+#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6)
+#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0)
+#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0)
+#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0)
+#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2)
+#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2)
+#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0)
+#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6)
+#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6)
+#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5)
+#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5)
+#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5)
+#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0)
+#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6)
+#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7)
+#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0)
+#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0)
+#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4)
+#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7)
+#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6)
+#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6)
+#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6)
+#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6)
+#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7)
+#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7)
+#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7)
+#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7)
+#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7)
+#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7)
+#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7)
+#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6)
+#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6)
+#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7)
+#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1)
+#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4)
+#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0)
+#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1)
+#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4)))
+#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0)
+#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4)
+#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4)
+#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4)
+#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2)
+#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2)
+#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3)
+#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0)
+#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0)
+#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0)
+#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1)
+#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0)
+#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2)
+#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2)
+#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2)
+#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2)
+#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2)
+#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2)
+#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1)
+
+/* ETM */
+#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
+
+#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -170,8 +283,6 @@
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
-#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
-
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
@@ -202,15 +313,38 @@
#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4)
+#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5)
+#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6)
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2)
+#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3)
#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
#define SYS_PAR_EL1_F BIT(0)
+/* When PAR_EL1.F == 1 */
#define SYS_PAR_EL1_FST GENMASK(6, 1)
+#define SYS_PAR_EL1_PTW BIT(8)
+#define SYS_PAR_EL1_S BIT(9)
+#define SYS_PAR_EL1_AssuredOnly BIT(12)
+#define SYS_PAR_EL1_TopLevel BIT(13)
+#define SYS_PAR_EL1_Overlay BIT(14)
+#define SYS_PAR_EL1_DirtyBit BIT(15)
+#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48)
+#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16))
+#define SYS_PAR_EL1_RES1 BIT(11)
+/* When PAR_EL1.F == 0 */
+#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7)
+#define SYS_PAR_EL1_NS BIT(9)
+#define SYS_PAR_EL1_F0_IMPDEF BIT(10)
+#define SYS_PAR_EL1_NSE BIT(11)
+#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12)
+#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56)
+#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52))
/*** Statistical Profiling Extension ***/
#define PMSEVFR_EL1_RES0_IMP \
@@ -274,6 +408,8 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5)
+
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
@@ -286,7 +422,6 @@
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
-#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
@@ -369,6 +504,7 @@
#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1)
+#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3)
#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0)
#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1)
#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2)
@@ -381,13 +517,15 @@
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
-#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
-#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
+#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
#define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0)
+#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0)
+#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1)
+#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2)
+#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
#define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0)
#define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1)
@@ -449,24 +587,49 @@
#define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1)
#define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2)
+#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7)
+
+#define __AMEV_op2(m) (m & 0x7)
+#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3))
+#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m))
+#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m)
+#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m))
+#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m)
#define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3)
#define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0)
+#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0)
+#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1)
+#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2)
+#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0)
+#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1)
+#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2)
/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0)
#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
+#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3)
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1)
+#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
+#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3)
#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
+#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
+#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
+#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
+#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
@@ -477,6 +640,183 @@
#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0)
+/* AT instructions */
+#define AT_Op0 1
+#define AT_CRn 7
+
+#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0)
+#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1)
+#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2)
+#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
+#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
+#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
+#define OP_AT_S1E1A sys_insn(AT_Op0, 0, AT_CRn, 9, 2)
+#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
+#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
+#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
+#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
+#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
+#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
+#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2)
+
+/* TLBI instructions */
+#define TLBI_Op0 1
+
+#define TLBI_Op1_EL1 0 /* Accessible from EL1 or higher */
+#define TLBI_Op1_EL2 4 /* Accessible from EL2 or higher */
+
+#define TLBI_CRn_XS 8 /* Extra Slow (the common one) */
+#define TLBI_CRn_nXS 9 /* not Extra Slow (which nobody uses) */
+
+#define TLBI_CRm_IPAIS 0 /* S2 Inner-Shareable */
+#define TLBI_CRm_nROS 1 /* non-Range, Outer-Shareable */
+#define TLBI_CRm_RIS 2 /* Range, Inner-Shareable */
+#define TLBI_CRm_nRIS 3 /* non-Range, Inner-Shareable */
+#define TLBI_CRm_IPAONS 4 /* S2 Outer and Non-Shareable */
+#define TLBI_CRm_ROS 5 /* Range, Outer-Shareable */
+#define TLBI_CRm_RNS 6 /* Range, Non-Shareable */
+#define TLBI_CRm_nRNS 7 /* non-Range, Non-Shareable */
+
+#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0)
+#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1)
+#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2)
+#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3)
+#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5)
+#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7)
+#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1)
+#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3)
+#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5)
+#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7)
+#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0)
+#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1)
+#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2)
+#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3)
+#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5)
+#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7)
+#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1)
+#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3)
+#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5)
+#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7)
+#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1)
+#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3)
+#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5)
+#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7)
+#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0)
+#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1)
+#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2)
+#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3)
+#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5)
+#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7)
+#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0)
+#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1)
+#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2)
+#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3)
+#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5)
+#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7)
+#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1)
+#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3)
+#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5)
+#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7)
+#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0)
+#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1)
+#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2)
+#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3)
+#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5)
+#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7)
+#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1)
+#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3)
+#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5)
+#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7)
+#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1)
+#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3)
+#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5)
+#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7)
+#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0)
+#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1)
+#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2)
+#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3)
+#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5)
+#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7)
+#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1)
+#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2)
+#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5)
+#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6)
+#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0)
+#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1)
+#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4)
+#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5)
+#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6)
+#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1)
+#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5)
+#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0)
+#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1)
+#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4)
+#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5)
+#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6)
+#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0)
+#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1)
+#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2)
+#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3)
+#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4)
+#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5)
+#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6)
+#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7)
+#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1)
+#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5)
+#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1)
+#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5)
+#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0)
+#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1)
+#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4)
+#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5)
+#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6)
+#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1)
+#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2)
+#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5)
+#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6)
+#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0)
+#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1)
+#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4)
+#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5)
+#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6)
+#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1)
+#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5)
+#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0)
+#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1)
+#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4)
+#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5)
+#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6)
+#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0)
+#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1)
+#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2)
+#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3)
+#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4)
+#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5)
+#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6)
+#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7)
+#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1)
+#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5)
+#define OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1)
+#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5)
+#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0)
+#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1)
+#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4)
+#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5)
+#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6)
+
+/* Misc instructions */
+#define OP_GCSPUSHX sys_insn(1, 0, 7, 7, 4)
+#define OP_GCSPOPCX sys_insn(1, 0, 7, 7, 5)
+#define OP_GCSPOPX sys_insn(1, 0, 7, 7, 6)
+#define OP_GCSPUSHM sys_insn(1, 3, 7, 7, 0)
+
+#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4)
+#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5)
+#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4)
+#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5)
+#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6)
+#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
@@ -555,16 +895,14 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64pfr0 */
-#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
-
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
#define ARM64_MIN_PARANGE_BITS 32
@@ -572,6 +910,7 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
#ifdef CONFIG_ARM64_PA_BITS_52
@@ -582,11 +921,13 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
@@ -610,6 +951,19 @@
#define SYS_GCR_EL1_RRND (BIT(16))
#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+#ifdef CONFIG_KASAN_HW_TAGS
+/*
+ * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
+ * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF.
+ */
+#define __MTE_TAG_MIN (KASAN_TAG_MIN & 0xf)
+#define __MTE_TAG_MAX (KASAN_TAG_MAX & 0xf)
+#define __MTE_TAG_INCL GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN)
+#define KERNEL_GCR_EL1_EXCL (SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL)
+#else
+#define KERNEL_GCR_EL1_EXCL SYS_GCR_EL1_EXCL_MASK
+#endif
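+
+/*
+ * Worked example (illustrative, not part of the original change): with
+ * CONFIG_KASAN_HW_TAGS, KASAN_TAG_MIN = 0xF0 and KASAN_TAG_MAX = 0xFD give
+ * __MTE_TAG_MIN = 0x0 and __MTE_TAG_MAX = 0xD, so __MTE_TAG_INCL =
+ * GENMASK(13, 0) = 0x3fff and KERNEL_GCR_EL1_EXCL = 0xffff & ~0x3fff =
+ * 0xc000, i.e. only MTE tags 0xE and 0xF are excluded from random tag
+ * generation.
+ */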
+
#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
/* RGSR_EL1 Definitions */
@@ -626,15 +980,6 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
-#define TRFCR_ELx_TS_SHIFT 5
-#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_EL2_CX BIT(3)
-#define TRFCR_ELx_ExTRE BIT(1)
-#define TRFCR_ELx_E0TRE BIT(0)
-
/* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */
#define ICH_MISR_EOI (1 << 0)
@@ -716,6 +1061,22 @@
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
+/*
+ * Permission Overlay Extension (POE) permission encodings.
+ */
+#define POE_NONE UL(0x0)
+#define POE_R UL(0x1)
+#define POE_X UL(0x2)
+#define POE_RX UL(0x3)
+#define POE_W UL(0x4)
+#define POE_RW UL(0x5)
+#define POE_XW UL(0x6)
+#define POE_RXW UL(0x7)
+#define POE_MASK UL(0xf)
+
+/* Initial value for Permission Overlay Extension for EL0 */
+#define POR_EL0_INIT POE_RXW
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
@@ -789,15 +1150,21 @@
/*
* For registers without architectural names, or simply unsupported by
* GAS.
+ *
+ * __check_r forces warnings to be generated by the compiler when
+ * evaluating r, which wouldn't normally happen because r is only
+ * passed to the assembler via __stringify(r).
*/
#define read_sysreg_s(r) ({ \
u64 __val; \
+ u32 __maybe_unused __check_r = (u32)(r); \
asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
__val; \
})
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
+ u32 __maybe_unused __check_r = (u32)(r); \
asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
} while (0)
@@ -827,6 +1194,8 @@
par; \
})
+#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val
+
#define SYS_FIELD_GET(reg, field, val) \
FIELD_GET(reg##_##field##_MASK, val)
@@ -834,7 +1203,8 @@
FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
- FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
+ FIELD_PREP(reg##_##field##_MASK, \
+ SYS_FIELD_VALUE(reg, field, val))
#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 156b62a163c5..8a48cc2536f5 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -226,7 +226,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
/* Wrong Checksum */
rcsum = xbc_calc_checksum(*buf, size);
if (csum != rcsum) {
- pr_err("checksum error: %d != %d\n", csum, rcsum);
+ pr_err("checksum error: %u != %u\n", csum, rcsum);
return -EINVAL;
}
@@ -395,7 +395,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
xbc_get_info(&ret, NULL);
printf("\tNumber of nodes: %d\n", ret);
printf("\tSize: %u bytes\n", (unsigned int)size);
- printf("\tChecksum: %d\n", (unsigned int)csum);
+ printf("\tChecksum: %u\n", (unsigned int)csum);
/* TODO: Check the options by schema */
xbc_exit();
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index 3f6bca03ad2e..d47dddc2b4ee 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -24,7 +24,7 @@ BTF COMMANDS
=============
| **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*]
-| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*]
+| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] [**root_id** *ROOT_ID*]
| **bpftool** **btf help**
|
| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
@@ -43,7 +43,7 @@ bpftool btf { show | list } [id *BTF_ID*]
that hold open file descriptors (FDs) against BTF objects. On such kernels
bpftool will automatically emit this information as well.
-bpftool btf dump *BTF_SRC*
+bpftool btf dump *BTF_SRC* [format *FORMAT*] [root_id *ROOT_ID*]
Dump BTF entries from a given *BTF_SRC*.
When **id** is specified, BTF object with that ID will be loaded and all
@@ -67,6 +67,11 @@ bpftool btf dump *BTF_SRC*
formatting, the output is sorted by default. Use the **unsorted** option
to avoid sorting the output.
+ The **root_id** option can be used to filter a dump to a single type and all
+ its dependent types. It cannot be used with any other types of filtering
+ (such as the "key", "value", or "kv" arguments when dumping BTF for a map).
+ It can be passed multiple times to dump multiple types.
+
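+  For example, to dump a single type and everything it depends on from a
+  given BTF object, one could run (the object and type IDs here are
+  hypothetical)::
+
+    # bpftool btf dump id 1 format c root_id 25
+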
bpftool btf help
Print short help message.
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index a4263dfb5e03..dd9f3ec84201 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -106,6 +106,7 @@ FEATURE_TESTS += libbfd-liberty
FEATURE_TESTS += libbfd-liberty-z
FEATURE_TESTS += disassembler-four-args
FEATURE_TESTS += disassembler-init-styled
+FEATURE_TESTS += libelf-zstd
FEATURE_DISPLAY := clang-bpf-co-re
FEATURE_DISPLAY += llvm
@@ -132,6 +133,12 @@ endif
LIBS = $(LIBBPF) -lelf -lz
LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
+
+ifeq ($(feature-libelf-zstd),1)
+LIBS += -lzstd
+LIBS_BOOTSTRAP += -lzstd
+endif
+
ifeq ($(feature-libcap), 1)
CFLAGS += -DUSE_LIBCAP
LIBS += -lcap
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 0c541498c301..1ce409a6cbd9 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -930,19 +930,24 @@ _bpftool()
format)
COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
;;
+ root_id)
+ return 0;
+ ;;
c)
- COMPREPLY=( $( compgen -W "unsorted" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "unsorted root_id" -- "$cur" ) )
;;
*)
# emit extra options
case ${words[3]} in
id|file)
+ COMPREPLY=( $( compgen -W "root_id" -- "$cur" ) )
_bpftool_once_attr 'format'
;;
map|prog)
if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then
COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) )
fi
+ COMPREPLY=( $( compgen -W "root_id" -- "$cur" ) )
_bpftool_once_attr 'format'
;;
*)
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index d005e4fd6128..2636655ac180 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -27,6 +27,8 @@
#define KFUNC_DECL_TAG "bpf_kfunc"
#define FASTCALL_DECL_TAG "bpf_fastcall"
+#define MAX_ROOT_IDS 16
+
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
[BTF_KIND_INT] = "INT",
@@ -880,12 +882,14 @@ static int do_dump(int argc, char **argv)
{
bool dump_c = false, sort_dump_c = true;
struct btf *btf = NULL, *base = NULL;
- __u32 root_type_ids[2];
+ __u32 root_type_ids[MAX_ROOT_IDS];
+ bool have_id_filtering;
int root_type_cnt = 0;
__u32 btf_id = -1;
const char *src;
int fd = -1;
int err = 0;
+ int i;
if (!REQ_ARGS(2)) {
usage();
@@ -973,6 +977,8 @@ static int do_dump(int argc, char **argv)
goto done;
}
+ have_id_filtering = !!root_type_cnt;
+
while (argc) {
if (is_prefix(*argv, "format")) {
NEXT_ARG();
@@ -992,6 +998,36 @@ static int do_dump(int argc, char **argv)
goto done;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "root_id")) {
+ __u32 root_id;
+ char *end;
+
+ if (have_id_filtering) {
+ p_err("cannot use root_id with other type filtering");
+ err = -EINVAL;
+ goto done;
+ } else if (root_type_cnt == MAX_ROOT_IDS) {
+ p_err("only %d root_id are supported", MAX_ROOT_IDS);
+ err = -E2BIG;
+ goto done;
+ }
+
+ NEXT_ARG();
+ root_id = strtoul(*argv, &end, 0);
+ if (*end) {
+ err = -1;
+ p_err("can't parse %s as root ID", *argv);
+ goto done;
+ }
+ for (i = 0; i < root_type_cnt; i++) {
+ if (root_type_ids[i] == root_id) {
+ err = -EINVAL;
+ p_err("duplicate root_id %d supplied", root_id);
+ goto done;
+ }
+ }
+ root_type_ids[root_type_cnt++] = root_id;
+ NEXT_ARG();
} else if (is_prefix(*argv, "unsorted")) {
sort_dump_c = false;
NEXT_ARG();
@@ -1017,6 +1053,17 @@ static int do_dump(int argc, char **argv)
}
}
+ /* Invalid root IDs cause half-emitted boilerplate and then an unclean
+ * exit. It's an ugly user experience, so handle this common error here.
+ */
+ for (i = 0; i < root_type_cnt; i++) {
+ if (root_type_ids[i] >= btf__type_cnt(btf)) {
+ err = -EINVAL;
+ p_err("invalid root ID: %u", root_type_ids[i]);
+ goto done;
+ }
+ }
+
if (dump_c) {
if (json_output) {
p_err("JSON output for C-syntax dump is not supported");
@@ -1391,7 +1438,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [id BTF_ID]\n"
- " %1$s %2$s dump BTF_SRC [format FORMAT]\n"
+ " %1$s %2$s dump BTF_SRC [format FORMAT] [root_id ROOT_ID]\n"
" %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index eec437cca2ea..e3785f9a697d 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -302,6 +302,7 @@ static bool func_add_bb_edges(struct func_node *func)
insn = bb->tail;
if (!is_jmp_insn(insn->code) ||
+ BPF_OP(insn->code) == BPF_CALL ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 4dbc4fcdf473..24fecdf8e430 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -885,6 +885,28 @@ probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
"V3_ISA_EXTENSION");
}
+/*
+ * Probe for the v4 instruction set extension introduced in commit 1f9a1ea821ff
+ * ("bpf: Support new sign-extension load insns").
+ */
+static void
+probe_v4_isa_extension(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[5] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 1, 1),
+ BPF_JMP32_A(1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_v4_isa_extension",
+ "ISA extension v4",
+ "V4_ISA_EXTENSION");
+}
+
static void
section_system_config(enum probe_component target, const char *define_prefix)
{
@@ -1029,6 +1051,7 @@ static void section_misc(const char *define_prefix, __u32 ifindex)
probe_bounded_loops(define_prefix, ifindex);
probe_v2_isa_extension(define_prefix, ifindex);
probe_v3_isa_extension(define_prefix, ifindex);
+ probe_v4_isa_extension(define_prefix, ifindex);
print_end_section();
}
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index bd9f960bce3d..d47191c6e55e 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -141,6 +141,7 @@ struct object {
};
static int verbose;
+static int warnings;
static int eprintf(int level, int var, const char *fmt, ...)
{
@@ -604,6 +605,7 @@ static int symbols_resolve(struct object *obj)
if (id->id) {
pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
str, id->id, type_id, id->id);
+ warnings++;
} else {
id->id = type_id;
(*nr)--;
@@ -625,8 +627,10 @@ static int id_patch(struct object *obj, struct btf_id *id)
int i;
/* For set, set8, id->id may be 0 */
- if (!id->id && !id->is_set && !id->is_set8)
+ if (!id->id && !id->is_set && !id->is_set8) {
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
+ warnings++;
+ }
for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
@@ -782,6 +786,7 @@ int main(int argc, const char **argv)
.funcs = RB_ROOT,
.sets = RB_ROOT,
};
+ bool fatal_warnings = false;
struct option btfid_options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show errors, etc)"),
@@ -789,6 +794,8 @@ int main(int argc, const char **argv)
"BTF data"),
OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"path of file providing base BTF"),
+ OPT_BOOLEAN(0, "fatal_warnings", &fatal_warnings,
+ "turn warnings into errors"),
OPT_END()
};
int err = -1;
@@ -823,7 +830,8 @@ int main(int argc, const char **argv)
if (symbols_patch(&obj))
goto out;
- err = 0;
+ if (!(fatal_warnings && warnings))
+ err = 0;
out:
if (obj.efile.elf) {
elf_end(obj.efile.elf);
diff --git a/tools/build/Build.include b/tools/build/Build.include
index c2a95ab47379..e45b2eb0d24a 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -13,6 +13,8 @@
comma := ,
squote := '
pound := \#
+empty :=
+space := $(empty) $(empty)
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 5fb3fb3d97e0..e710ed67a1b4 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -12,26 +12,6 @@
PHONY := __build
__build:
-ifeq ($(V),1)
- quiet =
- Q =
-else
- quiet=quiet_
- Q=@
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of commands
-# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
-ifeq ($(filter 3.%,$(MAKE_VERSION)),)
-short-opts := $(firstword -$(MAKEFLAGS))
-else
-short-opts := $(filter-out --%,$(MAKEFLAGS))
-endif
-
-ifneq ($(findstring s,$(short-opts)),)
- quiet=silent_
-endif
-
build-dir := $(srctree)/tools/build
# Define $(fixdep) for dep-cmd function
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index bca47d136f05..1931b6321314 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -28,6 +28,41 @@ endef
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
+# These, plus the ones in FEATURE_TESTS_EXTRA, are included in
+# tools/build/feature/test-all.c, which we try to build all together;
+# if that succeeds, all of those features are set to '1', i.e. enabled.
+#
+# There are things like fortify-source that will be set to 1 because test-all
+# is built with the flags needed to test whether it is enabled, resulting in:
+#
+# $ rm -rf /tmp/b ; mkdir /tmp/b ; make -C tools/perf O=/tmp/b feature-dump
+# $ grep fortify-source /tmp/b/FEATURE-DUMP
+# feature-fortify-source=1
+# $
+#
+# All the others should have lines in tools/build/feature/test-all.c like:
+#
+# #define main main_test_disassembler_four_args
+# # include "test-disassembler-four-args.c"
+# #undef main
+#
+# #define main main_test_libzstd
+# # include "test-libzstd.c"
+# #undef main
+#
+# int main(int argc, char *argv[])
+# {
+# main_test_disassembler_four_args();
+# main_test_libzstd();
+# return 0;
+# }
+#
+# If the sample above works, then we end up with these lines in the FEATURE-DUMP
+# file:
+#
+# feature-disassembler-four-args=1
+# feature-libzstd=1
+#
FEATURE_TESTS_BASIC := \
backtrace \
libdw \
@@ -38,17 +73,16 @@ FEATURE_TESTS_BASIC := \
glibc \
libbfd \
libbfd-buildid \
- libcap \
libelf \
libelf-getphdrnum \
libelf-gelf_getnote \
libelf-getshdrstrndx \
+ libelf-zstd \
libnuma \
numa_num_possible_cpus \
libperl \
libpython \
libslang \
- libslang-include-subdir \
libtraceevent \
libtracefs \
libcpupower \
@@ -89,13 +123,6 @@ FEATURE_TESTS_EXTRA := \
libbfd-liberty \
libbfd-liberty-z \
libopencsd \
- libunwind-x86 \
- libunwind-x86_64 \
- libunwind-arm \
- libunwind-aarch64 \
- libunwind-debug-frame \
- libunwind-debug-frame-arm \
- libunwind-debug-frame-aarch64 \
cxx \
llvm \
clang \
@@ -122,7 +149,6 @@ FEATURE_DISPLAY ?= \
glibc \
libbfd \
libbfd-buildid \
- libcap \
libelf \
libnuma \
numa_num_possible_cpus \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 043dfd00fce7..cb1e3e2feedf 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -13,7 +13,6 @@ FILES= \
test-gtk2.bin \
test-gtk2-infobar.bin \
test-hello.bin \
- test-libaudit.bin \
test-libbfd.bin \
test-libbfd-buildid.bin \
test-disassembler-four-args.bin \
@@ -28,6 +27,7 @@ FILES= \
test-libelf-getphdrnum.bin \
test-libelf-gelf_getnote.bin \
test-libelf-getshdrstrndx.bin \
+ test-libelf-zstd.bin \
test-libdebuginfod.bin \
test-libnuma.bin \
test-numa_num_possible_cpus.bin \
@@ -110,7 +110,7 @@ all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
- BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -196,6 +196,9 @@ $(OUTPUT)test-libelf-gelf_getnote.bin:
$(OUTPUT)test-libelf-getshdrstrndx.bin:
$(BUILD) -lelf
+$(OUTPUT)test-libelf-zstd.bin:
+ $(BUILD) -lelf -lz -lzstd
+
$(OUTPUT)test-libdebuginfod.bin:
$(BUILD) -ldebuginfod
@@ -228,9 +231,6 @@ $(OUTPUT)test-libunwind-debug-frame-arm.bin:
$(OUTPUT)test-libunwind-debug-frame-aarch64.bin:
$(BUILD) -lelf -llzma -lunwind-aarch64
-$(OUTPUT)test-libaudit.bin:
- $(BUILD) -laudit
-
$(OUTPUT)test-libslang.bin:
$(BUILD) -lslang
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 59ef3d7fe6a4..03ddaac6f4c4 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -58,8 +58,8 @@
# include "test-libelf-getshdrstrndx.c"
#undef main
-#define main main_test_libunwind
-# include "test-libunwind.c"
+#define main main_test_libelf_zstd
+# include "test-libelf-zstd.c"
#undef main
#define main main_test_libslang
@@ -170,6 +170,14 @@
# include "test-libzstd.c"
#undef main
+#define main main_test_libtraceevent
+# include "test-libtraceevent.c"
+#undef main
+
+#define main main_test_libtracefs
+# include "test-libtracefs.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -184,7 +192,6 @@ int main(int argc, char *argv[])
main_test_libelf_getphdrnum();
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
- main_test_libunwind();
main_test_libslang();
main_test_libbfd();
main_test_libbfd_buildid();
@@ -208,6 +215,8 @@ int main(int argc, char *argv[])
main_test_reallocarray();
main_test_disassembler_four_args();
main_test_libzstd();
+ main_test_libtraceevent();
+ main_test_libtracefs();
return 0;
}
diff --git a/tools/build/feature/test-libaudit.c b/tools/build/feature/test-libaudit.c
deleted file mode 100644
index f5b0863fa1ec..000000000000
--- a/tools/build/feature/test-libaudit.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <libaudit.h>
-
-extern int printf(const char *format, ...);
-
-int main(void)
-{
- printf("error message: %s\n", audit_errno_to_name(0));
-
- return audit_open();
-}
diff --git a/tools/build/feature/test-libelf-zstd.c b/tools/build/feature/test-libelf-zstd.c
new file mode 100644
index 000000000000..a1324a1db3bb
--- /dev/null
+++ b/tools/build/feature/test-libelf-zstd.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <libelf.h>
+
+int main(void)
+{
+ elf_compress(NULL, ELFCOMPRESS_ZSTD, 0);
+ return 0;
+}
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 65aa8ce142e5..bcc6df79301a 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -273,6 +273,16 @@
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Function call */
#define BPF_EMIT_CALL(FUNC) \
diff --git a/tools/include/linux/kasan-tags.h b/tools/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/tools/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index 7b82bc3cf107..d4a5c2399a66 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -23,6 +23,7 @@
#include <linux/prctl.h>
#include <linux/resource.h>
#include <linux/utsname.h>
+#include <linux/signal.h>
#include "arch.h"
#include "errno.h"
@@ -1226,6 +1227,23 @@ pid_t waitpid(pid_t pid, int *status, int options)
/*
+ * int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options);
+ */
+
+static __attribute__((unused))
+int sys_waitid(int which, pid_t pid, siginfo_t *infop, int options, struct rusage *rusage)
+{
+ return my_syscall5(__NR_waitid, which, pid, infop, options, rusage);
+}
+
+static __attribute__((unused))
+int waitid(int which, pid_t pid, siginfo_t *infop, int options)
+{
+ return __sysret(sys_waitid(which, pid, infop, options, NULL));
+}
+
+
+/*
* ssize_t write(int fd, const void *buf, size_t count);
*/
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4162afc6b5d0..2acf9b336371 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1573,6 +1573,16 @@ union bpf_attr {
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
+ /* The fd_array_cnt can be used to pass the length of the
+ * fd_array array. In this case all the [map] file descriptors
+ * passed in this array will be bound to the program, even if
+ * the maps are not referenced directly. The functionality is
+ * similar to the BPF_PROG_BIND_MAP syscall, but maps can be
+ * used by the verifier during the program load. If provided,
+ * then the fd_array[0,...,fd_array_cnt-1] is expected to be
+ * continuous.
+ */
+ __u32 fd_array_cnt;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index 2f082b01ff22..42ec5ddaab8d 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -117,12 +117,12 @@ struct xdp_options {
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
/* Request transmit timestamp. Upon completion, put it into tx_timestamp
- * field of union xsk_tx_metadata.
+ * field of struct xsk_tx_metadata.
*/
#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
/* Request transmit checksum offload. Checksum start position and offset
- * are communicated via csum_start and csum_offset fields of union
+ * are communicated via csum_start and csum_offset fields of struct
* xsk_tx_metadata.
*/
#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 337fde770e45..edec23406dbc 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -296,7 +296,7 @@ int filename__read_int(const char *filename, int *value)
int fd = open(filename, O_RDONLY), err = -1;
if (fd < 0)
- return -1;
+ return -errno;
if (read(fd, line, sizeof(line)) > 0) {
*value = atoi(line);
@@ -314,7 +314,7 @@ static int filename__read_ull_base(const char *filename,
int fd = open(filename, O_RDONLY), err = -1;
if (fd < 0)
- return -1;
+ return -errno;
if (read(fd, line, sizeof(line)) > 0) {
*value = strtoull(line, NULL, base);
@@ -372,7 +372,7 @@ int filename__write_int(const char *filename, int value)
char buf[64];
if (fd < 0)
- return err;
+ return -errno;
sprintf(buf, "%d", value);
if (write(fd, buf, sizeof(buf)) == sizeof(buf))
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index becdfa701c75..359f73ead613 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -238,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -311,6 +311,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+ attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
if (log_level) {
attr.log_buf = ptr_to_u64(log_buf);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a4a7b1ad1b63..435da95d2058 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -107,9 +107,12 @@ struct bpf_prog_load_opts {
*/
__u32 log_true_size;
__u32 token_fd;
+
+ /* if set, provides the length of fd_array */
+ __u32 fd_array_cnt;
size_t :0;
};
-#define bpf_prog_load_opts__last_field token_fd
+#define bpf_prog_load_opts__last_field fd_array_cnt
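+
+/*
+ * Usage sketch (illustrative only; the map FDs, insns and insn_cnt below are
+ * hypothetical). All FDs in fd_array[0..fd_array_cnt-1] get bound to the
+ * program at load time, even if the maps are not referenced directly:
+ *
+ *	int map_fds[] = { map_fd1, map_fd2 };
+ *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ *		.fd_array = map_fds,
+ *		.fd_array_cnt = 2,
+ *	);
+ *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "prog", "GPL",
+ *				insns, insn_cnt, &opts);
+ */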
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 12468ae0d573..48c66f3a9200 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -283,7 +283,7 @@ static int btf_parse_str_sec(struct btf *btf)
return -EINVAL;
}
if (!btf->base_btf && start[0]) {
- pr_debug("Invalid BTF string section\n");
+ pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
return -EINVAL;
}
return 0;
@@ -1186,6 +1186,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
+ err = -LIBBPF_ERRNO__FORMAT;
pr_warn("failed to open %s as ELF file\n", path);
goto done;
}
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
index b72f83e15156..53d1f3541bce 100644
--- a/tools/lib/bpf/btf_relocate.c
+++ b/tools/lib/bpf/btf_relocate.c
@@ -212,7 +212,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
* need to match both name and size, otherwise embedding the base
* struct/union in the split type is invalid.
*/
- for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
+ for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
err = btf_mark_embedded_composite_type_ids(r, id);
if (err)
goto done;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 66173ddb5a2d..194809da5172 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1731,12 +1731,24 @@ static int sys_memfd_create(const char *name, unsigned flags)
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
+#ifndef MFD_NOEXEC_SEAL
+#define MFD_NOEXEC_SEAL 0x0008U
+#endif
static int create_placeholder_fd(void)
{
+ unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
+ const char *name = "libbpf-placeholder-fd";
int fd;
- fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ fd = ensure_good_fd(sys_memfd_create(name, flags));
+ if (fd >= 0)
+ return fd;
+ else if (errno != EINVAL)
+ return -errno;
+
+ /* Possibly running on kernel without MFD_NOEXEC_SEAL */
+ fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
if (fd < 0)
return -errno;
return fd;
@@ -11375,9 +11387,33 @@ static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
struct kprobe_multi_resolve *res = data->res;
int err;
- if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
+ if (!glob_match(sym_name, res->pattern))
return 0;
+ if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) {
+ /* Some versions of kernel strip out .llvm.<hash> suffix from
+ * function names reported in available_filter_functions, but
+ * don't do so for kallsyms. While this is clearly a kernel
+ * bug (fixed by [0]) we try to accommodate that in libbpf to
+ * make multi-kprobe usability a bit better: if no match is
+ * found, we will strip .llvm. suffix and try one more time.
+ *
+ * [0] fb6a421fb615 ("kallsyms: Match symbols exactly with CONFIG_LTO_CLANG")
+ */
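+ /*
+ * Hypothetical illustration: kallsyms may report
+ * "some_func.llvm.1234567890" while available_filter_functions lists
+ * just "some_func"; the trimmed copy below is what gets looked up.
+ */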
+ char sym_trim[256], *psym_trim = sym_trim, *sym_sfx;
+
+ if (!(sym_sfx = strstr(sym_name, ".llvm.")))
+ return 0;
+
+ /* psym_trim vs sym_trim dance is done to avoid pointer vs array
+ * coercion differences and get proper `const char **` pointer
+ * which avail_func_cmp() expects
+ */
+ snprintf(sym_trim, sizeof(sym_trim), "%.*s", (int)(sym_sfx - sym_name), sym_name);
+ if (!bsearch(&psym_trim, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
+ return 0;
+ }
+
err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
if (err)
return err;
@@ -11522,7 +11558,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
struct bpf_link *link = NULL;
const unsigned long *addrs;
int err, link_fd, prog_fd;
- bool retprobe, session;
+ bool retprobe, session, unique_match;
const __u64 *cookies;
const char **syms;
size_t cnt;
@@ -11541,6 +11577,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
addrs = OPTS_GET(opts, addrs, false);
cnt = OPTS_GET(opts, cnt, false);
cookies = OPTS_GET(opts, cookies, false);
+ unique_match = OPTS_GET(opts, unique_match, false);
if (!pattern && !addrs && !syms)
return libbpf_err_ptr(-EINVAL);
@@ -11548,6 +11585,8 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
return libbpf_err_ptr(-EINVAL);
if (!pattern && !cnt)
return libbpf_err_ptr(-EINVAL);
+ if (!pattern && unique_match)
+ return libbpf_err_ptr(-EINVAL);
if (addrs && syms)
return libbpf_err_ptr(-EINVAL);
@@ -11558,6 +11597,14 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
err = libbpf_available_kallsyms_parse(&res);
if (err)
goto error;
+
+ if (unique_match && res.cnt != 1) {
+ pr_warn("prog '%s': failed to find a unique match for '%s' (%zu matches)\n",
+ prog->name, pattern, res.cnt);
+ err = -EINVAL;
+ goto error;
+ }
+
addrs = res.addrs;
cnt = res.cnt;
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index b2ce3a72b11d..3020ee45303a 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -552,10 +552,12 @@ struct bpf_kprobe_multi_opts {
bool retprobe;
/* create session kprobes */
bool session;
+ /* enforce unique match */
+ bool unique_match;
size_t :0;
};
-#define bpf_kprobe_multi_opts__last_field session
+#define bpf_kprobe_multi_opts__last_field unique_match
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
@@ -1796,9 +1798,14 @@ struct bpf_linker_file_opts {
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
+LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
const char *filename,
const struct bpf_linker_file_opts *opts);
+LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
+ const struct bpf_linker_file_opts *opts);
+LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
+ const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 54b6f312cfa8..a8b2936a1646 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -432,4 +432,8 @@ LIBBPF_1.5.0 {
} LIBBPF_1.4.0;
LIBBPF_1.6.0 {
+ global:
+ bpf_linker__add_buf;
+ bpf_linker__add_fd;
+ bpf_linker__new_fd;
} LIBBPF_1.5.0;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index cf71d149fe26..b52f71c59616 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -4,6 +4,10 @@
*
* Copyright (c) 2021 Facebook
*/
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
@@ -16,6 +20,7 @@
#include <elf.h>
#include <libelf.h>
#include <fcntl.h>
+#include <sys/mman.h>
#include "libbpf.h"
#include "btf.h"
#include "libbpf_internal.h"
@@ -152,15 +157,19 @@ struct bpf_linker {
/* global (including extern) ELF symbols */
int glob_sym_cnt;
struct glob_sym *glob_syms;
+
+ bool fd_is_owned;
};
#define pr_warn_elf(fmt, ...) \
libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
-static int init_output_elf(struct bpf_linker *linker, const char *file);
+static int init_output_elf(struct bpf_linker *linker);
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts,
+static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
+ const char *filename);
+
+static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
@@ -191,7 +200,7 @@ void bpf_linker__free(struct bpf_linker *linker)
if (linker->elf)
elf_end(linker->elf);
- if (linker->fd >= 0)
+ if (linker->fd >= 0 && linker->fd_is_owned)
close(linker->fd);
strset__free(linker->strtab_strs);
@@ -233,9 +242,63 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
if (!linker)
return errno = ENOMEM, NULL;
- linker->fd = -1;
+ linker->filename = strdup(filename);
+ if (!linker->filename) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ linker->fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
+ if (linker->fd < 0) {
+ err = -errno;
+ pr_warn("failed to create '%s': %d\n", filename, err);
+ goto err_out;
+ }
+ linker->fd_is_owned = true;
+
+ err = init_output_elf(linker);
+ if (err)
+ goto err_out;
+
+ return linker;
+
+err_out:
+ bpf_linker__free(linker);
+ return errno = -err, NULL;
+}
+
+struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts)
+{
+ struct bpf_linker *linker;
+ char filename[32];
+ int err;
+
+ if (fd < 0)
+ return errno = EINVAL, NULL;
+
+ if (!OPTS_VALID(opts, bpf_linker_opts))
+ return errno = EINVAL, NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn_elf("libelf initialization failed");
+ return errno = EINVAL, NULL;
+ }
- err = init_output_elf(linker, filename);
+ linker = calloc(1, sizeof(*linker));
+ if (!linker)
+ return errno = ENOMEM, NULL;
+
+ snprintf(filename, sizeof(filename), "fd:%d", fd);
+ linker->filename = strdup(filename);
+ if (!linker->filename) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ linker->fd = fd;
+ linker->fd_is_owned = false;
+
+ err = init_output_elf(linker);
if (err)
goto err_out;
@@ -294,23 +357,12 @@ static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
return sym;
}
-static int init_output_elf(struct bpf_linker *linker, const char *file)
+static int init_output_elf(struct bpf_linker *linker)
{
int err, str_off;
Elf64_Sym *init_sym;
struct dst_sec *sec;
- linker->filename = strdup(file);
- if (!linker->filename)
- return -ENOMEM;
-
- linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
- if (linker->fd < 0) {
- err = -errno;
- pr_warn("failed to create '%s': %s\n", file, errstr(err));
- return err;
- }
-
linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
if (!linker->elf) {
pr_warn_elf("failed to create ELF object");
@@ -436,19 +488,16 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
-int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts)
+static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
+ const char *filename)
{
struct src_obj obj = {};
int err = 0;
- if (!OPTS_VALID(opts, bpf_linker_file_opts))
- return libbpf_err(-EINVAL);
-
- if (!linker->elf)
- return libbpf_err(-EINVAL);
+ obj.filename = filename;
+ obj.fd = fd;
- err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
+ err = err ?: linker_load_obj_file(linker, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@@ -463,12 +512,91 @@ int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
free(obj.sym_map);
if (obj.elf)
elf_end(obj.elf);
- if (obj.fd >= 0)
- close(obj.fd);
+ return err;
+}
+
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts)
+{
+ int fd, err;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
+ return libbpf_err(err);
+ }
+
+ err = bpf_linker_add_file(linker, fd, filename);
+ close(fd);
return libbpf_err(err);
}
+int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
+ const struct bpf_linker_file_opts *opts)
+{
+ char filename[32];
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ if (fd < 0)
+ return libbpf_err(-EINVAL);
+
+ snprintf(filename, sizeof(filename), "fd:%d", fd);
+ err = bpf_linker_add_file(linker, fd, filename);
+ return libbpf_err(err);
+}
+
+int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
+ const struct bpf_linker_file_opts *opts)
+{
+ char filename[32];
+ int fd, written, ret;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz);
+
+ fd = memfd_create(filename, 0);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret));
+ return libbpf_err(ret);
+ }
+
+ written = 0;
+ while (written < buf_sz) {
+ ret = write(fd, buf + written, buf_sz - written);
+ if (ret < 0) {
+ ret = -errno;
+ pr_warn("failed to write '%s': %s\n", filename, errstr(ret));
+ goto err_out;
+ }
+ written += ret;
+ }
+
+ ret = bpf_linker_add_file(linker, fd, filename);
+err_out:
+ close(fd);
+ return libbpf_err(ret);
+}
+
static bool is_dwarf_sec_name(const char *name)
{
/* approximation, but the actual list is too long */
@@ -534,8 +662,7 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
-static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
- const struct bpf_linker_file_opts *opts,
+static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj)
{
int err = 0;
@@ -554,36 +681,26 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
#error "Unknown __BYTE_ORDER__"
#endif
- pr_debug("linker: adding object file '%s'...\n", filename);
-
- obj->filename = filename;
+ pr_debug("linker: adding object file '%s'...\n", obj->filename);
- obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
- if (obj->fd < 0) {
- err = -errno;
- pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
- return err;
- }
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
if (!obj->elf) {
- err = -errno;
- pr_warn_elf("failed to parse ELF file '%s'", filename);
- return err;
+ pr_warn_elf("failed to parse ELF file '%s'", obj->filename);
+ return -EINVAL;
}
/* Sanity check ELF file high-level properties */
ehdr = elf64_getehdr(obj->elf);
if (!ehdr) {
- err = -errno;
- pr_warn_elf("failed to get ELF header for %s", filename);
- return err;
+ pr_warn_elf("failed to get ELF header for %s", obj->filename);
+ return -EINVAL;
}
/* Linker output endianness set by first input object */
obj_byteorder = ehdr->e_ident[EI_DATA];
if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
err = -EOPNOTSUPP;
- pr_warn("unknown byte order of ELF file %s\n", filename);
+ pr_warn("unknown byte order of ELF file %s\n", obj->filename);
return err;
}
if (link_byteorder == ELFDATANONE) {
@@ -593,7 +710,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj_byteorder == ELFDATA2MSB ? "big" : "little");
} else if (link_byteorder != obj_byteorder) {
err = -EOPNOTSUPP;
- pr_warn("byte order mismatch with ELF file %s\n", filename);
+ pr_warn("byte order mismatch with ELF file %s\n", obj->filename);
return err;
}
@@ -601,14 +718,13 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|| ehdr->e_machine != EM_BPF
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
err = -EOPNOTSUPP;
- pr_warn_elf("unsupported kind of ELF file %s", filename);
+ pr_warn_elf("unsupported kind of ELF file %s", obj->filename);
return err;
}
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
- err = -errno;
- pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
- return err;
+ pr_warn_elf("failed to get SHSTRTAB section index for %s", obj->filename);
+ return -EINVAL;
}
scn = NULL;
@@ -618,26 +734,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
shdr = elf64_getshdr(scn);
if (!shdr) {
- err = -errno;
pr_warn_elf("failed to get section #%zu header for %s",
- sec_idx, filename);
- return err;
+ sec_idx, obj->filename);
+ return -EINVAL;
}
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
if (!sec_name) {
- err = -errno;
pr_warn_elf("failed to get section #%zu name for %s",
- sec_idx, filename);
- return err;
+ sec_idx, obj->filename);
+ return -EINVAL;
}
data = elf_getdata(scn, 0);
if (!data) {
- err = -errno;
pr_warn_elf("failed to get section #%zu (%s) data from %s",
- sec_idx, sec_name, filename);
- return err;
+ sec_idx, sec_name, obj->filename);
+ return -EINVAL;
}
sec = add_src_sec(obj, sec_name);
@@ -672,7 +785,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
err = libbpf_get_error(obj->btf);
if (err) {
pr_warn("failed to parse .BTF from %s: %s\n",
- filename, errstr(err));
+ obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@@ -683,7 +796,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
err = libbpf_get_error(obj->btf_ext);
if (err) {
pr_warn("failed to parse .BTF.ext from '%s': %s\n",
- filename, errstr(err));
+ obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@@ -700,7 +813,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
break;
default:
pr_warn("unrecognized section #%zu (%s) in %s\n",
- sec_idx, sec_name, filename);
+ sec_idx, sec_name, obj->filename);
err = -EINVAL;
return err;
}
@@ -2680,22 +2793,23 @@ int bpf_linker__finalize(struct bpf_linker *linker)
/* Finalize ELF layout */
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to finalize ELF layout");
return libbpf_err(err);
}
/* Write out final ELF contents */
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to write ELF contents");
return libbpf_err(err);
}
elf_end(linker->elf);
- close(linker->fd);
-
linker->elf = NULL;
+
+ if (linker->fd_is_owned)
+ close(linker->fd);
linker->fd = -1;
return 0;
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index 5f085736c6c4..4e4a52742b01 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -661,7 +661,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
*/
usdt_abs_ip = note.loc_addr;
- if (base_addr)
+ if (base_addr && note.base_addr)
usdt_abs_ip += base_addr - note.base_addr;
/* When attaching uprobes (which is what USDTs basically are)
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index fcfb9499ef9c..59aabdd3cabf 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -39,7 +39,6 @@ SYNOPSIS
struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
- struct perf_cpu_map *perf_cpu_map__read(FILE *file);
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index cae799ad44e1..fcc47214062a 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
@@ -10,6 +11,9 @@
#include <ctype.h>
#include <limits.h>
#include "internal.h"
+#include <api/fs/fs.h>
+
+#define MAX_NR_CPUS 4096
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
@@ -100,12 +104,12 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void)
static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
struct perf_cpu_map *cpus = NULL;
- FILE *onlnf;
+ char *buf = NULL;
+ size_t buf_len;
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (onlnf) {
- cpus = perf_cpu_map__read(onlnf);
- fclose(onlnf);
+ if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
+ cpus = perf_cpu_map__new(buf);
+ free(buf);
}
return cpus;
}
@@ -158,62 +162,6 @@ static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu
return cpus;
}
-struct perf_cpu_map *perf_cpu_map__read(FILE *file)
-{
- struct perf_cpu_map *cpus = NULL;
- int nr_cpus = 0;
- struct perf_cpu *tmp_cpus = NULL, *tmp;
- int max_entries = 0;
- int n, cpu, prev;
- char sep;
-
- sep = 0;
- prev = -1;
- for (;;) {
- n = fscanf(file, "%u%c", &cpu, &sep);
- if (n <= 0)
- break;
- if (prev >= 0) {
- int new_max = nr_cpus + cpu - prev - 1;
-
- WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
- "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
-
- if (new_max >= max_entries) {
- max_entries = new_max + MAX_NR_CPUS / 2;
- tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
- if (tmp == NULL)
- goto out_free_tmp;
- tmp_cpus = tmp;
- }
-
- while (++prev < cpu)
- tmp_cpus[nr_cpus++].cpu = prev;
- }
- if (nr_cpus == max_entries) {
- max_entries += MAX_NR_CPUS;
- tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
- if (tmp == NULL)
- goto out_free_tmp;
- tmp_cpus = tmp;
- }
-
- tmp_cpus[nr_cpus++].cpu = cpu;
- if (n == 2 && sep == '-')
- prev = cpu;
- else
- prev = -1;
- if (n == 1 || sep == '\n')
- break;
- }
-
- if (nr_cpus > 0)
- cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-out_free_tmp:
- free(tmp_cpus);
- return cpus;
-}
-
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
struct perf_cpu_map *cpus = NULL;
@@ -238,7 +186,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
p = NULL;
start_cpu = strtoul(cpu_list, &p, 0);
if (start_cpu >= INT_MAX
- || (*p != '\0' && *p != ',' && *p != '-'))
+ || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
goto invalid;
if (*p == '-') {
@@ -246,7 +194,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
p = NULL;
end_cpu = strtoul(cpu_list, &p, 0);
- if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ if (end_cpu >= INT_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
goto invalid;
if (end_cpu < start_cpu)
@@ -265,7 +213,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
goto invalid;
if (nr_cpus == max_entries) {
- max_entries += MAX_NR_CPUS;
+ max_entries += max(end_cpu - start_cpu + 1, 16UL);
tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto invalid;
@@ -279,14 +227,15 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
cpu_list = p;
}
- if (nr_cpus > 0)
+ if (nr_cpus > 0) {
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else if (*cpu_list != '\0') {
+ } else if (*cpu_list != '\0') {
pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
cpu_list);
cpus = perf_cpu_map__new_online_cpus();
- } else
+ } else {
cpus = perf_cpu_map__new_any_cpu();
+ }
invalid:
free(tmp_cpus);
out:
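
With the changes above, perf_cpu_map__new() tolerates a trailing newline, so the buffer returned by sysfs__read_str() for devices/system/cpu/online can be passed through unmodified. A stand-alone sketch of that parsing idea (not the libperf implementation), for input such as "0-3,7\n":

#include <stdio.h>
#include <stdlib.h>

/* Toy parser: count the CPUs in a "0-3,7\n"-style list, -1 on bad input. */
static int count_cpus(const char *s)
{
	int count = 0;

	while (*s && *s != '\n') {
		char *end;
		long start = strtol(s, &end, 10);
		long stop = start;

		if (*end == '-') {
			stop = strtol(end + 1, &end, 10);
			if (stop < start)
				return -1;
		}
		count += stop - start + 1;
		if (*end == ',')
			end++;			/* more entries follow */
		else if (*end && *end != '\n')
			return -1;		/* unexpected character */
		s = end;
	}
	return count;
}

int main(void)
{
	printf("%d\n", count_cpus("0-3,7\n"));	/* prints 5 */
	return 0;
}
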
@@ -436,46 +385,49 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu
}
/*
- * Merge two cpumaps
+ * Merge two cpumaps.
+ *
+ * If 'other' is a subset of '*orig', '*orig' keeps itself with no reference count
+ * change (similar to "realloc").
+ *
+ * If '*orig' is a subset of 'other', '*orig' reuses 'other' with its reference
+ * count increased.
*
- * orig either gets freed and replaced with a new map, or reused
- * with no reference count change (similar to "realloc")
- * other has its reference count increased.
+ * Otherwise, '*orig' gets freed and replaced with a new map.
*/
-
-struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
- struct perf_cpu_map *other)
+int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
struct perf_cpu *tmp_cpus;
int tmp_len;
int i, j, k;
struct perf_cpu_map *merged;
- if (perf_cpu_map__is_subset(orig, other))
- return orig;
- if (perf_cpu_map__is_subset(other, orig)) {
- perf_cpu_map__put(orig);
- return perf_cpu_map__get(other);
+ if (perf_cpu_map__is_subset(*orig, other))
+ return 0;
+ if (perf_cpu_map__is_subset(other, *orig)) {
+ perf_cpu_map__put(*orig);
+ *orig = perf_cpu_map__get(other);
+ return 0;
}
- tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
+ tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
- return NULL;
+ return -ENOMEM;
/* Standard merge algorithm from wikipedia */
i = j = k = 0;
- while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
- if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
- if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
+ while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+ if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
j++;
- tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
} else
tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
}
- while (i < __perf_cpu_map__nr(orig))
- tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ while (i < __perf_cpu_map__nr(*orig))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
while (j < __perf_cpu_map__nr(other))
tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
@@ -483,8 +435,9 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
merged = cpu_map__trim_new(k, tmp_cpus);
free(tmp_cpus);
- perf_cpu_map__put(orig);
- return merged;
+ perf_cpu_map__put(*orig);
+ *orig = merged;
+ return 0;
}
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
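
Per the reworked comment and signature above, perf_cpu_map__merge() now updates the map through a pointer and reports failure as a negative errno instead of returning a new map. A brief usage sketch against the new API (assuming the libperf headers and two valid maps; the helper name is invented):

#include <perf/cpumap.h>
#include <stdio.h>

/* Fold 'other' into '*all_cpus'; on success '*all_cpus' covers both maps. */
static int merge_into(struct perf_cpu_map **all_cpus, struct perf_cpu_map *other)
{
	int err = perf_cpu_map__merge(all_cpus, other);

	if (err) {
		fprintf(stderr, "failed to merge cpu maps: %d\n", err);
		return err;
	}
	/* The caller keeps its own reference on 'other' and still puts it. */
	return 0;
}
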
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 83c43dc13313..b1f4c8176b32 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -89,7 +89,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
evsel->threads = perf_thread_map__get(evlist->threads);
}
- evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
+ perf_cpu_map__merge(&evlist->all_cpus, evsel->cpus);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index 49649eb51ce4..e2be2d17c32b 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -21,10 +21,6 @@ DECLARE_RC_STRUCT(perf_cpu_map) {
struct perf_cpu map[];
};
-#ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS 2048
-#endif
-
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 90457d17fb2f..188a667babc6 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -3,7 +3,6 @@
#define __LIBPERF_CPUMAP_H
#include <perf/core.h>
-#include <stdio.h>
#include <stdbool.h>
/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
@@ -37,10 +36,9 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
* perf_cpu_map__new_online_cpus is returned.
*/
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
- struct perf_cpu_map *other);
+LIBPERF_API int perf_cpu_map__merge(struct perf_cpu_map **orig,
+ struct perf_cpu_map *other);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 2aa79b696032..fdd8304fe9d0 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -6,7 +6,6 @@ LIBPERF_0.0.1 {
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
- perf_cpu_map__read;
perf_cpu_map__nr;
perf_cpu_map__cpu;
perf_cpu_map__has_any_cpu_or_is_empty;
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index e16cef160bc2..ce32cb35007d 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -95,7 +95,7 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
ynl_attr_for_each_payload(start, data_len, attr) {
astart_off = (char *)attr - (char *)start;
- aend_off = astart_off + ynl_attr_data_len(attr);
+ aend_off = (char *)ynl_attr_data_end(attr) - (char *)start;
if (aend_off <= off)
continue;
diff --git a/tools/pci/Build b/tools/pci/Build
deleted file mode 100644
index c375aea21790..000000000000
--- a/tools/pci/Build
+++ /dev/null
@@ -1 +0,0 @@
-pcitest-y += pcitest.o
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
deleted file mode 100644
index 62d41f1a1e2c..000000000000
--- a/tools/pci/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-include ../scripts/Makefile.include
-
-bindir ?= /usr/bin
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-endif
-
-# Do not use make's built-in rules
-# (this improves performance and avoids hard-to-debug behaviour);
-MAKEFLAGS += -r
-
-CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
-
-ALL_TARGETS := pcitest
-ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
-
-SCRIPTS := pcitest.sh
-
-all: $(ALL_PROGRAMS)
-
-export srctree OUTPUT CC LD CFLAGS
-include $(srctree)/tools/build/Makefile.include
-
-#
-# We need the following to be outside of kernel tree
-#
-$(OUTPUT)include/linux/: ../../include/uapi/linux/
- mkdir -p $(OUTPUT)include/linux/ 2>&1 || true
- ln -sf $(CURDIR)/../../include/uapi/linux/pcitest.h $@
-
-prepare: $(OUTPUT)include/linux/
-
-PCITEST_IN := $(OUTPUT)pcitest-in.o
-$(PCITEST_IN): prepare FORCE
- $(Q)$(MAKE) $(build)=pcitest
-$(OUTPUT)pcitest: $(PCITEST_IN)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
-
-clean:
- rm -f $(ALL_PROGRAMS)
- rm -rf $(OUTPUT)include/
- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-
-install: $(ALL_PROGRAMS)
- install -d -m 755 $(DESTDIR)$(bindir); \
- for program in $(ALL_PROGRAMS); do \
- install $$program $(DESTDIR)$(bindir); \
- done; \
- for script in $(SCRIPTS); do \
- install $$script $(DESTDIR)$(bindir); \
- done
-
-FORCE:
-
-.PHONY: all install clean FORCE prepare
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
deleted file mode 100644
index 7b530d838d40..000000000000
--- a/tools/pci/pcitest.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**
- * Userspace PCI Endpoint Test Module
- *
- * Copyright (C) 2017 Texas Instruments
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
- */
-
-#include <errno.h>
-#include <fcntl.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <linux/pcitest.h>
-
-static char *result[] = { "NOT OKAY", "OKAY" };
-static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
-
-struct pci_test {
- char *device;
- char barnum;
- bool legacyirq;
- unsigned int msinum;
- unsigned int msixnum;
- int irqtype;
- bool set_irqtype;
- bool get_irqtype;
- bool clear_irq;
- bool read;
- bool write;
- bool copy;
- unsigned long size;
- bool use_dma;
-};
-
-static int run_test(struct pci_test *test)
-{
- struct pci_endpoint_test_xfer_param param = {};
- int ret = -EINVAL;
- int fd;
-
- fd = open(test->device, O_RDWR);
- if (fd < 0) {
- perror("can't open PCI Endpoint Test device");
- return -ENODEV;
- }
-
- if (test->barnum >= 0 && test->barnum <= 5) {
- ret = ioctl(fd, PCITEST_BAR, test->barnum);
- fprintf(stdout, "BAR%d:\t\t", test->barnum);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->set_irqtype) {
- ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
- fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
- if (ret < 0)
- fprintf(stdout, "FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->get_irqtype) {
- ret = ioctl(fd, PCITEST_GET_IRQTYPE);
- fprintf(stdout, "GET IRQ TYPE:\t\t");
- if (ret < 0)
- fprintf(stdout, "FAILED\n");
- else
- fprintf(stdout, "%s\n", irq[ret]);
- }
-
- if (test->clear_irq) {
- ret = ioctl(fd, PCITEST_CLEAR_IRQ);
- fprintf(stdout, "CLEAR IRQ:\t\t");
- if (ret < 0)
- fprintf(stdout, "FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->legacyirq) {
- ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
- fprintf(stdout, "LEGACY IRQ:\t");
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->msinum > 0 && test->msinum <= 32) {
- ret = ioctl(fd, PCITEST_MSI, test->msinum);
- fprintf(stdout, "MSI%u:\t\t", test->msinum);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->msixnum > 0 && test->msixnum <= 2048) {
- ret = ioctl(fd, PCITEST_MSIX, test->msixnum);
- fprintf(stdout, "MSI-X%u:\t\t", test->msixnum);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->write) {
- param.size = test->size;
- if (test->use_dma)
- param.flags = PCITEST_FLAGS_USE_DMA;
- ret = ioctl(fd, PCITEST_WRITE, &param);
- fprintf(stdout, "WRITE (%7lu bytes):\t\t", test->size);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->read) {
- param.size = test->size;
- if (test->use_dma)
- param.flags = PCITEST_FLAGS_USE_DMA;
- ret = ioctl(fd, PCITEST_READ, &param);
- fprintf(stdout, "READ (%7lu bytes):\t\t", test->size);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- if (test->copy) {
- param.size = test->size;
- if (test->use_dma)
- param.flags = PCITEST_FLAGS_USE_DMA;
- ret = ioctl(fd, PCITEST_COPY, &param);
- fprintf(stdout, "COPY (%7lu bytes):\t\t", test->size);
- if (ret < 0)
- fprintf(stdout, "TEST FAILED\n");
- else
- fprintf(stdout, "%s\n", result[ret]);
- }
-
- fflush(stdout);
- close(fd);
- return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */
-}
-
-int main(int argc, char **argv)
-{
- int c;
- struct pci_test *test;
-
- test = calloc(1, sizeof(*test));
- if (!test) {
- perror("Fail to allocate memory for pci_test\n");
- return -ENOMEM;
- }
-
- /* since '0' is a valid BAR number, initialize it to -1 */
- test->barnum = -1;
-
- /* set default size as 100KB */
- test->size = 0x19000;
-
- /* set default endpoint device */
- test->device = "/dev/pci-endpoint-test.0";
-
- while ((c = getopt(argc, argv, "D:b:m:x:i:deIlhrwcs:")) != EOF)
- switch (c) {
- case 'D':
- test->device = optarg;
- continue;
- case 'b':
- test->barnum = atoi(optarg);
- if (test->barnum < 0 || test->barnum > 5)
- goto usage;
- continue;
- case 'l':
- test->legacyirq = true;
- continue;
- case 'm':
- test->msinum = atoi(optarg);
- if (test->msinum < 1 || test->msinum > 32)
- goto usage;
- continue;
- case 'x':
- test->msixnum = atoi(optarg);
- if (test->msixnum < 1 || test->msixnum > 2048)
- goto usage;
- continue;
- case 'i':
- test->irqtype = atoi(optarg);
- if (test->irqtype < 0 || test->irqtype > 2)
- goto usage;
- test->set_irqtype = true;
- continue;
- case 'I':
- test->get_irqtype = true;
- continue;
- case 'r':
- test->read = true;
- continue;
- case 'w':
- test->write = true;
- continue;
- case 'c':
- test->copy = true;
- continue;
- case 'e':
- test->clear_irq = true;
- continue;
- case 's':
- test->size = strtoul(optarg, NULL, 0);
- continue;
- case 'd':
- test->use_dma = true;
- continue;
- case 'h':
- default:
-usage:
- fprintf(stderr,
- "usage: %s [options]\n"
- "Options:\n"
- "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
- "\t-b <bar num> BAR test (bar number between 0..5)\n"
- "\t-m <msi num> MSI test (msi number between 1..32)\n"
- "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
- "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
- "\t-e Clear IRQ\n"
- "\t-I Get current IRQ type configured\n"
- "\t-d Use DMA\n"
- "\t-l Legacy IRQ test\n"
- "\t-r Read buffer test\n"
- "\t-w Write buffer test\n"
- "\t-c Copy buffer test\n"
- "\t-s <size> Size of buffer {default: 100KB}\n"
- "\t-h Print this help message\n",
- argv[0]);
- return -EINVAL;
- }
-
- return run_test(test);
-}
diff --git a/tools/pci/pcitest.sh b/tools/pci/pcitest.sh
deleted file mode 100644
index 75ed48ff2990..000000000000
--- a/tools/pci/pcitest.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-echo "BAR tests"
-echo
-
-bar=0
-
-while [ $bar -lt 6 ]
-do
- pcitest -b $bar
- bar=`expr $bar + 1`
-done
-echo
-
-echo "Interrupt tests"
-echo
-
-pcitest -i 0
-pcitest -l
-
-pcitest -i 1
-msi=1
-
-while [ $msi -lt 33 ]
-do
- pcitest -m $msi
- msi=`expr $msi + 1`
-done
-echo
-
-pcitest -i 2
-msix=1
-
-while [ $msix -lt 2049 ]
-do
- pcitest -x $msix
- msix=`expr $msix + 1`
-done
-echo
-
-echo "Read Tests"
-echo
-
-pcitest -i 1
-
-pcitest -r -s 1
-pcitest -r -s 1024
-pcitest -r -s 1025
-pcitest -r -s 1024000
-pcitest -r -s 1024001
-echo
-
-echo "Write Tests"
-echo
-
-pcitest -w -s 1
-pcitest -w -s 1024
-pcitest -w -s 1025
-pcitest -w -s 1024000
-pcitest -w -s 1024001
-echo
-
-echo "Copy Tests"
-echo
-
-pcitest -c -s 1
-pcitest -c -s 1024
-pcitest -c -s 1025
-pcitest -c -s 1024000
-pcitest -c -s 1024001
-echo
diff --git a/tools/perf/Documentation/perf-check.txt b/tools/perf/Documentation/perf-check.txt
index 31741499e786..a764a4629220 100644
--- a/tools/perf/Documentation/perf-check.txt
+++ b/tools/perf/Documentation/perf-check.txt
@@ -51,7 +51,6 @@ feature::
dwarf_getlocations / HAVE_LIBDW_SUPPORT
dwarf-unwind / HAVE_DWARF_UNWIND_SUPPORT
auxtrace / HAVE_AUXTRACE_SUPPORT
- libaudit / HAVE_LIBAUDIT_SUPPORT
libbfd / HAVE_LIBBFD_SUPPORT
libcapstone / HAVE_LIBCAPSTONE_SUPPORT
libcrypto / HAVE_LIBCRYPTO_SUPPORT
@@ -67,7 +66,6 @@ feature::
libunwind / HAVE_LIBUNWIND_SUPPORT
lzma / HAVE_LZMA_SUPPORT
numa_num_possible_cpus / HAVE_LIBNUMA_SUPPORT
- syscall_table / HAVE_SYSCALL_TABLE_SUPPORT
zlib / HAVE_ZLIB_SUPPORT
zstd / HAVE_ZSTD_SUPPORT
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 1f668d4724e3..36ebebc875ea 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -40,7 +40,7 @@ The '$HOME/.perfconfig' file is used to store a per-user configuration.
The file '$(sysconfdir)/perfconfig' can be used to
store a system-wide default configuration.
-One an disable reading config files by setting the PERF_CONFIG environment
+One can disable reading config files by setting the PERF_CONFIG environment
variable to /dev/null, or provide an alternate config file by setting that
variable.
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index eaec8253be68..b77f58c4d2fd 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -148,6 +148,17 @@ OPTIONS for 'perf ftrace latency'
--use-nsec::
Use nano-second instead of micro-second as a base unit of the histogram.
+--bucket-range=::
+ Bucket range in ms or ns (according to -n/--use-nsec), default is log2() mode.
+
+--min-latency=::
+ Minimum latency for the start of the first bucket, in ms or ns (according to
+ -n/--use-nsec).
+
+--max-latency=::
+ Maximum latency for the start of the last bucket, in ms or ns (according to
+ -n/--use-nsec). The setting is ignored if the value results in more than
+ 22 buckets.
OPTIONS for 'perf ftrace profile'
---------------------------------
@@ -190,6 +201,14 @@ OPTIONS for 'perf ftrace profile'
Sort the result by the given field. Available values are:
total, avg, max, count, name. Default is 'total'.
+--graph-opts::
+ List of options that can be set:
+
+ - nosleep-time - Measure on-CPU time only for function_graph tracer.
+ - noirqs - Ignore functions that happen inside an interrupt.
+ - thresh=<n> - Set the trace duration threshold in microseconds.
+ - depth=<n> - Set max depth for function graph tracer to follow.
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 59ab1ff9d75f..cc0f37f0fa5a 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -151,7 +151,7 @@ displayed as follows:
There are two ways that instructions-per-cycle (IPC) can be calculated depending
on the recording.
-If the 'cyc' config term (see config terms section below) was used, then IPC
+If the 'cyc' config term (see <<_config_terms,config terms>> section below) was used, then IPC
and cycle events are calculated using the cycle count from CYC packets, otherwise
MTC packets are used - refer to the 'mtc' config term. When MTC is used, however,
the values are less accurate because the timing is less accurate.
@@ -239,7 +239,7 @@ which is the same as
-e intel_pt/tsc=1,noretcomp=0/
-Note there are now new config terms - see section 'config terms' further below.
+Note there are other config terms - see section <<_config_terms,config terms>> further below.
The config terms are listed in /sys/devices/intel_pt/format. They are bit
fields within the config member of the struct perf_event_attr which is
@@ -311,218 +311,271 @@ perf_event_attr is displayed if the -vv option is used e.g.
config terms
~~~~~~~~~~~~
-The June 2015 version of Intel 64 and IA-32 Architectures Software Developer
-Manuals, Chapter 36 Intel Processor Trace, defined new Intel PT features.
-Some of the features are reflect in new config terms. All the config terms are
-described below.
-
-tsc Always supported. Produces TSC timestamp packets to provide
- timing information. In some cases it is possible to decode
- without timing information, for example a per-thread context
- that does not overlap executable memory maps.
-
- The default config selects tsc (i.e. tsc=1).
-
-noretcomp Always supported. Disables "return compression" so a TIP packet
- is produced when a function returns. Causes more packets to be
- produced but might make decoding more reliable.
-
- The default config does not select noretcomp (i.e. noretcomp=0).
-
-psb_period Allows the frequency of PSB packets to be specified.
-
- The PSB packet is a synchronization packet that provides a
- starting point for decoding or recovery from errors.
-
- Support for psb_period is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/psb_cyc
-
- which contains "1" if the feature is supported and "0"
- otherwise.
-
- Valid values are given by:
-
- /sys/bus/event_source/devices/intel_pt/caps/psb_periods
-
- which contains a hexadecimal value, the bits of which represent
- valid values e.g. bit 2 set means value 2 is valid.
-
- The psb_period value is converted to the approximate number of
- trace bytes between PSB packets as:
-
- 2 ^ (value + 11)
-
- e.g. value 3 means 16KiB bytes between PSBs
-
- If an invalid value is entered, the error message
- will give a list of valid values e.g.
-
- $ perf record -e intel_pt/psb_period=15/u uname
- Invalid psb_period for intel_pt. Valid values are: 0-5
-
- If MTC packets are selected, the default config selects a value
- of 3 (i.e. psb_period=3) or the nearest lower value that is
- supported (0 is always supported). Otherwise the default is 0.
-
- If decoding is expected to be reliable and the buffer is large
- then a large PSB period can be used.
-
- Because a TSC packet is produced with PSB, the PSB period can
- also affect the granularity to timing information in the absence
- of MTC or CYC.
-
-mtc Produces MTC timing packets.
-
- MTC packets provide finer grain timestamp information than TSC
- packets. MTC packets record time using the hardware crystal
- clock (CTC) which is related to TSC packets using a TMA packet.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/mtc
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
- The frequency of MTC packets can also be specified - see
- mtc_period below.
-
-mtc_period Specifies how frequently MTC packets are produced - see mtc
- above for how to determine if MTC packets are supported.
-
- Valid values are given by:
-
- /sys/bus/event_source/devices/intel_pt/caps/mtc_periods
-
- which contains a hexadecimal value, the bits of which represent
- valid values e.g. bit 2 set means value 2 is valid.
-
- The mtc_period value is converted to the MTC frequency as:
-
- CTC-frequency / (2 ^ value)
-
- e.g. value 3 means one eighth of CTC-frequency
-
- Where CTC is the hardware crystal clock, the frequency of which
- can be related to TSC via values provided in cpuid leaf 0x15.
-
- If an invalid value is entered, the error message
- will give a list of valid values e.g.
-
- $ perf record -e intel_pt/mtc_period=15/u uname
- Invalid mtc_period for intel_pt. Valid values are: 0,3,6,9
-
- The default value is 3 or the nearest lower value
- that is supported (0 is always supported).
-
-cyc Produces CYC timing packets.
-
- CYC packets provide even finer grain timestamp information than
- MTC and TSC packets. A CYC packet contains the number of CPU
- cycles since the last CYC packet. Unlike MTC and TSC packets,
- CYC packets are only sent when another packet is also sent.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/psb_cyc
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
- The number of CYC packets produced can be reduced by specifying
- a threshold - see cyc_thresh below.
-
-cyc_thresh Specifies how frequently CYC packets are produced - see cyc
- above for how to determine if CYC packets are supported.
-
- Valid cyc_thresh values are given by:
-
- /sys/bus/event_source/devices/intel_pt/caps/cycle_thresholds
-
- which contains a hexadecimal value, the bits of which represent
- valid values e.g. bit 2 set means value 2 is valid.
-
- The cyc_thresh value represents the minimum number of CPU cycles
- that must have passed before a CYC packet can be sent. The
- number of CPU cycles is:
-
- 2 ^ (value - 1)
-
- e.g. value 4 means 8 CPU cycles must pass before a CYC packet
- can be sent. Note a CYC packet is still only sent when another
- packet is sent, not at, e.g. every 8 CPU cycles.
-
- If an invalid value is entered, the error message
- will give a list of valid values e.g.
-
- $ perf record -e intel_pt/cyc,cyc_thresh=15/u uname
- Invalid cyc_thresh for intel_pt. Valid values are: 0-12
-
- CYC packets are not requested by default.
-
-pt Specifies pass-through which enables the 'branch' config term.
-
- The default config selects 'pt' if it is available, so a user will
- never need to specify this term.
-
-branch Enable branch tracing. Branch tracing is enabled by default so to
- disable branch tracing use 'branch=0'.
-
- The default config selects 'branch' if it is available.
-
-ptw Enable PTWRITE packets which are produced when a ptwrite instruction
- is executed.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/ptwrite
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
- As an alternative, refer to "Emulated PTWRITE" further below.
-
-fup_on_ptw Enable a FUP packet to follow the PTWRITE packet. The FUP packet
- provides the address of the ptwrite instruction. In the absence of
- fup_on_ptw, the decoder will use the address of the previous branch
- if branch tracing is enabled, otherwise the address will be zero.
- Note that fup_on_ptw will work even when branch tracing is disabled.
-
-pwr_evt Enable power events. The power events provide information about
- changes to the CPU C-state.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/power_event_trace
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
-event Enable Event Trace. The events provide information about asynchronous
- events.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/event_trace
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
-notnt Disable TNT packets. Without TNT packets, it is not possible to walk
- executable code to reconstruct control flow, however FUP, TIP, TIP.PGE
- and TIP.PGD packets still indicate asynchronous control flow, and (if
- return compression is disabled - see noretcomp) return statements.
- The advantage of eliminating TNT packets is reducing the size of the
- trace and corresponding tracing overhead.
-
- Support for this feature is indicated by:
-
- /sys/bus/event_source/devices/intel_pt/caps/tnt_disable
-
- which contains "1" if the feature is supported and
- "0" otherwise.
-
+Config terms are parameters specified with the -e intel_pt// event option,
+for example:
+
+ -e intel_pt/cyc/
+
+which selects cycle accurate mode. Each config term can have a value which
+defaults to 1, so the above is the same as:
+
+ -e intel_pt/cyc=1/
+
+Some terms are set by default, so must be set to 0 to turn them off. For
+example, to turn off branch tracing:
+
+ -e intel_pt/branch=0/
+
+Multiple config terms are separated by commas, for example:
+
+ -e intel_pt/cyc,mtc_period=9/
+
+There are also common config terms; see the linkperf:perf-record[1] documentation.
+
+Intel PT config terms are described below.
+
+*tsc*::
+Always supported. Produces TSC timestamp packets to provide
+timing information. In some cases it is possible to decode
+without timing information, for example a per-thread context
+that does not overlap executable memory maps.
++
+The default config selects tsc (i.e. tsc=1).
+
+*noretcomp*::
+Always supported. Disables "return compression" so a TIP packet
+is produced when a function returns. Causes more packets to be
+produced but might make decoding more reliable.
++
+The default config does not select noretcomp (i.e. noretcomp=0).
+
+*psb_period*::
+Allows the frequency of PSB packets to be specified.
++
+The PSB packet is a synchronization packet that provides a
+starting point for decoding or recovery from errors.
++
+Support for psb_period is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/psb_cyc
++
+which contains "1" if the feature is supported and "0"
+otherwise.
++
+Valid values are given by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/psb_periods
++
+which contains a hexadecimal value, the bits of which represent
+valid values e.g. bit 2 set means value 2 is valid.
++
+The psb_period value is converted to the approximate number of
+trace bytes between PSB packets as:
++
+ 2 ^ (value + 11)
++
+e.g. value 3 means 16KiB bytes between PSBs
++
+If an invalid value is entered, the error message
+will give a list of valid values e.g.
++
+ $ perf record -e intel_pt/psb_period=15/u uname
+ Invalid psb_period for intel_pt. Valid values are: 0-5
++
+If MTC packets are selected, the default config selects a value
+of 3 (i.e. psb_period=3) or the nearest lower value that is
+supported (0 is always supported). Otherwise the default is 0.
++
+If decoding is expected to be reliable and the buffer is large
+then a large PSB period can be used.
++
+Because a TSC packet is produced with PSB, the PSB period can
+also affect the granularity to timing information in the absence
+of MTC or CYC.
+
+*mtc*::
+Produces MTC timing packets.
++
+MTC packets provide finer grain timestamp information than TSC
+packets. MTC packets record time using the hardware crystal
+clock (CTC) which is related to TSC packets using a TMA packet.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/mtc
++
+which contains "1" if the feature is supported and
+"0" otherwise.
++
+The frequency of MTC packets can also be specified - see
+mtc_period below.
+
+*mtc_period*::
+Specifies how frequently MTC packets are produced - see mtc
+above for how to determine if MTC packets are supported.
++
+Valid values are given by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/mtc_periods
++
+which contains a hexadecimal value, the bits of which represent
+valid values e.g. bit 2 set means value 2 is valid.
++
+The mtc_period value is converted to the MTC frequency as:
++
+ CTC-frequency / (2 ^ value)
++
+e.g. value 3 means one eighth of CTC-frequency
++
+Where CTC is the hardware crystal clock, the frequency of which
+can be related to TSC via values provided in cpuid leaf 0x15.
++
+If an invalid value is entered, the error message
+will give a list of valid values e.g.
++
+ $ perf record -e intel_pt/mtc_period=15/u uname
+ Invalid mtc_period for intel_pt. Valid values are: 0,3,6,9
++
+The default value is 3 or the nearest lower value
+that is supported (0 is always supported).
+
+*cyc*::
+Produces CYC timing packets.
++
+CYC packets provide even finer grain timestamp information than
+MTC and TSC packets. A CYC packet contains the number of CPU
+cycles since the last CYC packet. Unlike MTC and TSC packets,
+CYC packets are only sent when another packet is also sent.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/psb_cyc
++
+which contains "1" if the feature is supported and
+"0" otherwise.
++
+The number of CYC packets produced can be reduced by specifying
+a threshold - see cyc_thresh below.
+
+*cyc_thresh*::
+Specifies how frequently CYC packets are produced - see cyc
+above for how to determine if CYC packets are supported.
++
+Valid cyc_thresh values are given by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/cycle_thresholds
++
+which contains a hexadecimal value, the bits of which represent
+valid values e.g. bit 2 set means value 2 is valid.
++
+The cyc_thresh value represents the minimum number of CPU cycles
+that must have passed before a CYC packet can be sent. The
+number of CPU cycles is:
++
+ 2 ^ (value - 1)
++
+e.g. value 4 means 8 CPU cycles must pass before a CYC packet
+can be sent. Note a CYC packet is still only sent when another
+packet is sent, not at, e.g. every 8 CPU cycles.
++
+If an invalid value is entered, the error message
+will give a list of valid values e.g.
++
+ $ perf record -e intel_pt/cyc,cyc_thresh=15/u uname
+ Invalid cyc_thresh for intel_pt. Valid values are: 0-12
++
+CYC packets are not requested by default.
+
+*pt*::
+Specifies pass-through which enables the 'branch' config term.
++
+The default config selects 'pt' if it is available, so a user will
+never need to specify this term.
+
+*branch*::
+Enable branch tracing. Branch tracing is enabled by default so to
+disable branch tracing use 'branch=0'.
++
+The default config selects 'branch' if it is available.
+
+*ptw*::
+Enable PTWRITE packets which are produced when a ptwrite instruction
+is executed.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/ptwrite
++
+which contains "1" if the feature is supported and
+"0" otherwise.
++
+As an alternative, refer to "Emulated PTWRITE" further below.
+
+*fup_on_ptw*::
+Enable a FUP packet to follow the PTWRITE packet. The FUP packet
+provides the address of the ptwrite instruction. In the absence of
+fup_on_ptw, the decoder will use the address of the previous branch
+if branch tracing is enabled, otherwise the address will be zero.
+Note that fup_on_ptw will work even when branch tracing is disabled.
+
+*pwr_evt*::
+Enable power events. The power events provide information about
+changes to the CPU C-state.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/power_event_trace
++
+which contains "1" if the feature is supported and
+"0" otherwise.
+
+*event*::
+Enable Event Trace. The events provide information about asynchronous
+events.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/event_trace
++
+which contains "1" if the feature is supported and
+"0" otherwise.
+
+*notnt*::
+Disable TNT packets. Without TNT packets, it is not possible to walk
+executable code to reconstruct control flow, however FUP, TIP, TIP.PGE
+and TIP.PGD packets still indicate asynchronous control flow, and (if
+return compression is disabled - see noretcomp) return statements.
+The advantage of eliminating TNT packets is reducing the size of the
+trace and corresponding tracing overhead.
++
+Support for this feature is indicated by:
++
+ /sys/bus/event_source/devices/intel_pt/caps/tnt_disable
++
+which contains "1" if the feature is supported and
+"0" otherwise.
+
+*aux-action=start-paused*::
+Start tracing paused, refer to the section <<_pause_or_resume_tracing,Pause or Resume Tracing>>
+
+
+config terms on other events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some Intel PT features work together with other events, for example AUX area sampling
+and PEBS-via-PT. In those cases, the other events accept the config terms below:
+
+*aux-sample-size*::
+ Used to set the AUX area sample size, refer to the section
+ <<_aux_area_sampling_option,AUX area sampling option>>
+
+*aux-output*::
+ Used to select PEBS-via-PT, refer to the
+ section <<_pebs_via_intel_pt,PEBS via Intel PT>>
+
+*aux-action*::
+ Used to pause or resume tracing, refer to the section
+ <<_pause_or_resume_tracing,Pause or Resume Tracing>>
AUX area sampling option
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -596,7 +649,8 @@ The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap s
nor snapshot size is specified, then the default is 4MiB for privileged users
(or if /proc/sys/kernel/perf_event_paranoid < 0), 128KiB for unprivileged users.
If an unprivileged user does not specify mmap pages, the mmap pages will be
-reduced as described in the 'new auxtrace mmap size option' section below.
+reduced as described in the <<_new_auxtrace_mmap_size_option,new auxtrace mmap size option>>
+section below.
The snapshot size is displayed if the option -vv is used e.g.
@@ -952,11 +1006,11 @@ transaction start, commit or abort.
Note that "instructions", "cycles", "branches" and "transactions" events
depend on code flow packets which can be disabled by using the config term
-"branch=0". Refer to the config terms section above.
+"branch=0". Refer to the <<_config_terms,config terms>> section above.
"ptwrite" events record the payload of the ptwrite instruction and whether
"fup_on_ptw" was used. "ptwrite" events depend on PTWRITE packets which are
-recorded only if the "ptw" config term was used. Refer to the config terms
+recorded only if the "ptw" config term was used. Refer to the <<_config_terms,config terms>>
section above. perf script "synth" field displays "ptwrite" information like
this: "ip: 0 payload: 0x123456789abcdef0" where "ip" is 1 if "fup_on_ptw" was
used.
@@ -964,7 +1018,7 @@ used.
"Power" events correspond to power event packets and CBR (core-to-bus ratio)
packets. While CBR packets are always recorded when tracing is enabled, power
event packets are recorded only if the "pwr_evt" config term was used. Refer to
-the config terms section above. The power events record information about
+the <<_config_terms,config terms>> section above. The power events record information about
C-state changes, whereas CBR is indicative of CPU frequency. perf script
"event,synth" fields display information like this:
@@ -1120,7 +1174,7 @@ What *will* be decoded with the (single) q option:
- asynchronous branches such as interrupts
- indirect branches
- function return target address *if* the noretcomp config term (refer
- config terms section) was used
+ <<_config_terms,config terms>> section) was used
- start of (control-flow) tracing
- end of (control-flow) tracing, if it is not out of context
- power events, ptwrite, transaction start and abort
@@ -1133,7 +1187,7 @@ Repeating the q option (double-q i.e. qq) results in even faster decoding and ev
less detail. The decoder decodes only extended PSB (PSB+) packets, getting the
instruction pointer if there is a FUP packet within PSB+ (i.e. between PSB and
PSBEND). Note PSB packets occur regularly in the trace based on the psb_period
-config term (refer config terms section). There will be a FUP packet if the
+config term (refer <<_config_terms,config terms>> section). There will be a FUP packet if the
PSB+ occurs while control flow is being traced.
What will *not* be decoded with the qq option:
@@ -1867,6 +1921,108 @@ For pipe mode, the order of events and timestamps can presumably
be messed up.
+Pause or Resume Tracing
+-----------------------
+
+With newer Kernels, it is possible to use other selected events to pause
+or resume Intel PT tracing. This is configured by using the "aux-action"
+config term:
+
+"aux-action=pause" is used with events that are to pause Intel PT tracing.
+
+"aux-action=resume" is used with events that are to resume Intel PT tracing.
+
+"aux-action=start-paused" is used with the Intel PT event to start in a
+paused state.
+
+For example, to trace only the uname system call (sys_newuname) when running the
+command line utility uname:
+
+ $ perf record --kcore -e intel_pt/aux-action=start-paused/k,syscalls:sys_enter_newuname/aux-action=resume/,syscalls:sys_exit_newuname/aux-action=pause/ uname
+ Linux
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.043 MB perf.data ]
+ $ perf script --call-trace
+ uname 30805 [000] 24001.058782799: name: 0x7ffc9c1865b0
+ uname 30805 [000] 24001.058784424: psb offs: 0
+ uname 30805 [000] 24001.058784424: cbr: 39 freq: 3904 MHz (139%)
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) __x64_sys_newuname
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) down_read
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) __cond_resched
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_add
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) in_lock_functions
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_sub
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) up_read
+ uname 30805 [000] 24001.058784629: ([kernel.kallsyms]) preempt_count_add
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) in_lock_functions
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) preempt_count_sub
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) _copy_to_user
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) syscall_exit_to_user_mode
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) syscall_exit_work
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) perf_syscall_exit
+ uname 30805 [000] 24001.058784838: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_trace_buf_alloc
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_swevent_get_recursion_context
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_tp_event
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_trace_buf_update
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) tracing_gen_ctx_irq_test
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_swevent_event
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __perf_event_account_interrupt
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __this_cpu_preempt_check
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_event_output_forward
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) perf_event_aux_pause
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) ring_buffer_get
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __rcu_read_lock
+ uname 30805 [000] 24001.058785046: ([kernel.kallsyms]) __rcu_read_unlock
+ uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) pt_event_stop
+ uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) debug_smp_processor_id
+ uname 30805 [000] 24001.058785254: ([kernel.kallsyms]) native_write_msr
+ uname 30805 [000] 24001.058785463: ([kernel.kallsyms]) native_write_msr
+ uname 30805 [000] 24001.058785639: 0x0
+
+The example above uses tracepoints, but any kind of sampled event can be used.
+
+For example:
+
+ Tracing between arch_cpu_idle_enter() and arch_cpu_idle_exit() using breakpoint events:
+
+ $ sudo cat /proc/kallsyms | sort | grep ' arch_cpu_idle_enter\| arch_cpu_idle_exit'
+ ffffffffb605bf60 T arch_cpu_idle_enter
+ ffffffffb614d8a0 W arch_cpu_idle_exit
+ $ sudo perf record --kcore -a -e intel_pt/aux-action=start-paused/k -e mem:0xffffffffb605bf60:x/aux-action=resume/ -e mem:0xffffffffb614d8a0:x/aux-action=pause/ -- sleep 1
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 1.387 MB perf.data ]
+
+ Tracing __alloc_pages() using kprobes:
+
+ $ sudo perf probe --add '__alloc_pages order'
+ Added new event: probe:__alloc_pages (on __alloc_pages with order)
+ $ sudo perf probe --add __alloc_pages%return
+ Added new event: probe:__alloc_pages__return (on __alloc_pages%return)
+ $ sudo perf record --kcore -aR -e intel_pt/aux-action=start-paused/k -e probe:__alloc_pages/aux-action=resume/ -e probe:__alloc_pages__return/aux-action=pause/ -- sleep 1
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 1.490 MB perf.data ]
+
+ Tracing starting at main() using a uprobe event:
+
+ $ sudo perf probe -x /usr/bin/uname main
+ Added new event: probe_uname:main (on main in /usr/bin/uname)
+ $ sudo perf record -e intel_pt/aux-action=start-paused/u -e probe_uname:main/aux-action=resume/ -- uname
+ Linux
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.031 MB perf.data ]
+
+ Tracing occasionally using cycles events with different periods:
+
+ $ perf record --kcore -a -m,64M -e intel_pt/aux-action=start-paused/k -e cycles/aux-action=pause,period=1000000/Pk -e cycles/aux-action=resume,period=10500000/Pk -- firefox
+ [ perf record: Woken up 19 times to write data ]
+ [ perf record: Captured and wrote 16.561 MB perf.data ]
+
+
EXAMPLE
-------
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 57a940399de0..d3793054f7d3 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -187,8 +187,8 @@ CONTENTION OPTIONS
Show lock contention only for given lock types (comma separated list).
Available values are:
semaphore, spinlock, rwlock, rwlock:R, rwlock:W, rwsem, rwsem:R, rwsem:W,
- rtmutex, rwlock-rt, rwlock-rt:R, rwlock-rt:W, pcpu-sem, pcpu-sem:R, pcpu-sem:W,
- mutex
+ rtmutex, rwlock-rt, rwlock-rt:R, rwlock-rt:W, percpu-rwsem, pcpu-sem,
+ pcpu-sem:R, pcpu-sem:W, mutex
Note that RW-variant of locks have :R and :W suffix. Names without the
suffix are shortcuts for the both variants. Ex) rwsem = rwsem:R + rwsem:W.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 242223240a08..80686d590de2 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -68,6 +68,10 @@ OPTIONS
like this: name=\'CPU_CLK_UNHALTED.THREAD:cmask=0x1\'.
- 'aux-output': Generate AUX records instead of events. This requires
that an AUX area event is also provided.
+ - 'aux-action': "pause" or "resume" to pause or resume an AUX
+ area event (the group leader) when this event occurs.
+ "start-paused" on an AUX area event itself, will
+ start in a paused state.
- 'aux-sample-size': Set sample size for AUX area sampling. If the
'--aux-sample' option has been used, set aux-sample-size=0 to disable
AUX area sampling for the event.
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index efcdec528a8f..32da0d1fa86a 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -28,18 +28,22 @@ OPTIONS
Tests to skip (comma separated numeric list).
-v::
+-vv::
+-vvv::
--verbose::
- Be more verbose.
+ With a single '-v', verbose level 1, only failing test output
+ is displayed. With '-vv' and higher all test output is shown.
-S::
--sequential::
- Run tests one after the other, this is the default mode.
-
--p::
---parallel::
- Run tests in parallel, speeds up the whole process but is not safe with
- the current infrastructure, where some tests that compete for some resources,
- for instance, 'perf probe' tests that add/remove probes or clean all probes, etc.
+ Run all tests one after the other. By default "exclusive"
+ tests are run sequentially, but other tests are run in
+ parallel to speed execution.
+
+-r::
+--runs-per-test::
+ Run each test the given number of times, by default once. This
+ option can be useful to determine if a test is flaky.
-F::
--dont-fork::
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 6e0cc50bbc13..fb3d2af33844 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -241,6 +241,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
printing using the existing 'perf trace' syscall arg beautifiers to map integer
arguments to strings (pid to comm, syscall id to syscall name, etc).
+--force-btf::
+ Use btf_dump to pretty print syscall argument data, instead of using hand-crafted pretty
+ printers. This option is intended for testing BTF integration in perf trace. btf_dump-based
+ pretty-printing serves as a fallback to hand-crafted pretty printers, as the latter can
+ better pretty-print integer flags and struct pointers.
PAGEFAULTS
----------
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index dc42de1785ce..364b55b00b48 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,5 +1,8 @@
+COPYING
+LICENSES/preferred/GPL-2.0
arch/arm64/tools/gen-sysreg.awk
arch/arm64/tools/sysreg
+arch/*/include/uapi/asm/bpf_perf_event.h
tools/perf
tools/arch
tools/scripts
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 2916d59c88cd..a148ca9efca9 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -28,63 +28,58 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
-ifneq ($(NO_SYSCALL_TABLE),1)
- NO_SYSCALL_TABLE := 1
-
- ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 powerpc arm64 s390 mips loongarch riscv))
- NO_SYSCALL_TABLE := 0
- endif
-
- ifneq ($(NO_SYSCALL_TABLE),1)
- CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
- endif
-endif
+CFLAGS += -I$(OUTPUT)arch/$(SRCARCH)/include/generated
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
- CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated
- LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
+ endif
endif
# Additional ARCH settings for x86
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
- CFLAGS += -I$(OUTPUT)arch/x86/include/generated
ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
- LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
+ endif
$(call detected,CONFIG_X86_64)
else
- LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
+ endif
endif
endif
ifeq ($(SRCARCH),arm)
- LIBUNWIND_LIBS = -lunwind -lunwind-arm
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind -lunwind-arm
+ endif
endif
ifeq ($(SRCARCH),arm64)
- CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
- LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
+ endif
endif
ifeq ($(SRCARCH),loongarch)
- CFLAGS += -I$(OUTPUT)arch/loongarch/include/generated
- LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64
+ endif
endif
ifeq ($(ARCH),s390)
- CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
+ CFLAGS += -fPIC
endif
ifeq ($(ARCH),mips)
- CFLAGS += -I$(OUTPUT)arch/mips/include/generated
- LIBUNWIND_LIBS = -lunwind -lunwind-mips
-endif
-
-ifeq ($(ARCH),riscv)
- CFLAGS += -I$(OUTPUT)arch/riscv/include/generated
+ ifndef NO_LIBUNWIND
+ LIBUNWIND_LIBS = -lunwind -lunwind-mips
+ endif
endif
# So far there's only x86 and arm libdw unwind support merged in perf.
@@ -121,16 +116,18 @@ ifdef LIBUNWIND_DIR
$(foreach libunwind_arch,$(LIBUNWIND_ARCHS),$(call libunwind_arch_set_flags,$(libunwind_arch)))
endif
-# Set per-feature check compilation flags
-FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
-FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
-FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
-FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
-
-FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
-FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
+ifndef NO_LIBUNWIND
+ # Set per-feature check compilation flags
+ FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+
+ FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
+ FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
+ FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
+ FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
+endif
FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
@@ -155,7 +152,7 @@ ifdef LIBDW_DIR
endif
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
- DWARFLIBS += -lelf -lz -llzma -lbz2 -lzstd
+ DWARFLIBS += -lelf -lz -llzma -lbz2
LIBDW_VERSION := $(shell $(PKG_CONFIG) --modversion libdw).0.0
LIBDW_VERSION_1 := $(word 1, $(subst ., ,$(LIBDW_VERSION)))
@@ -550,6 +547,12 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_ELF_GETSHDRSTRNDX_SUPPORT
endif
+ ifeq ($(feature-libelf-zstd), 1)
+ ifdef NO_LIBZSTD
+ $(error Error: libzstd is required by libelf, please do not set NO_LIBZSTD)
+ endif
+ endif
+
ifndef NO_LIBDEBUGINFOD
$(call feature_check,libdebuginfod)
ifeq ($(feature-libdebuginfod), 1)
@@ -734,26 +737,25 @@ ifeq ($(dwarf-post-unwind),1)
$(call detected,CONFIG_DWARF_UNWIND)
endif
-ifndef NO_LOCAL_LIBUNWIND
- ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
- $(call feature_check,libunwind-debug-frame)
- ifneq ($(feature-libunwind-debug-frame), 1)
- $(warning No debug_frame support found in libunwind)
+ifndef NO_LIBUNWIND
+ ifndef NO_LOCAL_LIBUNWIND
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
+ $(call feature_check,libunwind-debug-frame)
+ ifneq ($(feature-libunwind-debug-frame), 1)
+ $(warning No debug_frame support found in libunwind)
+ CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+ endif
+ else
+ # non-ARM has no dwarf_find_debug_frame() function:
CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
endif
- else
- # non-ARM has no dwarf_find_debug_frame() function:
- CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+ EXTLIBS += $(LIBUNWIND_LIBS)
+ LDFLAGS += $(LIBUNWIND_LIBS)
+ endif
+ ifeq ($(findstring -static,${LDFLAGS}),-static)
+ # gcc -static links libgcc_eh which contains pieces of libunwind
+ LIBUNWIND_LDFLAGS += -Wl,--allow-multiple-definition
endif
- EXTLIBS += $(LIBUNWIND_LIBS)
- LDFLAGS += $(LIBUNWIND_LIBS)
-endif
-ifeq ($(findstring -static,${LDFLAGS}),-static)
- # gcc -static links libgcc_eh which contains pieces of libunwind
- LIBUNWIND_LDFLAGS += -Wl,--allow-multiple-definition
-endif
-
-ifndef NO_LIBUNWIND
CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
CFLAGS += $(LIBUNWIND_CFLAGS)
LDFLAGS += $(LIBUNWIND_LDFLAGS)
@@ -761,21 +763,7 @@ ifndef NO_LIBUNWIND
endif
ifneq ($(NO_LIBTRACEEVENT),1)
- ifeq ($(NO_SYSCALL_TABLE),0)
- $(call detected,CONFIG_TRACE)
- else
- ifndef NO_LIBAUDIT
- $(call feature_check,libaudit)
- ifneq ($(feature-libaudit), 1)
- $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev)
- NO_LIBAUDIT := 1
- else
- CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
- EXTLIBS += -laudit
- $(call detected,CONFIG_TRACE)
- endif
- endif
- endif
+ $(call detected,CONFIG_TRACE)
endif
ifndef NO_LIBCRYPTO
@@ -1172,7 +1160,6 @@ endif
# libtraceevent is a recommended dependency picked up from the system.
ifneq ($(NO_LIBTRACEEVENT),1)
- $(call feature_check,libtraceevent)
ifeq ($(feature-libtraceevent), 1)
CFLAGS += -DHAVE_LIBTRACEEVENT $(shell $(PKG_CONFIG) --cflags libtraceevent)
LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtraceevent)
@@ -1188,7 +1175,6 @@ ifneq ($(NO_LIBTRACEEVENT),1)
$(error ERROR: libtraceevent is missing. Please install libtraceevent-dev/libtraceevent-devel and/or set LIBTRACEEVENT_DIR or build with NO_LIBTRACEEVENT=1)
endif
- $(call feature_check,libtracefs)
ifeq ($(feature-libtracefs), 1)
CFLAGS += $(shell $(PKG_CONFIG) --cflags libtracefs)
LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtracefs)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d74241a15131..55d6ce9ea52f 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -59,8 +59,6 @@ include ../scripts/utilities.mak
#
# Define NO_LIBNUMA if you do not want numa perf benchmark
#
-# Define NO_LIBAUDIT if you do not want libaudit support
-#
# Define NO_LIBBIONIC if you do not want bionic support
#
# Define NO_LIBCRYPTO if you do not want libcrypto (openssl) support
@@ -119,10 +117,6 @@ include ../scripts/utilities.mak
#
# Define LIBBPF_DYNAMIC to enable libbpf dynamic linking.
#
-# Define NO_SYSCALL_TABLE=1 to disable the use of syscall id to/from name tables
-# generated from the kernel .tbl or unistd.h files and use, if available, libaudit
-# for doing the conversions to/from strings/id.
-#
# Define NO_LIBPFM4 to disable libpfm4 events extension.
#
# Define NO_LIBDEBUGINFOD if you do not want support debuginfod
@@ -167,12 +161,47 @@ export VPATH
SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source)
endif
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Most of build commands in Kbuild start with "cmd_". You can optionally define
+# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from
+# that command is printed by default.
+#
+# e.g.)
+# quiet_cmd_depmod = DEPMOD $(MODLIB)
+# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+# $(Q)$(MAKE) $(build)=scripts/basic
+#
+# To put more focus on warnings, be less verbose as default
+# Use 'make V=1' to see the full commands
+
ifeq ($(V),1)
+ quiet =
Q =
else
- Q = @
+ quiet=quiet_
+ Q=@
endif
+# If the user is running make -s (silent mode), suppress echoing of commands
+# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
+ifeq ($(filter 3.%,$(MAKE_VERSION)),)
+short-opts := $(firstword -$(MAKEFLAGS))
+else
+short-opts := $(filter-out --%,$(MAKEFLAGS))
+endif
+
+ifneq ($(findstring s,$(short-opts)),)
+ quiet=silent_
+endif
+
+export quiet Q
+
# Do not use make's built-in rules
# (this improves performance and avoids hard-to-debug behaviour);
MAKEFLAGS += -r
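
The quiet/Q variables exported above follow the Kbuild convention, so it may help to see the whole mechanism in one place. Below is a minimal, self-contained sketch, not part of the perf build: the rule name cmd_gen and the files out.h, in.tbl and gen-table.sh are hypothetical and only illustrate the V=1 / default / "make -s" behaviour described in the comment block.

# Sketch of the Kbuild-style quiet_cmd_*/cmd_* pattern (hypothetical names).
ifeq ($(V),1)
  quiet =
  Q =
else
  quiet = quiet_
  Q = @
endif
# make -s: drop even the short log (simplified form of the short-opts check above)
ifneq ($(findstring s,$(firstword -$(MAKEFLAGS))),)
  quiet = silent_
endif

quiet_cmd_gen = GEN     $@
      cmd_gen = ./gen-table.sh $< > $@

out.h: in.tbl
	@$(if $($(quiet)cmd_gen),echo '  $($(quiet)cmd_gen)';) $(cmd_gen)

With the defaults this prints only "  GEN     out.h", "make V=1" echoes the full gen-table.sh command line, and "make -s" prints nothing because silent_cmd_gen is left undefined.
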
@@ -310,6 +339,7 @@ ifeq ($(filter feature-dump,$(MAKECMDGOALS)),feature-dump)
FEATURE_TESTS := all
endif
endif
+include $(srctree)/tools/perf/scripts/Makefile.syscalls
include Makefile.config
endif
@@ -487,6 +517,9 @@ endif
EXTLIBS := $(call filter-out,$(EXCLUDE_EXTLIBS),$(EXTLIBS))
LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
+PERFLIBS_PY := $(call filter-out,$(LIBPERF_BENCH) $(LIBPERF_TEST),$(PERFLIBS))
+LIBS_PY = -Wl,--whole-archive $(PERFLIBS_PY) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
+
export INSTALL SHELL_PATH
### Build rules
@@ -735,9 +768,9 @@ all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
# Create python binding output directory if not already present
$(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
-$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): util/python.c util/setup.py $(PERFLIBS)
+$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): util/python.c util/setup.py $(PERFLIBS_PY)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
- CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBS)' \
+ CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBS_PY)' \
$(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
@@ -1094,11 +1127,6 @@ endif
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-iostat) \
$(INSTALL) $(OUTPUT)perf-iostat -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifndef NO_LIBAUDIT
- $(call QUIET_INSTALL, strace/groups) \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
- $(INSTALL) trace/strace/groups/* -m 644 -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
-endif
ifndef NO_LIBPERL
$(call QUIET_INSTALL, perl-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
diff --git a/tools/perf/arch/alpha/entry/syscalls/Kbuild b/tools/perf/arch/alpha/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9a41e3572c3a
--- /dev/null
+++ b/tools/perf/arch/alpha/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/alpha/entry/syscalls/Makefile.syscalls b/tools/perf/arch/alpha/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..690168aac34d
--- /dev/null
+++ b/tools/perf/arch/alpha/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_64 +=
+
+syscalltbl = $(srctree)/tools/perf/arch/alpha/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/alpha/entry/syscalls/syscall.tbl b/tools/perf/arch/alpha/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..74720667fe09
--- /dev/null
+++ b/tools/perf/arch/alpha/entry/syscalls/syscall.tbl
@@ -0,0 +1,504 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for alpha
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common osf_syscall alpha_syscall_zero
+1 common exit sys_exit
+2 common fork alpha_fork
+3 common read sys_read
+4 common write sys_write
+5 common osf_old_open sys_ni_syscall
+6 common close sys_close
+7 common osf_wait4 sys_osf_wait4
+8 common osf_old_creat sys_ni_syscall
+9 common link sys_link
+10 common unlink sys_unlink
+11 common osf_execve sys_ni_syscall
+12 common chdir sys_chdir
+13 common fchdir sys_fchdir
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common chown sys_chown
+17 common brk sys_osf_brk
+18 common osf_getfsstat sys_ni_syscall
+19 common lseek sys_lseek
+20 common getxpid sys_getxpid
+21 common osf_mount sys_osf_mount
+22 common umount2 sys_umount
+23 common setuid sys_setuid
+24 common getxuid sys_getxuid
+25 common exec_with_loader sys_ni_syscall
+26 common ptrace sys_ptrace
+27 common osf_nrecvmsg sys_ni_syscall
+28 common osf_nsendmsg sys_ni_syscall
+29 common osf_nrecvfrom sys_ni_syscall
+30 common osf_naccept sys_ni_syscall
+31 common osf_ngetpeername sys_ni_syscall
+32 common osf_ngetsockname sys_ni_syscall
+33 common access sys_access
+34 common osf_chflags sys_ni_syscall
+35 common osf_fchflags sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common osf_old_stat sys_ni_syscall
+39 common setpgid sys_setpgid
+40 common osf_old_lstat sys_ni_syscall
+41 common dup sys_dup
+42 common pipe sys_alpha_pipe
+43 common osf_set_program_attributes sys_osf_set_program_attributes
+44 common osf_profil sys_ni_syscall
+45 common open sys_open
+46 common osf_old_sigaction sys_ni_syscall
+47 common getxgid sys_getxgid
+48 common osf_sigprocmask sys_osf_sigprocmask
+49 common osf_getlogin sys_ni_syscall
+50 common osf_setlogin sys_ni_syscall
+51 common acct sys_acct
+52 common sigpending sys_sigpending
+54 common ioctl sys_ioctl
+55 common osf_reboot sys_ni_syscall
+56 common osf_revoke sys_ni_syscall
+57 common symlink sys_symlink
+58 common readlink sys_readlink
+59 common execve sys_execve
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common osf_old_fstat sys_ni_syscall
+63 common getpgrp sys_getpgrp
+64 common getpagesize sys_getpagesize
+65 common osf_mremap sys_ni_syscall
+66 common vfork alpha_vfork
+67 common stat sys_newstat
+68 common lstat sys_newlstat
+69 common osf_sbrk sys_ni_syscall
+70 common osf_sstk sys_ni_syscall
+71 common mmap sys_osf_mmap
+72 common osf_old_vadvise sys_ni_syscall
+73 common munmap sys_munmap
+74 common mprotect sys_mprotect
+75 common madvise sys_madvise
+76 common vhangup sys_vhangup
+77 common osf_kmodcall sys_ni_syscall
+78 common osf_mincore sys_ni_syscall
+79 common getgroups sys_getgroups
+80 common setgroups sys_setgroups
+81 common osf_old_getpgrp sys_ni_syscall
+82 common setpgrp sys_setpgid
+83 common osf_setitimer compat_sys_setitimer
+84 common osf_old_wait sys_ni_syscall
+85 common osf_table sys_ni_syscall
+86 common osf_getitimer compat_sys_getitimer
+87 common gethostname sys_gethostname
+88 common sethostname sys_sethostname
+89 common getdtablesize sys_getdtablesize
+90 common dup2 sys_dup2
+91 common fstat sys_newfstat
+92 common fcntl sys_fcntl
+93 common osf_select sys_osf_select
+94 common poll sys_poll
+95 common fsync sys_fsync
+96 common setpriority sys_setpriority
+97 common socket sys_socket
+98 common connect sys_connect
+99 common accept sys_accept
+100 common getpriority sys_osf_getpriority
+101 common send sys_send
+102 common recv sys_recv
+103 common sigreturn sys_sigreturn
+104 common bind sys_bind
+105 common setsockopt sys_setsockopt
+106 common listen sys_listen
+107 common osf_plock sys_ni_syscall
+108 common osf_old_sigvec sys_ni_syscall
+109 common osf_old_sigblock sys_ni_syscall
+110 common osf_old_sigsetmask sys_ni_syscall
+111 common sigsuspend sys_sigsuspend
+112 common osf_sigstack sys_osf_sigstack
+113 common recvmsg sys_recvmsg
+114 common sendmsg sys_sendmsg
+115 common osf_old_vtrace sys_ni_syscall
+116 common osf_gettimeofday sys_osf_gettimeofday
+117 common osf_getrusage sys_osf_getrusage
+118 common getsockopt sys_getsockopt
+120 common readv sys_readv
+121 common writev sys_writev
+122 common osf_settimeofday sys_osf_settimeofday
+123 common fchown sys_fchown
+124 common fchmod sys_fchmod
+125 common recvfrom sys_recvfrom
+126 common setreuid sys_setreuid
+127 common setregid sys_setregid
+128 common rename sys_rename
+129 common truncate sys_truncate
+130 common ftruncate sys_ftruncate
+131 common flock sys_flock
+132 common setgid sys_setgid
+133 common sendto sys_sendto
+134 common shutdown sys_shutdown
+135 common socketpair sys_socketpair
+136 common mkdir sys_mkdir
+137 common rmdir sys_rmdir
+138 common osf_utimes sys_osf_utimes
+139 common osf_old_sigreturn sys_ni_syscall
+140 common osf_adjtime sys_ni_syscall
+141 common getpeername sys_getpeername
+142 common osf_gethostid sys_ni_syscall
+143 common osf_sethostid sys_ni_syscall
+144 common getrlimit sys_getrlimit
+145 common setrlimit sys_setrlimit
+146 common osf_old_killpg sys_ni_syscall
+147 common setsid sys_setsid
+148 common quotactl sys_quotactl
+149 common osf_oldquota sys_ni_syscall
+150 common getsockname sys_getsockname
+153 common osf_pid_block sys_ni_syscall
+154 common osf_pid_unblock sys_ni_syscall
+156 common sigaction sys_osf_sigaction
+157 common osf_sigwaitprim sys_ni_syscall
+158 common osf_nfssvc sys_ni_syscall
+159 common osf_getdirentries sys_osf_getdirentries
+160 common osf_statfs sys_osf_statfs
+161 common osf_fstatfs sys_osf_fstatfs
+163 common osf_asynch_daemon sys_ni_syscall
+164 common osf_getfh sys_ni_syscall
+165 common osf_getdomainname sys_osf_getdomainname
+166 common setdomainname sys_setdomainname
+169 common osf_exportfs sys_ni_syscall
+181 common osf_alt_plock sys_ni_syscall
+184 common osf_getmnt sys_ni_syscall
+187 common osf_alt_sigpending sys_ni_syscall
+188 common osf_alt_setsid sys_ni_syscall
+199 common osf_swapon sys_swapon
+200 common msgctl sys_old_msgctl
+201 common msgget sys_msgget
+202 common msgrcv sys_msgrcv
+203 common msgsnd sys_msgsnd
+204 common semctl sys_old_semctl
+205 common semget sys_semget
+206 common semop sys_semop
+207 common osf_utsname sys_osf_utsname
+208 common lchown sys_lchown
+209 common shmat sys_shmat
+210 common shmctl sys_old_shmctl
+211 common shmdt sys_shmdt
+212 common shmget sys_shmget
+213 common osf_mvalid sys_ni_syscall
+214 common osf_getaddressconf sys_ni_syscall
+215 common osf_msleep sys_ni_syscall
+216 common osf_mwakeup sys_ni_syscall
+217 common msync sys_msync
+218 common osf_signal sys_ni_syscall
+219 common osf_utc_gettime sys_ni_syscall
+220 common osf_utc_adjtime sys_ni_syscall
+222 common osf_security sys_ni_syscall
+223 common osf_kloadcall sys_ni_syscall
+224 common osf_stat sys_osf_stat
+225 common osf_lstat sys_osf_lstat
+226 common osf_fstat sys_osf_fstat
+227 common osf_statfs64 sys_osf_statfs64
+228 common osf_fstatfs64 sys_osf_fstatfs64
+233 common getpgid sys_getpgid
+234 common getsid sys_getsid
+235 common sigaltstack sys_sigaltstack
+236 common osf_waitid sys_ni_syscall
+237 common osf_priocntlset sys_ni_syscall
+238 common osf_sigsendset sys_ni_syscall
+239 common osf_set_speculative sys_ni_syscall
+240 common osf_msfs_syscall sys_ni_syscall
+241 common osf_sysinfo sys_osf_sysinfo
+242 common osf_uadmin sys_ni_syscall
+243 common osf_fuser sys_ni_syscall
+244 common osf_proplist_syscall sys_osf_proplist_syscall
+245 common osf_ntp_adjtime sys_ni_syscall
+246 common osf_ntp_gettime sys_ni_syscall
+247 common osf_pathconf sys_ni_syscall
+248 common osf_fpathconf sys_ni_syscall
+250 common osf_uswitch sys_ni_syscall
+251 common osf_usleep_thread sys_osf_usleep_thread
+252 common osf_audcntl sys_ni_syscall
+253 common osf_audgen sys_ni_syscall
+254 common sysfs sys_sysfs
+255 common osf_subsys_info sys_ni_syscall
+256 common osf_getsysinfo sys_osf_getsysinfo
+257 common osf_setsysinfo sys_osf_setsysinfo
+258 common osf_afs_syscall sys_ni_syscall
+259 common osf_swapctl sys_ni_syscall
+260 common osf_memcntl sys_ni_syscall
+261 common osf_fdatasync sys_ni_syscall
+300 common bdflush sys_ni_syscall
+301 common sethae sys_sethae
+302 common mount sys_mount
+303 common old_adjtimex sys_old_adjtimex
+304 common swapoff sys_swapoff
+305 common getdents sys_getdents
+306 common create_module sys_ni_syscall
+307 common init_module sys_init_module
+308 common delete_module sys_delete_module
+309 common get_kernel_syms sys_ni_syscall
+310 common syslog sys_syslog
+311 common reboot sys_reboot
+312 common clone alpha_clone
+313 common uselib sys_uselib
+314 common mlock sys_mlock
+315 common munlock sys_munlock
+316 common mlockall sys_mlockall
+317 common munlockall sys_munlockall
+318 common sysinfo sys_sysinfo
+319 common _sysctl sys_ni_syscall
+# 320 was sys_idle
+321 common oldumount sys_oldumount
+322 common swapon sys_swapon
+323 common times sys_times
+324 common personality sys_personality
+325 common setfsuid sys_setfsuid
+326 common setfsgid sys_setfsgid
+327 common ustat sys_ustat
+328 common statfs sys_statfs
+329 common fstatfs sys_fstatfs
+330 common sched_setparam sys_sched_setparam
+331 common sched_getparam sys_sched_getparam
+332 common sched_setscheduler sys_sched_setscheduler
+333 common sched_getscheduler sys_sched_getscheduler
+334 common sched_yield sys_sched_yield
+335 common sched_get_priority_max sys_sched_get_priority_max
+336 common sched_get_priority_min sys_sched_get_priority_min
+337 common sched_rr_get_interval sys_sched_rr_get_interval
+338 common afs_syscall sys_ni_syscall
+339 common uname sys_newuname
+340 common nanosleep sys_nanosleep
+341 common mremap sys_mremap
+342 common nfsservctl sys_ni_syscall
+343 common setresuid sys_setresuid
+344 common getresuid sys_getresuid
+345 common pciconfig_read sys_pciconfig_read
+346 common pciconfig_write sys_pciconfig_write
+347 common query_module sys_ni_syscall
+348 common prctl sys_prctl
+349 common pread64 sys_pread64
+350 common pwrite64 sys_pwrite64
+351 common rt_sigreturn sys_rt_sigreturn
+352 common rt_sigaction sys_rt_sigaction
+353 common rt_sigprocmask sys_rt_sigprocmask
+354 common rt_sigpending sys_rt_sigpending
+355 common rt_sigtimedwait sys_rt_sigtimedwait
+356 common rt_sigqueueinfo sys_rt_sigqueueinfo
+357 common rt_sigsuspend sys_rt_sigsuspend
+358 common select sys_select
+359 common gettimeofday sys_gettimeofday
+360 common settimeofday sys_settimeofday
+361 common getitimer sys_getitimer
+362 common setitimer sys_setitimer
+363 common utimes sys_utimes
+364 common getrusage sys_getrusage
+365 common wait4 sys_wait4
+366 common adjtimex sys_adjtimex
+367 common getcwd sys_getcwd
+368 common capget sys_capget
+369 common capset sys_capset
+370 common sendfile sys_sendfile64
+371 common setresgid sys_setresgid
+372 common getresgid sys_getresgid
+373 common dipc sys_ni_syscall
+374 common pivot_root sys_pivot_root
+375 common mincore sys_mincore
+376 common pciconfig_iobase sys_pciconfig_iobase
+377 common getdents64 sys_getdents64
+378 common gettid sys_gettid
+379 common readahead sys_readahead
+# 380 is unused
+381 common tkill sys_tkill
+382 common setxattr sys_setxattr
+383 common lsetxattr sys_lsetxattr
+384 common fsetxattr sys_fsetxattr
+385 common getxattr sys_getxattr
+386 common lgetxattr sys_lgetxattr
+387 common fgetxattr sys_fgetxattr
+388 common listxattr sys_listxattr
+389 common llistxattr sys_llistxattr
+390 common flistxattr sys_flistxattr
+391 common removexattr sys_removexattr
+392 common lremovexattr sys_lremovexattr
+393 common fremovexattr sys_fremovexattr
+394 common futex sys_futex
+395 common sched_setaffinity sys_sched_setaffinity
+396 common sched_getaffinity sys_sched_getaffinity
+397 common tuxcall sys_ni_syscall
+398 common io_setup sys_io_setup
+399 common io_destroy sys_io_destroy
+400 common io_getevents sys_io_getevents
+401 common io_submit sys_io_submit
+402 common io_cancel sys_io_cancel
+405 common exit_group sys_exit_group
+406 common lookup_dcookie sys_ni_syscall
+407 common epoll_create sys_epoll_create
+408 common epoll_ctl sys_epoll_ctl
+409 common epoll_wait sys_epoll_wait
+410 common remap_file_pages sys_remap_file_pages
+411 common set_tid_address sys_set_tid_address
+412 common restart_syscall sys_restart_syscall
+413 common fadvise64 sys_fadvise64
+414 common timer_create sys_timer_create
+415 common timer_settime sys_timer_settime
+416 common timer_gettime sys_timer_gettime
+417 common timer_getoverrun sys_timer_getoverrun
+418 common timer_delete sys_timer_delete
+419 common clock_settime sys_clock_settime
+420 common clock_gettime sys_clock_gettime
+421 common clock_getres sys_clock_getres
+422 common clock_nanosleep sys_clock_nanosleep
+423 common semtimedop sys_semtimedop
+424 common tgkill sys_tgkill
+425 common stat64 sys_stat64
+426 common lstat64 sys_lstat64
+427 common fstat64 sys_fstat64
+428 common vserver sys_ni_syscall
+429 common mbind sys_ni_syscall
+430 common get_mempolicy sys_ni_syscall
+431 common set_mempolicy sys_ni_syscall
+432 common mq_open sys_mq_open
+433 common mq_unlink sys_mq_unlink
+434 common mq_timedsend sys_mq_timedsend
+435 common mq_timedreceive sys_mq_timedreceive
+436 common mq_notify sys_mq_notify
+437 common mq_getsetattr sys_mq_getsetattr
+438 common waitid sys_waitid
+439 common add_key sys_add_key
+440 common request_key sys_request_key
+441 common keyctl sys_keyctl
+442 common ioprio_set sys_ioprio_set
+443 common ioprio_get sys_ioprio_get
+444 common inotify_init sys_inotify_init
+445 common inotify_add_watch sys_inotify_add_watch
+446 common inotify_rm_watch sys_inotify_rm_watch
+447 common fdatasync sys_fdatasync
+448 common kexec_load sys_kexec_load
+449 common migrate_pages sys_migrate_pages
+450 common openat sys_openat
+451 common mkdirat sys_mkdirat
+452 common mknodat sys_mknodat
+453 common fchownat sys_fchownat
+454 common futimesat sys_futimesat
+455 common fstatat64 sys_fstatat64
+456 common unlinkat sys_unlinkat
+457 common renameat sys_renameat
+458 common linkat sys_linkat
+459 common symlinkat sys_symlinkat
+460 common readlinkat sys_readlinkat
+461 common fchmodat sys_fchmodat
+462 common faccessat sys_faccessat
+463 common pselect6 sys_pselect6
+464 common ppoll sys_ppoll
+465 common unshare sys_unshare
+466 common set_robust_list sys_set_robust_list
+467 common get_robust_list sys_get_robust_list
+468 common splice sys_splice
+469 common sync_file_range sys_sync_file_range
+470 common tee sys_tee
+471 common vmsplice sys_vmsplice
+472 common move_pages sys_move_pages
+473 common getcpu sys_getcpu
+474 common epoll_pwait sys_epoll_pwait
+475 common utimensat sys_utimensat
+476 common signalfd sys_signalfd
+477 common timerfd sys_ni_syscall
+478 common eventfd sys_eventfd
+479 common recvmmsg sys_recvmmsg
+480 common fallocate sys_fallocate
+481 common timerfd_create sys_timerfd_create
+482 common timerfd_settime sys_timerfd_settime
+483 common timerfd_gettime sys_timerfd_gettime
+484 common signalfd4 sys_signalfd4
+485 common eventfd2 sys_eventfd2
+486 common epoll_create1 sys_epoll_create1
+487 common dup3 sys_dup3
+488 common pipe2 sys_pipe2
+489 common inotify_init1 sys_inotify_init1
+490 common preadv sys_preadv
+491 common pwritev sys_pwritev
+492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+493 common perf_event_open sys_perf_event_open
+494 common fanotify_init sys_fanotify_init
+495 common fanotify_mark sys_fanotify_mark
+496 common prlimit64 sys_prlimit64
+497 common name_to_handle_at sys_name_to_handle_at
+498 common open_by_handle_at sys_open_by_handle_at
+499 common clock_adjtime sys_clock_adjtime
+500 common syncfs sys_syncfs
+501 common setns sys_setns
+502 common accept4 sys_accept4
+503 common sendmmsg sys_sendmmsg
+504 common process_vm_readv sys_process_vm_readv
+505 common process_vm_writev sys_process_vm_writev
+506 common kcmp sys_kcmp
+507 common finit_module sys_finit_module
+508 common sched_setattr sys_sched_setattr
+509 common sched_getattr sys_sched_getattr
+510 common renameat2 sys_renameat2
+511 common getrandom sys_getrandom
+512 common memfd_create sys_memfd_create
+513 common execveat sys_execveat
+514 common seccomp sys_seccomp
+515 common bpf sys_bpf
+516 common userfaultfd sys_userfaultfd
+517 common membarrier sys_membarrier
+518 common mlock2 sys_mlock2
+519 common copy_file_range sys_copy_file_range
+520 common preadv2 sys_preadv2
+521 common pwritev2 sys_pwritev2
+522 common statx sys_statx
+523 common io_pgetevents sys_io_pgetevents
+524 common pkey_mprotect sys_pkey_mprotect
+525 common pkey_alloc sys_pkey_alloc
+526 common pkey_free sys_pkey_free
+527 common rseq sys_rseq
+528 common statfs64 sys_statfs64
+529 common fstatfs64 sys_fstatfs64
+530 common getegid sys_getegid
+531 common geteuid sys_geteuid
+532 common getppid sys_getppid
+# all other architectures have common numbers for new syscall, alpha
+# is the exception.
+534 common pidfd_send_signal sys_pidfd_send_signal
+535 common io_uring_setup sys_io_uring_setup
+536 common io_uring_enter sys_io_uring_enter
+537 common io_uring_register sys_io_uring_register
+538 common open_tree sys_open_tree
+539 common move_mount sys_move_mount
+540 common fsopen sys_fsopen
+541 common fsconfig sys_fsconfig
+542 common fsmount sys_fsmount
+543 common fspick sys_fspick
+544 common pidfd_open sys_pidfd_open
+545 common clone3 alpha_clone3
+546 common close_range sys_close_range
+547 common openat2 sys_openat2
+548 common pidfd_getfd sys_pidfd_getfd
+549 common faccessat2 sys_faccessat2
+550 common process_madvise sys_process_madvise
+551 common epoll_pwait2 sys_epoll_pwait2
+552 common mount_setattr sys_mount_setattr
+553 common quotactl_fd sys_quotactl_fd
+554 common landlock_create_ruleset sys_landlock_create_ruleset
+555 common landlock_add_rule sys_landlock_add_rule
+556 common landlock_restrict_self sys_landlock_restrict_self
+# 557 reserved for memfd_secret
+558 common process_mrelease sys_process_mrelease
+559 common futex_waitv sys_futex_waitv
+560 common set_mempolicy_home_node sys_ni_syscall
+561 common cachestat sys_cachestat
+562 common fchmodat2 sys_fchmodat2
+563 common map_shadow_stack sys_map_shadow_stack
+564 common futex_wake sys_futex_wake
+565 common futex_wait sys_futex_wait
+566 common futex_requeue sys_futex_requeue
+567 common statmount sys_statmount
+568 common listmount sys_listmount
+569 common lsm_get_self_attr sys_lsm_get_self_attr
+570 common lsm_set_self_attr sys_lsm_set_self_attr
+571 common lsm_list_modules sys_lsm_list_modules
+572 common mseal sys_mseal
diff --git a/tools/perf/arch/alpha/include/syscall_table.h b/tools/perf/arch/alpha/include/syscall_table.h
new file mode 100644
index 000000000000..b53e31c15805
--- /dev/null
+++ b/tools/perf/arch/alpha/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_64.h>
diff --git a/tools/perf/arch/arc/entry/syscalls/Kbuild b/tools/perf/arch/arc/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..11707c481a24
--- /dev/null
+++ b/tools/perf/arch/arc/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
diff --git a/tools/perf/arch/arc/entry/syscalls/Makefile.syscalls b/tools/perf/arch/arc/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..391d30ab7a83
--- /dev/null
+++ b/tools/perf/arch/arc/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += arc time32 renameat stat64 rlimit
diff --git a/tools/perf/arch/arc/include/syscall_table.h b/tools/perf/arch/arc/include/syscall_table.h
new file mode 100644
index 000000000000..4c942821662d
--- /dev/null
+++ b/tools/perf/arch/arc/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_32.h>
diff --git a/tools/perf/arch/arm/entry/syscalls/Kbuild b/tools/perf/arch/arm/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9d777540f089
--- /dev/null
+++ b/tools/perf/arch/arm/entry/syscalls/Kbuild
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += oabi
+syscalltbl = $(srctree)/tools/perf/arch/arm/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/arm/entry/syscalls/Makefile.syscalls b/tools/perf/arch/arm/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..11707c481a24
--- /dev/null
+++ b/tools/perf/arch/arm/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
diff --git a/tools/perf/arch/arm/entry/syscalls/syscall.tbl b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..49eeb2ad8dbd
--- /dev/null
+++ b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
@@ -0,0 +1,483 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# Linux system call numbers and entry vectors
+#
+# The format is:
+# <num> <abi> <name> [<entry point> [<oabi compat entry point>]]
+#
+# Where abi is:
+# common - for system calls shared between oabi and eabi (may have compat)
+# oabi - for oabi-only system calls (may have compat)
+# eabi - for eabi-only system calls
+#
+# For each syscall number, "common" is mutually exclusive with oabi and eabi
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+# 7 was sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 oabi time sys_time32
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown16
+# 17 was sys_break
+# 18 was sys_stat
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 oabi umount sys_oldumount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+25 oabi stime sys_stime32
+26 common ptrace sys_ptrace
+27 oabi alarm sys_alarm
+# 28 was sys_fstat
+29 common pause sys_pause
+30 oabi utime sys_utime32
+# 31 was sys_stty
+# 32 was sys_gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was sys_ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+# 44 was sys_prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+# 48 was sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was sys_lock
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+# 56 was sys_mpx
+57 common setpgid sys_setpgid
+# 58 was sys_ulimit
+# 59 was sys_olduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+# 68 was sys_sgetmask
+# 69 was sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+# Back compat 2GB limited rlimit
+76 oabi getrlimit sys_old_getrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+82 oabi select sys_old_select
+83 common symlink sys_symlink
+# 84 was sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 oabi readdir sys_old_readdir
+90 oabi mmap sys_old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was sys_profil
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+# 101 was sys_ioperm
+102 oabi socketcall sys_socketcall sys_oabi_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+# 109 was sys_uname
+# 110 was sys_iopl
+111 common vhangup sys_vhangup
+# 112 was sys_idle
+# syscall to call a syscall!
+113 oabi syscall sys_syscall
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 oabi ipc sys_ipc sys_oabi_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn_wrapper
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+# 123 was sys_modify_ldt
+124 common adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+# 127 was sys_create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was sys_get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was sys_afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+# 166 was sys_vm86
+# 167 was sys_query_module
+168 common poll sys_poll
+169 common nfsservctl
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64 sys_oabi_pread64
+181 common pwrite64 sys_pwrite64 sys_oabi_pwrite64
+182 common chown sys_chown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile
+# 188 reserved
+# 189 reserved
+190 common vfork sys_vfork
+# SuS compliant getrlimit
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64 sys_oabi_truncate64
+194 common ftruncate64 sys_ftruncate64 sys_oabi_ftruncate64
+195 common stat64 sys_stat64 sys_oabi_stat64
+196 common lstat64 sys_lstat64 sys_oabi_lstat64
+197 common fstat64 sys_fstat64 sys_oabi_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common getdents64 sys_getdents64
+218 common pivot_root sys_pivot_root
+219 common mincore sys_mincore
+220 common madvise sys_madvise
+221 common fcntl64 sys_fcntl64 sys_oabi_fcntl64
+# 222 for tux
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead sys_oabi_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex_time32
+241 common sched_setaffinity sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity
+243 common io_setup sys_io_setup
+244 common io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents_time32
+246 common io_submit sys_io_submit
+247 common io_cancel sys_io_cancel
+248 common exit_group sys_exit_group
+249 common lookup_dcookie sys_ni_syscall
+250 common epoll_create sys_epoll_create
+251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl
+252 common epoll_wait sys_epoll_wait
+253 common remap_file_pages sys_remap_file_pages
+# 254 for set_thread_area
+# 255 for get_thread_area
+256 common set_tid_address sys_set_tid_address
+257 common timer_create sys_timer_create
+258 common timer_settime sys_timer_settime32
+259 common timer_gettime sys_timer_gettime32
+260 common timer_getoverrun sys_timer_getoverrun
+261 common timer_delete sys_timer_delete
+262 common clock_settime sys_clock_settime32
+263 common clock_gettime sys_clock_gettime32
+264 common clock_getres sys_clock_getres_time32
+265 common clock_nanosleep sys_clock_nanosleep_time32
+266 common statfs64 sys_statfs64_wrapper
+267 common fstatfs64 sys_fstatfs64_wrapper
+268 common tgkill sys_tgkill
+269 common utimes sys_utimes_time32
+270 common arm_fadvise64_64 sys_arm_fadvise64_64
+271 common pciconfig_iobase sys_pciconfig_iobase
+272 common pciconfig_read sys_pciconfig_read
+273 common pciconfig_write sys_pciconfig_write
+274 common mq_open sys_mq_open
+275 common mq_unlink sys_mq_unlink
+276 common mq_timedsend sys_mq_timedsend_time32
+277 common mq_timedreceive sys_mq_timedreceive_time32
+278 common mq_notify sys_mq_notify
+279 common mq_getsetattr sys_mq_getsetattr
+280 common waitid sys_waitid
+281 common socket sys_socket
+282 common bind sys_bind sys_oabi_bind
+283 common connect sys_connect sys_oabi_connect
+284 common listen sys_listen
+285 common accept sys_accept
+286 common getsockname sys_getsockname
+287 common getpeername sys_getpeername
+288 common socketpair sys_socketpair
+289 common send sys_send
+290 common sendto sys_sendto sys_oabi_sendto
+291 common recv sys_recv
+292 common recvfrom sys_recvfrom
+293 common shutdown sys_shutdown
+294 common setsockopt sys_setsockopt
+295 common getsockopt sys_getsockopt
+296 common sendmsg sys_sendmsg sys_oabi_sendmsg
+297 common recvmsg sys_recvmsg
+298 common semop sys_semop sys_oabi_semop
+299 common semget sys_semget
+300 common semctl sys_old_semctl
+301 common msgsnd sys_msgsnd
+302 common msgrcv sys_msgrcv
+303 common msgget sys_msgget
+304 common msgctl sys_old_msgctl
+305 common shmat sys_shmat
+306 common shmdt sys_shmdt
+307 common shmget sys_shmget
+308 common shmctl sys_old_shmctl
+309 common add_key sys_add_key
+310 common request_key sys_request_key
+311 common keyctl sys_keyctl
+312 common semtimedop sys_semtimedop_time32 sys_oabi_semtimedop
+313 common vserver
+314 common ioprio_set sys_ioprio_set
+315 common ioprio_get sys_ioprio_get
+316 common inotify_init sys_inotify_init
+317 common inotify_add_watch sys_inotify_add_watch
+318 common inotify_rm_watch sys_inotify_rm_watch
+319 common mbind sys_mbind
+320 common get_mempolicy sys_get_mempolicy
+321 common set_mempolicy sys_set_mempolicy
+322 common openat sys_openat
+323 common mkdirat sys_mkdirat
+324 common mknodat sys_mknodat
+325 common fchownat sys_fchownat
+326 common futimesat sys_futimesat_time32
+327 common fstatat64 sys_fstatat64 sys_oabi_fstatat64
+328 common unlinkat sys_unlinkat
+329 common renameat sys_renameat
+330 common linkat sys_linkat
+331 common symlinkat sys_symlinkat
+332 common readlinkat sys_readlinkat
+333 common fchmodat sys_fchmodat
+334 common faccessat sys_faccessat
+335 common pselect6 sys_pselect6_time32
+336 common ppoll sys_ppoll_time32
+337 common unshare sys_unshare
+338 common set_robust_list sys_set_robust_list
+339 common get_robust_list sys_get_robust_list
+340 common splice sys_splice
+341 common arm_sync_file_range sys_sync_file_range2
+342 common tee sys_tee
+343 common vmsplice sys_vmsplice
+344 common move_pages sys_move_pages
+345 common getcpu sys_getcpu
+346 common epoll_pwait sys_epoll_pwait
+347 common kexec_load sys_kexec_load
+348 common utimensat sys_utimensat_time32
+349 common signalfd sys_signalfd
+350 common timerfd_create sys_timerfd_create
+351 common eventfd sys_eventfd
+352 common fallocate sys_fallocate
+353 common timerfd_settime sys_timerfd_settime32
+354 common timerfd_gettime sys_timerfd_gettime32
+355 common signalfd4 sys_signalfd4
+356 common eventfd2 sys_eventfd2
+357 common epoll_create1 sys_epoll_create1
+358 common dup3 sys_dup3
+359 common pipe2 sys_pipe2
+360 common inotify_init1 sys_inotify_init1
+361 common preadv sys_preadv
+362 common pwritev sys_pwritev
+363 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+364 common perf_event_open sys_perf_event_open
+365 common recvmmsg sys_recvmmsg_time32
+366 common accept4 sys_accept4
+367 common fanotify_init sys_fanotify_init
+368 common fanotify_mark sys_fanotify_mark
+369 common prlimit64 sys_prlimit64
+370 common name_to_handle_at sys_name_to_handle_at
+371 common open_by_handle_at sys_open_by_handle_at
+372 common clock_adjtime sys_clock_adjtime32
+373 common syncfs sys_syncfs
+374 common sendmmsg sys_sendmmsg
+375 common setns sys_setns
+376 common process_vm_readv sys_process_vm_readv
+377 common process_vm_writev sys_process_vm_writev
+378 common kcmp sys_kcmp
+379 common finit_module sys_finit_module
+380 common sched_setattr sys_sched_setattr
+381 common sched_getattr sys_sched_getattr
+382 common renameat2 sys_renameat2
+383 common seccomp sys_seccomp
+384 common getrandom sys_getrandom
+385 common memfd_create sys_memfd_create
+386 common bpf sys_bpf
+387 common execveat sys_execveat
+388 common userfaultfd sys_userfaultfd
+389 common membarrier sys_membarrier
+390 common mlock2 sys_mlock2
+391 common copy_file_range sys_copy_file_range
+392 common preadv2 sys_preadv2
+393 common pwritev2 sys_pwritev2
+394 common pkey_mprotect sys_pkey_mprotect
+395 common pkey_alloc sys_pkey_alloc
+396 common pkey_free sys_pkey_free
+397 common statx sys_statx
+398 common rseq sys_rseq
+399 common io_pgetevents sys_io_pgetevents_time32
+400 common migrate_pages sys_migrate_pages
+401 common kexec_file_load sys_kexec_file_load
+# 402 is unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
diff --git a/tools/perf/arch/arm/include/syscall_table.h b/tools/perf/arch/arm/include/syscall_table.h
new file mode 100644
index 000000000000..4c942821662d
--- /dev/null
+++ b/tools/perf/arch/arm/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_32.h>
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index 91570d5d428e..087e099fb453 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -1,25 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
HAVE_KVM_STAT_SUPPORT := 1
-
-#
-# Syscall table generation for perf
-#
-
-out := $(OUTPUT)arch/arm64/include/generated/asm
-header := $(out)/syscalls.c
-incpath := $(srctree)/tools
-sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, arm64) $(RM) $(header)
-
-archheaders: $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/Kbuild b/tools/perf/arch/arm64/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..84c6599b4ea6
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/arm64/entry/syscalls/Makefile.syscalls b/tools/perf/arch/arm64/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..e7e78c2d1c02
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscall_abis_64 += renameat rlimit memfd_secret
+
+syscalltbl = $(srctree)/tools/perf/arch/arm64/entry/syscalls/syscall_%.tbl
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
deleted file mode 100755
index 27d747c92d44..000000000000
--- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf. Derived from
-# powerpc script.
-#
-# Copyright IBM Corp. 2017
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
-# Changed by: Kim Phillips <kim.phillips@arm.com>
-
-gcc=$1
-hostcc=$2
-incpath=$3
-input=$4
-
-if ! test -r $input; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_sc_table()
-{
- local sc nr max_nr
-
- while read sc nr; do
- printf "%s\n" " [$nr] = \"$sc\","
- max_nr=$nr
- done
-
- echo "#define SYSCALLTBL_ARM64_MAX_ID $max_nr"
-}
-
-create_table()
-{
- echo "#include \"$input\""
- echo "static const char *const syscalltbl_arm64[] = {"
- create_sc_table
- echo "};"
-}
-
-$gcc -E -dM -x c -I $incpath/include/uapi $input \
- |awk '$2 ~ "__NR" && $3 !~ "__NR3264_" {
- sub("^#define __NR(3264)?_", "");
- print | "sort -k2 -n"}' \
- |create_table
diff --git a/tools/perf/arch/arm64/entry/syscalls/syscall_32.tbl b/tools/perf/arch/arm64/entry/syscalls/syscall_32.tbl
new file mode 100644
index 000000000000..9a37930d4e26
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/syscall_32.tbl
@@ -0,0 +1,476 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AArch32 (compat) system call definitions.
+#
+# Copyright (C) 2001-2005 Russell King
+# Copyright (C) 2012 ARM Ltd.
+#
+# This file corresponds to arch/arm/tools/syscall.tbl
+# for the native EABI syscalls and should be kept in sync
+# Instead of the OABI syscalls, it contains pointers to
+# the compat entry points where they differ from the native
+# syscalls.
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+# 7 was sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+# 13 was sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown16
+# 17 was sys_break
+# 18 was sys_stat
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+# 22 was sys_umount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+# 25 was sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+# 27 was sys_alarm
+# 28 was sys_fstat
+29 common pause sys_pause
+# 30 was sys_utime
+# 31 was sys_stty
+# 32 was sys_gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was sys_ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+# 44 was sys_prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+# 48 was sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was sys_lock
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+# 56 was sys_mpx
+57 common setpgid sys_setpgid
+# 58 was sys_ulimit
+# 59 was sys_olduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction compat_sys_sigaction
+# 68 was sys_sgetmask
+# 69 was sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+# 76 was compat_sys_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+# 82 was compat_sys_select
+83 common symlink sys_symlink
+# 84 was sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+# 89 was sys_readdir
+# 90 was sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was sys_profil
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+# 101 was sys_ioperm
+# 102 was sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+# 109 was sys_uname
+# 110 was sys_iopl
+111 common vhangup sys_vhangup
+# 112 was sys_idle
+# 113 was sys_syscall
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+# 117 was sys_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn_wrapper compat_sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+# 123 was sys_modify_ldt
+124 common adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+# 127 was sys_create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was sys_get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was sys_afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+# 166 was sys_vm86
+# 167 was sys_query_module
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper compat_sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common pread64 sys_pread64 compat_sys_aarch32_pread64
+181 common pwrite64 sys_pwrite64 compat_sys_aarch32_pwrite64
+182 common chown sys_chown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+187 common sendfile sys_sendfile compat_sys_sendfile
+# 188 reserved
+# 189 reserved
+190 common vfork sys_vfork
+# SuS compliant getrlimit
+191 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+192 common mmap2 sys_mmap2 compat_sys_aarch32_mmap2
+193 common truncate64 sys_truncate64 compat_sys_aarch32_truncate64
+194 common ftruncate64 sys_ftruncate64 compat_sys_aarch32_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common getdents64 sys_getdents64
+218 common pivot_root sys_pivot_root
+219 common mincore sys_mincore
+220 common madvise sys_madvise
+221 common fcntl64 sys_fcntl64 compat_sys_fcntl64
+# 222 for tux
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead compat_sys_aarch32_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex_time32
+241 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+243 common io_setup sys_io_setup compat_sys_io_setup
+244 common io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents_time32
+246 common io_submit sys_io_submit compat_sys_io_submit
+247 common io_cancel sys_io_cancel
+248 common exit_group sys_exit_group
+249 common lookup_dcookie sys_ni_syscall
+250 common epoll_create sys_epoll_create
+251 common epoll_ctl sys_epoll_ctl
+252 common epoll_wait sys_epoll_wait
+253 common remap_file_pages sys_remap_file_pages
+# 254 for set_thread_area
+# 255 for get_thread_area
+256 common set_tid_address sys_set_tid_address
+257 common timer_create sys_timer_create compat_sys_timer_create
+258 common timer_settime sys_timer_settime32
+259 common timer_gettime sys_timer_gettime32
+260 common timer_getoverrun sys_timer_getoverrun
+261 common timer_delete sys_timer_delete
+262 common clock_settime sys_clock_settime32
+263 common clock_gettime sys_clock_gettime32
+264 common clock_getres sys_clock_getres_time32
+265 common clock_nanosleep sys_clock_nanosleep_time32
+266 common statfs64 sys_statfs64_wrapper compat_sys_aarch32_statfs64
+267 common fstatfs64 sys_fstatfs64_wrapper compat_sys_aarch32_fstatfs64
+268 common tgkill sys_tgkill
+269 common utimes sys_utimes_time32
+270 common arm_fadvise64_64 sys_arm_fadvise64_64 compat_sys_aarch32_fadvise64_64
+271 common pciconfig_iobase sys_pciconfig_iobase
+272 common pciconfig_read sys_pciconfig_read
+273 common pciconfig_write sys_pciconfig_write
+274 common mq_open sys_mq_open compat_sys_mq_open
+275 common mq_unlink sys_mq_unlink
+276 common mq_timedsend sys_mq_timedsend_time32
+277 common mq_timedreceive sys_mq_timedreceive_time32
+278 common mq_notify sys_mq_notify compat_sys_mq_notify
+279 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+280 common waitid sys_waitid compat_sys_waitid
+281 common socket sys_socket
+282 common bind sys_bind
+283 common connect sys_connect
+284 common listen sys_listen
+285 common accept sys_accept
+286 common getsockname sys_getsockname
+287 common getpeername sys_getpeername
+288 common socketpair sys_socketpair
+289 common send sys_send
+290 common sendto sys_sendto
+291 common recv sys_recv compat_sys_recv
+292 common recvfrom sys_recvfrom compat_sys_recvfrom
+293 common shutdown sys_shutdown
+294 common setsockopt sys_setsockopt
+295 common getsockopt sys_getsockopt
+296 common sendmsg sys_sendmsg compat_sys_sendmsg
+297 common recvmsg sys_recvmsg compat_sys_recvmsg
+298 common semop sys_semop
+299 common semget sys_semget
+300 common semctl sys_old_semctl compat_sys_old_semctl
+301 common msgsnd sys_msgsnd compat_sys_msgsnd
+302 common msgrcv sys_msgrcv compat_sys_msgrcv
+303 common msgget sys_msgget
+304 common msgctl sys_old_msgctl compat_sys_old_msgctl
+305 common shmat sys_shmat compat_sys_shmat
+306 common shmdt sys_shmdt
+307 common shmget sys_shmget
+308 common shmctl sys_old_shmctl compat_sys_old_shmctl
+309 common add_key sys_add_key
+310 common request_key sys_request_key
+311 common keyctl sys_keyctl compat_sys_keyctl
+312 common semtimedop sys_semtimedop_time32
+313 common vserver sys_ni_syscall
+314 common ioprio_set sys_ioprio_set
+315 common ioprio_get sys_ioprio_get
+316 common inotify_init sys_inotify_init
+317 common inotify_add_watch sys_inotify_add_watch
+318 common inotify_rm_watch sys_inotify_rm_watch
+319 common mbind sys_mbind
+320 common get_mempolicy sys_get_mempolicy
+321 common set_mempolicy sys_set_mempolicy
+322 common openat sys_openat compat_sys_openat
+323 common mkdirat sys_mkdirat
+324 common mknodat sys_mknodat
+325 common fchownat sys_fchownat
+326 common futimesat sys_futimesat_time32
+327 common fstatat64 sys_fstatat64
+328 common unlinkat sys_unlinkat
+329 common renameat sys_renameat
+330 common linkat sys_linkat
+331 common symlinkat sys_symlinkat
+332 common readlinkat sys_readlinkat
+333 common fchmodat sys_fchmodat
+334 common faccessat sys_faccessat
+335 common pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+336 common ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+337 common unshare sys_unshare
+338 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+339 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+340 common splice sys_splice
+341 common arm_sync_file_range sys_sync_file_range2 compat_sys_aarch32_sync_file_range2
+342 common tee sys_tee
+343 common vmsplice sys_vmsplice
+344 common move_pages sys_move_pages
+345 common getcpu sys_getcpu
+346 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+347 common kexec_load sys_kexec_load compat_sys_kexec_load
+348 common utimensat sys_utimensat_time32
+349 common signalfd sys_signalfd compat_sys_signalfd
+350 common timerfd_create sys_timerfd_create
+351 common eventfd sys_eventfd
+352 common fallocate sys_fallocate compat_sys_aarch32_fallocate
+353 common timerfd_settime sys_timerfd_settime32
+354 common timerfd_gettime sys_timerfd_gettime32
+355 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+356 common eventfd2 sys_eventfd2
+357 common epoll_create1 sys_epoll_create1
+358 common dup3 sys_dup3
+359 common pipe2 sys_pipe2
+360 common inotify_init1 sys_inotify_init1
+361 common preadv sys_preadv compat_sys_preadv
+362 common pwritev sys_pwritev compat_sys_pwritev
+363 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+364 common perf_event_open sys_perf_event_open
+365 common recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+366 common accept4 sys_accept4
+367 common fanotify_init sys_fanotify_init
+368 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+369 common prlimit64 sys_prlimit64
+370 common name_to_handle_at sys_name_to_handle_at
+371 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+372 common clock_adjtime sys_clock_adjtime32
+373 common syncfs sys_syncfs
+374 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+375 common setns sys_setns
+376 common process_vm_readv sys_process_vm_readv
+377 common process_vm_writev sys_process_vm_writev
+378 common kcmp sys_kcmp
+379 common finit_module sys_finit_module
+380 common sched_setattr sys_sched_setattr
+381 common sched_getattr sys_sched_getattr
+382 common renameat2 sys_renameat2
+383 common seccomp sys_seccomp
+384 common getrandom sys_getrandom
+385 common memfd_create sys_memfd_create
+386 common bpf sys_bpf
+387 common execveat sys_execveat compat_sys_execveat
+388 common userfaultfd sys_userfaultfd
+389 common membarrier sys_membarrier
+390 common mlock2 sys_mlock2
+391 common copy_file_range sys_copy_file_range
+392 common preadv2 sys_preadv2 compat_sys_preadv2
+393 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+394 common pkey_mprotect sys_pkey_mprotect
+395 common pkey_alloc sys_pkey_alloc
+396 common pkey_free sys_pkey_free
+397 common statx sys_statx
+398 common rseq sys_rseq
+399 common io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+400 common migrate_pages sys_migrate_pages
+401 common kexec_file_load sys_kexec_file_load
+# 402 is unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 common ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 common io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 common recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/tools/perf/arch/arm64/entry/syscalls/syscall_64.tbl b/tools/perf/arch/arm64/entry/syscalls/syscall_64.tbl
new file mode 120000
index 000000000000..4fdd58f10c15
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/syscall_64.tbl
@@ -0,0 +1 @@
+../../../../../scripts/syscall.tbl
\ No newline at end of file
diff --git a/tools/perf/arch/arm64/include/syscall_table.h b/tools/perf/arch/arm64/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/arm64/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 22b19dcc6beb..4301181b8e45 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -274,33 +274,9 @@ static void arm_spe_setup_evsel(struct evsel *evsel, struct perf_cpu_map *cpus)
evsel__set_sample_bit(evsel, PHYS_ADDR);
}
-static int arm_spe_recording_options(struct auxtrace_record *itr,
- struct evlist *evlist,
- struct record_opts *opts)
+static int arm_spe_setup_aux_buffer(struct record_opts *opts)
{
- struct arm_spe_recording *sper =
- container_of(itr, struct arm_spe_recording, itr);
- struct evsel *evsel, *tmp;
- struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
- struct evsel *tracking_evsel;
- int err;
-
- sper->evlist = evlist;
-
- evlist__for_each_entry(evlist, evsel) {
- if (evsel__is_aux_event(evsel)) {
- if (!strstarts(evsel->pmu->name, ARM_SPE_PMU_NAME)) {
- pr_err("Found unexpected auxtrace event: %s\n",
- evsel->pmu->name);
- return -EINVAL;
- }
- opts->full_auxtrace = true;
- }
- }
-
- if (!opts->full_auxtrace)
- return 0;
/*
* we are in snapshot mode.
@@ -330,6 +306,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
return -EINVAL;
}
+
+ pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
+ opts->auxtrace_snapshot_size);
}
/* We are in full trace mode but '-m,xyz' wasn't specified */
@@ -355,14 +334,15 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
}
}
- if (opts->auxtrace_snapshot_mode)
- pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
- opts->auxtrace_snapshot_size);
+ return 0;
+}
- evlist__for_each_entry_safe(evlist, tmp, evsel) {
- if (evsel__is_aux_event(evsel))
- arm_spe_setup_evsel(evsel, cpus);
- }
+static int arm_spe_setup_tracking_event(struct evlist *evlist,
+ struct record_opts *opts)
+{
+ int err;
+ struct evsel *tracking_evsel;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
@@ -388,6 +368,52 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
return 0;
}
+static int arm_spe_recording_options(struct auxtrace_record *itr,
+ struct evlist *evlist,
+ struct record_opts *opts)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct evsel *evsel, *tmp;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
+ bool discard = false;
+ int err;
+
+ sper->evlist = evlist;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_aux_event(evsel)) {
+ if (!strstarts(evsel->pmu->name, ARM_SPE_PMU_NAME)) {
+ pr_err("Found unexpected auxtrace event: %s\n",
+ evsel->pmu->name);
+ return -EINVAL;
+ }
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ evlist__for_each_entry_safe(evlist, tmp, evsel) {
+ if (evsel__is_aux_event(evsel)) {
+ arm_spe_setup_evsel(evsel, cpus);
+ if (evsel->core.attr.config &
+ perf_pmu__format_bits(evsel->pmu, "discard"))
+ discard = true;
+ }
+ }
+
+ if (discard)
+ return 0;
+
+ err = arm_spe_setup_aux_buffer(opts);
+ if (err)
+ return err;
+
+ return arm_spe_setup_tracking_event(evlist, opts);
+}
+
static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
struct record_opts *opts,
const char *str)
diff --git a/tools/perf/arch/csky/entry/syscalls/Kbuild b/tools/perf/arch/csky/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..11707c481a24
--- /dev/null
+++ b/tools/perf/arch/csky/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
diff --git a/tools/perf/arch/csky/entry/syscalls/Makefile.syscalls b/tools/perf/arch/csky/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..ea2dd10d0571
--- /dev/null
+++ b/tools/perf/arch/csky/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += csky time32 stat64 rlimit
diff --git a/tools/perf/arch/csky/include/syscall_table.h b/tools/perf/arch/csky/include/syscall_table.h
new file mode 100644
index 000000000000..4c942821662d
--- /dev/null
+++ b/tools/perf/arch/csky/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_32.h>
diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile
index 52544d59245b..087e099fb453 100644
--- a/tools/perf/arch/loongarch/Makefile
+++ b/tools/perf/arch/loongarch/Makefile
@@ -1,25 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
HAVE_KVM_STAT_SUPPORT := 1
-
-#
-# Syscall table generation for perf
-#
-
-out := $(OUTPUT)arch/loongarch/include/generated/asm
-header := $(out)/syscalls.c
-incpath := $(srctree)/tools
-sysdef := $(srctree)/tools/arch/loongarch/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/loongarch/entry/syscalls/
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, loongarch) $(RM) $(header)
-
-archheaders: $(header)
diff --git a/tools/perf/arch/loongarch/entry/syscalls/Kbuild b/tools/perf/arch/loongarch/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9a41e3572c3a
--- /dev/null
+++ b/tools/perf/arch/loongarch/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/loongarch/entry/syscalls/Makefile.syscalls b/tools/perf/arch/loongarch/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..47d32da2aed8
--- /dev/null
+++ b/tools/perf/arch/loongarch/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_64 +=
diff --git a/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl b/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl
deleted file mode 100755
index c10ad3580aef..000000000000
--- a/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf. Derived from
-# powerpc script.
-#
-# Author(s): Ming Wang <wangming01@loongson.cn>
-# Author(s): Huacai Chen <chenhuacai@loongson.cn>
-# Copyright (C) 2020-2023 Loongson Technology Corporation Limited
-
-gcc=$1
-hostcc=$2
-incpath=$3
-input=$4
-
-if ! test -r $input; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_sc_table()
-{
- local sc nr max_nr
-
- while read sc nr; do
- printf "%s\n" " [$nr] = \"$sc\","
- max_nr=$nr
- done
-
- echo "#define SYSCALLTBL_LOONGARCH_MAX_ID $max_nr"
-}
-
-create_table()
-{
- echo "#include \"$input\""
- echo "static const char *const syscalltbl_loongarch[] = {"
- create_sc_table
- echo "};"
-}
-
-$gcc -E -dM -x c -I $incpath/include/uapi $input \
- |awk '$2 ~ "__NR" && $3 !~ "__NR3264_" {
- sub("^#define __NR(3264)?_", "");
- print | "sort -k2 -n"}' \
- |create_table
diff --git a/tools/perf/arch/loongarch/include/syscall_table.h b/tools/perf/arch/loongarch/include/syscall_table.h
new file mode 100644
index 000000000000..9d0646d3455c
--- /dev/null
+++ b/tools/perf/arch/loongarch/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscall_table_64.h>
diff --git a/tools/perf/arch/mips/Makefile b/tools/perf/arch/mips/Makefile
deleted file mode 100644
index 827168f1077a..000000000000
--- a/tools/perf/arch/mips/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Syscall table generation for perf
-out := $(OUTPUT)arch/mips/include/generated/asm
-header := $(out)/syscalls_n64.c
-sysprf := $(srctree)/tools/perf/arch/mips/entry/syscalls
-sysdef := $(sysprf)/syscall_n64.tbl
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, mips) $(RM) $(header)
-
-archheaders: $(header)
diff --git a/tools/perf/arch/mips/entry/syscalls/Kbuild b/tools/perf/arch/mips/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9a41e3572c3a
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/mips/entry/syscalls/Makefile.syscalls b/tools/perf/arch/mips/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..9ee914bdfb05
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_64 += n64
+
+syscalltbl = $(srctree)/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
diff --git a/tools/perf/arch/mips/entry/syscalls/mksyscalltbl b/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
deleted file mode 100644
index c0d93f959c4e..000000000000
--- a/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf. Derived from
-# s390 script.
-#
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-# Changed by: Tiezhu Yang <yangtiezhu@loongson.cn>
-
-SYSCALL_TBL=$1
-
-if ! test -r $SYSCALL_TBL; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_table()
-{
- local max_nr nr abi sc discard
-
- echo 'static const char *const syscalltbl_mips_n64[] = {'
- while read nr abi sc discard; do
- printf '\t[%d] = "%s",\n' $nr $sc
- max_nr=$nr
- done
- echo '};'
- echo "#define SYSCALLTBL_MIPS_N64_MAX_ID $max_nr"
-}
-
-grep -E "^[[:digit:]]+[[:space:]]+(n64)" $SYSCALL_TBL \
- |sort -k1 -n \
- |create_table
diff --git a/tools/perf/arch/mips/include/syscall_table.h b/tools/perf/arch/mips/include/syscall_table.h
new file mode 100644
index 000000000000..b53e31c15805
--- /dev/null
+++ b/tools/perf/arch/mips/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_64.h>
diff --git a/tools/perf/arch/parisc/entry/syscalls/Kbuild b/tools/perf/arch/parisc/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..84c6599b4ea6
--- /dev/null
+++ b/tools/perf/arch/parisc/entry/syscalls/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/parisc/entry/syscalls/Makefile.syscalls b/tools/perf/arch/parisc/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..ae326fecb83b
--- /dev/null
+++ b/tools/perf/arch/parisc/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscall_abis_64 +=
+
+syscalltbl = $(srctree)/tools/perf/arch/parisc/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/parisc/entry/syscalls/syscall.tbl b/tools/perf/arch/parisc/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..66dc406b12e4
--- /dev/null
+++ b/tools/perf/arch/parisc/entry/syscalls/syscall.tbl
@@ -0,0 +1,463 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for parisc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, 64, or 32 for this file.
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork_wrapper
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+13 32 time sys_time32
+13 64 time sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common socket sys_socket
+18 common stat sys_newstat compat_sys_newstat
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common bind sys_bind
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 32 stime sys_stime32
+25 64 stime sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 common fstat sys_newfstat compat_sys_newfstat
+29 common pause sys_pause
+30 32 utime sys_utime32
+30 64 utime sys_utime
+31 common connect sys_connect
+32 common listen sys_listen
+33 common access sys_access
+34 common nice sys_nice
+35 common accept sys_accept
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+44 common getsockname sys_getsockname
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 common signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 common acct sys_acct
+52 common umount2 sys_umount
+53 common getpeername sys_getpeername
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+56 common socketpair sys_socketpair
+57 common setpgid sys_setpgid
+58 common send sys_send
+59 common uname sys_newuname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common pivot_root sys_pivot_root
+68 common sgetmask sys_sgetmask sys32_unimplemented
+69 common ssetmask sys_ssetmask sys32_unimplemented
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 common mincore sys_mincore
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 common getrlimit sys_getrlimit compat_sys_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 common sendto sys_sendto
+83 common symlink sys_symlink
+84 common lstat sys_newlstat compat_sys_newlstat
+85 common readlink sys_readlink
+86 common uselib sys_ni_syscall
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common mmap2 sys_mmap2
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common recv sys_recv compat_sys_recv
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+101 common stat64 sys_stat64
+# 102 was socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common capget sys_capget
+107 common capset sys_capset
+108 32 pread64 parisc_pread64
+108 64 pread64 sys_pread64
+109 32 pwrite64 parisc_pwrite64
+109 64 pwrite64 sys_pwrite64
+110 common getcwd sys_getcwd
+111 common vhangup sys_vhangup
+112 common fstat64 sys_fstat64
+113 common vfork sys_vfork_wrapper
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 common shutdown sys_shutdown
+118 common fsync sys_fsync
+119 common madvise parisc_madvise
+120 common clone sys_clone_wrapper
+121 common setdomainname sys_setdomainname
+122 common sendfile sys_sendfile compat_sys_sendfile
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
+124 32 adjtimex sys_adjtimex_time32
+124 64 adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+# 127 was create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 32 personality parisc_personality
+136 64 personality sys_personality
+# 137 was afs_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+161 64 sched_rr_get_interval sys_sched_rr_get_interval
+162 32 nanosleep sys_nanosleep_time32
+162 64 nanosleep sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+# 167 was query_module
+168 common poll sys_poll
+# 169 was nfsservctl
+170 common setresgid sys_setresgid
+171 common getresgid sys_getresgid
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+177 64 rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common chown sys_chown
+181 common setsockopt sys_setsockopt sys_setsockopt
+182 common getsockopt sys_getsockopt sys_getsockopt
+183 common sendmsg sys_sendmsg compat_sys_sendmsg
+184 common recvmsg sys_recvmsg compat_sys_recvmsg
+185 common semop sys_semop
+186 common semget sys_semget
+187 common semctl sys_semctl compat_sys_semctl
+188 common msgsnd sys_msgsnd compat_sys_msgsnd
+189 common msgrcv sys_msgrcv compat_sys_msgrcv
+190 common msgget sys_msgget
+191 common msgctl sys_msgctl compat_sys_msgctl
+192 common shmat sys_shmat compat_sys_shmat
+193 common shmdt sys_shmdt
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+# 196 was getpmsg
+# 197 was putpmsg
+198 common lstat64 sys_lstat64
+199 32 truncate64 parisc_truncate64
+199 64 truncate64 sys_truncate64
+200 32 ftruncate64 parisc_ftruncate64
+200 64 ftruncate64 sys_ftruncate64
+201 common getdents64 sys_getdents64
+202 common fcntl64 sys_fcntl64 compat_sys_fcntl64
+# 203 was attrctl
+# 204 was acl_get
+# 205 was acl_set
+206 common gettid sys_gettid
+207 32 readahead parisc_readahead
+207 64 readahead sys_readahead
+208 common tkill sys_tkill
+209 common sendfile64 sys_sendfile64 compat_sys_sendfile64
+210 32 futex sys_futex_time32
+210 64 futex sys_futex
+211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+# 213 was set_thread_area
+# 214 was get_thread_area
+215 common io_setup sys_io_setup compat_sys_io_setup
+216 common io_destroy sys_io_destroy
+217 32 io_getevents sys_io_getevents_time32
+217 64 io_getevents sys_io_getevents
+218 common io_submit sys_io_submit compat_sys_io_submit
+219 common io_cancel sys_io_cancel
+# 220 was alloc_hugepages
+# 221 was free_hugepages
+222 common exit_group sys_exit_group
+223 common lookup_dcookie sys_ni_syscall
+224 common epoll_create sys_epoll_create
+225 common epoll_ctl sys_epoll_ctl
+226 common epoll_wait sys_epoll_wait
+227 common remap_file_pages sys_remap_file_pages
+228 32 semtimedop sys_semtimedop_time32
+228 64 semtimedop sys_semtimedop
+229 common mq_open sys_mq_open compat_sys_mq_open
+230 common mq_unlink sys_mq_unlink
+231 32 mq_timedsend sys_mq_timedsend_time32
+231 64 mq_timedsend sys_mq_timedsend
+232 32 mq_timedreceive sys_mq_timedreceive_time32
+232 64 mq_timedreceive sys_mq_timedreceive
+233 common mq_notify sys_mq_notify compat_sys_mq_notify
+234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+235 common waitid sys_waitid compat_sys_waitid
+236 32 fadvise64_64 parisc_fadvise64_64
+236 64 fadvise64_64 sys_fadvise64_64
+237 common set_tid_address sys_set_tid_address
+238 common setxattr sys_setxattr
+239 common lsetxattr sys_lsetxattr
+240 common fsetxattr sys_fsetxattr
+241 common getxattr sys_getxattr
+242 common lgetxattr sys_lgetxattr
+243 common fgetxattr sys_fgetxattr
+244 common listxattr sys_listxattr
+245 common llistxattr sys_llistxattr
+246 common flistxattr sys_flistxattr
+247 common removexattr sys_removexattr
+248 common lremovexattr sys_lremovexattr
+249 common fremovexattr sys_fremovexattr
+250 common timer_create sys_timer_create compat_sys_timer_create
+251 32 timer_settime sys_timer_settime32
+251 64 timer_settime sys_timer_settime
+252 32 timer_gettime sys_timer_gettime32
+252 64 timer_gettime sys_timer_gettime
+253 common timer_getoverrun sys_timer_getoverrun
+254 common timer_delete sys_timer_delete
+255 32 clock_settime sys_clock_settime32
+255 64 clock_settime sys_clock_settime
+256 32 clock_gettime sys_clock_gettime32
+256 64 clock_gettime sys_clock_gettime
+257 32 clock_getres sys_clock_getres_time32
+257 64 clock_getres sys_clock_getres
+258 32 clock_nanosleep sys_clock_nanosleep_time32
+258 64 clock_nanosleep sys_clock_nanosleep
+259 common tgkill sys_tgkill
+260 common mbind sys_mbind
+261 common get_mempolicy sys_get_mempolicy
+262 common set_mempolicy sys_set_mempolicy
+# 263 was vserver
+264 common add_key sys_add_key
+265 common request_key sys_request_key
+266 common keyctl sys_keyctl compat_sys_keyctl
+267 common ioprio_set sys_ioprio_set
+268 common ioprio_get sys_ioprio_get
+269 common inotify_init sys_inotify_init
+270 common inotify_add_watch sys_inotify_add_watch
+271 common inotify_rm_watch sys_inotify_rm_watch
+272 common migrate_pages sys_migrate_pages
+273 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+273 64 pselect6 sys_pselect6
+274 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+274 64 ppoll sys_ppoll
+275 common openat sys_openat compat_sys_openat
+276 common mkdirat sys_mkdirat
+277 common mknodat sys_mknodat
+278 common fchownat sys_fchownat
+279 32 futimesat sys_futimesat_time32
+279 64 futimesat sys_futimesat
+280 common fstatat64 sys_fstatat64
+281 common unlinkat sys_unlinkat
+282 common renameat sys_renameat
+283 common linkat sys_linkat
+284 common symlinkat sys_symlinkat
+285 common readlinkat sys_readlinkat
+286 common fchmodat sys_fchmodat
+287 common faccessat sys_faccessat
+288 common unshare sys_unshare
+289 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+290 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+291 common splice sys_splice
+292 32 sync_file_range parisc_sync_file_range
+292 64 sync_file_range sys_sync_file_range
+293 common tee sys_tee
+294 common vmsplice sys_vmsplice
+295 common move_pages sys_move_pages
+296 common getcpu sys_getcpu
+297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+298 common statfs64 sys_statfs64 compat_sys_statfs64
+299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+300 common kexec_load sys_kexec_load compat_sys_kexec_load
+301 32 utimensat sys_utimensat_time32
+301 64 utimensat sys_utimensat
+302 common signalfd sys_signalfd compat_sys_signalfd
+# 303 was timerfd
+304 common eventfd sys_eventfd
+305 32 fallocate parisc_fallocate
+305 64 fallocate sys_fallocate
+306 common timerfd_create parisc_timerfd_create
+307 32 timerfd_settime sys_timerfd_settime32
+307 64 timerfd_settime sys_timerfd_settime
+308 32 timerfd_gettime sys_timerfd_gettime32
+308 64 timerfd_gettime sys_timerfd_gettime
+309 common signalfd4 parisc_signalfd4 parisc_compat_signalfd4
+310 common eventfd2 parisc_eventfd2
+311 common epoll_create1 sys_epoll_create1
+312 common dup3 sys_dup3
+313 common pipe2 parisc_pipe2
+314 common inotify_init1 parisc_inotify_init1
+315 common preadv sys_preadv compat_sys_preadv
+316 common pwritev sys_pwritev compat_sys_pwritev
+317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+318 common perf_event_open sys_perf_event_open
+319 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+319 64 recvmmsg sys_recvmmsg
+320 common accept4 sys_accept4
+321 common prlimit64 sys_prlimit64
+322 common fanotify_init sys_fanotify_init
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+324 32 clock_adjtime sys_clock_adjtime32
+324 64 clock_adjtime sys_clock_adjtime
+325 common name_to_handle_at sys_name_to_handle_at
+326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+327 common syncfs sys_syncfs
+328 common setns sys_setns
+329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+330 common process_vm_readv sys_process_vm_readv
+331 common process_vm_writev sys_process_vm_writev
+332 common kcmp sys_kcmp
+333 common finit_module sys_finit_module
+334 common sched_setattr sys_sched_setattr
+335 common sched_getattr sys_sched_getattr
+336 32 utimes sys_utimes_time32
+336 64 utimes sys_utimes
+337 common renameat2 sys_renameat2
+338 common seccomp sys_seccomp
+339 common getrandom sys_getrandom
+340 common memfd_create sys_memfd_create
+341 common bpf sys_bpf
+342 common execveat sys_execveat compat_sys_execveat
+343 common membarrier sys_membarrier
+344 common userfaultfd parisc_userfaultfd
+345 common mlock2 sys_mlock2
+346 common copy_file_range sys_copy_file_range
+347 common preadv2 sys_preadv2 compat_sys_preadv2
+348 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+349 common statx sys_statx
+350 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+350 64 io_pgetevents sys_io_pgetevents
+351 common pkey_mprotect sys_pkey_mprotect
+352 common pkey_alloc sys_pkey_alloc
+353 common pkey_free sys_pkey_free
+354 common rseq sys_rseq
+355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
+356 common cacheflush sys_cacheflush
+# up to 402 is unassigned and reserved for arch specific syscalls
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3_wrapper
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
diff --git a/tools/perf/arch/parisc/include/syscall_table.h b/tools/perf/arch/parisc/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/parisc/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index dc8f4fb8e324..a295a80ea078 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -1,28 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
-
-#
-# Syscall table generation for perf
-#
-
-out := $(OUTPUT)arch/powerpc/include/generated/asm
-header32 := $(out)/syscalls_32.c
-header64 := $(out)/syscalls_64.c
-sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
-sysdef := $(sysprf)/syscall.tbl
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header64): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
-
-$(header32): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
-
-archheaders: $(header32) $(header64)
diff --git a/tools/perf/arch/powerpc/entry/syscalls/Kbuild b/tools/perf/arch/powerpc/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..84c6599b4ea6
--- /dev/null
+++ b/tools/perf/arch/powerpc/entry/syscalls/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/powerpc/entry/syscalls/Makefile.syscalls b/tools/perf/arch/powerpc/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..e35afbc57c79
--- /dev/null
+++ b/tools/perf/arch/powerpc/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += nospu
+syscall_abis_64 += nospu
+
+syscalltbl = $(srctree)/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
deleted file mode 100755
index 0eb316fe6dd1..000000000000
--- a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf. Derived from
-# s390 script.
-#
-# Copyright IBM Corp. 2017
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
-
-wordsize=$1
-SYSCALL_TBL=$2
-
-if ! test -r $SYSCALL_TBL; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_table()
-{
- local wordsize=$1
- local max_nr nr abi sc discard
- max_nr=-1
- nr=0
-
- echo "static const char *const syscalltbl_powerpc_${wordsize}[] = {"
- while read nr abi sc discard; do
- if [ "$max_nr" -lt "$nr" ]; then
- printf '\t[%d] = "%s",\n' $nr $sc
- max_nr=$nr
- fi
- done
- echo '};'
- echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
-}
-
-grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \
- |sort -k1 -n \
- |create_table ${wordsize}
diff --git a/tools/perf/arch/powerpc/include/syscall_table.h b/tools/perf/arch/powerpc/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/powerpc/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index e8e6e6fc6f17..bd36cfd420a2 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -16,6 +16,7 @@
#define PVR_POWER9 0x004E
#define PVR_POWER10 0x0080
+#define PVR_POWER11 0x0082
static const struct sample_reg sample_reg_masks[] = {
SMPL_REG(r0, PERF_REG_POWERPC_R0),
@@ -207,7 +208,7 @@ uint64_t arch__intr_reg_mask(void)
version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF);
if (version == PVR_POWER9)
extended_mask = PERF_REG_PMU_MASK_300;
- else if (version == PVR_POWER10)
+ else if ((version == PVR_POWER10) || (version == PVR_POWER11))
extended_mask = PERF_REG_PMU_MASK_31;
else
return mask;
diff --git a/tools/perf/arch/riscv/Makefile b/tools/perf/arch/riscv/Makefile
index 18ad078000e2..087e099fb453 100644
--- a/tools/perf/arch/riscv/Makefile
+++ b/tools/perf/arch/riscv/Makefile
@@ -1,25 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
HAVE_KVM_STAT_SUPPORT := 1
-
-#
-# Syscall table generation for perf
-#
-
-out := $(OUTPUT)arch/riscv/include/generated/asm
-header := $(out)/syscalls.c
-incpath := $(srctree)/tools
-sysdef := $(srctree)/tools/arch/riscv/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/riscv/entry/syscalls/
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, riscv) $(RM) $(header)
-
-archheaders: $(header)
diff --git a/tools/perf/arch/riscv/entry/syscalls/Kbuild b/tools/perf/arch/riscv/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9a41e3572c3a
--- /dev/null
+++ b/tools/perf/arch/riscv/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/riscv/entry/syscalls/Makefile.syscalls b/tools/perf/arch/riscv/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..9668fd1faf60
--- /dev/null
+++ b/tools/perf/arch/riscv/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += riscv memfd_secret
+syscall_abis_64 += riscv rlimit memfd_secret
diff --git a/tools/perf/arch/riscv/entry/syscalls/mksyscalltbl b/tools/perf/arch/riscv/entry/syscalls/mksyscalltbl
deleted file mode 100755
index c59f5e852b97..000000000000
--- a/tools/perf/arch/riscv/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf. Derived from
-# powerpc script.
-#
-# Copyright IBM Corp. 2017
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
-# Changed by: Kim Phillips <kim.phillips@arm.com>
-# Changed by: Björn Töpel <bjorn@rivosinc.com>
-
-gcc=$1
-hostcc=$2
-incpath=$3
-input=$4
-
-if ! test -r $input; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_sc_table()
-{
- local sc nr max_nr
-
- while read sc nr; do
- printf "%s\n" " [$nr] = \"$sc\","
- max_nr=$nr
- done
-
- echo "#define SYSCALLTBL_RISCV_MAX_ID $max_nr"
-}
-
-create_table()
-{
- echo "#include \"$input\""
- echo "static const char *const syscalltbl_riscv[] = {"
- create_sc_table
- echo "};"
-}
-
-$gcc -E -dM -x c -I $incpath/include/uapi $input \
- |awk '$2 ~ "__NR" && $3 !~ "__NR3264_" {
- sub("^#define __NR(3264)?_", "");
- print | "sort -k2 -n"}' \
- |create_table
diff --git a/tools/perf/arch/riscv/include/syscall_table.h b/tools/perf/arch/riscv/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/riscv/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index c431c21b11ef..0033698a65ce 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -1,24 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
-
-#
-# Syscall table generation for perf
-#
-
-out := $(OUTPUT)arch/s390/include/generated/asm
-header := $(out)/syscalls_64.c
-sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
-sysdef := $(sysprf)/syscall.tbl
-systbl := $(sysprf)/mksyscalltbl
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
-
-clean::
- $(call QUIET_CLEAN, s390) $(RM) $(header)
-
-archheaders: $(header)
diff --git a/tools/perf/arch/s390/entry/syscalls/Kbuild b/tools/perf/arch/s390/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..9a41e3572c3a
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/s390/entry/syscalls/Makefile.syscalls b/tools/perf/arch/s390/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..9762d7abf17c
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_64 += renameat rlimit memfd_secret
+
+syscalltbl = $(srctree)/tools/perf/arch/s390/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
deleted file mode 100755
index 52eb88a77c94..000000000000
--- a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table for perf
-#
-# Copyright IBM Corp. 2017, 2018
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-#
-
-SYSCALL_TBL=$1
-
-if ! test -r $SYSCALL_TBL; then
- echo "Could not read input file" >&2
- exit 1
-fi
-
-create_table()
-{
- local max_nr nr abi sc discard
-
- echo 'static const char *const syscalltbl_s390_64[] = {'
- while read nr abi sc discard; do
- printf '\t[%d] = "%s",\n' $nr $sc
- max_nr=$nr
- done
- echo '};'
- echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
-}
-
-grep -E "^[[:digit:]]+[[:space:]]+(common|64)" $SYSCALL_TBL \
- |sort -k1 -n \
- |create_table
diff --git a/tools/perf/arch/s390/include/syscall_table.h b/tools/perf/arch/s390/include/syscall_table.h
new file mode 100644
index 000000000000..b53e31c15805
--- /dev/null
+++ b/tools/perf/arch/s390/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_64.h>
diff --git a/tools/perf/arch/sh/entry/syscalls/Kbuild b/tools/perf/arch/sh/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..11707c481a24
--- /dev/null
+++ b/tools/perf/arch/sh/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
diff --git a/tools/perf/arch/sh/entry/syscalls/Makefile.syscalls b/tools/perf/arch/sh/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..25080390e4ed
--- /dev/null
+++ b/tools/perf/arch/sh/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscalltbl = $(srctree)/tools/perf/arch/sh/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/sh/entry/syscalls/syscall.tbl b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..c8cad33bf250
--- /dev/null
+++ b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
@@ -0,0 +1,472 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sh
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 common time sys_time32
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown16
+# 17 was break
+18 common oldstat sys_stat
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+25 common stime sys_stime32
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+28 common oldfstat sys_fstat
+29 common pause sys_pause
+30 common utime sys_utime32
+# 31 was stty
+# 32 was gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_sh_pipe
+43 common times sys_times
+# 44 was prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+48 common signal sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was lock
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+# 56 was mpx
+57 common setpgid sys_setpgid
+# 58 was ulimit
+# 59 was olduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+76 common getrlimit sys_old_getrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+# 82 was select
+83 common symlink sys_symlink
+84 common oldlstat sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_old_readdir
+90 common mmap old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was profil
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+# 101 was ioperm
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+109 common olduname sys_uname
+# 110 was iopl
+111 common vhangup sys_vhangup
+# 112 was idle
+# 113 was vm86old
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common cacheflush sys_cacheflush
+124 common adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+# 127 was create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+# 166 was vm86
+# 167 was query_module
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread_wrapper
+181 common pwrite64 sys_pwrite_wrapper
+182 common chown sys_chown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile
+# 188 is reserved for getpmsg
+# 189 is reserved for putpmsg
+190 common vfork sys_vfork
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64
+194 common ftruncate64 sys_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+221 common fcntl64 sys_fcntl64
+# 222 is reserved for tux
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex_time32
+241 common sched_setaffinity sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity
+# 243 is reserved for set_thread_area
+# 244 is reserved for get_thread_area
+245 common io_setup sys_io_setup
+246 common io_destroy sys_io_destroy
+247 common io_getevents sys_io_getevents_time32
+248 common io_submit sys_io_submit
+249 common io_cancel sys_io_cancel
+250 common fadvise64 sys_fadvise64
+# 251 is unused
+252 common exit_group sys_exit_group
+253 common lookup_dcookie sys_ni_syscall
+254 common epoll_create sys_epoll_create
+255 common epoll_ctl sys_epoll_ctl
+256 common epoll_wait sys_epoll_wait
+257 common remap_file_pages sys_remap_file_pages
+258 common set_tid_address sys_set_tid_address
+259 common timer_create sys_timer_create
+260 common timer_settime sys_timer_settime32
+261 common timer_gettime sys_timer_gettime32
+262 common timer_getoverrun sys_timer_getoverrun
+263 common timer_delete sys_timer_delete
+264 common clock_settime sys_clock_settime32
+265 common clock_gettime sys_clock_gettime32
+266 common clock_getres sys_clock_getres_time32
+267 common clock_nanosleep sys_clock_nanosleep_time32
+268 common statfs64 sys_statfs64
+269 common fstatfs64 sys_fstatfs64
+270 common tgkill sys_tgkill
+271 common utimes sys_utimes_time32
+272 common fadvise64_64 sys_fadvise64_64_wrapper
+# 273 is reserved for vserver
+274 common mbind sys_mbind
+275 common get_mempolicy sys_get_mempolicy
+276 common set_mempolicy sys_set_mempolicy
+277 common mq_open sys_mq_open
+278 common mq_unlink sys_mq_unlink
+279 common mq_timedsend sys_mq_timedsend_time32
+280 common mq_timedreceive sys_mq_timedreceive_time32
+281 common mq_notify sys_mq_notify
+282 common mq_getsetattr sys_mq_getsetattr
+283 common kexec_load sys_kexec_load
+284 common waitid sys_waitid
+285 common add_key sys_add_key
+286 common request_key sys_request_key
+287 common keyctl sys_keyctl
+288 common ioprio_set sys_ioprio_set
+289 common ioprio_get sys_ioprio_get
+290 common inotify_init sys_inotify_init
+291 common inotify_add_watch sys_inotify_add_watch
+292 common inotify_rm_watch sys_inotify_rm_watch
+# 293 is unused
+294 common migrate_pages sys_migrate_pages
+295 common openat sys_openat
+296 common mkdirat sys_mkdirat
+297 common mknodat sys_mknodat
+298 common fchownat sys_fchownat
+299 common futimesat sys_futimesat_time32
+300 common fstatat64 sys_fstatat64
+301 common unlinkat sys_unlinkat
+302 common renameat sys_renameat
+303 common linkat sys_linkat
+304 common symlinkat sys_symlinkat
+305 common readlinkat sys_readlinkat
+306 common fchmodat sys_fchmodat
+307 common faccessat sys_faccessat
+308 common pselect6 sys_pselect6_time32
+309 common ppoll sys_ppoll_time32
+310 common unshare sys_unshare
+311 common set_robust_list sys_set_robust_list
+312 common get_robust_list sys_get_robust_list
+313 common splice sys_splice
+314 common sync_file_range sys_sh_sync_file_range6
+315 common tee sys_tee
+316 common vmsplice sys_vmsplice
+317 common move_pages sys_move_pages
+318 common getcpu sys_getcpu
+319 common epoll_pwait sys_epoll_pwait
+320 common utimensat sys_utimensat_time32
+321 common signalfd sys_signalfd
+322 common timerfd_create sys_timerfd_create
+323 common eventfd sys_eventfd
+324 common fallocate sys_fallocate
+325 common timerfd_settime sys_timerfd_settime32
+326 common timerfd_gettime sys_timerfd_gettime32
+327 common signalfd4 sys_signalfd4
+328 common eventfd2 sys_eventfd2
+329 common epoll_create1 sys_epoll_create1
+330 common dup3 sys_dup3
+331 common pipe2 sys_pipe2
+332 common inotify_init1 sys_inotify_init1
+333 common preadv sys_preadv
+334 common pwritev sys_pwritev
+335 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+336 common perf_event_open sys_perf_event_open
+337 common fanotify_init sys_fanotify_init
+338 common fanotify_mark sys_fanotify_mark
+339 common prlimit64 sys_prlimit64
+340 common socket sys_socket
+341 common bind sys_bind
+342 common connect sys_connect
+343 common listen sys_listen
+344 common accept sys_accept
+345 common getsockname sys_getsockname
+346 common getpeername sys_getpeername
+347 common socketpair sys_socketpair
+348 common send sys_send
+349 common sendto sys_sendto
+350 common recv sys_recv
+351 common recvfrom sys_recvfrom
+352 common shutdown sys_shutdown
+353 common setsockopt sys_setsockopt
+354 common getsockopt sys_getsockopt
+355 common sendmsg sys_sendmsg
+356 common recvmsg sys_recvmsg
+357 common recvmmsg sys_recvmmsg_time32
+358 common accept4 sys_accept4
+359 common name_to_handle_at sys_name_to_handle_at
+360 common open_by_handle_at sys_open_by_handle_at
+361 common clock_adjtime sys_clock_adjtime32
+362 common syncfs sys_syncfs
+363 common sendmmsg sys_sendmmsg
+364 common setns sys_setns
+365 common process_vm_readv sys_process_vm_readv
+366 common process_vm_writev sys_process_vm_writev
+367 common kcmp sys_kcmp
+368 common finit_module sys_finit_module
+369 common sched_getattr sys_sched_getattr
+370 common sched_setattr sys_sched_setattr
+371 common renameat2 sys_renameat2
+372 common seccomp sys_seccomp
+373 common getrandom sys_getrandom
+374 common memfd_create sys_memfd_create
+375 common bpf sys_bpf
+376 common execveat sys_execveat
+377 common userfaultfd sys_userfaultfd
+378 common membarrier sys_membarrier
+379 common mlock2 sys_mlock2
+380 common copy_file_range sys_copy_file_range
+381 common preadv2 sys_preadv2
+382 common pwritev2 sys_pwritev2
+383 common statx sys_statx
+384 common pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free
+387 common rseq sys_rseq
+388 common sync_file_range2 sys_sync_file_range2
+# room for arch specific syscalls
+393 common semget sys_semget
+394 common semctl sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl
+397 common shmat sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd
+401 common msgrcv sys_msgrcv
+402 common msgctl sys_msgctl
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+# 435 reserved for clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
diff --git a/tools/perf/arch/sh/include/syscall_table.h b/tools/perf/arch/sh/include/syscall_table.h
new file mode 100644
index 000000000000..4c942821662d
--- /dev/null
+++ b/tools/perf/arch/sh/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_32.h>
diff --git a/tools/perf/arch/sparc/entry/syscalls/Kbuild b/tools/perf/arch/sparc/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..84c6599b4ea6
--- /dev/null
+++ b/tools/perf/arch/sparc/entry/syscalls/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/sparc/entry/syscalls/Makefile.syscalls b/tools/perf/arch/sparc/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..212c1800b644
--- /dev/null
+++ b/tools/perf/arch/sparc/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscall_abis_64 +=
+syscalltbl = $(srctree)/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..727f99d333b3
--- /dev/null
+++ b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
@@ -0,0 +1,514 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sparc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, 64, or 32 for this file.
+#
+0 common restart_syscall sys_restart_syscall
+1 32 exit sys_exit sparc_exit
+1 64 exit sparc_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common wait4 sys_wait4 compat_sys_wait4
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 32 execv sunos_execv
+11 64 execv sys_nis_syscall
+12 common chdir sys_chdir
+13 32 chown sys_chown16
+13 64 chown sys_chown
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 32 lchown sys_lchown16
+16 64 lchown sys_lchown
+17 common brk sys_brk
+18 common perfctr sys_nis_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common capget sys_capget
+22 common capset sys_capset
+23 32 setuid sys_setuid16
+23 64 setuid sys_setuid
+24 32 getuid sys_getuid16
+24 64 getuid sys_getuid
+25 common vmsplice sys_vmsplice
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+29 32 pause sys_pause
+29 64 pause sys_nis_syscall
+30 32 utime sys_utime32
+30 64 utime sys_utime
+31 32 lchown32 sys_lchown
+32 32 fchown32 sys_fchown
+33 common access sys_access
+34 common nice sys_nice
+35 32 chown32 sys_chown
+36 common sync sys_sync
+37 common kill sys_kill
+38 common stat sys_newstat compat_sys_newstat
+39 32 sendfile sys_sendfile compat_sys_sendfile
+39 64 sendfile sys_sendfile64
+40 common lstat sys_newlstat compat_sys_newlstat
+41 common dup sys_dup
+42 common pipe sys_sparc_pipe
+43 common times sys_times compat_sys_times
+44 32 getuid32 sys_getuid
+45 common umount2 sys_umount
+46 32 setgid sys_setgid16
+46 64 setgid sys_setgid
+47 32 getgid sys_getgid16
+47 64 getgid sys_getgid
+48 common signal sys_signal
+49 32 geteuid sys_geteuid16
+49 64 geteuid sys_geteuid
+50 32 getegid sys_getegid16
+50 64 getegid sys_getegid
+51 common acct sys_acct
+52 64 memory_ordering sys_memory_ordering
+53 32 getgid32 sys_getgid
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common reboot sys_reboot
+56 32 mmap2 sys_mmap2 sys32_mmap2
+57 common symlink sys_symlink
+58 common readlink sys_readlink
+59 32 execve sys_execve sys32_execve
+59 64 execve sys64_execve
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common fstat sys_newfstat compat_sys_newfstat
+63 common fstat64 sys_fstat64 compat_sys_fstat64
+64 common getpagesize sys_getpagesize
+65 common msync sys_msync
+66 common vfork sys_vfork
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 32 geteuid32 sys_geteuid
+70 32 getegid32 sys_getegid
+71 common mmap sys_mmap
+72 32 setreuid32 sys_setreuid
+73 32 munmap sys_munmap
+73 64 munmap sys_64_munmap
+74 common mprotect sys_mprotect
+75 common madvise sys_madvise
+76 common vhangup sys_vhangup
+77 32 truncate64 sys_truncate64 compat_sys_truncate64
+78 common mincore sys_mincore
+79 32 getgroups sys_getgroups16
+79 64 getgroups sys_getgroups
+80 32 setgroups sys_setgroups16
+80 64 setgroups sys_setgroups
+81 common getpgrp sys_getpgrp
+82 32 setgroups32 sys_setgroups
+83 common setitimer sys_setitimer compat_sys_setitimer
+84 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+85 common swapon sys_swapon
+86 common getitimer sys_getitimer compat_sys_getitimer
+87 32 setuid32 sys_setuid
+88 common sethostname sys_sethostname
+89 32 setgid32 sys_setgid
+90 common dup2 sys_dup2
+91 32 setfsuid32 sys_setfsuid
+92 common fcntl sys_fcntl compat_sys_fcntl
+93 common select sys_select compat_sys_select
+94 32 setfsgid32 sys_setfsgid
+95 common fsync sys_fsync
+96 common setpriority sys_setpriority
+97 common socket sys_socket
+98 common connect sys_connect
+99 common accept sys_accept
+100 common getpriority sys_getpriority
+101 common rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+105 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+105 64 rt_sigtimedwait sys_rt_sigtimedwait
+106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+108 32 setresuid32 sys_setresuid
+108 64 setresuid sys_setresuid
+109 32 getresuid32 sys_getresuid
+109 64 getresuid sys_getresuid
+110 32 setresgid32 sys_setresgid
+110 64 setresgid sys_setresgid
+111 32 getresgid32 sys_getresgid
+111 64 getresgid sys_getresgid
+112 32 setregid32 sys_setregid
+113 common recvmsg sys_recvmsg compat_sys_recvmsg
+114 common sendmsg sys_sendmsg compat_sys_sendmsg
+115 32 getgroups32 sys_getgroups
+116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+117 common getrusage sys_getrusage compat_sys_getrusage
+118 common getsockopt sys_getsockopt sys_getsockopt
+119 common getcwd sys_getcwd
+120 common readv sys_readv
+121 common writev sys_writev
+122 common settimeofday sys_settimeofday compat_sys_settimeofday
+123 32 fchown sys_fchown16
+123 64 fchown sys_fchown
+124 common fchmod sys_fchmod
+125 common recvfrom sys_recvfrom compat_sys_recvfrom
+126 32 setreuid sys_setreuid16
+126 64 setreuid sys_setreuid
+127 32 setregid sys_setregid16
+127 64 setregid sys_setregid
+128 common rename sys_rename
+129 common truncate sys_truncate compat_sys_truncate
+130 common ftruncate sys_ftruncate compat_sys_ftruncate
+131 common flock sys_flock
+132 common lstat64 sys_lstat64 compat_sys_lstat64
+133 common sendto sys_sendto
+134 common shutdown sys_shutdown
+135 common socketpair sys_socketpair
+136 common mkdir sys_mkdir
+137 common rmdir sys_rmdir
+138 32 utimes sys_utimes_time32
+138 64 utimes sys_utimes
+139 common stat64 sys_stat64 compat_sys_stat64
+140 common sendfile64 sys_sendfile64
+141 common getpeername sys_getpeername
+142 32 futex sys_futex_time32
+142 64 futex sys_futex
+143 common gettid sys_gettid
+144 common getrlimit sys_getrlimit compat_sys_getrlimit
+145 common setrlimit sys_setrlimit compat_sys_setrlimit
+146 common pivot_root sys_pivot_root
+147 common prctl sys_prctl
+148 common pciconfig_read sys_pciconfig_read
+149 common pciconfig_write sys_pciconfig_write
+150 common getsockname sys_getsockname
+151 common inotify_init sys_inotify_init
+152 common inotify_add_watch sys_inotify_add_watch
+153 common poll sys_poll
+154 common getdents64 sys_getdents64
+155 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+156 common inotify_rm_watch sys_inotify_rm_watch
+157 common statfs sys_statfs compat_sys_statfs
+158 common fstatfs sys_fstatfs compat_sys_fstatfs
+159 common umount sys_oldumount
+160 common sched_set_affinity sys_sched_setaffinity compat_sys_sched_setaffinity
+161 common sched_get_affinity sys_sched_getaffinity compat_sys_sched_getaffinity
+162 common getdomainname sys_getdomainname
+163 common setdomainname sys_setdomainname
+164 64 utrap_install sys_utrap_install
+165 common quotactl sys_quotactl
+166 common set_tid_address sys_set_tid_address
+167 common mount sys_mount
+168 common ustat sys_ustat compat_sys_ustat
+169 common setxattr sys_setxattr
+170 common lsetxattr sys_lsetxattr
+171 common fsetxattr sys_fsetxattr
+172 common getxattr sys_getxattr
+173 common lgetxattr sys_lgetxattr
+174 common getdents sys_getdents compat_sys_getdents
+175 common setsid sys_setsid
+176 common fchdir sys_fchdir
+177 common fgetxattr sys_fgetxattr
+178 common listxattr sys_listxattr
+179 common llistxattr sys_llistxattr
+180 common flistxattr sys_flistxattr
+181 common removexattr sys_removexattr
+182 common lremovexattr sys_lremovexattr
+183 32 sigpending sys_sigpending compat_sys_sigpending
+183 64 sigpending sys_nis_syscall
+184 common query_module sys_ni_syscall
+185 common setpgid sys_setpgid
+186 common fremovexattr sys_fremovexattr
+187 common tkill sys_tkill
+188 32 exit_group sys_exit_group sparc_exit_group
+188 64 exit_group sparc_exit_group
+189 common uname sys_newuname
+190 common init_module sys_init_module
+191 32 personality sys_personality sys_sparc64_personality
+191 64 personality sys_sparc64_personality
+192 32 remap_file_pages sys_sparc_remap_file_pages sys_remap_file_pages
+192 64 remap_file_pages sys_remap_file_pages
+193 common epoll_create sys_epoll_create
+194 common epoll_ctl sys_epoll_ctl
+195 common epoll_wait sys_epoll_wait
+196 common ioprio_set sys_ioprio_set
+197 common getppid sys_getppid
+198 32 sigaction sys_sparc_sigaction compat_sys_sparc_sigaction
+198 64 sigaction sys_nis_syscall
+199 common sgetmask sys_sgetmask
+200 common ssetmask sys_ssetmask
+201 32 sigsuspend sys_sigsuspend
+201 64 sigsuspend sys_nis_syscall
+202 common oldlstat sys_newlstat compat_sys_newlstat
+203 common uselib sys_uselib
+204 32 readdir sys_old_readdir compat_sys_old_readdir
+204 64 readdir sys_nis_syscall
+205 common readahead sys_readahead compat_sys_readahead
+206 common socketcall sys_socketcall compat_sys_socketcall
+207 common syslog sys_syslog
+208 common lookup_dcookie sys_ni_syscall
+209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
+210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+211 common tgkill sys_tgkill
+212 common waitpid sys_waitpid
+213 common swapoff sys_swapoff
+214 common sysinfo sys_sysinfo compat_sys_sysinfo
+215 32 ipc sys_ipc compat_sys_ipc
+215 64 ipc sys_sparc_ipc
+216 32 sigreturn sys_sigreturn sys32_sigreturn
+216 64 sigreturn sys_nis_syscall
+217 common clone sys_clone
+218 common ioprio_get sys_ioprio_get
+219 32 adjtimex sys_adjtimex_time32
+219 64 adjtimex sys_sparc_adjtimex
+220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+220 64 sigprocmask sys_nis_syscall
+221 common create_module sys_ni_syscall
+222 common delete_module sys_delete_module
+223 common get_kernel_syms sys_ni_syscall
+224 common getpgid sys_getpgid
+225 common bdflush sys_ni_syscall
+226 common sysfs sys_sysfs
+227 common afs_syscall sys_nis_syscall
+228 common setfsuid sys_setfsuid16
+229 common setfsgid sys_setfsgid16
+230 common _newselect sys_select compat_sys_select
+231 32 time sys_time32
+232 common splice sys_splice
+233 32 stime sys_stime32
+233 64 stime sys_stime
+234 common statfs64 sys_statfs64 compat_sys_statfs64
+235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+236 common _llseek sys_llseek
+237 common mlock sys_mlock
+238 common munlock sys_munlock
+239 common mlockall sys_mlockall
+240 common munlockall sys_munlockall
+241 common sched_setparam sys_sched_setparam
+242 common sched_getparam sys_sched_getparam
+243 common sched_setscheduler sys_sched_setscheduler
+244 common sched_getscheduler sys_sched_getscheduler
+245 common sched_yield sys_sched_yield
+246 common sched_get_priority_max sys_sched_get_priority_max
+247 common sched_get_priority_min sys_sched_get_priority_min
+248 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+248 64 sched_rr_get_interval sys_sched_rr_get_interval
+249 32 nanosleep sys_nanosleep_time32
+249 64 nanosleep sys_nanosleep
+250 32 mremap sys_mremap
+250 64 mremap sys_64_mremap
+251 common _sysctl sys_ni_syscall
+252 common getsid sys_getsid
+253 common fdatasync sys_fdatasync
+254 32 nfsservctl sys_ni_syscall sys_nis_syscall
+254 64 nfsservctl sys_nis_syscall
+255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+256 32 clock_settime sys_clock_settime32
+256 64 clock_settime sys_clock_settime
+257 32 clock_gettime sys_clock_gettime32
+257 64 clock_gettime sys_clock_gettime
+258 32 clock_getres sys_clock_getres_time32
+258 64 clock_getres sys_clock_getres
+259 32 clock_nanosleep sys_clock_nanosleep_time32
+259 64 clock_nanosleep sys_clock_nanosleep
+260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+262 32 timer_settime sys_timer_settime32
+262 64 timer_settime sys_timer_settime
+263 32 timer_gettime sys_timer_gettime32
+263 64 timer_gettime sys_timer_gettime
+264 common timer_getoverrun sys_timer_getoverrun
+265 common timer_delete sys_timer_delete
+266 common timer_create sys_timer_create compat_sys_timer_create
+# 267 was vserver
+267 common vserver sys_nis_syscall
+268 common io_setup sys_io_setup compat_sys_io_setup
+269 common io_destroy sys_io_destroy
+270 common io_submit sys_io_submit compat_sys_io_submit
+271 common io_cancel sys_io_cancel
+272 32 io_getevents sys_io_getevents_time32
+272 64 io_getevents sys_io_getevents
+273 common mq_open sys_mq_open compat_sys_mq_open
+274 common mq_unlink sys_mq_unlink
+275 32 mq_timedsend sys_mq_timedsend_time32
+275 64 mq_timedsend sys_mq_timedsend
+276 32 mq_timedreceive sys_mq_timedreceive_time32
+276 64 mq_timedreceive sys_mq_timedreceive
+277 common mq_notify sys_mq_notify compat_sys_mq_notify
+278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+279 common waitid sys_waitid compat_sys_waitid
+280 common tee sys_tee
+281 common add_key sys_add_key
+282 common request_key sys_request_key
+283 common keyctl sys_keyctl compat_sys_keyctl
+284 common openat sys_openat compat_sys_openat
+285 common mkdirat sys_mkdirat
+286 common mknodat sys_mknodat
+287 common fchownat sys_fchownat
+288 32 futimesat sys_futimesat_time32
+288 64 futimesat sys_futimesat
+289 common fstatat64 sys_fstatat64 compat_sys_fstatat64
+290 common unlinkat sys_unlinkat
+291 common renameat sys_renameat
+292 common linkat sys_linkat
+293 common symlinkat sys_symlinkat
+294 common readlinkat sys_readlinkat
+295 common fchmodat sys_fchmodat
+296 common faccessat sys_faccessat
+297 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+297 64 pselect6 sys_pselect6
+298 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+298 64 ppoll sys_ppoll
+299 common unshare sys_unshare
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+302 common migrate_pages sys_migrate_pages
+303 common mbind sys_mbind
+304 common get_mempolicy sys_get_mempolicy
+305 common set_mempolicy sys_set_mempolicy
+306 common kexec_load sys_kexec_load compat_sys_kexec_load
+307 common move_pages sys_move_pages
+308 common getcpu sys_getcpu
+309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+310 32 utimensat sys_utimensat_time32
+310 64 utimensat sys_utimensat
+311 common signalfd sys_signalfd compat_sys_signalfd
+312 common timerfd_create sys_timerfd_create
+313 common eventfd sys_eventfd
+314 common fallocate sys_fallocate compat_sys_fallocate
+315 32 timerfd_settime sys_timerfd_settime32
+315 64 timerfd_settime sys_timerfd_settime
+316 32 timerfd_gettime sys_timerfd_gettime32
+316 64 timerfd_gettime sys_timerfd_gettime
+317 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+318 common eventfd2 sys_eventfd2
+319 common epoll_create1 sys_epoll_create1
+320 common dup3 sys_dup3
+321 common pipe2 sys_pipe2
+322 common inotify_init1 sys_inotify_init1
+323 common accept4 sys_accept4
+324 common preadv sys_preadv compat_sys_preadv
+325 common pwritev sys_pwritev compat_sys_pwritev
+326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+327 common perf_event_open sys_perf_event_open
+328 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+328 64 recvmmsg sys_recvmmsg
+329 common fanotify_init sys_fanotify_init
+330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+331 common prlimit64 sys_prlimit64
+332 common name_to_handle_at sys_name_to_handle_at
+333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+334 32 clock_adjtime sys_clock_adjtime32
+334 64 clock_adjtime sys_sparc_clock_adjtime
+335 common syncfs sys_syncfs
+336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+337 common setns sys_setns
+338 common process_vm_readv sys_process_vm_readv
+339 common process_vm_writev sys_process_vm_writev
+340 32 kern_features sys_ni_syscall sys_kern_features
+340 64 kern_features sys_kern_features
+341 common kcmp sys_kcmp
+342 common finit_module sys_finit_module
+343 common sched_setattr sys_sched_setattr
+344 common sched_getattr sys_sched_getattr
+345 common renameat2 sys_renameat2
+346 common seccomp sys_seccomp
+347 common getrandom sys_getrandom
+348 common memfd_create sys_memfd_create
+349 common bpf sys_bpf
+350 32 execveat sys_execveat sys32_execveat
+350 64 execveat sys64_execveat
+351 common membarrier sys_membarrier
+352 common userfaultfd sys_userfaultfd
+353 common bind sys_bind
+354 common listen sys_listen
+355 common setsockopt sys_setsockopt sys_setsockopt
+356 common mlock2 sys_mlock2
+357 common copy_file_range sys_copy_file_range
+358 common preadv2 sys_preadv2 compat_sys_preadv2
+359 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+360 common statx sys_statx
+361 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+361 64 io_pgetevents sys_io_pgetevents
+362 common pkey_mprotect sys_pkey_mprotect
+363 common pkey_alloc sys_pkey_alloc
+364 common pkey_free sys_pkey_free
+365 common rseq sys_rseq
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+# 435 reserved for clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
diff --git a/tools/perf/arch/sparc/include/syscall_table.h b/tools/perf/arch/sparc/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/sparc/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index 87d057491343..02a1ca780a20 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -2,7 +2,6 @@ perf-util-y += util/
perf-test-y += tests/
ifdef SHELLCHECK
- SHELL_TESTS := entry/syscalls/syscalltbl.sh
TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log)
else
SHELL_TESTS :=
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index a6b6e0a9308a..a295a80ea078 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -1,28 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
-
-###
-# Syscall table generation
-#
-
-generated := $(OUTPUT)arch/x86/include/generated
-out := $(generated)/asm
-header := $(out)/syscalls_64.c
-header_32 := $(out)/syscalls_32.c
-sys := $(srctree)/tools/perf/arch/x86/entry/syscalls
-systbl := $(sys)/syscalltbl.sh
-
-# Create output directory if not already present
-$(shell [ -d '$(out)' ] || mkdir -p '$(out)')
-
-$(header): $(sys)/syscall_64.tbl $(systbl)
- $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
-
-$(header_32): $(sys)/syscall_32.tbl $(systbl)
- $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_32.tbl 'x86' > $@
-
-clean::
- $(call QUIET_CLEAN, x86) $(RM) -r $(header) $(generated)
-
-archheaders: $(header) $(header_32)
diff --git a/tools/perf/arch/x86/entry/syscalls/Kbuild b/tools/perf/arch/x86/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..84c6599b4ea6
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
+syscall-y += syscalls_64.h
diff --git a/tools/perf/arch/x86/entry/syscalls/Makefile.syscalls b/tools/perf/arch/x86/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..db3d5d6d4e56
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 += i386
+syscall_abis_64 +=
+
+syscalltbl = $(srctree)/tools/perf/arch/x86/entry/syscalls/syscall_%.tbl
diff --git a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
deleted file mode 100755
index 2b71f99933a5..000000000000
--- a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-arch="$2"
-
-syscall_macro() {
- nr="$1"
- name="$2"
-
- echo " [$nr] = \"$name\","
-}
-
-emit() {
- nr="$1"
- entry="$2"
-
- syscall_macro "$nr" "$entry"
-}
-
-echo "static const char *const syscalltbl_${arch}[] = {"
-
-sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
-grep '^[0-9]' "$in" | sort -n > $sorted_table
-
-max_nr=0
-# the params are: nr abi name entry compat
-# use _ for intentionally unused variables according to SC2034
-while read nr _ name _ _; do
- if [ $nr -ge 512 ] ; then # discard compat sycalls
- break
- fi
-
- emit "$nr" "$name"
- max_nr=$nr
-done < $sorted_table
-
-rm -f $sorted_table
-
-echo "};"
-
-echo "#define SYSCALLTBL_${arch}_MAX_ID ${max_nr}"
diff --git a/tools/perf/arch/x86/include/syscall_table.h b/tools/perf/arch/x86/include/syscall_table.h
new file mode 100644
index 000000000000..7ff51b783000
--- /dev/null
+++ b/tools/perf/arch/x86/include/syscall_table.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/bitsperlong.h>
+
+#if __BITS_PER_LONG == 64
+#include <asm/syscalls_64.h>
+#else
+#include <asm/syscalls_32.h>
+#endif
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 848327378694..06d7c0205b3d 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -15,6 +15,6 @@ perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
-perf-util-$(CONFIG_AUXTRACE) += archinsn.o
+perf-util-y += archinsn.o
perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index 366b44d0bb7e..00f645a0c18a 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -403,6 +403,10 @@ void iostat_prefix(struct evlist *evlist,
struct iio_root_port *rp = evlist->selected->priv;
if (rp) {
+ /*
+ * TODO: This is the incorrect format in JSON mode.
+ * See prepare_timestamp()
+ */
if (ts)
sprintf(prefix, "%6lu.%09lu%s%04x:%02x%s",
ts->tv_sec, ts->tv_nsec,
diff --git a/tools/perf/arch/xtensa/entry/syscalls/Kbuild b/tools/perf/arch/xtensa/entry/syscalls/Kbuild
new file mode 100644
index 000000000000..11707c481a24
--- /dev/null
+++ b/tools/perf/arch/xtensa/entry/syscalls/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscalls_32.h
diff --git a/tools/perf/arch/xtensa/entry/syscalls/Makefile.syscalls b/tools/perf/arch/xtensa/entry/syscalls/Makefile.syscalls
new file mode 100644
index 000000000000..d4aa2358460c
--- /dev/null
+++ b/tools/perf/arch/xtensa/entry/syscalls/Makefile.syscalls
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+syscall_abis_32 +=
+syscalltbl = $(srctree)/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
diff --git a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..37effc1b134e
--- /dev/null
+++ b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
@@ -0,0 +1,439 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for xtensa
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common spill sys_ni_syscall
+1 common xtensa sys_ni_syscall
+2 common available4 sys_ni_syscall
+3 common available5 sys_ni_syscall
+4 common available6 sys_ni_syscall
+5 common available7 sys_ni_syscall
+6 common available8 sys_ni_syscall
+7 common available9 sys_ni_syscall
+# File Operations
+8 common open sys_open
+9 common close sys_close
+10 common dup sys_dup
+11 common dup2 sys_dup2
+12 common read sys_read
+13 common write sys_write
+14 common select sys_select
+15 common lseek sys_lseek
+16 common poll sys_poll
+17 common _llseek sys_llseek
+18 common epoll_wait sys_epoll_wait
+19 common epoll_ctl sys_epoll_ctl
+20 common epoll_create sys_epoll_create
+21 common creat sys_creat
+22 common truncate sys_truncate
+23 common ftruncate sys_ftruncate
+24 common readv sys_readv
+25 common writev sys_writev
+26 common fsync sys_fsync
+27 common fdatasync sys_fdatasync
+28 common truncate64 sys_truncate64
+29 common ftruncate64 sys_ftruncate64
+30 common pread64 sys_pread64
+31 common pwrite64 sys_pwrite64
+32 common link sys_link
+33 common rename sys_rename
+34 common symlink sys_symlink
+35 common readlink sys_readlink
+36 common mknod sys_mknod
+37 common pipe sys_pipe
+38 common unlink sys_unlink
+39 common rmdir sys_rmdir
+40 common mkdir sys_mkdir
+41 common chdir sys_chdir
+42 common fchdir sys_fchdir
+43 common getcwd sys_getcwd
+44 common chmod sys_chmod
+45 common chown sys_chown
+46 common stat sys_newstat
+47 common stat64 sys_stat64
+48 common lchown sys_lchown
+49 common lstat sys_newlstat
+50 common lstat64 sys_lstat64
+51 common available51 sys_ni_syscall
+52 common fchmod sys_fchmod
+53 common fchown sys_fchown
+54 common fstat sys_newfstat
+55 common fstat64 sys_fstat64
+56 common flock sys_flock
+57 common access sys_access
+58 common umask sys_umask
+59 common getdents sys_getdents
+60 common getdents64 sys_getdents64
+61 common fcntl64 sys_fcntl64
+62 common fallocate sys_fallocate
+63 common fadvise64_64 xtensa_fadvise64_64
+64 common utime sys_utime32
+65 common utimes sys_utimes_time32
+66 common ioctl sys_ioctl
+67 common fcntl sys_fcntl
+68 common setxattr sys_setxattr
+69 common getxattr sys_getxattr
+70 common listxattr sys_listxattr
+71 common removexattr sys_removexattr
+72 common lsetxattr sys_lsetxattr
+73 common lgetxattr sys_lgetxattr
+74 common llistxattr sys_llistxattr
+75 common lremovexattr sys_lremovexattr
+76 common fsetxattr sys_fsetxattr
+77 common fgetxattr sys_fgetxattr
+78 common flistxattr sys_flistxattr
+79 common fremovexattr sys_fremovexattr
+# File Map / Shared Memory Operations
+80 common mmap2 sys_mmap_pgoff
+81 common munmap sys_munmap
+82 common mprotect sys_mprotect
+83 common brk sys_brk
+84 common mlock sys_mlock
+85 common munlock sys_munlock
+86 common mlockall sys_mlockall
+87 common munlockall sys_munlockall
+88 common mremap sys_mremap
+89 common msync sys_msync
+90 common mincore sys_mincore
+91 common madvise sys_madvise
+92 common shmget sys_shmget
+93 common shmat xtensa_shmat
+94 common shmctl sys_old_shmctl
+95 common shmdt sys_shmdt
+# Socket Operations
+96 common socket sys_socket
+97 common setsockopt sys_setsockopt
+98 common getsockopt sys_getsockopt
+99 common shutdown sys_shutdown
+100 common bind sys_bind
+101 common connect sys_connect
+102 common listen sys_listen
+103 common accept sys_accept
+104 common getsockname sys_getsockname
+105 common getpeername sys_getpeername
+106 common sendmsg sys_sendmsg
+107 common recvmsg sys_recvmsg
+108 common send sys_send
+109 common recv sys_recv
+110 common sendto sys_sendto
+111 common recvfrom sys_recvfrom
+112 common socketpair sys_socketpair
+113 common sendfile sys_sendfile
+114 common sendfile64 sys_sendfile64
+115 common sendmmsg sys_sendmmsg
+# Process Operations
+116 common clone sys_clone
+117 common execve sys_execve
+118 common exit sys_exit
+119 common exit_group sys_exit_group
+120 common getpid sys_getpid
+121 common wait4 sys_wait4
+122 common waitid sys_waitid
+123 common kill sys_kill
+124 common tkill sys_tkill
+125 common tgkill sys_tgkill
+126 common set_tid_address sys_set_tid_address
+127 common gettid sys_gettid
+128 common setsid sys_setsid
+129 common getsid sys_getsid
+130 common prctl sys_prctl
+131 common personality sys_personality
+132 common getpriority sys_getpriority
+133 common setpriority sys_setpriority
+134 common setitimer sys_setitimer
+135 common getitimer sys_getitimer
+136 common setuid sys_setuid
+137 common getuid sys_getuid
+138 common setgid sys_setgid
+139 common getgid sys_getgid
+140 common geteuid sys_geteuid
+141 common getegid sys_getegid
+142 common setreuid sys_setreuid
+143 common setregid sys_setregid
+144 common setresuid sys_setresuid
+145 common getresuid sys_getresuid
+146 common setresgid sys_setresgid
+147 common getresgid sys_getresgid
+148 common setpgid sys_setpgid
+149 common getpgid sys_getpgid
+150 common getppid sys_getppid
+151 common getpgrp sys_getpgrp
+# 152 was set_thread_area
+152 common reserved152 sys_ni_syscall
+# 153 was get_thread_area
+153 common reserved153 sys_ni_syscall
+154 common times sys_times
+155 common acct sys_acct
+156 common sched_setaffinity sys_sched_setaffinity
+157 common sched_getaffinity sys_sched_getaffinity
+158 common capget sys_capget
+159 common capset sys_capset
+160 common ptrace sys_ptrace
+161 common semtimedop sys_semtimedop_time32
+162 common semget sys_semget
+163 common semop sys_semop
+164 common semctl sys_old_semctl
+165 common available165 sys_ni_syscall
+166 common msgget sys_msgget
+167 common msgsnd sys_msgsnd
+168 common msgrcv sys_msgrcv
+169 common msgctl sys_old_msgctl
+170 common available170 sys_ni_syscall
+# File System
+171 common umount2 sys_umount
+172 common mount sys_mount
+173 common swapon sys_swapon
+174 common chroot sys_chroot
+175 common pivot_root sys_pivot_root
+176 common umount sys_oldumount
+177 common swapoff sys_swapoff
+178 common sync sys_sync
+179 common syncfs sys_syncfs
+180 common setfsuid sys_setfsuid
+181 common setfsgid sys_setfsgid
+182 common sysfs sys_sysfs
+183 common ustat sys_ustat
+184 common statfs sys_statfs
+185 common fstatfs sys_fstatfs
+186 common statfs64 sys_statfs64
+187 common fstatfs64 sys_fstatfs64
+# System
+188 common setrlimit sys_setrlimit
+189 common getrlimit sys_getrlimit
+190 common getrusage sys_getrusage
+191 common futex sys_futex_time32
+192 common gettimeofday sys_gettimeofday
+193 common settimeofday sys_settimeofday
+194 common adjtimex sys_adjtimex_time32
+195 common nanosleep sys_nanosleep_time32
+196 common getgroups sys_getgroups
+197 common setgroups sys_setgroups
+198 common sethostname sys_sethostname
+199 common setdomainname sys_setdomainname
+200 common syslog sys_syslog
+201 common vhangup sys_vhangup
+202 common uselib sys_uselib
+203 common reboot sys_reboot
+204 common quotactl sys_quotactl
+# 205 was old nfsservctl
+205 common nfsservctl sys_ni_syscall
+206 common _sysctl sys_ni_syscall
+207 common bdflush sys_ni_syscall
+208 common uname sys_newuname
+209 common sysinfo sys_sysinfo
+210 common init_module sys_init_module
+211 common delete_module sys_delete_module
+212 common sched_setparam sys_sched_setparam
+213 common sched_getparam sys_sched_getparam
+214 common sched_setscheduler sys_sched_setscheduler
+215 common sched_getscheduler sys_sched_getscheduler
+216 common sched_get_priority_max sys_sched_get_priority_max
+217 common sched_get_priority_min sys_sched_get_priority_min
+218 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+219 common sched_yield sys_sched_yield
+222 common available222 sys_ni_syscall
+# Signal Handling
+223 common restart_syscall sys_restart_syscall
+224 common sigaltstack sys_sigaltstack
+225 common rt_sigreturn xtensa_rt_sigreturn
+226 common rt_sigaction sys_rt_sigaction
+227 common rt_sigprocmask sys_rt_sigprocmask
+228 common rt_sigpending sys_rt_sigpending
+229 common rt_sigtimedwait sys_rt_sigtimedwait_time32
+230 common rt_sigqueueinfo sys_rt_sigqueueinfo
+231 common rt_sigsuspend sys_rt_sigsuspend
+# Message
+232 common mq_open sys_mq_open
+233 common mq_unlink sys_mq_unlink
+234 common mq_timedsend sys_mq_timedsend_time32
+235 common mq_timedreceive sys_mq_timedreceive_time32
+236 common mq_notify sys_mq_notify
+237 common mq_getsetattr sys_mq_getsetattr
+238 common available238 sys_ni_syscall
+239 common io_setup sys_io_setup
+# IO
+240 common io_destroy sys_io_destroy
+241 common io_submit sys_io_submit
+242 common io_getevents sys_io_getevents_time32
+243 common io_cancel sys_io_cancel
+244 common clock_settime sys_clock_settime32
+245 common clock_gettime sys_clock_gettime32
+246 common clock_getres sys_clock_getres_time32
+247 common clock_nanosleep sys_clock_nanosleep_time32
+# Timer
+248 common timer_create sys_timer_create
+249 common timer_delete sys_timer_delete
+250 common timer_settime sys_timer_settime32
+251 common timer_gettime sys_timer_gettime32
+252 common timer_getoverrun sys_timer_getoverrun
+# System
+253 common reserved253 sys_ni_syscall
+254 common lookup_dcookie sys_ni_syscall
+255 common available255 sys_ni_syscall
+256 common add_key sys_add_key
+257 common request_key sys_request_key
+258 common keyctl sys_keyctl
+259 common available259 sys_ni_syscall
+260 common readahead sys_readahead
+261 common remap_file_pages sys_remap_file_pages
+262 common migrate_pages sys_migrate_pages
+263 common mbind sys_mbind
+264 common get_mempolicy sys_get_mempolicy
+265 common set_mempolicy sys_set_mempolicy
+266 common unshare sys_unshare
+267 common move_pages sys_move_pages
+268 common splice sys_splice
+269 common tee sys_tee
+270 common vmsplice sys_vmsplice
+271 common available271 sys_ni_syscall
+272 common pselect6 sys_pselect6_time32
+273 common ppoll sys_ppoll_time32
+274 common epoll_pwait sys_epoll_pwait
+275 common epoll_create1 sys_epoll_create1
+276 common inotify_init sys_inotify_init
+277 common inotify_add_watch sys_inotify_add_watch
+278 common inotify_rm_watch sys_inotify_rm_watch
+279 common inotify_init1 sys_inotify_init1
+280 common getcpu sys_getcpu
+281 common kexec_load sys_ni_syscall
+282 common ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get
+284 common set_robust_list sys_set_robust_list
+285 common get_robust_list sys_get_robust_list
+286 common available286 sys_ni_syscall
+287 common available287 sys_ni_syscall
+# Relative File Operations
+288 common openat sys_openat
+289 common mkdirat sys_mkdirat
+290 common mknodat sys_mknodat
+291 common unlinkat sys_unlinkat
+292 common renameat sys_renameat
+293 common linkat sys_linkat
+294 common symlinkat sys_symlinkat
+295 common readlinkat sys_readlinkat
+296 common utimensat sys_utimensat_time32
+297 common fchownat sys_fchownat
+298 common futimesat sys_futimesat_time32
+299 common fstatat64 sys_fstatat64
+300 common fchmodat sys_fchmodat
+301 common faccessat sys_faccessat
+302 common available302 sys_ni_syscall
+303 common available303 sys_ni_syscall
+304 common signalfd sys_signalfd
+# 305 was timerfd
+306 common eventfd sys_eventfd
+307 common recvmmsg sys_recvmmsg_time32
+308 common setns sys_setns
+309 common signalfd4 sys_signalfd4
+310 common dup3 sys_dup3
+311 common pipe2 sys_pipe2
+312 common timerfd_create sys_timerfd_create
+313 common timerfd_settime sys_timerfd_settime32
+314 common timerfd_gettime sys_timerfd_gettime32
+315 common available315 sys_ni_syscall
+316 common eventfd2 sys_eventfd2
+317 common preadv sys_preadv
+318 common pwritev sys_pwritev
+319 common available319 sys_ni_syscall
+320 common fanotify_init sys_fanotify_init
+321 common fanotify_mark sys_fanotify_mark
+322 common process_vm_readv sys_process_vm_readv
+323 common process_vm_writev sys_process_vm_writev
+324 common name_to_handle_at sys_name_to_handle_at
+325 common open_by_handle_at sys_open_by_handle_at
+326 common sync_file_range2 sys_sync_file_range2
+327 common perf_event_open sys_perf_event_open
+328 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+329 common clock_adjtime sys_clock_adjtime32
+330 common prlimit64 sys_prlimit64
+331 common kcmp sys_kcmp
+332 common finit_module sys_finit_module
+333 common accept4 sys_accept4
+334 common sched_setattr sys_sched_setattr
+335 common sched_getattr sys_sched_getattr
+336 common renameat2 sys_renameat2
+337 common seccomp sys_seccomp
+338 common getrandom sys_getrandom
+339 common memfd_create sys_memfd_create
+340 common bpf sys_bpf
+341 common execveat sys_execveat
+342 common userfaultfd sys_userfaultfd
+343 common membarrier sys_membarrier
+344 common mlock2 sys_mlock2
+345 common copy_file_range sys_copy_file_range
+346 common preadv2 sys_preadv2
+347 common pwritev2 sys_pwritev2
+348 common pkey_mprotect sys_pkey_mprotect
+349 common pkey_alloc sys_pkey_alloc
+350 common pkey_free sys_pkey_free
+351 common statx sys_statx
+352 common rseq sys_rseq
+# 353 through 402 are unassigned to sync up with generic numbers
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
diff --git a/tools/perf/arch/xtensa/include/syscall_table.h b/tools/perf/arch/xtensa/include/syscall_table.h
new file mode 100644
index 000000000000..4c942821662d
--- /dev/null
+++ b/tools/perf/arch/xtensa/include/syscall_table.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/syscalls_32.h>
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index ef5c4257844d..20fe4f72b4af 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -420,7 +420,12 @@ static int cmpworker(const void *p1, const void *p2)
struct worker *w1 = (struct worker *) p1;
struct worker *w2 = (struct worker *) p2;
- return w1->tid > w2->tid;
+
+ if (w1->tid > w2->tid)
+ return 1;
+ if (w1->tid < w2->tid)
+ return -1;
+ return 0;
}
int bench_epoll_wait(int argc, const char **argv)
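
The cmpworker() change above replaces a boolean comparison with a proper three-way comparator. A minimal standalone sketch (not part of the patch, arbitrary values) of why this matters: qsort() expects a negative, zero, or positive result, and "w1->tid > w2->tid" can never report "less than", so the comparator contract is violated and the sort may come out wrong.

#include <stdio.h>
#include <stdlib.h>

/* Standalone sketch, not part of the patch: qsort() needs negative/zero/
 * positive results, but "a > b" only ever yields 0 or 1, so it can never
 * say "less than" and the ordering may come out wrong. */
static int cmp_broken(const void *p1, const void *p2)
{
	return *(const int *)p1 > *(const int *)p2;	/* 0 or 1 only */
}

static int cmp_fixed(const void *p1, const void *p2)
{
	int a = *(const int *)p1, b = *(const int *)p2;

	if (a > b)
		return 1;
	if (a < b)
		return -1;
	return 0;
}

int main(void)
{
	int v[] = { 3, 1, 2 };

	(void)cmp_broken;			/* swap in to compare behaviour */
	qsort(v, 3, sizeof(v[0]), cmp_fixed);
	printf("%d %d %d\n", v[0], v[1], v[2]);	/* 1 2 3 */
	return 0;
}
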
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
index a759eb2328be..f55c07e4be94 100644
--- a/tools/perf/bench/inject-buildid.c
+++ b/tools/perf/bench/inject-buildid.c
@@ -52,7 +52,7 @@ struct bench_dso {
static int nr_dsos;
static struct bench_dso *dsos;
-extern int cmd_inject(int argc, const char *argv[]);
+extern int main(int argc, const char **argv);
static const struct option options[] = {
OPT_UINTEGER('i', "iterations", &iterations,
@@ -294,7 +294,7 @@ static int setup_injection(struct bench_data *data, bool build_id_all)
if (data->pid == 0) {
const char **inject_argv;
- int inject_argc = 2;
+ int inject_argc = 3;
close(data->input_pipe[1]);
close(data->output_pipe[0]);
@@ -318,15 +318,16 @@ static int setup_injection(struct bench_data *data, bool build_id_all)
if (inject_argv == NULL)
exit(1);
- inject_argv[0] = strdup("inject");
- inject_argv[1] = strdup("-b");
+ inject_argv[0] = strdup("perf");
+ inject_argv[1] = strdup("inject");
+ inject_argv[2] = strdup("-b");
if (build_id_all)
- inject_argv[2] = strdup("--buildid-all");
+ inject_argv[3] = strdup("--buildid-all");
/* signal that we're ready to go */
close(ready_pipe[1]);
- cmd_inject(inject_argc, inject_argv);
+ main(inject_argc, inject_argv);
exit(0);
}
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index bb87e6e7687d..836ae0122dab 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -7,6 +7,7 @@
* a histogram of results, along various sorting keys.
*/
#include "builtin.h"
+#include "perf.h"
#include "util/color.h"
#include <linux/list.h>
diff --git a/tools/perf/builtin-check.c b/tools/perf/builtin-check.c
index 2346536a5ee1..61a11a9b4e75 100644
--- a/tools/perf/builtin-check.c
+++ b/tools/perf/builtin-check.c
@@ -31,7 +31,6 @@ struct feature_status supported_features[] = {
FEATURE_STATUS("dwarf_getlocations", HAVE_LIBDW_SUPPORT),
FEATURE_STATUS("dwarf-unwind", HAVE_DWARF_UNWIND_SUPPORT),
FEATURE_STATUS("auxtrace", HAVE_AUXTRACE_SUPPORT),
- FEATURE_STATUS("libaudit", HAVE_LIBAUDIT_SUPPORT),
FEATURE_STATUS("libbfd", HAVE_LIBBFD_SUPPORT),
FEATURE_STATUS("libcapstone", HAVE_LIBCAPSTONE_SUPPORT),
FEATURE_STATUS("libcrypto", HAVE_LIBCRYPTO_SUPPORT),
@@ -47,7 +46,6 @@ struct feature_status supported_features[] = {
FEATURE_STATUS("libunwind", HAVE_LIBUNWIND_SUPPORT),
FEATURE_STATUS("lzma", HAVE_LZMA_SUPPORT),
FEATURE_STATUS("numa_num_possible_cpus", HAVE_LIBNUMA_SUPPORT),
- FEATURE_STATUS("syscall_table", HAVE_SYSCALL_TABLE_SUPPORT),
FEATURE_STATUS("zlib", HAVE_ZLIB_SUPPORT),
FEATURE_STATUS("zstd", HAVE_ZSTD_SUPPORT),
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index 2e8363778935..45b5312fbe83 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -154,6 +154,44 @@ static int parse_config_arg(char *arg, char **var, char **value)
return 0;
}
+int perf_config__set_variable(const char *var, const char *value)
+{
+ char path[PATH_MAX];
+ char *user_config = mkpath(path, sizeof(path), "%s/.perfconfig", getenv("HOME"));
+ const char *config_filename;
+ struct perf_config_set *set;
+ int ret = -1;
+
+ if (use_system_config)
+ config_exclusive_filename = perf_etc_perfconfig();
+ else if (use_user_config)
+ config_exclusive_filename = user_config;
+
+ if (!config_exclusive_filename)
+ config_filename = user_config;
+ else
+ config_filename = config_exclusive_filename;
+
+ set = perf_config_set__new();
+ if (!set)
+ goto out_err;
+
+ if (perf_config_set__collect(set, config_filename, var, value) < 0) {
+ pr_err("Failed to add '%s=%s'\n", var, value);
+ goto out_err;
+ }
+
+ if (set_config(set, config_filename) < 0) {
+ pr_err("Failed to set the configs on %s\n", config_filename);
+ goto out_err;
+ }
+
+ ret = 0;
+out_err:
+ perf_config_set__delete(set);
+ return ret;
+}
+
int cmd_config(int argc, const char **argv)
{
int i, ret = -1;
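
The new perf_config__set_variable() above gives other builtins a way to persist a 'var=value' pair into the user or system perfconfig file, reusing the collect/set_config machinery of 'perf config'. A hypothetical caller could look like this (the variable name is illustrative only, not something this patch introduces):

/* Hypothetical caller sketch; the variable name is illustrative only. */
if (perf_config__set_variable("report.sort_order", "srcline") < 0)
	pr_err("failed to update the perfconfig file\n");
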
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 82fb7773e03e..ae490d58af92 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -6,6 +6,7 @@
* DSOs and symbol information, sort them and produce a diff.
*/
#include "builtin.h"
+#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
@@ -1019,12 +1020,12 @@ static int process_base_stream(struct data__file *data_base,
continue;
es_base = evsel_streams__entry(data_base->evlist_streams,
- evsel_base->core.idx);
+ evsel_base);
if (!es_base)
return -1;
es_pair = evsel_streams__entry(data_pair->evlist_streams,
- evsel_pair->core.idx);
+ evsel_pair);
if (!es_pair)
return -1;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index a56cf8b0a7d4..cfd770ec7286 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -43,6 +43,8 @@
static volatile sig_atomic_t workload_exec_errno;
static volatile sig_atomic_t done;
+static struct stats latency_stats; /* for tracepoints */
+
static void sig_handler(int sig __maybe_unused)
{
done = true;
@@ -726,9 +728,11 @@ out:
return (done && !workload_exec_errno) ? 0 : -1;
}
-static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,
- bool use_nsec)
+static void make_histogram(struct perf_ftrace *ftrace, int buckets[],
+ char *buf, size_t len, char *linebuf)
{
+ int min_latency = ftrace->min_latency;
+ int max_latency = ftrace->max_latency;
char *p, *q;
char *unit;
double num;
@@ -774,16 +778,34 @@ static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,
if (!unit || strncmp(unit, " us", 3))
goto next;
- if (use_nsec)
+ if (ftrace->use_nsec)
num *= 1000;
- i = log2(num);
- if (i < 0)
- i = 0;
+ i = 0;
+ if (num < min_latency)
+ goto do_inc;
+
+ num -= min_latency;
+
+ if (!ftrace->bucket_range) {
+ i = log2(num);
+ if (i < 0)
+ i = 0;
+ } else {
+ // Less than 1 unit (ms or ns), or, in the future, less
+ // than the min latency desired.
+ if (num > 0) // 1st entry: [ 1 unit .. bucket_range units ]
+ i = num / ftrace->bucket_range + 1;
+ if (num >= max_latency - min_latency)
+ i = NUM_BUCKET -1;
+ }
if (i >= NUM_BUCKET)
i = NUM_BUCKET - 1;
+ num += min_latency;
+do_inc:
buckets[i]++;
+ update_stats(&latency_stats, num);
next:
/* empty the line buffer for the next output */
@@ -794,8 +816,10 @@ next:
strcat(linebuf, p);
}
-static void display_histogram(int buckets[], bool use_nsec)
+static void display_histogram(struct perf_ftrace *ftrace, int buckets[])
{
+ int min_latency = ftrace->min_latency;
+ bool use_nsec = ftrace->use_nsec;
int i;
int total = 0;
int bar_total = 46; /* to fit in 80 column */
@@ -814,30 +838,74 @@ static void display_histogram(int buckets[], bool use_nsec)
" DURATION ", "COUNT", bar_total, "GRAPH");
bar_len = buckets[0] * bar_total / total;
- printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
- 0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
+
+ printf(" %4d - %4d %s | %10d | %.*s%*s |\n",
+ 0, min_latency ?: 1, use_nsec ? "ns" : "us",
+ buckets[0], bar_len, bar, bar_total - bar_len, "");
for (i = 1; i < NUM_BUCKET - 1; i++) {
- int start = (1 << (i - 1));
- int stop = 1 << i;
+ unsigned int start, stop;
const char *unit = use_nsec ? "ns" : "us";
- if (start >= 1024) {
- start >>= 10;
- stop >>= 10;
- unit = use_nsec ? "us" : "ms";
+ if (!ftrace->bucket_range) {
+ start = (1 << (i - 1));
+ stop = 1 << i;
+
+ if (start >= 1024) {
+ start >>= 10;
+ stop >>= 10;
+ unit = use_nsec ? "us" : "ms";
+ }
+ } else {
+ start = (i - 1) * ftrace->bucket_range + min_latency;
+ stop = i * ftrace->bucket_range + min_latency;
+
+ if (start >= ftrace->max_latency)
+ break;
+ if (stop > ftrace->max_latency)
+ stop = ftrace->max_latency;
+
+ if (start >= 1000) {
+ double dstart = start / 1000.0,
+ dstop = stop / 1000.0;
+ printf(" %4.2f - %-4.2f", dstart, dstop);
+ unit = use_nsec ? "us" : "ms";
+ goto print_bucket_info;
+ }
}
+
+ printf(" %4d - %4d", start, stop);
+print_bucket_info:
bar_len = buckets[i] * bar_total / total;
- printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
- start, stop, unit, buckets[i], bar_len, bar,
+ printf(" %s | %10d | %.*s%*s |\n", unit, buckets[i], bar_len, bar,
bar_total - bar_len, "");
}
bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;
- printf(" %4d - %-4s %s | %10d | %.*s%*s |\n",
- 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1],
+ if (!ftrace->bucket_range) {
+ printf(" %4d - %-4s %s", 1, "...", use_nsec ? "ms" : "s ");
+ } else {
+ unsigned int upper_outlier = (NUM_BUCKET - 2) * ftrace->bucket_range + min_latency;
+ if (upper_outlier > ftrace->max_latency)
+ upper_outlier = ftrace->max_latency;
+
+ if (upper_outlier >= 1000) {
+ double dstart = upper_outlier / 1000.0;
+
+ printf(" %4.2f - %-4s %s", dstart, "...", use_nsec ? "us" : "ms");
+ } else {
+ printf(" %4d - %4s %s", upper_outlier, "...", use_nsec ? "ns" : "us");
+ }
+ }
+ printf(" | %10d | %.*s%*s |\n", buckets[NUM_BUCKET - 1],
bar_len, bar, bar_total - bar_len, "");
+ printf("\n# statistics (in %s)\n", ftrace->use_nsec ? "nsec" : "usec");
+ printf(" total time: %20.0f\n", latency_stats.mean * latency_stats.n);
+ printf(" avg time: %20.0f\n", latency_stats.mean);
+ printf(" max time: %20"PRIu64"\n", latency_stats.max);
+ printf(" min time: %20"PRIu64"\n", latency_stats.min);
+ printf(" count: %20.0f\n", latency_stats.n);
}
static int prepare_func_latency(struct perf_ftrace *ftrace)
@@ -876,6 +944,8 @@ static int prepare_func_latency(struct perf_ftrace *ftrace)
if (fd < 0)
pr_err("failed to open trace_pipe\n");
+ init_stats(&latency_stats);
+
put_tracing_file(trace_file);
return fd;
}
@@ -905,7 +975,7 @@ static int stop_func_latency(struct perf_ftrace *ftrace)
static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
{
if (ftrace->target.use_bpf)
- return perf_ftrace__latency_read_bpf(ftrace, buckets);
+ return perf_ftrace__latency_read_bpf(ftrace, buckets, &latency_stats);
return 0;
}
@@ -951,7 +1021,7 @@ static int __cmd_latency(struct perf_ftrace *ftrace)
if (n < 0)
break;
- make_histogram(buckets, buf, n, line, ftrace->use_nsec);
+ make_histogram(ftrace, buckets, buf, n, line);
}
}
@@ -968,12 +1038,12 @@ static int __cmd_latency(struct perf_ftrace *ftrace)
int n = read(trace_fd, buf, sizeof(buf) - 1);
if (n <= 0)
break;
- make_histogram(buckets, buf, n, line, ftrace->use_nsec);
+ make_histogram(ftrace, buckets, buf, n, line);
}
read_func_latency(ftrace, buckets);
- display_histogram(buckets, ftrace->use_nsec);
+ display_histogram(ftrace, buckets);
out:
close(trace_fd);
@@ -996,6 +1066,7 @@ static int prepare_func_profile(struct perf_ftrace *ftrace)
{
ftrace->tracer = "function_graph";
ftrace->graph_tail = 1;
+ ftrace->graph_verbose = 0;
ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
if (ftrace->profile_hash == NULL)
@@ -1558,6 +1629,12 @@ int cmd_ftrace(int argc, const char **argv)
#endif
OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
"Use nano-second histogram"),
+ OPT_UINTEGER(0, "bucket-range", &ftrace.bucket_range,
+ "Bucket range in ms or ns (-n/--use-nsec), default is log2() mode"),
+ OPT_UINTEGER(0, "min-latency", &ftrace.min_latency,
+ "Minimum latency (1st bucket). Works only with --bucket-range."),
+ OPT_UINTEGER(0, "max-latency", &ftrace.max_latency,
+ "Maximum latency (last bucket). Works only with --bucket-range and total buckets less than 22."),
OPT_PARENT(common_options),
};
const struct option profile_options[] = {
@@ -1576,6 +1653,9 @@ int cmd_ftrace(int argc, const char **argv)
OPT_CALLBACK('s', "sort", &profile_sort, "key",
"Sort result by key: total (default), avg, max, count, name.",
parse_sort_key),
+ OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
+ "Graph tracer options, available options: nosleep-time,noirqs,thresh=<n>,depth=<n>",
+ parse_graph_tracer_opts),
OPT_PARENT(common_options),
};
const struct option *options = ftrace_options;
@@ -1653,6 +1733,29 @@ int cmd_ftrace(int argc, const char **argv)
ret = -EINVAL;
goto out_delete_filters;
}
+ if (!ftrace.bucket_range && ftrace.min_latency) {
+ pr_err("--min-latency works only with --bucket-range\n");
+ parse_options_usage(ftrace_usage, options,
+ "min-latency", /*short_opt=*/false);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ if (ftrace.bucket_range && !ftrace.min_latency) {
+ /* default min latency should be the bucket range */
+ ftrace.min_latency = ftrace.bucket_range;
+ }
+ if (!ftrace.bucket_range && ftrace.max_latency) {
+ pr_err("--max-latency works only with --bucket-range\n");
+ parse_options_usage(ftrace_usage, options,
+ "max-latency", /*short_opt=*/false);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ if (ftrace.bucket_range && !ftrace.max_latency) {
+ /* default max latency should depend on bucket range and num_buckets */
+ ftrace.max_latency = (NUM_BUCKET - 2) * ftrace.bucket_range +
+ ftrace.min_latency;
+ }
cmd_func = __cmd_latency;
break;
case PERF_FTRACE_PROFILE:
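
Taken together, the make_histogram() and display_histogram() hunks above add an optional linear bucketing mode bounded by min_latency and max_latency, falling back to the existing log2() mode when no bucket range is given. Below is a minimal standalone sketch of the linear bucket selection, assuming NUM_BUCKET is 22 as the --max-latency help text implies; it is not the exact kernel code and omits the log2() path and the latency_stats update.

#include <stdio.h>

#define NUM_BUCKET 22

/* Sketch of the linear bucket selection added above (assumptions: NUM_BUCKET
 * is 22 and bucket_range is non-zero). */
static int pick_bucket(double num, unsigned int min_latency,
		       unsigned int max_latency, unsigned int bucket_range)
{
	int i = 0;

	if (num < min_latency)
		return 0;			/* first bucket: below the minimum */

	num -= min_latency;
	if (num > 0)				/* first linear bucket starts past the minimum */
		i = num / bucket_range + 1;
	if (num >= max_latency - min_latency || i >= NUM_BUCKET)
		i = NUM_BUCKET - 1;		/* outlier bucket */

	return i;
}

int main(void)
{
	/* e.g. --bucket-range 100 --min-latency 100 --max-latency 2100 (usecs) */
	printf("%d %d %d\n",
	       pick_bucket(50, 100, 2100, 100),
	       pick_bucket(150, 100, 2100, 100),
	       pick_bucket(5000, 100, 2100, 100));
	return 0;
}

With those example options, a 50us sample lands in bucket 0, a 150us sample in the first linear bucket, and anything at or above the maximum latency in the outlier bucket.
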
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 0854d3cd9f6a..7be6fb6df595 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -447,9 +447,7 @@ int cmd_help(int argc, const char **argv)
#ifdef HAVE_LIBELF_SUPPORT
"probe",
#endif
-#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
"trace",
-#endif
NULL };
const char *builtin_help_usage[] = {
"perf help [--all] [--man|--web|--info] [command]",
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index d6989195a061..11e49cafa3af 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -2367,10 +2367,10 @@ int cmd_inject(int argc, const char **argv)
};
int ret;
const char *known_build_ids = NULL;
- bool build_ids;
- bool build_id_all;
- bool mmap2_build_ids;
- bool mmap2_build_id_all;
+ bool build_ids = false;
+ bool build_id_all = false;
+ bool mmap2_build_ids = false;
+ bool mmap2_build_id_all = false;
struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &build_ids,
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 4d8d94146f8d..67fb1946ef13 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -761,6 +761,7 @@ static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
};
struct trace_seq seq;
char *str, *pos = NULL;
+ const struct tep_event *tp_format;
if (nr_gfps) {
struct gfp_flag key = {
@@ -772,8 +773,9 @@ static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
}
trace_seq_init(&seq);
- tep_print_event(evsel->tp_format->tep,
- &seq, &record, "%s", TEP_PRINT_INFO);
+ tp_format = evsel__tp_format(evsel);
+ if (tp_format)
+ tep_print_event(tp_format->tep, &seq, &record, "%s", TEP_PRINT_INFO);
str = strtok_r(seq.buffer, " ", &pos);
while (str) {
@@ -2012,13 +2014,13 @@ int cmd_kmem(int argc, const char **argv)
if (kmem_page) {
struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");
+ const struct tep_event *tp_format = evsel ? evsel__tp_format(evsel) : NULL;
- if (evsel == NULL) {
+ if (tp_format == NULL) {
pr_err(errmsg, "page", "page");
goto out_delete;
}
-
- kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
+ kmem_page_size = tep_get_page_size(tp_format->tep);
symbol_conf.use_callchain = true;
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 274568d712d1..67fd2b006b0b 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -615,67 +615,6 @@ static const char *get_filename_for_perf_kvm(void)
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
-void exit_event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
-}
-
-bool kvm_exit_event(struct evsel *evsel)
-{
- return evsel__name_is(evsel, kvm_exit_trace);
-}
-
-bool exit_event_begin(struct evsel *evsel,
- struct perf_sample *sample, struct event_key *key)
-{
- if (kvm_exit_event(evsel)) {
- exit_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-
-bool kvm_entry_event(struct evsel *evsel)
-{
- return evsel__name_is(evsel, kvm_entry_trace);
-}
-
-bool exit_event_end(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return kvm_entry_event(evsel);
-}
-
-static const char *get_exit_reason(struct perf_kvm_stat *kvm,
- struct exit_reasons_table *tbl,
- u64 exit_code)
-{
- while (tbl->reason != NULL) {
- if (tbl->exit_code == exit_code)
- return tbl->reason;
- tbl++;
- }
-
- pr_err("unknown kvm exit code:%lld on %s\n",
- (unsigned long long)exit_code, kvm->exit_reasons_isa);
- return "UNKNOWN";
-}
-
-void exit_event_decode_key(struct perf_kvm_stat *kvm,
- struct event_key *key,
- char *decode)
-{
- const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
- key->key);
-
- scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
-}
-
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 8234410cba4c..c41a68d073de 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -6,6 +6,7 @@
*/
#include "builtin.h"
+#include "perf.h"
#include "util/data.h"
#include "util/evlist.h"
@@ -1103,7 +1104,8 @@ static char *evsel__softirq_name(struct evsel *evsel, u64 num)
char *name = NULL;
bool found = false;
struct tep_print_flag_sym *sym = NULL;
- struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_print_arg *args = tp_format ? tp_format->print_fmt.args : NULL;
if ((args == NULL) || (args->next == NULL))
return NULL;
@@ -1846,7 +1848,7 @@ static void process_skipped_events(struct perf_kwork *kwork,
}
}
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+static struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
struct kwork_class *class,
struct kwork_work *key)
{
@@ -2344,6 +2346,7 @@ int cmd_kwork(int argc, const char **argv)
.all_runtime = 0,
.all_count = 0,
.nr_skipped_events = { 0 },
+ .add_work = perf_kwork_add_work,
};
static const char default_report_sort_order[] = "runtime, max, count";
static const char default_latency_sort_order[] = "avg, max, count";
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 062e2b56a2ab..5d405cd8e696 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -46,15 +46,6 @@
static struct perf_session *session;
static struct target target;
-/* based on kernel/lockdep.c */
-#define LOCKHASH_BITS 12
-#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
-
-static struct hlist_head *lockhash_table;
-
-#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
-#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-
static struct rb_root thread_stats;
static bool combine_locks;
@@ -67,24 +58,13 @@ static unsigned long bpf_map_entries = MAX_ENTRIES;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;
-static LIST_HEAD(callstack_filters);
static const char *output_name = NULL;
static FILE *lock_output;
-struct callstack_filter {
- struct list_head list;
- char name[];
-};
-
static struct lock_filter filters;
static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
-static bool needs_callstack(void)
-{
- return !list_empty(&callstack_filters);
-}
-
static struct thread_stat *thread_stat_find(u32 tid)
{
struct rb_node *node;
@@ -477,93 +457,6 @@ static struct lock_stat *pop_from_result(void)
return container_of(node, struct lock_stat, rb);
}
-struct lock_stat *lock_stat_find(u64 addr)
-{
- struct hlist_head *entry = lockhashentry(addr);
- struct lock_stat *ret;
-
- hlist_for_each_entry(ret, entry, hash_entry) {
- if (ret->addr == addr)
- return ret;
- }
- return NULL;
-}
-
-struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
-{
- struct hlist_head *entry = lockhashentry(addr);
- struct lock_stat *ret, *new;
-
- hlist_for_each_entry(ret, entry, hash_entry) {
- if (ret->addr == addr)
- return ret;
- }
-
- new = zalloc(sizeof(struct lock_stat));
- if (!new)
- goto alloc_failed;
-
- new->addr = addr;
- new->name = strdup(name);
- if (!new->name) {
- free(new);
- goto alloc_failed;
- }
-
- new->flags = flags;
- new->wait_time_min = ULLONG_MAX;
-
- hlist_add_head(&new->hash_entry, entry);
- return new;
-
-alloc_failed:
- pr_err("memory allocation failed\n");
- return NULL;
-}
-
-bool match_callstack_filter(struct machine *machine, u64 *callstack)
-{
- struct map *kmap;
- struct symbol *sym;
- u64 ip;
- const char *arch = perf_env__arch(machine->env);
-
- if (list_empty(&callstack_filters))
- return true;
-
- for (int i = 0; i < max_stack_depth; i++) {
- struct callstack_filter *filter;
-
- /*
- * In powerpc, the callchain saved by kernel always includes
- * first three entries as the NIP (next instruction pointer),
- * LR (link register), and the contents of LR save area in the
- * second stack frame. In certain scenarios its possible to have
- * invalid kernel instruction addresses in either LR or the second
- * stack frame's LR. In that case, kernel will store that address as
- * zero.
- *
- * The below check will continue to look into callstack,
- * incase first or second callstack index entry has 0
- * address for powerpc.
- */
- if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
- (i != 1 && i != 2))))
- break;
-
- ip = callstack[i];
- sym = machine__find_kernel_symbol(machine, ip, &kmap);
- if (sym == NULL)
- continue;
-
- list_for_each_entry(filter, &callstack_filters, list) {
- if (strstr(sym->name, filter->name))
- return true;
- }
- }
- return false;
-}
-
struct trace_lock_handler {
/* it's used on CONFIG_LOCKDEP */
int (*acquire_event)(struct evsel *evsel,
@@ -1165,7 +1058,7 @@ static int report_lock_contention_begin_event(struct evsel *evsel,
if (callstack == NULL)
return -ENOMEM;
- if (!match_callstack_filter(machine, callstack)) {
+ if (!match_callstack_filter(machine, callstack, max_stack_depth)) {
free(callstack);
return 0;
}
@@ -1575,8 +1468,13 @@ static void sort_result(void)
static const struct {
unsigned int flags;
- const char *str;
- const char *name;
+ /*
+ * Name of the lock flags (access), with delimiter ':'.
+ * For example, rwsem:R or rwsem:W.
+ */
+ const char *flags_name;
+ /* Name of the lock (type), for example, rwlock or rwsem. */
+ const char *lock_name;
} lock_type_table[] = {
{ 0, "semaphore", "semaphore" },
{ LCB_F_SPIN, "spinlock", "spinlock" },
@@ -1591,45 +1489,32 @@ static const struct {
{ LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W", "percpu-rwsem" },
{ LCB_F_MUTEX, "mutex", "mutex" },
{ LCB_F_MUTEX | LCB_F_SPIN, "mutex", "mutex" },
- /* alias for get_type_flag() */
- { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin", "mutex" },
+ /* alias for optimistic spinning only */
+ { LCB_F_MUTEX | LCB_F_SPIN, "mutex:spin", "mutex-spin" },
};
-static const char *get_type_str(unsigned int flags)
+static const char *get_type_flags_name(unsigned int flags)
{
- flags &= LCB_F_MAX_FLAGS - 1;
+ flags &= LCB_F_TYPE_MASK;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
- return lock_type_table[i].str;
+ return lock_type_table[i].flags_name;
}
return "unknown";
}
-static const char *get_type_name(unsigned int flags)
+static const char *get_type_lock_name(unsigned int flags)
{
- flags &= LCB_F_MAX_FLAGS - 1;
+ flags &= LCB_F_TYPE_MASK;
for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
if (lock_type_table[i].flags == flags)
- return lock_type_table[i].name;
+ return lock_type_table[i].lock_name;
}
return "unknown";
}
-static unsigned int get_type_flag(const char *str)
-{
- for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
- if (!strcmp(lock_type_table[i].name, str))
- return lock_type_table[i].flags;
- }
- for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
- if (!strcmp(lock_type_table[i].str, str))
- return lock_type_table[i].flags;
- }
- return UINT_MAX;
-}
-
static void lock_filter_finish(void)
{
zfree(&filters.types);
@@ -1646,6 +1531,12 @@ static void lock_filter_finish(void)
zfree(&filters.cgrps);
filters.nr_cgrps = 0;
+
+ for (int i = 0; i < filters.nr_slabs; i++)
+ free(filters.slabs[i]);
+
+ zfree(&filters.slabs);
+ filters.nr_slabs = 0;
}
static void sort_contention_result(void)
@@ -1732,7 +1623,7 @@ static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
- fprintf(lock_output, " %10s %s\n", get_type_str(st->flags), st->name);
+ fprintf(lock_output, " %10s %s\n", get_type_flags_name(st->flags), st->name);
break;
case LOCK_AGGR_TASK:
pid = st->addr;
@@ -1742,7 +1633,7 @@ static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, " %016llx %s (%s)\n", (unsigned long long)st->addr,
- st->name, get_type_name(st->flags));
+ st->name, get_type_lock_name(st->flags));
break;
case LOCK_AGGR_CGROUP:
fprintf(lock_output, " %s\n", st->name);
@@ -1783,7 +1674,7 @@ static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *s
switch (aggr_mode) {
case LOCK_AGGR_CALLER:
- fprintf(lock_output, "%s%s %s", get_type_str(st->flags), sep, st->name);
+ fprintf(lock_output, "%s%s %s", get_type_flags_name(st->flags), sep, st->name);
if (verbose <= 0)
fprintf(lock_output, "\n");
break;
@@ -1795,7 +1686,7 @@ static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *s
break;
case LOCK_AGGR_ADDR:
fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
- st->name, sep, get_type_name(st->flags));
+ st->name, sep, get_type_lock_name(st->flags));
break;
case LOCK_AGGR_CGROUP:
fprintf(lock_output, "%s\n",st->name);
@@ -2150,7 +2041,8 @@ static int __cmd_contention(int argc, const char **argv)
goto out_delete;
}
- if (lock_contention_prepare(&con) < 0) {
+ err = lock_contention_prepare(&con);
+ if (err < 0) {
pr_err("lock contention BPF setup failed\n");
goto out_delete;
}
@@ -2171,10 +2063,14 @@ static int __cmd_contention(int argc, const char **argv)
}
}
- if (setup_output_field(true, output_fields))
+ err = setup_output_field(true, output_fields);
+ if (err) {
+ pr_err("Failed to setup output field\n");
goto out_delete;
+ }
- if (select_key(true))
+ err = select_key(true);
+ if (err)
goto out_delete;
if (symbol_conf.field_sep) {
@@ -2350,29 +2246,61 @@ static int parse_lock_type(const struct option *opt __maybe_unused, const char *
int unset __maybe_unused)
{
char *s, *tmp, *tok;
- int ret = 0;
s = strdup(str);
if (s == NULL)
return -1;
for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
- unsigned int flags = get_type_flag(tok);
+ bool found = false;
- if (flags == -1U) {
- pr_err("Unknown lock flags: %s\n", tok);
- ret = -1;
- break;
+ /* `tok` is a flags name if it contains ':'. */
+ if (strchr(tok, ':')) {
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].flags_name, tok) &&
+ add_lock_type(lock_type_table[i].flags)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("Unknown lock flags name: %s\n", tok);
+ free(s);
+ return -1;
+ }
+
+ continue;
}
- if (!add_lock_type(flags)) {
- ret = -1;
- break;
+ /*
+ * Otherwise `tok` is a lock name.
+ * A single lock name can map to multiple flags.
+ * Replace alias `pcpu-sem` with actual name `percpu-rwsem`.
+ */
+ if (!strcmp(tok, "pcpu-sem"))
+ tok = (char *)"percpu-rwsem";
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].lock_name, tok)) {
+ if (add_lock_type(lock_type_table[i].flags)) {
+ found = true;
+ } else {
+ free(s);
+ return -1;
+ }
+ }
}
+
+ if (!found) {
+ pr_err("Unknown lock name: %s\n", tok);
+ free(s);
+ return -1;
+ }
+
}
free(s);
- return ret;
+ return 0;
}
static bool add_lock_addr(unsigned long addr)
@@ -2412,6 +2340,27 @@ static bool add_lock_sym(char *name)
return true;
}
+static bool add_lock_slab(char *name)
+{
+ char **tmp;
+ char *sym = strdup(name);
+
+ if (sym == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp = realloc(filters.slabs, (filters.nr_slabs + 1) * sizeof(*filters.slabs));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp[filters.nr_slabs++] = sym;
+ filters.slabs = tmp;
+ return true;
+}
+
static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
@@ -2435,6 +2384,14 @@ static int parse_lock_addr(const struct option *opt __maybe_unused, const char *
continue;
}
+ if (*tok == '&') {
+ if (!add_lock_slab(tok + 1)) {
+ ret = -1;
+ break;
+ }
+ continue;
+ }
+
/*
* At this moment, we don't have kernel symbols. Save the symbols
* in a separate list and resolve them to addresses later.
@@ -2449,34 +2406,6 @@ static int parse_lock_addr(const struct option *opt __maybe_unused, const char *
return ret;
}
-static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
- int unset __maybe_unused)
-{
- char *s, *tmp, *tok;
- int ret = 0;
-
- s = strdup(str);
- if (s == NULL)
- return -1;
-
- for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
- struct callstack_filter *entry;
-
- entry = malloc(sizeof(*entry) + strlen(tok) + 1);
- if (entry == NULL) {
- pr_err("Memory allocation failure\n");
- free(s);
- return -1;
- }
-
- strcpy(entry->name, tok);
- list_add_tail(&entry->list, &callstack_filters);
- }
-
- free(s);
- return ret;
-}
-
static int parse_output(const struct option *opt __maybe_unused, const char *str,
int unset __maybe_unused)
{
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 651188c1d825..99d5e1491a28 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -4,6 +4,7 @@
#include <sys/stat.h>
#include <unistd.h>
#include "builtin.h"
+#include "perf.h"
#include <subcmd/parse-options.h>
#include "util/auxtrace.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f83252472921..5db1aedf48df 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -860,7 +860,9 @@ static int record__auxtrace_init(struct record *rec)
if (err)
return err;
- auxtrace_regroup_aux_output(rec->evlist);
+ err = auxtrace_parse_aux_action(rec->evlist);
+ if (err)
+ return err;
return auxtrace_parse_filters(rec->evlist);
}
@@ -1748,10 +1750,8 @@ static void record__init_features(struct record *rec)
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
-#ifdef HAVE_LIBTRACEEVENT
if (!have_tracepoints(&rec->evlist->core.entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
-#endif
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 048c91960ba9..f5fbd670d619 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -348,11 +348,9 @@ static int process_read_event(const struct perf_tool *tool,
struct report *rep = container_of(tool, struct report, tool);
if (rep->show_threads) {
- const char *name = evsel__name(evsel);
int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
- evsel->core.idx,
- name,
+ evsel,
event->read.value);
if (err)
@@ -1422,7 +1420,7 @@ int cmd_report(int argc, const char **argv)
OPT_STRING(0, "addr2line", &addr2line_path, "path",
"addr2line binary to use for line numbers"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
- "Disable symbol demangling"),
+ "Symbol demangling. Enabled by default, use --no-demangle to disable."),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7049c60ebf77..26ece6e9bfd1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
+#include "perf.h"
#include "perf-sys.h"
#include "util/cpumap.h"
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9e47905f75a6..33667b534634 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -85,15 +85,12 @@ static bool system_wide;
static bool print_flags;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;
static struct dlfilter *dlfilter;
static int dlargc;
static char **dlargv;
-unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
-
enum perf_output_field {
PERF_OUTPUT_COMM = 1ULL << 0,
PERF_OUTPUT_TID = 1ULL << 1,
@@ -224,6 +221,10 @@ enum {
OUTPUT_TYPE_MAX
};
+// We need to refactor the evsel->priv use in 'perf script' to allow for
+// using that area, which is being used only in some cases.
+#define OUTPUT_TYPE_UNSET -1
+
/* default set to maintain compatibility with current format */
static struct {
bool user_set;
@@ -397,6 +398,14 @@ static inline int output_type(unsigned int type)
return OUTPUT_TYPE_OTHER;
}
+static inline int evsel__output_type(struct evsel *evsel)
+{
+ if (evsel->script_output_type == OUTPUT_TYPE_UNSET)
+ evsel->script_output_type = output_type(evsel->core.attr.type);
+
+ return evsel->script_output_type;
+}
+
static bool output_set_by_user(void)
{
int j;
@@ -421,13 +430,13 @@ static const char *output_field2str(enum perf_output_field field)
return str;
}
-#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
+#define PRINT_FIELD(x) (output[evsel__output_type(evsel)].fields & PERF_OUTPUT_##x)
static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
enum perf_output_field field, bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->core.attr;
- int type = output_type(attr->type);
+ int type = evsel__output_type(evsel);
const char *evname;
if (attr->sample_type & sample_type)
@@ -461,7 +470,6 @@ static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *
static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
{
- struct perf_event_attr *attr = &evsel->core.attr;
bool allow_user_set;
if (evsel__is_dummy_event(evsel))
@@ -578,9 +586,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
return 0;
}
-static void set_print_ip_opts(struct perf_event_attr *attr)
+static void evsel__set_print_ip_opts(struct evsel *evsel)
{
- unsigned int type = output_type(attr->type);
+ unsigned int type = evsel__output_type(evsel);
output[type].print_ip_opts = 0;
if (PRINT_FIELD(IP))
@@ -610,7 +618,7 @@ static struct evsel *find_first_output_type(struct evlist *evlist,
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_dummy_event(evsel))
continue;
- if (output_type(evsel->core.attr.type) == (int)type)
+ if (evsel__output_type(evsel) == (int)type)
return evsel;
}
return NULL;
@@ -652,7 +660,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
if (output[j].fields & PERF_OUTPUT_DSOFF)
output[j].fields |= PERF_OUTPUT_DSO;
- set_print_ip_opts(&evsel->core.attr);
+ evsel__set_print_ip_opts(evsel);
tod |= output[j].fields & PERF_OUTPUT_TOD;
}
@@ -688,7 +696,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
output[j].fields |= PERF_OUTPUT_SYM;
output[j].fields |= PERF_OUTPUT_SYMOFFSET;
output[j].fields |= PERF_OUTPUT_DSO;
- set_print_ip_opts(&evsel->core.attr);
+ evsel__set_print_ip_opts(evsel);
goto out;
}
}
@@ -792,7 +800,6 @@ static int perf_sample__fprintf_start(struct perf_script *script,
struct evsel *evsel,
u32 type, FILE *fp)
{
- struct perf_event_attr *attr = &evsel->core.attr;
unsigned long secs;
unsigned long long nsecs;
int printed = 0;
@@ -944,7 +951,7 @@ static int print_bstack_flags(FILE *fp, struct branch_entry *br)
static int perf_sample__fprintf_brstack(struct perf_sample *sample,
struct thread *thread,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -983,7 +990,7 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
struct thread *thread,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -1021,7 +1028,7 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
struct thread *thread,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -1188,7 +1195,7 @@ out:
return ret;
}
-static int any_dump_insn(struct perf_event_attr *attr __maybe_unused,
+static int any_dump_insn(struct evsel *evsel __maybe_unused,
struct perf_insn *x, uint64_t ip,
u8 *inbuf, int inlen, int *lenp,
FILE *fp)
@@ -1216,15 +1223,14 @@ static int add_padding(FILE *fp, int printed, int padding)
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len,
int insn, FILE *fp, int *total_cycles,
- struct perf_event_attr *attr,
- struct thread *thread,
struct evsel *evsel,
+ struct thread *thread,
u64 br_cntr)
{
int ilen = 0;
int printed = fprintf(fp, "\t%016" PRIx64 "\t", ip);
- printed += add_padding(fp, any_dump_insn(attr, x, ip, inbuf, len, &ilen, fp), 30);
+ printed += add_padding(fp, any_dump_insn(evsel, x, ip, inbuf, len, &ilen, fp), 30);
printed += fprintf(fp, "\t");
if (PRINT_FIELD(BRSTACKINSNLEN))
@@ -1280,7 +1286,7 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
u8 cpumode, int cpu, struct symbol **lastsym,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
struct addr_location al;
int off, printed = 0, ret = 0;
@@ -1356,10 +1362,10 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len > 0) {
printed += ip__fprintf_sym(entries[nr - 1].from, thread,
- x.cpumode, x.cpu, &lastsym, attr, fp);
+ x.cpumode, x.cpu, &lastsym, evsel, fp);
printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
&x, buffer, len, 0, fp, &total_cycles,
- attr, thread, evsel, br_cntr);
+ evsel, thread, br_cntr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
}
@@ -1387,19 +1393,19 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
for (off = 0; off < (unsigned)len; off += ilen) {
uint64_t ip = start + off;
- printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
+ printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
if (ip == end) {
if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
br_cntr = sample->branch_stack_cntr[i];
printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
- &total_cycles, attr, thread, evsel, br_cntr);
+ &total_cycles, evsel, thread, br_cntr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, ip);
break;
} else {
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t", ip);
- printed += any_dump_insn(attr, &x, ip, buffer + off, len - off, &ilen, fp);
+ printed += any_dump_insn(evsel, &x, ip, buffer + off, len - off, &ilen, fp);
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
@@ -1438,7 +1444,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
end = start + 128;
}
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
- printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
+ printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
if (len <= 0) {
/* Print at least last IP if basic block did not work */
len = grab_bb(buffer, sample->ip, sample->ip,
@@ -1447,7 +1453,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
goto out;
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t", sample->ip);
- printed += any_dump_insn(attr, &x, sample->ip, buffer, len, &ilen, fp);
+ printed += any_dump_insn(evsel, &x, sample->ip, buffer, len, &ilen, fp);
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
@@ -1458,7 +1464,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
for (off = 0; off <= end - start; off += ilen) {
ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t", start + off);
- printed += any_dump_insn(attr, &x, start + off, buffer + off, len - off, &ilen, fp);
+ printed += any_dump_insn(evsel, &x, start + off, buffer + off, len - off, &ilen, fp);
if (PRINT_FIELD(BRSTACKINSNLEN))
printed += fprintf(fp, "\tilen: %d", ilen);
printed += fprintf(fp, "\n");
@@ -1482,13 +1488,13 @@ out:
static int perf_sample__fprintf_addr(struct perf_sample *sample,
struct thread *thread,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
struct addr_location al;
int printed = fprintf(fp, "%16" PRIx64, sample->addr);
addr_location__init(&al);
- if (!sample_addr_correlates_sym(attr))
+ if (!sample_addr_correlates_sym(&evsel->core.attr))
goto out;
thread__resolve(thread, &al, sample);
@@ -1515,11 +1521,10 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
struct addr_location *addr_al,
u64 *ip)
{
- struct perf_event_attr *attr = &evsel->core.attr;
const char *name = NULL;
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
- if (sample_addr_correlates_sym(attr)) {
+ if (sample_addr_correlates_sym(&evsel->core.attr)) {
if (!addr_al->thread)
thread__resolve(thread, addr_al, sample);
if (addr_al->sym)
@@ -1545,7 +1550,6 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct addr_location *addr_al,
FILE *fp)
{
- struct perf_event_attr *attr = &evsel->core.attr;
size_t depth = thread_stack__depth(thread, sample->cpu);
const char *name = NULL;
static int spacing;
@@ -1589,19 +1593,6 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
return len + dlen;
}
-__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
- struct thread *thread __maybe_unused,
- struct machine *machine __maybe_unused)
-{
-}
-
-void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
- struct machine *machine)
-{
- if (sample->insn_len == 0 && native_arch)
- arch_fetch_insn(sample, thread, machine);
-}
-
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct evsel *evsel,
struct perf_event_attr *attr,
@@ -1611,7 +1602,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
- script_fetch_insn(sample, thread, machine);
+ script_fetch_insn(sample, thread, machine, native_arch);
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
@@ -1630,7 +1621,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
}
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
- struct perf_event_attr *attr, FILE *fp)
+ struct evsel *evsel, FILE *fp)
{
unsigned int ipc;
@@ -1651,7 +1642,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
- unsigned int type = output_type(attr->type);
+ unsigned int type = evsel__output_type(evsel);
bool print_srcline_last = false;
int printed = 0;
@@ -1688,10 +1679,10 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[type].user_set)) {
printed += fprintf(fp, " => ");
- printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
+ printed += perf_sample__fprintf_addr(sample, thread, evsel, fp);
}
- printed += perf_sample__fprintf_ipc(sample, attr, fp);
+ printed += perf_sample__fprintf_ipc(sample, evsel, fp);
if (print_srcline_last)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
@@ -1709,87 +1700,6 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
return printed;
}
-static struct {
- u32 flags;
- const char *name;
-} sample_flags[] = {
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "jcc"},
- {PERF_IP_FLAG_BRANCH, "jmp"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, "int"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, "iret"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, "syscall"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, "sysret"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "async"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT, "hw int"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vmentry"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vmexit"},
- {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_BRANCH_MISS, "br miss"},
- {0, NULL}
-};
-
-static const char *sample_flags_to_name(u32 flags)
-{
- int i;
-
- for (i = 0; sample_flags[i].name ; i++) {
- if (sample_flags[i].flags == flags)
- return sample_flags[i].name;
- }
-
- return NULL;
-}
-
-int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
-{
- u32 xf = PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_INTR_DISABLE |
- PERF_IP_FLAG_INTR_TOGGLE;
- const char *chars = PERF_IP_FLAG_CHARS;
- const size_t n = strlen(PERF_IP_FLAG_CHARS);
- const char *name = NULL;
- size_t i, pos = 0;
- char xs[16] = {0};
-
- if (flags & xf)
- snprintf(xs, sizeof(xs), "(%s%s%s)",
- flags & PERF_IP_FLAG_IN_TX ? "x" : "",
- flags & PERF_IP_FLAG_INTR_DISABLE ? "D" : "",
- flags & PERF_IP_FLAG_INTR_TOGGLE ? "t" : "");
-
- name = sample_flags_to_name(flags & ~xf);
- if (name)
- return snprintf(str, sz, "%-15s%6s", name, xs);
-
- if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
- name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_BEGIN));
- if (name)
- return snprintf(str, sz, "tr strt %-7s%6s", name, xs);
- }
-
- if (flags & PERF_IP_FLAG_TRACE_END) {
- name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_END));
- if (name)
- return snprintf(str, sz, "tr end %-7s%6s", name, xs);
- }
-
- for (i = 0; i < n; i++, flags >>= 1) {
- if ((flags & 1) && pos < sz)
- str[pos++] = chars[i];
- }
- for (; i < 32; i++, flags >>= 1) {
- if ((flags & 1) && pos < sz)
- str[pos++] = '?';
- }
- if (pos < sz)
- str[pos] = 0;
-
- return pos;
-}
-
static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{
char str[SAMPLE_FLAGS_BUF_SIZE];
@@ -2254,7 +2164,7 @@ static void process_event(struct perf_script *script,
{
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->core.attr;
- unsigned int type = output_type(attr->type);
+ unsigned int type = evsel__output_type(evsel);
struct evsel_script *es = evsel->priv;
FILE *fp = es->fp;
char str[PAGE_SIZE_NAME_LEN];
@@ -2289,15 +2199,20 @@ static void process_event(struct perf_script *script,
}
#ifdef HAVE_LIBTRACEEVENT
if (PRINT_FIELD(TRACE) && sample->raw_data) {
- event_format__fprintf(evsel->tp_format, sample->cpu,
- sample->raw_data, sample->raw_size, fp);
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ if (tp_format) {
+ event_format__fprintf(tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size,
+ fp);
+ }
}
#endif
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
perf_sample__fprintf_synth(sample, evsel, fp);
if (PRINT_FIELD(ADDR))
- perf_sample__fprintf_addr(sample, thread, attr, fp);
+ perf_sample__fprintf_addr(sample, thread, evsel, fp);
if (PRINT_FIELD(DATA_SRC))
data_src__fprintf(sample->data_src, fp);
@@ -2347,11 +2262,11 @@ static void process_event(struct perf_script *script,
perf_sample__fprintf_uregs(sample, attr, arch, fp);
if (PRINT_FIELD(BRSTACK))
- perf_sample__fprintf_brstack(sample, thread, attr, fp);
+ perf_sample__fprintf_brstack(sample, thread, evsel, fp);
else if (PRINT_FIELD(BRSTACKSYM))
- perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
+ perf_sample__fprintf_brstacksym(sample, thread, evsel, fp);
else if (PRINT_FIELD(BRSTACKOFF))
- perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
+ perf_sample__fprintf_brstackoff(sample, thread, evsel, fp);
if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
@@ -2366,7 +2281,7 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(CODE_PAGE_SIZE))
fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
- perf_sample__fprintf_ipc(sample, attr, fp);
+ perf_sample__fprintf_ipc(sample, evsel, fp);
fprintf(fp, "\n");
@@ -2599,14 +2514,14 @@ static int process_attr(const struct perf_tool *tool, union perf_event *event,
sample_type & PERF_SAMPLE_BRANCH_STACK ||
(sample_type & PERF_SAMPLE_REGS_USER &&
sample_type & PERF_SAMPLE_STACK_USER))) {
- int type = output_type(evsel->core.attr.type);
+ int type = evsel__output_type(evsel);
if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
output[type].fields |= PERF_OUTPUT_IP;
if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
output[type].fields |= PERF_OUTPUT_SYM;
}
- set_print_ip_opts(&evsel->core.attr);
+ evsel__set_print_ip_opts(evsel);
return 0;
}
@@ -2959,79 +2874,18 @@ static int __cmd_script(struct perf_script *script)
return ret;
}
-struct script_spec {
- struct list_head node;
- struct scripting_ops *ops;
- char spec[];
-};
-
-static LIST_HEAD(script_specs);
-
-static struct script_spec *script_spec__new(const char *spec,
- struct scripting_ops *ops)
-{
- struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1);
-
- if (s != NULL) {
- strcpy(s->spec, spec);
- s->ops = ops;
- }
-
- return s;
-}
-
-static void script_spec__add(struct script_spec *s)
-{
- list_add_tail(&s->node, &script_specs);
-}
-
-static struct script_spec *script_spec__find(const char *spec)
+static int list_available_languages_cb(struct scripting_ops *ops, const char *spec)
{
- struct script_spec *s;
-
- list_for_each_entry(s, &script_specs, node)
- if (strcasecmp(s->spec, spec) == 0)
- return s;
- return NULL;
-}
-
-int script_spec_register(const char *spec, struct scripting_ops *ops)
-{
- struct script_spec *s;
-
- s = script_spec__find(spec);
- if (s)
- return -1;
-
- s = script_spec__new(spec, ops);
- if (!s)
- return -1;
- else
- script_spec__add(s);
-
+ fprintf(stderr, " %-42s [%s]\n", spec, ops->name);
return 0;
}
-static struct scripting_ops *script_spec__lookup(const char *spec)
-{
- struct script_spec *s = script_spec__find(spec);
- if (!s)
- return NULL;
-
- return s->ops;
-}
-
static void list_available_languages(void)
{
- struct script_spec *s;
-
fprintf(stderr, "\n");
fprintf(stderr, "Scripting language extensions (used in "
"perf script -s [spec:]script.[spec]):\n\n");
-
- list_for_each_entry(s, &script_specs, node)
- fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name);
-
+ script_spec__for_each(&list_available_languages_cb);
fprintf(stderr, "\n");
}
@@ -3523,144 +3377,6 @@ static void free_dlarg(void)
free(dlargv);
}
-/*
- * Some scripts specify the required events in their "xxx-record" file,
- * this function will check if the events in perf.data match those
- * mentioned in the "xxx-record".
- *
- * Fixme: All existing "xxx-record" are all in good formats "-e event ",
- * which is covered well now. And new parsing code should be added to
- * cover the future complex formats like event groups etc.
- */
-static int check_ev_match(char *dir_name, char *scriptname,
- struct perf_session *session)
-{
- char filename[MAXPATHLEN], evname[128];
- char line[BUFSIZ], *p;
- struct evsel *pos;
- int match, len;
- FILE *fp;
-
- scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname);
-
- fp = fopen(filename, "r");
- if (!fp)
- return -1;
-
- while (fgets(line, sizeof(line), fp)) {
- p = skip_spaces(line);
- if (*p == '#')
- continue;
-
- while (strlen(p)) {
- p = strstr(p, "-e");
- if (!p)
- break;
-
- p += 2;
- p = skip_spaces(p);
- len = strcspn(p, " \t");
- if (!len)
- break;
-
- snprintf(evname, len + 1, "%s", p);
-
- match = 0;
- evlist__for_each_entry(session->evlist, pos) {
- if (evsel__name_is(pos, evname)) {
- match = 1;
- break;
- }
- }
-
- if (!match) {
- fclose(fp);
- return -1;
- }
- }
- }
-
- fclose(fp);
- return 0;
-}
-
-/*
- * Return -1 if none is found, otherwise the actual scripts number.
- *
- * Currently the only user of this function is the script browser, which
- * will list all statically runnable scripts, select one, execute it and
- * show the output in a perf browser.
- */
-int find_scripts(char **scripts_array, char **scripts_path_array, int num,
- int pathlen)
-{
- struct dirent *script_dirent, *lang_dirent;
- char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
- DIR *scripts_dir, *lang_dir;
- struct perf_session *session;
- struct perf_data data = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- };
- char *temp;
- int i = 0;
-
- session = perf_session__new(&data, NULL);
- if (IS_ERR(session))
- return PTR_ERR(session);
-
- snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
-
- scripts_dir = opendir(scripts_path);
- if (!scripts_dir) {
- perf_session__delete(session);
- return -1;
- }
-
- for_each_lang(scripts_path, scripts_dir, lang_dirent) {
- scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
- lang_dirent->d_name);
-#ifndef HAVE_LIBPERL_SUPPORT
- if (strstr(lang_path, "perl"))
- continue;
-#endif
-#ifndef HAVE_LIBPYTHON_SUPPORT
- if (strstr(lang_path, "python"))
- continue;
-#endif
-
- lang_dir = opendir(lang_path);
- if (!lang_dir)
- continue;
-
- for_each_script(lang_path, lang_dir, script_dirent) {
- /* Skip those real time scripts: xxxtop.p[yl] */
- if (strstr(script_dirent->d_name, "top."))
- continue;
- if (i >= num)
- break;
- snprintf(scripts_path_array[i], pathlen, "%s/%s",
- lang_path,
- script_dirent->d_name);
- temp = strchr(script_dirent->d_name, '.');
- snprintf(scripts_array[i],
- (temp - script_dirent->d_name) + 1,
- "%s", script_dirent->d_name);
-
- if (check_ev_match(lang_path,
- scripts_array[i], session))
- continue;
-
- i++;
- }
- closedir(lang_dir);
- }
-
- closedir(scripts_dir);
- perf_session__delete(session);
- return i;
-}
-
static char *get_script_path(const char *script_root, const char *suffix)
{
struct dirent *script_dirent, *lang_dirent;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index fdf5172646a5..77e327d4a9a7 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -112,8 +112,6 @@ static struct target target = {
.uid = UINT_MAX,
};
-#define METRIC_ONLY_LEN 20
-
static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
@@ -151,21 +149,6 @@ static struct perf_stat perf_stat;
static volatile sig_atomic_t done = 0;
-static struct perf_stat_config stat_config = {
- .aggr_mode = AGGR_GLOBAL,
- .aggr_level = MAX_CACHE_LVL + 1,
- .scale = true,
- .unit_width = 4, /* strlen("unit") */
- .run_count = 1,
- .metric_only_len = METRIC_ONLY_LEN,
- .walltime_nsecs_stats = &walltime_nsecs_stats,
- .ru_stats = &ru_stats,
- .big_num = true,
- .ctl_fd = -1,
- .ctl_fd_ack = -1,
- .iostat_run = false,
-};
-
/* Options set from the command line. */
struct opt_aggr_mode {
bool node, socket, die, cluster, cache, core, thread, no_aggr;
@@ -1071,16 +1054,6 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
-void perf_stat__set_big_num(int set)
-{
- stat_config.big_num = (set != 0);
-}
-
-void perf_stat__set_no_csv_summary(int set)
-{
- stat_config.no_csv_summary = (set != 0);
-}
-
static int stat__set_big_num(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 724a79386321..4fd31d29b2ab 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -267,9 +267,9 @@ static void perf_top__show_details(struct perf_top *top)
if (top->evlist->enabled) {
if (top->zero)
- symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel);
else
- symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel);
}
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
@@ -809,7 +809,7 @@ static void perf_event__process_sample(const struct perf_tool *tool,
* invalid --vmlinux ;-)
*/
if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
- __map__is_kernel(al.map) && map__has_symbols(al.map)) {
+ __map__is_kernel(al.map) && !map__has_symbols(al.map)) {
if (symbol_conf.vmlinux_name) {
char serr[256];
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6a1a128fe645..d466447ae928 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -389,7 +389,12 @@ static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
}
if (et->fmt == NULL) {
- et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ if (tp_format == NULL)
+ goto out_delete;
+
+ et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
if (et->fmt == NULL)
goto out_delete;
}
@@ -1108,7 +1113,6 @@ static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size _
.strtoul = STUL_STRARRAY_FLAGS, \
.parm = &strarray__##array, }
-#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
@@ -2069,30 +2073,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
const char *name = syscalltbl__name(trace->sctbl, id);
int err;
-#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (trace->syscalls.table == NULL) {
trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
if (trace->syscalls.table == NULL)
return -ENOMEM;
}
-#else
- if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
- // When using libaudit we don't know beforehand what is the max syscall id
- struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
-
- if (table == NULL)
- return -ENOMEM;
-
- // Need to memset from offset 0 and +1 members if brand new
- if (trace->syscalls.table == NULL)
- memset(table, 0, (id + 1) * sizeof(*sc));
- else
- memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
-
- trace->syscalls.table = table;
- trace->sctbl->syscalls.max_id = id;
- }
-#endif
sc = trace->syscalls.table + id;
if (sc->nonexistent)
return -EEXIST;
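The removed comment describes a grow-on-demand table: with libaudit the maximum syscall id is not known up front, so the table is reallocated and the freshly added tail zero-filled. A standalone sketch of that pattern, with generic names rather than the perf structures:

#include <stdlib.h>
#include <string.h>

struct entry { int used; };

/* Grow *tab so that index id is valid, zero-filling any new slots. */
static int table_reserve(struct entry **tab, int *max_id, int id)
{
	struct entry *t;

	if (*tab && id <= *max_id)
		return 0;

	t = realloc(*tab, (id + 1) * sizeof(*t));
	if (!t)
		return -1;

	if (!*tab)
		memset(t, 0, (id + 1) * sizeof(*t));		/* brand new table */
	else
		memset(t + *max_id + 1, 0, (id - *max_id) * sizeof(*t));

	*tab = t;
	*max_id = id;
	return 0;
}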
@@ -2122,8 +2107,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
return PTR_ERR(sc->tp_format);
}
+ /*
+ * The tracepoint format contains __syscall_nr field, so it's one more
+ * than the actual number of syscall arguments.
+ */
if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
- RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1))
return -ENOMEM;
sc->args = sc->tp_format->format.fields;
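The new comment notes that a sys_enter_* tracepoint format carries a synthetic __syscall_nr field ahead of the real arguments, hence the nr_fields - 1 above. A minimal sketch of that adjustment; the structs are simplified stand-ins for libtraceevent's types, not the real ones:

#include <string.h>

struct fake_field  { const char *name; struct fake_field *next; };
struct fake_format { int nr_fields; struct fake_field *fields; };

/* Number of real syscall arguments, excluding a leading __syscall_nr. */
static int syscall_nr_args(const struct fake_format *fmt)
{
	int nr = fmt->nr_fields;

	if (fmt->fields && !strcmp(fmt->fields->name, "__syscall_nr"))
		nr--;
	return nr;
}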
@@ -2154,8 +2143,12 @@ static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
if (fmt != NULL) {
- syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields, use_btf);
- return 0;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ if (tp_format) {
+ syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
+ return 0;
+ }
}
return -ENOMEM;
@@ -2439,18 +2432,7 @@ static struct syscall *trace__syscall_info(struct trace *trace,
err = -EINVAL;
-#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (id > trace->sctbl->syscalls.max_id) {
-#else
- if (id >= trace->sctbl->syscalls.max_id) {
- /*
- * With libaudit we don't know beforehand what is the max_id,
- * so we let trace__read_syscall_info() figure that out as we
- * go on reading syscalls.
- */
- err = trace__read_syscall_info(trace, id);
- if (err)
-#endif
goto out_cant_read;
}
@@ -2581,7 +2563,6 @@ static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
- void *augmented_args = NULL;
/*
* For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
* and there we get all 6 syscall args plus the tracepoint common fields
@@ -2599,10 +2580,24 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
int args_size = raw_augmented_args_size ?: sc->args_size;
*augmented_args_size = sample->raw_size - args_size;
- if (*augmented_args_size > 0)
- augmented_args = sample->raw_data + args_size;
+ if (*augmented_args_size > 0) {
+ static uintptr_t argbuf[1024]; /* assuming single-threaded */
- return augmented_args;
+ if ((size_t)(*augmented_args_size) > sizeof(argbuf))
+ return NULL;
+
+ /*
+ * The perf ring-buffer is 8-byte aligned but sample->raw_data
+ * is not because it's preceded by u32 size. Later, beautifier
+ * will use the augmented args with stricter alignments like in
+ * some struct. To make sure it's aligned, let's copy the args
+ * into a static buffer as it's single-threaded for now.
+ */
+ memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);
+
+ return argbuf;
+ }
+ return NULL;
}
static void syscall__exit(struct syscall *sc)
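The comment added here explains the copy: sample->raw_data sits right after a u32 size field, so it is only 4-byte aligned, while the beautifiers may read it through types with stricter alignment. A self-contained sketch of the same copy-to-aligned-buffer idea, with made-up names and sizes:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Return an 8-byte aligned view of payload, or NULL if it does not fit. */
static void *align_payload(const void *payload, size_t len)
{
	static uint64_t buf[512];	/* aligned; single-threaded use only */

	if (len > sizeof(buf))
		return NULL;

	memcpy(buf, payload, len);	/* memcpy is valid for any source alignment */
	return buf;
}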
@@ -3027,7 +3022,8 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
{
char bf[2048];
size_t size = sizeof(bf);
- struct tep_format_field *field = evsel->tp_format->format.fields;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
size_t printed = 0, btf_printed;
unsigned long val;
@@ -3145,11 +3141,13 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
if (evsel__is_bpf_output(evsel)) {
bpf_output__fprintf(trace, sample);
- } else if (evsel->tp_format) {
- if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
- trace__fprintf_sys_enter(trace, evsel, sample)) {
+ } else {
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) ||
+ trace__fprintf_sys_enter(trace, evsel, sample))) {
if (trace->libtraceevent_print) {
- event_format__fprintf(evsel->tp_format, sample->cpu,
+ event_format__fprintf(tp_format, sample->cpu,
sample->raw_data, sample->raw_size,
trace->output);
} else {
@@ -4077,17 +4075,23 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg,
char **type)
{
- struct tep_format_field *field;
struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
+ const struct tep_event *tp_format;
- if (evsel->tp_format == NULL || fmt == NULL)
+ if (!fmt)
return NULL;
- for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
+ tp_format = evsel__tp_format(evsel);
+ if (!tp_format)
+ return NULL;
+
+ for (const struct tep_format_field *field = tp_format->format.fields; field;
+ field = field->next, ++fmt) {
if (strcmp(field->name, arg) == 0) {
*type = field->type;
return fmt;
}
+ }
return NULL;
}
@@ -4843,13 +4847,18 @@ static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
const struct syscall_fmt *scfmt = syscall_fmt__find(name);
if (scfmt) {
- int skip = 0;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
- if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
- strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
- ++skip;
+ if (tp_format) {
+ int skip = 0;
- memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
+ if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 ||
+ strcmp(tp_format->format.fields->name, "nr") == 0)
+ ++skip;
+
+ memcpy(fmt + skip, scfmt->arg,
+ (tp_format->format.nr_fields - skip) * sizeof(*fmt));
+ }
}
}
}
@@ -4859,10 +4868,16 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->priv || !evsel->tp_format)
+ const struct tep_event *tp_format;
+
+ if (evsel->priv)
+ continue;
+
+ tp_format = evsel__tp_format(evsel);
+ if (!tp_format)
continue;
- if (strcmp(evsel->tp_format->system, "syscalls")) {
+ if (strcmp(tp_format->system, "syscalls")) {
evsel__init_tp_arg_scnprintf(evsel, use_btf);
continue;
}
@@ -4870,20 +4885,24 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf)
if (evsel__init_syscall_tp(evsel))
return -1;
- if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
+ if (!strncmp(tp_format->name, "sys_enter_", 10)) {
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
return -1;
- evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
- } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
+ evsel__set_syscall_arg_fmt(evsel,
+ tp_format->name + sizeof("sys_enter_") - 1);
+ } else if (!strncmp(tp_format->name, "sys_exit_", 9)) {
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
- if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
+ if (__tp_field__init_uint(&sc->ret, sizeof(u64),
+ sc->id.offset + sizeof(u64),
+ evsel->needs_swap))
return -1;
- evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
+ evsel__set_syscall_arg_fmt(evsel,
+ tp_format->name + sizeof("sys_exit_") - 1);
}
}
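Most hunks in this file repeat one pattern: evsel->tp_format is no longer dereferenced directly, the format is fetched through evsel__tp_format() and every caller copes with a NULL return. A generic sketch of that accessor shape, with illustrative names rather than the perf implementation:

#include <stddef.h>

struct format { int nr_fields; };
struct event  { struct format *fmt_cached; };

/* Lazily resolve the format; may legitimately return NULL. */
static const struct format *event_format(struct event *ev)
{
	return ev ? ev->fmt_cached : NULL;	/* real code would parse on demand */
}

static int event_nr_fields(struct event *ev)
{
	const struct format *fmt = event_format(ev);

	return fmt ? fmt->nr_fields : 0;	/* callers must tolerate a missing format */
}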
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 94f4b3769bf7..a07e93c53848 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -2,10 +2,6 @@
#ifndef BUILTIN_H
#define BUILTIN_H
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <tools/config.h>
-
struct feature_status {
const char *name;
const char *macro;
@@ -56,6 +52,4 @@ int cmd_ftrace(int argc, const char **argv);
int cmd_daemon(int argc, const char **argv);
int cmd_kwork(int argc, const char **argv);
-int find_scripts(char **scripts_array, char **scripts_path_array, int num,
- int pathlen);
#endif
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index a05c1c105c51..d3c6e10dce73 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -71,6 +71,7 @@ FILES=(
"include/uapi/asm-generic/ioctls.h"
"include/uapi/asm-generic/mman-common.h"
"include/uapi/asm-generic/unistd.h"
+ "scripts/syscall.tbl"
)
declare -a SYNC_CHECK_FILES
@@ -201,6 +202,14 @@ check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscall
check_2 tools/perf/arch/powerpc/entry/syscalls/syscall.tbl arch/powerpc/kernel/syscalls/syscall.tbl
check_2 tools/perf/arch/s390/entry/syscalls/syscall.tbl arch/s390/kernel/syscalls/syscall.tbl
check_2 tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl arch/mips/kernel/syscalls/syscall_n64.tbl
+check_2 tools/perf/arch/arm/entry/syscalls/syscall.tbl arch/arm/tools/syscall.tbl
+check_2 tools/perf/arch/sh/entry/syscalls/syscall.tbl arch/sh/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/sparc/entry/syscalls/syscall.tbl arch/sparc/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/xtensa/entry/syscalls/syscall.tbl arch/xtensa/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/alpha/entry/syscalls/syscall.tbl arch/alpha/entry/syscalls/syscall.tbl
+check_2 tools/perf/arch/parisc/entry/syscalls/syscall.tbl arch/parisc/entry/syscalls/syscall.tbl
+check_2 tools/perf/arch/arm64/entry/syscalls/syscall_32.tbl arch/arm64/entry/syscalls/syscall_32.tbl
+check_2 tools/perf/arch/arm64/entry/syscalls/syscall_64.tbl arch/arm64/entry/syscalls/syscall_64.tbl
for i in "${BEAUTY_FILES[@]}"
do
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a2987f2cfe1a..f0617cc41f5f 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -84,7 +84,7 @@ static struct cmd_struct commands[] = {
#endif
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#if defined(HAVE_LIBTRACEEVENT) && (defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT))
+#if defined(HAVE_LIBTRACEEVENT)
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
@@ -514,10 +514,6 @@ int main(int argc, const char **argv)
fprintf(stderr,
"trace command not available: missing libtraceevent devel package at build time.\n");
goto out;
-#elif !defined(HAVE_LIBAUDIT_SUPPORT) && !defined(HAVE_SYSCALL_TABLE_SUPPORT)
- fprintf(stderr,
- "trace command not available: missing audit-libs devel package at build time.\n");
- goto out;
#else
setup_path();
argv[0] = "trace";
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c004dd4e65a3..3cb40965549f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -3,7 +3,7 @@
#define _PERF_PERF_H
#ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS 2048
+#define MAX_NR_CPUS 4096
#endif
enum perf_affinity {
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
index 4404b8e91690..7126fbf292e0 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
@@ -5,7 +5,7 @@
},
{
"ArchStdEvent": "EXC_RETURN",
- "PublicDescription": "Counts any architecturally executed exception return instructions. Eg: AArch64: ERET"
+ "PublicDescription": "Counts any architecturally executed exception return instructions. For example: AArch64: ERET"
},
{
"ArchStdEvent": "EXC_UNDEF",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json
index 428810f855b8..c5dcdcf43c58 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/general.json
@@ -5,6 +5,6 @@
},
{
"ArchStdEvent": "CNT_CYCLES",
- "PublicDescription": "Counts constant frequency cycles"
+ "PublicDescription": "Increments at a constant frequency equal to the rate of increment of the System Counter, CNTPCT_EL0."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json
index da7c129f2569..799d106d5173 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l1d_cache.json
@@ -1,11 +1,11 @@
[
{
"ArchStdEvent": "L1D_CACHE_REFILL",
- "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load or store operations that missed in the level 1 data cache. This event only counts one event per cache line. This event does not count cache line allocations from preload instructions or from hardware cache prefetching."
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load or store operations that missed in the level 1 data cache. This event only counts one event per cache line."
},
{
"ArchStdEvent": "L1D_CACHE",
- "PublicDescription": "Counts level 1 data cache accesses from any load/store operations. Atomic operations that resolve in the CPUs caches (near atomic operations) count as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other level 1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, are also counted."
+ "PublicDescription": "Counts level 1 data cache accesses from any load/store operations. Atomic operations that resolve in the CPUs caches (near atomic operations) counts as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other level 1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, are also counted."
},
{
"ArchStdEvent": "L1D_CACHE_WB",
@@ -17,7 +17,7 @@
},
{
"ArchStdEvent": "L1D_CACHE_RD",
- "PublicDescription": "Counts level 1 data cache accesses from any load operation. Atomic load operations that resolve in the CPUs caches count as both a write access and read access."
+ "PublicDescription": "Counts level 1 data cache accesses from any load operation. Atomic load operations that resolve in the CPUs caches counts as both a write access and read access."
},
{
"ArchStdEvent": "L1D_CACHE_WR",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json
index 0e31d0daf88b..ed8291ab9737 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l2_cache.json
@@ -1,11 +1,11 @@
[
{
"ArchStdEvent": "L2D_CACHE",
- "PublicDescription": "Counts level 2 cache accesses. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the first level caches or translation resolutions due to accesses. This event also counts write back of dirty data from level 1 data cache to the L2 cache."
+ "PublicDescription": "Counts accesses to the level 2 cache due to data accesses. Level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the first level data cache or translation resolutions due to accesses. This event also counts write back of dirty data from level 1 data cache to the L2 cache."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL",
- "PublicDescription": "Counts cache line refills into the level 2 cache. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts cache line refills into the level 2 cache. Level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the level 1 data cache or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L2D_CACHE_WB",
@@ -13,23 +13,23 @@
},
{
"ArchStdEvent": "L2D_CACHE_ALLOCATE",
- "PublicDescription": "TBD"
+ "PublicDescription": "Counts level 2 cache line allocates that do not fetch data from outside the level 2 data or unified cache."
},
{
"ArchStdEvent": "L2D_CACHE_RD",
- "PublicDescription": "Counts level 2 cache accesses due to memory read operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts level 2 data cache accesses due to memory read operations. Level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 data cache or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L2D_CACHE_WR",
- "PublicDescription": "Counts level 2 cache accesses due to memory write operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts level 2 cache accesses due to memory write operations. Level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 data cache or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_RD",
- "PublicDescription": "Counts refills for memory accesses due to memory read operation counted by L2D_CACHE_RD. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts refills for memory accesses due to memory read operation counted by L2D_CACHE_RD. Level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 data cache or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_WR",
- "PublicDescription": "Counts refills for memory accesses due to memory write operation counted by L2D_CACHE_WR. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts refills for memory accesses due to memory write operation counted by L2D_CACHE_WR. Level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 data cache or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L2D_CACHE_WB_VICTIM",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json
index 45bfba532df7..4a2e72fc5ada 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/l3_cache.json
@@ -9,11 +9,11 @@
},
{
"ArchStdEvent": "L3D_CACHE",
- "PublicDescription": "Counts level 3 cache accesses. level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
+ "PublicDescription": "Counts level 3 cache accesses. Level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L3D_CACHE_RD",
- "PublicDescription": "TBD"
+ "PublicDescription": "Counts level 3 cache accesses caused by any memory read operation. Level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
},
{
"ArchStdEvent": "L3D_CACHE_LMISS_RD",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json
index bb712d57d58a..fd5a2e0099b8 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/ll_cache.json
@@ -1,10 +1,10 @@
[
{
"ArchStdEvent": "LL_CACHE_RD",
- "PublicDescription": "Counts read transactions that were returned from outside the core cluster. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are either hit in the system level cache or missed in the SLC and are returned from any other external sources."
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster. This event counts for external last level cache when the system register CPUECTLR.EXTLLC bit is set, otherwise it counts for the L3 cache. This event counts read transactions returned from outside the core if those transactions are either hit in the system level cache or missed in the SLC and are returned from any other external sources."
},
{
"ArchStdEvent": "LL_CACHE_MISS_RD",
- "PublicDescription": "Counts read transactions that were returned from outside the core cluster but missed in the system level cache. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are missed in the System level Cache. The data source of the transaction is indicated by a field in the CHI transaction returning to the CPU. This event does not count reads caused by cache maintenance operations."
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster but missed in the system level cache. This event counts for external last level cache when the system register CPUECTLR.EXTLLC bit is set, otherwise it counts for L3 cache. This event counts read transactions returned from outside the core if those transactions are missed in the System level Cache. The data source of the transaction is indicated by a field in the CHI transaction returning to the CPU. This event does not count reads caused by cache maintenance operations."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
index 106a97f8b2e7..bb3491012a8f 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
@@ -33,7 +33,7 @@
},
{
"ArchStdEvent": "MEM_ACCESS_CHECKED",
- "PublicDescription": "Counts the number of memory read and write accesses in a cycle that are tag checked by the Memory Tagging Extension (MTE)."
+ "PublicDescription": "Counts the number of memory read and write accesses counted by MEM_ACCESS that are tag checked by the Memory Tagging Extension (MTE). This event is implemented as the sum of MEM_ACCESS_CHECKED_RD and MEM_ACCESS_CHECKED_WR"
},
{
"ArchStdEvent": "MEM_ACCESS_CHECKED_RD",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
index 5f449270b448..97d352f94323 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
@@ -5,7 +5,7 @@
},
{
"MetricName": "backend_stalled_cycles",
- "MetricExpr": "((STALL_BACKEND / CPU_CYCLES) * 100)",
+ "MetricExpr": "STALL_BACKEND / CPU_CYCLES * 100",
"BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the backend unit of the processor.",
"MetricGroup": "Cycle_Accounting",
"ScaleUnit": "1percent of cycles"
@@ -16,45 +16,45 @@
},
{
"MetricName": "branch_misprediction_ratio",
- "MetricExpr": "(BR_MIS_PRED_RETIRED / BR_RETIRED)",
+ "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
"BriefDescription": "This metric measures the ratio of branches mispredicted to the total number of branches architecturally executed. This gives an indication of the effectiveness of the branch prediction unit.",
"MetricGroup": "Miss_Ratio;Branch_Effectiveness",
- "ScaleUnit": "1per branch"
+ "ScaleUnit": "100percent of branches"
},
{
"MetricName": "branch_mpki",
- "MetricExpr": "((BR_MIS_PRED_RETIRED / INST_RETIRED) * 1000)",
+ "MetricExpr": "BR_MIS_PRED_RETIRED / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of branch mispredictions per thousand instructions executed.",
"MetricGroup": "MPKI;Branch_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "branch_percentage",
- "MetricExpr": "(((BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC) * 100)",
+ "MetricExpr": "(BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC * 100",
"BriefDescription": "This metric measures branch operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
{
"MetricName": "crypto_percentage",
- "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "CRYPTO_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
{
"MetricName": "dtlb_mpki",
- "MetricExpr": "((DTLB_WALK / INST_RETIRED) * 1000)",
+ "MetricExpr": "DTLB_WALK / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of data TLB Walks per thousand instructions executed.",
"MetricGroup": "MPKI;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "dtlb_walk_ratio",
- "MetricExpr": "(DTLB_WALK / L1D_TLB)",
+ "MetricExpr": "DTLB_WALK / L1D_TLB",
"BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
"MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
- "ScaleUnit": "1per TLB access"
+ "ScaleUnit": "100percent of TLB accesses"
},
{
"ArchStdEvent": "frontend_bound",
@@ -62,147 +62,147 @@
},
{
"MetricName": "frontend_stalled_cycles",
- "MetricExpr": "((STALL_FRONTEND / CPU_CYCLES) * 100)",
+ "MetricExpr": "STALL_FRONTEND / CPU_CYCLES * 100",
"BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the frontend unit of the processor.",
"MetricGroup": "Cycle_Accounting",
"ScaleUnit": "1percent of cycles"
},
{
"MetricName": "integer_dp_percentage",
- "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "DP_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
{
"MetricName": "ipc",
- "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
+ "MetricExpr": "INST_RETIRED / CPU_CYCLES",
"BriefDescription": "This metric measures the number of instructions retired per cycle.",
"MetricGroup": "General",
"ScaleUnit": "1per cycle"
},
{
"MetricName": "itlb_mpki",
- "MetricExpr": "((ITLB_WALK / INST_RETIRED) * 1000)",
+ "MetricExpr": "ITLB_WALK / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of instruction TLB Walks per thousand instructions executed.",
"MetricGroup": "MPKI;ITLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "itlb_walk_ratio",
- "MetricExpr": "(ITLB_WALK / L1I_TLB)",
+ "MetricExpr": "ITLB_WALK / L1I_TLB",
"BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
"MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
- "ScaleUnit": "1per TLB access"
+ "ScaleUnit": "100percent of TLB accesses"
},
{
"MetricName": "l1d_cache_miss_ratio",
- "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
+ "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
"BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
"MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
- "ScaleUnit": "1per cache access"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1d_cache_mpki",
- "MetricExpr": "((L1D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "MetricExpr": "L1D_CACHE_REFILL / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of level 1 data cache accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;L1D_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "l1d_tlb_miss_ratio",
- "MetricExpr": "(L1D_TLB_REFILL / L1D_TLB)",
+ "MetricExpr": "L1D_TLB_REFILL / L1D_TLB",
"BriefDescription": "This metric measures the ratio of level 1 data TLB accesses missed to the total number of level 1 data TLB accesses. This gives an indication of the effectiveness of the level 1 data TLB.",
"MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
- "ScaleUnit": "1per TLB access"
+ "ScaleUnit": "100percent of TLB accesses"
},
{
"MetricName": "l1d_tlb_mpki",
- "MetricExpr": "((L1D_TLB_REFILL / INST_RETIRED) * 1000)",
- "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricExpr": "L1D_TLB_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "This metric measures the number of level 1 data TLB accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "l1i_cache_miss_ratio",
- "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
+ "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
"BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
"MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
- "ScaleUnit": "1per cache access"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1i_cache_mpki",
- "MetricExpr": "((L1I_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "MetricExpr": "L1I_CACHE_REFILL / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of level 1 instruction cache accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;L1I_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "l1i_tlb_miss_ratio",
- "MetricExpr": "(L1I_TLB_REFILL / L1I_TLB)",
+ "MetricExpr": "L1I_TLB_REFILL / L1I_TLB",
"BriefDescription": "This metric measures the ratio of level 1 instruction TLB accesses missed to the total number of level 1 instruction TLB accesses. This gives an indication of the effectiveness of the level 1 instruction TLB.",
"MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
- "ScaleUnit": "1per TLB access"
+ "ScaleUnit": "100percent of TLB accesses"
},
{
"MetricName": "l1i_tlb_mpki",
- "MetricExpr": "((L1I_TLB_REFILL / INST_RETIRED) * 1000)",
+ "MetricExpr": "L1I_TLB_REFILL / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;ITLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "l2_cache_miss_ratio",
- "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
+ "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
"BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
"MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
- "ScaleUnit": "1per cache access"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l2_cache_mpki",
- "MetricExpr": "((L2D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "MetricExpr": "L2D_CACHE_REFILL / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of level 2 unified cache accesses missed per thousand instructions executed. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
"MetricGroup": "MPKI;L2_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "l2_tlb_miss_ratio",
- "MetricExpr": "(L2D_TLB_REFILL / L2D_TLB)",
+ "MetricExpr": "L2D_TLB_REFILL / L2D_TLB",
"BriefDescription": "This metric measures the ratio of level 2 unified TLB accesses missed to the total number of level 2 unified TLB accesses. This gives an indication of the effectiveness of the level 2 TLB.",
"MetricGroup": "Miss_Ratio;ITLB_Effectiveness;DTLB_Effectiveness",
- "ScaleUnit": "1per TLB access"
+ "ScaleUnit": "100percent of TLB accesses"
},
{
"MetricName": "l2_tlb_mpki",
- "MetricExpr": "((L2D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "MetricExpr": "L2D_TLB_REFILL / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of level 2 unified TLB accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;ITLB_Effectiveness;DTLB_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "ll_cache_read_hit_ratio",
- "MetricExpr": "((LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD)",
+ "MetricExpr": "(LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD",
"BriefDescription": "This metric measures the ratio of last level cache read accesses hit in the cache to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
"MetricGroup": "LL_Cache_Effectiveness",
- "ScaleUnit": "1per cache access"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "ll_cache_read_miss_ratio",
- "MetricExpr": "(LL_CACHE_MISS_RD / LL_CACHE_RD)",
+ "MetricExpr": "LL_CACHE_MISS_RD / LL_CACHE_RD",
"BriefDescription": "This metric measures the ratio of last level cache read accesses missed to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
"MetricGroup": "Miss_Ratio;LL_Cache_Effectiveness",
- "ScaleUnit": "1per cache access"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "ll_cache_read_mpki",
- "MetricExpr": "((LL_CACHE_MISS_RD / INST_RETIRED) * 1000)",
+ "MetricExpr": "LL_CACHE_MISS_RD / INST_RETIRED * 1000",
"BriefDescription": "This metric measures the number of last level cache read accesses missed per thousand instructions executed.",
"MetricGroup": "MPKI;LL_Cache_Effectiveness",
"ScaleUnit": "1MPKI"
},
{
"MetricName": "load_percentage",
- "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "LD_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
@@ -213,21 +213,21 @@
},
{
"MetricName": "scalar_fp_percentage",
- "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "VFP_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
{
"MetricName": "simd_percentage",
- "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "ASE_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
{
"MetricName": "store_percentage",
- "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
+ "MetricExpr": "ST_SPEC / INST_SPEC * 100",
"BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
@@ -300,5 +300,12 @@
"MetricGroup": "Operation_Mix",
"MetricName": "branch_indirect_spec_rate",
"ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "sve_all_percentage",
+ "MetricExpr": "SVE_INST_SPEC / INST_SPEC * 100",
+ "BriefDescription": "This metric measures scalable vector operations, including loads and stores, as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
}
]
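The ratio metrics above switch their ScaleUnit from 1per-X to a leading 100, which perf applies as a display multiplier, so the raw ratio is reported as a percentage. A small worked example with made-up counter values:

#include <stdio.h>

int main(void)
{
	double br_mis_pred_retired = 1200.0;	/* hypothetical counts */
	double br_retired = 60000.0;

	/* branch_misprediction_ratio with ScaleUnit "100percent of branches" */
	double ratio = br_mis_pred_retired / br_retired;

	printf("%.2f percent of branches\n", ratio * 100.0);	/* prints 2.00 */
	return 0;
}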
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json
index f297b049b62f..337e6a916f2b 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/retired.json
@@ -9,7 +9,7 @@
},
{
"ArchStdEvent": "CID_WRITE_RETIRED",
- "PublicDescription": "Counts architecturally executed writes to the CONTEXTIDR register, which usually contain the kernel PID and can be output with hardware trace."
+ "PublicDescription": "Counts architecturally executed writes to the CONTEXTIDR_EL1 register, which usually contain the kernel PID and can be output with hardware trace."
},
{
"ArchStdEvent": "TTBR_WRITE_RETIRED",
@@ -17,7 +17,7 @@
},
{
"ArchStdEvent": "BR_RETIRED",
- "PublicDescription": "Counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted."
+ "PublicDescription": "Counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted. Note that exception generating instructions, exception return instructions and context synchronization instructions are not counted."
},
{
"ArchStdEvent": "BR_MIS_PRED_RETIRED",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json
index 1af961f8a6c8..a7ea0d4c4ea4 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spec_operation.json
@@ -5,7 +5,7 @@
},
{
"ArchStdEvent": "BR_PRED",
- "PublicDescription": "Counts branches speculatively executed and were predicted right."
+ "PublicDescription": "Counts all speculatively executed branches."
},
{
"ArchStdEvent": "INST_SPEC",
@@ -29,7 +29,7 @@
},
{
"ArchStdEvent": "LDREX_SPEC",
- "PublicDescription": "Counts Load-Exclusive operations that have been speculatively executed. Eg: LDREX, LDX"
+ "PublicDescription": "Counts Load-Exclusive operations that have been speculatively executed. For example: LDREX, LDX"
},
{
"ArchStdEvent": "STREX_PASS_SPEC",
@@ -73,15 +73,15 @@
},
{
"ArchStdEvent": "BR_IMMED_SPEC",
- "PublicDescription": "Counts immediate branch operations which are speculatively executed."
+ "PublicDescription": "Counts direct branch operations which are speculatively executed."
},
{
"ArchStdEvent": "BR_RETURN_SPEC",
- "PublicDescription": "Counts procedure return operations (RET) which are speculatively executed."
+ "PublicDescription": "Counts procedure return operations (RET, RETAA and RETAB) which are speculatively executed."
},
{
"ArchStdEvent": "BR_INDIRECT_SPEC",
- "PublicDescription": "Counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations. Eg: BR Xn, RET"
+ "PublicDescription": "Counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations and direct branch instructions. Some examples of the instructions counted by this event include BR Xn, RET, etc..."
},
{
"ArchStdEvent": "ISB_SPEC",
@@ -97,11 +97,11 @@
},
{
"ArchStdEvent": "RC_LD_SPEC",
- "PublicDescription": "Counts any load acquire operations that are speculatively executed. Eg: LDAR, LDARH, LDARB"
+ "PublicDescription": "Counts any load acquire operations that are speculatively executed. For example: LDAR, LDARH, LDARB"
},
{
"ArchStdEvent": "RC_ST_SPEC",
- "PublicDescription": "Counts any store release operations that are speculatively executed. Eg: STLR, STLRH, STLRB'"
+ "PublicDescription": "Counts any store release operations that are speculatively executed. For example: STLR, STLRH, STLRB"
},
{
"ArchStdEvent": "ASE_INST_SPEC",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json
index bbbebc805034..1fcba19dfb7d 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/stall.json
@@ -1,7 +1,7 @@
[
{
"ArchStdEvent": "STALL_FRONTEND",
- "PublicDescription": "Counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. All the frontend slots were empty during the cycle when this event counts."
+ "PublicDescription": "Counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. STALL_FRONTEND_SLOTS counts SLOTS during the cycle when this event counts."
},
{
"ArchStdEvent": "STALL_BACKEND",
@@ -9,11 +9,11 @@
},
{
"ArchStdEvent": "STALL",
- "PublicDescription": "Counts cycles when no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ "PublicDescription": "Counts cycles when no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall). This event is the sum of STALL_FRONTEND and STALL_BACKEND"
},
{
"ArchStdEvent": "STALL_SLOT_BACKEND",
- "PublicDescription": "Counts slots per cycle in which no operations are sent from the rename unit to the backend due to backend resource constraints."
+ "PublicDescription": "Counts slots per cycle in which no operations are sent from the rename unit to the backend due to backend resource constraints. STALL_BACKEND counts during the cycle when STALL_SLOT_BACKEND counts at least 1."
},
{
"ArchStdEvent": "STALL_SLOT_FRONTEND",
@@ -21,7 +21,7 @@
},
{
"ArchStdEvent": "STALL_SLOT",
- "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall)."
+ "PublicDescription": "Counts slots per cycle in which no operations are sent to the rename unit from the frontend or from the rename unit to the backend for any reason (either frontend or backend stall). STALL_SLOT is the sum of STALL_SLOT_FRONTEND and STALL_SLOT_BACKEND."
},
{
"ArchStdEvent": "STALL_BACKEND_MEM",
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json
index b550af1831f5..5704f1e83af9 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/tlb.json
@@ -25,11 +25,11 @@
},
{
"ArchStdEvent": "DTLB_WALK",
- "PublicDescription": "Counts data memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Note that partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ "PublicDescription": "Counts number of demand data translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD. Note that partial translations that cause a translation table walk are also counted. Also note that this event counts walks triggered by software preloads, but not walks triggered by hardware prefetchers, and that this event does not count walks triggered by TLB maintenance operations."
},
{
"ArchStdEvent": "ITLB_WALK",
- "PublicDescription": "Counts instruction memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ "PublicDescription": "Counts number of instruction translation table walks caused by a miss in the L2 TLB and performing at least one memory access. Translation table walks are counted even if the translation ended up taking a translation fault for reasons different than EPD, E0PD and NFD. Note that partial translations that cause a translation table walk are also counted. Also note that this event does not count walks triggered by TLB maintenance operations."
},
{
"ArchStdEvent": "L1D_TLB_REFILL_RD",
diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
index 492083b99256..dddecc946575 100644
--- a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
@@ -534,6 +534,11 @@
"BriefDescription": "SVE operations speculatively executed"
},
{
+ "EventCode": "0x8007",
+ "EventName": "ASE_SVE_INST_SPEC",
+ "BriefDescription": "Operation speculatively executed, Advanced SIMD or SVE."
+ },
+ {
"PublicDescription": "Microarchitectural operation, Operations speculatively executed.",
"EventCode": "0x8008",
"EventName": "UOP_SPEC",
@@ -552,48 +557,393 @@
"BriefDescription": "Floating-point Operations speculatively executed."
},
{
+ "EventCode": "0x8011",
+ "EventName": "ASE_FP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD."
+ },
+ {
+ "EventCode": "0x8012",
+ "EventName": "SVE_FP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE."
+ },
+ {
+ "EventCode": "0x8013",
+ "EventName": "ASE_SVE_FP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE."
+ },
+ {
"PublicDescription": "Floating-point half-precision operations speculatively executed",
"EventCode": "0x8014",
"EventName": "FP_HP_SPEC",
"BriefDescription": "Floating-point half-precision operations speculatively executed"
},
{
+ "EventCode": "0x8015",
+ "EventName": "ASE_FP_HP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD half precision."
+ },
+ {
+ "EventCode": "0x8016",
+ "EventName": "SVE_FP_HP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE half precision."
+ },
+ {
+ "EventCode": "0x8017",
+ "EventName": "ASE_SVE_FP_HP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE half precision."
+ },
+ {
"PublicDescription": "Floating-point single-precision operations speculatively executed",
"EventCode": "0x8018",
"EventName": "FP_SP_SPEC",
"BriefDescription": "Floating-point single-precision operations speculatively executed"
},
{
+ "EventCode": "0x8019",
+ "EventName": "ASE_FP_SP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD single precision."
+ },
+ {
+ "EventCode": "0x801A",
+ "EventName": "SVE_FP_SP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE single precision."
+ },
+ {
+ "EventCode": "0x801B",
+ "EventName": "ASE_SVE_FP_SP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE single precision."
+ },
+ {
"PublicDescription": "Floating-point double-precision operations speculatively executed",
"EventCode": "0x801C",
"EventName": "FP_DP_SPEC",
"BriefDescription": "Floating-point double-precision operations speculatively executed"
},
{
+ "EventCode": "0x801D",
+ "EventName": "ASE_FP_DP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD double precision."
+ },
+ {
+ "EventCode": "0x801E",
+ "EventName": "SVE_FP_DP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE double precision."
+ },
+ {
+ "EventCode": "0x801F",
+ "EventName": "ASE_SVE_FP_DP_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE double precision."
+ },
+ {
+ "EventCode": "0x8020",
+ "EventName": "FP_DIV_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, divide."
+ },
+ {
+ "EventCode": "0x8021",
+ "EventName": "ASE_FP_DIV_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD divide."
+ },
+ {
+ "EventCode": "0x8022",
+ "EventName": "SVE_FP_DIV_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE divide."
+ },
+ {
+ "EventCode": "0x8023",
+ "EventName": "ASE_SVE_FP_DIV_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE divide."
+ },
+ {
+ "EventCode": "0x8024",
+ "EventName": "FP_SQRT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, square root."
+ },
+ {
+ "EventCode": "0x8025",
+ "EventName": "ASE_FP_SQRT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD square root."
+ },
+ {
+ "EventCode": "0x8026",
+ "EventName": "SVE_FP_SQRT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE square root."
+ },
+ {
+ "EventCode": "0x8027",
+ "EventName": "ASE_SVE_FP_SQRT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE square-root."
+ },
+ {
"PublicDescription": "Floating-point FMA Operations speculatively executed.",
"EventCode": "0x8028",
"EventName": "FP_FMA_SPEC",
"BriefDescription": "Floating-point FMA Operations speculatively executed."
},
{
+ "EventCode": "0x8029",
+ "EventName": "ASE_FP_FMA_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD FMA."
+ },
+ {
+ "EventCode": "0x802A",
+ "EventName": "SVE_FP_FMA_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE FMA."
+ },
+ {
+ "EventCode": "0x802B",
+ "EventName": "ASE_SVE_FP_FMA_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE FMA."
+ },
+ {
+ "EventCode": "0x802C",
+ "EventName": "FP_MUL_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, multiply."
+ },
+ {
+ "EventCode": "0x802D",
+ "EventName": "ASE_FP_MUL_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD multiply."
+ },
+ {
+ "EventCode": "0x802E",
+ "EventName": "SVE_FP_MUL_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE multiply."
+ },
+ {
+ "EventCode": "0x802F",
+ "EventName": "ASE_SVE_FP_MUL_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE multiply."
+ },
+ {
+ "EventCode": "0x8030",
+ "EventName": "FP_ADDSUB_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, add or subtract."
+ },
+ {
+ "EventCode": "0x8031",
+ "EventName": "ASE_FP_ADDSUB_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD add or subtract."
+ },
+ {
+ "EventCode": "0x8032",
+ "EventName": "SVE_FP_ADDSUB_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE add or subtract."
+ },
+ {
+ "EventCode": "0x8033",
+ "EventName": "ASE_SVE_FP_ADDSUB_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE add or subtract."
+ },
+ {
"PublicDescription": "Floating-point reciprocal estimate Operations speculatively executed.",
"EventCode": "0x8034",
"EventName": "FP_RECPE_SPEC",
"BriefDescription": "Floating-point reciprocal estimate Operations speculatively executed."
},
{
+ "EventCode": "0x8035",
+ "EventName": "ASE_FP_RECPE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD reciprocal estimate."
+ },
+ {
+ "EventCode": "0x8036",
+ "EventName": "SVE_FP_RECPE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE reciprocal estimate."
+ },
+ {
+ "EventCode": "0x8037",
+ "EventName": "ASE_SVE_FP_RECPE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE reciprocal estimate."
+ },
+ {
"PublicDescription": "floating-point convert Operations speculatively executed.",
"EventCode": "0x8038",
"EventName": "FP_CVT_SPEC",
"BriefDescription": "floating-point convert Operations speculatively executed."
},
{
+ "EventCode": "0x8039",
+ "EventName": "ASE_FP_CVT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD convert."
+ },
+ {
+ "EventCode": "0x803A",
+ "EventName": "SVE_FP_CVT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE convert."
+ },
+ {
+ "EventCode": "0x803B",
+ "EventName": "ASE_SVE_FP_CVT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE convert."
+ },
+ {
+ "EventCode": "0x803C",
+ "EventName": "SVE_FP_AREDUCE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE accumulating reduction."
+ },
+ {
+ "EventCode": "0x803D",
+ "EventName": "ASE_FP_PREDUCE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD pairwise add step."
+ },
+ {
+ "EventCode": "0x803E",
+ "EventName": "SVE_FP_VREDUCE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, SVE vector reduction."
+ },
+ {
+ "EventCode": "0x803F",
+ "EventName": "ASE_SVE_FP_VREDUCE_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE vector reduction."
+ },
+ {
+ "EventCode": "0x8040",
+ "EventName": "INT_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed."
+ },
+ {
+ "EventCode": "0x8041",
+ "EventName": "ASE_INT_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD."
+ },
+ {
+ "EventCode": "0x8042",
+ "EventName": "SVE_INT_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE."
+ },
+ {
"PublicDescription": "Advanced SIMD and SVE integer Operations speculatively executed.",
"EventCode": "0x8043",
"EventName": "ASE_SVE_INT_SPEC",
"BriefDescription": "Advanced SIMD and SVE integer Operations speculatively executed."
},
{
+ "EventCode": "0x8044",
+ "EventName": "INT_DIV_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, divide."
+ },
+ {
+ "EventCode": "0x8045",
+ "EventName": "INT_DIV64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, 64-bit divide."
+ },
+ {
+ "EventCode": "0x8046",
+ "EventName": "SVE_INT_DIV_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE divide."
+ },
+ {
+ "EventCode": "0x8047",
+ "EventName": "SVE_INT_DIV64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE 64-bit divide."
+ },
+ {
+ "EventCode": "0x8048",
+ "EventName": "INT_MUL_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, multiply."
+ },
+ {
+ "EventCode": "0x8049",
+ "EventName": "ASE_INT_MUL_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD multiply."
+ },
+ {
+ "EventCode": "0x804A",
+ "EventName": "SVE_INT_MUL_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE multiply."
+ },
+ {
+ "EventCode": "0x804B",
+ "EventName": "ASE_SVE_INT_MUL_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD or SVE multiply."
+ },
+ {
+ "EventCode": "0x804C",
+ "EventName": "INT_MUL64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, 64\u00d764 multiply."
+ },
+ {
+ "EventCode": "0x804D",
+ "EventName": "SVE_INT_MUL64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE 64\u00d764 multiply."
+ },
+ {
+ "EventCode": "0x804E",
+ "EventName": "INT_MULH64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, 64\u00d764 multiply returning high part."
+ },
+ {
+ "EventCode": "0x804F",
+ "EventName": "SVE_INT_MULH64_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE 64\u00d764 multiply high part."
+ },
+ {
+ "EventCode": "0x8058",
+ "EventName": "NONFP_SPEC",
+ "BriefDescription": "Non-floating-point Operation speculatively executed."
+ },
+ {
+ "EventCode": "0x8059",
+ "EventName": "ASE_NONFP_SPEC",
+ "BriefDescription": "Non-floating-point Operation speculatively executed, Advanced SIMD."
+ },
+ {
+ "EventCode": "0x805A",
+ "EventName": "SVE_NONFP_SPEC",
+ "BriefDescription": "Non-floating-point Operation speculatively executed, SVE."
+ },
+ {
+ "EventCode": "0x805B",
+ "EventName": "ASE_SVE_NONFP_SPEC",
+ "BriefDescription": "Non-floating-point Operation speculatively executed, Advanced SIMD or SVE."
+ },
+ {
+ "EventCode": "0x805D",
+ "EventName": "ASE_INT_VREDUCE_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD reduction."
+ },
+ {
+ "EventCode": "0x805E",
+ "EventName": "SVE_INT_VREDUCE_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, SVE reduction."
+ },
+ {
+ "EventCode": "0x805F",
+ "EventName": "ASE_SVE_INT_VREDUCE_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD or SVE reduction."
+ },
+ {
+ "EventCode": "0x8060",
+ "EventName": "SVE_PERM_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE permute."
+ },
+ {
+ "EventCode": "0x8065",
+ "EventName": "SVE_XPIPE_Z2R_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE vector to scalar cross-pipe."
+ },
+ {
+ "EventCode": "0x8066",
+ "EventName": "SVE_XPIPE_R2Z_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE scalar to vector cross-pipe."
+ },
+ {
+ "EventCode": "0x8068",
+ "EventName": "SVE_PGEN_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE predicate generating."
+ },
+ {
+ "EventCode": "0x8069",
+ "EventName": "SVE_PGEN_FLG_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE predicate flag setting."
+ },
+ {
+ "EventCode": "0x806D",
+ "EventName": "SVE_PPERM_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE predicate permute."
+ },
+ {
"PublicDescription": "SVE predicated Operations speculatively executed.",
"EventCode": "0x8074",
"EventName": "SVE_PRED_SPEC",
@@ -630,6 +980,16 @@
"BriefDescription": "SVE MOVPRFX Operations speculatively executed."
},
{
+ "EventCode": "0x807D",
+ "EventName": "SVE_MOVPRFX_Z_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE MOVPRFX zeroing predication."
+ },
+ {
+ "EventCode": "0x807E",
+ "EventName": "SVE_MOVPRFX_M_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE MOVPRFX merging predication."
+ },
+ {
"PublicDescription": "SVE MOVPRFX unfused Operations speculatively executed.",
"EventCode": "0x807F",
"EventName": "SVE_MOVPRFX_U_SPEC",
@@ -696,6 +1056,16 @@
"BriefDescription": "SVE contiguous prefetch element Operations speculatively executed."
},
{
+ "EventCode": "0x80A1",
+ "EventName": "SVE_LDNT_CONTIG_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE non-temporal contiguous load element."
+ },
+ {
+ "EventCode": "0x80A2",
+ "EventName": "SVE_STNT_CONTIG_SPEC",
+ "BriefDescription": "Operation speculatively executed, SVE non-temporal contiguous store element."
+ },
+ {
"PublicDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed.",
"EventCode": "0x80A5",
"EventName": "ASE_SVE_LD_MULTI_SPEC",
@@ -786,6 +1156,16 @@
"BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed."
},
{
+ "EventCode": "0x80C8",
+ "EventName": "INT_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable integer element arithmetic operations Speculatively executed."
+ },
+ {
+ "EventCode": "0x80C9",
+ "EventName": "INT_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable integer element arithmetic operations Speculatively executed."
+ },
+ {
"PublicDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed",
"EventCode": "0x80E3",
"EventName": "ASE_SVE_INT8_SPEC",
@@ -808,5 +1188,340 @@
"EventCode": "0x80EF",
"EventName": "ASE_SVE_INT64_SPEC",
"BriefDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed"
+ },
+ {
+ "EventCode": "0x80F3",
+ "EventName": "ASE_SVE_FP_DOT_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE dot-product."
+ },
+ {
+ "EventCode": "0x80F7",
+ "EventName": "ASE_SVE_FP_MMLA_SPEC",
+ "BriefDescription": "Floating-point Operation speculatively executed, Advanced SIMD or SVE matrix multiply."
+ },
+ {
+ "EventCode": "0x80FB",
+ "EventName": "ASE_SVE_INT_DOT_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD or SVE dot-product."
+ },
+ {
+ "EventCode": "0x80FF",
+ "EventName": "ASE_SVE_INT_MMLA_SPEC",
+ "BriefDescription": "Integer Operation speculatively executed, Advanced SIMD or SVE matrix multiply."
+ },
+ {
+ "EventCode": "0x8128",
+ "EventName": "DTLB_WALK_PERCYC",
+ "BriefDescription": "Data translation table walks in progress."
+ },
+ {
+ "EventCode": "0x8129",
+ "EventName": "ITLB_WALK_PERCYC",
+ "BriefDescription": "Instruction translation table walks in progress."
+ },
+ {
+ "EventCode": "0x8136",
+ "EventName": "DTLB_STEP",
+ "BriefDescription": "Data TLB translation table walk, step."
+ },
+ {
+ "EventCode": "0x8137",
+ "EventName": "ITLB_STEP",
+ "BriefDescription": "Instruction TLB translation table walk, step."
+ },
+ {
+ "EventCode": "0x8138",
+ "EventName": "DTLB_WALK_LARGE",
+ "BriefDescription": "Data TLB large page translation table walk."
+ },
+ {
+ "EventCode": "0x8139",
+ "EventName": "ITLB_WALK_LARGE",
+ "BriefDescription": "Instruction TLB large page translation table walk."
+ },
+ {
+ "EventCode": "0x813A",
+ "EventName": "DTLB_WALK_SMALL",
+ "BriefDescription": "Data TLB small page translation table walk."
+ },
+ {
+ "EventCode": "0x813B",
+ "EventName": "ITLB_WALK_SMALL",
+ "BriefDescription": "Instruction TLB small page translation table walk."
+ },
+ {
+ "EventCode": "0x8144",
+ "EventName": "L1D_CACHE_MISS",
+ "BriefDescription": "Level 1 data cache demand access miss."
+ },
+ {
+ "EventCode": "0x8145",
+ "EventName": "L1I_CACHE_HWPRF",
+ "BriefDescription": "Level 1 instruction cache hardware prefetch."
+ },
+ {
+ "EventCode": "0x814C",
+ "EventName": "L2D_CACHE_MISS",
+ "BriefDescription": "Level 2 data cache demand access miss."
+ },
+ {
+ "EventCode": "0x8154",
+ "EventName": "L1D_CACHE_HWPRF",
+ "BriefDescription": "Level 1 data cache hardware prefetch."
+ },
+ {
+ "EventCode": "0x8155",
+ "EventName": "L2D_CACHE_HWPRF",
+ "BriefDescription": "Level 2 data cache hardware prefetch."
+ },
+ {
+ "EventCode": "0x8158",
+ "EventName": "STALL_FRONTEND_MEMBOUND",
+ "BriefDescription": "Frontend stall cycles, memory bound."
+ },
+ {
+ "EventCode": "0x8159",
+ "EventName": "STALL_FRONTEND_L1I",
+ "BriefDescription": "Frontend stall cycles, level 1 instruction cache."
+ },
+ {
+ "EventCode": "0x815A",
+ "EventName": "STALL_FRONTEND_L2I",
+ "BriefDescription": "Frontend stall cycles, level 2 instruction cache."
+ },
+ {
+ "EventCode": "0x815B",
+ "EventName": "STALL_FRONTEND_MEM",
+ "BriefDescription": "Frontend stall cycles, last level PE cache or memory."
+ },
+ {
+ "EventCode": "0x815C",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "Frontend stall cycles, TLB."
+ },
+ {
+ "EventCode": "0x8160",
+ "EventName": "STALL_FRONTEND_CPUBOUND",
+ "BriefDescription": "Frontend stall cycles, processor bound."
+ },
+ {
+ "EventCode": "0x8161",
+ "EventName": "STALL_FRONTEND_FLOW",
+ "BriefDescription": "Frontend stall cycles, flow control."
+ },
+ {
+ "EventCode": "0x8162",
+ "EventName": "STALL_FRONTEND_FLUSH",
+ "BriefDescription": "Frontend stall cycles, flush recovery."
+ },
+ {
+ "EventCode": "0x8163",
+ "EventName": "STALL_FRONTEND_RENAME",
+ "BriefDescription": "Frontend stall cycles, rename full."
+ },
+ {
+ "EventCode": "0x8164",
+ "EventName": "STALL_BACKEND_MEMBOUND",
+ "BriefDescription": "Backend stall cycles, memory bound."
+ },
+ {
+ "EventCode": "0x8165",
+ "EventName": "STALL_BACKEND_L1D",
+ "BriefDescription": "Backend stall cycles, level 1 data cache."
+ },
+ {
+ "EventCode": "0x8166",
+ "EventName": "STALL_BACKEND_L2D",
+ "BriefDescription": "Backend stall cycles, level 2 data cache."
+ },
+ {
+ "EventCode": "0x8167",
+ "EventName": "STALL_BACKEND_TLB",
+ "BriefDescription": "Backend stall cycles, TLB."
+ },
+ {
+ "EventCode": "0x8168",
+ "EventName": "STALL_BACKEND_ST",
+ "BriefDescription": "Backend stall cycles, store."
+ },
+ {
+ "EventCode": "0x816A",
+ "EventName": "STALL_BACKEND_CPUBOUND",
+ "BriefDescription": "Backend stall cycles, processor bound."
+ },
+ {
+ "EventCode": "0x816B",
+ "EventName": "STALL_BACKEND_BUSY",
+ "BriefDescription": "Backend stall cycles, backend busy."
+ },
+ {
+ "EventCode": "0x816C",
+ "EventName": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "Backend stall cycles, input dependency."
+ },
+ {
+ "EventCode": "0x816D",
+ "EventName": "STALL_BACKEND_RENAME",
+ "BriefDescription": "Backend stall cycles, rename full."
+ },
+ {
+ "EventCode": "0x816E",
+ "EventName": "STALL_BACKEND_ATOMIC",
+ "BriefDescription": "Backend stall cycles, atomic operation."
+ },
+ {
+ "EventCode": "0x816F",
+ "EventName": "STALL_BACKEND_MEMCPYSET",
+ "BriefDescription": "Backend stall cycles, Memory Copy or Set operation."
+ },
+ {
+ "EventCode": "0x8186",
+ "EventName": "UOP_RETIRED",
+ "BriefDescription": "Micro-operation architecturally executed."
+ },
+ {
+ "EventCode": "0x8188",
+ "EventName": "DTLB_WALK_BLOCK",
+ "BriefDescription": "Data TLB block translation table walk."
+ },
+ {
+ "EventCode": "0x8189",
+ "EventName": "ITLB_WALK_BLOCK",
+ "BriefDescription": "Instruction TLB block translation table walk."
+ },
+ {
+ "EventCode": "0x818A",
+ "EventName": "DTLB_WALK_PAGE",
+ "BriefDescription": "Data TLB page translation table walk."
+ },
+ {
+ "EventCode": "0x818B",
+ "EventName": "ITLB_WALK_PAGE",
+ "BriefDescription": "Instruction TLB page translation table walk."
+ },
+ {
+ "EventCode": "0x81B8",
+ "EventName": "L1I_CACHE_REFILL_HWPRF",
+ "BriefDescription": "Level 1 instruction cache refill, hardware prefetch."
+ },
+ {
+ "EventCode": "0x81BC",
+ "EventName": "L1D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "Level 1 data cache refill, hardware prefetch."
+ },
+ {
+ "EventCode": "0x81BD",
+ "EventName": "L2D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "Level 2 data cache refill, hardware prefetch."
+ },
+ {
+ "EventCode": "0x81C0",
+ "EventName": "L1I_CACHE_HIT_RD",
+ "BriefDescription": "Level 1 instruction cache demand fetch hit."
+ },
+ {
+ "EventCode": "0x81C4",
+ "EventName": "L1D_CACHE_HIT_RD",
+ "BriefDescription": "Level 1 data cache demand access hit, read."
+ },
+ {
+ "EventCode": "0x81C5",
+ "EventName": "L2D_CACHE_HIT_RD",
+ "BriefDescription": "Level 2 data cache demand access hit, read."
+ },
+ {
+ "EventCode": "0x81C8",
+ "EventName": "L1D_CACHE_HIT_WR",
+ "BriefDescription": "Level 1 data cache demand access hit, write."
+ },
+ {
+ "EventCode": "0x81C9",
+ "EventName": "L2D_CACHE_HIT_WR",
+ "BriefDescription": "Level 2 data cache demand access hit, write."
+ },
+ {
+ "EventCode": "0x8200",
+ "EventName": "L1I_CACHE_HIT",
+ "BriefDescription": "Level 1 instruction cache hit."
+ },
+ {
+ "EventCode": "0x8204",
+ "EventName": "L1D_CACHE_HIT",
+ "BriefDescription": "Level 1 data cache hit."
+ },
+ {
+ "EventCode": "0x8205",
+ "EventName": "L2D_CACHE_HIT",
+ "BriefDescription": "Level 2 data cache hit."
+ },
+ {
+ "EventCode": "0x8240",
+ "EventName": "L1I_LFB_HIT_RD",
+ "BriefDescription": "Level 1 instruction cache demand fetch line-fill buffer hit."
+ },
+ {
+ "EventCode": "0x8244",
+ "EventName": "L1D_LFB_HIT_RD",
+ "BriefDescription": "Level 1 data cache demand access line-fill buffer hit, read."
+ },
+ {
+ "EventCode": "0x8245",
+ "EventName": "L2D_LFB_HIT_RD",
+ "BriefDescription": "Level 2 data cache demand access line-fill buffer hit, read."
+ },
+ {
+ "EventCode": "0x8248",
+ "EventName": "L1D_LFB_HIT_WR",
+ "BriefDescription": "Level 1 data cache demand access line-fill buffer hit, write."
+ },
+ {
+ "EventCode": "0x8249",
+ "EventName": "L2D_LFB_HIT_WR",
+ "BriefDescription": "Level 2 data cache demand access line-fill buffer hit, write."
+ },
+ {
+ "EventCode": "0x8280",
+ "EventName": "L1I_CACHE_PRF",
+ "BriefDescription": "Level 1 instruction cache, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x8284",
+ "EventName": "L1D_CACHE_PRF",
+ "BriefDescription": "Level 1 data cache, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x8285",
+ "EventName": "L2D_CACHE_PRF",
+ "BriefDescription": "Level 2 data cache, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x8288",
+ "EventName": "L1I_CACHE_REFILL_PRF",
+ "BriefDescription": "Level 1 instruction cache refill, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x828C",
+ "EventName": "L1D_CACHE_REFILL_PRF",
+ "BriefDescription": "Level 1 data cache refill, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x828D",
+ "EventName": "L2D_CACHE_REFILL_PRF",
+ "BriefDescription": "Level 2 data cache refill, preload or prefetch hit."
+ },
+ {
+ "EventCode": "0x8320",
+ "EventName": "L1D_CACHE_REFILL_PERCYC",
+ "BriefDescription": "Level 1 data or unified cache refills in progress."
+ },
+ {
+ "EventCode": "0x8321",
+ "EventName": "L2D_CACHE_REFILL_PERCYC",
+ "BriefDescription": "Level 2 data or unified cache refills in progress."
+ },
+ {
+ "EventCode": "0x8324",
+ "EventName": "L1I_CACHE_REFILL_PERCYC",
+ "BriefDescription": "Level 1 instruction or unified cache refills in progress."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json
new file mode 100644
index 000000000000..52f5ca1482fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json
@@ -0,0 +1,6 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_PRF",
+ "BriefDescription": "This event counts fetch counted by either Level 1 instruction hardware prefetch or Level 1 instruction software prefetch."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json
new file mode 100644
index 000000000000..24ff5d8dbb98
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json
@@ -0,0 +1,122 @@
+[
+ {
+ "EventCode": "0x0182",
+ "EventName": "LD_COMP_WAIT_L1_MISS",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache access."
+ },
+ {
+ "EventCode": "0x0183",
+ "EventName": "LD_COMP_WAIT_L1_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache access."
+ },
+ {
+ "EventCode": "0x0184",
+ "EventName": "LD_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "EventCode": "0x0185",
+ "EventName": "LD_COMP_WAIT_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "EventCode": "0x0186",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed due to the lack of an available prefetch port."
+ },
+ {
+ "EventCode": "0x0187",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_EX",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by an integer load operation."
+ },
+ {
+ "EventCode": "0x0188",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_SWPF",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by a software prefetch instruction."
+ },
+ {
+ "EventCode": "0x0189",
+ "EventName": "EU_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is an integer or floating-point/SIMD instruction."
+ },
+ {
+ "EventCode": "0x018A",
+ "EventName": "FL_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a floating-point/SIMD instruction."
+ },
+ {
+ "EventCode": "0x018B",
+ "EventName": "BR_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a branch instruction."
+ },
+ {
+ "EventCode": "0x018C",
+ "EventName": "ROB_EMPTY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty."
+ },
+ {
+ "EventCode": "0x018D",
+ "EventName": "ROB_EMPTY_STQ_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty and the store port (SP) is full."
+ },
+ {
+ "EventCode": "0x018E",
+ "EventName": "WFE_WFI_CYCLE",
+ "BriefDescription": "This event counts every cycle that the instruction unit is halted by the WFE/WFI instruction."
+ },
+ {
+ "EventCode": "0x018F",
+ "EventName": "RETENTION_CYCLE",
+ "BriefDescription": "This event counts every cycle that the instruction unit is halted by the RETENTION state."
+ },
+ {
+ "EventCode": "0x0190",
+ "EventName": "_0INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed, but counts at the time when commits MOVPRFX only."
+ },
+ {
+ "EventCode": "0x0191",
+ "EventName": "_1INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that one instruction is committed."
+ },
+ {
+ "EventCode": "0x0192",
+ "EventName": "_2INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that two instructions are committed."
+ },
+ {
+ "EventCode": "0x0193",
+ "EventName": "_3INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that three instructions are committed."
+ },
+ {
+ "EventCode": "0x0194",
+ "EventName": "_4INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that four instructions are committed."
+ },
+ {
+ "EventCode": "0x0195",
+ "EventName": "_5INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that five instructions are committed."
+ },
+ {
+ "EventCode": "0x0198",
+ "EventName": "UOP_ONLY_COMMIT",
+ "BriefDescription": "This event counts every cycle that only any micro-operations are committed."
+ },
+ {
+ "EventCode": "0x0199",
+ "EventName": "SINGLE_MOVPRFX_COMMIT",
+ "BriefDescription": "This event counts every cycle that only the MOVPRFX instruction is committed."
+ },
+ {
+ "EventCode": "0x019C",
+ "EventName": "LD_COMP_WAIT_L2_MISS",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache miss."
+ },
+ {
+ "EventCode": "0x019D",
+ "EventName": "LD_COMP_WAIT_L2_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache miss."
+ }
+]
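The files added in this series are plain JSON arrays of {EventCode, EventName, BriefDescription} records that the perf pmu-events build converts into event aliases. As a rough, illustrative sketch only (standard-library Python; the path is the one added above and nothing beyond the record layout shown in the hunk is assumed), the raw codes and names in a file such as cycle_accounting.json can be listed like this:

import json

# Path as added by this patch; point it at your kernel checkout.
path = "tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json"

with open(path) as f:
    events = json.load(f)

for ev in events:
    # Entries in other files of this series may use "ArchStdEvent" instead of a raw code/name.
    code = ev.get("EventCode", "(ArchStdEvent)")
    name = ev.get("EventName") or ev.get("ArchStdEvent")
    print(f"{code:>14}  {name}")

Once the JSON is built into perf, the same names are expected to be usable as event aliases rather than raw codes.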
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/energy.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/energy.json
new file mode 100644
index 000000000000..b55173f71e42
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/energy.json
@@ -0,0 +1,17 @@
+[
+ {
+ "EventCode": "0x01F0",
+ "EventName": "EA_CORE",
+ "BriefDescription": "This event counts energy consumption of core."
+ },
+ {
+ "EventCode": "0x03F0",
+ "EventName": "EA_L3",
+ "BriefDescription": "This event counts energy consumption of L3 cache."
+ },
+ {
+ "EventCode": "0x03F1",
+ "EventName": "EA_LDO_LOSS",
+ "BriefDescription": "This event counts energy consumption of LDO loss."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json
new file mode 100644
index 000000000000..f231712fe261
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json
@@ -0,0 +1,42 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN",
+ "BriefDescription": "This event counts each exception taken."
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN",
+ "BriefDescription": "This event counts each executed exception return instruction."
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ "BriefDescription": "This event counts only other synchronous exceptions that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ "BriefDescription": "This event counts only Supervisor Call exceptions that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ "BriefDescription": "This event counts only Instruction Abort exceptions that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ "BriefDescription": "This event counts only Data Abort or SError interrupt exceptions that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ "BriefDescription": "This event counts only IRQ exceptions that are taken locally, including Virtual IRQ exceptions."
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ "BriefDescription": "This event counts only FIQ exceptions that are taken locally, including Virtual FIQ exceptions."
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ "BriefDescription": "This event counts only Secure Monitor Call exceptions. The counter does not increment on SMC instructions trapped as a Hyp Trap exception."
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ "BriefDescription": "This event counts for both Hypervisor Call exceptions taken locally in the hypervisor and those taken as an exception from Non-secure EL1."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json
new file mode 100644
index 000000000000..a3c368959199
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json
@@ -0,0 +1,209 @@
+[
+ {
+ "EventCode": "0x0105",
+ "EventName": "FP_MV_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point move operations."
+ },
+ {
+ "EventCode": "0x0112",
+ "EventName": "FP_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD load operations that using SIMD&FP registers."
+ },
+ {
+ "EventCode": "0x0113",
+ "EventName": "FP_ST_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD store operations that using SIMD&FP registers."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point operations."
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC",
+ "BriefDescription": "This event counts architecturally executed half-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_HP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD half-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_HP_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE half-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_HP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE half-precision floating-point operations."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC",
+ "BriefDescription": "This event counts architecturally executed single-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_SP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD single-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_SP_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE single-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_SP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE single-precision floating-point operations."
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC",
+ "BriefDescription": "This event counts architecturally executed double-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_DP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD double-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_DP_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE double-precision floating-point operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_DP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE double-precision floating-point operations."
+ },
+ {
+ "ArchStdEvent": "FP_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point divide operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point divide operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point divide operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point divide operations."
+ },
+ {
+ "ArchStdEvent": "FP_SQRT_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point square root operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_SQRT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point square root operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_SQRT_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point square root operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_SQRT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point square root operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_FMA_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point FMA operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_FMA_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point FMA operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_FMA_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point FMA operations."
+ },
+ {
+ "ArchStdEvent": "FP_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point multiply operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point multiply operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point multiply operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point multiply operations."
+ },
+ {
+ "ArchStdEvent": "FP_ADDSUB_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point add or subtract operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_ADDSUB_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point add or subtract operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_ADDSUB_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point add or subtract operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_ADDSUB_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point add or subtract operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_RECPE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point reciprocal estimate operations."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_RECPE_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point reciprocal estimate operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_RECPE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point reciprocal estimate operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_CVT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point convert operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_CVT_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point convert operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_CVT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point convert operations."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_AREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point accumulating reduction operations."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_PREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point pairwise add step operations."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point vector reduction operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point vector reduction operations."
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE arithmetic operations. See FP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by (128 / CSIZE) and by twice that amount for operations that would also be counted by SVE_FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed v8SIMD&FP arithmetic operations. See FP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. The event counter is incremented by the specified number of elements for Advanced SIMD operations or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_DOT_SPEC",
+ "BriefDescription": "This event counts architecturally executed microarchitectural Advanced SIMD or SVE floating-point dot-product operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_MMLA_SPEC",
+ "BriefDescription": "This event counts architecturally executed microarchitectural Advanced SIMD or SVE floating-point matrix multiply operation."
+ }
+]
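The increment rule quoted for FP_SCALE_OPS_SPEC and FP_FIXED_OPS_SPEC above is easier to read with concrete numbers. A small sketch of the (128 / CSIZE) arithmetic taken directly from that description (the element sizes are just the usual FP widths; nothing else is assumed):

# Per-operation increment implied by the FP_SCALE_OPS_SPEC description above:
# (128 / CSIZE), and twice that for operations also counted by SVE_FP_FMA_SPEC.
for csize in (16, 32, 64):  # half-, single-, double-precision element sizes in bits
    base = 128 // csize
    print(f"CSIZE={csize:>2}: +{base} per operation, +{2 * base} for FMA-class operations")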
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/gcycle.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/gcycle.json
new file mode 100644
index 000000000000..b4ceddc0d25e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/gcycle.json
@@ -0,0 +1,97 @@
+[
+ {
+ "EventCode": "0x0880",
+ "EventName": "GCYCLES",
+ "BriefDescription": "This event counts the number of cycles at 100MHz."
+ },
+ {
+ "EventCode": "0x0890",
+ "EventName": "FL0_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 0."
+ },
+ {
+ "EventCode": "0x0891",
+ "EventName": "FL1_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 1."
+ },
+ {
+ "EventCode": "0x0892",
+ "EventName": "FL2_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 2."
+ },
+ {
+ "EventCode": "0x0893",
+ "EventName": "FL3_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 3."
+ },
+ {
+ "EventCode": "0x0894",
+ "EventName": "FL4_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 4."
+ },
+ {
+ "EventCode": "0x0895",
+ "EventName": "FL5_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 5."
+ },
+ {
+ "EventCode": "0x0896",
+ "EventName": "FL6_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 6."
+ },
+ {
+ "EventCode": "0x0897",
+ "EventName": "FL7_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 7."
+ },
+ {
+ "EventCode": "0x0898",
+ "EventName": "FL8_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 8."
+ },
+ {
+ "EventCode": "0x0899",
+ "EventName": "FL9_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 9."
+ },
+ {
+ "EventCode": "0x089A",
+ "EventName": "FL10_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 10."
+ },
+ {
+ "EventCode": "0x089B",
+ "EventName": "FL11_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 11."
+ },
+ {
+ "EventCode": "0x089C",
+ "EventName": "FL12_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 12."
+ },
+ {
+ "EventCode": "0x089D",
+ "EventName": "FL13_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 13."
+ },
+ {
+ "EventCode": "0x089E",
+ "EventName": "FL14_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 14."
+ },
+ {
+ "EventCode": "0x089F",
+ "EventName": "FL15_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the Frequency Level 15."
+ },
+ {
+ "EventCode": "0x08A0",
+ "EventName": "RETENTION_GCYCLES",
+ "BriefDescription": "This event counts the number of cycles where the measured core is staying in the RETENTION state."
+ },
+ {
+ "EventCode": "0x08A1",
+ "EventName": "RETENTION_COUNT",
+ "BriefDescription": "This event counts the number of changes from the normal state to the RETENTION state."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/general.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/general.json
new file mode 100644
index 000000000000..32f0fbfc4de4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/general.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES",
+ "BriefDescription": "This event counts every cycle."
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES",
+ "BriefDescription": "This event counts the constant frequency cycles counter increments at a constant frequency equal to the rate of increment of the System counter."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/hwpf.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/hwpf.json
new file mode 100644
index 000000000000..a784a032f353
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/hwpf.json
@@ -0,0 +1,52 @@
+[
+ {
+ "EventCode": "0x0230",
+ "EventName": "L1HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0231",
+ "EventName": "L1HWPF_STRIDE_PF",
+ "BriefDescription": "This event counts stride prefetch requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0232",
+ "EventName": "L1HWPF_PFTGT_PF",
+ "BriefDescription": "This event counts LDS prefetch requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0234",
+ "EventName": "L2HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0235",
+ "EventName": "L2HWPF_STRIDE_PF",
+ "BriefDescription": "This event counts stride prefetch requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0237",
+ "EventName": "L2HWPF_OTHER",
+ "BriefDescription": "This event counts prefetch requests to L2 cache generated by the other causes."
+ },
+ {
+ "EventCode": "0x0238",
+ "EventName": "L3HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L3 cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x0239",
+ "EventName": "L3HWPF_STRIDE_PF",
+ "BriefDescription": "This event counts stride prefetch requests to L3 cache generated by hardware prefetcher."
+ },
+ {
+ "EventCode": "0x023B",
+ "EventName": "L3HWPF_OTHER",
+ "BriefDescription": "This event counts prefetch requests to L3 cache generated by the other causes."
+ },
+ {
+ "EventCode": "0x023C",
+ "EventName": "L1IHWPF_NEXTLINE_PF",
+ "BriefDescription": "This event counts next line's prefetch requests to L1I cache generated by hardware prefetcher."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json
new file mode 100644
index 000000000000..b0818a2fedb0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json
@@ -0,0 +1,113 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL",
+ "BriefDescription": "This event counts operations that cause a refill of the L1D cache. See L1D_CACHE_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE",
+ "BriefDescription": "This event counts operations that cause a cache access to the L1D cache. See L1D_CACHE of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB",
+ "BriefDescription": "This event counts every write-back of data from the L1D cache. See L1D_CACHE_WB of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD",
+ "BriefDescription": "This event counts operations that cause a refill of the L1D cache that incurs additional latency."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ "BriefDescription": "This event counts L1D CACHE caused by read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ "BriefDescription": "This event counts L1D CACHE caused by write access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by write access."
+ },
+ {
+ "EventCode": "0x0200",
+ "EventName": "L1D_CACHE_DM",
+ "BriefDescription": "This event counts L1D_CACHE caused by demand access."
+ },
+ {
+ "EventCode": "0x0201",
+ "EventName": "L1D_CACHE_DM_RD",
+ "BriefDescription": "This event counts L1D_CACHE caused by demand read access."
+ },
+ {
+ "EventCode": "0x0202",
+ "EventName": "L1D_CACHE_DM_WR",
+ "BriefDescription": "This event counts L1D_CACHE caused by demand write access."
+ },
+ {
+ "EventCode": "0x0208",
+ "EventName": "L1D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "EventCode": "0x0209",
+ "EventName": "L1D_CACHE_REFILL_DM_RD",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by demand read access."
+ },
+ {
+ "EventCode": "0x020A",
+ "EventName": "L1D_CACHE_REFILL_DM_WR",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by demand write access."
+ },
+ {
+ "EventCode": "0x020D",
+ "EventName": "L1D_CACHE_BTC",
+ "BriefDescription": "This event counts demand access that hits cache line with shared status and requests exclusive access in the Level 1 data cache, causing a coherence access to outside of the Level 1 caches of this PE."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_MISS",
+ "BriefDescription": "This event counts demand access that misses in the Level 1 data cache, causing an access to outside of the Level 1 caches of this PE."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HWPRF",
+ "BriefDescription": "This event counts access counted by L1D_CACHE that is due to a hardware prefetch."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L1D_CACHE_HWPRF that causes a refill of the Level 1 data cache from outside of the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT_RD",
+ "BriefDescription": "This event counts demand read counted by L1D_CACHE_RD that hits in the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT_WR",
+ "BriefDescription": "This event counts demand write counted by L1D_CACHE_WR that hits in the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_HIT",
+ "BriefDescription": "This event counts access counted by L1D_CACHE that hits in the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_LFB_HIT_RD",
+ "BriefDescription": "This event counts demand access counted by L1D_CACHE_HIT_RD that hits a cache line that is in the process of being loaded into the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_LFB_HIT_WR",
+ "BriefDescription": "This event counts demand access counted by L1D_CACHE_HIT_WR that hits a cache line that is in the process of being loaded into the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_PRF",
+ "BriefDescription": "This event counts fetch counted by either Level 1 data hardware prefetch or Level 1 data software prefetch."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L1D_CACHE_PRF that causes a refill of the Level 1 data cache from outside of the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_PERCYC",
+ "BriefDescription": "The counter counts by the number of cache refills counted by L1D_CACHE_REFILL in progress on each Processor cycle."
+ }
+]
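Since the hit, miss and demand events above are defined in terms of one another (L1D_CACHE_HIT counts accesses counted by L1D_CACHE that hit, L1D_CACHE_MISS counts demand accesses that go outside the Level 1 caches, and so on), simple ratios fall out once the counters are collected. A hypothetical sketch, with made-up counter values and only the event names taken from this file:

# Hypothetical readings for two of the events defined above.
counts = {
    "L1D_CACHE_DM_RD": 1_000_000,   # demand read accesses
    "L1D_CACHE_HIT_RD": 950_000,    # demand reads that hit in the L1D cache
}

hit_ratio = counts["L1D_CACHE_HIT_RD"] / counts["L1D_CACHE_DM_RD"]
print(f"L1D demand-read hit ratio: {hit_ratio:.1%}")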
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json
new file mode 100644
index 000000000000..8680d8ec461d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json
@@ -0,0 +1,52 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL",
+ "BriefDescription": "This event counts operations that cause a refill of the L1I cache. See L1I_CACHE_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE",
+ "BriefDescription": "This event counts operations that cause a cache access to the L1I cache. See L1I_CACHE of ARMv9 Reference Manual for more information."
+ },
+ {
+ "EventCode": "0x0207",
+ "EventName": "L1I_CACHE_DM_RD",
+ "BriefDescription": "This event counts L1I_CACHE caused by demand read access."
+ },
+ {
+ "EventCode": "0x020F",
+ "EventName": "L1I_CACHE_REFILL_DM_RD",
+ "BriefDescription": "This event counts L1I_CACHE_REFILL caused by demand read access."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS",
+ "BriefDescription": "This event counts operations that cause a refill of the L1I cache that incurs additional latency."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HWPRF",
+ "BriefDescription": "This event counts access counted by L1I_CACHE that is due to a hardware prefetch."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L1I_CACHE_HWPRF that causes a refill of the Level 1 instruction cache from outside of the Level 1 instruction cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT_RD",
+ "BriefDescription": "This event counts demand fetch counted by L1I_CACHE_DM_RD that hits in the Level 1 instruction cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_HIT",
+ "BriefDescription": "This event counts access counted by L1I_CACHE that hits in the Level 1 instruction cache."
+ },
+ {
+ "ArchStdEvent": "L1I_LFB_HIT_RD",
+ "BriefDescription": "This event counts demand access counted by L1I_CACHE_HIT_RD that hits a cache line that is in the process of being loaded into the Level 1 instruction cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L1I_CACHE_PRF that causes a refill of the Level 1 instruction cache from outside of the Level 1 instruction cache."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL_PERCYC",
+ "BriefDescription": "The counter counts by the number of cache refills counted by L1I_CACHE_REFILL in progress on each Processor cycle."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json
new file mode 100644
index 000000000000..9e092752e6db
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json
@@ -0,0 +1,160 @@
+[
+ {
+ "ArchStdEvent": "L2D_CACHE",
+ "BriefDescription": "This event counts operations that cause a cache access to the L2 cache. See L2D_CACHE of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL",
+ "BriefDescription": "This event counts operations that cause a refill of the L2 cache. See L2D_CACHE_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB",
+ "BriefDescription": "This event counts every write-back of data from the L2 cache caused by L2 replace, non-temporal-store and DC ZVA."
+ },
+ {
+ "ArchStdEvent": "L2I_TLB_REFILL",
+ "BriefDescription": "This event counts operations that cause a TLB refill of the L2I TLB. See L2I_TLB_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2I_TLB",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I TLB. See L2I_TLB of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ "BriefDescription": "This event counts L2D CACHE caused by read access."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ "BriefDescription": "This event counts L2D CACHE caused by write access."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "BriefDescription": "This event counts L2D CACHE_REFILL caused by read access."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "BriefDescription": "This event counts L2D CACHE_REFILL caused by write access."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "BriefDescription": "This event counts every write-back of data from the L2 cache caused by L2 replace."
+ },
+ {
+ "EventCode": "0x0300",
+ "EventName": "L2D_CACHE_DM",
+ "BriefDescription": "This event counts L2D_CACHE caused by demand access."
+ },
+ {
+ "EventCode": "0x0301",
+ "EventName": "L2D_CACHE_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE caused by demand read access."
+ },
+ {
+ "EventCode": "0x0302",
+ "EventName": "L2D_CACHE_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE caused by demand write access."
+ },
+ {
+ "EventCode": "0x0305",
+ "EventName": "L2D_CACHE_HWPRF_ADJACENT",
+ "BriefDescription": "This event counts L2D_CACHE caused by hardware adjacent prefetch access."
+ },
+ {
+ "EventCode": "0x0308",
+ "EventName": "L2D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "EventCode": "0x0309",
+ "EventName": "L2D_CACHE_REFILL_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand read access."
+ },
+ {
+ "EventCode": "0x030A",
+ "EventName": "L2D_CACHE_REFILL_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand write access."
+ },
+ {
+ "EventCode": "0x030B",
+ "EventName": "L2D_CACHE_REFILL_DM_WR_EXCL",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand write exclusive access."
+ },
+ {
+ "EventCode": "0x030C",
+ "EventName": "L2D_CACHE_REFILL_DM_WR_ATOM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand write atomic access."
+ },
+ {
+ "EventCode": "0x030D",
+ "EventName": "L2D_CACHE_BTC",
+ "BriefDescription": "This event counts demand access that hits cache line with shared status and requests exclusive access in the Level 1 data and Level 2 caches, causing a coherence access to outside of the Level 1 and Level 2 caches of this PE."
+ },
+ {
+ "EventCode": "0x03B0",
+ "EventName": "L2D_CACHE_WB_VICTIM_CLEAN",
+ "BriefDescription": "This event counts every write-back of data from the L2 cache caused by L2 replace where the data is clean. In this case, the data will usually be written to L3 cache."
+ },
+ {
+ "EventCode": "0x03B1",
+ "EventName": "L2D_CACHE_WB_NT",
+ "BriefDescription": "This event counts every write-back of data from the L2 cache caused by non-temporal-store."
+ },
+ {
+ "EventCode": "0x03B2",
+ "EventName": "L2D_CACHE_WB_DCZVA",
+ "BriefDescription": "This event counts every write-back of data from the L2 cache caused by DC ZVA."
+ },
+ {
+ "EventCode": "0x03B3",
+ "EventName": "L2D_CACHE_FB",
+ "BriefDescription": "This event counts every flush-back (drop) of data from the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD",
+ "BriefDescription": "This event counts operations that cause a refill of the L2D cache that incurs additional latency."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_MISS",
+ "BriefDescription": "This event counts demand access that misses in the Level 1 data and Level 2 caches, causing an access to outside of the Level 1 and Level 2 caches of this PE."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_HWPRF",
+ "BriefDescription": "This event counts access counted by L2D_CACHE that is due to a hardware prefetch."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L2D_CACHE_HWPRF that causes a refill of the Level 2 cache, or any Level 1 data and instruction cache of this PE, from outside of those caches."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_HIT_RD",
+ "BriefDescription": "This event counts demand read counted by L2D_CACHE_RD that hits in the Level 2 data cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_HIT_WR",
+ "BriefDescription": "This event counts demand write counted by L2D_CACHE_WR that hits in the Level 2 data cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_HIT",
+ "BriefDescription": "This event counts access counted by L2D_CACHE that hits in the Level 2 data cache."
+ },
+ {
+ "ArchStdEvent": "L2D_LFB_HIT_RD",
+ "BriefDescription": "This event counts demand access counted by L2D_CACHE_HIT_RD that hits a recently fetched line in the Level 2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_LFB_HIT_WR",
+ "BriefDescription": "This event counts demand access counted by L2D_CACHE_HIT_WR that hits a recently fetched line in the Level 2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_PRF",
+ "BriefDescription": "This event counts fetch counted by either Level 2 data hardware prefetch or Level 2 data software prefetch."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts hardware prefetch counted by L2D_CACHE_PRF that causes a refill of the Level 2 data cache from outside of the Level 1 data cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_PERCYC",
+ "BriefDescription": "The counter counts by the number of cache refills counted by L2D_CACHE_REFILL in progress on each Processor cycle."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json
new file mode 100644
index 000000000000..3f3e0d22ac68
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json
@@ -0,0 +1,159 @@
+[
+ {
+ "ArchStdEvent": "L3D_CACHE",
+ "BriefDescription": "This event counts operations that cause a cache access to the L3 cache, as defined by the sum of L2D_CACHE_REFILL_L3D_CACHE and L2D_CACHE_WB_VICTIM_CLEAN events."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD",
+ "BriefDescription": "This event counts access counted by L3D_CACHE that is a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_CACHE events."
+ },
+ {
+ "EventCode": "0x0390",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE",
+ "BriefDescription": "This event counts operations that cause a cache access to the L3 cache."
+ },
+ {
+ "EventCode": "0x0391",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by demand access."
+ },
+ {
+ "EventCode": "0x0392",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by demand read access."
+ },
+ {
+ "EventCode": "0x0393",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by demand write access."
+ },
+ {
+ "EventCode": "0x0394",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE_PRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by prefetch access."
+ },
+ {
+ "EventCode": "0x0395",
+ "EventName": "L2D_CACHE_REFILL_L3D_CACHE_HWPRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by hardware prefetch access."
+ },
+ {
+ "EventCode": "0x0396",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS",
+ "BriefDescription": "This event counts operations that cause a miss of the L3 cache."
+ },
+ {
+ "EventCode": "0x0397",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by demand access."
+ },
+ {
+ "EventCode": "0x0398",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by demand read access."
+ },
+ {
+ "EventCode": "0x0399",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by demand write access."
+ },
+ {
+ "EventCode": "0x039A",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_PRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by prefetch access."
+ },
+ {
+ "EventCode": "0x039B",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_HWPRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by hardware prefetch access."
+ },
+ {
+ "EventCode": "0x039C",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT",
+ "BriefDescription": "This event counts operations that cause a hit of the L3 cache."
+ },
+ {
+ "EventCode": "0x039D",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by demand access."
+ },
+ {
+ "EventCode": "0x039E",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by demand read access."
+ },
+ {
+ "EventCode": "0x039F",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by demand write access."
+ },
+ {
+ "EventCode": "0x03A0",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT_PRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by prefetch access."
+ },
+ {
+ "EventCode": "0x03A1",
+ "EventName": "L2D_CACHE_REFILL_L3D_HIT_HWPRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by hardware prefetch access."
+ },
+ {
+ "EventCode": "0x03A2",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests hit the PFTGT buffer."
+ },
+ {
+ "EventCode": "0x03A3",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand access."
+ },
+ {
+ "EventCode": "0x03A4",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM_RD",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand read access."
+ },
+ {
+ "EventCode": "0x03A5",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM_WR",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand write access."
+ },
+ {
+ "EventCode": "0x03A6",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_L_MEM",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access the memory in the same socket as the requests."
+ },
+ {
+ "EventCode": "0x03A7",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_MEM",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access the memory in the different socket from the requests."
+ },
+ {
+ "EventCode": "0x03A8",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_L_L2",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access the different L2 cache from the requests in the same Numa nodes as the requests."
+ },
+ {
+ "EventCode": "0x03A9",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_NR_L2",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access L2 cache in the different Numa nodes from the requests in the same socket as the requests."
+ },
+ {
+ "EventCode": "0x03AA",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_NR_L3",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access L3 cache in the different Numa nodes from the requests in the same socket as the requests."
+ },
+ {
+ "EventCode": "0x03AB",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_L2",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access L2 cache in the different socket from the requests."
+ },
+ {
+ "EventCode": "0x03AC",
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_L3",
+ "BriefDescription": "This event counts the number of L3 cache misses where the requests access L3 cache in the different socket from the requests."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD",
+ "BriefDescription": "This event counts access counted by L3D_CACHE that is not completed by the L3D cache, and a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json
new file mode 100644
index 000000000000..a441b84729ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "LL_CACHE_RD",
+ "BriefDescription": "This event counts access counted by L3D_CACHE that is a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_CACHE events."
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "BriefDescription": "This event counts access counted by L3D_CACHE that is not completed by the L3D cache, and a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/memory.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/memory.json
new file mode 100644
index 000000000000..4ef125e3a253
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/memory.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS",
+ "BriefDescription": "This event counts architecturally executed memory-reading instructions and memory-writing instructions, as defined by the LDST_SPEC events."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ "BriefDescription": "This event counts architecturally executed memory-reading instructions, as defined by the LD_SPEC events."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json
new file mode 100644
index 000000000000..3cc3105f4a5e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json
@@ -0,0 +1,208 @@
+[
+ {
+ "EventCode": "0x01A0",
+ "EventName": "EAGA_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGA pipeline."
+ },
+ {
+ "EventCode": "0x01A1",
+ "EventName": "EAGB_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGB pipeline."
+ },
+ {
+ "EventCode": "0x01A3",
+ "EventName": "PRX_VAL",
+ "BriefDescription": "This event counts valid cycles of PRX pipeline."
+ },
+ {
+ "EventCode": "0x01A4",
+ "EventName": "EXA_VAL",
+ "BriefDescription": "This event counts valid cycles of EXA pipeline."
+ },
+ {
+ "EventCode": "0x01A5",
+ "EventName": "EXB_VAL",
+ "BriefDescription": "This event counts valid cycles of EXB pipeline."
+ },
+ {
+ "EventCode": "0x01A6",
+ "EventName": "EXC_VAL",
+ "BriefDescription": "This event counts valid cycles of EXC pipeline."
+ },
+ {
+ "EventCode": "0x01A7",
+ "EventName": "EXD_VAL",
+ "BriefDescription": "This event counts valid cycles of EXD pipeline."
+ },
+ {
+ "EventCode": "0x01A8",
+ "EventName": "FLA_VAL",
+ "BriefDescription": "This event counts valid cycles of FLA pipeline."
+ },
+ {
+ "EventCode": "0x01A9",
+ "EventName": "FLB_VAL",
+ "BriefDescription": "This event counts valid cycles of FLB pipeline."
+ },
+ {
+ "EventCode": "0x01AA",
+ "EventName": "STEA_VAL",
+ "BriefDescription": "This event counts valid cycles of STEA pipeline."
+ },
+ {
+ "EventCode": "0x01AB",
+ "EventName": "STEB_VAL",
+ "BriefDescription": "This event counts valid cycles of STEB pipeline."
+ },
+ {
+ "EventCode": "0x01AC",
+ "EventName": "STFL_VAL",
+ "BriefDescription": "This event counts valid cycles of STFL pipeline."
+ },
+ {
+ "EventCode": "0x01AD",
+ "EventName": "STPX_VAL",
+ "BriefDescription": "This event counts valid cycles of STPX pipeline."
+ },
+ {
+ "EventCode": "0x01B0",
+ "EventName": "FLA_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in FLA pipeline, where it is corrected so that it becomes 32 when all bits are 1."
+ },
+ {
+ "EventCode": "0x01B1",
+ "EventName": "FLB_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in FLB pipeline, where it is corrected so that it becomes 32 when all bits are 1."
+ },
+ {
+ "EventCode": "0x01B2",
+ "EventName": "FLA_VAL_FOR_PRD",
+ "BriefDescription": "This event counts valid cycles of FLA pipeline."
+ },
+ {
+ "EventCode": "0x01B3",
+ "EventName": "FLB_VAL_FOR_PRD",
+ "BriefDescription": "This event counts valid cycles of FLB pipeline."
+ },
+ {
+ "EventCode": "0x0240",
+ "EventName": "L1_PIPE0_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#0."
+ },
+ {
+ "EventCode": "0x0241",
+ "EventName": "L1_PIPE1_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#1."
+ },
+ {
+ "EventCode": "0x0242",
+ "EventName": "L1_PIPE2_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#2."
+ },
+ {
+ "EventCode": "0x0250",
+ "EventName": "L1_PIPE0_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#0."
+ },
+ {
+ "EventCode": "0x0251",
+ "EventName": "L1_PIPE1_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#1."
+ },
+ {
+ "EventCode": "0x025A",
+ "EventName": "L1_PIPE_ABORT_STLD_INTLK",
+ "BriefDescription": "This event counts aborted requests in L1D pipelines that due to store-load interlock."
+ },
+ {
+ "EventCode": "0x026C",
+ "EventName": "L1I_PIPE_COMP",
+ "BriefDescription": "This event counts completed requests in L1I cache pipeline."
+ },
+ {
+ "EventCode": "0x026D",
+ "EventName": "L1I_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L1I cache pipeline."
+ },
+ {
+ "EventCode": "0x0278",
+ "EventName": "L1_PIPE0_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 that its sce bit of tagged address is 1."
+ },
+ {
+ "EventCode": "0x0279",
+ "EventName": "L1_PIPE1_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 that its sce bit of tagged address is 1."
+ },
+ {
+ "EventCode": "0x02A0",
+ "EventName": "L1_PIPE0_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 that its sector cache ID is not 0."
+ },
+ {
+ "EventCode": "0x02A1",
+ "EventName": "L1_PIPE1_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 that its sector cache ID is not 0."
+ },
+ {
+ "EventCode": "0x02B0",
+ "EventName": "L1_PIPE_COMP_GATHER_2FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined."
+ },
+ {
+ "EventCode": "0x02B1",
+ "EventName": "L1_PIPE_COMP_GATHER_1FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined."
+ },
+ {
+ "EventCode": "0x02B2",
+ "EventName": "L1_PIPE_COMP_GATHER_0FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0."
+ },
+ {
+ "EventCode": "0x02B3",
+ "EventName": "L1_PIPE_COMP_SCATTER_1FLOW",
+ "BriefDescription": "This event counts the number of flows of the scatter instructions."
+ },
+ {
+ "EventCode": "0x02B8",
+ "EventName": "L1_PIPE0_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#0, where it is corrected so that it becomes 64 when all bits are 1."
+ },
+ {
+ "EventCode": "0x02B9",
+ "EventName": "L1_PIPE1_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of request in L1D cache pipeline#1, where it is corrected so that it becomes 64 when all bits are 1."
+ },
+ {
+ "EventCode": "0x0330",
+ "EventName": "L2_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L2 cache pipeline."
+ },
+ {
+ "EventCode": "0x0350",
+ "EventName": "L2_PIPE_COMP_ALL",
+ "BriefDescription": "This event counts completed requests in L2 cache pipeline."
+ },
+ {
+ "EventCode": "0x0370",
+ "EventName": "L2_PIPE_COMP_PF_L2MIB_MCH",
+ "BriefDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_TLB",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_MEMBOUND when there is a demand instruction miss in the instruction TLB."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_TLB",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when there is a demand data miss in the data TLB."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_ST",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when the backend is stalled waiting for a store."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND when operations are available from the frontend but at least one is not ready to be sent to the backend because of an input dependency."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pmu.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pmu.json
new file mode 100644
index 000000000000..65bd6cdd0dd5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pmu.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "PMU_OVFS",
+ "BriefDescription": "This event counts the event generated each time one of the condition occurs described in Arm Architecture Reference Manual for A-profile architecture. This event is only for output to the trace unit."
+ },
+ {
+ "ArchStdEvent": "PMU_HOVFS",
+ "BriefDescription": "This event counts the event generated each time an event is counted by an event counter <n> and all of the condition occur described in Arm Architecture Reference Manual for A-profile architecture. This event is only for output to the trace unit."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/retired.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/retired.json
new file mode 100644
index 000000000000..de56aafec2dc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/retired.json
@@ -0,0 +1,30 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR",
+ "BriefDescription": "This event counts on writes to the PMSWINC register."
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED",
+ "BriefDescription": "This event counts every architecturally executed instruction."
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED",
+ "BriefDescription": "This event counts every write to CONTEXTIDR."
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED",
+ "BriefDescription": "This event counts architecturally executed branch instruction."
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "BriefDescription": "This event counts architecturally executed branch instruction which was mispredicted."
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED",
+ "BriefDescription": "This event counts every architecturally executed micro-operation."
+ },
+ {
+ "ArchStdEvent": "UOP_RETIRED",
+ "BriefDescription": "This event counts micro-operation that would be executed in a Simple sequential execution of the program."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json
new file mode 100644
index 000000000000..4841b43e2871
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json
@@ -0,0 +1,171 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED",
+ "BriefDescription": "This event counts each correction to the predicted program flow that occurs because of a misprediction from, or no prediction from, the branch prediction resources and that relates to instructions that the branch prediction resources are capable of predicting."
+ },
+ {
+ "ArchStdEvent": "BR_PRED",
+ "BriefDescription": "This event counts every branch or other change in the program flow that the branch prediction resources are capable of predicting."
+ },
+ {
+ "ArchStdEvent": "INST_SPEC",
+ "BriefDescription": "This event counts every architecturally executed instruction."
+ },
+ {
+ "ArchStdEvent": "OP_SPEC",
+ "BriefDescription": "This event counts every speculatively executed micro-operation."
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ "BriefDescription": "This event counts architecturally executed load-exclusive instructions."
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ "BriefDescription": "This event counts architecturally executed store-exclusive instructions."
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed memory-reading instructions, as defined by the LD_RETIRED event."
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ "BriefDescription": "This event counts architecturally executed memory-writing instructions, as defined by the ST_RETIRED event. This event counts DCZVA as a store operation."
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC",
+ "BriefDescription": "This event counts architecturally executed memory-reading instructions and memory-writing instructions, as defined by the LD_RETIRED and ST_RETIRED events."
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ "BriefDescription": "This event counts architecturally executed integer data-processing instructions. See DP_SPEC of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD data-processing instructions."
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point data-processing instructions."
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ "BriefDescription": "This event counts only software changes of the PC that defined by the instruction architecturally executed, condition code check pass, software change of the PC event."
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ "BriefDescription": "This event counts architecturally executed cryptographic instructions, except PMULL and VMULL."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ "BriefDescription": "This event counts architecturally executed immediate branch instructions."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ "BriefDescription": "This event counts architecturally executed procedure return operations that defined by the BR_RETURN_RETIRED event."
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "BriefDescription": "This event counts architecturally executed indirect branch instructions that includes software change of the PC other than exception-generating instructions and immediate branch instructions."
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ "BriefDescription": "This event counts architecturally executed Instruction Synchronization Barrier instructions."
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ "BriefDescription": "This event counts architecturally executed Data Synchronization Barrier instructions."
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ "BriefDescription": "This event counts architecturally executed Data Memory Barrier instructions, excluding the implied barrier operations of load/store operations with release consistency semantics."
+ },
+ {
+ "ArchStdEvent": "CSDB_SPEC",
+ "BriefDescription": "This event counts speculatively executed control speculation barrier instructions."
+ },
+ {
+ "EventCode": "0x0108",
+ "EventName": "PRD_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that using predicate register."
+ },
+ {
+ "EventCode": "0x0109",
+ "EventName": "IEL_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-element manipulation operations."
+ },
+ {
+ "EventCode": "0x010A",
+ "EventName": "IREG_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-register manipulation operations."
+ },
+ {
+ "EventCode": "0x011A",
+ "EventName": "BC_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations."
+ },
+ {
+ "EventCode": "0x011B",
+ "EventName": "DCZVA_SPEC",
+ "BriefDescription": "This event counts architecturally executed zero blocking operations due to the DC ZVA instruction."
+ },
+ {
+ "EventCode": "0x0121",
+ "EventName": "EFFECTIVE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed instructions, excluding the MOVPRFX instruction."
+ },
+ {
+ "EventCode": "0x0123",
+ "EventName": "PRE_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses pre-index as its addressing mode."
+ },
+ {
+ "EventCode": "0x0124",
+ "EventName": "POST_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses post-index as its addressing mode."
+ },
+ {
+ "EventCode": "0x0139",
+ "EventName": "UOP_SPLIT",
+ "BriefDescription": "This event counts the occurrence count of the micro-operation split."
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD operations."
+ },
+ {
+ "ArchStdEvent": "INT_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations due to scalar, Advanced SIMD, and SVE instructions listed in Integer instructions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "INT_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed integer divide operation."
+ },
+ {
+ "ArchStdEvent": "INT_DIV64_SPEC",
+ "BriefDescription": "This event counts architecturally executed 64-bit integer divide operation."
+ },
+ {
+ "ArchStdEvent": "INT_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed integer multiply operation."
+ },
+ {
+ "ArchStdEvent": "INT_MUL64_SPEC",
+ "BriefDescription": "This event counts architecturally executed integer 64-bit x 64-bit multiply operation."
+ },
+ {
+ "ArchStdEvent": "INT_MULH64_SPEC",
+ "BriefDescription": "This event counts architecturally executed integer 64-bit x 64-bit multiply returning high part operation."
+ },
+ {
+ "ArchStdEvent": "NONFP_SPEC",
+ "BriefDescription": "This event counts architecturally executed non-floating-point operations."
+ },
+ {
+ "ArchStdEvent": "INT_SCALE_OPS_SPEC",
+ "BriefDescription": "This event counts each integer ALU operation counted by SVE_INT_SPEC. See ALU operation counts section of ARMv9 Reference Manual for information on the counter increment for different types of instruction."
+ },
+ {
+ "ArchStdEvent": "INT_FIXED_OPS_SPEC",
+ "BriefDescription": "This event counts each integer ALU operation counted by INT_SPEC that is not counted by SVE_INT_SPEC. See ALU operation counts section of ARMv9 Reference Manual for information on the counter increment for different types of instruction."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json
new file mode 100644
index 000000000000..5fb81e2a0a07
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json
@@ -0,0 +1,94 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "BriefDescription": "This event counts every cycle counted by the CPU_CYCLES event on that no operation was issued because there are no operations available to issue for this PE from the frontend."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND",
+ "BriefDescription": "This event counts every cycle counted by the CPU_CYCLES event on that no operation was issued because the backend is unable to accept any operations."
+ },
+ {
+ "ArchStdEvent": "STALL",
+ "BriefDescription": "This event counts every cycle that no instruction was dispatched from decode unit."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND",
+ "BriefDescription": "This event counts every cycle that no instruction was dispatched from decode unit due to the backend."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "BriefDescription": "This event counts every cycle that no instruction was dispatched from decode unit due to the frontend."
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT",
+ "BriefDescription": "This event counts every cycle that no instruction or operation Slot was dispatched from decode unit."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM",
+ "BriefDescription": "This event counts every cycle that no instruction was dispatched from decode unit due to memory stall."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_MEMBOUND",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND when no instructions are delivered from the memory system."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_L1I",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_MEMBOUND when there is a demand instruction miss in the first level of instruction cache."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_L2I",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_MEMBOUND when there is a demand instruction miss in the second level of instruction cache."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_MEM",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_MEMBOUND when there is a demand instruction miss in the last level of instruction cache within the PE clock domain or a non-cacheable instruction fetch in progress."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_CPUBOUND",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND when the frontend is stalled on a frontend processor resource, not including memory."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_FLOW",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_CPUBOUND when the frontend is stalled on unavailability of prediction flow resources."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_FLUSH",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_CPUBOUND when the frontend is recovering from a pipeline flush."
+ },
+ {
+ "ArchStdEvent": "STALL_FRONTEND_RENAME",
+ "BriefDescription": "This event counts every cycle counted by STALL_FRONTEND_CPUBOUND when operations are available from the frontend but at least one is not ready to be sent to the backend because no rename register is available."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEMBOUND",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND when the backend is waiting for a memory access to complete."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_L1D",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when there is a demand data miss in L1D cache."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_L2D",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when there is a demand data miss in L2D cache."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_CPUBOUND",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND when the backend is stalled on a processor resource, not including memory."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_BUSY",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND when operations are available from the frontend but the backend is not able to accept an operation because an execution unit is busy."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_RENAME",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_CPUBOUND when operations are available from the frontend but at least one is not ready to be sent to the backend because no rename register is available."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_ATOMIC",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when the backend is processing an Atomic operation."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEMCPYSET",
+ "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when the backend is processing a Memory Copy or Set instruction."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json
new file mode 100644
index 000000000000..e66b5af00f90
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json
@@ -0,0 +1,254 @@
+[
+ {
+ "ArchStdEvent": "SIMD_INST_RETIRED",
+ "BriefDescription": "This event counts architecturally executed SIMD instructions, excluding the Advanced SIMD scalar instructions and the instructions listed in Non-SIMD SVE instructions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "SVE_INST_RETIRED",
+ "BriefDescription": "This event counts architecturally executed SVE instructions, including the instructions listed in Non-SIMD SVE instructions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE instructions, including the instructions listed in Non-SIMD SVE instructions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE operations."
+ },
+ {
+ "ArchStdEvent": "UOP_SPEC",
+ "BriefDescription": "This event counts all architecturally executed micro-operations."
+ },
+ {
+ "ArchStdEvent": "SVE_MATH_SPEC",
+ "BriefDescription": "This event counts architecturally executed math function operations due to the SVE FTSMUL, FTMAD, FTSSEL, and FEXPA instructions."
+ },
+ {
+ "ArchStdEvent": "FP_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations due to scalar, Advanced SIMD, and SVE instructions listed in Floating-point instructions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "FP_FMA_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point fused multiply-add and multiply-subtract operations."
+ },
+ {
+ "ArchStdEvent": "FP_RECPE_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point reciprocal estimate operations due to the Advanced SIMD scalar, Advanced SIMD vector, and SVE FRECPE and FRSQRTE instructions."
+ },
+ {
+ "ArchStdEvent": "FP_CVT_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point convert operations due to the scalar, Advanced SIMD, and SVE floating-point conversion instructions listed in Floating-point conversions section of ARMv9 Reference Manual."
+ },
+ {
+ "ArchStdEvent": "ASE_INT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD integer operations."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer operations."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_DIV_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer divide operation."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_DIV64_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE 64-bit integer divide operation."
+ },
+ {
+ "ArchStdEvent": "ASE_INT_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD integer multiply operation."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer multiply operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_MUL_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer multiply operations."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_MUL64_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer 64-bit x 64-bit multiply operation."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_MULH64_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer 64-bit x 64-bit multiply returning high part operations."
+ },
+ {
+ "ArchStdEvent": "ASE_NONFP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD non-floating-point operations."
+ },
+ {
+ "ArchStdEvent": "SVE_NONFP_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE non-floating-point operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_NONFP_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE non-floating-point operations."
+ },
+ {
+ "ArchStdEvent": "ASE_INT_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD integer reduction operation."
+ },
+ {
+ "ArchStdEvent": "SVE_INT_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE integer reduction operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer reduction operations."
+ },
+ {
+ "ArchStdEvent": "SVE_PERM_SPEC",
+ "BriefDescription": "This event counts architecturally executed vector or predicate permute operation."
+ },
+ {
+ "ArchStdEvent": "SVE_XPIPE_Z2R_SPEC",
+ "BriefDescription": "This event counts architecturally executed vector to general-purpose scalar cross-pipeline transfer operation."
+ },
+ {
+ "ArchStdEvent": "SVE_XPIPE_R2Z_SPEC",
+ "BriefDescription": "This event counts architecturally executed general-purpose scalar to vector cross-pipeline transfer operation."
+ },
+ {
+ "ArchStdEvent": "SVE_PGEN_SPEC",
+ "BriefDescription": "This event counts architecturally executed predicate-generating operation."
+ },
+ {
+ "ArchStdEvent": "SVE_PGEN_FLG_SPEC",
+ "BriefDescription": "This event counts architecturally executed predicate-generating operation that sets condition flags."
+ },
+ {
+ "ArchStdEvent": "SVE_PPERM_SPEC",
+ "BriefDescription": "This event counts architecturally executed predicate permute operation."
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC",
+ "BriefDescription": "This event counts architecturally executed SIMD data-processing and load/store operations due to SVE instructions with a Governing predicate operand that determines the Active elements."
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations due to MOVPRFX instructions, whether or not they were fused with the prefixed instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_Z_SPEC",
+ "BriefDescription": "This event counts architecturally executed operation counted by SVE_MOVPRFX_SPEC where the operation uses zeroing predication."
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_M_SPEC",
+ "BriefDescription": "This event counts architecturally executed operation counted by SVE_MOVPRFX_SPEC where the operation uses merging predication."
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_U_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations due to MOVPRFX instructions that were not fused with the prefixed instruction."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to SVE and Advanced SIMD load instructions."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to SVE and Advanced SIMD store instructions."
+ },
+ {
+ "ArchStdEvent": "PRF_SPEC",
+ "BriefDescription": "This event counts architecturally executed prefetch operations due to scalar PRFM, PRFUM and SVE PRF instructions."
+ },
+ {
+ "ArchStdEvent": "BASE_LD_REG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to an instruction that loads a general-purpose register."
+ },
+ {
+ "ArchStdEvent": "BASE_ST_REG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to an instruction that stores a general-purpose register, excluding the DC ZVA instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_REG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to an SVE LDR instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_STR_REG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to an SVE STR instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_PREG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to an SVE LDR (predicate) instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_STR_PREG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to an SVE STR (predicate) instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_CONTIG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that prefetch memory due to an SVE predicated single contiguous element prefetch instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_LDNT_CONTIG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operation that reads from memory with a non-temporal hint due to an SVE non-temporal contiguous element load instruction."
+ },
+ {
+ "ArchStdEvent": "SVE_STNT_CONTIG_SPEC",
+ "BriefDescription": "This event counts architecturally executed operation that writes to memory with a non-temporal hint due to an SVE non-temporal contiguous element store instruction."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_MULTI_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to SVE and Advanced SIMD multiple vector contiguous structure load instructions."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_MULTI_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to SVE and Advanced SIMD multiple vector contiguous structure store instructions."
+ },
+ {
+ "ArchStdEvent": "SVE_LD_GATHER_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to SVE non-contiguous gather-load instructions."
+ },
+ {
+ "ArchStdEvent": "SVE_ST_SCATTER_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to SVE non-contiguous scatter-store instructions."
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_GATHER_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that prefetch memory due to SVE non-contiguous gather-prefetch instructions."
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC",
+ "BriefDescription": "This event counts architecturally executed memory read operations due to SVE First-fault and Non-fault load instructions."
+ },
+ {
+ "ArchStdEvent": "FP_HP_SCALE_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE half-precision arithmetic operations. See FP_HP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 8, or by 16 for operations that would also be counted by SVE_FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_HP_FIXED_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed v8SIMD&FP half-precision arithmetic operations. See FP_HP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 16-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_SCALE_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE single-precision arithmetic operations. See FP_SP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 4, or by 8 for operations that would also be counted by SVE_FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_SP_FIXED_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed v8SIMD&FP single-precision arithmetic operations. See FP_SP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 32-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_DP_SCALE_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE double-precision arithmetic operations. See FP_DP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2, or by 4 for operations that would also be counted by SVE_FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "FP_DP_FIXED_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed v8SIMD&FP double-precision arithmetic operations. See FP_DP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2 for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_DOT_SPEC",
+ "BriefDescription": "This event counts architecturally executed microarchitectural Advanced SIMD or SVE integer dot-product operation."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_MMLA_SPEC",
+ "BriefDescription": "This event counts architecturally executed microarchitectural Advanced SIMD or SVE integer matrix multiply operation."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json
new file mode 100644
index 000000000000..edc7cb8696c8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json
@@ -0,0 +1,362 @@
+[
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL",
+ "BriefDescription": "This event counts operations that cause a TLB refill of the L1I TLB. See L1I_TLB_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL",
+ "BriefDescription": "This event counts operations that cause a TLB refill of the L1D TLB. See L1D_TLB_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D TLB. See L1D_TLB of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I TLB. See L1I_TLB of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "BriefDescription": "This event counts operations that cause a TLB refill of the L2D TLB. See L2D_TLB_REFILL of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D TLB. See L2D_TLB of ARMv9 Reference Manual for more information."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK",
+ "BriefDescription": "This event counts data TLB access with at least one translation table walk."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK",
+ "BriefDescription": "This event counts instruction TLB access with at least one translation table walk."
+ },
+ {
+ "EventCode": "0x0C00",
+ "EventName": "L1I_TLB_4K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 4KB page."
+ },
+ {
+ "EventCode": "0x0C01",
+ "EventName": "L1I_TLB_64K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 64KB page."
+ },
+ {
+ "EventCode": "0x0C02",
+ "EventName": "L1I_TLB_2M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 2MB page."
+ },
+ {
+ "EventCode": "0x0C03",
+ "EventName": "L1I_TLB_32M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 32MB page."
+ },
+ {
+ "EventCode": "0x0C04",
+ "EventName": "L1I_TLB_512M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 512MB page."
+ },
+ {
+ "EventCode": "0x0C05",
+ "EventName": "L1I_TLB_1G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 1GB page."
+ },
+ {
+ "EventCode": "0x0C06",
+ "EventName": "L1I_TLB_16G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 16GB page."
+ },
+ {
+ "EventCode": "0x0C08",
+ "EventName": "L1D_TLB_4K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 4KB page."
+ },
+ {
+ "EventCode": "0x0C09",
+ "EventName": "L1D_TLB_64K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 64KB page."
+ },
+ {
+ "EventCode": "0x0C0A",
+ "EventName": "L1D_TLB_2M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 2MB page."
+ },
+ {
+ "EventCode": "0x0C0B",
+ "EventName": "L1D_TLB_32M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 32MB page."
+ },
+ {
+ "EventCode": "0x0C0C",
+ "EventName": "L1D_TLB_512M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 512MB page."
+ },
+ {
+ "EventCode": "0x0C0D",
+ "EventName": "L1D_TLB_1G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 1GB page."
+ },
+ {
+ "EventCode": "0x0C0E",
+ "EventName": "L1D_TLB_16G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 16GB page."
+ },
+ {
+ "EventCode": "0x0C10",
+ "EventName": "L1I_TLB_REFILL_4K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 4KB page."
+ },
+ {
+ "EventCode": "0x0C11",
+ "EventName": "L1I_TLB_REFILL_64K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 64KB page."
+ },
+ {
+ "EventCode": "0x0C12",
+ "EventName": "L1I_TLB_REFILL_2M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 2MB page."
+ },
+ {
+ "EventCode": "0x0C13",
+ "EventName": "L1I_TLB_REFILL_32M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 32MB page."
+ },
+ {
+ "EventCode": "0x0C14",
+ "EventName": "L1I_TLB_REFILL_512M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 512MB page."
+ },
+ {
+ "EventCode": "0x0C15",
+ "EventName": "L1I_TLB_REFILL_1G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 1GB page."
+ },
+ {
+ "EventCode": "0x0C16",
+ "EventName": "L1I_TLB_REFILL_16G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 16GB page."
+ },
+ {
+ "EventCode": "0x0C18",
+ "EventName": "L1D_TLB_REFILL_4K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 4KB page."
+ },
+ {
+ "EventCode": "0x0C19",
+ "EventName": "L1D_TLB_REFILL_64K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 64KB page."
+ },
+ {
+ "EventCode": "0x0C1A",
+ "EventName": "L1D_TLB_REFILL_2M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 2MB page."
+ },
+ {
+ "EventCode": "0x0C1B",
+ "EventName": "L1D_TLB_REFILL_32M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 32MB page."
+ },
+ {
+ "EventCode": "0x0C1C",
+ "EventName": "L1D_TLB_REFILL_512M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 512MB page."
+ },
+ {
+ "EventCode": "0x0C1D",
+ "EventName": "L1D_TLB_REFILL_1G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 1GB page."
+ },
+ {
+ "EventCode": "0x0C1E",
+ "EventName": "L1D_TLB_REFILL_16G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 16GB page."
+ },
+ {
+ "EventCode": "0x0C20",
+ "EventName": "L2I_TLB_4K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 4KB page."
+ },
+ {
+ "EventCode": "0x0C21",
+ "EventName": "L2I_TLB_64K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 64KB page."
+ },
+ {
+ "EventCode": "0x0C22",
+ "EventName": "L2I_TLB_2M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 2MB page."
+ },
+ {
+ "EventCode": "0x0C23",
+ "EventName": "L2I_TLB_32M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 32MB page."
+ },
+ {
+ "EventCode": "0x0C24",
+ "EventName": "L2I_TLB_512M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 512MB page."
+ },
+ {
+ "EventCode": "0x0C25",
+ "EventName": "L2I_TLB_1G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 1GB page."
+ },
+ {
+ "EventCode": "0x0C26",
+ "EventName": "L2I_TLB_16G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 16GB page."
+ },
+ {
+ "EventCode": "0x0C28",
+ "EventName": "L2D_TLB_4K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 4KB page."
+ },
+ {
+ "EventCode": "0x0C29",
+ "EventName": "L2D_TLB_64K",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 64KB page."
+ },
+ {
+ "EventCode": "0x0C2A",
+ "EventName": "L2D_TLB_2M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 2MB page."
+ },
+ {
+ "EventCode": "0x0C2B",
+ "EventName": "L2D_TLB_32M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 32MB page."
+ },
+ {
+ "EventCode": "0x0C2C",
+ "EventName": "L2D_TLB_512M",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 512MB page."
+ },
+ {
+ "EventCode": "0x0C2D",
+ "EventName": "L2D_TLB_1G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 1GB page."
+ },
+ {
+ "EventCode": "0x0C2E",
+ "EventName": "L2D_TLB_16G",
+ "BriefDescription": "This event counts operations that cause a TLB access to the L2D in 16GB page."
+ },
+ {
+ "EventCode": "0x0C30",
+ "EventName": "L2I_TLB_REFILL_4K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2Iin 4KB page."
+ },
+ {
+ "EventCode": "0x0C31",
+ "EventName": "L2I_TLB_REFILL_64K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 64KB page."
+ },
+ {
+ "EventCode": "0x0C32",
+ "EventName": "L2I_TLB_REFILL_2M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 2MB page."
+ },
+ {
+ "EventCode": "0x0C33",
+ "EventName": "L2I_TLB_REFILL_32M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 32MB page."
+ },
+ {
+ "EventCode": "0x0C34",
+ "EventName": "L2I_TLB_REFILL_512M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 512MB page."
+ },
+ {
+ "EventCode": "0x0C35",
+ "EventName": "L2I_TLB_REFILL_1G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 1GB page."
+ },
+ {
+ "EventCode": "0x0C36",
+ "EventName": "L2I_TLB_REFILL_16G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 16GB page."
+ },
+ {
+ "EventCode": "0x0C38",
+ "EventName": "L2D_TLB_REFILL_4K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 4KB page."
+ },
+ {
+ "EventCode": "0x0C39",
+ "EventName": "L2D_TLB_REFILL_64K",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 64KB page."
+ },
+ {
+ "EventCode": "0x0C3A",
+ "EventName": "L2D_TLB_REFILL_2M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 2MB page."
+ },
+ {
+ "EventCode": "0x0C3B",
+ "EventName": "L2D_TLB_REFILL_32M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 32MB page."
+ },
+ {
+ "EventCode": "0x0C3C",
+ "EventName": "L2D_TLB_REFILL_512M",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 512MB page."
+ },
+ {
+ "EventCode": "0x0C3D",
+ "EventName": "L2D_TLB_REFILL_1G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 1GB page."
+ },
+ {
+ "EventCode": "0x0C3E",
+ "EventName": "L2D_TLB_REFILL_16G",
+ "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 16GB page."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_PERCYC",
+ "BriefDescription": "This event counts the number of DTLB_WALK events in progress on each Processor cycle."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_PERCYC",
+ "BriefDescription": "This event counts the number of ITLB_WALK events in progress on each Processor cycle."
+ },
+ {
+ "ArchStdEvent": "DTLB_STEP",
+ "BriefDescription": "This event counts translation table walk access made by a refill of the data TLB."
+ },
+ {
+ "ArchStdEvent": "ITLB_STEP",
+ "BriefDescription": "This event counts translation table walk access made by a refill of the instruction TLB."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_LARGE",
+ "BriefDescription": "This event counts translation table walk counted by DTLB_WALK where the result of the walk yields a large page size."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_LARGE",
+ "BriefDescription": "This event counts translation table walk counted by ITLB_WALK where the result of the walk yields a large page size."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_SMALL",
+ "BriefDescription": "This event counts translation table walk counted by DTLB_WALK where the result of the walk yields a small page size."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_SMALL",
+ "BriefDescription": "This event counts translation table walk counted by ITLB_WALK where the result of the walk yields a small page size."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_BLOCK",
+ "BriefDescription": "This event counts translation table walk counted by DTLB_WALK where the result of the walk yields a Block."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_BLOCK",
+ "BriefDescription": "This event counts translation table walk counted by ITLB_WALK where the result of the walk yields a Block."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK_PAGE",
+ "BriefDescription": "This event counts translation table walk counted by DTLB_WALK where the result of the walk yields a Page."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK_PAGE",
+ "BriefDescription": "This event counts translation table walk counted by ITLB_WALK where the result of the walk yields a Page."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/trace.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/trace.json
new file mode 100644
index 000000000000..0c6e5054c9b5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/trace.json
@@ -0,0 +1,18 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP",
+ "BriefDescription": "This event counts the event generated each time the current write pointer is wrapped to the base pointer."
+ },
+ {
+ "ArchStdEvent": "TRB_TRIG",
+ "BriefDescription": "This event counts the event generated when a Trace Buffer Extension Trigger Event occurs."
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0",
+ "BriefDescription": "This event counts the event generated each time an event is signaled by the trace unit external event 0."
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4",
+ "BriefDescription": "This event counts the event generated each time an event is signaled on CTI output trigger 4."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index f4d1ca4d1493..5c846fe90513 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -39,6 +39,7 @@
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
0x00000000460f0010,v1,fujitsu/a64fx,core
+0x00000000460f0030,v1,fujitsu/monaka,core
0x00000000480fd010,v1,hisilicon/hip08,core
0x00000000500f0000,v1,ampere/emag,core
0x00000000c00fac30,v1,ampere/ampereone,core
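
The single mapfile.csv row below is what ties this whole event set to the CPU: at runtime perf derives a cpuid string from the MIDR and picks the directory named in the third column. As a rough illustration only (perf's real matcher is more involved than an exact string compare, and the helper name here is made up), a lookup of the new value could look like this:

import csv

def lookup(cpuid, mapfile='tools/perf/pmu-events/arch/arm64/mapfile.csv'):
    """Return the event directory mapped to a cpuid string, or None if no row matches."""
    with open(mapfile, newline='') as f:
        for row in csv.reader(f):
            # data rows have the shape cpuid,version,path,type; comment rows will not match
            if len(row) == 4 and row[0].strip().lower() == cpuid.lower():
                return row[2]
    return None

print(lookup('0x00000000460f0030'))  # expected: fujitsu/monaka, the row added by this patch
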
diff --git a/tools/perf/pmu-events/arch/arm64/recommended.json b/tools/perf/pmu-events/arch/arm64/recommended.json
index 210afa856091..a3b4941ae90c 100644
--- a/tools/perf/pmu-events/arch/arm64/recommended.json
+++ b/tools/perf/pmu-events/arch/arm64/recommended.json
@@ -318,6 +318,11 @@
"BriefDescription": "Barrier speculatively executed, DMB"
},
{
+ "EventCode": "0x7F",
+ "EventName": "CSDB_SPEC",
+ "BriefDescription": "Barrier Speculatively executed, CSDB."
+ },
+ {
"PublicDescription": "Exception taken, Other synchronous",
"EventCode": "0x81",
"EventName": "EXC_UNDEF",
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index d781a377757a..3e204700b59a 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -430,8 +430,11 @@ class JsonEvent:
def to_c_string(self, metric: bool) -> str:
"""Representation of the event as a C struct initializer."""
+ def fix_comment(s: str) -> str:
+ return s.replace('*/', r'\*\/')
+
s = self.build_c_string(metric)
- return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
+ return f'{{ { _bcs.offsets[s] } }}, /* {fix_comment(s)} */\n'
@lru_cache(maxsize=None)
@@ -461,12 +464,16 @@ def preprocess_arch_std_files(archpath: str) -> None:
"""Read in all architecture standard events."""
global _arch_std_events
for item in os.scandir(archpath):
- if item.is_file() and item.name.endswith('.json'):
+ if not item.is_file() or not item.name.endswith('.json'):
+ continue
+ try:
for event in read_json_events(item.path, topic=''):
if event.name:
_arch_std_events[event.name.lower()] = event
if event.metric_name:
_arch_std_events[event.metric_name.lower()] = event
+ except Exception as e:
+ raise RuntimeError(f'Failure processing \'{item.name}\' in \'{archpath}\'') from e
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
@@ -1252,7 +1259,10 @@ def main() -> None:
item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
if 'test' not in item_path and 'common' not in item_path and item_path not in _args.model.split(','):
continue
- action(parents, item)
+ try:
+ action(parents, item)
+ except Exception as e:
+ raise RuntimeError(f'Action failure for \'{item.name}\' in {parents}') from e
if item.is_dir():
ftw(item.path, parents + [item.name], action)
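
The fix_comment() helper added to jevents.py above exists because each built string is emitted inside a C /* ... */ comment in the generated pmu-events source; a description containing the two characters */ would otherwise terminate that comment early. A standalone illustration of the escaping idea (not the jevents.py code path itself):

    def fix_comment(s: str) -> str:
        # Neutralize the C comment terminator before embedding in /* ... */.
        return s.replace('*/', r'\*\/')

    desc = 'Counts faults on int 3 */ style breakpoints'  # hypothetical description
    print(f'/* {fix_comment(desc)} */')
    # -> /* Counts faults on int 3 \*\/ style breakpoints */
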
diff --git a/tools/perf/scripts/Makefile.syscalls b/tools/perf/scripts/Makefile.syscalls
new file mode 100644
index 000000000000..8bf55333262e
--- /dev/null
+++ b/tools/perf/scripts/Makefile.syscalls
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+# This Makefile generates headers in
+# tools/perf/arch/$(SRCARCH)/include/generated/asm from the architecture's
+# syscall table. This will either be from the generic syscall table, or from a
+# table that is specific to that architecture.
+
+PHONY := all
+all:
+
+obj := $(OUTPUT)arch/$(SRCARCH)/include/generated/asm
+
+syscall_abis_32 := common,32
+syscall_abis_64 := common,64
+syscalltbl := $(srctree)/tools/scripts/syscall.tbl
+
+# let architectures override $(syscall_abis_%) and $(syscalltbl)
+-include $(srctree)/tools/perf/arch/$(SRCARCH)/entry/syscalls/Makefile.syscalls
+include $(srctree)/tools/build/Build.include
+-include $(srctree)/tools/perf/arch/$(SRCARCH)/entry/syscalls/Kbuild
+
+systbl := $(srctree)/tools/perf/scripts/syscalltbl.sh
+
+syscall-y := $(addprefix $(obj)/, $(syscall-y))
+
+# Remove stale wrappers when the corresponding files are removed from generic-y
+old-headers := $(wildcard $(obj)/*.h)
+unwanted := $(filter-out $(syscall-y),$(old-headers))
+
+quiet_cmd_remove = REMOVE $(unwanted)
+ cmd_remove = rm -f $(unwanted)
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) \
+ $(if $(systbl-args-$*),$(systbl-args-$*),$(systbl-args)) \
+ --abis $(subst $(space),$(comma),$(strip $(syscall_abis_$*))) \
+ $< $@
+
+all: $(syscall-y)
+ $(if $(unwanted),$(call cmd,remove))
+ @:
+
+$(obj)/syscalls_%.h: $(syscalltbl) $(systbl) FORCE
+ $(call if_changed,systbl)
+
+targets := $(syscall-y)
+
+# Create output directory. Skip it if at least one old header exists
+# since we know the output directory already exists.
+ifeq ($(old-headers),)
+$(shell mkdir -p $(obj))
+endif
+
+PHONY += FORCE
+
+FORCE:
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
+.PHONY: $(PHONY)
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 01f54d6724a5..60dcfe56d4d9 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -24,16 +24,6 @@
#include "../../../util/srcline.h"
#include "../../../util/srccode.h"
-#if PY_MAJOR_VERSION < 3
-#define _PyCapsule_GetPointer(arg1, arg2) \
- PyCObject_AsVoidPtr(arg1)
-#define _PyBytes_FromStringAndSize(arg1, arg2) \
- PyString_FromStringAndSize((arg1), (arg2))
-#define _PyUnicode_AsUTF8(arg) \
- PyString_AsString(arg)
-
-PyMODINIT_FUNC initperf_trace_context(void);
-#else
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCapsule_GetPointer((arg1), (arg2))
#define _PyBytes_FromStringAndSize(arg1, arg2) \
@@ -42,7 +32,6 @@ PyMODINIT_FUNC initperf_trace_context(void);
PyUnicode_AsUTF8(arg)
PyMODINIT_FUNC PyInit_perf_trace_context(void);
-#endif
static struct scripting_context *get_args(PyObject *args, const char *name, PyObject **arg2)
{
@@ -104,7 +93,7 @@ static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
if (c->sample->ip && !c->sample->insn_len && thread__maps(c->al->thread)) {
struct machine *machine = maps__machine(thread__maps(c->al->thread));
- script_fetch_insn(c->sample, c->al->thread, machine);
+ script_fetch_insn(c->sample, c->al->thread, machine, /*native_arch=*/true);
}
if (!c->sample->insn_len)
Py_RETURN_NONE; /* N.B. This is a return statement */
@@ -213,12 +202,6 @@ static PyMethodDef ContextMethods[] = {
{ NULL, NULL, 0, NULL}
};
-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC initperf_trace_context(void)
-{
- (void) Py_InitModule("perf_trace_context", ContextMethods);
-}
-#else
PyMODINIT_FUNC PyInit_perf_trace_context(void)
{
static struct PyModuleDef moduledef = {
@@ -240,4 +223,3 @@ PyMODINIT_FUNC PyInit_perf_trace_context(void)
return mod;
}
-#endif
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index 1f332e72b9b0..5e237a5a5f1b 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -3,98 +3,125 @@
#
# Copyright (c) 2018, Intel Corporation.
-from __future__ import division
-from __future__ import print_function
-
import os
import sys
-import struct
import re
import bisect
import collections
+from dataclasses import dataclass
+from typing import (Dict, Optional)
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+@dataclass(frozen=True)
+class IomemEntry:
+ """Read from a line in /proc/iomem"""
+ begin: int
+ end: int
+ indent: int
+ label: str
-#physical address ranges for System RAM
-system_ram = []
-#physical address ranges for Persistent Memory
-pmem = []
-#file object for proc iomem
-f = None
-#Count for each type of memory
-load_mem_type_cnt = collections.Counter()
-#perf event name
-event_name = None
+# Physical memory layout from /proc/iomem. The key is the indent level and
+# the value is a list of ranges.
+iomem: Dict[int, list[IomemEntry]] = collections.defaultdict(list)
+# Child nodes from the iomem parent.
+children: Dict[IomemEntry, set[IomemEntry]] = collections.defaultdict(set)
+# Maximum indent seen before an entry in the iomem file.
+max_indent: int = 0
+# Count for each range of memory.
+load_mem_type_cnt: Dict[IomemEntry, int] = collections.Counter()
+# Perf event name set from the first sample in the data.
+event_name: Optional[str] = None
def parse_iomem():
- global f
- f = open('/proc/iomem', 'r')
- for i, j in enumerate(f):
- m = re.split('-|:',j,2)
- if m[2].strip() == 'System RAM':
- system_ram.append(int(m[0], 16))
- system_ram.append(int(m[1], 16))
- if m[2].strip() == 'Persistent Memory':
- pmem.append(int(m[0], 16))
- pmem.append(int(m[1], 16))
+ """Populate iomem from /proc/iomem file"""
+ global iomem
+ global max_indent
+ global children
+ with open('/proc/iomem', 'r', encoding='ascii') as f:
+ for line in f:
+ indent = 0
+ while line[indent] == ' ':
+ indent += 1
+ if indent > max_indent:
+ max_indent = indent
+ m = re.split('-|:', line, 2)
+ begin = int(m[0], 16)
+ end = int(m[1], 16)
+ label = m[2].strip()
+ entry = IomemEntry(begin, end, indent, label)
+ # Before adding entry, search for a parent node using its begin.
+ if indent > 0:
+ parent = find_memory_type(begin)
+ assert parent, f"Given indent expected a parent for {label}"
+ children[parent].add(entry)
+ iomem[indent].append(entry)
-def print_memory_type():
- print("Event: %s" % (event_name))
- print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
- print("%-40s %10s %10s\n" % ("----------------------------------------",
- "-----------", "-----------"),
- end='');
- total = sum(load_mem_type_cnt.values())
- for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
- key = lambda kv: (kv[1], kv[0]), reverse = True):
- print("%-40s %10d %10.1f%%\n" %
- (mem_type, count, 100 * count / total),
- end='')
+def find_memory_type(phys_addr) -> Optional[IomemEntry]:
+ """Search iomem for the range containing phys_addr with the maximum indent"""
+ for i in range(max_indent, -1, -1):
+ if i not in iomem:
+ continue
+ position = bisect.bisect_right(iomem[i], phys_addr,
+ key=lambda entry: entry.begin)
+ if position is None:
+ continue
+ iomem_entry = iomem[i][position-1]
+ if iomem_entry.begin <= phys_addr <= iomem_entry.end:
+ return iomem_entry
+ print(f"Didn't find {phys_addr}")
+ return None
-def trace_begin():
- parse_iomem()
+def print_memory_type():
+ print(f"Event: {event_name}")
+ print(f"{'Memory type':<40} {'count':>10} {'percentage':>10}")
+ print(f"{'-' * 40:<40} {'-' * 10:>10} {'-' * 10:>10}")
+ total = sum(load_mem_type_cnt.values())
+ # Add count from children into the parent.
+ for i in range(max_indent, -1, -1):
+ if i not in iomem:
+ continue
+ for entry in iomem[i]:
+ global children
+ for child in children[entry]:
+ if load_mem_type_cnt[child] > 0:
+ load_mem_type_cnt[entry] += load_mem_type_cnt[child]
-def trace_end():
- print_memory_type()
- f.close()
+ def print_entries(entries):
+ """Print counts from parents down to their children"""
+ global children
+ for entry in sorted(entries,
+ key = lambda entry: load_mem_type_cnt[entry],
+ reverse = True):
+ count = load_mem_type_cnt[entry]
+ if count > 0:
+ mem_type = ' ' * entry.indent + f"{entry.begin:x}-{entry.end:x} : {entry.label}"
+ percent = 100 * count / total
+ print(f"{mem_type:<40} {count:>10} {percent:>10.1f}")
+ print_entries(children[entry])
-def is_system_ram(phys_addr):
- #/proc/iomem is sorted
- position = bisect.bisect(system_ram, phys_addr)
- if position % 2 == 0:
- return False
- return True
+ print_entries(iomem[0])
-def is_persistent_mem(phys_addr):
- position = bisect.bisect(pmem, phys_addr)
- if position % 2 == 0:
- return False
- return True
+def trace_begin():
+ parse_iomem()
-def find_memory_type(phys_addr):
- if phys_addr == 0:
- return "N/A"
- if is_system_ram(phys_addr):
- return "System RAM"
+def trace_end():
+ print_memory_type()
- if is_persistent_mem(phys_addr):
- return "Persistent Memory"
+def process_event(param_dict):
+ if "sample" not in param_dict:
+ return
- #slow path, search all
- f.seek(0, 0)
- for j in f:
- m = re.split('-|:',j,2)
- if int(m[0], 16) <= phys_addr <= int(m[1], 16):
- return m[2]
- return "N/A"
+ sample = param_dict["sample"]
+ if "phys_addr" not in sample:
+ return
-def process_event(param_dict):
- name = param_dict["ev_name"]
- sample = param_dict["sample"]
- phys_addr = sample["phys_addr"]
+ phys_addr = sample["phys_addr"]
+ entry = find_memory_type(phys_addr)
+ if entry:
+ load_mem_type_cnt[entry] += 1
- global event_name
- if event_name == None:
- event_name = name
- load_mem_type_cnt[find_memory_type(phys_addr)] += 1
+ global event_name
+ if event_name is None:
+ event_name = param_dict["ev_name"]
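
For readers skimming the mem-phys-addr.py rewrite above: /proc/iomem ranges are grouped by indent depth, and a physical address is resolved by scanning from the deepest indent upward, bisecting each level on the range start so the most specific (deepest) match wins. A minimal standalone sketch of that lookup, using made-up ranges rather than a real /proc/iomem:

    import bisect
    from collections import namedtuple

    Range = namedtuple('Range', 'begin end indent label')

    # Hypothetical layout keyed by indent depth, each level sorted by begin.
    iomem = {
        0: [Range(0x1000, 0x9ffff, 0, 'System RAM'),
            Range(0x100000, 0x7fffffff, 0, 'System RAM')],
        1: [Range(0x1000000, 0x1ffffff, 1, 'Kernel code')],
    }

    def find_range(phys_addr, max_indent=1):
        # Deepest match first, mirroring find_memory_type() above.
        for i in range(max_indent, -1, -1):
            entries = iomem.get(i, [])
            pos = bisect.bisect_right(entries, phys_addr, key=lambda r: r.begin)
            if pos == 0:
                continue
            cand = entries[pos - 1]
            if cand.begin <= phys_addr <= cand.end:
                return cand
        return None

    print(find_range(0x1500000).label)  # Kernel code (indent 1 wins)
    print(find_range(0x200000).label)   # System RAM
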
diff --git a/tools/perf/scripts/syscalltbl.sh b/tools/perf/scripts/syscalltbl.sh
new file mode 100755
index 000000000000..1ce0d5aa8b50
--- /dev/null
+++ b/tools/perf/scripts/syscalltbl.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate a syscall table header.
+#
+# Each line of the syscall table should have the following format:
+#
+# NR ABI NAME [NATIVE] [COMPAT]
+#
+# NR syscall number
+# ABI ABI name
+# NAME syscall name
+# NATIVE native entry point (optional)
+# COMPAT compat entry point (optional)
+
+set -e
+
+usage() {
+ echo >&2 "usage: $0 [--abis ABIS] INFILE OUTFILE" >&2
+ echo >&2
+ echo >&2 " INFILE input syscall table"
+ echo >&2 " OUTFILE output header file"
+ echo >&2
+ echo >&2 "options:"
+ echo >&2 " --abis ABIS ABI(s) to handle (By default, all lines are handled)"
+ exit 1
+}
+
+# default unless specified by options
+abis=
+
+while [ $# -gt 0 ]
+do
+ case $1 in
+ --abis)
+ abis=$(echo "($2)" | tr ',' '|')
+ shift 2;;
+ -*)
+ echo "$1: unknown option" >&2
+ usage;;
+ *)
+ break;;
+ esac
+done
+
+if [ $# -ne 2 ]; then
+ usage
+fi
+
+infile="$1"
+outfile="$2"
+
+nxt=0
+
+syscall_macro() {
+ nr="$1"
+ name="$2"
+
+ echo " [$nr] = \"$name\","
+}
+
+emit() {
+ nr="$1"
+ entry="$2"
+
+ syscall_macro "$nr" "$entry"
+}
+
+echo "static const char *const syscalltbl[] = {" > $outfile
+
+sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
+grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | sort -n > $sorted_table
+
+max_nr=0
+# the params are: nr abi name entry compat
+# use _ for intentionally unused variables according to SC2034
+while read nr _ name _ _; do
+ emit "$nr" "$name" >> $outfile
+ max_nr=$nr
+done < $sorted_table
+
+rm -f $sorted_table
+
+echo "};" >> $outfile
+
+echo "#define SYSCALLTBL_MAX_ID ${max_nr}" >> $outfile
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index ec4e1f034742..4bf8d3f5eae7 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -5,10 +5,10 @@ perf-test-y += tests-scripts.o
perf-test-y += parse-events.o
perf-test-y += dso-data.o
perf-test-y += vmlinux-kallsyms.o
-perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall.o
-perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall-all-cpus.o
+perf-test-y += openat-syscall.o
+perf-test-y += openat-syscall-all-cpus.o
perf-test-$(CONFIG_LIBTRACEEVENT) += openat-syscall-tp-fields.o
-perf-test-$(CONFIG_LIBTRACEEVENT) += mmap-basic.o
+perf-test-y += mmap-basic.o
perf-test-y += perf-record.o
perf-test-y += evsel-roundtrip-name.o
perf-test-$(CONFIG_LIBTRACEEVENT) += evsel-tp-sched.o
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 4751dd3c6f67..14d30a5053be 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -42,6 +42,8 @@
static bool dont_fork;
/* Fork the tests in parallel and wait for their completion. */
static bool sequential;
+/* Number of times each test is run. */
+static unsigned int runs_per_test = 1;
const char *dso_to_test;
const char *test_objdump_path = "objdump";
@@ -60,11 +62,9 @@ static struct test_suite *arch_tests[] = {
static struct test_suite *generic_tests[] = {
&suite__vmlinux_matches_kallsyms,
-#ifdef HAVE_LIBTRACEEVENT
&suite__openat_syscall_event,
&suite__openat_syscall_event_on_all_cpus,
&suite__basic_mmap,
-#endif
&suite__mem,
&suite__parse_events,
&suite__expr,
@@ -151,58 +151,51 @@ static struct test_workload *workloads[] = {
#define workloads__for_each(workload) \
for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)
-static int num_subtests(const struct test_suite *t)
+#define test_suite__for_each_test_case(suite, idx) \
+ for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
+
+static int test_suite__num_test_cases(const struct test_suite *t)
{
int num;
- if (!t->test_cases)
- return 0;
-
- num = 0;
- while (t->test_cases[num].name)
- num++;
+ test_suite__for_each_test_case(t, num);
return num;
}
-static bool has_subtests(const struct test_suite *t)
-{
- return num_subtests(t) > 1;
-}
-
-static const char *skip_reason(const struct test_suite *t, int subtest)
+static const char *skip_reason(const struct test_suite *t, int test_case)
{
if (!t->test_cases)
return NULL;
- return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
+ return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
}
-static const char *test_description(const struct test_suite *t, int subtest)
+static const char *test_description(const struct test_suite *t, int test_case)
{
- if (t->test_cases && subtest >= 0)
- return t->test_cases[subtest].desc;
+ if (t->test_cases && test_case >= 0)
+ return t->test_cases[test_case].desc;
return t->desc;
}
-static test_fnptr test_function(const struct test_suite *t, int subtest)
+static test_fnptr test_function(const struct test_suite *t, int test_case)
{
- if (subtest <= 0)
+ if (test_case <= 0)
return t->test_cases[0].run_case;
- return t->test_cases[subtest].run_case;
+ return t->test_cases[test_case].run_case;
}
-static bool test_exclusive(const struct test_suite *t, int subtest)
+static bool test_exclusive(const struct test_suite *t, int test_case)
{
- if (subtest <= 0)
+ if (test_case <= 0)
return t->test_cases[0].exclusive;
- return t->test_cases[subtest].exclusive;
+ return t->test_cases[test_case].exclusive;
}
-static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
+static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
int i;
@@ -214,7 +207,7 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char
long nr = strtoul(argv[i], &end, 10);
if (*end == '\0') {
- if (nr == curr + 1)
+ if (nr == suite_num + 1)
return true;
continue;
}
@@ -229,8 +222,8 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char
struct child_test {
struct child_process process;
struct test_suite *test;
- int test_num;
- int subtest;
+ int suite_num;
+ int test_case_num;
};
static jmp_buf run_test_jmp_buf;
@@ -260,7 +253,7 @@ static int run_test_child(struct child_process *process)
pr_debug("--- start ---\n");
pr_debug("test child forked, pid %d\n", getpid());
- err = test_function(child->test, child->subtest)(child->test, child->subtest);
+ err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
pr_debug("---- end(%d) ----\n", err);
err_out:
@@ -272,15 +265,16 @@ err_out:
#define TEST_RUNNING -3
-static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
- int running)
+static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
+ int result, int width, int running)
{
- if (has_subtests(t)) {
+ if (test_suite__num_test_cases(t) > 1) {
int subw = width > 2 ? width - 2 : width;
- pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
+ pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
+ test_description(t, curr_test_case));
} else
- pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));
+ pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));
switch (result) {
case TEST_RUNNING:
@@ -290,7 +284,7 @@ static int print_test_result(struct test_suite *t, int i, int subtest, int resul
pr_info(" Ok\n");
break;
case TEST_SKIP: {
- const char *reason = skip_reason(t, subtest);
+ const char *reason = skip_reason(t, curr_test_case);
if (reason)
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
@@ -312,7 +306,7 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
{
struct child_test *child_test = child_tests[running_test];
struct test_suite *t;
- int i, subi, err;
+ int curr_suite, curr_test_case, err;
bool err_done = false;
struct strbuf err_output = STRBUF_INIT;
int last_running = -1;
@@ -323,15 +317,15 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
return;
}
t = child_test->test;
- i = child_test->test_num;
- subi = child_test->subtest;
+ curr_suite = child_test->suite_num;
+ curr_test_case = child_test->test_case_num;
err = child_test->process.err;
/*
* For test suites with subtests, display the suite name ahead of the
* sub test names.
*/
- if (has_subtests(t) && subi == 0)
- pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));
+ if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
+ pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));
/*
* Busy loop reading from the child's stdout/stderr that are set to be
@@ -340,10 +334,11 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
if (err > 0)
fcntl(err, F_SETFL, O_NONBLOCK);
if (verbose > 1) {
- if (has_subtests(t))
- pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
+ if (test_suite__num_test_cases(t) > 1)
+ pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
+ test_description(t, curr_test_case));
else
- pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+ pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
}
while (!err_done) {
struct pollfd pfds[1] = {
@@ -368,7 +363,8 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
*/
fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
}
- print_test_result(t, i, subi, TEST_RUNNING, width, running);
+ print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
+ width, running);
last_running = running;
}
}
@@ -406,14 +402,14 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
fprintf(stderr, "%s", err_output.buf);
strbuf_release(&err_output);
- print_test_result(t, i, subi, ret, width, /*running=*/0);
+ print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
if (err > 0)
close(err);
zfree(&child_tests[running_test]);
}
-static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
- int width, int pass)
+static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
+ struct child_test **child, int width, int pass)
{
int err;
@@ -421,17 +417,18 @@ static int start_test(struct test_suite *test, int i, int subi, struct child_tes
if (dont_fork) {
if (pass == 1) {
pr_debug("--- start ---\n");
- err = test_function(test, subi)(test, subi);
+ err = test_function(test, curr_test_case)(test, curr_test_case);
pr_debug("---- end ----\n");
- print_test_result(test, i, subi, err, width, /*running=*/0);
+ print_test_result(test, curr_suite, curr_test_case, err, width,
+ /*running=*/0);
}
return 0;
}
- if (pass == 1 && !sequential && test_exclusive(test, subi)) {
+ if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
/* When parallel, skip exclusive tests on the first pass. */
return 0;
}
- if (pass != 1 && (sequential || !test_exclusive(test, subi))) {
+ if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
/* Sequential and non-exclusive tests were run on the first pass. */
return 0;
}
@@ -440,8 +437,8 @@ static int start_test(struct test_suite *test, int i, int subi, struct child_tes
return -ENOMEM;
(*child)->test = test;
- (*child)->test_num = i;
- (*child)->subtest = subi;
+ (*child)->suite_num = curr_suite;
+ (*child)->test_case_num = curr_test_case;
(*child)->process.pid = -1;
(*child)->process.no_stdin = 1;
if (verbose <= 0) {
@@ -481,20 +478,16 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
int err = 0;
for (struct test_suite **t = suites; *t; t++) {
- int len = strlen(test_description(*t, -1));
+ int i, len = strlen(test_description(*t, -1));
if (width < len)
width = len;
- if (has_subtests(*t)) {
- for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
- len = strlen(test_description(*t, subi));
- if (width < len)
- width = len;
- num_tests++;
- }
- } else {
- num_tests++;
+ test_suite__for_each_test_case(*t, i) {
+ len = strlen(test_description(*t, i));
+ if (width < len)
+ width = len;
+ num_tests += runs_per_test;
}
}
child_tests = calloc(num_tests, sizeof(*child_tests));
@@ -512,7 +505,7 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
continue;
pr_debug3("Killing %d pid %d\n",
- child_test->test_num + 1,
+ child_test->suite_num + 1,
child_test->process.pid);
kill(child_test->process.pid, err);
}
@@ -528,50 +521,48 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
*/
for (int pass = 1; pass <= 2; pass++) {
int child_test_num = 0;
- int i = 0;
+ int curr_suite = 0;
- for (struct test_suite **t = suites; *t; t++) {
- int curr = i++;
+ for (struct test_suite **t = suites; *t; t++, curr_suite++) {
+ int curr_test_case;
- if (!perf_test__matches(test_description(*t, -1), curr, argc, argv)) {
+ if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
/*
* Test suite shouldn't be run based on
- * description. See if subtest should.
+ * description. See if any test case should.
*/
bool skip = true;
- for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
- if (perf_test__matches(test_description(*t, subi),
- curr, argc, argv))
+ test_suite__for_each_test_case(*t, curr_test_case) {
+ if (perf_test__matches(test_description(*t, curr_test_case),
+ curr_suite, argc, argv)) {
skip = false;
+ break;
+ }
}
-
if (skip)
continue;
}
- if (intlist__find(skiplist, i)) {
- pr_info("%3d: %-*s:", curr + 1, width, test_description(*t, -1));
+ if (intlist__find(skiplist, curr_suite + 1)) {
+ pr_info("%3d: %-*s:", curr_suite + 1, width,
+ test_description(*t, -1));
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
- if (!has_subtests(*t)) {
- err = start_test(*t, curr, -1, &child_tests[child_test_num++],
- width, pass);
- if (err)
- goto err_out;
- continue;
- }
- for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
- if (!perf_test__matches(test_description(*t, subi),
- curr, argc, argv))
- continue;
-
- err = start_test(*t, curr, subi, &child_tests[child_test_num++],
- width, pass);
- if (err)
- goto err_out;
+ for (unsigned int run = 0; run < runs_per_test; run++) {
+ test_suite__for_each_test_case(*t, curr_test_case) {
+ if (!perf_test__matches(test_description(*t, curr_test_case),
+ curr_suite, argc, argv))
+ continue;
+
+ err = start_test(*t, curr_suite, curr_test_case,
+ &child_tests[child_test_num++],
+ width, pass);
+ if (err)
+ goto err_out;
+ }
}
}
if (!sequential) {
@@ -592,25 +583,24 @@ err_out:
return err;
}
-static int perf_test__list(struct test_suite **suites, int argc, const char **argv)
+static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
{
- int i = 0;
+ int curr_suite = 0;
- for (struct test_suite **t = suites; *t; t++) {
- int curr = i++;
+ for (struct test_suite **t = suites; *t; t++, curr_suite++) {
+ int curr_test_case;
- if (!perf_test__matches(test_description(*t, -1), curr, argc, argv))
+ if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
continue;
- pr_info("%3d: %s\n", i, test_description(*t, -1));
+ fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));
- if (has_subtests(*t)) {
- int subn = num_subtests(*t);
- int subi;
+ if (test_suite__num_test_cases(*t) <= 1)
+ continue;
- for (subi = 0; subi < subn; subi++)
- pr_info("%3d:%1d: %s\n", i, subi + 1,
- test_description(*t, subi));
+ test_suite__for_each_test_case(*t, curr_test_case) {
+ fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
+ test_description(*t, curr_test_case));
}
}
return 0;
@@ -667,27 +657,24 @@ static struct test_suite **build_suites(void)
if (suites[2] == NULL)
suites[2] = create_script_test_suites();
-#define for_each_test(t) \
+#define for_each_suite(suite) \
for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0) \
- while ((t = suites[i][j++]) != NULL)
+ while ((suite = suites[i][j++]) != NULL)
- for_each_test(t)
+ for_each_suite(t)
num_suites++;
result = calloc(num_suites + 1, sizeof(struct test_suite *));
for (int pass = 1; pass <= 2; pass++) {
- for_each_test(t) {
+ for_each_suite(t) {
bool exclusive = false;
+ int curr_test_case;
- if (!has_subtests(t)) {
- exclusive = test_exclusive(t, -1);
- } else {
- for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
- if (test_exclusive(t, subi)) {
- exclusive = true;
- break;
- }
+ test_suite__for_each_test_case(t, curr_test_case) {
+ if (test_exclusive(t, curr_test_case)) {
+ exclusive = true;
+ break;
}
}
if ((!exclusive && pass == 1) || (exclusive && pass == 2))
@@ -695,7 +682,7 @@ static struct test_suite **build_suites(void)
}
}
return result;
-#undef for_each_test
+#undef for_each_suite
}
int cmd_test(int argc, const char **argv)
@@ -715,6 +702,8 @@ int cmd_test(int argc, const char **argv)
"Do not fork for testcase"),
OPT_BOOLEAN('S', "sequential", &sequential,
"Run the tests one after another rather than in parallel"),
+ OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
+ "Run each test the given number of times, default 1"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
@@ -738,7 +727,7 @@ int cmd_test(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
if (argc >= 1 && !strcmp(argv[0], "list")) {
suites = build_suites();
- ret = perf_test__list(suites, argc - 1, argv + 1);
+ ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
free(suites);
return ret;
}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 27c82cfb7e7d..b1abb34d7818 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
@@ -8,6 +9,7 @@
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
+#include <sys/utsname.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
@@ -176,16 +178,104 @@ static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
return err;
}
+/*
+ * Only gets GNU objdump version. Returns 0 for llvm-objdump.
+ */
+static int objdump_version(void)
+{
+ size_t line_len;
+ char cmd[PATH_MAX * 2];
+ char *line = NULL;
+ const char *fmt;
+ FILE *f;
+ int ret;
+
+ int version_tmp, version_num = 0;
+ char *version = 0, *token;
+
+ fmt = "%s --version";
+ ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
+ if (ret <= 0 || (size_t)ret >= sizeof(cmd))
+ return -1;
+ /* Ignore objdump errors */
+ strcat(cmd, " 2>/dev/null");
+ f = popen(cmd, "r");
+ if (!f) {
+ pr_debug("popen failed\n");
+ return -1;
+ }
+ /* Get first line of objdump --version output */
+ ret = getline(&line, &line_len, f);
+ pclose(f);
+ if (ret < 0) {
+ pr_debug("getline failed\n");
+ return -1;
+ }
+
+ token = strsep(&line, " ");
+ if (token != NULL && !strcmp(token, "GNU")) {
+ // version is last part of first line of objdump --version output.
+ while ((token = strsep(&line, " ")))
+ version = token;
+
+ // Convert version into a format we can compare with
+ token = strsep(&version, ".");
+ version_num = atoi(token);
+ if (version_num)
+ version_num *= 10000;
+
+ token = strsep(&version, ".");
+ version_tmp = atoi(token);
+ if (token)
+ version_num += version_tmp * 100;
+
+ token = strsep(&version, ".");
+ version_tmp = atoi(token);
+ if (token)
+ version_num += version_tmp;
+ }
+
+ return version_num;
+}
+
static int read_via_objdump(const char *filename, u64 addr, void *buf,
size_t len)
{
+ u64 stop_address = addr + len;
+ struct utsname uname_buf;
char cmd[PATH_MAX * 2];
const char *fmt;
FILE *f;
int ret;
+ ret = uname(&uname_buf);
+ if (ret) {
+ pr_debug("uname failed\n");
+ return -1;
+ }
+
+ if (!strncmp(uname_buf.machine, "riscv", 5)) {
+ int version = objdump_version();
+
+ /* Default to this workaround if version parsing fails */
+ if (version < 0 || version > 24100) {
+ /*
+ * Starting at riscv objdump version 2.41, dumping in
+ * the middle of an instruction is not supported. riscv
+ * instructions are aligned along 2-byte intervals and
+ * can be either 2-bytes or 4-bytes. This makes it
+ * possible that the stop-address lands in the middle of
+ * a 4-byte instruction. Increase the stop_address by
+ * two to ensure an instruction is not cut in half, but
+ * leave the len as-is so only the expected number of
+ * bytes are collected.
+ */
+ stop_address += 2;
+ }
+ }
+
fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
- ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, addr + len,
+ ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
filename);
if (ret <= 0 || (size_t)ret >= sizeof(cmd))
return -1;
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index 2f0168b2a5a9..2354246afc5a 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -156,21 +156,54 @@ static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subte
return 0;
}
-static int test__cpu_map_merge(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+static int __test__cpu_map_merge(const char *lhs, const char *rhs, int nr, const char *expected)
{
- struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
- struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
- struct perf_cpu_map *c = perf_cpu_map__merge(a, b);
+ struct perf_cpu_map *a = perf_cpu_map__new(lhs);
+ struct perf_cpu_map *b = perf_cpu_map__new(rhs);
char buf[100];
- TEST_ASSERT_VAL("failed to merge map: bad nr", perf_cpu_map__nr(c) == 5);
- cpu_map__snprint(c, buf, sizeof(buf));
- TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
+ perf_cpu_map__merge(&a, b);
+ TEST_ASSERT_VAL("failed to merge map: bad nr", perf_cpu_map__nr(a) == nr);
+ cpu_map__snprint(a, buf, sizeof(buf));
+ TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, expected));
perf_cpu_map__put(b);
- perf_cpu_map__put(c);
+
+ /*
+ * If 'b' is a superset of 'a', 'a' ends up pointing to the same map as
+ * 'b'. In this case, the owner 'b' has released its reference above but
+ * 'a' still keeps ownership, so the reference counter should be 1.
+ */
+ TEST_ASSERT_VAL("unexpected refcnt: bad result",
+ refcount_read(perf_cpu_map__refcnt(a)) == 1);
+
+ perf_cpu_map__put(a);
return 0;
}
+static int test__cpu_map_merge(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int ret;
+
+ ret = __test__cpu_map_merge("4,2,1", "4,5,7", 5, "1-2,4-5,7");
+ if (ret)
+ return ret;
+ ret = __test__cpu_map_merge("1-8", "6-9", 9, "1-9");
+ if (ret)
+ return ret;
+ ret = __test__cpu_map_merge("1-8,12-20", "6-9,15", 18, "1-9,12-20");
+ if (ret)
+ return ret;
+ ret = __test__cpu_map_merge("4,2,1", "1", 3, "1-2,4");
+ if (ret)
+ return ret;
+ ret = __test__cpu_map_merge("1", "4,2,1", 3, "1-2,4");
+ if (ret)
+ return ret;
+ ret = __test__cpu_map_merge("1", "1", 1, "1");
+ return ret;
+}
+
static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
{
struct perf_cpu_map *a = perf_cpu_map__new(lhs);
@@ -219,30 +252,29 @@ static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subte
struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two);
struct perf_cpu_map *pair = perf_cpu_map__new("1-2");
struct perf_cpu_map *tmp;
- struct perf_cpu_map *maps[] = {empty, any, one, two, pair};
+ struct perf_cpu_map **maps[] = {&empty, &any, &one, &two, &pair};
for (size_t i = 0; i < ARRAY_SIZE(maps); i++) {
/* Maps equal themself. */
- TEST_ASSERT_VAL("equal", perf_cpu_map__equal(maps[i], maps[i]));
+ TEST_ASSERT_VAL("equal", perf_cpu_map__equal(*maps[i], *maps[i]));
for (size_t j = 0; j < ARRAY_SIZE(maps); j++) {
/* Maps dont't equal each other. */
if (i == j)
continue;
- TEST_ASSERT_VAL("not equal", !perf_cpu_map__equal(maps[i], maps[j]));
+ TEST_ASSERT_VAL("not equal", !perf_cpu_map__equal(*maps[i], *maps[j]));
}
}
/* Maps equal made maps. */
- tmp = perf_cpu_map__merge(perf_cpu_map__get(one), two);
- TEST_ASSERT_VAL("pair", perf_cpu_map__equal(pair, tmp));
- perf_cpu_map__put(tmp);
+ perf_cpu_map__merge(&two, one);
+ TEST_ASSERT_VAL("pair", perf_cpu_map__equal(pair, two));
tmp = perf_cpu_map__intersect(pair, one);
TEST_ASSERT_VAL("one", perf_cpu_map__equal(one, tmp));
perf_cpu_map__put(tmp);
for (size_t i = 0; i < ARRAY_SIZE(maps); i++)
- perf_cpu_map__put(maps[i]);
+ perf_cpu_map__put(*maps[i]);
return TEST_OK;
}
diff --git a/tools/perf/tests/event_groups.c b/tools/perf/tests/event_groups.c
index ccd9d8b2903f..c119ff114948 100644
--- a/tools/perf/tests/event_groups.c
+++ b/tools/perf/tests/event_groups.c
@@ -10,9 +10,10 @@
#include "header.h"
#include "../perf-sys.h"
-/* hw: cycles, sw: context-switch, uncore: [arch dependent] */
+/* hw: cycles,instructions sw: context-switch, uncore: [arch dependent] */
static int types[] = {0, 1, -1};
static unsigned long configs[] = {0, 3, 0};
+static unsigned long configs_hw[] = {1};
#define NR_UNCORE_PMUS 5
@@ -93,7 +94,18 @@ static int run_test(int i, int j, int k)
return erroneous ? 0 : -1;
}
- sibling_fd2 = event_open(types[k], configs[k], group_fd);
+ /*
+ * If all three events (the leader and two sibling events)
+ * are hardware events, use instructions as one of the
+ * sibling events. powerpc has an event constraint that
+ * events using the same counter cannot be programmed in a
+ * group. Since PERF_COUNT_HW_INSTRUCTIONS is a generic
+ * hardware event present on all platforms, use that.
+ */
+ if (!i && !j && !k)
+ sibling_fd2 = event_open(types[k], configs_hw[k], group_fd);
+ else
+ sibling_fd2 = event_open(types[k], configs[k], group_fd);
if (sibling_fd2 == -1) {
close(sibling_fd1);
close(group_fd);
@@ -124,9 +136,18 @@ static int test__event_groups(struct test_suite *text __maybe_unused, int subtes
if (r)
ret = TEST_FAIL;
- pr_debug("0x%x 0x%lx, 0x%x 0x%lx, 0x%x 0x%lx: %s\n",
- types[i], configs[i], types[j], configs[j],
- types[k], configs[k], r ? "Fail" : "Pass");
+ /*
+ * When all three events are HW events, the second sibling
+ * event is picked from configs_hw, so print accordingly.
+ */
+ if (!i && !j && !k)
+ pr_debug("0x%x 0x%lx, 0x%x 0x%lx, 0x%x 0x%lx: %s\n",
+ types[i], configs[i], types[j], configs[j],
+ types[k], configs_hw[k], r ? "Fail" : "Pass");
+ else
+ pr_debug("0x%x 0x%lx, 0x%x 0x%lx, 0x%x 0x%lx: %s\n",
+ types[i], configs[i], types[j], configs[j],
+ types[k], configs[k], r ? "Fail" : "Pass");
}
}
}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index a7fcbd589752..0ee94caf9ec1 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -86,7 +86,6 @@ make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
make_no_backtrace := NO_BACKTRACE=1
make_no_libcapstone := NO_CAPSTONE=1
make_no_libnuma := NO_LIBNUMA=1
-make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
@@ -97,7 +96,6 @@ make_no_libllvm := NO_LIBLLVM=1
make_with_babeltrace:= LIBBABELTRACE=1
make_with_coresight := CORESIGHT=1
make_no_sdt := NO_SDT=1
-make_no_syscall_tbl := NO_SYSCALL_TABLE=1
make_no_libpfm4 := NO_LIBPFM4=1
make_with_gtk2 := GTK2=1
make_refcnt_check := EXTRA_CFLAGS="-DREFCNT_CHECKING=1"
@@ -122,10 +120,10 @@ make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX3
# all the NO_* variable combined
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_BACKTRACE=1
-make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
+make_minimal += NO_LIBNUMA=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
-make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1 NO_CAPSTONE=1
+make_minimal += NO_LIBCAP=1 NO_CAPSTONE=1
# $(run) contains all available tests
run := make_pure
@@ -158,7 +156,6 @@ run += make_no_libdw_dwarf_unwind
run += make_no_backtrace
run += make_no_libcapstone
run += make_no_libnuma
-run += make_no_libaudit
run += make_no_libbionic
run += make_no_auxtrace
run += make_no_libbpf
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 82a19674a38f..5ec2e5607987 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -54,8 +54,6 @@ static bool test_perf_config(const struct perf_evsel *evsel, __u64 expected_conf
return (evsel->attr.config & PERF_HW_EVENT_MASK) == expected_config;
}
-#ifdef HAVE_LIBTRACEEVENT
-
#if defined(__s390x__)
/* Return true if kvm module is available and loaded. Test this
* and return success when trace point kvm_s390_create_vm
@@ -112,7 +110,6 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)
}
return TEST_OK;
}
-#endif /* HAVE_LIBTRACEEVENT */
static int test__checkevent_raw(struct evlist *evlist)
{
@@ -311,7 +308,6 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)
return TEST_OK;
}
-#ifdef HAVE_LIBTRACEEVENT
static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
@@ -340,7 +336,6 @@ test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
return test__checkevent_tracepoint_multi(evlist);
}
-#endif /* HAVE_LIBTRACEEVENT */
static int test__checkevent_raw_modifier(struct evlist *evlist)
{
@@ -629,7 +624,6 @@ static int test__checkevent_pmu(struct evlist *evlist)
return TEST_OK;
}
-#ifdef HAVE_LIBTRACEEVENT
static int test__checkevent_list(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
@@ -671,7 +665,6 @@ static int test__checkevent_list(struct evlist *evlist)
return TEST_OK;
}
-#endif
static int test__checkevent_pmu_name(struct evlist *evlist)
{
@@ -971,7 +964,6 @@ static int test__group2(struct evlist *evlist)
return TEST_OK;
}
-#ifdef HAVE_LIBTRACEEVENT
static int test__group3(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL;
@@ -1078,7 +1070,6 @@ static int test__group3(struct evlist *evlist __maybe_unused)
}
return TEST_OK;
}
-#endif
static int test__group4(struct evlist *evlist __maybe_unused)
{
@@ -1813,7 +1804,6 @@ static int test__term_equal_legacy(struct evlist *evlist)
return TEST_OK;
}
-#ifdef HAVE_LIBTRACEEVENT
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1867,7 +1857,6 @@ static int test__all_tracepoints(struct evlist *evlist)
return test__checkevent_tracepoint_multi(evlist);
}
-#endif /* HAVE_LIBTRACEVENT */
struct evlist_test {
const char *name;
@@ -1876,7 +1865,6 @@ struct evlist_test {
};
static const struct evlist_test test__events[] = {
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "syscalls:sys_enter_openat",
.check = test__checkevent_tracepoint,
@@ -1887,7 +1875,6 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_tracepoint_multi,
/* 1 */
},
-#endif
{
.name = "r1a",
.check = test__checkevent_raw,
@@ -1938,7 +1925,6 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_breakpoint_w,
/* 1 */
},
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "syscalls:sys_enter_openat:k",
.check = test__checkevent_tracepoint_modifier,
@@ -1949,7 +1935,6 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_tracepoint_multi_modifier,
/* 3 */
},
-#endif
{
.name = "r1a:kp",
.check = test__checkevent_raw_modifier,
@@ -1995,13 +1980,11 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_breakpoint_w_modifier,
/* 2 */
},
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "r1,syscalls:sys_enter_openat:k,1:1:hp",
.check = test__checkevent_list,
/* 3 */
},
-#endif
{
.name = "instructions:G",
.check = test__checkevent_exclude_host_modifier,
@@ -2032,13 +2015,11 @@ static const struct evlist_test test__events[] = {
.check = test__group2,
/* 9 */
},
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
.check = test__group3,
/* 0 */
},
-#endif
{
.name = "{cycles:u,instructions:kp}:p",
.check = test__group4,
@@ -2049,13 +2030,11 @@ static const struct evlist_test test__events[] = {
.check = test__group5,
/* 2 */
},
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "*:*",
.check = test__all_tracepoints,
/* 3 */
},
-#endif
{
.name = "{cycles,cache-misses:G}:H",
.check = test__group_gh1,
@@ -2111,7 +2090,7 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_breakpoint_len_rw_modifier,
/* 4 */
},
-#if defined(__s390x__) && defined(HAVE_LIBTRACEEVENT)
+#if defined(__s390x__)
{
.name = "kvm-s390:kvm_s390_create_vm",
.check = test__checkevent_tracepoint,
@@ -2265,13 +2244,11 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_breakpoint_2_events,
/* 3 */
},
-#ifdef HAVE_LIBTRACEEVENT
{
.name = "9p:9p_client_req",
.check = test__checkevent_tracepoint,
/* 4 */
},
-#endif
};
static const struct evlist_test test__events_pmu[] = {
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index bead723e34af..8226449ac5c3 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject blacklisted probes (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -22,7 +22,7 @@ TEST_RESULT=0
BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
# try to find vmlinux with DWARF debug info
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index d541ffd44a93..df288cf90cd6 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Add 'perf probe's, list and remove them
+# perf_probe :: Add probes, list and remove them (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -33,7 +33,7 @@ fi
check_kprobes_available
if [ $? -ne 0 ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
@@ -169,7 +169,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second pr
(( TEST_RESULT += $? ))
# adding existing probe with '--force' should pass
-NO_OF_PROBES=`$CMD_PERF probe -l | wc -l`
+NO_OF_PROBES=`$CMD_PERF probe -l $TEST_PROBE | wc -l`
$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
PERF_EXIT_CODE=$?
@@ -205,7 +205,7 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+../common/check_all_patterns_found.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
index 09669ec479f2..9d8b5afbeddd 100755
--- a/tools/perf/tests/shell/base_probe/test_basic.sh
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Basic perf probe functionality (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,7 +19,7 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
index 1fedfd8b0d0d..92f7254eb32a 100755
--- a/tools/perf/tests/shell/base_probe/test_invalid_options.sh
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Reject invalid options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -19,9 +19,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### missing argument
@@ -75,5 +78,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
index d8f4bde0f585..20435b6bf6bc 100755
--- a/tools/perf/tests/shell/base_probe/test_line_semantics.sh
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_probe :: Check patterns for line semantics (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
@@ -20,9 +20,12 @@ TEST_RESULT=0
if ! check_kprobes_available; then
print_overall_skipped
- exit 0
+ exit 2
fi
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
### acceptable --line descriptions
@@ -51,5 +54,5 @@ done
# print overall results
-print_overall_results "$TEST_RESULT"
+print_overall_results "$TEST_RESULT" $HINT_FAIL
exit $?
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index 4caa496660c6..b03501b2e8fc 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perftool-testsuite :: perf_report
# SPDX-License-Identifier: GPL-2.0
#
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index 47677cbd4df3..2398eba4d3fd 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-
+# perf_report :: Basic perf report options (exclusive)
# SPDX-License-Identifier: GPL-2.0
#
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
index 075f17623c8e..26c7525651e0 100644
--- a/tools/perf/tests/shell/common/init.sh
+++ b/tools/perf/tests/shell/common/init.sh
@@ -46,10 +46,13 @@ print_results()
print_overall_results()
{
RETVAL="$1"; shift
+ TASK_COMMENT="$*"
+ test -n "$TASK_COMMENT" && TASK_COMMENT=":: $TASK_COMMENT"
+
if [ $RETVAL -eq 0 ]; then
_echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
else
- _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found $TASK_COMMENT"
fi
return $RETVAL
}
@@ -85,7 +88,7 @@ consider_skipping()
# the runmode of a testcase needs to be at least the current suite's runmode
if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
print_overall_skipped
- exit 0
+ exit 2
fi
}
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
index b070e779703e..fa08fd9a5991 100644
--- a/tools/perf/tests/shell/coresight/Makefile
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -24,6 +24,6 @@ CLEANDIRS = $(SUBDIRS:%=clean-%)
clean: $(CLEANDIRS)
$(CLEANDIRS):
- $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(MAKE) -C $(@:clean-%=%) clean >/dev/null
.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
index 2df05052c324..c243731d2fbf 100755
--- a/tools/perf/tests/shell/ftrace.sh
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -67,11 +67,8 @@ test_ftrace_latency() {
test_ftrace_profile() {
echo "perf ftrace profile test"
- perf ftrace profile -m 16M sleep 0.1 > "${output}"
+ perf ftrace profile --graph-opts depth=5 sleep 0.1 > "${output}"
grep ^# "${output}"
- grep sleep "${output}"
- grep schedule "${output}"
- grep execve "${output}"
time_re="[[:space:]]+1[[:digit:]]{5}\.[[:digit:]]{3}"
# 100283.000 100283.000 100283.000 1 __x64_sys_clock_nanosleep
# Check for one *clock_nanosleep line with a Count of just 1 that takes a bit more than 0.1 seconds
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index 8ddb85586131..b066d721f897 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -69,16 +69,16 @@ def check_json_output(expected_items):
for item in json.loads(input):
if expected_items != -1:
count = len(item)
- if count != expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
+ if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
# Events that generate >1 metric may have isolated metric
# values and possibly other prefixes like interval, core,
# aggregate-number, or event-runtime/pcnt-running from multiplexing.
pass
- elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+ elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
pass
- elif count == expected_items + 1 and 'metric-threshold' in item:
+ elif count - 1 in expected_items and 'metric-threshold' in item:
pass
- elif count != expected_items:
+ elif count not in expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
f' in \'{item}\'')
for key, value in item.items():
@@ -90,11 +90,11 @@ def check_json_output(expected_items):
try:
if args.no_args or args.system_wide or args.event:
- expected_items = 7
+ expected_items = [5, 7]
elif args.interval or args.per_thread or args.system_wide_no_aggr:
- expected_items = 8
+ expected_items = [6, 8]
elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
- expected_items = 9
+ expected_items = [7, 9]
else:
# If no option is specified, don't check the number of items.
expected_items = -1
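
Note the lint's expected counts are now small lists such as [5, 7] rather than single integers, so one invocation can accept two valid field counts; presumably the lower value covers output lines that omit a couple of optional fields. The check itself is then a simple membership test, e.g. (hypothetical item):

    expected_items = [5, 7]

    # Hypothetical perf stat JSON line with the optional metric fields absent.
    item = {"counter-value": "1234.0", "unit": "", "event": "cycles",
            "event-runtime": 100, "pcnt-running": 100.0}

    count = len(item)
    if count not in expected_items:
        raise RuntimeError(f"wrong number of fields. counted {count} expected {expected_items}")
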
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
index a0fec33a0358..7b1bfd0f888f 100755
--- a/tools/perf/tests/shell/perftool-testsuite_probe.sh
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perftool-testsuite_probe
+# perftool-testsuite_probe (exclusive)
# SPDX-License-Identifier: GPL-2.0
test -d "$(dirname "$0")/base_probe" || exit 2
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 47a26f25db9f..d5e5193cceb6 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# probe libc's inet_pton & backtrace it with ping
+# probe libc's inet_pton & backtrace it with ping (exclusive)
# Installs a probe on libc's inet_pton function, that will use uprobes,
# then use 'perf trace' on a ping to localhost asking for just one packet
@@ -43,17 +43,8 @@ trace_libc_inet_pton_backtrace() {
echo "((__GI_)?getaddrinfo|text_to_binary_address)\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "(gaih_inet|main)\+0x[[:xdigit:]]+[[:space:]]\(inlined|.*/bin/ping.*\)$" >> $expected
;;
- ppc64|ppc64le)
- eventattr='max-stack=4'
- # Add gaih_inet to expected backtrace only if it is part of libc.
- if nm $libc | grep -F -q gaih_inet.; then
- echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- fi
- echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
- ;;
*)
- eventattr='max-stack=3'
+ eventattr='max-stack=4'
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
@@ -76,14 +67,25 @@ trace_libc_inet_pton_backtrace() {
fi
perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
- exec 3<$perf_script
exec 4<$expected
- while read line <&3 && read -r pattern <&4; do
+ while read -r pattern <&4; do
+ echo "Pattern: $pattern"
[ -z "$pattern" ] && break
- echo $line
- echo "$line" | grep -E -q "$pattern"
- if [ $? -ne 0 ] ; then
- printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+
+ found=0
+
+ # Search lines in the perf script result
+ exec 3<$perf_script
+ while read line <&3; do
+ [ -z "$line" ] && break
+ echo " Matching: $line"
+ ! echo "$line" | grep -E -q "$pattern"
+ found=$?
+ [ $found -eq 1 ] && break
+ done
+
+ if [ $found -ne 1 ] ; then
+ printf "FAIL: Didn't find the expected backtrace entry \"%s\"\n" "$pattern"
return 1
fi
done
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index cbf2894b2c84..0f7967be60af 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
cleanup() {
rm -f "${stat_output}"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 7a8adf81e4b3..68323d636fb7 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -187,7 +187,11 @@ test_hybrid() {
# Run default Perf stat
cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c)
- if [ "$pmus" -ne "$cycles_events" ]
+ # The expectation is that the default output will have a cycles event on
+ # each hybrid PMU. In situations with no cycles PMU events, such as when
+ # virtualized, this can fall back to task-clock and so the end count may
+ # be 0. Fail if neither condition holds.
+ if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
then
echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
err=1
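As a rough illustration (not part of the patch), the default 'perf stat' output counted by the grep above looks like this on a typical hybrid system (counts are made up):

    1,234,567      cpu_core/cycles/
    2,345,678      cpu_atom/cycles/

one cycles line per hybrid PMU, hence the comparison of $cycles_events against $pmus, with 0 now also accepted for virtualized setups that fall back to task-clock.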
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index 3258368634f7..a69aab70dd8a 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -107,7 +107,37 @@ arm_spe_system_wide_test() {
arm_spe_report "SPE system-wide testing" $err
}
+arm_spe_discard_test() {
+ echo "SPE discard mode"
+
+ for f in /sys/bus/event_source/devices/arm_spe_*; do
+ if [ -e "$f/format/discard" ]; then
+ cpu=$(cut -c -1 "$f/cpumask")
+ break
+ fi
+ done
+
+ if [ -z $cpu ]; then
+ arm_spe_report "SPE discard mode not present" 2
+ return
+ fi
+
+ # The test can use a wildcard SPE instance and perf will only open the
+ # event on instances that have that format flag. But make sure the target
+ # runs on an instance with discard mode, otherwise we're not testing
+ # anything.
+ perf record -o ${perfdata} -e arm_spe/discard/ -N -B --no-bpf-event \
+ -- taskset --cpu-list $cpu true
+
+ if perf report -i ${perfdata} --stats | grep 'AUX events\|AUXTRACE events'; then
+ arm_spe_report "SPE discard mode found unexpected data" 1
+ else
+ arm_spe_report "SPE discard mode" 0
+ fi
+}
+
arm_spe_snapshot_test
arm_spe_system_wide_test
+arm_spe_discard_test
exit $glb_err
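For reference (not part of the patch), whether any SPE instance advertises the discard capability the new test relies on can be checked by hand with:

    ls /sys/bus/event_source/devices/arm_spe_*/format/discard 2>/dev/null

which lists the PMU instances exposing the 'discard' format bit used by the arm_spe/discard/ event above.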
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 5f14d0cb013f..e01df7581393 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -30,7 +30,7 @@ test_user_branches() {
echo "Testing user branch stack sampling"
perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstacksym | tr -s ' ' '\n' > $TMPDIR/perf.script
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
@@ -59,7 +59,7 @@ test_filter() {
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstack | tr -s ' ' '\n' | grep '.' > $TMPDIR/perf.script
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index e6f0070975f6..f3a9a040bacc 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -644,6 +644,33 @@ test_pipe()
return 0
}
+test_pause_resume()
+{
+ echo "--- Test with pause / resume ---"
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/aux-action=start-paused/u uname ; then
+ echo "SKIP: pause / resume is not supported"
+ return 2
+ fi
+ if ! perf_record_no_bpf -o "${perfdatafile}" \
+ -e intel_pt/aux-action=start-paused/u \
+ -e instructions/period=50000,aux-action=resume,name=Resume/u \
+ -e instructions/period=100000,aux-action=pause,name=Pause/u uname ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ if ! perf script -i "${perfdatafile}" --itrace=b -Fperiod,event | \
+ awk 'BEGIN {paused=1;branches=0}
+ /Resume/ {paused=0}
+ /branches/ {if (paused) exit 1;branches=1}
+ /Pause/ {paused=1}
+ END {if (!branches) exit 1}' ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
count_result()
{
if [ "$1" -eq 2 ] ; then
@@ -672,6 +699,7 @@ test_power_event || ret=$? ; count_result $ret ; ret=0
test_no_tnt || ret=$? ; count_result $ret ; ret=0
test_event_trace || ret=$? ; count_result $ret ; ret=0
test_pipe || ret=$? ; count_result $ret ; ret=0
+test_pause_resume || ret=$? ; count_result $ret ; ret=0
cleanup
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 7d76fc63d995..e194fcf61df3 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perf script task-analyzer tests
+# perf script task-analyzer tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
index 5a3b8a5a9b5c..8d1e6bbeac90 100755
--- a/tools/perf/tests/shell/trace_btf_enum.sh
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -26,8 +26,12 @@ check_vmlinux() {
trace_landlock() {
echo "Tracing syscall ${syscall}"
- # test flight just to see if landlock_add_rule and libbpf are available
- $TESTPROG
+ # test flight just to see if landlock_add_rule is available
+ if ! perf trace $TESTPROG 2>&1 | grep -q landlock
+ then
+ echo "No landlock system call found, skipping to non-syscall tracing."
+ return
+ fi
if perf trace -e $syscall $TESTPROG 2>&1 | \
grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
new file mode 100755
index 000000000000..e9ee727f3433
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# perf trace BTF general tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+set -e
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+file1=$(mktemp /tmp/file1_XXXX)
+file2=$(echo $file1 | sed 's/file1/file2/g')
+
+buffer="buffer content"
+perf_config_tmp=$(mktemp /tmp/.perfconfig_XXXXX)
+
+trap cleanup EXIT TERM INT HUP
+
+check_vmlinux() {
+ echo "Checking if vmlinux BTF exists"
+ if [ ! -f /sys/kernel/btf/vmlinux ]
+ then
+ echo "Skipped due to missing vmlinux BTF"
+ return 2
+ fi
+ return 0
+}
+
+trace_test_string() {
+ echo "Testing perf trace's string augmentation"
+ if ! perf trace -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1 | \
+ grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
+ then
+ echo "String augmentation test failed"
+ err=1
+ fi
+}
+
+trace_test_buffer() {
+ echo "Testing perf trace's buffer augmentation"
+ # echo will insert a newline (ASCII 10) at the end of the buffer
+ if ! perf trace -e write --max-events=1 -- echo "${buffer}" 2>&1 | \
+ grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
+ then
+ echo "Buffer augmentation test failed"
+ err=1
+ fi
+}
+
+trace_test_struct_btf() {
+ echo "Testing perf trace's struct augmentation"
+ if ! perf trace -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1 | \
+ grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,\}, 0x[0-9a-f]+\) += +[0-9]+$"
+ then
+ echo "BTF struct augmentation test failed"
+ err=1
+ fi
+}
+
+cleanup() {
+ rm -rf ${file1} ${file2} ${perf_config_tmp}
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+
+# don't overwrite user's perf config
+trace_config() {
+ export PERF_CONFIG=${perf_config_tmp}
+ perf config trace.show_arg_names=false trace.show_duration=false \
+ trace.show_timestamp=false trace.args_alignment=0
+}
+
+skip_if_no_perf_trace || exit 2
+check_vmlinux || exit 2
+
+trace_config
+
+trace_test_string
+
+if [ $err = 0 ]; then
+ trace_test_buffer
+fi
+
+if [ $err = 0 ]; then
+ trace_test_struct_btf
+fi
+
+cleanup
+
+exit $err
diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
index e6fd934b027a..a67c756f90b8 100644
--- a/tools/perf/tests/sigtrap.c
+++ b/tools/perf/tests/sigtrap.c
@@ -56,6 +56,7 @@ static struct perf_event_attr make_event_attr(void)
#ifdef HAVE_BPF_SKEL
#include <bpf/btf.h>
+#include <util/btf.h>
static struct btf *btf;
@@ -73,21 +74,6 @@ static void btf__exit(void)
btf = NULL;
}
-static const struct btf_member *__btf_type__find_member_by_name(int type_id, const char *member_name)
-{
- const struct btf_type *t = btf__type_by_id(btf, type_id);
- const struct btf_member *m;
- int i;
-
- for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
- const char *current_member_name = btf__name_by_offset(btf, m->name_off);
- if (!strcmp(current_member_name, member_name))
- return m;
- }
-
- return NULL;
-}
-
static bool attr_has_sigtrap(void)
{
int id;
@@ -101,7 +87,7 @@ static bool attr_has_sigtrap(void)
if (id < 0)
return false;
- return __btf_type__find_member_by_name(id, "sigtrap") != NULL;
+ return __btf_type__find_member_by_name(btf, id, "sigtrap") != NULL;
}
static bool kernel_with_sleepable_spinlocks(void)
@@ -119,7 +105,7 @@ static bool kernel_with_sleepable_spinlocks(void)
return false;
// Only RT has a "lock" member for "struct spinlock"
- member = __btf_type__find_member_by_name(id, "lock");
+ member = __btf_type__find_member_by_name(btf, id, "lock");
if (member == NULL)
return false;
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
index 6468cc0d0204..d60983657bad 100644
--- a/tools/perf/tests/stat.c
+++ b/tools/perf/tests/stat.c
@@ -27,7 +27,7 @@ static int process_stat_config_event(const struct perf_tool *tool __maybe_unused
struct machine *machine __maybe_unused)
{
struct perf_record_stat_config *config = &event->stat_config;
- struct perf_stat_config stat_config = {};
+ struct perf_stat_config test_stat_config = {};
#define HAS(term, val) \
has_term(config, PERF_STAT_CONFIG_TERM__##term, val)
@@ -39,25 +39,27 @@ static int process_stat_config_event(const struct perf_tool *tool __maybe_unused
#undef HAS
- perf_event__read_stat_config(&stat_config, config);
+ perf_event__read_stat_config(&test_stat_config, config);
- TEST_ASSERT_VAL("wrong aggr_mode", stat_config.aggr_mode == AGGR_CORE);
- TEST_ASSERT_VAL("wrong scale", stat_config.scale == 1);
- TEST_ASSERT_VAL("wrong interval", stat_config.interval == 1);
+ TEST_ASSERT_VAL("wrong aggr_mode", test_stat_config.aggr_mode == AGGR_CORE);
+ TEST_ASSERT_VAL("wrong scale", test_stat_config.scale == 1);
+ TEST_ASSERT_VAL("wrong interval", test_stat_config.interval == 1);
return 0;
}
static int test__synthesize_stat_config(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- struct perf_stat_config stat_config = {
+ struct perf_stat_config test_stat_config = {
.aggr_mode = AGGR_CORE,
.scale = 1,
.interval = 1,
};
TEST_ASSERT_VAL("failed to synthesize stat_config",
- !perf_event__synthesize_stat_config(NULL, &stat_config, process_stat_config_event, NULL));
+ !perf_event__synthesize_stat_config(NULL, &test_stat_config,
+ process_stat_config_event,
+ NULL));
return 0;
}
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 5cab17a1942e..576f82a15015 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -583,4 +583,4 @@ out_err:
goto out;
}
-DEFINE_SUITE("Track with sched_switch", switch_tracking);
+DEFINE_SUITE_EXCLUSIVE("Track with sched_switch", switch_tracking);
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
index cf3ae0c1d871..1d5759d08141 100644
--- a/tools/perf/tests/tests-scripts.c
+++ b/tools/perf/tests/tests-scripts.c
@@ -174,7 +174,7 @@ static void append_script(int dir_fd, const char *name, char *desc,
char filename[PATH_MAX], link[128];
struct test_suite *test_suite, **result_tmp;
struct test_case *tests;
- size_t len;
+ ssize_t len;
char *exclusive;
snprintf(link, sizeof(link), "/proc/%d/fd/%d", getpid(), dir_fd);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index cb58b43aa063..8aea344536b8 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -81,6 +81,16 @@ struct test_suite {
.test_cases = tests__##_name, \
}
+#define DEFINE_SUITE_EXCLUSIVE(description, _name) \
+ struct test_case tests__##_name[] = { \
+ TEST_CASE_EXCLUSIVE(description, _name),\
+ { .name = NULL, } \
+ }; \
+ struct test_suite suite__##_name = { \
+ .desc = description, \
+ .test_cases = tests__##_name, \
+ }
+
/* Tests */
DECLARE_SUITE(vmlinux_matches_kallsyms);
DECLARE_SUITE(openat_syscall_event);
diff --git a/tools/perf/tests/workloads/landlock.c b/tools/perf/tests/workloads/landlock.c
index e2b5ef647c09..1f285b7b6236 100644
--- a/tools/perf/tests/workloads/landlock.c
+++ b/tools/perf/tests/workloads/landlock.c
@@ -10,7 +10,7 @@
* 'perf test' workload) we just add the required types and defines here instead
* of including linux/landlock, that isn't available in older systems.
*
- * We are not interested in the the result of the syscall, just in intercepting
+ * We are not interested in the result of the syscall, just in intercepting
* its arguments.
*/
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index 30d3889b2957..b22890b8d272 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -57,7 +57,8 @@ create_arch_errno_table_func()
archlist="$1"
default="$2"
- printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n'
+ printf 'static arch_syscalls__strerrno_t *\n'
+ printf 'arch_syscalls__strerrno_function(const char *arch)\n'
printf '{\n'
for arch in $archlist; do
arch_str=$(arch_string "$arch")
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index d7e727345dab..135d6ce88fb3 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -754,7 +754,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
hbt->timer(hbt->arg);
if (delay_secs != 0) {
- symbol__annotate_decay_histogram(sym, evsel->core.idx);
+ symbol__annotate_decay_histogram(sym, evsel);
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
}
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index e437d7889de6..2d04ece833aa 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,16 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../../builtin.h"
-#include "../../perf.h"
#include "../../util/util.h" // perf_exe()
#include "../util.h"
+#include "../../util/evlist.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
+#include "../../util/session.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../libslang.h"
#include "config.h"
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
+#include <subcmd/exec-cmd.h>
#include <stdlib.h>
#define SCRIPT_NAMELEN 128
@@ -78,6 +80,177 @@ static int scripts_config(const char *var, const char *value, void *data)
}
/*
+ * Some scripts specify the required events in their "xxx-record" file;
+ * this function checks whether the events in perf.data match those
+ * mentioned in the "xxx-record".
+ *
+ * Fixme: all existing "xxx-record" files use the simple "-e event" format,
+ * which is covered well now. New parsing code should be added to cover
+ * future, more complex formats such as event groups.
+ */
+static int check_ev_match(int dir_fd, const char *scriptname, struct perf_session *session)
+{
+ char line[BUFSIZ];
+ FILE *fp;
+
+ {
+ char filename[FILENAME_MAX + 5];
+ int fd;
+
+ scnprintf(filename, sizeof(filename), "bin/%s-record", scriptname);
+ fd = openat(dir_fd, filename, O_RDONLY);
+ if (fd == -1)
+ return -1;
+ fp = fdopen(fd, "r");
+ if (!fp)
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), fp)) {
+ char *p = skip_spaces(line);
+
+ if (*p == '#')
+ continue;
+
+ while (strlen(p)) {
+ int match, len;
+ struct evsel *pos;
+ char evname[128];
+
+ p = strstr(p, "-e");
+ if (!p)
+ break;
+
+ p += 2;
+ p = skip_spaces(p);
+ len = strcspn(p, " \t");
+ if (!len)
+ break;
+
+ snprintf(evname, len + 1, "%s", p);
+
+ match = 0;
+ evlist__for_each_entry(session->evlist, pos) {
+ if (evsel__name_is(pos, evname)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (!match) {
+ fclose(fp);
+ return -1;
+ }
+ }
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+/*
+ * Return -1 if none is found, otherwise the number of scripts found.
+ *
+ * Currently the only user of this function is the script browser, which
+ * will list all statically runnable scripts, select one, execute it and
+ * show the output in a perf browser.
+ */
+static int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen)
+{
+ struct dirent *script_dirent, *lang_dirent;
+ int scripts_dir_fd, lang_dir_fd;
+ DIR *scripts_dir, *lang_dir;
+ struct perf_session *session;
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
+ char *temp;
+ int i = 0;
+ const char *exec_path = get_argv_exec_path();
+
+ session = perf_session__new(&data, NULL);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ {
+ char scripts_path[PATH_MAX];
+
+ snprintf(scripts_path, sizeof(scripts_path), "%s/scripts", exec_path);
+ scripts_dir_fd = open(scripts_path, O_DIRECTORY);
+ if (scripts_dir_fd == -1) {
+ pr_err("Failed to open directory '%s'\n", scripts_path);
+ perf_session__delete(session);
+ return -1;
+ }
+ }
+ scripts_dir = fdopendir(scripts_dir_fd);
+ if (!scripts_dir) {
+ close(scripts_dir_fd);
+ perf_session__delete(session);
+ return -1;
+ }
+
+ while ((lang_dirent = readdir(scripts_dir)) != NULL) {
+ if (lang_dirent->d_type != DT_DIR &&
+ (lang_dirent->d_type == DT_UNKNOWN &&
+ !is_directory_at(scripts_dir_fd, lang_dirent->d_name)))
+ continue;
+ if (!strcmp(lang_dirent->d_name, ".") || !strcmp(lang_dirent->d_name, ".."))
+ continue;
+
+#ifndef HAVE_LIBPERL_SUPPORT
+ if (strstr(lang_dirent->d_name, "perl"))
+ continue;
+#endif
+#ifndef HAVE_LIBPYTHON_SUPPORT
+ if (strstr(lang_dirent->d_name, "python"))
+ continue;
+#endif
+
+ lang_dir_fd = openat(scripts_dir_fd, lang_dirent->d_name, O_DIRECTORY);
+ if (lang_dir_fd == -1)
+ continue;
+ lang_dir = fdopendir(lang_dir_fd);
+ if (!lang_dir) {
+ close(lang_dir_fd);
+ continue;
+ }
+ while ((script_dirent = readdir(lang_dir)) != NULL) {
+ if (script_dirent->d_type == DT_DIR)
+ continue;
+ if (script_dirent->d_type == DT_UNKNOWN &&
+ is_directory_at(lang_dir_fd, script_dirent->d_name))
+ continue;
+ /* Skip those real time scripts: xxxtop.p[yl] */
+ if (strstr(script_dirent->d_name, "top."))
+ continue;
+ if (i >= num)
+ break;
+ scnprintf(scripts_path_array[i], pathlen, "%s/scripts/%s/%s",
+ exec_path,
+ lang_dirent->d_name,
+ script_dirent->d_name);
+ temp = strchr(script_dirent->d_name, '.');
+ snprintf(scripts_array[i],
+ (temp - script_dirent->d_name) + 1,
+ "%s", script_dirent->d_name);
+
+ if (check_ev_match(lang_dir_fd, scripts_array[i], session))
+ continue;
+
+ i++;
+ }
+ closedir(lang_dir);
+ }
+
+ closedir(scripts_dir);
+ perf_session__delete(session);
+ return i;
+}
+
+/*
* When success, will copy the full path of the selected script
* into the buffer pointed by script_name, and return 0.
* Return -1 on failure.
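For context on check_ev_match() above: the "xxx-record" files it parses are small shell wrappers under scripts/<lang>/bin/. As an illustration (check the tree for the exact contents), scripts/python/bin/syscall-counts-record is essentially:

    #!/bin/bash
    perf record -e raw_syscalls:sys_enter $@

so the parser only has to pick out the event name following each "-e" and match it against the evsels in the perf.data session.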
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 6da24aa039eb..8920e298420a 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -3,6 +3,7 @@
#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
+#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/dso.h"
@@ -26,7 +27,7 @@ static const char *const col_names[] = {
};
static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
- struct disasm_line *dl, int evidx)
+ struct disasm_line *dl, const struct evsel *evsel)
{
struct annotation *notes;
struct sym_hist *symhist;
@@ -42,8 +43,8 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
return 0;
notes = symbol__annotation(sym);
- symhist = annotation__histogram(notes, evidx);
- entry = annotated_source__hist_entry(notes->src, evidx, dl->al.offset);
+ symhist = annotation__histogram(notes, evsel);
+ entry = annotated_source__hist_entry(notes->src, evsel, dl->al.offset);
if (entry)
nr_samples = entry->nr_samples;
@@ -139,16 +140,17 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
gtk_list_store_append(store, &iter);
if (evsel__is_group_event(evsel)) {
- for (i = 0; i < evsel->core.nr_members; i++) {
+ struct evsel *cur_evsel;
+
+ for_each_group_evsel(cur_evsel, evsel__leader(evsel)) {
ret += perf_gtk__get_percent(s + ret,
sizeof(s) - ret,
sym, pos,
- evsel->core.idx + i);
+ cur_evsel);
ret += scnprintf(s + ret, sizeof(s) - ret, " ");
}
} else {
- ret = perf_gtk__get_percent(s, sizeof(s), sym, pos,
- evsel->core.idx);
+ ret = perf_gtk__get_percent(s, sizeof(s), sym, pos, evsel);
}
if (ret)
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index e5491995adf0..34fda1d5eccb 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -121,7 +121,7 @@ int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
const char *fmtstr, hpp_snprint_fn print_fn,
enum perf_hpp_fmt_type fmtype)
{
- int len = fmt->user_len ?: fmt->len;
+ int len = max(fmt->user_len ?: fmt->len, (int)strlen(fmt->name));
if (symbol_conf.field_sep) {
return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index c06d2ee9024c..5ec97e8d6b6d 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -86,7 +86,7 @@ perf-util-y += pmu-bison.o
perf-util-y += hwmon_pmu.o
perf-util-y += tool_pmu.o
perf-util-y += svghelper.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event-info.o
+perf-util-y += trace-event-info.o
perf-util-y += trace-event-scripting.o
perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event.o
perf-util-$(CONFIG_LIBTRACEEVENT) += trace-event-parse.o
@@ -121,8 +121,10 @@ perf-util-y += spark.o
perf-util-y += topdown.o
perf-util-y += iostat.o
perf-util-y += stream.o
+perf-util-y += kvm-stat.o
+perf-util-y += lock-contention.o
perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
-perf-util-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-util-y += intel-pt-decoder/
perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
perf-util-$(CONFIG_AUXTRACE) += arm-spe.o
@@ -168,6 +170,7 @@ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-flex.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-bison.o
+perf-util-$(CONFIG_PERF_BPF_SKEL) += btf.o
ifeq ($(CONFIG_LIBTRACEEVENT),y)
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 32e15c9f53f3..31bb326b07a6 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -209,7 +209,7 @@ static int __symbol__account_cycles(struct cyc_hist *ch,
}
static int __symbol__inc_addr_samples(struct map_symbol *ms,
- struct annotated_source *src, int evidx, u64 addr,
+ struct annotated_source *src, struct evsel *evsel, u64 addr,
struct perf_sample *sample)
{
struct symbol *sym = ms->sym;
@@ -228,14 +228,14 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
}
offset = addr - sym->start;
- h = annotated_source__histogram(src, evidx);
+ h = annotated_source__histogram(src, evsel);
if (h == NULL) {
pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
return -ENOMEM;
}
- hash_key = offset << 16 | evidx;
+ hash_key = offset << 16 | evsel->core.idx;
if (!hashmap__find(src->samples, hash_key, &entry)) {
entry = zalloc(sizeof(*entry));
if (entry == NULL)
@@ -252,7 +252,7 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
- sym->start, sym->name, addr, addr - sym->start, evidx,
+ sym->start, sym->name, addr, addr - sym->start, evsel->core.idx,
entry->nr_samples, entry->period);
return 0;
}
@@ -323,7 +323,7 @@ static int symbol__inc_addr_samples(struct map_symbol *ms,
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->core.nr_entries);
- return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
+ return src ? __symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0;
}
static int symbol__account_br_cntr(struct annotated_branch *branch,
@@ -861,15 +861,14 @@ static void calc_percent(struct annotation *notes,
s64 offset, s64 end)
{
struct hists *hists = evsel__hists(evsel);
- int evidx = evsel->core.idx;
- struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
+ struct sym_hist *sym_hist = annotation__histogram(notes, evsel);
unsigned int hits = 0;
u64 period = 0;
while (offset < end) {
struct sym_hist_entry *entry;
- entry = annotated_source__hist_entry(notes->src, evidx, offset);
+ entry = annotated_source__hist_entry(notes->src, evsel, offset);
if (entry) {
hits += entry->nr_samples;
period += entry->period;
@@ -1140,15 +1139,14 @@ static void print_summary(struct rb_root *root, const char *filename)
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
- int evidx = evsel->core.idx;
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct sym_hist *h = annotation__histogram(notes, evsel);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset) {
struct sym_hist_entry *entry;
- entry = annotated_source__hist_entry(notes->src, evidx, offset);
+ entry = annotated_source__hist_entry(notes->src, evsel, offset);
if (entry && entry->nr_samples != 0)
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
sym->start + offset, entry->nr_samples);
@@ -1178,7 +1176,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
const char *d_filename;
const char *evsel_name = evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
+ struct sym_hist *h = annotation__histogram(notes, evsel);
struct annotation_line *pos, *queue = NULL;
struct annotation_options *opts = &annotate_opts;
u64 start = map__rip_2objdump(map, sym->start);
@@ -1364,18 +1362,18 @@ out_free_filename:
return err;
}
-void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct sym_hist *h = annotation__histogram(notes, evsel);
memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
}
-void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct sym_hist *h = annotation__histogram(notes, evsel);
struct annotation_line *al;
h->nr_samples = 0;
@@ -1385,7 +1383,7 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
if (al->offset == -1)
continue;
- entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
+ entry = annotated_source__hist_entry(notes->src, evsel, al->offset);
if (entry == NULL)
continue;
@@ -2102,6 +2100,57 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
return 0;
}
+const char * const perf_disassembler__strs[] = {
+ [PERF_DISASM_UNKNOWN] = "unknown",
+ [PERF_DISASM_LLVM] = "llvm",
+ [PERF_DISASM_CAPSTONE] = "capstone",
+ [PERF_DISASM_OBJDUMP] = "objdump",
+};
+
+
+static void annotation_options__add_disassembler(struct annotation_options *options,
+ enum perf_disassembler dis)
+{
+ for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
+ if (options->disassemblers[i] == dis) {
+ /* Disassembler is already present, don't add it again. */
+ return;
+ }
+ if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
+ /* Found a free slot. */
+ options->disassemblers[i] = dis;
+ return;
+ }
+ }
+ pr_err("Failed to add disassembler %d\n", dis);
+}
+
+static int annotation_options__add_disassemblers_str(struct annotation_options *options,
+ const char *str)
+{
+ while (str && *str != '\0') {
+ const char *comma = strchr(str, ',');
+ int len = comma ? comma - str : (int)strlen(str);
+ bool match = false;
+
+ for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
+ const char *dis_str = perf_disassembler__strs[i];
+
+ if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
+ annotation_options__add_disassembler(options, i);
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ pr_err("Invalid disassembler '%.*s'\n", len, str);
+ return -1;
+ }
+ str = comma ? comma + 1 : NULL;
+ }
+ return 0;
+}
+
static int annotation__config(const char *var, const char *value, void *data)
{
struct annotation_options *opt = data;
@@ -2117,11 +2166,10 @@ static int annotation__config(const char *var, const char *value, void *data)
else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
} else if (!strcmp(var, "annotate.disassemblers")) {
- opt->disassemblers_str = strdup(value);
- if (!opt->disassemblers_str) {
- pr_err("Not enough memory for annotate.disassemblers\n");
- return -1;
- }
+ int err = annotation_options__add_disassemblers_str(opt, value);
+
+ if (err)
+ return err;
} else if (!strcmp(var, "annotate.hide_src_code")) {
opt->hide_src_code = perf_config_bool("hide_src_code", value);
} else if (!strcmp(var, "annotate.jump_arrows")) {
@@ -2187,9 +2235,25 @@ void annotation_options__exit(void)
zfree(&annotate_opts.objdump_path);
}
+static void annotation_options__default_init_disassemblers(struct annotation_options *options)
+{
+ if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
+ /* Already initialized. */
+ return;
+ }
+#ifdef HAVE_LIBLLVM_SUPPORT
+ annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
+#endif
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
+#endif
+ annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
+}
+
void annotation_config__init(void)
{
perf_config(annotation__config, &annotate_opts);
+ annotation_options__default_init_disassemblers(&annotate_opts);
}
static unsigned int parse_percent_type(char *str1, char *str2)
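With the parsing added above, the preferred disassembler order can be set through perf config; a minimal illustration (not part of the patch):

    perf config annotate.disassemblers=llvm,capstone,objdump

The accepted names are those in perf_disassembler__strs ("llvm", "capstone", "objdump"); anything else now fails with "Invalid disassembler" instead of being stored unchecked in the old disassemblers_str string.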
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 194a05cbc506..98db1b88daf4 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -15,6 +15,7 @@
#include "hashmap.h"
#include "disasm.h"
#include "branch.h"
+#include "evsel.h"
struct hist_browser_timer;
struct hist_entry;
@@ -23,7 +24,6 @@ struct map_symbol;
struct addr_map_symbol;
struct option;
struct perf_sample;
-struct evsel;
struct symbol;
struct annotated_data_type;
@@ -34,8 +34,13 @@ struct annotated_data_type;
#define ANNOTATION__BR_CNTR_WIDTH 30
#define ANNOTATION_DUMMY_LEN 256
-// llvm, capstone, objdump
-#define MAX_DISASSEMBLERS 3
+enum perf_disassembler {
+ PERF_DISASM_UNKNOWN = 0,
+ PERF_DISASM_LLVM,
+ PERF_DISASM_CAPSTONE,
+ PERF_DISASM_OBJDUMP,
+};
+#define MAX_DISASSEMBLERS (PERF_DISASM_OBJDUMP + 1)
struct annotation_options {
bool hide_src_code,
@@ -52,14 +57,12 @@ struct annotation_options {
annotate_src,
full_addr;
u8 offset_level;
- u8 nr_disassemblers;
+ u8 disassemblers[MAX_DISASSEMBLERS];
int min_pcnt;
int max_lines;
int context;
char *objdump_path;
char *disassembler_style;
- const char *disassemblers_str;
- const char *disassemblers[MAX_DISASSEMBLERS];
const char *prefix;
const char *prefix_strip;
unsigned int percent_type;
@@ -134,6 +137,8 @@ struct disasm_line {
struct annotation_line al;
};
+extern const char * const perf_disassembler__strs[];
+
void annotation_line__add(struct annotation_line *al, struct list_head *head);
static inline double annotation_data__percent(struct annotation_data *data,
@@ -373,21 +378,23 @@ static inline u8 annotation__br_cntr_width(void)
void annotation__update_column_widths(struct annotation *notes);
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms);
-static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx)
+static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src,
+ const struct evsel *evsel)
{
- return &src->histograms[idx];
+ return &src->histograms[evsel->core.idx];
}
-static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+static inline struct sym_hist *annotation__histogram(struct annotation *notes,
+ const struct evsel *evsel)
{
- return annotated_source__histogram(notes->src, idx);
+ return annotated_source__histogram(notes->src, evsel);
}
static inline struct sym_hist_entry *
-annotated_source__hist_entry(struct annotated_source *src, int idx, u64 offset)
+annotated_source__hist_entry(struct annotated_source *src, const struct evsel *evsel, u64 offset)
{
struct sym_hist_entry *entry;
- long key = offset << 16 | idx;
+ long key = offset << 16 | evsel->core.idx;
if (!hashmap__find(src->samples, key, &entry))
return NULL;
@@ -441,6 +448,7 @@ enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP,
SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE,
SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF,
+ SYMBOL_ANNOTATE_ERRNO__COULDNT_DETERMINE_FILE_TYPE,
__SYMBOL_ANNOTATE_ERRNO__END,
};
@@ -448,8 +456,8 @@ enum symbol_disassemble_errno {
int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen);
int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel);
-void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
-void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel);
+void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel);
void annotated_source__purge(struct annotated_source *as);
int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel);
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 358c611eeddb..4bcd627e859f 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -67,6 +67,15 @@ enum arm_spe_common_data_source {
ARM_SPE_COMMON_DS_DRAM = 0xe,
};
+enum arm_spe_ampereone_data_source {
+ ARM_SPE_AMPEREONE_LOCAL_CHIP_CACHE_OR_DEVICE = 0x0,
+ ARM_SPE_AMPEREONE_SLC = 0x3,
+ ARM_SPE_AMPEREONE_REMOTE_CHIP_CACHE = 0x5,
+ ARM_SPE_AMPEREONE_DDR = 0x7,
+ ARM_SPE_AMPEREONE_L1D = 0x8,
+ ARM_SPE_AMPEREONE_L2D = 0x9,
+};
+
struct arm_spe_record {
enum arm_spe_sample_type type;
int err;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index dbf13f47879c..12761c39788f 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -103,6 +103,18 @@ struct arm_spe_queue {
u32 flags;
};
+struct data_source_handle {
+ const struct midr_range *midr_ranges;
+ void (*ds_synth)(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src);
+};
+
+#define DS(range, func) \
+ { \
+ .midr_ranges = range, \
+ .ds_synth = arm_spe__synth_##func, \
+ }
+
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
unsigned char *buf, size_t len)
{
@@ -443,6 +455,11 @@ static const struct midr_range common_ds_encoding_cpus[] = {
{},
};
+static const struct midr_range ampereone_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+ {},
+};
+
static void arm_spe__sample_flags(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
@@ -532,6 +549,49 @@ static void arm_spe__synth_data_source_common(const struct arm_spe_record *recor
}
}
+/*
+ * Source is IMPDEF. Here we convert the source encoding used on AmpereOne cores
+ * to the common one (Neoverse, Cortex) to avoid duplicating the decoding code.
+ */
+static void arm_spe__synth_data_source_ampereone(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ struct arm_spe_record common_record;
+
+ switch (record->source) {
+ case ARM_SPE_AMPEREONE_LOCAL_CHIP_CACHE_OR_DEVICE:
+ common_record.source = ARM_SPE_COMMON_DS_PEER_CORE;
+ break;
+ case ARM_SPE_AMPEREONE_SLC:
+ common_record.source = ARM_SPE_COMMON_DS_SYS_CACHE;
+ break;
+ case ARM_SPE_AMPEREONE_REMOTE_CHIP_CACHE:
+ common_record.source = ARM_SPE_COMMON_DS_REMOTE;
+ break;
+ case ARM_SPE_AMPEREONE_DDR:
+ common_record.source = ARM_SPE_COMMON_DS_DRAM;
+ break;
+ case ARM_SPE_AMPEREONE_L1D:
+ common_record.source = ARM_SPE_COMMON_DS_L1D;
+ break;
+ case ARM_SPE_AMPEREONE_L2D:
+ common_record.source = ARM_SPE_COMMON_DS_L2;
+ break;
+ default:
+ pr_warning_once("AmpereOne: Unknown data source (0x%x)\n",
+ record->source);
+ return;
+ }
+
+ common_record.op = record->op;
+ arm_spe__synth_data_source_common(&common_record, data_src);
+}
+
+static const struct data_source_handle data_source_handles[] = {
+ DS(common_ds_encoding_cpus, data_source_common),
+ DS(ampereone_ds_encoding_cpus, data_source_ampereone),
+};
+
static void arm_spe__synth_memory_level(const struct arm_spe_record *record,
union perf_mem_data_src *data_src)
{
@@ -555,12 +615,14 @@ static void arm_spe__synth_memory_level(const struct arm_spe_record *record,
data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
}
-static bool arm_spe__is_common_ds_encoding(struct arm_spe_queue *speq)
+static bool arm_spe__synth_ds(struct arm_spe_queue *speq,
+ const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
{
struct arm_spe *spe = speq->spe;
- bool is_in_cpu_list;
u64 *metadata = NULL;
- u64 midr = 0;
+ u64 midr;
+ unsigned int i;
/* Metadata version 1 assumes all CPUs are the same (old behavior) */
if (spe->metadata_ver == 1) {
@@ -592,18 +654,20 @@ static bool arm_spe__is_common_ds_encoding(struct arm_spe_queue *speq)
midr = metadata[ARM_SPE_CPU_MIDR];
}
- is_in_cpu_list = is_midr_in_range_list(midr, common_ds_encoding_cpus);
- if (is_in_cpu_list)
- return true;
- else
- return false;
+ for (i = 0; i < ARRAY_SIZE(data_source_handles); i++) {
+ if (is_midr_in_range_list(midr, data_source_handles[i].midr_ranges)) {
+ data_source_handles[i].ds_synth(record, data_src);
+ return true;
+ }
+ }
+
+ return false;
}
static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
const struct arm_spe_record *record)
{
union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
- bool is_common = arm_spe__is_common_ds_encoding(speq);
if (record->op & ARM_SPE_OP_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
@@ -612,9 +676,7 @@ static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
else
return 0;
- if (is_common)
- arm_spe__synth_data_source_common(record, &data_src);
- else
+ if (!arm_spe__synth_ds(speq, record, &data_src))
arm_spe__synth_memory_level(record, &data_src);
if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
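The data_source_handles[] table introduced above is the extension point here: supporting another core's IMPDEF source encoding should, presumably, only require its own midr_range list plus a conversion helper, registered with one more DS() entry, with arm_spe__synth_ds() left untouched.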
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index ca8682966fae..4d1633d87eff 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -810,19 +810,76 @@ no_opt:
return auxtrace_validate_aux_sample_size(evlist, opts);
}
-void auxtrace_regroup_aux_output(struct evlist *evlist)
+static struct aux_action_opt {
+ const char *str;
+ u32 aux_action;
+ bool aux_event_opt;
+} aux_action_opts[] = {
+ {"start-paused", BIT(0), true},
+ {"pause", BIT(1), false},
+ {"resume", BIT(2), false},
+ {.str = NULL},
+};
+
+static const struct aux_action_opt *auxtrace_parse_aux_action_str(const char *str)
+{
+ const struct aux_action_opt *opt;
+
+ if (!str)
+ return NULL;
+
+ for (opt = aux_action_opts; opt->str; opt++)
+ if (!strcmp(str, opt->str))
+ return opt;
+
+ return NULL;
+}
+
+int auxtrace_parse_aux_action(struct evlist *evlist)
{
- struct evsel *evsel, *aux_evsel = NULL;
struct evsel_config_term *term;
+ struct evsel *aux_evsel = NULL;
+ struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel__is_aux_event(evsel))
+ bool is_aux_event = evsel__is_aux_event(evsel);
+ const struct aux_action_opt *opt;
+
+ if (is_aux_event)
aux_evsel = evsel;
- term = evsel__get_config_term(evsel, AUX_OUTPUT);
+ term = evsel__get_config_term(evsel, AUX_ACTION);
+ if (!term) {
+ if (evsel__get_config_term(evsel, AUX_OUTPUT))
+ goto regroup;
+ continue;
+ }
+ opt = auxtrace_parse_aux_action_str(term->val.str);
+ if (!opt) {
+ pr_err("Bad aux-action '%s'\n", term->val.str);
+ return -EINVAL;
+ }
+ if (opt->aux_event_opt && !is_aux_event) {
+ pr_err("aux-action '%s' can only be used with AUX area event\n",
+ term->val.str);
+ return -EINVAL;
+ }
+ if (!opt->aux_event_opt && is_aux_event) {
+ pr_err("aux-action '%s' cannot be used for AUX area event itself\n",
+ term->val.str);
+ return -EINVAL;
+ }
+ evsel->core.attr.aux_action = opt->aux_action;
+regroup:
/* If possible, group with the AUX event */
- if (term && aux_evsel)
+ if (aux_evsel)
evlist__regroup(evlist, aux_evsel, evsel);
+ if (!evsel__is_aux_event(evsel__leader(evsel))) {
+ pr_err("Events with aux-action must have AUX area event group leader\n");
+ return -EINVAL;
+ }
}
+
+ return 0;
}
struct auxtrace_record *__weak
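A usage sketch of the aux-action terms validated above, mirroring the test_intel_pt.sh test added earlier in this series (the periods and the choice of the instructions event are just an illustration):

    perf record -e intel_pt/aux-action=start-paused/u \
                -e instructions/period=50000,aux-action=resume/u \
                -e instructions/period=100000,aux-action=pause/u -- uname

Per the checks above, start-paused is only valid on the AUX area event, pause/resume only on non-AUX events, and the group leader must be the AUX area event (perf regroups them automatically when possible).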
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index dddaf4f3ffed..b0db84d27b25 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -578,7 +578,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts, const char *str);
-void auxtrace_regroup_aux_output(struct evlist *evlist);
+int auxtrace_parse_aux_action(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts);
@@ -799,8 +799,10 @@ int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
}
static inline
-void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
+int auxtrace_parse_aux_action(struct evlist *evlist __maybe_unused)
{
+ pr_err("AUX area tracing not supported\n");
+ return -EINVAL;
}
static inline
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 13608237c50e..c81444059ad0 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -289,7 +289,10 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
}
info_node->info_linear = info_linear;
- perf_env__insert_bpf_prog_info(env, info_node);
+ if (!perf_env__insert_bpf_prog_info(env, info_node)) {
+ free(info_linear);
+ free(info_node);
+ }
info_linear = NULL;
/*
@@ -480,7 +483,10 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (info_node) {
info_node->info_linear = info_linear;
- perf_env__insert_bpf_prog_info(env, info_node);
+ if (!perf_env__insert_bpf_prog_info(env, info_node)) {
+ free(info_linear);
+ free(info_node);
+ }
} else
free(info_linear);
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 06d1c4018407..25fc280e414a 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -11,6 +11,7 @@
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
+#include "util/stat.h"
#include "util/bpf_skel/func_latency.skel.h"
@@ -36,6 +37,9 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
return -1;
}
+ skel->rodata->bucket_range = ftrace->bucket_range;
+ skel->rodata->min_latency = ftrace->min_latency;
+
/* don't need to set cpu filter for system-wide mode */
if (ftrace->target.cpu_list) {
ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
@@ -83,6 +87,8 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
}
}
+ skel->bss->min = INT64_MAX;
+
skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
false, func->name);
if (IS_ERR(skel->links.func_begin)) {
@@ -119,7 +125,7 @@ int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
}
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
- int buckets[])
+ int buckets[], struct stats *stats)
{
int i, fd, err;
u32 idx;
@@ -143,6 +149,13 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
buckets[idx] += hist[i];
}
+ if (skel->bss->count) {
+ stats->mean = skel->bss->total / skel->bss->count;
+ stats->n = skel->bss->count;
+ stats->max = skel->bss->max;
+ stats->min = skel->bss->min;
+ }
+
free(hist);
return 0;
}
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 6c7126b7670d..5cff755c71fa 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -285,7 +285,7 @@ static int add_work(struct perf_kwork *kwork,
(bpf_trace->get_work_name(key, &tmp.name)))
return -1;
- work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+ work = kwork->add_work(kwork, tmp.class, &tmp);
if (work == NULL)
return -1;
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index 7261cad43468..b6f187dd9136 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -255,7 +255,7 @@ static int add_work(struct perf_kwork *kwork, struct work_key *key,
bpf_trace = kwork_class_bpf_supported_list[type];
tmp.class = bpf_trace->class;
- work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+ work = kwork->add_work(kwork, tmp.class, &tmp);
if (!work)
return -1;
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 41a1ad087895..fc8666222399 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -2,6 +2,7 @@
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
+#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
@@ -12,17 +13,106 @@
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
+#include <bpf/btf.h>
#include <inttypes.h>
#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"
static struct lock_contention_bpf *skel;
+static bool has_slab_iter;
+static struct hashmap slab_hash;
+
+static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
+{
+ return key;
+}
+
+static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
+
+static void check_slab_cache_iter(struct lock_contention *con)
+{
+ struct btf *btf = btf__load_vmlinux_btf();
+ s32 ret;
+
+ hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);
+
+ if (btf == NULL) {
+ pr_debug("BTF loading failed: %s\n", strerror(errno));
+ return;
+ }
+
+ ret = btf__find_by_name_kind(btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
+ if (ret < 0) {
+ bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
+ pr_debug("slab cache iterator is not available: %d\n", ret);
+ goto out;
+ }
+
+ has_slab_iter = true;
+
+ bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
+out:
+ btf__free(btf);
+}
+
+static void run_slab_cache_iter(void)
+{
+ int fd;
+ char buf[256];
+ long key, *prev_key;
+
+ if (!has_slab_iter)
+ return;
+
+ fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
+ if (fd < 0) {
+ pr_debug("cannot create slab cache iter: %d\n", fd);
+ return;
+ }
+
+ /* This will run the bpf program */
+ while (read(fd, buf, sizeof(buf)) > 0)
+ continue;
+
+ close(fd);
+
+ /* Read the slab cache map and build a hash with IDs */
+ fd = bpf_map__fd(skel->maps.slab_caches);
+ prev_key = NULL;
+ while (!bpf_map_get_next_key(fd, prev_key, &key)) {
+ struct slab_cache_data *data;
+
+ data = malloc(sizeof(*data));
+ if (data == NULL)
+ break;
+
+ if (bpf_map_lookup_elem(fd, &key, data) < 0)
+ break;
+
+ hashmap__add(&slab_hash, data->id, data);
+ prev_key = &key;
+ }
+}
+
+static void exit_slab_cache_iter(void)
+{
+ struct hashmap_entry *cur;
+ unsigned bkt;
+
+ hashmap__for_each_entry(&slab_hash, cur, bkt)
+ free(cur->pvalue);
+
+ hashmap__clear(&slab_hash);
+}
int lock_contention_prepare(struct lock_contention *con)
{
int i, fd;
- int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
+ int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1, nslabs = 1;
struct evlist *evlist = con->evlist;
struct target *target = con->target;
@@ -109,6 +199,15 @@ int lock_contention_prepare(struct lock_contention *con)
skel->rodata->use_cgroup_v2 = 1;
}
+ check_slab_cache_iter(con);
+
+ if (con->filters->nr_slabs && has_slab_iter) {
+ skel->rodata->has_slab = 1;
+ nslabs = con->filters->nr_slabs;
+ }
+
+ bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);
+
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
return -1;
@@ -179,6 +278,36 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
lock_contention_bpf__attach(skel);
+
+ /* run the slab iterator after attaching */
+ run_slab_cache_iter();
+
+ if (con->filters->nr_slabs) {
+ u8 val = 1;
+ int cache_fd;
+ long key, *prev_key;
+
+ fd = bpf_map__fd(skel->maps.slab_filter);
+
+ /* Read the slab cache map and build a hash with its address */
+ cache_fd = bpf_map__fd(skel->maps.slab_caches);
+ prev_key = NULL;
+ while (!bpf_map_get_next_key(cache_fd, prev_key, &key)) {
+ struct slab_cache_data data;
+
+ if (bpf_map_lookup_elem(cache_fd, &key, &data) < 0)
+ break;
+
+ for (i = 0; i < con->filters->nr_slabs; i++) {
+ if (!strcmp(con->filters->slabs[i], data.name)) {
+ bpf_map_update_elem(fd, &key, &val, BPF_ANY);
+ break;
+ }
+ }
+ prev_key = &key;
+ }
+ }
+
return 0;
}
@@ -347,6 +476,7 @@ static const char *lock_contention_get_name(struct lock_contention *con,
if (con->aggr_mode == LOCK_AGGR_ADDR) {
int lock_fd = bpf_map__fd(skel->maps.lock_syms);
+ struct slab_cache_data *slab_data;
/* per-process locks set upper bits of the flags */
if (flags & LCD_F_MMAP_LOCK)
@@ -365,6 +495,12 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "rq_lock";
}
+ /* look up slab_hash for dynamic locks in a slab object */
+ if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
+ snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
+ return name_buf;
+ }
+
return "";
}
@@ -458,7 +594,7 @@ int lock_contention_read(struct lock_contention *con)
if (con->save_callstack) {
bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);
- if (!match_callstack_filter(machine, stack_trace)) {
+ if (!match_callstack_filter(machine, stack_trace, con->max_stack)) {
con->nr_filtered += data.count;
goto next;
}
@@ -539,5 +675,7 @@ int lock_contention_finish(struct lock_contention *con)
cgroup__put(cgrp);
}
+ exit_slab_cache_iter();
+
return 0;
}
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index a590a8ac1f9d..4269b41d1771 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -100,6 +100,11 @@ static void check_sched_switch_args(void)
const struct btf_type *t1, *t2, *t3;
u32 type_id;
+ if (!btf) {
+ pr_debug("Missing btf, check if CONFIG_DEBUG_INFO_BTF is enabled\n");
+ goto cleanup;
+ }
+
type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
BTF_KIND_TYPEDEF);
if ((s32)type_id < 0)
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 4a62ed593e84..e4352881e3fa 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -431,9 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
{
bool augmented, do_output = false;
- int zero = 0, size, aug_size, index,
- value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ int zero = 0, index, value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
+ s64 aug_size, size;
unsigned int nr, *beauty_map;
struct beauty_payload_enter *payload;
void *arg, *payload_offset;
@@ -484,14 +484,11 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
} else if (size > 0 && size <= value_size) { /* struct */
if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, size, arg))
augmented = true;
- } else if (size < 0 && size >= -6) { /* buffer */
+ } else if ((int)size < 0 && size >= -6) { /* buffer */
index = -(size + 1);
barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
index &= 7; // Satisfy the bounds checking with the verifier in some kernels.
- aug_size = args->args[index];
-
- if (aug_size > TRACE_AUG_MAX_BUF)
- aug_size = TRACE_AUG_MAX_BUF;
+ aug_size = args->args[index] > TRACE_AUG_MAX_BUF ? TRACE_AUG_MAX_BUF : args->args[index];
if (aug_size > 0) {
if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, aug_size, arg))
diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c
index f613dc9cb123..fb144811b34f 100644
--- a/tools/perf/util/bpf_skel/func_latency.bpf.c
+++ b/tools/perf/util/bpf_skel/func_latency.bpf.c
@@ -38,9 +38,18 @@ struct {
int enabled = 0;
+// stats
+__s64 total;
+__s64 count;
+__s64 max;
+__s64 min;
+
const volatile int has_cpu = 0;
const volatile int has_task = 0;
const volatile int use_nsec = 0;
+const volatile unsigned int bucket_range;
+const volatile unsigned int min_latency;
+const volatile unsigned int max_latency;
SEC("kprobe/func")
int BPF_PROG(func_begin)
@@ -92,7 +101,7 @@ int BPF_PROG(func_end)
start = bpf_map_lookup_elem(&functime, &tid);
if (start) {
__s64 delta = bpf_ktime_get_ns() - *start;
- __u32 key;
+ __u32 key = 0;
__u64 *hist;
bpf_map_delete_elem(&functime, &tid);
@@ -100,17 +109,52 @@ int BPF_PROG(func_end)
if (delta < 0)
return 0;
+ if (bucket_range != 0) {
+ delta /= cmp_base;
+
+ if (min_latency > 0) {
+ if (delta > min_latency)
+ delta -= min_latency;
+ else
+ goto do_lookup;
+ }
+
+ // Less than 1 unit (ms or ns), or, in the future,
+ // than the min latency desired.
+ if (delta > 0) { // 1st entry: [ 1 unit .. bucket_range units )
+ // clang 12 doesn't like s64 / u32 division
+ key = (__u64)delta / bucket_range + 1;
+ if (key >= NUM_BUCKET ||
+ delta >= max_latency - min_latency)
+ key = NUM_BUCKET - 1;
+ }
+
+ delta += min_latency;
+ goto do_lookup;
+ }
// calculate index using delta
for (key = 0; key < (NUM_BUCKET - 1); key++) {
if (delta < (cmp_base << key))
break;
}
+do_lookup:
hist = bpf_map_lookup_elem(&latency, &key);
if (!hist)
return 0;
*hist += 1;
+
+ if (bucket_range == 0)
+ delta /= cmp_base;
+
+ __sync_fetch_and_add(&total, delta);
+ __sync_fetch_and_add(&count, 1);
+
+ if (delta > max)
+ max = delta;
+ if (delta < min)
+ min = delta;
}
return 0;
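A quick worked example of the new linear bucketing, using made-up parameters: with bucket_range=10, min_latency=100 and max_latency=1000, a delta of 137 units (after the cmp_base division) first becomes 137 - 100 = 37, giving key = 37 / 10 + 1 = 4; a delta at or above 1000 (i.e. 900 after the subtraction) is clamped into the overflow bucket NUM_BUCKET - 1, and a delta at or below min_latency keeps the initial key of 0.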
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
index 594da91965a2..73e32e063030 100644
--- a/tools/perf/util/bpf_skel/kwork_top.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -18,7 +18,9 @@ enum kwork_class_type {
};
#define MAX_ENTRIES 102400
-#define MAX_NR_CPUS 2048
+#ifndef MAX_NR_CPUS
+#define MAX_NR_CPUS 4096
+#endif
#define PF_KTHREAD 0x00200000
#define MAX_COMMAND_LEN 16
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 1069bda5d733..6533ea9b044c 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -100,6 +100,20 @@ struct {
__uint(max_entries, 1);
} cgroup_filter SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(long));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} slab_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(long));
+ __uint(value_size, sizeof(struct slab_cache_data));
+ __uint(max_entries, 1);
+} slab_caches SEC(".maps");
+
struct rw_semaphore___old {
struct task_struct *owner;
} __attribute__((preserve_access_index));
@@ -116,12 +130,15 @@ struct mm_struct___new {
struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;
+
/* control flags */
const volatile int has_cpu;
const volatile int has_task;
const volatile int has_type;
const volatile int has_addr;
const volatile int has_cgroup;
+const volatile int has_slab;
const volatile int needs_callstack;
const volatile int stack_skip;
const volatile int lock_owner;
@@ -136,6 +153,8 @@ int perf_subsys_id = -1;
__u64 end_ts;
+__u32 slab_cache_id;
+
/* error stat */
int task_fail;
int stack_fail;
@@ -202,7 +221,7 @@ static inline int can_record(u64 *ctx)
__u64 addr = ctx[0];
ok = bpf_map_lookup_elem(&addr_filter, &addr);
- if (!ok)
+ if (!ok && !has_slab)
return 0;
}
@@ -215,6 +234,17 @@ static inline int can_record(u64 *ctx)
return 0;
}
+ if (has_slab && bpf_get_kmem_cache) {
+ __u8 *ok;
+ __u64 addr = ctx[0];
+ long kmem_cache_addr;
+
+ kmem_cache_addr = (long)bpf_get_kmem_cache(addr);
+ ok = bpf_map_lookup_elem(&slab_filter, &kmem_cache_addr);
+ if (!ok)
+ return 0;
+ }
+
return 1;
}
@@ -487,8 +517,28 @@ int contention_end(u64 *ctx)
};
int err;
- if (aggr_mode == LOCK_AGGR_ADDR)
- first.flags |= check_lock_type(pelem->lock, pelem->flags);
+ if (aggr_mode == LOCK_AGGR_ADDR) {
+ first.flags |= check_lock_type(pelem->lock,
+ pelem->flags & LCB_F_TYPE_MASK);
+
+ /* Check if it's from a slab object */
+ if (bpf_get_kmem_cache) {
+ struct kmem_cache *s;
+ struct slab_cache_data *d;
+
+ s = bpf_get_kmem_cache(pelem->lock);
+ if (s != NULL) {
+ /*
+ * Save the ID of the slab cache in the flags
+ * (instead of the full address) to save space
+ * in the contention_data.
+ */
+ d = bpf_map_lookup_elem(&slab_caches, &s);
+ if (d != NULL)
+ first.flags |= d->id;
+ }
+ }
+ }
err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
if (err < 0) {
@@ -563,4 +613,43 @@ int BPF_PROG(end_timestamp)
return 0;
}
+/*
+ * bpf_iter__kmem_cache was added recently, so old kernels don't have it in
+ * their vmlinux.h. But we cannot simply add it here either, since that would
+ * cause a compiler error due to redefinition of the struct on later kernels.
+ *
+ * So use a CO-RE trick to access the member only if the type exists. This
+ * supports both old and new kernels without compiler errors.
+ */
+struct bpf_iter__kmem_cache___new {
+ struct kmem_cache *s;
+} __attribute__((preserve_access_index));
+
+SEC("iter/kmem_cache")
+int slab_cache_iter(void *ctx)
+{
+ struct kmem_cache *s = NULL;
+ struct slab_cache_data d;
+ const char *nameptr;
+
+ if (bpf_core_type_exists(struct bpf_iter__kmem_cache)) {
+ struct bpf_iter__kmem_cache___new *iter = ctx;
+
+ s = iter->s;
+ }
+
+ if (s == NULL)
+ return 0;
+
+ nameptr = s->name;
+ bpf_probe_read_kernel_str(d.name, sizeof(d.name), nameptr);
+
+ d.id = ++slab_cache_id << LCB_F_SLAB_ID_SHIFT;
+ if (d.id >= LCB_F_SLAB_ID_END)
+ return 0;
+
+ bpf_map_update_elem(&slab_caches, &s, &d, BPF_NOEXIST);
+ return 0;
+}
+
char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index de12892f992f..c15f734d7fc4 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -32,7 +32,15 @@ struct contention_task_data {
#define LCD_F_MMAP_LOCK (1U << 31)
#define LCD_F_SIGHAND_LOCK (1U << 30)
-#define LCB_F_MAX_FLAGS (1U << 7)
+#define LCB_F_SLAB_ID_SHIFT 16
+#define LCB_F_SLAB_ID_START (1U << 16)
+#define LCB_F_SLAB_ID_END (1U << 26)
+#define LCB_F_SLAB_ID_MASK 0x03FF0000U
+
+#define LCB_F_TYPE_MAX (1U << 7)
+#define LCB_F_TYPE_MASK 0x0000007FU
+
+#define SLAB_NAME_MAX 28
struct contention_data {
u64 total_time;
@@ -54,4 +62,9 @@ enum lock_class_sym {
LOCK_CLASS_RQLOCK,
};
+struct slab_cache_data {
+ u32 id;
+ char name[SLAB_NAME_MAX];
+};
+
#endif /* UTIL_BPF_SKEL_LOCK_DATA_H */
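The LCB_F_SLAB_ID_* macros reserve bits 16-25 of the contention-data flags for a small slab-cache ID (handed out by the slab_cache_iter program above), while the low 7 bits keep carrying the lock type. A tiny user-space illustration of packing and unpacking those bits (in the skeleton the ID is stored pre-shifted; shifting at OR time, as here, is equivalent):

#include <stdio.h>

#define LCB_F_SLAB_ID_SHIFT	16
#define LCB_F_SLAB_ID_MASK	0x03FF0000U
#define LCB_F_TYPE_MASK		0x0000007FU

int main(void)
{
	unsigned int slab_id = 5;
	unsigned int flags = 0x21;			/* some lock-type bits */

	flags |= slab_id << LCB_F_SLAB_ID_SHIFT;	/* as in contention_end() */

	printf("type=%#x slab_id=%u\n",
	       flags & LCB_F_TYPE_MASK,
	       (flags & LCB_F_SLAB_ID_MASK) >> LCB_F_SLAB_ID_SHIFT);
	return 0;
}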
diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
index 4dcad7b682bd..7b81d3173917 100644
--- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
+++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
@@ -195,4 +195,12 @@ struct bpf_perf_event_data_kern {
*/
struct rq {};
+struct kmem_cache {
+ const char *name;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__kmem_cache {
+ struct kmem_cache *s;
+} __attribute__((preserve_access_index));
+
#endif // __VMLINUX_H
diff --git a/tools/perf/util/btf.c b/tools/perf/util/btf.c
new file mode 100644
index 000000000000..bb163fe87767
--- /dev/null
+++ b/tools/perf/util/btf.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Copyright (C) 2024, Red Hat, Inc
+ */
+
+#include <bpf/btf.h>
+#include <util/btf.h>
+#include <string.h>
+
+const struct btf_member *__btf_type__find_member_by_name(struct btf *btf,
+ int type_id, const char *member_name)
+{
+ const struct btf_type *t = btf__type_by_id(btf, type_id);
+ const struct btf_member *m;
+ int i;
+
+ for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+ const char *current_member_name = btf__name_by_offset(btf, m->name_off);
+
+ if (!strcmp(current_member_name, member_name))
+ return m;
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/btf.h b/tools/perf/util/btf.h
new file mode 100644
index 000000000000..05e6e5bf23d6
--- /dev/null
+++ b/tools/perf/util/btf.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_UTIL_BTF
+#define __PERF_UTIL_BTF 1
+
+struct btf;
+struct btf_member;
+
+const struct btf_member *__btf_type__find_member_by_name(struct btf *btf,
+ int type_id, const char *member_name);
+#endif // __PERF_UTIL_BTF
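__btf_type__find_member_by_name() walks the members of a BTF struct or union type and returns the one whose name matches. A hypothetical caller using libbpf's vmlinux BTF loader is sketched below; the struct and member looked up are arbitrary examples, and the snippet only links together with perf's util/btf.c:

#include <bpf/btf.h>
#include <stdio.h>
#include "util/btf.h"

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_member *m;
	int type_id;

	if (btf == NULL)
		return 1;

	type_id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
	if (type_id >= 0) {
		m = __btf_type__find_member_by_name(btf, type_id, "flags");
		if (m != NULL)
			printf("task_struct::flags at bit offset %u\n", m->offset);
	}

	btf__free(btf);
	return 0;
}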
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 0f759dd96db7..fbcc0626f9ce 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -473,7 +473,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
leader = NULL;
evlist__for_each_entry(orig_list, pos) {
- evsel = evsel__clone(pos);
+ evsel = evsel__clone(/*dest=*/NULL, pos);
if (evsel == NULL)
goto out_err;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 68f9407ca74b..2d07c9257a1a 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <sys/param.h>
#include "cache.h"
#include "callchain.h"
+#include "header.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
@@ -34,6 +35,22 @@
#define DEBUG_CACHE_DIR ".debug"
+#define METRIC_ONLY_LEN 20
+
+struct perf_stat_config stat_config = {
+ .aggr_mode = AGGR_GLOBAL,
+ .aggr_level = MAX_CACHE_LVL + 1,
+ .scale = true,
+ .unit_width = 4, /* strlen("unit") */
+ .run_count = 1,
+ .metric_only_len = METRIC_ONLY_LEN,
+ .walltime_nsecs_stats = &walltime_nsecs_stats,
+ .ru_stats = &ru_stats,
+ .big_num = true,
+ .ctl_fd = -1,
+ .ctl_fd_ack = -1,
+ .iostat_run = false,
+};
char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
@@ -455,6 +472,16 @@ static int perf_ui_config(const char *var, const char *value)
return 0;
}
+void perf_stat__set_big_num(int set)
+{
+ stat_config.big_num = (set != 0);
+}
+
+static void perf_stat__set_no_csv_summary(int set)
+{
+ stat_config.no_csv_summary = (set != 0);
+}
+
static int perf_stat_config(const char *var, const char *value)
{
if (!strcmp(var, "stat.big-num"))
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index 9971313d61c1..a727c95cb119 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -50,6 +50,7 @@ int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
const char *var, const char *value);
void perf_config__exit(void);
void perf_config__refresh(void);
+int perf_config__set_variable(const char *var, const char *value);
/**
* perf_config_sections__for_each - iterate thru all the sections
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 27094211edd8..5c329ad614e9 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -293,7 +293,7 @@ struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
die = cpu__get_die_id(cpu);
/* There is no die_id on legacy system. */
- if (die == -1)
+ if (die < 0)
die = 0;
/*
@@ -322,7 +322,7 @@ struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
struct aggr_cpu_id id;
/* There is no cluster_id on legacy system. */
- if (cluster == -1)
+ if (cluster < 0)
cluster = 0;
id = aggr_cpu_id__die(cpu, data);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index f0599c61fab4..5e7ff09fbc95 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -426,8 +426,9 @@ static int add_tracepoint_values(struct ctf_writer *cw,
struct evsel *evsel,
struct perf_sample *sample)
{
- struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
- struct tep_format_field *fields = evsel->tp_format->format.fields;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *common_fields = tp_format->format.common_fields;
+ struct tep_format_field *fields = tp_format->format.fields;
int ret;
ret = add_tracepoint_fields_values(cw, event_class, event,
@@ -1064,8 +1065,9 @@ static int add_tracepoint_types(struct ctf_writer *cw,
struct evsel *evsel,
struct bt_ctf_event_class *class)
{
- struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
- struct tep_format_field *fields = evsel->tp_format->format.fields;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *common_fields = tp_format ? tp_format->format.common_fields : NULL;
+ struct tep_format_field *fields = tp_format ? tp_format->format.fields : NULL;
int ret;
ret = add_tracepoint_fields_types(cw, common_fields, class);
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 8304cd2d4a9c..d9f805bf6fb0 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -230,12 +230,12 @@ static int process_sample_event(const struct perf_tool *tool,
#ifdef HAVE_LIBTRACEEVENT
if (sample->raw_data) {
- int i;
- struct tep_format_field **fields;
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field **fields = tp_format ? tep_event_fields(tp_format) : NULL;
- fields = tep_event_fields(evsel->tp_format);
if (fields) {
- i = 0;
+ int i = 0;
+
while (fields[i]) {
struct trace_seq s;
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 41a2b08670dc..50c5c206b70e 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -1245,6 +1245,9 @@ int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, s
scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
dso__long_name(dso));
break;
+ case SYMBOL_ANNOTATE_ERRNO__COULDNT_DETERMINE_FILE_TYPE:
+ scnprintf(buf, buflen, "Couldn't determine the file %s type.", dso__long_name(dso));
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -2213,56 +2216,6 @@ out_free_command:
return err;
}
-static int annotation_options__init_disassemblers(struct annotation_options *options)
-{
- char *disassembler;
-
- if (options->disassemblers_str == NULL) {
- const char *default_disassemblers_str =
-#ifdef HAVE_LIBLLVM_SUPPORT
- "llvm,"
-#endif
-#ifdef HAVE_LIBCAPSTONE_SUPPORT
- "capstone,"
-#endif
- "objdump";
-
- options->disassemblers_str = strdup(default_disassemblers_str);
- if (!options->disassemblers_str)
- goto out_enomem;
- }
-
- disassembler = strdup(options->disassemblers_str);
- if (disassembler == NULL)
- goto out_enomem;
-
- while (1) {
- char *comma = strchr(disassembler, ',');
-
- if (comma != NULL)
- *comma = '\0';
-
- options->disassemblers[options->nr_disassemblers++] = strim(disassembler);
-
- if (comma == NULL)
- break;
-
- disassembler = comma + 1;
-
- if (options->nr_disassemblers >= MAX_DISASSEMBLERS) {
- pr_debug("annotate.disassemblers can have at most %d entries, ignoring \"%s\"\n",
- MAX_DISASSEMBLERS, disassembler);
- break;
- }
- }
-
- return 0;
-
-out_enomem:
- pr_err("Not enough memory for annotate.disassemblers\n");
- return -1;
-}
-
int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *options = args->options;
@@ -2271,7 +2224,6 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
char symfs_filename[PATH_MAX];
bool delete_extract = false;
struct kcore_extract kce;
- const char *disassembler;
bool decomp = false;
int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
@@ -2289,7 +2241,7 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
} else if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
return symbol__disassemble_bpf_image(sym, args);
} else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) {
- return -1;
+ return SYMBOL_ANNOTATE_ERRNO__COULDNT_DETERMINE_FILE_TYPE;
} else if (dso__is_kcore(dso)) {
kce.addr = map__rip_2objdump(map, sym->start);
kce.kcore_filename = symfs_filename;
@@ -2331,28 +2283,26 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
}
}
- err = annotation_options__init_disassemblers(options);
- if (err)
- goto out_remove_tmp;
-
err = -1;
+ for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers) && err != 0; i++) {
+ enum perf_disassembler dis = options->disassemblers[i];
- for (int i = 0; i < options->nr_disassemblers && err != 0; ++i) {
- disassembler = options->disassemblers[i];
-
- if (!strcmp(disassembler, "llvm"))
+ switch (dis) {
+ case PERF_DISASM_LLVM:
err = symbol__disassemble_llvm(symfs_filename, sym, args);
- else if (!strcmp(disassembler, "capstone"))
+ break;
+ case PERF_DISASM_CAPSTONE:
err = symbol__disassemble_capstone(symfs_filename, sym, args);
- else if (!strcmp(disassembler, "objdump"))
+ break;
+ case PERF_DISASM_OBJDUMP:
err = symbol__disassemble_objdump(symfs_filename, sym, args);
- else
- pr_debug("Unknown disassembler %s, skipping...\n", disassembler);
- }
-
- if (err == 0) {
- pr_debug("Disassembled with %s\nannotate.disassemblers=%s\n",
- disassembler, options->disassemblers_str);
+ break;
+ case PERF_DISASM_UNKNOWN: /* End of disassemblers. */
+ default:
+ goto out_remove_tmp;
+ }
+ if (err == 0)
+ pr_debug("Disassembled with %s\n", perf_disassembler__strs[dis]);
}
out_remove_tmp:
if (decomp)
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 7d180bdaedbc..ddacef881af2 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -234,7 +234,8 @@ static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
struct machine *machine = maps__machine(thread__maps(al->thread));
if (machine)
- script_fetch_insn(d->sample, al->thread, machine);
+ script_fetch_insn(d->sample, al->thread, machine,
+ /*native_arch=*/true);
}
}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index e2843ca2edd9..cae4f6d63318 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -24,15 +24,19 @@ struct perf_env perf_env;
#include "bpf-utils.h"
#include <bpf/libbpf.h>
-void perf_env__insert_bpf_prog_info(struct perf_env *env,
+bool perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
{
+ bool ret;
+
down_write(&env->bpf_progs.lock);
- __perf_env__insert_bpf_prog_info(env, info_node);
+ ret = __perf_env__insert_bpf_prog_info(env, info_node);
up_write(&env->bpf_progs.lock);
+
+ return ret;
}
-void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
@@ -50,13 +54,14 @@ void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
- return;
+ return false;
}
}
rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
+ return true;
}
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -326,10 +331,13 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
for (idx = 0; idx < nr_cpus; ++idx) {
struct perf_cpu cpu = { .cpu = idx };
+ int core_id = cpu__get_core_id(cpu);
+ int socket_id = cpu__get_socket_id(cpu);
+ int die_id = cpu__get_die_id(cpu);
- env->cpu[idx].core_id = cpu__get_core_id(cpu);
- env->cpu[idx].socket_id = cpu__get_socket_id(cpu);
- env->cpu[idx].die_id = cpu__get_die_id(cpu);
+ env->cpu[idx].core_id = core_id >= 0 ? core_id : -1;
+ env->cpu[idx].socket_id = socket_id >= 0 ? socket_id : -1;
+ env->cpu[idx].die_id = die_id >= 0 ? die_id : -1;
}
env->nr_cpus_avail = nr_cpus;
@@ -472,15 +480,19 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+#if defined(HAVE_LIBTRACEEVENT)
+#include "trace/beauty/arch_errno_names.c"
+#endif
+
const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
{
-#if defined(HAVE_SYSCALL_TABLE_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
if (env->arch_strerrno == NULL)
env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));
return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
#else
- return "!(HAVE_SYSCALL_TABLE_SUPPORT && HAVE_LIBTRACEEVENT)";
+ return "!HAVE_LIBTRACEEVENT";
#endif
}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index ae604c4edbb7..d90e343cf1fa 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -56,8 +56,6 @@ struct pmu_caps {
typedef const char *(arch_syscalls__strerrno_t)(int err);
-arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);
-
struct perf_env {
char *hostname;
char *os_release;
@@ -176,9 +174,9 @@ const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
void perf_env__init(struct perf_env *env);
-void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
-void perf_env__insert_bpf_prog_info(struct perf_env *env,
+bool perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d22c5df1701e..bc144388f892 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -395,6 +395,7 @@ void evsel__init(struct evsel *evsel,
evsel->group_pmu_name = NULL;
evsel->skippable = false;
evsel->alternate_hw_config = PERF_COUNT_HW_MAX;
+ evsel->script_output_type = -1; // FIXME: OUTPUT_TYPE_UNSET, see builtin-script.c
}
struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -454,7 +455,7 @@ static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
* The assumption is that @orig is not configured nor opened yet.
* So we only care about the attributes that can be set while it's parsed.
*/
-struct evsel *evsel__clone(struct evsel *orig)
+struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
{
struct evsel *evsel;
@@ -467,7 +468,11 @@ struct evsel *evsel__clone(struct evsel *orig)
if (orig->bpf_obj)
return NULL;
- evsel = evsel__new(&orig->core.attr);
+ if (dest)
+ evsel = dest;
+ else
+ evsel = evsel__new(&orig->core.attr);
+
if (evsel == NULL)
return NULL;
@@ -512,11 +517,12 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->core.leader = orig->core.leader;
evsel->max_events = orig->max_events;
- free((char *)evsel->unit);
- evsel->unit = strdup(orig->unit);
- if (evsel->unit == NULL)
- goto out_err;
-
+ zfree(&evsel->unit);
+ if (orig->unit) {
+ evsel->unit = strdup(orig->unit);
+ if (evsel->unit == NULL)
+ goto out_err;
+ }
evsel->scale = orig->scale;
evsel->snapshot = orig->snapshot;
evsel->per_pkg = orig->per_pkg;
@@ -544,53 +550,101 @@ out_err:
return NULL;
}
+static int trace_event__id(const char *sys, const char *name)
+{
+ char *tp_dir = get_events_file(sys);
+ char path[PATH_MAX];
+ int id, err;
+
+ if (!tp_dir)
+ return -1;
+
+ scnprintf(path, PATH_MAX, "%s/%s/id", tp_dir, name);
+ put_events_file(tp_dir);
+ err = filename__read_int(path, &id);
+ if (err)
+ return err;
+
+ return id;
+}
+
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format)
{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
+ };
struct evsel *evsel = zalloc(perf_evsel__object.size);
- int err = -ENOMEM;
+ int err = -ENOMEM, id = -1;
- if (evsel == NULL) {
+ if (evsel == NULL)
goto out_err;
- } else {
- struct perf_event_attr attr = {
- .type = PERF_TYPE_TRACEPOINT,
- .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
- PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
- };
- if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
- goto out_free;
- event_attr_init(&attr);
+ if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
+ goto out_free;
- if (format) {
- evsel->tp_format = trace_event__tp_format(sys, name);
- if (IS_ERR(evsel->tp_format)) {
- err = PTR_ERR(evsel->tp_format);
- goto out_free;
- }
- attr.config = evsel->tp_format->id;
- } else {
- attr.config = (__u64) -1;
- }
+#ifdef HAVE_LIBTRACEEVENT
+ evsel->tp_sys = strdup(sys);
+ if (!evsel->tp_sys)
+ goto out_free;
+ evsel->tp_name = strdup(name);
+ if (!evsel->tp_name)
+ goto out_free;
+#endif
- attr.sample_period = 1;
- evsel__init(evsel, &attr, idx);
- }
+ event_attr_init(&attr);
+ if (format) {
+ id = trace_event__id(sys, name);
+ if (id < 0) {
+ err = id;
+ goto out_free;
+ }
+ }
+ attr.config = (__u64)id;
+ attr.sample_period = 1;
+ evsel__init(evsel, &attr, idx);
return evsel;
out_free:
zfree(&evsel->name);
+#ifdef HAVE_LIBTRACEEVENT
+ zfree(&evsel->tp_sys);
+ zfree(&evsel->tp_name);
+#endif
free(evsel);
out_err:
return ERR_PTR(err);
}
+
+#ifdef HAVE_LIBTRACEEVENT
+struct tep_event *evsel__tp_format(struct evsel *evsel)
+{
+ struct tep_event *tp_format = evsel->tp_format;
+
+ if (tp_format)
+ return tp_format;
+
+ if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
+ return NULL;
+
+ tp_format = trace_event__tp_format(evsel->tp_sys, evsel->tp_name);
+ if (IS_ERR(tp_format)) {
+ int err = -PTR_ERR(tp_format);
+
+ pr_err("Error getting tracepoint format '%s' '%s'(%d)\n",
+ evsel__name(evsel), strerror(err), err);
+ return NULL;
+ }
+ evsel->tp_format = tp_format;
+ return evsel->tp_format;
+}
#endif
const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
@@ -1103,6 +1157,9 @@ static void evsel__apply_config_terms(struct evsel *evsel,
case EVSEL__CONFIG_TERM_AUX_OUTPUT:
attr->aux_output = term->val.aux_output ? 1 : 0;
break;
+ case EVSEL__CONFIG_TERM_AUX_ACTION:
+ /* Already applied by auxtrace */
+ break;
case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
/* Already applied by auxtrace */
break;
@@ -1587,6 +1644,10 @@ void evsel__exit(struct evsel *evsel)
perf_thread_map__put(evsel->core.threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
+#ifdef HAVE_LIBTRACEEVENT
+ zfree(&evsel->tp_sys);
+ zfree(&evsel->tp_name);
+#endif
zfree(&evsel->filter);
zfree(&evsel->group_pmu_name);
zfree(&evsel->unit);
@@ -2090,16 +2151,17 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
return err;
}
-static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
+static bool __has_attr_feature(struct perf_event_attr *attr,
+ struct perf_cpu cpu, unsigned long flags)
{
- int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
+ int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
/*group_fd=*/-1, flags);
close(fd);
if (fd < 0) {
attr->exclude_kernel = 1;
- fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
/*group_fd=*/-1, flags);
close(fd);
}
@@ -2107,7 +2169,7 @@ static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
if (fd < 0) {
attr->exclude_hv = 1;
- fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
/*group_fd=*/-1, flags);
close(fd);
}
@@ -2115,7 +2177,7 @@ static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
if (fd < 0) {
attr->exclude_guest = 1;
- fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
/*group_fd=*/-1, flags);
close(fd);
}
@@ -2127,6 +2189,13 @@ static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
return fd >= 0;
}
+static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
+{
+ struct perf_cpu cpu = {.cpu = -1};
+
+ return __has_attr_feature(attr, cpu, flags);
+}
+
static void evsel__detect_missing_pmu_features(struct evsel *evsel)
{
struct perf_event_attr attr = {
@@ -2215,7 +2284,65 @@ found:
errno = old_errno;
}
-static bool evsel__detect_missing_features(struct evsel *evsel)
+static bool evsel__probe_aux_action(struct evsel *evsel, struct perf_cpu cpu)
+{
+ struct perf_event_attr attr = evsel->core.attr;
+ int old_errno = errno;
+
+ attr.disabled = 1;
+ attr.aux_start_paused = 1;
+
+ if (__has_attr_feature(&attr, cpu, /*flags=*/0)) {
+ errno = old_errno;
+ return true;
+ }
+
+ /*
+ * EOPNOTSUPP means the kernel supports the feature but the PMU does
+ * not, so keep that distinction if possible.
+ */
+ if (errno != EOPNOTSUPP)
+ errno = old_errno;
+
+ return false;
+}
+
+static void evsel__detect_missing_aux_action_feature(struct evsel *evsel, struct perf_cpu cpu)
+{
+ static bool detection_done;
+ struct evsel *leader;
+
+ /*
+ * Don't bother probing aux_action if it is not being used or has been
+ * probed before.
+ */
+ if (!evsel->core.attr.aux_action || detection_done)
+ return;
+
+ detection_done = true;
+
+ /*
+ * The leader is an AUX area event. If it has failed, assume the feature
+ * is not supported.
+ */
+ leader = evsel__leader(evsel);
+ if (evsel == leader) {
+ perf_missing_features.aux_action = true;
+ return;
+ }
+
+ /*
+ * AUX area event with aux_action must have been opened successfully
+ * already, so feature is supported.
+ */
+ if (leader->core.attr.aux_action)
+ return;
+
+ if (!evsel__probe_aux_action(leader, cpu))
+ perf_missing_features.aux_action = true;
+}
+
+static bool evsel__detect_missing_features(struct evsel *evsel, struct perf_cpu cpu)
{
static bool detection_done = false;
struct perf_event_attr attr = {
@@ -2225,6 +2352,8 @@ static bool evsel__detect_missing_features(struct evsel *evsel)
};
int old_errno;
+ evsel__detect_missing_aux_action_feature(evsel, cpu);
+
evsel__detect_missing_pmu_features(evsel);
if (evsel__has_br_stack(evsel))
@@ -2439,6 +2568,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
int idx, thread, nthreads;
int pid = -1, err, old_errno;
enum rlimit_action set_rlimit = NO_CHANGE;
+ struct perf_cpu cpu;
if (evsel__is_retire_lat(evsel))
return tpebs_start(evsel->evlist);
@@ -2476,6 +2606,7 @@ fallback_missing_features:
}
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
+ cpu = perf_cpu_map__cpu(cpus, idx);
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
@@ -2496,10 +2627,9 @@ retry_open:
/* Debug message used by test scripts */
pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
+ pid, cpu.cpu, group_fd, evsel->open_flags);
- fd = sys_perf_event_open(&evsel->core.attr, pid,
- perf_cpu_map__cpu(cpus, idx).cpu,
+ fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu,
group_fd, evsel->open_flags);
FD(evsel, idx, thread) = fd;
@@ -2515,8 +2645,7 @@ retry_open:
bpf_counter__install_pe(evsel, idx, fd);
if (unlikely(test_attr__enabled())) {
- test_attr__open(&evsel->core.attr, pid,
- perf_cpu_map__cpu(cpus, idx),
+ test_attr__open(&evsel->core.attr, pid, cpu,
fd, group_fd, evsel->open_flags);
}
@@ -2571,7 +2700,7 @@ try_fallback:
if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
goto retry_open;
- if (err == -EINVAL && evsel__detect_missing_features(evsel))
+ if (err == -EINVAL && evsel__detect_missing_features(evsel, cpu))
goto fallback_missing_features;
if (evsel__precise_ip_fallback(evsel))
@@ -3218,12 +3347,16 @@ u16 evsel__id_hdr_size(const struct evsel *evsel)
#ifdef HAVE_LIBTRACEEVENT
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
- return tep_find_field(evsel->tp_format, name);
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ return tp_format ? tep_find_field(tp_format, name) : NULL;
}
struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
{
- return tep_find_common_field(evsel->tp_format, name);
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ return tp_format ? tep_find_common_field(tp_format, name) : NULL;
}
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
@@ -3448,6 +3581,78 @@ static bool find_process(const char *name)
return ret ? false : true;
}
+static int dump_perf_event_processes(char *msg, size_t size)
+{
+ DIR *proc_dir;
+ struct dirent *proc_entry;
+ int printed = 0;
+
+ proc_dir = opendir(procfs__mountpoint());
+ if (!proc_dir)
+ return 0;
+
+ /* Walk through the /proc directory. */
+ while ((proc_entry = readdir(proc_dir)) != NULL) {
+ char buf[256];
+ DIR *fd_dir;
+ struct dirent *fd_entry;
+ int fd_dir_fd;
+
+ if (proc_entry->d_type != DT_DIR ||
+ !isdigit(proc_entry->d_name[0]) ||
+ strlen(proc_entry->d_name) > sizeof(buf) - 4)
+ continue;
+
+ scnprintf(buf, sizeof(buf), "%s/fd", proc_entry->d_name);
+ fd_dir_fd = openat(dirfd(proc_dir), buf, O_DIRECTORY);
+ if (fd_dir_fd == -1)
+ continue;
+ fd_dir = fdopendir(fd_dir_fd);
+ if (!fd_dir) {
+ close(fd_dir_fd);
+ continue;
+ }
+ while ((fd_entry = readdir(fd_dir)) != NULL) {
+ ssize_t link_size;
+
+ if (fd_entry->d_type != DT_LNK)
+ continue;
+ link_size = readlinkat(fd_dir_fd, fd_entry->d_name, buf, sizeof(buf));
+ if (link_size < 0)
+ continue;
+ /* Take care as readlink doesn't null terminate the string. */
+ if (!strncmp(buf, "anon_inode:[perf_event]", link_size)) {
+ int cmdline_fd;
+ ssize_t cmdline_size;
+
+ scnprintf(buf, sizeof(buf), "%s/cmdline", proc_entry->d_name);
+ cmdline_fd = openat(dirfd(proc_dir), buf, O_RDONLY);
+ if (cmdline_fd == -1)
+ continue;
+ cmdline_size = read(cmdline_fd, buf, sizeof(buf) - 1);
+ close(cmdline_fd);
+ if (cmdline_size < 0)
+ continue;
+ buf[cmdline_size] = '\0';
+ for (ssize_t i = 0; i < cmdline_size; i++) {
+ if (buf[i] == '\0')
+ buf[i] = ' ';
+ }
+
+ if (printed == 0)
+ printed += scnprintf(msg, size, "Possible processes:\n");
+
+ printed += scnprintf(msg + printed, size - printed,
+ "%s %s\n", proc_entry->d_name, buf);
+ break;
+ }
+ }
+ closedir(fd_dir);
+ }
+ closedir(proc_dir);
+ return printed;
+}
+
int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
char *msg __maybe_unused,
size_t size __maybe_unused)
@@ -3481,7 +3686,7 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
printed += scnprintf(msg, size,
"No permission to enable %s event.\n\n", evsel__name(evsel));
- return scnprintf(msg + printed, size - printed,
+ return printed + scnprintf(msg + printed, size - printed,
"Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
"access to performance monitoring and observability operations for processes\n"
"without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
@@ -3526,6 +3731,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support 'aux_output' feature",
evsel__name(evsel));
+ if (evsel->core.attr.aux_action)
+ return scnprintf(msg, size,
+ "%s: PMU Hardware doesn't support 'aux_action' feature",
+ evsel__name(evsel));
if (evsel->core.attr.sample_period != 0)
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
@@ -3544,6 +3753,11 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size,
"The PMU counters are busy/taken by another profiler.\n"
"We found oprofile daemon running, please stop it and try again.");
+ printed += scnprintf(
+ msg, size,
+ "The PMU %s counters are busy and in use by another process.\n",
+ evsel->pmu ? evsel->pmu->name : "");
+ return printed + dump_perf_event_processes(msg + printed, size - printed);
break;
case EINVAL:
if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
@@ -3556,6 +3770,8 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size, "clockid feature not supported.");
if (perf_missing_features.clockid_wrong)
return scnprintf(msg, size, "wrong clockid (%d).", clockid);
+ if (perf_missing_features.aux_action)
+ return scnprintf(msg, size, "The 'aux_action' feature is not supported, update the kernel.");
if (perf_missing_features.aux_output)
return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
if (!target__has_cpu(target))
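Two related changes sit in the evsel.c hunks above: trace_event__id() lets tracepoint evsels be created without libtraceevent by reading the numeric event id straight from tracefs, and evsel__tp_format() now resolves the libtraceevent format lazily, only when a caller actually needs the field layout. A stand-alone sketch of the id lookup (the tracefs mount point is hard-coded here purely for illustration; perf resolves it via get_events_file()):

#include <stdio.h>

static int tracepoint_id(const char *sys, const char *name)
{
	char path[256];
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/%s/%s/id", sys, name);
	f = fopen(path, "r");
	if (f == NULL)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

int main(void)
{
	printf("sched:sched_switch id = %d\n",
	       tracepoint_id("sched", "sched_switch"));
	return 0;
}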
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 04934a7af174..5e789fa80590 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -59,6 +59,8 @@ struct evsel {
char *group_name;
const char *group_pmu_name;
#ifdef HAVE_LIBTRACEEVENT
+ char *tp_sys;
+ char *tp_name;
struct tep_event *tp_format;
#endif
char *filter;
@@ -119,6 +121,7 @@ struct evsel {
bool default_metricgroup; /* A member of the Default metricgroup */
struct hashmap *per_pkg_mask;
int err;
+ int script_output_type;
struct {
evsel__sb_cb_t *cb;
void *data;
@@ -205,6 +208,7 @@ struct perf_missing_features {
bool weight_struct;
bool read_lost;
bool branch_counters;
+ bool aux_action;
bool inherit_sample_read;
};
@@ -241,26 +245,23 @@ static inline struct evsel *evsel__new(struct perf_event_attr *attr)
return evsel__new_idx(attr, 0);
}
-struct evsel *evsel__clone(struct evsel *orig);
+struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig);
int copy_config_terms(struct list_head *dst, struct list_head *src);
void free_config_terms(struct list_head *config_terms);
-#ifdef HAVE_LIBTRACEEVENT
-struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format);
-
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format);
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
return evsel__newtp_idx(sys, name, 0, true);
}
-#endif
#ifdef HAVE_LIBTRACEEVENT
-struct tep_event *event_format__new(const char *sys, const char *name);
+struct tep_event *evsel__tp_format(struct evsel *evsel);
#endif
void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
index aee6f808b512..af52a1516d0b 100644
--- a/tools/perf/util/evsel_config.h
+++ b/tools/perf/util/evsel_config.h
@@ -25,6 +25,7 @@ enum evsel_term_type {
EVSEL__CONFIG_TERM_BRANCH,
EVSEL__CONFIG_TERM_PERCORE,
EVSEL__CONFIG_TERM_AUX_OUTPUT,
+ EVSEL__CONFIG_TERM_AUX_ACTION,
EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE,
EVSEL__CONFIG_TERM_CFG_CHG,
};
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 86b7f46f9e2a..103984b29b1e 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -81,13 +81,15 @@ int evsel__fprintf(struct evsel *evsel, struct perf_attr_details *details, FILE
#ifdef HAVE_LIBTRACEEVENT
if (details->trace_fields) {
struct tep_format_field *field;
+ const struct tep_event *tp_format;
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
printed += comma_fprintf(fp, &first, " (not a tracepoint)");
goto out;
}
- field = evsel->tp_format->format.fields;
+ tp_format = evsel__tp_format(evsel);
+ field = tp_format ? tp_format->format.fields : NULL;
if (field == NULL) {
printed += comma_fprintf(fp, &first, " (no trace field)");
goto out;
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index f289044a1f7c..c221dcce6666 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -285,7 +285,7 @@ struct expr_parse_ctx *expr__ctx_new(void)
{
struct expr_parse_ctx *ctx;
- ctx = malloc(sizeof(struct expr_parse_ctx));
+ ctx = calloc(1, sizeof(struct expr_parse_ctx));
if (!ctx)
return NULL;
@@ -294,9 +294,6 @@ struct expr_parse_ctx *expr__ctx_new(void)
free(ctx);
return NULL;
}
- ctx->sctx.user_requested_cpu_list = NULL;
- ctx->sctx.runtime = 0;
- ctx->sctx.system_wide = false;
return ctx;
}
diff --git a/tools/perf/util/ftrace.h b/tools/perf/util/ftrace.h
index bae649ef50e8..5dee2caba0fe 100644
--- a/tools/perf/util/ftrace.h
+++ b/tools/perf/util/ftrace.h
@@ -7,6 +7,7 @@
struct evlist;
struct hashamp;
+struct stats;
struct perf_ftrace {
struct evlist *evlist;
@@ -20,6 +21,9 @@ struct perf_ftrace {
unsigned long percpu_buffer_size;
bool inherit;
bool use_nsec;
+ unsigned int bucket_range;
+ unsigned int min_latency;
+ unsigned int max_latency;
int graph_depth;
int func_stack_trace;
int func_irq_info;
@@ -43,7 +47,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
- int buckets[]);
+ int buckets[], struct stats *stats);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);
#else /* !HAVE_BPF_SKEL */
@@ -68,7 +72,8 @@ perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
- int buckets[] __maybe_unused)
+ int buckets[] __maybe_unused,
+ struct stats *stats __maybe_unused)
{
return -1;
}
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 1b5140e5ce99..6a73c903d690 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -38,7 +38,7 @@ do
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
-echo "#if defined(HAVE_LIBTRACEEVENT) && (defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT))"
+echo "#if defined(HAVE_LIBTRACEEVENT)"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort |
while read cmd
@@ -51,7 +51,7 @@ do
p
}' "Documentation/perf-$cmd.txt"
done
-echo "#endif /* HAVE_LIBTRACEEVENT && (HAVE_LIBAUDIT_SUPPORT || HAVE_SYSCALL_TABLE_SUPPORT) */"
+echo "#endif /* HAVE_LIBTRACEEVENT */"
echo "#ifdef HAVE_LIBTRACEEVENT"
sed -n -e 's/^perf-\([^ ]*\)[ ].* traceevent.*/\1/p' command-list.txt |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3451e542b69a..d06aa86352d3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3158,7 +3158,10 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
/* after reading from file, translate offset to address */
bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
- __perf_env__insert_bpf_prog_info(env, info_node);
+ if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
+ free(info_linear);
+ free(info_node);
+ }
}
up_write(&env->bpf_progs.lock);
@@ -3205,7 +3208,8 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (__do_read(ff, node->data, data_size))
goto out;
- __perf_env__insert_btf(env, node);
+ if (!__perf_env__insert_btf(env, node))
+ free(node);
node = NULL;
}
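Both insert helpers now report whether the node actually went into the tree, and the feature readers above free the freshly allocated node when a duplicate id is rejected instead of leaking it. The ownership rule is sketched below with a made-up set_insert() helper (not a perf API): on failure the caller still owns the allocation.

#include <stdbool.h>
#include <stdlib.h>

struct node { int key; };

static struct node *seen[16];
static int nr_seen;

static bool set_insert(struct node *n)	/* returns false on a duplicate key */
{
	for (int i = 0; i < nr_seen; i++) {
		if (seen[i]->key == n->key)
			return false;
	}
	seen[nr_seen++] = n;		/* on success the set owns the node */
	return true;
}

int main(void)
{
	for (int key = 0; key < 4; key++) {
		struct node *n = malloc(sizeof(*n));

		if (n == NULL)
			return 1;
		n->key = key % 2;	/* second half are duplicates */
		if (!set_insert(n))
			free(n);	/* duplicate: caller frees, as in process_bpf_prog_info() */
	}

	for (int i = 0; i < nr_seen; i++)
		free(seen[i]);
	return 0;
}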
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index fff134565801..0f30f843c566 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -32,6 +32,9 @@
#include <linux/time64.h>
#include <linux/zalloc.h>
+static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
+static int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
+
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
@@ -1292,19 +1295,35 @@ out:
return err;
}
-int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+static int64_t
+hist_entry__cmp_impl(struct perf_hpp_list *hpp_list, struct hist_entry *left,
+ struct hist_entry *right, unsigned long fn_offset,
+ bool ignore_dynamic, bool ignore_skipped)
{
struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
- int64_t cmp = 0;
+ perf_hpp_fmt_cmp_t *fn;
+ int64_t cmp;
+
+ /*
+ * Never collapse filtered and non-filtered entries.
+ * Note this is not the same as having an extra (invisible) fmt
+ * that corresponds to the filtered status.
+ */
+ cmp = (int64_t)!!left->filtered - (int64_t)!!right->filtered;
+ if (cmp)
+ return cmp;
- hists__for_each_sort_list(hists, fmt) {
- if (perf_hpp__is_dynamic_entry(fmt) &&
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ if (ignore_dynamic && perf_hpp__is_dynamic_entry(fmt) &&
!perf_hpp__defined_dynamic_entry(fmt, hists))
continue;
- cmp = fmt->cmp(fmt, left, right);
+ if (ignore_skipped && perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ fn = (void *)fmt + fn_offset;
+ cmp = (*fn)(fmt, left, right);
if (cmp)
break;
}
@@ -1313,23 +1332,33 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
}
int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct hists *hists = left->hists;
- struct perf_hpp_fmt *fmt;
- int64_t cmp = 0;
+ return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
+ offsetof(struct perf_hpp_fmt, cmp), true, false);
+}
- hists__for_each_sort_list(hists, fmt) {
- if (perf_hpp__is_dynamic_entry(fmt) &&
- !perf_hpp__defined_dynamic_entry(fmt, hists))
- continue;
+static int64_t
+hist_entry__sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
+ offsetof(struct perf_hpp_fmt, sort), false, true);
+}
- cmp = fmt->collapse(fmt, left, right);
- if (cmp)
- break;
- }
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
+ offsetof(struct perf_hpp_fmt, collapse), true, false);
+}
- return cmp;
+static int64_t
+hist_entry__collapse_hierarchy(struct perf_hpp_list *hpp_list,
+ struct hist_entry *left,
+ struct hist_entry *right)
+{
+ return hist_entry__cmp_impl(hpp_list, left, right,
+ offsetof(struct perf_hpp_fmt, collapse), false, false);
}
void hist_entry__delete(struct hist_entry *he)
@@ -1503,14 +1532,7 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
-
- cmp = 0;
- perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
- cmp = fmt->collapse(fmt, iter, he);
- if (cmp)
- break;
- }
-
+ cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat);
return iter;
@@ -1730,24 +1752,6 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
return 0;
}
-static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
-{
- struct hists *hists = a->hists;
- struct perf_hpp_fmt *fmt;
- int64_t cmp = 0;
-
- hists__for_each_sort_list(hists, fmt) {
- if (perf_hpp__should_skip(fmt, a->hists))
- continue;
-
- cmp = fmt->sort(fmt, a, b);
- if (cmp)
- break;
- }
-
- return cmp;
-}
-
static void hists__reset_filter_stats(struct hists *hists)
{
hists->nr_non_filtered_entries = 0;
@@ -2449,21 +2453,15 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
- struct perf_hpp_fmt *fmt;
bool leftmost = true;
p = &root->rb_root.rb_node;
while (*p != NULL) {
- int64_t cmp = 0;
+ int64_t cmp;
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node_in);
-
- perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
- cmp = fmt->collapse(fmt, he, pair);
- if (cmp)
- break;
- }
+ cmp = hist_entry__collapse_hierarchy(he->hpp_list, he, pair);
if (!cmp)
goto out;
@@ -2521,16 +2519,10 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *roo
while (n) {
struct hist_entry *iter;
- struct perf_hpp_fmt *fmt;
- int64_t cmp = 0;
+ int64_t cmp;
iter = rb_entry(n, struct hist_entry, rb_node_in);
- perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
- cmp = fmt->collapse(fmt, iter, he);
- if (cmp)
- break;
- }
-
+ cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 1131056924d9..46c8373e3146 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -342,8 +342,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
struct perf_hpp;
struct perf_hpp_fmt;
-int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
-int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
@@ -452,6 +450,9 @@ struct perf_hpp {
bool skip;
};
+typedef int64_t (*perf_hpp_fmt_cmp_t)(
+ struct perf_hpp_fmt *, struct hist_entry *, struct hist_entry *);
+
struct perf_hpp_fmt {
const char *name;
int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -463,12 +464,9 @@ struct perf_hpp_fmt {
struct hist_entry *he);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
- int64_t (*cmp)(struct perf_hpp_fmt *fmt,
- struct hist_entry *a, struct hist_entry *b);
- int64_t (*collapse)(struct perf_hpp_fmt *fmt,
- struct hist_entry *a, struct hist_entry *b);
- int64_t (*sort)(struct perf_hpp_fmt *fmt,
- struct hist_entry *a, struct hist_entry *b);
+ perf_hpp_fmt_cmp_t cmp;
+ perf_hpp_fmt_cmp_t collapse;
+ perf_hpp_fmt_cmp_t sort;
bool (*equal)(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
void (*free)(struct perf_hpp_fmt *fmt);
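With cmp, collapse and sort all sharing the perf_hpp_fmt_cmp_t type, hist_entry__cmp_impl() can select which callback to run from a byte offset into struct perf_hpp_fmt (offsetof(struct perf_hpp_fmt, cmp) and friends). A simplified, self-contained illustration of that dispatch pattern, with invented names:

#include <stdio.h>
#include <stddef.h>

typedef int (*cmp_fn_t)(int a, int b);

struct ops {
	cmp_fn_t cmp;
	cmp_fn_t collapse;
	cmp_fn_t sort;
};

static int by_value(int a, int b) { return a - b; }
static int reversed(int a, int b) { return b - a; }

/* Call whichever cmp_fn_t member sits at fn_offset bytes into *ops. */
static int dispatch(struct ops *ops, size_t fn_offset, int a, int b)
{
	cmp_fn_t *fn = (void *)((char *)ops + fn_offset);

	return (*fn)(a, b);
}

int main(void)
{
	struct ops ops = { .cmp = by_value, .collapse = by_value, .sort = reversed };

	printf("%d %d\n",
	       dispatch(&ops, offsetof(struct ops, cmp), 1, 2),
	       dispatch(&ops, offsetof(struct ops, sort), 1, 2));
	return 0;
}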
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 30793d08c6d4..5b8f0149167d 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -7,16 +7,24 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
$(call rule_mkdir)
@$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
-# Busybox's diff doesn't have -I, avoid warning in the case
+ifeq ($(SRCARCH),x86)
+ perf-util-y += inat.o insn.o
+else
+ perf-util-$(CONFIG_AUXTRACE) += inat.o insn.o
+endif
-$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
+$(OUTPUT)util/intel-pt-decoder/inat.o: $(srctree)/tools/arch/x86/lib/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
-CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder
+CFLAGS_inat.o += -I$(OUTPUT)util/intel-pt-decoder
+
+$(OUTPUT)util/intel-pt-decoder/insn.o: $(srctree)/tools/arch/x86/lib/insn.c
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
ifeq ($(CC_NO_CLANG), 1)
- CFLAGS_intel-pt-insn-decoder.o += -Wno-override-init
+ CFLAGS_insn.o += -Wno-override-init
endif
-CFLAGS_intel-pt-insn-decoder.o += -Wno-packed
+CFLAGS_insn.o += -Wno-packed
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 47cf35799a4d..8fabddc1c0da 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -11,9 +11,6 @@
#include <byteswap.h>
#include "../../../arch/x86/include/asm/insn.h"
-#include "../../../arch/x86/lib/inat.c"
-#include "../../../arch/x86/lib/insn.c"
-
#include "event.h"
#include "intel-pt-insn-decoder.h"
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 346513e5e9b7..f23e21502bf8 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -737,7 +737,7 @@ jit_inject(struct jit_buf_desc *jd, const char *path)
* as captured in the RECORD_MMAP record
*/
static int
-jit_detect(const char *mmap_name, pid_t pid, struct nsinfo *nsi)
+jit_detect(const char *mmap_name, pid_t pid, struct nsinfo *nsi, bool *in_pidns)
{
char *p;
char *end = NULL;
@@ -773,11 +773,16 @@ jit_detect(const char *mmap_name, pid_t pid, struct nsinfo *nsi)
if (!end)
return -1;
+ *in_pidns = pid == nsinfo__nstgid(nsi);
/*
* pid does not match mmap pid
* pid==0 in system-wide mode (synthesized)
+ *
+ * If the pid in the file name is equal to the nstgid, then
+ * the agent ran inside a container and perf outside the
+ * container, so record it for further use in jit_inject().
*/
- if (pid && pid2 != nsinfo__nstgid(nsi))
+ if (pid && !(pid2 == pid || *in_pidns))
return -1;
/*
* validate suffix
@@ -830,6 +835,7 @@ jit_process(struct perf_session *session,
struct nsinfo *nsi;
struct evsel *first;
struct jit_buf_desc jd;
+ bool in_pidns = false;
int ret;
thread = machine__findnew_thread(machine, pid, tid);
@@ -844,7 +850,7 @@ jit_process(struct perf_session *session,
/*
* first, detect marker mmap (i.e., the jitdump mmap)
*/
- if (jit_detect(filename, pid, nsi)) {
+ if (jit_detect(filename, pid, nsi, &in_pidns)) {
nsinfo__put(nsi);
/*
@@ -866,6 +872,9 @@ jit_process(struct perf_session *session,
jd.machine = machine;
jd.nsi = nsi;
+ if (in_pidns)
+ nsinfo__set_in_pidns(nsi);
+
/*
* track sample_type to compute id_all layout
* perf sets the same sample type to all events as of now
diff --git a/tools/perf/util/kvm-stat.c b/tools/perf/util/kvm-stat.c
new file mode 100644
index 000000000000..38ace736db5c
--- /dev/null
+++ b/tools/perf/util/kvm-stat.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "debug.h"
+#include "evsel.h"
+#include "kvm-stat.h"
+
+#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+
+bool kvm_exit_event(struct evsel *evsel)
+{
+ return evsel__name_is(evsel, kvm_exit_trace);
+}
+
+void exit_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+}
+
+
+bool exit_event_begin(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ if (kvm_exit_event(evsel)) {
+ exit_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+bool kvm_entry_event(struct evsel *evsel)
+{
+ return evsel__name_is(evsel, kvm_entry_trace);
+}
+
+bool exit_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static const char *get_exit_reason(struct perf_kvm_stat *kvm,
+ struct exit_reasons_table *tbl,
+ u64 exit_code)
+{
+ while (tbl->reason != NULL) {
+ if (tbl->exit_code == exit_code)
+ return tbl->reason;
+ tbl++;
+ }
+
+ pr_err("unknown kvm exit code:%lld on %s\n",
+ (unsigned long long)exit_code, kvm->exit_reasons_isa);
+ return "UNKNOWN";
+}
+
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+ struct event_key *key,
+ char *decode)
+{
+ const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
+ key->key);
+
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
+}
+
+#endif
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 3e9ac754c3d1..4249542544bb 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -115,6 +115,8 @@ struct kvm_reg_events_ops {
struct kvm_events_ops *ops;
};
+#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key);
@@ -127,6 +129,7 @@ bool exit_event_end(struct evsel *evsel,
void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char *decode);
+#endif
bool kvm_exit_event(struct evsel *evsel);
bool kvm_entry_event(struct evsel *evsel);
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index 76fe2a821bcf..db00269b73f2 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -1,6 +1,7 @@
#ifndef PERF_UTIL_KWORK_H
#define PERF_UTIL_KWORK_H
+#include "perf.h"
#include "util/tool.h"
#include "util/time-utils.h"
@@ -251,12 +252,14 @@ struct perf_kwork {
* perf kwork top data
*/
struct kwork_top_stat top_stat;
-};
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+ /* Add work callback. */
+ struct kwork_work *(*add_work)(struct perf_kwork *kwork,
struct kwork_class *class,
struct kwork_work *key);
+};
+
#ifdef HAVE_BPF_SKEL
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
diff --git a/tools/perf/util/llvm-c-helpers.cpp b/tools/perf/util/llvm-c-helpers.cpp
index 663bcaba2041..004081bd12c9 100644
--- a/tools/perf/util/llvm-c-helpers.cpp
+++ b/tools/perf/util/llvm-c-helpers.cpp
@@ -18,7 +18,6 @@
extern "C" {
#include <linux/zalloc.h>
}
-#include "symbol_conf.h"
#include "llvm-c-helpers.h"
extern "C"
diff --git a/tools/perf/util/lock-contention.c b/tools/perf/util/lock-contention.c
new file mode 100644
index 000000000000..92e7b7b572a2
--- /dev/null
+++ b/tools/perf/util/lock-contention.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "debug.h"
+#include "env.h"
+#include "lock-contention.h"
+#include "machine.h"
+#include "symbol.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include <linux/hash.h>
+#include <linux/zalloc.h>
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+struct callstack_filter {
+ struct list_head list;
+ char name[];
+};
+
+static LIST_HEAD(callstack_filters);
+struct hlist_head *lockhash_table;
+
+int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ struct callstack_filter *entry;
+
+ entry = malloc(sizeof(*entry) + strlen(tok) + 1);
+ if (entry == NULL) {
+ pr_err("Memory allocation failure\n");
+ free(s);
+ return -1;
+ }
+
+ strcpy(entry->name, tok);
+ list_add_tail(&entry->list, &callstack_filters);
+ }
+
+ free(s);
+ return ret;
+}
+
+bool needs_callstack(void)
+{
+ return !list_empty(&callstack_filters);
+}
+
+struct lock_stat *lock_stat_find(u64 addr)
+{
+ struct hlist_head *entry = lockhashentry(addr);
+ struct lock_stat *ret;
+
+ hlist_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+ return NULL;
+}
+
+struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
+{
+ struct hlist_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ hlist_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = strdup(name);
+ if (!new->name) {
+ free(new);
+ goto alloc_failed;
+ }
+
+ new->flags = flags;
+ new->wait_time_min = ULLONG_MAX;
+
+ hlist_add_head(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ pr_err("memory allocation failed\n");
+ return NULL;
+}
+
+bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth)
+{
+ struct map *kmap;
+ struct symbol *sym;
+ u64 ip;
+ const char *arch = perf_env__arch(machine->env);
+
+ if (list_empty(&callstack_filters))
+ return true;
+
+ for (int i = 0; i < max_stack_depth; i++) {
+ struct callstack_filter *filter;
+
+ /*
+ * On powerpc, the callchain saved by the kernel always includes,
+ * as its first three entries, the NIP (next instruction pointer),
+ * the LR (link register), and the contents of the LR save area in
+ * the second stack frame. In certain scenarios it's possible to have
+ * invalid kernel instruction addresses in either the LR or the second
+ * stack frame's LR save area; in that case the kernel stores the
+ * address as zero.
+ *
+ * The check below keeps walking the callstack in case the first or
+ * second callstack entry has a zero address on powerpc.
+ */
+ if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
+ (i != 1 && i != 2))))
+ break;
+
+ ip = callstack[i];
+ sym = machine__find_kernel_symbol(machine, ip, &kmap);
+ if (sym == NULL)
+ continue;
+
+ list_for_each_entry(filter, &callstack_filters, list) {
+ if (strstr(sym->name, filter->name))
+ return true;
+ }
+ }
+ return false;
+}
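For context, the helpers above hash lock addresses into lockhash_table via the LOCKHASH_BITS/LOCKHASH_SIZE constants declared in lock-contention.h below. A minimal hedged sketch of how a caller might record one contended lock; the field names are taken from the existing struct lock_stat definition, and the table allocation is assumed to have happened elsewhere:

/* Hedged sketch; error handling trimmed, lockhash_table allocation assumed. */
static int record_contention(u64 lock_addr, const char *lock_name, u64 wait_ns)
{
	struct lock_stat *st = lock_stat_findnew(lock_addr, lock_name, /*flags=*/0);

	if (st == NULL)
		return -1;

	st->nr_contended++;
	st->wait_time_total += wait_ns;
	if (wait_ns > st->wait_time_max)
		st->wait_time_max = wait_ns;
	if (wait_ns < st->wait_time_min)
		st->wait_time_min = wait_ns;
	return 0;
}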
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index 1a7248ff3889..a09f7fe877df 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -10,10 +10,12 @@ struct lock_filter {
int nr_addrs;
int nr_syms;
int nr_cgrps;
+ int nr_slabs;
unsigned int *types;
unsigned long *addrs;
char **syms;
u64 *cgrps;
+ char **slabs;
};
struct lock_stat {
@@ -67,10 +69,11 @@ struct lock_stat {
*/
#define MAX_LOCK_DEPTH 48
-struct lock_stat *lock_stat_find(u64 addr);
-struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);
+/* based on kernel/lockdep.c */
+#define LOCKHASH_BITS 12
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
-bool match_callstack_filter(struct machine *machine, u64 *callstack);
+extern struct hlist_head *lockhash_table;
/*
* struct lock_seq_stat:
@@ -148,8 +151,17 @@ struct lock_contention {
bool save_callstack;
};
-#ifdef HAVE_BPF_SKEL
+struct option;
+int parse_call_stack(const struct option *opt, const char *str, int unset);
+bool needs_callstack(void);
+struct lock_stat *lock_stat_find(u64 addr);
+struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);
+
+bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth);
+
+
+#ifdef HAVE_BPF_SKEL
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
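Since lockhash_table is now only declared extern in this header, whichever tool links the new lock-contention.c in must allocate the table before the helpers are used. A hedged sketch of one plausible setup (the real allocation site is not part of this hunk):

/* Hedged sketch of table setup; calloc() leaves every hlist_head empty. */
#include <errno.h>
#include <stdlib.h>

static int lockhash_table__init(void)
{
	lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
	if (lockhash_table == NULL)
		return -ENOMEM;
	return 0;
}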
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 27d5345d2b30..2d51badfbf2e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1003,7 +1003,7 @@ static int machine__get_running_kernel_start(struct machine *machine,
err = kallsyms__get_symbol_start(filename, "_edata", &addr);
if (err)
- err = kallsyms__get_function_start(filename, "_etext", &addr);
+ err = kallsyms__get_symbol_start(filename, "_etext", &addr);
if (!err)
*end = addr;
@@ -1468,6 +1468,8 @@ static int machine__create_modules(struct machine *machine)
if (modules__parse(modules, machine, machine__create_module))
return -1;
+ maps__fixup_end(machine__kernel_maps(machine));
+
if (!machine__set_modules_path(machine))
return 0;
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 432399cbe5dd..09c9cc326c08 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -1136,8 +1136,13 @@ struct map *maps__find_next_entry(struct maps *maps, struct map *map)
struct map *result = NULL;
down_read(maps__lock(maps));
+ while (!maps__maps_by_address_sorted(maps)) {
+ up_read(maps__lock(maps));
+ maps__sort_by_address(maps);
+ down_read(maps__lock(maps));
+ }
i = maps__by_address_index(maps, map);
- if (i < maps__nr_maps(maps))
+ if (++i < maps__nr_maps(maps))
result = map__get(maps__maps_by_address(maps)[i]);
up_read(maps__lock(maps));
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index bf5090f5220b..3692e988c86e 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -258,6 +258,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
const char *s;
char *copy;
struct perf_cpu_map *cpu_map = NULL;
+ int ret;
while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
@@ -283,7 +284,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
rec_argv[i++] = "-e";
rec_argv[i++] = copy;
- cpu_map = perf_cpu_map__merge(cpu_map, pmu->cpus);
+ ret = perf_cpu_map__merge(&cpu_map, pmu->cpus);
+ if (ret < 0)
+ return ret;
}
}
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index cb185c5659d6..68f5de2d79c7 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -266,11 +266,16 @@ pid_t nsinfo__pid(const struct nsinfo *nsi)
return RC_CHK_ACCESS(nsi)->pid;
}
-pid_t nsinfo__in_pidns(const struct nsinfo *nsi)
+bool nsinfo__in_pidns(const struct nsinfo *nsi)
{
return RC_CHK_ACCESS(nsi)->in_pidns;
}
+void nsinfo__set_in_pidns(struct nsinfo *nsi)
+{
+ RC_CHK_ACCESS(nsi)->in_pidns = true;
+}
+
void nsinfo__mountns_enter(struct nsinfo *nsi,
struct nscookie *nc)
{
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 8c0731c6cbb7..e95c79b80e27 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -58,7 +58,8 @@ void nsinfo__clear_need_setns(struct nsinfo *nsi);
pid_t nsinfo__tgid(const struct nsinfo *nsi);
pid_t nsinfo__nstgid(const struct nsinfo *nsi);
pid_t nsinfo__pid(const struct nsinfo *nsi);
-pid_t nsinfo__in_pidns(const struct nsinfo *nsi);
+bool nsinfo__in_pidns(const struct nsinfo *nsi);
+void nsinfo__set_in_pidns(struct nsinfo *nsi);
void nsinfo__mountns_enter(struct nsinfo *nsi, struct nscookie *nc);
void nsinfo__mountns_exit(struct nscookie *nc);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index afeb8d815bbf..1e23faa364b1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -489,7 +489,6 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
return found_supported ? 0 : -EINVAL;
}
-#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name, int column)
{
@@ -644,7 +643,6 @@ static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
closedir(events_dir);
return ret;
}
-#endif /* HAVE_LIBTRACEEVENT */
size_t default_breakpoint_len(void)
{
@@ -795,6 +793,7 @@ const char *parse_events__term_type_str(enum parse_events__term_type term_type)
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
+ [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
@@ -844,6 +843,7 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
@@ -963,6 +963,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ CHECK_TYPE_VAL(STR);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM);
if (term->val.num > UINT_MAX) {
@@ -1066,7 +1069,6 @@ static int config_term_pmu(struct perf_event_attr *attr,
return config_term_common(attr, term, err);
}
-#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
@@ -1081,6 +1083,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
case PARSE_EVENTS__TERM_TYPE_USER:
@@ -1111,7 +1114,6 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
return 0;
}
-#endif
static int config_attr(struct perf_event_attr *attr,
const struct parse_events_terms *head,
@@ -1217,6 +1219,9 @@ do { \
ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
term->val.num ? 1 : 0, term->weak);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
@@ -1279,6 +1284,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
@@ -1303,7 +1309,7 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
-#ifdef HAVE_LIBTRACEEVENT
+
if (head_config) {
struct perf_event_attr attr;
@@ -1318,16 +1324,6 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
else
return add_tracepoint_event(parse_state, list, sys, event,
err, head_config, loc);
-#else
- (void)parse_state;
- (void)list;
- (void)sys;
- (void)event;
- (void)head_config;
- parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
- strdup("libtraceevent is necessary for tracepoint support"));
- return -1;
-#endif
}
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 3f4334ec6231..e176a34ab088 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -74,6 +74,7 @@ enum parse_events__term_type {
PARSE_EVENTS__TERM_TYPE_DRV_CFG,
PARSE_EVENTS__TERM_TYPE_PERCORE,
PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT,
+ PARSE_EVENTS__TERM_TYPE_AUX_ACTION,
PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE,
PARSE_EVENTS__TERM_TYPE_METRIC_ID,
PARSE_EVENTS__TERM_TYPE_RAW,
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 14e5bd856a18..bf7f73548605 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -321,6 +321,7 @@ overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
no-overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
percore { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
aux-output { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT); }
+aux-action { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_ACTION); }
aux-sample-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE); }
metric-id { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_METRIC_ID); }
cpu-cycles|cycles { return hw_term(yyscanner, PERF_COUNT_HW_CPU_CYCLES); }
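The new aux-action term becomes part of the normal event-parsing grammar, so it can be exercised programmatically as well as from the command line. A hedged sketch using the parse_events()/parse_events_error API that appears elsewhere in this series; the accepted values follow the format string added in pmu.c below (pause, resume, start-paused):

/* Hedged sketch: parse an event string carrying the new aux-action term. */
struct evlist *evlist = evlist__new();
struct parse_events_error err;

parse_events_error__init(&err);
if (parse_events(evlist, "cycles/aux-action=pause/", &err))
	parse_events_error__print(&err, "cycles/aux-action=pause/");
parse_events_error__exit(&err);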
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 00adf872bf00..2e62f272fda8 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -68,14 +68,12 @@ bool is_directory(const char *base_path, const struct dirent *dent)
return S_ISDIR(st.st_mode);
}
-bool is_executable_file(const char *base_path, const struct dirent *dent)
+bool is_directory_at(int dir_fd, const char *path)
{
- char path[PATH_MAX];
struct stat st;
- snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
- if (stat(path, &st))
+ if (fstatat(dir_fd, path, &st, /*flags=*/0))
return false;
- return !S_ISDIR(st.st_mode) && (st.st_mode & S_IXUSR);
+ return S_ISDIR(st.st_mode);
}
diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
index d94902c22222..fb850fb55c60 100644
--- a/tools/perf/util/path.h
+++ b/tools/perf/util/path.h
@@ -12,6 +12,6 @@ int path__join3(char *bf, size_t size, const char *path1, const char *path2, con
bool is_regular_file(const char *file);
bool is_directory(const char *base_path, const struct dirent *dent);
-bool is_executable_file(const char *base_path, const struct dirent *dent);
+bool is_directory_at(int dir_fd, const char *path);
#endif /* _PERF_PATH_H */
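is_directory_at() pairs naturally with openat()-style directory walks and avoids the PATH_MAX snprintf() that the removed is_executable_file() needed. A small hedged sketch of the intended call pattern (list_subdirs is a hypothetical caller):

/* Hedged sketch: filter subdirectories of an already-open directory fd. */
#include <dirent.h>
#include <stdio.h>
#include <unistd.h>

static void list_subdirs(int dir_fd)
{
	DIR *dir = fdopendir(dup(dir_fd));	/* dup: fdopendir consumes its fd */
	struct dirent *dent;

	if (dir == NULL)
		return;
	while ((dent = readdir(dir)) != NULL) {
		if (dent->d_name[0] == '.')
			continue;
		if (is_directory_at(dir_fd, dent->d_name))
			puts(dent->d_name);
	}
	closedir(dir);
}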
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 59fbbba79697..c7f3543b9921 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -212,7 +212,6 @@ static void __p_config_hw_cache_id(char *buf, size_t size, u64 value)
}
}
-#ifdef HAVE_LIBTRACEEVENT
static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
{
char *str = tracepoint_id_to_name(value);
@@ -220,7 +219,6 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
print_id_hex(str);
free(str);
}
-#endif
static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value)
{
@@ -238,9 +236,7 @@ static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type
case PERF_TYPE_HW_CACHE:
return __p_config_hw_cache_id(buf, size, value);
case PERF_TYPE_TRACEPOINT:
-#ifdef HAVE_LIBTRACEEVENT
return __p_config_tracepoint_id(buf, size, value);
-#endif
case PERF_TYPE_RAW:
case PERF_TYPE_BREAKPOINT:
default:
@@ -335,6 +331,9 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(sample_max_stack, p_unsigned);
PRINT_ATTRf(aux_sample_size, p_unsigned);
PRINT_ATTRf(sig_data, p_unsigned);
+ PRINT_ATTRf(aux_start_paused, p_unsigned);
+ PRINT_ATTRf(aux_pause, p_unsigned);
+ PRINT_ATTRf(aux_resume, p_unsigned);
return ret;
}
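The three new attribute fields printed above correspond to the aux_start_paused/aux_pause/aux_resume bits that the aux-action event term ultimately programs. A hedged sketch of what an opened event might carry, using only the field names visible in this hunk (the pause/resume semantics are an assumption of this sketch, not spelled out here):

/* Hedged sketch: a sampling event that resumes a paused AUX area trace. */
struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.sample_period	= 100000,
	.aux_resume	= 1,	/* assumed: unpause the AUX event when this one overflows */
};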
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 08a9d0bd9301..6206c8fe2bf9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -12,6 +12,7 @@
#include <stdbool.h>
#include <dirent.h>
#include <api/fs/fs.h>
+#include <api/io.h>
#include <locale.h>
#include <fnmatch.h>
#include <math.h>
@@ -748,26 +749,35 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias, int err_loc, struct lis
* Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
* may have a "cpus" file.
*/
-static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *name, bool is_core)
+static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *pmu_name, bool is_core)
{
- struct perf_cpu_map *cpus;
const char *templates[] = {
"cpumask",
"cpus",
NULL
};
const char **template;
- char pmu_name[PATH_MAX];
- struct perf_pmu pmu = {.name = pmu_name};
- FILE *file;
- strlcpy(pmu_name, name, sizeof(pmu_name));
for (template = templates; *template; template++) {
- file = perf_pmu__open_file_at(&pmu, dirfd, *template);
- if (!file)
+ struct io io;
+ char buf[128];
+ char *cpumask = NULL;
+ size_t cpumask_len;
+ ssize_t ret;
+ struct perf_cpu_map *cpus;
+
+ io.fd = perf_pmu__pathname_fd(dirfd, pmu_name, *template, O_RDONLY);
+ if (io.fd < 0)
continue;
- cpus = perf_cpu_map__read(file);
- fclose(file);
+
+ io__init(&io, io.fd, buf, sizeof(buf));
+ ret = io__getline(&io, &cpumask, &cpumask_len);
+ close(io.fd);
+ if (ret < 0)
+ continue;
+
+ cpus = perf_cpu_map__new(cpumask);
+ free(cpumask);
if (cpus)
return cpus;
}
@@ -1763,6 +1773,7 @@ int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_call
"no-overwrite",
"percore",
"aux-output",
+ "aux-action=(pause|resume|start-paused)",
"aux-sample-size=number",
};
struct perf_pmu_format *format;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eaa0318e9b87..307ad6242a4e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1383,20 +1383,20 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
if (p == buf) {
semantic_error("No file/function name in '%s'.\n", p);
err = -EINVAL;
- goto err;
+ goto out;
}
*(p++) = '\0';
err = parse_line_num(&p, &lr->start, "start line");
if (err)
- goto err;
+ goto out;
if (*p == '+' || *p == '-') {
const char c = *(p++);
err = parse_line_num(&p, &lr->end, "end line");
if (err)
- goto err;
+ goto out;
if (c == '+') {
lr->end += lr->start;
@@ -1416,11 +1416,11 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
if (lr->start > lr->end) {
semantic_error("Start line must be smaller"
" than end line.\n");
- goto err;
+ goto out;
}
if (*p != '\0') {
semantic_error("Tailing with invalid str '%s'.\n", p);
- goto err;
+ goto out;
}
}
@@ -1431,7 +1431,7 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
lr->file = strdup_esq(p);
if (lr->file == NULL) {
err = -ENOMEM;
- goto err;
+ goto out;
}
}
if (*buf != '\0')
@@ -1439,7 +1439,7 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
if (!lr->function && !lr->file) {
semantic_error("Only '@*' is not allowed.\n");
err = -EINVAL;
- goto err;
+ goto out;
}
} else if (strpbrk_esq(buf, "/."))
lr->file = strdup_esq(buf);
@@ -1448,10 +1448,10 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
else { /* Invalid name */
semantic_error("'%s' is not a valid function name.\n", buf);
err = -EINVAL;
- goto err;
+ goto out;
}
-err:
+out:
free(buf);
return err;
}
@@ -2775,7 +2775,7 @@ int show_perf_probe_events(struct strfilter *filter)
static int get_new_event_name(char *buf, size_t len, const char *base,
struct strlist *namelist, bool ret_event,
- bool allow_suffix)
+ bool allow_suffix, bool not_C_symname)
{
int i, ret;
char *p, *nbase;
@@ -2786,10 +2786,24 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
if (!nbase)
return -ENOMEM;
- /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
- p = strpbrk(nbase, ".@");
- if (p && p != nbase)
- *p = '\0';
+ if (not_C_symname) {
+ /* Replace non-alnum with '_' */
+ char *s, *d;
+
+ s = d = nbase;
+ do {
+ if (*s && !isalnum(*s)) {
+ if (d != nbase && *(d - 1) != '_')
+ *d++ = '_';
+ } else
+ *d++ = *s;
+ } while (*s++);
+ } else {
+ /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+ p = strpbrk(nbase, ".@");
+ if (p && p != nbase)
+ *p = '\0';
+ }
/* Try no suffix number */
ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
@@ -2884,6 +2898,7 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
bool allow_suffix)
{
const char *event, *group;
+ bool not_C_symname = true;
char buf[MAX_EVENT_NAME_LEN];
int ret;
@@ -2898,8 +2913,10 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
(strncmp(pev->point.function, "0x", 2) != 0) &&
!strisglob(pev->point.function))
event = pev->point.function;
- else
+ else {
event = tev->point.realname;
+ not_C_symname = !is_known_C_lang(tev->lang);
+ }
}
if (pev->group && !pev->sdt)
group = pev->group;
@@ -2916,7 +2933,8 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
/* Get an unused new event name */
ret = get_new_event_name(buf, sizeof(buf), event, namelist,
- tev->point.retprobe, allow_suffix);
+ tev->point.retprobe, allow_suffix,
+ not_C_symname);
if (ret < 0)
return ret;
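The effect of the new not_C_symname path is easiest to see on a scoped or mangled symbol. A standalone hedged sketch of the same replace-non-alphanumerics-with-underscore pass, lifted out of get_new_event_name() for illustration:

/* Hedged sketch mirroring the sanitizer above: collapse runs of non-alphanumeric
 * characters into single underscores, e.g. "my_module::my_func" -> "my_module_my_func". */
#include <ctype.h>

static void sanitize_event_name(char *nbase)
{
	char *s = nbase, *d = nbase;

	do {
		if (*s && !isalnum(*s)) {
			if (d != nbase && *(d - 1) != '_')
				*d++ = '_';
		} else {
			*d++ = *s;
		}
	} while (*s++);
}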
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 61a5f4ff4e9c..71905ede0207 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -58,6 +58,7 @@ struct probe_trace_event {
char *group; /* Group name */
struct probe_trace_point point; /* Trace point */
int nargs; /* Number of args */
+ int lang; /* Dwarf language code */
bool uprobes; /* uprobes only */
struct probe_trace_arg *args; /* Arguments */
};
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 7f2ee0cb43ca..1e769b68da37 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -35,6 +35,19 @@
/* Kprobe tracer basic type is up to u64 */
#define MAX_BASIC_TYPE_BITS 64
+bool is_known_C_lang(int lang)
+{
+ switch (lang) {
+ case DW_LANG_C89:
+ case DW_LANG_C:
+ case DW_LANG_C99:
+ case DW_LANG_C11:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Probe finder related functions
*/
@@ -1270,6 +1283,8 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
goto end;
}
+ tev->lang = dwarf_srclang(dwarf_diecu(sc_die, &pf->cu_die, NULL, NULL));
+
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index be7b46ea2460..dcf6cc1e1cbe 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -26,6 +26,9 @@ static inline int is_c_varname(const char *name)
#include "dwarf-aux.h"
#include "debuginfo.h"
+/* Check whether the DWARF language code is one of the known C variants */
+bool is_known_C_lang(int lang);
+
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
@@ -103,6 +106,8 @@ struct line_finder {
int found;
};
+#else
+#define is_known_C_lang(lang) (false)
#endif /* HAVE_LIBDW_SUPPORT */
#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 2096cdbaa53b..b4bc57859f73 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -13,30 +13,12 @@
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
+#include "strbuf.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
-#include "util/bpf-filter.h"
-#include "util/env.h"
-#include "util/kvm-stat.h"
-#include "util/stat.h"
-#include "util/kwork.h"
#include "util/sample.h"
-#include "util/lock-contention.h"
#include <internal/lib.h>
-#include "../builtin.h"
-
-#if PY_MAJOR_VERSION < 3
-#define _PyUnicode_FromString(arg) \
- PyString_FromString(arg)
-#define _PyUnicode_AsString(arg) \
- PyString_AsString(arg)
-#define _PyUnicode_FromFormat(...) \
- PyString_FromFormat(__VA_ARGS__)
-#define _PyLong_FromLong(arg) \
- PyInt_FromLong(arg)
-
-#else
#define _PyUnicode_FromString(arg) \
PyUnicode_FromString(arg)
@@ -44,22 +26,8 @@
PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
PyLong_FromLong(arg)
-#endif
-
-#ifndef Py_TYPE
-#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
-#endif
-
-/* Define PyVarObject_HEAD_INIT for python 2.5 */
-#ifndef PyVarObject_HEAD_INIT
-# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
-#endif
-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC initperf(void);
-#else
PyMODINIT_FUNC PyInit_perf(void);
-#endif
#define member_def(type, member, ptype, help) \
{ #member, ptype, \
@@ -89,7 +57,7 @@ struct pyrf_event {
sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
-static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
+static const char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
static PyMemberDef pyrf_mmap_event__members[] = {
sample_members
@@ -104,7 +72,7 @@ static PyMemberDef pyrf_mmap_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_mmap_event__repr(const struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
@@ -117,7 +85,7 @@ static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
ret = PyErr_NoMemory();
} else {
- ret = _PyUnicode_FromString(s);
+ ret = PyUnicode_FromString(s);
free(s);
}
return ret;
@@ -133,7 +101,7 @@ static PyTypeObject pyrf_mmap_event__type = {
.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};
-static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
+static const char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
static PyMemberDef pyrf_task_event__members[] = {
sample_members
@@ -146,9 +114,9 @@ static PyMemberDef pyrf_task_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_task_event__repr(const struct pyrf_event *pevent)
{
- return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+ return PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
"ptid: %u, time: %" PRI_lu64 "}",
pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
pevent->event.fork.pid,
@@ -168,7 +136,7 @@ static PyTypeObject pyrf_task_event__type = {
.tp_repr = (reprfunc)pyrf_task_event__repr,
};
-static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
+static const char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
static PyMemberDef pyrf_comm_event__members[] = {
sample_members
@@ -179,9 +147,9 @@ static PyMemberDef pyrf_comm_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_comm_event__repr(const struct pyrf_event *pevent)
{
- return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+ return PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
pevent->event.comm.pid,
pevent->event.comm.tid,
pevent->event.comm.comm);
@@ -197,7 +165,7 @@ static PyTypeObject pyrf_comm_event__type = {
.tp_repr = (reprfunc)pyrf_comm_event__repr,
};
-static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
+static const char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
static PyMemberDef pyrf_throttle_event__members[] = {
sample_members
@@ -208,11 +176,12 @@ static PyMemberDef pyrf_throttle_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_throttle_event__repr(const struct pyrf_event *pevent)
{
- struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
+ const struct perf_record_throttle *te = (const struct perf_record_throttle *)
+ (&pevent->event.header + 1);
- return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
+ return PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
", stream_id: %" PRI_lu64 " }",
pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
te->time, te->id, te->stream_id);
@@ -228,7 +197,7 @@ static PyTypeObject pyrf_throttle_event__type = {
.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};
-static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
+static const char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
static PyMemberDef pyrf_lost_event__members[] = {
sample_members
@@ -237,7 +206,7 @@ static PyMemberDef pyrf_lost_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_lost_event__repr(const struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
@@ -247,7 +216,7 @@ static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
pevent->event.lost.id, pevent->event.lost.lost) < 0) {
ret = PyErr_NoMemory();
} else {
- ret = _PyUnicode_FromString(s);
+ ret = PyUnicode_FromString(s);
free(s);
}
return ret;
@@ -263,7 +232,7 @@ static PyTypeObject pyrf_lost_event__type = {
.tp_repr = (reprfunc)pyrf_lost_event__repr,
};
-static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
+static const char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
static PyMemberDef pyrf_read_event__members[] = {
sample_members
@@ -272,9 +241,9 @@ static PyMemberDef pyrf_read_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_read_event__repr(const struct pyrf_event *pevent)
{
- return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
+ return PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
pevent->event.read.pid,
pevent->event.read.tid);
/*
@@ -293,7 +262,7 @@ static PyTypeObject pyrf_read_event__type = {
.tp_repr = (reprfunc)pyrf_read_event__repr,
};
-static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
+static const char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
static PyMemberDef pyrf_sample_event__members[] = {
sample_members
@@ -301,7 +270,7 @@ static PyMemberDef pyrf_sample_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_sample_event__repr(const struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
@@ -309,20 +278,20 @@ static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
if (asprintf(&s, "{ type: sample }") < 0) {
ret = PyErr_NoMemory();
} else {
- ret = _PyUnicode_FromString(s);
+ ret = PyUnicode_FromString(s);
free(s);
}
return ret;
}
#ifdef HAVE_LIBTRACEEVENT
-static bool is_tracepoint(struct pyrf_event *pevent)
+static bool is_tracepoint(const struct pyrf_event *pevent)
{
return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}
static PyObject*
-tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
+tracepoint_field(const struct pyrf_event *pe, struct tep_format_field *field)
{
struct tep_handle *pevent = field->event->tep;
void *data = pe->sample.raw_data;
@@ -343,7 +312,7 @@ tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
}
if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
- ret = _PyUnicode_FromString((char *)data + offset);
+ ret = PyUnicode_FromString((char *)data + offset);
} else {
ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
field->flags &= ~TEP_FIELD_IS_STRING;
@@ -411,7 +380,7 @@ static PyTypeObject pyrf_sample_event__type = {
.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};
-static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
+static const char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
static PyMemberDef pyrf_context_switch_event__members[] = {
sample_members
@@ -421,7 +390,7 @@ static PyMemberDef pyrf_context_switch_event__members[] = {
{ .name = NULL, },
};
-static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
+static PyObject *pyrf_context_switch_event__repr(const struct pyrf_event *pevent)
{
PyObject *ret;
char *s;
@@ -432,7 +401,7 @@ static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
!!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
ret = PyErr_NoMemory();
} else {
- ret = _PyUnicode_FromString(s);
+ ret = PyUnicode_FromString(s);
free(s);
}
return ret;
@@ -501,7 +470,7 @@ static PyTypeObject *pyrf_event__type[] = {
[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};
-static PyObject *pyrf_event__new(union perf_event *event)
+static PyObject *pyrf_event__new(const union perf_event *event)
{
struct pyrf_event *pevent;
PyTypeObject *ptype;
@@ -569,7 +538,7 @@ static PySequenceMethods pyrf_cpu_map__sequence_methods = {
.sq_item = pyrf_cpu_map__item,
};
-static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
+static const char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
static PyTypeObject pyrf_cpu_map__type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -638,7 +607,7 @@ static PySequenceMethods pyrf_thread_map__sequence_methods = {
.sq_item = pyrf_thread_map__item,
};
-static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
+static const char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
static PyTypeObject pyrf_thread_map__type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -812,6 +781,17 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
return Py_None;
}
+static PyObject *pyrf_evsel__str(PyObject *self)
+{
+ struct pyrf_evsel *pevsel = (void *)self;
+ struct evsel *evsel = &pevsel->evsel;
+
+ if (!evsel->pmu)
+ return PyUnicode_FromFormat("evsel(%s)", evsel__name(evsel));
+
+ return PyUnicode_FromFormat("evsel(%s/%s/)", evsel->pmu->name, evsel__name(evsel));
+}
+
static PyMethodDef pyrf_evsel__methods[] = {
{
.ml_name = "open",
@@ -822,7 +802,7 @@ static PyMethodDef pyrf_evsel__methods[] = {
{ .ml_name = NULL, }
};
-static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
+static const char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
static PyTypeObject pyrf_evsel__type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -833,6 +813,8 @@ static PyTypeObject pyrf_evsel__type = {
.tp_doc = pyrf_evsel__doc,
.tp_methods = pyrf_evsel__methods,
.tp_init = (initproc)pyrf_evsel__init,
+ .tp_str = pyrf_evsel__str,
+ .tp_repr = pyrf_evsel__str,
};
static int pyrf_evsel__setup_types(void)
@@ -918,17 +900,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
for (i = 0; i < evlist->core.pollfd.nr; ++i) {
PyObject *file;
-#if PY_MAJOR_VERSION < 3
- FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
-
- if (fp == NULL)
- goto free_list;
-
- file = PyFile_FromFile(fp, "perf", "r", NULL);
-#else
file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
NULL, NULL, NULL, 0);
-#endif
if (file == NULL)
goto free_list;
@@ -1098,8 +1071,10 @@ static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
struct pyrf_evlist *pevlist = (void *)obj;
struct evsel *pos;
- if (i >= pevlist->evlist.core.nr_entries)
+ if (i >= pevlist->evlist.core.nr_entries) {
+ PyErr_SetString(PyExc_IndexError, "Index out of range");
return NULL;
+ }
evlist__for_each_entry(&pevlist->evlist, pos) {
if (i-- == 0)
@@ -1109,12 +1084,36 @@ static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}
+static PyObject *pyrf_evlist__str(PyObject *self)
+{
+ struct pyrf_evlist *pevlist = (void *)self;
+ struct evsel *pos;
+ struct strbuf sb = STRBUF_INIT;
+ bool first = true;
+ PyObject *result;
+
+ strbuf_addstr(&sb, "evlist([");
+ evlist__for_each_entry(&pevlist->evlist, pos) {
+ if (!first)
+ strbuf_addch(&sb, ',');
+ if (!pos->pmu)
+ strbuf_addstr(&sb, evsel__name(pos));
+ else
+ strbuf_addf(&sb, "%s/%s/", pos->pmu->name, evsel__name(pos));
+ first = false;
+ }
+ strbuf_addstr(&sb, "])");
+ result = PyUnicode_FromString(sb.buf);
+ strbuf_release(&sb);
+ return result;
+}
+
static PySequenceMethods pyrf_evlist__sequence_methods = {
.sq_length = pyrf_evlist__length,
.sq_item = pyrf_evlist__item,
};
-static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
+static const char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
static PyTypeObject pyrf_evlist__type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -1126,6 +1125,8 @@ static PyTypeObject pyrf_evlist__type = {
.tp_doc = pyrf_evlist__doc,
.tp_methods = pyrf_evlist__methods,
.tp_init = (initproc)pyrf_evlist__init,
+ .tp_repr = pyrf_evlist__str,
+ .tp_str = pyrf_evlist__str,
};
static int pyrf_evlist__setup_types(void)
@@ -1136,10 +1137,12 @@ static int pyrf_evlist__setup_types(void)
#define PERF_CONST(name) { #name, PERF_##name }
-static struct {
+struct perf_constant {
const char *name;
int value;
-} perf__constants[] = {
+};
+
+static const struct perf_constant perf__constants[] = {
PERF_CONST(TYPE_HARDWARE),
PERF_CONST(TYPE_SOFTWARE),
PERF_CONST(TYPE_TRACEPOINT),
@@ -1234,12 +1237,66 @@ static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
tp_format = trace_event__tp_format(sys, name);
if (IS_ERR(tp_format))
- return _PyLong_FromLong(-1);
+ return PyLong_FromLong(-1);
- return _PyLong_FromLong(tp_format->id);
+ return PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}
+static PyObject *pyrf_evsel__from_evsel(struct evsel *evsel)
+{
+ struct pyrf_evsel *pevsel = PyObject_New(struct pyrf_evsel, &pyrf_evsel__type);
+
+ if (!pevsel)
+ return NULL;
+
+ memset(&pevsel->evsel, 0, sizeof(pevsel->evsel));
+ evsel__init(&pevsel->evsel, &evsel->core.attr, evsel->core.idx);
+
+ evsel__clone(&pevsel->evsel, evsel);
+ return (PyObject *)pevsel;
+}
+
+static PyObject *pyrf_evlist__from_evlist(struct evlist *evlist)
+{
+ struct pyrf_evlist *pevlist = PyObject_New(struct pyrf_evlist, &pyrf_evlist__type);
+ struct evsel *pos;
+
+ if (!pevlist)
+ return NULL;
+
+ memset(&pevlist->evlist, 0, sizeof(pevlist->evlist));
+ evlist__init(&pevlist->evlist, evlist->core.all_cpus, evlist->core.threads);
+ evlist__for_each_entry(evlist, pos) {
+ struct pyrf_evsel *pevsel = (void *)pyrf_evsel__from_evsel(pos);
+
+ evlist__add(&pevlist->evlist, &pevsel->evsel);
+ }
+ return (PyObject *)pevlist;
+}
+
+static PyObject *pyrf__parse_events(PyObject *self, PyObject *args)
+{
+ const char *input;
+ struct evlist evlist = {};
+ struct parse_events_error err;
+ PyObject *result;
+
+ if (!PyArg_ParseTuple(args, "s", &input))
+ return NULL;
+
+ parse_events_error__init(&err);
+ evlist__init(&evlist, NULL, NULL);
+ if (parse_events(&evlist, input, &err)) {
+ parse_events_error__print(&err, input);
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ result = pyrf_evlist__from_evlist(&evlist);
+ evlist__exit(&evlist);
+ return result;
+}
+
static PyMethodDef perf__methods[] = {
{
.ml_name = "tracepoint",
@@ -1247,21 +1304,20 @@ static PyMethodDef perf__methods[] = {
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("Get tracepoint config.")
},
+ {
+ .ml_name = "parse_events",
+ .ml_meth = (PyCFunction) pyrf__parse_events,
+ .ml_flags = METH_VARARGS,
+ .ml_doc = PyDoc_STR("Parse a string of events and return an evlist.")
+ },
{ .ml_name = NULL, }
};
-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC initperf(void)
-#else
PyMODINIT_FUNC PyInit_perf(void)
-#endif
{
PyObject *obj;
int i;
PyObject *dict;
-#if PY_MAJOR_VERSION < 3
- PyObject *module = Py_InitModule("perf", perf__methods);
-#else
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"perf", /* m_name */
@@ -1274,7 +1330,6 @@ PyMODINIT_FUNC PyInit_perf(void)
NULL, /* m_free */
};
PyObject *module = PyModule_Create(&moduledef);
-#endif
if (module == NULL ||
pyrf_event__setup_types() < 0 ||
@@ -1282,11 +1337,7 @@ PyMODINIT_FUNC PyInit_perf(void)
pyrf_evsel__setup_types() < 0 ||
pyrf_thread_map__setup_types() < 0 ||
pyrf_cpu_map__setup_types() < 0)
-#if PY_MAJOR_VERSION < 3
- return;
-#else
return module;
-#endif
/* The page_size is placed in util object. */
page_size = sysconf(_SC_PAGE_SIZE);
@@ -1335,7 +1386,7 @@ PyMODINIT_FUNC PyInit_perf(void)
goto error;
for (i = 0; perf__constants[i].name != NULL; i++) {
- obj = _PyLong_FromLong(perf__constants[i].value);
+ obj = PyLong_FromLong(perf__constants[i].value);
if (obj == NULL)
goto error;
PyDict_SetItemString(dict, perf__constants[i].name, obj);
@@ -1345,109 +1396,5 @@ PyMODINIT_FUNC PyInit_perf(void)
error:
if (PyErr_Occurred())
PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
-#if PY_MAJOR_VERSION >= 3
return module;
-#endif
-}
-
-
-/* The following are stubs to avoid dragging in builtin-* objects. */
-/* TODO: move the code out of the builtin-* file into util. */
-
-unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
-
-#ifdef HAVE_KVM_STAT_SUPPORT
-bool kvm_entry_event(struct evsel *evsel __maybe_unused)
-{
- return false;
-}
-
-bool kvm_exit_event(struct evsel *evsel __maybe_unused)
-{
- return false;
-}
-
-bool exit_event_begin(struct evsel *evsel __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return false;
-}
-
-bool exit_event_end(struct evsel *evsel __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return false;
-}
-
-void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
- struct event_key *key __maybe_unused,
- char *decode __maybe_unused)
-{
-}
-#endif // HAVE_KVM_STAT_SUPPORT
-
-int find_scripts(char **scripts_array __maybe_unused, char **scripts_path_array __maybe_unused,
- int num __maybe_unused, int pathlen __maybe_unused)
-{
- return -1;
-}
-
-void perf_stat__set_no_csv_summary(int set __maybe_unused)
-{
-}
-
-void perf_stat__set_big_num(int set __maybe_unused)
-{
-}
-
-int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
-{
- return -1;
-}
-
-arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
-{
- return NULL;
-}
-
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
- struct kwork_class *class __maybe_unused,
- struct kwork_work *key __maybe_unused)
-{
- return NULL;
-}
-
-void script_fetch_insn(struct perf_sample *sample __maybe_unused,
- struct thread *thread __maybe_unused,
- struct machine *machine __maybe_unused)
-{
-}
-
-int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
- size_t sz __maybe_unused)
-{
- return -1;
-}
-
-bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
-{
- return false;
-}
-
-struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
-{
- return NULL;
-}
-
-struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
- int flags __maybe_unused)
-{
- return NULL;
-}
-
-int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
-{
- return -1;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 85b7f188f729..e261a57b87d4 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -344,7 +344,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
struct addr_location *al)
{
struct thread *thread = al->thread;
- struct tep_event *event = evsel->tp_format;
+ struct tep_event *event;
struct tep_format_field *field;
static char handler[256];
unsigned long long val;
@@ -362,6 +362,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
return;
+ event = evsel__tp_format(evsel);
if (!event) {
pr_debug("ug! no event found for type %" PRIu64, (u64)evsel->core.attr.config);
return;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 8bdae066e839..b1b5e94537e4 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -58,22 +58,6 @@
#include "mem-events.h"
#include "util/perf_regs.h"
-#if PY_MAJOR_VERSION < 3
-#define _PyUnicode_FromString(arg) \
- PyString_FromString(arg)
-#define _PyUnicode_FromStringAndSize(arg1, arg2) \
- PyString_FromStringAndSize((arg1), (arg2))
-#define _PyBytes_FromStringAndSize(arg1, arg2) \
- PyString_FromStringAndSize((arg1), (arg2))
-#define _PyLong_FromLong(arg) \
- PyInt_FromLong(arg)
-#define _PyLong_AsLong(arg) \
- PyInt_AsLong(arg)
-#define _PyCapsule_New(arg1, arg2, arg3) \
- PyCObject_FromVoidPtr((arg1), (arg2))
-
-PyMODINIT_FUNC initperf_trace_context(void);
-#else
#define _PyUnicode_FromString(arg) \
PyUnicode_FromString(arg)
#define _PyUnicode_FromStringAndSize(arg1, arg2) \
@@ -88,7 +72,6 @@ PyMODINIT_FUNC initperf_trace_context(void);
PyCapsule_New((arg1), (arg2), (arg3))
PyMODINIT_FUNC PyInit_perf_trace_context(void);
-#endif
#ifdef HAVE_LIBTRACEEVENT
#define TRACE_EVENT_TYPE_MAX \
@@ -181,17 +164,7 @@ static int get_argument_count(PyObject *handler)
{
int arg_count = 0;
- /*
- * The attribute for the code object is func_code in Python 2,
- * whereas it is __code__ in Python 3.0+.
- */
- PyObject *code_obj = PyObject_GetAttrString(handler,
- "func_code");
- if (PyErr_Occurred()) {
- PyErr_Clear();
- code_obj = PyObject_GetAttrString(handler,
- "__code__");
- }
+ PyObject *code_obj = PyObject_GetAttrString(handler, "__code__");
PyErr_Clear();
if (code_obj) {
PyObject *arg_count_obj = PyObject_GetAttrString(code_obj,
@@ -949,7 +922,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
struct addr_location *al,
struct addr_location *addr_al)
{
- struct tep_event *event = evsel->tp_format;
+ struct tep_event *event;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
PyObject *dict = NULL, *all_entries_dict = NULL;
static char handler_name[256];
@@ -966,6 +939,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
bitmap_zero(events_defined, TRACE_EVENT_TYPE_MAX);
+ event = evsel__tp_format(evsel);
if (!event) {
snprintf(handler_name, sizeof(handler_name),
"ug! no event found for type %" PRIu64, (u64)evsel->core.attr.config);
@@ -1902,12 +1876,6 @@ static void set_table_handlers(struct tables *tables)
tables->synth_handler = get_handler("synth_data");
}
-#if PY_MAJOR_VERSION < 3
-static void _free_command_line(const char **command_line, int num)
-{
- free(command_line);
-}
-#else
static void _free_command_line(wchar_t **command_line, int num)
{
int i;
@@ -1915,7 +1883,6 @@ static void _free_command_line(wchar_t **command_line, int num)
PyMem_RawFree(command_line[i]);
free(command_line);
}
-#endif
/*
@@ -1925,30 +1892,12 @@ static int python_start_script(const char *script, int argc, const char **argv,
struct perf_session *session)
{
struct tables *tables = &tables_global;
-#if PY_MAJOR_VERSION < 3
- const char **command_line;
-#else
wchar_t **command_line;
-#endif
- /*
- * Use a non-const name variable to cope with python 2.6's
- * PyImport_AppendInittab prototype
- */
- char buf[PATH_MAX], name[19] = "perf_trace_context";
+ char buf[PATH_MAX];
int i, err = 0;
FILE *fp;
scripting_context->session = session;
-#if PY_MAJOR_VERSION < 3
- command_line = malloc((argc + 1) * sizeof(const char *));
- if (!command_line)
- return -1;
-
- command_line[0] = script;
- for (i = 1; i < argc + 1; i++)
- command_line[i] = argv[i - 1];
- PyImport_AppendInittab(name, initperf_trace_context);
-#else
command_line = malloc((argc + 1) * sizeof(wchar_t *));
if (!command_line)
return -1;
@@ -1956,15 +1905,10 @@ static int python_start_script(const char *script, int argc, const char **argv,
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
- PyImport_AppendInittab(name, PyInit_perf_trace_context);
-#endif
+ PyImport_AppendInittab("perf_trace_context", PyInit_perf_trace_context);
Py_Initialize();
-#if PY_MAJOR_VERSION < 3
- PySys_SetArgv(argc + 1, (char **)command_line);
-#else
PySys_SetArgv(argc + 1, command_line);
-#endif
fp = fopen(script, "r");
if (!fp) {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 507e6cba9545..c06e3020a976 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -37,6 +37,7 @@
#include "arch/common.h"
#include "units.h"
#include "annotate.h"
+#include "perf.h"
#include <internal/lib.h>
static int perf_session__deliver_event(struct perf_session *session,
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 9dd60c7869a2..3dd33721823f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1038,17 +1038,19 @@ static char *get_trace_output(struct hist_entry *he)
.data = he->raw_data,
.size = he->raw_size,
};
+ struct tep_event *tp_format;
evsel = hists_to_evsel(he->hists);
trace_seq_init(&seq);
- if (symbol_conf.raw_trace) {
- tep_print_fields(&seq, he->raw_data, he->raw_size,
- evsel->tp_format);
- } else {
- tep_print_event(evsel->tp_format->tep,
- &seq, &rec, "%s", TEP_PRINT_INFO);
+ tp_format = evsel__tp_format(evsel);
+ if (tp_format) {
+ if (symbol_conf.raw_trace)
+ tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
+ else
+ tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
}
+
/*
* Trim the buffer, it starts at 4KB and we're not going to
* add anything more to this buffer.
@@ -3293,9 +3295,8 @@ static int __dynamic_dimension__add(struct evsel *evsel,
static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
int ret;
- struct tep_format_field *field;
-
- field = evsel->tp_format->format.fields;
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
while (field) {
ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
@@ -3328,13 +3329,19 @@ static int add_all_matching_fields(struct evlist *evlist,
{
int ret = -ESRCH;
struct evsel *evsel;
- struct tep_format_field *field;
evlist__for_each_entry(evlist, evsel) {
+ struct tep_event *tp_format;
+ struct tep_format_field *field;
+
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
continue;
- field = tep_find_any_field(evsel->tp_format, field_name);
+ tp_format = evsel__tp_format(evsel);
+ if (tp_format == NULL)
+ continue;
+
+ field = tep_find_any_field(tp_format, field_name);
if (field == NULL)
continue;
@@ -3416,7 +3423,9 @@ static int add_dynamic_entry(struct evlist *evlist, const char *tok,
if (!strcmp(field_name, "*")) {
ret = add_evsel_fields(evsel, raw_trace, level);
} else {
- struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+ struct tep_format_field *field =
+ tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
if (field == NULL) {
pr_debug("Cannot find event field for %s.%s\n",
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 53dcdf07f5a2..ba79f73e1cf5 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -114,23 +114,59 @@ static void print_running_csv(struct perf_stat_config *config, u64 run, u64 ena)
fprintf(config->output, "%s%" PRIu64 "%s%.2f",
config->csv_sep, run, config->csv_sep, enabled_percent);
}
+struct outstate {
+ /* Std mode: insert a newline before the next metric */
+ bool newline;
+ /* JSON mode: true until the first field on the line is printed, so json_out() knows whether a leading comma is needed */
+ bool first;
+ /* Number of CSV separators to emit to pad the line when not all fields are printed */
+ int csv_col_pad;
+
+ /*
+ * The following don't track state across fields, but are here as a shortcut to
+ * pass data to the print functions. The alternative would be to update the
+ * function signatures of the entire print stack to pass them through.
+ */
+ /* Place to output to */
+ FILE * const fh;
+ /* Lines are timestamped in --interval-print mode */
+ char timestamp[64];
+ /* Num items aggregated in current line. See struct perf_stat_aggr.nr */
+ int aggr_nr;
+ /* Core/socket/die etc ID for the current line */
+ struct aggr_cpu_id id;
+ /* Event for current line */
+ struct evsel *evsel;
+ /* Cgroup for current line */
+ struct cgroup *cgrp;
+};
+
+static const char *json_sep(struct outstate *os)
+{
+ const char *sep = os->first ? "" : ", ";
+
+ os->first = false;
+ return sep;
+}
+
+#define json_out(os, format, ...) fprintf((os)->fh, "%s" format, json_sep(os), ##__VA_ARGS__)
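/* json_sep()/json_out() centralize the comma placement that the removed per-field
 * ", " suffixes handled by hand. A hedged sketch of the resulting behaviour:
 * with os->first = true, three consecutive calls
 *   json_out(os, "\"a\" : %d", 1);
 *   json_out(os, "\"b\" : %d", 2);
 *   json_out(os, "\"c\" : %d", 3);
 * print exactly:  "a" : 1, "b" : 2, "c" : 3
 * so callers no longer need to remember a trailing separator on each field.
 */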
-static void print_running_json(struct perf_stat_config *config, u64 run, u64 ena)
+static void print_running_json(struct outstate *os, u64 run, u64 ena)
{
double enabled_percent = 100;
if (run != ena)
enabled_percent = 100 * run / ena;
- fprintf(config->output, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f, ",
- run, enabled_percent);
+ json_out(os, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f",
+ run, enabled_percent);
}
-static void print_running(struct perf_stat_config *config,
+static void print_running(struct perf_stat_config *config, struct outstate *os,
u64 run, u64 ena, bool before_metric)
{
if (config->json_output) {
if (before_metric)
- print_running_json(config, run, ena);
+ print_running_json(os, run, ena);
} else if (config->csv_output) {
if (before_metric)
print_running_csv(config, run, ena);
@@ -153,20 +189,20 @@ static void print_noise_pct_csv(struct perf_stat_config *config,
fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
}
-static void print_noise_pct_json(struct perf_stat_config *config,
+static void print_noise_pct_json(struct outstate *os,
double pct)
{
- fprintf(config->output, "\"variance\" : %.2f, ", pct);
+ json_out(os, "\"variance\" : %.2f", pct);
}
-static void print_noise_pct(struct perf_stat_config *config,
+static void print_noise_pct(struct perf_stat_config *config, struct outstate *os,
double total, double avg, bool before_metric)
{
double pct = rel_stddev_stats(total, avg);
if (config->json_output) {
if (before_metric)
- print_noise_pct_json(config, pct);
+ print_noise_pct_json(os, pct);
} else if (config->csv_output) {
if (before_metric)
print_noise_pct_csv(config, pct);
@@ -176,7 +212,7 @@ static void print_noise_pct(struct perf_stat_config *config,
}
}
-static void print_noise(struct perf_stat_config *config,
+static void print_noise(struct perf_stat_config *config, struct outstate *os,
struct evsel *evsel, double avg, bool before_metric)
{
struct perf_stat_evsel *ps;
@@ -185,7 +221,7 @@ static void print_noise(struct perf_stat_config *config,
return;
ps = evsel->stats;
- print_noise_pct(config, stddev_stats(&ps->res_stats), avg, before_metric);
+ print_noise_pct(config, os, stddev_stats(&ps->res_stats), avg, before_metric);
}
static void print_cgroup_std(struct perf_stat_config *config, const char *cgrp_name)
@@ -198,18 +234,19 @@ static void print_cgroup_csv(struct perf_stat_config *config, const char *cgrp_n
fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
}
-static void print_cgroup_json(struct perf_stat_config *config, const char *cgrp_name)
+static void print_cgroup_json(struct outstate *os, const char *cgrp_name)
{
- fprintf(config->output, "\"cgroup\" : \"%s\", ", cgrp_name);
+ json_out(os, "\"cgroup\" : \"%s\"", cgrp_name);
}
-static void print_cgroup(struct perf_stat_config *config, struct cgroup *cgrp)
+static void print_cgroup(struct perf_stat_config *config, struct outstate *os,
+ struct cgroup *cgrp)
{
if (nr_cgroups || config->cgroup_list) {
const char *cgrp_name = cgrp ? cgrp->name : "";
if (config->json_output)
- print_cgroup_json(config, cgrp_name);
+ print_cgroup_json(os, cgrp_name);
else if (config->csv_output)
print_cgroup_csv(config, cgrp_name);
else
@@ -324,47 +361,45 @@ static void print_aggr_id_csv(struct perf_stat_config *config,
}
}
-static void print_aggr_id_json(struct perf_stat_config *config,
+static void print_aggr_id_json(struct perf_stat_config *config, struct outstate *os,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
- FILE *output = config->output;
-
switch (config->aggr_mode) {
case AGGR_CORE:
- fprintf(output, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d",
id.socket, id.die, id.core, aggr_nr);
break;
case AGGR_CACHE:
- fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d",
id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
break;
case AGGR_CLUSTER:
- fprintf(output, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d",
id.socket, id.die, id.cluster, aggr_nr);
break;
case AGGR_DIE:
- fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d",
id.socket, id.die, aggr_nr);
break;
case AGGR_SOCKET:
- fprintf(output, "\"socket\" : \"S%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"socket\" : \"S%d\", \"aggregate-number\" : %d",
id.socket, aggr_nr);
break;
case AGGR_NODE:
- fprintf(output, "\"node\" : \"N%d\", \"aggregate-number\" : %d, ",
+ json_out(os, "\"node\" : \"N%d\", \"aggregate-number\" : %d",
id.node, aggr_nr);
break;
case AGGR_NONE:
if (evsel->percore && !config->percore_show_thread) {
- fprintf(output, "\"core\" : \"S%d-D%d-C%d\"",
+ json_out(os, "\"core\" : \"S%d-D%d-C%d\"",
id.socket, id.die, id.core);
} else if (id.cpu.cpu > -1) {
- fprintf(output, "\"cpu\" : \"%d\", ",
+ json_out(os, "\"cpu\" : \"%d\"",
id.cpu.cpu);
}
break;
case AGGR_THREAD:
- fprintf(output, "\"thread\" : \"%s-%d\", ",
+ json_out(os, "\"thread\" : \"%s-%d\"",
perf_thread_map__comm(evsel->core.threads, id.thread_idx),
perf_thread_map__pid(evsel->core.threads, id.thread_idx));
break;
@@ -376,29 +411,17 @@ static void print_aggr_id_json(struct perf_stat_config *config,
}
}
-static void aggr_printout(struct perf_stat_config *config,
+static void aggr_printout(struct perf_stat_config *config, struct outstate *os,
struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
{
if (config->json_output)
- print_aggr_id_json(config, evsel, id, aggr_nr);
+ print_aggr_id_json(config, os, evsel, id, aggr_nr);
else if (config->csv_output)
print_aggr_id_csv(config, evsel, id, aggr_nr);
else
print_aggr_id_std(config, evsel, id, aggr_nr);
}
-struct outstate {
- FILE *fh;
- bool newline;
- bool first;
- const char *prefix;
- int nfields;
- int aggr_nr;
- struct aggr_cpu_id id;
- struct evsel *evsel;
- struct cgroup *cgrp;
-};
-
static void new_line_std(struct perf_stat_config *config __maybe_unused,
void *ctx)
{
@@ -411,9 +434,9 @@ static inline void __new_line_std_csv(struct perf_stat_config *config,
struct outstate *os)
{
fputc('\n', os->fh);
- if (os->prefix)
- fputs(os->prefix, os->fh);
- aggr_printout(config, os->evsel, os->id, os->aggr_nr);
+ if (config->interval)
+ fputs(os->timestamp, os->fh);
+ aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
}
static inline void __new_line_std(struct outstate *os)
@@ -464,7 +487,7 @@ static void new_line_csv(struct perf_stat_config *config, void *ctx)
int i;
__new_line_std_csv(config, os);
- for (i = 0; i < os->nfields; i++)
+ for (i = 0; i < os->csv_col_pad; i++)
fputs(config->csv_sep, os->fh);
}
@@ -499,9 +522,9 @@ static void print_metric_json(struct perf_stat_config *config __maybe_unused,
FILE *out = os->fh;
if (unit) {
- fprintf(out, "\"metric-value\" : \"%f\", \"metric-unit\" : \"%s\"", val, unit);
+ json_out(os, "\"metric-value\" : \"%f\", \"metric-unit\" : \"%s\"", val, unit);
if (thresh != METRIC_THRESHOLD_UNKNOWN) {
- fprintf(out, ", \"metric-threshold\" : \"%s\"",
+ json_out(os, "\"metric-threshold\" : \"%s\"",
metric_threshold_classify__str(thresh));
}
}
@@ -514,9 +537,11 @@ static void new_line_json(struct perf_stat_config *config, void *ctx)
struct outstate *os = ctx;
fputs("\n{", os->fh);
- if (os->prefix)
- fprintf(os->fh, "%s", os->prefix);
- aggr_printout(config, os->evsel, os->id, os->aggr_nr);
+ os->first = true;
+ if (config->interval)
+ json_out(os, "%s", os->timestamp);
+
+ aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
}
static void print_metricgroup_header_json(struct perf_stat_config *config,
@@ -526,7 +551,7 @@ static void print_metricgroup_header_json(struct perf_stat_config *config,
if (!metricgroup_name)
return;
- fprintf(config->output, "\"metricgroup\" : \"%s\"}", metricgroup_name);
+ json_out((struct outstate *) ctx, "\"metricgroup\" : \"%s\"}", metricgroup_name);
new_line_json(config, ctx);
}
@@ -539,12 +564,12 @@ static void print_metricgroup_header_csv(struct perf_stat_config *config,
if (!metricgroup_name) {
/* Leave space for running and enabling */
- for (i = 0; i < os->nfields - 2; i++)
+ for (i = 0; i < os->csv_col_pad - 2; i++)
fputs(config->csv_sep, os->fh);
return;
}
- for (i = 0; i < os->nfields; i++)
+ for (i = 0; i < os->csv_col_pad; i++)
fputs(config->csv_sep, os->fh);
fprintf(config->output, "%s", metricgroup_name);
new_line_csv(config, ctx);
@@ -644,7 +669,6 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unused,
const char *unit, double val)
{
struct outstate *os = ctx;
- FILE *out = os->fh;
char buf[64], *ends;
char tbuf[1024];
const char *vals;
@@ -661,13 +685,7 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unused,
*ends = 0;
if (!vals[0])
vals = "none";
- fprintf(out, "%s\"%s\" : \"%s\"", os->first ? "" : ", ", unit, vals);
- os->first = false;
-}
-
-static void new_line_metric(struct perf_stat_config *config __maybe_unused,
- void *ctx __maybe_unused)
-{
+ json_out(os, "\"%s\" : \"%s\"", unit, vals);
}
static void print_metric_header(struct perf_stat_config *config,
@@ -743,28 +761,27 @@ static void print_counter_value_csv(struct perf_stat_config *config,
fprintf(output, "%s", evsel__name(evsel));
}
-static void print_counter_value_json(struct perf_stat_config *config,
+static void print_counter_value_json(struct outstate *os,
struct evsel *evsel, double avg, bool ok)
{
- FILE *output = config->output;
const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
if (ok)
- fprintf(output, "\"counter-value\" : \"%f\", ", avg);
+ json_out(os, "\"counter-value\" : \"%f\"", avg);
else
- fprintf(output, "\"counter-value\" : \"%s\", ", bad_count);
+ json_out(os, "\"counter-value\" : \"%s\"", bad_count);
if (evsel->unit)
- fprintf(output, "\"unit\" : \"%s\", ", evsel->unit);
+ json_out(os, "\"unit\" : \"%s\"", evsel->unit);
- fprintf(output, "\"event\" : \"%s\", ", evsel__name(evsel));
+ json_out(os, "\"event\" : \"%s\"", evsel__name(evsel));
}
-static void print_counter_value(struct perf_stat_config *config,
+static void print_counter_value(struct perf_stat_config *config, struct outstate *os,
struct evsel *evsel, double avg, bool ok)
{
if (config->json_output)
- print_counter_value_json(config, evsel, avg, ok);
+ print_counter_value_json(os, evsel, avg, ok);
else if (config->csv_output)
print_counter_value_csv(config, evsel, avg, ok);
else
@@ -772,12 +789,13 @@ static void print_counter_value(struct perf_stat_config *config,
}
static void abs_printout(struct perf_stat_config *config,
+ struct outstate *os,
struct aggr_cpu_id id, int aggr_nr,
struct evsel *evsel, double avg, bool ok)
{
- aggr_printout(config, evsel, id, aggr_nr);
- print_counter_value(config, evsel, avg, ok);
- print_cgroup(config, evsel->cgrp);
+ aggr_printout(config, os, evsel, id, aggr_nr);
+ print_counter_value(config, os, evsel, avg, ok);
+ print_cgroup(config, os, evsel->cgrp);
}
static bool is_mixed_hw_group(struct evsel *counter)
@@ -831,22 +849,23 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
if (config->csv_output) {
pm = config->metric_only ? print_metric_only_csv : print_metric_csv;
- nl = config->metric_only ? new_line_metric : new_line_csv;
+ nl = config->metric_only ? NULL : new_line_csv;
pmh = print_metricgroup_header_csv;
- os->nfields = 4 + (counter->cgrp ? 1 : 0);
+ os->csv_col_pad = 4 + (counter->cgrp ? 1 : 0);
} else if (config->json_output) {
pm = config->metric_only ? print_metric_only_json : print_metric_json;
- nl = config->metric_only ? new_line_metric : new_line_json;
+ nl = config->metric_only ? NULL : new_line_json;
pmh = print_metricgroup_header_json;
} else {
pm = config->metric_only ? print_metric_only : print_metric_std;
- nl = config->metric_only ? new_line_metric : new_line_std;
+ nl = config->metric_only ? NULL : new_line_std;
pmh = print_metricgroup_header_std;
}
if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
if (config->metric_only) {
- pm(config, os, METRIC_THRESHOLD_UNKNOWN, "", "", 0);
+ pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL,
+ /*unit=*/NULL, /*val=*/0);
return;
}
@@ -868,17 +887,17 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
out.force_header = false;
if (!config->metric_only && !counter->default_metricgroup) {
- abs_printout(config, os->id, os->aggr_nr, counter, uval, ok);
+ abs_printout(config, os, os->id, os->aggr_nr, counter, uval, ok);
- print_noise(config, counter, noise, /*before_metric=*/true);
- print_running(config, run, ena, /*before_metric=*/true);
+ print_noise(config, os, counter, noise, /*before_metric=*/true);
+ print_running(config, os, run, ena, /*before_metric=*/true);
}
if (ok) {
if (!config->metric_only && counter->default_metricgroup) {
void *from = NULL;
- aggr_printout(config, os->evsel, os->id, os->aggr_nr);
+ aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
/* Print out all the metricgroup with the same metric event. */
do {
int num = 0;
@@ -891,8 +910,8 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
__new_line_std_csv(config, os);
}
- print_noise(config, counter, noise, /*before_metric=*/true);
- print_running(config, run, ena, /*before_metric=*/true);
+ print_noise(config, os, counter, noise, /*before_metric=*/true);
+ print_running(config, os, run, ena, /*before_metric=*/true);
from = perf_stat__print_shadow_stats_metricgroup(config, counter, aggr_idx,
&num, from, &out,
&config->metric_events);
@@ -901,12 +920,12 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
perf_stat__print_shadow_stats(config, counter, uval, aggr_idx,
&out, &config->metric_events);
} else {
- pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL, /*unit=*/"", /*val=*/0);
+ pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL, /*unit=*/NULL, /*val=*/0);
}
if (!config->metric_only) {
- print_noise(config, counter, noise, /*before_metric=*/false);
- print_running(config, run, ena, /*before_metric=*/false);
+ print_noise(config, os, counter, noise, /*before_metric=*/false);
+ print_running(config, os, run, ena, /*before_metric=*/false);
}
}
@@ -1083,12 +1102,17 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
return;
if (!metric_only) {
- if (config->json_output)
+ if (config->json_output) {
+ os->first = true;
fputc('{', output);
- if (os->prefix)
- fprintf(output, "%s", os->prefix);
- else if (config->summary && config->csv_output &&
- !config->no_csv_summary && !config->interval)
+ }
+ if (config->interval) {
+ if (config->json_output)
+ json_out(os, "%s", os->timestamp);
+ else
+ fprintf(output, "%s", os->timestamp);
+ } else if (config->summary && config->csv_output &&
+ !config->no_csv_summary)
fprintf(output, "%s%s", "summary", config->csv_sep);
}
@@ -1114,15 +1138,19 @@ static void print_metric_begin(struct perf_stat_config *config,
if (config->json_output)
fputc('{', config->output);
- if (os->prefix)
- fprintf(config->output, "%s", os->prefix);
+ if (config->interval) {
+ if (config->json_output)
+ json_out(os, "%s", os->timestamp);
+ else
+ fprintf(config->output, "%s", os->timestamp);
+ }
evsel = evlist__first(evlist);
id = config->aggr_map->map[aggr_idx];
aggr = &evsel->stats->aggr[aggr_idx];
- aggr_printout(config, evsel, id, aggr->nr);
+ aggr_printout(config, os, evsel, id, aggr->nr);
- print_cgroup(config, os->cgrp ? : evsel->cgrp);
+ print_cgroup(config, os, os->cgrp ? : evsel->cgrp);
}
static void print_metric_end(struct perf_stat_config *config, struct outstate *os)
@@ -1301,7 +1329,7 @@ static void print_metric_headers(struct perf_stat_config *config,
struct perf_stat_output_ctx out = {
.ctx = &os,
.print_metric = print_metric_header,
- .new_line = new_line_metric,
+ .new_line = NULL,
.force_header = true,
};
@@ -1336,20 +1364,20 @@ static void print_metric_headers(struct perf_stat_config *config,
fputc('\n', config->output);
}
-static void prepare_interval(struct perf_stat_config *config,
- char *prefix, size_t len, struct timespec *ts)
+static void prepare_timestamp(struct perf_stat_config *config,
+ struct outstate *os, struct timespec *ts)
{
if (config->iostat_run)
return;
if (config->json_output)
- scnprintf(prefix, len, "\"interval\" : %lu.%09lu, ",
+ scnprintf(os->timestamp, sizeof(os->timestamp), "\"interval\" : %lu.%09lu",
(unsigned long) ts->tv_sec, ts->tv_nsec);
else if (config->csv_output)
- scnprintf(prefix, len, "%lu.%09lu%s",
+ scnprintf(os->timestamp, sizeof(os->timestamp), "%lu.%09lu%s",
(unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
else
- scnprintf(prefix, len, "%6lu.%09lu ",
+ scnprintf(os->timestamp, sizeof(os->timestamp), "%6lu.%09lu ",
(unsigned long) ts->tv_sec, ts->tv_nsec);
}
@@ -1557,7 +1585,7 @@ static void print_footer(struct perf_stat_config *config)
fprintf(output, " %17.*f +- %.*f seconds time elapsed",
precision, avg, precision, sd);
- print_noise_pct(config, sd, avg, /*before_metric=*/false);
+ print_noise_pct(config, NULL, sd, avg, /*before_metric=*/false);
}
fprintf(output, "\n\n");
@@ -1672,9 +1700,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
int argc, const char **argv)
{
bool metric_only = config->metric_only;
- int interval = config->interval;
struct evsel *counter;
- char buf[64];
struct outstate os = {
.fh = config->output,
.first = true,
@@ -1685,10 +1711,8 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
if (config->iostat_run)
evlist->selected = evlist__first(evlist);
- if (interval) {
- os.prefix = buf;
- prepare_interval(config, buf, sizeof(buf), ts);
- }
+ if (config->interval)
+ prepare_timestamp(config, &os, ts);
print_header(config, _target, evlist, argc, argv);
@@ -1707,7 +1731,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
case AGGR_THREAD:
case AGGR_GLOBAL:
if (config->iostat_run) {
- iostat_print_counters(evlist, config, ts, buf,
+ iostat_print_counters(evlist, config, ts, os.timestamp,
(iostat_print_counter_t)print_counter, &os);
} else if (config->cgroup_list) {
print_cgroup_counter(config, evlist, &os);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 47718610d5d8..fa8b2a1048ff 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -327,7 +327,8 @@ static void print_instructions(struct perf_stat_config *config,
"insn per cycle", 0);
}
if (max_stalled && instructions) {
- out->new_line(config, ctxp);
+ if (out->new_line)
+ out->new_line(config, ctxp);
print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
"stalled cycles per insn", max_stalled / instructions);
}
@@ -670,7 +671,7 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
}
}
- if ((*num)++ > 0)
+ if ((*num)++ > 0 && out->new_line)
out->new_line(config, ctxp);
generic_metric(config, mexp, evsel, aggr_idx, out);
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 6f8cff3cd39a..2fda9acd7374 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -117,8 +117,9 @@ struct perf_stat_config {
unsigned int topdown_level;
};
+extern struct perf_stat_config stat_config;
+
void perf_stat__set_big_num(int set);
-void perf_stat__set_no_csv_summary(int set);
void update_stats(struct stats *stats, u64 val);
double avg_stats(struct stats *stats);
diff --git a/tools/perf/util/stream.c b/tools/perf/util/stream.c
index 545e44981a27..3de4a6130853 100644
--- a/tools/perf/util/stream.c
+++ b/tools/perf/util/stream.c
@@ -52,7 +52,6 @@ static struct evlist_streams *evlist_streams__new(int nr_evsel,
goto err;
s->nr_streams_max = nr_streams_max;
- s->evsel_idx = -1;
}
els->ev_streams = es;
@@ -139,7 +138,7 @@ static int evlist__init_callchain_streams(struct evlist *evlist,
hists__output_resort(hists, NULL);
init_hot_callchain(hists, &es[i]);
- es[i].evsel_idx = pos->core.idx;
+ es[i].evsel = pos;
i++;
}
@@ -166,12 +165,12 @@ struct evlist_streams *evlist__create_streams(struct evlist *evlist,
}
struct evsel_streams *evsel_streams__entry(struct evlist_streams *els,
- int evsel_idx)
+ const struct evsel *evsel)
{
struct evsel_streams *es = els->ev_streams;
for (int i = 0; i < els->nr_evsel; i++) {
- if (es[i].evsel_idx == evsel_idx)
+ if (es[i].evsel == evsel)
return &es[i];
}
diff --git a/tools/perf/util/stream.h b/tools/perf/util/stream.h
index bee768874fea..50f7e6e04982 100644
--- a/tools/perf/util/stream.h
+++ b/tools/perf/util/stream.h
@@ -2,7 +2,9 @@
#ifndef __PERF_STREAM_H
#define __PERF_STREAM_H
-#include "callchain.h"
+struct callchain_node;
+struct evlist;
+struct evsel;
struct stream {
struct callchain_node *cnode;
@@ -11,9 +13,9 @@ struct stream {
struct evsel_streams {
struct stream *streams;
+ const struct evsel *evsel;
int nr_streams_max;
int nr_streams;
- int evsel_idx;
u64 streams_hits;
};
@@ -22,15 +24,13 @@ struct evlist_streams {
int nr_evsel;
};
-struct evlist;
-
void evlist_streams__delete(struct evlist_streams *els);
struct evlist_streams *evlist__create_streams(struct evlist *evlist,
int nr_streams_max);
struct evsel_streams *evsel_streams__entry(struct evlist_streams *els,
- int evsel_idx);
+ const struct evsel *evsel);
void evsel_streams__match(struct evsel_streams *es_base,
struct evsel_streams *es_pair);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 308fc7ec88cc..c0e927bbadf6 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -254,11 +254,20 @@ char *strpbrk_esc(char *str, const char *stopset)
do {
ptr = strpbrk(str, stopset);
- if (ptr == str ||
- (ptr == str + 1 && *(ptr - 1) != '\\'))
+ if (!ptr) {
+ /* stopset not in str. */
break;
+ }
+ if (ptr == str) {
+ /* stopset character is first in str. */
+ break;
+ }
+ if (ptr == str + 1 && str[0] != '\\') {
+ /* stopset character is second and wasn't preceded by a '\'. */
+ break;
+ }
str = ptr + 1;
- } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+ } while (ptr[-1] == '\\' && ptr[-2] != '\\');
return ptr;
}
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 2b04f47f4db0..b1d259f590e9 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -21,6 +21,7 @@
#include <perf/cpumap.h>
#include "env.h"
+#include "perf.h"
#include "svghelper.h"
static u64 first_time, last_time;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index e398abfd13a0..66fd1249660a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -287,8 +287,9 @@ static bool want_demangle(bool is_kernel_sym)
* Demangle C++ function signature, typically replaced by demangle-cxx.cpp
* version.
*/
-__weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
- bool modifiers __maybe_unused)
+#ifndef HAVE_CXA_DEMANGLE_SUPPORT
+char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
+ bool modifiers __maybe_unused)
{
#ifdef HAVE_LIBBFD_SUPPORT
int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
@@ -302,6 +303,7 @@ __weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __mayb
return NULL;
#endif
}
+#endif /* !HAVE_CXA_DEMANGLE_SUPPORT */
static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 0037f1163919..49b08adc6ee3 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -154,6 +154,13 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if ((a == 0) && (b > 0))
return SYMBOL_B;
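+ /* Prefer a symbol with a known ELF type over an STT_NOTYPE one. */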
+ if (syma->type != symb->type) {
+ if (syma->type == STT_NOTYPE)
+ return SYMBOL_B;
+ if (symb->type == STT_NOTYPE)
+ return SYMBOL_A;
+ }
+
/* Prefer a non weak symbol over a weak one */
a = syma->binding == STB_WEAK;
b = symb->binding == STB_WEAK;
@@ -257,7 +264,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
* like in:
* ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
*/
- if (prev->end == prev->start && prev->type != STT_NOTYPE) {
+ if (prev->end == prev->start) {
const char *prev_mod;
const char *curr_mod;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index a58444c4aed1..6923b0d5efed 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1686,12 +1686,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_RAW) {
- u.val32[0] = sample->raw_size;
- *array = u.val64;
- array = (void *)array + sizeof(u32);
+ u32 *array32 = (void *)array;
+
+ *array32 = sample->raw_size;
+ array32++;
+
+ memcpy(array32, sample->raw_data, sample->raw_size);
+ array = (void *)(array32 + (sample->raw_size / sizeof(u32)));
- memcpy(array, sample->raw_data, sample->raw_size);
- array = (void *)array + sample->raw_size;
+ /* make sure the array is 64-bit aligned */
+ BUG_ON(((long)array) % sizeof(u64));
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 69d8dcf5cf28..928aca4cd6e9 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -10,52 +10,12 @@
#include <linux/compiler.h>
#include <linux/zalloc.h>
-#ifdef HAVE_SYSCALL_TABLE_SUPPORT
#include <string.h>
#include "string2.h"
-#if defined(__x86_64__)
-#include <asm/syscalls_64.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_x86_64;
-#elif defined(__i386__)
-#include <asm/syscalls_32.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_x86_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_x86;
-#elif defined(__s390x__)
-#include <asm/syscalls_64.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_s390_64;
-#elif defined(__powerpc64__)
-#include <asm/syscalls_64.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_64_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_powerpc_64;
-#elif defined(__powerpc__)
-#include <asm/syscalls_32.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_powerpc_32;
-#elif defined(__aarch64__)
-#include <asm/syscalls.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_arm64;
-#elif defined(__mips__)
-#include <asm/syscalls_n64.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_MIPS_N64_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_mips_n64;
-#elif defined(__loongarch__)
-#include <asm/syscalls.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_LOONGARCH_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_loongarch;
-#elif defined(__riscv)
-#include <asm/syscalls.c>
-const int syscalltbl_native_max_id = SYSCALLTBL_RISCV_MAX_ID;
-static const char *const *syscalltbl_native = syscalltbl_riscv;
-#else
-const int syscalltbl_native_max_id = 0;
-static const char *const syscalltbl_native[] = {
- [0] = "unknown",
-};
-#endif
+#include <syscall_table.h>
+const int syscalltbl_native_max_id = SYSCALLTBL_MAX_ID;
+static const char *const *syscalltbl_native = syscalltbl;
struct syscall {
int id;
@@ -163,47 +123,3 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
*idx = -1;
return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
}
-
-#else /* HAVE_SYSCALL_TABLE_SUPPORT */
-
-#include <libaudit.h>
-
-struct syscalltbl *syscalltbl__new(void)
-{
- struct syscalltbl *tbl = zalloc(sizeof(*tbl));
- if (tbl)
- tbl->audit_machine = audit_detect_machine();
- return tbl;
-}
-
-void syscalltbl__delete(struct syscalltbl *tbl)
-{
- free(tbl);
-}
-
-const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
-{
- return audit_syscall_to_name(id, tbl->audit_machine);
-}
-
-int syscalltbl__id(struct syscalltbl *tbl, const char *name)
-{
- return audit_name_to_syscall(name, tbl->audit_machine);
-}
-
-int syscalltbl__id_at_idx(struct syscalltbl *tbl __maybe_unused, int idx)
-{
- return idx;
-}
-
-int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
- const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
-{
- return -1;
-}
-
-int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx)
-{
- return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
-}
-#endif /* HAVE_SYSCALL_TABLE_SUPPORT */
diff --git a/tools/perf/util/syscalltbl.h b/tools/perf/util/syscalltbl.h
index 2b53b7ed25a6..362411a6d849 100644
--- a/tools/perf/util/syscalltbl.h
+++ b/tools/perf/util/syscalltbl.h
@@ -3,7 +3,6 @@
#define __PERF_SYSCALLTBL_H
struct syscalltbl {
- int audit_machine;
struct {
int max_id;
int nr_entries;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 41d53e1b43e7..9c015fc2bcfb 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -99,7 +99,7 @@ unsigned long long read_size(struct tep_event *event, void *ptr, int size)
return tep_read_number(event->tep, ptr, size);
}
-void event_format__fprintf(struct tep_event *event,
+void event_format__fprintf(const struct tep_event *event,
int cpu, void *data, int size, FILE *fp)
{
struct tep_record record;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 5596fcda2c10..4e81e02a4f18 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -13,14 +13,94 @@
#include <event-parse.h>
#endif
+#include "archinsn.h"
#include "debug.h"
+#include "event.h"
#include "trace-event.h"
#include "evsel.h"
+#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include "util/sample.h"
+unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
+
struct scripting_context *scripting_context;
+struct script_spec {
+ struct list_head node;
+ struct scripting_ops *ops;
+ char spec[];
+};
+
+static LIST_HEAD(script_specs);
+
+static struct script_spec *script_spec__new(const char *spec,
+ struct scripting_ops *ops)
+{
+ struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1);
+
+ if (s != NULL) {
+ strcpy(s->spec, spec);
+ s->ops = ops;
+ }
+
+ return s;
+}
+
+static void script_spec__add(struct script_spec *s)
+{
+ list_add_tail(&s->node, &script_specs);
+}
+
+static struct script_spec *script_spec__find(const char *spec)
+{
+ struct script_spec *s;
+
+ list_for_each_entry(s, &script_specs, node)
+ if (strcasecmp(s->spec, spec) == 0)
+ return s;
+ return NULL;
+}
+
+static int script_spec_register(const char *spec, struct scripting_ops *ops)
+{
+ struct script_spec *s;
+
+ s = script_spec__find(spec);
+ if (s)
+ return -1;
+
+ s = script_spec__new(spec, ops);
+ if (!s)
+ return -1;
+
+ script_spec__add(s);
+ return 0;
+}
+
+struct scripting_ops *script_spec__lookup(const char *spec)
+{
+ struct script_spec *s = script_spec__find(spec);
+
+ if (!s)
+ return NULL;
+
+ return s->ops;
+}
+
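+/* Iterate all registered script specs; stop and return the first non-zero value from cb. */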
+int script_spec__for_each(int (*cb)(struct scripting_ops *ops, const char *spec))
+{
+ struct script_spec *s;
+ int ret = 0;
+
+ list_for_each_entry(s, &script_specs, node) {
+ ret = cb(s->ops, s->spec);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
void scripting_context__update(struct scripting_context *c,
union perf_event *event,
struct perf_sample *sample,
@@ -28,12 +108,14 @@ void scripting_context__update(struct scripting_context *c,
struct addr_location *al,
struct addr_location *addr_al)
{
- c->event_data = sample->raw_data;
- c->pevent = NULL;
#ifdef HAVE_LIBTRACEEVENT
- if (evsel->tp_format)
- c->pevent = evsel->tp_format->tep;
+ const struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ c->pevent = tp_format ? tp_format->tep : NULL;
+#else
+ c->pevent = NULL;
#endif
+ c->event_data = sample->raw_data;
c->event = event;
c->sample = sample;
c->evsel = evsel;
@@ -191,3 +273,100 @@ void setup_perl_scripting(void)
}
#endif
#endif
+
+#if !defined(__i386__) && !defined(__x86_64__)
+void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+}
+#endif
+
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine, bool native_arch)
+{
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+}
+
+static const struct {
+ u32 flags;
+ const char *name;
+} sample_flags[] = {
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "jcc"},
+ {PERF_IP_FLAG_BRANCH, "jmp"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, "int"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, "iret"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, "syscall"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, "sysret"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "async"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT,
+ "hw int"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vmentry"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vmexit"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_BRANCH_MISS, "br miss"},
+ {0, NULL}
+};
+
+static const char *sample_flags_to_name(u32 flags)
+{
+ int i;
+
+ for (i = 0; sample_flags[i].name ; i++) {
+ if (sample_flags[i].flags == flags)
+ return sample_flags[i].name;
+ }
+
+ return NULL;
+}
+
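+/* Format sample flags into str: print a known branch-type name when one matches, otherwise one character per raw flag bit; transaction/interrupt-state bits are appended in parentheses. */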
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
+{
+ u32 xf = PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_INTR_DISABLE |
+ PERF_IP_FLAG_INTR_TOGGLE;
+ const char *chars = PERF_IP_FLAG_CHARS;
+ const size_t n = strlen(PERF_IP_FLAG_CHARS);
+ const char *name = NULL;
+ size_t i, pos = 0;
+ char xs[16] = {0};
+
+ if (flags & xf)
+ snprintf(xs, sizeof(xs), "(%s%s%s)",
+ flags & PERF_IP_FLAG_IN_TX ? "x" : "",
+ flags & PERF_IP_FLAG_INTR_DISABLE ? "D" : "",
+ flags & PERF_IP_FLAG_INTR_TOGGLE ? "t" : "");
+
+ name = sample_flags_to_name(flags & ~xf);
+ if (name)
+ return snprintf(str, sz, "%-15s%6s", name, xs);
+
+ if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
+ name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_BEGIN));
+ if (name)
+ return snprintf(str, sz, "tr strt %-7s%6s", name, xs);
+ }
+
+ if (flags & PERF_IP_FLAG_TRACE_END) {
+ name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_END));
+ if (name)
+ return snprintf(str, sz, "tr end %-7s%6s", name, xs);
+ }
+
+ for (i = 0; i < n; i++, flags >>= 1) {
+ if ((flags & 1) && pos < sz)
+ str[pos++] = chars[i];
+ }
+ for (; i < 32; i++, flags >>= 1) {
+ if ((flags & 1) && pos < sz)
+ str[pos++] = '?';
+ }
+ if (pos < sz)
+ str[pos] = 0;
+
+ return pos;
+}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 79b939f947dd..ac9fde2f980c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -39,7 +39,7 @@ trace_event__tp_format(const char *sys, const char *name);
struct tep_event *trace_event__tp_format_id(int id);
-void event_format__fprintf(struct tep_event *event,
+void event_format__fprintf(const struct tep_event *event,
int cpu, void *data, int size, FILE *fp);
int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
@@ -113,10 +113,11 @@ struct scripting_ops {
extern unsigned int scripting_max_stack;
-int script_spec_register(const char *spec, struct scripting_ops *ops);
+struct scripting_ops *script_spec__lookup(const char *spec);
+int script_spec__for_each(int (*cb)(struct scripting_ops *ops, const char *spec));
void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
- struct machine *machine);
+ struct machine *machine, bool native_arch);
void setup_perl_scripting(void);
void setup_python_scripting(void);
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index b9823f414f10..ec72d29f3d58 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -8,6 +8,7 @@
#include "values.h"
#include "debug.h"
+#include "evsel.h"
int perf_read_values_init(struct perf_read_values *values)
{
@@ -22,21 +23,17 @@ int perf_read_values_init(struct perf_read_values *values)
values->threads = 0;
values->counters_max = 16;
- values->counterrawid = malloc(values->counters_max
- * sizeof(*values->counterrawid));
- values->countername = malloc(values->counters_max
- * sizeof(*values->countername));
- if (!values->counterrawid || !values->countername) {
- pr_debug("failed to allocate read_values counters arrays");
+ values->counters = malloc(values->counters_max * sizeof(*values->counters));
+ if (!values->counters) {
+ pr_debug("failed to allocate read_values counters array");
goto out_free_counter;
}
- values->counters = 0;
+ values->num_counters = 0;
return 0;
out_free_counter:
- zfree(&values->counterrawid);
- zfree(&values->countername);
+ zfree(&values->counters);
out_free_pid:
zfree(&values->pid);
zfree(&values->tid);
@@ -56,10 +53,7 @@ void perf_read_values_destroy(struct perf_read_values *values)
zfree(&values->value);
zfree(&values->pid);
zfree(&values->tid);
- zfree(&values->counterrawid);
- for (i = 0; i < values->counters; i++)
- zfree(&values->countername[i]);
- zfree(&values->countername);
+ zfree(&values->counters);
}
static int perf_read_values__enlarge_threads(struct perf_read_values *values)
@@ -116,81 +110,71 @@ static int perf_read_values__findnew_thread(struct perf_read_values *values,
static int perf_read_values__enlarge_counters(struct perf_read_values *values)
{
- char **countername;
- int i, counters_max = values->counters_max * 2;
- u64 *counterrawid = realloc(values->counterrawid, counters_max * sizeof(*values->counterrawid));
+ int counters_max = values->counters_max * 2;
+ struct evsel **new_counters = realloc(values->counters,
+ counters_max * sizeof(*values->counters));
- if (!counterrawid) {
- pr_debug("failed to enlarge read_values rawid array");
+ if (!new_counters) {
+ pr_debug("failed to enlarge read_values counters array");
goto out_enomem;
}
- countername = realloc(values->countername, counters_max * sizeof(*values->countername));
- if (!countername) {
- pr_debug("failed to enlarge read_values rawid array");
- goto out_free_rawid;
- }
-
- for (i = 0; i < values->threads; i++) {
+ for (int i = 0; i < values->threads; i++) {
u64 *value = realloc(values->value[i], counters_max * sizeof(**values->value));
- int j;
if (!value) {
pr_debug("failed to enlarge read_values ->values array");
- goto out_free_name;
+ goto out_free_counters;
}
- for (j = values->counters_max; j < counters_max; j++)
+ for (int j = values->counters_max; j < counters_max; j++)
value[j] = 0;
values->value[i] = value;
}
values->counters_max = counters_max;
- values->counterrawid = counterrawid;
- values->countername = countername;
+ values->counters = new_counters;
return 0;
-out_free_name:
- free(countername);
-out_free_rawid:
- free(counterrawid);
+out_free_counters:
+ free(new_counters);
out_enomem:
return -ENOMEM;
}
static int perf_read_values__findnew_counter(struct perf_read_values *values,
- u64 rawid, const char *name)
+ struct evsel *evsel)
{
int i;
- for (i = 0; i < values->counters; i++)
- if (values->counterrawid[i] == rawid)
+ for (i = 0; i < values->num_counters; i++)
+ if (values->counters[i] == evsel)
return i;
- if (values->counters == values->counters_max) {
- i = perf_read_values__enlarge_counters(values);
- if (i)
- return i;
+ if (values->num_counters == values->counters_max) {
+ int err = perf_read_values__enlarge_counters(values);
+
+ if (err)
+ return err;
}
- i = values->counters++;
- values->counterrawid[i] = rawid;
- values->countername[i] = strdup(name);
+ i = values->num_counters++;
+ values->counters[i] = evsel;
return i;
}
int perf_read_values_add_value(struct perf_read_values *values,
u32 pid, u32 tid,
- u64 rawid, const char *name, u64 value)
+ struct evsel *evsel, u64 value)
{
int tindex, cindex;
tindex = perf_read_values__findnew_thread(values, pid, tid);
if (tindex < 0)
return tindex;
- cindex = perf_read_values__findnew_counter(values, rawid, name);
+ cindex = perf_read_values__findnew_counter(values, evsel);
if (cindex < 0)
return cindex;
@@ -205,15 +189,15 @@ static void perf_read_values__display_pretty(FILE *fp,
int pidwidth, tidwidth;
int *counterwidth;
- counterwidth = malloc(values->counters * sizeof(*counterwidth));
+ counterwidth = malloc(values->num_counters * sizeof(*counterwidth));
if (!counterwidth) {
fprintf(fp, "INTERNAL ERROR: Failed to allocate counterwidth array\n");
return;
}
tidwidth = 3;
pidwidth = 3;
- for (j = 0; j < values->counters; j++)
- counterwidth[j] = strlen(values->countername[j]);
+ for (j = 0; j < values->num_counters; j++)
+ counterwidth[j] = strlen(evsel__name(values->counters[j]));
for (i = 0; i < values->threads; i++) {
int width;
@@ -223,7 +207,7 @@ static void perf_read_values__display_pretty(FILE *fp,
width = snprintf(NULL, 0, "%d", values->tid[i]);
if (width > tidwidth)
tidwidth = width;
- for (j = 0; j < values->counters; j++) {
+ for (j = 0; j < values->num_counters; j++) {
width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > counterwidth[j])
counterwidth[j] = width;
@@ -231,14 +215,14 @@ static void perf_read_values__display_pretty(FILE *fp,
}
fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
- for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
+ for (j = 0; j < values->num_counters; j++)
+ fprintf(fp, " %*s", counterwidth[j], evsel__name(values->counters[j]));
fprintf(fp, "\n");
for (i = 0; i < values->threads; i++) {
fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
tidwidth, values->tid[i]);
- for (j = 0; j < values->counters; j++)
+ for (j = 0; j < values->num_counters; j++)
fprintf(fp, " %*" PRIu64,
counterwidth[j], values->value[i][j]);
fprintf(fp, "\n");
@@ -266,16 +250,16 @@ static void perf_read_values__display_raw(FILE *fp,
if (width > tidwidth)
tidwidth = width;
}
- for (j = 0; j < values->counters; j++) {
- width = strlen(values->countername[j]);
+ for (j = 0; j < values->num_counters; j++) {
+ width = strlen(evsel__name(values->counters[j]));
if (width > namewidth)
namewidth = width;
- width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
+ width = snprintf(NULL, 0, "%x", values->counters[j]->core.idx);
if (width > rawwidth)
rawwidth = width;
}
for (i = 0; i < values->threads; i++) {
- for (j = 0; j < values->counters; j++) {
+ for (j = 0; j < values->num_counters; j++) {
width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > countwidth)
countwidth = width;
@@ -287,12 +271,12 @@ static void perf_read_values__display_raw(FILE *fp,
namewidth, "Name", rawwidth, "Raw",
countwidth, "Count");
for (i = 0; i < values->threads; i++)
- for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64,
+ for (j = 0; j < values->num_counters; j++)
+ fprintf(fp, " %*d %*d %*s %*x %*" PRIu64,
pidwidth, values->pid[i],
tidwidth, values->tid[i],
- namewidth, values->countername[j],
- rawwidth, values->counterrawid[j],
+ namewidth, evsel__name(values->counters[j]),
+ rawwidth, values->counters[j]->core.idx,
countwidth, values->value[i][j]);
}
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index 791c1ad606c2..bbca33daca19 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -5,14 +5,15 @@
#include <stdio.h>
#include <linux/types.h>
+struct evsel;
+
struct perf_read_values {
int threads;
int threads_max;
u32 *pid, *tid;
- int counters;
+ int num_counters;
int counters_max;
- u64 *counterrawid;
- char **countername;
+ struct evsel **counters;
u64 **value;
};
@@ -21,7 +22,7 @@ void perf_read_values_destroy(struct perf_read_values *values);
int perf_read_values_add_value(struct perf_read_values *values,
u32 pid, u32 tid,
- u64 rawid, const char *name, u64 value);
+ struct evsel *evsel, u64 value);
void perf_read_values_display(FILE *fp, struct perf_read_values *values,
int raw);
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 5127be34869e..fadfb02b8611 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.20";
+static const char *version_str = "v1.21";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
diff --git a/tools/power/x86/intel-speed-select/isst-core-tpmi.c b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
index 32ea70c7dbd8..da53aaa27fc9 100644
--- a/tools/power/x86/intel-speed-select/isst-core-tpmi.c
+++ b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
@@ -329,7 +329,7 @@ static int tpmi_get_get_trls(struct isst_id *id, int config_index,
return 0;
}
-static int tpmi_get_get_trl(struct isst_id *id, int level, int config_index,
+static int tpmi_get_get_trl(struct isst_id *id, int config_index, int level,
int *trl)
{
struct isst_pkg_ctdp_level_info ctdp_level;
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index a7f7ed01421c..99bf905ade81 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -136,7 +136,7 @@ displays the statistics gathered since it was forked.
The system configuration dump (if --quiet is not used) is followed by statistics. The first row of the statistics labels the content of each column (below). The second row of statistics is the system summary line. The system summary line has a '-' in the columns for the Package, Core, and CPU. The contents of the system summary line depends on the type of column. Columns that count items (eg. IRQ) show the sum across all CPUs in the system. Columns that show a percentage show the average across all CPUs in the system. Columns that dump raw MSR values simply show 0 in the summary. After the system summary row, each row describes a specific Package/Core/CPU. Note that if the --cpu parameter is used to limit which specific CPUs are displayed, turbostat will still collect statistics for all CPUs in the system and will still show the system summary for all CPUs in the system.
.SH COLUMN DESCRIPTIONS
.PP
-\fBusec\fP For each CPU, the number of microseconds elapsed during counter collection, including thread migration -- if any. This counter is disabled by default, and is enabled with "--enable usec", or --debug. On the summary row, usec refers to the total elapsed time to collect the counters on all cpus.
+\fBusec\fP For each CPU, the number of microseconds elapsed during counter collection, including thread migration -- if any. This counter is disabled by default, and is enabled with "--enable usec", or --debug. On the summary row, usec refers to the total elapsed time to snapshot the procfs/sysfs and collect the counters on all cpus.
.PP
\fBTime_Of_Day_Seconds\fP For each CPU, the gettimeofday(2) value (seconds.subsec since Epoch) when the counters ending the measurement interval were collected. This column is disabled by default, and can be enabled with "--enable Time_Of_Day_Seconds" or "--debug". On the summary row, Time_Of_Day_Seconds refers to the timestamp following collection of counters on the last CPU.
.PP
@@ -190,7 +190,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
.PP
\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
.PP
-\fBSysWatt\fP Watts consumed by the whole platform (RAPL PSYS). Disabled by default. Enable with --enable SysWatt.
+\fBSysWatt\fP Watts consumed by the whole platform (RAPL PSYS).
.PP
\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. Comparing PkgWatt and PkgTmp to system limits is necessary.
.PP
@@ -516,14 +516,40 @@ that they count at TSC rate, which is true on all processors tested to date.
Volume 3B: System Programming Guide"
https://www.intel.com/products/processor/manuals/
+.SH RUN THE LATEST VERSION
+If turbostat complains that it doesn't recognize your processor,
+please try the latest version.
+
+The latest version of turbostat does not require the latest version of the Linux kernel.
+However, some features, such as perf(1) counters, do require kernel support.
+
+The latest turbostat release is available in the upstream Linux Kernel source tree.
+eg. "git pull https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git"
+and run make in tools/power/x86/turbostat/.
+
+n.b. "make install" will update your system manually, but a distro update may subsequently downgrade your turbostat to an older version.
+For this reason, manually installing to /usr/local/bin may be what you want.
+
+Note that turbostat/Makefile has a "make snapshot" target, which will create a tar file
+that can build without a local kernel source tree.
+
+If the upstream version isn't new enough, the development tree can be found here:
+"git pull https://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat"
+
+If the development tree doesn't work, please contact the author via chat,
+or via email with the word "turbostat" on the Subject line.
+
.SH FILES
.ta
.nf
+/sys/bus/event_source/devices/
/dev/cpu/*/msr
+/sys/class/intel_pmt/
+/sys/devices/system/cpu/
.fi
.SH "SEE ALSO"
-msr(4), vmstat(8)
+perf(1), msr(4), vmstat(8)
.PP
.SH AUTHOR
.nf
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 58a487c225a7..8d5011a0bf60 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3,7 +3,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel and AMD processors.
*
- * Copyright (c) 2024 Intel Corporation.
+ * Copyright (c) 2025 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -95,6 +95,8 @@
#define INTEL_ECORE_TYPE 0x20
#define INTEL_PCORE_TYPE 0x40
+#define ROUND_UP_TO_PAGE_SIZE(n) (((n) + 0x1000UL-1UL) & ~(0x1000UL-1UL))
+
enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC, COUNTER_K2M };
enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT, FORMAT_AVERAGE };
@@ -202,6 +204,8 @@ struct msr_counter bic[] = {
{ 0x0, "Die%c6", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "SysWatt", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "Sys_J", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "NMI", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%c1e", NULL, 0, 0, 0, NULL, 0 },
};
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -266,14 +270,16 @@ struct msr_counter bic[] = {
#define BIC_Diec6 (1ULL << 58)
#define BIC_SysWatt (1ULL << 59)
#define BIC_Sys_J (1ULL << 60)
+#define BIC_NMI (1ULL << 61)
+#define BIC_CPU_c1e (1ULL << 62)
-#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die )
-#define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__)
+#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die)
+#define BIC_THERMAL_PWR (BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__ | BIC_SysWatt)
#define BIC_FREQUENCY (BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_SAMMHz | BIC_SAMACTMHz | BIC_UNCORE_MHZ)
-#define BIC_IDLE (BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_SAM_mc6 | BIC_Diec6)
-#define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
+#define BIC_IDLE (BIC_Busy | BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_SAM_mc6 | BIC_Diec6)
+#define BIC_OTHER (BIC_IRQ | BIC_NMI | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
-#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC | BIC_SysWatt | BIC_Sys_J)
+#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
@@ -326,6 +332,7 @@ unsigned int rapl_joules;
unsigned int summary_only;
unsigned int list_header_only;
unsigned int dump_only;
+unsigned int force_load;
unsigned int has_aperf;
unsigned int has_aperf_access;
unsigned int has_epb;
@@ -353,7 +360,7 @@ unsigned long long cpuidle_cur_sys_lpi_us;
unsigned int tj_max;
unsigned int tj_max_override;
double rapl_power_units, rapl_time_units;
-double rapl_dram_energy_units, rapl_energy_units;
+double rapl_dram_energy_units, rapl_energy_units, rapl_psys_energy_units;
double rapl_joule_counter_range;
unsigned int crystal_hz;
unsigned long long tsc_hz;
@@ -365,6 +372,9 @@ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int first_counter_read = 1;
+
+static struct timeval procsysfs_tv_begin;
+
int ignore_stdin;
bool no_msr;
bool no_perf;
@@ -419,6 +429,7 @@ struct platform_features {
bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. AMD specific for now */
bool has_rapl_divisor; /* Divisor for Energy unit raw value from MSR_RAPL_POWER_UNIT */
bool has_fixed_rapl_unit; /* Fixed Energy Unit used for DRAM RAPL Domain */
+ bool has_fixed_rapl_psys_unit; /* Fixed Energy Unit used for PSYS RAPL Domain */
int rapl_quirk_tdp; /* Hardcoded TDP value when cannot be retrieved from hardware */
int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */
bool enable_tsc_tweak; /* Use CPU Base freq instead of TSC freq for aperf/mperf counter */
@@ -819,6 +830,7 @@ static const struct platform_features spr_features = {
.has_msr_core_c1_res = 1,
.has_irtl_msrs = 1,
.has_cst_prewake_bit = 1,
+ .has_fixed_rapl_psys_unit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
.rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
};
@@ -1024,6 +1036,7 @@ static const struct platform_data turbostat_pdata[] = {
{ INTEL_ARROWLAKE_U, &adl_features },
{ INTEL_ARROWLAKE, &adl_features },
{ INTEL_LUNARLAKE_M, &lnl_features },
+ { INTEL_PANTHERLAKE_L, &lnl_features },
{ INTEL_ATOM_SILVERMONT, &slv_features },
{ INTEL_ATOM_SILVERMONT_D, &slvd_features },
{ INTEL_ATOM_AIRMONT, &amt_features },
@@ -1036,6 +1049,7 @@ static const struct platform_data turbostat_pdata[] = {
{ INTEL_ATOM_GRACEMONT, &adl_features },
{ INTEL_ATOM_CRESTMONT_X, &srf_features },
{ INTEL_ATOM_CRESTMONT, &grr_features },
+ { INTEL_ATOM_DARKMONT_X, &srf_features },
{ INTEL_XEON_PHI_KNL, &knl_features },
{ INTEL_XEON_PHI_KNM, &knl_features },
/*
@@ -1054,9 +1068,10 @@ void probe_platform_features(unsigned int family, unsigned int model)
{
int i;
- platform = &default_features;
if (authentic_amd || hygon_genuine) {
+ /* fallback to default features on unsupported models */
+ force_load++;
if (max_extended_level >= 0x80000007) {
unsigned int eax, ebx, ecx, edx;
@@ -1065,11 +1080,11 @@ void probe_platform_features(unsigned int family, unsigned int model)
if ((edx & (1 << 14)) && family >= 0x17)
platform = &amd_features_with_rapl;
}
- return;
+ goto end;
}
if (!genuine_intel)
- return;
+ goto end;
for (i = 0; turbostat_pdata[i].features; i++) {
if (VFM_FAMILY(turbostat_pdata[i].vfm) == family && VFM_MODEL(turbostat_pdata[i].vfm) == model) {
@@ -1077,6 +1092,19 @@ void probe_platform_features(unsigned int family, unsigned int model)
return;
}
}
+
+end:
+ if (force_load && !platform) {
+ fprintf(outf, "Forced to run on unsupported platform!\n");
+ platform = &default_features;
+ }
+
+ if (platform)
+ return;
+
+ fprintf(stderr, "Unsupported platform detected.\n"
+ "\tSee RUN THE LATEST VERSION on turbostat(8)\n");
+ exit(1);
}
/* Model specific support End */
@@ -1094,8 +1122,8 @@ int backwards_count;
char *progname;
#define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */
-cpu_set_t *cpu_present_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
-size_t cpu_present_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
+cpu_set_t *cpu_present_set, *cpu_possible_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
+size_t cpu_present_setsize, cpu_possible_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
#define MAX_ADDED_THREAD_COUNTERS 24
#define MAX_ADDED_CORE_COUNTERS 8
#define MAX_ADDED_PACKAGE_COUNTERS 16
@@ -1271,7 +1299,7 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr = MSR_PLATFORM_ENERGY_STATUS,
.msr_mask = 0x00000000FFFFFFFF,
.msr_shift = 0,
- .platform_rapl_msr_scale = &rapl_energy_units,
+ .platform_rapl_msr_scale = &rapl_psys_energy_units,
.rci_index = RAPL_RCI_INDEX_ENERGY_PLATFORM,
.bic = BIC_SysWatt | BIC_Sys_J,
.compat_scale = 1.0,
@@ -1510,6 +1538,17 @@ static struct msr_counter_arch_info msr_counter_arch_infos[] = {
#define PMT_COUNTER_MTL_DC6_LSB 0
#define PMT_COUNTER_MTL_DC6_MSB 63
#define PMT_MTL_DC6_GUID 0x1a067102
+#define PMT_MTL_DC6_SEQ 0
+
+#define PMT_COUNTER_CWF_MC1E_OFFSET_BASE 20936
+#define PMT_COUNTER_CWF_MC1E_OFFSET_INCREMENT 24
+#define PMT_COUNTER_CWF_MC1E_NUM_MODULES_PER_FILE 12
+#define PMT_COUNTER_CWF_CPUS_PER_MODULE 4
+#define PMT_COUNTER_CWF_MC1E_LSB 0
+#define PMT_COUNTER_CWF_MC1E_MSB 63
+#define PMT_CWF_MC1E_GUID 0x14421519
+
+unsigned long long tcore_clock_freq_hz = 800000000;
#define PMT_COUNTER_NAME_SIZE_BYTES 16
#define PMT_COUNTER_TYPE_NAME_SIZE_BYTES 32
@@ -1533,6 +1572,7 @@ struct pmt_mmio {
enum pmt_datatype {
PMT_TYPE_RAW,
PMT_TYPE_XTAL_TIME,
+ PMT_TYPE_TCORE_CLOCK,
};
struct pmt_domain_info {
@@ -1562,6 +1602,93 @@ struct pmt_counter {
struct pmt_domain_info *domains;
};
+/*
+ * PMT telemetry directory iterator.
+ * Used to iterate telemetry files in sysfs in correct order.
+ */
+struct pmt_diriter_t {
+ DIR *dir;
+ struct dirent **namelist;
+ unsigned int num_names;
+ unsigned int current_name_idx;
+};
+
+int pmt_telemdir_filter(const struct dirent *e)
+{
+ unsigned int dummy;
+
+ return sscanf(e->d_name, "telem%u", &dummy);
+}
+
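+/* scandir() comparator: order telemN directory entries by their numeric suffix. */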
+int pmt_telemdir_sort(const struct dirent **a, const struct dirent **b)
+{
+ unsigned int aidx = 0, bidx = 0;
+
+ sscanf((*a)->d_name, "telem%u", &aidx);
+ sscanf((*b)->d_name, "telem%u", &bidx);
+
+ return aidx >= bidx;
+}
+
+const struct dirent *pmt_diriter_next(struct pmt_diriter_t *iter)
+{
+ const struct dirent *ret = NULL;
+
+ if (!iter->dir)
+ return NULL;
+
+ if (iter->current_name_idx >= iter->num_names)
+ return NULL;
+
+ ret = iter->namelist[iter->current_name_idx];
+ ++iter->current_name_idx;
+
+ return ret;
+}
+
+const struct dirent *pmt_diriter_begin(struct pmt_diriter_t *iter, const char *pmt_root_path)
+{
+ int num_names = iter->num_names;
+
+ if (!iter->dir) {
+ iter->dir = opendir(pmt_root_path);
+ if (iter->dir == NULL)
+ return NULL;
+
+ num_names = scandir(pmt_root_path, &iter->namelist, pmt_telemdir_filter, pmt_telemdir_sort);
+ if (num_names == -1)
+ return NULL;
+ }
+
+ iter->current_name_idx = 0;
+ iter->num_names = num_names;
+
+ return pmt_diriter_next(iter);
+}
+
+void pmt_diriter_init(struct pmt_diriter_t *iter)
+{
+ memset(iter, 0, sizeof(*iter));
+}
+
+void pmt_diriter_remove(struct pmt_diriter_t *iter)
+{
+ if (iter->namelist) {
+ for (unsigned int i = 0; i < iter->num_names; i++) {
+ free(iter->namelist[i]);
+ iter->namelist[i] = NULL;
+ }
+ }
+
+ free(iter->namelist);
+ iter->namelist = NULL;
+ iter->num_names = 0;
+ iter->current_name_idx = 0;
+
+ closedir(iter->dir);
+ iter->dir = NULL;
+}
+
unsigned int pmt_counter_get_width(const struct pmt_counter *p)
{
return (p->msb - p->lsb) + 1;
@@ -1611,6 +1738,7 @@ struct thread_data {
unsigned long long c1;
unsigned long long instr_count;
unsigned long long irq_count;
+ unsigned long long nmi_count;
unsigned int smi_count;
unsigned int cpu_id;
unsigned int apic_id;
@@ -1917,6 +2045,7 @@ struct timeval tv_even, tv_odd, tv_delta;
int *irq_column_2_cpu; /* /proc/interrupts column numbers */
int *irqs_per_cpu; /* indexed by cpu_num */
+int *nmi_per_cpu; /* indexed by cpu_num */
void setup_all_buffers(bool startup);
@@ -1944,6 +2073,8 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
{
int retval, pkg_no, core_no, thread_no, node_no;
+ retval = 0;
+
for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
@@ -1959,14 +2090,12 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
c = GET_CORE(core_base, core_no, node_no, pkg_no);
p = GET_PKG(pkg_base, pkg_no);
- retval = func(t, c, p);
- if (retval)
- return retval;
+ retval |= func(t, c, p);
}
}
}
}
- return 0;
+ return retval;
}
int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c, struct pkg_data *p)
@@ -2085,13 +2214,17 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
int probe_msr(int cpu, off_t offset)
{
ssize_t retval;
- unsigned long long dummy;
+ unsigned long long value;
assert(!no_msr);
- retval = pread(get_msr_fd(cpu), &dummy, sizeof(dummy), offset);
+ retval = pread(get_msr_fd(cpu), &value, sizeof(value), offset);
- if (retval != sizeof(dummy))
+ /*
+ * Expect MSRs to accumulate some non-zero value since the system was powered on.
+ * Treat zero as a read failure.
+ */
+ if (retval != sizeof(value) || value == 0)
return 1;
return 0;
@@ -2135,41 +2268,54 @@ void help(void)
"when COMMAND completes.\n"
"If no COMMAND is specified, turbostat wakes every 5-seconds\n"
"to print statistics, until interrupted.\n"
- " -a, --add add a counter\n"
+ " -a, --add counter\n"
+ " add a counter\n"
" eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
" eg. --add perf/cstate_pkg/c2-residency,package,delta,percent,perfPC2\n"
" eg. --add pmt,name=XTAL,type=raw,domain=package0,offset=0,lsb=0,msb=63,guid=0x1a067102\n"
- " -c, --cpu cpu-set limit output to summary plus cpu-set:\n"
+ " -c, --cpu cpu-set\n"
+ " limit output to summary plus cpu-set:\n"
" {core | package | j,k,l..m,n-p }\n"
- " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n"
+ " -d, --debug\n"
+ " displays usec, Time_Of_Day_Seconds and more debugging\n"
" debug messages are printed to stderr\n"
- " -D, --Dump displays the raw counter values\n"
- " -e, --enable [all | column]\n"
+ " -D, --Dump\n"
+ " displays the raw counter values\n"
+ " -e, --enable [all | column]\n"
" shows all or the specified disabled column\n"
- " -H, --hide [column|column,column,...]\n"
+ " -f, --force\n"
+ " force load turbostat with minimum default features on unsupported platforms.\n"
+ " -H, --hide [column | column,column,...]\n"
" hide the specified column(s)\n"
" -i, --interval sec.subsec\n"
- " Override default 5-second measurement interval\n"
- " -J, --Joules displays energy in Joules instead of Watts\n"
- " -l, --list list column headers only\n"
- " -M, --no-msr Disable all uses of the MSR driver\n"
- " -P, --no-perf Disable all uses of the perf API\n"
+ " override default 5-second measurement interval\n"
+ " -J, --Joules\n"
+ " displays energy in Joules instead of Watts\n"
+ " -l, --list\n"
+ " list column headers only\n"
+ " -M, --no-msr\n"
+ " disable all uses of the MSR driver\n"
+ " -P, --no-perf\n"
+ " disable all uses of the perf API\n"
" -n, --num_iterations num\n"
" number of the measurement iterations\n"
" -N, --header_iterations num\n"
" print header every num iterations\n"
" -o, --out file\n"
" create or truncate \"file\" for all output\n"
- " -q, --quiet skip decoding system configuration header\n"
- " -s, --show [column|column,column,...]\n"
+ " -q, --quiet\n"
+ " skip decoding system configuration header\n"
+ " -s, --show [column | column,column,...]\n"
" show only the specified column(s)\n"
" -S, --Summary\n"
" limits output to 1-line system summary per interval\n"
" -T, --TCC temperature\n"
" sets the Thermal Control Circuit temperature in\n"
" degrees Celsius\n"
- " -h, --help print this help message\n"
- " -v, --version print version information\n" "\n" "For more help, run \"man turbostat\"\n");
+ " -h, --help\n"
+ " print this help message\n"
+ " -v, --version\n"
+ " print version information\n\nFor more help, run \"man turbostat\"\n");
}
/*
@@ -2289,6 +2435,12 @@ void print_header(char *delim)
else
outp += sprintf(outp, "%sIRQ", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_NMI)) {
+ if (sums_need_wide_columns)
+ outp += sprintf(outp, "%s NMI", (printed++ ? delim : ""));
+ else
+ outp += sprintf(outp, "%sNMI", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "%sSMI", (printed++ ? delim : ""));
@@ -2335,6 +2487,7 @@ void print_header(char *delim)
break;
case PMT_TYPE_XTAL_TIME:
+ case PMT_TYPE_TCORE_CLOCK:
outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
break;
}
@@ -2409,6 +2562,7 @@ void print_header(char *delim)
break;
case PMT_TYPE_XTAL_TIME:
+ case PMT_TYPE_TCORE_CLOCK:
outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
break;
}
@@ -2540,6 +2694,7 @@ void print_header(char *delim)
break;
case PMT_TYPE_XTAL_TIME:
+ case PMT_TYPE_TCORE_CLOCK:
outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
break;
}
@@ -2575,6 +2730,8 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
if (DO_BIC(BIC_IRQ))
outp += sprintf(outp, "IRQ: %lld\n", t->irq_count);
+ if (DO_BIC(BIC_NMI))
+ outp += sprintf(outp, "IRQ: %lld\n", t->nmi_count);
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "SMI: %d\n", t->smi_count);
@@ -2794,6 +2951,14 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count);
}
+ /* NMI */
+ if (DO_BIC(BIC_NMI)) {
+ if (sums_need_wide_columns)
+ outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->nmi_count);
+ else
+ outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->nmi_count);
+ }
+
/* SMI */
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count);
@@ -2848,7 +3013,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
for (i = 0, ppmt = sys.pmt_tp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = t->pmt_counter[i];
- const double value_converted = 100.0 * value_raw / crystal_hz / interval_float;
+ double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
if (pmt_counter_get_width(ppmt) <= 32)
@@ -2860,8 +3025,13 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
break;
case PMT_TYPE_XTAL_TIME:
+ value_converted = 100.0 * value_raw / crystal_hz / interval_float;
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
break;
+
+ case PMT_TYPE_TCORE_CLOCK:
+ value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
}
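+ /*
+ * Worked example of the TCORE_CLOCK conversion above (illustrative numbers
+ * only): with tcore_clock_freq_hz = 100 MHz and a 5 s interval, a raw delta
+ * of 250,000,000 ticks prints 100.0 * 250e6 / 100e6 / 5 = 50.00, i.e. the
+ * counter advanced for roughly 50% of the measurement interval.
+ */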
}
@@ -2928,7 +3098,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
for (i = 0, ppmt = sys.pmt_cp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = c->pmt_counter[i];
- const double value_converted = 100.0 * value_raw / crystal_hz / interval_float;
+ double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
if (pmt_counter_get_width(ppmt) <= 32)
@@ -2940,8 +3110,13 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
break;
case PMT_TYPE_XTAL_TIME:
+ value_converted = 100.0 * value_raw / crystal_hz / interval_float;
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
break;
+
+ case PMT_TYPE_TCORE_CLOCK:
+ value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
}
}
@@ -3126,7 +3301,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
for (i = 0, ppmt = sys.pmt_pp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = p->pmt_counter[i];
- const double value_converted = 100.0 * value_raw / crystal_hz / interval_float;
+ double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
if (pmt_counter_get_width(ppmt) <= 32)
@@ -3138,8 +3313,13 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
break;
case PMT_TYPE_XTAL_TIME:
+ value_converted = 100.0 * value_raw / crystal_hz / interval_float;
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
break;
+
+ case PMT_TYPE_TCORE_CLOCK:
+ value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
}
}
@@ -3409,6 +3589,9 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
if (DO_BIC(BIC_IRQ))
old->irq_count = new->irq_count - old->irq_count;
+ if (DO_BIC(BIC_NMI))
+ old->nmi_count = new->nmi_count - old->nmi_count;
+
if (DO_BIC(BIC_SMI))
old->smi_count = new->smi_count - old->smi_count;
@@ -3447,12 +3630,10 @@ int delta_cpu(struct thread_data *t, struct core_data *c,
/* always calculate thread delta */
retval = delta_thread(t, t2, c2); /* c2 is core delta */
- if (retval)
- return retval;
/* calculate package delta only for 1st core in package */
if (is_cpu_first_core_in_package(t, c, p))
- retval = delta_package(p, p2);
+ retval |= delta_package(p, p2);
return retval;
}
@@ -3489,6 +3670,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
t->instr_count = 0;
t->irq_count = 0;
+ t->nmi_count = 0;
t->smi_count = 0;
c->c3 = 0;
@@ -3580,7 +3762,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
/* remember first tv_begin */
if (average.threads.tv_begin.tv_sec == 0)
- average.threads.tv_begin = t->tv_begin;
+ average.threads.tv_begin = procsysfs_tv_begin;
/* remember last tv_end */
average.threads.tv_end = t->tv_end;
@@ -3593,6 +3775,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.threads.instr_count += t->instr_count;
average.threads.irq_count += t->irq_count;
+ average.threads.nmi_count += t->nmi_count;
average.threads.smi_count += t->smi_count;
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
@@ -3734,6 +3917,9 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data
if (average.threads.irq_count > 9999999)
sums_need_wide_columns = 1;
+ if (average.threads.nmi_count > 9999999)
+ sums_need_wide_columns = 1;
+
average.cores.c3 /= topo.allowed_cores;
average.cores.c6 /= topo.allowed_cores;
@@ -4546,7 +4732,8 @@ unsigned long pmt_gen_value_mask(unsigned int lsb, unsigned int msb)
unsigned long pmt_read_counter(struct pmt_counter *ppmt, unsigned int domain_id)
{
- assert(domain_id < ppmt->num_domains);
+ if (domain_id >= ppmt->num_domains)
+ return 0;
const unsigned long *pmmio = ppmt->domains[domain_id].pcounter;
const unsigned long value = pmmio ? *pmmio : 0;
@@ -4590,6 +4777,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (DO_BIC(BIC_IRQ))
t->irq_count = irqs_per_cpu[cpu];
+ if (DO_BIC(BIC_NMI))
+ t->nmi_count = nmi_per_cpu[cpu];
get_cstate_counters(cpu, t, c, p);
@@ -5335,6 +5524,7 @@ void free_all_buffers(void)
free(irq_column_2_cpu);
free(irqs_per_cpu);
+ free(nmi_per_cpu);
for (i = 0; i <= topo.max_cpu_num; ++i) {
if (cpus[i].put_ids)
@@ -5566,6 +5756,8 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
{
int retval, pkg_no, node_no, core_no, thread_no;
+ retval = 0;
+
for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) {
for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
@@ -5587,14 +5779,12 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
p = GET_PKG(pkg_base, pkg_no);
p2 = GET_PKG(pkg_base2, pkg_no);
- retval = func(t, c, p, t2, c2, p2);
- if (retval)
- return retval;
+ retval |= func(t, c, p, t2, c2, p2);
}
}
}
}
- return 0;
+ return retval;
}
/*
@@ -5791,31 +5981,37 @@ int snapshot_proc_interrupts(void)
irq_column_2_cpu[column] = cpu_number;
irqs_per_cpu[cpu_number] = 0;
+ nmi_per_cpu[cpu_number] = 0;
}
/* read /proc/interrupt count lines and sum up irqs per cpu */
while (1) {
int column;
char buf[64];
+ int this_row_is_nmi = 0;
- retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */
+ retval = fscanf(fp, " %s:", buf); /* irq# "N:" */
if (retval != 1)
break;
+ if (strncmp(buf, "NMI", strlen("NMI")) == 0)
+ this_row_is_nmi = 1;
+
/* read the count per cpu */
for (column = 0; column < topo.num_cpus; ++column) {
int cpu_number, irq_count;
retval = fscanf(fp, " %d", &irq_count);
+
if (retval != 1)
break;
cpu_number = irq_column_2_cpu[column];
irqs_per_cpu[cpu_number] += irq_count;
-
+ if (this_row_is_nmi)
+ nmi_per_cpu[cpu_number] += irq_count;
}
-
while (getc(fp) != '\n') ; /* flush interrupt description */
}
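+
+ /*
+ * Sketch of the /proc/interrupts layout this loop parses (illustrative,
+ * trimmed to two CPUs; counts are made up):
+ *
+ *	           CPU0       CPU1
+ *	  0:         44          0   IO-APIC    2-edge      timer
+ *	NMI:         12          9   Non-maskable interrupts
+ *
+ * Every row's per-CPU counts are summed into irqs_per_cpu[]; rows whose
+ * label starts with "NMI" are additionally summed into nmi_per_cpu[].
+ */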
@@ -5912,7 +6108,9 @@ int snapshot_sys_lpi_us(void)
*/
int snapshot_proc_sysfs_files(void)
{
- if (DO_BIC(BIC_IRQ))
+ gettimeofday(&procsysfs_tv_begin, (struct timezone *)NULL);
+
+ if (DO_BIC(BIC_IRQ) || DO_BIC(BIC_NMI))
if (snapshot_proc_interrupts())
return 1;
@@ -7043,6 +7241,11 @@ void rapl_probe_intel(void)
else
rapl_dram_energy_units = rapl_energy_units;
+ if (platform->has_fixed_rapl_psys_unit)
+ rapl_psys_energy_units = 1.0;
+ else
+ rapl_psys_energy_units = rapl_energy_units;
+
time_unit = msr >> 16 & 0xF;
if (time_unit == 0)
time_unit = 0xA;
@@ -8233,6 +8436,7 @@ void process_cpuid()
aperf_mperf_multiplier = platform->need_perf_multiplier ? 1024 : 1;
BIC_PRESENT(BIC_IRQ);
+ BIC_PRESENT(BIC_NMI);
BIC_PRESENT(BIC_TSC_MHz);
}
@@ -8292,6 +8496,33 @@ int dir_filter(const struct dirent *dirp)
return 0;
}
+char *possible_file = "/sys/devices/system/cpu/possible";
+char possible_buf[1024];
+
+int initialize_cpu_possible_set(void)
+{
+ FILE *fp;
+
+ fp = fopen(possible_file, "r");
+ if (!fp) {
+ warn("open %s", possible_file);
+ return -1;
+ }
+ if (fread(possible_buf, sizeof(char), 1024, fp) == 0) {
+ warn("read %s", possible_file);
+ goto err;
+ }
+ if (parse_cpu_str(possible_buf, cpu_possible_set, cpu_possible_setsize)) {
+ warnx("%s: cpu str malformat %s\n", possible_file, cpu_effective_str);
+ goto err;
+ }
+ fclose(fp);
+ return 0;
+
+err:
+ fclose(fp);
+ return -1;
+}
+
void topology_probe(bool startup)
{
int i;
@@ -8324,6 +8555,16 @@ void topology_probe(bool startup)
for_all_proc_cpus(mark_cpu_present);
/*
+ * Allocate and initialize cpu_possible_set
+ */
+ cpu_possible_set = CPU_ALLOC((topo.max_cpu_num + 1));
+ if (cpu_possible_set == NULL)
+ err(3, "CPU_ALLOC");
+ cpu_possible_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+ CPU_ZERO_S(cpu_possible_setsize, cpu_possible_set);
+ initialize_cpu_possible_set();
+
+ /*
* Allocate and initialize cpu_effective_set
*/
cpu_effective_set = CPU_ALLOC((topo.max_cpu_num + 1));
@@ -8583,7 +8824,11 @@ void allocate_irq_buffers(void)
irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
if (irqs_per_cpu == NULL)
- err(-1, "calloc %d", topo.max_cpu_num + 1);
+ err(-1, "calloc %d IRQ", topo.max_cpu_num + 1);
+
+ nmi_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
+ if (nmi_per_cpu == NULL)
+ err(-1, "calloc %d NMI", topo.max_cpu_num + 1);
}
int update_topo(struct thread_data *t, struct core_data *c, struct pkg_data *p)
@@ -8854,49 +9099,36 @@ int parse_telem_info_file(int fd_dir, const char *info_filename, const char *for
struct pmt_mmio *pmt_mmio_open(unsigned int target_guid)
{
- DIR *dirp;
- struct dirent *entry;
+ struct pmt_diriter_t pmt_iter;
+ const struct dirent *entry;
struct stat st;
- unsigned int telem_idx;
int fd_telem_dir, fd_pmt;
unsigned long guid, size, offset;
size_t mmap_size;
void *mmio;
- struct pmt_mmio *ret = NULL;
+ struct pmt_mmio *head = NULL, *last = NULL;
+ struct pmt_mmio *new_pmt = NULL;
if (stat(SYSFS_TELEM_PATH, &st) == -1)
return NULL;
- dirp = opendir(SYSFS_TELEM_PATH);
- if (dirp == NULL)
+ pmt_diriter_init(&pmt_iter);
+ entry = pmt_diriter_begin(&pmt_iter, SYSFS_TELEM_PATH);
+ if (!entry) {
+ pmt_diriter_remove(&pmt_iter);
return NULL;
+ }
- for (;;) {
- entry = readdir(dirp);
-
- if (entry == NULL)
- break;
-
- if (strcmp(entry->d_name, ".") == 0)
- continue;
-
- if (strcmp(entry->d_name, "..") == 0)
- continue;
-
- if (sscanf(entry->d_name, "telem%u", &telem_idx) != 1)
- continue;
-
- if (fstatat(dirfd(dirp), entry->d_name, &st, 0) == -1) {
+ for ( ; entry != NULL; entry = pmt_diriter_next(&pmt_iter)) {
+ if (fstatat(dirfd(pmt_iter.dir), entry->d_name, &st, 0) == -1)
break;
- }
if (!S_ISDIR(st.st_mode))
continue;
- fd_telem_dir = openat(dirfd(dirp), entry->d_name, O_RDONLY);
- if (fd_telem_dir == -1) {
+ fd_telem_dir = openat(dirfd(pmt_iter.dir), entry->d_name, O_RDONLY);
+ if (fd_telem_dir == -1)
break;
- }
if (parse_telem_info_file(fd_telem_dir, "guid", "%lx", &guid)) {
close(fd_telem_dir);
@@ -8924,38 +9156,54 @@ struct pmt_mmio *pmt_mmio_open(unsigned int target_guid)
if (fd_pmt == -1)
goto loop_cleanup_and_break;
- mmap_size = (size + 0x1000UL) & (~0x1000UL);
+ mmap_size = ROUND_UP_TO_PAGE_SIZE(size);
mmio = mmap(0, mmap_size, PROT_READ, MAP_SHARED, fd_pmt, 0);
if (mmio != MAP_FAILED) {
-
if (debug)
fprintf(stderr, "%s: 0x%lx mmaped at: %p\n", __func__, guid, mmio);
- ret = calloc(1, sizeof(*ret));
+ new_pmt = calloc(1, sizeof(*new_pmt));
- if (!ret) {
+ if (!new_pmt) {
fprintf(stderr, "%s: Failed to allocate pmt_mmio\n", __func__);
exit(1);
}
- ret->guid = guid;
- ret->mmio_base = mmio;
- ret->pmt_offset = offset;
- ret->size = size;
+ /*
+ * Create linked list of mmaped regions,
+ * but preserve the ordering from sysfs.
+ * Ordering is important for the user to
+ * use the seq=%u parameter when adding a counter.
+ */
+ new_pmt->guid = guid;
+ new_pmt->mmio_base = mmio;
+ new_pmt->pmt_offset = offset;
+ new_pmt->size = size;
+ new_pmt->next = pmt_mmios;
+
+ if (last)
+ last->next = new_pmt;
+ else
+ head = new_pmt;
- ret->next = pmt_mmios;
- pmt_mmios = ret;
+ last = new_pmt;
}
loop_cleanup_and_break:
close(fd_pmt);
close(fd_telem_dir);
- break;
}
- closedir(dirp);
+ pmt_diriter_remove(&pmt_iter);
- return ret;
+ /*
+ * If we found something, stick the just-created
+ * linked list onto the front.
+ */
+ if (head)
+ pmt_mmios = head;
+
+ return head;
}
struct pmt_mmio *pmt_mmio_find(unsigned int guid)
@@ -8992,7 +9240,7 @@ void *pmt_get_counter_pointer(struct pmt_mmio *pmmio, unsigned long counter_offs
return ret;
}
-struct pmt_mmio *pmt_add_guid(unsigned int guid)
+struct pmt_mmio *pmt_add_guid(unsigned int guid, unsigned int seq)
{
struct pmt_mmio *ret;
@@ -9000,6 +9248,11 @@ struct pmt_mmio *pmt_add_guid(unsigned int guid)
if (!ret)
ret = pmt_mmio_open(guid);
+ while (ret && seq) {
+ ret = ret->next;
+ --seq;
+ }
+
return ret;
}
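+
+/*
+ * Illustrative note (not part of this change): pmt_mmio_open() now maps every
+ * telemetry region advertising the requested GUID, preserving sysfs order, so
+ * seq=N here simply walks N links down that list.  E.g. pmt_add_guid(guid, 1)
+ * would return the second telemN region with that GUID, or NULL if only one
+ * exists.
+ */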
@@ -9046,7 +9299,7 @@ void pmt_counter_add_domain(struct pmt_counter *pcounter, unsigned long *pmmio,
pcounter->domains[domain_id].pcounter = pmmio;
}
-int pmt_add_counter(unsigned int guid, const char *name, enum pmt_datatype type,
+int pmt_add_counter(unsigned int guid, unsigned int seq, const char *name, enum pmt_datatype type,
unsigned int lsb, unsigned int msb, unsigned int offset, enum counter_scope scope,
enum counter_format format, unsigned int domain_id, enum pmt_open_mode mode)
{
@@ -9066,10 +9319,10 @@ int pmt_add_counter(unsigned int guid, const char *name, enum pmt_datatype type,
exit(1);
}
- mmio = pmt_add_guid(guid);
+ mmio = pmt_add_guid(guid, seq);
if (!mmio) {
if (mode != PMT_OPEN_TRY) {
- fprintf(stderr, "%s: failed to map PMT MMIO for guid %x\n", __func__, guid);
+ fprintf(stderr, "%s: failed to map PMT MMIO for guid %x, seq %u\n", __func__, guid, seq);
exit(1);
}
@@ -9124,10 +9377,68 @@ int pmt_add_counter(unsigned int guid, const char *name, enum pmt_datatype type,
void pmt_init(void)
{
+ int cpu_num;
+ unsigned long seq, offset, mod_num;
+
if (BIC_IS_ENABLED(BIC_Diec6)) {
- pmt_add_counter(PMT_MTL_DC6_GUID, "Die%c6", PMT_TYPE_XTAL_TIME, PMT_COUNTER_MTL_DC6_LSB,
- PMT_COUNTER_MTL_DC6_MSB, PMT_COUNTER_MTL_DC6_OFFSET, SCOPE_PACKAGE, FORMAT_DELTA,
- 0, PMT_OPEN_TRY);
+ pmt_add_counter(PMT_MTL_DC6_GUID, PMT_MTL_DC6_SEQ, "Die%c6", PMT_TYPE_XTAL_TIME,
+ PMT_COUNTER_MTL_DC6_LSB, PMT_COUNTER_MTL_DC6_MSB, PMT_COUNTER_MTL_DC6_OFFSET,
+ SCOPE_PACKAGE, FORMAT_DELTA, 0, PMT_OPEN_TRY);
+ }
+
+ if (BIC_IS_ENABLED(BIC_CPU_c1e)) {
+ seq = 0;
+ offset = PMT_COUNTER_CWF_MC1E_OFFSET_BASE;
+ mod_num = 0; /* Relative module number for current PMT file. */
+
+ /* Open the counter for each CPU. */
+ for (cpu_num = 0; cpu_num < topo.max_cpu_num;) {
+
+ if (cpu_is_not_allowed(cpu_num))
+ goto next_loop_iter;
+
+ /*
+ * Set the scope to CPU, even though CWF reports the counter per module.
+ * CPUs inside the same module will read from the same location, instead of reporting zeros.
+ *
+ * CWF with newer firmware might require PMT_TYPE_XTAL_TIME instead of PMT_TYPE_TCORE_CLOCK.
+ */
+ pmt_add_counter(PMT_CWF_MC1E_GUID, seq, "CPU%c1e", PMT_TYPE_TCORE_CLOCK,
+ PMT_COUNTER_CWF_MC1E_LSB, PMT_COUNTER_CWF_MC1E_MSB, offset, SCOPE_CPU,
+ FORMAT_DELTA, cpu_num, PMT_OPEN_TRY);
+
+ /*
+ * The logic for advancing to the next loop iteration is rather involved,
+ * so keep it behind a label.
+ */
+next_loop_iter:
+ /*
+ * Advance the cpu number and check if we should also advance offset to
+ * the next counter inside the PMT file.
+ *
+ * On the Clearwater Forest platform, the counter is reported per module,
+ * so open the same counter for all of the CPUs inside the module.
+ * That way, the reported table shows the correct value for all of the CPUs inside the module,
+ * instead of zeros.
+ */
+ ++cpu_num;
+ if (cpu_num % PMT_COUNTER_CWF_CPUS_PER_MODULE == 0) {
+ offset += PMT_COUNTER_CWF_MC1E_OFFSET_INCREMENT;
+ ++mod_num;
+ }
+
+ /*
+ * There are PMT_COUNTER_CWF_MC1E_NUM_MODULES_PER_FILE modules in each PMT file.
+ *
+ * If that number is reached, seq must be incremented to advance to the next file in the sequence,
+ * and the offset inside that file and the module counter have to be reset.
+ */
+ if (mod_num == PMT_COUNTER_CWF_MC1E_NUM_MODULES_PER_FILE) {
+ ++seq;
+ offset = PMT_COUNTER_CWF_MC1E_OFFSET_BASE;
+ mod_num = 0;
+ }
+ }
}
}
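+
+/*
+ * Worked example for the CPU%c1e loop above, with purely illustrative constants
+ * (the real PMT_COUNTER_CWF_* values are defined elsewhere): assuming 4 CPUs per
+ * module and 6 modules per PMT file, CPUs 0-3 share the counter at OFFSET_BASE in
+ * the seq=0 file, CPUs 4-7 the counter at OFFSET_BASE + OFFSET_INCREMENT, and so
+ * on; CPU 24 then wraps to seq=1 with the offset reset to OFFSET_BASE.
+ */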
@@ -9163,6 +9474,18 @@ void turbostat_init()
}
}
+void affinitize_child(void)
+{
+ /* Prefer cpu_possible_set, if available */
+ if (sched_setaffinity(0, cpu_possible_setsize, cpu_possible_set)) {
+ warn("sched_setaffinity cpu_possible_set");
+
+ /* Otherwise, allow child to run on same cpu set as turbostat */
+ if (sched_setaffinity(0, cpu_allowed_setsize, cpu_allowed_set))
+ warn("sched_setaffinity cpu_allowed_set");
+ }
+}
+
int fork_it(char **argv)
{
pid_t child_pid;
@@ -9178,6 +9501,7 @@ int fork_it(char **argv)
child_pid = fork();
if (!child_pid) {
/* child */
+ affinitize_child();
execvp(argv[0], argv);
err(errno, "exec %s", argv[0]);
} else {
@@ -9204,10 +9528,9 @@ int fork_it(char **argv)
timersub(&tv_odd, &tv_even, &tv_delta);
if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
fprintf(outf, "%s: Counter reset detected\n", progname);
- else {
- compute_average(EVEN_COUNTERS);
- format_all_counters(EVEN_COUNTERS);
- }
+
+ compute_average(EVEN_COUNTERS);
+ format_all_counters(EVEN_COUNTERS);
fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec / 1000000.0);
@@ -9236,7 +9559,7 @@ int get_and_dump_counters(void)
void print_version()
{
- fprintf(outf, "turbostat version 2024.11.30 - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2025.02.02 - Len Brown <lenb@kernel.org>\n");
}
#define COMMAND_LINE_SIZE 2048
@@ -9561,7 +9884,7 @@ next:
}
if ((msr_num == 0) && (path == NULL) && (perf_device[0] == '\0' || perf_event[0] == '\0')) {
- fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter | perf/device/event ) required\n");
+ fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter | perf/device/event) required\n");
fail++;
}
@@ -9599,15 +9922,100 @@ bool starts_with(const char *str, const char *prefix)
return strncmp(prefix, str, strlen(prefix)) == 0;
}
+int pmt_parse_from_path(const char *target_path, unsigned int *out_guid, unsigned int *out_seq)
+{
+ struct pmt_diriter_t pmt_iter;
+ const struct dirent *dirname;
+ struct stat stat, target_stat;
+ int fd_telem_dir = -1;
+ int fd_target_dir;
+ unsigned int seq = 0;
+ unsigned long guid, target_guid;
+ int ret = -1;
+
+ fd_target_dir = open(target_path, O_RDONLY | O_DIRECTORY);
+ if (fd_target_dir == -1) {
+ return -1;
+ }
+
+ if (fstat(fd_target_dir, &target_stat) == -1) {
+ fprintf(stderr, "%s: Failed to stat the target: %s", __func__, strerror(errno));
+ exit(1);
+ }
+
+ if (parse_telem_info_file(fd_target_dir, "guid", "%lx", &target_guid)) {
+ fprintf(stderr, "%s: Failed to parse the target guid file: %s", __func__, strerror(errno));
+ exit(1);
+ }
+
+ close(fd_target_dir);
+
+ pmt_diriter_init(&pmt_iter);
+
+ for (dirname = pmt_diriter_begin(&pmt_iter, SYSFS_TELEM_PATH); dirname != NULL;
+ dirname = pmt_diriter_next(&pmt_iter)) {
+
+ fd_telem_dir = openat(dirfd(pmt_iter.dir), dirname->d_name, O_RDONLY | O_DIRECTORY);
+ if (fd_telem_dir == -1)
+ continue;
+
+ if (parse_telem_info_file(fd_telem_dir, "guid", "%lx", &guid)) {
+ fprintf(stderr, "%s: Failed to parse the guid file: %s", __func__, strerror(errno));
+ continue;
+ }
+
+ if (fstat(fd_telem_dir, &stat) == -1) {
+ fprintf(stderr, "%s: Failed to stat %s directory: %s", __func__,
+ dirname->d_name, strerror(errno));
+ continue;
+ }
+
+ /*
+ * If we reached the same directory as the target, exit the loop;
+ * seq now holds the correct value.
+ */
+ if (stat.st_dev == target_stat.st_dev && stat.st_ino == target_stat.st_ino) {
+ ret = 0;
+ break;
+ }
+
+ /*
+ * If we reached a directory with the same guid
+ * but it's not the target directory yet,
+ * increment seq and continue the search.
+ */
+ if (guid == target_guid)
+ ++seq;
+
+ close(fd_telem_dir);
+ fd_telem_dir = -1;
+ }
+
+ pmt_diriter_remove(&pmt_iter);
+
+ if (fd_telem_dir != -1)
+ close(fd_telem_dir);
+
+ if (!ret) {
+ *out_guid = target_guid;
+ *out_seq = seq;
+ }
+
+ return ret;
+}
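+
+/*
+ * Illustrative example of how the path= form resolves (the telemN name is
+ * made up): a user passing
+ *   --add pmt,name=XTAL,type=raw,...,path=<SYSFS_TELEM_PATH>/telem3
+ * gets telem3's "guid" file read, and seq set to the number of earlier telemN
+ * entries under SYSFS_TELEM_PATH that advertise the same guid, i.e. the
+ * equivalent guid=...,seq=N pair is computed for the caller.
+ */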
+
void parse_add_command_pmt(char *add_command)
{
char *name = NULL;
char *type_name = NULL;
char *format_name = NULL;
+ char *direct_path = NULL;
+ static const char direct_path_prefix[] = "path=";
unsigned int offset;
unsigned int lsb;
unsigned int msb;
unsigned int guid;
+ unsigned int seq = 0; /* By default, pick first file in a sequence with a given GUID. */
unsigned int domain_id;
enum counter_scope scope = 0;
enum pmt_datatype type = PMT_TYPE_RAW;
@@ -9687,6 +10095,13 @@ void parse_add_command_pmt(char *add_command)
goto next;
}
+ if (sscanf(add_command, "seq=%x", &seq) == 1)
+ goto next;
+
+ if (strncmp(add_command, direct_path_prefix, strlen(direct_path_prefix)) == 0) {
+ direct_path = add_command + strlen(direct_path_prefix);
+ goto next;
+ }
next:
add_command = strchr(add_command, ',');
if (add_command) {
@@ -9737,6 +10152,11 @@ next:
has_type = true;
}
+ if (strcmp("tcore_clock", type_name) == 0) {
+ type = PMT_TYPE_TCORE_CLOCK;
+ has_type = true;
+ }
+
if (!has_type) {
printf("%s: invalid %s: %s\n", __func__, "type", type_name);
exit(1);
@@ -9758,8 +10178,24 @@ next:
exit(1);
}
+ if (direct_path && has_guid) {
+ printf("%s: path and guid+seq parameters are mutually exclusive\n"
+ "notice: passed guid=0x%x and path=%s\n", __func__, guid, direct_path);
+ exit(1);
+ }
+
+ if (direct_path) {
+ if (pmt_parse_from_path(direct_path, &guid, &seq)) {
+ printf("%s: failed to parse PMT file from %s\n", __func__, direct_path);
+ exit(1);
+ }
+
+ /* GUID was just inferred from the direct path. */
+ has_guid = true;
+ }
+
if (!has_guid) {
- printf("%s: missing %s\n", __func__, "guid");
+ printf("%s: missing %s\n", __func__, "guid or path");
exit(1);
}
@@ -9773,7 +10209,7 @@ next:
exit(1);
}
- pmt_add_counter(guid, name, type, lsb, msb, offset, scope, format, domain_id, PMT_OPEN_REQUIRED);
+ pmt_add_counter(guid, seq, name, type, lsb, msb, offset, scope, format, domain_id, PMT_OPEN_REQUIRED);
}
void parse_add_command(char *add_command)
@@ -9921,6 +10357,7 @@ void cmdline(int argc, char **argv)
{ "Dump", no_argument, 0, 'D' },
{ "debug", no_argument, 0, 'd' }, /* internal, not documented */
{ "enable", required_argument, 0, 'e' },
+ { "force", no_argument, 0, 'f' },
{ "interval", required_argument, 0, 'i' },
{ "IPC", no_argument, 0, 'I' },
{ "num_iterations", required_argument, 0, 'n' },
@@ -9981,6 +10418,9 @@ void cmdline(int argc, char **argv)
/* --enable specified counter */
bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
break;
+ case 'f':
+ force_load++;
+ break;
case 'd':
debug++;
ENABLE_BIC(BIC_DISABLED_BY_DEFAULT);
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 625f5b046776..f3e15e9efa76 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -9,7 +9,7 @@
#ifdef LSP
#define __bpf__
-#include "../vmlinux/vmlinux.h"
+#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif
@@ -24,6 +24,10 @@
#define PF_EXITING 0x00000004
#define CLOCK_MONOTONIC 1
+extern int LINUX_KERNEL_VERSION __kconfig;
+extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
+extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;
+
/*
* Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
* lead to really confusing misbehaviors. Let's trigger a build failure.
@@ -72,6 +76,7 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
+u64 scx_bpf_now(void) __ksym __weak;
/*
* Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
@@ -98,7 +103,7 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
+ _Pragma("GCC diagnostic pop")
/*
* scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
@@ -136,6 +141,20 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
___scx_bpf_bstr_format_checker(fmt, ##args); \
})
+/*
+ * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
+ * of system information for debugging.
+ */
+#define scx_bpf_dump_header() \
+({ \
+ scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n", \
+ LINUX_KERNEL_VERSION >> 16, \
+ LINUX_KERNEL_VERSION >> 8 & 0xFF, \
+ LINUX_KERNEL_VERSION & 0xFF, \
+ CONFIG_LOCALVERSION, \
+ CONFIG_CC_VERSION_TEXT); \
+})
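+
+/*
+ * Illustrative usage sketch (not part of this change; example_dump is a
+ * hypothetical scheduler callback): call it first from ops.dump, e.g.
+ *
+ *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
+ *	{
+ *		scx_bpf_dump_header();
+ *		scx_bpf_dump("my scheduler state ...\n");
+ *	}
+ */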
+
#define BPF_STRUCT_OPS(name, args...) \
SEC("struct_ops/"#name) \
BPF_PROG(name, ##args)
@@ -317,6 +336,66 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
+int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
+void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;
+
+#define def_iter_struct(name) \
+struct bpf_iter_##name { \
+ struct bpf_iter_bits it; \
+ const struct cpumask *bitmap; \
+};
+
+#define def_iter_new(name) \
+static inline int bpf_iter_##name##_new( \
+ struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words) \
+{ \
+ it->bitmap = scx_bpf_get_##name##_cpumask(); \
+ return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap, \
+ sizeof(struct cpumask) / 8); \
+}
+
+#define def_iter_next(name) \
+static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) { \
+ return bpf_iter_bits_next(&it->it); \
+}
+
+#define def_iter_destroy(name) \
+static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) { \
+ scx_bpf_put_cpumask(it->bitmap); \
+ bpf_iter_bits_destroy(&it->it); \
+}
+#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)
+
+/// Provides iterators for possible and online CPUs.
+///
+/// # Example
+///
+/// ```
+/// static inline void example_use() {
+/// int *cpu;
+///
+/// for_each_possible_cpu(cpu){
+/// bpf_printk("CPU %d is possible", *cpu);
+/// }
+///
+/// for_each_online_cpu(cpu){
+/// bpf_printk("CPU %d is online", *cpu);
+/// }
+/// }
+/// ```
+def_iter_struct(possible);
+def_iter_new(possible);
+def_iter_next(possible);
+def_iter_destroy(possible);
+#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)
+
+def_iter_struct(online);
+def_iter_new(online);
+def_iter_next(online);
+def_iter_destroy(online);
+#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)
+
/*
* Access a cpumask in read-only mode (typically to check bits).
*/
@@ -329,6 +408,100 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
+/*
+ * Time helpers, most of which are from jiffies.h.
+ */
+
+/**
+ * time_delta - Calculate the delta between new and old time stamp
+ * @after: first comparable as u64
+ * @before: second comparable as u64
+ *
+ * Return: the time difference, which is >= 0
+ */
+static inline s64 time_delta(u64 after, u64 before)
+{
+ return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
+}
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as u64
+ * @b: second comparable as u64
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
+static inline bool time_after(u64 a, u64 b)
+{
+ return (s64)(b - a) < 0;
+}
+
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as u64
+ * @b: second comparable as u64
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
+static inline bool time_before(u64 a, u64 b)
+{
+ return time_after(b, a);
+}
+
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as u64
+ * @b: second comparable as u64
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
+static inline bool time_after_eq(u64 a, u64 b)
+{
+ return (s64)(a - b) >= 0;
+}
+
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as u64
+ * @b: second comparable as u64
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
+static inline bool time_before_eq(u64 a, u64 b)
+{
+ return time_after_eq(b, a);
+}
+
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
+static inline bool time_in_range(u64 a, u64 b, u64 c)
+{
+ return time_after_eq(a, b) && time_before_eq(a, c);
+}
+
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
+ */
+static inline bool time_in_range_open(u64 a, u64 b, u64 c)
+{
+ return time_after_eq(a, b) && time_before(a, c);
+}
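+
+/*
+ * Illustrative usage sketch (mirrors how scx_central.bpf.c uses these helpers
+ * further down in this series): they are wrap-safe, so a slice-expiry check
+ * stays correct even if the timestamp overflows, e.g. skip kicking a CPU whose
+ * slice has not yet expired:
+ *
+ *	u64 now = scx_bpf_now();
+ *
+ *	if (time_before(now, started_at + slice_ns))
+ *		continue;
+ */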
+
/*
* Other helpers
@@ -423,5 +596,6 @@ static inline u32 log2_u64(u64 v)
}
#include "compat.bpf.h"
+#include "enums.bpf.h"
#endif /* __SCX_COMMON_BPF_H */
diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h
index 5b0f90152152..dc18b99e55cd 100644
--- a/tools/sched_ext/include/scx/common.h
+++ b/tools/sched_ext/include/scx/common.h
@@ -71,5 +71,11 @@ typedef int64_t s64;
#include "user_exit_info.h"
#include "compat.h"
+#include "enums.h"
+
+/* not available when building kernel tools/sched_ext */
+#if __has_include(<lib/sdt_task.h>)
+#include <lib/sdt_task.h>
+#endif
#endif /* __SCHED_EXT_COMMON_H */
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index d56520100a26..50e1499ae093 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -125,6 +125,11 @@ bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter,
false; \
})
+#define scx_bpf_now() \
+ (bpf_ksym_exists(scx_bpf_now) ? \
+ scx_bpf_now() : \
+ bpf_ktime_get_ns())
+
/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index cc56ff9aa252..b50280e2ba2b 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -149,6 +149,7 @@ static inline long scx_hotplug_seq(void)
__skel = __scx_name##__open(); \
SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \
__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \
+ SCX_ENUM_INIT(__skel); \
__skel; \
})
diff --git a/tools/sched_ext/include/scx/enums.autogen.bpf.h b/tools/sched_ext/include/scx/enums.autogen.bpf.h
new file mode 100644
index 000000000000..0e941a0d6f88
--- /dev/null
+++ b/tools/sched_ext/include/scx/enums.autogen.bpf.h
@@ -0,0 +1,105 @@
+/*
+ * WARNING: This file is autogenerated from scripts/gen_enums.py. If you would
+ * like to access an enum that is currently missing, add it to the script
+ * and run it from the root directory to update this file.
+ */
+
+const volatile u64 __SCX_OPS_NAME_LEN __weak;
+#define SCX_OPS_NAME_LEN __SCX_OPS_NAME_LEN
+
+const volatile u64 __SCX_SLICE_DFL __weak;
+#define SCX_SLICE_DFL __SCX_SLICE_DFL
+
+const volatile u64 __SCX_SLICE_INF __weak;
+#define SCX_SLICE_INF __SCX_SLICE_INF
+
+const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak;
+#define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN
+
+const volatile u64 __SCX_DSQ_FLAG_LOCAL_ON __weak;
+#define SCX_DSQ_FLAG_LOCAL_ON __SCX_DSQ_FLAG_LOCAL_ON
+
+const volatile u64 __SCX_DSQ_INVALID __weak;
+#define SCX_DSQ_INVALID __SCX_DSQ_INVALID
+
+const volatile u64 __SCX_DSQ_GLOBAL __weak;
+#define SCX_DSQ_GLOBAL __SCX_DSQ_GLOBAL
+
+const volatile u64 __SCX_DSQ_LOCAL __weak;
+#define SCX_DSQ_LOCAL __SCX_DSQ_LOCAL
+
+const volatile u64 __SCX_DSQ_LOCAL_ON __weak;
+#define SCX_DSQ_LOCAL_ON __SCX_DSQ_LOCAL_ON
+
+const volatile u64 __SCX_DSQ_LOCAL_CPU_MASK __weak;
+#define SCX_DSQ_LOCAL_CPU_MASK __SCX_DSQ_LOCAL_CPU_MASK
+
+const volatile u64 __SCX_TASK_QUEUED __weak;
+#define SCX_TASK_QUEUED __SCX_TASK_QUEUED
+
+const volatile u64 __SCX_TASK_RESET_RUNNABLE_AT __weak;
+#define SCX_TASK_RESET_RUNNABLE_AT __SCX_TASK_RESET_RUNNABLE_AT
+
+const volatile u64 __SCX_TASK_DEQD_FOR_SLEEP __weak;
+#define SCX_TASK_DEQD_FOR_SLEEP __SCX_TASK_DEQD_FOR_SLEEP
+
+const volatile u64 __SCX_TASK_STATE_SHIFT __weak;
+#define SCX_TASK_STATE_SHIFT __SCX_TASK_STATE_SHIFT
+
+const volatile u64 __SCX_TASK_STATE_BITS __weak;
+#define SCX_TASK_STATE_BITS __SCX_TASK_STATE_BITS
+
+const volatile u64 __SCX_TASK_STATE_MASK __weak;
+#define SCX_TASK_STATE_MASK __SCX_TASK_STATE_MASK
+
+const volatile u64 __SCX_TASK_CURSOR __weak;
+#define SCX_TASK_CURSOR __SCX_TASK_CURSOR
+
+const volatile u64 __SCX_TASK_NONE __weak;
+#define SCX_TASK_NONE __SCX_TASK_NONE
+
+const volatile u64 __SCX_TASK_INIT __weak;
+#define SCX_TASK_INIT __SCX_TASK_INIT
+
+const volatile u64 __SCX_TASK_READY __weak;
+#define SCX_TASK_READY __SCX_TASK_READY
+
+const volatile u64 __SCX_TASK_ENABLED __weak;
+#define SCX_TASK_ENABLED __SCX_TASK_ENABLED
+
+const volatile u64 __SCX_TASK_NR_STATES __weak;
+#define SCX_TASK_NR_STATES __SCX_TASK_NR_STATES
+
+const volatile u64 __SCX_TASK_DSQ_ON_PRIQ __weak;
+#define SCX_TASK_DSQ_ON_PRIQ __SCX_TASK_DSQ_ON_PRIQ
+
+const volatile u64 __SCX_KICK_IDLE __weak;
+#define SCX_KICK_IDLE __SCX_KICK_IDLE
+
+const volatile u64 __SCX_KICK_PREEMPT __weak;
+#define SCX_KICK_PREEMPT __SCX_KICK_PREEMPT
+
+const volatile u64 __SCX_KICK_WAIT __weak;
+#define SCX_KICK_WAIT __SCX_KICK_WAIT
+
+const volatile u64 __SCX_ENQ_WAKEUP __weak;
+#define SCX_ENQ_WAKEUP __SCX_ENQ_WAKEUP
+
+const volatile u64 __SCX_ENQ_HEAD __weak;
+#define SCX_ENQ_HEAD __SCX_ENQ_HEAD
+
+const volatile u64 __SCX_ENQ_PREEMPT __weak;
+#define SCX_ENQ_PREEMPT __SCX_ENQ_PREEMPT
+
+const volatile u64 __SCX_ENQ_REENQ __weak;
+#define SCX_ENQ_REENQ __SCX_ENQ_REENQ
+
+const volatile u64 __SCX_ENQ_LAST __weak;
+#define SCX_ENQ_LAST __SCX_ENQ_LAST
+
+const volatile u64 __SCX_ENQ_CLEAR_OPSS __weak;
+#define SCX_ENQ_CLEAR_OPSS __SCX_ENQ_CLEAR_OPSS
+
+const volatile u64 __SCX_ENQ_DSQ_PRIQ __weak;
+#define SCX_ENQ_DSQ_PRIQ __SCX_ENQ_DSQ_PRIQ
+
diff --git a/tools/sched_ext/include/scx/enums.autogen.h b/tools/sched_ext/include/scx/enums.autogen.h
new file mode 100644
index 000000000000..88137a140e72
--- /dev/null
+++ b/tools/sched_ext/include/scx/enums.autogen.h
@@ -0,0 +1,41 @@
+/*
+ * WARNING: This file is autogenerated from scripts/gen_enums.py. If you would
+ * like to access an enum that is currently missing, add it to the script
+ * and run it from the root directory to update this file.
+ */
+
+#define SCX_ENUM_INIT(skel) do { \
+ SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \
+ SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \
+ SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_GLOBAL); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_ON); \
+ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_CPU_MASK); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_QUEUED); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_RESET_RUNNABLE_AT); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_DEQD_FOR_SLEEP); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_SHIFT); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_BITS); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_MASK); \
+ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_CURSOR); \
+ SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NONE); \
+ SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_INIT); \
+ SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_READY); \
+ SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_ENABLED); \
+ SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NR_STATES); \
+ SCX_ENUM_SET(skel, scx_ent_dsq_flags, SCX_TASK_DSQ_ON_PRIQ); \
+ SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_IDLE); \
+ SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_PREEMPT); \
+ SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_WAIT); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_WAKEUP); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_HEAD); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_PREEMPT); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_REENQ); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_LAST); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_CLEAR_OPSS); \
+ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_DSQ_PRIQ); \
+} while (0)
diff --git a/tools/sched_ext/include/scx/enums.bpf.h b/tools/sched_ext/include/scx/enums.bpf.h
new file mode 100644
index 000000000000..af704c5d6334
--- /dev/null
+++ b/tools/sched_ext/include/scx/enums.bpf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Convenience macros for getting/setting struct scx_enums instances.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+#ifndef __SCX_ENUMS_BPF_H
+#define __SCX_ENUMS_BPF_H
+
+#include "enums.autogen.bpf.h"
+
+#endif /* __SCX_ENUMS_BPF_H */
diff --git a/tools/sched_ext/include/scx/enums.h b/tools/sched_ext/include/scx/enums.h
new file mode 100644
index 000000000000..34cbebe974b7
--- /dev/null
+++ b/tools/sched_ext/include/scx/enums.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define struct scx_enums that stores the load-time values of enums
+ * used by the BPF program.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+
+#ifndef __SCX_ENUMS_H
+#define __SCX_ENUMS_H
+
+static inline void __ENUM_set(u64 *val, char *type, char *name)
+{
+ bool res;
+
+ res = __COMPAT_read_enum(type, name, val);
+ SCX_BUG_ON(!res, "enum not found(%s)", name);
+}
+
+#define SCX_ENUM_SET(skel, type, name) do { \
+ __ENUM_set(&skel->rodata->__##name, #type, #name); \
+ } while (0)
+
+
+#include "enums.autogen.h"
+
+#endif /* __SCX_ENUMS_H */
diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h
index 8ce2734402e1..66f856640ee7 100644
--- a/tools/sched_ext/include/scx/user_exit_info.h
+++ b/tools/sched_ext/include/scx/user_exit_info.h
@@ -10,6 +10,11 @@
#ifndef __USER_EXIT_INFO_H
#define __USER_EXIT_INFO_H
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux.h"
+#endif
+
enum uei_sizes {
UEI_REASON_LEN = 128,
UEI_MSG_LEN = 1024,
@@ -25,9 +30,7 @@ struct user_exit_info {
#ifdef __bpf__
-#ifdef LSP
-#include "../vmlinux/vmlinux.h"
-#else
+#ifndef LSP
#include "vmlinux.h"
#endif
#include <bpf/bpf_core_read.h>
diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c
index e6fad6211f6c..50bc1737c167 100644
--- a/tools/sched_ext/scx_central.bpf.c
+++ b/tools/sched_ext/scx_central.bpf.c
@@ -57,7 +57,7 @@ enum {
const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */
-const volatile u64 slice_ns = SCX_SLICE_DFL;
+const volatile u64 slice_ns;
bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
@@ -87,11 +87,6 @@ struct {
__type(value, struct central_timer);
} central_timer SEC(".maps");
-static bool vtime_before(u64 a, u64 b)
-{
- return (s64)(a - b) < 0;
-}
-
s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
s32 prev_cpu, u64 wake_flags)
{
@@ -245,7 +240,7 @@ void BPF_STRUCT_OPS(central_running, struct task_struct *p)
s32 cpu = scx_bpf_task_cpu(p);
u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
if (started_at)
- *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */
+ *started_at = scx_bpf_now() ?: 1; /* 0 indicates idle */
}
void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
@@ -258,7 +253,7 @@ void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
- u64 now = bpf_ktime_get_ns();
+ u64 now = scx_bpf_now();
u64 nr_to_kick = nr_queued;
s32 i, curr_cpu;
@@ -279,7 +274,7 @@ static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
/* kick iff the current one exhausted its slice */
started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
if (started_at && *started_at &&
- vtime_before(now, *started_at + slice_ns))
+ time_before(now, *started_at + slice_ns))
continue;
/* and there's something pending */
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
index e938156ed0a0..1e9f74525d8f 100644
--- a/tools/sched_ext/scx_central.c
+++ b/tools/sched_ext/scx_central.c
@@ -58,6 +58,7 @@ restart:
skel->rodata->central_cpu = 0;
skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
+ skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
switch (opt) {
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 4e3afcd260bf..2c720e3ecad5 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -57,7 +57,7 @@ enum {
char _license[] SEC("license") = "GPL";
const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */
-const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
+const volatile u64 cgrp_slice_ns;
const volatile bool fifo_sched;
u64 cvtime_now;
@@ -137,11 +137,6 @@ static u64 div_round_up(u64 dividend, u64 divisor)
return (dividend + divisor - 1) / divisor;
}
-static bool vtime_before(u64 a, u64 b)
-{
- return (s64)(a - b) < 0;
-}
-
static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
struct cgv_node *cgc_a, *cgc_b;
@@ -271,7 +266,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
*/
max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
(2 * FCG_HWEIGHT_ONE);
- if (vtime_before(cvtime, cvtime_now - max_budget))
+ if (time_before(cvtime, cvtime_now - max_budget))
cvtime = cvtime_now - max_budget;
cgv_node->cvtime = cvtime;
@@ -401,7 +396,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
* Limit the amount of budget that an idling task can accumulate
* to one slice.
*/
- if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
+ if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
@@ -535,7 +530,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
* from multiple CPUs and thus racy. Any error should be
* contained and temporary. Let's just live with it.
*/
- if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
+ if (time_before(cgc->tvtime_now, p->scx.dsq_vtime))
cgc->tvtime_now = p->scx.dsq_vtime;
}
bpf_cgroup_release(cgrp);
@@ -645,7 +640,7 @@ static bool try_pick_next_cgroup(u64 *cgidp)
cgv_node = container_of(rb_node, struct cgv_node, rb_node);
cgid = cgv_node->cgid;
- if (vtime_before(cvtime_now, cgv_node->cvtime))
+ if (time_before(cvtime_now, cgv_node->cvtime))
cvtime_now = cgv_node->cvtime;
/*
@@ -734,7 +729,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
struct fcg_cpu_ctx *cpuc;
struct fcg_cgrp_ctx *cgc;
struct cgroup *cgrp;
- u64 now = bpf_ktime_get_ns();
+ u64 now = scx_bpf_now();
bool picked_next = false;
cpuc = find_cpu_ctx();
@@ -744,7 +739,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
if (!cpuc->cur_cgid)
goto pick_next_cgroup;
- if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
+ if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) {
if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
stat_inc(FCG_STAT_CNS_KEEP);
return;
@@ -920,14 +915,14 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
struct cgroup *from, struct cgroup *to)
{
struct fcg_cgrp_ctx *from_cgc, *to_cgc;
- s64 vtime_delta;
+ s64 delta;
/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
return;
- vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
- p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
+ delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
+ p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
}
s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c
index 5d24ca9c29d9..6dd423eeb4ff 100644
--- a/tools/sched_ext/scx_flatcg.c
+++ b/tools/sched_ext/scx_flatcg.c
@@ -137,6 +137,7 @@ restart:
skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg);
skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+ skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) {
double v;
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index ee264947e0c3..3a20bb0c014a 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -33,7 +33,7 @@ enum consts {
char _license[] SEC("license") = "GPL";
-const volatile u64 slice_ns = SCX_SLICE_DFL;
+const volatile u64 slice_ns;
const volatile u32 stall_user_nth;
const volatile u32 stall_kernel_nth;
const volatile u32 dsp_inf_loop_after;
diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c
index ac45a02b4055..c4912ab2e76f 100644
--- a/tools/sched_ext/scx_qmap.c
+++ b/tools/sched_ext/scx_qmap.c
@@ -64,6 +64,8 @@ int main(int argc, char **argv)
skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
+ skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
+
while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
switch (opt) {
case 's':
diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c
index 31f915b286c6..e6de99dba7db 100644
--- a/tools/sched_ext/scx_simple.bpf.c
+++ b/tools/sched_ext/scx_simple.bpf.c
@@ -52,11 +52,6 @@ static void stat_inc(u32 idx)
(*cnt_p)++;
}
-static inline bool vtime_before(u64 a, u64 b)
-{
- return (s64)(a - b) < 0;
-}
-
s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
bool is_idle = false;
@@ -84,7 +79,7 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
* Limit the amount of budget that an idling task can accumulate
* to one slice.
*/
- if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+ if (time_before(vtime, vtime_now - SCX_SLICE_DFL))
vtime = vtime_now - SCX_SLICE_DFL;
scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
@@ -108,7 +103,7 @@ void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
* thus racy. Any error should be contained and temporary. Let's just
* live with it.
*/
- if (vtime_before(vtime_now, p->scx.dsq_vtime))
+ if (time_before(vtime_now, p->scx.dsq_vtime))
vtime_now = p->scx.dsq_vtime;
}
diff --git a/tools/scripts/syscall.tbl b/tools/scripts/syscall.tbl
new file mode 100644
index 000000000000..ebbdb3c42e9f
--- /dev/null
+++ b/tools/scripts/syscall.tbl
@@ -0,0 +1,409 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# This file contains the system call numbers for all of the
+# more recently added architectures.
+#
+# As a basic principle, no duplication of functionality
+# should be added, e.g. we don't use lseek when llseek
+# is present. New architectures should use this file
+# and implement the less feature-full calls in user space.
+#
+0 common io_setup sys_io_setup compat_sys_io_setup
+1 common io_destroy sys_io_destroy
+2 common io_submit sys_io_submit compat_sys_io_submit
+3 common io_cancel sys_io_cancel
+4 time32 io_getevents sys_io_getevents_time32
+4 64 io_getevents sys_io_getevents
+5 common setxattr sys_setxattr
+6 common lsetxattr sys_lsetxattr
+7 common fsetxattr sys_fsetxattr
+8 common getxattr sys_getxattr
+9 common lgetxattr sys_lgetxattr
+10 common fgetxattr sys_fgetxattr
+11 common listxattr sys_listxattr
+12 common llistxattr sys_llistxattr
+13 common flistxattr sys_flistxattr
+14 common removexattr sys_removexattr
+15 common lremovexattr sys_lremovexattr
+16 common fremovexattr sys_fremovexattr
+17 common getcwd sys_getcwd
+18 common lookup_dcookie sys_ni_syscall
+19 common eventfd2 sys_eventfd2
+20 common epoll_create1 sys_epoll_create1
+21 common epoll_ctl sys_epoll_ctl
+22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+23 common dup sys_dup
+24 common dup3 sys_dup3
+25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+25 64 fcntl sys_fcntl
+26 common inotify_init1 sys_inotify_init1
+27 common inotify_add_watch sys_inotify_add_watch
+28 common inotify_rm_watch sys_inotify_rm_watch
+29 common ioctl sys_ioctl compat_sys_ioctl
+30 common ioprio_set sys_ioprio_set
+31 common ioprio_get sys_ioprio_get
+32 common flock sys_flock
+33 common mknodat sys_mknodat
+34 common mkdirat sys_mkdirat
+35 common unlinkat sys_unlinkat
+36 common symlinkat sys_symlinkat
+37 common linkat sys_linkat
+# renameat is superseded with flags by renameat2
+38 renameat renameat sys_renameat
+39 common umount2 sys_umount
+40 common mount sys_mount
+41 common pivot_root sys_pivot_root
+42 common nfsservctl sys_ni_syscall
+43 32 statfs64 sys_statfs64 compat_sys_statfs64
+43 64 statfs sys_statfs
+44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+44 64 fstatfs sys_fstatfs
+45 32 truncate64 sys_truncate64 compat_sys_truncate64
+45 64 truncate sys_truncate
+46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+46 64 ftruncate sys_ftruncate
+47 common fallocate sys_fallocate compat_sys_fallocate
+48 common faccessat sys_faccessat
+49 common chdir sys_chdir
+50 common fchdir sys_fchdir
+51 common chroot sys_chroot
+52 common fchmod sys_fchmod
+53 common fchmodat sys_fchmodat
+54 common fchownat sys_fchownat
+55 common fchown sys_fchown
+56 common openat sys_openat
+57 common close sys_close
+58 common vhangup sys_vhangup
+59 common pipe2 sys_pipe2
+60 common quotactl sys_quotactl
+61 common getdents64 sys_getdents64
+62 32 llseek sys_llseek
+62 64 lseek sys_lseek
+63 common read sys_read
+64 common write sys_write
+65 common readv sys_readv sys_readv
+66 common writev sys_writev sys_writev
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 common preadv sys_preadv compat_sys_preadv
+70 common pwritev sys_pwritev compat_sys_pwritev
+71 32 sendfile64 sys_sendfile64
+71 64 sendfile sys_sendfile64
+72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+72 64 pselect6 sys_pselect6
+73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+73 64 ppoll sys_ppoll
+74 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+75 common vmsplice sys_vmsplice
+76 common splice sys_splice
+77 common tee sys_tee
+78 common readlinkat sys_readlinkat
+79 stat64 fstatat64 sys_fstatat64
+79 64 newfstatat sys_newfstatat
+80 stat64 fstat64 sys_fstat64
+80 64 fstat sys_newfstat
+81 common sync sys_sync
+82 common fsync sys_fsync
+83 common fdatasync sys_fdatasync
+84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+85 common timerfd_create sys_timerfd_create
+86 time32 timerfd_settime sys_timerfd_settime32
+86 64 timerfd_settime sys_timerfd_settime
+87 time32 timerfd_gettime sys_timerfd_gettime32
+87 64 timerfd_gettime sys_timerfd_gettime
+88 time32 utimensat sys_utimensat_time32
+88 64 utimensat sys_utimensat
+89 common acct sys_acct
+90 common capget sys_capget
+91 common capset sys_capset
+92 common personality sys_personality
+93 common exit sys_exit
+94 common exit_group sys_exit_group
+95 common waitid sys_waitid compat_sys_waitid
+96 common set_tid_address sys_set_tid_address
+97 common unshare sys_unshare
+98 time32 futex sys_futex_time32
+98 64 futex sys_futex
+99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+101 time32 nanosleep sys_nanosleep_time32
+101 64 nanosleep sys_nanosleep
+102 common getitimer sys_getitimer compat_sys_getitimer
+103 common setitimer sys_setitimer compat_sys_setitimer
+104 common kexec_load sys_kexec_load compat_sys_kexec_load
+105 common init_module sys_init_module
+106 common delete_module sys_delete_module
+107 common timer_create sys_timer_create compat_sys_timer_create
+108 time32 timer_gettime sys_timer_gettime32
+108 64 timer_gettime sys_timer_gettime
+109 common timer_getoverrun sys_timer_getoverrun
+110 time32 timer_settime sys_timer_settime32
+110 64 timer_settime sys_timer_settime
+111 common timer_delete sys_timer_delete
+112 time32 clock_settime sys_clock_settime32
+112 64 clock_settime sys_clock_settime
+113 time32 clock_gettime sys_clock_gettime32
+113 64 clock_gettime sys_clock_gettime
+114 time32 clock_getres sys_clock_getres_time32
+114 64 clock_getres sys_clock_getres
+115 time32 clock_nanosleep sys_clock_nanosleep_time32
+115 64 clock_nanosleep sys_clock_nanosleep
+116 common syslog sys_syslog
+117 common ptrace sys_ptrace compat_sys_ptrace
+118 common sched_setparam sys_sched_setparam
+119 common sched_setscheduler sys_sched_setscheduler
+120 common sched_getscheduler sys_sched_getscheduler
+121 common sched_getparam sys_sched_getparam
+122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+124 common sched_yield sys_sched_yield
+125 common sched_get_priority_max sys_sched_get_priority_max
+126 common sched_get_priority_min sys_sched_get_priority_min
+127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+127 64 sched_rr_get_interval sys_sched_rr_get_interval
+128 common restart_syscall sys_restart_syscall
+129 common kill sys_kill
+130 common tkill sys_tkill
+131 common tgkill sys_tgkill
+132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+137 64 rt_sigtimedwait sys_rt_sigtimedwait
+138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+140 common setpriority sys_setpriority
+141 common getpriority sys_getpriority
+142 common reboot sys_reboot
+143 common setregid sys_setregid
+144 common setgid sys_setgid
+145 common setreuid sys_setreuid
+146 common setuid sys_setuid
+147 common setresuid sys_setresuid
+148 common getresuid sys_getresuid
+149 common setresgid sys_setresgid
+150 common getresgid sys_getresgid
+151 common setfsuid sys_setfsuid
+152 common setfsgid sys_setfsgid
+153 common times sys_times compat_sys_times
+154 common setpgid sys_setpgid
+155 common getpgid sys_getpgid
+156 common getsid sys_getsid
+157 common setsid sys_setsid
+158 common getgroups sys_getgroups
+159 common setgroups sys_setgroups
+160 common uname sys_newuname
+161 common sethostname sys_sethostname
+162 common setdomainname sys_setdomainname
+# getrlimit and setrlimit are superseded by prlimit64
+163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit
+164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit
+165 common getrusage sys_getrusage compat_sys_getrusage
+166 common umask sys_umask
+167 common prctl sys_prctl
+168 common getcpu sys_getcpu
+169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+169 64 gettimeofday sys_gettimeofday
+170 time32 settimeofday sys_settimeofday compat_sys_settimeofday
+170 64 settimeofday sys_settimeofday
+171 time32 adjtimex sys_adjtimex_time32
+171 64 adjtimex sys_adjtimex
+172 common getpid sys_getpid
+173 common getppid sys_getppid
+174 common getuid sys_getuid
+175 common geteuid sys_geteuid
+176 common getgid sys_getgid
+177 common getegid sys_getegid
+178 common gettid sys_gettid
+179 common sysinfo sys_sysinfo compat_sys_sysinfo
+180 common mq_open sys_mq_open compat_sys_mq_open
+181 common mq_unlink sys_mq_unlink
+182 time32 mq_timedsend sys_mq_timedsend_time32
+182 64 mq_timedsend sys_mq_timedsend
+183 time32 mq_timedreceive sys_mq_timedreceive_time32
+183 64 mq_timedreceive sys_mq_timedreceive
+184 common mq_notify sys_mq_notify compat_sys_mq_notify
+185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+186 common msgget sys_msgget
+187 common msgctl sys_msgctl compat_sys_msgctl
+188 common msgrcv sys_msgrcv compat_sys_msgrcv
+189 common msgsnd sys_msgsnd compat_sys_msgsnd
+190 common semget sys_semget
+191 common semctl sys_semctl compat_sys_semctl
+192 time32 semtimedop sys_semtimedop_time32
+192 64 semtimedop sys_semtimedop
+193 common semop sys_semop
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+196 common shmat sys_shmat compat_sys_shmat
+197 common shmdt sys_shmdt
+198 common socket sys_socket
+199 common socketpair sys_socketpair
+200 common bind sys_bind
+201 common listen sys_listen
+202 common accept sys_accept
+203 common connect sys_connect
+204 common getsockname sys_getsockname
+205 common getpeername sys_getpeername
+206 common sendto sys_sendto
+207 common recvfrom sys_recvfrom compat_sys_recvfrom
+208 common setsockopt sys_setsockopt sys_setsockopt
+209 common getsockopt sys_getsockopt sys_getsockopt
+210 common shutdown sys_shutdown
+211 common sendmsg sys_sendmsg compat_sys_sendmsg
+212 common recvmsg sys_recvmsg compat_sys_recvmsg
+213 common readahead sys_readahead compat_sys_readahead
+214 common brk sys_brk
+215 common munmap sys_munmap
+216 common mremap sys_mremap
+217 common add_key sys_add_key
+218 common request_key sys_request_key
+219 common keyctl sys_keyctl compat_sys_keyctl
+220 common clone sys_clone
+221 common execve sys_execve compat_sys_execve
+222 32 mmap2 sys_mmap2
+222 64 mmap sys_mmap
+223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+223 64 fadvise64 sys_fadvise64_64
+224 common swapon sys_swapon
+225 common swapoff sys_swapoff
+226 common mprotect sys_mprotect
+227 common msync sys_msync
+228 common mlock sys_mlock
+229 common munlock sys_munlock
+230 common mlockall sys_mlockall
+231 common munlockall sys_munlockall
+232 common mincore sys_mincore
+233 common madvise sys_madvise
+234 common remap_file_pages sys_remap_file_pages
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common migrate_pages sys_migrate_pages
+239 common move_pages sys_move_pages
+240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+241 common perf_event_open sys_perf_event_open
+242 common accept4 sys_accept4
+243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+243 64 recvmmsg sys_recvmmsg
+# Architectures may provide up to 16 syscalls of their own between 244 and 259
+244 arc cacheflush sys_cacheflush
+245 arc arc_settls sys_arc_settls
+246 arc arc_gettls sys_arc_gettls
+247 arc sysfs sys_sysfs
+248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg
+
+244 csky set_thread_area sys_set_thread_area
+245 csky cacheflush sys_cacheflush
+
+244 nios2 cacheflush sys_cacheflush
+
+244 or1k or1k_atomic sys_or1k_atomic
+
+258 riscv riscv_hwprobe sys_riscv_hwprobe
+259 riscv riscv_flush_icache sys_riscv_flush_icache
+
+260 time32 wait4 sys_wait4 compat_sys_wait4
+260 64 wait4 sys_wait4
+261 common prlimit64 sys_prlimit64
+262 common fanotify_init sys_fanotify_init
+263 common fanotify_mark sys_fanotify_mark
+264 common name_to_handle_at sys_name_to_handle_at
+265 common open_by_handle_at sys_open_by_handle_at
+266 time32 clock_adjtime sys_clock_adjtime32
+266 64 clock_adjtime sys_clock_adjtime
+267 common syncfs sys_syncfs
+268 common setns sys_setns
+269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+270 common process_vm_readv sys_process_vm_readv
+271 common process_vm_writev sys_process_vm_writev
+272 common kcmp sys_kcmp
+273 common finit_module sys_finit_module
+274 common sched_setattr sys_sched_setattr
+275 common sched_getattr sys_sched_getattr
+276 common renameat2 sys_renameat2
+277 common seccomp sys_seccomp
+278 common getrandom sys_getrandom
+279 common memfd_create sys_memfd_create
+280 common bpf sys_bpf
+281 common execveat sys_execveat compat_sys_execveat
+282 common userfaultfd sys_userfaultfd
+283 common membarrier sys_membarrier
+284 common mlock2 sys_mlock2
+285 common copy_file_range sys_copy_file_range
+286 common preadv2 sys_preadv2 compat_sys_preadv2
+287 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+288 common pkey_mprotect sys_pkey_mprotect
+289 common pkey_alloc sys_pkey_alloc
+290 common pkey_free sys_pkey_free
+291 common statx sys_statx
+292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+292 64 io_pgetevents sys_io_pgetevents
+293 common rseq sys_rseq
+294 common kexec_file_load sys_kexec_file_load
+# 295 through 402 are unassigned to sync up with generic numbers, don't use
+403 32 clock_gettime64 sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 memfd_secret memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index d0337c11f9ee..cc8948f49117 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -725,7 +725,7 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->reset = mock_decoder_reset;
}
-static int first_decoder(struct device *dev, void *data)
+static int first_decoder(struct device *dev, const void *data)
{
struct cxl_decoder *cxld;
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 347c1e7b37bd..8d731bd63988 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -401,6 +401,10 @@ struct cxl_test_gen_media gen_media = {
.channel = 1,
.rank = 30,
},
+ .component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .cme_threshold_ev_flags = 3,
+ .cme_count = { 33, 0, 0 },
+ .sub_type = 0x2,
},
};
@@ -429,6 +433,11 @@ struct cxl_test_dram dram = {
.bank_group = 5,
.bank = 2,
.column = {0xDE, 0xAD},
+ .component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .sub_channel = 8,
+ .cme_threshold_ev_flags = 2,
+ .cvme_count = { 14, 0, 0 },
+ .sub_type = 0x5,
},
};
@@ -456,7 +465,10 @@ struct cxl_test_mem_module mem_module = {
.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
- }
+ },
+ /* .validity_flags = <set below> */
+ .component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .event_sub_type = 0x3,
},
};
@@ -478,13 +490,18 @@ static int mock_set_timestamp(struct cxl_dev_state *cxlds,
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
- put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
+ put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
+ CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
&gen_media.rec.media_hdr.validity_flags);
put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
- CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
+ CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
+ CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
&dram.rec.media_hdr.validity_flags);
+ put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
+ &mem_module.rec.validity_flags);
+
mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
mes_add_event(mes, CXL_EVENT_TYPE_INFO,
(struct cxl_event_record_raw *)&gen_media);
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 450c7566c33f..af2594e4f35d 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -228,16 +228,16 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, "CXL");
-int __wrap_cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int __wrap_cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info)
{
int rc = 0, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_dev(dev))
+ if (ops && ops->is_mock_dev(cxlds->dev))
rc = 0;
else
- rc = cxl_dvsec_rr_decode(dev, port, info);
+ rc = cxl_dvsec_rr_decode(cxlds, info);
put_cxl_mock_ops(index);
return rc;
diff --git a/tools/testing/ktest/examples/include/defaults.conf b/tools/testing/ktest/examples/include/defaults.conf
index 63a1a83f4f0b..f6d8517a471e 100644
--- a/tools/testing/ktest/examples/include/defaults.conf
+++ b/tools/testing/ktest/examples/include/defaults.conf
@@ -46,7 +46,7 @@ CLEAR_LOG = 1
SSH_USER = root
-# For accesing the machine, we will ssh to root@machine.
+# For accessing the machine, we will ssh to root@machine.
SSH := ssh ${SSH_USER}@${MACHINE}
# Update this. The default here is ktest will ssh to the target box
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index dacad94e2be4..8c8da966c641 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1245,7 +1245,7 @@ sub __read_config {
# Config variables are only active while reading the
# config and can be defined anywhere. They also ignore
# TEST_START and DEFAULTS, but are skipped if they are in
- # on of these sections that have SKIP defined.
+ # one of these sections that have SKIP defined.
# The save variable can be
# defined multiple times and the new one simply overrides
# the previous one.
@@ -2419,6 +2419,11 @@ sub get_version {
return if ($have_version);
doprint "$make kernelrelease ... ";
$version = `$make -s kernelrelease | tail -1`;
+ if (!length($version)) {
+ run_command "$make allnoconfig" or return 0;
+ doprint "$make kernelrelease ... ";
+ $version = `$make -s kernelrelease | tail -1`;
+ }
chomp($version);
doprint "$version\n";
$have_version = 1;
@@ -2960,8 +2965,6 @@ sub run_bisect_test {
my $failed = 0;
my $result;
- my $output;
- my $ret;
$in_bisect = 1;
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index b3b00269a52a..b0049be00c70 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -38,9 +38,6 @@ CONFIG_IWLWIFI=y
CONFIG_DAMON=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_PADDR=y
-CONFIG_DEBUG_FS=y
-CONFIG_DAMON_DBGFS=y
-CONFIG_DAMON_DBGFS_DEPRECATED=y
CONFIG_REGMAP_BUILD=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 676fa99a8b19..7f9ae55fd6d5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -312,7 +312,16 @@ def massage_argv(argv: Sequence[str]) -> Sequence[str]:
return list(map(massage_arg, argv))
def get_default_jobs() -> int:
- return len(os.sched_getaffinity(0))
+ if sys.version_info >= (3, 13):
+ if (ncpu := os.process_cpu_count()) is not None:
+ return ncpu
+ raise RuntimeError("os.process_cpu_count() returned None")
+ # See https://github.com/python/cpython/blob/b61fece/Lib/os.py#L1175-L1186.
+ if sys.platform != "darwin":
+ return len(os.sched_getaffinity(0))
+ if (ncpu := os.cpu_count()) is not None:
+ return ncpu
+ raise RuntimeError("os.cpu_count() returned None")
def add_common_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--build_dir',
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index e76d7894b6c5..d30f90eae9a4 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -125,6 +125,9 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-append', ' '.join(params + [self._kernel_command_line]),
'-no-reboot',
'-nographic',
+ '-accel', 'kvm',
+ '-accel', 'hvf',
+ '-accel', 'tcg',
'-serial', self._serial] + self._extra_qemu_params
# Note: shlex.join() does what we want, but requires python 3.8+.
print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command))
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
index d3ff27024755..5c44d3a87e6d 100644
--- a/tools/testing/kunit/qemu_configs/arm64.py
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -9,4 +9,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='aarch64',
kernel_path='arch/arm64/boot/Image.gz',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine', 'virt', '-cpu', 'max,pauth-impdef=on'])
+ extra_qemu_params=['-machine', 'virt', '-cpu', 'max'])
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index cffaf2245d4f..eaff1b036989 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -227,6 +227,7 @@ static void *load_creator(void *ptr)
unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
(1 << order);
item_insert_order(tree, index, order);
+ xa_set_mark(tree, index, XA_MARK_1);
item_delete_rcu(tree, index);
}
}
@@ -242,8 +243,11 @@ static void *load_worker(void *ptr)
rcu_register_thread();
while (!stop_iteration) {
+ unsigned long find_index = (2 << RADIX_TREE_MAP_SHIFT) + 1;
struct item *item = xa_load(ptr, index);
assert(!xa_is_internal(item));
+ item = xa_find(ptr, &find_index, index, XA_MARK_1);
+ assert(!xa_is_internal(item));
}
rcu_unregister_thread();
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 2401e973c359..8daac70c2f9d 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -18,6 +18,7 @@ TARGETS += devices/error_logs
TARGETS += devices/probe
TARGETS += dmabuf-heaps
TARGETS += drivers/dma-buf
+TARGETS += drivers/ntsync
TARGETS += drivers/s390x/uvdevice
TARGETS += drivers/net
TARGETS += drivers/net/bonding
@@ -72,6 +73,7 @@ TARGETS += net/packetdrill
TARGETS += net/rds
TARGETS += net/tcp_ao
TARGETS += nsfs
+TARGETS += pci_endpoint
TARGETS += pcie_bwctrl
TARGETS += perf_events
TARGETS += pidfd
diff --git a/tools/testing/selftests/acct/acct_syscall.c b/tools/testing/selftests/acct/acct_syscall.c
index e44e8fe1f4a3..87c044fb9293 100644
--- a/tools/testing/selftests/acct/acct_syscall.c
+++ b/tools/testing/selftests/acct/acct_syscall.c
@@ -24,7 +24,7 @@ int main(void)
// Check if test is run as root
if (geteuid()) {
- ksft_test_result_skip("This test needs root to run!\n");
+ ksft_exit_skip("This test needs root to run!\n");
return 1;
}
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
index 859345379044..348e8bef62c7 100644
--- a/tools/testing/selftests/arm64/fp/kernel-test.c
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -46,8 +46,7 @@ static void handle_kick_signal(int sig, siginfo_t *info, void *context)
}
static char *drivers[] = {
- "crct10dif-arm64-ce",
- /* "crct10dif-arm64-neon", - Same priority as generic */
+ "crct10dif-arm64",
"sha1-ce",
"sha224-arm64",
"sha224-arm64-neon",
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index e9c377001f93..e2a2c46c008b 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -18,7 +18,6 @@ feature
urandom_read
test_sockmap
test_lirc_mode2_user
-test_flow_dissector
flow_dissector_load
test_tcpnotify_user
test_libbpf
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 0a016cd71cba..87551628e112 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -41,7 +41,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-CFLAGS += -g $(OPT_FLAGS) -rdynamic \
+CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \
-Wall -Werror -fno-omit-frame-pointer \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
@@ -54,21 +54,6 @@ PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null)
LDLIBS += $(PCAP_LIBS)
CFLAGS += $(PCAP_CFLAGS)
-# The following tests perform type punning and they may break strict
-# aliasing rules, which are exploited by both GCC and clang by default
-# while optimizing. This can lead to broken programs.
-progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing
-progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing
-progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing
-progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing
-progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing
-progs/syscall.c-CFLAGS := -fno-strict-aliasing
-progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing
-progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing
-progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
-progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing
-progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing
-
# Some utility functions use LLVM libraries
jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS)
@@ -103,18 +88,6 @@ progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error
progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error
progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error
-# The following tests do type-punning, via the __imm_insn macro, from
-# `struct bpf_insn' to long and then uses the value. This triggers an
-# "is used uninitialized" warning in GCC due to strict-aliasing
-# rules.
-progs/verifier_ref_tracking.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_unpriv.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_cgroup_storage.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_ld_ind.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_map_ret_val.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_spill_fill.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_subprog_precision.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
-progs/verifier_uninit.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
endif
ifneq ($(CLANG_CPUV4),)
@@ -127,12 +100,10 @@ TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
- test_xdp_redirect.sh \
test_xdp_redirect_multi.sh \
test_tunnel.sh \
test_lwt_seg6local.sh \
test_lirc_mode2.sh \
- test_flow_dissector.sh \
test_xdp_vlan_mode_generic.sh \
test_xdp_vlan_mode_native.sh \
test_lwt_ip_encap.sh \
@@ -150,17 +121,16 @@ TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
test_xdp_vlan.sh test_bpftool.py
+TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
+ bpf_test_modorder_y.ko
+TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS))
+
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = \
bench \
- bpf_testmod.ko \
- bpf_test_modorder_x.ko \
- bpf_test_modorder_y.ko \
- bpf_test_no_cfi.ko \
flow_dissector_load \
runqslower \
test_cpp \
- test_flow_dissector \
test_lirc_mode2_user \
veristat \
xdp_features \
@@ -183,8 +153,9 @@ override define CLEAN
$(Q)$(RM) -r $(TEST_GEN_PROGS)
$(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED)
$(Q)$(RM) -r $(TEST_GEN_FILES)
+ $(Q)$(RM) -r $(TEST_KMODS)
$(Q)$(RM) -r $(EXTRA_CLEAN)
- $(Q)$(MAKE) -C bpf_testmod clean
+ $(Q)$(MAKE) -C test_kmods clean
$(Q)$(MAKE) docs-clean
endef
@@ -202,9 +173,9 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
$(let OUTPUT,$(OUTPUT)/,\
$(eval include ../../../build/Makefile.feature))
else
-OUTPUT := $(OUTPUT)/
+override OUTPUT := $(OUTPUT)/
$(eval include ../../../build/Makefile.feature)
-OUTPUT := $(patsubst %/,%,$(OUTPUT))
+override OUTPUT := $(patsubst %/,%,$(OUTPUT))
endif
endif
@@ -250,7 +221,7 @@ endif
# to build individual tests.
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
-$(notdir $(TEST_GEN_PROGS) \
+$(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \
$(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ;
# sort removes libbpf duplicates when not cross-building
@@ -304,37 +275,19 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
$< -o $@ \
$(shell $(PKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
-$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
- $(call msg,MOD,,$@)
- $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_testmod \
- RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
- EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
- $(Q)cp bpf_testmod/bpf_testmod.ko $@
-
-$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
- $(call msg,MOD,,$@)
- $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \
- RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+# This should really be a grouped target, but make versions before 4.3 don't
+# support that for regular rules. However, pattern matching rules are implicitly
+# treated as grouped even with older versions of make, so as a workaround, the
+# subst() turns the rule into a pattern matching rule
+$(addprefix test_kmods/,$(subst .ko,%ko,$(TEST_KMODS))): $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard test_kmods/Makefile test_kmods/*.[ch])
+ $(Q)$(RM) test_kmods/*.ko test_kmods/*.mod.o # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) -C test_kmods \
+ RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
- $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
-$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch])
+$(TEST_KMOD_TARGETS): $(addprefix test_kmods/,$(TEST_KMODS))
$(call msg,MOD,,$@)
- $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \
- RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
- EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
- $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@
-
-$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch])
- $(call msg,MOD,,$@)
- $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \
- RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
- EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
- $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@
+ $(Q)cp test_kmods/$(@F) $@
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
@@ -479,10 +432,10 @@ $(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG)
endef
# Determine target endianness.
-IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+IS_LITTLE_ENDIAN := $(shell $(CC) -dM -E - </dev/null | \
grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
-MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
-BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
+MENDIAN:=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+BPF_TARGET_ENDIAN:=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
@@ -492,6 +445,8 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
-I$(abspath $(OUTPUT)/../usr/include) \
+ -std=gnu11 \
+ -fno-strict-aliasing \
-Wno-compare-distinct-pointer-types
# TODO: enable me -Wsign-compare
@@ -759,14 +714,12 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
json_writer.c \
flow_dissector_load.h \
ip_check_defrag_frags.h
-TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
- $(OUTPUT)/bpf_test_no_cfi.ko \
- $(OUTPUT)/bpf_test_modorder_x.ko \
- $(OUTPUT)/bpf_test_modorder_y.ko \
+TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
$(OUTPUT)/uprobe_multi \
+ $(TEST_KMOD_TARGETS) \
ima_setup.sh \
verify_sig_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c) \
@@ -833,9 +786,12 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
# Make sure we are able to include and link libbpf against c++.
+CXXFLAGS += $(CFLAGS)
+CXXFLAGS := $(subst -D_GNU_SOURCE=,,$(CXXFLAGS))
+CXXFLAGS := $(subst -std=gnu11,-std=gnu++11,$(CXXFLAGS))
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
- $(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
+ $(Q)$(CXX) $(CXXFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
# Benchmark runner
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
@@ -893,12 +849,9 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld
EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
- feature bpftool \
+ feature bpftool $(TEST_KMOD_TARGETS) \
$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
- no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
- bpf_test_no_cfi.ko \
- bpf_test_modorder_x.ko \
- bpf_test_modorder_y.ko \
+ no_alu32 cpuv4 bpf_gcc \
liburandom_read.so) \
$(OUTPUT)/FEATURE-DUMP.selftests
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile
deleted file mode 100644
index 40b25b98ad1b..000000000000
--- a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
-KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
-
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
-MODULES = bpf_test_modorder_x.ko
-
-obj-m += bpf_test_modorder_x.o
-
-all:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
-
-clean:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
-
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile
deleted file mode 100644
index 52c3ab9d84e2..000000000000
--- a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
-KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
-
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
-MODULES = bpf_test_modorder_y.ko
-
-obj-m += bpf_test_modorder_y.o
-
-all:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
-
-clean:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
-
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
deleted file mode 100644
index ed5143b79edf..000000000000
--- a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
-KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..)
-
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
-MODULES = bpf_test_no_cfi.ko
-
-obj-m += bpf_test_no_cfi.o
-
-all:
- +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules
-
-clean:
- +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean
-
diff --git a/tools/testing/selftests/bpf/bpf_testmod/Makefile b/tools/testing/selftests/bpf/bpf_testmod/Makefile
deleted file mode 100644
index 15cb36c4483a..000000000000
--- a/tools/testing/selftests/bpf/bpf_testmod/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
-KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
-
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
-MODULES = bpf_testmod.ko
-
-obj-m += bpf_testmod.o
-CFLAGS_bpf_testmod.o = -I$(src)
-
-all:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
-
-clean:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
-
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 4ca84c8d9116..c378d5d07e02 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -58,6 +58,7 @@ CONFIG_MPLS=y
CONFIG_MPLS_IPTUNNEL=y
CONFIG_MPLS_ROUTING=y
CONFIG_MPTCP=y
+CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_SKBMOD=y
CONFIG_NET_CLS=y
CONFIG_NET_CLS_ACT=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 27784946b01b..80844a5fb1fe 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -21,7 +21,7 @@
#include <linux/limits.h>
#include <linux/ip.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <net/if.h>
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 5764155b6d25..ebec8a8d6f81 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -14,6 +14,7 @@ typedef __u16 __sum16;
#include <linux/sockios.h>
#include <linux/err.h>
#include <netinet/tcp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_endian.h>
#include <net/if.h>
@@ -105,6 +106,45 @@ static __u16 csum_fold(__u32 csum)
return (__u16)~csum;
}
+static __wsum csum_partial(const void *buf, int len, __wsum sum)
+{
+ __u16 *p = (__u16 *)buf;
+ int num_u16 = len >> 1;
+ int i;
+
+ for (i = 0; i < num_u16; i++)
+ sum += p[i];
+
+ return sum;
+}
+
+static inline __sum16 build_ip_csum(struct iphdr *iph)
+{
+ __u32 sum = 0;
+ __u16 *p;
+
+ iph->check = 0;
+ p = (void *)iph;
+ sum = csum_partial(p, iph->ihl << 2, 0);
+
+ return csum_fold(sum);
+}
+
+/**
+ * csum_tcpudp_magic - compute IP pseudo-header checksum
+ *
+ * Compute the IPv4 pseudo-header checksum. The helper can take an
+ * accumulated sum from the transport layer, fold it in, and directly
+ * return the transport-layer checksum.
+ *
+ * @saddr: IP source address
+ * @daddr: IP dest address
+ * @len: IP data size
+ * @proto: transport layer protocol
+ * @csum: The accumulated partial sum to add to the computation
+ *
+ * Returns the folded sum
+ */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__wsum csum)
@@ -120,6 +160,21 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
return csum_fold((__u32)s);
}
+/**
+ * csum_ipv6_magic - compute IPv6 pseudo-header checksum
+ *
+ * Compute the IPv6 pseudo-header checksum. The helper can take an
+ * accumulated sum from the transport layer, fold it in, and directly
+ * return the transport-layer checksum.
+ *
+ * @saddr: IPv6 source address
+ * @daddr: IPv6 dest address
+ * @len: IPv6 data size
+ * @proto: transport layer protocol
+ * @csum: The accumulated partial sum to add to the computation
+ *
+ * Returns the folded sum
+ */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto,
@@ -139,6 +194,47 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__u32)s);
}
+/**
+ * build_udp_v4_csum - compute UDP checksum for UDP over IPv4
+ *
+ * Compute the checksum to embed in UDP header, composed of the sum of IP
+ * pseudo-header checksum, UDP header checksum and UDP data checksum
+ * @iph IP header
+ * @udph UDP header, which must be immediately followed by UDP data
+ *
+ * Returns the total checksum
+ */
+
+static inline __sum16 build_udp_v4_csum(const struct iphdr *iph,
+ const struct udphdr *udph)
+{
+ unsigned long sum;
+
+ sum = csum_partial(udph, ntohs(udph->len), 0);
+ return csum_tcpudp_magic(iph->saddr, iph->daddr, ntohs(udph->len),
+ IPPROTO_UDP, sum);
+}
+
+/**
+ * build_udp_v6_csum - compute UDP checksum for UDP over IPv6
+ *
+ * Compute the checksum to embed in UDP header, composed of the sum of IPv6
+ * pseudo-header checksum, UDP header checksum and UDP data checksum
+ * @ip6h IPv6 header
+ * @udph UDP header, which must be immediately followed by UDP data
+ *
+ * Returns the total checksum
+ */
+static inline __sum16 build_udp_v6_csum(const struct ipv6hdr *ip6h,
+ const struct udphdr *udph)
+{
+ unsigned long sum;
+
+ sum = csum_partial(udph, ntohs(udph->len), 0);
+ return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ntohs(udph->len),
+ IPPROTO_UDP, sum);
+}
+
struct tmonitor_ctx;
#ifdef TRAFFIC_MONITOR
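As a side note (not part of the patch): a minimal sketch of how the UDP checksum helpers added above might be used when hand-crafting a UDP/IPv4 packet in a selftest. fill_udp4_checksums() and the assumption of a 20-byte IPv4 header with no options are illustrative; only build_ip_csum(), build_udp_v4_csum() and the need to zero udph->check before summing come from the hunk above. The added csum_partial() sums whole 16-bit words, so an even payload length is assumed.

#include <arpa/inet.h>
#include <linux/ip.h>
#include <netinet/udp.h>
#include "network_helpers.h"

/* Hypothetical helper, assuming pkt = [iphdr (no options)][udphdr][payload] */
static void fill_udp4_checksums(void *pkt, __u16 payload_len)
{
	struct iphdr *iph = pkt;
	struct udphdr *udph = (struct udphdr *)(iph + 1);

	udph->len = htons(sizeof(*udph) + payload_len);
	udph->check = 0;                            /* must be zero before summing */
	udph->check = build_udp_v4_csum(iph, udph); /* pseudo-header + UDP header + data */
	iph->check = build_ip_csum(iph);            /* IPv4 header checksum */
}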
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
index ca84726d5ac1..fb67ae195a73 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
@@ -385,7 +385,7 @@ static void test_distilled_base_missing_err(void)
"[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
btf5 = btf__new_empty();
if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
- return;
+ goto cleanup;
btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
VALIDATE_RAW_BTF(
btf5,
@@ -478,7 +478,7 @@ static void test_distilled_base_multi_err2(void)
"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
btf5 = btf__new_empty();
if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
- return;
+ goto cleanup;
btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */
VALIDATE_RAW_BTF(
@@ -601,6 +601,76 @@ cleanup:
btf__free(base);
}
+/* If a needed composite type, which is a member of a composite type
+ * in the split BTF, has a different size in the base BTF we wish to
+ * relocate with, btf__relocate() should error out.
+ */
+static void test_distilled_base_embedded_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_struct(btf1, "s1", 4); /* [2] struct s1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0");
+
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf__add_struct(btf2, "with_embedded", 8); /* [3] struct with_embedded { */
+ btf__add_field(btf2, "e1", 2, 0, 0); /* struct s1 e1; */
+ /* } */
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[3] STRUCT 'with_embedded' size=8 vlen=1\n"
+ "\t'e1' type_id=2 bits_offset=0");
+
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] STRUCT 's1' size=4 vlen=0",
+ "[2] STRUCT 'with_embedded' size=8 vlen=1\n"
+ "\t'e1' type_id=1 bits_offset=0");
+
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ /* struct with the same name but different size */
+ btf__add_struct(btf5, "s1", 8); /* [2] struct s1 { */
+ btf__add_field(btf5, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
void test_btf_distill(void)
{
if (test__start_subtest("distilled_base"))
@@ -613,6 +683,8 @@ void test_btf_distill(void)
test_distilled_base_multi_err();
if (test__start_subtest("distilled_base_multi_err2"))
test_distilled_base_multi_err2();
+ if (test__start_subtest("distilled_base_embedded_err"))
+ test_distilled_base_embedded_err();
if (test__start_subtest("distilled_base_vmlinux"))
test_distilled_base_vmlinux();
if (test__start_subtest("distilled_endianness"))
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c
new file mode 100644
index 000000000000..e1a90c10db8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_skb_direct_packet_access.skel.h"
+
+void test_cgroup_skb_prog_run_direct_packet_access(void)
+{
+ int err;
+ struct cgroup_skb_direct_packet_access *skel;
+ char test_skb[64] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = test_skb,
+ .data_size_in = sizeof(test_skb),
+ );
+
+ skel = cgroup_skb_direct_packet_access__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_skb_direct_packet_access__open_and_load"))
+ return;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.direct_packet_access), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts err");
+ ASSERT_EQ(topts.retval, 1, "retval");
+
+ ASSERT_NEQ(skel->bss->data_end, 0, "data_end");
+
+ cgroup_skb_direct_packet_access__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 1c682550e0e7..e10ea92c3fe2 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -2,7 +2,7 @@
#define _GNU_SOURCE
#include <test_progs.h>
#include "progs/core_reloc_types.h"
-#include "bpf_testmod/bpf_testmod.h"
+#include "test_kmods/bpf_testmod.h"
#include <linux/limits.h>
#include <sys/mman.h>
#include <sys/syscall.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c
new file mode 100644
index 000000000000..a1d52e73fb16
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include <linux/btf.h>
+#include <bpf/bpf.h>
+
+#include "../test_btf.h"
+
+static inline int new_map(void)
+{
+ const char *name = NULL;
+ __u32 max_entries = 1;
+ __u32 value_size = 8;
+ __u32 key_size = 4;
+
+ return bpf_map_create(BPF_MAP_TYPE_ARRAY, name,
+ key_size, value_size,
+ max_entries, NULL);
+}
+
+static int new_btf(void)
+{
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+
+ return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL);
+}
+
+#define Close(FD) do { \
+ if ((FD) >= 0) { \
+ close(FD); \
+ FD = -1; \
+ } \
+} while(0)
+
+static bool map_exists(__u32 id)
+{
+ int fd;
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ }
+ return false;
+}
+
+static bool btf_exists(__u32 id)
+{
+ int fd;
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ }
+ return false;
+}
+
+static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids)
+{
+ __u32 len = sizeof(struct bpf_prog_info);
+ struct bpf_prog_info info;
+ int err;
+
+ memset(&info, 0, len);
+ info.nr_map_ids = *nr_map_ids,
+ info.map_ids = ptr_to_u64(map_ids),
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ return -1;
+
+ *nr_map_ids = info.nr_map_ids;
+
+ return 0;
+}
+
+static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt)
+{
+ /* A trivial program which uses one map */
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.fd_array = fd_array;
+ opts.fd_array_cnt = fd_array_cnt;
+
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts);
+}
+
+static int load_test_prog(const int *fd_array, int fd_array_cnt)
+{
+ int map_fd;
+ int ret;
+
+ map_fd = new_map();
+ if (!ASSERT_GE(map_fd, 0, "new_map"))
+ return map_fd;
+
+ ret = __load_test_prog(map_fd, fd_array, fd_array_cnt);
+ close(map_fd);
+ return ret;
+}
+
+static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids)
+{
+ int err;
+
+ err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids);
+ if (!ASSERT_OK(err, "bpf_prog_get_map_ids"))
+ return false;
+ if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids"))
+ return false;
+
+ return true;
+}
+
+/*
+ * Load a program, which uses one map. No fd_array maps are present.
+ * On return only one map is expected to be bound to prog.
+ */
+static void check_fd_array_cnt__no_fd_array(void)
+{
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ prog_fd = load_test_prog(NULL, 0);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ return;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids);
+ close(prog_fd);
+}
+
+/*
+ * Load a program, which uses one map, and pass two extra, non-equal, maps in
+ * fd_array with fd_array_cnt=2. On return three maps are expected to be bound
+ * to the program.
+ */
+static void check_fd_array_cnt__fd_array_ok(void)
+{
+ int extra_fds[2] = { -1, -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_map();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 2);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* maps should still exist when original file descriptors are closed */
+ Close(extra_fds[0]);
+ Close(extra_fds[1]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist"))
+ goto cleanup;
+ if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Load a program with a few extra maps duplicated in the fd_array.
+ * After the load maps should only be referenced once.
+ */
+static void check_fd_array_cnt__duplicated_maps(void)
+{
+ int extra_fds[4] = { -1, -1, -1, -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = extra_fds[2] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = extra_fds[3] = new_map();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 4);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* maps should still exist when original file descriptors are closed */
+ Close(extra_fds[0]);
+ Close(extra_fds[1]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+ if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Check that if maps which are referenced by a program are
+ * passed in fd_array, then they will be referenced only once
+ */
+static void check_fd_array_cnt__referenced_maps_in_fd_array(void)
+{
+ int extra_fds[1] = { -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* map should still exist when original file descriptor is closed */
+ Close(extra_fds[0]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+static int get_btf_id_by_fd(int btf_fd, __u32 *id)
+{
+ struct bpf_btf_info info;
+ __u32 info_len = sizeof(info);
+ int err;
+
+ memset(&info, 0, info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
+ if (err)
+ return err;
+ if (id)
+ *id = info.id;
+ return 0;
+}
+
+/*
+ * Check that fd_array operates properly for btfs. Namely, to check that
+ * passing a btf fd in fd_array increases its reference count, do the
+ * following:
+ * 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
+ * 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
+ * 3) Close the btf_fd, now refcnt=1
+ * Wait and check that the BTF still exists.
+ */
+static void check_fd_array_cnt__referenced_btfs(void)
+{
+ int extra_fds[1] = { -1 };
+ int prog_fd = -1;
+ __u32 btf_id;
+ int tries;
+ int err;
+
+ extra_fds[0] = new_btf();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+
+ /* btf should still exist when original file descriptor is closed */
+ err = get_btf_id_by_fd(extra_fds[0], &btf_id);
+ if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
+ goto cleanup;
+
+ Close(extra_fds[0]);
+
+ if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
+ goto cleanup;
+
+ Close(prog_fd);
+
+ /* The program is freed by a workqueue, so there is no reliable
+ * way to sync; just wait a bit (max ~1 second). */
+ for (tries = 100; tries >= 0; tries--) {
+ usleep(1000);
+
+ if (!btf_exists(btf_id))
+ break;
+
+ if (tries)
+ continue;
+
+ PRINT_FAIL("btf should have been freed");
+ }
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Test that a program with trash in fd_array can't be loaded:
+ * only map and BTF file descriptors should be accepted.
+ */
+static void check_fd_array_cnt__fd_array_with_trash(void)
+{
+ int extra_fds[3] = { -1, -1, -1 };
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_btf();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
+ goto cleanup;
+
+ /* trash 1: not a file descriptor */
+ extra_fds[2] = 0xbeef;
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
+ goto cleanup;
+
+ /* trash 2: not a map or btf */
+ extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(extra_fds[2], 0, "socket"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
+ goto cleanup;
+
+ /* Validate that the prog is ok if trash is removed */
+ Close(extra_fds[2]);
+ extra_fds[2] = new_btf();
+ if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[2]);
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+}
+
+/*
+ * Test that a program with too big fd_array can't be loaded.
+ */
+static void check_fd_array_cnt__fd_array_too_big(void)
+{
+ int extra_fds[65];
+ int prog_fd = -1;
+ int i;
+
+ for (i = 0; i < 65; i++) {
+ extra_fds[i] = new_map();
+ if (!ASSERT_GE(extra_fds[i], 0, "new_map"))
+ goto cleanup_fds;
+ }
+
+ prog_fd = load_test_prog(extra_fds, 65);
+ ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
+
+cleanup_fds:
+ while (i > 0)
+ Close(extra_fds[--i]);
+}
+
+void test_fd_array_cnt(void)
+{
+ if (test__start_subtest("no-fd-array"))
+ check_fd_array_cnt__no_fd_array();
+
+ if (test__start_subtest("fd-array-ok"))
+ check_fd_array_cnt__fd_array_ok();
+
+ if (test__start_subtest("fd-array-dup-input"))
+ check_fd_array_cnt__duplicated_maps();
+
+ if (test__start_subtest("fd-array-ref-maps-in-array"))
+ check_fd_array_cnt__referenced_maps_in_fd_array();
+
+ if (test__start_subtest("fd-array-ref-btfs"))
+ check_fd_array_cnt__referenced_btfs();
+
+ if (test__start_subtest("fd-array-trash-input"))
+ check_fd_array_cnt__fd_array_with_trash();
+
+ if (test__start_subtest("fd-array-2big"))
+ check_fd_array_cnt__fd_array_too_big();
+}
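As a side note (not part of the patch): a minimal sketch of the user-visible behaviour the fd_array_cnt tests above exercise. load_prog_with_pinned_btf() and the choice of BPF_PROG_TYPE_XDP are illustrative; the fd_array/fd_array_cnt fields of bpf_prog_load_opts and the "reference held until the program is released" semantics are what the tests check.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: pin a BTF object to a program's lifetime via fd_array */
static int load_prog_with_pinned_btf(const struct bpf_insn *insns,
				     size_t insn_cnt, int btf_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.fd_array = &btf_fd,
		.fd_array_cnt = 1,	/* kernel takes a reference on btf_fd */
	);

	/* After a successful load the caller may close btf_fd; the BTF
	 * object stays alive until the program itself is freed.
	 */
	return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL",
			     insns, insn_cnt, &opts);
}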
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index d50cbd8040d4..e59af2aa6601 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -171,6 +171,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
/* See also arch_adjust_kprobe_addr(). */
if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
entry_offset = 4;
+ if (skel->kconfig->CONFIG_PPC64 &&
+ skel->kconfig->CONFIG_KPROBES_ON_FTRACE &&
+ !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE)
+ entry_offset = 4;
err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
ASSERT_OK(err, "verify_perf_link_info");
} else {
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index cfcc90cb7ffb..08bae13248c4 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -7,39 +7,14 @@
#include "bpf_flow.skel.h"
+#define TEST_NS "flow_dissector_ns"
#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
+#define TEST_NAME_MAX_LEN 64
#ifndef IP_MF
#define IP_MF 0x2000
#endif
-#define CHECK_FLOW_KEYS(desc, got, expected) \
- _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
- desc, \
- topts.duration, \
- "nhoff=%u/%u " \
- "thoff=%u/%u " \
- "addr_proto=0x%x/0x%x " \
- "is_frag=%u/%u " \
- "is_first_frag=%u/%u " \
- "is_encap=%u/%u " \
- "ip_proto=0x%x/0x%x " \
- "n_proto=0x%x/0x%x " \
- "flow_label=0x%x/0x%x " \
- "sport=%u/%u " \
- "dport=%u/%u\n", \
- got.nhoff, expected.nhoff, \
- got.thoff, expected.thoff, \
- got.addr_proto, expected.addr_proto, \
- got.is_frag, expected.is_frag, \
- got.is_first_frag, expected.is_first_frag, \
- got.is_encap, expected.is_encap, \
- got.ip_proto, expected.ip_proto, \
- got.n_proto, expected.n_proto, \
- got.flow_label, expected.flow_label, \
- got.sport, expected.sport, \
- got.dport, expected.dport)
-
struct ipv4_pkt {
struct ethhdr eth;
struct iphdr iph;
@@ -89,6 +64,19 @@ struct dvlan_ipv6_pkt {
struct tcphdr tcp;
} __packed;
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+} gre_base_hdr;
+
+struct gre_minimal_pkt {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct gre_base_hdr gre_hdr;
+ struct iphdr iph_inner;
+ struct tcphdr tcp;
+} __packed;
+
struct test {
const char *name;
union {
@@ -98,6 +86,7 @@ struct test {
struct ipv6_pkt ipv6;
struct ipv6_frag_pkt ipv6_frag;
struct dvlan_ipv6_pkt dvlan_ipv6;
+ struct gre_minimal_pkt gre_minimal;
} pkt;
struct bpf_flow_keys keys;
__u32 flags;
@@ -106,7 +95,6 @@ struct test {
#define VLAN_HLEN 4
-static __u32 duration;
struct test tests[] = {
{
.name = "ipv4",
@@ -444,8 +432,137 @@ struct test tests[] = {
},
.retval = BPF_FLOW_DISSECTOR_CONTINUE,
},
+ {
+ .name = "ip-gre",
+ .pkt.gre_minimal = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_GRE,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .gre_hdr = {
+ .flags = 0,
+ .protocol = __bpf_constant_htons(ETH_P_IP),
+ },
+ .iph_inner.ihl = 5,
+ .iph_inner.protocol = IPPROTO_TCP,
+ .iph_inner.tot_len =
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
+ .tcp.doff = 5,
+ .tcp.source = 80,
+ .tcp.dest = 8080,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr) * 2 +
+ sizeof(struct gre_base_hdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ .is_encap = true,
+ .sport = 80,
+ .dport = 8080,
+ },
+ .retval = BPF_OK,
+ },
+ {
+ .name = "ip-gre-no-encap",
+ .pkt.ipip = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_GRE,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .iph_inner.ihl = 5,
+ .iph_inner.protocol = IPPROTO_TCP,
+ .iph_inner.tot_len =
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
+ .tcp.doff = 5,
+ .tcp.source = 80,
+ .tcp.dest = 8080,
+ },
+ .keys = {
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr)
+ + sizeof(struct gre_base_hdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_GRE,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ .is_encap = true,
+ },
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+ .retval = BPF_OK,
+ },
};
+void serial_test_flow_dissector_namespace(void)
+{
+ struct bpf_flow *skel;
+ struct nstoken *ns;
+ int err, prog_fd;
+
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "get dissector fd"))
+ goto out_destroy_skel;
+
+ /* We must be able to attach a flow dissector to the root namespace */
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_OK(err, "attach on root namespace ok"))
+ goto out_destroy_skel;
+
+ err = make_netns(TEST_NS);
+ if (!ASSERT_OK(err, "create non-root net namespace"))
+ goto out_destroy_skel;
+
+ /* We must not be able to additionally attach a flow dissector to a
+ * non-root net namespace
+ */
+ ns = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(ns, "enter non-root net namespace"))
+ goto out_clean_ns;
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_ERR(err,
+ "refuse new flow dissector in non-root net namespace"))
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ else
+ ASSERT_EQ(errno, EEXIST,
+ "refused because of already attached prog");
+ close_netns(ns);
+
+ /* If no flow dissector is attached to the root namespace, we must
+ * be able to attach one to a non-root net namespace
+ */
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ ns = open_netns(TEST_NS);
+ ASSERT_OK_PTR(ns, "enter non-root net namespace");
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ close_netns(ns);
+ ASSERT_OK(err, "accept new flow dissector in non-root net namespace");
+
+ /* If a flow dissector is attached to a non-root net namespace, attaching
+ * a flow dissector to the root namespace must fail
+ */
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace"))
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ else
+ ASSERT_EQ(errno, EEXIST,
+ "refused because of already attached prog");
+
+ ns = open_netns(TEST_NS);
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ close_netns(ns);
+out_clean_ns:
+ remove_netns(TEST_NS);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+}
+
static int create_tap(const char *ifname)
{
struct ifreq ifr = {
@@ -533,22 +650,27 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
return 0;
}
-static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
+static void run_tests_skb_less(int tap_fd, struct bpf_map *keys,
+ char *test_suffix)
{
+ char test_name[TEST_NAME_MAX_LEN];
int i, err, keys_fd;
keys_fd = bpf_map__fd(keys);
- if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
+ if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
/* Keep in sync with 'flags' from eth_get_headlen. */
__u32 eth_get_headlen_flags =
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
- LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_flow_keys flow_keys = {};
__u32 key = (__u32)(tests[i].keys.sport) << 16 |
tests[i].keys.dport;
+ snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name,
+ test_suffix);
+ if (!test__start_subtest(test_name))
+ continue;
/* For skb-less case we can't pass input flags; run
* only the tests that have a matching set of flags.
@@ -558,78 +680,139 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
continue;
err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
- CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
+ if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap"))
+ continue;
/* check the stored flow_keys only if BPF_OK expected */
if (tests[i].retval != BPF_OK)
continue;
err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
- ASSERT_OK(err, "bpf_map_lookup_elem");
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ continue;
- CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+ sizeof(struct bpf_flow_keys),
+ "returned flow keys");
err = bpf_map_delete_elem(keys_fd, &key);
ASSERT_OK(err, "bpf_map_delete_elem");
}
}
-static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_direct_attach(void)
{
- int err, prog_fd;
+ int err, prog_fd, tap_fd;
+ struct bpf_flow *skel;
+ struct netns_obj *ns;
- prog_fd = bpf_program__fd(skel->progs._dissect);
- if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
+ ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
+ if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ goto out_clean_ns;
+
+ err = init_prog_array(skel->obj, skel->maps.jmp_table);
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
+ goto out_destroy_skel;
+
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
- if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
- return;
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out_destroy_skel;
+
+ tap_fd = create_tap("tap0");
+ if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+ goto out_destroy_skel;
+ err = ifup("tap0");
+ if (!ASSERT_OK(err, "ifup"))
+ goto out_close_tap;
- run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+ run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+ "non-skb-direct-attach");
err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
- CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
+ ASSERT_OK(err, "bpf_prog_detach2");
+
+out_close_tap:
+ close(tap_fd);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+out_clean_ns:
+ netns_free(ns);
}
-static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_indirect_attach(void)
{
+ int err, net_fd, tap_fd;
+ struct bpf_flow *skel;
struct bpf_link *link;
- int err, net_fd;
+ struct netns_obj *ns;
- net_fd = open("/proc/self/ns/net", O_RDONLY);
- if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
+ ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
+ if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ goto out_clean_ns;
+
+ net_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net"))
+ goto out_destroy_skel;
+
+ err = init_prog_array(skel->obj, skel->maps.jmp_table);
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ tap_fd = create_tap("tap0");
+ if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+ goto out_close_ns;
+ err = ifup("tap0");
+ if (!ASSERT_OK(err, "ifup"))
+ goto out_close_tap;
+
link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
if (!ASSERT_OK_PTR(link, "attach_netns"))
- goto out_close;
+ goto out_close_tap;
- run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+ run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+ "non-skb-indirect-attach");
err = bpf_link__destroy(link);
- CHECK(err, "bpf_link__destroy", "err %d\n", err);
-out_close:
+ ASSERT_OK(err, "bpf_link__destroy");
+
+out_close_tap:
+ close(tap_fd);
+out_close_ns:
close(net_fd);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+out_clean_ns:
+ netns_free(ns);
}
-void test_flow_dissector(void)
+void test_flow_dissector_skb(void)
{
- int i, err, prog_fd, keys_fd = -1, tap_fd;
+ char test_name[TEST_NAME_MAX_LEN];
struct bpf_flow *skel;
+ int i, err, prog_fd;
skel = bpf_flow__open_and_load();
- if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
return;
- prog_fd = bpf_program__fd(skel->progs._dissect);
- if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
- goto out_destroy_skel;
- keys_fd = bpf_map__fd(skel->maps.last_dissection);
- if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
- goto out_destroy_skel;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
- if (CHECK(err, "init_prog_array", "err %d\n", err))
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
goto out_destroy_skel;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -641,6 +824,10 @@ void test_flow_dissector(void)
);
static struct bpf_flow_keys ctx = {};
+ snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name);
+ if (!test__start_subtest(test_name))
+ continue;
+
if (tests[i].flags) {
topts.ctx_in = &ctx;
topts.ctx_size_in = sizeof(ctx);
@@ -656,26 +843,12 @@ void test_flow_dissector(void)
continue;
ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
"test_run data_size_out");
- CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+ sizeof(struct bpf_flow_keys),
+ "returned flow keys");
}
- /* Do the same tests but for skb-less flow dissector.
- * We use a known path in the net/tun driver that calls
- * eth_get_headlen and we manually export bpf_flow_keys
- * via BPF map in this case.
- */
-
- tap_fd = create_tap("tap0");
- CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
- err = ifup("tap0");
- CHECK(err, "ifup", "err %d errno %d\n", err, errno);
-
- /* Test direct prog attachment */
- test_skb_less_prog_attach(skel, tap_fd);
- /* Test indirect prog attachment via link */
- test_skb_less_link_create(skel, tap_fd);
-
- close(tap_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c
new file mode 100644
index 000000000000..3729fbfd3084
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <bpf/bpf.h>
+#include <linux/bpf.h>
+#include <bpf/libbpf.h>
+#include <arpa/inet.h>
+#include <asm/byteorder.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "bpf_util.h"
+#include "bpf_flow.skel.h"
+
+#define CFG_PORT_INNER 8000
+#define CFG_PORT_GUE 6080
+#define SUBTEST_NAME_MAX_LEN 32
+#define TEST_NAME_MAX_LEN (32 + SUBTEST_NAME_MAX_LEN)
+#define MAX_SOURCE_PORTS 3
+#define TEST_PACKETS_COUNT 10
+#define TEST_PACKET_LEN 100
+#define TEST_PACKET_PATTERN 'a'
+#define TEST_IPV4 "192.168.0.1/32"
+#define TEST_IPV6 "100::a/128"
+#define TEST_TUNNEL_REMOTE "127.0.0.2"
+#define TEST_TUNNEL_LOCAL "127.0.0.1"
+
+#define INIT_ADDR4(addr4, port) \
+ { \
+ .sin_family = AF_INET, \
+ .sin_port = __constant_htons(port), \
+ .sin_addr.s_addr = __constant_htonl(addr4), \
+ }
+
+#define INIT_ADDR6(addr6, port) \
+ { \
+ .sin6_family = AF_INET6, \
+ .sin6_port = __constant_htons(port), \
+ .sin6_addr = addr6, \
+ }
+#define TEST_IN4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 2, 0)
+#define TEST_IN4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, CFG_PORT_INNER)
+#define TEST_OUT4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 1, 0)
+#define TEST_OUT4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, 0)
+
+#define TEST_IN6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+#define TEST_IN6_DST_ADDR_DEFAULT \
+ INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
+#define TEST_OUT6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+#define TEST_OUT6_DST_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+
+#define TEST_IN4_SRC_ADDR_DISSECT_CONTINUE INIT_ADDR4(INADDR_LOOPBACK + 126, 0)
+#define TEST_IN4_SRC_ADDR_IPIP INIT_ADDR4((in_addr_t)0x01010101, 0)
+#define TEST_IN4_DST_ADDR_IPIP INIT_ADDR4((in_addr_t)0xC0A80001, CFG_PORT_INNER)
+
+struct grehdr {
+ uint16_t unused;
+ uint16_t protocol;
+} __packed;
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen : 5, control : 1, version : 2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version : 2, control : 1, hlen : 5;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 proto_ctype;
+ __be16 flags;
+ };
+ __be32 word;
+ };
+};
+
+static char buf[ETH_DATA_LEN];
+
+struct test_configuration {
+ char name[SUBTEST_NAME_MAX_LEN];
+ int (*test_setup)(void);
+ void (*test_teardown)(void);
+ int source_ports[MAX_SOURCE_PORTS];
+ int cfg_l3_inner;
+ struct sockaddr_in in_saddr4;
+ struct sockaddr_in in_daddr4;
+ struct sockaddr_in6 in_saddr6;
+ struct sockaddr_in6 in_daddr6;
+ int cfg_l3_outer;
+ struct sockaddr_in out_saddr4;
+ struct sockaddr_in out_daddr4;
+ struct sockaddr_in6 out_saddr6;
+ struct sockaddr_in6 out_daddr6;
+ int cfg_encap_proto;
+ uint8_t cfg_dsfield_inner;
+ uint8_t cfg_dsfield_outer;
+ int cfg_l3_extra;
+ struct sockaddr_in extra_saddr4;
+ struct sockaddr_in extra_daddr4;
+ struct sockaddr_in6 extra_saddr6;
+ struct sockaddr_in6 extra_daddr6;
+};
+
+static unsigned long util_gettime(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+}
+
+static void build_ipv4_header(void *header, uint8_t proto, uint32_t src,
+ uint32_t dst, int payload_len, uint8_t tos)
+{
+ struct iphdr *iph = header;
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->tos = tos;
+ iph->ttl = 8;
+ iph->tot_len = htons(sizeof(*iph) + payload_len);
+ iph->id = htons(1337);
+ iph->protocol = proto;
+ iph->saddr = src;
+ iph->daddr = dst;
+ iph->check = build_ip_csum((void *)iph);
+}
+
+static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
+{
+ uint16_t val, *ptr = (uint16_t *)ip6h;
+
+ val = ntohs(*ptr);
+ val &= 0xF00F;
+ val |= ((uint16_t)dsfield) << 4;
+ *ptr = htons(val);
+}
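
A quick worked example for ipv6_set_dsfield() above: if the first halfword of the header is 0x6000 in host order (version 6, traffic class 0), masking with 0xF00F keeps the version nibble and the top flow-label nibble, dsfield = 0xb8 shifted left by 4 gives 0x0b80, and the stored value becomes htons(0x6b80). The 8-bit traffic class therefore lands in bits 4-11 of the first 16 bits, between the version and the flow label.
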
+
+static void build_ipv6_header(void *header, uint8_t proto,
+ const struct sockaddr_in6 *src,
+ const struct sockaddr_in6 *dst, int payload_len,
+ uint8_t dsfield)
+{
+ struct ipv6hdr *ip6h = header;
+
+ ip6h->version = 6;
+ ip6h->payload_len = htons(payload_len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = 8;
+ ipv6_set_dsfield(ip6h, dsfield);
+
+ memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr));
+ memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr));
+}
+
+static void build_udp_header(void *header, int payload_len, uint16_t sport,
+ uint16_t dport, int family)
+{
+ struct udphdr *udph = header;
+ int len = sizeof(*udph) + payload_len;
+
+ udph->source = htons(sport);
+ udph->dest = htons(dport);
+ udph->len = htons(len);
+ udph->check = 0;
+ if (family == AF_INET)
+ udph->check = build_udp_v4_csum(header - sizeof(struct iphdr),
+ udph);
+ else
+ udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
+ udph);
+}
+
+static void build_gue_header(void *header, uint8_t proto)
+{
+ struct guehdr *gueh = header;
+
+ gueh->proto_ctype = proto;
+}
+
+static void build_gre_header(void *header, uint16_t proto)
+{
+ struct grehdr *greh = header;
+
+ greh->protocol = htons(proto);
+}
+
+static int l3_length(int family)
+{
+ if (family == AF_INET)
+ return sizeof(struct iphdr);
+ else
+ return sizeof(struct ipv6hdr);
+}
+
+static int build_packet(const struct test_configuration *test, uint16_t sport)
+{
+ int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
+ int el3_len = 0, packet_len;
+
+ memset(buf, 0, ETH_DATA_LEN);
+
+ if (test->cfg_l3_extra)
+ el3_len = l3_length(test->cfg_l3_extra);
+
+ /* calculate header offsets */
+ if (test->cfg_encap_proto) {
+ ol3_len = l3_length(test->cfg_l3_outer);
+
+ if (test->cfg_encap_proto == IPPROTO_GRE)
+ ol4_len = sizeof(struct grehdr);
+ else if (test->cfg_encap_proto == IPPROTO_UDP)
+ ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ }
+
+ il3_len = l3_length(test->cfg_l3_inner);
+ il4_len = sizeof(struct udphdr);
+
+ packet_len = el3_len + ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN;
+ if (!ASSERT_LE(packet_len, sizeof(buf), "check packet size"))
+ return -1;
+
+ /*
+ * Fill packet from inside out, to calculate correct checksums.
+ * But create ip before udp headers, as udp uses ip for pseudo-sum.
+ */
+ memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
+ TEST_PACKET_PATTERN, TEST_PACKET_LEN);
+
+ /* add zero byte for udp csum padding */
+ buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + TEST_PACKET_LEN] =
+ 0;
+
+ switch (test->cfg_l3_inner) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP, test->in_saddr4.sin_addr.s_addr,
+ test->in_daddr4.sin_addr.s_addr,
+ il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_inner);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP, &test->in_saddr6,
+ &test->in_daddr6, il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_inner);
+ break;
+ }
+
+ build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
+ TEST_PACKET_LEN, sport, CFG_PORT_INNER,
+ test->cfg_l3_inner);
+
+ if (!test->cfg_encap_proto)
+ return il3_len + il4_len + TEST_PACKET_LEN;
+
+ switch (test->cfg_l3_outer) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len, test->cfg_encap_proto,
+ test->out_saddr4.sin_addr.s_addr,
+ test->out_daddr4.sin_addr.s_addr,
+ ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_outer);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len, test->cfg_encap_proto,
+ &test->out_saddr6, &test->out_daddr6,
+ ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_outer);
+ break;
+ }
+
+ switch (test->cfg_encap_proto) {
+ case IPPROTO_UDP:
+ build_gue_header(buf + el3_len + ol3_len + ol4_len -
+ sizeof(struct guehdr),
+ test->cfg_l3_inner == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6);
+ build_udp_header(buf + el3_len + ol3_len,
+ sizeof(struct guehdr) + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ sport, CFG_PORT_GUE, test->cfg_l3_outer);
+ break;
+ case IPPROTO_GRE:
+ build_gre_header(buf + el3_len + ol3_len,
+ test->cfg_l3_inner == PF_INET ? ETH_P_IP :
+ ETH_P_IPV6);
+ break;
+ }
+
+ switch (test->cfg_l3_extra) {
+ case PF_INET:
+ build_ipv4_header(buf,
+ test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6,
+ test->extra_saddr4.sin_addr.s_addr,
+ test->extra_daddr4.sin_addr.s_addr,
+ ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ 0);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf,
+ test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6,
+ &test->extra_saddr6, &test->extra_daddr6,
+ ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ 0);
+ break;
+ }
+
+ return el3_len + ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN;
+}
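
As a concrete example, for the "gre" configuration defined below (cfg_l3_outer = PF_INET, cfg_encap_proto = IPPROTO_GRE, no extra layer) build_packet() produces outer IPv4 (20 bytes) + GRE (4) + inner IPv4 (20) + UDP (8) + 100 bytes of 'a' payload and returns 152.
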
+
+/* sender transmits encapsulated over RAW or unencap'd over UDP */
+static int setup_tx(const struct test_configuration *test)
+{
+ int family, fd, ret;
+
+ if (test->cfg_l3_extra)
+ family = test->cfg_l3_extra;
+ else if (test->cfg_l3_outer)
+ family = test->cfg_l3_outer;
+ else
+ family = test->cfg_l3_inner;
+
+ fd = socket(family, SOCK_RAW, IPPROTO_RAW);
+ if (!ASSERT_OK_FD(fd, "setup tx socket"))
+ return fd;
+
+ if (test->cfg_l3_extra) {
+ if (test->cfg_l3_extra == PF_INET)
+ ret = connect(fd, (void *)&test->extra_daddr4,
+ sizeof(test->extra_daddr4));
+ else
+ ret = connect(fd, (void *)&test->extra_daddr6,
+ sizeof(test->extra_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ } else if (test->cfg_l3_outer) {
+ /* connect to destination if not encapsulated */
+ if (test->cfg_l3_outer == PF_INET)
+ ret = connect(fd, (void *)&test->out_daddr4,
+ sizeof(test->out_daddr4));
+ else
+ ret = connect(fd, (void *)&test->out_daddr6,
+ sizeof(test->out_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ } else {
+ /* otherwise using loopback */
+ if (test->cfg_l3_inner == PF_INET)
+ ret = connect(fd, (void *)&test->in_daddr4,
+ sizeof(test->in_daddr4));
+ else
+ ret = connect(fd, (void *)&test->in_daddr6,
+ sizeof(test->in_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ }
+
+ return fd;
+}
+
+/* receiver reads unencapsulated UDP */
+static int setup_rx(const struct test_configuration *test)
+{
+ int fd, ret;
+
+ fd = socket(test->cfg_l3_inner, SOCK_DGRAM, 0);
+ if (!ASSERT_OK_FD(fd, "socket rx"))
+ return fd;
+
+ if (test->cfg_l3_inner == PF_INET)
+ ret = bind(fd, (void *)&test->in_daddr4,
+ sizeof(test->in_daddr4));
+ else
+ ret = bind(fd, (void *)&test->in_daddr6,
+ sizeof(test->in_daddr6));
+ if (!ASSERT_OK(ret, "bind rx")) {
+ close(fd);
+ return ret;
+ }
+
+ return fd;
+}
+
+static int do_tx(int fd, const char *pkt, int len)
+{
+ int ret;
+
+ ret = write(fd, pkt, len);
+ return ret != len;
+}
+
+static int do_poll(int fd, short events, int timeout)
+{
+ struct pollfd pfd;
+ int ret;
+
+ pfd.fd = fd;
+ pfd.events = events;
+
+ ret = poll(&pfd, 1, timeout);
+ return ret;
+}
+
+static int do_rx(int fd)
+{
+ char rbuf;
+ int ret, num = 0;
+
+ while (1) {
+ ret = recv(fd, &rbuf, 1, MSG_DONTWAIT);
+ if (ret == -1 && errno == EAGAIN)
+ break;
+ if (ret < 0)
+ return -1;
+ if (!ASSERT_EQ(rbuf, TEST_PACKET_PATTERN, "check pkt pattern"))
+ return -1;
+ num++;
+ }
+
+ return num;
+}
+
+static int run_test(const struct test_configuration *test,
+ int source_port_index)
+{
+ int fdt = -1, fdr = -1, len, tx = 0, rx = 0, err;
+ unsigned long tstop, tcur;
+
+ fdr = setup_rx(test);
+ fdt = setup_tx(test);
+ if (!ASSERT_OK_FD(fdr, "setup rx") || !ASSERT_OK_FD(fdt, "setup tx")) {
+ err = -1;
+ goto out_close_sockets;
+ }
+
+ len = build_packet(test,
+ (uint16_t)test->source_ports[source_port_index]);
+ if (!ASSERT_GT(len, 0, "build test packet"))
+ return -1;
+
+ tcur = util_gettime();
+ tstop = tcur;
+
+ while (tx < TEST_PACKETS_COUNT) {
+ if (!ASSERT_OK(do_tx(fdt, buf, len), "do_tx"))
+ break;
+ tx++;
+ err = do_rx(fdr);
+ if (!ASSERT_GE(err, 0, "do_rx"))
+ break;
+ rx += err;
+ }
+
+ /* read straggler packets, if any */
+ if (rx < tx) {
+ tstop = util_gettime() + 100;
+ while (rx < tx) {
+ tcur = util_gettime();
+ if (tcur >= tstop)
+ break;
+
+ err = do_poll(fdr, POLLIN, tstop - tcur);
+ if (err < 0)
+ break;
+ err = do_rx(fdr);
+ if (err >= 0)
+ rx += err;
+ }
+ }
+
+out_close_sockets:
+ close(fdt);
+ close(fdr);
+ return rx;
+}
+
+static int attach_and_configure_program(struct bpf_flow *skel)
+{
+ struct bpf_map *prog_array = skel->maps.jmp_table;
+ int main_prog_fd, sub_prog_fd, map_fd, i, err;
+ struct bpf_program *prog;
+ char prog_name[32];
+
+ main_prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (main_prog_fd < 0)
+ return main_prog_fd;
+
+ err = bpf_prog_attach(main_prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (err)
+ return err;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (map_fd < 0)
+ return map_fd;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog)
+ return -1;
+
+ sub_prog_fd = bpf_program__fd(prog);
+ if (sub_prog_fd < 0)
+ return -1;
+
+ err = bpf_map_update_elem(map_fd, &i, &sub_prog_fd, BPF_ANY);
+ if (err)
+ return -1;
+ }
+
+ return main_prog_fd;
+}
+
+static void detach_program(struct bpf_flow *skel, int prog_fd)
+{
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+}
+
+static int set_port_drop(int pf, bool multi_port)
+{
+ SYS(fail, "tc qdisc add dev lo ingress");
+ SYS(fail_delete_qdisc, "tc filter add %s %s %s %s %s %s %s %s %s %s",
+ "dev lo",
+ "parent FFFF:",
+ "protocol", pf == PF_INET6 ? "ipv6" : "ip",
+ "pref 1337",
+ "flower",
+ "ip_proto udp",
+ "src_port", multi_port ? "8-10" : "9",
+ "action drop");
+ return 0;
+
+fail_delete_qdisc:
+ SYS_NOFAIL("tc qdisc del dev lo ingress");
+fail:
+ return 1;
+}
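
For reference, with pf == PF_INET and multi_port == false the two SYS() calls above expand to:

	tc qdisc add dev lo ingress
	tc filter add dev lo parent FFFF: protocol ip pref 1337 flower ip_proto udp src_port 9 action drop

With multi_port == true the match becomes "src_port 8-10", and PF_INET6 selects protocol "ipv6" instead.
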
+
+static void remove_filter(void)
+{
+ SYS_NOFAIL("tc filter del dev lo ingress");
+ SYS_NOFAIL("tc qdisc del dev lo ingress");
+}
+
+static int ipv4_setup(void)
+{
+ return set_port_drop(PF_INET, false);
+}
+
+static int ipv6_setup(void)
+{
+ return set_port_drop(PF_INET6, false);
+}
+
+static int port_range_setup(void)
+{
+ return set_port_drop(PF_INET, true);
+}
+
+static int set_addresses(void)
+{
+ SYS(out, "ip -4 addr add %s dev lo", TEST_IPV4);
+ SYS(out_remove_ipv4, "ip -6 addr add %s dev lo", TEST_IPV6);
+ return 0;
+out_remove_ipv4:
+ SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
+out:
+ return -1;
+}
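
With the defaults above, set_addresses() runs "ip -4 addr add 192.168.0.1/32 dev lo" followed by "ip -6 addr add 100::a/128 dev lo", and unset_addresses() below removes both with the matching "del" commands.
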
+
+static void unset_addresses(void)
+{
+ SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
+ SYS_NOFAIL("ip -6 addr del %s dev lo", TEST_IPV6);
+}
+
+static int ipip_setup(void)
+{
+ if (!ASSERT_OK(set_addresses(), "configure addresses"))
+ return -1;
+ if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
+ goto out_unset_addresses;
+ SYS(out_remove_filter,
+ "ip link add ipip_test type ipip remote %s local %s dev lo",
+ TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
+ SYS(out_clean_netif, "ip link set ipip_test up");
+ return 0;
+
+out_clean_netif:
+ SYS_NOFAIL("ip link del ipip_test");
+out_remove_filter:
+ remove_filter();
+out_unset_addresses:
+ unset_addresses();
+ return -1;
+}
+
+static void ipip_shutdown(void)
+{
+ SYS_NOFAIL("ip link del ipip_test");
+ remove_filter();
+ unset_addresses();
+}
+
+static int gre_setup(void)
+{
+ if (!ASSERT_OK(set_addresses(), "configure addresses"))
+ return -1;
+ if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
+ goto out_unset_addresses;
+ SYS(out_remove_filter,
+ "ip link add gre_test type gre remote %s local %s dev lo",
+ TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
+ SYS(out_clean_netif, "ip link set gre_test up");
+ return 0;
+
+out_clean_netif:
+ SYS_NOFAIL("ip link del ipip_test");
+out_remove_filter:
+ remove_filter();
+out_unset_addresses:
+ unset_addresses();
+ return -1;
+}
+
+static void gre_shutdown(void)
+{
+ SYS_NOFAIL("ip link del gre_test");
+ remove_filter();
+ unset_addresses();
+}
+
+static const struct test_configuration tests_input[] = {
+ {
+ .name = "ipv4",
+ .test_setup = ipv4_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+ .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+ },
+ {
+ .name = "ipv4_continue_dissect",
+ .test_setup = ipv4_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DISSECT_CONTINUE,
+ .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT },
+ {
+ .name = "ipip",
+ .test_setup = ipip_setup,
+ .test_teardown = ipip_shutdown,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+ .in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+ .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+ .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+ .cfg_l3_outer = PF_INET,
+ .cfg_encap_proto = IPPROTO_IPIP,
+
+ },
+ {
+ .name = "gre",
+ .test_setup = gre_setup,
+ .test_teardown = gre_shutdown,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+ .in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+ .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+ .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+ .cfg_l3_outer = PF_INET,
+ .cfg_encap_proto = IPPROTO_GRE,
+ },
+ {
+ .name = "port_range",
+ .test_setup = port_range_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 7, 9, 11 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+ .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT },
+ {
+ .name = "ipv6",
+ .test_setup = ipv6_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET6,
+ .in_saddr6 = TEST_IN6_SRC_ADDR_DEFAULT,
+ .in_daddr6 = TEST_IN6_DST_ADDR_DEFAULT
+ },
+};
+
+struct test_ctx {
+ struct bpf_flow *skel;
+ struct netns_obj *ns;
+ int prog_fd;
+};
+
+static int test_global_init(struct test_ctx *ctx)
+{
+ int err;
+
+ ctx->skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(ctx->skel, "open and load flow_dissector"))
+ return -1;
+
+ ctx->ns = netns_new("flow_dissector_classification", true);
+ if (!ASSERT_OK_PTR(ctx->ns, "switch ns"))
+ goto out_destroy_skel;
+
+ err = write_sysctl("/proc/sys/net/ipv4/conf/default/rp_filter", "0");
+ err |= write_sysctl("/proc/sys/net/ipv4/conf/all/rp_filter", "0");
+ err |= write_sysctl("/proc/sys/net/ipv4/conf/lo/rp_filter", "0");
+ if (!ASSERT_OK(err, "configure net tunables"))
+ goto out_clean_ns;
+
+ ctx->prog_fd = attach_and_configure_program(ctx->skel);
+ if (!ASSERT_OK_FD(ctx->prog_fd, "attach and configure program"))
+ goto out_clean_ns;
+ return 0;
+out_clean_ns:
+ netns_free(ctx->ns);
+out_destroy_skel:
+ bpf_flow__destroy(ctx->skel);
+ return -1;
+}
+
+static void test_global_shutdown(struct test_ctx *ctx)
+{
+ detach_program(ctx->skel, ctx->prog_fd);
+ netns_free(ctx->ns);
+ bpf_flow__destroy(ctx->skel);
+}
+
+void test_flow_dissector_classification(void)
+{
+ struct test_ctx ctx;
+ const struct test_configuration *test;
+ int i;
+
+ if (test_global_init(&ctx))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests_input); i++) {
+ if (!test__start_subtest(tests_input[i].name))
+ continue;
+ test = &tests_input[i];
+ /* All tests are expected to have one working rx port first,
+ * then a port on which rx fails, and finally another working rx port
+ */
+ if (test->test_setup &&
+ !ASSERT_OK(test->test_setup(), "init filter"))
+ continue;
+
+ ASSERT_EQ(run_test(test, 0), TEST_PACKETS_COUNT,
+ "test first port");
+ ASSERT_EQ(run_test(test, 1), 0, "test second port");
+ ASSERT_EQ(run_test(test, 2), TEST_PACKETS_COUNT,
+ "test third port");
+ if (test->test_teardown)
+ test->test_teardown();
+ }
+ test_global_shutdown(&ctx);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c
new file mode 100644
index 000000000000..b7b77a6b2979
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+
+#include "free_timer.skel.h"
+
+struct run_ctx {
+ struct bpf_program *start_prog;
+ struct bpf_program *overwrite_prog;
+ pthread_barrier_t notify;
+ int loop;
+ bool start;
+ bool stop;
+};
+
+static void start_threads(struct run_ctx *ctx)
+{
+ ctx->start = true;
+}
+
+static void stop_threads(struct run_ctx *ctx)
+{
+ ctx->stop = true;
+ /* Guarantee the order between ->stop and ->start */
+ __atomic_store_n(&ctx->start, true, __ATOMIC_RELEASE);
+}
+
+static int wait_for_start(struct run_ctx *ctx)
+{
+ while (!__atomic_load_n(&ctx->start, __ATOMIC_ACQUIRE))
+ usleep(10);
+
+ return ctx->stop;
+}
+
+static void *overwrite_timer_fn(void *arg)
+{
+ struct run_ctx *ctx = arg;
+ int loop, fd, err;
+ cpu_set_t cpuset;
+ long ret = 0;
+
+ /* Pin on CPU 0 */
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+
+ /* Is the thread being stopped? */
+ err = wait_for_start(ctx);
+ if (err)
+ return NULL;
+
+ fd = bpf_program__fd(ctx->overwrite_prog);
+ loop = ctx->loop;
+ while (loop-- > 0) {
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ /* Wait for start thread to complete */
+ pthread_barrier_wait(&ctx->notify);
+
+ /* Overwrite timers */
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err)
+ ret |= 1;
+ else if (opts.retval)
+ ret |= 2;
+
+ /* Notify start thread to start timers */
+ pthread_barrier_wait(&ctx->notify);
+ }
+
+ return (void *)ret;
+}
+
+static void *start_timer_fn(void *arg)
+{
+ struct run_ctx *ctx = arg;
+ int loop, fd, err;
+ cpu_set_t cpuset;
+ long ret = 0;
+
+ /* Pin on CPU 1 */
+ CPU_ZERO(&cpuset);
+ CPU_SET(1, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+
+ /* Is the thread being stopped? */
+ err = wait_for_start(ctx);
+ if (err)
+ return NULL;
+
+ fd = bpf_program__fd(ctx->start_prog);
+ loop = ctx->loop;
+ while (loop-- > 0) {
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ /* Run the prog to start timer */
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err)
+ ret |= 4;
+ else if (opts.retval)
+ ret |= 8;
+
+ /* Notify overwrite thread to do overwrite */
+ pthread_barrier_wait(&ctx->notify);
+
+ /* Wait for overwrite thread to complete */
+ pthread_barrier_wait(&ctx->notify);
+ }
+
+ return (void *)ret;
+}
+
+void test_free_timer(void)
+{
+ struct free_timer *skel;
+ struct bpf_program *prog;
+ struct run_ctx ctx;
+ pthread_t tid[2];
+ void *ret;
+ int err;
+
+ skel = free_timer__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ prog = bpf_object__find_program_by_name(skel->obj, "start_timer");
+ if (!ASSERT_OK_PTR(prog, "find start prog"))
+ goto out;
+ ctx.start_prog = prog;
+
+ prog = bpf_object__find_program_by_name(skel->obj, "overwrite_timer");
+ if (!ASSERT_OK_PTR(prog, "find overwrite prog"))
+ goto out;
+ ctx.overwrite_prog = prog;
+
+ pthread_barrier_init(&ctx.notify, NULL, 2);
+ ctx.loop = 10;
+
+ err = pthread_create(&tid[0], NULL, start_timer_fn, &ctx);
+ if (!ASSERT_OK(err, "create start_timer"))
+ goto out;
+
+ err = pthread_create(&tid[1], NULL, overwrite_timer_fn, &ctx);
+ if (!ASSERT_OK(err, "create overwrite_timer")) {
+ stop_threads(&ctx);
+ goto out;
+ }
+
+ start_threads(&ctx);
+
+ ret = NULL;
+ err = pthread_join(tid[0], &ret);
+ ASSERT_EQ(err | (long)ret, 0, "start_timer");
+ ret = NULL;
+ err = pthread_join(tid[1], &ret);
+ ASSERT_EQ(err | (long)ret, 0, "overwrite_timer");
+out:
+ free_timer__destroy(skel);
+}
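
The barrier choreography above appears intended to force, on each of the ten iterations, roughly this interleaving (see progs/free_timer.c later in this patch for the two programs):

	CPU 1 (start_timer_fn):     test-run start_timer, arming a CPU-pinned
	                            bpf_timer on CPU 1
	        --- pthread_barrier_wait(&ctx.notify) ---
	CPU 0 (overwrite_timer_fn): test-run overwrite_timer, whose map update
	                            overwrites (and frees) the timer while it
	                            may already be firing on CPU 1
	        --- pthread_barrier_wait(&ctx.notify) ---
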
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 66ab1cae923e..e19ef509ebf8 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -397,6 +397,31 @@ cleanup:
kprobe_multi_session_cookie__destroy(skel);
}
+static void test_unique_match(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
+ return;
+
+ opts.unique_match = true;
+ skel->bss->pid = getpid();
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "bpf_fentry_test*", &opts);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ bpf_link__destroy(link);
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "bpf_fentry_test8*", &opts);
+ if (ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_multi__destroy(skel);
+}
+
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
return str_hash((const char *) key);
@@ -765,5 +790,7 @@ void test_kprobe_multi_test(void)
test_session_skel_api();
if (test__start_subtest("session_cookie"))
test_session_cookie_skel_api();
+ if (test__start_subtest("unique_match"))
+ test_unique_match();
RUN_TESTS(kprobe_multi_verifier);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c
index 70d90c43537c..ed8857ae914a 100644
--- a/tools/testing/selftests/bpf/prog_tests/missed.c
+++ b/tools/testing/selftests/bpf/prog_tests/missed.c
@@ -85,6 +85,7 @@ static void test_missed_kprobe_recursion(void)
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test6)), 1, "test6_recursion_misses");
cleanup:
missed_kprobe_recursion__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index 05d0e07da394..ba6b3ec1156a 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -2,7 +2,7 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
-#include <linux/tcp.h>
+#include <netinet/tcp.h>
#include <linux/netlink.h>
#include "sockopt_sk.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 3ee40ee9413a..8a0e1ff8a2dc 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -52,6 +52,8 @@
#include "verifier_map_ptr_mixing.skel.h"
#include "verifier_map_ret_val.skel.h"
#include "verifier_masking.skel.h"
+#include "verifier_may_goto_1.skel.h"
+#include "verifier_may_goto_2.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_movsx.skel.h"
#include "verifier_mtu.skel.h"
@@ -98,6 +100,7 @@
#include "verifier_xdp_direct_packet_access.skel.h"
#include "verifier_bits_iter.skel.h"
#include "verifier_lsm.skel.h"
+#include "irq.skel.h"
#define MAX_ENTRIES 11
@@ -181,6 +184,8 @@ void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); }
void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
void test_verifier_masking(void) { RUN(verifier_masking); }
+void test_verifier_may_goto_1(void) { RUN(verifier_may_goto_1); }
+void test_verifier_may_goto_2(void) { RUN(verifier_may_goto_2); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
@@ -225,6 +230,7 @@ void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
void test_verifier_lsm(void) { RUN(verifier_lsm); }
+void test_irq(void) { RUN(irq); }
void test_verifier_mtu(void) { RUN(verifier_mtu); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 53d6ad8c2257..b2b2d85dbb1b 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -82,6 +82,8 @@ static void test_xdp_adjust_tail_grow2(void)
/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#if defined(__s390x__)
int tailroom = 512;
+#elif defined(__powerpc__)
+ int tailroom = 384;
#else
int tailroom = 320;
#endif
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
index 6d8b54124cb3..fb952703653e 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -17,7 +17,7 @@
#include "network_helpers.h"
#include <linux/if_bonding.h>
#include <linux/limits.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <uapi/linux/netdev.h>
#include "xdp_dummy.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index bad0ea167be7..7dac044664ac 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -7,10 +7,11 @@
#include <linux/if_link.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_endian.h>
#include <uapi/linux/netdev.h>
#include "test_xdp_do_redirect.skel.h"
+#include "xdp_dummy.skel.h"
struct udp_packet {
struct ethhdr eth;
@@ -246,3 +247,166 @@ out:
SYS_NOFAIL("ip netns del testns");
test_xdp_do_redirect__destroy(skel);
}
+
+#define NS_NB 3
+#define NS0 "NS0"
+#define NS1 "NS1"
+#define NS2 "NS2"
+#define IPV4_NETWORK "10.1.1"
+#define VETH1_INDEX 111
+#define VETH2_INDEX 222
+
+struct test_data {
+ struct netns_obj *ns[NS_NB];
+ u32 xdp_flags;
+};
+
+static void cleanup(struct test_data *data)
+{
+ int i;
+
+ for (i = 0; i < NS_NB; i++)
+ netns_free(data->ns[i]);
+}
+
+/**
+ * ping_setup - Create two veth peers and forward packets in-between using XDP
+ *
+ * ------------ ------------
+ * | NS1 | | NS2 |
+ * | veth0 | | veth0 |
+ * | 10.1.1.1 | | 10.1.1.2 |
+ * -----|------ ------|-----
+ * | |
+ * | |
+ * -----|-----------------------|-------
+ * | veth1 veth2 |
+ * | (id:111) (id:222) |
+ * | | | |
+ * | ----- xdp forwarding ----- |
+ * | |
+ * | NS0 |
+ * -------------------------------------
+ */
+static int ping_setup(struct test_data *data)
+{
+ int i;
+
+ data->ns[0] = netns_new(NS0, false);
+ if (!ASSERT_OK_PTR(data->ns[0], "create ns"))
+ return -1;
+
+ for (i = 1; i < NS_NB; i++) {
+ char ns_name[4] = {};
+
+ snprintf(ns_name, 4, "NS%d", i);
+ data->ns[i] = netns_new(ns_name, false);
+ if (!ASSERT_OK_PTR(data->ns[i], "create ns"))
+ goto fail;
+
+ SYS(fail,
+ "ip -n %s link add veth%d index %d%d%d type veth peer name veth0 netns %s",
+ NS0, i, i, i, i, ns_name);
+ SYS(fail, "ip -n %s link set veth%d up", NS0, i);
+
+ SYS(fail, "ip -n %s addr add %s.%d/24 dev veth0", ns_name, IPV4_NETWORK, i);
+ SYS(fail, "ip -n %s link set veth0 up", ns_name);
+ }
+
+ return 0;
+
+fail:
+ cleanup(data);
+ return -1;
+}
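
Spelled out for i == 1 (NS2 is set up analogously), the SYS() calls above create the topology from the diagram:

	ip -n NS0 link add veth1 index 111 type veth peer name veth0 netns NS1
	ip -n NS0 link set veth1 up
	ip -n NS1 addr add 10.1.1.1/24 dev veth0
	ip -n NS1 link set veth0 up
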
+
+static void ping_test(struct test_data *data)
+{
+ struct test_xdp_do_redirect *skel = NULL;
+ struct xdp_dummy *skel_dummy = NULL;
+ struct nstoken *nstoken = NULL;
+ int i, ret;
+
+ skel_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(skel_dummy, "open and load xdp_dummy skeleton"))
+ goto close;
+
+ for (i = 1; i < NS_NB; i++) {
+ char ns_name[4] = {};
+
+ snprintf(ns_name, 4, "NS%d", i);
+ nstoken = open_netns(ns_name);
+ if (!ASSERT_OK_PTR(nstoken, "open ns"))
+ goto close;
+
+ ret = bpf_xdp_attach(if_nametoindex("veth0"),
+ bpf_program__fd(skel_dummy->progs.xdp_dummy_prog),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach dummy_prog"))
+ goto close;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+ }
+
+ skel = test_xdp_do_redirect__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
+ goto close;
+
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "open NS0"))
+ goto close;
+
+ ret = bpf_xdp_attach(VETH2_INDEX,
+ bpf_program__fd(skel->progs.xdp_redirect_to_111),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ ret = bpf_xdp_attach(VETH1_INDEX,
+ bpf_program__fd(skel->progs.xdp_redirect_to_222),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "open NS1"))
+ goto close;
+
+ SYS(close, "ping -c 1 %s.2 > /dev/null", IPV4_NETWORK);
+
+close:
+ close_netns(nstoken);
+ xdp_dummy__destroy(skel_dummy);
+ test_xdp_do_redirect__destroy(skel);
+}
+
+
+static void xdp_redirect_ping(u32 xdp_flags)
+{
+ struct test_data data = {};
+
+ if (ping_setup(&data) < 0)
+ return;
+
+ data.xdp_flags = xdp_flags;
+ ping_test(&data);
+ cleanup(&data);
+}
+
+void test_xdp_index_redirect(void)
+{
+ if (test__start_subtest("noflag"))
+ xdp_redirect_ping(0);
+
+ if (test__start_subtest("drvflag"))
+ xdp_redirect_ping(XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("skbflag"))
+ xdp_redirect_ping(XDP_FLAGS_SKB_MODE);
+}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
index e1bf141d3401..3f9146d83d79 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
@@ -3,7 +3,7 @@
#include <network_helpers.h>
#include <bpf/btf.h>
#include <linux/if_link.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <net/if.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index c87ee2bf558c..3d47878ef6bf 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -10,7 +10,7 @@
#include <linux/errqueue.h>
#include <linux/if_link.h>
#include <linux/net_tstamp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
@@ -133,23 +133,6 @@ static void close_xsk(struct xsk *xsk)
munmap(xsk->umem_area, UMEM_SIZE);
}
-static void ip_csum(struct iphdr *iph)
-{
- __u32 sum = 0;
- __u16 *p;
- int i;
-
- iph->check = 0;
- p = (void *)iph;
- for (i = 0; i < sizeof(*iph) / sizeof(*p); i++)
- sum += p[i];
-
- while (sum >> 16)
- sum = (sum & 0xffff) + (sum >> 16);
-
- iph->check = ~sum;
-}
-
static int generate_packet(struct xsk *xsk, __u16 dst_port)
{
struct xsk_tx_metadata *meta;
@@ -192,7 +175,7 @@ static int generate_packet(struct xsk *xsk, __u16 dst_port)
iph->protocol = IPPROTO_UDP;
ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)");
ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)");
- ip_csum(iph);
+ iph->check = build_ip_csum(iph);
udph->source = htons(UDP_SOURCE_PORT);
udph->dest = htons(dst_port);
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
index b7e175cd0af0..b3f77b4561c8 100644
--- a/tools/testing/selftests/bpf/progs/bad_struct_ops.c
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
index 56c764df8196..5d6fc7f01ebb 100644
--- a/tools/testing/selftests/bpf/progs/cb_refs.c
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -2,7 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr *ptr;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
new file mode 100644
index 000000000000..e32b07d802bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 data_end;
+
+SEC("cgroup_skb/ingress")
+int direct_packet_access(struct __sk_buff *skb)
+{
+ data_end = skb->data_end;
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index dfd817d0348c..bd8f15229f5c 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -192,7 +192,7 @@ done:
/* Can't add a dynptr to a map */
SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
int add_dynptr_to_map1(void *ctx)
{
struct bpf_dynptr ptr;
@@ -210,7 +210,7 @@ int add_dynptr_to_map1(void *ctx)
/* Can't add a struct with an embedded dynptr to a map */
SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
int add_dynptr_to_map2(void *ctx)
{
struct test_info x;
@@ -398,7 +398,7 @@ int data_slice_missing_null_check2(void *ctx)
* dynptr argument
*/
SEC("?raw_tp")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid read from stack")
int invalid_helper1(void *ctx)
{
struct bpf_dynptr ptr;
diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c
index 33d3a57bee90..35fec7c75bef 100644
--- a/tools/testing/selftests/bpf/progs/epilogue_exit.c
+++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c
@@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
index 7275dd594de0..153514691ba4 100644
--- a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
+++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
@@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
index fe0f3fa5aab6..8a0fdff89927 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
int reject_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
@@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
int reject_subprog_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c
index 38034fb82530..02b82774469c 100644
--- a/tools/testing/selftests/bpf/progs/find_vma.c
+++ b/tools/testing/selftests/bpf/progs/find_vma.c
@@ -25,7 +25,7 @@ static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
{
if (vma->vm_file)
bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
- vma->vm_file->f_path.dentry->d_iname);
+ vma->vm_file->f_path.dentry->d_shortname.string);
/* check for VM_EXEC */
if (vma->vm_flags & VM_EXEC)
diff --git a/tools/testing/selftests/bpf/progs/free_timer.c b/tools/testing/selftests/bpf/progs/free_timer.c
new file mode 100644
index 000000000000..4501ae8fc414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/free_timer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_ENTRIES 8
+
+struct map_value {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, MAX_ENTRIES);
+} map SEC(".maps");
+
+static int timer_cb(void *map, void *key, struct map_value *value)
+{
+ volatile int sum = 0;
+ int i;
+
+ bpf_for(i, 0, 1024 * 1024) sum += i;
+
+ return 0;
+}
+
+static int start_cb(int key)
+{
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&map, (void *)&key);
+ if (!value)
+ return 0;
+
+ bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&value->timer, timer_cb);
+ /* Hope 100us is enough for the overwrite thread to wake up and run */
+ bpf_timer_start(&value->timer, 100000, BPF_F_TIMER_CPU_PIN);
+
+ return 0;
+}
+
+static int overwrite_cb(int key)
+{
+ struct map_value zero = {};
+
+ /* Free the timer which may run on other CPU */
+ bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY);
+
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(start_timer)
+{
+ bpf_loop(MAX_ENTRIES, start_cb, NULL, 0);
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(overwrite_timer)
+{
+ bpf_loop(MAX_ENTRIES, overwrite_cb, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c
new file mode 100644
index 000000000000..b0b53d980964
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/irq.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+unsigned long global_flags;
+
+extern void bpf_local_irq_save(unsigned long *) __weak __ksym;
+extern void bpf_local_irq_restore(unsigned long *) __weak __ksym;
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_save_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_save(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_restore_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_restore(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+static __noinline void local_irq_save(unsigned long *flags)
+{
+ bpf_local_irq_save(flags);
+}
+
+static __noinline void local_irq_restore(unsigned long *flags)
+{
+ bpf_local_irq_restore(flags);
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_1_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance_n(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+ return 0;
+}
+
+static __noinline void local_irq_balance(void)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+}
+
+static __noinline void local_irq_balance_n(void)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+}
+
+SEC("?tc")
+__success
+int irq_balance_subprog(struct __sk_buff *ctx)
+{
+ local_irq_balance();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("sleepable helper bpf_copy_from_user#")
+int irq_sleepable_helper(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region")
+int irq_sleepable_kfunc(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+int __noinline global_local_irq_balance(void)
+{
+ local_irq_balance_n();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("global function calls are not allowed with IRQs disabled")
+int irq_global_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_local_irq_balance();
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags3);
+ return 0;
+}
+
+static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2,
+ unsigned long *flags3)
+{
+ local_irq_save(flags1);
+ local_irq_save(flags2);
+ local_irq_save(flags3);
+}
+
+SEC("?tc")
+__success
+int irq_restore_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_4_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+ unsigned long flags4;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_save(&flags4);
+ bpf_local_irq_restore(&flags4);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags = 0xfaceb00c;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected uninitialized")
+int irq_save_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=1")
+int irq_save_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+ /* Ensure same sized slot has st->ref_obj_id set, so we reject based on
+ * slot_type != STACK_IRQ_FLAG...
+ */
+ _Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size");
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_save((unsigned long *)&it);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ flags = 0xdeadbeef;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite_partial(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ *(((char *)&flags) + 1) = 0xff;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_refs_array(struct __sk_buff *ctx)
+{
+ unsigned long flags[4];
+ struct { int i; } *p;
+
+ /* refs=1 */
+ bpf_local_irq_save(&flags[0]);
+
+ /* refs=1,2 */
+ p = bpf_obj_new(typeof(*p));
+ if (!p) {
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+ }
+
+ /* refs=1,2,3 */
+ bpf_local_irq_save(&flags[1]);
+
+ /* refs=1,2,3,4 */
+ bpf_local_irq_save(&flags[2]);
+
+ /* Now when we remove ref=2, the verifier must not break the ordering in
+ * the refs array between 1,3,4. With an older implementation, the
+ * verifier would swap the last element with the removed element, but to
+ * maintain the stack property we need to use memmove.
+ */
+ bpf_obj_drop(p);
+
+ /* Save and restore to reset active_irq_id to 3, as the ordering is now
+ * refs=1,4,3. When restoring the linear scan will find prev_id in order
+ * as 3 instead of 4.
+ */
+ bpf_local_irq_save(&flags[3]);
+ bpf_local_irq_restore(&flags[3]);
+
+ /* With the incorrect implementation, we can release flags[1], flags[2],
+ * and flags[0], i.e. in the wrong order.
+ */
+ bpf_local_irq_restore(&flags[1]);
+ bpf_local_irq_restore(&flags[2]);
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
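The comments in irq_ooo_refs_array refer to how the verifier drops an entry from its acquired-reference array. A rough sketch of the two removal strategies mentioned there (generic C, not the verifier's actual data structures): swap-with-last reorders the remaining ids, while memmove preserves the stack-like order that the out-of-order checks depend on.

    #include <string.h>

    /* Remove index i from an array of n reference ids and return the new
     * count. With ids = {1, 2, 3, 4} and i == 1:
     *   - remove_swap() yields {1, 4, 3}, breaking the acquisition order;
     *   - remove_memmove() yields {1, 3, 4}, keeping it intact.
     */
    static int remove_swap(int *ids, int n, int i)
    {
            ids[i] = ids[n - 1];    /* O(1), but reorders the tail */
            return n - 1;
    }

    static int remove_memmove(int *ids, int n, int i)
    {
            memmove(&ids[i], &ids[i + 1], (n - i - 1) * sizeof(ids[0]));
            return n - 1;           /* preserves stack-like ordering */
    }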
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 7c969c127573..190822b2f08b 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -524,11 +524,11 @@ int iter_subprog_iters(const void *ctx)
}
struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, int);
__uint(max_entries, 1000);
-} arr_map SEC(".maps");
+} hash_map SEC(".maps");
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
@@ -539,7 +539,7 @@ int iter_err_too_permissive1(const void *ctx)
MY_PID_GUARD();
- map_val = bpf_map_lookup_elem(&arr_map, &key);
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
if (!map_val)
return 0;
@@ -561,12 +561,12 @@ int iter_err_too_permissive2(const void *ctx)
MY_PID_GUARD();
- map_val = bpf_map_lookup_elem(&arr_map, &key);
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
if (!map_val)
return 0;
bpf_repeat(1000000) {
- map_val = bpf_map_lookup_elem(&arr_map, &key);
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
}
*map_val = 123;
@@ -585,7 +585,7 @@ int iter_err_too_permissive3(const void *ctx)
MY_PID_GUARD();
bpf_repeat(1000000) {
- map_val = bpf_map_lookup_elem(&arr_map, &key);
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
found = true;
}
@@ -606,7 +606,7 @@ int iter_tricky_but_fine(const void *ctx)
MY_PID_GUARD();
bpf_repeat(1000000) {
- map_val = bpf_map_lookup_elem(&arr_map, &key);
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
if (map_val) {
found = true;
break;
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
index df1d3db60b1b..9e4b45201e69 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -4,7 +4,7 @@
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
index f9789e668297..82190d79de37 100644
--- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c
+++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
static struct prog_test_ref_kfunc __kptr *v;
long total_sum = -1;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
index 7632d9ecb253..b9670e9a6e3d 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_destructive_test(void)
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
index 08fae306539c..a1963497f0bf 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
struct syscall_test_args {
__u8 data[16];
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
index d532af07decf..48f64827cd93 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_fail(struct __sk_buff *ctx)
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index f502f755f567..8b86113a0126 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
index 2380c75e74ce..8e150e85b50d 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
extern const int bpf_prog_active __ksym;
int active_res = -1;
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index b092a72b2c9d..d736506a4c80 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -6,7 +6,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "../bpf_experimental.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
struct plain_local;
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index ab0ce1d01a4a..edaba481db9d 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -2,7 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 450bb373b179..4c0ff01f1a96 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -4,7 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
char buf[8];
@@ -345,7 +345,7 @@ int reject_indirect_global_func_access(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("Unreleased reference id=5 alloc_insn=")
+__failure __msg("Unreleased reference id=4 alloc_insn=")
int kptr_xchg_ref_state(struct __sk_buff *ctx)
{
struct prog_test_ref_kfunc *p;
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c
index 7f9ef701f5de..51a4fe64c917 100644
--- a/tools/testing/selftests/bpf/progs/missed_kprobe.c
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c
@@ -2,7 +2,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
index 8ea71cbd6c45..29c18d869ec1 100644
--- a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
@@ -2,7 +2,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
@@ -46,3 +46,9 @@ int test5(struct pt_regs *ctx)
{
return 0;
}
+
+SEC("kprobe.session/bpf_kfunc_common_test")
+int test6(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c
index 8e521a21d995..49ad7b9adf56 100644
--- a/tools/testing/selftests/bpf/progs/nested_acquire.c
+++ b/tools/testing/selftests/bpf/progs/nested_acquire.c
@@ -4,7 +4,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
index 885377e83607..6c5797bf0ead 100644
--- a/tools/testing/selftests/bpf/progs/preempt_lock.c
+++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
@@ -5,8 +5,10 @@
#include "bpf_misc.h"
#include "bpf_experimental.h"
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
+
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@@ -14,7 +16,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@@ -23,7 +25,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@@ -33,7 +35,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@@ -55,7 +57,7 @@ static __noinline void preempt_enable(void)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@@ -63,7 +65,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@@ -72,7 +74,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@@ -113,6 +115,18 @@ int preempt_sleepable_helper(void *ctx)
return 0;
}
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region")
+int preempt_sleepable_kfunc(void *ctx)
+{
+ u32 data;
+
+ bpf_preempt_disable();
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ bpf_preempt_enable();
+ return 0;
+}
+
int __noinline preempt_global_subprog(void)
{
preempt_balance_subprog();
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c
index 44bc3f06b4b6..d97d6e07ef5c 100644
--- a/tools/testing/selftests/bpf/progs/pro_epilogue.c
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c
@@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
index 3529e53be355..6048d79be48b 100644
--- a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
@@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_addr_kern.c b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
index 8386bb15ccdc..84ad515eafd6 100644
--- a/tools/testing/selftests/bpf/progs/sock_addr_kern.c
+++ b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Google LLC */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("syscall")
int init_sock(struct init_sock_args *args)
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
index d7fdcabe7d90..284a5b008e0c 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
index 3c822103bd40..d8cc99f5c2e2 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
index b450f72e744a..ccab3935aa42 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
index 6283099ec383..8b5515f4f724 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
index 4c56d4a9d9f4..71c420c3a5a6 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_module.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
index 9efcc6e4d356..5b23ea817f1f 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
index fa2021388485..5d0937fa07be 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
index 8ea57e5348ab..0e4d2ff63ab8 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
index 1f55ec4cee37..58d5d8dc2235 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
index f2f300d50988..31e58389bb8b 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
index 0f4dfb770c32..b698cc62a371 100644
--- a/tools/testing/selftests/bpf/progs/syscall.c
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -76,9 +76,9 @@ static int btf_load(void)
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
- .type_len = sizeof(__u32) * 8,
- .str_off = sizeof(__u32) * 8,
- .str_len = sizeof(__u32),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
},
.types = {
/* long */
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 683c8aaa63da..f344c6835e84 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -15,7 +15,7 @@
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
index 233b089d1fba..eb55cb8a3dbd 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE
* friendly version breaks compilation for older clang versions <= 15
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index 464515b824b9..d0f7670351e5 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -15,7 +15,7 @@
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
index 6afa834756e9..fac33a14f200 100644
--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -6,13 +6,20 @@
#include <stdbool.h>
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak;
+extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak;
+extern bool CONFIG_PPC64 __kconfig __weak;
-/* This function is here to have CONFIG_X86_KERNEL_IBT
- * used and added to object BTF.
+/* This function is here to have CONFIG_X86_KERNEL_IBT,
+ * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE,
+ * CONFIG_PPC64 used and added to object BTF.
*/
int unused(void)
{
- return CONFIG_X86_KERNEL_IBT ? 0 : 1;
+ return CONFIG_X86_KERNEL_IBT ||
+ CONFIG_PPC_FTRACE_OUT_OF_LINE ||
+ CONFIG_KPROBES_ON_FTRACE ||
+ CONFIG_PPC64 ? 0 : 1;
}
SEC("kprobe")
diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
index 5da001ca57a5..09d027bd3ea8 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func10.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -26,7 +26,7 @@ __noinline int foo(const struct Big *big)
}
SEC("cgroup_skb/ingress")
-__failure __msg("invalid indirect access to stack")
+__failure __msg("invalid read from stack")
int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
index 7ac7e1de34d8..0ad1bf1ede8d 100644
--- a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -4,7 +4,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_dynptr_nullable_test1(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index cc1a012d038f..fb07f5773888 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
__u32 raw_tp_read_sz = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
index bba3e37f749b..39ff06f2c834 100644
--- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
@@ -3,7 +3,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
#include "bpf_misc.h"
SEC("tp_btf/bpf_testmod_test_nullable_bare")
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 81bb38d72ced..dc74d8cf9e3f 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -10,6 +10,8 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#if defined(__TARGET_ARCH_s390)
int tailroom = 512;
+#elif defined(__TARGET_ARCH_powerpc)
+ int tailroom = 384;
#else
int tailroom = 320;
#endif
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
index 3abf068b8446..5928ed0911ca 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c
@@ -98,6 +98,18 @@ int xdp_count_pkts(struct xdp_md *xdp)
return XDP_DROP;
}
+SEC("xdp")
+int xdp_redirect_to_111(struct xdp_md *xdp)
+{
+ return bpf_redirect(111, 0);
+}
+
+SEC("xdp")
+int xdp_redirect_to_222(struct xdp_md *xdp)
+{
+ return bpf_redirect(222, 0);
+}
+
SEC("tc")
int tc_count_pkts(struct __sk_buff *skb)
{
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
deleted file mode 100644
index b778cad45485..000000000000
--- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2017 VMware
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-SEC("redirect_to_111")
-int xdp_redirect_to_111(struct xdp_md *xdp)
-{
- return bpf_redirect(111, 0);
-}
-SEC("redirect_to_222")
-int xdp_redirect_to_222(struct xdp_md *xdp)
-{
- return bpf_redirect(222, 0);
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c
index 8a403470e557..046a204c8fc6 100644
--- a/tools/testing/selftests/bpf/progs/uninit_stack.c
+++ b/tools/testing/selftests/bpf/progs/uninit_stack.c
@@ -70,7 +70,8 @@ __naked int helper_uninit_to_misc(void *ctx)
r1 = r10; \
r1 += -128; \
r2 = 32; \
- call %[bpf_trace_printk]; \
+ r3 = 0; \
+ call %[bpf_probe_read_user]; \
/* Call to dummy() forces print_verifier_state(..., true), \
* thus showing the stack state, matched by __msg(). \
*/ \
@@ -79,7 +80,7 @@ __naked int helper_uninit_to_misc(void *ctx)
exit; \
"
:
- : __imm(bpf_trace_printk),
+ : __imm(bpf_probe_read_user),
__imm(dummy)
: __clobber_all);
}
diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c
index 9180365a3568..8aa2e0dd624e 100644
--- a/tools/testing/selftests/bpf/progs/unsupported_ops.c
+++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c
@@ -4,7 +4,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c
index 4195aa824ba5..29eb9568633f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_array_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c
@@ -29,6 +29,20 @@ struct {
} map_array_wo SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, struct test_val);
+} map_array_pcpu SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, struct test_val);
+} map_array SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
@@ -525,4 +539,178 @@ l0_%=: exit; \
: __clobber_all);
}
+SEC("socket")
+__description("valid map access into an array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int an_array_with_a_constant_no_nullness(void)
+{
+ /* Need 8-byte alignment for spill tracking */
+ __u32 __attribute__((aligned(8))) key = 1;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("valid multiple map access into an array using constant without nullness")
+__success __retval(8) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}")
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int multiple_array_with_a_constant_no_nullness(void)
+{
+ __u32 __attribute__((aligned(8))) key = 1;
+ __u32 __attribute__((aligned(8))) key2 = 0;
+ struct test_val *val, *val2;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ val2 = bpf_map_lookup_elem(&map_array, &key2);
+ val2->index = offsetof(struct test_val, foo);
+
+ return val->index + val2->index;
+}
+
+SEC("socket")
+__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness")
+__success __retval(4)
+unsigned int an_array_with_a_32bit_constant_0_no_nullness(void)
+{
+ /* Unlike the above tests, 32-bit zeroing is precisely tracked even
+ * if writes are not aligned to BPF_REG_SIZE. This tests that our
+ * STACK_ZERO handling functions.
+ */
+ struct test_val *val;
+ __u32 key = 0;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("valid map access into a pcpu array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int a_pcpu_array_with_a_constant_no_nullness(void)
+{
+ __u32 __attribute__((aligned(8))) key = 1;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array_pcpu, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant without nullness")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void)
+{
+ /* Out of bounds */
+ __u32 __attribute__((aligned(8))) key = 3;
+ struct test_val *val;
+
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant smaller than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_small(void)
+{
+ __u32 __attribute__((aligned(8))) key;
+ struct test_val *val;
+
+ /* Mark entire key as STACK_MISC */
+ bpf_probe_read_user(&key, sizeof(key), NULL);
+
+ /* Spilling only the bottom byte results in a tnum const of 1.
+ * We want to check that the verifier rejects it, as the spill is < 4B.
+ */
+ *(__u8 *)&key = 1;
+ val = bpf_map_lookup_elem(&map_array, &key);
+
+ /* Should fail, as verifier cannot prove in-bound lookup */
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant larger than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_big(void)
+{
+ struct test_val *val;
+ __u64 key = 1;
+
+ /* Even if the constant value is < max_entries, if the spill size is
+ * larger than the key size, the set bits may not be where we expect them
+ * to be on different endian architectures.
+ */
+ val = bpf_map_lookup_elem(&map_array, &key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
+SEC("socket")
+__description("invalid elided lookup using const and non-const key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int mixed_const_and_non_const_key_lookup(void)
+{
+ __u32 __attribute__((aligned(8))) key;
+ struct test_val *val;
+ __u32 rand;
+
+ rand = bpf_get_prandom_u32();
+ key = rand > 42 ? 1 : rand;
+ val = bpf_map_lookup_elem(&map_array, &key);
+
+ return val->index;
+}
+
+SEC("socket")
+__failure __msg("invalid read from stack R2 off=4096 size=4")
+__naked void key_lookup_at_invalid_fp(void)
+{
+ asm volatile (" \
+ r1 = %[map_array] ll; \
+ r2 = r10; \
+ r2 += 4096; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+volatile __u32 __attribute__((aligned(8))) global_key;
+
+SEC("socket")
+__description("invalid elided lookup using non-stack key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int non_stack_key_lookup(void)
+{
+ struct test_val *val;
+
+ global_key = 1;
+ val = bpf_map_lookup_elem(&map_array, (void *)&global_key);
+ val->index = offsetof(struct test_val, foo);
+
+ return val->index;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
index 8d77cc5323d3..fb62e09f2114 100644
--- a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
@@ -28,7 +28,7 @@ __naked void stack_out_of_bounds(void)
SEC("socket")
__description("uninitialized stack1")
__success __log_level(4) __msg("stack depth 8")
-__failure_unpriv __msg_unpriv("invalid indirect read from stack")
+__failure_unpriv __msg_unpriv("invalid read from stack")
__naked void uninitialized_stack1(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index a0bb7fb40ea5..0eb33bb801b5 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1200,4 +1200,138 @@ l0_%=: r0 = 0; \
: __clobber_all);
}
+SEC("tc")
+__description("multiply mixed sign bounds. test 1")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
+__naked void mult_mixed0_sign(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xf;"
+ "r6 -= 1000000000;"
+ "r7 &= 0xf;"
+ "r7 -= 2000000000;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply mixed sign bounds. test 2")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)")
+__naked void mult_mixed1_sign(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xf;"
+ "r6 -= 0xa;"
+ "r7 &= 0xf;"
+ "r7 -= 0x14;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply negative bounds")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
+__naked void mult_sign_bounds(void)
+{
+ asm volatile (
+ "r8 = 0x7fff;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xa;"
+ "r6 -= r8;"
+ "r7 &= 0xf;"
+ "r7 -= r8;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiply bounds that don't cross signed boundary")
+__success __log_level(2)
+__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
+__naked void mult_no_sign_crossing(void)
+{
+ asm volatile (
+ "r6 = 0xb;"
+ "r8 = 0xb3c3f8c99262687 ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= r7;"
+ "r8 *= r6;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiplication overflow, result in unbounded reg. test 1")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__naked void mult_unsign_ovf(void)
+{
+ asm volatile (
+ "r8 = 0x7ffffffffff ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0x7fffffff;"
+ "r7 &= r8;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
+
+SEC("tc")
+__description("multiplication overflow, result in unbounded reg. test 2")
+__success __log_level(2)
+__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__naked void mult_sign_ovf(void)
+{
+ asm volatile (
+ "r8 = 0x7ffffffff ll;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r6 &= 0xa;"
+ "r6 -= r8;"
+ "r7 &= 0x7fffffff;"
+ "r6 *= r7;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_skb_store_bytes)
+ : __clobber_all);
+}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c
index ba8922b2eebd..68c568c3c3a0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_const_or.c
+++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c
@@ -25,7 +25,7 @@ __naked void constant_should_keep_constant_type(void)
SEC("tracepoint")
__description("constant register |= constant should not bypass stack boundary checks")
-__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
+__failure __msg("invalid write to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_1(void)
{
asm volatile (" \
@@ -62,7 +62,7 @@ __naked void register_should_keep_constant_type(void)
SEC("tracepoint")
__description("constant register |= constant register should not bypass stack boundary checks")
-__failure __msg("invalid indirect access to stack R1 off=-48 size=58")
+__failure __msg("invalid write to stack R1 off=-48 size=58")
__naked void not_bypass_stack_boundary_checks_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
index 50c6b22606f6..f2c54e4d89eb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
+++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
@@ -67,7 +67,7 @@ SEC("socket")
__description("helper access to variable memory: stack, bitwise AND, zero included")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
-__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
+__msg_unpriv("invalid read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_bitwise_and_zero_included(void)
{
@@ -100,7 +100,7 @@ __naked void stack_bitwise_and_zero_included(void)
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
-__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
__naked void bitwise_and_jmp_wrong_max(void)
{
asm volatile (" \
@@ -187,7 +187,7 @@ l0_%=: r0 = 0; \
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, bounds + offset")
-__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_bounds_offset(void)
{
asm volatile (" \
@@ -211,7 +211,7 @@ l0_%=: r0 = 0; \
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, wrong max")
-__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
+__failure __msg("invalid write to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_wrong_max(void)
{
asm volatile (" \
@@ -260,7 +260,7 @@ SEC("socket")
__description("helper access to variable memory: stack, JMP, no min check")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
-__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
+__msg_unpriv("invalid read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_jmp_no_min_check(void)
{
@@ -750,7 +750,7 @@ SEC("socket")
__description("helper access to variable memory: 8 bytes leak")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
-__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64")
+__msg_unpriv("invalid read from stack R2 off -64+32 size 64")
__retval(0)
__naked void variable_memory_8_bytes_leak(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
index 5f2efb895edb..59e34d558654 100644
--- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
@@ -96,7 +96,7 @@ __naked void arg_ptr_to_long_misaligned(void)
SEC("cgroup/sysctl")
__description("arg pointer to long size < sizeof(long)")
-__failure __msg("invalid indirect access to stack R4 off=-4 size=8")
+__failure __msg("invalid write to stack R4 off=-4 size=8")
__naked void to_long_size_sizeof_long(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
index 4eaab1468eb7..7d088ba99ea5 100644
--- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
@@ -47,7 +47,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("map in map state pruning")
-__success __msg("processed 26 insns")
+__success __msg("processed 15 insns")
__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void map_in_map_state_pruning(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
new file mode 100644
index 000000000000..e81097c96fe2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+SEC("raw_tp")
+__description("may_goto 0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_simple(void)
+{
+ asm volatile (
+ ".8byte %[may_goto];"
+ "r0 = 1;"
+ ".8byte %[may_goto];"
+ "exit;"
+ :
+ : __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("batch 2 of may_goto 0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_batch_0(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto1];"
+ "r0 = 1;"
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto1];"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/1/0")
+__arch_x86_64
+__xlated("0: r0 = 1")
+__xlated("1: exit")
+__success
+__naked void may_goto_batch_1(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto2];"
+ ".8byte %[may_goto3];"
+ "r0 = 1;"
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto2];"
+ ".8byte %[may_goto3];"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+ __imm_insn(may_goto2, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 1 /* offset */, 0)),
+ __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/0")
+__arch_x86_64
+__xlated("0: *(u64 *)(r10 -8) = 8388608")
+__xlated("1: r11 = *(u64 *)(r10 -8)")
+__xlated("2: if r11 == 0x0 goto pc+3")
+__xlated("3: r11 -= 1")
+__xlated("4: *(u64 *)(r10 -8) = r11")
+__xlated("5: r0 = 1")
+__xlated("6: r0 = 2")
+__xlated("7: exit")
+__success
+__naked void may_goto_batch_2(void)
+{
+ asm volatile (
+ ".8byte %[may_goto1];"
+ ".8byte %[may_goto3];"
+ "r0 = 1;"
+ "r0 = 2;"
+ "exit;"
+ :
+ : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+ __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c
new file mode 100644
index 000000000000..b891faf50660
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+int gvar;
+
+SEC("raw_tp")
+__description("C code with may_goto 0")
+__success
+int may_goto_c_code(void)
+{
+ int i, tmp[3];
+
+ for (i = 0; i < 3 && can_loop; i++)
+ tmp[i] = 0;
+
+ for (i = 0; i < 3 && can_loop; i++)
+ tmp[i] = gvar - i;
+
+ for (i = 0; i < 3 && can_loop; i++)
+ gvar += tmp[i];
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c
index 4ccf1ebc42d1..256956ea1ac5 100644
--- a/tools/testing/selftests/bpf/progs/verifier_mtu.c
+++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c
@@ -8,7 +8,7 @@ SEC("tc/ingress")
__description("uninit/mtu: write rejected")
__success
__caps_unpriv(CAP_BPF|CAP_NET_ADMIN)
-__failure_unpriv __msg_unpriv("invalid indirect read from stack")
+__failure_unpriv __msg_unpriv("invalid read from stack")
int tc_uninit_mtu(struct __sk_buff *ctx)
{
__u32 mtu;
diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
index 7cc83acac727..c689665e07b9 100644
--- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
@@ -236,7 +236,7 @@ __naked void load_bytes_spilled_regs_data(void)
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
-__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
+__failure __msg("invalid write to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
asm volatile (" \
@@ -255,7 +255,7 @@ __naked void load_bytes_invalid_access_1(void)
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
-__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
+__failure __msg("invalid write to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
index 3f679de73229..d9d7b05cf6d2 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -187,7 +187,7 @@ l0_%=: r6 = r0; \
SEC("cgroup/skb")
__description("spin_lock: test6 missing unlock")
-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_spin_lock-ed region")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test6_missing_unlock(void)
{
@@ -530,4 +530,30 @@ l1_%=: exit; \
: __clobber_all);
}
+SEC("tc")
+__description("spin_lock: loop within a locked region")
+__success __failure_unpriv __msg_unpriv("")
+__retval(0)
+int bpf_loop_inside_locked_region(void)
+{
+ const int zero = 0;
+ struct val *val;
+ int i, j = 0;
+
+ val = bpf_map_lookup_elem(&map_spin_lock, &zero);
+ if (!val)
+ return -1;
+
+ bpf_spin_lock(&val->l);
+ bpf_for(i, 0, 10) {
+ j++;
+ /* Silence "unused variable" warnings. */
+ if (j == 10)
+ break;
+ }
+ bpf_spin_unlock(&val->l);
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index 7ea535bfbacd..a4a5e2071604 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -199,7 +199,7 @@ __naked void pass_pointer_to_helper_function(void)
SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
-__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8")
+__msg_unpriv("invalid read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
index c810f4f6f479..1d36d01b746e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_var_off.c
+++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
@@ -203,7 +203,7 @@ __naked void stack_write_clobbers_spilled_regs(void)
SEC("sockops")
__description("indirect variable-offset stack access, unbounded")
-__failure __msg("invalid unbounded variable-offset indirect access to stack R4")
+__failure __msg("invalid unbounded variable-offset write to stack R4")
__naked void variable_offset_stack_access_unbounded(void)
{
asm volatile (" \
@@ -236,7 +236,7 @@ l0_%=: r0 = 0; \
SEC("lwt_in")
__description("indirect variable-offset stack access, max out of bound")
-__failure __msg("invalid variable-offset indirect access to stack R2")
+__failure __msg("invalid variable-offset read from stack R2")
__naked void access_max_out_of_bound(void)
{
asm volatile (" \
@@ -269,7 +269,7 @@ __naked void access_max_out_of_bound(void)
*/
SEC("socket")
__description("indirect variable-offset stack access, zero-sized, max out of bound")
-__failure __msg("invalid variable-offset indirect access to stack R1")
+__failure __msg("invalid variable-offset write to stack R1")
__naked void zero_sized_access_max_out_of_bound(void)
{
asm volatile (" \
@@ -294,7 +294,7 @@ __naked void zero_sized_access_max_out_of_bound(void)
SEC("lwt_in")
__description("indirect variable-offset stack access, min out of bound")
-__failure __msg("invalid variable-offset indirect access to stack R2")
+__failure __msg("invalid variable-offset read from stack R2")
__naked void access_min_out_of_bound(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c
index f8d3ae0c29ae..2f1ba08c293e 100644
--- a/tools/testing/selftests/bpf/progs/wq.c
+++ b/tools/testing/selftests/bpf/progs/wq.c
@@ -5,7 +5,7 @@
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index 25b51a72fe0f..4240211a1900 100644
--- a/tools/testing/selftests/bpf/progs/wq_failures.c
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -5,7 +5,7 @@
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#include "../bpf_testmod/bpf_testmod_kfunc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index 0ed67b6b31dd..238121fda5b6 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -66,7 +66,7 @@ class ArrayParser(BlockParser):
def __init__(self, reader, array_name):
self.array_name = array_name
- self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n')
+ self.start_marker = re.compile(fr'(static )?const bool {self.array_name}\[.*\] = {{\n')
super().__init__(reader)
def search_block(self):
@@ -80,7 +80,7 @@ class ArrayParser(BlockParser):
Parse a block and return data as a dictionary. Items to extract must be
on separate lines in the file.
"""
- pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$')
+ pattern = re.compile(r'\[(BPF_\w*)\]\s*= (true|false),?$')
entries = set()
while True:
line = self.reader.readline()
@@ -178,7 +178,7 @@ class FileExtractor(object):
@enum_name: name of the enum to parse
"""
start_marker = re.compile(f'enum {enum_name} {{\n')
- pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$')
+ pattern = re.compile(r'^\s*(BPF_\w+),?(\s+/\*.*\*/)?$')
end_marker = re.compile('^};')
parser = BlockParser(self.reader)
parser.search_block(start_marker)
@@ -226,8 +226,8 @@ class FileExtractor(object):
 @block_name: name of the block to parse, 'TYPE' in the example
"""
- start_marker = re.compile(f'\*{block_name}\* := {{')
- pattern = re.compile('\*\*([\w/-]+)\*\*')
+ start_marker = re.compile(fr'\*{block_name}\* := {{')
+ pattern = re.compile(r'\*\*([\w/-]+)\*\*')
end_marker = re.compile('}\n')
return self.__get_description_list(start_marker, pattern, end_marker)
@@ -245,8 +245,8 @@ class FileExtractor(object):
 @block_name: name of the block to parse, 'TYPE' in the example
"""
- start_marker = re.compile(f'"\s*{block_name} := {{')
- pattern = re.compile('([\w/]+) [|}]')
+ start_marker = re.compile(fr'"\s*{block_name} := {{')
+ pattern = re.compile(r'([\w/]+) [|}]')
end_marker = re.compile('}')
return self.__get_description_list(start_marker, pattern, end_marker)
@@ -264,8 +264,8 @@ class FileExtractor(object):
@macro: macro starting the block, 'HELP_SPEC_OPTIONS' in the example
"""
- start_marker = re.compile(f'"\s*{macro}\s*" [|}}]')
- pattern = re.compile('([\w-]+) ?(?:\||}[ }\]])')
+ start_marker = re.compile(fr'"\s*{macro}\s*" [|}}]')
+ pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]])')
end_marker = re.compile('}\\\\n')
return self.__get_description_list(start_marker, pattern, end_marker)
@@ -283,8 +283,8 @@ class FileExtractor(object):
 @block_name: name of the block to parse, 'TYPE' in the example
"""
- start_marker = re.compile(f'local {block_name}=\'')
- pattern = re.compile('(?:.*=\')?([\w/]+)')
+ start_marker = re.compile(fr'local {block_name}=\'')
+ pattern = re.compile(r'(?:.*=\')?([\w/]+)')
end_marker = re.compile('\'$')
return self.__get_description_list(start_marker, pattern, end_marker)
@@ -316,7 +316,7 @@ class MainHeaderFileExtractor(SourceFileExtractor):
{'-p', '-d', '--pretty', '--debug', '--json', '-j'}
"""
start_marker = re.compile(f'"OPTIONS :=')
- pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])')
+ pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]"])')
end_marker = re.compile('#define')
parser = InlineListParser(self.reader)
@@ -338,8 +338,8 @@ class ManSubstitutionsExtractor(SourceFileExtractor):
{'-p', '-d', '--pretty', '--debug', '--json', '-j'}
"""
- start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {')
- pattern = re.compile('\*\*([\w/-]+)\*\*')
+ start_marker = re.compile(r'\|COMMON_OPTIONS\| replace:: {')
+ pattern = re.compile(r'\*\*([\w/-]+)\*\*')
end_marker = re.compile('}$')
parser = InlineListParser(self.reader)
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
deleted file mode 100644
index 571cc076dd7d..000000000000
--- a/tools/testing/selftests/bpf/test_flow_dissector.c
+++ /dev/null
@@ -1,780 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Inject packets with all sorts of encapsulation into the kernel.
- *
- * IPv4/IPv6 outer layer 3
- * GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/..
- * IPv4/IPv6 inner layer 3
- */
-
-#define _GNU_SOURCE
-
-#include <stddef.h>
-#include <arpa/inet.h>
-#include <asm/byteorder.h>
-#include <error.h>
-#include <errno.h>
-#include <linux/if_packet.h>
-#include <linux/if_ether.h>
-#include <linux/ipv6.h>
-#include <netinet/ip.h>
-#include <netinet/in.h>
-#include <netinet/udp.h>
-#include <poll.h>
-#include <stdbool.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#define CFG_PORT_INNER 8000
-
-/* Add some protocol definitions that do not exist in userspace */
-
-struct grehdr {
- uint16_t unused;
- uint16_t protocol;
-} __attribute__((packed));
-
-struct guehdr {
- union {
- struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 hlen:5,
- control:1,
- version:2;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u8 version:2,
- control:1,
- hlen:5;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- __u8 proto_ctype;
- __be16 flags;
- };
- __be32 word;
- };
-};
-
-static uint8_t cfg_dsfield_inner;
-static uint8_t cfg_dsfield_outer;
-static uint8_t cfg_encap_proto;
-static bool cfg_expect_failure = false;
-static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */
-static int cfg_l3_inner = AF_UNSPEC;
-static int cfg_l3_outer = AF_UNSPEC;
-static int cfg_num_pkt = 10;
-static int cfg_num_secs = 0;
-static char cfg_payload_char = 'a';
-static int cfg_payload_len = 100;
-static int cfg_port_gue = 6080;
-static bool cfg_only_rx;
-static bool cfg_only_tx;
-static int cfg_src_port = 9;
-
-static char buf[ETH_DATA_LEN];
-
-#define INIT_ADDR4(name, addr4, port) \
- static struct sockaddr_in name = { \
- .sin_family = AF_INET, \
- .sin_port = __constant_htons(port), \
- .sin_addr.s_addr = __constant_htonl(addr4), \
- };
-
-#define INIT_ADDR6(name, addr6, port) \
- static struct sockaddr_in6 name = { \
- .sin6_family = AF_INET6, \
- .sin6_port = __constant_htons(port), \
- .sin6_addr = addr6, \
- };
-
-INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER)
-INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0)
-INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0)
-INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0)
-INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0)
-INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0)
-
-INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
-INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
-INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
-INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
-INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
-INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
-
-static unsigned long util_gettime(void)
-{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
-}
-
-static void util_printaddr(const char *msg, struct sockaddr *addr)
-{
- unsigned long off = 0;
- char nbuf[INET6_ADDRSTRLEN];
-
- switch (addr->sa_family) {
- case PF_INET:
- off = __builtin_offsetof(struct sockaddr_in, sin_addr);
- break;
- case PF_INET6:
- off = __builtin_offsetof(struct sockaddr_in6, sin6_addr);
- break;
- default:
- error(1, 0, "printaddr: unsupported family %u\n",
- addr->sa_family);
- }
-
- if (!inet_ntop(addr->sa_family, ((void *) addr) + off, nbuf,
- sizeof(nbuf)))
- error(1, errno, "inet_ntop");
-
- fprintf(stderr, "%s: %s\n", msg, nbuf);
-}
-
-static unsigned long add_csum_hword(const uint16_t *start, int num_u16)
-{
- unsigned long sum = 0;
- int i;
-
- for (i = 0; i < num_u16; i++)
- sum += start[i];
-
- return sum;
-}
-
-static uint16_t build_ip_csum(const uint16_t *start, int num_u16,
- unsigned long sum)
-{
- sum += add_csum_hword(start, num_u16);
-
- while (sum >> 16)
- sum = (sum & 0xffff) + (sum >> 16);
-
- return ~sum;
-}
-
-static void build_ipv4_header(void *header, uint8_t proto,
- uint32_t src, uint32_t dst,
- int payload_len, uint8_t tos)
-{
- struct iphdr *iph = header;
-
- iph->ihl = 5;
- iph->version = 4;
- iph->tos = tos;
- iph->ttl = 8;
- iph->tot_len = htons(sizeof(*iph) + payload_len);
- iph->id = htons(1337);
- iph->protocol = proto;
- iph->saddr = src;
- iph->daddr = dst;
- iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0);
-}
-
-static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
-{
- uint16_t val, *ptr = (uint16_t *)ip6h;
-
- val = ntohs(*ptr);
- val &= 0xF00F;
- val |= ((uint16_t) dsfield) << 4;
- *ptr = htons(val);
-}
-
-static void build_ipv6_header(void *header, uint8_t proto,
- struct sockaddr_in6 *src,
- struct sockaddr_in6 *dst,
- int payload_len, uint8_t dsfield)
-{
- struct ipv6hdr *ip6h = header;
-
- ip6h->version = 6;
- ip6h->payload_len = htons(payload_len);
- ip6h->nexthdr = proto;
- ip6h->hop_limit = 8;
- ipv6_set_dsfield(ip6h, dsfield);
-
- memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr));
- memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr));
-}
-
-static uint16_t build_udp_v4_csum(const struct iphdr *iph,
- const struct udphdr *udph,
- int num_words)
-{
- unsigned long pseudo_sum;
- int num_u16 = sizeof(iph->saddr); /* halfwords: twice byte len */
-
- pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16);
- pseudo_sum += htons(IPPROTO_UDP);
- pseudo_sum += udph->len;
- return build_ip_csum((void *) udph, num_words, pseudo_sum);
-}
-
-static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h,
- const struct udphdr *udph,
- int num_words)
-{
- unsigned long pseudo_sum;
- int num_u16 = sizeof(ip6h->saddr); /* halfwords: twice byte len */
-
- pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16);
- pseudo_sum += htons(ip6h->nexthdr);
- pseudo_sum += ip6h->payload_len;
- return build_ip_csum((void *) udph, num_words, pseudo_sum);
-}
-
-static void build_udp_header(void *header, int payload_len,
- uint16_t dport, int family)
-{
- struct udphdr *udph = header;
- int len = sizeof(*udph) + payload_len;
-
- udph->source = htons(cfg_src_port);
- udph->dest = htons(dport);
- udph->len = htons(len);
- udph->check = 0;
- if (family == AF_INET)
- udph->check = build_udp_v4_csum(header - sizeof(struct iphdr),
- udph, len >> 1);
- else
- udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
- udph, len >> 1);
-}
-
-static void build_gue_header(void *header, uint8_t proto)
-{
- struct guehdr *gueh = header;
-
- gueh->proto_ctype = proto;
-}
-
-static void build_gre_header(void *header, uint16_t proto)
-{
- struct grehdr *greh = header;
-
- greh->protocol = htons(proto);
-}
-
-static int l3_length(int family)
-{
- if (family == AF_INET)
- return sizeof(struct iphdr);
- else
- return sizeof(struct ipv6hdr);
-}
-
-static int build_packet(void)
-{
- int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
- int el3_len = 0;
-
- if (cfg_l3_extra)
- el3_len = l3_length(cfg_l3_extra);
-
- /* calculate header offsets */
- if (cfg_encap_proto) {
- ol3_len = l3_length(cfg_l3_outer);
-
- if (cfg_encap_proto == IPPROTO_GRE)
- ol4_len = sizeof(struct grehdr);
- else if (cfg_encap_proto == IPPROTO_UDP)
- ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
- }
-
- il3_len = l3_length(cfg_l3_inner);
- il4_len = sizeof(struct udphdr);
-
- if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >=
- sizeof(buf))
- error(1, 0, "packet too large\n");
-
- /*
- * Fill packet from inside out, to calculate correct checksums.
- * But create ip before udp headers, as udp uses ip for pseudo-sum.
- */
- memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
- cfg_payload_char, cfg_payload_len);
-
- /* add zero byte for udp csum padding */
- buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0;
-
- switch (cfg_l3_inner) {
- case PF_INET:
- build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
- IPPROTO_UDP,
- in_saddr4.sin_addr.s_addr,
- in_daddr4.sin_addr.s_addr,
- il4_len + cfg_payload_len,
- cfg_dsfield_inner);
- break;
- case PF_INET6:
- build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
- IPPROTO_UDP,
- &in_saddr6, &in_daddr6,
- il4_len + cfg_payload_len,
- cfg_dsfield_inner);
- break;
- }
-
- build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
- cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner);
-
- if (!cfg_encap_proto)
- return il3_len + il4_len + cfg_payload_len;
-
- switch (cfg_l3_outer) {
- case PF_INET:
- build_ipv4_header(buf + el3_len, cfg_encap_proto,
- out_saddr4.sin_addr.s_addr,
- out_daddr4.sin_addr.s_addr,
- ol4_len + il3_len + il4_len + cfg_payload_len,
- cfg_dsfield_outer);
- break;
- case PF_INET6:
- build_ipv6_header(buf + el3_len, cfg_encap_proto,
- &out_saddr6, &out_daddr6,
- ol4_len + il3_len + il4_len + cfg_payload_len,
- cfg_dsfield_outer);
- break;
- }
-
- switch (cfg_encap_proto) {
- case IPPROTO_UDP:
- build_gue_header(buf + el3_len + ol3_len + ol4_len -
- sizeof(struct guehdr),
- cfg_l3_inner == PF_INET ? IPPROTO_IPIP
- : IPPROTO_IPV6);
- build_udp_header(buf + el3_len + ol3_len,
- sizeof(struct guehdr) + il3_len + il4_len +
- cfg_payload_len,
- cfg_port_gue, cfg_l3_outer);
- break;
- case IPPROTO_GRE:
- build_gre_header(buf + el3_len + ol3_len,
- cfg_l3_inner == PF_INET ? ETH_P_IP
- : ETH_P_IPV6);
- break;
- }
-
- switch (cfg_l3_extra) {
- case PF_INET:
- build_ipv4_header(buf,
- cfg_l3_outer == PF_INET ? IPPROTO_IPIP
- : IPPROTO_IPV6,
- extra_saddr4.sin_addr.s_addr,
- extra_daddr4.sin_addr.s_addr,
- ol3_len + ol4_len + il3_len + il4_len +
- cfg_payload_len, 0);
- break;
- case PF_INET6:
- build_ipv6_header(buf,
- cfg_l3_outer == PF_INET ? IPPROTO_IPIP
- : IPPROTO_IPV6,
- &extra_saddr6, &extra_daddr6,
- ol3_len + ol4_len + il3_len + il4_len +
- cfg_payload_len, 0);
- break;
- }
-
- return el3_len + ol3_len + ol4_len + il3_len + il4_len +
- cfg_payload_len;
-}
-
-/* sender transmits encapsulated over RAW or unencap'd over UDP */
-static int setup_tx(void)
-{
- int family, fd, ret;
-
- if (cfg_l3_extra)
- family = cfg_l3_extra;
- else if (cfg_l3_outer)
- family = cfg_l3_outer;
- else
- family = cfg_l3_inner;
-
- fd = socket(family, SOCK_RAW, IPPROTO_RAW);
- if (fd == -1)
- error(1, errno, "socket tx");
-
- if (cfg_l3_extra) {
- if (cfg_l3_extra == PF_INET)
- ret = connect(fd, (void *) &extra_daddr4,
- sizeof(extra_daddr4));
- else
- ret = connect(fd, (void *) &extra_daddr6,
- sizeof(extra_daddr6));
- if (ret)
- error(1, errno, "connect tx");
- } else if (cfg_l3_outer) {
- /* connect to destination if not encapsulated */
- if (cfg_l3_outer == PF_INET)
- ret = connect(fd, (void *) &out_daddr4,
- sizeof(out_daddr4));
- else
- ret = connect(fd, (void *) &out_daddr6,
- sizeof(out_daddr6));
- if (ret)
- error(1, errno, "connect tx");
- } else {
- /* otherwise using loopback */
- if (cfg_l3_inner == PF_INET)
- ret = connect(fd, (void *) &in_daddr4,
- sizeof(in_daddr4));
- else
- ret = connect(fd, (void *) &in_daddr6,
- sizeof(in_daddr6));
- if (ret)
- error(1, errno, "connect tx");
- }
-
- return fd;
-}
-
-/* receiver reads unencapsulated UDP */
-static int setup_rx(void)
-{
- int fd, ret;
-
- fd = socket(cfg_l3_inner, SOCK_DGRAM, 0);
- if (fd == -1)
- error(1, errno, "socket rx");
-
- if (cfg_l3_inner == PF_INET)
- ret = bind(fd, (void *) &in_daddr4, sizeof(in_daddr4));
- else
- ret = bind(fd, (void *) &in_daddr6, sizeof(in_daddr6));
- if (ret)
- error(1, errno, "bind rx");
-
- return fd;
-}
-
-static int do_tx(int fd, const char *pkt, int len)
-{
- int ret;
-
- ret = write(fd, pkt, len);
- if (ret == -1)
- error(1, errno, "send");
- if (ret != len)
- error(1, errno, "send: len (%d < %d)\n", ret, len);
-
- return 1;
-}
-
-static int do_poll(int fd, short events, int timeout)
-{
- struct pollfd pfd;
- int ret;
-
- pfd.fd = fd;
- pfd.events = events;
-
- ret = poll(&pfd, 1, timeout);
- if (ret == -1)
- error(1, errno, "poll");
- if (ret && !(pfd.revents & POLLIN))
- error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents);
-
- return ret;
-}
-
-static int do_rx(int fd)
-{
- char rbuf;
- int ret, num = 0;
-
- while (1) {
- ret = recv(fd, &rbuf, 1, MSG_DONTWAIT);
- if (ret == -1 && errno == EAGAIN)
- break;
- if (ret == -1)
- error(1, errno, "recv");
- if (rbuf != cfg_payload_char)
- error(1, 0, "recv: payload mismatch");
- num++;
- }
-
- return num;
-}
-
-static int do_main(void)
-{
- unsigned long tstop, treport, tcur;
- int fdt = -1, fdr = -1, len, tx = 0, rx = 0;
-
- if (!cfg_only_tx)
- fdr = setup_rx();
- if (!cfg_only_rx)
- fdt = setup_tx();
-
- len = build_packet();
-
- tcur = util_gettime();
- treport = tcur + 1000;
- tstop = tcur + (cfg_num_secs * 1000);
-
- while (1) {
- if (!cfg_only_rx)
- tx += do_tx(fdt, buf, len);
-
- if (!cfg_only_tx)
- rx += do_rx(fdr);
-
- if (cfg_num_secs) {
- tcur = util_gettime();
- if (tcur >= tstop)
- break;
- if (tcur >= treport) {
- fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);
- tx = 0;
- rx = 0;
- treport = tcur + 1000;
- }
- } else {
- if (tx == cfg_num_pkt)
- break;
- }
- }
-
- /* read straggler packets, if any */
- if (rx < tx) {
- tstop = util_gettime() + 100;
- while (rx < tx) {
- tcur = util_gettime();
- if (tcur >= tstop)
- break;
-
- do_poll(fdr, POLLIN, tstop - tcur);
- rx += do_rx(fdr);
- }
- }
-
- fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);
-
- if (fdr != -1 && close(fdr))
- error(1, errno, "close rx");
- if (fdt != -1 && close(fdt))
- error(1, errno, "close tx");
-
- /*
- * success (== 0) only if received all packets
- * unless failure is expected, in which case none must arrive.
- */
- if (cfg_expect_failure)
- return rx != 0;
- else
- return rx != tx;
-}
-
-
-static void __attribute__((noreturn)) usage(const char *filepath)
-{
- fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] "
- "[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] "
- "[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] "
- "[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n",
- filepath);
- exit(1);
-}
-
-static void parse_addr(int family, void *addr, const char *optarg)
-{
- int ret;
-
- ret = inet_pton(family, optarg, addr);
- if (ret == -1)
- error(1, errno, "inet_pton");
- if (ret == 0)
- error(1, 0, "inet_pton: bad string");
-}
-
-static void parse_addr4(struct sockaddr_in *addr, const char *optarg)
-{
- parse_addr(AF_INET, &addr->sin_addr, optarg);
-}
-
-static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg)
-{
- parse_addr(AF_INET6, &addr->sin6_addr, optarg);
-}
-
-static int parse_protocol_family(const char *filepath, const char *optarg)
-{
- if (!strcmp(optarg, "4"))
- return PF_INET;
- if (!strcmp(optarg, "6"))
- return PF_INET6;
-
- usage(filepath);
-}
-
-static void parse_opts(int argc, char **argv)
-{
- int c;
-
- while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) {
- switch (c) {
- case 'd':
- if (cfg_l3_outer == AF_UNSPEC)
- error(1, 0, "-d must be preceded by -o");
- if (cfg_l3_outer == AF_INET)
- parse_addr4(&out_daddr4, optarg);
- else
- parse_addr6(&out_daddr6, optarg);
- break;
- case 'D':
- if (cfg_l3_inner == AF_UNSPEC)
- error(1, 0, "-D must be preceded by -i");
- if (cfg_l3_inner == AF_INET)
- parse_addr4(&in_daddr4, optarg);
- else
- parse_addr6(&in_daddr6, optarg);
- break;
- case 'e':
- if (!strcmp(optarg, "gre"))
- cfg_encap_proto = IPPROTO_GRE;
- else if (!strcmp(optarg, "gue"))
- cfg_encap_proto = IPPROTO_UDP;
- else if (!strcmp(optarg, "bare"))
- cfg_encap_proto = IPPROTO_IPIP;
- else if (!strcmp(optarg, "none"))
- cfg_encap_proto = IPPROTO_IP; /* == 0 */
- else
- usage(argv[0]);
- break;
- case 'f':
- cfg_src_port = strtol(optarg, NULL, 0);
- break;
- case 'F':
- cfg_expect_failure = true;
- break;
- case 'h':
- usage(argv[0]);
- break;
- case 'i':
- if (!strcmp(optarg, "4"))
- cfg_l3_inner = PF_INET;
- else if (!strcmp(optarg, "6"))
- cfg_l3_inner = PF_INET6;
- else
- usage(argv[0]);
- break;
- case 'l':
- cfg_payload_len = strtol(optarg, NULL, 0);
- break;
- case 'n':
- cfg_num_pkt = strtol(optarg, NULL, 0);
- break;
- case 'o':
- cfg_l3_outer = parse_protocol_family(argv[0], optarg);
- break;
- case 'O':
- cfg_l3_extra = parse_protocol_family(argv[0], optarg);
- break;
- case 'R':
- cfg_only_rx = true;
- break;
- case 's':
- if (cfg_l3_outer == AF_INET)
- parse_addr4(&out_saddr4, optarg);
- else
- parse_addr6(&out_saddr6, optarg);
- break;
- case 'S':
- if (cfg_l3_inner == AF_INET)
- parse_addr4(&in_saddr4, optarg);
- else
- parse_addr6(&in_saddr6, optarg);
- break;
- case 't':
- cfg_num_secs = strtol(optarg, NULL, 0);
- break;
- case 'T':
- cfg_only_tx = true;
- break;
- case 'x':
- cfg_dsfield_outer = strtol(optarg, NULL, 0);
- break;
- case 'X':
- cfg_dsfield_inner = strtol(optarg, NULL, 0);
- break;
- }
- }
-
- if (cfg_only_rx && cfg_only_tx)
- error(1, 0, "options: cannot combine rx-only and tx-only");
-
- if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC)
- error(1, 0, "options: must specify outer with encap");
- else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC)
- error(1, 0, "options: cannot combine no-encap and outer");
- else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC)
- error(1, 0, "options: cannot combine no-encap and extra");
-
- if (cfg_l3_inner == AF_UNSPEC)
- cfg_l3_inner = AF_INET6;
- if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP)
- cfg_encap_proto = IPPROTO_IPV6;
-
- /* RFC 6040 4.2:
- * on decap, if outer encountered congestion (CE == 0x3),
- * but inner cannot encode ECN (NoECT == 0x0), then drop packet.
- */
- if (((cfg_dsfield_outer & 0x3) == 0x3) &&
- ((cfg_dsfield_inner & 0x3) == 0x0))
- cfg_expect_failure = true;
-}
-
-static void print_opts(void)
-{
- if (cfg_l3_inner == PF_INET6) {
- util_printaddr("inner.dest6", (void *) &in_daddr6);
- util_printaddr("inner.source6", (void *) &in_saddr6);
- } else {
- util_printaddr("inner.dest4", (void *) &in_daddr4);
- util_printaddr("inner.source4", (void *) &in_saddr4);
- }
-
- if (!cfg_l3_outer)
- return;
-
- fprintf(stderr, "encap proto: %u\n", cfg_encap_proto);
-
- if (cfg_l3_outer == PF_INET6) {
- util_printaddr("outer.dest6", (void *) &out_daddr6);
- util_printaddr("outer.source6", (void *) &out_saddr6);
- } else {
- util_printaddr("outer.dest4", (void *) &out_daddr4);
- util_printaddr("outer.source4", (void *) &out_saddr4);
- }
-
- if (!cfg_l3_extra)
- return;
-
- if (cfg_l3_outer == PF_INET6) {
- util_printaddr("extra.dest6", (void *) &extra_daddr6);
- util_printaddr("extra.source6", (void *) &extra_saddr6);
- } else {
- util_printaddr("extra.dest4", (void *) &extra_daddr4);
- util_printaddr("extra.source4", (void *) &extra_saddr4);
- }
-
-}
-
-int main(int argc, char **argv)
-{
- parse_opts(argc, argv);
- print_opts();
- return do_main();
-}
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
deleted file mode 100755
index 4b298863797a..000000000000
--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# Load BPF flow dissector and verify it correctly dissects traffic
-
-BPF_FILE="bpf_flow.bpf.o"
-export TESTNAME=test_flow_dissector
-unmount=0
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-msg="skip all tests:"
-if [ $UID != 0 ]; then
- echo $msg please run this as root >&2
- exit $ksft_skip
-fi
-
-# This test needs to be run in a network namespace with in_netns.sh. Check if
-# this is the case and run it with in_netns.sh if it is being run in the root
-# namespace.
-if [[ -z $(ip netns identify $$) ]]; then
- err=0
- if bpftool="$(which bpftool)"; then
- echo "Testing global flow dissector..."
-
- $bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \
- type flow_dissector
-
- if ! unshare --net $bpftool prog attach pinned \
- /sys/fs/bpf/flow/_dissect flow_dissector; then
- echo "Unexpected unsuccessful attach in namespace" >&2
- err=1
- fi
-
- $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \
- flow_dissector
-
- if unshare --net $bpftool prog attach pinned \
- /sys/fs/bpf/flow/_dissect flow_dissector; then
- echo "Unexpected successful attach in namespace" >&2
- err=1
- fi
-
- if ! $bpftool prog detach pinned \
- /sys/fs/bpf/flow/_dissect flow_dissector; then
- echo "Failed to detach flow dissector" >&2
- err=1
- fi
-
- rm -rf /sys/fs/bpf/flow
- else
- echo "Skipping root flow dissector test, bpftool not found" >&2
- fi
-
- # Run the rest of the tests in a net namespace.
- ../net/in_netns.sh "$0" "$@"
- err=$(( $err + $? ))
-
- if (( $err == 0 )); then
- echo "selftests: $TESTNAME [PASS]";
- else
- echo "selftests: $TESTNAME [FAILED]";
- fi
-
- exit $err
-fi
-
-# Determine selftest success via shell exit code
-exit_handler()
-{
- set +e
-
- # Cleanup
- tc filter del dev lo ingress pref 1337 2> /dev/null
- tc qdisc del dev lo ingress 2> /dev/null
- ./flow_dissector_load -d 2> /dev/null
- if [ $unmount -ne 0 ]; then
- umount bpffs 2> /dev/null
- fi
-}
-
-# Exit script immediately (well caught by trap handler) if any
-# program/thing exits with a non-zero status.
-set -e
-
-# (Use 'trap -l' to list meaning of numbers)
-trap exit_handler 0 2 3 6 9
-
-# Mount BPF file system
-if /bin/mount | grep /sys/fs/bpf > /dev/null; then
- echo "bpffs already mounted"
-else
- echo "bpffs not mounted. Mounting..."
- unmount=1
- /bin/mount bpffs /sys/fs/bpf -t bpf
-fi
-
-# Attach BPF program
-./flow_dissector_load -p $BPF_FILE -s _dissect
-
-# Setup
-tc qdisc add dev lo ingress
-echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter
-echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
-echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter
-
-echo "Testing IPv4..."
-# Drops all IP/UDP packets coming from port 9
-tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \
- udp src_port 9 action drop
-
-# Send 10 IPv4/UDP packets from port 8. Filter should not drop any.
-./test_flow_dissector -i 4 -f 8
-# Send 10 IPv4/UDP packets from port 9. Filter should drop all.
-./test_flow_dissector -i 4 -f 9 -F
-# Send 10 IPv4/UDP packets from port 10. Filter should not drop any.
-./test_flow_dissector -i 4 -f 10
-
-echo "Testing IPv4 from 127.0.0.127 (fallback to generic dissector)..."
-# Send 10 IPv4/UDP packets from port 8. Filter should not drop any.
-./test_flow_dissector -i 4 -S 127.0.0.127 -f 8
-# Send 10 IPv4/UDP packets from port 9. Filter should drop all.
-./test_flow_dissector -i 4 -S 127.0.0.127 -f 9 -F
-# Send 10 IPv4/UDP packets from port 10. Filter should not drop any.
-./test_flow_dissector -i 4 -S 127.0.0.127 -f 10
-
-echo "Testing IPIP..."
-# Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 8
-# Send 10 IPv4/IPv4/UDP packets from port 9. Filter should drop all.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 9 -F
-# Send 10 IPv4/IPv4/UDP packets from port 10. Filter should not drop any.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 10
-
-echo "Testing IPv4 + GRE..."
-# Send 10 IPv4/GRE/IPv4/UDP packets from port 8. Filter should not drop any.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 8
-# Send 10 IPv4/GRE/IPv4/UDP packets from port 9. Filter should drop all.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 9 -F
-# Send 10 IPv4/GRE/IPv4/UDP packets from port 10. Filter should not drop any.
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
- -D 192.168.0.1 -S 1.1.1.1 -f 10
-
-tc filter del dev lo ingress pref 1337
-
-echo "Testing port range..."
-# Drops all IP/UDP packets coming from port 8-10
-tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \
- udp src_port 8-10 action drop
-
-# Send 10 IPv4/UDP packets from port 7. Filter should not drop any.
-./test_flow_dissector -i 4 -f 7
-# Send 10 IPv4/UDP packets from port 9. Filter should drop all.
-./test_flow_dissector -i 4 -f 9 -F
-# Send 10 IPv4/UDP packets from port 11. Filter should not drop any.
-./test_flow_dissector -i 4 -f 11
-
-tc filter del dev lo ingress pref 1337
-
-echo "Testing IPv6..."
-# Drops all IPv6/UDP packets coming from port 9
-tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \
- udp src_port 9 action drop
-
-# Send 10 IPv6/UDP packets from port 8. Filter should not drop any.
-./test_flow_dissector -i 6 -f 8
-# Send 10 IPv6/UDP packets from port 9. Filter should drop all.
-./test_flow_dissector -i 6 -f 9 -F
-# Send 10 IPv6/UDP packets from port 10. Filter should not drop any.
-./test_flow_dissector -i 6 -f 10
-
-exit 0
diff --git a/tools/testing/selftests/bpf/bpf_testmod/.gitignore b/tools/testing/selftests/bpf/test_kmods/.gitignore
index ded513777281..ded513777281 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/.gitignore
+++ b/tools/testing/selftests/bpf/test_kmods/.gitignore
diff --git a/tools/testing/selftests/bpf/test_kmods/Makefile b/tools/testing/selftests/bpf/test_kmods/Makefile
new file mode 100644
index 000000000000..d4e50c4509c9
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_kmods/Makefile
@@ -0,0 +1,21 @@
+TEST_KMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= $(abspath $(TEST_KMOD_DIR)/../../../../..)
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+MODULES = bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
+ bpf_test_modorder_y.ko
+
+$(foreach m,$(MODULES),$(eval obj-m += $(m:.ko=.o)))
+
+CFLAGS_bpf_testmod.o = -I$(src)
+
+all:
+ $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) modules
+
+clean:
+ $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) clean
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c
index 0cc747fa912f..0cc747fa912f 100644
--- a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c
diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c
index c627ee085d13..c627ee085d13 100644
--- a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c
index 948eb3962732..948eb3962732 100644
--- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
index aeef86b3da74..aeef86b3da74 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index cc9dde507aba..cc9dde507aba 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
index 356803d1c10e..356803d1c10e 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
index b58817938deb..b58817938deb 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6088d8222d59..c9e745d49493 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1282,6 +1282,21 @@ void crash_handler(int signum)
backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}
+void hexdump(const char *prefix, const void *buf, size_t len)
+{
+ for (int i = 0; i < len; i++) {
+ if (!(i % 16)) {
+ if (i)
+ fprintf(stdout, "\n");
+ fprintf(stdout, "%s", prefix);
+ }
+ if (i && !(i % 8) && (i % 16))
+ fprintf(stdout, "\t");
+ fprintf(stdout, "%02X ", ((uint8_t *)(buf))[i]);
+ }
+ fprintf(stdout, "\n");
+}
+
static void sigint_handler(int signum)
{
int i;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 74de33ae37e5..404d0d4915d5 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -185,6 +185,7 @@ void test__end_subtest(void);
void test__skip(void);
void test__fail(void);
int test__join_cgroup(const char *path);
+void hexdump(const char *prefix, const void *buf, size_t len);
#define PRINT_FAIL(format...) \
({ \
@@ -344,6 +345,20 @@ int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_MEMEQ(actual, expected, len, name) ({ \
+ static int duration = 0; \
+ const void *__act = actual; \
+ const void *__exp = expected; \
+ int __len = len; \
+ bool ___ok = memcmp(__act, __exp, __len) == 0; \
+ CHECK(!___ok, (name), "unexpected memory mismatch\n"); \
+ fprintf(stdout, "actual:\n"); \
+ hexdump("\t", __act, __len); \
+ fprintf(stdout, "expected:\n"); \
+ hexdump("\t", __exp, __len); \
+ ___ok; \
+})
+
#define ASSERT_OK(res, name) ({ \
static int duration = 0; \
long long ___res = (res); \
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 7989ec608454..cb55a908bb0d 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -305,6 +305,7 @@ else
client_connect
verify_data
server_listen
+ wait_for_port ${port} ${netcat_opt}
fi
# serverside, use BPF for decap
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
deleted file mode 100755
index 0746a4fde9d3..000000000000
--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-# Create 2 namespaces with two veth peers, and
-# forward packets in-between using generic XDP
-#
-# NS1(veth11) NS2(veth22)
-# | |
-# | |
-# (veth1, ------ (veth2,
-# id:111) id:222)
-# | xdp forwarding |
-# ------------------
-
-readonly NS1="ns1-$(mktemp -u XXXXXX)"
-readonly NS2="ns2-$(mktemp -u XXXXXX)"
-ret=0
-
-setup()
-{
-
- local xdpmode=$1
-
- ip netns add ${NS1}
- ip netns add ${NS2}
-
- ip link add veth1 index 111 type veth peer name veth11 netns ${NS1}
- ip link add veth2 index 222 type veth peer name veth22 netns ${NS2}
-
- ip link set veth1 up
- ip link set veth2 up
- ip -n ${NS1} link set dev veth11 up
- ip -n ${NS2} link set dev veth22 up
-
- ip -n ${NS1} addr add 10.1.1.11/24 dev veth11
- ip -n ${NS2} addr add 10.1.1.22/24 dev veth22
-}
-
-cleanup()
-{
- ip link del veth1 2> /dev/null
- ip link del veth2 2> /dev/null
- ip netns del ${NS1} 2> /dev/null
- ip netns del ${NS2} 2> /dev/null
-}
-
-test_xdp_redirect()
-{
- local xdpmode=$1
-
- setup
-
- ip link set dev veth1 $xdpmode off &> /dev/null
- if [ $? -ne 0 ];then
- echo "selftests: test_xdp_redirect $xdpmode [SKIP]"
- return 0
- fi
-
- ip -n ${NS1} link set veth11 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null
- ip -n ${NS2} link set veth22 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null
- ip link set dev veth1 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_222 &> /dev/null
- ip link set dev veth2 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_111 &> /dev/null
-
- if ip netns exec ${NS1} ping -c 1 10.1.1.22 &> /dev/null &&
- ip netns exec ${NS2} ping -c 1 10.1.1.11 &> /dev/null; then
- echo "selftests: test_xdp_redirect $xdpmode [PASS]";
- else
- ret=1
- echo "selftests: test_xdp_redirect $xdpmode [FAILED]";
- fi
-
- cleanup
-}
-
-set -e
-trap cleanup 2 3 6 9
-
-test_xdp_redirect xdpgeneric
-test_xdp_redirect xdpdrv
-
-exit $ret
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 7afc2619ab14..18596ae0b0c1 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -2252,7 +2252,7 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 7 },
- .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
+ .errstr_unpriv = "invalid read from stack R2 off -8+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
index f420c0312aa0..4b39f8472f9b 100644
--- a/tools/testing/selftests/bpf/verifier/map_kptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -373,7 +373,7 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_kptr = { 1 },
.result = REJECT,
- .errstr = "Unreleased reference id=5 alloc_insn=20",
+ .errstr = "Unreleased reference id=4 alloc_insn=20",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 15 },
}
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index e12ef953fba8..06af5029885b 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -21,11 +21,20 @@
#include <gelf.h>
#include <float.h>
#include <math.h>
+#include <limits.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
enum stat_id {
VERDICT,
DURATION,
@@ -34,6 +43,11 @@ enum stat_id {
PEAK_STATES,
MAX_STATES_PER_INSN,
MARK_READ_MAX_LEN,
+ SIZE,
+ JITED_SIZE,
+ STACK,
+ PROG_TYPE,
+ ATTACH_TYPE,
FILE_NAME,
PROG_NAME,
@@ -203,7 +217,8 @@ const char argp_program_doc[] =
"\n"
"USAGE: veristat <obj-file> [<obj-file>...]\n"
" OR: veristat -C <baseline.csv> <comparison.csv>\n"
-" OR: veristat -R <results.csv>\n";
+" OR: veristat -R <results.csv>\n"
+" OR: veristat -vl2 <to_analyze.bpf.o>\n";
enum {
OPT_LOG_FIXED = 1000,
@@ -215,7 +230,7 @@ static const struct argp_option opts[] = {
{ "version", 'V', NULL, 0, "Print version" },
{ "verbose", 'v', NULL, 0, "Verbose mode" },
{ "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" },
- { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
+ { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode, 2 for full verification log)" },
{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
{ "top-n", 'n', "N", 0, "Emit only up to first N results." },
@@ -640,19 +655,21 @@ cleanup:
}
static const struct stat_specs default_output_spec = {
- .spec_cnt = 7,
+ .spec_cnt = 8,
.ids = {
FILE_NAME, PROG_NAME, VERDICT, DURATION,
- TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
+ TOTAL_INSNS, TOTAL_STATES, SIZE, JITED_SIZE
},
};
static const struct stat_specs default_csv_output_spec = {
- .spec_cnt = 9,
+ .spec_cnt = 14,
.ids = {
FILE_NAME, PROG_NAME, VERDICT, DURATION,
TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
MAX_STATES_PER_INSN, MARK_READ_MAX_LEN,
+ SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE,
+ STACK,
},
};
@@ -688,6 +705,11 @@ static struct stat_def {
[PEAK_STATES] = { "Peak states", {"peak_states"}, },
[MAX_STATES_PER_INSN] = { "Max states per insn", {"max_states_per_insn"}, },
[MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, },
+ [SIZE] = { "Program size", {"prog_size"}, },
+ [JITED_SIZE] = { "Jited size", {"prog_size_jited"}, },
+ [STACK] = {"Stack depth", {"stack_depth", "stack"}, },
+ [PROG_TYPE] = { "Program type", {"prog_type"}, },
+ [ATTACH_TYPE] = { "Attach type", {"attach_type", }, },
};
static bool parse_stat_id_var(const char *name, size_t len, int *id,
@@ -835,7 +857,8 @@ static char verif_log_buf[64 * 1024];
static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *s)
{
const char *cur;
- int pos, lines;
+ int pos, lines, sub_stack, cnt = 0;
+ char *state = NULL, *token, stack[512];
buf[buf_sz - 1] = '\0';
@@ -853,15 +876,22 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *
if (1 == sscanf(cur, "verification time %ld usec\n", &s->stats[DURATION]))
continue;
- if (6 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld",
+ if (5 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld",
&s->stats[TOTAL_INSNS],
&s->stats[MAX_STATES_PER_INSN],
&s->stats[TOTAL_STATES],
&s->stats[PEAK_STATES],
&s->stats[MARK_READ_MAX_LEN]))
continue;
- }
+ if (1 == sscanf(cur, "stack depth %511s", stack))
+ continue;
+ }
+ while ((token = strtok_r(cnt++ ? NULL : stack, "+", &state))) {
+ if (sscanf(token, "%d", &sub_stack) == 0)
+ break;
+ s->stats[STACK] += sub_stack;
+ }
return 0;
}
@@ -884,7 +914,7 @@ static int line_cnt_cmp(const void *a, const void *b)
const struct line_cnt *b_cnt = (const struct line_cnt *)b;
if (a_cnt->cnt != b_cnt->cnt)
- return a_cnt->cnt < b_cnt->cnt ? -1 : 1;
+ return a_cnt->cnt > b_cnt->cnt ? -1 : 1;
return strcmp(a_cnt->line, b_cnt->line);
}
@@ -1032,6 +1062,41 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name,
return -ESRCH;
}
+/* Make sure only target program is referenced from struct_ops map,
+ * otherwise libbpf would automatically set autocreate for all
+ * referenced programs.
+ * See libbpf.c:bpf_object_adjust_struct_ops_autoload.
+ */
+static void mask_unrelated_struct_ops_progs(struct bpf_object *obj,
+ struct bpf_map *map,
+ struct bpf_program *prog)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ const struct btf_type *t, *mt;
+ struct btf_member *m;
+ int i, moff;
+ size_t data_sz, ptr_sz = sizeof(void *);
+ void *data;
+
+ t = btf__type_by_id(btf, bpf_map__btf_value_type_id(map));
+ if (!btf_is_struct(t))
+ return;
+
+ data = bpf_map__initial_value(map, &data_sz);
+ for (i = 0; i < btf_vlen(t); i++) {
+ m = &btf_members(t)[i];
+ mt = btf__type_by_id(btf, m->type);
+ if (!btf_is_ptr(mt))
+ continue;
+ moff = m->offset / 8;
+ if (moff + ptr_sz > data_sz)
+ continue;
+ if (memcmp(data + moff, &prog, ptr_sz) == 0)
+ continue;
+ memset(data + moff, 0, ptr_sz);
+ }
+}
+
static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename)
{
struct bpf_map *map;
@@ -1047,6 +1112,9 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_CGROUP_STORAGE:
break;
+ case BPF_MAP_TYPE_STRUCT_OPS:
+ mask_unrelated_struct_ops_progs(obj, map, prog);
+ break;
default:
if (bpf_map__max_entries(map) == 0)
bpf_map__set_max_entries(map, 1);
@@ -1146,8 +1214,11 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
char *buf;
int buf_sz, log_level;
struct verif_stats *stats;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
int err = 0;
void *tmp;
+ int fd;
if (!should_process_file_prog(base_filename, bpf_program__name(prog))) {
env.progs_skipped++;
@@ -1196,6 +1267,15 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
stats->file_name = strdup(base_filename);
stats->prog_name = strdup(bpf_program__name(prog));
stats->stats[VERDICT] = err == 0; /* 1 - success, 0 - failure */
+ stats->stats[SIZE] = bpf_program__insn_cnt(prog);
+ stats->stats[PROG_TYPE] = bpf_program__type(prog);
+ stats->stats[ATTACH_TYPE] = bpf_program__expected_attach_type(prog);
+
+ memset(&info, 0, info_len);
+ fd = bpf_program__fd(prog);
+ if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0)
+ stats->stats[JITED_SIZE] = info.jited_prog_len;
+
parse_verif_log(buf, buf_sz, stats);
if (env.verbose) {
@@ -1309,6 +1389,11 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
case PROG_NAME:
cmp = strcmp(s1->prog_name, s2->prog_name);
break;
+ case ATTACH_TYPE:
+ case PROG_TYPE:
+ case SIZE:
+ case JITED_SIZE:
+ case STACK:
case VERDICT:
case DURATION:
case TOTAL_INSNS:
@@ -1523,12 +1608,27 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id,
else
*str = s->stats[VERDICT] ? "success" : "failure";
break;
+ case ATTACH_TYPE:
+ if (!s)
+ *str = "N/A";
+ else
+ *str = libbpf_bpf_attach_type_str(s->stats[ATTACH_TYPE]) ?: "N/A";
+ break;
+ case PROG_TYPE:
+ if (!s)
+ *str = "N/A";
+ else
+ *str = libbpf_bpf_prog_type_str(s->stats[PROG_TYPE]) ?: "N/A";
+ break;
case DURATION:
case TOTAL_INSNS:
case TOTAL_STATES:
case PEAK_STATES:
case MAX_STATES_PER_INSN:
case MARK_READ_MAX_LEN:
+ case STACK:
+ case SIZE:
+ case JITED_SIZE:
*val = s ? s->stats[id] : 0;
break;
default:
@@ -1612,7 +1712,10 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats
case TOTAL_STATES:
case PEAK_STATES:
case MAX_STATES_PER_INSN:
- case MARK_READ_MAX_LEN: {
+ case MARK_READ_MAX_LEN:
+ case SIZE:
+ case JITED_SIZE:
+ case STACK: {
long val;
int err, n;
@@ -1625,6 +1728,42 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats
st->stats[id] = val;
break;
}
+ case PROG_TYPE: {
+ enum bpf_prog_type prog_type = 0;
+ const char *type;
+
+ while ((type = libbpf_bpf_prog_type_str(prog_type))) {
+ if (strcmp(type, str) == 0) {
+ st->stats[id] = prog_type;
+ break;
+ }
+ prog_type++;
+ }
+
+ if (!type) {
+ fprintf(stderr, "Unrecognized prog type %s\n", str);
+ return -EINVAL;
+ }
+ break;
+ }
+ case ATTACH_TYPE: {
+ enum bpf_attach_type attach_type = 0;
+ const char *type;
+
+ while ((type = libbpf_bpf_attach_type_str(attach_type))) {
+ if (strcmp(type, str) == 0) {
+ st->stats[id] = attach_type;
+ break;
+ }
+ attach_type++;
+ }
+
+ if (!type) {
+ fprintf(stderr, "Unrecognized attach type %s\n", str);
+ return -EINVAL;
+ }
+ break;
+ }
default:
fprintf(stderr, "Unrecognized stat #%d\n", id);
return -EINVAL;
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index e38675d9b118..6f7b15d6c6ed 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -27,7 +27,7 @@
#include <linux/errqueue.h>
#include <linux/if_link.h>
#include <linux/net_tstamp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <linux/sockios.h>
#include <linux/if_xdp.h>
#include <sys/mman.h>
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
index 2ab675fecb6b..2f0297657c81 100644
--- a/tools/testing/selftests/damon/.gitignore
+++ b/tools/testing/selftests/damon/.gitignore
@@ -1,6 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-huge_count_read_write
-debugfs_target_ids_read_before_terminate_race
-debugfs_target_ids_pid_leak
access_memory
access_memory_even
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 812f656260fb..ecbf07afc6dd 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -1,15 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for damon selftests
-TEST_GEN_FILES += huge_count_read_write
-TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race
-TEST_GEN_FILES += debugfs_target_ids_pid_leak
TEST_GEN_FILES += access_memory access_memory_even
-TEST_FILES = _chk_dependency.sh _debugfs_common.sh _damon_sysfs.py
+TEST_FILES = _chk_dependency.sh _damon_sysfs.py
# functionality tests
-TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
TEST_PROGS += sysfs.sh
TEST_PROGS += sysfs_update_schemes_tried_regions_wss_estimation.py
TEST_PROGS += damos_quota.py damos_quota_goal.py damos_apply_interval.py
@@ -17,11 +13,6 @@ TEST_PROGS += damos_tried_regions.py damon_nr_regions.py
TEST_PROGS += reclaim.sh lru_sort.sh
# regression tests (reproducers of previously found bugs)
-TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
-TEST_PROGS += debugfs_duplicate_context_creation.sh
-TEST_PROGS += debugfs_rm_non_contexts.sh
-TEST_PROGS += debugfs_target_ids_read_before_terminate_race.sh
-TEST_PROGS += debugfs_target_ids_pid_leak.sh
TEST_PROGS += sysfs_update_removed_scheme_dir.sh
TEST_PROGS += sysfs_update_schemes_tried_regions_hang.py
diff --git a/tools/testing/selftests/damon/config b/tools/testing/selftests/damon/config
index 0daf38974eb0..a68a9fead5dc 100644
--- a/tools/testing/selftests/damon/config
+++ b/tools/testing/selftests/damon/config
@@ -1,6 +1,5 @@
CONFIG_DAMON=y
CONFIG_DAMON_SYSFS=y
-CONFIG_DAMON_DBGFS=y
CONFIG_DAMON_PADDR=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_RECLAIM=y
diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh
deleted file mode 100755
index 902e312bca89..000000000000
--- a/tools/testing/selftests/damon/debugfs_attrs.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test attrs file
-# ===============
-
-file="$DBGFS/attrs"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5" "$orig_content" "valid input"
-test_write_fail "$file" "1 2 3 4" "$orig_content" "no enough fields"
-test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
- "min_nr_regions > max_nr_regions"
-test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
-echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh b/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh
deleted file mode 100755
index bd6c22d96ead..000000000000
--- a/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test duplicated context creation
-# ================================
-
-if ! echo foo > "$DBGFS/mk_contexts"
-then
- echo "context creation failed"
- exit 1
-fi
-
-if echo foo > "$DBGFS/mk_contexts" 2> /dev/null
-then
- echo "duplicate context creation success"
- exit 1
-fi
-
-if ! echo foo > "$DBGFS/rm_contexts"
-then
- echo "context deletion failed"
- exit 1
-fi
-
-exit 0
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
deleted file mode 100755
index effbea33dc16..000000000000
--- a/tools/testing/selftests/damon/debugfs_empty_targets.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test empty targets case
-# =======================
-
-orig_target_ids=$(cat "$DBGFS/target_ids")
-echo "" > "$DBGFS/target_ids"
-
-if [ -f "$DBGFS/monitor_on_DEPRECATED" ]
-then
- monitor_on_file="$DBGFS/monitor_on_DEPRECATED"
-else
- monitor_on_file="$DBGFS/monitor_on"
-fi
-
-orig_monitor_on=$(cat "$monitor_on_file")
-test_write_fail "$monitor_on_file" "on" "orig_monitor_on" "empty target ids"
-echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
deleted file mode 100755
index 922cadac2950..000000000000
--- a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test huge count read write
-# ==========================
-
-dmesg -C
-
-for file in "$DBGFS/"*
-do
- ./huge_count_read_write "$file"
-done
-
-if dmesg | grep -q WARNING
-then
- dmesg
- exit 1
-else
- exit 0
-fi
diff --git a/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh b/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh
deleted file mode 100755
index f3ffeb1343cf..000000000000
--- a/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test putting non-ctx files/dirs to rm_contexts file
-# ===================================================
-
-dmesg -C
-
-for file in "$DBGFS/"*
-do
- (echo "$(basename "$f")" > "$DBGFS/rm_contexts") &> /dev/null
- if dmesg | grep -q BUG
- then
- dmesg
- exit 1
- fi
-done
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
deleted file mode 100755
index 5b39ab44731c..000000000000
--- a/tools/testing/selftests/damon/debugfs_schemes.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
- "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
- "$orig_content" "wrong condition ranges"
-echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
deleted file mode 100755
index 49aeabdb0aae..000000000000
--- a/tools/testing/selftests/damon/debugfs_target_ids.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-source _debugfs_common.sh
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c
deleted file mode 100644
index 0cc2eef7d142..000000000000
--- a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#define _GNU_SOURCE
-
-#include <fcntl.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-#define DBGFS_TARGET_IDS "/sys/kernel/debug/damon/target_ids"
-
-static void write_targetid_exit(void)
-{
- int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR);
- char pid_str[128];
-
- snprintf(pid_str, sizeof(pid_str), "%d", getpid());
- write(target_ids_fd, pid_str, sizeof(pid_str));
- close(target_ids_fd);
- exit(0);
-}
-
-unsigned long msec_timestamp(void)
-{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- return tv.tv_sec * 1000UL + tv.tv_usec / 1000;
-}
-
-int main(int argc, char *argv[])
-{
- unsigned long start_ms;
- int time_to_run, nr_forks = 0;
-
- if (argc != 2) {
- fprintf(stderr, "Usage: %s <msecs to run>\n", argv[0]);
- exit(1);
- }
- time_to_run = atoi(argv[1]);
-
- start_ms = msec_timestamp();
- while (true) {
- int pid = fork();
-
- if (pid < 0) {
- fprintf(stderr, "fork() failed\n");
- exit(1);
- }
- if (pid == 0)
- write_targetid_exit();
- wait(NULL);
- nr_forks++;
-
- if (msec_timestamp() - start_ms > time_to_run)
- break;
- }
- printf("%d\n", nr_forks);
- return 0;
-}
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh
deleted file mode 100755
index 31fe33c2b032..000000000000
--- a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-before=$(grep "^pid " /proc/slabinfo | awk '{print $2}')
-
-nr_leaks=$(./debugfs_target_ids_pid_leak 1000)
-expected_after_max=$((before + nr_leaks / 2))
-
-after=$(grep "^pid " /proc/slabinfo | awk '{print $2}')
-
-echo > /sys/kernel/debug/damon/target_ids
-
-echo "tried $nr_leaks pid leak"
-echo "number of active pid slabs: $before -> $after"
-echo "(up to $expected_after_max expected)"
-if [ $after -gt $expected_after_max ]
-then
- echo "maybe pids are leaking"
- exit 1
-else
- exit 0
-fi
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c
deleted file mode 100644
index b06f52a8ce2d..000000000000
--- a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Author: SeongJae Park <sj@kernel.org>
- */
-#define _GNU_SOURCE
-
-#include <fcntl.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <unistd.h>
-
-#define DBGFS_MONITOR_ON "/sys/kernel/debug/damon/monitor_on_DEPRECATED"
-#define DBGFS_TARGET_IDS "/sys/kernel/debug/damon/target_ids"
-
-static void turn_damon_on_exit(void)
-{
- int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR);
- int monitor_on_fd = open(DBGFS_MONITOR_ON, O_RDWR);
- char pid_str[128];
-
- snprintf(pid_str, sizeof(pid_str), "%d", getpid());
- write(target_ids_fd, pid_str, sizeof(pid_str));
- write(monitor_on_fd, "on\n", 3);
- close(target_ids_fd);
- close(monitor_on_fd);
- usleep(1000);
- exit(0);
-}
-
-static void try_race(void)
-{
- int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR);
- int pid = fork();
- int buf[256];
-
- if (pid < 0) {
- fprintf(stderr, "fork() failed\n");
- exit(1);
- }
- if (pid == 0)
- turn_damon_on_exit();
- while (true) {
- int status;
-
- read(target_ids_fd, buf, sizeof(buf));
- if (waitpid(-1, &status, WNOHANG) == pid)
- break;
- }
- close(target_ids_fd);
-}
-
-static inline uint64_t ts_to_ms(struct timespec *ts)
-{
- return (uint64_t)ts->tv_sec * 1000 + (uint64_t)ts->tv_nsec / 1000000;
-}
-
-int main(int argc, char *argv[])
-{
- struct timespec start_time, now;
- int runtime_ms;
-
- if (argc != 2) {
- fprintf(stderr, "Usage: %s <runtime in ms>\n", argv[0]);
- exit(1);
- }
- runtime_ms = atoi(argv[1]);
- clock_gettime(CLOCK_MONOTONIC, &start_time);
- while (true) {
- try_race();
- clock_gettime(CLOCK_MONOTONIC, &now);
- if (ts_to_ms(&now) - ts_to_ms(&start_time) > runtime_ms)
- break;
- }
- return 0;
-}
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh
deleted file mode 100755
index fc793c4c9aea..000000000000
--- a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-dmesg -C
-
-./debugfs_target_ids_read_before_terminate_race 5000
-
-if dmesg | grep -q dbgfs_target_ids_read
-then
- dmesg
- exit 1
-else
- exit 0
-fi
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
deleted file mode 100644
index 53e69a669668..000000000000
--- a/tools/testing/selftests/damon/huge_count_read_write.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#include <fcntl.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdio.h>
-
-#pragma GCC diagnostic push
-#if __GNUC__ >= 11 && __GNUC_MINOR__ >= 1
-/* Ignore read(2) overflow and write(2) overread compile warnings */
-#pragma GCC diagnostic ignored "-Wstringop-overread"
-#pragma GCC diagnostic ignored "-Wstringop-overflow"
-#endif
-
-void write_read_with_huge_count(char *file)
-{
- int filedesc = open(file, O_RDWR);
- char buf[256];
- int ret;
-
- printf("%s %s\n", __func__, file);
- if (filedesc < 0) {
- fprintf(stderr, "failed opening %s\n", file);
- exit(1);
- }
-
- write(filedesc, "", 0xfffffffful);
- ret = read(filedesc, buf, 0xfffffffful);
- close(filedesc);
-}
-
-#pragma GCC diagnostic pop
-
-int main(int argc, char *argv[])
-{
- if (argc != 2) {
- fprintf(stderr, "Usage: %s <file>\n", argv[0]);
- exit(1);
- }
- write_read_with_huge_count(argv[1]);
-
- return 0;
-}
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
index ca8a7edff3dd..319aaa004c40 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -252,6 +252,7 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
try:
# this targets queue 4, which doesn't exist
ntuple2 = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple2}")
except CmdExitFailure:
pass
else:
@@ -259,7 +260,13 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
# change the table to target queues 0 and 2
ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 1 0")
# ntuple rule therefore targets queues 1 and 3
- ntuple2 = ethtool_create(cfg, "-N", flow)
+ try:
+ ntuple2 = ethtool_create(cfg, "-N", flow)
+ except CmdExitFailure:
+ ksft_pr("Driver does not support rss + queue offset")
+ return
+
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple2}")
# should replace existing filter
ksft_eq(ntuple, ntuple2)
_send_traffic_check(cfg, port, ctx_ref, { 'target': (1, 3),
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index 384cfa3d38a6..92c2f0376c08 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -142,7 +142,7 @@ function pre_ethtool {
}
function check_table {
- local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
+ local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
local -n expected=$2
local last=$3
@@ -212,7 +212,7 @@ function check_tables {
}
function print_table {
- local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
+ local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
read -a have < $path
tree $NSIM_DEV_DFS/
@@ -641,7 +641,7 @@ for port in 0 1; do
NSIM_NETDEV=`get_netdev_name old_netdevs`
ip link set dev $NSIM_NETDEV up
- echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+ echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
msg="1 - create VxLANs v6"
exp0=( 0 0 0 0 )
@@ -663,7 +663,7 @@ for port in 0 1; do
new_geneve gnv0 20000
msg="2 - destroy GENEVE"
- echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+ echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
exp1=( `mke 20000 2` 0 0 0 )
del_dev gnv0
@@ -764,7 +764,7 @@ for port in 0 1; do
msg="create VxLANs v4"
new_vxlan vxlan0 10000 $NSIM_NETDEV
- echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+ echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
check_tables
msg="NIC device goes down"
@@ -775,7 +775,7 @@ for port in 0 1; do
fi
check_tables
- echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+ echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
check_tables
msg="NIC device goes up again"
@@ -789,7 +789,7 @@ for port in 0 1; do
del_dev vxlan0
check_tables
- echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+ echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
check_tables
msg="destroy NIC"
@@ -896,7 +896,7 @@ msg="vacate VxLAN in overflow table"
exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
del_dev vxlan2
-echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
check_tables
msg="tunnels destroyed 2"
diff --git a/tools/testing/selftests/drivers/ntsync/.gitignore b/tools/testing/selftests/drivers/ntsync/.gitignore
new file mode 100644
index 000000000000..848573a3d3ea
--- /dev/null
+++ b/tools/testing/selftests/drivers/ntsync/.gitignore
@@ -0,0 +1 @@
+ntsync
diff --git a/tools/testing/selftests/drivers/ntsync/Makefile b/tools/testing/selftests/drivers/ntsync/Makefile
new file mode 100644
index 000000000000..dbf2b055c0b2
--- /dev/null
+++ b/tools/testing/selftests/drivers/ntsync/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+TEST_GEN_PROGS := ntsync
+
+CFLAGS += $(KHDR_INCLUDES)
+LDLIBS += -lpthread
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/drivers/ntsync/config b/tools/testing/selftests/drivers/ntsync/config
new file mode 100644
index 000000000000..60539c826d06
--- /dev/null
+++ b/tools/testing/selftests/drivers/ntsync/config
@@ -0,0 +1 @@
+CONFIG_NTSYNC=y
diff --git a/tools/testing/selftests/drivers/ntsync/ntsync.c b/tools/testing/selftests/drivers/ntsync/ntsync.c
new file mode 100644
index 000000000000..3aad311574c4
--- /dev/null
+++ b/tools/testing/selftests/drivers/ntsync/ntsync.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Various unit tests for the "ntsync" synchronization primitive driver.
+ *
+ * Copyright (C) 2021-2022 Elizabeth Figura <zfigura@codeweavers.com>
+ */
+
+#define _GNU_SOURCE
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <pthread.h>
+#include <linux/ntsync.h>
+#include "../../kselftest_harness.h"
+
+static int read_sem_state(int sem, __u32 *count, __u32 *max)
+{
+ struct ntsync_sem_args args;
+ int ret;
+
+ memset(&args, 0xcc, sizeof(args));
+ ret = ioctl(sem, NTSYNC_IOC_SEM_READ, &args);
+ *count = args.count;
+ *max = args.max;
+ return ret;
+}
+
+#define check_sem_state(sem, count, max) \
+ ({ \
+ __u32 __count, __max; \
+ int ret = read_sem_state((sem), &__count, &__max); \
+ EXPECT_EQ(0, ret); \
+ EXPECT_EQ((count), __count); \
+ EXPECT_EQ((max), __max); \
+ })
+
+static int release_sem(int sem, __u32 *count)
+{
+ return ioctl(sem, NTSYNC_IOC_SEM_RELEASE, count);
+}
+
+static int read_mutex_state(int mutex, __u32 *count, __u32 *owner)
+{
+ struct ntsync_mutex_args args;
+ int ret;
+
+ memset(&args, 0xcc, sizeof(args));
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &args);
+ *count = args.count;
+ *owner = args.owner;
+ return ret;
+}
+
+#define check_mutex_state(mutex, count, owner) \
+ ({ \
+ __u32 __count, __owner; \
+ int ret = read_mutex_state((mutex), &__count, &__owner); \
+ EXPECT_EQ(0, ret); \
+ EXPECT_EQ((count), __count); \
+ EXPECT_EQ((owner), __owner); \
+ })
+
+static int unlock_mutex(int mutex, __u32 owner, __u32 *count)
+{
+ struct ntsync_mutex_args args;
+ int ret;
+
+ args.owner = owner;
+ args.count = 0xdeadbeef;
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_UNLOCK, &args);
+ *count = args.count;
+ return ret;
+}
+
+static int read_event_state(int event, __u32 *signaled, __u32 *manual)
+{
+ struct ntsync_event_args args;
+ int ret;
+
+ memset(&args, 0xcc, sizeof(args));
+ ret = ioctl(event, NTSYNC_IOC_EVENT_READ, &args);
+ *signaled = args.signaled;
+ *manual = args.manual;
+ return ret;
+}
+
+#define check_event_state(event, signaled, manual) \
+ ({ \
+ __u32 __signaled, __manual; \
+ int ret = read_event_state((event), &__signaled, &__manual); \
+ EXPECT_EQ(0, ret); \
+ EXPECT_EQ((signaled), __signaled); \
+ EXPECT_EQ((manual), __manual); \
+ })
+
+static int wait_objs(int fd, unsigned long request, __u32 count,
+ const int *objs, __u32 owner, int alert, __u32 *index)
+{
+ struct ntsync_wait_args args = {0};
+ struct timespec timeout;
+ int ret;
+
+ clock_gettime(CLOCK_MONOTONIC, &timeout);
+
+ args.timeout = timeout.tv_sec * 1000000000 + timeout.tv_nsec;
+ args.count = count;
+ args.objs = (uintptr_t)objs;
+ args.owner = owner;
+ args.index = 0xdeadbeef;
+ args.alert = alert;
+ ret = ioctl(fd, request, &args);
+ *index = args.index;
+ return ret;
+}
+
+static int wait_any(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
+{
+ return wait_objs(fd, NTSYNC_IOC_WAIT_ANY, count, objs, owner, 0, index);
+}
+
+static int wait_all(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
+{
+ return wait_objs(fd, NTSYNC_IOC_WAIT_ALL, count, objs, owner, 0, index);
+}
+
+static int wait_any_alert(int fd, __u32 count, const int *objs,
+ __u32 owner, int alert, __u32 *index)
+{
+ return wait_objs(fd, NTSYNC_IOC_WAIT_ANY,
+ count, objs, owner, alert, index);
+}
+
+static int wait_all_alert(int fd, __u32 count, const int *objs,
+ __u32 owner, int alert, __u32 *index)
+{
+ return wait_objs(fd, NTSYNC_IOC_WAIT_ALL,
+ count, objs, owner, alert, index);
+}
+
+TEST(semaphore_state)
+{
+ struct ntsync_sem_args sem_args;
+ struct timespec timeout;
+ __u32 count, index;
+ int fd, ret, sem;
+
+ clock_gettime(CLOCK_MONOTONIC, &timeout);
+
+ fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
+ ASSERT_LE(0, fd);
+
+ sem_args.count = 3;
+ sem_args.max = 2;
+ sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
+ EXPECT_EQ(-1, sem);
+ EXPECT_EQ(EINVAL, errno);
+
+ sem_args.count = 2;
+ sem_args.max = 2;
+ sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
+ EXPECT_LE(0, sem);
+ check_sem_state(sem, 2, 2);
+
+ count = 0;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(2, count);
+ check_sem_state(sem, 2, 2);
+
+ count = 1;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOVERFLOW, errno);
+ check_sem_state(sem, 2, 2);
+
+ ret = wait_any(fd, 1, &sem, 123, &index);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, index);
+ check_sem_state(sem, 1, 2);
+
+ ret = wait_any(fd, 1, &sem, 123, &index);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, index);
+ check_sem_state(sem, 0, 2);
+
+ ret = wait_any(fd, 1, &sem, 123, &index);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(ETIMEDOUT, errno);
+
+ count = 3;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOVERFLOW, errno);
+ check_sem_state(sem, 0, 2);
+
+ count = 2;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, count);
+ check_sem_state(sem, 2, 2);
+
+ ret = wait_any(fd, 1, &sem, 123, &index);
+ EXPECT_EQ(0, ret);
+ ret = wait_any(fd, 1, &sem, 123, &index);
+ EXPECT_EQ(0, ret);
+
+ count = 1;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, count);
+ check_sem_state(sem, 1, 2);
+
+ count = ~0u;
+ ret = release_sem(sem, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOVERFLOW, errno);
+ check_sem_state(sem, 1, 2);
+
+ close(sem);
+
+ close(fd);
+}
+
+TEST(mutex_state)
+{
+ struct ntsync_mutex_args mutex_args;
+ __u32 owner, count, index;
+ struct timespec timeout;
+ int fd, ret, mutex;
+
+ clock_gettime(CLOCK_MONOTONIC, &timeout);
+
+ fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
+ ASSERT_LE(0, fd);
+
+ mutex_args.owner = 123;
+ mutex_args.count = 0;
+ mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
+ EXPECT_EQ(-1, mutex);
+ EXPECT_EQ(EINVAL, errno);
+
+ mutex_args.owner = 0;
+ mutex_args.count = 2;
+ mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
+ EXPECT_EQ(-1, mutex);
+ EXPECT_EQ(EINVAL, errno);
+
+ mutex_args.owner = 123;
+ mutex_args.count = 2;
+ mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
+ EXPECT_LE(0, mutex);
+ check_mutex_state(mutex, 2, 123);
+
+ ret = unlock_mutex(mutex, 0, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EINVAL, errno);
+
+ ret = unlock_mutex(mutex, 456, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+ check_mutex_state(mutex, 2, 123);
+
+ ret = unlock_mutex(mutex, 123, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(2, count);
+ check_mutex_state(mutex, 1, 123);
+
+ ret = unlock_mutex(mutex, 123, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(1, count);
+ check_mutex_state(mutex, 0, 0);
+
+ ret = unlock_mutex(mutex, 123, &count);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+
+ ret = wait_any(fd, 1, &mutex, 456, &index);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, index);
+ check_mutex_state(mutex, 1, 456);
+
+ ret = wait_any(fd, 1, &mutex, 456, &index);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, index);
+ check_mutex_state(mutex, 2, 456);
+
+ ret = unlock_mutex(mutex, 456, &count);
+ EXPECT_EQ(0, ret);
+ EXPECT_EQ(2, count);
+ check_mutex_state(mutex, 1, 456);
+
+ ret = wait_any(fd, 1, &mutex, 123, &index);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(ETIMEDOUT, errno);
+
+ owner = 0;
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EINVAL, errno);
+
+ owner = 123;
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EPERM, errno);
+ check_mutex_state(mutex, 1, 456);
+
+ owner = 456;
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
+ EXPECT_EQ(0, ret);
+
+ memset(&mutex_args, 0xcc, sizeof(mutex_args));
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOWNERDEAD, errno);
+ EXPECT_EQ(0, mutex_args.count);
+ EXPECT_EQ(0, mutex_args.owner);
+
+ memset(&mutex_args, 0xcc, sizeof(mutex_args));
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOWNERDEAD, errno);
+ EXPECT_EQ(0, mutex_args.count);
+ EXPECT_EQ(0, mutex_args.owner);
+
+ ret = wait_any(fd, 1, &mutex, 123, &index);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOWNERDEAD, errno);
+ EXPECT_EQ(0, index);
+ check_mutex_state(mutex, 1, 123);
+
+ owner = 123;
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
+ EXPECT_EQ(0, ret);
+
+ memset(&mutex_args, 0xcc, sizeof(mutex_args));
+ ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOWNERDEAD, errno);
+ EXPECT_EQ(0, mutex_args.count);
+ EXPECT_EQ(0, mutex_args.owner);
+
+ ret = wait_any(fd, 1, &mutex, 123, &index);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EOWNERDEAD, errno);
+ EXPECT_EQ(0, index);
+ check_mutex_state(mutex, 1, 123);
+
+ close(mutex);
+
+ mutex_args.owner = 0;
+ mutex_args.count = 0;
+ mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &